diff --git a/cSpell.json b/cSpell.json
index b33d802007..734fc863b6 100644
--- a/cSpell.json
+++ b/cSpell.json
@@ -3,85 +3,91 @@
"language": "en",
"words": [
"Amplication",
+ "Astro",
+ "autocompletions",
"Autoincrementing",
+ "autoscale",
+ "backoff",
"baselined",
"Baselining",
+ "CASL",
"cockroachdb",
"codemod",
"codemods",
+ "Coolify",
"datamodel",
"datasource",
+ "Dittofeed",
+ "Documenso",
+ "Docusign",
+ "Dundring",
+ "Dyrector",
"earlyaccess",
"ecommerce",
"Fastify",
+ "Formbricks",
"fulltext",
+ "ghostfolio",
"Hasura",
+ "healthcheck",
+ "Hitori",
+ "hostable",
"hotfix",
"hotfixing",
"idempotency",
"InnoDB",
"ISAM",
+ "Kysely",
+ "leaderboard",
+ "Letterpad",
"livestream",
"microservices",
"Middlewares",
"MSSQLSERVER",
"neovim",
"Nestjs",
+ "Nuxt",
+ "Openform",
"Overfetching",
"pgbouncer",
"planetscale",
"pooler",
"poolers",
+ "Postico",
+ "Pothos",
"preconfigure",
"preconfigured",
"Quickstart",
+ "Rallly",
+ "Redistributable",
"refactorings",
+ "Replibyte",
"roadmap",
+ "runtimes",
+ "Scholarsome",
+ "serverful",
"signup",
+ "Snaplet",
+ "Solidstart",
"sqlserver",
+ "Streamdal",
+ "substep",
+ "substeps",
+ "Supabase",
+ "Sveltekit",
+ "Tabnine",
"TLDR",
"triaging",
+ "Turso",
+ "typesense",
+ "unikernels",
"upsert",
"upserts",
"upvote",
"walkthrough",
- "healthcheck",
- "runtimes",
- "substep",
- "substeps",
- "Supabase",
- "Redistributable",
- "Nuxt",
- "Sveltekit",
- "Pothos",
- "backoff",
- "Replibyte",
- "Snaplet",
- "Kysely",
- "Turso",
- "autocompletions",
- "Formbricks",
- "Openform",
- "Documenso",
- "Docusign",
- "ghostfolio",
- "Scholarsome",
- "Dittofeed",
"Webstudio",
- "Dyrector",
- "Coolify",
- "hostable",
- "Rallly",
- "Dundring",
- "Letterpad",
- "Hitori",
"Zenstack",
- "Streamdal",
- "leaderboard",
- "typesense",
- "Solidstart",
- "Astro",
- "unikernels",
"Postico",
"CASL",
"serverful",
@@ -118,7 +124,8 @@
"Turborepo",
"Deepgram",
"PGSSLMODE",
- "pgloader"
+ "pgloader",
+ "unikernel"
],
"patterns": [
{
diff --git a/content/100-getting-started/01-quickstart-prismaPostgres.mdx b/content/100-getting-started/01-quickstart-prismaPostgres.mdx
index bd28adde35..64885f0f63 100644
--- a/content/100-getting-started/01-quickstart-prismaPostgres.mdx
+++ b/content/100-getting-started/01-quickstart-prismaPostgres.mdx
@@ -100,7 +100,7 @@ npx prisma studio
## 6. Explore caching with Prisma Accelerate
-The [`src/caching.ts`](https://github.com/prisma/prisma-examples/blob/latest/databases/prisma-postgres/src/caching.ts) script contains a sample query that uses [Stale-While-Revalidate](/accelerate/caching#stale-while-revalidate-swr) (SWR) and [Time-To-Live](/accelerate/caching#time-to-live-ttl) (TTL) to cache a database query using Prisma Accelerate. You can execute it as follows:
+The [`src/caching.ts`](https://github.com/prisma/prisma-examples/blob/latest/databases/prisma-postgres/src/caching.ts) script contains a sample query that uses [Stale-While-Revalidate](/postgres/database/caching#stale-while-revalidate-swr) (SWR) and [Time-To-Live](/postgres/database/caching#time-to-live-ttl) (TTL) to cache a database query using Prisma Accelerate. You can execute it as follows:
```terminal
npm run caching
diff --git a/content/100-getting-started/03-prisma-postgres/110-import-from-existing-database-postgresql.mdx b/content/100-getting-started/03-prisma-postgres/110-import-from-existing-database-postgresql.mdx
index f873f2b4ac..7a23078ca0 100644
--- a/content/100-getting-started/03-prisma-postgres/110-import-from-existing-database-postgresql.mdx
+++ b/content/100-getting-started/03-prisma-postgres/110-import-from-existing-database-postgresql.mdx
@@ -17,7 +17,7 @@ You can accomplish this migration in three steps:
1. Export your existing data via `pg_dump`.
1. Import the previously exported data into Prisma Postgres via `pg_restore`.
-In the third step, you will be using the [TCP tunnel](/postgres/tcp-tunnel) to securely connect to your Prisma Postgres database during to run `pg_restore`.
+In the third step, you will use the [TCP tunnel](/postgres/database/tcp-tunnel) to securely connect to your Prisma Postgres database in order to run `pg_restore`.
## Prerequisites
@@ -155,7 +155,7 @@ Running this command will create a backup file named `db_dump.bak` which you wil
## 3. Import data into Prisma Postgres
-In this step, you'll use the [TCP tunnel](/postgres/tcp-tunnel) to connect to your Prisma Postgres instance and import data via `pg_restore`.
+In this step, you'll use the [TCP tunnel](/postgres/database/tcp-tunnel) to connect to your Prisma Postgres instance and import data via `pg_restore`.
You'll also need the Prisma Postgres connection URL from step 1; it should look similar to this:
@@ -231,7 +231,7 @@ You don't need to provide username and password credentials to this command beca
You now successfully imported the data from your existing PostgreSQL database into Prisma Postgres 🎉
-To validate that the import worked, you can use [Prisma Studio](/postgres/tooling#viewing-and-editing-data-in-prisma-studio). Either open it in the [Platform Console](https://console.prisma.io) by clicking the **Studio** tab in the left-hand sidenav in your project or run this command to launch Prisma Studio locally:
+To validate that the import worked, you can use [Prisma Studio](/postgres/database/tooling#viewing-and-editing-data-in-prisma-studio). Either open it in the [Platform Console](https://console.prisma.io) by clicking the **Studio** tab in the left-hand sidenav in your project or run this command to launch Prisma Studio locally:
```terminal
npx prisma studio
@@ -248,7 +248,7 @@ If you already using Prisma ORM, the only things you need to do are:
#### 4.A.1. Add the Prisma Accelerate extension
-Th Prisma Accelerate extension is [required](/postgres/overview#using-the-client-extension-for-prisma-accelerate-required) when using Prisma Postgres. If you are not currently using Prisma Accelerate with Prisma ORM, go through the following steps to make Prisma ORM work with Prisma Postgres.
+The Prisma Accelerate extension is [required](/postgres/introduction/overview#using-the-client-extension-for-prisma-accelerate-required) when using Prisma Postgres. If you are not currently using Prisma Accelerate with Prisma ORM, go through the following steps to make Prisma ORM work with Prisma Postgres.
First, install the `@prisma/extension-accelerate` package in your project:
diff --git a/content/100-getting-started/03-prisma-postgres/115-import-from-existing-database-mysql.mdx b/content/100-getting-started/03-prisma-postgres/115-import-from-existing-database-mysql.mdx
index a595f1360a..6e1abcb712 100644
--- a/content/100-getting-started/03-prisma-postgres/115-import-from-existing-database-mysql.mdx
+++ b/content/100-getting-started/03-prisma-postgres/115-import-from-existing-database-mysql.mdx
@@ -42,7 +42,7 @@ Once your database was provisioned, find your Prisma Postgres connection URL in
## 2. Connect directly to a Prisma Postgres instance
-In this step, you'll use a secure [TCP tunnel](/postgres/tcp-tunnel) to connect to your Prisma Postgres instance.
+In this step, you'll use a secure [TCP tunnel](/postgres/database/tcp-tunnel) to connect to your Prisma Postgres instance.
You'll need the Prisma Postgres connection URL from [step 1](/getting-started/prisma-postgres/import-from-existing-database-mysql#1-create-a-new-prisma-postgres-database):
@@ -72,7 +72,7 @@ npx @prisma/ppg-tunnel --host 127.0.0.1 --port 5433
:::note
-You can [specify a different host and port](/postgres/tcp-tunnel#customizing-host-and-port) by providing your own host and port values using the `--port` and `--host` flags. Just be sure to use the same host and port values consistently throughout the guide.
+You can [specify a different host and port](/postgres/database/tcp-tunnel#customizing-host-and-port) by providing your own host and port values using the `--port` and `--host` flags. Just be sure to use the same host and port values consistently throughout the guide.
:::
@@ -177,7 +177,7 @@ If you see output like this, it means your data has been successfully exported t
:::note
-You also can use [Prisma Studio](/postgres/tooling#viewing-and-editing-data-in-prisma-studio) and verify whether the migration was successful:
+You can also use [Prisma Studio](/postgres/database/tooling#viewing-and-editing-data-in-prisma-studio) to verify whether the migration was successful:
```terminal
npx prisma studio
diff --git a/content/200-orm/500-reference/200-prisma-cli-reference.mdx b/content/200-orm/500-reference/200-prisma-cli-reference.mdx
index 33b94dfa33..31112e07cc 100644
--- a/content/200-orm/500-reference/200-prisma-cli-reference.mdx
+++ b/content/200-orm/500-reference/200-prisma-cli-reference.mdx
@@ -1377,7 +1377,7 @@ You can find the complete list of available commands with the arguments [here](/
### `mcp`
-Starts the [Prisma MCP server](/postgres/mcp-server).
+Starts the [Prisma MCP server](/postgres/integrations/mcp-server).
## Studio
diff --git a/content/200-orm/500-reference/250-error-reference.mdx b/content/200-orm/500-reference/250-error-reference.mdx
index dcc7673a7b..f1bc4244e2 100644
--- a/content/200-orm/500-reference/250-error-reference.mdx
+++ b/content/200-orm/500-reference/250-error-reference.mdx
@@ -472,13 +472,13 @@ The included usage of the current plan has been exceeded. This can only occur on
#### `P6004` (`QueryTimeout`)
-The global timeout of Accelerate has been exceeded. You can find the limit [here](/accelerate/connection-pooling#query-timeout-limit).
+The global timeout of Accelerate has been exceeded. You can find the limit [here](/postgres/database/connection-pooling#query-timeout-limit).
> Also see the [troubleshooting guide](/accelerate/troubleshoot#p6004-querytimeout) for more information.
#### `P6005` (`InvalidParameters`)
-The user supplied invalid parameters. Currently only relevant for transaction methods. For example, setting a timeout that is too high. You can find the limit [here](/accelerate/connection-pooling#interactive-transactions-query-timeout-limit).
+The user supplied invalid parameters. Currently only relevant for transaction methods. For example, setting a timeout that is too high. You can find the limit [here](/postgres/database/connection-pooling#interactive-transactions-query-timeout-limit).
#### `P6006` (`VersionNotSupported`)
@@ -492,7 +492,7 @@ The engine failed to start. For example, it couldn't establish a connection to t
#### `P6009` (`ResponseSizeLimitExceeded`)
-The global response size limit of Accelerate has been exceeded. You can find the limit [here](/accelerate/connection-pooling#response-size-limit).
+The global response size limit of Accelerate has been exceeded. You can find the limit [here](/postgres/database/connection-pooling#response-size-limit).
> Also see the [troubleshooting guide](/accelerate/troubleshoot#p6009-responsesizelimitexceeded) for more information.
diff --git a/content/200-orm/800-more/350-ai-tools/100-cursor.mdx b/content/200-orm/800-more/350-ai-tools/100-cursor.mdx
index 9d7a06eb14..135ae86643 100644
--- a/content/200-orm/800-more/350-ai-tools/100-cursor.mdx
+++ b/content/200-orm/800-more/350-ai-tools/100-cursor.mdx
@@ -22,7 +22,7 @@ While this guide is focused on Cursor, these patterns should work with any AI ed
## Prisma MCP server
-Prisma provides its own [Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP) server that lets you manage Prisma Postgres databases, model database schemas and chat through migrations. Learn more about how you can add it to Cursor [here](/postgres/mcp-server#cursor).
+Prisma provides its own [Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP) server that lets you manage Prisma Postgres databases, model database schemas and chat through migrations. Learn more about how you can add it to Cursor [here](/postgres/integrations/mcp-server#cursor).
## Defining project-specific rules with `.cursorrules`
diff --git a/content/200-orm/800-more/350-ai-tools/300-windsurf.mdx b/content/200-orm/800-more/350-ai-tools/300-windsurf.mdx
index f712121603..753a5b7915 100644
--- a/content/200-orm/800-more/350-ai-tools/300-windsurf.mdx
+++ b/content/200-orm/800-more/350-ai-tools/300-windsurf.mdx
@@ -22,7 +22,7 @@ While this guide is focused on Windsurf, these patterns should work with any AI
## Prisma MCP server
-Prisma provides its own [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) server that lets you manage Prisma Postgres databases, model database schemas, and even chat through migrations. Learn more about how you can add it to Windsurf [here](/postgres/mcp-server#windsurf).
+Prisma provides its own [Model Context Protocol (MCP)](https://modelcontextprotocol.io/introduction) server that lets you manage Prisma Postgres databases, model database schemas, and even chat through migrations. Learn more about how you can add it to Windsurf [here](/postgres/integrations/mcp-server#windsurf).
## Defining project-specific rules with `.windsurfrules`
diff --git a/content/200-orm/800-more/350-ai-tools/index.mdx b/content/200-orm/800-more/350-ai-tools/index.mdx
index e1e22e5b49..5d1b0e6d75 100644
--- a/content/200-orm/800-more/350-ai-tools/index.mdx
+++ b/content/200-orm/800-more/350-ai-tools/index.mdx
@@ -19,4 +19,4 @@ This will generate a Prisma schema based on the prompt and deploy it to a fresh
## Prisma MCP server
-Prisma provides its own [Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP) server that lets you manage Prisma Postgres databases, model database schemas and chat through migrations. Learn more about it [here](/postgres/mcp-server).
\ No newline at end of file
+Prisma provides its own [Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP) server that lets you manage Prisma Postgres databases, model database schemas and chat through migrations. Learn more about it [here](/postgres/integrations/mcp-server).
\ No newline at end of file
diff --git a/content/250-postgres/200-getting-started.mdx b/content/250-postgres/100-introduction/200-getting-started.mdx
similarity index 100%
rename from content/250-postgres/200-getting-started.mdx
rename to content/250-postgres/100-introduction/200-getting-started.mdx
diff --git a/content/250-postgres/250-overview.mdx b/content/250-postgres/100-introduction/250-overview.mdx
similarity index 99%
rename from content/250-postgres/250-overview.mdx
rename to content/250-postgres/100-introduction/250-overview.mdx
index 0ac8a9e90e..e3abdcba26 100644
--- a/content/250-postgres/250-overview.mdx
+++ b/content/250-postgres/100-introduction/250-overview.mdx
@@ -6,7 +6,6 @@ tocDepth: 3
toc: true
---
-
[Prisma Postgres](https://www.prisma.io/postgres?utm_source=docs) is a managed PostgreSQL database service that easily lets you create a new database, interact with it through Prisma ORM, and build applications that start small and cheap but can scale to millions of users.
It supports the following workflows:
diff --git a/content/250-postgres/100-introduction/_category_.json b/content/250-postgres/100-introduction/_category_.json
new file mode 100644
index 0000000000..671f5df3ac
--- /dev/null
+++ b/content/250-postgres/100-introduction/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Introduction",
+ "collapsible": false,
+ "collapsed": false
+}
diff --git a/content/250-postgres/100-introduction/index.mdx b/content/250-postgres/100-introduction/index.mdx
new file mode 100644
index 0000000000..0664f7201e
--- /dev/null
+++ b/content/250-postgres/100-introduction/index.mdx
@@ -0,0 +1,13 @@
+---
+title: 'Introduction'
+metaTitle: 'Prisma Postgres introduction'
+metaDescription: 'Understand the basics of Prisma Postgres, including key features and how to get started.'
+hide_table_of_contents: true
+---
+
+
+Get familiar with Prisma Postgres and its core concepts. This section covers what Prisma Postgres is and how to begin using it with minimal setup.
+
+## In this section
+
+
diff --git a/content/250-postgres/1000-faq.mdx b/content/250-postgres/1000-faq.mdx
deleted file mode 100644
index d0384f16ed..0000000000
--- a/content/250-postgres/1000-faq.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: 'FAQ'
-metaTitle: 'FAQ | Prisma Postgres'
-metaDescription: 'Learn about the FAQ regarding Prisma Postgres.'
-tocDepth: 3
-toc: true
----
-
-## Does query execution time affect pricing in Prisma Postgres?
-
-No, cost for Prisma Postgres is based solely on the _number of operations_ (i.e. Prisma ORM queries), not the amount of compute required to execute them.
-
-Whether a query takes 10ms or 10sec to execute, its pricing impact remains the same.
-
-## Do read and write queries cost the same?
-
-Yes, read and write queries are counted equally as _operations_ and are billed the same way.
-
-## Does a `SELECT 1` query count as a billable operation?
-
-Yes, if submitted via Prisma ORM, a query like `SELECT 1` counts as an operation and will be billed accordingly (even if no actual data is accessed in the query).
-
-## Can I use Prisma Postgres without Prisma ORM?
-
-Prisma Postgres is designed to be used with Prisma ORM and the Accelerate extension for best performance and scalability.
-
-While it is technically possible to connect via the [TCP tunnel](/postgres/tcp-tunnel), this approach is only intended for _tooling_ purposes, such as command line tools or query editors.
-
-:::warning
-
-We strongly advise against using the TCP tunnel for application-level access. It is not designed for that purpose and using it that way will likely have negative performance implications for your application.
-
-:::
-
-We are planning to enable application-level TCP access in the future so that you can use Prisma Postgres with other ORMs.
\ No newline at end of file
diff --git a/content/250-postgres/1100-integrations/100-netlify.mdx b/content/250-postgres/1100-integrations/100-netlify.mdx
index a529dc5d44..9cbd243363 100644
--- a/content/250-postgres/1100-integrations/100-netlify.mdx
+++ b/content/250-postgres/1100-integrations/100-netlify.mdx
@@ -6,9 +6,6 @@ tocDepth: 3
toc: true
---
-
-## Overview
-
The [Netlify extension for Prisma Postgres](https://www.netlify.com/integrations/prisma) connects your Netlify sites with Prisma Postgres instances. Once connected, the extension will automatically set the `DATABASE_URL` environment variable on your deployed Netlify sites.
## Features
diff --git a/content/250-postgres/1100-integrations/200-vercel.mdx b/content/250-postgres/1100-integrations/200-vercel.mdx
index 3b89ee5653..1a3434ecee 100644
--- a/content/250-postgres/1100-integrations/200-vercel.mdx
+++ b/content/250-postgres/1100-integrations/200-vercel.mdx
@@ -6,8 +6,6 @@ tocDepth: 3
toc: true
---
-
-
The [Vercel Marketplace integration for Prisma Postgres](https://www.vercel.com/marketplace/prisma) connects your Vercel projects with Prisma Postgres instances. Once connected, the integration will automatically set the `DATABASE_URL` environment variable on your deployed Vercel app.
## Features
diff --git a/content/250-postgres/1100-integrations/300-idx.mdx b/content/250-postgres/1100-integrations/300-idx.mdx
index 35b0cc292c..49cfddf01a 100644
--- a/content/250-postgres/1100-integrations/300-idx.mdx
+++ b/content/250-postgres/1100-integrations/300-idx.mdx
@@ -6,9 +6,6 @@ tocDepth: 3
toc: true
---
-
-## Overview
-
If you want to explore Prisma Postgres without leaving your browser, you can try it out via Google's [Firebase Studio](https://studio.firebase.google.com/), a fully-fledged online IDE:
1. Open the [**Prisma**](https://pris.ly/idx-starter) template.
diff --git a/content/250-postgres/650-mcp-server.mdx b/content/250-postgres/1100-integrations/400-mcp-server.mdx
similarity index 100%
rename from content/250-postgres/650-mcp-server.mdx
rename to content/250-postgres/1100-integrations/400-mcp-server.mdx
diff --git a/content/250-postgres/1100-integrations/index.mdx b/content/250-postgres/1100-integrations/index.mdx
index ad6213f435..233be354cc 100644
--- a/content/250-postgres/1100-integrations/index.mdx
+++ b/content/250-postgres/1100-integrations/index.mdx
@@ -1,12 +1,11 @@
---
title: 'Integrations'
metaTitle: '3rd-Party Integrations for Prisma Postgres'
-metaDescription: 'Learn about 3rd-party integrations to use Prisma Postgres, like Netlify, Vercel and Firebase Studio...'
+metaDescription: 'Discover how to use Prisma Postgres with 3rd-party platforms like Vercel, Netlify, and Firebase Studio.'
hide_table_of_contents: true
---
-
-Learn how to use Prisma Postgres via 3rd party integrations.
+Learn how Prisma Postgres works with popular 3rd-party platforms such as Vercel, Netlify, and Firebase Studio.
## In this section
diff --git a/content/250-postgres/1200-more/1000-faq.mdx b/content/250-postgres/1200-more/1000-faq.mdx
new file mode 100644
index 0000000000..ee09084dac
--- /dev/null
+++ b/content/250-postgres/1200-more/1000-faq.mdx
@@ -0,0 +1,223 @@
+---
+title: 'FAQ'
+metaTitle: 'FAQ | Prisma Postgres'
+metaDescription: 'Learn about the FAQ regarding Prisma Postgres.'
+tocDepth: 3
+toc: true
+---
+
+
+Common questions about how Prisma Postgres works, how queries are billed, and how it integrates with Prisma ORM.
+
+## General
+
+### Can I use Prisma Postgres without Prisma ORM?
+
+Prisma Postgres is designed to be used with Prisma ORM and the Accelerate extension for best performance and scalability.
+
+While it is technically possible to connect via the [TCP tunnel](/postgres/database/tcp-tunnel), this approach is only intended for _tooling_ purposes, such as command line tools or query editors.
+
+:::warning
+
+We strongly advise against using the TCP tunnel for application-level access. It is not designed for that purpose and using it that way will likely have negative performance implications for your application.
+
+:::
+
+We are planning to enable application-level TCP access in the future so that you can use Prisma Postgres with other ORMs.
+
+### How do I switch from GitHub login to email and password login?
+
+If you previously signed up using GitHub and want to switch to email and password login, follow these steps:
+
+#### 1. Verify Your GitHub Email Address
+- Check the primary email address associated with your GitHub account (e.g., from your GitHub profile or notification settings).
+
+#### 2. Create a New Email/Password Account
+- Go to the email/password sign-up page.
+- Use the **same email address** linked to your GitHub account to create the new account.
+- Our system will automatically connect your new email/password account to your existing data.
+
+#### 3. Test Your Login
+- Log out and try logging in with your email and the password you just created.
+
+:::note
+
+If you encounter any issues, please contact our support team for help linking your accounts.
+
+:::
+
+### VS Code does not recognize the `$extends` method
+
+If you add the Prisma Client extension for Accelerate to an existing project that is currently open in VS Code, the editor might not immediately recognize the `$extends` method.
+
+This might be an issue with the TypeScript server not yet recognizing the regenerated Prisma Client. To resolve this, you need to restart TypeScript.
+
+1. In VS Code, open the Command Palette. You can do so by pressing F1 or by selecting **View** > **Command Palette**.
+2. Enter `typescript` and select and run the **TypeScript: Restart TS server** command.
+
+VS Code should now recognize the `$extends` method.
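+
+As a quick sanity check of the setup itself, here's a minimal sketch of extending Prisma Client with the Accelerate extension (assuming `@prisma/extension-accelerate` is installed):
+
+```ts
+import { PrismaClient } from '@prisma/client'
+import { withAccelerate } from '@prisma/extension-accelerate'
+
+// After regenerating Prisma Client and restarting the TS server,
+// the `$extends` call below should type-check.
+const prisma = new PrismaClient().$extends(withAccelerate())
+```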
+
+## Pricing
+
+### Does query execution time affect pricing in Prisma Postgres?
+
+No, the cost of Prisma Postgres is based solely on the _number of operations_ (i.e. Prisma ORM queries), not the amount of compute required to execute them.
+
+Whether a query takes 10 milliseconds or 10 seconds to execute, its pricing impact remains the same.
+
+### Do read and write queries cost the same?
+
+Yes, read and write queries are counted equally as _operations_ and are billed the same way.
+
+### Does a `SELECT 1` query count as a billable operation?
+
+Yes, if submitted via Prisma ORM, a query like `SELECT 1` counts as an operation and will be billed accordingly (even if no actual data is accessed in the query).
+
+## Caching
+
+Prisma Postgres includes built-in connection pooling and global caching. These features improve performance by optimizing how your queries are routed and cached.
+
+
+### How does Prisma Postgres's cache layer know what region to fetch the cache from?
+
+Under the hood, Prisma Postgres's cache layer uses Cloudflare, which uses [Anycast](https://www.cloudflare.com/learning/cdn/glossary/anycast-network/) for network addressing and routing. An incoming request will be routed to the nearest data center or "node" in their network that has the capacity to process the request efficiently. To learn more about how this works, we recommend looking into [Anycast](https://www.cloudflare.com/learning/cdn/glossary/anycast-network/).
+
+### How can I invalidate a cache for Prisma Postgres?
+
+You can invalidate the cache on-demand via the [`$accelerate.invalidate` API](/postgres/database/api-reference#accelerateinvalidate) if you're on a [paid plan](https://www.prisma.io/pricing#accelerate), or you can invalidate your entire cache at the project level up to five times a day. This limit is set based on [your plan](https://www.prisma.io/pricing). You can manage this via the Accelerate configuration page.
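+
+As a minimal sketch (using a hypothetical cache tag), on-demand invalidation looks like this:
+
+```ts
+// Invalidate every cached query result that was tagged "published_posts".
+await prisma.$accelerate.invalidate({
+  tags: ["published_posts"],
+});
+```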
+
+### What is Prisma Postgres's caching layer's consistency model?
+
+The caching layer in Prisma Postgres does not have a consistency model. It is not a distributed system where nodes need to reach a consensus, because data is only stored in the cache node(s) closest to the user. Since cached data doesn't propagate to other nodes, the cache layer by design doesn't need a consistency model.
+
+Prisma Postgres implements a [read-through caching strategy](https://www.prisma.io/dataguide/managing-databases/introduction-database-caching#read-through) particularly suitable for read-heavy workloads.
+
+The freshness of the data served by the cache depends on the cache strategy defined in your query. Refer to [this section](/postgres/database/caching#selecting-a-cache-strategy) for more information on selecting the right cache strategy for your query.
+
+### How is Prisma Postgres's caching layer different from other caching tools, such as Redis?
+
+The caching layer of Prisma Postgres:
+
+- Is a _specialized_ cache that allows you to optimize data access in code at the query level with a cache strategy. On the other hand, tools such as Redis and Memcached are _general-purpose_ caches designed to be adaptable and flexible.
+- Is a managed service that reduces the time, risk, and engineering effort of building and maintaining a cache service.
+- Is globally distributed, by default, reducing the latency of your queries. Other cache tools would require additional configuration to make them available globally.
+
+### When should I not use Prisma Postgres's caching features?
+
+The caching layer of Prisma Postgres is a global data cache and connection pool that allows you to optimize data access in code at the query level. While caching with Prisma Postgres can greatly boost the performance of your app, it may not always be the best choice for your use case.
+
+This global cache feature may not be a good fit for your app if:
+
+- Your app is exclusively used within a specific region and both your application server and database are situated in that same region on the same network. For example, database queries will likely be much faster if your application server and database are in the same region and network. However, if your application server is in a different region or network from your database, the cache nodes will speed up your queries because the data will be cached in the data center closest to your application.
+
+- Your application data _always_ needs to be up-to-date on retrieval, making it difficult to establish a reasonable cache strategy.
+
+### What is the maximum allowed value for the `ttl` parameter when configuring `cacheStrategy`?
+
+The [Time-to-live](/postgres/database/caching#time-to-live-ttl) (`ttl`) parameter can be set for up to a _year_. However, it's important to note that items within the cache may be evicted if they are not frequently accessed.
+
+Based on our experimentation, we’ve seen cache items persist for around 18 hours. While items may remain in the cache for an extended period if they are actively accessed, there is no guarantee.
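+
+For illustration, setting the maximum allowed `ttl` would look like this (a sketch):
+
+```ts
+await prisma.user.findMany({
+  cacheStrategy: {
+    ttl: 365 * 24 * 60 * 60, // one year (in seconds), the maximum allowed value
+  },
+});
+```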
+
+:::note
+
+Even frequently accessed items may occasionally be evicted from the cache. It's unlikely for an item to survive for up to or longer than a month, regardless of its activity level.
+
+:::
+
+### Why do I sometimes see unexpected cache behavior?
+
+Prisma Postgres's cache layer performs best when it observes a higher load from a project. Many cache operations, such as committing data to cache and refreshing stale data, happen asynchronously. When benchmarking the cache layer, we recommend doing so with loops or a load-testing approach. This will better mimic high-load scenarios and reduce outliers from low-frequency operations.
+
+Prisma operations are sent to Prisma Postgres over HTTP. As a result, the first request to Prisma Postgres must establish an HTTP handshake and may have additional latency as a result. We're exploring ways to reduce this initial request latency in the future.
+
+### What regions are Prisma Postgres's cache nodes available in?
+
+Prisma Postgres's cache layer runs on Cloudflare's network and cache hits are served from Cloudflare's 300+ locations. You can find the regions where Prisma Postgres's cache nodes are available here: [https://www.cloudflare.com/network/](https://www.cloudflare.com/network/).
+
+### How long does it take to invalidate a cache query result?
+
+As the cache needs to be cleared globally, it is difficult to provide a specific time frame. However, the cached data is eventually consistent and typically propagates to all points of presence (PoPs) within a few seconds. In very rare cases, it may take longer.
+
+Here is a [demo app](https://pris.ly/test-cache-invalidation) to test the time it takes to invalidate a cache query result.
+
+### What is the difference between **Invalidate** and **Revalidate**?
+
+**Invalidate**: The cache entry is deleted, and new data will be fetched on the next request, causing a cache miss. This removes stale data but may lead to slower responses until the cache is repopulated.
+
+**Revalidate**: The cache entry is updated proactively, ensuring the next request uses fresh data from the cache. This keeps the cache valid and maintains faster response times by avoiding cache misses.
+
+### What is on-demand cache invalidation?
+
+[On-demand cache invalidation](/postgres/database/caching#on-demand-cache-invalidation) lets applications instantly update specific cached data when it changes, instead of waiting for regular cache refresh cycles. This keeps information accurate and up-to-date for users.
+
+### When should I use the cache invalidate API?
+
+The [cache invalidate API](/postgres/database/caching#on-demand-cache-invalidation) is essential when data consistency cannot wait for the cache’s standard expiration or revalidation. Key use cases include:
+
+- **Content updates**: When critical changes occur, such as edits to a published article, product updates, or profile modifications, that need to be visible immediately.
+- **Inventory management**: In real-time applications, like inventory or booking systems, where stock levels, availability, or reservation statuses must reflect the latest information.
+- **High-priority data**: For time-sensitive data, like breaking news or urgent notifications, where it’s essential for users to see the most current information right away.
+
+Using on-demand cache invalidation in these scenarios helps keep only the necessary data refreshed, preserving system performance while ensuring accurate, up-to-date information for users.
+
+
+## Connection pooling
+
+### Can I increase the query duration and response size limits for my Prisma Postgres instance?
+
+Yes, you can increase your Prisma Postgres limits based on your subscription plan. Here are the configurable limits:
+
+| Limit | Starter plan | Pro plan | Business plan |
+|--------------------------------|-------------------|-------------------|-------------------|
+| **Query timeout** | Up to 10 seconds | Up to 20 seconds | Up to 60 seconds |
+| **Interactive transactions timeout** | Up to 15 seconds | Up to 30 seconds | Up to 90 seconds |
+| **Response size** | Up to 5 MB | Up to 10 MB | Up to 20 MB |
+
+Check the [pricing page](https://www.prisma.io/pricing) for more details on the available plans and their corresponding limits.
+
+:::warning
+While you can increase these limits based on your subscription plan, it's *still* recommended to optimize your database operations. [Learn more in our troubleshooting guide.](/postgres/database/error-reference)
+:::
+
+
+## Query optimization
+
+Prisma Postgres allows query optimization via [Prisma Optimize](/optimize) and provides performance recommendations to help improve your database queries during development. You can enable it with Prisma Postgres or [also use it with your own database](/optimize/getting-started), but setup and integration steps differ.
+
+### Can you automatically implement optimizations?
+
+Prisma Postgres's query optimization feature offers insights and recommendations on how to improve your database queries. It does not alter any existing queries or your Prisma schema.
+
+### How long is a recording session retained?
+
+There are no limits on the storage retention period. A query performance recording session will be stored until you explicitly delete it.
+
+### Do recommendation limits reset monthly?
+
+Yes, the recommendation usage resets at the beginning of each calendar month. For example, if you use `5` recommendations by the end of the month, your usage will reset to `0` at the start of the next month.
+
+### Can I get charged for exceeding the recommendation limit on the starter plan?
+
+Yes, if you’re on the starter plan, exceeding `5` recommendations in a billing cycle will result in a `$5` charge at the end of that cycle. For more information, visit [our pricing page](https://www.prisma.io/pricing#optimize).
+
+### How are viewed Prisma AI recommendations tracked for billing? Are they counted based on generated or viewed recommendations?
+
+They are counted based on viewed recommendations. Once you click on a recommendation from the recommendations table and view the recommendation's detail page, it counts as being seen.
+
+### Can I enable query optimizations for Prisma Postgres in production?
+
+No, query optimization for Prisma Postgres is not meant to be enabled in production. It is specifically designed for local development, providing valuable insights and optimizations during that phase. While it's technically possible to run it in a production environment, doing so could result in performance problems or unexpected behaviors, as it is not built to handle the complexity and scale of production workloads. For the best experience, we recommend using query optimization solely in your development environment.
+
+You can use the `enable` property in the client extension to run it [only in the development environment](https://www.npmjs.com/package/@prisma/extension-optimize). By default, the `enable` property is set to `true`.
+
+```ts file=script.ts copy showLineNumbers
+import { PrismaClient } from '@prisma/client'
+import { withOptimize } from "@prisma/extension-optimize"
+
+const prisma = new PrismaClient().$extends(
+ withOptimize({
+ apiKey: process.env.OPTIMIZE_API_KEY,
+ enable: process.env.ENVIRONMENT === 'development',
+ })
+);
+```
diff --git a/content/250-postgres/800-known-limitations.mdx b/content/250-postgres/1200-more/800-known-limitations.mdx
similarity index 100%
rename from content/250-postgres/800-known-limitations.mdx
rename to content/250-postgres/1200-more/800-known-limitations.mdx
diff --git a/content/250-postgres/900-troubleshooting.mdx b/content/250-postgres/1200-more/900-troubleshooting.mdx
similarity index 100%
rename from content/250-postgres/900-troubleshooting.mdx
rename to content/250-postgres/1200-more/900-troubleshooting.mdx
diff --git a/content/250-postgres/1200-more/_category_.json b/content/250-postgres/1200-more/_category_.json
new file mode 100644
index 0000000000..f354ac9996
--- /dev/null
+++ b/content/250-postgres/1200-more/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "More",
+ "collapsible": false,
+ "collapsed": false
+}
diff --git a/content/250-postgres/1200-more/index.mdx b/content/250-postgres/1200-more/index.mdx
new file mode 100644
index 0000000000..4cefddb489
--- /dev/null
+++ b/content/250-postgres/1200-more/index.mdx
@@ -0,0 +1,13 @@
+---
+title: 'More'
+metaTitle: 'Additional Resources for Prisma Postgres'
+metaDescription: 'Explore known limitations, troubleshooting steps, and common FAQs for Prisma Postgres.'
+hide_table_of_contents: true
+---
+
+
+Explore additional topics related to Prisma Postgres, including known limitations, troubleshooting guides, and frequently asked questions.
+
+## In this section
+
+
diff --git a/content/250-postgres/300-connection-pooling.mdx b/content/250-postgres/300-connection-pooling.mdx
deleted file mode 100644
index a672ff4acc..0000000000
--- a/content/250-postgres/300-connection-pooling.mdx
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 'Connection pooling'
-metaTitle: 'Connection pooling in Prisma Postgres'
-metaDescription: 'Learn about connection pooling in Prisma Postgres'
-tocDepth: 3
-toc: true
----
-
-Prisma Postgres includes built-in connection pooling, enabled by [Prisma Accelerate](/accelerate/). This reduces the number of direct database connections and helps manage concurrent requests efficiently.
-
-For more details, see the [connection pooling documentation](/accelerate/connection-pooling).
-
-
diff --git a/content/250-postgres/300-database/350-caching.mdx b/content/250-postgres/300-database/350-caching.mdx
new file mode 100644
index 0000000000..3f82207733
--- /dev/null
+++ b/content/250-postgres/300-database/350-caching.mdx
@@ -0,0 +1,248 @@
+---
+title: 'Caching'
+metaTitle: 'Caching queries in Prisma Postgres'
+metaDescription: 'Learn about caching queries in Prisma Postgres'
+tocDepth: 3
+toc: true
+---
+
+Prisma Postgres supports built-in query caching to reduce database load and improve query performance. You can configure cache behavior using the `cacheStrategy` option available in all read queries.
+
+This feature is powered by an internal caching layer enabled through [Prisma Accelerate](/accelerate), but you do not need to interact with Accelerate directly unless you're using your own database.
+
+## Cache strategies
+
+For all read queries in Prisma Client, you can define the `cacheStrategy` parameter that configures cache behavior. The cache strategy allows you to define two main characteristics of the cache:
+
+- **Time-to-live (TTL):** Duration in seconds a cached response is considered _fresh_.
+- **Stale-While-Revalidate (SWR):** Duration in seconds a stale cached response is considered acceptable while the cache is refreshed in the background.
+
+## Time-to-live (TTL)
+
+Time-to-Live (TTL) determines how long cached data is considered fresh. By specifying the `ttl` in seconds, you can control the duration for which data in the cache remains valid. When a read query is executed, if the cached response is within the `ttl` limit, Prisma Client retrieves the data from the cache without querying the database. If the cached data is not available or has expired, Prisma Client queries the database and stores the results in the cache for future requests.
+
+Use `ttl` in `cacheStrategy` and specify the TTL of the query in seconds:
+
+```javascript
+await prisma.user.findMany({
+ cacheStrategy: {
+ //add-next-line
+ ttl: 60,
+ },
+});
+```
+
+With a specified TTL of 60 seconds, the majority of requests will result in
+a cache hit throughout the TTL duration:
+
+
+
+TTL is useful for reducing database load and latency for data that does not require frequent updates.
+
+### Invalidate the TTL and keep your cached query results up-to-date
+
+If your application requires real-time or near-real-time data, cache invalidation ensures that users see the most current data, even when using a large `ttl` (Time-To-Live). By invalidating your cache, you can bypass extended caching periods to show live data whenever it's needed.
+
+For example, if a dashboard displays customer information and a customer’s contact details change, TTL (Time-To-Live) settings ensure the cache automatically expires after a set duration. This allows the system to refresh only the updated data at the next access, ensuring support staff always see the latest information without manually refreshing the cache.
+
+However, in cases where immediate updates are required before the TTL expires, cache invalidation allows the system to proactively clear specific data from the cache. This forces a refresh of the updated information instantly, so support staff always have the most current details without waiting for the TTL to trigger.
+
+
+To invalidate a cached query result, you can add tags and then use the `$accelerate.invalidate` API.
+
+:::note
+
+On-demand cache invalidation is available with our paid plans. For more details, please see our [pricing](https://www.prisma.io/pricing#accelerate).
+
+:::
+
+To invalidate the query below, you need to provide the cache tag in the `$accelerate.invalidate` API:
+
+```ts
+await prisma.user.findMany({
+ cacheStrategy: {
+ ttl: 60,
+ //add-next-line
+ tags: ["findMany_users"],
+ },
+});
+
+// This is how you would invalidate the cached query above.
+//add-start
+await prisma.$accelerate.invalidate({
+ tags: ["findMany_users"],
+});
+//add-end
+```
+
+## Stale-While-Revalidate (SWR)
+
+Stale-While-Revalidate (SWR) allows you to control how long Prisma Postgres can serve stale cache data while fetching fresh data in the background. When a read query is executed, Prisma Postgres checks the age of the cached response against the `swr` duration. If the cache data is within the `swr` limit, Prisma Postgres serves the stale data while simultaneously refreshing the cache by fetching the latest data from the database.
+
+Use `swr` in `cacheStrategy` and specify the SWR of the query in seconds:
+
+```javascript
+await prisma.user.findMany({
+ cacheStrategy: {
+ //add-next-line
+ swr: 60,
+ },
+});
+```
+
+When specifying a SWR of 60 seconds, the cache serves stale data until the cache refreshes itself in the background after each request:
+
+
+
+### Invalidate the SWR and keep your cached query results up-to-date
+
+If your application requires real-time or near-real-time data, cache invalidation ensures that users see the most current data, even when using a large `swr` (Stale-While-Revalidate). By invalidating your cache, you can bypass extended caching periods to show live data whenever it's needed.
+
+For example, consider a dashboard that displays stock levels for products in a warehouse. With SWR (Stale-While-Revalidate) settings, the dashboard can immediately display the last known stock data, even if it’s slightly outdated, while new data is fetched in the background. This ensures that staff can continue working with recent information without waiting, with the stock levels updating as soon as revalidation completes.
+
+However, in cases where stock data needs to be updated immediately—for instance, if a product is low in stock and the count needs real-time accuracy—cache invalidation allows the system to proactively clear specific data from the cache. This forces a refresh of the latest stock data instantly, so staff always have the most up-to-date information without waiting for SWR to complete the revalidation.
+
+To invalidate a cached query result, you can add tags and then use the `$accelerate.invalidate` API.
+
+:::note
+
+On-demand cache invalidation is available with our paid plans. For more details, please see our [pricing](https://www.prisma.io/pricing#accelerate).
+
+:::
+
+To invalidate the query below, you need to provide the cache tag in the `$accelerate.invalidate` API:
+
+```ts
+await prisma.user.findMany({
+ cacheStrategy: {
+ swr: 60,
+ //add-next-line
+ tags: ["findMany_users"],
+ },
+});
+
+// This is how you would invalidate the cached query above.
+//add-start
+await prisma.$accelerate.invalidate({
+ tags: ["findMany_users"],
+});
+//add-end
+```
+
+## Selecting a cache strategy
+
+Caching helps you improve query response times and reduce database load. However, it also means you might serve stale data to the client. Whether or not serving stale data is acceptable, and to what extent, depends on your use case. `ttl` and `swr` are the parameters you can use to tweak the cache behavior.
+
+### Cache strategy using TTL
+
+Use TTL to reduce database load when stale cached data is acceptable.
+
+#### Use case: Product catalog in e-commerce applications
+
+Consider an e-commerce application with a product catalog that doesn't frequently change. By setting a `ttl` of, let's say, 1 hour, Prisma Client can serve cached product data for subsequent user requests within that hour without hitting the database. This significantly reduces the database load and improves the response time for product listing pages.
+
+**When to invalidate:** If there are critical updates to the catalog, such as a major price change or product availability adjustment, the [cache should be invalidated](/postgres/database/caching#on-demand-cache-invalidation) immediately to prevent customers from seeing outdated information.
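+
+As a sketch, the 1-hour catalog cache described above could look like this (the `product` model is hypothetical):
+
+```ts
+// Serve cached catalog data for up to 1 hour before querying the database again.
+await prisma.product.findMany({
+  cacheStrategy: { ttl: 3600 },
+});
+```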
+
+### Cache strategy using SWR
+
+Use SWR to respond quickly to requests with minimal stale data. While it does not reduce database load, it can improve response times significantly.
+
+#### Use case: User profile in social media platforms
+
+Imagine a social media platform where user profiles are frequently accessed. By leveraging `swr` with a duration of, let's say, 5 minutes, Prisma Postgres can serve the cached user profile information quickly, reducing the latency for profile pages. Meanwhile, in the background, it refreshes the cache after every request, ensuring that any updates made to the profile are eventually reflected for subsequent requests.
+
+**When to invalidate:** If a user makes significant updates to their profile, such as changing their profile picture or bio, the cache should be [invalidated](/postgres/database/caching#on-demand-cache-invalidation) immediately to ensure that followers see the latest updates without waiting for the SWR refresh.
+
+### Cache strategy using TTL + SWR
+
+For very fast response times and reduced database load, use both TTL and SWR. You can use this strategy to fine-tune your application’s tolerance for stale data.
+
+Use `ttl` and `swr` in `cacheStrategy` and specify the TTL and SWR of the query in seconds:
+
+```javascript
+await prisma.user.findMany({
+ cacheStrategy: {
+ //add-start
+ ttl: 30,
+ swr: 60,
+ //add-end
+ },
+});
+```
+
+When specifying a TTL of 30 seconds and SWR of 60 seconds, the cache serves fresh data for the initial 30 seconds. Subsequently, it serves stale data until the cache refreshes itself in the background after each request:
+
+
+
+#### Use case: News articles
+
+Consider a news application where articles are frequently accessed but don't require real-time updates. By setting a `ttl` of 2 hours and an `swr` duration of 5 minutes, Prisma Client can serve cached articles quickly, reducing latency for readers. As long as the articles are within the `ttl`, users get fast responses. After the `ttl` expires, Prisma Client continues to serve the stale articles for up to an additional 5 minutes, revalidating the cache with the latest news from the database in response to a new query. This helps maintain a balance between performance and freshness.
+
+**When to invalidate:** If a critical update or breaking news article is published, the cache should be [invalidated](/postgres/database/caching#on-demand-cache-invalidation) immediately to ensure readers see the latest information without delay. This approach is especially useful for applications where certain news items may need to override the normal cache cycle for timeliness.
+
+## On-demand cache invalidation
+
+If your application requires real-time or near-real-time data, cache invalidation ensures that users see the most current data, even when using a large `ttl` (Time-To-Live) or `swr` (Stale-While-Revalidate) [cache strategy](/postgres/database/caching#cache-strategies). By invalidating your cache, you can bypass extended caching periods to show live data whenever it's needed.
+
+You can invalidate the cache using the [`$accelerate.invalidate` API](/postgres/database/api-reference#accelerateinvalidate):
+
+:::note
+
+To programmatically invalidate cached queries, a paid plan is required. See our [pricing for more details](https://www.prisma.io/pricing#accelerate).
+
+:::
+
+```ts
+await prisma.user.findMany({
+ where: {
+ email: {
+ contains: "alice@prisma.io",
+ },
+ },
+ cacheStrategy: {
+ swr: 60,
+ ttl: 60,
+ // highlight-start
+ tags: ["emails_with_alice"],
+ // highlight-end
+ },
+});
+```
+
+You need to provide the cache tag in the `$accelerate.invalidate` API:
+
+```ts
+try {
+ // highlight-start
+ await prisma.$accelerate.invalidate({
+ tags: ["emails_with_alice"],
+ });
+ // highlight-end
+} catch (e) {
+ if (e instanceof Prisma.PrismaClientKnownRequestError) {
+ // The .code property can be accessed in a type-safe manner
+ if (e.code === "P6003") {
+ console.log(
+ "The cache invalidation rate limit has been reached. Please try again later."
+ );
+ }
+ }
+ throw e;
+}
+```
+
+Explore the [demo app](https://pris.ly/test-cache-invalidation) to see how cached query results in Prisma Postgres are invalidated on demand, shown in a clear timeline.
+
+## Default cache strategy
+
+Prisma Postgres defaults to **no cache** to avoid unexpected issues. While caching can improve performance, incorrect usage may lead to errors.
+
+For instance, if a query is executed on a critical path without specifying a cache strategy, the result may be incorrect, with no clear explanation. This issue often arises when implicit caching is unintentionally left enabled.
+
+To avoid such problems, you must explicitly opt in to caching. This ensures you are aware that caching is not enabled by default, preventing potential errors.
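+
+As a minimal sketch of the contrast:
+
+```ts
+// Default: no cacheStrategy, so the query is always served by the database.
+await prisma.user.findMany();
+
+// Explicit opt-in: results may be served from the cache for up to 60 seconds.
+await prisma.user.findMany({ cacheStrategy: { ttl: 60 } });
+```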
+
+:::note
+
+When no cache strategy is specified or during a cache miss, the cache layer routes all queries to the database through a connection pool instance near the database region.
+
+:::
\ No newline at end of file
diff --git a/content/250-postgres/300-database/400-connection-pooling.mdx b/content/250-postgres/300-database/400-connection-pooling.mdx
new file mode 100644
index 0000000000..415fca40f1
--- /dev/null
+++ b/content/250-postgres/300-database/400-connection-pooling.mdx
@@ -0,0 +1,155 @@
+---
+title: 'Connection pooling'
+metaTitle: 'Connection pooling in Prisma Postgres'
+metaDescription: 'Learn about connection pooling in Prisma Postgres'
+tocDepth: 3
+toc: true
+---
+
+Prisma Postgres provides built-in [connection pooling](https://www.prisma.io/dataguide/database-tools/connection-pooling) by default, enabled by [Prisma Accelerate](/accelerate/). By using Prisma Postgres, you get the benefits of connection pooling without having to configure anything. The efficient management of database connections allows the database to process more queries without exhausting the available database connections, making your application more scalable.
+
+In some cases, however, it may be beneficial to further configure connection pooling in order to optimize the performance of your application.
+
+:::note
+
+This document focuses on the connection pooling features of Prisma Postgres. For more information about the internal connection pool of Prisma ORM specifically, see our [ORM connection pooling documentation](/orm/prisma-client/setup-and-configuration/databases-connections/connection-pool).
+
+:::
+
+
+## Connection pooling in Prisma Postgres
+
+Currently, Prisma Postgres allows a maximum of 10 concurrent database connections per Prisma Schema. This limit is typically sufficient due to Prisma Postgres's efficient unikernel-based architecture, which minimizes the need for large connection pools.
+
+:::note
+
+If you're using **your own database** with Prisma Accelerate, the connection limits differ:
+
+- **Starter plan**: Maximum of `10` connections. This is often sufficient for most workloads, but if you're expecting high traffic or intensive compute operations, you may want to [increase this limit](#configuring-the-connection-pool-size).
+- **Pro plan**: Supports up to `100` concurrent connections.
+- **Business plan**: Supports up to `1000` concurrent connections.
+
+You can compare plans on the [Prisma pricing page](https://www.prisma.io/pricing).
+
+:::
+
+
+## Configuring the connection pool size
+
+If you're **not using Prisma Postgres**, you can configure the connection pool size for Prisma ORM by specifying it [in the connection string](/orm/prisma-client/setup-and-configuration/databases-connections/connection-pool#setting-the-connection-pool-size).
+
+For Prisma Postgres, the connection limit is currently **fixed at `10`** and cannot be changed.
+
+If you're using Prisma Accelerate with your own database, you can configure the connection pool size through the `Connection limit` setting in your project on the [Accelerate setup page](/accelerate/getting-started).
+
+## Configuring the connection pool timeout
+
+The connection pool timeout is the maximum number of seconds that a query will block while waiting for a connection from Prisma Postgres's internal connection pool. This occurs if the number of concurrent requests exceeds the connection limit, resulting in queueing of additional requests until a free connection becomes available. An exception is thrown if a free connection does not become available within the pool timeout. The connection pool timeout can be disabled by setting the value to 0.
+
+Similar to the connection pool size, the connection pool timeout can also be configured via the _database connection string_, by adding the `pool_timeout` parameter.
+
+For example:
+
+```env no-copy
+postgresql://user:password@localhost:5432/db?connection_limit=10&pool_timeout=20
+```
+
+
+
+The default value for `pool_timeout` is `10` seconds.
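+
+For example, since the pool timeout can be disabled entirely, the following connection string (a sketch) would make queries queue indefinitely until a connection becomes free:
+
+```env no-copy
+postgresql://user:password@localhost:5432/db?connection_limit=10&pool_timeout=0
+```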
+
+
+
+## Configuring query limits
+
+You can configure the maximum query response size, query duration, and transaction limits from the **Settings** tab in your Prisma Postgres project environment.
+
+### Query timeout limit
+
+Prisma Postgres has a default global timeout of `10s` for each query, configurable using the slider labeled **Query duration**, based on your subscription plan:
+
+| Limit | Starter | Pro | Business |
+|----------|---------------|--------------|---------------|
+| Query timeout | Up to `10` seconds | Up to `20` seconds | Up to `60` seconds |
+
+See the [error reference](/postgres/database/error-reference#p6004-querytimeout) and our [pricing page](https://www.prisma.io/pricing) for more information.
+
+:::warning
+
+While you can increase the query timeout, it's recommended to inspect and optimize your database queries if they take longer than `10` seconds. This helps reduce stress on your underlying database, as long-running queries often indicate a need for optimization. Learn more in the [error reference](/postgres/database/error-reference#p6004-querytimeout).
+:::
+
+### Interactive transactions query timeout limit
+
+Prisma Postgres has a default global timeout of `15s` for each [interactive transaction](/orm/prisma-client/queries/transactions#interactive-transactions), configurable using the slider labeled **Transaction duration**, based on your subscription plan:
+
+| Limit | Starter | Pro | Business |
+|----------------------------|---------------|--------------|---------------|
+| Interactive transaction limit | Up to `15` seconds | Up to `30` seconds | Up to `90` seconds |
+
+See the [error reference](/postgres/database/error-reference#p6004-querytimeout) and our [pricing page](https://www.prisma.io/pricing#accelerate) for more information.
+
+When you set a higher interactive transaction timeout in the Prisma Console, you **must also** specify a matching `timeout` value in your interactive transaction query via the `timeout` [transaction option](/orm/prisma-client/queries/transactions#transaction-options). Otherwise, transactions will still time out at the lower default (e.g., the `5`-second limit when no `timeout` value is specified). Here's an example of how to set a `30`-second timeout in your code:
+
+```ts
+await prisma.$transaction(
+ async (tx) => {
+ // Your queries go here
+ },
+ {
+ timeout: 30000, // 30s
+ }
+);
+```
+
+:::warning
+
+While you can increase the interactive transaction timeout limit, it's recommended to inspect and optimize your database transactions if they take longer than 15 seconds. Long-running transactions can negatively impact performance and often signal the need for optimization. Learn more in the [error reference](/postgres/database/error-reference#p6004-querytimeout) and review the [warning in the Interactive Transactions section](/orm/prisma-client/queries/transactions#interactive-transactions-1) in our documentation.
+
+:::
+
+### Response size limit
+
+Prisma Postgres has a default global response size limit of `5MB`, configurable using the slider labeled **Response size**, based on your subscription plan:
+
+| Plan | Starter | Pro | Business |
+|----------|---------------|--------------|---------------|
+| Query size | Up to `5MB` | Up to `10MB` | Up to `20MB` |
+
+See the [error reference](/postgres/database/error-reference#p6009-responsesizelimitexceeded) and our [pricing page](https://www.prisma.io/pricing#accelerate) for more information.
+
+:::warning
+
+While you can increase the query response size, it’s recommended to limit data retrieval to what you actually need. This improves database performance, reduces stress on your database, and makes your frontend applications more responsive. Queries exceeding `5` MB in size often indicate a need for optimization. Learn more in the [error reference](/postgres/database/error-reference#p6009-responsesizelimitexceeded).
+
+:::
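+
+If you hit this limit, trimming the payload is usually straightforward. Below is a minimal sketch using `select` and `take` (the `user` model and field names are illustrative):
+
+```ts
+const users = await prisma.user.findMany({
+  // Return only the fields you actually need instead of whole rows.
+  select: { id: true, email: true },
+  // Cap the number of rows per request; paginate for more.
+  take: 100,
+});
+```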
+
+## Autoscaling (Accelerate + Your own database only)
+
+Autoscaling is currently available **only when using Prisma Accelerate with your own database**. It enables dynamic resource allocation based on your application's traffic. As usage nears the defined connection limit, Prisma will begin provisioning additional resources to handle the load. If traffic continues to grow, the system will scale out further. When traffic decreases, it scales back down—ensuring efficient use of resources.
+
+### How it works
+
+Autoscaling is powered by a **connection pooler** that horizontally scales your environment by distributing total available connections across multiple **Query Engine instances**.
+
+Here’s how this works in practice:
+
+- Suppose your environment’s connection limit is set to `1000`.
+- Prisma Accelerate will scale up to multiple Query Engine instances (e.g., 100 instances).
+- Each instance is allocated a share of the total—**10 connections per instance**, in this example.
+- This is why each Query Engine instance reports a limit of 10, even though the full environment supports 1000 concurrent connections.
+
+This distributed model allows your application to handle increased traffic by spinning up more Query Engine instances, while efficiently managing connection usage.
+
+### Enabling Autoscaling
+
+Autoscaling is automatically enabled **when your Accelerate connection limit is set above the default (10)**. This feature is **not available on the Starter plan**.
+
+Your environment's maximum connection limit is based on your [Prisma Data Platform plan](https://www.prisma.io/pricing):
+
+| Plan | Max Connection Limit |
+|-------------|--------------------------------------|
+| Starter | `10` |
+| Pro | `100` |
+| Business | `1000` |
+| Enterprise | [Contact Us](mailto:sales@prisma.io) |
diff --git a/content/250-postgres/500-backups.mdx b/content/250-postgres/300-database/500-backups.mdx
similarity index 100%
rename from content/250-postgres/500-backups.mdx
rename to content/250-postgres/300-database/500-backups.mdx
diff --git a/content/250-postgres/600-tcp-tunnel.mdx b/content/250-postgres/300-database/600-tcp-tunnel.mdx
similarity index 88%
rename from content/250-postgres/600-tcp-tunnel.mdx
rename to content/250-postgres/300-database/600-tcp-tunnel.mdx
index dfff329524..cdc9e52057 100644
--- a/content/250-postgres/600-tcp-tunnel.mdx
+++ b/content/250-postgres/300-database/600-tcp-tunnel.mdx
@@ -93,7 +93,7 @@ password:
This will start the tunnel on a randomly assigned TCP port. The proxy automatically handles authentication, so any database credentials are accepted. The tunnel also encrypts traffic, meaning clients should be set to not require SSL.
-You can now connet to your Prisma Postgres editor using your favorite PostgreSQL client, e.g. `psql` or a GUI like [TablePlus](/postgres/tooling#2a-connect-to-prisma-postgres-using-tableplus) or [DataGrip](/postgres/tooling#2b-connect-to-prisma-postgres-using-datagrip). To do so, you only need to provide the **`host`** and **`port`** from the output above. The TCP tunnel will handle authentication via the API key in your Prisma Postgres connection URL, so you can omit the values for **`username`** and **`password`**.
+You can now connect to your Prisma Postgres database using your favorite PostgreSQL client, e.g. `psql` or a GUI like [TablePlus](/postgres/database/tooling#2a-connect-to-prisma-postgres-using-tableplus) or [DataGrip](/postgres/database/tooling#2b-connect-to-prisma-postgres-using-datagrip). To do so, you only need to provide the **`host`** and **`port`** from the output above. The TCP tunnel will handle authentication via the API key in your Prisma Postgres connection URL, so you can omit the values for **`username`** and **`password`**.
### Customizing host and port
@@ -106,7 +106,7 @@ npx @prisma/ppg-tunnel --host 127.0.0.1 --port 5432
## Next steps
-The local tunnel enables you to access Prisma Postgres from 3rd party database editors such as Postico, DataGrip, TablePlus and pgAdmin. Learn more in this [section](/postgres/tooling).
+The local tunnel enables you to access Prisma Postgres from 3rd party database editors such as Postico, DataGrip, TablePlus and pgAdmin. Learn more in this [section](/postgres/database/tooling).
## Security considerations
diff --git a/content/250-postgres/700-tooling.mdx b/content/250-postgres/300-database/700-tooling.mdx
similarity index 100%
rename from content/250-postgres/700-tooling.mdx
rename to content/250-postgres/300-database/700-tooling.mdx
diff --git a/content/250-postgres/300-database/800-api-reference.mdx b/content/250-postgres/300-database/800-api-reference.mdx
new file mode 100644
index 0000000000..2ad1425b3f
--- /dev/null
+++ b/content/250-postgres/300-database/800-api-reference.mdx
@@ -0,0 +1,236 @@
+---
+title: 'API reference'
+metaTitle: 'Prisma Postgres: API Reference'
+metaDescription: 'API reference documentation for Prisma Postgres.'
+tocDepth: 3
+toc: true
+---
+
+The Prisma Postgres API reference documentation is based on the following schema:
+
+```prisma
+model User {
+ id Int @id @default(autoincrement())
+ name String?
+ email String @unique
+}
+```
+
+All examples are based on the `User` model.
+
+## `cacheStrategy`
+
+With [the Prisma client extension for Prisma Postgres](https://www.npmjs.com/package/@prisma/extension-accelerate), you can use the `cacheStrategy` parameter for model queries and use the [`ttl`](/postgres/database/caching#time-to-live-ttl) and [`swr`](/postgres/database/caching#stale-while-revalidate-swr) parameters to define a cache strategy for your Prisma Postgres queries. The client extension requires Prisma Client version `4.10.0` or higher.
+
+### Options
+
+The `cacheStrategy` parameter takes an option with the following keys:
+
+| Option | Example | Type | Required | Description |
+| ------ | ------- | ----- | -------- | ------------------------------------------- |
+| `swr` | `60` | `Int` | No | The stale-while-revalidate time in seconds. |
+| `ttl` | `60` | `Int` | No | The time-to-live time in seconds. |
+| `tags` | `["user"]` | `String[]` | No | The `tag` serves as a variable to control the invalidation of specific queries within your application. It is an optional array of strings to [invalidate](#accelerateinvalidate) the cache, with each tag containing only alphanumeric characters and underscores, and a maximum length of 64 characters. |
+
+### Examples
+
+Add a caching strategy to the query, defining a 60-second stale-while-revalidate (SWR) value, a 60-second time-to-live (TTL) value, and a cache tag of `"emails_with_alice"`:
+
+```ts
+await prisma.user.findMany({
+ where: {
+ email: {
+ contains: "alice@prisma.io",
+ },
+ },
+ // highlight-start
+ cacheStrategy: {
+ swr: 60,
+ ttl: 60,
+ tags: ["emails_with_alice"],
+ },
+ // highlight-end
+});
+```
+
+### Supported Prisma Client operations
+
+The following is a list of all read query operations that support `cacheStrategy`:
+
+- [`findUnique()`](/orm/reference/prisma-client-reference#findunique)
+- [`findUniqueOrThrow()`](/orm/reference/prisma-client-reference#finduniqueorthrow)
+- [`findFirst()`](/orm/reference/prisma-client-reference#findfirst)
+- [`findFirstOrThrow()`](/orm/reference/prisma-client-reference#findfirstorthrow)
+- [`findMany()`](/orm/reference/prisma-client-reference#findmany)
+- [`count()`](/orm/reference/prisma-client-reference#count)
+- [`aggregate()`](/orm/reference/prisma-client-reference#aggregate)
+- [`groupBy()`](/orm/reference/prisma-client-reference#groupby)
+
+:::info
+
+The `cacheStrategy` parameter is not supported on any write operations, such as `create()`.
+
+:::
+
+## `withAccelerateInfo`
+
+Any query that supports `cacheStrategy` can append `withAccelerateInfo()` to wrap the response data and include additional information about the cached response.
+
+To retrieve the status of the response, use:
+
+```ts
+const { data, info } = await prisma.user
+ .count({
+ cacheStrategy: { ttl: 60, swr: 600 },
+ where: { myField: 'value' },
+ })
+ .withAccelerateInfo()
+
+console.dir(info)
+```
+
+:::info
+
+Notice the `info` property of the response object. This is where the request information is stored.
+
+:::
+
+### Return type
+
+The `info` object is of type `AccelerateInfo` and follows the interface below:
+
+```ts
+interface AccelerateInfo {
+ cacheStatus: 'ttl' | 'swr' | 'miss' | 'none'
+ lastModified: Date
+ region: string
+ requestId: string
+ signature: string
+}
+```
+
+| Property | Type | Description |
+| -------------- | ------------------------------------ | ----------- |
+| `cacheStatus` | `"ttl" \| "swr" \| "miss" \| "none"` | The cache status of the response.<br />- `ttl` indicates a cache hit within the `ttl` duration and no database query was executed.<br />- `swr` indicates a cache hit within the `swr` duration and the data is being refreshed by Prisma Postgres in the background.<br />- `miss` indicates that both `ttl` and `swr` have expired and the database query was executed by the request.<br />- `none` indicates that no cache strategy was specified and the database query was executed by the request. |
+| `lastModified` | `Date` | The date the response was last refreshed. |
+| `region` | `String` | The data center region that received the request. |
+| `requestId` | `String` | Unique identifier of the request. Useful for troubleshooting. |
+| `signature` | `String` | The unique signature of the Prisma operation. |
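+
+For example, you might branch on `cacheStatus` to verify that caching behaves as expected. Below is a minimal sketch using the `User` model from the schema above (the logging is illustrative):
+
+```ts
+const { data, info } = await prisma.user
+  .findMany({ cacheStrategy: { ttl: 60 } })
+  .withAccelerateInfo()
+
+// Log cache misses to spot queries that might benefit from a longer `ttl`.
+if (info?.cacheStatus === 'miss') {
+  console.log(`Cache miss for request ${info.requestId} in region ${info.region}`)
+}
+```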
+
+## `$accelerate.invalidate`
+
+You can invalidate the cache using the [`$accelerate.invalidate` API](/accelerate/).
+
+:::note
+
+To invalidate cached query results on-demand, a paid plan is required. Each plan has specific limits on the number of cache tag-based invalidations allowed per day, though there are no limits on calling the `$accelerate.invalidate` API itself. See our [pricing for more details](https://www.prisma.io/pricing#accelerate).
+
+:::
+
+### Example
+
+To invalidate the query below:
+
+```ts
+await prisma.user.findMany({
+ where: {
+ email: {
+ contains: "alice@prisma.io",
+ },
+ },
+ cacheStrategy: {
+ swr: 60,
+ ttl: 60,
+ // highlight-start
+ tags: ["emails_with_alice"],
+ // highlight-end
+ },
+});
+```
+
+You need to provide the cache tag in the `$accelerate.invalidate` API:
+
+```ts
+try {
+ // highlight-start
+ await prisma.$accelerate.invalidate({
+ tags: ["emails_with_alice"],
+ });
+ // highlight-end
+} catch (e) {
+ if (e instanceof Prisma.PrismaClientKnownRequestError) {
+ // The .code property can be accessed in a type-safe manner
+ if (e.code === "P6003") {
+ console.log(
+ "The cache invalidation rate limit has been reached. Please try again later."
+ );
+ }
+ }
+ throw e;
+}
+```
+
+:::note
+You can invalidate up to 5 tags per call.
+:::
+
+## `$accelerate.invalidateAll`
+
+You can invalidate the entire cache using the `$accelerate.invalidateAll` API.
+
+### Example
+
+To invalidate the query below:
+
+```ts
+await prisma.user.findMany({
+ where: {
+ email: {
+ contains: "alice@prisma.io",
+ },
+ },
+ cacheStrategy: {
+ swr: 60,
+ ttl: 60,
+ // highlight-start
+ tags: ["emails_with_alice"],
+ // highlight-end
+ },
+});
+```
+
+Just call the `$accelerate.invalidateAll` API:
+
+```ts
+try {
+ // highlight-start
+ await prisma.$accelerate.invalidateAll();
+ // highlight-end
+} catch (e) {
+ if (e instanceof Prisma.PrismaClientKnownRequestError) {
+ if (e.code === "P6003") {
+ console.log(
+ "The cache invalidation rate limit has been reached. Please try again later."
+ );
+ }
+ }
+ throw e;
+}
+```
+
+### Why use `$accelerate.invalidateAll`?
+
+This method offers better editor support (e.g. IntelliSense) than alternatives like `invalidate("all")`.
+
+:::warning
+
+This clears the cache for the entire environment, so use it with care.
+
+:::
+
+## Errors
+
+Prisma Postgres-related errors start with `P6xxx`.
+
+You can find the full error code reference for Prisma Postgres [here](/orm/reference/error-reference#prisma-accelerate).
diff --git a/content/250-postgres/300-database/900-error-reference.mdx b/content/250-postgres/300-database/900-error-reference.mdx
new file mode 100644
index 0000000000..0b5e253e56
--- /dev/null
+++ b/content/250-postgres/300-database/900-error-reference.mdx
@@ -0,0 +1,128 @@
+---
+title: 'Error reference'
+metaTitle: 'Prisma Postgres: Error Reference'
+metaDescription: 'Error reference documentation for Prisma Postgres.'
+tocDepth: 3
+toc: true
+---
+
+When working with Prisma Postgres, you may encounter errors, often identified by specific error codes, during development and operations.
+
+Understanding what these errors mean, why they occur, and how to resolve them is important for keeping your applications running smoothly. This guide provides insights and troubleshooting steps for the specific error codes you may encounter with Prisma Postgres.
+
+
+## [`P6009`](/orm/reference/error-reference#p6009-responsesizelimitexceeded) (`ResponseSizeLimitExceeded`)
+
+This error is triggered when the response size from a database query exceeds [the configured query response size limit](/postgres/database/connection-pooling#response-size-limit). We've implemented this restriction to safeguard your application performance, as retrieving data over `5MB` can significantly slow down your application due to multiple network layers.
+
+Typically, transmitting more than `5MB` of data is common when conducting ETL (Extract, Transform, Load) operations. However, for other scenarios such as transactional queries, real-time data fetching for user interfaces, bulk data updates, or aggregating large datasets for analytics outside of ETL contexts, it should generally be avoided. These use cases, while essential, can often be optimized to work within [the configured query response size limit](/postgres/database/connection-pooling#response-size-limit), ensuring smoother performance and a better user experience.
+
+### Possible causes for [`P6009`](/orm/reference/error-reference#p6009-responsesizelimitexceeded)
+
+#### Transmitting images/files in response
+
+This error may arise if images or files stored within your table are being fetched, resulting in a large response size. Storing assets directly in the database is generally discouraged because it significantly impacts database performance and scalability. In addition to performance, it makes database backups slow and significantly increases the cost of storing routine backups.
+
+**Suggested solution:** Configure the [query response size limit](/postgres/database/connection-pooling#response-size-limit) to be larger. If the limit is still exceeded, consider storing the image or file in a BLOB store like [Cloudflare R2](https://developers.cloudflare.com/r2/), [AWS S3](https://aws.amazon.com/pm/serv-s3/), or [Cloudinary](https://cloudinary.com/). These services allow you to store assets optimally and return a URL for access. Instead of storing the asset directly in the database, store the URL, which will substantially reduce the response size.
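+
+As a sketch of this pattern, where `uploadToBlobStore` stands in for whatever upload helper your storage provider offers (a hypothetical function, not a real API) and the `avatarUrl` field is illustrative:
+
+```ts
+// Upload the binary asset to external storage; the hypothetical helper
+// returns a public URL for the stored object.
+const avatarUrl = await uploadToBlobStore(fileBuffer);
+
+// Persist only the URL, keeping the response size small.
+await prisma.user.update({
+  where: { id: userId },
+  data: { avatarUrl },
+});
+```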
+
+
+#### Over-fetching of data
+
+In certain cases, a large number of records or fields are unintentionally fetched, which results in exceeding [the configured query response size limit](/postgres/database/connection-pooling#response-size-limit). This could happen when [the `where` clause](/orm/reference/prisma-client-reference#where) in the query is incorrect or entirely missing.
+
+**Suggested solution:** Configure the [query response size limit](/postgres/database/connection-pooling#response-size-limit) to be larger. If the limit is still exceeded, double-check that the `where` clause is filtering data as expected. To prevent fetching too many records, consider using [pagination](/orm/prisma-client/queries/pagination). Additionally, use the [`select`](/orm/reference/prisma-client-reference#select) clause to return only the necessary fields, reducing the response size.
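+
+A minimal sketch combining these techniques (the model, filter, and fields are illustrative):
+
+```ts
+const users = await prisma.user.findMany({
+  // Double-check that the filter actually narrows the result set.
+  where: { email: { endsWith: "@prisma.io" } },
+  // Return only the fields you need.
+  select: { id: true, name: true },
+  // Page through results instead of fetching everything at once.
+  skip: 0,
+  take: 50,
+});
+```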
+
+#### Fetching a large volume of data
+
+In many data processing workflows, especially those involving ETL (Extract, Transform, Load) processes or scheduled CRON jobs, there's a need to extract large amounts of data from data sources (like databases, APIs, or file systems) for analysis, reporting, or further processing. If you are running an ETL/CRON workload that fetches a large chunk of data for analytical processing, you might run into this limit.
+
+**Suggested solution:** Configure the [query response size limit](/postgres/database/connection-pooling#response-size-limit) to be larger. If the limit is exceeded, consider splitting your query into batches. This approach ensures that each batch fetches only a portion of the data, preventing you from exceeding the size limit for a single operation.
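+
+One way to batch such reads is cursor-based pagination, sketched below (the `user` model and the `processBatch` helper are illustrative):
+
+```ts
+const batchSize = 1000;
+let cursor: number | undefined;
+
+while (true) {
+  // Fetch one batch at a time so each response stays under the size limit.
+  const batch = await prisma.user.findMany({
+    take: batchSize,
+    orderBy: { id: "asc" },
+    ...(cursor !== undefined ? { cursor: { id: cursor }, skip: 1 } : {}),
+  });
+  if (batch.length === 0) break;
+
+  await processBatch(batch); // hypothetical processing step
+  cursor = batch[batch.length - 1].id;
+}
+```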
+
+
+## [`P6004`](/orm/reference/error-reference#p6004-querytimeout) (`QueryTimeout`)
+
+This error occurs when a database query fails to return a response within [the configured query timeout limit](/postgres/database/connection-pooling#query-timeout-limit). The query timeout limit includes the duration of waiting for a connection from the pool, network latency to the database, and the execution time of the query itself. We enforce this limit to prevent unintentional long-running queries that can overload system resources.
+
+:::info
+
+The time for Prisma Postgres's cross-region networking is excluded from [the configured query timeout limit](/postgres/database/connection-pooling#query-timeout-limit).
+
+:::
+
+### Possible causes for [`P6004`](/orm/reference/error-reference#p6004-querytimeout)
+
+This error can occur for numerous reasons. Some of the prominent ones are:
+
+#### High traffic and insufficient connections
+
+If the application is receiving very high traffic and there are not a sufficient number of connections available to the database, then the queries would need to wait for a connection to become available. This situation can lead to queries waiting longer than [the configured query timeout limit](/postgres/database/connection-pooling#query-timeout-limit) for a connection, ultimately triggering a timeout error if they do not get serviced within this duration.
+
+**Suggested solution**: Review and, if necessary, increase the configured connection limit when using Prisma Accelerate with your own database ([reference](/postgres/database/connection-pooling#configuring-the-connection-pool-size)). This limit should align with the maximum number of connections your database supports.
+
+By default, the connection limit is set to 10 unless a different `connection_limit` is specified in your database connection string.
+
+#### Long-running queries
+
+Queries may be slow to respond, hitting [the configured query timeout limit](/postgres/database/connection-pooling#query-timeout-limit) even when connections are available. This could happen if a very large amount of data is being fetched in a single query or if appropriate indexes are missing from the table.
+
+**Suggested solution**: Configure the [query timeout limit](/postgres/database/connection-pooling#query-timeout-limit) to be larger. If the limit is exceeded, identify the slow-running queries and fetch only the necessary data. Use the `select` clause to retrieve specific fields and avoid fetching unnecessary data. Additionally, consider adding appropriate indexes to improve query efficiency. You might also isolate long-running queries into separate environments to prevent them from affecting transactional queries.
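+
+To pinpoint why a query is slow, you can also inspect its plan with `EXPLAIN ANALYZE`, assuming raw queries are available in your setup (a diagnostic sketch; the table and columns are illustrative):
+
+```ts
+// Inspect the query plan of a suspected slow query.
+const plan = await prisma.$queryRawUnsafe(
+  `EXPLAIN ANALYZE SELECT "id", "email" FROM "User" WHERE "email" LIKE '%@prisma.io'`
+);
+console.dir(plan);
+```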
+
+#### Database resource contention
+
+A common yet challenging issue is when other services operating on the same database perform heavy analytics or data processing tasks, significantly consuming database resources. These operations can monopolize database connections and processing power, leading to a scenario where even simple queries cannot be executed in a timely manner. This "busy" or "noisy" database environment can cause queries that are typically fast to run slowly or even time out, particularly during periods of high activity from other services.
+
+Users often rely on CPU and memory usage metrics to gauge database load, which can be misleading. While these are important indicators, they might not fully represent the database's operational state. Direct metrics like the number of reads, writes, and wait times offer a clearer view of the database's performance and should be monitored closely. A noticeable degradation in these metrics, especially in the absence of changes to the queries or data model, suggests that external pressures are affecting database performance.
+
+**Suggested solution**: If normally quick queries are intermittently slow or timing out without any modifications to them, it's likely that competing queries are putting pressure on the same database tables. To diagnose this, use monitoring tools or your database's built-in capabilities to observe reads, writes, and wait times. Such monitoring will reveal activity patterns or spikes that align with the observed performance dips.
+
+Moreover, it's crucial to periodically scrutinize and refine essential queries and verify that tables are properly indexed. This proactive approach minimizes the vulnerability of these queries to slowdowns caused by competing workloads.
+
+
+## [`P6008`](/orm/reference/error-reference#p6008-connectionerrorenginestarterror) (`ConnectionError|EngineStartError`)
+
+This error indicates that Prisma ORM cannot establish a connection to your Prisma Postgres database, which can happen for several reasons.
+
+### Possible causes for [`P6008`](/orm/reference/error-reference#p6008-connectionerrorenginestarterror)
+
+#### Unreachable database host or port
+
+If the database server's hostname or port is incorrect or unreachable, you may encounter this error.
+
+**Suggested solution:** Verify the hostname/port of the database connection string that was provided while creating the Prisma Accelerate project. Additionally, attempt to connect to the database using a Database GUI tool (e.g., [Prisma Studio](https://www.prisma.io/studio), [TablePlus](https://tableplus.com/), or [DataGrip](https://www.jetbrains.com/datagrip/)) for further investigation.
+
+#### Incorrect username/password/database name
+
+This error can happen when incorrect credentials are provided, preventing Prisma ORM from establishing a connection to your database.
+
+**Suggested solution:** Verify the correctness of your database's username, password, and name in the connection string provided to Prisma Accelerate. Ensure that these credentials match those required by your database. Testing the connection using a direct database GUI tool can also help in confirming if the provided credentials are correct.
+
+## [`P5011`](/orm/reference/error-reference#p5011-too-many-requests) (`TooManyRequests`)
+
+This error occurs when Prisma Postgres detects a high volume of requests that surpasses allowable thresholds. It acts as a protective measure to safeguard both Prisma Postgres and your underlying database from excessive load.
+
+### Possible causes for [`P5011`](/orm/reference/error-reference#p5011-too-many-requests)
+
+#### Aggressive retry loops
+
+If your application retries queries immediately or with minimal delay, especially after receiving certain errors, the rapid accumulation of requests can surpass the threshold.
+
+**Suggested solution:**
+
+- Implement an exponential backoff strategy, as sketched below. Rather than retrying immediately or with a fixed delay, gradually increase the delay period after each failed attempt.
+- This allows the system time to recover and reduces the likelihood of overwhelming Prisma Accelerate and your database.
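+
+A minimal backoff wrapper might look like this (the retry ceiling, base delay, and jitter are illustrative values; in practice you may want to retry only on specific error codes such as `P5011`):
+
+```ts
+async function withBackoff<T>(fn: () => Promise<T>, maxRetries = 5): Promise<T> {
+  for (let attempt = 0; ; attempt++) {
+    try {
+      return await fn();
+    } catch (e) {
+      if (attempt >= maxRetries) throw e;
+      // Exponential backoff with jitter: ~100ms, ~200ms, ~400ms, ...
+      const delay = 100 * 2 ** attempt + Math.random() * 100;
+      await new Promise((resolve) => setTimeout(resolve, delay));
+    }
+  }
+}
+
+// Usage: wrap a query that may hit the threshold.
+const users = await withBackoff(() => prisma.user.findMany());
+```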
+
+#### Sudden traffic spikes
+
+Unpredicted traffic surges (for example, during product launches, flash sales, or viral growth events) can cause the threshold to be exceeded and result in a `P5011` error.
+
+**Suggested solution:**
+- Consider proactive scaling strategies for both Prisma Accelerate and your database.
+- Monitor traffic and resource usage. If you anticipate a surge, please contact [support](/platform/support) for capacity planning and potential configuration adjustments.
+
+#### Prolonged or planned high workloads
+
+Certain processes, such as bulk data imports, ETL operations, or extended CRON jobs, can generate continuous high query volume over time.
+
+**Suggested solution:**
+- Use batching or chunking techniques to break large operations into smaller parts.
+- Establish throttling or scheduling to distribute the load more evenly.
\ No newline at end of file
diff --git a/content/250-postgres/300-database/_category_.json b/content/250-postgres/300-database/_category_.json
new file mode 100644
index 0000000000..e42b981b74
--- /dev/null
+++ b/content/250-postgres/300-database/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Database",
+ "collapsible": false,
+ "collapsed": false
+}
diff --git a/content/250-postgres/300-database/index.mdx b/content/250-postgres/300-database/index.mdx
new file mode 100644
index 0000000000..b5f9657265
--- /dev/null
+++ b/content/250-postgres/300-database/index.mdx
@@ -0,0 +1,13 @@
+---
+title: 'Database'
+metaTitle: 'Prisma Postgres database features'
+metaDescription: 'Explore core database features in Prisma Postgres, including caching, connection pooling, backups, and API access.'
+hide_table_of_contents: true
+---
+
+Learn how Prisma Postgres implements foundational database features such as built-in caching, connection pooling, backups, and secure access. These features are powered by Prisma Postgres infrastructure and designed to optimize application performance and reliability.
+
+
+## In this section
+
+
diff --git a/content/250-postgres/350-caching.mdx b/content/250-postgres/350-caching.mdx
deleted file mode 100644
index b532d3f6a6..0000000000
--- a/content/250-postgres/350-caching.mdx
+++ /dev/null
@@ -1,23 +0,0 @@
----
-title: 'Caching'
-metaTitle: 'Caching queries in Prisma Postgres'
-metaDescription: 'Learn about caching queries in Prisma Postgres'
-tocDepth: 3
-toc: true
----
-
-
-Prisma Postgres comes with a built-in global cache (enabled by [Prisma Accelerate](/accelerate/)) that helps you speed up your database queries.
-
-You can cache results on a per-query level using the `cacheStrategy` option in any Prisma ORM query, e.g.:
-
-```ts
-const users = await prisma.user.findMany({
- cacheStrategy: {
- ttl: 60,
- swr: 60
- }
-})
-```
-
-Check out the [caching](/accelerate/caching) documentation for details.
diff --git a/content/250-postgres/400-query-optimization/100-setup.mdx b/content/250-postgres/400-query-optimization/100-setup.mdx
new file mode 100644
index 0000000000..c5d87ea33d
--- /dev/null
+++ b/content/250-postgres/400-query-optimization/100-setup.mdx
@@ -0,0 +1,106 @@
+---
+title: 'Setup'
+metaTitle: 'Getting started with optimizing queries in Prisma Postgres'
+metaDescription: 'Learn how to quickly set up and start optimizing Prisma Postgres queries.'
+tocDepth: 3
+toc: true
+---
+
+## Prerequisites
+
+Before you begin with Prisma Optimize for Prisma Postgres, ensure you have the following:
+
+- A [Prisma Data Platform account](https://console.prisma.io/optimize?utm_source=docs&utm_medium=optimize-page).
+- A project using [Prisma Client](/orm/prisma-client) version `5.0.0` or higher (we recommend using the latest version).
+- A [Prisma Postgres database](/postgres/introduction/getting-started).
+
+:::note
+
+Prisma Optimize is intended for use in local environments. Learn more in the [FAQ](/postgres/more/faq#can-i-enable-query-optimizations-for-prisma-postgres-in-production).
+
+:::
+
+## 1. Launch Optimize
+
+1. Log in to your [Prisma Data Platform account](https://console.prisma.io/optimize?utm_source=docs&utm_medium=ppg_optimize_page).
+2. Click the **Optimize** tab on the left navigation.
+3. Click the **Generate API key** button.
+4. Copy the API key that appears and paste it somewhere safe, like a password manager.
+5. Click the copy icons to continue through each setup screen until you see the **Finish & optimize** button. Click that to complete the setup.
+6. Once you're done, Optimize will automatically begin a new recording session in the background.
+
+## 2. Add Optimize to your application
+
+### 2.1. Install the required Prisma Client extension
+
+Run the following command in your terminal to install the necessary dependencies:
+
+```bash
+npm install @prisma/extension-optimize
+```
+
+:::note
+
+**Enabling tracing in older versions of Prisma ORM**
+
+For versions of Prisma ORM between `4.2.0` and `6.1.0`, you need to enable the `tracing` preview feature in your Prisma schema file:
+
+```prisma
+generator client {
+  provider        = "prisma-client-js"
+  previewFeatures = ["tracing"]
+}
+```
+
+:::
+
+### 2.2. Add the Optimize API Key to your `.env` file
+
+Copy the Prisma Optimize API key and add it to your `.env` file:
+
+```bash
+OPTIMIZE_API_KEY="YOUR_OPTIMIZE_API_KEY"
+```
+
+### 2.3. Extend your Prisma Client instance
+
+Extend your existing Prisma Client instance with the Optimize extension:
+
+```ts
+import { PrismaClient } from "@prisma/client";
+import { withAccelerate } from "@prisma/extension-accelerate";
+import { withOptimize } from "@prisma/extension-optimize";
+
+const prisma = new PrismaClient().$extends(
+ withOptimize({ apiKey: process.env.OPTIMIZE_API_KEY }),
+).$extends(withAccelerate());
+```
+
+#### Using the Optimize extension with other extensions or middleware
+
+Since [extensions are applied one after another](/orm/prisma-client/client-extensions#conflicts-in-combined-extensions), make sure you apply them in the correct order. Extensions cannot share behavior and the last extension applied takes precedence.
+
+```ts
+const prisma = new PrismaClient().$extends(withOptimize()).$extends(withAccelerate())
+```
+
+If you are using [Prisma Middleware](/orm/prisma-client/client-extensions/middleware) in your application, make sure they are added before any Prisma Client extensions (like Optimize). For example:
+
+```ts
+const prisma = new PrismaClient().$use(middleware).$extends(withOptimize()).$extends(withAccelerate())
+```
+
+### 2.4. Use Prisma Optimize to generate insights
+
+Follow these steps to start generating query insights with Prisma Optimize:
+
+1. Run your app and execute some Prisma queries while recording is active.
+2. After your app runs and generates insights based on the executed Prisma queries, click the red **Recording** button.
+3. Explore [individual query details](/postgres/query-optimization/recordings#data-captured-in-a-recording-session) by clicking on them, and check the **Recommendations** tab for any suggested improvements to enhance query performance.
+
+ :::info
+ Use [Prisma AI](/postgres/query-optimization/prisma-ai) to understand recommendations and apply them within your Prisma model context.
+ :::
+
+## Need help?
+
+If you need assistance, reach out in the `#help-and-questions` channel on our [Discord](https://pris.ly/discord?utm_source=docs&utm_medium=generated_text_cta), or connect with [our community](https://www.prisma.io/community) to see how others are using Optimize.
diff --git a/content/250-postgres/400-query-optimization/200-recordings.mdx b/content/250-postgres/400-query-optimization/200-recordings.mdx
new file mode 100644
index 0000000000..352c1f5430
--- /dev/null
+++ b/content/250-postgres/400-query-optimization/200-recordings.mdx
@@ -0,0 +1,58 @@
+---
+title: 'Recordings'
+metaTitle: 'Prisma Postgres: Recordings'
+metaDescription: "Learn about using Optimize's recording feature."
+tocDepth: 3
+toc: true
+---
+
+The recordings feature helps developers debug and isolate sets of queries into distinct sessions, known as recordings. This targeted approach enables precise performance analysis and optimization by preventing the mixing of queries from different applications or test rounds, leading to clearer insights and more effective debugging.
+
+## Managing a recording session
+
+You can manually start and stop recording sessions via the [Optimize dashboard](https://console.prisma.io/optimize) by clicking the **Start Recording** and **Stop Recording** buttons.
+
+:::warning
+A recording will automatically stop if the 10k query limit is reached or if the Prisma schema of the app is changed.
+:::
+
+## Identifying a recording session
+
+You can rename and tag your recording sessions for easy identification and context. Click on the default name of the session and type the desired name.
+
+## Data captured in a recording session
+
+Each recording session captures detailed insights about the queries executed in your app, including:
+
+- All queries executed during the session.
+- The raw query generated and sent to the database by Prisma ORM as **Raw Query**.
+- The number of times a query pattern was executed, listed as **Count**.
+- [Query performance metrics](/postgres/query-optimization/performance-metrics).
+- Errors encountered during query execution.
+
+Each recording can include up to 10k queries. There are no limits on storage retention.
+
+## Recommendations from a recording session
+
+When a recording session ends, Optimize generates recommendations such as:
+
+- [Excessive number of rows returned](/postgres/query-optimization/recommendations/excessive-number-of-rows-returned)
+- [Full table scans caused by LIKE operations](/postgres/query-optimization/recommendations/full-table-scans-caused-by-like-operations)
+- [Queries on unindexed columns](/postgres/query-optimization/recommendations/queries-on-unindexed-columns)
+- [Repeated query](/postgres/query-optimization/recommendations/repeated-query)
+- [Overfetching](/postgres/query-optimization/recommendations/select-returning)
+- [Using `@db.Money`](/postgres/query-optimization/recommendations/avoid-db-money)
+- [Using `@db.Char(n)`](/postgres/query-optimization/recommendations/avoid-char)
+- [Using `@db.VarChar(n)`](/postgres/query-optimization/recommendations/avoid-varchar)
+- [Using `timestamp(0)` or `timestamptz(0)`](/postgres/query-optimization/recommendations/avoid-timestamp-timestampz-0)
+- [Using `CURRENT_TIME`](/postgres/query-optimization/recommendations/current-time)
+- [Storing large objects or BLOBs in the database](/postgres/query-optimization/recommendations/storing-blob-in-database)
+- [Indexing on unique columns](/postgres/query-optimization/recommendations/indexing-on-unique-columns)
+- [Long-running transactions](/postgres/query-optimization/recommendations/long-running-transactions)
+- [Unnecessary indexes](/postgres/query-optimization/recommendations/unnecessary-indexes)
+
+:::info
+Use [Prisma AI](/postgres/query-optimization/prisma-ai) to ask follow-up questions about a recommendation.
+:::
+
+Learn more about the recommendations generated by Optimize [here](/postgres/query-optimization/recommendations).
\ No newline at end of file
diff --git a/content/700-optimize/400-recommendations/100-excessive-number-of-rows-returned.mdx b/content/250-postgres/400-query-optimization/300-recommendations/100-excessive-number-of-rows-returned.mdx
similarity index 100%
rename from content/700-optimize/400-recommendations/100-excessive-number-of-rows-returned.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/100-excessive-number-of-rows-returned.mdx
diff --git a/content/700-optimize/400-recommendations/1000-current-time.mdx b/content/250-postgres/400-query-optimization/300-recommendations/1000-current-time.mdx
similarity index 100%
rename from content/700-optimize/400-recommendations/1000-current-time.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/1000-current-time.mdx
diff --git a/content/700-optimize/400-recommendations/1100-unnecessary-indexes.mdx b/content/250-postgres/400-query-optimization/300-recommendations/1100-unnecessary-indexes.mdx
similarity index 100%
rename from content/700-optimize/400-recommendations/1100-unnecessary-indexes.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/1100-unnecessary-indexes.mdx
diff --git a/content/700-optimize/400-recommendations/1200-long-running-transactions.mdx b/content/250-postgres/400-query-optimization/300-recommendations/1200-long-running-transactions.mdx
similarity index 100%
rename from content/700-optimize/400-recommendations/1200-long-running-transactions.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/1200-long-running-transactions.mdx
diff --git a/content/700-optimize/400-recommendations/1300-indexing-on-unique-columns.mdx b/content/250-postgres/400-query-optimization/300-recommendations/1300-indexing-on-unique-columns.mdx
similarity index 78%
rename from content/700-optimize/400-recommendations/1300-indexing-on-unique-columns.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/1300-indexing-on-unique-columns.mdx
index dea66ea352..dd21dbec47 100644
--- a/content/700-optimize/400-recommendations/1300-indexing-on-unique-columns.mdx
+++ b/content/250-postgres/400-query-optimization/300-recommendations/1300-indexing-on-unique-columns.mdx
@@ -14,4 +14,8 @@ Unique constraints inherently enforce uniqueness by generating an underlying ind
This redundancy increases write costs and slows down updates, as the database must synchronize multiple indexes.
-This guideline applies broadly to relational databases like PostgreSQL, MySQL, MariaDB, SQLite, and SQL Server, which automatically create indexes for unique constraints.
+:::note
+
+This guideline also applies broadly to relational databases like PostgreSQL, MySQL, MariaDB, SQLite, and SQL Server, which automatically create indexes for unique constraints.
+
+:::
\ No newline at end of file
diff --git a/content/700-optimize/400-recommendations/1400-storing-blob-in-database.mdx b/content/250-postgres/400-query-optimization/300-recommendations/1400-storing-blob-in-database.mdx
similarity index 100%
rename from content/700-optimize/400-recommendations/1400-storing-blob-in-database.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/1400-storing-blob-in-database.mdx
diff --git a/content/700-optimize/400-recommendations/200-queries-on-unindexed-columns.mdx b/content/250-postgres/400-query-optimization/300-recommendations/200-queries-on-unindexed-columns.mdx
similarity index 100%
rename from content/700-optimize/400-recommendations/200-queries-on-unindexed-columns.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/200-queries-on-unindexed-columns.mdx
diff --git a/content/700-optimize/400-recommendations/300-full-table-scans-caused-by-like-operations.mdx b/content/250-postgres/400-query-optimization/300-recommendations/300-full-table-scans-caused-by-like-operations.mdx
similarity index 100%
rename from content/700-optimize/400-recommendations/300-full-table-scans-caused-by-like-operations.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/300-full-table-scans-caused-by-like-operations.mdx
diff --git a/content/700-optimize/400-recommendations/400-repeated-query.mdx b/content/250-postgres/400-query-optimization/300-recommendations/400-repeated-query.mdx
similarity index 88%
rename from content/700-optimize/400-recommendations/400-repeated-query.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/400-repeated-query.mdx
index d03524f230..2da4c4d17e 100644
--- a/content/700-optimize/400-recommendations/400-repeated-query.mdx
+++ b/content/250-postgres/400-query-optimization/300-recommendations/400-repeated-query.mdx
@@ -29,6 +29,6 @@ When the same query is executed multiple times with the same parameters within a
:::info
-To learn more about avoiding repeated queries with caching using Prisma Accelerate, refer to the [Prisma Accelerate documentation](/accelerate/caching).
+To learn more about avoiding repeated queries with caching in Prisma Postgres, refer to the [caching documentation](/postgres/database/caching).
:::
\ No newline at end of file
diff --git a/content/700-optimize/400-recommendations/500-select-returning.mdx b/content/250-postgres/400-query-optimization/300-recommendations/500-select-returning.mdx
similarity index 100%
rename from content/700-optimize/400-recommendations/500-select-returning.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/500-select-returning.mdx
diff --git a/content/700-optimize/400-recommendations/600-avoid-db-money.mdx b/content/250-postgres/400-query-optimization/300-recommendations/600-avoid-db-money.mdx
similarity index 100%
rename from content/700-optimize/400-recommendations/600-avoid-db-money.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/600-avoid-db-money.mdx
diff --git a/content/700-optimize/400-recommendations/700-avoid-char.mdx b/content/250-postgres/400-query-optimization/300-recommendations/700-avoid-char.mdx
similarity index 100%
rename from content/700-optimize/400-recommendations/700-avoid-char.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/700-avoid-char.mdx
diff --git a/content/700-optimize/400-recommendations/700-avoid-varchar.mdx b/content/250-postgres/400-query-optimization/300-recommendations/700-avoid-varchar.mdx
similarity index 100%
rename from content/700-optimize/400-recommendations/700-avoid-varchar.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/700-avoid-varchar.mdx
diff --git a/content/700-optimize/400-recommendations/900-avoid-timestamp-timestampz-0.mdx b/content/250-postgres/400-query-optimization/300-recommendations/900-avoid-timestamp-timestampz-0.mdx
similarity index 100%
rename from content/700-optimize/400-recommendations/900-avoid-timestamp-timestampz-0.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/900-avoid-timestamp-timestampz-0.mdx
diff --git a/content/250-postgres/400-query-optimization/300-recommendations/_category_.json b/content/250-postgres/400-query-optimization/300-recommendations/_category_.json
new file mode 100644
index 0000000000..79378e317a
--- /dev/null
+++ b/content/250-postgres/400-query-optimization/300-recommendations/_category_.json
@@ -0,0 +1,4 @@
+{
+ "collapsed": true,
+ "collapsible": true
+}
diff --git a/content/700-optimize/400-recommendations/index.mdx b/content/250-postgres/400-query-optimization/300-recommendations/index.mdx
similarity index 77%
rename from content/700-optimize/400-recommendations/index.mdx
rename to content/250-postgres/400-query-optimization/300-recommendations/index.mdx
index cb86fc11bf..79cb40f18e 100644
--- a/content/700-optimize/400-recommendations/index.mdx
+++ b/content/250-postgres/400-query-optimization/300-recommendations/index.mdx
@@ -9,7 +9,7 @@ toc: true
Optimize provides recommendations focused on performance improvements such as indexing issues, excessive data retrieval, and inefficient query patterns. Recommendations include:
:::info
-Use [Prisma AI](/optimize/prisma-ai) to ask follow-up questions about any recommendation.
+Use [Prisma AI](/postgres/query-optimization/prisma-ai) to ask follow-up questions about any recommendation.
:::
diff --git a/content/250-postgres/400-query-optimization/400-prisma-ai.mdx b/content/250-postgres/400-query-optimization/400-prisma-ai.mdx
new file mode 100644
index 0000000000..698b5cb0fb
--- /dev/null
+++ b/content/250-postgres/400-query-optimization/400-prisma-ai.mdx
@@ -0,0 +1,18 @@
+---
+title: 'Prisma AI'
+metaTitle: 'Prisma Postgres: Prisma AI'
+metaDescription: "Learn about using Optimize's Prisma AI feature."
+tocDepth: 3
+toc: true
+---
+
+Prisma AI enables you to ask follow-up questions on a provided [recommendation](/postgres/query-optimization/recommendations) for additional clarity.
+
+## Using Prisma AI
+
+To interact with Prisma AI:
+
+1. Select a recommendation from an Optimize [recording](/postgres/query-optimization/recordings).
+2. Click the **Ask AI** button.
+
+Prisma AI helps you gain deeper insights into a recommendation and learn how to apply it effectively to your Prisma model.
\ No newline at end of file
diff --git a/content/250-postgres/400-query-optimization/500-performance-metrics.mdx b/content/250-postgres/400-query-optimization/500-performance-metrics.mdx
new file mode 100644
index 0000000000..5139640e31
--- /dev/null
+++ b/content/250-postgres/400-query-optimization/500-performance-metrics.mdx
@@ -0,0 +1,29 @@
+---
+title: 'Performance metrics'
+metaTitle: 'Prisma Postgres: Query performance metrics'
+metaDescription: 'Learn about the query performance metrics provided by Optimize.'
+tocDepth: 3
+toc: true
+---
+
+An Optimize recording session provides detailed insights into the latencies of executed queries, capturing key metrics such as average duration, 50th percentile, 99th percentile, and maximal query execution time.
+
+## Total query durations
+
+Prisma Optimize measures total latency for query patterns, enabling you to analyze and debug slow queries effectively.
+
+### Average query duration (**`AVG`**)
+
+The average query duration reveals the mean execution time across all queries, helping you assess overall performance trends and identify inefficiencies that impact the user experience.
+
+### 50th percentile (**`P50`**)
+
+The 50th percentile, or median, query duration indicates the time within which half of your queries complete. This metric offers a clear view of typical user performance, unaffected by outliers.
+
+### 99th percentile (**`P99`**)
+
+The 99th percentile query duration highlights the execution time for the slowest 1% of queries. This metric is crucial for uncovering and addressing performance bottlenecks that, while infrequent, can significantly impact user satisfaction.
+
+### Maximal query duration (**`MAX`**)
+
+The maximal query duration measures the time taken by the single slowest query. This metric helps identify extreme cases, providing insights into the worst performance scenarios your system might face, so you can diagnose and resolve outliers.
diff --git a/content/250-postgres/400-query-optimization/_category_.json b/content/250-postgres/400-query-optimization/_category_.json
new file mode 100644
index 0000000000..127964a1af
--- /dev/null
+++ b/content/250-postgres/400-query-optimization/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Query Optimization",
+ "collapsible": false,
+ "collapsed": false
+}
diff --git a/content/250-postgres/400-query-optimization/index.mdx b/content/250-postgres/400-query-optimization/index.mdx
new file mode 100644
index 0000000000..3318ef33c1
--- /dev/null
+++ b/content/250-postgres/400-query-optimization/index.mdx
@@ -0,0 +1,12 @@
+---
+title: 'Query optimization'
+metaTitle: 'Query optimization with Prisma Postgres'
+metaDescription: 'Learn how to record, analyze, and improve query performance using Prisma Optimize in Prisma Postgres.'
+hide_table_of_contents: true
+---
+
+Understand how Prisma Postgres helps you optimize query performance using the Prisma Optimize toolkit. This section covers setup, recommendations, recording queries, performance metrics, and using Prisma AI for guided improvements.
+
+## In this section
+
+
diff --git a/content/250-postgres/index.mdx b/content/250-postgres/index.mdx
index b5363771df..5d3f1640a8 100644
--- a/content/250-postgres/index.mdx
+++ b/content/250-postgres/index.mdx
@@ -5,14 +5,15 @@ metaDescription: 'Prisma Postgres®'
sidebar_label: 'Prisma Postgres'
sidebar_position: 0
hide_table_of_contents: false
-pagination_next: 'postgres/getting-started'
+pagination_next: 'postgres/introduction/getting-started'
---
## In this section
+:::info[note]
----
+Postgres, PostgreSQL and the Slonik Logo are trademarks or registered trademarks of the PostgreSQL Community Association of Canada, and used with their permission.
-> **Note**: Postgres, PostgreSQL and the Slonik Logo are trademarks or registered trademarks of the PostgreSQL Community Association of Canada, and used with their permission
+:::
\ No newline at end of file
diff --git a/content/300-accelerate/200-getting-started.mdx b/content/300-accelerate/200-getting-started.mdx
index f0a8d9c1dc..8f3259f368 100644
--- a/content/300-accelerate/200-getting-started.mdx
+++ b/content/300-accelerate/200-getting-started.mdx
@@ -196,7 +196,7 @@ You should now see improved performance for your cached queries.
-For information about which strategy best serves your application, see [Select a cache strategy](/accelerate/caching#selecting-a-cache-strategy).
+For information about which strategy best serves your application, see [Select a cache strategy](/postgres/database/caching#selecting-a-cache-strategy).
@@ -208,7 +208,7 @@ As of Prisma version `5.2.0` you can use Prisma Studio with the Accelerate conne
#### Invalidate the cache and keep your cached query results up-to-date
-If your application requires real-time or near-real-time data, cache invalidation ensures that users see the most current data, even when using a large `ttl` (Time-To-Live) or `swr` (Stale-While-Revalidate) [cache strategy](/accelerate/caching#cache-strategies). By invalidating your cache, you can bypass extended caching periods to show live data whenever it's needed.
+If your application requires real-time or near-real-time data, cache invalidation ensures that users see the most current data, even when using a large `ttl` (Time-To-Live) or `swr` (Stale-While-Revalidate) [cache strategy](/postgres/database/caching#cache-strategies). By invalidating your cache, you can bypass extended caching periods to show live data whenever it's needed.
For example, if a dashboard displays customer information and a customer’s contact details change, cache invalidation allows you to refresh only that data instantly, ensuring support staff always see the latest information without waiting for the cache to expire.
diff --git a/content/300-accelerate/250-connection-pooling.mdx b/content/300-accelerate/250-connection-pooling.mdx
index 6855a01fb1..5d5ce43f70 100644
--- a/content/300-accelerate/250-connection-pooling.mdx
+++ b/content/300-accelerate/250-connection-pooling.mdx
@@ -4,129 +4,5 @@ metaTitle: 'Prisma Accelerate: Connection Pooling'
metaDescription: "Learn about everything you need to know to use Accelerate's connection pooling."
---
-:::info
-
-To learn more about connection pooling generally, be sure to check out [Prisma's Data Guide](https://www.prisma.io/dataguide/database-tools/connection-pooling) for a comprehensive overview on what connection pooling is, what it seeks to address, and how it works.
-
-:::
-
-Accelerate provides built-in connection pooling by default. By using Accelerate, you get the benefits of connection pooling without having to configure anything. The efficient management of database connections allows the database to process more queries without exhausting the available database connections, making your application more scalable.
-
-In some cases, however, it may be beneficial to further configure connection pooling in order to optimize the performance of your application.
-
-:::note
-
-This document focuses on the connection pooling features of Prisma Accelerate. For more information about the internal connection pool of Prisma ORM specifically, see our [ORM connection pooling documentation](/orm/prisma-client/setup-and-configuration/databases-connections/connection-pool).
-
-:::
-
-### Connection Pooling in Accelerate
-
-By default, Accelerate has a connection limit of `10`. This means that Prisma Accelerate will open at most 10 database connections to your database per Prisma Schema used in Accelerate. For the Starter plan, this is also the maximum connection limit.
-
-In most cases, this connection limit is sufficient. However, in high traffic or heavy compute environments, you may need to [increase this value](#configuring-the-connection-pool-size). If your project is on the [Pro plan](https://www.prisma.io/pricing), the maximum connection limit is `100`. For [Business plan](https://www.prisma.io/pricing) projects the maximum is `1000`.
-
-### Configuring the connection pool size
-
-When not using Prisma Accelerate, you can configure the connection pool of Prisma ORM [via the connection string](/orm/prisma-client/setup-and-configuration/databases-connections/connection-pool#setting-the-connection-pool-size). When using Prisma Accelerate *this feature is not available*. Instead, you can configure the connection pool size in your Platform project via the `Connection limit` setting [on Accelerate setup](/accelerate/getting-started).
-
-#### Autoscaling
-
-Autoscaling allows Accelerate to dynamically allocate resources based on your application's traffic. As consistent usage approaches the current connection limit, Accelerate will begin allocating new resources to handle the increased load. If traffic continues to rise, Accelerate will scale up further. Conversely, when traffic decreases, Accelerate will scale down accordingly.
-
-*To provide more context, Accelerate horizontally scales your environment by distributing the total available connections across multiple Query Engine instances. For example:*
-
-- If your environment connection limit is set to 1000, Accelerate will scale up to multiple Query Engine instances (e.g., 100 instances).
-- Each Query Engine instance is then assigned a share of the total connections— in this case, 10 connections per instance.
-- This is why an individual Query Engine instance reports a connection limit of 10, even though the total connection limit for the environment is 1000.
-
-*This scaling mechanism ensures that your application can handle higher traffic loads by increasing the number of Query Engine instances while keeping the connection pool management efficient and distributed.*
-
-
-Autoscaling is enabled when the Accelerate connection limit is set above the default value. This feature is not available on the Starter plan.
-
-The maximum value of your Accelerate connection limit is based on your [Prisma Data Platform plan](https://www.prisma.io/pricing).
-
-| Plan | Maximum Connection Limit |
-|------------|--------------------------------------|
-| Starter | 10 |
-| Pro | 100 |
-| Business | 1000 |
-| Enterprise | [Contact Us](mailto:sales@prisma.io) |
-
-### Configuring the connection pool timeout
-
-The connection pool timeout is the maximum number of seconds that a query will block while waiting for a connection from Accelerate's internal connection pool. This occurs if the number of concurrent requests exceeds the connection limit, resulting in queueing of additional requests until a free connection becomes available. An exception is thrown if a free connection does not become available within the pool timeout. The connection pool timeout can be disabled by setting the value to 0.
-
-Similar to the connection pool size, you may also configure the connection pool timeout via the _database connection string_. To adjust this value, you may add the `pool_timeout` parameter to the database connection string.
-
-For example:
-
-```env no-copy
-postgresql://user:password@localhost:5432/db?connection_limit=10&pool_timeout=20
-```
-
-
-
-The default value for `pool_timeout` is `10` seconds.
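-
-For example, to disable the pool timeout so that queries wait indefinitely for a free connection, set `pool_timeout` to `0`:
-
-```env no-copy
-postgresql://user:password@localhost:5432/db?connection_limit=10&pool_timeout=0
-```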
-
-
-
-## Configuring query limits
-
-You can configure the minimum and maximum query response size, query duration, and transaction limits when using Prisma Accelerate from the **Settings** tab in your Accelerate-enabled project environment.
-
-### Query timeout limit
-
-Accelerate has a default global timeout of `10s` for each query, configurable using the slider labeled **Query duration**, based on your subscription plan:
-
-| Plan | Starter | Pro | Business |
-|----------|---------------|--------------|---------------|
-| Query timeout | Up to 10 seconds | Up to 20 seconds | Up to 60 seconds |
-
-See the [troubleshooting guide](/accelerate/troubleshoot#p6004-querytimeout) and our [pricing page](https://www.prisma.io/pricing#accelerate) for more information.
-
-:::warning
-While you can increase the query timeout, it’s recommended to inspect and optimize your database queries if they take longer than 10 seconds. This helps reduce stress on your underlying database, as long-running queries often indicate a need for optimization. Learn more in the [troubleshooting guide](/accelerate/troubleshoot#p6004-querytimeout).
-:::
-
-### Interactive transactions query timeout limit
-
-Accelerate has a default global timeout of `15s` for each [interactive transaction](/orm/prisma-client/queries/transactions#interactive-transactions), configurable using the slider labeled **Transaction duration**, based on your subscription plan:
-
-| Plan | Starter | Pro | Business |
-|----------------------------|---------------|--------------|---------------|
-| Interactive transaction limit | Up to 15 seconds | Up to 30 seconds | Up to 90 seconds |
-
-See the [troubleshooting guide](/accelerate/troubleshoot#p6004-querytimeout) and our [pricing page](https://www.prisma.io/pricing#accelerate) for more information.
-
-When you set a higher interactive transaction timeout in the Prisma Console, you **must also** specify a matching `timeout` value in your interactive transaction query via the `timeout` [transaction option](/orm/prisma-client/queries/transactions#transaction-options). Otherwise, transactions will still time out at the lower default (e.g., the 5-second limit when no `timeout` value is specified). Here’s an example of how to set a 30-second timeout in your code:
-
-```ts
-await prisma.$transaction(
- async (tx) => {
- // Your queries go here
- },
- {
- timeout: 30000, // 30s
- }
-);
-```
-
-:::warning
-While you can increase the interactive transaction timeout limit, it’s recommended to inspect and optimize your database transactions if they take longer than 15 seconds. Long-running transactions can negatively impact performance and often signal the need for optimization. Learn more in the [troubleshooting guide](/accelerate/troubleshoot#p6004-querytimeout) and review the [warning in the Interactive Transactions section](/orm/prisma-client/queries/transactions#interactive-transactions-1) in our documentation.
-:::
-
-### Response size limit
-
-Accelerate has a default global response size limit of `5MB`, configurable using the slider labeled **Response size**, based on your subscription plan:
-
-| Plan | Starter | Pro | Business |
-|----------|---------------|--------------|---------------|
-| Response size | Up to 5MB | Up to 10MB | Up to 20MB |
-
-See the [troubleshooting guide](/accelerate/troubleshoot#p6009-responsesizelimitexceeded) and our [pricing page](https://www.prisma.io/pricing#accelerate) for more information.
-
-:::warning
-While you can increase the query response size, it’s recommended to limit data retrieval to what you actually need. This improves database performance, reduces stress on your database, and makes your frontend applications more responsive. Queries exceeding 5 MB in size often indicate a need for optimization. Learn more in the [troubleshooting guide](/accelerate/troubleshoot#p6009-responsesizelimitexceeded).
-:::
+Accelerate provides built-in connection pooling to efficiently manage database connections. It's included as part of [Prisma Postgres](/postgres), but you can also use it with your own database by enabling Accelerate in the [Prisma Data Platform](https://console.prisma.io?utm_source=docs) and [connecting it to your database](/accelerate/getting-started).
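+
+As a minimal sketch of the client-side setup, assuming your `DATABASE_URL` is an Accelerate connection string (prefixed with `prisma://`), you extend Prisma Client with the Accelerate extension:
+
+```ts
+import { PrismaClient } from '@prisma/client'
+import { withAccelerate } from '@prisma/extension-accelerate'
+
+// Queries made through this client are routed through Accelerate's connection pool
+const prisma = new PrismaClient().$extends(withAccelerate())
+
+const users = await prisma.user.findMany()
+```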
+
+This page has moved: connection pooling in Prisma Accelerate is now documented in the [Prisma Postgres section](/postgres/database/connection-pooling).
diff --git a/content/300-accelerate/300-caching.mdx b/content/300-accelerate/300-caching.mdx
index 792b468b0a..f1c29fc35f 100644
--- a/content/300-accelerate/300-caching.mdx
+++ b/content/300-accelerate/300-caching.mdx
@@ -5,236 +5,6 @@ metaDescription: "Learn everything you need to know to use Accelerate's global d
toc_max_heading_level: 2
---
+Prisma Accelerate provides global caching for read queries using TTL, Stale-While-Revalidate (SWR), or a combination of both. It's included as part of Prisma Postgres, but can also be used with your own database by enabling Accelerate in the [Prisma Data Platform](https://console.prisma.io?utm_source=docs) and [configuring it with your database](/accelerate/getting-started).
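+
+As a minimal sketch (assuming a Prisma Client instance extended with the Accelerate extension), the cache strategy is set per query via the `cacheStrategy` parameter:
+
+```ts
+// Serve cached results as fresh for 30 seconds, then serve them stale for up to
+// 60 more seconds while the cache revalidates in the background.
+const users = await prisma.user.findMany({
+  cacheStrategy: {
+    ttl: 30,
+    swr: 60,
+  },
+})
+```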
-## Cache strategies
-
-For all read queries in Prisma Client, you can define the `cacheStrategy` parameter that configures cache behavior. The cache strategy allows you to define two main characteristics of the cache:
-
-- **Time-to-live (TTL):** Duration in seconds a cached response is considered _fresh_.
-- **Stale-While-Revalidate (SWR):** Duration in seconds a stale cache response is considered acceptable while the cache is refreshed in the background.
-
-## Time-to-live (TTL)
-
-Time-to-Live (TTL) determines how long cached data is considered fresh. By specifying the `ttl` in seconds, you can control the duration for which data in the cache remains valid. When a read query is executed, if the cached response is within the `ttl` limit, Prisma Client retrieves the data from the cache without querying the database. If the cached data is not available or has expired, Prisma Client queries the database and stores the results in the cache for future requests.
-
-Use `ttl` in `cacheStrategy` and specify the TTL of the query in seconds:
-
-```javascript
-await prisma.user.findMany({
- cacheStrategy: {
- //add-next-line
- ttl: 60,
- },
-});
-```
-
-With a specified TTL of 60 seconds, the majority of requests will result in
-a cache hit throughout the TTL duration:
-
-
-
-TTL is useful for reducing database load and latency for data that does not require frequent updates.
-
-### Invalidate the TTL and keep your cached query results up-to-date
-
-If your application requires real-time or near-real-time data, cache invalidation ensures that users see the most current data, even when using a large `ttl` (Time-To-Live). By invalidating your cache, you can bypass extended caching periods to show live data whenever it's needed.
-
-For example, if a dashboard displays customer information and a customer’s contact details change, TTL (Time-To-Live) settings ensure the cache automatically expires after a set duration. This allows the system to refresh only the updated data at the next access, ensuring support staff always see the latest information without manually refreshing the cache.
-
-However, in cases where immediate updates are required before the TTL expires, cache invalidation allows the system to proactively clear specific data from the cache. This forces a refresh of the updated information instantly, so support staff always have the most current details without waiting for the TTL to trigger.
-
-
-To invalidate a cached query result, you can add tags and then use the `$accelerate.invalidate` API.
-
-:::note
-
-On-demand cache invalidation is available with our paid plans. For more details, please see our [pricing](https://www.prisma.io/pricing#accelerate).
-
-:::
-
-To invalidate the query below, you need to provide the cache tag in the `$accelerate.invalidate` API:
-
-```ts
-await prisma.user.findMany({
- cacheStrategy: {
- ttl: 60,
- //add-next-line
- tags: ["findMany_users"],
- },
-});
-
-// This is how you would invalidate the cached query above.
-//add-start
-await prisma.$accelerate.invalidate({
- tags: ["findMany_users"],
-});
-//add-end
-```
-
-## Stale-While-Revalidate (SWR)
-
-Stale-While-Revalidate (SWR) allows you to control how long Accelerate can serve stale cache data while fetching fresh data in the background. When a read query is executed, Accelerate checks the age of the cached response against the `swr` duration. If the cache data is within the `swr` limit, Accelerate serves the stale data while simultaneously refreshing the cache by fetching the latest data from the database.
-
-Use `swr` in `cacheStrategy` and specify the SWR of the query in seconds:
-
-```javascript
-await prisma.user.findMany({
- cacheStrategy: {
- //add-next-line
- swr: 60,
- },
-});
-```
-
-When specifying a SWR of 60 seconds, the cache serves stale data until the cache refreshes itself in the background after each request:
-
-
-
-### Invalidate the SWR and keep your cached query results up-to-date
-
-If your application requires real-time or near-real-time data, cache invalidation ensures that users see the most current data, even when using a large `swr` (Stale-While-Revalidate). By invalidating your cache, you can bypass extended caching periods to show live data whenever it's needed.
-
-For example, consider a dashboard that displays stock levels for products in a warehouse. With SWR (Stale-While-Revalidate) settings, the dashboard can immediately display the last known stock data, even if it’s slightly outdated, while new data is fetched in the background. This ensures that staff can continue working with recent information without waiting, with the stock levels updating as soon as revalidation completes.
-
-However, in cases where stock data needs to be updated immediately—for instance, if a product is low in stock and the count needs real-time accuracy—cache invalidation allows the system to proactively clear specific data from the cache. This forces a refresh of the latest stock data instantly, so staff always have the most up-to-date information without waiting for SWR to complete the revalidation.
-
-To invalidate a cached query result, you can add tags and then use the `$accelerate.invalidate` API.
-
-:::note
-
-On-demand cache invalidation is available with our paid plans. For more details, please see our [pricing](https://www.prisma.io/pricing#accelerate).
-
-:::
-
-To invalidate the query below, you need to provide the cache tag in the `$accelerate.invalidate` API:
-
-```ts
-await prisma.user.findMany({
- cacheStrategy: {
- swr: 60,
- //add-next-line
- tags: ["findMany_users"],
- },
-});
-
-// This is how you would invalidate the cached query above.
-//add-start
-await prisma.$accelerate.invalidate({
- tags: ["findMany_users"],
-});
-//add-end
-```
-
-## Selecting a cache strategy
-
-Caching helps you improve query response times and reduce database load. However, it also means you might serve stale data to the client. Whether or not serving stale data is acceptable and to what extent depends on your use case. `ttl` and `swr` are parameters you can use to tweak the cache behavior.
-
-### Cache strategy using TTL
-
-Use TTL to reduce database load when stale cached data is acceptable.
-
-#### Use case: Product catalog in e-commerce applications
-
-Consider an e-commerce application with a product catalog that doesn't frequently change. By setting a `ttl` of, let's say, 1 hour, Prisma Client can serve cached product data for subsequent user requests within that hour without hitting the database. This significantly reduces the database load and improves the response time for product listing pages.
-
-**When to invalidate:** If there are critical updates to the catalog, such as a major price change or product availability adjustment, the cache should be [invalidated](/accelerate/caching#on-demand-cache-invalidation) immediately to prevent customers from seeing outdated information.
-
-### Cache strategy using SWR
-
-Use SWR to respond quickly to requests with minimal stale data. While it does not reduce database load, it can improve response times significantly.
-
-#### Use case: User profile in social media platforms
-
-Imagine a social media platform where user profiles are frequently accessed. By leveraging `swr` with a duration of, let's say, 5 minutes, Accelerate can serve the cached user profile information quickly, reducing the latency for profile pages. Meanwhile, in the background, it refreshes the cache after every request, ensuring that any updates made to the profile are eventually reflected for subsequent requests.
-
-**When to invalidate:** If a user makes significant updates to their profile, such as changing their profile picture or bio, the cache should be [invalidated](/accelerate/caching#on-demand-cache-invalidation) immediately to ensure that followers see the latest updates without waiting for the SWR refresh.
-
-### Cache strategy using TTL + SWR
-
-For very fast response times and reduced database load, use both TTL and SWR. You can use this strategy to fine-tune your application’s tolerance for stale data.
-
-Use `ttl` and `swr` in `cacheStrategy` and specify the TTL and SWR of the query in seconds:
-
-```javascript
-await prisma.user.findMany({
- cacheStrategy: {
- //add-start
- ttl: 30,
- swr: 60,
- //add-end
- },
-});
-```
-
-When specifying a TTL of 30 seconds and SWR of 60 seconds, the cache serves fresh data for the initial 30 seconds. Subsequently, it serves stale data until the cache refreshes itself in the background after each request:
-
-
-
-#### Use case: News articles
-
-Consider a news application where articles are frequently accessed but don't require real-time updates. By setting a `ttl` of 2 hours and an `swr` duration of 5 minutes, Prisma Client can serve cached articles quickly, reducing latency for readers. As long as the articles are within the `ttl`, users get fast responses. After the `ttl` expires, Prisma Client continues to serve the stale articles for up to an additional 5 minutes, revalidating the cache with the latest news from the database in response to a new query. This helps maintain a balance between performance and freshness.
-
-**When to invalidate:** If a critical update or breaking news article is published, the cache should be [invalidated](/accelerate/caching#on-demand-cache-invalidation) immediately to ensure readers see the latest information without delay. This approach is especially useful for applications where certain news items may need to override the normal cache cycle for timeliness.
-
-## On-demand cache invalidation
-
-If your application requires real-time or near-real-time data, cache invalidation ensures that users see the most current data, even when using a large `ttl` (Time-To-Live) or `swr` (Stale-While-Revalidate) [cache strategy](/accelerate/caching#cache-strategies). By invalidating your cache, you can bypass extended caching periods to show live data whenever it's needed.
-
-You can invalidate the cache using the [`$accelerate.invalidate` API](/accelerate/api-reference#accelerateinvalidate):
-
-:::note
-
-To programmatically invalidate cached queries, a paid plan is required. See our [pricing for more details](https://www.prisma.io/pricing#accelerate).
-
-:::
-
-```ts
-await prisma.user.findMany({
- where: {
- email: {
- contains: "alice@prisma.io",
- },
- },
- cacheStrategy: {
- swr: 60,
- ttl: 60,
- // highlight-start
- tags: ["emails_with_alice"],
- // highlight-end
- },
-});
-```
-
-You need to provide the cache tag in the `$accelerate.invalidate` API:
-
-```ts
-try {
- // highlight-start
- await prisma.$accelerate.invalidate({
- tags: ["emails_with_alice"],
- });
- // highlight-end
-} catch (e) {
- if (e instanceof Prisma.PrismaClientKnownRequestError) {
- // The .code property can be accessed in a type-safe manner
- if (e.code === "P6003") {
- console.log(
- "The cache invalidation rate limit has been reached. Please try again later."
- );
- }
- }
- throw e;
-}
-```
-
-Explore the [demo app](https://pris.ly/test-cache-invalidation) to see how cached query results in Prisma Accelerate are invalidated on demand, shown in a clear timeline.
-
-## Default cache strategy
-
-Accelerate defaults to **no cache** to avoid unexpected issues. While caching can improve performance, incorrect usage may lead to errors.
-
-For instance, if a query is executed on a critical path without specifying a cache strategy, the result may be incorrect, with no clear explanation. This issue often arises when implicit caching is unintentionally left enabled.
-
-To avoid such problems, you must explicitly opt-in to caching. This ensures you are aware that caching is not enabled by default, preventing potential errors.
-
-> When no cache strategy is specified or during a cache miss, a Prisma Client with the Accelerate extension routes all queries to the database through a connection pool instance near the database region.
+This content has moved — learn more on the updated [Caching in Accelerate](/postgres/database/caching) page.
diff --git a/content/300-accelerate/400-api-reference.mdx b/content/300-accelerate/400-api-reference.mdx
index a3930bfc0a..c6ea144dbe 100644
--- a/content/300-accelerate/400-api-reference.mdx
+++ b/content/300-accelerate/400-api-reference.mdx
@@ -20,7 +20,7 @@ All examples are based on the `User` model.
## `cacheStrategy`
-With the Accelerate extension for Prisma Client, you can use the `cacheStrategy` parameter for model queries and use the [`ttl`](/accelerate/caching#time-to-live-ttl) and [`swr`](/accelerate/caching#stale-while-revalidate-swr) parameters to define a cache strategy for Accelerate. The Accelerate extension requires that you install Prisma Client version `4.10.0`.
+With the Accelerate extension for Prisma Client, you can use the `cacheStrategy` parameter for model queries and use the [`ttl`](/postgres/database/caching#time-to-live-ttl) and [`swr`](/postgres/database/caching#stale-while-revalidate-swr) parameters to define a cache strategy for Accelerate. The Accelerate extension requires Prisma Client version `4.10.0` or later.
### Options
@@ -30,7 +30,7 @@ The `cacheStrategy` parameter takes an option with the following keys:
| ------ | ------- | ----- | -------- | ------------------------------------------- |
| `swr` | `60` | `Int` | No | The stale-while-revalidate time in seconds. |
| `ttl` | `60` | `Int` | No | The time-to-live time in seconds. |
-| `tags` | `["user"]` | `String[]` | No | The `tag` serves as a variable to control the invalidation of specific queries within your application. It is an optional array of strings to [invalidate](/accelerate/api-reference#accelerateinvalidate) the cache, with each tag containing only alphanumeric characters and underscores, and a maximum length of 64 characters.
+| `tags` | `["user"]` | `String[]` | No | The `tag` serves as a variable to control the invalidation of specific queries within your application. It is an optional array of strings to [invalidate](/postgres/database/api-reference#accelerateinvalidate) the cache, with each tag containing only alphanumeric characters and underscores, and a maximum length of 64 characters.
|
### Examples
diff --git a/content/300-accelerate/600-faq.mdx b/content/300-accelerate/600-faq.mdx
index d3ca1e186e..b4d27804ba 100644
--- a/content/300-accelerate/600-faq.mdx
+++ b/content/300-accelerate/600-faq.mdx
@@ -81,7 +81,7 @@ Under the hood, Accelerate uses Cloudflare, which uses [Anycast](https://www.clo
## How can I invalidate a cache on Accelerate?
-You can invalidate the cache on-demand via the [`$accelerate.invalidate` API](/accelerate/api-reference#accelerateinvalidate) if you're on a [paid plan](https://www.prisma.io/pricing#accelerate), or you can invalidate your entire cache, on a project level, a maximum of five times a day. This limit is set based on [your plan](https://www.prisma.io/pricing#accelerate). You can manage this via the Accelerate configuration page.
+You can invalidate the cache on-demand via the [`$accelerate.invalidate` API](/postgres/database/api-reference#accelerateinvalidate) if you're on a [paid plan](https://www.prisma.io/pricing#accelerate), or you can invalidate your entire cache, on a project level, a maximum of five times a day. This limit is set based on [your plan](https://www.prisma.io/pricing#accelerate). You can manage this via the Accelerate configuration page.
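+
+For example, assuming a query was cached with the tag `"findMany_users"`, the on-demand invalidation looks as follows:
+
+```ts
+// Clears all cached query results that were tagged "findMany_users"
+await prisma.$accelerate.invalidate({
+  tags: ["findMany_users"],
+})
+```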
## What is Accelerate's consistency model?
@@ -89,7 +89,7 @@ Accelerate does not have a consistency model. It is not a distributed system whe
Accelerate implements a [read-through caching strategy](https://www.prisma.io/dataguide/managing-databases/introduction-database-caching#read-through) particularly suitable for read-heavy workloads.
-The freshness of the data served by the cache depends on the cache strategy defined in your query. Refer to [this section](/accelerate/caching#selecting-a-cache-strategy) for more information on selecting the right cache strategy for your query.
+The freshness of the data served by the cache depends on the cache strategy defined in your query. Refer to [this section](/postgres/database/caching#selecting-a-cache-strategy) for more information on selecting the right cache strategy for your query.
## How is Accelerate different from other caching tools, such as Redis?
@@ -117,7 +117,7 @@ No. We currently do not have any plans for supporting other ORMs/query builders
## What is the maximum allowed value for the `ttl` parameter when configuring `cacheStrategy`?
-The [Time-to-live](/accelerate/caching#time-to-live-ttl) (`ttl`) parameter can be set for up to a _year_. However, it's important to note that items within the cache may be evicted if they are not frequently accessed.
+The [Time-to-live](/postgres/database/caching#time-to-live-ttl) (`ttl`) parameter can be set for up to a _year_. However, it's important to note that items within the cache may be evicted if they are not frequently accessed.
Based on our experimentation, we’ve seen cache items persist for around 18 hours. While items may remain in the cache for an extended period if they are actively accessed, there is no guarantee.
@@ -168,7 +168,7 @@ Yes, you can increase your Accelerate limits based on your subscription plan. He
Check the [pricing page](https://www.prisma.io/pricing#accelerate) for more details on the available plans and their corresponding limits.
:::warning
-While you can increase these limits based on your subscription plan, it's *still* recommended to optimize your database operations. [Learn more in our troubleshooting guide.](/accelerate/troubleshoot)
+While you can increase these limits based on your subscription plan, it's *still* recommended to optimize your database operations. [Learn more in our troubleshooting guide.](/postgres/database/error-reference)
:::
## How long does it take to invalidate a cache query result?
@@ -185,11 +185,11 @@ Here is a [demo app](https://pris.ly/test-cache-invalidation) to test the time i
## What is on-demand cache invalidation?
-[On-demand cache invalidation](/accelerate/caching#on-demand-cache-invalidation) lets applications instantly update specific cached data when it changes, instead of waiting for regular cache refresh cycles. This keeps information accurate and up-to-date for users.
+[On-demand cache invalidation](/postgres/database/caching#on-demand-cache-invalidation) lets applications instantly update specific cached data when it changes, instead of waiting for regular cache refresh cycles. This keeps information accurate and up-to-date for users.
## When should I use the cache invalidate API?
-The [cache invalidate API](/accelerate/caching#on-demand-cache-invalidation) is essential when data consistency cannot wait for the cache’s standard expiration or revalidation. Key use cases include:
+The [cache invalidate API](/postgres/database/caching#on-demand-cache-invalidation) is essential when data consistency cannot wait for the cache’s standard expiration or revalidation. Key use cases include:
- **Content updates**: When critical changes occur, such as edits to a published article, product updates, or profile modifications, that need to be visible immediately.
- **Inventory management**: In real-time applications, like inventory or booking systems, where stock levels, availability, or reservation statuses must reflect the latest information.
diff --git a/content/300-accelerate/650-troubleshoot.mdx b/content/300-accelerate/650-troubleshoot.mdx
index e217a16dea..ad77c26b73 100644
--- a/content/300-accelerate/650-troubleshoot.mdx
+++ b/content/300-accelerate/650-troubleshoot.mdx
@@ -6,15 +6,13 @@ tocDepth: 3
toc: true
---
-
When working with Prisma Accelerate, you may encounter errors often highlighted by specific error codes during development and operations. It is important to understand the meaning of these errors, why they occur, and how to resolve them in order to ensure the smooth operation of your applications. This guide aims to provide insights and steps to troubleshoot specific error codes encountered with Prisma Accelerate.
-
## [`P6009`](/orm/reference/error-reference#p6009-responsesizelimitexceeded) (`ResponseSizeLimitExceeded`)
-This error is triggered when the response size from a database query exceeds [the configured query response size limit](/accelerate/connection-pooling#response-size-limit). We've implemented this restriction to safeguard your application performance, as retrieving data over 5MB can significantly slow down your application due to multiple network layers. Typically, transmitting more than 5MB of data is common when conducting ETL (Extract, Transform, Load) operations. However, for other scenarios such as transactional queries, real-time data fetching for user interfaces, bulk data updates, or aggregating large datasets for analytics outside of ETL contexts, it should generally be avoided. These use cases, while essential, can often be optimized to work within [the configured query response size limit](/accelerate/connection-pooling#response-size-limit), ensuring smoother performance and a better user experience.
+This error is triggered when the response size from a database query exceeds [the configured query response size limit](/postgres/database/connection-pooling#response-size-limit). We've implemented this restriction to safeguard your application performance, as retrieving data over 5MB can significantly slow down your application due to multiple network layers. Transmitting more than 5MB of data is common when conducting ETL (Extract, Transform, Load) operations. However, for other scenarios such as transactional queries, real-time data fetching for user interfaces, bulk data updates, or aggregating large datasets for analytics outside of ETL contexts, it should generally be avoided. These use cases, while essential, can often be optimized to work within [the configured query response size limit](/postgres/database/connection-pooling#response-size-limit), ensuring smoother performance and a better user experience.
### Possible causes for [`P6009`](/orm/reference/error-reference#p6009-responsesizelimitexceeded)
@@ -22,27 +20,27 @@ This error is triggered when the response size from a database query exceeds [th
This error may arise if images or files stored within your table are being fetched, resulting in a large response size. Storing assets directly in the database is generally discouraged because it significantly impacts database performance and scalability. In addition to performance, it makes database backups slow and significantly increases the cost of storing routine backups.
-**Suggested solution:** Configure the [query response size limit](/accelerate/connection-pooling#response-size-limit) to be larger. If the limit is still exceeded, consider storing the image or file in a BLOB store like [Cloudflare R2](https://developers.cloudflare.com/r2/), [AWS S3](https://aws.amazon.com/pm/serv-s3/), or [Cloudinary](https://cloudinary.com/). These services allow you to store assets optimally and return a URL for access. Instead of storing the asset directly in the database, store the URL, which will substantially reduce the response size.
+**Suggested solution:** Configure the [query response size limit](/postgres/database/connection-pooling#response-size-limit) to be larger. If the limit is still exceeded, consider storing the image or file in a BLOB store like [Cloudflare R2](https://developers.cloudflare.com/r2/), [AWS S3](https://aws.amazon.com/pm/serv-s3/), or [Cloudinary](https://cloudinary.com/). These services allow you to store assets optimally and return a URL for access. Instead of storing the asset directly in the database, store the URL, which will substantially reduce the response size.
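+
+A rough sketch of this pattern, where `uploadToBlobStore`, `file`, and `userId` are hypothetical and `avatarUrl` is a hypothetical column:
+
+```ts
+// Upload the asset to a BLOB store (e.g., Cloudflare R2, AWS S3, or Cloudinary)...
+const url = await uploadToBlobStore(file)
+
+// ...then persist only the returned URL, keeping the row and the response small.
+await prisma.user.update({
+  where: { id: userId },
+  data: { avatarUrl: url },
+})
+```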
#### Over-fetching of data
-In certain cases, a large number of records or fields are unintentionally fetched, which results in exceeding [the configured query response size limit](/accelerate/connection-pooling#response-size-limit). This could happen when the [`where`](/orm/reference/prisma-client-reference#where) clause in the query is incorrect or entirely missing.
+In certain cases, a large number of records or fields are unintentionally fetched, which results in exceeding [the configured query response size limit](/postgres/database/connection-pooling#response-size-limit). This could happen when the [`where`](/orm/reference/prisma-client-reference#where) clause in the query is incorrect or entirely missing.
-**Suggested solution:** Configure the [query response size limit](/accelerate/connection-pooling#response-size-limit) to be larger. If the limit is still exceeded, double-check that the `where` clause is filtering data as expected. To prevent fetching too many records, consider using [pagination](/orm/prisma-client/queries/pagination). Additionally, use the [`select`](/orm/reference/prisma-client-reference#select) clause to return only the necessary fields, reducing the response size.
+**Suggested solution:** Configure the [query response size limit](/postgres/database/connection-pooling#response-size-limit) to be larger. If the limit is still exceeded, double-check that the `where` clause is filtering data as expected. To prevent fetching too many records, consider using [pagination](/orm/prisma-client/queries/pagination). Additionally, use the [`select`](/orm/reference/prisma-client-reference#select) clause to return only the necessary fields, reducing the response size.
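+
+A minimal sketch combining these suggestions (the filter and field names are illustrative):
+
+```ts
+const users = await prisma.user.findMany({
+  where: { isActive: true }, // filter to the records you actually need
+  select: { id: true, email: true }, // return only the required fields
+  take: 100, // paginate instead of fetching everything
+})
+```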
#### Fetching a large volume of data
In many data processing workflows, especially those involving ETL (Extract-Transform-Load) processes or scheduled CRON jobs, there's a need to extract large amounts of data from data sources (like databases, APIs, or file systems) for analysis, reporting, or further processing. If you are running an ETL/CRON workload that fetches a huge chunk of data for analytical processing then you might run into this limit.
-**Suggested solution:** Configure the [query response size limit](/accelerate/connection-pooling#response-size-limit) to be larger. If the limit is exceeded, consider splitting your query into batches. This approach ensures that each batch fetches only a portion of the data, preventing you from exceeding the size limit for a single operation.
+**Suggested solution:** Configure the [query response size limit](/postgres/database/connection-pooling#response-size-limit) to be larger. If the limit is exceeded, consider splitting your query into batches. This approach ensures that each batch fetches only a portion of the data, preventing you from exceeding the size limit for a single operation.
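+
+One possible way to batch such a workload is cursor-based pagination; in this sketch, `processBatch` and the batch size are illustrative:
+
+```ts
+const batchSize = 1000
+let cursor: { id: number } | undefined
+
+while (true) {
+  const batch = await prisma.user.findMany({
+    take: batchSize,
+    orderBy: { id: "asc" },
+    ...(cursor ? { cursor, skip: 1 } : {}), // skip the cursor row itself
+  })
+  if (batch.length === 0) break
+
+  await processBatch(batch) // hypothetical processing step
+
+  cursor = { id: batch[batch.length - 1].id }
+}
+```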
## [`P6004`](/orm/reference/error-reference#p6004-querytimeout) (`QueryTimeout`)
-This error occurs when a database query fails to return a response within [the configured query timeout limit](/accelerate/connection-pooling#query-timeout-limit). The query timeout limit includes the duration of waiting for a connection from the pool, network latency to the database, and the execution time of the query itself. We enforce this limit to prevent unintentional long-running queries that can overload system resources.
+This error occurs when a database query fails to return a response within [the configured query timeout limit](/postgres/database/connection-pooling#query-timeout-limit). The query timeout limit includes the duration of waiting for a connection from the pool, network latency to the database, and the execution time of the query itself. We enforce this limit to prevent unintentional long-running queries that can overload system resources.
-> The time for Accelerate's cross-region networking is excluded from [the configured query timeout limit](/accelerate/connection-pooling#query-timeout-limit) limit.
+> The time for Accelerate's cross-region networking is excluded from [the configured query timeout limit](/postgres/database/connection-pooling#query-timeout-limit).
### Possible causes for [`P6004`](/orm/reference/error-reference#p6004-querytimeout)
@@ -50,17 +48,17 @@ This error could be caused by numerous reasons. Some of the prominent ones are:
#### High traffic and insufficient connections
-If the application is receiving very high traffic and there are not a sufficient number of connections available to the database, then the queries would need to wait for a connection to become available. This situation can lead to queries waiting longer than [the configured query timeout limit](/accelerate/connection-pooling#query-timeout-limit) for a connection, ultimately triggering a timeout error if they do not get serviced within this duration.
+If the application is receiving very high traffic and there are not a sufficient number of connections available to the database, then the queries would need to wait for a connection to become available. This situation can lead to queries waiting longer than [the configured query timeout limit](/postgres/database/connection-pooling#query-timeout-limit) for a connection, ultimately triggering a timeout error if they do not get serviced within this duration.
-**Suggested solution**: Review and possibly increase the `connection_limit` specified in the connection string parameter when setting up Accelerate in a platform environment ([reference](/accelerate/connection-pooling#configuring-the-connection-pool-size)). This limit should align with your database's maximum number of connections.
+**Suggested solution**: Review and possibly increase the `connection_limit` specified in the connection string parameter when setting up Accelerate in a platform environment ([reference](/postgres/database/connection-pooling#configuring-the-connection-pool-size)). This limit should align with your database's maximum number of connections.
By default, the connection limit is set to 10 unless a different `connection_limit` is specified in your database connection string.
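+
+For example, a connection string that raises the limit to `20` connections might look like this:
+
+```env no-copy
+postgresql://user:password@localhost:5432/db?connection_limit=20
+```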
#### Long-running queries
-Queries may be slow to respond, hitting [the configured query timeout limit](/accelerate/connection-pooling#query-timeout-limit) even when connections are available. This could happen if a very large amount of data is being fetched in a single query or if appropriate indexes are missing from the table.
+Queries may be slow to respond, hitting [the configured query timeout limit](/postgres/database/connection-pooling#query-timeout-limit) even when connections are available. This could happen if a very large amount of data is being fetched in a single query or if appropriate indexes are missing from the table.
-**Suggested solution**: Configure the [query timeout limit](/accelerate/connection-pooling#query-timeout-limit) to be larger. If the limit is exceeded, identify the slow-running queries and fetch only the necessary data. Use the `select` clause to retrieve specific fields and avoid fetching unnecessary data. Additionally, consider adding appropriate indexes to improve query efficiency. You might also isolate long-running queries into separate environments to prevent them from affecting transactional queries.
+**Suggested solution**: Configure the [query timeout limit](/postgres/database/connection-pooling#query-timeout-limit) to be larger. If the limit is exceeded, identify the slow-running queries and fetch only the necessary data. Use the `select` clause to retrieve specific fields and avoid fetching unnecessary data. Additionally, consider adding appropriate indexes to improve query efficiency. You might also isolate long-running queries into separate environments to prevent them from affecting transactional queries.
#### Database resource contention
@@ -85,7 +83,7 @@ Additionally, direct connections could have a significant impact on your databas
If your application's runtime environment supports Prisma ORM natively and you're considering this strategy to circumvent P6009 and P6004 errors, you might create two `PrismaClient` instances:
1. An instance using the Accelerate connection string (prefixed with `prisma://`) for general operations.
-2. Another instance with the direct database connection string (e.g., prefixed with `postgres://`, `mysql://`, etc.) for specific operations anticipated to exceed [the configured query limit timeout](/accelerate/connection-pooling#query-timeout-limit) or to result in responses larger than [the configured query response size limit](/accelerate/connection-pooling#response-size-limit).
+2. Another instance with the direct database connection string (e.g., prefixed with `postgres://`, `mysql://`, etc.) for specific operations anticipated to exceed [the configured query timeout limit](/postgres/database/connection-pooling#query-timeout-limit) or to result in responses larger than [the configured query response size limit](/postgres/database/connection-pooling#response-size-limit).
```jsx
export const prisma = new PrismaClient({
diff --git a/content/300-accelerate/900-compare.mdx b/content/300-accelerate/900-compare.mdx
index d826e19ed9..a7c95d72d6 100644
--- a/content/300-accelerate/900-compare.mdx
+++ b/content/300-accelerate/900-compare.mdx
@@ -42,7 +42,7 @@ Prisma Accelerate offers a powerful global cache, so you can serve data to your
**Why are these important?**
-- Since Accelerate extends the Prisma client, you can control caching policies directly from your codebase with just an extra line of code. Integration is seamless. Here is an example using [the stale-while-revalidating caching strategy](/accelerate/caching#stale-while-revalidate-swr):
+- Since Accelerate extends Prisma Client, you can control caching policies directly from your codebase with just an extra line of code. Integration is seamless. Here is an example using [the stale-while-revalidate caching strategy](/postgres/database/caching#stale-while-revalidate-swr):
```jsx
await prisma.user.findMany({
@@ -54,7 +54,7 @@ Prisma Accelerate offers a powerful global cache, so you can serve data to your
- Query level cache policies are critical for serious applications, so that you can control which queries are cached, and the characteristics of the policy. You may want certain data in your app to be cached for several days, other data to be cached for just a few minutes, and other data not to be cached at all. This is only possible with Prisma Accelerate.
- Authenticating with an API key can be a helpful security measure, allowing you to decouple database credentials from application secrets. Easily rotate API keys as often as you like, without needing any credential changes in your database.
-- Automatic cache updates means that the cache is automatically updated when a change in the database occurs. With Accelerate, you are in control of how the cache is invalidated, using [various caching strategies](/accelerate/caching).
+- Automatic cache updates means that the cache is automatically updated when a change in the database occurs. With Accelerate, you are in control of how the cache is invalidated, using [various caching strategies](/postgres/database/caching).
## Accelerate connection pool
diff --git a/content/500-platform/10-about.mdx b/content/500-platform/10-about.mdx
index bb921271be..fd75ad502f 100644
--- a/content/500-platform/10-about.mdx
+++ b/content/500-platform/10-about.mdx
@@ -54,14 +54,14 @@ You can access Optimize within your [Prisma Data Platform account](https://conso
To access the Optimize dashboard in your desired workspace:
1. Click the **Optimize** tab on the left navigation.
-2. Click the **Launch Optimize** button.
+2. Click the **Generate API key** button.
##### Generating an Optimize API key
To obtain the Optimize API key:
1. Navigate to the workspace where you want to use Optimize.
-2. Ensure that Optimize is launched. If it isn't, click the **Launch Optimize** button.
+2. Ensure that Optimize is launched. If it isn't, click the **Generate API key** button.
3. In Optimize, click your profile name in the top right corner of the navbar.
4. Select **Settings**.
5. Click **Create API key**.
diff --git a/content/700-optimize/200-getting-started.mdx b/content/700-optimize/200-getting-started.mdx
index 287cadbccf..d3e4fa9078 100644
--- a/content/700-optimize/200-getting-started.mdx
+++ b/content/700-optimize/200-getting-started.mdx
@@ -16,7 +16,7 @@ Before you begin with Prisma Optimize, ensure you have the following:
:::note
-Prisma Optimize is intended for use in local environments. Learn more in the [FAQ](/optimize/faq#can-i-use-prisma-optimize-in-production).
+Prisma Optimize is intended for use in local environments. Learn more in the [FAQ](/postgres/more/faq#can-i-enable-query-optimizations-for-prisma-postgres-in-production).
:::
@@ -92,10 +92,10 @@ Follow these steps to start generating query insights with Prisma Optimize:
1. In the Optimize dashboard, click the **Start recording** button, then run your app and execute some Prisma queries while recording is active.
2. After your app runs and generates insights based on the executed Prisma queries, click the **Stop recording** button.
-3. Explore [individual query details](/optimize/recordings#data-captured-in-a-recording-session) by clicking on them, and check the **Recommendations** tab for any suggested improvements to enhance query performance.
+3. Explore [individual query details](/postgres/query-optimization/recordings#data-captured-in-a-recording-session) by clicking on them, and check the **Recommendations** tab for any suggested improvements to enhance query performance.
:::info
- Use [Prisma AI](/optimize/prisma-ai) to understand recommendations and apply them within your Prisma model context.
+ Use [Prisma AI](/postgres/query-optimization/prisma-ai) to understand recommendations and apply them within your Prisma model context.
:::
For a hands-on learning experience, try out the [step-by-step example](https://github.com/prisma/prisma-examples/tree/latest/optimize/starter).
diff --git a/content/700-optimize/300-recordings.mdx b/content/700-optimize/300-recordings.mdx
index 0216f08370..ec3b1bbcb9 100644
--- a/content/700-optimize/300-recordings.mdx
+++ b/content/700-optimize/300-recordings.mdx
@@ -8,51 +8,4 @@ toc: true
The recordings feature helps developers debug and isolate sets of queries into distinct sessions, known as recordings. This targeted approach enables precise performance analysis and optimization by preventing the mixing of queries from different applications or test rounds, leading to clearer insights and more effective debugging.
-## Managing a recording session
-
-You can manually start and stop recording sessions via the [Optimize dashboard](https://console.prisma.io/optimize) by clicking the **Start Recording** and **Stop Recording** buttons.
-
-:::warning
-A recording will automatically stop if the 10k query limit is reached or if the Prisma schema of the app is changed.
-:::
-
-## Identifying a recording session
-
-You can rename and tag your recording sessions for easy identification and context. Click on the default name of the session and type the desired name.
-
-## Data captured in a recording session
-
-Each recording session captures detailed insights about the queries executed in your app, including:
-
-- All queries executed during the session.
-- The raw query generated and sent to the database by Prisma ORM as **Raw Query**.
-- The number of times a query pattern was executed, listed as **Count**.
-- [Query performance metrics](/optimize/performance-metrics).
-- Errors encountered during query execution.
-
-Each recording can include up to 10k queries. There are no limits on storage retention.
-
-## Recommendations from a recording session
-
-When a recording session ends, Optimize generates recommendations such as:
-
-- [Excessive number of rows returned](/optimize/recommendations/excessive-number-of-rows-returned)
-- [Full table scans caused by LIKE operations](/optimize/recommendations/full-table-scans-caused-by-like-operations)
-- [Queries on unindexed columns](/optimize/recommendations/queries-on-unindexed-columns)
-- [Repeated query](/optimize/recommendations/repeated-query)
-- [Overfetching](/optimize/recommendations/select-returning)
-- [Using `@db.Money`](/optimize/recommendations/avoid-db-money)
-- [Using `@db.Char(n)`](/optimize/recommendations/avoid-char)
-- [Using `@db.VarChar(n)`](/optimize/recommendations/avoid-varchar)
-- [Using `timestamp(0)` or `timestamptz(0)`](/optimize/recommendations/avoid-timestamp-timestampz-0)
-- [Using `CURRENT_TIME`](/optimize/recommendations/current-time)
-- [Storing large objects or BLOBs in the database](/optimize/recommendations/storing-blob-in-database)
-- [Indexing on unique columns](/optimize/recommendations/indexing-on-unique-columns)
-- [Long-running transactions](/optimize/recommendations/long-running-transactions)
-- [Unnecessary indexes](/optimize/recommendations/unnecessary-indexes)
-
-:::info
-Use [Prisma AI](/optimize/prisma-ai) to ask follow-up questions about a recommendation.
-:::
-
-Learn more about the recommendations generated by Optimize [here](/optimize/recommendations).
+Learn more about [Optimize recordings here](/postgres/query-optimization/recordings).
\ No newline at end of file
diff --git a/content/700-optimize/400-recommendations.mdx b/content/700-optimize/400-recommendations.mdx
new file mode 100644
index 0000000000..e37df4d377
--- /dev/null
+++ b/content/700-optimize/400-recommendations.mdx
@@ -0,0 +1,27 @@
+---
+title: 'Recommendations'
+metaTitle: 'Optimize: Recommendations'
+metaDescription: "Learn about using Optimize's recommendations."
+tocDepth: 3
+toc: true
+---
+
+Optimize provides recommendations focused on performance problems such as indexing issues, excessive data retrieval, and inefficient query patterns. Recommendations include:
+
+- [Excessive number of rows returned](/postgres/query-optimization/recommendations/excessive-number-of-rows-returned)
+- [Full table scans caused by LIKE operations](/postgres/query-optimization/recommendations/full-table-scans-caused-by-like-operations)
+- [Queries on unindexed columns](/postgres/query-optimization/recommendations/queries-on-unindexed-columns)
+- [Repeated query](/postgres/query-optimization/recommendations/repeated-query)
+- [Overfetching](/postgres/query-optimization/recommendations/select-returning)
+- [Using `@db.Money`](/postgres/query-optimization/recommendations/avoid-db-money)
+- [Using `@db.Char(n)`](/postgres/query-optimization/recommendations/avoid-char)
+- [Using `@db.VarChar(n)`](/postgres/query-optimization/recommendations/avoid-varchar)
+- [Using `timestamp(0)` or `timestamptz(0)`](/postgres/query-optimization/recommendations/avoid-timestamp-timestampz-0)
+- [Using `CURRENT_TIME`](/postgres/query-optimization/recommendations/current-time)
+- [Storing large objects or BLOBs in the database](/postgres/query-optimization/recommendations/storing-blob-in-database)
+- [Indexing on unique columns](/postgres/query-optimization/recommendations/indexing-on-unique-columns)
+- [Long-running transactions](/postgres/query-optimization/recommendations/long-running-transactions)
+- [Unnecessary indexes](/postgres/query-optimization/recommendations/unnecessary-indexes)
+
+Learn more about the [recommendations generated by Optimize here](/postgres/query-optimization/recommendations).
\ No newline at end of file
diff --git a/content/700-optimize/400-recommendations/_category_.json b/content/700-optimize/400-recommendations/_category_.json
deleted file mode 100644
index 258ab00c98..0000000000
--- a/content/700-optimize/400-recommendations/_category_.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "collapsed": false,
- "collapsible": false
-}
diff --git a/content/700-optimize/450-prisma-ai.mdx b/content/700-optimize/450-prisma-ai.mdx
index 188aba804a..eb58467826 100644
--- a/content/700-optimize/450-prisma-ai.mdx
+++ b/content/700-optimize/450-prisma-ai.mdx
@@ -6,13 +6,4 @@ tocDepth: 3
toc: true
---
-Prisma AI enables you to ask follow-up questions on a provided [recommendation](/optimize/recommendations) for additional clarity.
-
-## Using the Prisma AI
-
-To interact with the Prisma AI:
-
-1. Select a recommendation from an Optimize [recording](/optimize/recordings).
-2. Click the **Ask AI** button.
-
-Prisma AI helps you gain deeper insights into a recommendation and learn how to apply it effectively to your Prisma model.
\ No newline at end of file
+Prisma AI enables you to ask follow-up questions about a provided [recommendation](/postgres/query-optimization/recommendations) for additional clarity. Learn more about [Prisma AI here](/postgres/query-optimization/prisma-ai).
\ No newline at end of file
diff --git a/content/700-optimize/500-performance-metrics.mdx b/content/700-optimize/500-performance-metrics.mdx
index b636f2fd30..030dba74da 100644
--- a/content/700-optimize/500-performance-metrics.mdx
+++ b/content/700-optimize/500-performance-metrics.mdx
@@ -8,22 +8,4 @@ toc: true
An Optimize recording session provides detailed insights into the latencies of executed queries, capturing key metrics such as average duration, 50th percentile, 99th percentile, and maximal query execution time.
-## Total query durations
-
-Prisma Optimize measures total latency for query patterns, enabling you to analyze and debug slow queries effectively.
-
-### Average query duration (**`AVG`**)
-
-The average query duration reveals the mean execution time across all queries, helping you assess overall performance trends and identify inefficiencies that impact the user experience.
-
-### 50th percentile (**`P50`**)
-
-The 50th percentile, or median, query duration indicates the time within which half of your queries complete. This metric offers a clear view of typical user performance, unaffected by outliers.
-
-### 99th percentile (**`P99`**)
-
-The 99th percentile query duration highlights the execution time for the slowest 1% of queries. This metric is crucial for uncovering and addressing performance bottlenecks that, while infrequent, can significantly impact user satisfaction.
-
-### Maximal query duration (**`MAX`**)
-
-The maximal query duration measures the time taken by the single slowest query. This metric helps identify extreme cases, providing insights into the worst performance scenarios your system might face, so you can diagnose and resolve outliers.
+Learn more about [the performance metrics captured by Optimize here](/postgres/query-optimization/performance-metrics).
diff --git a/content/700-optimize/600-faq.mdx b/content/700-optimize/600-faq.mdx
index a75f582bd3..0955369770 100644
--- a/content/700-optimize/600-faq.mdx
+++ b/content/700-optimize/600-faq.mdx
@@ -6,40 +6,5 @@ tocDepth: 3
toc: true
---
-## Does Optimize automatically implement optimizations?
+For answers to frequently asked questions about Prisma Optimize and query recommendations, [visit this page](/postgres/more/faq#query-optimization).
-Prisma Optimize offers insights and recommendations on how to improve your database queries. It does not alter any existing queries or your Prisma schema.
-
-## How long is a recording session retained in Optimize?
-
-There are no limits on the storage retention period. Optimize will store a recording session until you explicitly delete it.
-
-## Do recommendation limits reset monthly?
-
-Yes, the recommendation usage resets at the beginning of each calendar month. For example, if you use 5 recommendations by the end of the month, your usage will reset to 0 at the start of the next month.
-
-## Can I get charged for exceeding the recommendation limit on the starter plan?
-
-Yes, if you’re on the starter plan, exceeding 5 recommendations in a billing cycle will result in a $5 charge at the end of that cycle. For more information, visit [our pricing page](https://www.prisma.io/pricing#optimize).
-
-## How does Optimize track viewed recommendations for billing? Are they counted based on generated or viewed recommendations?
-
-They are counted based on viewed recommendations. Once you click on a recommendation from the recommendations table and view the recommendation's detail page, it counts as being seen.
-
-## Can I use Prisma Optimize in production?
-
-No, Prisma Optimize is not intended for production use. It is specifically designed for local development, providing valuable insights and optimizations during that phase. While it’s technically possible to run it in a production environment, doing so could result in performance problems or unexpected behaviors, as Optimize is not built to handle the complexity and scale of production workloads. For the best experience, we recommend using Prisma Optimize solely in your development environment.
-
-You can use the `enable` property in the Optimize extension to run Optimize only in development environment. By default, the `enable` property is set to `true`.
-
-```ts file=script.ts copy showLineNumbers
-import { PrismaClient } from '@prisma/client'
-import { withOptimize } from "@prisma/extension-optimize"
-
-const prisma = new PrismaClient().$extends(
- withOptimize({
- apiKey: process.env.OPTIMIZE_API_KEY,
- enable: process.env.ENVIRONMENT === 'development',
- })
-);
-```
diff --git a/content/700-optimize/700-known-limitations.mdx b/content/700-optimize/700-known-limitations.mdx
index c4c9780b8a..9fd681f798 100644
--- a/content/700-optimize/700-known-limitations.mdx
+++ b/content/700-optimize/700-known-limitations.mdx
@@ -10,15 +10,15 @@ Below are the known limitations when using Prisma Optimize. If you are aware of
## Query limit on a recording session
-Each [recording session](/optimize/recordings) can contain a maximum of 10,000 queries. Once this limit is reached, the recording session will end.
+Each [recording session](/postgres/query-optimization/recordings) can contain a maximum of 10,000 queries. Once this limit is reached, the recording session will end.
## Recording limit per workspace
-Each [workspace](/platform/about#workspace) can contain a maximum of 100 [recordings](/optimize/recordings).
+Each [workspace](/platform/about#workspace) can contain a maximum of 100 [recordings](/postgres/query-optimization/recordings).
## Scope and constraints for the Prisma AI
-While [Prisma AI](/optimize/prisma-ai) can provide helpful guidance to implement a [recommendation](/optimize/recommendations), there are some important limitations to keep in mind:
+While [Prisma AI](/postgres/query-optimization/prisma-ai) can provide helpful guidance to implement a [recommendation](/postgres/query-optimization/recommendations), there are some important limitations to keep in mind:
- **Information and accuracy**: The AI provides advice based on a broad, general knowledge base and does not have direct access to Prisma ORM documentation. This may occasionally result in inaccuracies or outdated information.
@@ -42,7 +42,7 @@ const prisma = new PrismaClient()
### SQL references in MongoDB recommendations
-Prisma Optimize provides helpful recommendations for MongoDB users, though some explanations from [Prisma AI](/optimize/prisma-ai) may reference SQL-specific concepts. However, the [recommendations](/optimize/recommendations) remain useful and applicable to MongoDB environments.
+Prisma Optimize provides helpful recommendations for MongoDB users, though some explanations from [Prisma AI](/postgres/query-optimization/prisma-ai) may reference SQL-specific concepts. However, the [recommendations](/postgres/query-optimization/recommendations) remain useful and applicable to MongoDB environments.
### Raw query visibility in MongoDB
diff --git a/content/800-guides/100-nuxt.mdx b/content/800-guides/100-nuxt.mdx
index 186833b3dd..4c64d1a62d 100644
--- a/content/800-guides/100-nuxt.mdx
+++ b/content/800-guides/100-nuxt.mdx
@@ -146,7 +146,7 @@ With Prisma configured, the next step is to update your application code to fetc
```
:::note
- We're extending the `usePrismaClient()` composable with the `withAccelerate()` extension method to ensure [compatibility with Prisma Postgres](/postgres/overview#using-the-client-extension-for-prisma-accelerate-required). This extension will also allow you to [cache your queries](/accelerate/caching).
+ We're extending the `usePrismaClient()` composable with the `withAccelerate()` extension method to ensure [compatibility with Prisma Postgres](/postgres/introduction/overview#using-the-client-extension-for-prisma-accelerate-required). This extension will also allow you to [cache your queries](/postgres/database/caching).
:::
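
For readers skimming this hunk, here is a minimal standalone sketch of the pattern the note describes, assuming `@prisma/extension-accelerate` is installed and a `user` model exists in the schema; the cache values are illustrative:

```ts
import { PrismaClient } from '@prisma/client'
import { withAccelerate } from '@prisma/extension-accelerate'

// Extend the client so queries run through Prisma Accelerate, which is
// required for Prisma Postgres and enables per-query caching.
const prisma = new PrismaClient().$extends(withAccelerate())

async function main() {
  // Cache results for up to 60 seconds (TTL) and serve stale results for
  // a further 60 seconds while revalidating in the background (SWR).
  // The `user` model is assumed for illustration.
  const users = await prisma.user.findMany({
    cacheStrategy: { ttl: 60, swr: 60 },
  })
  console.log(users)
}

main()
```
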
3. Modify the `app.vue` file in the root directory to include the new server component using Nuxt Islands:
diff --git a/content/800-guides/140-use-prisma-in-pnpm-workspaces.mdx b/content/800-guides/140-use-prisma-in-pnpm-workspaces.mdx
index 8e95c113a3..8a847e26aa 100644
--- a/content/800-guides/140-use-prisma-in-pnpm-workspaces.mdx
+++ b/content/800-guides/140-use-prisma-in-pnpm-workspaces.mdx
@@ -104,7 +104,7 @@ pnpm add @prisma/extension-accelerate
:::info
-This guide uses [Prisma Postgres](/postgres/getting-started). If you plan to use a different database, you can omit the [@prisma/extension-accelerate package](https://www.npmjs.com/package/@prisma/extension-accelerate/).
+This guide uses [Prisma Postgres](/postgres/introduction/getting-started). If you plan to use a different database, you can omit the [@prisma/extension-accelerate package](https://www.npmjs.com/package/@prisma/extension-accelerate/).
:::
@@ -126,7 +126,7 @@ Enter a name for your project and choose a database region.
:::info
-We're going to be using [Prisma Postgres](/postgres/getting-started) in this guide. If you're not using a Prisma Postgres database, you won't need to add the `--db` flag.
+We're going to be using [Prisma Postgres](/postgres/introduction/getting-started) in this guide. If you're not using a Prisma Postgres database, you won't need to add the `--db` flag.
:::
@@ -181,7 +181,7 @@ Next, add helper scripts to your `package.json` to simplify Prisma commands:
:::info
-If you're not using [Prisma Postgres](/postgres/getting-started) for your database, exclude the `--no-engine` flag from the `db:generate` script.
+If you're not using [Prisma Postgres](/postgres/introduction/getting-started) for your database, exclude the `--no-engine` flag from the `db:generate` script.
:::
@@ -212,7 +212,7 @@ export { prisma };
:::info
-If you're not using [Prisma Postgres](/postgres/getting-started) for your database, exclude the `import { withAccelerate }` line and `.$extends(withAccelerate())` from the line following it.
+If you're not using [Prisma Postgres](/postgres/introduction/getting-started) for your database, exclude the `import { withAccelerate }` line and `.$extends(withAccelerate())` from the line following it.
:::
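
The two info blocks above cover the Prisma Postgres specific pieces of this guide: the `--no-engine` generate flag and the Accelerate client extension. A minimal sketch of the shared client module under those assumptions (the `db:generate` script name comes from the guide; the file path and everything else are illustrative):

```ts
// packages/db/src/client.ts (path is illustrative)
import { PrismaClient } from '@prisma/client'
// Omit this import (and the $extends call below) if you are not
// using Prisma Postgres:
import { withAccelerate } from '@prisma/extension-accelerate'

// With Prisma Postgres, generate the client via the `db:generate`
// script, i.e. `prisma generate --no-engine`; drop the flag when
// targeting a self-hosted database.
const prisma = new PrismaClient().$extends(withAccelerate())

export { prisma }
```
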
diff --git a/content/800-guides/190-data-dog.mdx b/content/800-guides/190-data-dog.mdx
index f99e89b284..62a26a1d51 100644
--- a/content/800-guides/190-data-dog.mdx
+++ b/content/800-guides/190-data-dog.mdx
@@ -145,7 +145,7 @@ If you're using Prisma Postgres, also install:
npm i @prisma/extension-accelerate
```
-This extension enables you to [cache your Prisma queries](/postgres/caching).
+This extension enables you to [cache your Prisma queries](/postgres/database/caching).
### 2.2. Define models
diff --git a/content/800-guides/190-sveltekit.mdx b/content/800-guides/190-sveltekit.mdx
index a7b2004f03..5b9bc3724a 100644
--- a/content/800-guides/190-sveltekit.mdx
+++ b/content/800-guides/190-sveltekit.mdx
@@ -371,7 +371,7 @@ Now that you have a working SvelteKit app connected to a Prisma Postgres databas
- Extend your Prisma schema with more models and relationships
- Add create/update/delete routes and forms
- Explore authentication and validation
-- Enable query caching with [Prisma Postgres](/postgres/caching) for better performance
+- Enable query caching with [Prisma Postgres](/postgres/database/caching) for better performance
### More Info
diff --git a/content/800-guides/999-making-guides.mdx b/content/800-guides/999-making-guides.mdx
index 80dcc42214..c8c72c498b 100644
--- a/content/800-guides/999-making-guides.mdx
+++ b/content/800-guides/999-making-guides.mdx
@@ -481,7 +481,7 @@ Now that you have a working __________ app connected to a Prisma Postgres databa
- Extend your Prisma schema with more models and relationships
- Add create/update/delete routes and forms
- Explore authentication and validation
-- Enable query caching with [Prisma Postgres](/postgres/caching) for better performance
+- Enable query caching with [Prisma Postgres](/postgres/database/caching) for better performance
### More Info
diff --git a/docusaurus.config.ts b/docusaurus.config.ts
index c9bc5fb872..9efef8a56c 100644
--- a/docusaurus.config.ts
+++ b/docusaurus.config.ts
@@ -272,23 +272,26 @@ const config: Config = {
label: "Get Started",
className: "indigo first-item",
},
+ {
+ type: "docSidebar",
+ sidebarId: "ormSidebar",
+ position: "left",
+ className: "indigo",
+ label: "ORM",
+ },
+ {
+ type: "docSidebar",
+ sidebarId: "prismaPostgresSidebar",
+ className: "teal",
+ position: "left",
+ label: "Postgres",
+ },
{
type: "dropdown",
- label: "Products",
+ label: "More",
+ className: "teal",
position: "left",
items: [
- {
- type: "docSidebar",
- sidebarId: "ormSidebar",
- className: "indigo",
- label: "ORM",
- },
- {
- type: "docSidebar",
- sidebarId: "prismaPostgresSidebar",
- className: "teal",
- label: "Postgres",
- },
{
className: "indigo",
to: "/orm/tools/prisma-studio",
diff --git a/src/data/indexData.ts b/src/data/indexData.ts
index 7f37c5ae60..544dfb5c08 100644
--- a/src/data/indexData.ts
+++ b/src/data/indexData.ts
@@ -2,8 +2,7 @@ export const ProductLinkData = {
t_orm: {
title: "Talk to your database, seamlessly",
eyebrow: "ORM",
- description:
- "Prisma ORM lets you query your database with an ergonomic TypeScript client",
+ description: "Prisma ORM lets you query your database with an ergonomic TypeScript client",
link: "/orm",
icon: "fa-regular fa-database",
},
@@ -18,8 +17,7 @@ export const ProductLinkData = {
t_studio: {
title: "See your data in style",
eyebrow: "Studio",
- description:
- "Prisma Studio makes it easy to explore your data visually",
+ description: "Prisma Studio makes it easy to explore your data visually",
link: "/orm/tools/prisma-studio",
icon: "fa-regular fa-table",
},
@@ -34,8 +32,7 @@ export const ProductLinkData = {
t_optimize: {
title: "Your queries, smarter",
eyebrow: "Optimize",
- description:
- "Prisma Optimize gives you AI-powered insights on potential issues in your code",
+ description: "Prisma Optimize gives you AI-powered insights on potential issues in your code",
link: "/optimize",
icon: "fa-regular fa-magnifying-glass-chart",
},
@@ -89,180 +86,193 @@ export const get_help = [
{
icon: "fa-brands fa-github",
link: "https://github.com/prisma/prisma/issues/new?assignees=&labels=kind/bug&projects=&template=bug_report.yml",
- label: "Report a bug"
+ label: "Report a bug",
},
{
icon: "fa-brands fa-github",
link: "https://github.com/prisma/prisma/issues/new?assignees=&labels=&projects=&template=feature_request.md&title=",
- label: "Request a new feature"
- }
- ]
+ label: "Request a new feature",
+ },
+ ],
},
{
icon: "fa-regular fa-comments-question-check",
title: "Community Support",
- description: "Support for customers on our Starter plan is provided through our community channels.",
+ description:
+ "Support for customers on our Starter plan is provided through our community channels.",
links: [
{
icon: "fa-brands fa-discord",
link: "https://pris.ly/discord",
- label: "Join our Discord"
- }
- ]
+ label: "Join our Discord",
+ },
+ ],
},
{
icon: "fa-regular fa-headset",
title: "Prisma Support",
- description: "Support for customers in our Pro or Business plan is provided by the Platform Console.",
+ description:
+ "Support for customers in our Pro or Business plan is provided by the Platform Console.",
links: [
{
link: "https://console.prisma.io/login?utm_source=website&utm_medium=default&utm_campaign=login",
- label: "Submit a ticket"
- }
- ]
- }
-]
+ label: "Submit a ticket",
+ },
+ ],
+ },
+];
export const tabs = [
{
icon: "fa-solid fa-chart-pyramid",
label: "Use Prisma Postgres",
- description: "Get started with your favorite framework and Prisma Postgres. With Prisma Postgres you get an instant, fully hosted high-performance database that includes built-in caching, scales to zero, and integrates deeply with Prisma ORM and Prisma Studio—all backed by a generous free tier.",
+ description:
+ "Get started with your favorite framework and Prisma Postgres. With Prisma Postgres you get an instant, fully hosted high-performance database that includes built-in caching, scales to zero, and integrates deeply with Prisma ORM and Prisma Studio—all backed by a generous free tier.",
list: [
{
url: "/guides/nextjs",
image: "/img/technologies/nextjs.svg",
- tech: "Nextjs"
+ tech: "Nextjs",
},
{
url: "/guides/nuxt",
image: "/img/technologies/nuxtjs.svg",
- tech: "Nuxtjs"
+ tech: "Nuxtjs",
},
{
url: "/guides/react-router-7",
image: "/img/technologies/reactrouter.svg",
- tech: "React Router"
+ tech: "React Router",
},
{
url: "/guides/solid-start",
image: "/img/technologies/solidstart.svg",
- tech: "SolidStart"
+ tech: "SolidStart",
},
{
url: "/guides/tanstack-start",
image: "/img/technologies/tanstack.png",
- tech: "TanStack"
+ tech: "TanStack",
},
],
link: {
url: "/getting-started/prisma-postgres/import-from-existing-database-postgresql",
label: "How to migrate to Prisma Postgres",
- icon: "fa-regular fa-book-open"
- }
+ icon: "fa-regular fa-book-open",
+ },
},
{
icon: "fa-solid fa-database",
label: "Bring your own database",
- description: "Already have a database? With Prisma ORM and Prisma Data Platform, you can supercharge your existing stack. Add connection pooling and caching with generous free tiers.",
+ description:
+ "Already have a database? With Prisma ORM and Prisma Data Platform, you can supercharge your existing stack. Add connection pooling and caching with generous free tiers.",
list: [
{
url: "/getting-started/setup-prisma/add-to-existing-project/relational-databases-typescript-postgresql",
image: "/img/technologies/postgresqlsimple.svg",
- tech: "PostgreSQL"
+ tech: "PostgreSQL",
},
{
url: "/getting-started/setup-prisma/add-to-existing-project/relational-databases-typescript-planetscale",
image: "/img/technologies/planetscale.svg",
imageDark: "/img/technologies/planetscaledark.svg",
- tech: "Planetscale"
+ tech: "Planetscale",
},
{
url: "/getting-started/setup-prisma/add-to-existing-project/relational-databases-typescript-sqlserver",
image: "/img/technologies/sqlserver.svg",
- tech: "SQL Server"
+ tech: "SQL Server",
},
{
url: "/getting-started/setup-prisma/add-to-existing-project/relational-databases-typescript-mysql",
image: "/img/technologies/mysqlsimple.svg",
- tech: "MySQL"
+ tech: "MySQL",
},
{
url: "/getting-started/setup-prisma/add-to-existing-project/relational-databases-typescript-cockroachdb",
image: "/img/technologies/cockroachdb.svg",
imageDark: "/img/technologies/cockroachdbdark.svg",
- tech: "CockroachDB"
- }
+ tech: "CockroachDB",
+ },
],
link: {
url: "/getting-started/setup-prisma/add-to-existing-project",
label: "How to migrate to Prisma ORM",
- icon: "fa-regular fa-book-open"
- }
- }
+ icon: "fa-regular fa-book-open",
+ },
+ },
];
export const how_do_i = [
{
title: "model my schema?",
- description: "The Prisma Schema (or schema for short) is the main method of configuration for your Prisma ORM setup. It consists of the following parts: Data sources: Specify the details of the data sources Prisma ORM should connect to (e.g. a PostgreSQL database) Generators: Specifies what clients should be generated based on the data model (e.g. Prisma Client)",
+ description:
+ "The Prisma Schema (or schema for short) is the main method of configuration for your Prisma ORM setup. It consists of the following parts: Data sources: Specify the details of the data sources Prisma ORM should connect to (e.g. a PostgreSQL database) Generators: Specifies what clients should be generated based on the data model (e.g. Prisma Client)",
// tags: ["guides", "schema", "orm"],
url: "/orm/prisma-schema/overview",
// time: 15
},
{
title: "cache my queries?",
- description: "Prisma Postgres comes with a built-in global cache (enabled by Prisma Accelerate) that helps you speed up your database queries. You can cache results on a per-query level using the cacheStrategy option in any Prisma ORM query, e.g.:",
+ description:
+ "Prisma Postgres comes with a built-in global cache (enabled by Prisma Accelerate) that helps you speed up your database queries. You can cache results on a per-query level using the cacheStrategy option in any Prisma ORM query, e.g.:",
// tags: ["guides", "schema", "orm"],
- url: "/postgres/caching",
+ url: "/postgres/database/caching",
// time: 15
},
{
title: "integrate with an existing DB?",
- description: "Baselining is the process of initializing a migration history for a database that: âś” Existed before you started using Prisma Migrate âś” Contains data that must be maintained (like production), which means that the database cannot be reset. Baselining tells Prisma Migrate to assume that one or more migrations have already been applied. This prevents generated migrations from failing when they try to create tables and fields that already exist.",
+ description:
+ "Baselining is the process of initializing a migration history for a database that: âś” Existed before you started using Prisma Migrate âś” Contains data that must be maintained (like production), which means that the database cannot be reset. Baselining tells Prisma Migrate to assume that one or more migrations have already been applied. This prevents generated migrations from failing when they try to create tables and fields that already exist.",
// tags: ["guides", "schema", "orm"],
url: "/orm/prisma-migrate/workflows/baselining",
// time: 15
},
{
title: "make CRUD routes?",
- description: "This page describes how to perform CRUD operations with your generated Prisma Client API. CRUD is an acronym that stands for: Create Read Update Delete Refer to the Prisma Client API reference documentation for detailed explanations of each method.",
+ description:
+ "This page describes how to perform CRUD operations with your generated Prisma Client API. CRUD is an acronym that stands for: Create Read Update Delete Refer to the Prisma Client API reference documentation for detailed explanations of each method.",
// tags: ["guides", "schema", "orm"],
url: "/orm/prisma-client/queries/crud",
// time: 15
},
{
title: "get started with models?",
- description: "The data model definition part of the Prisma schema defines your application models (also called Prisma models). Models: Represent the entities of your application domain Map to the tables (relational databases like PostgreSQL) or collections (MongoDB) in your database Form the foundation of the queries available in the generated Prisma Client API",
+ description:
+ "The data model definition part of the Prisma schema defines your application models (also called Prisma models). Models: Represent the entities of your application domain Map to the tables (relational databases like PostgreSQL) or collections (MongoDB) in your database Form the foundation of the queries available in the generated Prisma Client API",
// tags: ["guides", "schema", "orm"],
url: "/orm/prisma-schema/data-model/models",
},
{
title: "filter and sort?",
- description: "Prisma Client supports filtering with the where query option, and sorting with the orderBy query option. Filtering Prisma Client allows you to filter records on any combination of model fields, including related models, and supports a variety of filter conditions.",
+ description:
+ "Prisma Client supports filtering with the where query option, and sorting with the orderBy query option. Filtering Prisma Client allows you to filter records on any combination of model fields, including related models, and supports a variety of filter conditions.",
// tags: ["guides", "schema", "orm"],
url: "/orm/prisma-client/queries/filtering-and-sorting",
// time: 15
},
{
title: "query relations?",
- description: "A key feature of Prisma Client is the ability to query relations between two or more models. Relation queries include: Nested reads (sometimes referred to as eager loading) via select and include Nested writes with transactional guarantees Filtering on related records Prisma Client also has a fluent API for traversing relations.",
+ description:
+ "A key feature of Prisma Client is the ability to query relations between two or more models. Relation queries include: Nested reads (sometimes referred to as eager loading) via select and include Nested writes with transactional guarantees Filtering on related records Prisma Client also has a fluent API for traversing relations.",
// tags: ["guides", "schema", "orm"],
url: "/orm/prisma-client/queries/relation-queries",
// time: 15
},
{
title: "migrate my schema?",
- description: "When working in a team, managing database schema changes can be challenging. This guide shows you how to effectively collaborate on schema changes using Prisma Migrate, ensuring that all team members can safely contribute to and incorporate schema changes.",
+ description:
+ "When working in a team, managing database schema changes can be challenging. This guide shows you how to effectively collaborate on schema changes using Prisma Migrate, ensuring that all team members can safely contribute to and incorporate schema changes.",
// tags: ["guides", "schema", "orm"],
url: "/guides/implementing-schema-changes",
// time: 15
},
{
title: "generate Prisma Client?",
- description: "Prisma Client is a generated database client that's tailored to your database schema. By default, Prisma Client is generated into the node_modules/.prisma/client folder, but we highly recommend you specify an output location.",
+ description:
+ "Prisma Client is a generated database client that's tailored to your database schema. By default, Prisma Client is generated into the node_modules/.prisma/client folder, but we highly recommend you specify an output location.",
// tags: ["guides", "schema", "orm"],
url: "/orm/prisma-client/setup-and-configuration/generating-prisma-client",
// time: 15
- }
-];
\ No newline at end of file
+ },
+];
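
Several of the `how_do_i` descriptions above summarize Prisma Client query features: filtering with `where`, sorting with `orderBy`, and relation queries via `include`/`select`. A small self-contained sketch of how these combine, assuming hypothetical `user` and `post` models with a `posts` relation:

```ts
import { PrismaClient } from '@prisma/client'

const prisma = new PrismaClient()

async function main() {
  // Filtering with `where`, sorting with `orderBy`, and a nested read of
  // related records via `include`, as summarized in the "filter and sort?"
  // and "query relations?" entries above. The `user` and `post` models
  // (with a `posts` relation) are assumed for illustration.
  const users = await prisma.user.findMany({
    where: { email: { endsWith: '@example.com' } },
    orderBy: { name: 'asc' },
    include: { posts: { where: { published: true } } },
  })
  console.log(users)
}

main()
```
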
diff --git a/static/_redirects b/static/_redirects
index e3e54ed64c..ef3fb509fc 100644
--- a/static/_redirects
+++ b/static/_redirects
@@ -533,6 +533,34 @@
/guides/cursor /docs/orm/more/ai-tools/cursor
/guides/multiple-prisma-clients /docs/guides/multiple-databases
+/postgres/getting-started /docs/postgres/introduction/getting-started
+/postgres/overview /docs/postgres/introduction/overview
+/postgres/faq /docs/postgres/more/faq
+/postgres/mcp-server /docs/postgres/integrations/mcp-server
+/postgres/known-limitations /docs/postgres/more/known-limitations
+/postgres/troubleshooting /docs/postgres/more/troubleshooting
+/postgres/connection-pooling /docs/postgres/database/connection-pooling
+/postgres/backups /docs/postgres/database/backups
+/postgres/tcp-tunnel /docs/postgres/database/tcp-tunnel
+/postgres/tooling /docs/postgres/database/tooling
+/postgres/caching /docs/postgres/database/caching
+
+/optimize/recommendations/excessive-number-of-rows-returned /docs/postgres/query-optimization/recommendations/excessive-number-of-rows-returned
+/optimize/recommendations/current-time /docs/postgres/query-optimization/recommendations/current-time
+/optimize/recommendations/unnecessary-indexes /docs/postgres/query-optimization/recommendations/unnecessary-indexes
+/optimize/recommendations/long-running-transactions /docs/postgres/query-optimization/recommendations/long-running-transactions
+/optimize/recommendations/indexing-on-unique-columns /docs/postgres/query-optimization/recommendations/indexing-on-unique-columns
+/optimize/recommendations/storing-blob-in-database /docs/postgres/query-optimization/recommendations/storing-blob-in-database
+/optimize/recommendations/queries-on-unindexed-columns /docs/postgres/query-optimization/recommendations/queries-on-unindexed-columns
+/optimize/recommendations/full-table-scans-caused-by-like-operations /docs/postgres/query-optimization/recommendations/full-table-scans-caused-by-like-operations
+/optimize/recommendations/repeated-query /docs/postgres/query-optimization/recommendations/repeated-query
+/optimize/recommendations/select-returning /docs/postgres/query-optimization/recommendations/select-returning
+/optimize/recommendations/avoid-db-money /docs/postgres/query-optimization/recommendations/avoid-db-money
+/optimize/recommendations/avoid-char /docs/postgres/query-optimization/recommendations/avoid-char
+/optimize/recommendations/avoid-varchar /docs/postgres/query-optimization/recommendations/avoid-varchar
+/optimize/recommendations/avoid-timestamp-timestampz-0 /docs/postgres/query-optimization/recommendations/avoid-timestamp-timestampz-0
+/optimize/recommendations /docs/postgres/query-optimization/recommendations
+
### Dynamic redirects ###
/faq/* https://v1.prisma.io/docs/1.34/faq/:splat
/understand-prisma/* https://v1.prisma.io/docs/1.34/understand-prisma/:splat
@@ -580,4 +608,18 @@
/pulse* /docs/postgres
+/postgres/getting-started* /docs/postgres/introduction/getting-started
+/postgres/overview* /docs/postgres/introduction/overview
+
+/postgres/caching* /docs/postgres/database/caching
+/postgres/connection-pooling* /docs/postgres/database/connection-pooling
+/postgres/backups* /docs/postgres/database/backups
+/postgres/tcp-tunnel* /docs/postgres/database/tcp-tunnel
+/postgres/tooling* /docs/postgres/database/tooling
+/postgres/api-reference* /docs/postgres/database/api-reference
+
+/optimize/recommendations/* /docs/postgres/query-optimization/recommendations
+/optimize/performance-metrics* /docs/postgres/query-optimization/performance-metrics
+
+
### NO REDIRECTS BELOW THIS LINE. ADD REDIRECTS ABOVE THIS SECTION ###
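
One design note on the wildcard rules added above: unlike the dynamic redirects, which use `:splat` to carry the matched remainder into the target URL, the new `path*` rules deliberately collapse every legacy subpath onto a single landing page. Both patterns, using rules that already appear in this file:

```
# Wildcard without :splat: every matching subpath lands on one page.
/postgres/caching* /docs/postgres/database/caching

# Wildcard with :splat: the matched remainder is appended to the target.
/faq/* https://v1.prisma.io/docs/1.34/faq/:splat
```
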