diff --git a/_data/navigation.yml b/_data/navigation.yml
index c89a2870a..c75f94c73 100644
--- a/_data/navigation.yml
+++ b/_data/navigation.yml
@@ -9,36 +9,32 @@ items:
title: Getting Started Tutorial
items:
- url: /tutorial/load/
- title: Part 1 - Loading Data
+ title: "Part 1: Loading Data"
items:
- - url: /tutorial/load/googledrive/
- title: Loading with GoogleDrive Extractor
+ - url: /tutorial/load/googlesheets/
+ title: Loading Data from Google Sheets
- url: /tutorial/load/database/
- title: Loading with Database Extractor
+ title: Loading Data from Database
- url: /tutorial/manipulate/
- title: Part 2 - Data Manipulation
+ title: "Part 2: Data Manipulation"
items:
- - url: /tutorial/manipulate/sandbox/
- title: Using Sandbox
+ - url: /tutorial/manipulate/workspace/
+ title: Using Workspace
- url: /tutorial/write/
- title: Part 3 - Writing Data
- items:
- - url: /tutorial/write/gooddata/
- title: Writing to GoodData
+ title: "Part 3: Writing Data"
- url: /tutorial/automate/
- title: Part 4 - Automation
+ title: "Part 4: Flow Automation"
- url: /tutorial/ad-hoc/
- title: Part 5 - Ad-Hoc Data Analysis
+ title: "Part 5: Ad-Hoc Data Analysis"
- url: /tutorial/branches/
- title: Part 6 - Development Branches
+ title: "Part 6: Development Branches"
items:
-
- url: /tutorial/branches/prepare-tables/
title: Prepare Tables
@@ -57,11 +53,29 @@ items:
- title: Merge to Production
url: /tutorial/branches/merge-to-production/
+ - url: /tutorial/onboarding/
+ title: Keboola Platform Onboarding
+ items:
+ - url: /tutorial/onboarding/usage-blueprint/
+ title: Usage Blueprint
+
+ - url: /tutorial/onboarding/architecture-guide/
+ title: Multi-Project Architecture
+ items:
+ - url: /tutorial/onboarding/architecture-guide/bdm-guide/
+ title: Business Data Model
+
+ - url: /tutorial/onboarding/cheat-sheet/
+ title: Best Practices
+
+ - url: /tutorial/onboarding/governance-guide/
+ title: Governance Guide
+
- url: /components/
title: Components
items:
- url: /components/extractors/
- title: Extractors
+ title: Data Source Connectors
items:
- url: /components/extractors/communication/
title: Communication
@@ -301,7 +315,7 @@ items:
title: YourPass
- url: /components/writers/
- title: Writers
+ title: Data Destination Connectors
items:
- url: /components/writers/bi-tools/
title: Business Intelligence
diff --git a/_includes/tip.html b/_includes/tip.html
new file mode 100644
index 000000000..e9e17dc0a
--- /dev/null
+++ b/_includes/tip.html
@@ -0,0 +1,11 @@
+
+
+
+ {% if include.title %}
+ {% capture title %}{{"ⓘ **Tip: " | append: include.title | append: "**"}}{% endcapture %}
+ {% else %}
+ {% capture title %}ⓘ **Tip**{% endcapture %}
+ {% endif %}
+ {{ title | markdownify}}
+ {{include.content | markdownify}}
+
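A minimal usage sketch for the new `tip.html` include, assuming it is called from a Markdown page with the `title` and `content` parameters that the template reads (the example wording itself is illustrative only):

```liquid
{% include tip.html title="Naming configurations" content="Use a name that distinguishes this configuration from others, for example by the credentials it uses." %}
```

The `title` parameter is optional; when it is omitted, the template's else branch renders a plain "Tip" heading.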
diff --git a/_includes/warning.html b/_includes/warning.html
index 8cf8518e9..e458b0a36 100644
--- a/_includes/warning.html
+++ b/_includes/warning.html
@@ -1,5 +1,8 @@
-
Warning: {{include.content}}
+ ⓘ
Important:
+
+ {{include.content | markdownify}}
+
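Because `include.content` is now piped through `markdownify`, Markdown inside the passed content renders in the warning box. A sketch of a call from a page, with illustrative wording:

```liquid
{% include warning.html content="Deleting a bucket **cannot be undone**. Back up the data first." %}
```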
diff --git a/_sass/variables.scss b/_sass/variables.scss
index 6946a1432..a3ae95c9a 100644
--- a/_sass/variables.scss
+++ b/_sass/variables.scss
@@ -870,7 +870,7 @@ $headings-small-color: $gray-light !default;
//** Blockquote small color
$blockquote-small-color: $gray-light !default;
//** Blockquote font size
-$blockquote-font-size: ($font-size-base * 1.25) !default;
+$blockquote-font-size: $font-size-base !default;
//** Blockquote border color
$blockquote-border-color: $gray-lighter !default;
//** Page header border color
diff --git a/assets/css/style.scss b/assets/css/style.scss
index 050134506..15afd4810 100644
--- a/assets/css/style.scss
+++ b/assets/css/style.scss
@@ -272,6 +272,10 @@ img {
}
}
+blockquote {
+ color: $gray-light;
+}
+
.footer {
.inside {
text-align: center;
diff --git a/components/extractors/index.md b/components/extractors/index.md
index bba3d3c06..f96bf3879 100644
--- a/components/extractors/index.md
+++ b/components/extractors/index.md
@@ -1,5 +1,5 @@
---
-title: Extractors
+title: Data Source Connectors
permalink: /components/extractors/
redirect_from:
- /extractors/
@@ -9,38 +9,38 @@ redirect_from:
* TOC
{:toc}
-Extractors are [Keboola Connection components](/components/) used for **importing data from external sources into Keboola Connection**.
+Data source connectors (formerly known as Extractors) are [Keboola components](/components/) used for **importing data from external sources into Keboola**.
-## Types of Extractors
-Typically, extractors connect to [APIs](https://en.wikipedia.org/wiki/Application_programming_interface#Web_APIs)
-external services, like Facebook, Youtube, Gmail and so on.
+## Types of Data Source Connectors
+Typically, data source connectors connect to [APIs](https://en.wikipedia.org/wiki/Application_programming_interface#Web_APIs)
+of external services, like Facebook, YouTube, Gmail, and so on.
They can also connect directly to an arbitrary database.
-Extractors can be grouped by their primary purpose:
+They can be grouped by their primary purpose:
-- Database extractors: [SQL Databases](/components/extractors/database/sqldb/) and [NoSQL MongoDB](/components/extractors/database/mongodb/) and extractors from generic [Storage services](/components/extractors/storage/)
+- Database data source connectors: [SQL Databases](/components/extractors/database/sqldb/), [NoSQL MongoDB](/components/extractors/database/mongodb/), and connectors for generic [Storage services](/components/extractors/storage/)
- [Communication](/components/extractors/communication/),
-[Social Networks](/components/extractors/social/) and [Marketing and Sales](/components/extractors/marketing-sales/) extractors
-- [Other](/components/extractors/other/) extractors such as Geocoding-Augmentation or Weather Forecast
+[Social Networks](/components/extractors/social/) and [Marketing and Sales](/components/extractors/marketing-sales/) data source connectors
+- [Other](/components/extractors/other/) data source connectors, such as Geocoding-Augmentation or Weather Forecast
-For a definitive list of usable extractors, see your project **Extractors** section or the
+For a definitive list of usable data source connectors, see your project **Data Sources** section or the
[public list of components](https://components.keboola.com/components).
-## Working with Extractors
-Even though extractors are generally designed for [**automated and repeated**](/orchestrator/) data collection,
+## Working with Data Source Connectors
+Even though data source connectors are generally designed for [**automated and repeated**](/orchestrator/) data collection,
they can be triggered manually at any time.
- For manual import of ad-hoc data, see [Data Import in Storage](/storage/files/), or our [tutorial on manual data loading](/tutorial/load/).
-- Configure a [sample extractor](/tutorial/load/googledrive/) (GoogleDrive).
-- Configure a [Database extractor](/tutorial/load/database/);
-other SQL database extractors are configured in the exact same way.
+- Configure a [sample data source connector](/tutorial/load/googlesheets/) (Google Sheets).
+- Configure a [database data source connector](/tutorial/load/database/);
+other SQL database data source connectors are configured in the exact same way.
-As bringing data into Keboola Connection is the main purpose of an extractor, go the path of least resistance:
+As bringing data into Keboola is the main purpose of a data source connector, take the path of least resistance:
**Get your data in first, and then convert it to what you want it to look like.**
To give an example, when connecting to existing information systems, do not modify the data in them.
-Such data conversion can prove to be difficult and expensive. Extract what you need and shape it in Keboola Connection.
+Such data conversion can prove to be difficult and expensive. Extract what you need and shape it in Keboola.
## Limitations
Some extractors have **limits inherent to their sources**. Unfortunately, there is not much we can do about it.
-For example, the Twitter extractor will not let you access the history of a particular tweet account beyond a certain point
+For example, the Twitter data source connector will not let you access the history of a particular Twitter account beyond a certain point
because of the [limitations](https://stackoverflow.com/questions/1662151/getting-historical-data-from-twitter) of Twitter API.
diff --git a/components/extractors/other/telemetry-data/telemetry-data.md b/components/extractors/other/telemetry-data/telemetry-data.md
index d6cb74ea6..800c64829 100644
--- a/components/extractors/other/telemetry-data/telemetry-data.md
+++ b/components/extractors/other/telemetry-data/telemetry-data.md
@@ -286,6 +286,8 @@ data in storage, and the number of users. This combines data from different data
* `Snowflake Sandbox` - `kbc_snowflake_stats` (records with **sandbox** *snowflake_job_type*)
* `Transfromations` - `kbc_job` (jobs with **SQL** *transformation_type*)
* `Writers` - `kbc_job` (jobs with **writer** *component_type*)
+* `BAPI Messages` - Buffer API (data streams) usage; only aggregated values available
+* `BAPI Receiver` - Buffer API endpoints used; only aggregated values available
*Note: `organization_value` and `company_value` are available in **Organization** mode only.
You need data for all projects.*
@@ -327,6 +329,11 @@ organization, or the Keboola Connection platform itself.
| `kbc_token_id` | Unique identifier of the token containing stack identification | `47949_kbc-us-east-1` |
| `context_admin_email` | Email of the user in the context with the event (e.g., invitation or admin removal) | `john.doe@keboola.com` |
| `context_admin_name` | Name of the user in the context with the event (e.g., invitation or admin removal) | `Martin Matejka` |
+| `context_merge_request_id` | ID of the merge request (related to branch merge request events in SOX projects) | `42` |
+| `context_merge_request_name` | Name of the merge request (related to branch merge request events in SOX projects) | `Update of my configuration` |
+| `context_operation` | Type of the merge request operation (`request_review`, `finish_review`, `approve`, `merge`, `publish`) | `request_review` |
+| `context_state_from` | Original state of the merge request operation (`development`, `in_review`, `approved`, `in_merge`) | `in_review` |
+| `context_state_to` | End state of the merge request operation (`in_review`, `approved`, `in_merge`, `published`) | `approved` |
#### Security event operations
@@ -436,6 +443,8 @@ organization, or the Keboola Connection platform itself.
|`auditLog.storageBackendConnection.deleted`
|`auditLog.storageBackendConnection.listed`
|`auditLog.storageBackendConnection.updated`
+|`auditLog.mergeRequest.created`
+|`auditLog.mergeRequest.stateChanged`
#### Operation parameters
@@ -570,6 +579,30 @@ This table shows data about flow tasks.
| `task_kbc_component_id` | Unique KBC component identifier | `keboola.wr-db-snowflake_kbc-us-east-1` |
| `task_kbc_component_configuration_id` | Foreign key to the flow component configuration | `7880_kbc-us-east-1_keboola.wr-db-snowflake_952663182` |
+### kbc_job_input_table
+This table shows data about all input tables of the job.
+
+| **Column** | **Description** | **Example** |
+|---|---|---|
+| `kbc_job_id` (PK) | KBC job identifier | `963416992_kbc-us-east-1` |
+| `kbc_project_id` | Foreign key to the KBC project | `7880_kbc-us-east-1` |
+| `table_id` | Identifier of the table | `in.c-in_sh_kbc_internal.kbc_project` |
+| `kbc_project_table_id` (PK) | Foreign key to the KBC Table | `7880_kbc-us-east-1_in.c-in_sh_kbc_internal.kbc_project` |
+| `table_name` | Name of the table | `kbc_project` |
+| `mappings` | Number of times the table was used in the job input (i.e., one table can be used multiple times in the input mapping of the transformation) | `1` |
+
+### kbc_job_output_table
+This table shows data about all output tables of the job.
+
+| **Column** | **Description** | **Example** |
+|---|---|---|
+| `kbc_job_id` (PK) | KBC job identifier | `909588277_kbc-us-east-1` |
+| `kbc_project_id` | Foreign key to the KBC project | `7880_kbc-us-east-1` |
+| `table_id` | Identifier of the table | `out.c-kbc_billing.kbc_event` |
+| `kbc_project_table_id` (PK) | Foreign key to the KBC Table | `7880_kbc-us-east-1_out.c-kbc_billing.kbc_event` |
+| `table_name` | Name of the table | `kbc_event` |
+| `mappings` | Number of times the table was used in the job output (i.e., one table can be written multiple times to the storage in the output mapping of the transformation) | `1` |
+
### kbc_notification_subscription
This table shows data subscriptions to notifications sent by Keboola Connection (mostly flow notifications).
diff --git a/components/index.md b/components/index.md
index 2fcef4f8d..ce830e82a 100644
--- a/components/index.md
+++ b/components/index.md
@@ -8,57 +8,95 @@ redirect_from:
* TOC
{:toc}
-In the Keboola Connection platform, most of the data processing functions are implemented in **Components**.
-Components are divided into four main categories:
+In the Keboola platform, most of the data processing functions are implemented in **Components**.
+Components are divided into the following categories:
-- [**Extractors**](/components/extractors/) -- bring data into a Keboola Connection project.
-- [**Writers**](/components/writers/) -- send processed data into a target system.
-- [**Applications**](/components/applications/) -- process complex data.
+- [**Data source connectors (extractors)**](/components/extractors/) – bring data into a Keboola project.
+- [**Data destination connectors (writers)**](/components/writers/) – send processed data into a target system.
+- [**Applications**](/components/applications/) – process complex data.
-All components, regardless of their category, behave the same way. To use a component, you have to
-create a **configuration** first. A configuration is used to set the necessary parameters for each
-component (e.g., credentials and other specification of what to do). Then it can be run
---- a **job** is created and does the actual work.
+All components, regardless of their category, behave the same way. To use a component, you have to create a **configuration** first. A configuration is used to set the necessary parameters for each
+component (e.g., credentials and other specifications of what to do). Then it can be run — a **job** is created and does the actual work.
+
+## Component Release Stages
+Our components are released at different stages and are assigned labels to indicate the level of quality and production readiness. We strive to deliver the highest quality components.
+Our goal is to unlock as many interesting integrations as possible, benefiting all our customers. The component release grades allow us to manage the release process effectively
+and align expectations accurately.
+
+| | Experimental | Beta | Production (GA) |
+|---|---|---|---|
+| **Availability** | Available either in the UI or per request | Listed in the official component list and available to all customers | Listed in the official component list and available to all customers |
+| **Support & SLA** | No or limited SLA | Officially supported by Keboola's Standard SLA – in active development | Officially supported by Keboola's Standard SLA |
+| **Production ready** | Functional, but only for limited use cases<br/>Experimental component, not advisable for business critical processes<br/>May not ever move to Beta/GA | Yes – with limitations<br/>Will eventually move to GA<br/>(e.g., still in the process of fine-tuning, tested on fewer production use cases, may contain less robust documentation) | Yes<br/>Stable version, tested on many production use cases<br/>Fully documented |
+| **Updates & Maintenance** | Breaking changes may be introduced (It is still possible to fix certain versions to avoid BC issues)<br/>Maintenance may end at any time → may be deprecated | Monitored via standard processes<br/>Always backward compatible changes<br/>In active development | Monitored via standard processes<br/>Always backward compatible changes, announced via standard channels |
+
+### Production (GA)
+This version is stable and has been well-tested in many production scenarios by numerous customers. GA (General Availability) versions feature comprehensive documentation
+and components with a low error rate, making them suitable for mission-critical production use cases.
+
+All potential updates to the component are guaranteed to be backward compatible.
+
+### Beta
+Components are available to all customers. These components are usually early additions and may still be in rapid development.
+
+All changes are backward compatible.
+
+**Limitations:**
+
+- Being in the early stages, these components are used in fewer projects. Minor bugs, though promptly fixed within the standard SLA, may appear more frequently.
+- The range of features might not be exhaustive and is likely to expand through backward-compatible updates.
+- Documentation may be less comprehensive than in the GA version, often given in the form of README.md files.
+- These components may have certain limitations, such as pending Google verification, verification status-related rate limits, and more.
+
+### Experimental
+Developed for specific use cases but not tested in various scenarios, these components, while highly experimental, are functional. They may contain undiscovered bugs when used in untested scenarios
+and might impose other limitations, such as rate limiting, reliance on less stable proprietary libraries, dependency on the source website structure (scraping), and others.
+
+Experimental components may not progress to the Beta and GA stages.
+
+Nevertheless, these components can address obscure use cases and deliver unique integrations. As their code is public, they can serve as a foundation for custom forks,
+which could also include Generic Extractor configurations.
+
+Many of these components also serve our internal purposes, and we decided to share them publicly for the benefit of our community.
+
+### Private / Unlisted
+Some components are still unlisted for various reasons. The full list of components available in each stack is accessible via the public [Storage API index call](https://keboola.docs.apiary.io/#reference/miscellaneous/api-index/component-list).
+Many of these are 3rd-party components. Users can add these components via their ID, but we cannot guarantee their functionality.
+
+We may share our pre-release versions that exist in private Beta with our test user groups. In such a case, you will receive a component ID that you can use to create a configuration.
+These components will eventually transition to a public Beta.
## Creating Component Configuration
-To create a new component configuration, select *Components* from the top navigation and then select one of the
-component categories:
+To create a new component configuration, select *Components* from the top navigation and then select one of the component categories:
{: .image-popup}

-The following page shows a list of the currently existing configurations (extractors in this example)
-in the project. To create a new configuration of a new component,
-use the **Directory** or the **Add New Extractor** button.
+The following page lists the current configurations (e.g., extractors) in the project. To create a new configuration of a new component,
+use the **Directory** or the **Add New Data Source** button.
{: .image-popup}

-Use the search field to find the component you want to use (the Currency Rates extractor in this case) and
-then click the component tile to add it:
+Use the search field to find the component you want to use (the Currency Rates extractor in this case) and then click the component tile to add it:
{: .image-popup}

-The following page describes in detail what the component does and allows you to create a new configuration
-using the **New Configuration** button.
+The following page describes in detail what the component does and allows you to create a new configuration using the **New Configuration** button.
{: .image-popup}

-In the dialog, enter a name for the configuration. If a component has a single configuration in your project,
-the name is not that important. However, for components with multiple configurations
-(e.g., configured with different credentials)
-the names should meaningfully distinguish one configuration from another.
+In the dialog, enter a name for the configuration. If a component has a single configuration in your project, the name is not that important. However, for components with multiple configurations
+(e.g., configured with different credentials) the names should meaningfully distinguish one configuration from another.
{: .image-popup}

The next page shows a form for configuring the component, this varies heavily between different components.
-Component configuration can range from trivial (as in the case of the
-[Currency extractor](/components/extractors/other/currency-rates/)) to very complex
-ones (e.g., [Google Ads extractor](/components/extractors/marketing-sales/google-ads/)). The configuration
-complexity badge shown in the component list gives you a rough idea of what to expect.
+Component configurations can range from trivial (as in the case of the [Currency extractor](/components/extractors/other/currency-rates/)) to very complex
+(e.g., the [Google Ads extractor](/components/extractors/marketing-sales/google-ads/)). The configuration complexity badge shown in the component list gives you a rough idea of what to expect.
{: .image-popup}

@@ -68,12 +106,11 @@ When you set the parameters and **Save** them, you can actually run the componen
{: .image-popup}

-When you run a component, a [Job](/management/jobs/) is created and subsequently executed. The right panel shows the last executed jobs
-with an indication of their status:
+When you run a component, a [job](/management/jobs/) is created and subsequently executed. The right panel shows the last executed jobs with an indication of their status:
-- yellow -- running
-- red -- failed
-- green -- successful
+- Yellow – Running
+- Red – Failed
+- Green – Successful
You can click each job to view its [details](/management/jobs/), including the tables it reads from your project and
the tables it produced in your project. When running the configuration, its active version (the one with the green tick-mark) will be used.
@@ -86,23 +123,19 @@ The configuration description supports rich text formatting using [Markdown](htt
{: .image-popup}

-The bottom right panel shows a list of the configuration versions. To see their full list, use the links.
-The version list is complete and allows you to compare adjacent versions or revert to any previous version.
-The bottom right panel shows list of the configuration versions. You can use the links to see full list of
-the configuration versions. Use the version list to
+The bottom right panel shows a list of the configuration versions. Use the list to
- copy any version.
- compare any two successive versions.
-- rollback to an older version.
+- roll back to an older version.
-All of the operations can be [accessed via API](https://keboola.docs.apiary.io/#reference/component-configurations/create-config).
+All of the operations can be [accessed via an API](https://keboola.docs.apiary.io/#reference/component-configurations/create-config).
For working with configurations, see the [developer guide](https://developers.keboola.com/integrate/storage/api/configurations/).
**Important**: Component configurations do not count towards your project quota.
The version list is unlimited. Configuration versions are also created when the configurations are manipulated
-programmatically using [the API](https://developers.keboola.com/overview/api/). In other words, there is no way
-to modify a configuration without the changes being recorded.
+programmatically using [the API](https://developers.keboola.com/overview/api/). In other words, all configuration modifications are recorded.
### Compare Versions
You can compare adjacent versions by clicking the *compare* icon:
@@ -110,21 +143,21 @@ You can compare adjacent versions by clicking the *compare* icon:
{: .image-popup}

-When you compare two versions, a difference of the raw JSON configurations is shown.
+When you compare two versions, the differences between the raw JSON configurations are displayed.
{: .image-popup}

-When you rollback a configuration, a new version is created. This means that you never lose any version of
+When you roll back a configuration, a new version is created. This means that you never lose any version of
a configuration and that there is always an option to get back to it. Configuration versions are also created when
the configurations are manipulated programmatically via [the API](https://developers.keboola.com/overview/api/).
### Rollback Version
-If you need to return to an older version of the configuration, you can also rollback to it (the other option is to make its copy).
+If you need to return to an older version of the configuration, you can also roll back to it (the other option is to make its copy).
Rolling back a configuration version actually means that a new configuration version is created (and marked as active)
with the contents of the selected version. A rollback is therefore quite a safe operation.
-Click the *rollback* icon next to the version you want to return to:
+Click the *rollback* icon next to the version you want to return to:
{: .image-popup}

@@ -135,7 +168,7 @@ Confirm the rollback and see the result:

### Copy Configuration
-You can also use the version list to create a **Copy of the configuration**:
+You can also use the version list to create a **copy of the configuration**:
{: .image-popup}

@@ -145,7 +178,7 @@ You can customize the name of the configuration copy:
{: .image-popup}

-The copy of the configuration is created as a new isolated configuration -- i.e., there is no link
+The copy of the configuration is created as a new isolated configuration – i.e., there is no link
between the original configuration and the copy, and the changes to one have no effect on the other.
The new configuration is completely independent on the old one. You may modify or delete
either of them without affecting the other one.
@@ -206,16 +239,16 @@ order of rows has no effect on your project, because a Job is finished only afte
### Configuration Rows Versions
Changes to configuration rows are part of the [configuration versioning](#configuration-versions). The following image shows that
-the versions in the configuration page list changes to the configuration rows -- both that a table was added and that it was modified.
+the versions in the configuration page list changes to the configuration rows – both that a table was added and that it was modified.
{: .image-popup}

That means that each configuration version contains a complete set of its rows. This is important when copying or rolling back a
-version -- you can do these operations safely without worrying about rows.
+version – you can do these operations safely without worrying about rows.
When you edit a configuration row, there is a also a list of **row versions**. Row versions show changes only to the single
-row. You can rollback a row to a previous version without interacting with the other rows.
+row. You can roll back a row to a previous version without interacting with the other rows.
{: .image-popup}

@@ -226,8 +259,8 @@ configuration.*
## Authorization
Many services support authorization using the [OAuth protocol](https://en.wikipedia.org/wiki/OAuth). For you (as the end user)
it means that the service does not require entering credentials (username, password, token, etc.). Instead you are
-redirected to the service itself where you authorize the Keboola Connection component. Then you are redirected back to
-Keboola Connection and you can set other parameters and run the configuration.
+redirected to the service itself where you authorize the Keboola component. Then you are redirected back to
+Keboola and you can set other parameters and run the configuration.
The OAuth authorization process begins with the **Authorize** button (in this example the
[Google calendar extractor](/components/extractors/communication/google-calendar/) is shown):
@@ -238,10 +271,10 @@ The OAuth authorization process begins with the **Authorize** button (in this ex
In the next step, you can choose the authorization method:
- **Instant**: Use this method if you have direct access to the account; the authorization will be done immediately.
-- **External**: If you need to authorize access to the service from someone who does not have an account in Keboola Connection, you can generate an external link, which will guide them through this process.
+- **External**: If you need to authorize access to the service from someone who does not have an account in Keboola, you can generate an external link, which will guide them through this process.
OAuth authorization is a very secure authorization method in which you don't have to hand over the
-credentials to your account. The consumer -- Keboola Connection component -- obtains only the minimal required
+credentials to your account. The consumer -- Keboola component -- obtains only the minimal required
access. The authorization is only valid for the configuration in which it was created and for its **copies**.
### Instant Authorization
diff --git a/components/writers/index.md b/components/writers/index.md
index 7aca46a88..dda0668cd 100644
--- a/components/writers/index.md
+++ b/components/writers/index.md
@@ -1,32 +1,32 @@
---
-title: Writers
+title: Data Destination Connectors
permalink: /components/writers/
redirect_from:
- /writers/
---
-Writers are [Keboola Connection components](/overview/) that take transformed and processed **output data from Keboola Connection**
+Data destination connectors are [Keboola components](/overview/) that take transformed and processed **output data from Keboola**
and deliver it **into its final destination:** the systems and applications where the data gets used/**consumed**.
-## Types of Writers
+## Types of Data Destination Connectors
We integrate your data into most of the top market systems.
-Choose the right consumption point for each project and use case. Like with Extractors,
+Choose the right consumption point for each project and use case. Like with data source connectors,
there are **no limitations** to how and where you can send your data.
-Writers can be grouped by their primary purpose:
+Data destination connectors can be grouped by their primary purpose:
- **Business Intelligence**: [Tableau](/components/writers/bi-tools/tableau/), [GoodData](/components/writers/bi-tools/gooddata/), [Looker](/components/writers/bi-tools/looker/), and more
- **Databases**: [MySQL](/components/writers/database/mysql/), [Oracle](/components/writers/database/oracle/), [PostgreSQL](/components/writers/database/postgresql/), [Amazon Redshift](/components/writers/database/redshift/), [Snowflake](/components/writers/database/snowflake/), [Synapse](/components/writers/database/synapse/), and more
- **Generic Storage**: [AWS S3](/components/writers/storage/aws-s3/), [Dropbox](/components/writers/storage/dropbox/), [Google Drive](/components/writers/storage/google-drive/),
[Google Sheets](/components/writers/storage/google-sheets/), [Keboola Connection Storage](/components/writers/storage/storage-api/), and more
-- [Other](/components/writers/other/) writers such as Azure Event Hub
+- [Other](/components/writers/other/) data destination connectors such as Azure Event Hub
-For a definitive list of usable writers, see your project **Writers** section.
+For a definitive list of usable data destination connectors, see your project **Data Destinations** section.
-## Working with Writers
-Each writer can have multiple configurations. Each configuration usually represents a single destination (database account, BI project, etc.).
-Even though writers are generally designed for [**automated and repeated**](/orchestrator/) data collection,
+## Working with Data Destination Connectors
+Each data destination connector can have multiple configurations. Each configuration usually represents a single destination (database account, BI project, etc.).
+Even though data destination connectors are generally designed for [**automated and repeated**](/orchestrator/) data delivery,
they can be triggered manually at any time.
We provide tutorials on [writing into GoodData](/tutorial/write/gooddata/) and [writing into Tableau](/tutorial/write/).
diff --git a/index.md b/index.md
index 7f6fe1d95..70636d728 100644
--- a/index.md
+++ b/index.md
@@ -1,53 +1,41 @@
---
-title: Keboola Connection User Documentation
+title: Keboola User Documentation
permalink: /
---
-Welcome to the Keboola Connection Manual pages.
-If you are working with the Keboola Connection UI as an end-user, you are at the right place.
+Welcome to the Keboola documentation—a comprehensive resource offering step-by-step guidance and reference information to help users
+seamlessly navigate the platform. Whether you're a newcomer or an experienced user, this documentation is your go-to companion for mastering
+the ins and outs of Keboola.
* TOC
{:toc}
-## What is Keboola Connection?
-
-Keboola Connection is a powerful data preparation platform composed of many interconnected components,
-extracting data from various sources,
-manipulating and enriching the data, and writing the results to desired Business Intelligence tools.
-It is a safe, open and extendable cloud based environment for working with your data.
-
## Where to Start
+If you are new to Keboola and **would like a quick orientation** to the Keboola ecosystem, explore our comprehensive [overview](/overview/).
-If you are new to Keboola Connection and want a **brief orientation** to the Keboola ecosystem,
-please check out our [overview](/overview/), or watch a short [introductory video](https://www.youtube.com/watch?v=yaA7_N5Ymmc&feature=youtu.be).
-For some hands-on experience, see our [tutorial](/tutorial/).
-
-If you are already familiar with how Keboola Connection works, go directly to [connection.keboola.com](https://connection.keboola.com)
-and log in. If your project is not in the default US region, use the region selector to choose the appropriate region.
-
-We are available to provide [support](/management/support/) whenever needed. In case you don't have access
-to a Keboola Connection project, you can write an email to [support@keboola.com](mailto:support@keboola.com).
-Please use the [support form](/management/support/) inside your project as that provides
-us with valuable context.
-
-## More to Read
-
-- [www.keboola.com](https://www.keboola.com/) --- our main web page
-- [blog.keboola.com](https://blog.keboola.com/) --- something to read for data analysts
-- [500.keboola.com](https://500.keboola.com/) --- something to read for tech geeks
-- [developers.keboola.com](https://developers.keboola.com) --- documentation for extending or integrating Keboola Connection
+Ready to take the next step? Grab your free trial and set up your account [here](https://connection.north-europe.azure.keboola.com/wizard),
+or connect with our team directly through [this link](https://www.keboola.com/contact).
-## Keboola Connection Status Updates
+For some hands-on experience, dive into our Getting Started [tutorial](/tutorial/).
-There are two equivalent places where you can get Keboola Connection Status updates (service status and changelog):
+Rest assured, we're here to support you whenever needed. Reach out via email at [support@keboola.com](mailto:support@keboola.com) or use the [support form](/management/support/) directly within your project.
-- [status.keboola.com](https://status.keboola.com/) --- we recommend subscribing to the feed
-- [Twitter](https://twitter.com/keboola_support)
+## Manage Keboola
+- [User management](/management/#user-management)
+- [Account administration](/management/account/)
+- [Data governance](/management/account/)
-## Your Tips and Suggestions
-Use the *Feature Wishlist* in the *Settings* menu or [ideas.keboola.com](https://ideas.keboola.com/)
-to send us your suggestions for new features or improvements.
+## Reference
+- [API reference](https://developers.keboola.com/overview/api/)
+- [Release notes](https://changelog.keboola.com/)
-{: .image-popup}
-
+## Resources
+- [status.keboola.com](https://status.keboola.com/) – get Keboola status updates (we recommend subscribing to the feed)
+- [www.keboola.com](https://www.keboola.com/) – our main web page
+- [blog.keboola.com](https://blog.keboola.com/) – something to read for data analysts
+- [500.keboola.com](https://500.keboola.com/) – something to read for tech geeks
+- [developers.keboola.com](https://developers.keboola.com) – documentation for extending or integrating Keboola
+## We Highly Appreciate Your Input!
+Feel free to utilize the support form within your Keboola project to share feedback, request new features or components,
+or engage in a conversation with us. Your insights are valuable, and we're here to listen and collaborate.
diff --git a/orchestrator/index.md b/orchestrator/index.md
index 9b05e106e..f52ff5d4f 100644
--- a/orchestrator/index.md
+++ b/orchestrator/index.md
@@ -3,6 +3,8 @@ title: Orchestrator
permalink: /orchestrator/
---
+***Note:** This page will be updated soon.*
+
Bringing systems for data loading, manipulation and writing together is what makes
[Keboola Connection](/overview/) so powerful and easy to use. With [extractors](/components/extractors/), you can fetch
data from data sources into [Storage](/storage/). With [transformations](/transformations/) and
diff --git a/overview/index.md b/overview/index.md
index cc5f2f382..be78b1dcb 100644
--- a/overview/index.md
+++ b/overview/index.md
@@ -6,178 +6,161 @@ permalink: /overview/
* TOC
{:toc}
-Keboola Connection is a cloud platform for **interconnecting diverse systems**. It is used to
+Keboola is a cloud-based data platform that provides tools for data engineering, integration, transformation, and orchestration.
-- *extract data* from a source system,
-- *manipulate and augment* the extracted data with other data, and finally,
-- *write the results* to a destination system.
+Key aspects of Keboola include:
+- **Data Integration** – extracting data from diverse sources, such as databases, cloud services, and APIs, as well as loading it into a wide range of data destinations.
+- **Data Storage** – data processed in Keboola is stored in its data warehousing infrastructure (Snowflake, BigQuery, Redshift, Synapse or others), making it easily accessible for analysis.
+- **Data Manipulation** – a wide range of tools to clean, enrich, transform, and analyze data using SQL, Python, R, or other languages.
+- **Automation** – users can build data Flows and automate the entire process end to end.
-To give a simple **example** of what this means, you might use Keboola Connection to *extract* data about your customers from your Salesforce CRM.
-Then you *extract* comments from your Facebook page and find sentiment in them.
-After that, you join those data sets together with weather reports, and *write* everything into Tableau Online
-in order to create reports and analyze your customers' behavior.
+Keboola is a preferred choice for data engineers, data analysts, and data scientists seeking to **optimize data processes and establish a unified platform
+for data-related tasks**. For organizations, one of Keboola’s paramount advantages lies in the consolidation of the entire data stack. By using an all-in-one platform,
+organizations can efficiently **govern and manage the data ecosystem**, making it invaluable for extracting insights, whether for business intelligence, reporting,
+or more advanced data science and machine learning applications.
-## Keboola Connection Architecture
+## Deployment Options
+The Keboola platform is typically **fully managed by Keboola**. However, it also supports **multi-tenant, hybrid, and private cloud deployments**.
-The following chart shows how Keboola Connection platform is structured. The platform is composed of many components which
-are structured into categories described below.
+In the most common multi-tenant deployment, all resources are managed and fully maintained by Keboola.
-{: .img-responsive}
+In the multi-tenant **'bring your own database'** option, you can use your own Snowflake, BigQuery, Redshift, Synapse, or other data storage,
+while the rest is still managed and maintained by Keboola.
-### Configurations
+With the single-tenant option, Keboola is **deployed to your cloud environment** (AWS, Azure, or GCP) and supports authentication via your own identity management.
-Typically, Keboola Connection is fully managed by Keboola.
-However, it also supports multi-, hybrid- and private cloud deployments.
+## Keboola Architecture
+Your Keboola account is structured around [**projects**](/management/project/). While the [Free Plan](/management/payg-project/) includes
+a single project, clients with a subscription to Keboola can enjoy the flexibility of multiple projects organized within
+a versatile [**multi-project architecture**](/tutorial/onboarding/architecture-guide/). This architecture not only accommodates the implementation
+of a Data Mesh approach but also supports a robust data warehouse structure tailored to specific needs and use cases.
-When finishing your innovation cycle in the cloud,
-your data pipeline processes can be compiled and executed in the cloud within your secure Keboola Connection environment.
-However, you can also do an on-premise offload utilising your existing hardware inside your private cloud infrastructure.
+The following diagram illustrates the structure of a single Keboola project, composed of various categorized components described below.
+{: .image-popup}
+
-### Data Sources
-Data sources are systems containing data you wish to [bring into Keboola Connection](/tutorial/load/).
-They can be pretty much anything from Google Analytics, Facebook, SalesForce to on-premise databases,
-legacy systems or even appliances and IoT devices. Data sources are not part of Keboola Connection.
-
-### Extractors
-[Extractors](/components/extractors/) are Keboola Connection components used for gathering data from sources.
-Typically, they connect to [APIs](https://en.wikipedia.org/wiki/Web_API) of external
-services. But they can also connect directly to an arbitrary database, or process incoming e-mails.
+### Data Source Connectors
+[Data source connectors](/components/extractors/), formerly known as [extractors](/components/extractors/), are Keboola components used
+to gather data from various sources. They can connect to APIs of external services, databases, applications, object storages, and many others.
### Storage
-[Storage](/storage/) is the central Keboola Connection component managing everything related to storing data and accessing it.
-It has two sections: [File Storage](/storage/files/) with all raw files uploaded
-to your project, and [Table Storage](/storage/tables/) where all data tables are organized
-into buckets which are further organized into *in* and *out* stages.
+[Storage](/storage/) is the central component in Keboola responsible for data management and access. It comprises two sections: [File Storage](/storage/files/)
+with all raw files uploaded to your project, and [Table Storage](/storage/tables/) where all data tables are organized into buckets,
+further categorized into *in* and *out* stages.
-Storage is implemented as a layer on top of various database engines that we use as our [backends](/transformations/#backends) ([Snowflake](https://www.snowflake.com/) and [Redshift](https://aws.amazon.com/redshift/)).
-It provides an important API (Storage API) access for other components and 3rd party applications.
-Your own **remote storage** can be connected to Keboola Connection as well.
+This component operates as an abstraction layer on top of various [backend](/transformations/#backends) database engines
+including [Snowflake](https://www.snowflake.com/), [Redshift](https://aws.amazon.com/redshift/), [BigQuery](https://cloud.google.com/bigquery/),
+[Synapse](https://azure.microsoft.com/en-us/services/synapse-analytics/), and more. It offers a vital API, the Storage API,
+for interactions with the data, facilitating communication with other components and third-party applications.
### Transformations
-[Transformations](/transformations/) are components which end-users can create by writing a **free-form script** in
-[SQL](https://en.wikipedia.org/wiki/SQL) (Snowflake, Redshift), [Julia](https://julialang.org/),
-[Python](https://www.python.org/about/) and [R](https://www.r-project.org/about.html).
-Keboola Connection provides each user with [Sandbox](/transformations/sandbox/) --- a safe environment for your experiments.
-
-### Applications
-Unlike the free-form Transformations, [Applications](/components/applications/) are **predefined blocks**, which
-can be used to do some pretty advanced stuff like sentiment analysis, association discovery, or histogram grouping.
-Applications can also augment data (for example, add Weather or Exchange Rates) by calling on *3rd party services*
-to bring in additional data.
+[Transformations](/transformations/) allow end-users to create custom scripts in [SQL](https://en.wikipedia.org/wiki/SQL) (Snowflake, Redshift, BigQuery, and
+more), dbt, [Julia](https://julialang.org/), [Python](https://www.python.org/about/), and [R](https://www.r-project.org/about.html).
+Keboola provides Workspaces, offering a safe environment for experimentation, analytics and transformation development.
-### Writers
-[Writers](/components/writers/) are components delivering output data from Keboola Connection into the systems
-and applications where the data gets used/consumed. These can be commonly used [relational databases](/components/writers/database/) or various [BI, reporting and analytics](/components/writers/bi-tools/) tools.
+[Workspaces](/transformations/workspace/) are managed environments for code development. SQL workspaces are accessible through the database provider's IDE or your
+preferred SQL IDE. Python, R, or Julia workspaces are available through Keboola's hosted and managed Jupyter Lab environment. These workspaces can be shared with
+other users to facilitate collaboration.
-### Data Apps
-[Data Apps](/components/data-apps/) are simple, interactive web applications that use data to deliver insight or automatically take action.
-This type of application is usually custom tailored to tackle a specific problem and entails a dynamic, purpose-built user experience.
-Some examples of data apps could be recommendation engines, interactive segmentation, AI integration, data visualization,
-customized internal reporting tools for business teams, financial app to get insights on your spend patterns etc.
-
-### Data Consumption
-*Data Consumption* is represented by 3rd party systems that accept (or extract) data from Keboola Connection and use it further.
-These may be business intelligence analytics or visualization systems, but also e-mail marketing, CRM,
-or simply any system that can help our customers to realize the extra value Keboola Connection adds to the data.
+### Data Destination Connectors
+[Data destination connectors](/components/writers/), formerly known as [writers](/components/writers/), are components responsible
+for output data delivery from Keboola to the systems and applications where the data gets used or consumed. These connectors often interface
+with [relational databases](/components/writers/database/), BI, [reporting and analytics](/components/writers/bi-tools/) platforms, tools or applications.
### Full Automation
+Keboola provides the [Flows](https://help.keboola.com/orchestrator/) component, formerly known as [Orchestrator](https://help.keboola.com/orchestrator/),
+to fully automate end-to-end processes. With flows, you can specify the execution order of individual connectors, transformations, and other components,
+along with setting up parallelization. By adding a **schedule** or trigger, you can [automate](/orchestrator/) processes at specified intervals or times
+of the day.
-In the background, behind the scenes, there is the [**Orchestrator**](/orchestrator/)
-(or Scheduler) component which allows everything to be fully automated.
-Orchestrator enables to specify what components should be executed in what order and when
-(specified intervals, specified times of the day, etc.).
+The platform automatically scales resources to facilitate the automated processes.
-The whole warehouse or data lake cycle can be fully automated via [API](https://developers.keboola.com/automate/#automation).
-The end-to-end serverless solution automatically enables you to connect data sources, automatically store data
-in the correct format, check for format inconsistencies, and choose different metadata providers based on the
-operation you wish to perform on the data. The platform scales the needed resources automatically across various
-types of data (structured, semi-structured, and non-structured) and processes.
+### Applications
+Unlike the free-form transformations, [applications](/components/applications/) are **predefined blocks** that enable users to perform advanced tasks such as
+sentiment analysis, association discovery, or histogram grouping. They can also enhance data, for example, by incorporating external data like weather or exchange
+rates through third-party services.
-The whole environment tracks all the [operational metadata](#operational-metadata) and
-can be accessed without a server via [APIs](https://developers.keboola.com/overview/api/).
-This is useful when automating development, testing and production run of data jobs with automatic controls of
-[pipelines](https://keboola.docs.apiary.io/#reference/development-branches).
+### Data Apps
+[Data apps](/components/data-apps/) are user-friendly, interactive web applications designed to leverage data for insights or automated actions. These applications
+are typically custom-built to address specific challenges, providing users with dynamic, purpose-built experiences. Examples of data apps include recommendation
+engines, interactive segmentation tools, AI integration solutions, data visualization platforms, custom internal reporting tools for business teams, and financial
+apps for gaining insights into spending patterns.
+## Keboola Governance
### Operational Metadata
+Keboola diligently collects a diverse array of [operational metadata](/management/jobs/#search-attributes), encompassing user activity, job status, data flow,
+schema evolution, data pipeline performance, and adherence to a client's security rules. All project metadata is readily accessible within the client's Keboola
+environment, empowering users to conduct in-depth analyses, audits, or event-driven actions.
-Keboola Connection collects all kinds of [operational metadata](/management/jobs/#search-attributes),
-describing user activity, job activity, data flow,
-schema evolution, data pipeline performance, compliance with a client’s security rules, etc.
-All project metadata is accessible from within the client’s Keboola Connection environment to perform any kind of analysis, audit, or event action.
+Leveraging this metadata, we dynamically and automatically construct data lineage, providing a real-time understanding of data origin and usage. This capability
+serves both analytical and regulatory purposes, offering invaluable insights into the journey and utilization of data within the platform.
-Based on the metadata, we are able to build **data lineage** on the fly and automatically.
-This makes it possible to understand where the data is coming from and how it is used, both for analytical and regulatory purposes.
+### Cost Monitoring
+Keboola meticulously gathers and organizes telemetry data encompassing every job execution and user activity. Within each job detail, information regarding
+consumed credit units is available, allowing for the precise calculation of the associated dollar amount, effectively quantifying the cost of the process. This
+granular level of detail enables the attribution of costs to specific departments, teams, use-cases, and individual users, providing comprehensive insights into
+resource utilization.
-### Components
+### Identity and Access Management
+Effortlessly oversee user accounts within your organization, regulating their access to specific Keboola projects and datasets. Streamline data sharing across
+your organization, ensuring a comprehensive understanding of each user's access privileges and fostering a transparent overview of data accessibility.
-Keboola Connection, as an open environment consisting of many built-in interoperating components (Storage,
-Transformations, Extractors etc.), can be [extended](https://developers.keboola.com/extend/) with
- **arbitrary code to extract, transform or write data**.
+## Extending the Platform
+The Keboola platform, as an open environment consisting of many built-in interoperating components (Storage, transformations, data source connectors, etc.),
+can be [extended](https://developers.keboola.com/extend/) with **arbitrary code to extract, transform or write data**.
-There are two ways of extending the platform:
-creating [Components](https://developers.keboola.com/extend/#component) (used as extractors, applications and writers) and
-creating components based on [Generic Extractor](https://developers.keboola.com/extend/#generic-extractor/).
+There are two ways of extending the platform: creating [components](https://developers.keboola.com/extend/#component) (used as data destination connectors,
+applications and data source connectors) and creating components based on [Generic Extractor](https://developers.keboola.com/extend/#generic-extractor/).
-All components can be created by us, your in-house teams or 3rd parties.
-They can easily use already existing data, ETL processes, and workflows.
-The development platform provides you with automation of infrastructure, user management, data management, and essential services like
-[data catalogue](/catalog/), operational metadata, full governance, and
-reverse billing per job.
-The components can be kept private or offered to other Keboola Connection users.
-Our market place consists of hundreds of applications that are developed mainly by 3rd
-parties and can be natively used as part of the workflows you are creating.
-This provides a great way for our users to really manage their environment and create a composable enterprise.
+All components can be created by us, your in-house teams or 3rd parties. They can easily use already existing data, ETL processes, and workflows.
+The development platform provides you with automation of infrastructure, user management, data management, and essential services like [data catalog](/catalog/),
+operational metadata, full governance, and reverse billing per job. The components can be kept private or offered to other Keboola users. Our marketplace
+consists of hundreds of applications that are developed mainly by 3rd parties and can be natively used as part of the workflows you are creating. This provides a
+great way for our users to really manage their environment and create a composable enterprise.
-Components can be run as standard pieces of our [orchestrations](/orchestrator/),
-obtaining the full support and services (a link to your [components](https://components.keboola.com/components),
-[logs, etc.](https://developers.keboola.com/extend/common-interface/)).
+Components can be run as standard pieces of our [Flows](/orchestrator/), obtaining the full support and services (a link to your
+[components](https://components.keboola.com/components), [logs, etc.](https://developers.keboola.com/extend/common-interface/)).
-## Keboola CLI
-[Keboola CLI](https://developers.keboola.com/cli/) (Command Line Interface) is a set of commands for operating your cloud
-data pipeline. It is available to install in the Windows, macOS, and Linux environments.
+### Keboola CLI
+[Keboola CLI](https://developers.keboola.com/cli/) (Command Line Interface) is a set of commands for operating your cloud data pipeline. It is available to
+install in the Windows, macOS, and Linux environments.
## Keboola Support
-When working with Keboola Connection, you are never on your own and there are multiple [ways to obtain support](/management/support/) from us.
+When working with the Keboola platform, you are never on your own, and there are multiple [ways to obtain support](/management/support/) from us.
To solve your problem or to gain context, our support staff may join your project when requested.
## Other Commonly Used Terms
This section explains a few terms that are often used throughout these documentation pages.
### Stacks
-Keboola Connection is available in multiple stacks, these can be either multi-tenant
-or single-tenant. The current multi-tenant stacks are:
+The Keboola platform is available in multiple stacks, which can be either multi-tenant or single-tenant. The current multi-tenant stacks are:
-- US AWS -- [connection.keboola.com](https://connection.keboola.com),
-- EU AWS -- [connection.eu-central-1.keboola.com](https://connection.eu-central-1.keboola.com),
-- EU Azure -- [connection.north-europe.azure.keboola.com](https://connection.north-europe.azure.keboola.com).
+- US AWS – [connection.keboola.com](https://connection.keboola.com/),
+- EU AWS – [connection.eu-central-1.keboola.com](https://connection.eu-central-1.keboola.com/),
+- EU Azure – [connection.north-europe.azure.keboola.com](https://connection.north-europe.azure.keboola.com/).
-A **stack** is a combination of a datacenter location (region) and a cloud provider, and is identified by
-its domain (URL). The currently supported
-cloud providers are [Amazon AWS](https://aws.amazon.com/) and [Microsoft Azure](https://azure.microsoft.com/en-us/).
-A stack is a completely independent full instance of Keboola Connection services. That means that
-if you have projects in multiple stacks, you need to have multiple Keboola Connection accounts.
+A **stack** is a combination of a datacenter location (region) and a cloud provider, and is identified by its domain (URL). The currently supported cloud
+providers are [Amazon AWS](https://aws.amazon.com/) and [Microsoft Azure](https://azure.microsoft.com/en-us/). A stack is a completely independent, full instance
+of the Keboola platform services. That means that if you have projects in multiple stacks, you need multiple Keboola accounts.
-Each stack uses a different network with a different set of **dedicated [IP addresses](/components/ip-addresses/)**.
-The [Developer documentations](https://developers.keboola.com/overview/api/#regions-and-endpoints) describes in
-more detail how to handle multiple stacks when working with the API.
+Each stack uses a different network with a different set of **dedicated** [IP addresses](/components/ip-addresses/).
+Our [developer documentation](https://developers.keboola.com/overview/api/#regions-and-endpoints) describes in more detail how to handle multiple stacks
+when working with the API.
Single-tenant stacks are available for a single enterprise customer with a domain name in form `connection.CUSTOMER_NAME.keboola.com`.
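+
+For illustration only, here is a minimal Python sketch of how a script might keep the stack explicit instead of hard-coding a single URL. The host names are
+the multi-tenant stacks listed above; the helper function, variable names, and the `/v2/storage` base path are assumptions made for this example, not a
+prescribed setup.
+
+```python
+# Map of the multi-tenant stacks listed above; a single-tenant stack would use
+# its own connection.CUSTOMER_NAME.keboola.com domain instead.
+STACK_HOSTS = {
+    "us-aws": "https://connection.keboola.com",
+    "eu-aws": "https://connection.eu-central-1.keboola.com",
+    "eu-azure": "https://connection.north-europe.azure.keboola.com",
+}
+
+def storage_api_url(stack: str, path: str = "") -> str:
+    """Build a Storage API URL for the given stack (assumes the /v2/storage base path)."""
+    return f"{STACK_HOSTS[stack]}/v2/storage/{path.lstrip('/')}"
+
+print(storage_api_url("eu-aws", "buckets"))
+```
+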
### Jobs
-Most things in Keboola Connection are done using the batch approach; when you perform an operation, a [job](/management/jobs/) is created
-and executed in the background. We also call these jobs **asynchronous**. Multiple jobs can be running at the same
-time and you can continue your work in the meantime.
+Most things in the Keboola platform are done using the batch approach; when you perform an operation, a [job](/management/jobs/) is created and executed
+in the background. We also call these jobs **asynchronous**. Multiple jobs can be running at the same time and you can continue your work in the meantime.
### Tokens
-Every operation done in Keboola Connection must be authorized with a [*token*](/management/project/tokens/). Each Keboola Connection user is automatically assigned a token on their first login.
-Apart from that, tokens with limited access to some Keboola Connection operations can be created (and shared with other people).
-The principle of token authorization allows you, for example, to easily [share a single table](/management/project/tokens/#limited-tokens)
-from your Storage with someone without them having to register to Keboola Connection (enter email/password).
-
-### Input / Output Mapping
-To make sure your transformation does not harm data in Storage, [mapping](/transformations/mappings)
-separates source data from your script. A secure workspace is created with data copied from the tables specified
-in the [input mapping](/transformations/mappings/#input-mapping).
-After the transformation has been executed successfully, only tables and files defined
-in the [output mapping](/transformations/mappings/#output-mapping) are brought back to Storage.
+Every operation done within the Keboola platform must be authorized with a [token](/management/project/tokens/). Each Keboola user is automatically assigned
+a token on their first login. Apart from that, tokens with limited access to some Keboola platform operations can be created (and shared with other people).
+The principle of token authorization allows you, for example, to easily [share a single table](/management/project/tokens/#limited-tokens) from your Storage
+with someone without them having to register with the Keboola platform (i.e., enter an email/password).
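+
+As a minimal sketch of what token authorization looks like in practice, the snippet below calls the Storage API with a token passed in the `X-StorageApi-Token`
+header. The base URL must match your stack, and the environment variable name is only an assumption for this example; treat the details as illustrative rather
+than as a prescribed client.
+
+```python
+# Minimal sketch: list Storage buckets visible to a token.
+import os
+import requests
+
+BASE_URL = "https://connection.keboola.com/v2/storage"  # use the domain of your stack
+headers = {"X-StorageApi-Token": os.environ["KBC_STORAGE_TOKEN"]}  # hypothetical env variable
+
+# A limited token would only see the buckets and tables it was granted access to.
+response = requests.get(f"{BASE_URL}/buckets", headers=headers)
+response.raise_for_status()
+for bucket in response.json():
+    print(bucket["id"])
+```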
+
+### Input and Output Mapping
+To make sure your transformation does not harm data in Storage, [mapping](/transformations/mappings) separates source data from your script. A secure workspace
+is created with data copied from the tables specified in the [input mapping](/transformations/mappings/#input-mapping). After the transformation has been executed
+successfully, only tables and files defined in the [output mapping](/transformations/mappings/#output-mapping) are brought back to Storage.
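+
+To make the separation more tangible, here is an illustrative Python transformation skeleton. The relative `in/tables/` and `out/tables/` paths follow the
+convention described in the transformation documentation, while the file names are made up for this example; your actual names are whatever you set in the
+input and output mapping.
+
+```python
+# Illustrative only: the script works with local CSV copies staged by the input
+# mapping; it never touches Storage directly.
+import csv
+
+with open("in/tables/source.csv", encoding="utf-8") as src, \
+        open("out/tables/result.csv", "w", encoding="utf-8", newline="") as dst:
+    reader = csv.DictReader(src)
+    writer = csv.DictWriter(dst, fieldnames=reader.fieldnames)
+    writer.writeheader()
+    for row in reader:
+        writer.writerow(row)  # your transformation logic would go here
+
+# Only files covered by the output mapping (here out/tables/result.csv) are
+# loaded back into Storage after the transformation succeeds.
+```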
diff --git a/overview/project-structure1.png b/overview/project-structure1.png
new file mode 100644
index 000000000..8e41a3f5a
Binary files /dev/null and b/overview/project-structure1.png differ
diff --git a/storage/byodb/external-buckets/figures/3-bq.png b/storage/byodb/external-buckets/figures/3-bq.png
index 03ae623d6..6c3c80cc6 100644
Binary files a/storage/byodb/external-buckets/figures/3-bq.png and b/storage/byodb/external-buckets/figures/3-bq.png differ
diff --git a/storage/byodb/external-buckets/index.md b/storage/byodb/external-buckets/index.md
index 9d9d92880..b98080406 100644
--- a/storage/byodb/external-buckets/index.md
+++ b/storage/byodb/external-buckets/index.md
@@ -63,6 +63,9 @@ Then continue to the next step, where we will provide you with a guide on how to
{: .alert.alert-info}
Note: By adding the Keboola service account as a subscriber, you enable read-only access to the data.
+{: .alert.alert-warning}
+[External tables](https://cloud.google.com/bigquery/docs/external-data-cloud-storage) are not supported, and if the shared dataset contains such tables, they will be ignored.
+
Once you are done, click **Register Bucket**, and you can start using it.
### Considerations
diff --git a/storage/tables/data-types/index.md b/storage/tables/data-types/index.md
index 85c509ad3..2d53f469b 100644
--- a/storage/tables/data-types/index.md
+++ b/storage/tables/data-types/index.md
@@ -1111,10 +1111,11 @@ CREATE TABLE "ctas_table" (
### Pros and Cons
- **Pros**
- - Loading to a workspace is significantly faster than loading to a table without native data types. You don't need just to cast the data when loading to a workspace.
+  - Loading into a workspace is significantly faster for tables with native data types because the data does not need to be cast when loading into the workspace.
- A table accessed in a workspace via the [read-only input mapping](https://help.keboola.com/transformations/workspace/#read-only-input-mapping) already has typed columns.
- - Data types are strictly enforced, so you can be sure your number column will contain only numbers, for example.
+ - Data types are strictly enforced, ensuring that data in a specific column (like a number column) is consistent with its type.
- **Cons**
- - Changing a column type is complicated; see [Changing Types of Typed Columns](#changing-types-of-exising-typed-columns).
- - Keboola won't do any type conversion when loading. Your data must match the column type in the table in Storage exactly.
+ - Changing a column type is complicated; see [How to Change Column Types](#changing-types-of-existing-typed-columns).
+ - Keboola does not perform any type conversion during loading. Your data must exactly match the column type in the table in Storage.
- Any load of data with incompatible types will fail.
+  - The filtering option in the input mapping section is unavailable for tables with defined data types. If filtering is crucial for your workflow, consider using SQL, Python, or even no-code transformations to filter the data and create a new filtered table (see the sketch below).
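+
+As a rough sketch of the workaround mentioned above, a Python transformation could load the typed table staged by the input mapping, apply the filter, and
+write a new table for the output mapping to load back into Storage. The table and column names below are hypothetical.
+
+```python
+# Hypothetical replacement for the unavailable input-mapping filter.
+import pandas as pd
+
+orders = pd.read_csv("in/tables/orders.csv")       # staged by the input mapping
+filtered = orders[orders["status"] == "closed"]    # the filter you would have applied
+filtered.to_csv("out/tables/orders_filtered.csv", index=False)
+```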
diff --git a/templates/kai-sql-bot/kai-sql-bot.md b/templates/kai-sql-bot/kai-sql-bot.md
index 3819e308f..4f199d7c8 100644
--- a/templates/kai-sql-bot/kai-sql-bot.md
+++ b/templates/kai-sql-bot/kai-sql-bot.md
@@ -12,7 +12,7 @@ and translates your requests into precise SQL commands.
Ideal for both SQL novices and experts, this app revolutionizes the way you interact with your data and replaces traditional querying methods with **real-time,
AI-assisted data insights**. You can now dive into your Snowflake data with ease and efficiency like never before.
-We will guide you through the process of building the UA and GA4 Comparison app using a predefined template.
+We will guide you through the process of building the Kai SQL Bot app using a predefined template.
## How to Use Template
To begin, click on **Templates** in the top menu, and then select **Add Template**.
@@ -21,7 +21,7 @@ To begin, click on **Templates** in the top menu, and then select **Add Template

This will take you to the list of all available templates. Check the box for data app templates and view the list that appears.
-From this list, select **UA and GA4 Comparison** and click on **Use Template**.
+From this list, select **KAI SQL Bot** and click on **Use Template**.
{: .image-popup}

diff --git a/tutorial/ad-hoc/index.md b/tutorial/ad-hoc/index.md
index 4860de964..8983b426b 100644
--- a/tutorial/ad-hoc/index.md
+++ b/tutorial/ad-hoc/index.md
@@ -1,5 +1,5 @@
---
-title: Part 5 - Ad-Hoc Data Analysis
+title: "Part 5: Ad-Hoc Data Analysis"
permalink: /tutorial/ad-hoc/
---
diff --git a/tutorial/automate/automate1.png b/tutorial/automate/automate1.png
new file mode 100644
index 000000000..01666a6bf
Binary files /dev/null and b/tutorial/automate/automate1.png differ
diff --git a/tutorial/automate/automate10.png b/tutorial/automate/automate10.png
new file mode 100644
index 000000000..41f882fdb
Binary files /dev/null and b/tutorial/automate/automate10.png differ
diff --git a/tutorial/automate/automate11.png b/tutorial/automate/automate11.png
new file mode 100644
index 000000000..ac2d10860
Binary files /dev/null and b/tutorial/automate/automate11.png differ
diff --git a/tutorial/automate/automate12.png b/tutorial/automate/automate12.png
new file mode 100644
index 000000000..124b33607
Binary files /dev/null and b/tutorial/automate/automate12.png differ
diff --git a/tutorial/automate/automate13.png b/tutorial/automate/automate13.png
new file mode 100644
index 000000000..09a223af3
Binary files /dev/null and b/tutorial/automate/automate13.png differ
diff --git a/tutorial/automate/automate14.png b/tutorial/automate/automate14.png
new file mode 100644
index 000000000..7b1784609
Binary files /dev/null and b/tutorial/automate/automate14.png differ
diff --git a/tutorial/automate/automate15.png b/tutorial/automate/automate15.png
new file mode 100644
index 000000000..357cccc93
Binary files /dev/null and b/tutorial/automate/automate15.png differ
diff --git a/tutorial/automate/automate2.png b/tutorial/automate/automate2.png
new file mode 100644
index 000000000..27e4c43b7
Binary files /dev/null and b/tutorial/automate/automate2.png differ
diff --git a/tutorial/automate/automate3.png b/tutorial/automate/automate3.png
new file mode 100644
index 000000000..c763c134c
Binary files /dev/null and b/tutorial/automate/automate3.png differ
diff --git a/tutorial/automate/automate4.png b/tutorial/automate/automate4.png
new file mode 100644
index 000000000..a994c371c
Binary files /dev/null and b/tutorial/automate/automate4.png differ
diff --git a/tutorial/automate/automate5.png b/tutorial/automate/automate5.png
new file mode 100644
index 000000000..5e7451f68
Binary files /dev/null and b/tutorial/automate/automate5.png differ
diff --git a/tutorial/automate/automate6.png b/tutorial/automate/automate6.png
new file mode 100644
index 000000000..5ac4d19a1
Binary files /dev/null and b/tutorial/automate/automate6.png differ
diff --git a/tutorial/automate/automate7.png b/tutorial/automate/automate7.png
new file mode 100644
index 000000000..03522e918
Binary files /dev/null and b/tutorial/automate/automate7.png differ
diff --git a/tutorial/automate/automate8.png b/tutorial/automate/automate8.png
new file mode 100644
index 000000000..1e530facb
Binary files /dev/null and b/tutorial/automate/automate8.png differ
diff --git a/tutorial/automate/automate9.png b/tutorial/automate/automate9.png
new file mode 100644
index 000000000..9f3fc4d8f
Binary files /dev/null and b/tutorial/automate/automate9.png differ
diff --git a/tutorial/automate/index.md b/tutorial/automate/index.md
index 9de32e01d..a532da184 100644
--- a/tutorial/automate/index.md
+++ b/tutorial/automate/index.md
@@ -1,128 +1,109 @@
---
-title: Part 4 - Automation - Setting up Orchestrator
+title: "Part 4: Flow Automation"
permalink: /tutorial/automate/
---
-So far, you have learned to use Keboola Connection to
+So far, you have learned to use Keboola to
-- load tables [manually](/tutorial/load/) or [using an extractor](/tutorial/load/database/),
+- load tables [manually](/tutorial/load/) or [using a data source connector](/tutorial/load/database/),
- [manipulate data in SQL](/tutorial/manipulate/), and
-- write data [into Tableau BI](/tutorial/write/) or [into GoodData BI](/tutorial/write/gooddata/).
+- write data [into a Google Spreadsheet using a data destination connector](/tutorial/write/).
-Connecting various systems together alone makes Keboola Connection a powerful and easy-to-use tool.
-However, the above steps must be done repeatedly to bring in the newest data available.
+While connecting various systems makes Keboola a powerful and easy-to-use tool on its own,
+the above steps must be repeated to bring in the newest data available.
-Use **Orchestrator**
+This is where our flows come in:
+- Specify what tasks should be executed in what order (orchestrate tasks) and
+- Configure the automatic execution (schedule flow tasks).
-- to specify what tasks should be executed in what order (orchestrate tasks) and
-- to configure the automatic execution (schedule orchestrated tasks).
+1. Navigate to the **Flows** section of Keboola.
-Go to the **Orchestrations** section of Keboola Connection, and
+ {: .image-popup}
+ 
-{: .image-popup}
-
+2. Click **Create Flow**.
-click on **New Orchestration** to create a new orchestration. Assign it the name *Opportunities*:
+3. Enter a *Name* and *Description* for your flow. Similar to creating a transformation, you can organize flows into folders.
+You can specify the folder name when creating a flow or assign the flow to a folder later. Click **Create Flow**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-To configure the orchestration, first add some tasks to it:
+4. Click **Select First Step**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Click **New Task**:
+5. Click the **Google Sheets Data Source** component. We extracted the *Levels* table from this data source, and we want to extract this data automatically in our flow.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-The automation tasks are displayed based on what steps of the tutorial you have taken.
-It is not possible to automate the [manual upload](/tutorial/load/). If you haven't gone through all parts of the tutorial,
-these are the available steps:
+6. Use the dropdown menu to select a particular configuration of this component.
-- [load data using GoogleDrive Extractor](/tutorial/load/googledrive/)
-- [load data using Database Extractor](/tutorial/load/database/)
-- [manipulate data using Transformations](/tutorial/manipulate/)
-- [write data into Tableau BI](/tutorial/write/)
-- [write data into GoodData BI](/tutorial/write/gooddata/)
+ {: .image-popup}
+ 
-First, select GoogleDrive and then click on the configuration *User levels*.
+7. Now use the plus icon to add additional steps. Select the **Snowflake data source** component we used to extract the *User*, *Opportunity*, and *Account* tables.
+Then select the configuration we created.
-{: .image-popup}
-
-
-Continue adding all the tasks you want. The following configuration will extract data from the database
-and from the Google Drive sheet. After being transformed for Tableau, the data will be written to Tableau.
+ {: .image-popup}
+ 
-{: .image-popup}
-
+8. The two extraction tasks do not depend on each other and thus can be executed in parallel.
+You can accomplish this by simply dragging and dropping the second task into the Step 1 box.
-Or, use the next configuration to extract data from the database and the Google Drive sheet,
-transform it for GoodData, and write it to a GoodData project.
+ {: .image-popup}
+ 
-{: .image-popup}
-
+ {: .image-popup}
+ 
-The order of certain [tasks](/orchestrator/tasks/) is important; some must run sequentially and others can run
-in [parallel](/orchestrator/running/#parallel-jobs).
-That is what **orchestration phases** are for. Tasks in a single phase are executed in parallel,
-phases execute sequentially.
+ {: .image-popup}
+ 
-To order the phases, grab the triple bar icon on their left.
-To move a task to a different phase, tick the checkbox on the left. Then go to **Actions**, select
-**Move selected tasks between phases** and assign the desired phase.
+9. Continue by adding the **SQL Transformation** and **Google Sheets Data Destination** steps. You should now have a flow that looks like this:
-In the above configuration, each task is in its own phase.
-Therefore, this is a very defensive configuration which executes all tasks sequentially.
+ {: .image-popup}
+ 
-It can be better arranged by using the action *Group tasks into phases by component type*:
-{: .image-popup}
-
+When configuring the **transformation** in the [Data Manipulation](/tutorial/manipulate/) step of this tutorial,
+we used the input tables we loaded manually into Keboola. Now, we need to adjust the **input mapping** of our transformation to use the tables extracted
+from the **Google Sheets** and **Snowflake** data sources.
-This will group each of the extractors, transformations and writers into their own phase to follow the common
-ETL scheme. Then **Save** the orchestration. If you do not have all the tasks set up at the moment,
-it does not matter. You can safely continue with the next steps.
-When done configuring the tasks, go back to the orchestration setting.
+You can get to the configuration by selecting the step and clicking **Edit Configuration**.
{: .image-popup}
-
+
-In the orchestration detail, you can now see some tasks configured. Run the orchestration manually to test
-if everything works smoothly; click on the **Run Orchestration** button in the top right corner and select the tasks you want to run.
-This creates a background [job](/orchestrator/running/) which executes all the tasks specified in the orchestration.
-Continue setting up the orchestration in the meantime.
+Remove the current **input mapping** tables and add the ones from the Google Sheets and Snowflake data sources.
+Make sure you edit the *Table name* parameter because these are the names used in our query to reference the tables.
{: .image-popup}
-
+
-By clicking on the edit icon next to **Schedule**, set the orchestration to run
-[automatically](/orchestrator/running/#automation) at a given time.
+## Set a Schedule
+1. Click **Set Schedule**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-It is recommended to also set up notifications.
-Click on the **Notifications** edit button:
+2. Set the schedule to daily execution at 6:15 am UTC and click **Set Up Schedule**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Notifications are sent to selected email addresses. Set at least the error notification: enter your email address and
-click on the plus sign next to it. Repeat if you want to add another email address. Then click **Save**.
+## Notifications
+To ensure that responsible persons are notified when the flow fails or runs into warnings, it’s always a good idea to set up **notifications**.
-When an orchestration is run manually, notifications will be sent only to the Keboola Connection user who
-runs the orchestration, not to those specified in Notifications.
+Navigate to the **Notifications** tab and enter or select the email addresses of those who should be notified on success, warning, error, or processing.
-{: .image-popup}
-
-
-Your orchestration job should be finished by now. From data extraction to data writing, you have set up the full pipeline.
-Any change in your [GoogleDrive sheet](/tutorial/load/googledrive/) will automatically propagate up
-to your Tableau or GoodData project. Or both if you set it that way.
+ {: .image-popup}
+ 
-{: .image-popup}
-
+## What’s Next
+Having mastered the automation process, you may proceed to the [Development Branches](/tutorial/branches/) part of the tutorial.
-Having mastered the automation process, you may proceed to the [ad-hoc data analysis](/tutorial/ad-hoc/) part of the Keboola Connection tutorial.
+## If You Need Help
+Feel free to reach out to our [support team](/management/support/) if there’s anything we can help with.
diff --git a/tutorial/automate/orchestration-detail-1.png b/tutorial/automate/orchestration-detail-1.png
deleted file mode 100644
index 89736e511..000000000
Binary files a/tutorial/automate/orchestration-detail-1.png and /dev/null differ
diff --git a/tutorial/automate/orchestration-detail-2.png b/tutorial/automate/orchestration-detail-2.png
deleted file mode 100644
index f0935a19a..000000000
Binary files a/tutorial/automate/orchestration-detail-2.png and /dev/null differ
diff --git a/tutorial/automate/orchestration-detail-3.png b/tutorial/automate/orchestration-detail-3.png
deleted file mode 100644
index f583cf25a..000000000
Binary files a/tutorial/automate/orchestration-detail-3.png and /dev/null differ
diff --git a/tutorial/automate/orchestration-detail-4.png b/tutorial/automate/orchestration-detail-4.png
deleted file mode 100644
index 273e986d7..000000000
Binary files a/tutorial/automate/orchestration-detail-4.png and /dev/null differ
diff --git a/tutorial/automate/orchestration-notifications.png b/tutorial/automate/orchestration-notifications.png
deleted file mode 100644
index 6d16abc48..000000000
Binary files a/tutorial/automate/orchestration-notifications.png and /dev/null differ
diff --git a/tutorial/automate/orchestration-schedule.png b/tutorial/automate/orchestration-schedule.png
deleted file mode 100644
index 3ffe68de8..000000000
Binary files a/tutorial/automate/orchestration-schedule.png and /dev/null differ
diff --git a/tutorial/automate/orchestration-tasks-1.png b/tutorial/automate/orchestration-tasks-1.png
deleted file mode 100644
index 9a281da65..000000000
Binary files a/tutorial/automate/orchestration-tasks-1.png and /dev/null differ
diff --git a/tutorial/automate/orchestration-tasks-2.png b/tutorial/automate/orchestration-tasks-2.png
deleted file mode 100644
index b5922bfec..000000000
Binary files a/tutorial/automate/orchestration-tasks-2.png and /dev/null differ
diff --git a/tutorial/automate/orchestration-tasks-setup-1.png b/tutorial/automate/orchestration-tasks-setup-1.png
deleted file mode 100644
index c2ad179e0..000000000
Binary files a/tutorial/automate/orchestration-tasks-setup-1.png and /dev/null differ
diff --git a/tutorial/automate/orchestration-tasks-setup-2.png b/tutorial/automate/orchestration-tasks-setup-2.png
deleted file mode 100644
index 409eb2540..000000000
Binary files a/tutorial/automate/orchestration-tasks-setup-2.png and /dev/null differ
diff --git a/tutorial/automate/orchestration-tasks-setup-3.png b/tutorial/automate/orchestration-tasks-setup-3.png
deleted file mode 100644
index 166e313af..000000000
Binary files a/tutorial/automate/orchestration-tasks-setup-3.png and /dev/null differ
diff --git a/tutorial/automate/orchestration-tasks-setup-4.png b/tutorial/automate/orchestration-tasks-setup-4.png
deleted file mode 100644
index b4e864386..000000000
Binary files a/tutorial/automate/orchestration-tasks-setup-4.png and /dev/null differ
diff --git a/tutorial/automate/orchestrator-create-new.png b/tutorial/automate/orchestrator-create-new.png
deleted file mode 100644
index 9d5e43e18..000000000
Binary files a/tutorial/automate/orchestrator-create-new.png and /dev/null differ
diff --git a/tutorial/automate/orchestrator-intro.png b/tutorial/automate/orchestrator-intro.png
deleted file mode 100644
index b7fddbf85..000000000
Binary files a/tutorial/automate/orchestrator-intro.png and /dev/null differ
diff --git a/tutorial/branches/index.md b/tutorial/branches/index.md
index 80c2dfc48..18eff0314 100644
--- a/tutorial/branches/index.md
+++ b/tutorial/branches/index.md
@@ -1,5 +1,5 @@
---
-title: Development Branches
+title: "Part 6: Development Branches"
permalink: /tutorial/branches/
---
diff --git a/tutorial/index.md b/tutorial/index.md
index b4793f091..59f894fc4 100644
--- a/tutorial/index.md
+++ b/tutorial/index.md
@@ -1,44 +1,58 @@
---
-title: Keboola Connection Getting Started Tutorial
+title: Keboola Getting Started Tutorial
permalink: /tutorial/
redirect_from:
- /getting-started/
---
-This tutorial will guide you through basic usage of Keboola Connection (KBC).
-
-Before you start, make sure you have **basic knowledge of** [SQL](https://en.wikipedia.org/wiki/SQL) and
-**access to a KBC project** (preferably empty). To get set up, either ask one of our partners,
-or ping us at [sales@keboola.com](mailto:sales@keboola.com). If you aim to develop new components for
-KBC, you will get a [development project](https://developers.keboola.com/#development-project) automatically
-when you [register as a developer](https://developers.keboola.com/extend/component/tutorial/)
-
-If developing KBC components is the only reason you need a project for, apply for a
-[development project](/#development-project).
-
-## Get Going
-Follow these three basic steps of our tutorial to get going as quickly as possible:
-
-- [Loading Data Manually](/tutorial/load/) --- load four tables into KBC Storage;
-the fastest way to load data when starting with a project or doing any kind of POC.
-- [Data Manipulation: Transformations](/tutorial/manipulate/) --- manipulate data in Storage
-using Transformations, create a denormalized table from the input tables, and
-do some minor modifications to it.
-- [Writing Data into Tableau](/tutorial/write/) --- write data from KBC to Tableau Analytics.
-
-## Advanced Steps
-If you want to try more of KBC features, follow some of the following side steps:
-
-- Loading data using extractors:
- - [Loading data: GoogleDrive Extractor](/tutorial/load/googledrive/) --- load data from an external
- data sheet using the GoogleDrive extractor.
- - [Loading data: Database Extractor](/tutorial/load/database/) --- load data from an external database
-using the [Snowflake Database](https://www.snowflake.com/) extractor (the procedure is the same for [all our database extractors](/components/extractors/database/)).
-- Data Manipulation: [Creating and using Sandbox](/tutorial/manipulate/sandbox/) --- create a separate database
-storage to run arbitrary SQL scripts on the copies of your tables without affecting data in your Storage, or your transformations.
-- [Writing into GoodData](/tutorial/write/gooddata/) --- write data from KBC into GoodData.
-- [Automation: Setting up Orchestrator](/tutorial/automate/) --- specify what tasks should be executed
-in what order, and configure their automatic execution.
-- [Ad-hoc data analysis](/tutorial/ad-hoc/) --- see how you can play with arbitrary data.
-- [Development Branches](/tutorial/branches/) --- see how you can safely modify a running project.
-- [CLI](https://developers.keboola.com/cli/) --- see how you can operate a project using our command-line tool.
+Discover how to leverage the Keboola platform to extract data from various sources, transform it, and store it securely. Learn not only how to store
+the transformed data but also how to write it to a desired destination, and how to automate the entire data pipeline for better efficiency and consistency.
+This tutorial will guide you through the basic usage of the Keboola platform.
+
+* TOC
+{:toc}
+
+## Prerequisites
+
+If you are new to Keboola, we recommend exploring our comprehensive [platform overview](/overviews/).
+This resource will help you become acquainted with commonly used terms and gain a solid understanding of the Keboola ecosystem.
+
+To get started, ensure you have access to a Keboola project. If you don't have one yet, reach out to us at [sales@keboola.com](mailto:sales@keboola.com),
+or create a free project [here](https://connection.north-europe.azure.keboola.com/wizard) instantly.
+
+If you are a developer looking to contribute new components to the Keboola platform, your [development project](https://developers.keboola.com/#development-project)
+will be automatically set up upon registering as a developer.
+
+Please be aware that to fully follow the tutorial and unlock the full capabilities of Keboola, you should have at least a basic understanding of the [SQL](https://en.wikipedia.org/wiki/SQL) language. SQL is commonly used for data transformation, often alongside Python or R.
+
+## Getting Started
+Upon completing this tutorial, you will gain confidence in:
+1. Integrating data seamlessly into Keboola
+2. Effectively manipulating data through Keboola transformations
+3. Automating the entire data pipeline
+4. Leveraging Keboola for advanced analytics and transformation development
+
+To expedite your onboarding, we've organized the tutorial into basic and advanced steps.
+
+### Basic Steps
+1. [**Loading data manually**](/tutorial/load/): Load four CSV files into Keboola Storage tables.
+2. [**Data manipulation**](/tutorial/manipulate/): Utilize transformations to create a denormalized table from the input tables and make minor modifications.
+3. [**Writing data into Google Sheets**](/tutorial/write/): Write the transformed data to Google Sheets.
+
+### Advanced Steps
+For a deeper exploration of Keboola features, aligning with real-world usage, consider the following advanced steps:
+1. **Loading data using data source connectors**
+ - [Google Sheets data source](/tutorial/load/googlesheets/): Load data from an external spreadsheet using the Google Sheets data source connector.
+ - [Database data source](/tutorial/load/database/): Load data from an external database utilizing the [Snowflake Database data source connector](/tutorial/load/database/) (applicable to all Keboola-supported [database data sources](/components/extractors/database/)).
+2. [**Data manipulation: creating and using a workspace**](/tutorial/manipulate/workspace/)
+ - Create and utilize a workspace, a secure development and analytical environment. It enables you to interact with data and develop transformation code on a copy of your production data.
+3. [**Automation: setting up a flow**](/tutorial/automate/)
+ - Specify task sequences and configure their automatic execution through the setup of a flow.
+4. [**Ad-hoc data analysis**](/tutorial/ad-hoc/)
+ - Explore how to perform ad-hoc data analysis, allowing flexibility in interacting with arbitrary data.
+5. [**Development branches**](/tutorial/branches/)
+ - Learn how to safely modify a running project using development branches.
+6. [**Command-line interface (CLI)**](https://developers.keboola.com/cli/)
+ - Operate a project efficiently using the Keboola command-line tool.
+
+These advanced steps will provide you with a comprehensive understanding of Keboola's capabilities and their practical application in real-world scenarios.
diff --git a/tutorial/load/access-to-spreadsheets.png b/tutorial/load/access-to-spreadsheets.png
new file mode 100644
index 000000000..a1e647e47
Binary files /dev/null and b/tutorial/load/access-to-spreadsheets.png differ
diff --git a/tutorial/load/extractor-google-drive-authorize-2.png b/tutorial/load/allow.png
similarity index 100%
rename from tutorial/load/extractor-google-drive-authorize-2.png
rename to tutorial/load/allow.png
diff --git a/tutorial/load/csv-import-change-settings.png b/tutorial/load/csv-import-change-settings.png
deleted file mode 100644
index de1fac16c..000000000
Binary files a/tutorial/load/csv-import-change-settings.png and /dev/null differ
diff --git a/tutorial/load/csv-import-create-new-configuration.png b/tutorial/load/csv-import-create-new-configuration.png
deleted file mode 100644
index 9ebdaeac8..000000000
Binary files a/tutorial/load/csv-import-create-new-configuration.png and /dev/null differ
diff --git a/tutorial/load/csv-import-default-configuration.png b/tutorial/load/csv-import-default-configuration.png
deleted file mode 100644
index f9efb4ca4..000000000
Binary files a/tutorial/load/csv-import-default-configuration.png and /dev/null differ
diff --git a/tutorial/load/csv-import-empty-list.png b/tutorial/load/csv-import-empty-list.png
deleted file mode 100644
index 4e96280cb..000000000
Binary files a/tutorial/load/csv-import-empty-list.png and /dev/null differ
diff --git a/tutorial/load/csv-import-storage.png b/tutorial/load/csv-import-storage.png
deleted file mode 100644
index f664a9260..000000000
Binary files a/tutorial/load/csv-import-storage.png and /dev/null differ
diff --git a/tutorial/load/csv-import-upload-before.png b/tutorial/load/csv-import-upload-before.png
deleted file mode 100644
index a6346dd1e..000000000
Binary files a/tutorial/load/csv-import-upload-before.png and /dev/null differ
diff --git a/tutorial/load/csv-import-upload.png b/tutorial/load/csv-import-upload.png
deleted file mode 100644
index a69d4f0f6..000000000
Binary files a/tutorial/load/csv-import-upload.png and /dev/null differ
diff --git a/tutorial/load/database.md b/tutorial/load/database.md
index 858591dff..690d35e2b 100644
--- a/tutorial/load/database.md
+++ b/tutorial/load/database.md
@@ -1,75 +1,72 @@
---
-title: Loading Data with Database Extractor
+title: Loading Data from Database
permalink: /tutorial/load/database/
---
-So far, you have learned to load data into Keboola Connection [manually](/tutorial/load/) and
-via a [GoogleDrive extractor](/tutorial/load/googledrive/).
-Let's now load data from an external database with the help of the [Snowflake Database](https://www.snowflake.com/) extractor
-(the procedure is same for all our [database extractors](/components/extractors/database/).
+So far, you have learned to load data into Keboola [manually](/tutorial/load/) and
+via a [Google Sheets data source connector](/tutorial/load/googlesheets/).
+Now, let's explore loading data from an external database using the Snowflake Database data source (the procedure is the same for all our database data sources).
We will use our own sample Snowflake database, so do not worry about having to get database credentials from anyone.
-## Configure Snowflake Extractor
-Start by going into the **Components -- Extractors** section and create a new extractor.
+## Configure Snowflake Data Source Connector
+1. Start by going into the **Components** section and clicking **Add Component**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Find **Snowflake**. You can use the search feature to find it quickly.
+2. Use the search box to find the **Snowflake data source**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Similarly to the [GoogleDrive extractor](/tutorial/load/googledrive/), the Snowflake extractor can
-have multiple configurations. As each configuration represents a single database connection, we only
-need one configuration. Continue with **New Configuration**.
+3. Click **Add Component** and select **Connect To My Data**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Name the configuration.
+4. Enter a name and a description and click **Create Configuration**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Now, click on **Set up credentials** to configure the source database. Set
+ Similarly to other components, the Snowflake data source connector can have multiple configurations.
+ As each configuration represents a single database connection, we only need one configuration.
-- **Host Name** to `kebooladev.snowflakecomputing.com`.
-- **Port** to `443`.
-- **Username**, **Password**, **Database** and **Schema** to `HELP_TUTORIAL`.
-- **Warehouse** to `DEV`.
+5. Set the following credentials:
+ - **Host Name** to `kebooladev.snowflakecomputing.com`.
+ - **Username**, **Password**, **Database**, and **Schema** to `HELP_TUTORIAL`.
+ - **Warehouse** to `DEV`.
-Test the credentials and save them.
+6. Click **Test Connection and Load Available Sources**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Now select the tables to import from the dropdown. Each selected table corresponds to a single table in Storage.
+7. Under **Select sources**, use the dropdown menu to select the `OPPORTUNITY`, `ACCOUNT`, and `USER` tables.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Select the `ACCOUNTS`, `USER`, and `OPPORTUNITY` tables and press **Create**.
+8. After selecting all the required tables, click **Save and Run Configuration**.
+This action will execute the data extraction, generating three new tables in your Storage.
-{: .image-popup}
-
-
-You will get the following configurations. Click on **Run Extraction** to load the data
-from the database into your tables in Storage.
+ {: .image-popup}
+ 
-{: .image-popup}
-
+ Running the component creates a background job that
+ - connects to the database,
+ - executes the queries, and
+ - stores results in the specified tables in Storage.
+For more advanced configuration options, such as incremental fetch, incremental load, or advanced SQL query mode,
+please navigate to Advanced Mode. Note that we will not cover the advanced mode options in this tutorial.
-Running the extractor creates a background job that
-
-- connects to the database,
-- executes the queries, and
-- stores results in the specified tables in Storage.
+{: .image-popup}
+
-When a job is running, a small orange circle appears under *Last runs*, along with RunId and other info on the job.
-Green is for success, red for failure. Click on the indicator, or the info next to it, for more details.
-Once the job is finished, click on the names of the tables to inspect their contents.
+## What's Next
+Proceed to [Data Manipulation](/tutorial/manipulate/).
-Now when you know how to use a database extractor, continue with the [rest of the tutorial](/tutorial/manipulate/).
+## If You Need Help
+Feel free to reach out to our [support team](/management/support/) if there’s anything we can help with.
diff --git a/tutorial/load/db-picture1.png b/tutorial/load/db-picture1.png
new file mode 100644
index 000000000..533ef1955
Binary files /dev/null and b/tutorial/load/db-picture1.png differ
diff --git a/tutorial/load/db-picture2.png b/tutorial/load/db-picture2.png
new file mode 100644
index 000000000..8d7622710
Binary files /dev/null and b/tutorial/load/db-picture2.png differ
diff --git a/tutorial/load/db-picture3.png b/tutorial/load/db-picture3.png
new file mode 100644
index 000000000..2380789b3
Binary files /dev/null and b/tutorial/load/db-picture3.png differ
diff --git a/tutorial/load/db-picture4.png b/tutorial/load/db-picture4.png
new file mode 100644
index 000000000..64b0e89fb
Binary files /dev/null and b/tutorial/load/db-picture4.png differ
diff --git a/tutorial/load/db-picture5.png b/tutorial/load/db-picture5.png
new file mode 100644
index 000000000..d613cbe8a
Binary files /dev/null and b/tutorial/load/db-picture5.png differ
diff --git a/tutorial/load/db-picture6.png b/tutorial/load/db-picture6.png
new file mode 100644
index 000000000..ad23a520f
Binary files /dev/null and b/tutorial/load/db-picture6.png differ
diff --git a/tutorial/load/db-picture7.png b/tutorial/load/db-picture7.png
new file mode 100644
index 000000000..00ae84358
Binary files /dev/null and b/tutorial/load/db-picture7.png differ
diff --git a/tutorial/load/db-picture8.png b/tutorial/load/db-picture8.png
new file mode 100644
index 000000000..ac27dc44d
Binary files /dev/null and b/tutorial/load/db-picture8.png differ
diff --git a/tutorial/load/extractor-db-create.png b/tutorial/load/extractor-db-create.png
deleted file mode 100644
index c84278511..000000000
Binary files a/tutorial/load/extractor-db-create.png and /dev/null differ
diff --git a/tutorial/load/extractor-db-credentials.png b/tutorial/load/extractor-db-credentials.png
deleted file mode 100644
index c059a3dc7..000000000
Binary files a/tutorial/load/extractor-db-credentials.png and /dev/null differ
diff --git a/tutorial/load/extractor-db-index-2.png b/tutorial/load/extractor-db-index-2.png
deleted file mode 100644
index 8f9b7f957..000000000
Binary files a/tutorial/load/extractor-db-index-2.png and /dev/null differ
diff --git a/tutorial/load/extractor-db-new.png b/tutorial/load/extractor-db-new.png
deleted file mode 100644
index bd8c2ac8d..000000000
Binary files a/tutorial/load/extractor-db-new.png and /dev/null differ
diff --git a/tutorial/load/extractor-db-tableSelector.png b/tutorial/load/extractor-db-tableSelector.png
deleted file mode 100644
index 6ae7bfb79..000000000
Binary files a/tutorial/load/extractor-db-tableSelector.png and /dev/null differ
diff --git a/tutorial/load/extractor-db-tablesSelected.png b/tutorial/load/extractor-db-tablesSelected.png
deleted file mode 100644
index e081a18b0..000000000
Binary files a/tutorial/load/extractor-db-tablesSelected.png and /dev/null differ
diff --git a/tutorial/load/extractor-google-drive-authorize.png b/tutorial/load/extractor-google-drive-authorize.png
deleted file mode 100644
index cef8666d8..000000000
Binary files a/tutorial/load/extractor-google-drive-authorize.png and /dev/null differ
diff --git a/tutorial/load/extractor-google-drive-create.png b/tutorial/load/extractor-google-drive-create.png
deleted file mode 100644
index 42c7cef15..000000000
Binary files a/tutorial/load/extractor-google-drive-create.png and /dev/null differ
diff --git a/tutorial/load/extractor-google-drive-intro.png b/tutorial/load/extractor-google-drive-intro.png
deleted file mode 100644
index bba749c04..000000000
Binary files a/tutorial/load/extractor-google-drive-intro.png and /dev/null differ
diff --git a/tutorial/load/extractor-google-drive-result.png b/tutorial/load/extractor-google-drive-result.png
deleted file mode 100644
index 45e9e8206..000000000
Binary files a/tutorial/load/extractor-google-drive-result.png and /dev/null differ
diff --git a/tutorial/load/extractor-google-drive-select-2.png b/tutorial/load/extractor-google-drive-select-2.png
deleted file mode 100644
index 5487d0f55..000000000
Binary files a/tutorial/load/extractor-google-drive-select-2.png and /dev/null differ
diff --git a/tutorial/load/extractor-google-drive-select-sheets.png b/tutorial/load/extractor-google-drive-select-sheets.png
deleted file mode 100644
index 76782409d..000000000
Binary files a/tutorial/load/extractor-google-drive-select-sheets.png and /dev/null differ
diff --git a/tutorial/load/extractor-google-drive-select.png b/tutorial/load/extractor-google-drive-select.png
deleted file mode 100644
index 0362a6d85..000000000
Binary files a/tutorial/load/extractor-google-drive-select.png and /dev/null differ
diff --git a/tutorial/load/extractor-google-drive-selected.png b/tutorial/load/extractor-google-drive-selected.png
deleted file mode 100644
index 0fcb8a34d..000000000
Binary files a/tutorial/load/extractor-google-drive-selected.png and /dev/null differ
diff --git a/tutorial/load/extractor-google-drive-table-detail.png b/tutorial/load/extractor-google-drive-table-detail.png
deleted file mode 100644
index 9fb87e58e..000000000
Binary files a/tutorial/load/extractor-google-drive-table-detail.png and /dev/null differ
diff --git a/tutorial/load/extractor-intro-0.png b/tutorial/load/extractor-intro-0.png
deleted file mode 100644
index c7ba35183..000000000
Binary files a/tutorial/load/extractor-intro-0.png and /dev/null differ
diff --git a/tutorial/load/extractor-intro-1.png b/tutorial/load/extractor-intro-1.png
deleted file mode 100644
index 4f8fffccf..000000000
Binary files a/tutorial/load/extractor-intro-1.png and /dev/null differ
diff --git a/tutorial/load/extractor-intro-2.png b/tutorial/load/extractor-intro-2.png
deleted file mode 100644
index b3bea9757..000000000
Binary files a/tutorial/load/extractor-intro-2.png and /dev/null differ
diff --git a/tutorial/load/extractor-intro-3.png b/tutorial/load/extractor-intro-3.png
deleted file mode 100644
index 611a2127d..000000000
Binary files a/tutorial/load/extractor-intro-3.png and /dev/null differ
diff --git a/tutorial/load/extractor-intro.png b/tutorial/load/extractor-intro.png
deleted file mode 100644
index 4b6702325..000000000
Binary files a/tutorial/load/extractor-intro.png and /dev/null differ
diff --git a/tutorial/load/find-spreadsheet.png b/tutorial/load/find-spreadsheet.png
new file mode 100644
index 000000000..83f0e8f13
Binary files /dev/null and b/tutorial/load/find-spreadsheet.png differ
diff --git a/tutorial/load/google-sheets-create.png b/tutorial/load/google-sheets-create.png
new file mode 100644
index 000000000..df395a3d0
Binary files /dev/null and b/tutorial/load/google-sheets-create.png differ
diff --git a/tutorial/load/google-drive-spreadsheet.png b/tutorial/load/google-sheets-spreadsheet.png
similarity index 100%
rename from tutorial/load/google-drive-spreadsheet.png
rename to tutorial/load/google-sheets-spreadsheet.png
diff --git a/tutorial/load/googledrive.md b/tutorial/load/googledrive.md
deleted file mode 100644
index 1add61357..000000000
--- a/tutorial/load/googledrive.md
+++ /dev/null
@@ -1,101 +0,0 @@
----
-title: Loading Data with Google Drive Extractor
-permalink: /tutorial/load/googledrive/
----
-
-In the [previous step](/tutorial/load/), you learned how to quickly load data into Keboola Connection
-using [manual import](/tutorial/load/).
-In real production projects, this is seldom used as most of the data is obtained automatically using *extractors*.
-In this part of the tutorial, you will use a Google Drive extractor to load data from an external data sheet.
-
-* TOC
-{:toc}
-
-Google Drive is a common method for sharing small reference tables between different organizations.
-For our purposes, create a Google spreadsheet from the [level.csv](/tutorial/level.csv) file.
-Let's pretend someone shared the *level* table with you through Google Drive.
-
-## Prepare
-Go to [Google Spreadsheets](https://www.google.com/sheets/about/) and *Start a new Blank Spreadsheet*. Then go to
-*File* - *Open* and Upload the [level.csv](/tutorial/level.csv) file.
-
-{: .image-popup}
-
-
-## Configure Google Drive Extractor
-
-Go to **Components -- Extractors** in Keboola Connection and click the **Add New Extractor** button:
-
-{: .image-popup}
-
-
-Use the search box to find the *Google Drive* extractor. Once you find it, click on it.
-
-{: .image-popup}
-
-
-Each Keboola Connection extractor can have multiple [*configurations*](/components/). This concept allows you to extract data from, for example,
-multiple Google accounts. So far, there are no configurations of the Google Drive Extractor.
-
-Click on **New Configuration** and name the new configuration *User Levels*; the file we
-want to extract contains the seniority level of each user.
-
-{: .image-popup}
-
-
-Then authorize the extractor to access the spreadsheet by clicking the **Authorize Account** button.
-
-{: .image-popup}
-
-
-There are two basic [authorization](/components/#authorization) options: *Instant Authorization* and *External Authorization*. The latter is
-useful when someone wants to share their document with you without sharing their account directly.
-Use *Instant Authorization* now.
-
-{: .image-popup}
-
-
-On the following screen, click **Allow**.
-
-{: .image-popup}
-
-
-Now you want to select the Google Drive files to import.
-
-{: .image-popup}
-
-
-First, you need to select a spreadsheet.
-
-{: .image-popup}
-
-
-Find and select your spreadsheet document named *level*.
-
-{: .image-popup}
-
-
-Then select the individual sheet. Our 'level' document contains only one 'level' sheet, so select that one.
-It will appear on the right side of the screen as one of the *Selected sheets to be added to the project*.
-
-{: .image-popup}
-
-
-When you **Add Sheet**, you should obtain a result like the one below. Then click on the **Run Extraction** command on the right.
-This will create a background [job](/management/jobs/), extracting the selected sheet from the Google Drive document
-and loading it into Storage.
-
-When a job is running, a small orange circle appears under *Last runs*, along with RunId and other info on the job.
-Green is for success, red for failure. Click on the indicator, or the info next to it for more details.
-
-{: .image-popup}
-
-
-The extractor automatically creates an output bucket and a table; here it is
-`in.c-keboola-ex-google-drive-548902224.level-level`. Click on the name of the output table to check its contents.
-
-{: .image-popup}
-
-
-Continue with the [rest of the tutorial](/tutorial/manipulate/), or take a side step
-to configure a [database extractor](/tutorial/load/database/).
diff --git a/tutorial/load/googlesheets.md b/tutorial/load/googlesheets.md
new file mode 100644
index 000000000..da7baadd7
--- /dev/null
+++ b/tutorial/load/googlesheets.md
@@ -0,0 +1,88 @@
+---
+title: Loading Data from Google Sheets
+permalink: /tutorial/load/googlesheets/
+---
+
+In the [previous step](/tutorial/load/), you learned how to quickly load data into Keboola using [manual import](/tutorial/load/).
+However, in real production projects, this is seldom used as most data is obtained automatically using data source connectors.
+In this part of the tutorial, you will use a Google Sheets data source connector to load data from an external spreadsheet.
+
+* TOC
+{:toc}
+
+Google Drive is commonly used for sharing small reference tables between different organizations.
+For our purposes, create a Google spreadsheet from the [level.csv](/tutorial/level.csv) file.
+Imagine someone shared the *level* table with you through Google Drive.
+
+## Prepare
+Go to [Google Spreadsheets](https://www.google.com/sheets/about/) and start a new blank spreadsheet. Then go to
+*File* – *Import* and upload the [level.csv](/tutorial/level.csv) file.
+
+{: .image-popup}
+
+
+## Configure Google Sheets Data Source Connector
+1. Navigate to the **Components** section in Keboola and click the **Add Component** button:
+
+ {: .image-popup}
+ 
+
+2. Utilize the search box to locate the *Google Sheets data source connector*. Once found, click on it.
+
+ {: .image-popup}
+ 
+
+3. Click **Connect To My Data**. The **Use With Demo Data** option, available across all commonly used connectors, extracts sample datasets prepared by Keboola that you can experiment with outside of this tutorial.
+
+4. Enter a name and description and click **Create Configuration**.
+
+ {: .image-popup}
+ 
+
+ Each Keboola component (data source, data destination, or application) can support multiple [*configurations*](/components/).
+ This concept enables you to, for instance, extract data from multiple Google accounts.
+
+5. Authorize the connector to access the spreadsheet by clicking the **Sign in with Google** button.
+
+ {: .image-popup}
+ 
+
+6. On the following screen, click **Allow**.
+
+ {: .image-popup}
+ 
+
+7. Now you want to select the Google Drive files to import.
+
+ {: .image-popup}
+ 
+
+8. In step 5, you authorized Keboola to use your account to access Google Drive. In this step, you will be asked to grant access specifically to spreadsheets.
+Click **Select all** and then proceed by clicking **Continue** on the following screen.
+
+ {: .image-popup}
+ 
+
+9. Use the search box to find your **Level** spreadsheet. Select it and click the **Select** button.
+
+ {: .image-popup}
+ 
+
+10. Keboola has automatically detected all sheets within your spreadsheet and will now allow you to select the one you want to load.
+11. Select the sheet and click **Save and Run Configuration**. A job will be executed, and once completed, you will see a new table created.
+
+ {: .image-popup}
+ 
+
+12. The Google Sheets data source automatically generates an output bucket and table. Click on the name of the output table to check its contents,
+or navigate directly to the **Storage** section to explore the data.
+
+ {: .image-popup}
+ 
+
+## What’s Next
+Proceed to [Data Manipulation](/tutorial/manipulate/) for the next step in the tutorial.
+Alternatively, take another brief side step to explore loading data with the [Database data source connector](/tutorial/load/database/).
+
+## If You Need Help
+Feel free to reach out to our [support team](/management/support/) if there’s anything we can help with.
diff --git a/tutorial/load/manual.md b/tutorial/load/manual.md
index 5f292a4a7..52bdef374 100644
--- a/tutorial/load/manual.md
+++ b/tutorial/load/manual.md
@@ -1,87 +1,100 @@
---
-title: Loading Data
+title: "Part 1: Loading Data"
permalink: /tutorial/load/
---
-There are multiple ways to load data into Keboola Connection. When you are starting with a project or doing any kind of
-[POC](https://en.wikipedia.org/wiki/Proof_of_concept), it is usually fastest to **load data manually**.
-If everything goes well and the project goes to production, you will later switch to **automatic
-data loading** using [extractors](/components/extractors/); in our tutorial, you can load data with
-the [GoogleDrive](/tutorial/load/googledrive/) and [Database extractors](/tutorial/load/database/).
-## Manually Loading Data
-In this part of our tutorial, you will load four tables into Keboola Connection Storage.
-The tables represent business opportunities, their associated users and accounts.
-Additionally, company levels for each user are specified.
-For our tutorial, we have prepared the tables as CSV files:
+* TOC
+{:toc}
-- opportunity (business opportunities) --- [https://help.keboola.com/tutorial/opportunity.csv](/tutorial/opportunity.csv)
-- account (associated accounts) --- [https://help.keboola.com/tutorial/account.csv](/tutorial/account.csv)
-- user (associated users) --- [https://help.keboola.com/tutorial/user.csv](/tutorial/user.csv)
-- level (company levels) --- [https://help.keboola.com/tutorial/level.csv](/tutorial/level.csv)
+Keboola offers various methods to load data, providing flexibility to suit different project stages. When initiating a project or conducting a Proof of Concept
+(POC), the quickest approach is typically **manual data loading**. As the project advances to production, you may transition to **automatic data loading** using
+data source connectors.
-Download the files to your computer (they are very small) and start loading data.
+In our tutorial, we demonstrate manual loading, and as you progress, we delve into automated data loading using connectors, specifically the Google Sheets
+and Database data source connectors.
-**Important**: All characters appearing in this data are fictitious.
-Any resemblance to real persons, living or dead or undead or unborn or otherwise semi-existent is purely coincidental.
+## Manual Data Loading
+### Get Ready
+In this section of the tutorial, you will load four tables into Keboola Storage. These tables represent business opportunities, their associated users and accounts, and the company levels specified for each user. The tables are available as CSV files for download:
-To manually load data, go to the **Components -- Extractors** section and use the search box to find **CSV Import**:
+- [Opportunity (business opportunities)](/tutorial/opportunity.csv)
+- [Account (associated accounts)](/tutorial/account.csv)
+- [User (associated users)](/tutorial/user.csv)
+- [Level (company levels)](/tutorial/level.csv)
-{: .image-popup}
-
+Download these small files to your computer and proceed with loading the data.
-Click on the **CSV Import** tile to see more details. Here you can store predefined import configurations.
-Each Keboola Connection extractor can have multiple [*configurations*](/components/). This concept allows you to extract data from multiple sources
-of the same type. Each configuration of **CSV Import** will point to a different table and will allow you to reuse it quickly later.
+***Note:** All characters in this data are fictitious, and any resemblance to real persons, living, dead, undead, unborn, or otherwise semi-existent
+is purely coincidental.*
-{: .image-popup}
-
+### Steps to Follow
+1. Before proceeding, ensure you are logged into your Keboola project (refer to the tutorial [Prerequisites](/tutorial/#prerequisites)
+if you need to acquire a project).
-Click on **New Configuration** to continue.
+2. Navigate to the **Components** section and use the search box to find **CSV Import**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-You will be creating configuration for each table, so let's name the first one *Opportunity* and click on **Create Configuration**.
-Each created configuration is filled with defaults; for example, the *CSV format* and the *destination* name of the table in
-[KBC Storage](/storage/). For this tutorial, change only the *Destination* setting by clicking the pen icon.
+3. Click the **Add Component** button, then select **Connect To My Data**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Now change the *Destination* field to `in.c-csv-import.opportunity` and click **Save**.
+4. Enter a **name and description** for your configuration, and click **Create Configuration**.
+
+ {: .image-popup}
+ 
-{: .image-popup}
-
+ ***Note:** You can create multiple configurations for each connector, and maintaining clear naming conventions contributes to a clean and organized project.
+ Check our [best practices guide](/tutorial/onboarding/cheat-sheet/) for suggestions on this topic.*
-Now you can start uploading. Select the downloaded `opportunity.csv` file from your computer and hit the **Upload** button.
+ Adding a description is a beneficial practice for both you and your colleagues, aiding in understanding the purpose of your configuration.
-{: .image-popup}
-
+ In this tutorial, we will create four configurations for this data source connector, dedicating one configuration to each source CSV file.
-Once the upload is finished (you will get a notification), go back to the *CSV import* (you can use navigation on the top) and
-repeat the process for the other three tables (create a configuration, change the destination, upload the requested file).
+ Name the first configuration **[TUTORIAL] Opportunity**.
-{: .image-popup}
-
-
-That's it. You should now have four tables with sample data stored in your Keboola Connection project:
+5. In the **CSV File** section, click **Select file** and choose the [opportunity.csv](/tutorial/opportunity.csv) file you downloaded.
+
+ {: .image-popup}
+ 
+
+6. In the **Upload Settings** section, modify the *Destination* setting by clicking the **pen icon** next to the *Destination* name. Set the name of the table that will be created in your Keboola Storage to `in.c-csv-import.opportunity` and click **Save**.
+
+ {: .image-popup}
+ 
+
+7. Click **Upload**.
+
+ {: .image-popup}
+ 
+
+After the upload is complete, repeat the process for the remaining three tables: create a configuration, change the destination,
+and upload the respective file.
+
+That's it. You should now have four tables containing sample data stored in your Keboola Storage:
- `in.c-csv-import.opportunity`
- `in.c-csv-import.account`
- `in.c-csv-import.user`
- `in.c-csv-import.level`
-To verify that you have loaded all the tables and to peek at the data, go to [**Storage**](/storage/).
-All database tables stored in your project are listed in the *Tables* tab.
-They are grouped together into *Buckets*, and the newly loaded tables can be found in the `in.c-csv-import` bucket.
-To see all tables in a bucket, click the bucket title.
-Click individual table names to see the table details, including *Data Sample*.
+To confirm the successful loading of all tables and review the data, navigate to the [Storage](/storage/) section. Data is organized into **buckets**,
+and each bucket can contain multiple **tables**.
+
+Expand each bucket to view its tables, and click a table name to access details, including the *Data Sample* for that table.
+
+{: .image-popup}
+
{: .image-popup}
-
+
-You can now take
+## What’s Next
+Proceed to [Data Manipulation](/tutorial/manipulate/) for the next step in the tutorial. Alternatively, take a brief side step to explore
+[Loading Data with Google Sheets Data Source Connector](/tutorial/load/googlesheets/)
+and/or [Loading Data with Database Data Source Connector](/tutorial/load/database/).
-- the next step --- [Data Manipulation](/tutorial/manipulate/), or
-- a brief side step to [Loading data with GoogleDrive Extractor](/tutorial/load/googledrive/), or
-- a brief side step to [Loading data with Database Extractor](/tutorial/load/database/).
+## If You Need Help
+Feel free to reach out to our [support team](mailto:support@keboola.com) if there’s anything we can help with.
diff --git a/tutorial/load/picture1.png b/tutorial/load/picture1.png
new file mode 100644
index 000000000..d5d878b23
Binary files /dev/null and b/tutorial/load/picture1.png differ
diff --git a/tutorial/load/picture2.png b/tutorial/load/picture2.png
new file mode 100644
index 000000000..2fed81a4f
Binary files /dev/null and b/tutorial/load/picture2.png differ
diff --git a/tutorial/load/picture3.png b/tutorial/load/picture3.png
new file mode 100644
index 000000000..0e62bcab8
Binary files /dev/null and b/tutorial/load/picture3.png differ
diff --git a/tutorial/load/picture4.png b/tutorial/load/picture4.png
new file mode 100644
index 000000000..78a4253a7
Binary files /dev/null and b/tutorial/load/picture4.png differ
diff --git a/tutorial/load/picture5.png b/tutorial/load/picture5.png
new file mode 100644
index 000000000..f23931194
Binary files /dev/null and b/tutorial/load/picture5.png differ
diff --git a/tutorial/load/picture6.png b/tutorial/load/picture6.png
new file mode 100644
index 000000000..6b9235f79
Binary files /dev/null and b/tutorial/load/picture6.png differ
diff --git a/tutorial/load/picture7.png b/tutorial/load/picture7.png
new file mode 100644
index 000000000..ecb48c085
Binary files /dev/null and b/tutorial/load/picture7.png differ
diff --git a/tutorial/load/picture8.png b/tutorial/load/picture8.png
new file mode 100644
index 000000000..7d9c71882
Binary files /dev/null and b/tutorial/load/picture8.png differ
diff --git a/tutorial/load/save-and-run.png b/tutorial/load/save-and-run.png
new file mode 100644
index 000000000..76a568fb7
Binary files /dev/null and b/tutorial/load/save-and-run.png differ
diff --git a/tutorial/load/select-files.png b/tutorial/load/select-files.png
new file mode 100644
index 000000000..62bf1b0f6
Binary files /dev/null and b/tutorial/load/select-files.png differ
diff --git a/tutorial/load/sign-in-with-google.png b/tutorial/load/sign-in-with-google.png
new file mode 100644
index 000000000..dcdd0e81d
Binary files /dev/null and b/tutorial/load/sign-in-with-google.png differ
diff --git a/tutorial/load/source-intro-0.png b/tutorial/load/source-intro-0.png
new file mode 100644
index 000000000..25fad7e9e
Binary files /dev/null and b/tutorial/load/source-intro-0.png differ
diff --git a/tutorial/load/source-intro.png b/tutorial/load/source-intro.png
new file mode 100644
index 000000000..90d7d685e
Binary files /dev/null and b/tutorial/load/source-intro.png differ
diff --git a/tutorial/load/storage.png b/tutorial/load/storage.png
new file mode 100644
index 000000000..ac20a78a8
Binary files /dev/null and b/tutorial/load/storage.png differ
diff --git a/tutorial/manipulate/IM-add-tables.png b/tutorial/manipulate/IM-add-tables.png
new file mode 100644
index 000000000..935dd0669
Binary files /dev/null and b/tutorial/manipulate/IM-add-tables.png differ
diff --git a/tutorial/manipulate/create-transformation.png b/tutorial/manipulate/create-transformation.png
new file mode 100644
index 000000000..baf2902d5
Binary files /dev/null and b/tutorial/manipulate/create-transformation.png differ
diff --git a/tutorial/manipulate/index.md b/tutorial/manipulate/index.md
index 8abb1f8c5..75b7fef34 100644
--- a/tutorial/manipulate/index.md
+++ b/tutorial/manipulate/index.md
@@ -1,108 +1,95 @@
---
-title: Part 2 - Data Manipulation
+title: "Part 2: Data Manipulation"
permalink: /tutorial/manipulate/
---
-At this point, you already know how to quickly [load data into Keboola Connection](/tutorial/load/),
-and your [Storage](/storage/tables/) contains four new tables:
-*account*, *opportunity*, *level* and *user*.
-In this part of the tutorial, we will show you how to manipulate data in Storage using [Transformations](/transformations/).
-Let's create a denormalized table from the input tables and do some minor modifications to it.
+At this point, you know how to quickly load data into Keboola, resulting in four new tables in your Storage:
+`account`, `opportunity`, `level`, and `user`.
+
+In this segment of the tutorial, we'll guide you through data manipulation in [Storage](/storage/tables/) using [transformations](/transformations/).
+Our objective is to create a denormalized table from the input tables and make some minor modifications to it.
* TOC
{:toc}
## Creating Transformation
-To start, go to the Keboola Connection **Transformations** section:
+1. To start, navigate to the Keboola **Transformations** section.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Like [tables](/storage/tables/), [Transformations](/transformations/) are organized into *buckets*.
-Each transformation bucket can contain any number of individual transformations.
-It should represent a logical set (container) of operations you want to perform together.
-Before you start with transformations, create a bucket and call it *Opportunity*.
+2. Next, click the **Create Transformation** button and choose **Snowflake SQL Transformation** (or another SQL transformation, depending on your project's backend).
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Then click on the **New Transformation** button to create an individual transformation.
-Enter *Denormalize opportunities* as its *Name* and select **Snowflake** as its *Backend*.
-A backend is the engine running the transformation script; it is either a database server (Snowflake, Redshift)
-or a language interpreter (R, Python, Julia).
+3. Enter `Denormalize opportunities` as its name, and remember to provide a description. Transformations can be organized into folders;
+you can either add it to a folder during creation or move it to any folder later. Now, enter `Opportunity` as the folder name.
+
+ {: .image-popup}
+ 
-{: .image-popup}
-
-When you create a transformation, you need to set up
+Keboola transformations operate on a few fundamental building blocks. It's crucial to note that the transformation process occurs in a dedicated database schema,
+meaning queries are not executed directly against your Storage tables. Instead, the system clones selected tables into the dedicated transformation schema,
+executes queries, and finally unloads created/modified objects back to the Storage.
-1. [**Input Mapping**](/transformations/mappings/#input-mapping) — what tables will be used in your transformation;
-tables not mentioned in Input Mapping cannot be used in the transformation.
-2. [**Output Mapping**](/transformations/#output-mapping) — what tables will be written into Storage;
-tables not mentioned in Output Mapping will never be modified nor permanently stored (i.e. they are temporary).
-3. [**Transformation Script**](/tutorial/manipulate/#transformation-script) — SQL queries defining
-what will happen with the data; it takes the tables from Input Mapping, modifies them
-and produces the tables referenced in Output Mapping.
+1. [**Input Mapping**](/transformations/mappings/#input-mapping): This is where you specify the tables to be used in your transformation. In the default setup, tables not mentioned in Input Mapping cannot be used in the transformation.
+2. [**Output Mapping**](/transformations/#output-mapping): This section deals with tables created or modified within your transformation. Here, you specify the tables that will be written into Storage after the successful execution of the transformation. Tables not mentioned in Output Mapping will neither be modified nor permanently stored; they are considered temporary.
+3. [**Queries**](/tutorial/manipulate/#transformation-script): SQL queries define what will happen with the data. These queries take the tables from Input Mapping, modify them, and produce the tables referenced in Output Mapping. To enhance clarity, queries can be further organized into blocks.
-The concept of [**mapping**](/transformations/mappings) is an important safeguard
-when you are manipulating your data.
-Thanks to it, there is no way to modify the wrong tables by accident.
-The only tables which are modified by your transformation are those explicitly specified in Output Mapping.
+The mapping concept serves as a crucial safeguard when manipulating your data. It ensures that there is no accidental modification of the wrong tables. The only
+tables modified by your transformation are those explicitly specified in the Output Mapping. Additionally, this concept plays a vital role in maintaining a
+detailed data lineage across your project.
{: .image-popup}
-
+
### Input Mapping
-Let's start with setting Input Mapping by clicking the **New Input** button.
+Let’s start with setting Input Mapping by clicking the **New Input** button.
{: .image-popup}
-
+
+
+The *Source* field in the input mapping refers to Storage. Select `in.c-csv-import.account` as the source table. You can do a full text search in the select
+field; typing `acc` will give you the table as well. In the *Table name* field, the table name `account` is automatically filled for you. This is the name of the
+source table inside the transformation. Use the **Add Input** button to create the input mapping.
-The *Source* field in the input mapping refers to Storage. Select `in.c-csv-import.account` as the source table.
-You can do a full text search in the select field; typing `acc` will give you the table as well.
-In the *Destination* field, the table name `account` is automatically filled for you.
-This is the name of the source table inside the transformation. Use the **Create Input** button to create
-the input mapping.
+Please note that there are additional settings you can configure, such as the *Changed in Last* filter, which aids in incremental processing of large tables.
+However, for the purpose of our tutorial, we won't delve into those details. See additional information about [input mapping](/transformations/mappings/#input-mapping)
+(all available options, etc.).
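+
+For illustration, inside the transformation you refer to tables by their mapped names, not by their full Storage IDs.
+Below is a minimal sketch, assuming the `account` input mapping created above:
+
+{% highlight sql %}
+-- "account" is the mapped name available in the transformation schema;
+-- the Storage ID in.c-csv-import.account is not used in queries.
+SELECT *
+FROM "account"
+LIMIT 10;
+{% endhighlight %}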
-Add the remaining three tables: `opportunity`, `user` and `level`. If you loaded data using the
-[Database extractor](/tutorial/load/database/) or the [Google Drive extractor](/tutorial/load/googledrive/),
-feel free to use the tables created by them (e.g., `in.c-keboola-ex-db-snowflake-548904898.account` or
-`in.c-keboola-ex-google-drive-548902224.level-level`). In either case, make sure that the destinations
-are set to `account`, `opportunity`, `user`, and `level`.
-You will get the following configuration:
+Add the remaining three tables: `opportunity`, `user` and `level`. You can add multiple tables at once:
{: .image-popup}
-
+
-*See additional information about [Input Mapping](/transformations/mappings/#input-mapping)
-(all available options, etc.).*
+You will end up with the following configuration:
+
+{: .image-popup}
+
### Output Mapping
-Continue with setting up Output Mapping by clicking on the **New Output** button.
+Continue with setting up output mapping by clicking on the **New Output** button.
{: .image-popup}
-
+
-Enter `opportunity_denorm` into the *Source* field in the output mapping;
-the *Source* field refers to the transformation. This table does not exist yet.
-We will create it in the transformation.
+Enter `opportunity_denorm` into the *Table name* field in the output mapping; the *Table name* field refers to the transformation. This table does not exist yet.
+We will create it in the transformation query.
-The *Destination* field refers to the name of the output table in Storage.
-It will be auto-generated to `out.c-opportunity.opportunity_denorm`, which is
-perfectly fine. It will create the `opportunity_denorm` table in the `opportunity` [bucket in the output stage](/storage/tables/)
-in Storage. Neither the table nor the bucket exist, but they will be created once the transformation runs.
+The *Destination* field refers to the name of the output table in Storage. It will be auto-generated to create the `opportunity_denorm` table
+in the `denormalize-opportunity` [bucket in the output stage](/storage/tables/) in Storage.
+Neither the table nor the bucket exist, but they will be created once the transformation runs.
-After you finish Output Mapping, you will see this:
+After you finish the output mapping, you will see this:
{: .image-popup}
-
-
-The size of the `opportunity_denorm` table shows as *N/A* because the table does not exist yet.
+
-*See additional information about [Output Mapping](/transformations/mappings/#output-mapping)
-(all available options, etc.).*
+See additional information about [output mapping](/transformations/mappings/#output-mapping) (all available options, etc.).
-### Transformation Script
+### Transformation Queries
To produce that table from the tables `account`, `opportunity` and `user`, write a transformation script.
To save you some time, we have already prepared the necessary SQL queries for you:
@@ -133,33 +120,43 @@ CREATE TABLE "opportunity_denorm" AS
JOIN "tmp_level" ON "user"."Name" = "tmp_level"."Name";
{% endhighlight %}
-{: .image-popup}
-
+Click the **New Code** button. Enter *Opportunity denorm* as the name, paste the queries into the editor, and then click **Save**.
-In the first query, we change the user level descriptions into something more clear.
+{: .image-popup}
+
-In the second query, we compute the quality level for each deal opportunity based on the estimated probability
-of closing the deal. Note that here we are excluding the system column "_timestamp"
-which appears in [cloned tables on Snowflake backend](/transformations/snowflake/#_timestamp-system-column).
+In the first query, we enhance user level descriptions for better clarity.
-In the third query, we denormalize all four tables into a single one.
-We have prepared the single table so that it will load nicely into Tableau.
+In the second query, we calculate the quality level for each deal opportunity based on the estimated probability of closing the deal.
-{: .image-popup}
-
+In the third query, we denormalize all tables into a single one.
## Running Transformation
-Save the queries and then click on **Run Transformation**. This will create a background job which will
-
+Click **Run Transformation**. This will create a background job which will
- get the specified tables from Storage,
-- put them in a transformation database,
-- execute the queries/script, and
+- load them into a transformation schema,
+- execute the queries, and
- store the result in Storage again.
-To see if the transformation job was successful, go to [**Jobs**](/management/jobs/), or click on the small
-**Transformations job has been scheduled** window that pops up after a transformation starts running.
+{: .image-popup}
+
+
+To see if the transformation job was successful, navigate to **Jobs**, or click on the small **Snowflake SQL job has been scheduled** window
+that pops up after a transformation starts running.
-Having learned to set up a transformation, you can now
+{: .image-popup}
+
+After a successful execution of the transformation, you’ll see a new table created in your **Storage**.
+Also notice the **Recently updated by** field, where you can see which component configuration most recently updated the table.
+
+{: .image-popup}
+
+
+## What’s Next
+Having learned to set up a transformation, you can now
- continue with the next [Writing Data](/tutorial/write/) step, or
-- take a brief side step to [Using Sandbox](/tutorial/manipulate/sandbox/).
+- take a brief side step to [Using a Workspace](/tutorial/manipulate/workspace/) – while we've configured our transformation with pre-developed queries in this tutorial, in real-life scenarios, you'll typically want to interact with the data, develop, and test your queries. A workspace serves as your safe playground specifically designed for that purpose.
+
+## If You Need Help
+Feel free to reach out to our [support team](mailto:support@keboola.com) if there’s anything we can help with.
diff --git a/tutorial/manipulate/input-mapping1.png b/tutorial/manipulate/input-mapping1.png
new file mode 100644
index 000000000..26812c8fe
Binary files /dev/null and b/tutorial/manipulate/input-mapping1.png differ
diff --git a/tutorial/manipulate/input-mapping3.png b/tutorial/manipulate/input-mapping3.png
new file mode 100644
index 000000000..0c53f9bfc
Binary files /dev/null and b/tutorial/manipulate/input-mapping3.png differ
diff --git a/tutorial/manipulate/mapping.png b/tutorial/manipulate/mapping.png
new file mode 100644
index 000000000..847f8c647
Binary files /dev/null and b/tutorial/manipulate/mapping.png differ
diff --git a/tutorial/manipulate/name-transformation.png b/tutorial/manipulate/name-transformation.png
new file mode 100644
index 000000000..1cfc0e918
Binary files /dev/null and b/tutorial/manipulate/name-transformation.png differ
diff --git a/tutorial/manipulate/new-code.png b/tutorial/manipulate/new-code.png
new file mode 100644
index 000000000..bc1d2a015
Binary files /dev/null and b/tutorial/manipulate/new-code.png differ
diff --git a/tutorial/manipulate/output-mapping1.png b/tutorial/manipulate/output-mapping1.png
new file mode 100644
index 000000000..87b6bf49b
Binary files /dev/null and b/tutorial/manipulate/output-mapping1.png differ
diff --git a/tutorial/manipulate/output-mapping2.png b/tutorial/manipulate/output-mapping2.png
new file mode 100644
index 000000000..74bcb7c83
Binary files /dev/null and b/tutorial/manipulate/output-mapping2.png differ
diff --git a/tutorial/manipulate/run-transformation.png b/tutorial/manipulate/run-transformation.png
new file mode 100644
index 000000000..ebf6d5226
Binary files /dev/null and b/tutorial/manipulate/run-transformation.png differ
diff --git a/tutorial/manipulate/sandbox-intro.png b/tutorial/manipulate/sandbox-intro.png
deleted file mode 100644
index 36b716191..000000000
Binary files a/tutorial/manipulate/sandbox-intro.png and /dev/null differ
diff --git a/tutorial/manipulate/sandbox.md b/tutorial/manipulate/sandbox.md
deleted file mode 100644
index f22406ada..000000000
--- a/tutorial/manipulate/sandbox.md
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: Using Sandbox
-permalink: /tutorial/manipulate/sandbox/
----
-
-An important part of [setting up a transformation](/tutorial/manipulate/) is
-the SQL (or R or Python) script itself.
-To make writing of these scripts easier for you, we provide the *Sandbox* functionality.
-
-As a separate database storage, Sandbox allows you to run arbitrary SQL scripts on the
-**copies** of your tables without affecting data in your Storage, or your transformations.
-Learn more about Sandbox [here](/transformations/sandbox/).
-
-Let's create a Sandbox from the Transformations page by clicking the **New Sandbox** link:
-
-{: .image-popup}
-
-
-You will be given credentials to your sandbox along with an option to what should be loaded.
-Use the default *Prepare Transformation* option, and click **Create Sandbox**. The data will be loaded to your sandbox in the background.
-
-{: .image-popup}
-
-
-The sandbox credentials do not change. Clicking on the **Create Sandbox** button later will give you the same sandbox.
-Copy the database credentials into your favorite SQL client,
-or, if you want to start right away, use the **Connect** button to connect to the database using a web client.
-
-{: .image-popup}
-
-
-Your sandbox might be deleted after 14 days of inactivity; make sure not to use it as a permanent data storage!
-
-When happy with the outcomes of your SQL queries, go back to [transformations](/tutorial/manipulate/)
-and paste the queries into the transformation script.
diff --git a/tutorial/manipulate/table-in-storage.png b/tutorial/manipulate/table-in-storage.png
new file mode 100644
index 000000000..3c37cb2fc
Binary files /dev/null and b/tutorial/manipulate/table-in-storage.png differ
diff --git a/tutorial/manipulate/transf-successful.png b/tutorial/manipulate/transf-successful.png
new file mode 100644
index 000000000..055d71861
Binary files /dev/null and b/tutorial/manipulate/transf-successful.png differ
diff --git a/tutorial/manipulate/transformation-input-end.png b/tutorial/manipulate/transformation-input-end.png
deleted file mode 100644
index db8de5a07..000000000
Binary files a/tutorial/manipulate/transformation-input-end.png and /dev/null differ
diff --git a/tutorial/manipulate/transformation-input.png b/tutorial/manipulate/transformation-input.png
deleted file mode 100644
index b79df3253..000000000
Binary files a/tutorial/manipulate/transformation-input.png and /dev/null differ
diff --git a/tutorial/manipulate/transformation-output-end.png b/tutorial/manipulate/transformation-output-end.png
deleted file mode 100644
index d549b6708..000000000
Binary files a/tutorial/manipulate/transformation-output-end.png and /dev/null differ
diff --git a/tutorial/manipulate/transformation-output.png b/tutorial/manipulate/transformation-output.png
deleted file mode 100644
index 26bbd741b..000000000
Binary files a/tutorial/manipulate/transformation-output.png and /dev/null differ
diff --git a/tutorial/manipulate/transformation-queries.png b/tutorial/manipulate/transformation-queries.png
deleted file mode 100644
index 2befa0f32..000000000
Binary files a/tutorial/manipulate/transformation-queries.png and /dev/null differ
diff --git a/tutorial/manipulate/transformations-create-bucket.png b/tutorial/manipulate/transformations-create-bucket.png
deleted file mode 100644
index 680325364..000000000
Binary files a/tutorial/manipulate/transformations-create-bucket.png and /dev/null differ
diff --git a/tutorial/manipulate/transformations-create-sandbox.png b/tutorial/manipulate/transformations-create-sandbox.png
deleted file mode 100644
index 7e9b50499..000000000
Binary files a/tutorial/manipulate/transformations-create-sandbox.png and /dev/null differ
diff --git a/tutorial/manipulate/transformations-create.png b/tutorial/manipulate/transformations-create.png
deleted file mode 100644
index dd48190d2..000000000
Binary files a/tutorial/manipulate/transformations-create.png and /dev/null differ
diff --git a/tutorial/manipulate/transformations-created.png b/tutorial/manipulate/transformations-created.png
deleted file mode 100644
index b30309767..000000000
Binary files a/tutorial/manipulate/transformations-created.png and /dev/null differ
diff --git a/tutorial/manipulate/transformations-intro-2.png b/tutorial/manipulate/transformations-intro-2.png
deleted file mode 100644
index b4de0f284..000000000
Binary files a/tutorial/manipulate/transformations-intro-2.png and /dev/null differ
diff --git a/tutorial/manipulate/transformations-intro-3.png b/tutorial/manipulate/transformations-intro-3.png
deleted file mode 100644
index 1779659a7..000000000
Binary files a/tutorial/manipulate/transformations-intro-3.png and /dev/null differ
diff --git a/tutorial/manipulate/workspace.md b/tutorial/manipulate/workspace.md
new file mode 100644
index 000000000..6a7c3d0b2
--- /dev/null
+++ b/tutorial/manipulate/workspace.md
@@ -0,0 +1,62 @@
+---
+title: Using a Workspace
+permalink: /tutorial/manipulate/workspace/
+---
+
+An integral aspect of creating a transformation is the development of the script itself.
+In Keboola, you can use SQL, Python, or R by default. To simplify the process of writing these scripts,
+we offer **workspaces** (see the full documentation of [workspaces](/transformations/workspace/)).
+Workspaces provide a secure development and analytical environment
+where you can interact with the data and develop your scripts with confidence.
+
+1. Navigate to **Workspaces** and click the **Create Workspace** button.
+
+ {: .image-popup}
+ 
+
+2. Select **Snowflake SQL Workspace** (or another SQL workspace, depending on your project’s backend).
+
+ {: .image-popup}
+ 
+
+3. Enter a *Name* and a *Description*. Additionally, take note that you can grant access to the workspace, allowing other users to collaborate with you.
+Click **Create Workspace**.
+
+ {: .image-popup}
+ 
+
+4. A creation job will start, and your workspace will soon appear among the configurations.
+
+ {: .image-popup}
+ 
+
+5. Click the workspace name to access the details.
+
+ {: .image-popup}
+ 
+
+ At the outset, you'll need to configure the **table input mapping**, much like we did when setting up a transformation.
+ Then click **Load Data** to clone the datasets from Storage to your workspace. The data is cloned in the state it was in
+ at the moment of loading; to refresh the data in the workspace, click **Load Data** again.
+
+ If you wish to have read access to all data in your Storage without physically cloning it into the workspace,
+ check the *Grant read-only access to all storage data* option when creating a workspace. However, this is a feature we won't delve into in this tutorial.
+
+ {: .image-popup}
+ 
+
+6. Click **Connect**. You'll see the credentials you can use to connect to the workspace using any of your preferred IDEs. Alternatively,
+click the **Connect** button again to access the Web-based Snowflake SQL IDE (please note that this only applies if your project uses a Snowflake backend).
+
+ {: .image-popup}
+ 
+
+
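+Once connected, you can run arbitrary queries against the data loaded into the workspace. For instance, a quick sanity check
+might look like this – a minimal sketch, assuming the sample tables from Part 1 were added to the input mapping:
+
+{% highlight sql %}
+-- Explore the cloned data; refer to tables by their input mapping names,
+-- not by their full Storage identifiers.
+SELECT COUNT(*) AS "row_count"
+FROM "opportunity";
+
+SELECT *
+FROM "opportunity"
+LIMIT 10;
+{% endhighlight %}
+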
+After completing the development of your queries, you can then copy and paste them into a transformation configuration,
+as we did in the [previous tutorial step](/tutorial/manipulate/).
+
+## What’s Next
+Proceed to [Writing Data](/tutorial/write/) for the next step in the tutorial.
+
+## If You Need Help
+Feel free to reach out to our [support team](mailto:support@keboola.com) if there’s anything we can help with.
diff --git a/tutorial/manipulate/workspaces1.png b/tutorial/manipulate/workspaces1.png
new file mode 100644
index 000000000..f26d1d943
Binary files /dev/null and b/tutorial/manipulate/workspaces1.png differ
diff --git a/tutorial/manipulate/workspaces2.png b/tutorial/manipulate/workspaces2.png
new file mode 100644
index 000000000..31d80a2e1
Binary files /dev/null and b/tutorial/manipulate/workspaces2.png differ
diff --git a/tutorial/manipulate/workspaces3.png b/tutorial/manipulate/workspaces3.png
new file mode 100644
index 000000000..a1bbab478
Binary files /dev/null and b/tutorial/manipulate/workspaces3.png differ
diff --git a/tutorial/manipulate/workspaces4.png b/tutorial/manipulate/workspaces4.png
new file mode 100644
index 000000000..27e389deb
Binary files /dev/null and b/tutorial/manipulate/workspaces4.png differ
diff --git a/tutorial/manipulate/workspaces5.png b/tutorial/manipulate/workspaces5.png
new file mode 100644
index 000000000..bdbc58b64
Binary files /dev/null and b/tutorial/manipulate/workspaces5.png differ
diff --git a/tutorial/manipulate/workspaces6.png b/tutorial/manipulate/workspaces6.png
new file mode 100644
index 000000000..74647dbb5
Binary files /dev/null and b/tutorial/manipulate/workspaces6.png differ
diff --git a/tutorial/manipulate/workspaces7.png b/tutorial/manipulate/workspaces7.png
new file mode 100644
index 000000000..51d2af795
Binary files /dev/null and b/tutorial/manipulate/workspaces7.png differ
diff --git a/tutorial/onboarding/architecture-guide/bdm-guide/bdm-ecommerce-marketing.png b/tutorial/onboarding/architecture-guide/bdm-guide/bdm-ecommerce-marketing.png
new file mode 100644
index 000000000..69757befd
Binary files /dev/null and b/tutorial/onboarding/architecture-guide/bdm-guide/bdm-ecommerce-marketing.png differ
diff --git a/tutorial/onboarding/architecture-guide/bdm-guide/bdm-subscription.png b/tutorial/onboarding/architecture-guide/bdm-guide/bdm-subscription.png
new file mode 100644
index 000000000..d94ab7112
Binary files /dev/null and b/tutorial/onboarding/architecture-guide/bdm-guide/bdm-subscription.png differ
diff --git a/tutorial/onboarding/architecture-guide/bdm-guide/bdm-survey.png b/tutorial/onboarding/architecture-guide/bdm-guide/bdm-survey.png
new file mode 100644
index 000000000..885300398
Binary files /dev/null and b/tutorial/onboarding/architecture-guide/bdm-guide/bdm-survey.png differ
diff --git a/tutorial/onboarding/architecture-guide/bdm-guide/crm-bdm.png b/tutorial/onboarding/architecture-guide/bdm-guide/crm-bdm.png
new file mode 100644
index 000000000..5bf73b27e
Binary files /dev/null and b/tutorial/onboarding/architecture-guide/bdm-guide/crm-bdm.png differ
diff --git a/tutorial/onboarding/architecture-guide/bdm-guide/desk.png b/tutorial/onboarding/architecture-guide/bdm-guide/desk.png
new file mode 100644
index 000000000..072b0c987
Binary files /dev/null and b/tutorial/onboarding/architecture-guide/bdm-guide/desk.png differ
diff --git a/tutorial/onboarding/architecture-guide/bdm-guide/index.md b/tutorial/onboarding/architecture-guide/bdm-guide/index.md
new file mode 100644
index 000000000..de8377a59
--- /dev/null
+++ b/tutorial/onboarding/architecture-guide/bdm-guide/index.md
@@ -0,0 +1,216 @@
+---
+title: Business Data Model Guide
+permalink: /tutorial/onboarding/architecture-guide/bdm-guide/
+---
+
+* TOC
+{:toc}
+
+Welcome to the Business Data Model Guide!
+
+This comprehensive guide is designed to provide you with a clear understanding of the Business Data Model (BDM)
+and guide you on how to effectively utilize it within your data management processes. Whether you're new to BDM or looking to enhance
+your existing practices, this guide aims to equip you with the knowledge and practical insights needed to leverage BDM to its full potential.
+
+## What Is a BDM?
+Well, first of all, let’s talk about what it isn’t. It’s not a database design, it’s not an ERD, and it has nothing to do with database performance.
+While we use language that is eerily similar to that of an Entity Relationship Diagram or a Logical Data Model, the BDM lives above them all and is purely conceptual.
+
+The BDM of a company doesn’t change because the technology changed; it changes because the business changes. We’ve now been using this methodology for over 5 years,
+and the BDMs built back then still stand, despite many technology and requirement overhauls along the way.
+
+A Business Data Model is a method of describing a business in the language of data, independent of the underlying technology. It defines and describes the “objects”,
+“properties” and “values” that are key to the business operation, providing a language that simplifies any and all steps that follow – including decisions about
+technical implementations and integrations.
+
+## Why BDM, and What Happens Without It?
+The reason behind BDM can be summarized by the old adage “slow down to speed up”. Many times we have seen what happens when an eager analyst just “looks
+at the data”, sees what’s in there and builds reporting logic on top of it. That’s how data and logic silos are created, and, left to its own devices,
+this approach creates a maze of isolated solutions that becomes incredibly unwieldy as the complexity grows.
+
+One can argue that BDM is nothing new – the terms Logical Data Model and Conceptual Data Model have been used over time, and consulting companies
+sell services for building them. While the concept is similar, the technique differs, making it accessible to any business.
+
+By keeping the process simple, we remove the excuse not to do it. The temptation presented by self-serve BI tools – the “just start working with the data” approach
+– shows its shortcomings only a little later. We see multiple definitions of the same objects, disagreements on the meaning of fairly simple concepts and
+words, business logic baked into code all over the place, and pieces of technology serving single “critical” processes that are nearly impossible to retire later.
+
+## Principles of Building a BDM
+### What You Need
+First of all, you need the right people. BDM is way too important to be left just to the techies. Bring an executive responsible for the area you’re describing,
+and ideally their boss, too. Having someone technically inclined from the company helps, mainly for buy-in during later stages. However, their presence is not
+as essential at the end of the day.
+
+Beyond that, it’s just a whiteboard, post-its (good to have 3 different colors), a few markers and you’re good to go.
+
+While we always digitize the model once it’s at least initially laid out, and it is obviously possible to do this completely digitally and remotely
+(Google Drawings is a good start despite its simplicity, as multiple people can work on it at once), we always find it more effective and
+engaging in person. Often the first workshop happens on-site, and we follow up with one or two rounds that can be remote,
+using the digital drawing to collaborate on.
+
+### Step 0 – Round Up
+In complex organizations and processes, the BDM gets complicated. An e-commerce operation that wants to cover everything from marketing to logistics and
+fulfillment in one go can easily end up with 90+ data objects. So, start by defining the area you are going to cover in the first round, and keep it contained.
+
+Let’s say we start with the actual e-commerce part of the mentioned example – that includes the customer, product, order, perhaps invoice etc.,
+but will not concern itself with campaigns, stores, delivery centers, inventory – those would be left for the next round, working off of the e-commerce base.
+
+How do you select the starting point? Either start with what is central to the business (orders are a good example for e-commerce use cases), or with the area
+of immediate pain and low-hanging-fruit opportunity to deliver value (often marketing and attribution, or inventory control & turns).
+
+It is inevitable that during the following phases the team will encroach on some of those “no-go” areas. Just call it out, put the post-it on the side,
+or mark it visibly as something that no more time is being spent on at this time.
+
+### Step 1 – Storm
+During this step we will create as many “objects” as we can think of. Keep in mind that whatever existing systems are in place or planned are completely
+irrelevant. Don’t limit yourself to “what we have data about today”, etc. – work diligently to get your audience out of that mindset
+(sometimes difficult with a technical audience, but necessary nevertheless).
+
+Let’s stick to the e-commerce example. Everyone grabs a marker and a stack of post-its (use the SAME COLOR of post-its for everyone – may we suggest yellow?).
+Then we’ll just start talking about the business – it actually rolls quite simply. Someone says there is an “Order” and writes it down. Someone else says
+“Order has Lines”, on those are “Products”, and they belong to “Brands” – three more post-its are used. Brainstorming rules apply: anything goes,
+there’s no editing or arguing, only adding.
+
+Use “Yes, and…” every time. The outcome of this stage is a LOT of post-its; go until no one can think of anything else – while of course staying
+within the boundaries laid out in Step 0. If you’re struggling to get going at this stage, the following sample probing questions can help you get fired up:
+
+{: .image-popup}
+
+
+- What do you sell?
+- What “words” pop up in your mind about your business? [Own internal business lingo]
+- How do you categorize them?
+- Is there a business catalog?
+- Who are the key stakeholders?
+- What type of customers exist in the business?
+
+### Step 2 – Edit & Define
+Bring out the whiteboard. Hopefully, it’s clean by this point. Start putting things on it. At first, the location doesn’t matter, but with practice you will recognize
+patterns (you’ll put “customer-like” objects to the left and “transaction-like” objects to the right), which will save time later. Every time you put
+a new post-it on the board, go through these three steps:
+
+#### Group
+Does this post-it belong with one that is already there (for example, “customer” versus “person” or “account”)? Are they really all different things (then keep them separate) or different names for the same thing (then put them on top of each other)?
+
+#### Classify & demote
+Is this an object (going to the left) or a “transaction” (going to the right)? The signs of a transaction are usually the presence of a date and/or time,
+and possibly a numerical value. A transaction is not an object; it is an interaction between a few objects (“order” is an object – it exists on its own).
+
+“Order receipt” is a transaction, an event, something that happened to/with the “order” object (we received it). Some objects will lose that status in this step.
+For example, if you have an “eye color” post-it because it came out of the Storming stage - is it really an object, or is it a property of one? Or “Price”,
+is that an object or a value?
+
+This is when the two other colors of post-its come in handy. Use one for “properties” and one for “key values”. When demoting an object to a property or value,
+just rewrite the name on a post-it of the appropriate color, discard the original, and place the new one on the board near the object it is related to – property
+on the left, value on the right of it.
+
+#### Rename & clarify
+Except for very simple use cases and very small teams, inevitably there will be different names/post-its for the same object, and disagreements
+on what a particular object actually is. Our favourite example is a “customer”, which most often suffers from both problems.
+
+We may have post-its saying “customer”, or “contact”, or “person”, or “subscriber”, or “e-mail”. They may or may not be the same thing.
+Can a “person” have multiple “e-mails”? Is “customer” only a “type” or a “status” of a person? The nuances depend on the particular business and its habits
+and terminology, but chances are all this can (and should) be reduced into one or two objects and perhaps a property.
+
+The second issue is a definition. Who is a customer? Someone who is in our system? Someone who placed an order? Someone who paid us? Someone who took a delivery?
+Often, every department has its own understanding of some terms. This is the time to clarify and define.
+
+One of the major benefits of having completed a BDM exercise is that it helps to unify the language within a business and amongst the stakeholders.
+When we say “customer”, we mean “X”. Time to take notes to start building our dictionary.
+
+**Important:** It is imperative to confirm everything with the whole group at this stage. Do not make assumptions based on experience or
+points of view – it is very easy to “steamroll” the participants and insert one’s own opinions rather than learning what the customer truly thinks.
+
+At the end of this step, we have clear groups and well-defined objects on the board. Well, the reality is – very often we find out during this step
+(or any of the following ones) that we forgot an object. Can we add it? Absolutely. Go ahead. More is better.
+
+### Step 3 – Organize
+{: .image-popup}
+
+
+OK, so at this point we have more or less logically arranged groups of post-its on the whiteboard. The goal of this step is to get from more or less logically
+arranged to a true model – something highly arranged. That really means just moving the groups around a bit and drawing arrows between them.
+Take a higher-altitude view and ignore the properties and values for a moment. How do the major objects relate to each other?
+
+#### Parent – child
+Most of the relationships will be those of Parent – Child, or in data relations speak, one-to-many A.K.A. 1:M. Different words, same thing.
+A customer has (or can have) multiple orders, but each order belongs just to one customer. Each shipment belongs to a particular order, but one order can be
+shipped in multiple packages. An order has multiple products on it. But each product… Wait a minute.
+
+#### Attribution
+You see what I did there? Obviously, order and product have an M:N, or many-to-many, relationship. The same product will (we hope) be on multiple orders.
+How are the two linked then? There needs to be a table that ATTRIBUTES one to another. Yeah, I’m being obvious here, in this case it’s clearly the Order Line –
+which, besides other information (such as amount, price etc.) represents the link between Order and Product, being a child of both.
+
+However, attribution is not always that “easy”. Sometimes we know that objects relate to each other, but there isn’t a transaction or another object handy
+that would conveniently connect the two. In those cases, we’re free to make one up. Call it “attribution” for now; it may have no other properties apart from
+facilitating an M:N relationship between two objects. An example may be a “product” and a “category”, or any kind of tagging (many objects
+can have the same tag / one object can have many tags).
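+
+If such an attribution object ever makes it to implementation, it typically ends up as nothing more than a small link table
+carrying the two keys. A purely illustrative sketch (the names are made up – the BDM itself stays technology-independent):
+
+{% highlight sql %}
+-- Illustrative only: a link table facilitating the M:N relationship
+-- between "product" and "category".
+CREATE TABLE "product_category_attribution" (
+    "product_id"  VARCHAR NOT NULL,
+    "category_id" VARCHAR NOT NULL
+);
+{% endhighlight %}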
+
+#### Transitivity
+Transitivity ensues when there are multiple paths from a child (or object on the right of the board) to a parent (or object somewhere more to the left
+of the board). These are generally undesirable – signs of situations where an object actually represents multiple different entities. Drill in, find out why
+they’re popping up. The only situation when we (reluctantly) let them pass is when there is NO scenario in hell where these multiple paths could lead to different
+“parents”. That usually means we have either a self-reference or a hidden M:N problem.
+
+#### Missing links
+Sometimes we get stuck with a relationship that is 1:M, all good, but there’s just something missing. That is usually exactly what is happening - we’re missing
+an object. Either we forgot about it, or it was never defined in the first place; it is not uncommon for us to name such an object and just see the “aha” moment in
+the faces of the customers, even though we’re the outsiders and they (think they) know their business inside out. The customer may say: “but we don’t have data
+for that!” – don’t let that derail you. If it exists, we can infer its existence, and sooner or later, a change will happen to the business that will provide the
+data, because it just makes sense.
+
+### Step 4 – Test
+{: .image-popup}
+
+
+Testing the BDM (congrats, you have version 0.1 on the board by now) is purely a language and mental exercise. Throw business questions at it,
+and answer them using the language on the board, while pointing to the objects you are using. What are the sales by product category? That’s a total of Price
+times Quantity on Order Lines that refer to Products that are Attributed to that Category. If you’re missing an object, something is wrong. If you’re not
+following an arrow (against the direction is permissible) at any given point, something is wrong. Question it and drill in.
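+
+If it helps to see where that spoken answer eventually leads once someone implements it, here is a purely illustrative SQL sketch
+with made-up table and column names (the BDM test itself stays on the whiteboard):
+
+{% highlight sql %}
+-- "Sales by product category": Price times Quantity on Order Lines that refer
+-- to Products attributed to a Category (all names are illustrative).
+SELECT "category"."name" AS "category",
+       SUM("order_line"."price" * "order_line"."quantity") AS "sales"
+FROM "order_line"
+JOIN "product" ON "product"."id" = "order_line"."product_id"
+JOIN "product_category_attribution" AS "attr" ON "attr"."product_id" = "product"."id"
+JOIN "category" ON "category"."id" = "attr"."category_id"
+GROUP BY "category"."name";
+{% endhighlight %}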
+
+### Step 5 – Iterate
+OK, so we’re reasonably happy with it. Now the question is – what is missing? Depending on the complexity, the time of day, the quality of the coffee served, etc.,
+you may either get an enthusiastic response or just an exhausted “looks good to me” answer. If the former, go at it; if the latter, let people sleep on it
+and reconvene roughly a week later.
+
+## Examples & Templates
+### Using Templates
+What follows is a section of templates, or patterns, that have a tendency to repeat themselves. After all, there are only so many ways to describe an invoice or
+a recurring revenue business. Avoid the temptation of just using them as they are, without going through the process. What makes a business unique is the way it
+uniquely thinks about itself, and forcing a standard or a template just means we’re reducing it to a clone, denying it (and ourselves) the chance of
+discovering something truly exciting. So use these sparingly, as an inspiration, as a secret weapon that helps speed up the process here and there.
+
+Knowing the examples doesn’t make you an expert – the ability to create your own, during a conversation with the customer, does. The process and the customer’s
+involvement in it have a value of their own, EVEN IF YOU END UP WITH A 1:1 IMAGE OF A TEMPLATE. Because it’s not about creating the BDM; it’s about making the
+customer understand and buy into it.
+
+### Survey Data
+Most survey systems and their APIs suffer from the age of Excel. The data coming out is not very useful in terms of structure and requires heavy transformation,
+even though the BDM is rather simple. It’s questions and answers, right? As such, it makes a great example of the usefulness of BDM – we would design ourselves into
+a corner very quickly trying to work with the data as is.
+
+{: .image-popup}
+
+
+### Subscription (Recurring Revenue) Business
+Regardless of what the service or widget is, there is a benefit in looking at the majority of businesses through the lens of “recurring revenue”.
+This basic template works in most subscription contexts (think Software as a Service, magazine subscriptions, gym memberships, and everything in between).
+
+{: .image-popup}
+
+
+### Customer Relationship Management
+That title looked better than “CRM”, but that’s what it means. Various CRM systems are notorious for how they take simple concepts and make the data very
+complicated. Again, that means a trap and a future headache if you try to take the data as is, without a bit of thought given to it beforehand (looking at you,
+Tableau SFDC connector!).
+
+{: .image-popup}
+
+
+### e-Commerce – Marketing Side
+An e-Commerce operation is almost by definition a data-heavy affair. As mentioned in the text of this guide, we can carve out the orders, or the marketing,
+or the logistics side - each of them is a project of its own. This one is looking at the marketing side.
+
+{: .image-popup}
+
diff --git a/tutorial/onboarding/architecture-guide/bdm-guide/post-its-step3.png b/tutorial/onboarding/architecture-guide/bdm-guide/post-its-step3.png
new file mode 100644
index 000000000..1d904014e
Binary files /dev/null and b/tutorial/onboarding/architecture-guide/bdm-guide/post-its-step3.png differ
diff --git a/tutorial/onboarding/architecture-guide/bdm-guide/post-its-step4.png b/tutorial/onboarding/architecture-guide/bdm-guide/post-its-step4.png
new file mode 100644
index 000000000..3a2f5bba1
Binary files /dev/null and b/tutorial/onboarding/architecture-guide/bdm-guide/post-its-step4.png differ
diff --git a/tutorial/onboarding/architecture-guide/horizontal-design.png b/tutorial/onboarding/architecture-guide/horizontal-design.png
new file mode 100644
index 000000000..ff6dad9c5
Binary files /dev/null and b/tutorial/onboarding/architecture-guide/horizontal-design.png differ
diff --git a/tutorial/onboarding/architecture-guide/hybrid-design.png b/tutorial/onboarding/architecture-guide/hybrid-design.png
new file mode 100644
index 000000000..58e67a0c1
Binary files /dev/null and b/tutorial/onboarding/architecture-guide/hybrid-design.png differ
diff --git a/tutorial/onboarding/architecture-guide/index.md b/tutorial/onboarding/architecture-guide/index.md
new file mode 100644
index 000000000..0c10425e6
--- /dev/null
+++ b/tutorial/onboarding/architecture-guide/index.md
@@ -0,0 +1,322 @@
+---
+title: Multi-Project Architecture Guide
+permalink: /tutorial/onboarding/architecture-guide/
+---
+
+* TOC
+{:toc}
+
+Welcome to the Multi-Project Architecture Guide!
+
+This guide is designed to assist you in strategically organizing your Keboola projects, offering insights into standard architectures
+that have proven successful for clients of varying sizes. By delving into this resource, you will gain a deeper understanding of the diverse project structures
+that align with different organizational needs and requirements. Whether you're a small team or a large enterprise, this guide aims to empower you
+with the knowledge needed to make informed decisions on how best to configure your Keboola projects for optimal efficiency and effectiveness.
+
+## Single Keboola Project
+The foundation of implementing and utilizing the **Keboola Platform** begins with the establishment of the initial **Keboola project**.
+
+Conceptually, a Keboola project serves as a logical construct, providing isolation for access rights, responsibilities, data pipelines, storage,
+and embedded processes. This segregation facilitates the distributed allocation of access and responsibilities among individuals and teams
+while maintaining comprehensive control over the data governance layer.
+
+By default, every **Keboola project** encompasses essential building blocks, including:
+1. **Storage**
+ - Relational databases such as Snowflake, Redshift, Synapse, Exasol, and others
+ - Object storage options like S3 or Azure Blob Storage
+2. **Components**
+ - Data sources: Connectors to pull data from various sources into Keboola Storage.
+ - Data destinations: Facilitating reverse ETL to push processed data from Keboola to databases, services, or applications.
+ - Applications: Advanced components for tasks like data quality checks, natural language processing (NLP), and more.
+ - Data templates: Predefined packages of components for streamlined pipeline setup.
+3. **Transformations** (SQL, Python, R)
+ - Transformations give you the ability to transform your data into the desired format and structures for further usage and consumption.
+4. **Flows**
+ - Flows bring everything together. A flow is a set of tasks organized in a workflow that has an assigned schedule of execution.
+5. **Governance layer**
+ - Metadata, telemetry, identity management, access control, etc.
+
+{: .image-popup}
+
+
+Many organizations find a single Keboola project sufficient for an extended period, or even indefinitely. It offers all the tools
+integrated into an advanced, modern data stack, allowing users to integrate data sources, develop transformations, orchestrate pipelines,
+and use the project as an analytical environment. Direct interaction with data is possible through SQL, Python, R, or visualization tools
+like Tableau, PowerBI, ThoughtSpot, Qlik, and more.
+
+However, as an organization grows or matures, several reasons may arise for transitioning from using individual data tools to adopting
+a comprehensive data platform. This evolution is encapsulated in the **multi-project architecture (MPA)** within Keboola, signifying the strategic move to split
+the ecosystem into multiple projects. This shift accommodates the evolving needs of a maturing organization and transforms the Keboola Platform
+into a more sophisticated and scalable data solution.
+
+> *"What’s the difference? Proper data platform allows you to fully benefit from the modern approach of data mesh and data democratization.
+> Splitting responsibilities and artifacts into multiple isolated environments while keeping them well integrated and managed is
+> what’ll allow you to move forward with your business use-cases without creating the bottleneck which typically happens
+> as centralized BI teams get overloaded rather sooner than later."*
+
+## What Is MPA?
+Multi-project architecture is a strategic approach that involves the partitioning of data processing pipelines across distinct blocks represented
+by individual Keboola projects. In this framework, each project assumes a specific function, serving as a dedicated entity responsible for delivering a portion of
+the "data catalog." This catalog can then be utilized by any other project, allowing users to subscribe to it or catering to specific use cases within the
+organization.
+
+Essentially, MPA marks a transition from a single project, typically focused on Extract, Transform, Load (ETL) processes, to a comprehensive data platform.
+Beyond simplifying the cognitive complexity of the project, MPA enables the clear separation of responsibilities and access rights within each segment of the data
+ecosystem. It offers a systematic organization and management of data usage across the entire organization, supporting the transformation of the Keboola Platform
+from a tool-centric solution to a fully-fledged data platform.
+
+Key benefits of multi-project architecture include:
+
+1. **Separation of Responsibilities and Access Rights**
+Ideal for organizations with diverse use cases, users, and personas, MPA facilitates the distinct allocation of responsibilities
+and access rights based on functional requirements.
+2. **Functional Designation of Projects**
+Projects can be designated for specific functions such as stable data pipelines, experimental projects, consumption projects, etc.,
+providing clarity in their purpose.
+3. **Clear Separation of Logical Pipeline Steps**
+Each project delineates a logical step in the data pipeline, contributing to a clear and organized structure of data processing.
+4. **Team Assignment to Projects**
+Different teams can be assigned to different projects, promoting collaboration and specialization. For instance, the marketing team may operate
+on a dedicated marketing project.
+5. **Flexibility of the Infrastructure**
+ MPA allows for flexibility in configuring the infrastructure to match the evolving needs of the organization.
+6. **Readiness for Future Expansions**
+ The architectural design of MPA is conducive to future expansions, ensuring scalability and adaptability as the organization grows.
+7. **Mitigation of Unknowns in Architecture Design**
+MPA simplifies the process of adjusting the architecture to changing conditions or scope, offering agility in responding to evolving requirements.
+
+In summary, multi-project architecture is a strategic evolution that optimizes the Keboola Platform. It provides a structured and scalable framework
+to accommodate the complex and dynamic needs of organizations engaged in diverse data processing and analytics activities.
+
+### Determining the Need for Multi-Project Architecture (MPA)
+If you find yourself contemplating the implementation of multi-project architecture (MPA) within your Keboola Organization,
+the following set of questions can serve as a valuable guide to assess whether the transition to multiple isolated projects aligns
+with the specific dynamics and needs of your organization:
+
+1. **Do you want to separate data extraction?**
+Consider dedicating a separate project to data acquisition (e.g., L0 or Staging project) to streamline the setup of connectors and scheduling
+without encumbering data processing. This separation facilitates collaboration among engineers, data source owners, analysts, and other team members.
+2. **Do you have many data sources of various types?**
+Evaluate whether distinct groups of data sources (e.g., marketing, CRM, sales, accounting) could benefit from processing in separate projects,
+thereby maintaining clarity in responsibilities and access permissions.
+3. **Do you have multiple teams/branches/countries with specific needs?**
+Assess whether different teams, branches, or countries possess unique data sources, use cases, and requirements,
+making the case for dedicated isolated projects to cater to specific needs.
+4. **Do you have a team of specialists and data consumers?**
+Explore whether a team of specialists (e.g., BI team, data analysts) can manage core data preparation in one project,
+while data consumers access selected data assets via their dedicated projects, facilitating a balance between specialization and accessibility.
+5. **Do you work with external vendors?**
+If external vendors require managed access to specific data subsets, consider creating a separate project for them to ensure controlled access
+and comprehensive logging.
+6. **What does your organizational structure look like?**
+Align project split with existing company domains, following the Data Mesh philosophy, to embrace decentralized data ownership, self-service,
+and federated management of computing resources.
+7. **Is your team small and collaborative?**
+For small, collaborative teams working on multiple sources, consider minimizing the number of projects and perhaps only splitting the L0 layer
+by source/domain to reduce cognitive complexity.
+8. **How will you respond to failures?**
+Ensure that each project is isolated with a clear owner, facilitating swift responses in the event of failures or issues.
+9. **Are there differences between regions/countries?**
+Assess the level of complexity introduced by regional or country-specific differences, and evaluate whether maintaining the split at the L1 layer and merging
+at the L2 level provides a balanced approach.
+10. **Do you have sensitive data?**
+Consider the need to keep sensitive data separate for security reasons or to facilitate separate team management, and assess whether data masking is necessary
+at the L0 and L1 levels.
+
+By addressing these questions, you can gain valuable insights into whether multi-project architecture aligns with your organizational structure,
+data processing requirements, and collaboration dynamics, ensuring a strategic and effective implementation of Keboola projects.
+
+## How to Design the MPA
+Designing a multi-project architecture is a nuanced process, and each design is inherently unique, tailored to the specific environment, business needs,
+and organizational structure. While there is no one-size-fits-all solution, the following factors play a crucial role in determining the optimal architecture:
+
+1. **Nature of data/business:**
+Understand the characteristics of the data and business operations to identify related domains and objects.
+2. **Organizational structure:**
+Consider the internal organizational structure, including the roles and responsibilities of data teams within the company.
+3. **Data source variety and separation:**
+Evaluate the variety and separation of data sources to determine if specific projects are needed for different types of sources.
+4. **Security aspects:**
+Address security considerations, including geo-locations, data ownership, and compliance requirements.
+5. **Volume of data and complexity:**
+Assess the volume and complexity of data within particular domains to inform the architecture.
+
+{% include tip.html content="
+One precursor which should serve as a starting point is a defined **business data model (BDM)**.
+This exercise helps you to understand the business in terms of data and discover domains that are related to each other.
+
+Generally speaking, the objects defined in the BDM should be grouped closely within the MPA. For instance, all logic related to a Customer object
+(and any closely related objects) should be in one project. It's common for multiple sources to contribute to this object, possibly coming from
+multiple L0 level projects. However, there should always be a single point of consolidation for this object as soon as possible (e.g., in L1/2).
+
+See our [**BDM methodology guide**](/tutorial/onboarding/architecture-guide/bdm-guide/) for more information.
+"
+%}
+
+### Standard MPA designs
+This chapter describes the most typical MPA designs and demonstrates them with simple examples. In general, we have defined two main strategies for splitting projects, vertical and horizontal, which may be combined into a so-called hybrid design.
+
+As you embark on the design process, keep in mind the dynamic and evolving nature of your organization. Regularly revisit and adjust the MPA design to accommodate changes in business priorities, data landscape, and organizational structure. The goal is to create an architecture that enhances collaboration, efficiency, and scalability across your data projects.
+
+{: .image-popup}
+
+Vertical and Horizontal Split Design
+
+#### Vertical split design
+The vertical split design strategy involves dedicating different pipeline steps to different teams, often aligning with the transition
+between core engineering/IT and business functions within an organization. This approach is commonly observed in enterprise environments,
+where IT teams design logical blocks of infrastructure, represented as projects in Keboola's terminology, vertically into L0 (data acquisition projects),
+L1, L2, and so on. Each layer corresponds to different data processing stages, from acquisition to transformation and blending, to data consumption projects.
+
+**Vertical split design is suitable when:**
+1. **Dedicated isolation isn't necessary**
+There's no need to dedicate isolated projects to individual data consumers or business users. Instead, the focus is on logically splitting the data processing into distinct layers.
+2. **Managed by BI/data/IT teams**
+BI, data, or IT teams manage the full data environment of the organization. In such cases, these teams are responsible for orchestrating data processing and making datasets available for consumption.
+3. **Centralized data access**
+Individual data consumers only access datasets provided by the data team or consume data via a visualization tool connected to a dedicated part of the underlying database.
+
+**Key considerations:**
+1. **Logical layering**
+The design is structured based on logical layers, allowing each team to focus on specific aspects of data processing without overlapping responsibilities.
+2. **Data team ownership**
+The data team has a central role in managing the full data environment and orchestrating data processing activities across different layers.
+3. **Data access control**
+Data consumers access datasets made available by the data team or utilize data via visualization tools connected to specific sections of the underlying database.
+
+**Benefits:**
+1. **Efficient collaboration**
+Teams can efficiently collaborate within their designated pipeline steps, leading to streamlined processes and enhanced efficiency.
+2. **Clear responsibilities**
+Responsibilities are clearly defined for each team based on the logical layers, avoiding confusion and promoting focused efforts.
+3. **Centralized management**
+Centralized data management by BI, data, or IT teams ensures a structured and organized approach to data processing.
+
+**Considerations for implementation:**
+1. **Team alignment**
+Ensure that team structures align with the logical layers of data processing, allowing each team to specialize in their designated stage.
+2. **Communication channels**
+Establish clear communication channels between teams to facilitate collaboration and information sharing across different stages of data processing.
+3. **Continuous evaluation**
+Regularly evaluate the effectiveness of the Vertical Split Design and make adjustments as needed based on evolving business needs and data requirements.
+
+***Note:** This design is particularly well-suited for environments where a centralized approach to data management and processing is effective,
+and individual data consumers primarily interact with curated datasets provided by the data team.*
+
+{: .image-popup}
+
+Vertical Split Design Example
+
+**L0 – Data acquisition**
+- All data extractions (from Salesforce, Zendesk, MySQL database, Google Analytics and Exponea)
+- Basic data quality checks - to make sure that extracted data is of the expected shape and quality
+
+**L1 – Core**
+- The core layer of data preparation and processing - data from L0 are combined into a unified data model
+- Data cleaning
+- Data processing
+
+**L2 – Data-marts**
+- One or more L2 projects used for data-mart creation
+- Built datasets are consumed by business/other data consumers
+- Objects in these projects are made accessible to visualization/reporting tools
+
+**LX – Telemetry and governance**
+- Typically, we recommend keeping a separate project for consuming Keboola’s telemetry data and other metadata that describe overall platform usage and serve the organization’s admins as detailed monitoring
+
+#### Horizontal split design
+The Horizontal Split Design involves dividing data pipelines and infrastructure based on departments, circles, or other entities within an organization.
+This approach tailors data processing to specific use cases and allows individual entities to independently manage their entire data-related workflow.
+For example, a marketing data catalog can be exclusively maintained by the marketing department, leveraging their domain knowledge.
+
+**Horizontal split design is suitable when:**
+1. **Entities operate independently**
+Individual entities operate as clearly standalone units and are capable of independently handling the entire data-related workflow.
+2. **Use-case-driven infrastructure**
+Infrastructure is designed based on specific use cases, allowing each entity to manage data pipelines aligned with their unique requirements.
+
+**Key considerations:**
+1. **Departmental independence**
+Each department or entity operates independently, taking care of its entire data-related work, from data extractions to full data processing.
+2. **Use-case-driven**
+Infrastructure is driven by specific use cases, ensuring that each department's data needs are addressed within its designated project.
+
+**Horizontal split design example:**
+1. **Sales and CRM**
+ - Data extractions from Salesforce and part of MySQL database.
+    - Full data processing, including any applicable data testing.
+ - Consumers access data directly in this project or via a visualization/reporting tool connected to this project.
+2. **Marketing**
+ - Data extractions from Google Analytics, Exponea, and part of MySQL database.
+    - Full data processing, including any applicable data testing.
+ - Consumers access data directly in this project or via a visualization/reporting tool connected to this project.
+3. **Operations**
+ - Data extractions from Zendesk and part of MySQL database.
+    - Full data processing, including any applicable data testing.
+ - Consumers access data directly in this project or via a visualization/reporting tool connected to this project.
+
+{: .image-popup}
+
+Horizontal Split Design Example
+
+**Benefits:**
+1. **Departmental autonomy**
+Each department or entity enjoys autonomy in managing its specific data-related processes, fostering independence and flexibility.
+2. **Use-case customization**
+Infrastructure is customized based on specific use cases, ensuring that each entity's data processing aligns with its unique requirements.
+3. **Domain knowledge utilization**
+Departments leverage their domain knowledge to curate and manage their respective data catalogs, optimizing for accuracy and relevance.
+
+**Considerations for implementation:**
+1. **Departmental collaboration**
+While entities operate independently, establish communication channels for collaboration and information sharing where needed.
+2. **Data governance**
+Implement data governance practices to ensure consistency and adherence to standards across different horizontal splits.
+3. **Scalability**
+Evaluate the scalability of each horizontal split to accommodate future growth and changing data processing needs within individual departments.
+
+***Note**: This design is effective when entities operate as self-contained units and can manage their entire data workflow independently,
+aligning well with specific departmental requirements and domain knowledge.*
+
+#### Hybrid split design
+The Hybrid Split Design is a comprehensive approach that combines both vertical and horizontal splits within the data environment.
+This design is often the most typical and practical solution for organizations. It acknowledges the need to vertically separate data extractions
+and integrations from processing and consumption. Simultaneously, it recognizes the necessity to horizontally split one or more of these vertical layers,
+dedicating isolated environments to individual business units, departments, or teams.
+
+**Hybrid split design is suitable when:**
+1. **Need for vertical and horizontal separation**
+There is a requirement to separate data extractions and integrations (vertical split) from processing and consumption, and simultaneously,
+there is a need to horizontally split these vertical layers to cater to specific business units, departments, or teams.
+2. **Balance of independence and collaboration**
+The organization seeks a balance between providing independence to different entities for their specific data workflows while fostering collaboration where needed.
+
+**Key considerations:**
+1. **Combination of vertical and horizontal splits**
+Integrates both vertical and horizontal splits to create a nuanced and adaptable data architecture.
+2. **Isolation for business units or teams**
+Allocates isolated environments to individual business units, departments, or teams based on their unique data processing needs.
+
+**Benefits:**
+1. **Balanced independence**
+Achieves a balance between providing independence to different entities for their specific data workflows (vertical split) and facilitating collaboration (horizontal split).
+2. **Customization for entities**
+Allows customization of data processing environments based on specific business unit or team requirements.
+
+**Considerations for implementation:**
+1. **Vertical split clarity**
+Clearly define the vertical split, ensuring a clear separation between data extractions/integrations and processing/consumption layers.
+2. **Horizontal split alignment**
+Align horizontal splits with specific business units, departments, or teams, ensuring that each entity has an isolated environment.
+3. **Collaboration channels**
+Establish channels for collaboration and information sharing where needed, fostering effective communication between different entities.
+4. **Scalability and adaptability**
+Assess the scalability and adaptability of the hybrid split design to accommodate future growth, changes in data landscape, and evolving business needs.
+
+***Note:** The hybrid split design recognizes the complexity of organizational data requirements and strives to provide a flexible, adaptable,
+and balanced solution. It allows organizations to leverage the benefits of both vertical and horizontal splits, tailoring the data architecture
+to meet diverse and evolving needs.*
+
+{: .image-popup}
+
+Hybrid Split Design Example
diff --git a/tutorial/onboarding/architecture-guide/pic1.png b/tutorial/onboarding/architecture-guide/pic1.png
new file mode 100644
index 000000000..1c82b235c
Binary files /dev/null and b/tutorial/onboarding/architecture-guide/pic1.png differ
diff --git a/tutorial/onboarding/architecture-guide/split-design.png b/tutorial/onboarding/architecture-guide/split-design.png
new file mode 100644
index 000000000..03cca1be4
Binary files /dev/null and b/tutorial/onboarding/architecture-guide/split-design.png differ
diff --git a/tutorial/onboarding/architecture-guide/vertical-design.png b/tutorial/onboarding/architecture-guide/vertical-design.png
new file mode 100644
index 000000000..1b0073893
Binary files /dev/null and b/tutorial/onboarding/architecture-guide/vertical-design.png differ
diff --git a/tutorial/onboarding/cheat-sheet/index.md b/tutorial/onboarding/cheat-sheet/index.md
new file mode 100644
index 000000000..02a6f31fc
--- /dev/null
+++ b/tutorial/onboarding/cheat-sheet/index.md
@@ -0,0 +1,291 @@
+---
+title: "Cheat Sheet: Best Practices"
+permalink: /tutorial/onboarding/cheat-sheet/
+---
+
+In the diverse landscape of Keboola, the nature and design of your use-case may vary widely. Whether you are orchestrating the automation of your data pipeline
+processing through Flows or configuring components like data sources, data destinations, applications, or transformations, this document serves as a guide to
+essential best practices. Regardless of the intricacies of your project, these recommendations aim to optimize your workflow and ensure a smooth experience with
+the core building blocks of a standard use case in Keboola.
+
+* TOC
+{:toc}
+
+## Extracting Data from Sources
+### User Credentials
+When working with data source components in Keboola, proper authorization is crucial. This involves providing credentials and connection details for source
+databases or relevant tokens and API keys for extracting data from services. It is advisable to use technical user credentials created specifically for Keboola
+integrations, as using the credentials of a real person may present challenges related to permissions, potential changes or terminations, and password resets.
+
+### Accessibility of Data Sources
+Ensure that the data sources you intend to integrate are accessible from the Keboola platform. Internal databases running on on-premise servers or private clouds
+may not be accessible by default. In such cases, consider whitelisting Keboola's IP addresses, establishing an SSH tunnel (if supported by the Keboola component),
+or requesting Keboola to set up a VPN server. Collaboration with administrators or owners of the data source on your side, coupled with support from the Keboola
+team, will help address any accessibility issues.
+
+### What You're Extracting
+When integrating typical data sources such as MS SQL Server, PostgreSQL, MySQL, or services like Google Analytics and Facebook Ads, resist the temptation to
+extract everything without evaluating necessity. This approach can lead to unnecessary credit spending. It is recommended to initially extract a limited batch of
+data to verify its relevance before proceeding with a full replication of the entire data history from the source.
+
+### Incremental Fetching and Loading
+**Incremental fetching:** Keboola's ability to read data from the source in increments, either through specific parameters or Keboola's incremental fetching
+options in database connectors, is beneficial for larger datasets. This setup is particularly useful when the full extraction time exceeds that of extracting
+increments only.
+
+**Incremental loading:** Concurrently, incremental loading involves incrementally loading the extracted data into Keboola Storage. Setting a primary key for the
+Keboola Storage table allows for efficient incremental loading, as it processes upsert operations—appending new records and updating existing ones based on
+matching primary key values. Incremental load without a primary key set would always perform an append operation.
+
+It's important to note that certain connectors may automatically implement both incremental fetching and loading without requiring manual setup by users.
+This information is usually highlighted in the configuration UI, providing users with transparency about the implemented behaviour.
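+
+For illustration, here is a hedged sketch of what an incremental load with a primary key effectively does on the Storage backend. The `ORDERS` and
+`ORDERS_INCREMENT` table names and columns below are hypothetical; Keboola performs this upsert for you based on the configured primary key, so you never write
+this statement yourself.
+
+```
+-- Illustrative only: an incremental load with primary key "ID" behaves like an upsert (MERGE).
+MERGE INTO "ORDERS" AS target
+USING "ORDERS_INCREMENT" AS source
+  ON target."ID" = source."ID"
+WHEN MATCHED THEN UPDATE SET
+  "STATUS" = source."STATUS",
+  "AMOUNT" = source."AMOUNT"
+WHEN NOT MATCHED THEN INSERT ("ID", "STATUS", "AMOUNT")
+  VALUES (source."ID", source."STATUS", source."AMOUNT");
+
+-- Without a primary key, the same load is a plain append:
+-- INSERT INTO "ORDERS" SELECT * FROM "ORDERS_INCREMENT";
+```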
+
+### Optimize with Parallelization
+To optimize the overall runtime of your pipeline, consider employing parallelization to execute multiple configurations simultaneously. It's essential to recognize that while parallelization can significantly reduce the total runtime, each individual job consumes credits independently. Therefore, parallelization is a tool for optimizing the execution timeline rather than cost.
+
+**Where to apply parallelization:**
+
+1. **Flow level:** Within a flow, tasks can be organized into phases for parallel execution.
+2. **Components:** For [row-based components](/components/#configuration-rows), you can set parallelization. Examples include database data source connectors,
+where multiple tables share the same credentials. Configuration rows allow for parallel execution, and this can be configured in the
+component UI.
+
+{% include tip.html title="Execute Individual Configurations" content="
+Enhance your workflow efficiency by executing individual configurations separately in Keboola Flow automation. In advanced settings, edit parameters to
+selectively run specific rows, providing nuanced control over your data processing.
+
+Additionally, in the user interface, utilize the three-dotted menu next to each configuration row for a quick and convenient way to execute only the needed
+configurations. This feature streamlines your workflow, saving time and resources while maintaining precision in your data operations.
+" %}
+
+**Storage jobs vs. component jobs:**
+
+Most Keboola components interact with Keboola Storage during execution. Data source components first extract data from the source and then trigger
+a [Storage job](/storage/jobs/) to import the data into Keboola Storage. It's crucial to understand that there is no strict limit on parallel component jobs,
+but there is a limit on the maximum parallel Storage jobs in a project. In multi-tenant deployments, multiple component jobs might wait for available Storage job
+slots, potentially extending the overall runtime. The default limit for parallel Storage jobs is 10, but it can be increased through Keboola Support.
+
+## Developing a Transformation
+### Using a Workspace
+It's common for users to directly dive into the Transformations section of the UI to set up and test scripts. However, this approach may not be optimal. Executing a transformation component, whether it involves Python, R, or SQL transformations, always incurs some overhead from the component execution layered on top of the script execution. This can result in unnecessary credit consumption during code debugging.
+
+Our recommendation is to start by creating a Workspace for development purposes. Develop and test your code within the Workspace environment. Once your script is functioning correctly, you can then transfer it to a Transformation configuration and execute it, ensuring more efficient credit usage.
+
+### Input and Output Mapping
+Every transformation operates within its designated, temporary transformation workspace. When a Transformation is executed, it establishes this distinct
+workspace, which is isolated from the primary Keboola Storage. Consequently, within your code, you cannot directly access all Storage Objects; instead, you must
+load selected Storage objects into your transformation using an input mapping.
+
+Upon execution, a transformation initially processes the configured input mapping, loading the specified datasets into the transformation workspace.
+Subsequently, your script is executed. Towards the end of the transformation, it processes the configured output mapping. Only objects specified in the output
+mapping section will be loaded back into Storage upon completion of the transformation execution.
+
+Let’s consider a hypothetical scenario where your transformation script, developed in your workspace, looks like this:
+
+```
+SELECT
+ "Id" AS ORDER_ID,
+ "Date" AS DATE,
+ "Amount" AS AMOUNT
+FROM "MyInputTable";
+```
+
+To successfully execute this code in a Transformation, you would need to make three essential edits:
+
+1. **Adding** `MyInputTable` **to an input mapping:** Ensure that `MyInputTable` is included in the input mapping of your transformation.
+2. **Creating an object within your script for output mapping:** Include the following statement in your script to create an object that can be processed by output mapping:
+```
+CREATE TABLE MY_NEW_TABLE AS
+SELECT
+ "Id" AS ORDER_ID,
+ "Date" AS DATE,
+ "Amount" AS AMOUNT
+FROM "MyInputTable";
+```
+3. **Adding** `MY_NEW_TABLE` **to the output mapping:** Include `MY_NEW_TABLE` in the output mapping so that it is loaded into Storage after the transformation is executed.
+
+### Snowflake is Case Sensitive
+This is a frequent challenge for new users and is specific to projects utilizing a Snowflake Storage backend. When incorporating a table named “MyInputTable”
+into your input mapping, it is imperative to employ double quotes when referencing that table in your code.
+
+For instance, using `FROM MyInputTable` would be interpreted by Snowflake as `FROM MYINPUTTABLE`, resulting in a non-existent object and causing the script to
+fail. Therefore, it is essential to use `FROM "MyInputTable"` to ensure proper referencing in a Snowflake environment.
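+
+A minimal sketch of the difference, assuming a table named `MyInputTable` in the input mapping:
+
+```
+-- Fails: the unquoted identifier is upper-cased to MYINPUTTABLE, which does not exist.
+SELECT "Id", "Date" FROM MyInputTable;
+
+-- Works: double quotes preserve the exact casing of the mapped table name.
+SELECT "Id", "Date" FROM "MyInputTable";
+```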
+
+### Incremental Processing
+Similar to leveraging incremental fetching and loading with data source components, you can optimize your transformation through incremental processing.
+
+**Increments in the input mapping:** You can configure your transformation to process increments of data each time it runs. This requires the input tables
+to be generated by a component (data source connector or another transformation) using incremental loading. This setup generates a concealed technical column
+named `_timestamp` in Keboola Storage, which is then utilized by the input mapping. Here, you can configure the `Data Changed in the Last` filter, automatically
+detecting records added/changed within the selected timeframe. Consequently, your Transformation Workspace operates solely on that increment, eliminating the need
+to address it in your script.
+
+Alternatively, if you need to identify your increment based on multiple conditions, you can allow the Transformation to clone the entire input table.
+Subsequently, you can define the processed increment using a WHERE condition in your SQL script or an equivalent logic in Python or R.
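+
+For example, here is a simplified sketch of the second approach, where the full table is cloned into the workspace and the increment is defined directly in SQL
+(the `UpdatedAt` column and the two-day window are assumptions for illustration):
+
+```
+-- Process only records changed in the last two days; column names are illustrative.
+CREATE TABLE ORDERS_INCREMENT AS
+SELECT
+  "Id" AS ORDER_ID,
+  "Date" AS DATE,
+  "Amount" AS AMOUNT
+FROM "MyInputTable"
+WHERE "UpdatedAt" >= DATEADD(day, -2, CURRENT_DATE());
+```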
+
+**Increments in the output mapping:** This is analogous to the incremental loading setup of data source components. You can choose to implement incremental
+loading with or without a primary key, resulting in either upserting or appending data, respectively.
+
+### Using Variables
+Keboola transformations offer the convenience of Variables. A Variable is an element designed to store a value that can be repeatedly utilized in your
+transformation script. This becomes particularly handy when employing a filtering condition in various sections of your script. By setting it as a variable, you
+simplify the process of updating the value. If a change is needed, you can modify it in one place rather than across multiple instances. For more information on
+variables, refer to [the documentation on variables](/transformations/variables/).
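+
+For instance, assuming a variable named `start_date` is defined on the transformation and referenced via moustache-style placeholders, the same filtering
+condition can be reused across queries (the table and column names below are illustrative):
+
+{% raw %}
+```
+-- The {{ start_date }} placeholder is replaced by the variable's value at runtime.
+CREATE TABLE RECENT_ORDERS AS
+SELECT "Id", "Date", "Amount"
+FROM "MyInputTable"
+WHERE "Date" >= '{{ start_date }}';
+
+CREATE TABLE RECENT_INVOICES AS
+SELECT "Id", "Date", "Total"
+FROM "MyInvoiceTable"
+WHERE "Date" >= '{{ start_date }}';
+```
+{% endraw %}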
+
+It's important to highlight that in more advanced setups, Variables can also be dynamically provided via API calls during the execution of components.
+
+### Shared Codes for Repeated Tasks
+Frequently, there are coding patterns or functions that need to be replicated across multiple Transformations. These could be recurring script segments that serve
+a specific purpose. To streamline this process, you can create what is known as [shared code](/transformations/variables/#shared-code).
+Shared codes allow you to define and maintain these common script segments in one centralized location. Any modifications made to the shared code are
+automatically reflected in all transformations utilizing it. This ensures consistency and simplifies maintenance across your projects.
+
+### Optimizing Performance with Dynamic Backends
+For **Snowflake SQL transformations**, users have the flexibility to choose between Small (default), Medium, or Large Snowflake Warehouses. While larger warehouses
+generally offer improved performance, they also incur higher costs. The detailed impact on costs is available [here](https://help.keboola.com/management/project/limits/#project-power--time-credits).
+
+In Snowflake, the performance of larger warehouses tends to exhibit a linear increase, with a Transformation executed on Medium potentially being twice as fast as
+on Small. However, the actual impact depends on the specific queries. There is no universal rule for determining whether a larger warehouse will optimize time and
+costs, so the best approach is to experiment and measure the results.
+
+It's worth noting that executing on a larger backend can sometimes be significantly faster, resulting in a lower cost for the Transformation compared to running
+it on a Small backend.
+
+For **Python and R transformations**, the backend size primarily influences available memory, enabling the processing of larger datasets in your scripts. Users
+commonly opt for a larger backend when their Transformation fails due to memory constraints, rather than exploring larger backends to assess potential performance
+improvements.
+
+### Avoiding Using SELECT *
+Resist the temptation to use `SELECT *` in your queries. Opt for a more precise approach by explicitly listing all the columns you intend to select. When you
+employ `SELECT *`, you risk potential issues if new columns are added, existing ones are removed, or if there are any changes in column names.
+
+This practice not only enhances the safety of your queries but also improves readability. By specifying each column explicitly, you maintain clarity in your code,
+making it evident which columns you are selecting from the table. Even if you end up including all columns, the explicit list ensures transparency and avoids
+unnecessary complications in your output data.
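+
+A brief illustration of the difference (the table and column names are only examples):
+
+```
+-- Fragile: the output schema silently changes whenever the source table changes.
+CREATE OR REPLACE TABLE ORDERS_OUT AS
+SELECT * FROM "MyInputTable";
+
+-- Safer: the selected columns are explicit, so upstream schema changes surface as clear errors.
+CREATE OR REPLACE TABLE ORDERS_OUT AS
+SELECT "Id", "Date", "Amount", "Status"
+FROM "MyInputTable";
+```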
+
+{% include tip.html title="Leverage Development Branches" content="
+For all development tasks, whether adding new components, editing existing configurations, or refining transformations, maximize safety by making use of
+Development branches. This strategic approach ensures a secure and controlled environment for your development efforts. For detailed guidance on utilizing
+Development branches effectively, refer to the documentation [here](https://help.keboola.com/tutorial/branches/).
+This practice not only enhances the safety of your development processes but also provides a structured and organized approach to managing changes in your
+Keboola environment.
+" %}
+
+
+## Automating Your Flow
+### Workflow with Parallel Execution
+In your Flows, you can streamline processing by grouping multiple tasks within one step, also known as a phase. These tasks then run independently in parallel,
+enhancing overall efficiency. Subsequent steps (phases) will commence only after the completion of all tasks within the preceding step.
+
+### Continue on Failure
+Every individual task within your flow features a **Continue on Failure** setting. By default, this setting is *disabled*, meaning an error in any single task
+will halt the entire flow execution, resulting in an error status. Enabling **Continue on Failure** permits the flow to persist even if a single task encounters
+an issue.
+
+This is beneficial for tasks that may regularly fail due to specific conditions, with the expectation that they will be executed successfully in subsequent runs.
+Alternatively, it is suitable for independent tasks whose failure does not impact the rest of the flow. However, monitoring execution statuses becomes crucial to
+promptly address potential errors and implement necessary fixes.
+
+### Notifications for Monitoring
+For a seamless execution of your use-cases, staying informed about errors or unusual execution times in your flows is crucial. Configure **notifications**
+within your flow to receive timely updates. Teams often opt to configure a group mailbox for specific user groups, ensuring that all team members receive
+notifications regarding errors, warnings, or instances where the flow runs longer than the expected duration. This proactive approach enhances awareness and
+facilitates prompt responses to any issues that may arise.
+
+### Automating Flows
+**Date & Time Schedule:** The most common setup for automating flows involves scheduling them to run at specific time slots. In a multi-tenant stack, it's
+advisable to avoid peak time slots, such as midnight, to optimize resource availability. A simple adjustment, like scheduling your flow for 0:15 am, can
+positively impact execution, minimizing competition for resources within the multi-tenant environment.
+
+**Triggered Execution:** A triggered flow is configured to monitor specific tables in the Storage. Once there are updates in the selected Storage table, the flow
+is automatically executed. This setup is particularly useful in multi-project scenarios where one project's flow relies on processes in another project. By
+linking tables through a data catalog and scheduling a flow on trigger, dependencies between projects are efficiently managed.
+
+## Writing Data to a Destination
+While some practices overlap with those for extracting data from sources (specifically, [User Credentials](#user-credentials),
+[Accessibility of Data Sources](#accessibility-of-data-sources), and
+[Optimize with Parallelization](#optimize-with-parallelization)), additional considerations come into play when writing data from Keboola to a destination.
+
+### Verifying Adequate Permissions
+This builds upon the importance emphasized in the [User Credentials](#user-credentials) aspect discussed in the
+[Extracting Data from Sources](#extracting-data-from-sources) chapter. It underscores the necessity of ensuring you possess the requisite permissions when
+attempting to write data to a destination. Frequently, specific privileges are essential for this task, and they may not be automatically granted to a broad
+spectrum of users within an organization. Insufficient permissions often manifest as errors when writing data to a destination. In such cases, Keboola Support is
+available to assist in identifying the specific permissions required for a particular component.
+
+### Who Gets Access to Data
+In the Keboola project, you have a precise understanding of who can access the integrated data. However, when writing data to a destination, whether it's a
+database, object storage, or an API/service, you are essentially extending access to those data to users who have privileges for that specific destination. It is
+crucial to be vigilant and ensure that you do not inadvertently share your data with unintended recipients.
+
+### Incremental Processing
+To optimize the efficiency of your data writing operations, consider incorporating incremental processing, a strategy analogous to that used in data extraction or
+transformation processes described earlier. This optimization is particularly beneficial in Keboola, where it enables the selective writing of data that has
+changed in Keboola Storage since the last successful execution, ensuring a more streamlined and resource-efficient process.
+
+For certain components, especially those designed for database data destinations, an additional advantage is the presence of an **Automatic Incremental Load**
+feature. This feature involves the component maintaining an internal state file within its configuration, recording the timestamp of its last successful
+execution. Keboola utilizes this information to identify and capture only the data that has been added or modified in Keboola Storage since the last execution. On
+the destination side, the component facilitates an upsert operation, intelligently inserting new data and updating existing records, rather than opting for a
+complete rewrite or simple append-only approach.
+
+In scenarios where certain APIs or services lack built-in mechanisms for efficient data updates, leveraging the incremental feature of the respective component
+becomes even more critical. Many data destination components share the input mapping logic with transformations, allowing the application of similar principles.
+Some components go a step further by incorporating sophisticated mechanisms, as mentioned earlier, to enhance the incremental processing capabilities.
+
+By adopting incremental processing, you not only optimize the performance of data writing operations but also ensure a more resource-efficient and intelligent
+handling of data updates, tailored to the specific requirements of the destination.
+
+### Caution Before Data Writing
+Put simply, it's crucial to thoroughly understand the implications of your actions. While Keboola offers a straightforward process for restoring data in case of accidental corruption, this may not hold true for the destination where you intend to write your data. The restoration of data in such destinations can be challenging, and in certain instances, it might even be impossible. Therefore, exercising heightened caution is strongly advised. Make sure you are well-informed and deliberate in your decisions when it comes to writing data, recognizing that the ease of recovery in Keboola may not necessarily extend to all destinations.
+
+## Job Log and Troubleshooting
+Whether you're a seasoned data engineer or just starting out, encountering errors during development is inevitable.
+Here are some tips for effectively troubleshooting errors.
+
+### Job Log
+The job log is a valuable resource providing insights into the execution of a job, including the entity or process that executed it, the timestamp
+of its execution, and the duration it took.
+
+**Mapping section:** The log incorporates a mapping section that delineates the tables involved in the process. In extraction operations, the output section
+enumerates all tables extracted from the source and loaded into Keboola Storage, essentially representing the job's output. In the context of transformations,
+both input and output sections are typically present, revealing the tables used in the Input and Output mappings of that transformation—clarifying the tables
+loaded and produced.
+
+When writing data to a destination using a data destination component, the input section lists the tables used for the writing operation. However, as the writing
+operation doesn't generate new tables in Keboola, the output section remains empty.
+
+**Log:** The job log further includes a detailed account of individual actions taken during the job execution. This initial section is particularly valuable for
+debugging, providing a chronological overview of actions performed. Identifying the step at which the processing failed can offer crucial insights into what to
+investigate during the debugging process.
+
+By delving into the job log, you gain a comprehensive understanding of the job's execution, aiding in the identification and resolution of errors encountered
+during development.
+
+### AI Error Explanation
+In your project settings, take advantage of the **AI Error Explanation** feature to enhance your error troubleshooting capabilities. This feature utilizes
+artificial intelligence to translate potentially complex error messages into a more user-friendly format. By enabling this feature, Keboola aims to provide
+helpful suggestions on what specific aspects to investigate as a user.
+
+### Internal or Application Errors
+If you encounter an error message indicating "Internal" or "Application Error," you should reach out to the Keboola support team. These errors typically signify
+unexpected issues occurring beneath the surface, and our support engineers will thoroughly examine detailed platform logs to assist you in resolving the problem.
+
+### Debug Job Feature
+Nearly every Keboola job involves interactions with Keboola Storage tables, whether it's loading data during extraction or reading (and producing) data during
+a transformation. Many components utilize the Keboola Storage API to exchange data in the form of files. For example, when extracting data using a data source
+component, the component builds a CSV file, providing it to Keboola Storage along with loading instructions.
+
+To aid in troubleshooting errors, users can activate the **Debug Mode** feature in their user settings (the User Features section). This unlocks a new UI element
+in component configurations, allowing the execution of a debug job. This feature is particularly useful when extracting data into Keboola and encountering errors.
+The **debug job** replicates standard processing steps but halts before loading data into Keboola Storage. This allows users to review each step without the risk
+of data corruption. The debug job generates a zip file containing all files produced during each processing step, accessible in **Keboola File Storage**. This
+enables users to delve into the files for further debugging. For instance, unexpected data encoding on the source or formatting issues leading to extraction
+failure can be identified by exploring these files, facilitating a more precise understanding of the problem and its resolution.
+
+{% include tip.html title="Unlock the /raw mode" content="
+Discover a powerful feature in Keboola: the ability to access and edit the raw JSON of every configuration, be it a component or transformation, using
+the Debug Mode. If you prefer a direct route, simply add /raw to the end of the URL address. This advanced capability proves invaluable for performing intricate
+settings, such as utilizing processors, offering a flexible and efficient way to tailor configurations to your specific needs. Explore the /raw mode to unlock a
+deeper level of control in your Keboola projects.
+" %}
diff --git a/tutorial/onboarding/governance-guide/index.md b/tutorial/onboarding/governance-guide/index.md
new file mode 100644
index 000000000..c1471d052
--- /dev/null
+++ b/tutorial/onboarding/governance-guide/index.md
@@ -0,0 +1,116 @@
+---
+title: Keboola Governance Guide
+permalink: /tutorial/onboarding/governance-guide/
+---
+
+Welcome to the Keboola Governance Guide! Governance, in the context of our platform, encompasses a spectrum of activities related to tracking, understanding usage, cost management, and maintaining adherence to security and other principles outlined in your [Keboola Platform Usage Blueprint](/tutorial/onboarding/usage-blueprint/).
+
+**If you have any questions or need further assistance, feel free to reach out to our support team.**
+
+* TOC
+{:toc}
+
+## Understanding Platform Activity
+### Leveraging Telemetry Data
+The Keboola platform captures metadata for every operation within its ecosystem. This encompasses the configuration of each component, user interactions,
+and the execution of every job. In the background, Keboola systematically processes the raw metadata, transforming it into a well-documented
+[telemetry data model](/components/extractors/other/telemetry-data). Accessing this telemetry data for your project is facilitated through a dedicated data source
+connector called Telemetry Data.
+
+For optimal governance, it is advisable to establish a distinct Admin project, wherein the Organization mode of the Telemetry Data connector can be
+employed. In instances where organizations operate with a limited number of projects, a straightforward approach involves integrating the Telemetry Data connector
+within one of the existing projects.
+
+**Enabling Organization mode in the Telemetry Data data source connector:**
+To activate the Organization mode within the Telemetry Data connector, kindly reach out to our Support team or directly contact your account manager.
+They will ensure that this specific option is enabled for your account, allowing you to leverage telemetry data in an organizational context.
+
+#### Activity center
+The activity center represents an advanced feature within the Telemetry Data connector, accessible to customers with the Activity Center add-on included in their
+contract. This mode offers a more comprehensive view, providing intricate details about individual components. It includes additional information such as
+**detailed metadata** for storage and other objects, user **activities**, and **specifics regarding job** inputs and outputs. This detailed insight is
+particularly valuable for robust data lineage tracking and advanced usage analysis.
+
+### Monitoring and Analysis
+For comprehensive oversight, basic metrics are readily available in the form of a project dashboard on the **Project Consumption** page.
+These fundamental metrics include:
+- Consumed credits
+- Number of executed jobs
+- Error job ratio
+- Active flows
+- Active component configurations
+- Active transformations
+
+Organizations managing multiple projects can utilize the **Organization Usage** page to monitor these metrics across all projects. This broader view encompasses:
+- Total credit consumption
+- Number of projects
+- Number of users
+- Size of data in Storage
+
+All metrics are accompanied by a comparison to contractual limits. Furthermore, the usage is graphically represented over time and categorized by individual
+component types, providing a comprehensive analysis.
+
+The platform also highlights the top consuming projects within your organization, along with insights into the most resource-intensive component configurations in
+terms of credit consumption. This detailed monitoring and analysis functionality offer valuable insights for efficient resource management and optimization.
+
+#### Custom metrics
+In addition to the foundational metrics mentioned earlier, organizations often create more detailed dashboards to visualize custom metrics tailored to their
+specific needs. Whether utilizing Tableau, PowerBI, Looker, or other tools, it is possible to establish a direct connection between these tools and the Keboola
+Storage backend, enabling the visualization of telemetry data.
+
+While certain metrics may require the development of additional SQL queries, many typical metrics can be easily derived from the Telemetry data itself.
+Some examples of these custom metrics include:
+1. **Detailed, multi-dimensional view of consumed credits over time**
+ - Per project, user, component, configuration, etc.
+2. **Detailed view of executed jobs and error rates**
+ - This helps identify users with a high rate of error jobs, indicating a potential need for further assistance or education.
+3. **Monitoring outliers**
+ - Keep track of weekly, monthly, or other periodic changes in the activity of individual projects, users, or specific configurations. This aids in identifying sudden increases in project activity or specific transformations that may require optimization.
+4. **Configuration health metrics**
+ - Monitor whether created flows have assigned schedules and notifications.
+   - Ensure that configurations have descriptions and adhere to naming conventions specified in the [**Platform Usage Blueprint**](/tutorial/onboarding/usage-blueprint/).
+5. **User 360**
+ - Verify whether users have multi-factor authentication (MFA) enabled.
+ - Identify the projects to which users have access.
+ - Track the credit consumption, executed jobs, error rates, number of created and updated configurations, and API tokens created by each user.
+
+By incorporating these custom metrics into your monitoring and analysis strategy, you can gain deeper insights into the performance, health, and compliance aspects of your data platform, facilitating informed decision-making and proactive management.
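+
+As a rough sketch of the first metric above, credits consumed per project and month can be derived with a simple aggregation over the telemetry tables.
+The table and column names below are assumptions for illustration and may differ from the actual telemetry data model in your project:
+
+```
+-- Hypothetical telemetry table/column names; adjust to the real telemetry data model.
+SELECT
+  "kbc_project_id",
+  DATE_TRUNC('month', "date") AS "month",
+  SUM("value") AS "consumed_credits"
+FROM "kbc_usage_metrics_values"
+WHERE "metric_name" = 'credits'
+GROUP BY 1, 2
+ORDER BY 1, 2;
+```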
+
+## Best Practices for Data Management
+While the Keboola platform doesn't serve as a standalone data catalog or data lineage visualization tool, its strength lies in the rich metadata it automatically
+collects.
+
+Recognizing that the needs and requirements of individual organizations vary widely, the platform refrains from providing a one-size-fits-all solution in these
+areas. Instead, it offers seamless integration with a variety of third-party data catalog and lineage visualization tools, such as Informatica, Collibra, DataHub,
+Dawiso, Manta, and others.
+
+To optimize your data management practices:
+1. **Leverage rich metadata**
+ - Capitalize on the extensive metadata automatically generated by the Keboola platform. This metadata provides valuable insights into your data operations.
+2. **Integrate with third-party tools**
+ - Integrate Keboola seamlessly with third-party data catalog and lineage visualization tools that align with your organization's specific needs. This allows you to benefit from specialized solutions without compromising flexibility.
+3. **Explore diverse solutions**
+ - Given the diverse landscape of organizational needs, explore and select data catalog and lineage visualization tools that best suit your unique requirements. Solutions like Informatica, Collibra, DataHub, Dawiso, Manta, among others, offer specialized features to enhance your data management capabilities.
+4. **Maintain flexibility**
+ - Recognize that a single solution may not cater to every organization's requirements. By maintaining flexibility and integrating with external tools, you can adapt your data management strategy to align with evolving needs.
+5. **Facilitate interoperability**
+   - Ensure that the selected third-party tools seamlessly integrate with the Keboola platform. This interoperability promotes a cohesive and efficient data management ecosystem. You can always rely on Keboola Professional Services to help you identify the integration options.
+
+By following these best practices, you can enhance your data management capabilities, integrate specialized tools as needed, and ensure that your organization's
+unique requirements are met effectively.
+
+### Data Catalogs, Dictionaries, and Data Lineage
+For a comprehensive understanding of data lineage, we recommend exploring our blog post on the topic [here](https://www.keboola.com/blog/how-to-get-started-with-data-lineage).
+
+Keboola telemetry data provides detailed tables containing information about all available tables, including their columns with essential details such as
+descriptions, data types, and other pertinent information. This data can be transformed into a format compatible with your chosen data catalog tool and
+subsequently integrated into the tool either through a DB connector, an API endpoint, or by reading directly from Keboola's Storage backend via API.
+
+In addition to table information, separate tables house details about all component configurations, including data sources, transformations, and others, that
+either write data into or read data from these tables. This forms the foundational element of data lineage, enabling the construction of a comprehensive path from
+data sources to destinations.
+
+For those employing a data lineage visualization tool that supports the OpenLineage format, Keboola offers the OpenLineage data destination component, accessible
+[here](https://components.keboola.com/components/keboola.wr-openlineage). This component generates structured OpenLineage data from all jobs executed in Keboola,
+allowing you to directly write this data to your OpenLineage endpoint. This streamlined integration facilitates the incorporation of Keboola's data lineage
+information into your chosen visualization tool, enhancing your data management and analysis capabilities.
diff --git a/tutorial/onboarding/index.md b/tutorial/onboarding/index.md
new file mode 100644
index 000000000..1e57cab91
--- /dev/null
+++ b/tutorial/onboarding/index.md
@@ -0,0 +1,73 @@
+---
+title: Keboola Platform Onboarding
+permalink: /tutorial/onboarding/
+---
+
+* TOC
+{:toc}
+
+Welcome to the Keboola onboarding guide, your go-to resource for navigating the initial stages of your journey with our platform.
+Whether you've completed your proof of concept project or graduated beyond the [free tier](https://connection.north-europe.azure.keboola.com/wizard),
+this guide is designed to be your compass.
+
+If you're looking for a comprehensive overview of the Keboola platform, you can find it [here](https://help.keboola.com/overview/). Let's embark on this onboarding adventure together!
+
+## Build a Solid Foundation
+While it's exciting to start working on your project, we suggest beginning with careful planning.
+Establishing fundamental building blocks early on will provide a sturdy foundation throughout your entire Keboola platform experience.
+Depending on the scale of your contract, some points may not be applicable—especially if you've opted for just one or two Keboola projects.
+Some of the following steps can be skipped in such situations:
+
+1. [Create a Keboola Platform Usage Blueprint](#create-a-keboola-platform-usage-blueprint)
+2. [Optional: Design a Multi-Project Architecture](#design-a-multi-project-architecture)
+3. [Business Data Model Methodology](#business-data-model-methodology)
+4. [Keboola Governance Details](#keboola-governance-details)
+
+### Create a Keboola Platform Usage Blueprint
+Utilize our [template](/tutorial/onboarding/usage-blueprint/) to design a blueprint tailored to your organization's needs.
+
+- Identify organization administrators.
+- Establish rules for project access requests, considering the need for an approval process.
+- Identify key contact points.
+- Define ground rules, including naming conventions and security principles.
+
+### Design a Multi-Project Architecture
+This step is fully **optional**.
+
+- Use our [MPA guide](/tutorial/onboarding/architecture-guide/) for assistance.
+- Multi-project architecture involves dividing data processing pipelines among multiple blocks represented by individual Keboola projects.
+
+#### Business Data Model Methodology
+ - Explore Keboola’s Business Data Model (BDM) methodology, detailed in our [guide](/tutorial/onboarding/architecture-guide/bdm-guide/).
+ - The BDM aligns seamlessly with multi-project architecture, ensuring flexibility and efficiency in accommodating changes in source systems.
+
+### Keboola Governance Details
+- Consult our [guide](/tutorial/onboarding/governance-guide/) for insights into Keboola governance.
+- Learn how to leverage Keboola telemetry data for monitoring platform usage, including cost monitoring.
+- Understand different levels of security-related monitoring, incorporating best practices.
+
+Embark on your Keboola journey with a well-thought-out plan, setting the stage for a successful and streamlined experience.
+
+## Get Your Hands On!
+It's time to roll up your sleeves and dive into the practical implementation! The hands-on experience with the platform is crucial,
+and we're here to guide you through it effectively. Recognizing that different user roles demand varying levels of understanding,
+our platform caters to analysts, data engineers, advanced analysts, and developers alike.
+
+- **Analysts:** Benefit from data ready for analysis in Keboola.
+- **Data Engineers:** Utilize all Keboola features for pipeline development and automation.
+- **Advanced Analysts:** Leverage Keboola's secure analytical workspaces for a range of exercises, from data analytics to developing machine learning models.
+- **Developers:** Explore extending the platform by creating new components or integrating Keboola APIs.
+
+Here are links to valuable resources tailored for your journey:
+- Start with a comprehensive [Keboola Introduction](https://academy.keboola.com/courses/introduction-2023)
+- Follow up with [General Best Practices](https://academy.keboola.com/courses/best-practices-2023)
+- Troubleshoot, debug and find support through our [Debugging Techniques lesson](https://academy.keboola.com/courses/debug-techniques)
+- For those interested in extending the platform by developing new components, check out this [short video](https://www.youtube.com/watch?v=IhET2hDD_1w) and the [related documentation](https://developers.keboola.com/extend/). Learn the fundamentals of Keboola Components in the corresponding [Academy lessons](https://academy.keboola.com/courses/common-components-and-processors).
+
+## Cheat Sheet: Embracing Best Practices
+Navigating the expansive landscape of the Keboola platform involves mastering numerous features and functionalities.
+While encapsulating all the best practices in a single document can be challenging, we've compiled a cheat sheet with key points to guide you
+through the process of configuring components, developing transformations, automating flows, and running jobs in Keboola.
+Access your cheat sheet [here](https://keboola.atlassian.net/wiki/spaces/KB/pages/3136978980/Cheat+Sheet+Embracing+Best+Practices).
+
+Now, let's get hands-on and make the most of your Keboola experience!
diff --git a/tutorial/onboarding/usage-blueprint/index.md b/tutorial/onboarding/usage-blueprint/index.md
new file mode 100644
index 000000000..51b8c9ffc
--- /dev/null
+++ b/tutorial/onboarding/usage-blueprint/index.md
@@ -0,0 +1,248 @@
+---
+title: Keboola Platform Usage Blueprint
+permalink: /tutorial/onboarding/usage-blueprint/
+---
+
+> Welcome to your personalized Keboola Platform Usage Blueprint Document!
+>
+> *This detailed guide helps you create your own documentation, explaining each part and what to include.
+> It's designed to fit your organization's specific needs and values. You'll find helpful notes throughout to make customization easy.
+> Once you've added your own details, you can remove these notes. Let's start creating documentation that matches your organization's unique requirements!*
+
+* TOC
+{:toc}
+
+## Getting Access to the Keboola Platform
+### Keboola Administration
+> *At the start, choose a few people (usually 2 to 4, depending on how big your organization is) to be your Keboola organization administrators.
+> They'll have the power to set up new Keboola projects and add the first users. It's best to keep this group small.*
+
+**Keboola organization admin** is a role with permissions to:
+- Leave and re-enter all existing projects in the organization.
+- View and edit billing details.
+- Manage [shared buckets](/catalog/#sharing-types).
+- Create [new projects](/management/organization/#manage-projects).
+- Change [organization settings](/management/organization/#organization-settings).
+- Allow [Keboola support team](/management/support/#require-approval-for-support-access) to join your projects.
+
+Our Keboola organization administrators are:
+- Name, [email@company.com](mailto:email@company.com)
+- Name, [email2@company.com](mailto:email2@company.com)
+- Name, [email3@company.com](mailto:email3@company.com)
+
+#### User requesting access to a project
+> *Usually, Keboola administrators keep a list of all current projects, project owners, and lead project engineers.
+> If you want access to a project, find the project leader on this list and ask them directly. They can use our public guide to add you to the project.*
+>
+> *Keep in mind, if you use a single-tenant Keboola deployment with customized identity and access management, such as Active Directory,
+> this process may not apply to you. In that case, describe your organization's own process.*
+
+To join an existing Keboola project, reach out to the **project owner** directly and ask them to invite you. We’ve listed all our current projects below.
+
+| Project ID | Name | Description | Owner |
+|---|---|---|---|
+| 111 | [PROD] Marketing | Production project for marketing campaign automation | Jane Doe; jane@company.com |
+
+#### Member accessing a project
+> *Depending on your selection, you might be operating in a multi-tenant Azure (North Europe region), AWS (US or EU regions),
+> or GCP (Europe region) stack, or a dedicated single-tenant stack. The location of the stack determines the base URL that’ll take you to the platform’s UI.
+> Check below for the link that matches your stack:*
+>
+> - Azure North Europe: [https://connection.north-europe.azure.keboola.com/admin/](https://connection.north-europe.azure.keboola.com/admin/)
+> - AWS EU: [https://connection.eu-central-1.keboola.com/admin](https://connection.eu-central-1.keboola.com/admin)
+> - AWS US: [https://connection.keboola.com/admin](https://connection.keboola.com/admin)
+> - GCP EU: [TODO Please insert the link for GCP EU]
+> - Single tenant stack: All relevant information is available within your Production Design document.
+
+Navigate to the login site of the Keboola platform here: [https://connection.keboola.com/admin](https://connection.keboola.com/admin).
+After you log in, you'll see a list of projects you can access. Click on the selected project name to access the project environment.
+
+#### Requesting a new project
+If you wish to develop your own use cases in Keboola, reach out to one of the organization administrators mentioned above to create a project for you.
+
+> *To have a new project created, contact a Keboola administrator. Organization administrators are the only ones who can set up Keboola projects.*
+>
+> *The way to request a new project might vary based on the company size. In larger companies, using a form or questionnaire for project requests is common.
+> This helps give the administrators the extra information they need to create the project.*
+
+#### User termination
+Terminated users must be manually removed from all projects they are members of. Keboola organization administrators can use telemetry data
+(see the [**Keboola Governance Guide**](/tutorial/onboarding/governance-guide/) for more details) to determine which projects to remove them from.
+
+Alternatively, project owners can remove the terminated user themselves.
+
+***Note:** Removing a user from a project will not affect any configurations they set up.
+All their configurations will remain usable and functional after the user is removed.*
+
+### Project Naming Conventions
+> *It is a good idea to establish and follow a convention for creating project names so they are clear and show what the project is about.
+> We suggest some of the typical conventions below. See the [Multi-Project Architecture Guide](/tutorial/onboarding/architecture-guide/)
+> for more on how to name different project levels, stages, etc.*
+>
+> *In the following example, we use numerical codes like 00, 10, and 20 to show project levels, but you can also use L0, L1, L2, or other styles.*
+
+All projects in our organization follow this naming convention:
+
+`[STAGE]{Domain - optional}[Region - optional] Project Name`
+
+In the **Project Name**, we capitalize the first letter of each word, except for conjunctions like "and" and "or."
+
+Here are examples of how we name projects:
+
+- `[10]{Sales}[EU] Financial Reporting`
+- `[00]{Sales} Corporate Profiling`
+- `[20]{HR}[GLOBAL] Compensation and Benefits Planning`
+
+## Keboola Project Rules and Principles
+### Managing Project Users
+> *As mentioned before, a project owner is mainly responsible for managing project users. Keboola has different roles for project users.*
+
+The project owner needs to ensure users are added with the right roles. Keboola user roles are documented [here](https://help.keboola.com/management/project/users/#user-roles).
+Remember, being a project owner is a formal role and doesn’t directly correspond to specific Keboola project roles and privileges.
+Usually, users are invited as project admins or with a sharing role unless they need a different one.
+
+To invite or remove a user from your project, follow the steps in the [Keboola documentation](https://help.keboola.com/management/project/users/#inviting-user).
+
+### Naming Conventions
+> *Using naming conventions for all Keboola components is recommended to keep your project well organized, comprehensible, and simpler to manage and navigate.
+> The guidelines below are just suggestions, as there is no universally recognized best practice for naming.*
+
+Make sure to apply the naming rules below to all configurations of Keboola components, including data source and data destination connectors,
+transformations, workspaces, and flows, as well as to any Storage objects you create.
+
+#### Component configurations
+When naming a component (like a data connector or application), include its use case or category and, if relevant, its domain.
+Start each word in the configuration name with a capital letter.
+
+`{Domain}[USECASE - optional] Configuration Name`
+
+Examples:
+- `{Sales}[REPORTING] Payments and Invoices`
+- `{Operations}[PLANNING] Workloads and Plan`
+
+For components with configuration rows, like connectors loading data from a database, name each row after the specific table it connects to.
+The name should reflect the specific endpoint or domain if it's an API or service connector.
+
+#### Transformations
+When naming transformations or optional folders, use a format like `[USECASE - optional] Transformation Name` to keep them organized.
+
+Examples:
+- `[REPORTING] Payment Data Preprocessing`
+- `[REPORTING] Invoices Denormalization`
+
+If transformations are part of a bigger process and run in a set order, add a number to show the sequence:
+
+- `[REPORTING][00] Payment Data Preprocessing`
+- `[REPORTING][01] Invoices Denormalization`
+- `[REPORTING][02] Main Report Calculation`
+
+Transformations can also be grouped into transformation folders. Name such folders using the following format: `[USECASE] Transformation Folder Name`.
+
+Example:
+- `[REPORTING] Financial Reporting`
+
+#### Workspaces
+You can name private workspaces however you like. For shared workspaces, use `[USECASE]{Owner - optional} Workspace Name`.
+
+Example:
+- `[REPORTING]{Jane} ML Model Development`
+
+#### Flows
+Flow names need to state their purpose and use case. If flows within a project depend on each other, use the `[STAGE]` sequence tags (e.g., `[00]`, `[01]`) as in the Transformations section.
+You can add a domain if it is not already part of the project name. The convention is `[USECASE]{Domain}[STAGE] Flow Name`.
+
+Examples:
+- `[REPORTING]{Sales} Main Reporting Calculations`
+- `[PLANNING][00] Data extractions`
+- `[PLANNING][01] Data normalization`
+
+Flows can also be grouped into flow folders. Name such folders using the following format: `{Group} Flow Folder Name`.
+The `{Group}` can be, for example, the `Domain` or `STAGE` or simply anything that helps to organize your flows.
+
+Example:
+- `{Sales} Financial Reporting`
+
+#### Storage
+Keep in mind that Storage bucket and table names are automatically created by Keboola data source connectors.
+The following naming conventions are for objects you create yourself.
+
+For Storage buckets, tables, and columns (see the SQL sketch right after this list):
+1. Use uppercase `SNAKE_CASE` naming.
+2. Don't use `OUT` buckets for temporary tables. `OUT` buckets should only contain data that's ready to use. See Keboola's documentation for more on IN and OUT Storage buckets [here](/storage/buckets/).
+3. Mark buckets from the [Data Catalog](/catalog/) as `SHARED`, like `SHARED_REPORTING_FINANCIAL`.
+4. Be specific with names. Instead of `MAIN`, use something descriptive like `SALES_METRICS`. Even in specific projects, use `SALES_MAIN` rather than just `MAIN`.
+5. Clearly separate IN/OUT Storage stages:
+   - IN stage: Incoming data, like raw data or shared buckets
+   - OUT stage: Processed data ready to be used elsewhere (in BI tools, Snowflake, other projects).
+6. Set `_PK` and `_ID` suffixes on columns within each table to mark primary and foreign keys.
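+
+As a quick illustration, here is a minimal SQL sketch of a transformation output that follows these rules. The table and column names are hypothetical examples, not part of any real project:
+
+{% highlight sql %}
+-- Hypothetical OUT-stage table: descriptive SNAKE_CASE name instead of a generic MAIN
+CREATE TABLE "SALES_METRICS" AS
+    SELECT
+        "ORDER_PK",       -- primary key of the order
+        "CUSTOMER_ID",    -- foreign key to the CUSTOMERS table
+        "ORDER_DATE",
+        "AMOUNT"
+    FROM "ORDERS";
+{% endhighlight %}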
+
+**Important:** Some data source connectors create Storage buckets that you cannot rename.
+Generally, don’t apply these naming conventions to anything loaded into Keboola via a data source connector (e.g., a table, CSV file, or JSON).
+Keep the original names, and use these naming conventions only for the layers you add on top, for consistency and simplicity.
+
+### Descriptions
+Every component configuration, transformation, or flow should have a description that briefly explains (in two or three sentences) its role in the data flow.
+Here are some tips, followed by an illustrative example:
+
+1. Add more details further into the description, like any implementation specifics or important points to note.
+2. Use Markdown to highlight important sections.
+3. Include links to related objects or other parts of the project, e.g., `Keep in sync with the [original transformation](https://connection.keboola.com/admin/projects/xx/transformations/bucket/1234/transformation/1)`.
+4. Consider mentioning the original owner of the configuration, even though this can be seen in the version history.
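+
+For illustration, the description of a hypothetical transformation might read:
+
+*"[REPORTING] Preprocesses raw payment data: deduplicates transactions and joins the currency-rate table.
+Runs as part of the `[REPORTING][00]` flow; **do not trigger it manually**. Original owner: Jane Doe."*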
+
+### Sharing Data via Data Catalog
+> *This depends on your multi-project architecture. Here’s a guide for a system with multiple project layers.*
+
+Only users with `organization admin` or `share` role permissions can share buckets.
+
+As a rule, avoid sharing data from higher to lower stages/layers. Limit sharing within the same stage/layer unless necessary.
+
+**Sharing from level 00**
+- Share only with 10-level projects.
+- Share only `OUT` stage buckets. Share tables into the output buckets using aliases if no transformation is needed.
+
+**Sharing from stages 10 and higher**
+- Use `SHARED` as a prefix for shared output buckets.
+- Share buckets only from the `OUT` stage.
+
+**Linking buckets**
+- Buckets from other projects are always in the `IN` stage, prefixed with `SHARED` and a reference to the input project.
+
+### Using Development Branches
+> *[Development branches](/components/branches/) are separate areas for independently developing or modifying configurations for a specific purpose.
+> Changes made within a branch become accessible to other branches or projects only after they are merged into production.*
+>
+> *Some users implement changes in development branches but create new configurations directly in the production branch. Others might not use branches,
+> depending on their internal rules.*
+
+When creating or modifying configurations, use a development branch. Name it `{Owner}[USECASE] Branch Name` to show its purpose, like
+`{Jane}[FINANCIAL REPORTING] New MSSQL data integration`.
+
+Run components like data source connectors, applications, and transformations in the development branch first to test them.
+
+Be careful with data destination connectors, as they can still overwrite production data.
+
+## Security Principles
+> *Include your organization’s specific security principles here. The platform might not directly enforce some,
+> but Organization admins can monitor them using telemetry data.*
+>
+> *Here are some of the commonly followed principles:*
+
+- Only project owners invite users to their projects.
+- Every user must enable MFA (multi-factor authentication).
+- All changes should be made in a development branch.
+- Don’t store sensitive information like passwords, tokens, or keys in plain text in configurations (e.g., don’t use a Python transformation to integrate with an API that requires token authentication).
+- Consult the project owner before using data destination components to write data out of projects.
+
+### Admin Project
+> *For organizations with multiple projects, creating a dedicated admin project is helpful. This is a central hub for administrators to manage
+> and analyze telemetry data. Find more details in the [Keboola Governance Guide](/tutorial/onboarding/governance-guide/).*
+
+Our admin project link: [https://connection.keboola.com/admin/projects/XXX](https://connection.keboola.com/admin/projects/)
+
+The project extracts the organization's telemetry data and prepares it for visualization and reporting.
diff --git a/tutorial/write/gooddata-dashboard-1.png b/tutorial/write/gooddata-dashboard-1.png
deleted file mode 100644
index f42cd0748..000000000
Binary files a/tutorial/write/gooddata-dashboard-1.png and /dev/null differ
diff --git a/tutorial/write/gooddata-dashboard-2.png b/tutorial/write/gooddata-dashboard-2.png
deleted file mode 100644
index dde200a8c..000000000
Binary files a/tutorial/write/gooddata-dashboard-2.png and /dev/null differ
diff --git a/tutorial/write/gooddata-dashboard-3.png b/tutorial/write/gooddata-dashboard-3.png
deleted file mode 100644
index 8aed749ca..000000000
Binary files a/tutorial/write/gooddata-dashboard-3.png and /dev/null differ
diff --git a/tutorial/write/gooddata-intro.png b/tutorial/write/gooddata-intro.png
deleted file mode 100644
index 5557b443b..000000000
Binary files a/tutorial/write/gooddata-intro.png and /dev/null differ
diff --git a/tutorial/write/gooddata-transformation-create-1.png b/tutorial/write/gooddata-transformation-create-1.png
deleted file mode 100644
index 5bc4860b0..000000000
Binary files a/tutorial/write/gooddata-transformation-create-1.png and /dev/null differ
diff --git a/tutorial/write/gooddata-transformation-create-2.png b/tutorial/write/gooddata-transformation-create-2.png
deleted file mode 100644
index 62cdb9787..000000000
Binary files a/tutorial/write/gooddata-transformation-create-2.png and /dev/null differ
diff --git a/tutorial/write/gooddata-transformation-intro.png b/tutorial/write/gooddata-transformation-intro.png
deleted file mode 100644
index 561af202c..000000000
Binary files a/tutorial/write/gooddata-transformation-intro.png and /dev/null differ
diff --git a/tutorial/write/gooddata-transformation-mapping.png b/tutorial/write/gooddata-transformation-mapping.png
deleted file mode 100644
index d76f25ac7..000000000
Binary files a/tutorial/write/gooddata-transformation-mapping.png and /dev/null differ
diff --git a/tutorial/write/gooddata-transformation-queries.png b/tutorial/write/gooddata-transformation-queries.png
deleted file mode 100644
index 153f613de..000000000
Binary files a/tutorial/write/gooddata-transformation-queries.png and /dev/null differ
diff --git a/tutorial/write/gooddata-transformation-run.png b/tutorial/write/gooddata-transformation-run.png
deleted file mode 100644
index 889f2dcce..000000000
Binary files a/tutorial/write/gooddata-transformation-run.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-add-table.png b/tutorial/write/gooddata-writer-add-table.png
deleted file mode 100644
index 2209329e6..000000000
Binary files a/tutorial/write/gooddata-writer-add-table.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-create-demo-project.png b/tutorial/write/gooddata-writer-create-demo-project.png
deleted file mode 100644
index 2640688df..000000000
Binary files a/tutorial/write/gooddata-writer-create-demo-project.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-date-button.png b/tutorial/write/gooddata-writer-date-button.png
deleted file mode 100644
index 208886ff2..000000000
Binary files a/tutorial/write/gooddata-writer-date-button.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-date-dimension.png b/tutorial/write/gooddata-writer-date-dimension.png
deleted file mode 100644
index dd8c0ec84..000000000
Binary files a/tutorial/write/gooddata-writer-date-dimension.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-date-list.png b/tutorial/write/gooddata-writer-date-list.png
deleted file mode 100644
index 199cd8de3..000000000
Binary files a/tutorial/write/gooddata-writer-date-list.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-intro-1.png b/tutorial/write/gooddata-writer-intro-1.png
deleted file mode 100644
index 2e2b80bcc..000000000
Binary files a/tutorial/write/gooddata-writer-intro-1.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-intro-3.png b/tutorial/write/gooddata-writer-intro-3.png
deleted file mode 100644
index 128413dbb..000000000
Binary files a/tutorial/write/gooddata-writer-intro-3.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-intro-4.png b/tutorial/write/gooddata-writer-intro-4.png
deleted file mode 100644
index c0367e01b..000000000
Binary files a/tutorial/write/gooddata-writer-intro-4.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-intro-5.png b/tutorial/write/gooddata-writer-intro-5.png
deleted file mode 100644
index 90a59c448..000000000
Binary files a/tutorial/write/gooddata-writer-intro-5.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-intro-6.png b/tutorial/write/gooddata-writer-intro-6.png
deleted file mode 100644
index 568c4ade9..000000000
Binary files a/tutorial/write/gooddata-writer-intro-6.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-intro-7.png b/tutorial/write/gooddata-writer-intro-7.png
deleted file mode 100644
index 2d3ca1dd5..000000000
Binary files a/tutorial/write/gooddata-writer-intro-7.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-intro-setup-project.png b/tutorial/write/gooddata-writer-intro-setup-project.png
deleted file mode 100644
index 5edde26a3..000000000
Binary files a/tutorial/write/gooddata-writer-intro-setup-project.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-new-table-button.png b/tutorial/write/gooddata-writer-new-table-button.png
deleted file mode 100644
index a479e40ec..000000000
Binary files a/tutorial/write/gooddata-writer-new-table-button.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-table-config-1.png b/tutorial/write/gooddata-writer-table-config-1.png
deleted file mode 100644
index ce35e1ce9..000000000
Binary files a/tutorial/write/gooddata-writer-table-config-1.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-table-config-2.png b/tutorial/write/gooddata-writer-table-config-2.png
deleted file mode 100644
index 70467cca2..000000000
Binary files a/tutorial/write/gooddata-writer-table-config-2.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-table-config-3.png b/tutorial/write/gooddata-writer-table-config-3.png
deleted file mode 100644
index f35f9da61..000000000
Binary files a/tutorial/write/gooddata-writer-table-config-3.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-table-config-3a.png b/tutorial/write/gooddata-writer-table-config-3a.png
deleted file mode 100644
index f334736cd..000000000
Binary files a/tutorial/write/gooddata-writer-table-config-3a.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-table-config-3b.png b/tutorial/write/gooddata-writer-table-config-3b.png
deleted file mode 100644
index 0e07148c1..000000000
Binary files a/tutorial/write/gooddata-writer-table-config-3b.png and /dev/null differ
diff --git a/tutorial/write/gooddata-writer-table-config.png b/tutorial/write/gooddata-writer-table-config.png
deleted file mode 100644
index a47a9e273..000000000
Binary files a/tutorial/write/gooddata-writer-table-config.png and /dev/null differ
diff --git a/tutorial/write/gooddata.md b/tutorial/write/gooddata.md
deleted file mode 100644
index 5a1de02b3..000000000
--- a/tutorial/write/gooddata.md
+++ /dev/null
@@ -1,248 +0,0 @@
----
-title: Writing to GoodData
-permalink: /tutorial/write/gooddata/
----
-
-* TOC
-{:toc}
-
-After [manipulating data in SQL](/tutorial/manipulate/) and [writing data to Tableau](/tutorial/write/),
-let's now write data to [GoodData](http://www.gooddata.com/).
-
-**You need to have a GoodData account** before you start.
-
-Writing data to GoodData is very similar to writing data to Tableau, although
-there are some changes due to the fundamental differences in both platforms.
-The GoodData platform uses the concept of
-[Logical Models](https://help.gooddata.com/doc/en/building-on-gooddata-platform/data-modeling-in-gooddata)
-where multiple tables are loaded into the platform together with their logical connection model (schema).
-Keboola Connection will assist you in creating the model.
-
-## Prepare Data for Writer
-
-To load individual tables instead of denormalized tables, the [transformation created earlier](/tutorial/manipulate/)
-has to be modified. Go to **Transformations** and create a new transformation bucket and a new transformation.
-For the sake of practicing, let's create a brand new transformation instead of modifying the existing one.
-
-{: .image-popup}
-
-
-Apart from creating a new transformation, we also need a new transformation bucket, since the
-Tableau and GoodData transformations are not really related. If they were more complex, we would take out the
-similar parts into another transformation. Name the new transformation bucket *Opportunity - GoodData* and
-choose the **Snowflake** backend.
-
-{: .image-popup}
-
-
-Then add a new transformation and name it.
-
-{: .image-popup}
-
-
-Now set the transformation input mapping. Include the following tables from the `in.c-csv-import` storage bucket:
-`opportunity`, `account`, `level`, and `user`.
-If you loaded data using the
-[Database extractor](/tutorial/load/database/) or [Google Drive extractor](/tutorial/load/googledrive/),
-feel free to use the tables created by them (e.g., `in.c-keboola-ex-db-snowflake-548904898.account` or `in.c-keboola-ex-google-drive-548902224.level-level`). In either case, make sure that the destinations
-are set to `account`, `opportunity`, `user` and `level`.
-Then create the output mapping for the `out_opportunity`, `out_user`, and `out_account` tables
-to be stored in the `out.c-opportunity-gooddata` output bucket.
-
-{: .image-popup}
-
-
-Use the following four SQL queries to create the output tables.
-
-{% highlight sql %}
-CREATE TABLE "tmp_level" AS
- SELECT "Name", CASE "Level"
- WHEN 'S' THEN 'Senior'
- WHEN 'M' THEN 'Intermediate'
- WHEN 'J' THEN 'Junior' END AS "Level"
- FROM "level";
-
-CREATE TABLE "out_opportunity" AS
- SELECT *, CASE
- WHEN "Probability" < 50 THEN 'Poor'
- WHEN "Probability" < 70 THEN 'Good'
- ELSE 'Excellent' END AS "ProbabilityClass"
- FROM "opportunity";
-
-CREATE TABLE "out_user" AS
- SELECT "user"."Name" AS "Name", "user"."Sales_Market" AS "UserSalesMarket",
- "user"."Global_Market" AS "UserGlobalMarket"
- FROM
- "user" JOIN "tmp_level" ON "user"."Name" = "tmp_level"."Name";
-
-CREATE TABLE "out_account" AS
- SELECT * FROM "account";
-{% endhighlight %}
-
-{: .image-popup}
-
-
-Run the transformation.
-
-{: .image-popup}
-
-
-This will create a background job which will
-
-- take the four tables in the transformation input mapping from Storage,
-- modify them with the SQL queries of the Transformation script, and
-- create three new tables in the `out.c-opportunity-gooddata` output bucket in Storage.
-
-To see if the transformation job has finished, go to **Jobs**, or click on the little **Transformations job has been scheduled** window
-that pops up after a transformation starts running. When finished, or while waiting for the job to end, continue configuring the GoodData writer.
-
-## Configure Writer
-Start by creating a new writer in the **Components -- Writers** section:
-
-{: .image-popup}
-
-
-The GoodData writer can have multiple configurations (as any other writer or extractor). Each configuration represents a set
-of data loaded into a single GoodData project. **New Configuration** to continue:
-
-{: .image-popup}
-
-
-And choose its name:
-
-{: .image-popup}
-
-
-**Set Up GoodData Project** to continue:
-
-{: .image-popup}
-
-
-Keboola Connection can create a free Demo GoodData project for you. However, it expires in one
-month from the date it was created.
-
-{: .image-popup}
-
-
-Let's create a [*Date Dimension*](https://help.gooddata.com/doc/en/reporting-and-dashboards/dates-and-times).
-
-{: .image-popup}
-
-
-Name the dimension `first_order`:
-
-{: .image-popup}
-
-
-Now let's configure the tables that are to be loaded to the project.
-
-{: .image-popup}
-
-
-Add the `account` table from the `out.c-opportunity-gooddata` bucket. When adding a table,
-simplify the table title to just the table name (we have only few tables).
-
-{: .image-popup}
-
-
-Configure the type of each column. Mark
-
-- the [primary key](https://en.wikipedia.org/wiki/Unique_key), identifier, as `CONNECTION_POINT`,
-- columns which we want to measure as `FACT`,
-- all other columns used for slicing and filtering as `ATTRIBUTE`, and
-- date/datetime columns, being an exception, as `DATE`.
-
-Do not set the *Data Type* column.
-
-Set the previously created date dimension `first_order` to the *FirstOrder* column.
-
-{: .image-popup}
-
-
-**Save** the column settings.
-
-{: .image-popup}
-
-
-Then go back to the writer configuration, and add the `out.c-opportunity-gooddata.user` table.
-
-{: .image-popup}
-
-
-Name the table *user* and set the *Name* column to `CONNECTION_POINT` and everything else to `ATTRIBUTE`.
-
-{: .image-popup}
-
-
-Save the table configuration, and go back to the writer configuration.
-
-Add four other date dimensions called `created_date`, `close_date`, `start_date` and `end_date`. In case of `created_date`, tick the **Include Time** checkbox when creating the date dimension. The result should look like this:
-
-{: .image-popup}
-
-
-Add the third table called
-`out.c-opportunity-gooddata.out_opportunity`. Name it *opportunity* and set the columns as follows:
-
-- *Amount* and *Probability* to `FACT`,
-- *AccountId* and *OwnerId* to `REFERENCE` and connect them to tables `account` and `user`,
-- *CreatedDate*, *CloseDate*, *Start_Date* and *End_Date* to `DATE` and connect them to the previously created date dimensions,
-- *Id* to `IGNORE` (we won't be needing it any more), and
-- everything else to `ATTRIBUTE`.
-
-You should obtain the following result:
-
-{: .image-popup}
-
-
-{: .image-popup}
-
-
-{: .image-popup}
-
-
-Save the table configuration, and go back to configuring the writer.
-
-Now click on **Run Component** to push the tables to GoodData:
-
-{: .image-popup}
-
-
-The tables will be written into GoodData by a background job. When a job is running, a small orange circle appears
-under *Last runs*, along with RunId and other info on the job. Green is for success, red for failure.
-Click on the indicator, or the info next to it for more details.
-
-In the meantime, click on **GoodData Project** to reveal other options and **Go To Project**.
-
-{: .image-popup}
-
-
-Then create your report:
-
-{: .image-popup}
-
-
-First, specify a metric. It can be computed from columns we marked as `FACT`
-when setting up the writer (those are *Amount* and *Probability*).
-Let's add a metric for `SUM` of *Amount*,
-
-{: .image-popup}
-
-
-Then specify how this metric should be sliced in the **How** section. Let's slice it by
-`ProbabilityClass`:
-
-{: .image-popup}
-
-
-Additional slices or filters can be added in the dashboard wizard. To close the wizard, click **Done** and the
-result will be shown to you as a table. To change the view, use the appropriate icon in the
-top right corner.
-
-{: .image-popup}
-
-
-This will give you the same chart we produced in the [Tableau Example](/tutorial/write/).
-
-The tutorial on writing data to GoodData BI platform using Keboola Connection ends here.
-Continue to [Setting up Automation](/tutorial/automate/).
diff --git a/tutorial/write/index.md b/tutorial/write/index.md
index 7ab50d220..f5c8cb8a7 100644
--- a/tutorial/write/index.md
+++ b/tutorial/write/index.md
@@ -1,149 +1,78 @@
---
-title: Part 3 - Writing Data
+title: "Part 3: Writing Data"
permalink: /tutorial/write/
---
* TOC
{:toc}
-This part of our tutorial will guide you through the process of writing data from Keboola Connection.
-You have learned to [manipulate data](/tutorial/manipulate/) in Keboola Connection using SQL,
-and have a denormalized table called `opportunity_denorm` ready in the `out.c-tutorial` storage bucket.
-This table is suitable to be loaded into [Tableau Analytics](https://www.tableau.com/).
+This section of our tutorial will walk you through the process of writing data from Keboola to a destination. This step is commonly referred to as reverse ETL.
+Having already learned how to [manipulate data](/tutorial/manipulate/) in Keboola using SQL, you now have a denormalized table called `opportunity_denorm`
+ready in Storage.
-## Getting Started
+In this tutorial, we will push this table to a **Google Sheets** destination. Note that other typical destinations include BI tools, databases, or even applications and APIs, such as CRM systems.
-**Before you proceed, have [Tableau Desktop](https://www.tableau.com/products/desktop) installed**.
-If you want to try connection to Tableau Server, have credentials for that server as well.
-As an alternative, sign up for a free trial of [Tableau Online](https://www.tableau.com/products/cloud-bi) to test it out.
+1. Navigate to **Components**, click the **Add Component** button and use the search bar to find *Sheets*.
-Writing data from Keboola Connection into a business intelligence and analytics tool such as Tableau is very common.
-Writing data to GoodData BI is covered in the following [side step](/tutorial/write/gooddata/).
-However, keep in mind you can use the processed data in any way you wish.
+ {: .image-popup}
+ 
-There are three options how to load the `opportunity_denorm` table into Tableau:
+2. Click **Add Component** and then click **Connect To My Data**.
-- Writing data to a provisioned Snowflake/Redshift database
-- Generating a [Tableau Data Extract (TDE)](https://www.tableau.com/about/blog/2014/7/understanding-tableau-data-extracts-part1)
-and loading it manually into Tableau Desktop
-- Generating a TDE and loading it into Tableau Server, either manually or automatically
+ {: .image-popup}
+ 
-In either case, you need a writer component from the **Components -- Writers** menu.
+3. Enter a *Name* and *Description* and click **Create Configuration**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-In this tutorial, we'll go with the first option -- configuring the **Snowflake writer** as it is the easiest and fastest to use.
-The description of the [Tableau TDE writer](/components/writers/bi-tools/tableau/) is part of the [writers](/components/writers/)
-documentation. Click **Add New Writer**, find the Snowflake writer and click it.
+4. Now, we need to authorize the Google account to which we want to write the data.
+This process is similar to what we’ve done in the [Loading data from Google Sheets data source](/tutorial/load/googlesheets/) step of this tutorial.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Each writer can have multiple **configurations**. Each configuration represents a combination of data and destination.
-To give an example, you only need a single configuration to write multiple tables into a single Tableau server.
-However, two configurations are needed when you want to write data to two servers, or
-have a set of data loaded manually and a different set automatically.
-Continue with **New Configuration**.
+5. Enter a name for your connection and click **Sign in with Google**. You can also utilize *external authorization* if you need your colleagues
+to authorize their accounts. Please note that this authorization will only allow you to write data into a Google Spreadsheet.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-Name the configuration and click **Create Configuration**.
+6. Click **Allow**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-At this moment, you're probably wondering why we are using the Snowflake database and where and how you are going to
-get credentials to it. The answer is near -- click the **Set up credentials** button:
+7. Click **New Table** now to select the `opportunity_denorm` table from your Storage.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-As part of the Keboola Connection platform we offer a
-[dedicated database workspace](/components/writers/database/snowflake/#keboola-snowflake-database) that you can use to connect
-[external tools](/components/writers/database/snowflake/#using-keboola-provisioned-database). Simply click
-on **Keboola Snowflake Database**:
+8. Select the table and click **Next**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
-You will obtain a dedicated database and credentials to it. Use the lock icon to display
-the password if you wish. Go back to the Snowflake writer configuration.
+9. You can either create a **new spreadsheet** or load data to an **existing spreadsheet**. Click **New spreadsheet** now and then click **Next**.
-{: .image-popup}
-
+ {: .image-popup}
+ 
+
+10. Enter a name for your sheet, select **Update rows**, and click **Save Sheet**. This will create a new empty spreadsheet under the authorized account.
-The next step is to add tables -- click the **Add Table** button.
+ {: .image-popup}
+ 
-{: .image-popup}
-
+11. To load the data into the created spreadsheet, click the **Run Component** button.
-Select the table `out.c-opportunity.opportunity_denorm` and click **Add Table**:
+12. After the job is executed, you can click the spreadsheet name to open the Google Drive spreadsheet (assuming you have access to the spreadsheet).
-{: .image-popup}
-
+ {: .image-popup}
+ 
-In the next step, you can specify properties of the columns in the target database, like `Name` and `Data Type`.
-Use the preview column to peek at the column data. Most columns in the `opportunity_denorm` table are strings (characters).
-Start with `Set All Columns to:` and select `string` to set them quickly.
-Then **Preview** the content of each column and set its type accordingly.
-For the purpose of this tutorial, it is enough to set the *Amount* column to the type `number`.
-Don't forget to **Save** the settings.
+## What’s Next
+Proceed to [Flow Automation](/tutorial/automate/) for the next step in the tutorial.
-{: .image-popup}
-
-
-When done, go back to the configuration and click on **Run Component** to write the
-data to the provisioned database.
-
-{: .image-popup}
-
-
-## Connecting with Tableau
-Now that you have prepared the data source, you can connect to it from Tableau. Login to Tableau online
-and **Create Workbook**:
-
-{: .image-popup}
-
-
-A connection to a datasource will be requested. Choose *Connectors* and *Snowflake*:
-
-{: .image-popup}
-
-
-Enter the credentials from the Snowflake writer configuration (you can always review them by
-clicking on the **Database Credentials** button in the right menu).
-
-{: .image-popup}
-
-
-Select *Warehouse*, *Database* and *Schema* -- there is only one option because the database is completely
-isolated and created just for the purpose of your writer configuration. If in doubt, however, you can
-always check the database credentials in the Snowflake writer configuration.
-You will see the *opportunity_denorm* table.
-
-{: .image-popup}
-
-
-You can now work with the data in Tableau.
-You can also check that the *amount* column was converted to numeric.
-
-{: .image-popup}
-
-
-Create charts and reports as usual, and publish them to other people.
-
-{: .image-popup}
-
-
-## Semi-final Note
-This concludes the main steps of the Keboola Connection tutorial. You have learned to load data into **Storage**,
-manipulate it using **Transformations**, and load it into the target system using **Writers**.
-
-At this point, you can
-
-- [return to the tutorial index](/tutorial/) for additional steps,
-- take a brief side-step on how to set up a [writer to GoodData BI](/tutorial/write/gooddata/),
-- continue to [Setting up Automation](/tutorial/automate/), or just
-- [talk to us](/).
+## If You Need Help
+Feel free to reach out to our [support team](/management/support/) if there’s anything we can help with.
diff --git a/tutorial/write/snowflake-columns.png b/tutorial/write/snowflake-columns.png
deleted file mode 100644
index acbf66bb4..000000000
Binary files a/tutorial/write/snowflake-columns.png and /dev/null differ
diff --git a/tutorial/write/snowflake-config.png b/tutorial/write/snowflake-config.png
deleted file mode 100644
index 385780018..000000000
Binary files a/tutorial/write/snowflake-config.png and /dev/null differ
diff --git a/tutorial/write/snowflake-create-config.png b/tutorial/write/snowflake-create-config.png
deleted file mode 100644
index 0a6b6e832..000000000
Binary files a/tutorial/write/snowflake-create-config.png and /dev/null differ
diff --git a/tutorial/write/snowflake-credentials-2.png b/tutorial/write/snowflake-credentials-2.png
deleted file mode 100644
index e15995c57..000000000
Binary files a/tutorial/write/snowflake-credentials-2.png and /dev/null differ
diff --git a/tutorial/write/snowflake-credentials.png b/tutorial/write/snowflake-credentials.png
deleted file mode 100644
index 6f56e6d73..000000000
Binary files a/tutorial/write/snowflake-credentials.png and /dev/null differ
diff --git a/tutorial/write/snowflake-intro.png b/tutorial/write/snowflake-intro.png
deleted file mode 100644
index c14e8ff0a..000000000
Binary files a/tutorial/write/snowflake-intro.png and /dev/null differ
diff --git a/tutorial/write/snowflake-run.png b/tutorial/write/snowflake-run.png
deleted file mode 100644
index e72cb11a2..000000000
Binary files a/tutorial/write/snowflake-run.png and /dev/null differ
diff --git a/tutorial/write/tableau-1.png b/tutorial/write/tableau-1.png
deleted file mode 100644
index 8fcd9db9a..000000000
Binary files a/tutorial/write/tableau-1.png and /dev/null differ
diff --git a/tutorial/write/tableau-2.png b/tutorial/write/tableau-2.png
deleted file mode 100644
index f057e8097..000000000
Binary files a/tutorial/write/tableau-2.png and /dev/null differ
diff --git a/tutorial/write/tableau-3.png b/tutorial/write/tableau-3.png
deleted file mode 100644
index dc3b8d6c1..000000000
Binary files a/tutorial/write/tableau-3.png and /dev/null differ
diff --git a/tutorial/write/tableau-4.png b/tutorial/write/tableau-4.png
deleted file mode 100644
index 9a1952fa6..000000000
Binary files a/tutorial/write/tableau-4.png and /dev/null differ
diff --git a/tutorial/write/tableau-5.png b/tutorial/write/tableau-5.png
deleted file mode 100644
index 45517f2af..000000000
Binary files a/tutorial/write/tableau-5.png and /dev/null differ
diff --git a/tutorial/write/tableau-6.png b/tutorial/write/tableau-6.png
deleted file mode 100644
index ac39ec028..000000000
Binary files a/tutorial/write/tableau-6.png and /dev/null differ
diff --git a/tutorial/write/tableau-select-table-2.png b/tutorial/write/tableau-select-table-2.png
deleted file mode 100644
index f10f282d8..000000000
Binary files a/tutorial/write/tableau-select-table-2.png and /dev/null differ
diff --git a/tutorial/write/tableau-select-table.png b/tutorial/write/tableau-select-table.png
deleted file mode 100644
index 1c1f3e9d9..000000000
Binary files a/tutorial/write/tableau-select-table.png and /dev/null differ
diff --git a/tutorial/write/writers-intro-2.png b/tutorial/write/writers-intro-2.png
deleted file mode 100644
index a93d6265b..000000000
Binary files a/tutorial/write/writers-intro-2.png and /dev/null differ
diff --git a/tutorial/write/writers-intro.png b/tutorial/write/writers-intro.png
deleted file mode 100644
index 5a3602092..000000000
Binary files a/tutorial/write/writers-intro.png and /dev/null differ
diff --git a/tutorial/write/writing1.png b/tutorial/write/writing1.png
new file mode 100644
index 000000000..beaf6c2d4
Binary files /dev/null and b/tutorial/write/writing1.png differ
diff --git a/tutorial/write/writing10.png b/tutorial/write/writing10.png
new file mode 100644
index 000000000..c0f3d3b28
Binary files /dev/null and b/tutorial/write/writing10.png differ
diff --git a/tutorial/write/writing11.png b/tutorial/write/writing11.png
new file mode 100644
index 000000000..74a6bda6c
Binary files /dev/null and b/tutorial/write/writing11.png differ
diff --git a/tutorial/write/writing2.png b/tutorial/write/writing2.png
new file mode 100644
index 000000000..dfd56a3e2
Binary files /dev/null and b/tutorial/write/writing2.png differ
diff --git a/tutorial/write/writing3.png b/tutorial/write/writing3.png
new file mode 100644
index 000000000..e6231a1cb
Binary files /dev/null and b/tutorial/write/writing3.png differ
diff --git a/tutorial/write/writing4.png b/tutorial/write/writing4.png
new file mode 100644
index 000000000..84a54df01
Binary files /dev/null and b/tutorial/write/writing4.png differ
diff --git a/tutorial/write/writing5.png b/tutorial/write/writing5.png
new file mode 100644
index 000000000..abaf703f0
Binary files /dev/null and b/tutorial/write/writing5.png differ
diff --git a/tutorial/write/writing6.png b/tutorial/write/writing6.png
new file mode 100644
index 000000000..c583081f9
Binary files /dev/null and b/tutorial/write/writing6.png differ
diff --git a/tutorial/write/writing7.png b/tutorial/write/writing7.png
new file mode 100644
index 000000000..b82b1231d
Binary files /dev/null and b/tutorial/write/writing7.png differ
diff --git a/tutorial/write/writing8.png b/tutorial/write/writing8.png
new file mode 100644
index 000000000..a2bd5219c
Binary files /dev/null and b/tutorial/write/writing8.png differ
diff --git a/tutorial/write/writing9.png b/tutorial/write/writing9.png
new file mode 100644
index 000000000..858a56c99
Binary files /dev/null and b/tutorial/write/writing9.png differ