diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 27ae02d27..59ded04ec 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -21,16 +21,16 @@ Closes #ISSUE Before merging a pull request, run through this checklist and mark each as complete. -- [ ] I have read the [contributing guidelines](/CONTRIBUTING.md) +- [ ] I have read the [contributing guidelines](https://github.com/nginx/documentation/blob/main/CONTRIBUTING.md) - [ ] I have signed the [F5 Contributor License Agreement (CLA)](https://github.com/f5/.github/blob/main/CLA/cla-markdown.md) -- [ ] I have ensured that documentation content adheres to [the style guide](/templates/style-guide.md) +- [ ] I have ensured that documentation content adheres to [the style guide](https://github.com/nginx/documentation/blob/main/templates/style-guide.md) - [ ] If the change involves potentially sensitive changes, I have assessed the possible impact - [ ] If applicable, I have added tests that prove my fix is effective or that my feature works - [ ] If applicable, I have checked that any relevant tests pass after adding my changes -- [ ] I have updated any relevant documentation ([`README.md`](/README.md) and [`CHANGELOG.md`](/CHANGELOG.md)) +- [ ] I have updated any relevant documentation ([`README.md`](https://github.com/nginx/documentation/blob/main/README.md) and [`CHANGELOG.md`](https://github.com/nginx/documentation/blob/main/CHANGELOG.md) - [ ] I have rebased my branch onto main - [ ] I will ensure my PR is targeting the main branch and pulling from my branch from my own fork Potentially sensitive changes include anything involving code, personally identify information (PII), live URLs or significant amounts of new or revised documentation. -Please refer to [our style guide](/templates/style-guide.md) for guidance about placeholder content. \ No newline at end of file +Please refer to [our style guide](https://github.com/nginx/documentation/blob/main/templates/style-guide.md) for guidance about placeholder content. 
\ No newline at end of file diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 550436fde..2bac27982 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -59,7 +59,7 @@ jobs: uses: nginxinc/docs-actions/.github/workflows/docs-build-push.yml@9c59fab05a8131f4d691ba6ea2b6a119f3ef832a # v1.0.7 with: production_url_path: "" - preview_url_path: "/previews/docs" + preview_url_path: "${{ vars.PREVIEW_URL_PATH }}" docs_source_path: "public" docs_build_path: "./" doc_type: "hugo" diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml index 030dac8e8..23b14112d 100644 --- a/.github/workflows/linkchecker.yml +++ b/.github/workflows/linkchecker.yml @@ -72,7 +72,7 @@ jobs: uses: azure/login@a65d910e8af852a8061c627c456678983e180302 # v2.2.0 with: creds: ${{secrets.AZURE_CREDENTIALS_DOCS}} - + - name: Retrieve secrets from Keyvault if: env.isProduction != 'true' id: keyvault diff --git a/archetypes/concept.md b/archetypes/concept.md index c40526fa1..df8ce0f50 100644 --- a/archetypes/concept.md +++ b/archetypes/concept.md @@ -35,7 +35,7 @@ It is an example of a , and is closely related to ## Use cases -[//]: # "Name the individual use case sections after the actual use case itself, e.g "Route traffic between applications" +[//]: # "Name the individual use case sections after the actual use case itself, e.g 'Route traffic between applications'" ### Use case 1 diff --git a/config/_default/config.toml b/config/_default/config.toml index 5f1efd57b..812739793 100644 --- a/config/_default/config.toml +++ b/config/_default/config.toml @@ -16,7 +16,9 @@ enableGitInfo = true ngf = '/nginx-gateway-fabric/:sections[1:]/:filename' nim = '/nginx-instance-manager/:sections[1:]/:filename' nms = '/nginx-management-suite/:sections[1:]/:filename' + unit = '/nginx-unit/:sections[1:]/:filename' agent = '/nginx-agent/:sections[1:]/:filename' + nginxaas = '/nginxaas/azure/:sections[1:]/:filename' [caches] [caches.modules] diff --git a/content/includes/nginxaas-azure/logging-analysis-azure-storage.md b/content/includes/nginxaas-azure/logging-analysis-azure-storage.md new file mode 100644 index 000000000..67bc3f358 --- /dev/null +++ b/content/includes/nginxaas-azure/logging-analysis-azure-storage.md @@ -0,0 +1,48 @@ +--- +docs: "DOCS-000" +--- + +If the diagnostic setting destination details included a storage account, logs show up in the storage container "insights-logs-nginxlogs" with the following format: `resourceID=//y=/m=/d=
/h=/PT1H.json` + +{{}} +| **Attribute** | **Description** | +|-----------------------------|-----------------| +| `` | The resourceID of the NGINXaaS deployment in upper case.| +| `` | The four-digit year when the log batch was generated.| +| `` | The two-digit month when the log batch was generated.| +| `
` | The two-digit day when the log batch was generated.| +| `` | The two-digit hour value that indicates the starting hour for the log batch, in 24-hour UTC format.| +{{}} + +{{}}It can take up to 90 minutes after adding diagnostic settings for logs to appear in the provided Azure Storage container.{{}} + +Each log event in the "PT1H.json" file is written in newline-delimited JSON text format. The properties that show up in each log line are described in the [Top Level Common Schema](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema#top-level-common-schema) documentation. + +For instance, an access log event logging to a particular file path will have attributes similar to this example: + +```yaml +{ + "category": "NginxLogs", + "location": "westcentralus", + "operationName": "NGINX.NGINXPLUS/NGINXDEPLOYMENTS/LOG", + "properties": { + "message": "172.92.129.50 - \"-\" [18/Jan/2024:17:59:00 +0000] \"GET / HTTP/1.1\" 200 11232 \"-\" \"curl/8.4.0\" \"-\" \"20.69.58.179\" sn=\"localhost\" rt=0.000 ua=\"-\" us=\"-\" ut=\"-\" ul=\"-\" cs=\"-\" ", + "filePath": "/var/log/nginx/access.log" + }, + "resourceId": "/SUBSCRIPTIONS/FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF/RESOURCEGROUPS/RESOURCEGROUP1/PROVIDERS/NGINX.NGINXPLUS/NGINXDEPLOYMENTS/TEST1", + "time": "2024-01-18T17:59:00.363956795Z" +} +``` + +If [syslog-based](#logging-to-syslog) logs are used, the log event entry has different **properties** sub-fields: + +```yaml +#... +"properties": { + "message": "172.92.129.50 - - [16/Jan/2024:18:00:00 +0000] \"GET / HTTP/1.1\" 200 11232 \"-\" \"curl/8.4.0\"", + "tag": "nginx", + "severity": "info", + "facility": "local7" + }, +#... +``` diff --git a/content/includes/nginxaas-azure/logging-analysis-logs-analytics.md b/content/includes/nginxaas-azure/logging-analysis-logs-analytics.md new file mode 100644 index 000000000..8809a824d --- /dev/null +++ b/content/includes/nginxaas-azure/logging-analysis-logs-analytics.md @@ -0,0 +1,30 @@ +--- +docs: "DOCS-000" +--- + +If the diagnostic setting destination details included a Log Analytics workspace, logs show up in the "NGXOperationLogs" table with the following non-standard attributes: + +{{}} +| **Attribute** | **Description** | +|-----------------------------|-----------------| +| **Location** | The location of the NGINXaaS resource.| +| **Message** | The generated NGINX log line. | +| **FilePath** | The file path NGINX logs are written to when the NGINX configuration uses file-based logging. | +| **Tag** | The tag with which NGINX logs were generated if syslog-based log configuration is used. By default, this is `nginx`. | +| **Facility** | The syslog facility with which NGINX logs were generated if syslog-based log configuration is used. | +| **Severity** | The syslog severity with which NGINX logs were generated if syslog-based log configuration is used. | + +{{}} + +Using [KQL](https://learn.microsoft.com/en-us/azure/data-explorer/kusto/query/), you can run a custom query to view the logs: + +``` +NGXOperationLogs +| where Location contains "eastus" +``` + +For more information on the standard attributes that appear in Log Analytics, see the [Standard columns in Azure Monitor Logs](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/log-standard-columns) documentation. + +For more information on using [KQL](https://learn.microsoft.com/en-us/azure/data-explorer/kusto/query/), see [Queries in Log Analytics](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/queries?tabs=groupby).
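As an illustration, the attributes above can be combined with the standard `TimeGenerated` column to chart log volume over time. The following query is a sketch only: the `eastus` location and the `/var/log/nginx/access.log` path are the example values used elsewhere in this document, and the five-minute bin size is arbitrary, so adjust all three to match your deployment:

```
NGXOperationLogs
| where Location contains "eastus"
| where FilePath == "/var/log/nginx/access.log"
| summarize LogCount = count() by bin(TimeGenerated, 5m)
| render timechart
```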
+ +{{}}It can take up to 90 minutes after adding diagnostic settings for logs to appear in the provided Logs Analytics Workspace.{{}} diff --git a/content/includes/nginxaas-azure/logging-config-access-logs.md b/content/includes/nginxaas-azure/logging-config-access-logs.md new file mode 100644 index 000000000..01fba8215 --- /dev/null +++ b/content/includes/nginxaas-azure/logging-config-access-logs.md @@ -0,0 +1,39 @@ +--- +docs: "DOCS-000" +--- + +NGINX access logs are disabled by default. You can enable access logs by adding **access_log** directives to your NGINX configuration to specify the location of the logs and formats. The log path should always be configured to be inside **/var/log/nginx**. + +```nginx +http { + log_format myfmt '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent" "$gzip_ratio"'; + + access_log /var/log/nginx/nginx-access.log myfmt; + # ... +} +``` + +{{}} The **$time_local** variable includes the date and time for each log. It helps with ordering logs after export. {{}} + +To explicitly disable access logs, apply the following config: + +```nginx +http { + access_log off; +} +``` + +or + +```nginx +http { + access_log /dev/null; +} +``` + +To learn more about how to specify `access__log` in different configuration levels and their effect, see [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log) + +{{}}Unless you use **syslog**, keep NGINX logs in the **/var/log/nginx** directory. Otherwise, you may lose data from your logs. +{{}} diff --git a/content/includes/nginxaas-azure/logging-config-error-logs.md b/content/includes/nginxaas-azure/logging-config-error-logs.md new file mode 100644 index 000000000..c27d8ace8 --- /dev/null +++ b/content/includes/nginxaas-azure/logging-config-error-logs.md @@ -0,0 +1,19 @@ +--- +docs: "DOCS-000" +--- + +By default, NGINXaaS for Azure puts the error log at **/var/log/nginx/error.log**. It includes messages with severity **error** and above. + +While you should configure log files in the **/var/log/nginx** directory, you can change the filename and severity level. For example, the following line in the NGINX configuration sends errors to the `nginx-error.log` file, and limits messages to a severity level of **emerg**: + +```nginx +error_log /var/log/nginx/nginx-error.log emerg; +``` + +Alternatively, you can disable error logs completely with the following line: + +```nginx +error_log /dev/null; +``` + +To learn more about how to specify `error_log` in different configuration levels, see the documentation of the [error_log](https://nginx.org/en/docs/ngx_core_module.html?#error_log) directive. diff --git a/content/includes/nginxaas-azure/logging-limitations.md b/content/includes/nginxaas-azure/logging-limitations.md new file mode 100644 index 000000000..a6b636d66 --- /dev/null +++ b/content/includes/nginxaas-azure/logging-limitations.md @@ -0,0 +1,8 @@ +--- +docs: "DOCS-000" +--- + +1. File-based logs must be configured to use the path **/var/log/nginx**. +1. The **gzip** parameter for the **access_log** directive is not supported, and uploading a config with this parameter will cause an error. +1. Logging **error_log** to a cyclic memory buffer using the **memory:** prefix is not allowed and will cause a config upload error. +1. Egress Networking charges apply for traffic sent from the NGINX deployment to a syslog server present in a different VNet. 
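Taken together with the access and error log examples above, these limitations suggest a simple baseline. The following sketch is illustrative only, the `main` format name and the file names are placeholders rather than required values, and it simply shows a logging configuration that stays within the constraints listed above:

```nginx
http {
    # File-based logs must live under /var/log/nginx.
    error_log  /var/log/nginx/error.log warn;

    log_format main '$remote_addr - $remote_user [$time_local] '
                    '"$request" $status $body_bytes_sent';

    # No "gzip" parameter on access_log and no "memory:" prefix on error_log,
    # since both cause a configuration upload error.
    access_log /var/log/nginx/access.log main;
}
```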
diff --git a/content/includes/nginxaas-azure/ncu-description.md b/content/includes/nginxaas-azure/ncu-description.md new file mode 100644 index 000000000..138b593e6 --- /dev/null +++ b/content/includes/nginxaas-azure/ncu-description.md @@ -0,0 +1,11 @@ +--- +docs: "DOCS-1476" +--- + +An NGINX Capacity Unit (NCU) quantifies the capacity of an NGINX instance based on the underlying compute resources. This abstraction allows you to specify the desired capacity in NCUs without having to consider the regional hardware differences. + +An NGINX Capacity Unit consists of the following parameters: + +* CPU: an NCU provides 20 [Azure Compute Units](https://learn.microsoft.com/en-us/azure/virtual-machines/acu) (ACUs) +* Bandwidth: an NCU provides 60 Mbps of network throughput +* Concurrent connections: an NCU provides 400 concurrent connections. This performance is not guaranteed when NGINX App Protect WAF is used with NGINXaaS diff --git a/content/includes/nginxaas-azure/ssl-tls-prerequisites.md b/content/includes/nginxaas-azure/ssl-tls-prerequisites.md new file mode 100644 index 000000000..68849fbf5 --- /dev/null +++ b/content/includes/nginxaas-azure/ssl-tls-prerequisites.md @@ -0,0 +1,21 @@ +--- +docs: "DOCS-000" +--- + +- AKV to store certificates that you want to add to the deployment. + +- A user or system assigned identity associated with your NGINXaaS deployment. Ensure that your managed identity (MI) has read access to secrets stored in AKV: + + - If using Azure RBAC for AKV, ensure that your MI has [Key Vault Secrets User](https://learn.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#key-vault-secrets-user) or higher permissions. + + - If using Access Policies for AKV, ensure that your MI has *GET secrets* or higher permissions. + +- In addition to the MI permissions, if using the Azure portal to manage certificates, ensure that you have read access to list certificates inside the Key Vault: + + - If using Azure RBAC for AKV, ensure that you have [Key Vault Reader](https://learn.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#key-vault-reader) or higher permissions. + + - If using Access Policies for AKV, ensure that you have *LIST certificates* or higher permissions. + + - If public access is disabled on your key vault, [configure Network Security Perimeter]({{< relref "/nginxaas-azure/quickstart/security-controls/certificates.md#configure-network-security-perimeter-nsp" >}}) and add an inbound access rule to allow your client IP address. + +- If you're unfamiliar with Azure Key Vault, check out the [Azure Key Vault concepts](https://docs.microsoft.com/en-us/azure/key-vault/general/basic-concepts) documentation from Microsoft. \ No newline at end of file diff --git a/content/includes/nginxaas-azure/terraform-prerequisites.md b/content/includes/nginxaas-azure/terraform-prerequisites.md new file mode 100644 index 000000000..b529c9b65 --- /dev/null +++ b/content/includes/nginxaas-azure/terraform-prerequisites.md @@ -0,0 +1,9 @@ +--- +docs: "DOCS-000" +--- + +- Confirm that you meet the [NGINXaaS Prerequisites]({{< relref "/nginxaas-azure/getting-started/prerequisites.md" >}}). 
+- [Authenticate Terraform to Azure](https://learn.microsoft.com/en-us/azure/developer/terraform/authenticate-to-azure) +- [Install Terraform](https://learn.hashicorp.com/tutorials/terraform/install) + +{{< caution >}} The examples in the NGINXaaS for Azure Snippets GitHub repository use the prerequisites module [available in the same repository](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/terraform/prerequisites). {{< /caution >}} diff --git a/content/includes/nginxaas-azure/terraform-resources.md b/content/includes/nginxaas-azure/terraform-resources.md new file mode 100644 index 000000000..727740cc4 --- /dev/null +++ b/content/includes/nginxaas-azure/terraform-resources.md @@ -0,0 +1,6 @@ +--- +docs: "DOCS-000" +--- + +- [NGINXaaS Managed Identity Documentation]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}) +- [NGINXaaS Azure Monitor Documentation]({{< relref "/nginxaas-azure/monitoring/enable-monitoring.md" >}}) \ No newline at end of file diff --git a/content/includes/nim/tech-specs/nim-app-protect-support.md b/content/includes/nim/tech-specs/nim-app-protect-support.md index 18c7ef3e2..8a217050b 100644 --- a/content/includes/nim/tech-specs/nim-app-protect-support.md +++ b/content/includes/nim/tech-specs/nim-app-protect-support.md @@ -8,7 +8,7 @@ NGINX Instance Manager supports the following versions of [NGINX App Protect WAF | NGINX Instance Manager | NGINX App Protect WAF | |------------------------|------------------------------------| -| 2.17.0–2.18.0 | Release 4.8.0–4.12.0, 5.1.0–5.4.0 | +| 2.17.0–2.19.0 | Release 4.8.0–4.13.0, 5.1.0–5.5.0 | | 2.15.1–2.16.0 | Release 4.8.0–4.10.0 | | 2.14.1–2.15.0 | Release 4.4.0–4.7.0 | | 2.13.0–2.14.0 | Release 4.3.0–4.5.0 | diff --git a/content/includes/nim/templates/additional-templating-resources.md b/content/includes/nim/templates/additional-templating-resources.md index dbc42704c..cd21b7d82 100644 --- a/content/includes/nim/templates/additional-templating-resources.md +++ b/content/includes/nim/templates/additional-templating-resources.md @@ -2,20 +2,6 @@ docs: DOCS-1500 --- -#### Concepts +
-- **[Understand Config Templates]({{< relref "nim/nginx-configs/config-templates/concepts/config-templates.md" >}})**: Learn about config template types, publication targets, and the template submission process. - -- **[F5 Global Default Base Template]({{< relref "nim/nginx-configs/config-templates/concepts/default-base-template.md" >}})**: Learn about the F5 Global Default Base Template, including its key components and usage. Discover how augment templates can be used to segment portions of the base template. - -- **[Augment Templates]({{< relref "nim/nginx-configs/config-templates/concepts/augment-templates.md" >}})**: Learn how augment templates can be combined with base templates to add specific features like OIDC authentication, or segment (compartmentalize) configuration elements like location and server blocks. - -- **[Template Resource Files]({{< relref "nim/nginx-configs/config-templates/concepts/template-resources.md" >}})**: Learn about template resource files, including config template files, JSON schemas, and auxiliary files. - -- **[Dynamic Form JSON Schema]({{< relref "/nim/nginx-configs/config-templates/reference/json-schema-reference.md" >}})**: Learn how to use JSON schemas for template input and validation in the dynamic web form builder. - -#### How-Tos - -- **[Manage NGINX Configs with Config Templates]({{< relref "/nim/nginx-configs/config-templates/how-to/manage-nginx-configs-with-templates.md" >}})**: Create, import, and deploy NGINX configurations using config templates. - -- **[Access Control for Templates and Template Submissions]({{< relref "/nim/nginx-configs/config-templates/how-to/rbac-config-templates-and-submissions.md" >}})**: Apply role-based access control (RBAC) settings to templates and template submissions. \ No newline at end of file + [Download example config templates for NGINX Instance Manager from GitHub](https://github.com/f5devcentral/n1_nim_template_examples) \ No newline at end of file diff --git a/content/ngf/_index.md b/content/ngf/_index.md index 22e194f7c..c33be6b17 100644 --- a/content/ngf/_index.md +++ b/content/ngf/_index.md @@ -1,4 +1,4 @@ --- -title: "Welcome to the NGINX Gateway Fabric documentation" +title: "NGINX Gateway Fabric" url: /nginx-gateway-fabric/ --- diff --git a/content/ngf/support.md b/content/ngf/support.md index 433927db2..c6f28eed2 100644 --- a/content/ngf/support.md +++ b/content/ngf/support.md @@ -40,6 +40,6 @@ Visit the [project’s GitHub repository](https://github.com/nginxinc/nginx-supp - If you have any suggestions or enhancement requests, please [open an idea](https://github.com/nginx/nginx-gateway-fabric/discussions/categories/ideas) on GitHub discussions. -- You can contact us directly, by sending an email to [kubernetes@nginx.com](mailto:kubernetes@nginx.com) or on the [NGINX Community Slack](https://nginxcommunity.slack.com/channels/nginx-gateway-fabric), in the #nginx-gateway-fabric channel. +- You can also get help through the [NGINX Community Forum](https://community.nginx.org/). - If you need dedicated support for NGINX Gateway Fabric, or you would like to leverage our [advanced NGINX Plus features](https://docs.nginx.com/nginx-gateway-fabric/overview/nginx-plus/), you can contact [F5 Sales](https://www.f5.com/content/f5-com/en_us/products/get-f5). 
diff --git a/content/nginx/admin-guide/security-controls/controlling-access-by-geoip.md b/content/nginx/admin-guide/security-controls/controlling-access-by-geoip.md index 8f5ebbd58..b31691588 100644 --- a/content/nginx/admin-guide/security-controls/controlling-access-by-geoip.md +++ b/content/nginx/admin-guide/security-controls/controlling-access-by-geoip.md @@ -392,7 +392,7 @@ In this example, the IP address will be checked in the `GeoLite2-Country.mmdb` d ## More Info -- [GeoIP2 Dynamic Module Installation Instructions]({{< relref "geoip2.md" >}}) +- [GeoIP2 Dynamic Module Installation Instructions]({{< relref "/nginx/admin-guide/dynamic-modules/geoip2.md" >}}) - [MaxMind GeoIP2 Databases](https://www.maxmind.com/en/geoip2-databases) diff --git a/content/nginx/deployment-guides/amazon-web-services/ec2-instances-for-nginx.md b/content/nginx/deployment-guides/amazon-web-services/ec2-instances-for-nginx.md index 036dab9c6..e3b173ae4 100644 --- a/content/nginx/deployment-guides/amazon-web-services/ec2-instances-for-nginx.md +++ b/content/nginx/deployment-guides/amazon-web-services/ec2-instances-for-nginx.md @@ -1,186 +1,147 @@ --- +title: Create Amazon EC2 Instances for NGINX Open Source and NGINX Plus +weight: 600 description: Create Amazon Elastic Compute Cloud (EC2) instances for running NGINX Open Source and F5 NGINX Plus. -docs: DOCS-444 -doctypes: -- task -title: Creating Amazon EC2 Instances for NGINX Open Source and NGINX Plus toc: true -weight: 600 +type: how-to +product: NGINX+ +docs: DOCS-444 --- -These instructions explain how to create instances in the Amazon Elastic Compute Cloud (EC2) environment suitable for running NGINX Open Source and F5 NGINX Plus. +This guide explains how to create instances in the Amazon Elastic Compute Cloud (EC2) that can run NGINX Open Source and F5 NGINX Plus. -For NGINX Plus, a faster alternative is to purchase a prebuilt Amazon Machine Image (AMI) in the AWS Marketplace. Several operating systems are available, including Amazon Linux, Red Hat Enterprise Linux, and Ubuntu. For instructions, see [Installing NGINX Plus AMIs on Amazon EC2]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus-amazon-web-services.md" >}}). +For NGINX Plus, you can buy a prebuilt Amazon Machine Image (AMI) from the AWS Marketplace for a faster option. You can find AMIs for various operating systems, such as Amazon Linux, Red Hat Enterprise Linux, and Ubuntu. For instructions, see [Installing NGINX Plus AMIs on Amazon EC2]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-plus-amazon-web-services.md" >}}). - -## Prerequisites +## Before you begin -These instructions assume you have: +To complete this guide, you need the following: - An [AWS account](http://docs.aws.amazon.com/AmazonSimpleDB/latest/DeveloperGuide/AboutAWSAccounts.html). -- If using the instructions in [Automating Installation with Ansible](#automate-ansible), basic Linux system administration skills, including installation of Linux software from vendor‑supplied packages, and file creation and editing. - -In addition, to install NGINX software by following the linked instructions, you need: - -- An NGINX Plus subscription, either paid or a [30‑day free trial](https://www.nginx.com/free-trial-request), if you plan to install that product. -- `root` privilege on the hosts where NGINX Open Source and NGINX Plus are to be installed. If appropriate for your environment, prefix commands with the `sudo` command. - - -## Creating an Amazon EC2 Instance - -1. 
Log into the [EC2 dashboard](https://console.aws.amazon.com/ec2/) in the AWS Management Console (****). - -2. In the left navigation bar, select **Instances**, then click the  Launch Instance  button. - - - -3. In the **Step 1: Choose an Amazon Machine Image (AMI)** window, click the  Select  button for the Linux distribution of your choice. - - - -4. In the **Step 2: Choose an Instance Type** window, click the radio button for the appropriate instance type. In the screenshot, we are selecting a t2.micro instance, which is normally selected by default and is sufficient for demo purposes. - - **Note:** At the time of publication of this guide, AWS gives you 750 hours of free usage per month with this instance type during the first year of your AWS account. Keep in mind, however, that if they run 24 hours a day, the sets of instances specified in the NGINX deployment guides use up the 750 hours in just a few days (just over 5 days for 6 instances, and just under 4 days for 8 instances). +- Basic Linux system administration skills, including installing software, managing files and folders, and using the command line, to follow the [Ansible instructions]({{< relref "#automate-ansible" >}}). +- A paid subscription or a [30-day free trial](https://www.nginx.com/free-trial-request) for NGINX Plus. +- Root privileges on the hosts where NGINX Open Source or NGINX Plus will be installed, with `sudo` access as needed. - Click the  Next: Configure Instance Details  button to continue to the next step. +## Create an Amazon EC2 Instance {#create-ec2-instances} - +1. Log in to the EC2 dashboard in the AWS Management Console: **[https://console.aws.amazon.com/ec2](https://console.aws.amazon.com/ec2)**. -5. In the **Step 3: Configure Instance Details** window, select the default subnet for your VPC in the **Subnet** field, then click the  Next: Add Storage  button. +1. In the left navigation bar, choose **Instances**. Then select **Launch Instances** in the top right corner. - +1. On the **Launch an Instance** page, give your new instance a name in the **Name and Tags** section. This name will show in the Name column of the summary table on the EC2 Instances dashboard. This guide is using "instance-name." -6. In the **Step 4: Add Storage** window, leave the defaults unchanged. Click the  Next: Add Tags  button. +1. In the **Application and OS Images (Amazon Machine Image)** section select the image of the Linux distribution of your choice. - + {{< img src="/img/aws/aws-nlb-instance-choose-ami.png" alt="Screenshot of AMI section on the EC2 Launch Instance page">}} -7. In the **Step 5: Add Tags** window, click the  Add Tag  button. Type Name in the **Key** field, and in the **Value** field type the instance name (the screenshot shows the result). This name is what will appear in the **Name** column of the summary table on the **Instances** tab of the EC2 dashboard (see the screenshot in Step 12, which shows one instance). +1. In the **Instance Type** section, choose an appropriate instance type. The screenshot shows the **t2.micro** instance type selected by default. This type is sufficient for demo purposes. - If you are following these instructions as directed by an NGINX deployment guide, the **Creating EC2 Instances and Installing the NGINX Software** section of the deployment guide specifies the instance names to use. + {{}}At the time of publication, AWS offered 750 hours of free usage each month for this instance type. This applies during your first year with an AWS account. 
Keep in mind, though, that several NGINX instances running all day will use the free 750 hours up quickly. For example, 6 instances will use them in just over 5 days. If you use 8 instances, you'll hit the limit in under 4 days.{{}} - Click the  Next: Configure Security Group  button to continue to the next step. + {{< img src="/img/aws/aws-nlb-instance-choose-type.png" alt="Screenshot of Instance Type on the EC2 Launch Instance page">}} - +1. In the **Key pair (login)** section, select an existing key pair or create a new one. If you choose **Create new key pair**, a window appears, allowing you to download the key pair. + {{}} It's best practice — and necessary in production — to create a separate key for each EC2 instance. This way, if a key is compromised, only that one instance is at risk.{{}} -8. In the **Step 6: Configure Security Group** window, select or enter the following values in the indicated fields: +1. Scroll to the **Network settings** section. You can keep the default **VPC** and **Subnet** settings. Under **Firewall (Security Groups),** either create a new security group or use an existing one. + - If this is your first setup, keep **Create security group** selected. + - Select **Allow HTTP traffic from the internet.** + - (Optional) Select **Allow HTTPS traffic from the internet** if needed. - - **Assign a security group** – - - If you are setting up a deployment with multiple instances (one in an NGINX deployment guide, for instance), and this is the first instance you are creating, select Create a **new** security group. - - For subsequent instances, select Select an **existing** security group instead (it makes sense for all instances in a deployment to use the same security group). - - **Security group name** – Name of the group. If you are following these instructions as directed by an NGINX deployment guide, the **Prerequisites and Required AWS Configuration** section of the deployment guide specifies the group name to use. - - **Description** – Description of the group; the group name is often used. + This creates the following inbound security group rules: - /nginx/images/aws-generic-instance-security-group.png + 1. Accept SSH connections from all sources + - **Type** – SSH + - **Protocol** – TCP + - **Port Range** – 22 + - **Source** – Custom `0.0.0.0/0` + 1. Accept unencrypted HTTP connections from all sources + - **Type** – HTTP + - **Protocol** – TCP + - **Port Range** – 80 + - **Source** – Custom `0.0.0.0/0` + 1. Accept encrypted HTTPS connections from all soruces (optional) + - **Type** – HTTPS + - **Protocol** – TCP + - **Port Range** – 443 + - **Source** – Custom `0.0.0.0/0` -9. In the table, modify the default rule for SSH connections, if necessary, by selecting or setting the following values. They allow inbound SSH connections from all sources (any IP address): + {{< img src="/img/aws/aws-generic-instance-details.png" alt="Screenshot of Network Settings on the EC2 Launch Instance page">}} - - **Type** – SSH - - **Protocol** – TCP - - **Port Range** – 22 - - **Source** – Custom 0.0.0.0/0 - - **Description** – Accept SSH connections from all sources + If you are deploying multiple instances, it makes sense to use the same security group for all. In that case, choose "Select existing security group." This will allow you to pick a security group from a list. -10. 
Create a rule that allows inbound HTTP connections from all sources, by clicking the  Add Rule  button and selecting or setting the following values in the new row: + {{< img src="/img/aws/aws-instance-select-sg.png" alt="Screenshot of Network Settings on the EC2 Launch Instance page with the option of selecting an existing security group">}} - - **Type** – HTTP - - **Protocol** – TCP - - **Port Range** – 80 - - **Source** – Custom 0.0.0.0/0 - - **Description** – Accept unencrypted HTTP connections from all sources +1. In the **Configure Storage** section, leave the defaults unchanged. - If appropriate, repeat this step to create a rule for HTTPS traffic. +1. In the right panel, verify the settings in the **Summary** section. If everything is correct, select **Launch Instance**. - When you've created all desired rules, click the  Review and Launch  button. + {{< img src="/img/aws/aws-instance-summary.png" alt="Screenshot of the Summary panel on the EC2 Launch Instance page with button to launch instance">}} -11. In the **Step 7: Review Instance Launch** window, verify the settings are correct. If so, click the  Launch  button in the lower‑right corner of the window. To change settings, click the  Previous  button to go back to earlier windows. +1. After launching the instance, you are redirected to a confirmation page with a success message. Select the instance ID to return to the **EC2 Instances** page. - + {{< img src="/img/aws/aws-instance-launch-success.png" alt="Screenshot of the Summary panel on the EC2 Launch Instance page with button to launch instance">}} -12. When you click the  Launch  button, a window pops up asking you to select an existing key pair or create a new key pair. Take the appropriate action for your use case, then click the  Launch Instances  button. +1. On the **EC2 Instances** page, you can view all your instances, including the new one. The following screenshot shows a single instance: - **Note:** It's a best practice – and essential in a production environment – to create a separate key for each EC2 instance, so that if a key is compromised only the single associated instance becomes vulnerable. + {{< img src="/img/aws/aws-generic-instance-display-first.png" alt="Screenshot of the EC2 Instances page with a single instance">}} - ![Screen of 'Select an existing key pair or create a new key pair' window during creation of Amazon EC2 instance](/nginx/images/aws-nlb-instance-key-pair.png) - - A **Launch Status** window pops up to confirm that your launch is underway. To confirm the details of your instance when the launch completes, click the  View Instances  button on that page. - - The instances you have created so far are listed on the **Instances** dashboard. The following screenshot shows a single instance. - - - -13. Finalize your security group rules. You need to do this only for the first instance in a given set, because all instances in a set can use the same security group. +1. Finalize your security group rules. You only need to do this for the first instance in a set. All other instances in that set can use the same security group. - In the left navigation bar, select **Security Groups**. - Select the security group by clicking its radio button in the leftmost column of the table. A panel opens in the lower part of the window displaying details about the group. - - Open the **Inbound** tab and verify that the rules you created in Steps 9 and 10 are listed. + - In the **Inbound** tab, verify that the rules you created in Step 7 are listed. 
+ - Open the **Outbound** tab and select **Edit outbound rules** to create a rule for outbound traffic. The rules depend on the ports used for traffic handled by NGINX Open Source or NGINX Plus instances: - + - By default, AWS adds an outbound rule that allows all traffic to all destinations. You can remove this rule by selecting **Delete**. + - If you use port 80 for client traffic and health checks from a load balancer, like [AWS Network Load Balancer]({{< relref "high-availability-network-load-balancer.md" >}}), you only need one rule. + - If you set up different ports for various tasks, or if you use ports like 443 for HTTPS, adjust them accordingly. - - Open the **Outbound** tab and click the  Edit  button to create a rule for outbound traffic. The set of rules depends on which ports you have used for traffic handled by the NGINX Plus instances: + In the **Destination** field, start typing your security group's name or ID. It should appear under **Security Groups**. The example below shows **sg-0dd4d3c5284052f99**. - - If, for example, you have used port 80 both for client traffic and for health checks from a load balancer (for example, [AWS Network Load Balancer]({{< relref "high-availability-network-load-balancer.md" >}})), you need only one rule. - - If you have configured separate ports for different purposes, or ports other than 80 (such as 443 for HTTPS), make the appropriate adjustments. + {{< img src="/img/aws/aws-generic-instance-security-outbound.png" alt="Screenshot of the EC2 Security Group outbound rules page">}} - In the **Destination** field, type the security group's ID, which appears in the **Group ID** column in the upper table (here it's sg-3bdbf55d). +1. To install NGINX software on the instance, first [connect]({{< relref "#connect-to-an-ec2-instance" >}}) to it. Then follow the instructions in the NGINX Plus Admin Guide for [NGINX Open Source]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-open-source#prebuilt" >}}) and [NGINX Plus]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-plus.md" >}}). - - -14. To install NGINX software on the instance, [connect](#connect-to-instance) to it, and follow the instructions in the NGINX Plus Admin Guide for [NGINX Open Source]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-open-source#prebuilt" >}} and [NGINX Plus]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-plus.md" >}}). +--- - -## Connecting to an EC2 Instance -To install and configure NGINX Open Source or NGINX Plus on an instance, you need to open a terminal window and connect to the instance over SSH. +## Connect to an EC2 Instance +To install and configure NGINX Open Source or NGINX Plus on an instance, open a terminal window and connect to the instance over SSH. 1. Navigate to the **Instances** tab on the EC2 Dashboard if you are not there already. +1. Select the row for an instance to highlight it. +1. Select **Connect** above the list of instances. You are redirected to the **Connect to Instance** page, with the **SSH client** tab selected by default. +1. Follow the instructions on the page, which are customized for the selected instance. A sample `ssh` command includes the key file name and the instance hostname. + + {{< img src="/img/aws/aws-nlb-instance-connect.png" alt="Screenshot of the EC2 Instance Connect with SSH page">}} -2. Click the row for an instance to select it. In the screenshot **instance2** is selected. - - - -3. Click the  Connect  button above the list of instances. 
The **Connect To Your Instance** window pops up. -4. Follow the instructions in the pop‑up window, which are customized for the selected instance (here **instance2**) to provide the name of the key file in the steps and in the sample `ssh` command. - - ![Screenshot of 'Connect To Your Instance' pop-up window for Amazon EC2 instance](/nginx/images/aws-nlb-instance-connect.png) - - -## Installing NGINX Software - -Once you have established a connection with an instance, you can install the NGINX software on it. Follow the instructions in the NGINX Plus Admin Guide for NGINX Open Source and [NGINX Plus]({{< relref "../../admin-guide/installing-nginx/installing-nginx-plus.md" >}}). The [Admin Guide]({{< relref "/nginx/admin-guide/_index.md" >}}) also provides instructions for many maintenance tasks. +--- - -### Automating Installation with a Configuration Manager +## Install NGINX software -You can automate the installation of NGINX Open Source and NGINX Plus. Instructions for Ansible are provided below. For Chef and Puppet, see these articles on the NGINX, Inc. blog: +Once you have established a connection with an instance, you can install the NGINX software on it. Follow the instructions in the NGINX Plus Admin Guide for [NGINX Open Source]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-open-source#prebuilt" >}}) and [NGINX Plus]({{< relref "/nginx/admin-guide/installing-nginx/installing-nginx-plus.md" >}}). The [Admin Guide]({{< relref "/nginx/admin-guide/_index.md" >}}) also provides instructions for many maintenance tasks. -- [Installing NGINX and NGINX Plus with Chef](https://www.nginx.com/blog/installing-nginx-nginx-plus-chef/) -- [Deploying NGINX Plus for High Availability with Chef](https://www.nginx.com/blog/nginx-plus-high-availability-chef/) -- [Installing NGINX and NGINX Plus with Puppet](https://www.nginx.com/blog/installing-nginx-nginx-plus-puppet/) +### Automate installation with a configuration manager - -#### Automating Installation with Ansible +You can automate the installation of NGINX Open Source and NGINX Plus. Instructions for Ansible are provided below. -NGINX, Inc. publishes a unified Ansible role for NGINX Open Source and NGINX Plus on [Ansible Galaxy](https://galaxy.ansible.com/nginxinc/nginx/) and [GitHub](https://github.com/nginxinc/ansible-role-nginx). Perform these steps to install and run it. +#### Automate installation with Ansible {#automate-ansible} -1. [Connect to the EC2 instance](#connect-instance). +NGINX, Inc. releases a combined Ansible role for NGINX Open Source and NGINX Plus on [Ansible Galaxy](https://galaxy.ansible.com/nginxinc/nginx/) and [GitHub](https://github.com/nginxinc/ansible-role-nginx). Perform these steps to install and run it. -2. Install Ansible. These commands are appropriate for Debian and Ubuntu systems: +1. [Connect to the EC2 instance]({{< relref "#connect-instance" >}}). - ```shell - apt update - apt install python-pip -y - pip install ansible - ``` +1. Install Ansible following the [instructions](https://docs.ansible.com/ansible/latest/installation_guide/installation_distros.html) for the operating system on your EC2 instance. -3. Install the official Ansible role from NGINX, Inc.: +1. Install the official Ansible role from NGINX: ```shell ansible-galaxy install nginxinc.nginx ``` -4. (NGINX Plus only) Copy the nginx-repo.key and nginx-repo.crt files provided by NGINX, Inc. to ~/.ssh/ngx-certs/. +1. (NGINX Plus only) Copy the **nginx-repo.key** and **nginx-repo.crt** files provided by NGINX, Inc. 
to **~/.ssh/ngx-certs/**. -5. Create a file called **playbook.yml** with the following contents: +1. Create a file called **playbook.yml** with the following contents: ```none --- @@ -190,36 +151,35 @@ NGINX, Inc. publishes a unified Ansible role for NGINX Open Source and NGINX P - role: nginxinc.nginx ``` -5. Run the playbook: +1. Run the playbook: ```shell ansible-playbook playbook.yml ``` - -## Optional: Creating an NGINX Open Source AMI +1. Confirm that NGINX is installed by running `nginx -v`. -To streamline the process of installing NGINX Open Source on multiple instances, you can create an AMI from an existing NGINX Open Source instance, and spin up additional instances of the AMI when needed. -1. Follow the instructions in [Creating Amazon EC2 Instances](#create-ec2-instances) and the NGINX Plus Admin Guide to create an instance and install NGINX Open Source on it, if you have not already. +## Optional: Create an NGINX Open Source AMI -2. Navigate to the **Instances** tab on the Amazon EC2 Dashboard. +To simplify installing NGINX Open Source on several instances, create an AMI from an existing NGINX instance. You can then create additional virtual instances based on the AMI. -3. Select the base instance by clicking its row in the table. In the screenshot, **instance2** is selected. +1. Follow the instructions in [Create Amazon EC2 Instance]({{< relref "#create-ec2-instances" >}}) and [Install NGINX software]({{< relref "#install-nginx-software" >}}). - +1. Go to the **Instances** tab on the Amazon EC2 Dashboard. -4. Click the  Actions  button and select Image > Create Image. +1. Select the base instance with NGINX installed by clicking its row in the table. - +1. Select **Actions**, then choose **Image and templates** > **Create Image**. -5. In the window that pops up, fill in the **Image name** and (optionally) **Image description** fields, then click the  Create image   button. + {{< img src="/img/aws/aws-generic-create-image-menu.png" alt="Screenshot of the EC2 Create Image button menu">}} - screenshot of 'Create Image' pop-up window for creating base AMI in Amazon EC2 +1. On the **Create Image** page, enter the **Image name** and optionally add an **Image description**. Then select **Create image**. You are returned to the **Instances** page with a green alert confirming that the image is being created. - A **Create Image** window pops up to confirm that the image‑creation request was received. To verify that the image was created, navigate to the **AMIs** tab. +1. To verify that the image was created, go to the **AMIs** tab and find the new image by its name. ### Revision History +- Version 3 (February 2025) - Update guide to match new AWS instance creation flow, update screenshots, and links. - Version 2 (July 2018) – Substitute links to NGINX Plus Admin Guide for sample installation instructions. - Version 1 (April 2018) – Initial version (NGINX Plus Release 14) diff --git a/content/nginxaas-azure/_index.md b/content/nginxaas-azure/_index.md new file mode 100644 index 000000000..b77d7286e --- /dev/null +++ b/content/nginxaas-azure/_index.md @@ -0,0 +1,10 @@ +--- +title: "NGINXaaS for Azure" +description: | + NGINX as a Service for Azure is an IaaS offering that is tightly integrated into Microsoft Azure public cloud and its ecosystem, making applications fast, efficient, and reliable with full lifecycle management of advanced NGINX traffic services. 
+linkTitle: "NGINXaaS for Azure" +menu: docs +url: /nginxaas/azure/ +cascade: + logo: NGINX-for-Azure-icon.svg +--- \ No newline at end of file diff --git a/content/nginxaas-azure/app-protect/_index.md b/content/nginxaas-azure/app-protect/_index.md new file mode 100644 index 000000000..1829a1e8e --- /dev/null +++ b/content/nginxaas-azure/app-protect/_index.md @@ -0,0 +1,8 @@ +--- +title: NGINX App Protect WAF (Preview) +weight: 200 +url: /nginxaas/azure/app-protect/ +menu: + docs: + parent: NGINXaaS for Azure +--- diff --git a/content/nginxaas-azure/app-protect/configure-waf.md b/content/nginxaas-azure/app-protect/configure-waf.md new file mode 100644 index 000000000..622a2e0e3 --- /dev/null +++ b/content/nginxaas-azure/app-protect/configure-waf.md @@ -0,0 +1,107 @@ +--- +title: "Configure App Protect WAF" +weight: 300 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/app-protect/configure-waf/ +--- + +## Overview + +This guide explains how to configure the F5 NGINX App Protect WAF security features. + +## Configure + +To use NGINX App Protect apply the following changes to the NGINX config file. + +1. Load the NGINX App Protect WAF module on the main context: + +```nginx +load_module modules/ngx_http_app_protect_module.so; +``` + +2. Set the enforcer address: + +```nginx +app_protect_enforcer_address 127.0.0.1:50000; +``` + +{{}} The app_protect_enforcer_address directive is a required directive for Nginx App Protect to work and must match 127.0.0.1:50000{{}} + + +3. Enable NGINX App Protect WAF with the `app_protect_enable` directives in the appropriate scope. The `app_protect_enable` directive may be set in the `http`, `server`, and `location` contexts. + +It is recommended to have a basic policy enabled in the `http` or `server` context to process malicious requests in a more complete manner. + +```nginx +app_protect_enable on; +``` + +4. Configure the path of the pre-compiled policy file to the `app_protect_policy_file` directive. You can find the list of supported policies and their paths under the [Precompiled Policies](#precompiled-policies) section. + +```nginx +app_protect_policy_file /etc/app_protect/conf/NginxDefaultPolicy.json; +``` + +Sample Config with App Protect configured: + +```nginx +user nginx; +worker_processes auto; +worker_rlimit_nofile 8192; +pid /run/nginx/nginx.pid; + +load_module modules/ngx_http_app_protect_module.so; + +events { + worker_connections 4000; +} + +error_log /var/log/nginx/error.log debug; + +http { + access_log off; + server_tokens ""; + + app_protect_enforcer_address 127.0.0.1:50000; + + server { + listen 80 default_server; + + location / { + app_protect_enable on; + app_protect_policy_file /etc/app_protect/conf/NginxDefaultPolicy.tgz; + proxy_pass http://127.0.0.1:80/proxy/$request_uri; + } + + location /proxy { + default_type text/html; + return 200 "Hello World\n"; + } + } +} +``` + +## Precompiled Policies + +NGINXaaS for Azure ships with the two reference policies (Default and Strict) supported in NGINX App Protect. These policies are supported in both the blocking and transparent enforcement modes. +For more information on these policies refer the NGINX App Protect [configuration guide](https://docs.nginx.com/nginx-app-protect-waf/v5/configuration-guide/configuration/). 
+ +The following table shows the path to the precompiled policy file that needs to be used with the `app_protect_policy_file` directive: + +{{}} + | Policy | Enforcement Mode | Path | + |---------------------------- | ---------------------------- | -------------------------------------------- | + | Default | Strict | /etc/app_protect/conf/NginxDefaultPolicy.json | + | Default | Transparent | /etc/app_protect/conf/NginxDefaultPolicy_transparent.json | + | Strict | Strict | /etc/app_protect/conf/NginxStrictPolicy.json | + | Strict | Transparent | /etc/app_protect/conf/NginxStrictPolicy_transparent.json | +{{}} + +To view the contents of the available security policies, navigate to the azure portal and select the **Security Policies** tab in the App Protect section. + +{{}}Custom policies are not supported at this time.{{}} + +## What's next + +[Enable App Protect WAF Logs]({{< relref "/nginxaas-azure/app-protect/enable-logging.md" >}}) diff --git a/content/nginxaas-azure/app-protect/disable-waf.md b/content/nginxaas-azure/app-protect/disable-waf.md new file mode 100644 index 000000000..29262395e --- /dev/null +++ b/content/nginxaas-azure/app-protect/disable-waf.md @@ -0,0 +1,25 @@ +--- +title: "Disable App Protect WAF" +weight: 400 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/app-protect/disable-waf/ +--- + +## Overview +This guide explains how to disable F5 NGINX App Protect WAF on an NGINX as a Service for Azure (NGINXaaS) deployment. + +## Before you start +You must remove the WAF directives from your NGINX config file before attempting to disable WAF. + +## Disable App Protect WAF + +### Using the Microsoft Azure Portal + +Access the [Microsoft Azure portal](https://portal.azure.com) + +1. Go to your NGINXaaS for Azure deployment. + +2. Select NGINX app protect in the left menu. + +3. Select **Disable**. diff --git a/content/nginxaas-azure/app-protect/enable-logging.md b/content/nginxaas-azure/app-protect/enable-logging.md new file mode 100644 index 000000000..56c1080cc --- /dev/null +++ b/content/nginxaas-azure/app-protect/enable-logging.md @@ -0,0 +1,189 @@ +--- +title: "Enable App Protect WAF Logs" +weight: 300 +categories: ["tasks"] +url: /nginxaas/azure/app-protect/enable-logging/ +toc: true +--- + +## Overview + +F5 NGINX as a Service for Azure (NGINXaaS) supports exporting NGINX App Protect logs to an Azure Storage account or to a Log Analytics workspace. + +## Setting up operational logs + +NGINX App Protect operational logs are sent to the NGINX error logs. See [Enable NGINX Logs]({{< relref "/nginxaas-azure/monitoring/enable-logging/">}}) to configure error logs. + +## Setting up security logs + +1. Enable the NGINX Security Logs category in **Diagnostic Settings**. For more information on logging, see [Enable NGINX Logs]({{< relref "/nginxaas-azure/monitoring/enable-logging/">}}). + +{{< img src="nginxaas-azure/security-diagnostic-setting.png" alt="Screenshot of the NGINXaaS WAF security logs diagnostic settings" >}} + +2. Update your NGINX configuration to enable security logs in an http/server/location context. + +```nginx +app_protect_security_log_enable on; +``` + +3. Configure the log configuration path and destination to the `app_protect_security_log` directive. More information on supported log configurations and destination can be found in the following sections. 
+```nginx +app_protect_security_log "/etc/app_protect/conf/log_all.json" syslog:server=localhost:5140; +``` + +Sample NGINX config with security logs enabled: + +```nginx +user nginx; +worker_processes auto; +worker_rlimit_nofile 8192; +pid /run/nginx/nginx.pid; + +load_module modules/ngx_http_app_protect_module.so; + +events { + worker_connections 4000; +} + +error_log /var/log/nginx/error.log debug; + +http { + access_log off; + server_tokens ""; + + app_protect_enforcer_address 127.0.0.1:50000; + + server { + listen 80 default_server; + + location / { + app_protect_enable on; + app_protect_policy_file /etc/app_protect/conf/NginxDefaultPolicy.tgz; + app_protect_security_log_enable on; + app_protect_security_log "/etc/app_protect/conf/log_all.tgz" syslog:server=localhost:5140; + proxy_pass http://127.0.0.1:80/proxy/$request_uri; + } + + location /proxy { + default_type text/html; + return 200 "Hello World\n"; + } + } +} +``` + +You can find more details on these directives in the [Security log](https://docs.nginx.com/nginx-app-protect-waf/v5/logging-overview/security-log/) documentation. + +### Log Configuration + +NGINXaaS for Azure ships with several pre-compiled log configuration bundles. More details on these logging bundles can be found in the [Security log](https://docs.nginx.com/nginx-app-protect-waf/v5/logging-overview/security-log/) documentation. + +The following table shows the path to the log configuration file that needs to be used with the app_protect_security_log directive: + + {{}} + | Profile | Path | + |---------------------------- | -------------------------------------------- | + | log_default | /etc/app_protect/conf/log_default.json | + | log_all | /etc/app_protect/conf/log_all.json | + | log_illegal | /etc/app_protect/conf/log_illegal.json | + | log_blocked | /etc/app_protect/conf/log_blocked.json | + | log_grpc_all | /etc/app_protect/conf/log_grpc_all.json | + | log_grpc_illegal | /etc/app_protect/conf/log_grpc_illegal.json | + | log_grpc_blocked | /etc/app_protect/conf/log_grpc_blocked.json | + {{}} + +To view the contents of the available log configuration, navigate to the azure portal and select the Log Configurations tab in the App Protect section. + +### Logging Destinations + +1. Logging to NGINXaaS syslog (Recommended) + +NGINXaaS for Azure supports a local syslog server running on port 5140. Syslogs forwarded to this destination are sent to the sink configured in the **Diagnostic Setting** section. + +```nginx +app_protect_security_log "/etc/app_protect/conf/log_all.json" syslog:server=localhost:5140; +``` + +{{}} When using a NGINXaaS syslog destination, the syslog server destination needs to match localhost:5140. Configuring log directives to other syslog locations will result in an error in the NGINX config. +{{}} + +2. File Logging + +NGINXaaS for Azure supports logging to a file path. Any logs written under `/var/log/app_protect` will be sent to the sink configured in **Diagnostic Setting**. + +```nginx +app_protect_security_log "/etc/app_protect/conf/log_all.json" /var/log/app_protect/security.log; +``` + +{{}}When using a file destination, the configured path for nginx security logs has to be within `/var/log/app_protect`. Configuring log directives to other file locations will result in an error in the NGINX config. +{{}} + + +## Analyzing NGINX security logs in Azure Log Analytics workspaces. 
+ +If the diagnostic setting destination details included a Logs Analytics workspace, logs appear in the "NGXSecurityLogs" table with the following columns: + +{{}} +| **Attribute** | **Description** | +|-----------------------------|-----------------| +| **Location** | The location of the NGINXaaS resource.| +| **Message** | The generated NGINX security log line. | +| **FilePath** | The path to which NGINX security logs are configured to be logged to if the nginx config uses file-based logs. | +| **Tag** | The tag with which NGINX security logs are generated if syslog-based log configuration is used. | +| **Facility** | The syslog facility that generates the NGINX security logs if syslog-based log configuration is being used. | +| **Severity** | The syslog severity with which NGINX security logs were generated if syslog-based log configuration is used. | +{{}} + +To view the raw data in the NGINX security log, run the following KQL query: +``` +NGXSecurityLogs +| extend JSONLog = extract(@"json_log\s*=\s*""({.*?})""", 1, Message) +| extend Log = parse_json(replace_string(JSONLog, '""', '"')) +| project Log +``` + +{{< img src="nginxaas-azure/log-analytics-security.png" alt="Screenshot showing NGINX security logs in the Logs Analytics Workspace" >}} + +The following sample queries will help you get started with creating visualizations based on security logs. + +Blocked requests by IP + +``` +NGXSecurityLogs +| extend JSONLog = extract(@"json_log\s*=\s*""({.*?})""", 1, Message) +| extend Log = parse_json(replace_string(JSONLog, '""', '"')) +| where Log.enforcementAction == "block" +| project ClientIP = tostring(Log.clientIp), TimeGenerated +| summarize count() by ClientIP, bin(TimeGenerated, 1m) +| render timechart +``` + +Blocked requests by URL + +``` +NGXSecurityLogs +| extend JSONLog = extract(@"json_log\s*=\s*""({.*?})""", 1, Message) +| extend Log = parse_json(replace_string(JSONLog, '""', '"')) +| where Log.enforcementAction == "block" +| project URL = tostring(Log.url), TimeGenerated +| summarize count() by URL, bin(TimeGenerated, 1m) +| render timechart +``` + +Top matched rules + +``` +NGXSecurityLogs +| extend JSONLog = extract(@"json_log\s*=\s*""({.*?})""", 1, Message) +| extend Log = parse_json(replace_string(JSONLog, '""', '"')) +| where Log.enforcementAction == "block" +| project attackType = Log.enforcementState.attackType, TimeGenerated +| mv-expand attackType +| project attackName = tostring(attackType.name), TimeGenerated +| summarize count() by attackName, bin(TimeGenerated, 1m) +| render timechart +``` + +To add a visualization to a dashboard, select the **Pin to dashboard** icon in the top right of the log analytics workspace. + +{{}}It can take up to 90 minutes after adding diagnostic settings for logs to appear in the provided Logs Analytics Workspace.{{}} diff --git a/content/nginxaas-azure/app-protect/enable-waf.md b/content/nginxaas-azure/app-protect/enable-waf.md new file mode 100644 index 000000000..1861eac52 --- /dev/null +++ b/content/nginxaas-azure/app-protect/enable-waf.md @@ -0,0 +1,31 @@ +--- +title: "Enable App Protect WAF" +weight: 200 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/app-protect/enable-waf/ +--- + +## Overview + +This guide explains how to enable F5 NGINX App Protect WAF on a F5 NGINX as a Service for Azure (NGINXaaS) deployment. 
[F5 NGINX App Protect WAF](https://docs.nginx.com/nginx-app-protect-waf/v5) provides web application firewall (WAF) security protection for your web applications, including OWASP Top 10; response inspection; Meta characters check; HTTP protocol compliance; evasion techniques; disallowed file types; JSON & XML well-formedness; sensitive parameters & Data Guard. + +## Before you start +- NGINX App Protect WAF can only be enabled on NGINXaaS for Azure deployments with the **Standard v2** [plan]({{< relref "/nginxaas-azure/billing/overview.md" >}}) + +## Enable NGINX App Protect (Preview) +NGINX App Protect is disabled by default and needs to be explicitly enabled on an NGINXaaS deployment. Follow these steps: + +### Using the Microsoft Azure Portal + +Access the [Microsoft Azure portal](https://portal.azure.com) + +1. Go to your NGINXaaS for Azure deployment. + +2. Select NGINX app protect in the left menu. + +3. Select **Enable App Protect**. + +## What's next + +[Configure App Protect WAF]({{< relref "/nginxaas-azure/app-protect/configure-waf.md" >}}) diff --git a/content/nginxaas-azure/billing/_index.md b/content/nginxaas-azure/billing/_index.md new file mode 100644 index 000000000..a6fbefc39 --- /dev/null +++ b/content/nginxaas-azure/billing/_index.md @@ -0,0 +1,9 @@ +--- +title: Marketplace billing +weight: 400 +draft: false +url: /nginxaas/azure/billing/ +menu: + docs: + parent: NGINXaaS for Azure +--- \ No newline at end of file diff --git a/content/nginxaas-azure/billing/overview.md b/content/nginxaas-azure/billing/overview.md new file mode 100644 index 000000000..1e9227f6d --- /dev/null +++ b/content/nginxaas-azure/billing/overview.md @@ -0,0 +1,79 @@ +--- +title: "Billing overview" +weight: 100 +categories: ["concepts"] +toc: true +docs: "DOCS-885" +url: /nginxaas/azure/billing/overview/ +--- + +## Pricing plans + +F5 NGINX as a Service for Azure (NGINXaaS) provides two pricing plans. + +### Standard V2 plan + +The Standard V2 plan is designed for production workloads offering a [99.95% uptime SLA](https://www.f5.com/pdf/customer-support/eusa-sla.pdf), high availability through active-active deployments, redundancy, autoscaling, lossless rolling upgrades, and more. Choosing the Standard V2 plan will result in billing based on metered consumption of NGINX Capacity Units (NCU). + +When using the Standard V2 plan, NGINXaaS is a consumption-based service, metered hourly, and billed monthly in NGINX Capacity Units (NCUs). + +The SKU for the Standard V2 pricing plan is `standardv2_Monthly`. + +The Standard V2 plan allows for configuration of NGINX App Protect WAF and a higher number of listen ports. + + +### Basic plan + +The Basic plan is ideal for those who are just starting out, as it's intended for early-stage trials, development work, and testing. Please note that it doesn't provide service level agreement (SLA) guarantees, and it lacks both redundancy options and the capability to scale resources as needed. + +When using the Basic plan, each NGINXaaS deployment is billed at the rate specified on the [Azure Marketplace Offer](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/f5-networks.f5-nginx-for-azure?tab=Overview). + +The SKU for the Basic pricing plan is `basic_Monthly`. + +{{< note >}}The costs for your plan will appear on the Azure Portal Cost Analysis page and the Azure Consumption APIs. 
There may be a 24h delay before usage is visible.{{< /note >}} + + +### Standard plan (deprecated) + +The Standard plan is comparable to the Standard V2 plan except that it doesn't support some features like NGINX App Protect WAF. + +The SKU to use for the Standard pricing plan is `standard_Monthly`. + +{{< note >}} Standard plan is now deprecated in favor of Standard V2 plan.{{< /note >}} + + +## NGINX Capacity Unit (NCU) + +{{< include "/nginxaas-azure/ncu-description.md" >}} + +Each NCU provisioned (not consumed) is billed at the rate specified on the [Azure Marketplace Offer](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/f5-networks.f5-nginx-for-azure?tab=Overview). The minimum usage interval is 1 hour, and the maximum provisioned NCU size is billed for that hour. + +*Billing Example 1*: "I provisioned a 20 NCU NGINXaaS deployment in East US 2 at 9:04AM and then deleted it at 10:45AM." + +* The hourly rate in East US 2 is `$0.03/NCU/hour`. +* 9:00 hour: `20 NCU·hour` +* 10:00 hour: `20 NCU·hour` +* Total NCU·hours: `40 NCU·hour` +* Total: `40 NCU·hour * $0.03/NCU/hour = $1.20`. + +*Billing Example 2*: "I provisioned a 40 NCU NGINXaaS deployment in West Europe at 9:34AM. At 10:04AM I resized it to 20 NCUs. I then deleted it at 11:45AM." + +* The hourly rate in West Europe is `$0.05/NCU/hour`. +* 9:00 hour: `40 NCU·hour` +* 10:00 hour: `40 NCU·hour` +* 11:00 hour: `20 NCU·hour` +* Total NCU·hours: `100 NCU·hour` +* Total: `100 NCU·hours * 0.05$/NCU/hour = $5.00`. + +{{< note >}}Further guidance: +* For how many NCUs should you provision and how to scale to match workload, see the [Scaling Guidance]({{< relref "/nginxaas-azure/quickstart/scaling.md" >}}) +* To learn more about metrics related to NCUs, see the [NGINXaaS Statistics namespace]({{< relref "/nginxaas-azure/monitoring/metrics-catalog.md#nginxaas-statistics" >}}) +{{< /note >}} + + +## Bandwidth + +The standard Azure [networking](https://azure.microsoft.com/en-us/pricing/details/virtual-network/) and [bandwidth](https://azure.microsoft.com/en-us/pricing/details/bandwidth/) charges apply to NGINX deployments. + +{{< note >}}The management traffic for NGINX instances is billed as a `Virtual Network Peering - Intra-Region Egress` charge. This charge includes the data for shipping metrics and logs. The cost for shipping metrics data is approximately $0.03/month. If you enable NGINX logging the cost increases by roughly $0.005 per GB of logs NGINX generates. To estimate this, multiply the number of requests by the average log line size of the access_log format you have configured. +{{< /note >}} diff --git a/content/nginxaas-azure/billing/usage-and-cost-estimator.md b/content/nginxaas-azure/billing/usage-and-cost-estimator.md new file mode 100644 index 000000000..1d9480e4c --- /dev/null +++ b/content/nginxaas-azure/billing/usage-and-cost-estimator.md @@ -0,0 +1,146 @@ +--- +title: "Usage and cost estimator" +weight: 200 +categories: ["concepts"] +toc: true +docs: "DOCS-1474" +url: /nginxaas/azure/billing/usage-and-cost-estimator/ +--- + +{{< raw-html >}} + + +
+

+ Cost Estimation for Standard V2 Plan + +

+
+
+
+

1. Estimate NCU Usage

+
+
+ + +
+
+ + +
+
+ + +
+
+
+
+ +
+
+
+
+

+ 2. Estimate Monthly Cost +

+
+ + +
+
+ + + +
+
+ + +
+
+ + +
+
+ + +
+
+
+
+ Total Monthly Payment + -- +
+ The standard Azure networking and bandwidth charges apply to NGINX deployments. +
+
+ Show calculations +
+
+

+ hours * (( NCUs * per NCU per hour) + additional listen ports * ) = +
+

+
+
+ + + + + + + +
RegionTierCost per NCU/hr
+
+
+
+
+
+
+
+
+ +{{< /raw-html >}} diff --git a/content/nginxaas-azure/changelog-archive/_index.md b/content/nginxaas-azure/changelog-archive/_index.md new file mode 100644 index 000000000..005902602 --- /dev/null +++ b/content/nginxaas-azure/changelog-archive/_index.md @@ -0,0 +1,9 @@ +--- +title: Changelog archive +weight: 950 +draft: false +url: /nginxaas/azure/changelog-archive/ +menu: + docs: + parent: NGINXaaS for Azure +--- \ No newline at end of file diff --git a/content/nginxaas-azure/changelog-archive/changelog-2022.md b/content/nginxaas-azure/changelog-archive/changelog-2022.md new file mode 100644 index 000000000..02ae378bc --- /dev/null +++ b/content/nginxaas-azure/changelog-archive/changelog-2022.md @@ -0,0 +1,106 @@ +--- +title: "2022" +weight: 200 +toc: true +url: /nginxaas/azure/changelog-archive/changelog-2022/ +--- + +Learn about the updates, new features, and resolved bugs in F5 NGINX as a Service for Azure during the year 2022. + +To see the latest changes, visit the [Changelog]({{< relref "/nginxaas-azure/changelog" >}}) page. + +To see a list of currently active issues, visit the [Known issues]({{< relref "/nginxaas-azure/known-issues.md" >}}) page. + +## December 14, 2022 + +- {{% icon-resolved %}} **New customer deployments are now functional.** + + We have rolled out a fix that addresses the issue, and new customers or existing customers in a new region can create deployments. + +## December 7, 2022 + +- {{% icon-resolved %}} **System Assigned Managed Identitiy can now be used with a deployment.** + + Users can now leverage System Assigned Managed Identities with their deployment. The lifecycle of the identity is tied to the lifecycle of the corresponding deployment. See [Managed Identity Types](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview#managed-identity-types) for more information. + +## November 29, 2022 + +- {{% icon-resolved %}} **Absolute paths may now be used with the `js_import` directive.** + + NGINXaaS for Azure has new restrictions on file paths for certificate files, njs files, etc. See the [NGINX Filesystem Restrictions table]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/overview/#nginx-filesystem-restrictions" >}}) for more information. Existing configurations will not be affected unless they need to be updated. + +## November 22, 2022 + +- {{% icon-feature %}} **Logging support is now available** + + Please visit the [Logging Support]({{< relref "/nginxaas-azure/monitoring/enable-logging/" >}}) documentation for more information on exporting NGINX logs with NGINXaaS for Azure. + +- {{% icon-resolved %}} **NGINXaaS for Azure ARM API schema supports previously unused fields `protectedFiles` and `logging`.** + +## November 14, 2022 + +- {{% icon-feature %}} NGINX deployment can be configured to send [metrics-based alerts]({{< relref "/nginxaas-azure/monitoring/configure-alerts.md" >}}). + +## November 7, 2022 + +- {{% icon-feature %}} New deployments utilize [Availability Zones](https://learn.microsoft.com/en-us/azure/reliability/availability-zones-overview) to ensure data planes are highly available. 
+- {{% icon-feature %}} Files containing sensitive data can be uploaded as a "Protected File", see: [NGINX Configuration]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}) + +## October 24, 2022 + +- {{% icon-feature %}} __NGINXaaS for Azure is now generally available in more regions__ + + NGINXaaS for Azure is now available in the following additional regions: + + - West US 2 + - East US + - Central US + - North Central US + + See the [Supported Regions]({{< relref "/nginxaas-azure/overview/overview.md#supported-regions" >}}) documentation for the full list of supported regions. + +## October 11, 2022 + +- {{% icon-resolved %}} **Resolved known njs filepath issues** + + Affecting new deployments only, two njs files in different subdirectories may share the same filename. For example: + ``` + js_path "njs"; + js_import d1 as d1/test.js; + js_import d2 as d2/test.js; + ``` + +## October 5, 2022 + +- {{% icon-feature %}} __Updated the error returned when a certificate cannot be applied to the NGINX Configuration__ + + This change improves the readability of some errors that may be returned when a certificate cannot be applied. + +## September 22, 2022 + +- {{% icon-feature %}} **NGINX configurations have default logging directives** + + Added default `access_log` and `error_log` to NGINX configurations in preparation for upcoming logging features. Requires a config push to be applied. + +- {{% icon-feature %}} **Improved likelihood of deployment success** +- {{% icon-feature %}} **Improved performance and reliability of backend services** +- {{% icon-resolved %}} **Fixed bug where NGINX version appeared empty** + +## July 21, 2022 + +- {{% icon-feature %}} **Basic caching is now supported** + + For more information on caching with NGINXaaS for Azure, please visit the [Basic Caching]({{< relref "/nginxaas-azure/quickstart/basic-caching.md" >}}) documentation. + +- {{% icon-feature %}} **Rate Limiting is now supported** + + For information on rate limiting with NGINXaaS for Azure, please visit the [Rate Limiting]({{< relref "/nginxaas-azure/quickstart/rate-limiting.md" >}}) documentation. + + +## May 24, 2022 + +### Welcome to the NGINXaaS Public Preview + +NGINXaaS for Azure is now available for public preview. Give it a try! If you find any issues please let us know by [raising a support ticket]({{< relref "/nginxaas-azure/troubleshooting/troubleshooting.md" >}}). + +Visit the [Known issues]({{< relref "/nginxaas-azure/known-issues.md" >}}) section to learn about the issues present in this release. diff --git a/content/nginxaas-azure/changelog-archive/changelog-2023.md b/content/nginxaas-azure/changelog-archive/changelog-2023.md new file mode 100644 index 000000000..f5def3409 --- /dev/null +++ b/content/nginxaas-azure/changelog-archive/changelog-2023.md @@ -0,0 +1,316 @@ +--- +title: "2023" +weight: 100 +toc: true +url: /nginxaas/azure/changelog-archive/changelog-2023/ +--- + +Learn about the updates, new features, and resolved bugs in F5 NGINX as a Service for Azure during the year 2023. + +To see the latest changes, visit the [Changelog]({{< relref "/nginxaas-azure/changelog" >}}) page. + +To see a list of currently active issues, visit the [Known issues]({{< relref "/nginxaas-azure/known-issues.md" >}}) page. 
+ +## December 19, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports new metrics** + + NGINXaaS now supports the following metrics derived from NGINX Plus statistics introduced in + + API version 8: + - SSL statistics for each HTTP upstream and stream upstream + - SSL statistics for each HTTP server zone and stream server zone + - Extended statistics for SSL endpoint + + API version 9: + - Per-worker connection statistics including accepted, dropped, active and idle connections, total and current requests + + For a complete catalog of metrics, see the [Metrics Catalog]({{< relref "/nginxaas-azure/monitoring/metrics-catalog.md">}}). + +## December 6, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports NGINX config dry-run** + + NGINXaaS now supports the NGINX config dry-run. See the [Config Validation]({{< relref "/nginxaas-azure/getting-started/nginx-configuration#nginx-configuration-validation" >}}) documentation for instructions on how to use it. + + +## November 2, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports the Image-Filter dynamic module** + + NGINXaaS now supports the [Image-Filter](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html) dynamic module. For a complete list of allowed directives, see the [Configuration Directives List]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#configuration-directives-list" >}}). + +- {{% icon-feature %}} **NGINXaaS for Azure is now generally available in more regions** + + NGINXaaS for Azure is now available in Japan East. + + See the [Supported Regions]({{< relref "/nginxaas-azure/overview/overview.md#supported-regions" >}}) documentation for the full list of regions where NGINXaaS for Azure is available. + +## October 31, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports HTTP/3 and QUIC.** + + NGINXaaS can now serve client requests through HTTP/3 connections. NGINX only supports HTTP/3 on the client side and does not support HTTP/3 to upstreams. NGINXaaS utilizes the [OpenSSL](https://openssl.org/) library; however, the OpenSSL compatibility layer it uses does not support [early data](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_early_data). + + + ```nginx + http { + server { + # for better compatibility it's recommended + # to use the same port for http/3 and https + listen 443 quic reuseport; + listen 443 ssl; + ssl_certificate /etc/nginx/foo.pem; + ssl_certificate_key /etc/nginx/foo.key; + # ... + } + } + ``` + + To get started using HTTP/3 and NGINXaaS: + + - Update the [network security group](https://docs.microsoft.com/en-us/azure/virtual-network/tutorial-filter-network-traffic#create-security-rules) associated with the NGINXaaS deployment’s subnet to allow inbound traffic for HTTP/3 UDP ports in the NGINX configuration. + See our [FAQ]({{< relref "/nginxaas-azure/faq" >}}), for limits on how many unique ports may be specified in a configuration and a list of restricted ports. + + - Additionally, add a [Managed Identity]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}) to your deployment and create [SSL/TLS Certificates]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/" >}}). For more information on using NGINX with HTTP/3, see the [HTTP/3 module](https://nginx.org/en/docs/http/ngx_http_v3_module.html). 
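+
+  As a minimal sketch of the NSG update described in the first step above, assuming a placeholder resource group `myResourceGroup`, a network security group named `myNSG`, and HTTP/3 served on UDP port 443, an inbound rule could be added with the Azure CLI:
+
+  ```bash
+  # Allow inbound QUIC (HTTP/3) traffic to the NGINXaaS deployment's subnet
+  az network nsg rule create \
+    --resource-group myResourceGroup \
+    --nsg-name myNSG \
+    --name allow-http3-quic \
+    --priority 300 \
+    --direction Inbound \
+    --access Allow \
+    --protocol Udp \
+    --destination-port-ranges 443
+  ```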
+ +## October 25, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure is now generally available in more regions** + + NGINXaaS for Azure is now available in North Europe. + + See the [Supported Regions]({{< relref "/nginxaas-azure/overview/overview.md#supported-regions" >}}) documentation for the full list of regions where NGINXaaS for Azure is available. + +## October 15, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure supports new dynamic modules** + + NGINXaaS now supports the [OpenTelemetry](https://nginx.org/en/docs/ngx_otel_module.html) and [XSLT](https://nginx.org/en/docs/http/ngx_http_xslt_module.html) modules. + +## October 11, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports smaller deployments** + + You can now create or scale deployments to a capacity of 10 NCUs, ideal for small workloads. + +## October 9, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure maximum capacity increased** + + The maximum capacity of NGINXaaS for Azure has been increased from 160 NCUs to **500 NCUs** under the **Standard** plan. Existing deployments can also benefit from this new limit if users choose to scale up. + + To adjust capacity, refer to [Adjusting Capacity]({{< relref "/nginxaas-azure/quickstart/scaling.md#adjusting-capacity" >}}). + + To learn more about capacity restrictions, refer to [Capacity Restrictions]({{< relref "/nginxaas-azure/quickstart/scaling.md#capacity-restrictions" >}}). + +## September 13, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports serving static content** + + An NGINXaaS deployment can now serve static content. See [Hosting Static Content]({{< relref "/nginxaas-azure/quickstart/hosting-static-content.md" >}}) for details. + +## August 23, 2023 + +- {{% icon-resolved %}} **NGINXaaS for Azure now supports attaching a Public IP from a Public IP prefix** + + In the Microsoft Azure portal, you can [create a static public IP address from an IP prefix](https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/create-public-ip-prefix-portal?tabs=create-default#create-a-static-public-ip-address-from-a-prefix). This release of NGINXaaS introduces support for attaching public IP addresses associated with a public IP prefix to your NGINXaaS deployments. + +## Aug 7, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure now deploys with a default configuration** + + NGINXaaS new deployments will now include a default configuration, providing a smoother setup experience compared to the previous empty configuration. + + To learn more about configuration, refer to [Upload an NGINX Configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md" >}}). + + - {{% icon-feature %}} **NGINXaaS for Azure now supports more directives** + + NGINXaaS now supports new directives. For a complete list of allowed directives, see the [Configuration Directives List]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#configuration-directives-list" >}}). + +## July 27, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports higher capacity** + + NGINXaaS for Azure allowed users to create deployments with a maximum capacity of 80 NCUs under the **Standard** plan. A recent change now allows users to deploy up to **160 NCUs**. Existing NGINXaaS deployments should also scale up to 160 NCUs. + + To adjust capacity, refer [Adjusting Capacity]({{< relref "/nginxaas-azure/quickstart/scaling.md#adjusting-capacity" >}}). 
+ + To learn more about capacity restrictions, refer to [Capacity Restrictions]({{< relref "/nginxaas-azure/quickstart/scaling.md#capacity-restrictions" >}}). + +## July 13, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure automatically rotates SSL/TLS certificates** + + NGINXaaS for Azure now automatically retrieves renewed certificates from Azure Key Vault and applies them to your NGINX deployment. To learn more about this new feature, refer to [Certificate Rotation]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/overview.md#certificate-rotation" >}}). + +## July 7, 2023 + +- {{% icon-feature %}} **Improve compatibility with Azure Key Vault certificates generated through merging from an external provider** (e.g. [keyvault-acmebot](https://github.com/shibayan/keyvault-acmebot)) + + Key Vault's certificate merge command puts the server certificate as the last certificate in the generated PFX but NGINX requires that it be the first one in the generated PEM. NGINXaaS will dynamically reorder the certificates to be in chain order with the server certificate first. + +- {{% icon-feature %}} **Support NGINX `log_not_found` directive ([docs](http://nginx.org/en/docs/http/ngx_http_core_module.html#log_not_found))** + +## June 29, 2023 + +- {{% icon-feature %}} **NGINXaaS can now proxy and load balance UDP traffic.** + + To configure NGINX to handle UDP traffic, specify the [`stream`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) directive in your NGINX configuration. + + ```nginx + stream { + server { + listen 53 udp; + # ... + } + # ... + } + ``` + + To learn more about load balancing UDP traffic with NGINX, see [TCP and UDP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). + +## June 21, 2023 + +- {{% icon-resolved %}} **NGINXaaS for Azure accepts configurations larger than 60kB** + + An NGINXaaS deployment can now accept configurations larger than 60kB. + +## June 6, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure supports new directives** + + NGINXaaS now allows the `ssl_preread` directive and most directives from the `ngx_http_fastcgi_module` module. For a complete list of allowed directives, see the [Configuration Directives List]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#configuration-directives-list" >}}). + +## May 31, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure is now generally available in more regions** + + NGINXaaS for Azure is now available in the following additional regions: + + - West US 3 + + See the [Supported Regions]({{< relref "/nginxaas-azure/overview/overview.md#supported-regions" >}}) documentation for the full list of supported regions. + +## May 17, 2023 + +- {{% icon-feature %}} **NGINXaaS can now proxy and load balance TCP traffic.** + + To configure NGINX to handle TCP traffic, specify the [`stream`](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream) directive in your NGINX configuration. + + ```nginx + stream { + server { + listen 12345; + # ... + } + # ... + } + ``` + + To learn more about load balancing TCP traffic with NGINX, see [TCP and UDP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). + +## May 1, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure supports passing traffic to gRPC servers.** + + NGINXaaS can now be configured as a gateway for gRPC services. 
Refer to NGINX's [gRPC module](https://nginx.org/en/docs/http/ngx_http_grpc_module.html) for more information. + +## April 26, 2023 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports HTTP/2.** + + NGINXaaS can now serve client requests through HTTP/2 connections. NGINX only supports HTTP/2 on the client side and does not support HTTP/2 to upstreams. + + ```nginx + http { + server { + listen 443 ssl http2; + + ssl_certificate server.crt; + ssl_certificate_key server.key; + # ... + } + } + ``` + + To get started using HTTP/2 and NGINXaaS, add a [Managed Identity]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}) to your deployment and create [SSL/TLS Certificates]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/" >}}). For more information on using NGINX with HTTP/2, see the [HTTP/2 module](https://nginx.org/en/docs/http/ngx_http_v2_module.html). + +- {{% icon-resolved %}} NGINXaaS can now serve static files with the `error_page` directive. + +## April 17, 2023 + +- {{% icon-feature %}} **NGINXaaS can now support NGINX configurations to secure HTTP traffic between NGINX and upstreams** + + NGINXaaS now accepts NGINX directives to secure traffic between NGINX and upstream using SSL/TLS certificates. + + Refer to [Securing Upstream Traffic]({{< relref "/nginxaas-azure/quickstart/security-controls/securing-upstream-traffic.md">}}) for more details on how to configure NGINXaaS with these directives. + +## April 7, 2023 + +- {{% icon-feature %}} **NGINX configurations may now listen on ports other than 80 and 443.** + + NGINXaaS now accepts requests on ports in addition to 80 and 443. Inbound ports are specified in the NGINX configuration using the [`listen`](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen) directive. + + NGINXaas can be configured to accept requests on up to 5 unique ports. + + ```nginx + http { + server { + listen 8080; + # ... + } + } + ``` + + Update the [network security group's inbound security rules](https://docs.microsoft.com/en-us/azure/virtual-network/tutorial-filter-network-traffic#create-security-rules) associated with the NGINXaaS deployment's subnet to allow inbound traffic for all listen ports in the NGINX configuration. + + See our [FAQ]({{< relref "/nginxaas-azure/faq" >}}), for limits on how many unique ports may be specified in a configuration and a list of restricted ports. + + +## March 16, 2023 + +- {{% icon-resolved %}} Deployment configuration now succeeds after adding a managed identity. + + After adding a managed identity to a deployment, the deployment transitions from an **Accepted** state to a **Succeeded** state only after the operation to add the managed identity succeeds. The user can then proceed to configure the deployment. + + +## February 21, 2023 + +- {{% icon-feature %}} **Directives `auth_jwt_key_file` and `auth_jwt_require` are now supported.** + + Refer to the [`auth_jwt_key_file`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file) and [`auth_jwt_require`](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_require) documentation for more information on using these directives. + + - {{% icon-resolved %}} **PKCS12 certificates may now be added to your NGINXaaS deployment.** + + Previously, NGINXaaS only accepted PEM formatted certificates. Now, both PEM and PKCS12 certificates are supported. 
+ + - {{% icon-resolved %}} **State files may now be used with the `keyval_zone` directive.** + + For information on storing the state of a key-value database with a state file, see [`keyval_zone`](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone)'s documentation. + + +## January 11, 2023 + +- {{% icon-feature %}} **NGINXaaS is generally available** + + We are pleased to announce the general availability of NGINX as a Service (NGINXaaS), a first-party-like experience as a service co-developed by Microsoft and NGINX and tightly integrated into the [Azure](https://azure.microsoft.com/) ecosystem. + + NGINXaaS, powered by [NGINX Plus](https://www.nginx.com/products/nginx/), is a fully managed service that removes the burden of deploying your own NGINX Plus cluster, installing libraries, upgrading, and managing it. + + NGINXaaS simplifies the process of moving your [existing NGINX configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration#add-nginx-configuration" >}}) to the Azure cloud. Once your configurations are moved to Azure, [securely manage SSL/TLS certificates and keys stored in Azure Key Vault and reference them within your NGINX configurations]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md" >}}). You can [watch your application's traffic in real time]({{< relref "/nginxaas-azure/monitoring/enable-monitoring" >}}) with Azure monitoring and alerts, and scale your deployment to fit your needs, maximizing cost efficiency. + You can create, update, and delete your NGINXaaS deployment using the [Azure Resource Manager]({{< relref "/nginxaas-azure/client-tools/templates.md" >}}), the [Azure SDK]({{< relref "/nginxaas-azure/client-tools/sdk" >}}), [CLI]({{< relref "/nginxaas-azure/getting-started/create-deployment/deploy-azure-cli.md" >}}), and [Terraform]({{< relref "/nginxaas-azure/getting-started/create-deployment/deploy-terraform.md" >}}) in addition to the [Azure portal]({{< relref "/nginxaas-azure/getting-started/create-deployment/" >}}). + + Our new "Standard" plan is ready for production workloads. + + To learn more, refer to the following NGINXaaS documentation: + + - [NGINXaaS for Azure overview]({{< relref "/nginxaas-azure/overview/overview.md" >}}) + - [NGINXaaS, NGINX Plus, and NGINX Open Source feature comparison]({{< relref "/nginxaas-azure/overview/feature-comparison.md" >}}) + - [NGINXaaS billing details]({{< relref "/nginxaas-azure/billing/overview.md" >}}) + +## January 10, 2023 + +- {{% icon-resolved %}} **Special parameters in `map` and `geo` directives are now supported.** + +- {{% icon-resolved %}} **The `match` directive is now supported.** \ No newline at end of file diff --git a/content/nginxaas-azure/changelog.md b/content/nginxaas-azure/changelog.md new file mode 100644 index 000000000..62174e020 --- /dev/null +++ b/content/nginxaas-azure/changelog.md @@ -0,0 +1,295 @@ +--- +title: "Changelog" +weight: 900 +toc: true +docs: "DOCS-870" +--- + +Learn about the latest updates, new features, and resolved bugs in F5 NGINX as a Service for Azure. + +To see a list of currently active issues, visit the [Known issues]({{< relref "/nginxaas-azure/known-issues.md" >}}) page. + +To review older entries, visit the [Changelog archive]({{< relref "/nginxaas-azure/changelog-archive" >}}) section. 
+ +## Feb 10, 2025 + +- {{% icon-feature %}} **NGINXaaS Load Balancer for Kubernetes is now Generally Available** + + NGINXaaS can now be used as an external load balancer to route traffic to workloads running in your Azure Kubernetes Cluster. To learn how to set it up, see the [Quickstart Guide]({{< relref "/nginxaas-azure/quickstart/loadbalancer-kubernetes.md">}}). + +## January 23, 2025 + +- {{< icon-feature >}} **In-place SKU Migration from Standard to Standard V2** + + You can now migrate NGINXaaS for Azure from the Standard plan to the Standard V2 plan without redeploying. We recommend upgrading to the Standard V2 plan to access features like NGINX App Protect WAF and more listen ports. The Standard plan will be retired soon. For migration details, see [migrate from standard]({{< relref "/nginxaas-azure/troubleshooting/migrate-from-standard.md">}}). + +## December 17, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure can integrate with Azure Network Security Perimeter** + + NGINXaaS can now integrate with [Azure Network Security Perimeter](https://learn.microsoft.com/en-us/azure/private-link/network-security-perimeter-concepts). This integration allows users to set up access rules, so their NGINXaaS deployment can retrieve certificates from Azure Key Vault, while blocking all other public access to the key vault. For more information, please refer to the [Configure Network Security Perimeter]({{< relref "/nginxaas-azure/quickstart/security-controls/certificates.md#configure-network-security-perimeter-nsp" >}}) documentation. + +## December 3, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports the GeoIP2 dynamic module** + + NGINXaaS now supports the [GeoIP2](https://github.com/leev/ngx_http_geoip2_module) dynamic module. For more information, see [GeoIP2 quickstart]({{< relref "/nginxaas-azure/quickstart/geoip2.md">}}). For a complete list of allowed directives, see the [Configuration Directives List]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/overview/#configuration-directives-list" >}}). + +## November 18, 2024 + +- {{% icon-resolved %}} **Consumed NCUs metric more accurately accounts for concurrent connections** + + The consumed NCUs metric now uses the `system.worker_connections` metric to more accurately determine the number of concurrent connections used by NGINX. Previously NGINXaaS was underreporting the number of concurrent connections used by NGINX, improperly omitting connections to upstreams. + + Users may notice an increase in their deployments' consumed NCUs if their deployments are handling a large number of connections. This fix will help to make autoscaling more responsive to changes in incoming traffic. Customers using autoscaling may notice that their deployments scale out if the number of concurrent connections increases significantly. + +## November 8, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure now improves visibility and management of protected files** + + Users can now view the file paths and associated metadata of protected files added to the NGINX configuration of an NGINXaaS deployment, while the file contents remain confidential. Users can also overwrite an existing protected file with new file contents or resubmit it without having to provide the file contents again. + + For more details on protected files, refer to the [Add an NGINX configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#add-an-nginx-configuration" >}}) section. 
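+
+  As an illustration only, and assuming the `nginx` Azure CLI extension is installed, a command along the following lines shows the configuration metadata for a deployment, including the virtual paths of protected files but not their contents (resource names are placeholders, and the exact flags and output fields may differ by extension version):
+
+  ```bash
+  # Inspect an NGINXaaS deployment's configuration; protected file contents stay hidden
+  az nginx deployment configuration show \
+    --resource-group myResourceGroup \
+    --deployment-name myDeployment \
+    --name default
+  ```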
+ +## October 23, 2024 + +- {{% icon-feature %}} **NGINXaaS Load Balancer for Kubernetes preview release** + + You can now use NGINXaaS as an external load balancer to direct traffic into Kubernetes. For details, see the [quickstart]({{< relref "/nginxaas-azure/quickstart/loadbalancer-kubernetes.md" >}}). + +## October 10, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure is now generally available in more regions** + + NGINXaaS for Azure is now available in the following additional regions: + + - Brazil South + + See the [Supported Regions]({{< relref "/nginxaas-azure/overview/overview.md#supported-regions" >}}) documentation for the full list of regions where NGINXaaS for Azure is available. + +## September 18, 2024 + +- {{% icon-feature %}} **NGINXaaS is now running NGINX Plus Release 32 (R32) in the Stable Upgrade Channel** + + NGINXaaS for Azure deployments using the **Stable** [Upgrade Channel]({{< relref "/nginxaas-azure/quickstart/upgrade-channels.md" >}}) have now been automatically upgraded to [NGINX Plus Release 32 (R32)](https://docs.nginx.com/nginx/releases/#nginxplusrelease-32-r32). This upgrade also includes updates to the following NGINX Plus modules: + - nginx-plus-module-headers-more + - nginx-plus-module-image-filter + - nginx-plus-module-lua + - nginx-plus-module-ndk + - nginx-plus-module-njs + - nginx-plus-module-otel + - nginx-plus-module-xslt + + For a complete list of allowed directives, see the [Configuration Directives List]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#configuration-directives-list" >}}). + +## September 13, 2024 + +- {{< icon-warning >}} **Standard plan retirement** + +NGINXaaS for Azure now supports the [Standard V2](https://docs.nginx.com/nginxaas/azure/billing/overview) plan. We encourage you to use the Standard V2 plan for all new NGINXaaS deployments from now on to take advantage of additional features like NGINX App Protect WAF and a higher number of listen ports. The Standard V2 plan follows a similar pricing model as the Standard plan. + +{{}}The Standard plan will be deprecated and will not be available for new deployments starting November 1, 2024.{{}} + +Your current deployments on the Standard plan will continue to function but won't include any of the new features we've introduced in the Standard V2 plan. Additionally, we intend to phase out the Standard plan in the future. When this happens, we will offer a migration path to the Standard V2 plan for existing NGINXaaS deployments on the Standard plan. + +- **Recommended action:** + + Update your automation scripts to use the Standard V2 plan. The SKU for the Standard V2 pricing plan is `standardv2_Monthly`. + +If you have any questions or concerns, please [contact us](https://portal.azure.com/#view/Microsoft_Azure_Support/HelpAndSupportBlade/~/overview). + +## Aug 29, 2024 + +- {{% icon-feature %}} **The backend subnet of an NGINXaaS deployment can now be updated** + +An NGINXaaS deployment can now be gracefully updated to a new subnet with zero downtime. Currently, this capability is available through ARM templates or the Azure CLI. Support through other client tools is coming soon. + +## Aug 22, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports NGINX App Protect WAF in Preview** + + NGINXaaS now supports [NGINX App Protect WAF](https://docs.nginx.com/nginx-app-protect-waf/v5) in Preview as part of the [Standard v2 plan]({{< relref "/nginxaas-azure/billing/overview.md#standard-v2-plan">}}). 
For more information, see [enable WAF]({{< relref "/nginxaas-azure/app-protect/enable-waf.md">}}). + +## Aug 16, 2024 + +- {{% icon-feature %}} **Notification on update to deployments using the Stable Upgrade Channel** + + NGINXaaS for Azure deployments using the **Stable** [Upgrade Channel]({{< relref "/nginxaas-azure/quickstart/upgrade-channels.md" >}}) will be updated to [NGINX Plus Release 32 (R32)](https://docs.nginx.com/nginx/releases/#nginxplusrelease-32-r32) during the week of September 16-22, 2024. This will also include updates to the following NGINX Plus modules: + - nginx-plus-module-headers-more + - nginx-plus-module-image-filter + - nginx-plus-module-lua + - nginx-plus-module-ndk + - nginx-plus-module-njs + - nginx-plus-module-otel + - nginx-plus-module-xslt + + Please review the [NGINX Plus Release 32 (R32)](https://docs.nginx.com/nginx/releases/#nginxplusrelease-32-r32) Release Notes carefully. If you have any concerns, it's recommended to validate your configuration against NGINX Plus R32 by setting up a test deployment using the **Preview** [Upgrade Channel]({{< relref "/nginxaas-azure/quickstart/upgrade-channels.md" >}}). See [these instructions]({{< relref "/nginxaas-azure/quickstart/recreate.md" >}}) on how to set up a deployment similar to your current one. + + If you have any questions or concerns, please [contact us]({{< relref "troubleshooting/troubleshooting.md" >}}). + +## July 30, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure is now generally available in more regions** + + NGINXaaS for Azure is now available in the following additional regions: + + - Central India + - South India + + See the [Supported Regions]({{< relref "/nginxaas-azure/overview/overview.md#supported-regions" >}}) documentation for the full list of regions where NGINXaaS for Azure is available. + +## July 23, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure is now generally available in more regions** + + NGINXaaS for Azure is now available in the following additional regions: + + - Germany West Central + + See the [Supported Regions]({{< relref "/nginxaas-azure/overview/overview.md#supported-regions" >}}) documentation for the full list of regions where NGINXaaS for Azure is available. + +## July 10, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure is now generally available in more regions** + + NGINXaaS for Azure is now available in the following additional regions: + + - Southeast Asia + - Sweden Central + + See the [Supported Regions]({{< relref "/nginxaas-azure/overview/overview.md#supported-regions" >}}) documentation for the full list of regions where NGINXaaS for Azure is available. + +## June 28, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports the Lua dynamic module** + + NGINXaaS now supports the [Lua](https://github.com/openresty/lua-nginx-module) dynamic module `v0.10.25`. The `lua_capture_error_log` directive is not supported at this time. For a complete list of allowed directives, see the [Configuration Directives List]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#configuration-directives-list" >}}). + +## June 18, 2024 + +- {{% icon-feature %}} **NGINXaaS now supports NGINX Plus Release 31 (R31)** + + NGINXaaS now supports [NGINX Plus Release 31 (R31)](https://docs.nginx.com/nginx/releases/#nginxplusrelease-31-r31). 
For a complete list of allowed directives, see the [Configuration Directives List]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#configuration-directives-list" >}}). + +## June 17, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports the Headers-More dynamic module** + + NGINXaaS now supports the [Headers-More](https://github.com/openresty/headers-more-nginx-module) dynamic module. For a complete list of allowed directives, see the [Configuration Directives List]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#configuration-directives-list" >}}). + +## June 6, 2024 + +- {{% icon-feature %}} **Notification on update to deployments using the Stable Upgrade Channel** + + NGINXaaS for Azure deployments using the **Stable** [Upgrade Channel]({{< relref "/nginxaas-azure/quickstart/upgrade-channels.md" >}}) will be updated to [NGINX Plus Release 31 (R31)](https://docs.nginx.com/nginx/releases/#nginxplusrelease-31-r31) during the week of June 17-23, 2024. This will also include updates to the following NGINX Plus modules: + + - nginx-plus-module-headers-more + - nginx-plus-module-image-filter + - nginx-plus-module-lua + - nginx-plus-module-ndk + - nginx-plus-module-njs + - nginx-plus-module-otel + - nginx-plus-module-xslt + + Please review the [NGINX Plus Release 31 (R31)](https://docs.nginx.com/nginx/releases/#nginxplusrelease-31-r31) Release Notes carefully. If you have any concerns, it's recommended to validate your configuration against NGINX Plus R31 by setting up a test deployment using the **Preview** [Upgrade Channel]({{< relref "/nginxaas-azure/quickstart/upgrade-channels.md" >}}). See [these instructions]({{< relref "/nginxaas-azure/quickstart/recreate.md" >}}) on how to set up a deployment similar to your current one. + + If you have any questions or concerns, please [contact us]({{< relref "troubleshooting/troubleshooting.md" >}}). + +## May 20, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports a Basic plan for dev/test purposes** + + For trial, development and testing purposes without SLA guarantees, redundancy or scaling, NGINXaaS provides the ability to choose a Basic plan deployment. For more information, see [pricing plans]({{< ref "billing/overview.md#pricing-plans">}}). + +## April 18, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports runtime state sharing** + + NGINXaaS instances can now synchronize shared memory zones when configured to enable [Runtime State Sharing](https://docs.nginx.com/nginx/admin-guide/high-availability/zone_sync/). + + This feature allows for some data to be shared between NGINXaaS instances including: + + - [Sticky‑learn session persistence](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky_learn) + - [Rate limiting](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone) + - [Key‑value store](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone) + + [Runtime State Sharing](https://docs.nginx.com/nginx/admin-guide/high-availability/zone_sync/) enables NGINXaaS to be configured for use cases such as [OIDC](https://github.com/nginxinc/nginx-openid-connect) and [SAML](https://github.com/nginxinc/nginx-saml) authentication. + + Refer to [Runtime State Sharing with NGINXaaS for Azure]({{< relref "/nginxaas-azure/quickstart/runtime-state-sharing.md" >}}) for the configuration guide. 
+ +- {{% icon-feature %}} **NGINXaaS for Azure now supports metrics from stream zone_sync statistics** + + For a complete catalog of metrics, see the [Metrics Catalog]({{< relref "/nginxaas-azure/monitoring/metrics-catalog.md">}}). + +## April 9, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure supports autoscaling and Upgrade Channels on all client tools** + + In addition to the Azure Portal and the ARM API version `2024-01-01-preview`, you can now use all other client tools, such as the Azure CLI or Terraform, to enable autoscaling or specify an Upgrade Channel. + + For more information on autoscaling, see the [Autoscaling documentation]({{< relref "/nginxaas-azure/quickstart/scaling.md#autoscaling">}}). + For more information on Upgrade Channels, see [Upgrade Channels]({{< relref "/nginxaas-azure/quickstart/upgrade-channels.md" >}}). + +- {{% icon-feature %}} **NGINXaaS for Azure can now accept a system assigned and a user assigned managed identity** + + NGINXaaS for Azure now allows using a system assigned managed identity and a user assigned managed identity at the same time per deployment. This provides flexibility in assigning role permissions for integrations that NGINXaaS for Azure require. + +## March 21, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports Upgrade Channels** + + An Upgrade Channels lets you control the frequency at which your NGINXaaS deployment receives upgrades for NGINX Plus and its related modules. For more information, see [Upgrade Channels]({{< relref "/nginxaas-azure/quickstart/upgrade-channels.md" >}}). + +## March 20, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports autoscaling** + + Enable autoscaling to automatically adjust the size of your deployment based on the traffic requirements. Autoscaling can be enabled in the Azure Portal or the ARM API version `2024-01-01-preview`, with other client tools coming soon. + + For more information on autoscaling, see the [Autoscaling documentation]({{< relref "/nginxaas-azure/quickstart/scaling.md#autoscaling">}}). + +## March 13, 2024 + +- {{% icon-resolved %}} **Fixed a known issue causing Terraform to show an error while trying to manage configuration of a new deployment (ID-891)** + + NGINXaaS for Azure now requires users to take an explicit action to create a default NGINX configuration with a deployment. We have added the "Apply default NGINX configuration" field in the [updated deployment creation workflow]({{< relref "/nginxaas-azure/getting-started/create-deployment/deploy-azure-portal.md#networking-tab" >}}) in the Azure portal. For other client tools like Terraform, NGINXaaS for Azure now requires users to explicitly create an NGINX configuration. + +## March 5, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure now supports configuring NGINX Plus as a mail proxy server** + + Enhance your email service’s efficiency by utilizing NGINX Plus as a mail proxy for IMAP, POP3, and SMTP protocols, streamlining configuration for mail servers and external services. + + Please note that NGINXaaS does not support outbound connections on port 25, and an alternative port should be used for SMTP. Additionally, ensure network connectivity from the NGINXaaS deployment to both the mail server and authentication server to support proper mail authentication. + + For a complete list of allowed directives, see the [Configuration Directives List]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#configuration-directives-list" >}}). 
+ +- {{% icon-feature %}} **NGINXaaS for Azure now supports resolver statistics metrics** + + For a complete catalog of metrics, see the [Metrics Catalog]({{< relref "/nginxaas-azure/monitoring/metrics-catalog.md">}}). + +## February 15, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure is now generally available in more regions** + + NGINXaaS for Azure is now available in the following additional regions: + + - Japan West + - Korea South + - Korea Central + + See the [Supported Regions]({{< relref "/nginxaas-azure/overview/overview.md#supported-regions" >}}) documentation for the full list of regions where NGINXaaS for Azure is available. + +## February 8, 2024 + +- {{% icon-feature %}} ****NGINXaaS for Azure now supports [Diagnostic settings in Azure Monitor](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings) to send NGINX logs to different destinations**** + + An NGINXaaS deployment now supports adding Diagnostic Settings to export NGINX logs. See [Enable NGINX Logs]({{< relref "/nginxaas-azure/monitoring/enable-logging/" >}}) for more details. + +## January 22, 2024 + +- {{% icon-feature %}} **NGINXaaS for Azure is now generally available in more regions** + + NGINXaaS for Azure is now available in Canada Central. + + See the [Supported Regions]({{< relref "/nginxaas-azure/overview/overview.md#supported-regions" >}}) documentation for the full list of regions where NGINXaaS for Azure is available. diff --git a/content/nginxaas-azure/client-tools/_index.md b/content/nginxaas-azure/client-tools/_index.md new file mode 100644 index 000000000..5861b0207 --- /dev/null +++ b/content/nginxaas-azure/client-tools/_index.md @@ -0,0 +1,10 @@ +--- +title: Client tools +weight: 500 +draft: false +toc: true +url: /nginxaas/azure/client-tools/ +menu: + docs: + parent: NGINXaaS for Azure +--- \ No newline at end of file diff --git a/content/nginxaas-azure/client-tools/cli.md b/content/nginxaas-azure/client-tools/cli.md new file mode 100644 index 000000000..ece14e96d --- /dev/null +++ b/content/nginxaas-azure/client-tools/cli.md @@ -0,0 +1,32 @@ +--- +title: "Azure CLI" +weight: 900 +description: "Learn how to setup the Azure CLI to manage NGINXaaS for Azure." +categories: ["platform-management"] +toc: true +docs: "DOCS-1234" +url: /nginxaas/azure/client-tools/cli/ +--- + +F5 NGINX as a Service for Azure (NGINXaaS) deployments can be managed using the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/). This document outlines how to install the CLI tool including the NGINX extension. + +## Prerequisites + +- Install Azure CLI version 2.67.0 or greater: [Azure CLI Installation](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli)) +- Log into your Azure account through the CLI: [Azure CLI Authentication](https://learn.microsoft.com/en-us/cli/azure/authenticate-azure-cli). 
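+
+For example, you can confirm both prerequisites from a terminal before continuing:
+
+```bash
+# Check the installed Azure CLI version (2.67.0 or greater is required)
+az version
+
+# Sign in to your Azure account
+az login
+```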
+ +## Install NGINXaaS extension + +In order to install and manage your NGINXaaaS deployments using the Azure CLI, you will need to install the `nginx` extension: + +```bash +az extension add --name nginx --allow-preview true +``` + +## Update NGINXaaS extension + +Ensure you are running the latest version of the `nginx` CLI extension to take advantage of the latest capabilities available on your NGINXaaS deployments: + +```bash +az extension update --name nginx --allow-preview true +``` diff --git a/content/nginxaas-azure/client-tools/sdk.md b/content/nginxaas-azure/client-tools/sdk.md new file mode 100644 index 000000000..34edeb349 --- /dev/null +++ b/content/nginxaas-azure/client-tools/sdk.md @@ -0,0 +1,56 @@ +--- +title: "Azure SDK" +weight: 300 +description: "Learn how to use the Python Azure Management SDK to manage NGINXaaS for Azure deployments." +categories: ["platform-management"] +toc: true +docs: "DOCS-1095" +url: /nginxaas/azure/client-tools/sdk/ +--- + +F5 NGINX as a Service for Azure (NGINXaaS) deployments can be managed using the multi-language SDK. This document outlines common workflows using the Python SDK. You can find example code to manage NGINXaaS deployments and related objects in the NGINXaaS GitHub repository, [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/sdk/python/). + +## Prerequisites + +- [NGINXaaS Prerequisites]({{< relref "/nginxaas-azure/getting-started/prerequisites.md" >}}) +- Install Azure Identity package - [azure-identity](https://pypi.org/project/azure-identity/) +- Install the NGINX SDK - [azure-mgmt-nginx](https://pypi.org/project/azure-mgmt-nginx/) +- See [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/sdk/python/deployments/) for an example script to create prerequisite resources. + +## Workflows + +- For a complete list of NGINXaaS SDK documentation, see the [Azure NGINXaaS SDK Documentation](https://learn.microsoft.com/en-us/python/api/overview/azure/mgmt-nginx-readme) +- [Azure Authentication SDK Documentation](https://learn.microsoft.com/en-us/azure/developer/python/sdk/authentication-overview) + +### Create or update a deployment + +For example scripts to create or update deployment resources, see [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/sdk/python/deployments/) + +- [Azure SDK Deployment Create or Update Documentation](https://learn.microsoft.com/en-us/python/api/azure-mgmt-nginx/azure.mgmt.nginx.operations.deploymentsoperations?view=azure-python#azure-mgmt-nginx-operations-deploymentsoperations-begin-create-or-update) +- [Azure SDK Deployment Delete Documentation](https://learn.microsoft.com/en-us/python/api/azure-mgmt-nginx/azure.mgmt.nginx.operations.deploymentsoperations?view=azure-python#azure-mgmt-nginx-operations-deploymentsoperations-begin-delete) +- [NGINXaaS Managed Identity Documentation]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}) +- [NGINXaaS Azure Monitor Documentation]({{< relref "/nginxaas-azure/monitoring/enable-monitoring/" >}}) + +### Create or update a certificate + +Create or update a certificate under a deployment. This references an existing certificate in an Azure Key Vault and makes it available to the NGINX configuration. See [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/sdk/python/certificates/) for example scripts to create or update deployment certificate resources. 
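+
+As a minimal sketch, creating a certificate with the Python SDK might look like the following; the subscription, resource names, virtual paths, and Key Vault secret ID are placeholders, and the linked snippets and SDK documentation below are the complete, tested references:
+
+```python
+from azure.identity import DefaultAzureCredential
+from azure.mgmt.nginx import NginxManagementClient
+
+# Authenticate and create a management client for the target subscription
+client = NginxManagementClient(DefaultAzureCredential(), subscription_id="<subscription-id>")
+
+# Reference an existing Key Vault certificate and make it available to the NGINX configuration
+poller = client.certificates.begin_create_or_update(
+    resource_group_name="myResourceGroup",
+    deployment_name="myDeployment",
+    certificate_name="myCertificate",
+    body={
+        "location": "eastus2",
+        "properties": {
+            "certificateVirtualPath": "/etc/nginx/ssl/my-cert.crt",
+            "keyVirtualPath": "/etc/nginx/ssl/my-cert.key",
+            "keyVaultSecretId": "https://myvault.vault.azure.net/secrets/my-cert",
+        },
+    },
+)
+certificate = poller.result()  # wait for the long-running operation to finish
+```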
+ +- [Azure SDK Certificate Create or Update Documentation](https://learn.microsoft.com/en-us/python/api/azure-mgmt-nginx/azure.mgmt.nginx.operations.certificatesoperations?view=azure-python#azure-mgmt-nginx-operations-certificatesoperations-begin-create-or-update) +- [Azure SDK Certificate Delete Documentation](https://learn.microsoft.com/en-us/python/api/azure-mgmt-nginx/azure.mgmt.nginx.operations.configurationsoperations?view=azure-python#azure-mgmt-nginx-operations-configurationsoperations-begin-delete) +- [NGINXaaS Certificates Documentation]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md" >}}) +- [Azure SDK Key Vault Documentation](https://learn.microsoft.com/en-us/python/api/overview/azure/key-vault) + +### Create or update a configuration + +Create or update the default configuration for a deployment using a gzipped archive based on the NGINXaaS documentation below. See [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/sdk/python/configurations/) for example scripts to create or update deployment configuration resources. + +- [Azure SDK Configuration Create or Update Documentation](https://learn.microsoft.com/en-us/python/api/azure-mgmt-nginx/azure.mgmt.nginx.operations.configurationsoperations?view=azure-python#azure-mgmt-nginx-operations-configurationsoperations-begin-create-or-update) +- [NGINXaaS GZIP Configuration Documentation]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#upload-gzip-nginx-configuration" >}}) + +## Additional Docs + +- [Python Azure SDK Overview](https://learn.microsoft.com/en-us/python/api/overview/azure/nginx) +- [Java Azure SDK Overview](https://learn.microsoft.com/en-us/java/api/overview/azure/nginx) +- [JavaScript Azure SDK Overview](https://learn.microsoft.com/en-us/javascript/api/overview/azure/nginx) +- [Go Azure SDK Documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/nginx/armnginx) +- [.NET Azure SDK Overview (Preview)](https://learn.microsoft.com/en-us/dotnet/api/overview/azure/nginx?view=azure-dotnet-preview) diff --git a/content/nginxaas-azure/client-tools/templates.md b/content/nginxaas-azure/client-tools/templates.md new file mode 100644 index 000000000..e95fb82f6 --- /dev/null +++ b/content/nginxaas-azure/client-tools/templates.md @@ -0,0 +1,47 @@ +--- +title: "Azure Resource Manager templates" +weight: 100 +description: "Learn how to use Azure Resource Manager (ARM) JSON and Bicep templates to manage NGINXaaS for Azure." +categories: ["platform-management"] +toc: true +docs: "DOCS-1097" +url: /nginxaas/azure/client-tools/templates/ +--- + +F5 NGINX as a Service for Azure (NGINXaaS) deployments can be managed using the ARM API or the Azure CLI with ARM template deployments using JSON or Bicep formats. These deployments can be made locally or in a continuous integration pipeline. This document outlines common workflows using the ARM API. You can find example code to manage NGINXaaS deployments and related objects in the NGINXaaS GitHub repository, [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets). 
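+
+For instance, once you have a template from the snippets repository, you can deploy it locally with the Azure CLI (the resource group, template, and parameter file names below are placeholders):
+
+```bash
+# Deploy an ARM (JSON or Bicep) template that defines an NGINXaaS deployment
+az deployment group create \
+  --resource-group myResourceGroup \
+  --template-file nginx-deployment.json \
+  --parameters @nginx-deployment.parameters.json
+```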
+ +## Prerequisites + +- [NGINXaaS Prerequisites]({{< relref "/nginxaas-azure/getting-started/prerequisites.md" >}}) +- [Azure CLI Installation](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) +- If you are using the Azure CLI for template deployment, you need to be logged in to your Azure account; see [Azure CLI Authentication](https://learn.microsoft.com/en-us/cli/azure/authenticate-azure-cli) +- See [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/arm-templates/deployments/prerequisites) for an example template to create the prerequisite resources. + +## Workflows + +### Create or update a deployment + +See [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/arm-templates/deployments/create-or-update) for an example template to create or update deployment resources. + +- [NGINXaaS Managed Identity Documentation]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}) +- [NGINXaaS Azure Monitor Documentation]({{< relref "/nginxaas-azure/monitoring/enable-monitoring.md" >}}) + +### Create or update a certificate + +Create or update a certificate under a deployment. This references an existing certificate in an Azure Key Vault and makes it available to the NGINX configuration. See [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/arm-templates/certificates/create-or-update) for an example template to create or update certificate resources. + +- [NGINXaaS Certificates Documentation]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md" >}}) +- [ARM Template Key Vault Documentation](https://learn.microsoft.com/en-us/azure/templates/microsoft.keyvault/vaults) + +### Create or update a configuration + +Create or update the default configuration for a deployment using a gzipped archive based on the NGINXaaS documentation below. See [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/arm-templates/configuration) for an example template to create or update configuration resources. + +- [NGINXaaS GZIP Configuration Documentation]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#upload-gzip-nginx-configuration" >}}) + +## Additional Docs + +If you are new to Azure Resource Manager templates, see: + +- [Azure JSON Templates Overview](https://learn.microsoft.com/en-us/azure/azure-resource-manager/templates/overview) +- [Azure Bicep Templates Overview](https://learn.microsoft.com/en-us/azure/azure-resource-manager/bicep/overview) diff --git a/content/nginxaas-azure/client-tools/terraform.md b/content/nginxaas-azure/client-tools/terraform.md new file mode 100644 index 000000000..ee211af83 --- /dev/null +++ b/content/nginxaas-azure/client-tools/terraform.md @@ -0,0 +1,46 @@ +--- +title: "Terraform" +weight: 400 +description: "Learn how to use Terraform to manage NGINXaaS for Azure." +categories: ["platform-management"] +toc: true +docs: "DOCS-1472" +draft: true +url: /nginxaas/azure/client-tools/terraform/ +--- + +F5 NGINX as a Service for Azure (NGINXaaS) deployments can be managed using Terraform. This document outlines common Terraform workflows for NGINXaaS.
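+Each workflow below follows the same Terraform lifecycle. As a minimal sketch, run the standard commands from a directory containing one of the example configurations:
+
+```bash
+# Download providers and initialize the working directory.
+terraform init
+
+# Preview the changes Terraform would make, then apply them.
+terraform plan -out=tfplan
+terraform apply tfplan
+
+# Remove the resources once they are no longer needed.
+terraform destroy
+```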
+ +## Prerequisites + +- [NGINXaaS Prerequisites]({{< relref "/nginxaas-azure/getting-started/prerequisites.md" >}}) +- [Authenticate Terraform to Azure](https://learn.microsoft.com/en-us/azure/developer/terraform/authenticate-to-azure) +- [Install Terraform](https://developer.hashicorp.com/terraform/downloads) + +## Workflows + +### Create or update a deployment + +See [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/terraform/deployments/create-or-update) for an example to create or update deployment resources. + +- [NGINXaaS Managed Identity Documentation]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}) +- [NGINXaaS Azure Monitor Documentation]({{< relref "/nginxaas-azure/monitoring/enable-monitoring.md" >}}) + +### Create or update a certificate + +Upload a self-signed certificate created in Azure Key Vault to a deployment. See [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/terraform/certificates) for an example to create or update certificate resources. + +- [NGINXaaS Certificates Documentation]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md" >}}) + +### Create or update a configuration + +Upload an example multi-file NGINX configuration to a deployment. See [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/terraform/configurations) for an example to create or update configuration resources. + +- [NGINXaaS Configuration Documentation]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md" >}}) + +## Additional Docs + +- [Managing an NGINXaaS for Azure deployment](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/nginx_deployment) +- [Managing an NGINXaaS for Azure deployment configuration](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/nginx_configuration) +- [Managing an NGINXaaS for Azure deployment certificate](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/nginx_certificate) +- If you are new to Terraform, see [Terraform Overview](https://www.terraform.io/) diff --git a/content/nginxaas-azure/faq.md b/content/nginxaas-azure/faq.md new file mode 100644 index 000000000..56e427307 --- /dev/null +++ b/content/nginxaas-azure/faq.md @@ -0,0 +1,254 @@ +--- +title: "Frequently Asked Questions" +weight: 800 +categories: ["concepts"] +toc: true +docs: "DOCS-881" +--- + +Common questions about F5 NGINX as a Service for Azure (NGINXaaS). + +### Is NGINXaaS available in my subscription or in an F5 subscription? +- Your NGINXaaS deployment resource is visible to you under your subscription. The underlying compute resources of your deployment, which are managed by NGINX on your behalf, are not visible in your subscription. + +### Is NGINXaaS active-active? What is the architecture of NGINXaaS? +- NGINXaaS is deployed in an active-active pattern for high availability. To learn more, see the [user guide]({{< relref "/nginxaas-azure/overview/overview.md#architecture" >}}). + +### In which Azure regions is NGINXaaS currently supported? +- We are constantly adding support for new regions. You can find the updated list of supported regions in the [NGINXaaS documentation]({{< relref "/nginxaas-azure/overview/overview.md" >}}). + +### My servers are located in different geographies. Can NGINXaaS load balance for these upstream servers?
+- Yes, NGINXaaS can load balance upstream servers located in different geographies, provided none of the networking limitations listed in the [Known Issues]({{< relref "known-issues.md" >}}) apply. + +### How do I analyze traffic statistics for NGINXaaS? +- NGINXaaS is integrated with [Azure Monitor](https://learn.microsoft.com/en-us/azure/azure-monitor/overview). NGINXaaS publishes [traffic statistics]({{< relref "/nginxaas-azure/monitoring/metrics-catalog.md" >}}) to Azure Monitor. Customers can analyze the traffic statistics by following the steps in the [NGINXaaS Monitoring]({{< relref "/nginxaas-azure/monitoring/enable-monitoring.md" >}}) documentation. + +### When should I scale my deployment? +- Consider requesting additional NCUs if the number of consumed NCUs is over 70% of the number of provisioned NCUs. Consider reducing the number of requested NCUs when the number of consumed NCUs is under 60% of the number of provisioned NCUs. For more information on observing the consumed and provisioned NCUs in your deployment, see the [Scaling documentation]({{< relref "/nginxaas-azure/quickstart/scaling.md#metrics" >}}). + +- Alternatively, [enable autoscaling]({{< relref "/nginxaas-azure/quickstart/scaling.md#autoscaling" >}}) to let the system automatically scale your deployment for you. + +### I am an NGINX Plus customer; how can I switch to NGINXaaS? +- In NGINX Plus, customers SSH into the NGINX Plus system, store their certificates in some kind of storage and configure the network and subnet to connect to NGINX Plus. + +- For NGINXaaS, customers store their certificates in Azure Key Vault and configure NGINXaaS in the same VNet or peer to the VNet in which NGINXaaS is deployed. + +### How does NGINXaaS react to a workload/traffic spike? +- You can monitor the NCUs consumed by looking at the metrics tab of NGINXaaS. To learn about the NCUs consumed, choose NGINXaaS statistics and select "NCU consumed." If the consumed NCUs are close to the requested NCUs, we encourage you to scale your system and increase the NCU units. You can manually scale from your base NCUs (for example, 10) to up to 500 NCUs by selecting the NGINXaaS scaling tab. + +- Currently, we support scaling in 10 NCU intervals (10, 20, 30, and so on). + +- Alternatively, you can enable autoscaling, and NGINXaaS will automatically scale your deployment based on the consumption of NCUs. + +- See the [Scaling Guidance]({{< relref "/nginxaas-azure/quickstart/scaling.md" >}}) documentation for more information. + +### What types and formats of certificates are supported in NGINXaaS? +- NGINXaaS supports self-signed certificates, Domain Validated (DV) certificates, Organization Validated (OV) certificates, and Extended Validation (EV) certificates. + +- Currently, NGINXaaS supports PEM and PKCS12 format certificates. + +- See the [SSL/TLS Certificates documentation]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md" >}}) to learn how to change certificates. + +### Does NGINXaaS support layer 4 load balancing? +- Yes, NGINXaaS currently supports Layer 4 (TCP) and Layer 7 (HTTP) load balancing. + +### Does NGINXaaS support IPv6? +- No, NGINXaaS does not support IPv6 yet. + +### What protocols does NGINXaaS support? + +- At this time, we support the following protocols: + + - HTTPS + - HTTP + - HTTP/2 + - HTTP/3 + - TCP + - QUIC + - IMAP + - POP3 + - SMTP + +### Does NGINXaaS support multiple public IPs, or a mix of public and private IPs?
+ +- NGINXaaS supports one public or private IP per deployment. NGINXaaS doesn't support a mix of public and private IPs at this time. + +### Can I change the IP address used for an NGINXaaS deployment to be public or private? + +- You cannot change the IP address associated with an NGINXaaS deployment from public to private, or from private to public. + +### How large should I make the subnet for NGINXaaS? + +- The minimum subnet size is `/27` and is sufficient for a single NGINXaaS deployment even at large scales. Multiple NGINXaaS deployments can be placed in a single delegated subnet, along with other resources. When doing so, a larger subnet, e.g. a `/24`, is recommended. + +### Can I deploy more than one NGINXaaS to a single subnet? +- Yes; however, every deployment in the subnet will share the address space (range of IP addresses that resources can use within the VNet), so ensure the subnet is adequately sized to scale the deployments. + +### How long does it take to deploy NGINXaaS? +- Typically, you can deploy NGINXaaS in under 5 minutes. + +### Is there any downtime during periodic updates? +- There's no downtime during updates to NGINXaaS. + +### Does changing the capacity of NGINXaaS result in any downtime? +- No, there's no downtime while an NGINXaaS deployment changes capacity. + +### How is my application protected during a disaster? Is there a method for disaster recovery? +- In any Azure region with more than one availability zone, NGINXaaS provides cross-zone replication for disaster recovery. See [Architecture]({{< relref "/nginxaas-azure/overview/overview.md#architecture" >}}) for more details. + +### Can I configure the TLS policy to control TLS protocol versions? +- Yes. You can override the NGINX default protocols to configure the desired TLS/SSL policy. Read more about the procedure in the [Module ngx_http_ssl_module](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols) documentation. + +### How many TLS/SSL certificates does NGINXaaS support? +- NGINXaaS supports up to 100 TLS/SSL certificates. + +### Does NGINXaaS natively integrate with Azure Key Vault? +- Yes, NGINXaaS natively integrates with Azure Key Vault, so you can bring your own certificates and manage them in a centralized location. You can learn more about adding certificates in Azure Key Vault in the [SSL/TLS Certificates documentation]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md" >}}). + +### Can I deploy any other resources in the NGINXaaS subnet? +- Yes, the subnet can contain other resources and is not dedicated to the NGINXaaS for Azure resources; ensure the subnet size is adequate to scale the NGINXaaS deployment. + +### Are NSGs (Network Security Groups) supported on NGINXaaS? +- Yes, an NSG is required in the subnet where NGINXaaS will be deployed to ensure that the deployment is secured and inbound connections are allowed to the ports the NGINX service listens to. + +### Can I restrict access to NGINXaaS based on various criteria, such as IP addresses, domain names, and HTTP headers? +- Yes, you can restrict access to NGINXaaS by defining restriction rules at the Network Security Group level or using NGINX's access control list. To learn more, see the [NGINX module ngx_http_access_module](http://nginx.org/en/docs/http/ngx_http_access_module.html) documentation. + +### What are the supported networking services of NGINXaaS? +- NGINXaaS currently supports VNet and VPN Gateway.
Known limitations can be found in the [Known Issues]({{< relref "known-issues.md" >}}). + +### Does NGINXaaS support end-to-end encryption from client to the upstream server? +- Yes, NGINXaaS supports end-to-end encryption from client to upstream server. + +### What types of logs does NGINXaaS provide? +- NGINXaaS supports the following [two types of logs]({{< relref "/nginxaas-azure/monitoring/enable-logging/">}}). + +- Access Log: To troubleshoot server issues, analyze web traffic patterns and monitor server performance. For more details, please see the [Module ngx_http_log_module](https://nginx.org/en/docs/http/ngx_http_log_module.html?&_ga=2.80762515.545098740.1677716889-256521444.1670450998#access_log) documentation. + +- Error Log: To capture, troubleshoot and identify issues that may occur during the server's operations, such as 400 bad requests, 401 unauthorized, 500 internal server errors, etc. For more details, please see the [Core functionality](https://nginx.org/en/docs/ngx_core_module.html?&_ga=2.8347062.545098740.1677716889-256521444.1670450998#error_log) documentation. + +### What is the retention policy for the above logs? How long are the logs stored? Where are they stored? +- NGINXaaS logs are stored in customer’s storage. Customers can custom define the retention policy. Customers can configure the storage by following the steps outlined in the [NGINXaaS Logging]({{< relref "/nginxaas-azure/monitoring/enable-logging/">}}) documentation. + +### Can I set up an alert with NGINXaaS? +- You can set up an alert with NGINXaaS by following the steps outlined in the [Configure Alerts]({{< relref "/nginxaas-azure/monitoring/configure-alerts.md">}}) documentation. + +### Is request tracing supported in NGINXaaS? +- Yes, see the [Application Performance Management with NGINX Variables](https://www.nginx.com/blog/application-tracing-nginx-plus/) documentation to learn more about tracing. + +### Can I select my desired instance type for NGINXaaS deployment? +- No; NGINXaaS will deploy the right resources to ensure you get the right price-to-performance ratio. + +### Can I migrate from on-prem NGINX+ to NGINXaaS on Azure? +- Yes, you can bring your own configurations or create a new configuration in the cloud. See the [NGINXaaS Deployment]({{< relref "/nginxaas-azure/getting-started/create-deployment/">}}) documentation for more details. + +### Can I associate multiple certificates for the same domain? +- Yes, the "ssl_certificate" directive can be specified multiple times to load certificates of different types. To learn more, see the [Module ngx_http_ssl_module](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate) documentation. + +### What types of redirects does the NGINXaaS support? +- In addition to HTTP to HTTPS, HTTPS to HTTP, and HTTP to HTTP, NGINXaaS provides the ability to create new rules for redirecting. See [How to Create NGINX Rewrite Rules | NGINX](https://www.nginx.com/blog/creating-nginx-rewrite-rules/) for more details. + +### What content types does NGINXaaS support for the message body for upstream/NGINXaaS error status code responses? +- Customers can use any type of response message, including the following: + + - text/plain + - text/css + - text/html + - application/javascript + - application/json + +### Where do I find the NGINXaaS IP (Internet Protocol) address? 
+- Once you successfully deploy NGINXaaS, you can double-click on NGINXaaS in the Azure portal; you can see both public and private IP addresses, as shown in the following screenshot: + +{{< img src="nginxaas-azure/faq-ip-location-one.png" alt="IP location one" >}} + +{{< img src="nginxaas-azure/faq-ip-location-two.png" alt="IP location two" >}} + +### Does my deployment IP change over time? +- The NGINXaaS deployment IP doesn't change over time. + +### Does NGINXaaS support autoscaling? +- Yes; NGINXaaS supports autoscaling as well as manual scaling. Refer to the [Scaling Guidance]({{< relref "/nginxaas-azure/quickstart/scaling.md#autoscaling" >}}) for more information. + +### How can I manually start/stop NGINXaaS? +- Currently, we can't manually start/stop NGINXaaS. You have the option to delete the deployment and re-deploy at a future date. + +### Can I change the virtual network or subnet for an existing NGINXaaS? +- If the existing NGINXaaS deployment is using a public IP address, you can change the backend virtual network or subnet. Please make sure that the subnet is delegated to `NGINX.NGINXPLUS/nginxDeployments` before creating a deployment in it. To delegate a subnet to an Azure service, see [Delegate a subnet to an Azure service](https://learn.microsoft.com/en-us/azure/virtual-network/manage-subnet-delegation?source=recommendations#delegate-a-subnet-to-an-azure-service). + +- If the existing NGINXaaS deployment is using a private IP address, you can only change the backend subnet. You cannot change the backend virtual network because the frontend and backend subnets must be in the same virtual network. + +### How do I configure HTTPS listeners for .com and .net sites? +- NGINXaaS is a Layer 7 HTTP protocol. To configure .com and .net servers, refer to the server name in the server block within the HTTP context. To learn more, and see examples, follow the instructions in the [NGINX Configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#nginx-configuration-validation" >}}) documentation. + +### If I remove/delete an NGINXaaS deployment, what will happen to the eNICs that were associated with it? +- When you remove or delete an NGINXaaS deployment, the associated eNICs will automatically be deleted. + +### What are the specific permissions that NGINXaaS for Azure needs? + +- The specific permissions required to deploy NGINXaaS are: + + - microsoft.network/publicIPAddresses/join/action + + - nginx.nginxplus/nginxDeployments/Write + + - microsoft.network/virtualNetworks/subnets/join/action + + - nginx.nginxplus/nginxDeployments/configurations/Write + + - nginx.nginxplus/nginxDeployments/certificates/Write + +- Additionally, if you are creating the Virtual Network or IP address resources that NGINXaaS for Azure will be using, then you probably also want those permissions as well. + +- Note that assigning the managed identity permissions normally requires an “Owner” role. + +### Can I reference my upstream servers by internal DNS hostname? + +- Yes. If your DNS nameservers are configured in the same VNet as your deployment, then you can use those DNS nameservers to resolve the hostname of the upstream servers referenced in your NGINX configuration. + +### Will updates to my virtual network's DNS settings automatically apply to my NGINXaaS deployment? + +No, changes to a virtual network's DNS settings will not be applied automatically to your NGINXaaS deployment. 
To ensure DNS settings are applied, you must add any custom DNS servers to the VNET's DNS settings before creating an NGINXaaS deployment. As a workaround for existing deployments, we recommend using the [`resolver` directive](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver) to explicitly specify your name server(s) and the [`resolve` parameter](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#resolve) to automatically re-resolve the domain name of the server without restarting NGINX. + +For example, + +```nginx +resolver 10.0.0.2 valid=10s; +upstream backends { + zone backends 64k; + server backends.example.com:8080 resolve; +} + +server { + location / { + proxy_pass http://backends; + } +} +``` + +### Does changing the `worker_connections` in the NGINX config have any effect? +- No. While changing the value of the directive in the config is allowed, the change is not applied to the underlying NGINX resource of your deployment. + +### What ports can my deployment listen on? + +- Due to port restrictions on Azure Load Balancer health probes, ports `19`, `21`, `70`, and `119` are not allowed. The NGINXaaS deployment can listen on all other ports. We limit the maximum listen ports in the NGINX configuration to 5 on the Basic and current Standard (v1) plan. Configurations that specify over 5 unique ports are rejected. With the Standard V2 plan, we allow users to listen on more than 5 ports. The first five ports under this plan come at no extra cost and there are charges for each additional port utilized. + +### How often does my deployment get billed? + +- NGINXaaS is [billed monthly]({{< relref "/nginxaas-azure/billing/overview.md" >}}) based on hourly consumption. + +### Why do the metrics show more connections and requests than I was expecting? + +- The NGINX agent periodically gathers connection and request statistics using an internal HTTP request. An Azure service health probe checks for status using a TCP connection for each listen port in the NGINX configuration, incrementing the connection count for each port. This contributes to minimal traffic and should not affect these metrics significantly. + +### Can I use an existing subnet to create my deployment? + +- You can use an existing subnet to create a deployment. Please make sure that the subnet is delegated to `NGINX.NGINXPLUS/nginxDeployments` before creating a deployment in it. To delegate a subnet to an Azure service, see [Delegate a subnet to an Azure service](https://learn.microsoft.com/en-us/azure/virtual-network/manage-subnet-delegation?source=recommendations#delegate-a-subnet-to-an-azure-service). + +### Will my deployment detect a new version of my certificate and apply it? + +- NGINXaaS supports certificate rotation. See the [Certificate Rotation documentation]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/overview.md#certificate-rotation" >}}) to learn more. + +### Why are some of my deployment's metrics intermittently missing in Azure monitor? + +- This may indicate that the deployment's underlying compute resources are being exhausted. Monitor the `system.cpu` metric to see the deployment's CPU utilization. If it's nearing 100%, consider increasing the deployment's NCU capacity. See the [Scaling Guidance]({{< relref "/nginxaas-azure/quickstart/scaling.md" >}}) documentation for more information. 
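+For example, the provisioned capacity of a deployment can be raised with the `nginx` Azure CLI extension; the deployment name, resource group, and capacity value below are illustrative:
+
+```bash
+# Increase the provisioned capacity of an existing deployment to 20 NCUs.
+az nginx deployment update \
+    --name myDeployment \
+    --resource-group myResourceGroup \
+    --scaling-properties capacity=20
+```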
diff --git a/content/nginxaas-azure/getting-started/_index.md b/content/nginxaas-azure/getting-started/_index.md new file mode 100644 index 000000000..e3770eb81 --- /dev/null +++ b/content/nginxaas-azure/getting-started/_index.md @@ -0,0 +1,9 @@ +--- +title: Getting started +weight: 200 +draft: false +url: /nginxaas/azure/getting-started/ +menu: + docs: + parent: NGINXaaS for Azure +--- \ No newline at end of file diff --git a/content/nginxaas-azure/getting-started/create-deployment/_index.md b/content/nginxaas-azure/getting-started/create-deployment/_index.md new file mode 100644 index 000000000..9dfdcd9ce --- /dev/null +++ b/content/nginxaas-azure/getting-started/create-deployment/_index.md @@ -0,0 +1,8 @@ +--- +title: Create a deployment +weight: 200 +url: /nginxaas/azure/getting-started/create-deployment/ +menu: + docs: + parent: NGINXaaS for Azure +--- \ No newline at end of file diff --git a/content/nginxaas-azure/getting-started/create-deployment/deploy-azure-cli.md b/content/nginxaas-azure/getting-started/create-deployment/deploy-azure-cli.md new file mode 100644 index 000000000..3d172f11b --- /dev/null +++ b/content/nginxaas-azure/getting-started/create-deployment/deploy-azure-cli.md @@ -0,0 +1,147 @@ +--- +title: "Deploy using the Azure CLI" +weight: 200 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/getting-started/create-deployment/deploy-azure-cli/ +--- + +## Overview + +The Azure CLI has an extension to be used for management of F5 NGINX as a Service for Azure (NGINXaaS) deployments whether that be locally or in continuous integration pipelines. This document links you to information around basic NGINXaaS extension usage. + +## Prerequisites + +- Install [Azure CLI with NGINXaaS extension]({{< relref "/nginxaas-azure/client-tools/cli.md" >}}) + +## Create a deployment + +To create an NGINXaaS for Azure resource use the `az nginx deployment create` command: + +```bash +az nginx deployment create --deployment-name + --resource-group + [--auto-upgrade-profile] + [--enable-diagnostics {0, 1, f, false, n, no, t, true, y, yes}] + [--identity] + [--location] + [--logging] + [--network-profile] + [--no-wait {0, 1, f, false, n, no, t, true, y, yes}] + [--scaling-properties] + [--sku] + [--tags] + [--user-profile] +``` + +### Examples + +- Create a deployment with public IP: + + ```bash + az nginx deployment create --name myDeployment --resource-group \ + myResourceGroup --location eastus2 --sku name="standardv2_Monthly" \ + --network-profile front-end-ip-configuration="{public-ip-addresses:[{id:/subscriptions/mySubscription/resourceGroups/myResourceGroup/providers/Microsoft.Network/publicIPAddresses/myPublicIP}]}" \ + network-interface-configuration="{subnet-id:/subscriptions/mySubscription/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVNet/subnets/mySubnet}" + ``` + +- Create a deployment with private IP: + + ```bash + az nginx deployment create --name myDeployment --resource-group \ + myResourceGroup --location eastus2 --sku \ + name="standardv2_Monthly" --network-profile \ + front-end-ip-configuration="{private-ip-addresses:[{private-ip-allocation-method:Static,subnet-id:/subscriptions/mySubscription/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVNet/subnets/mySubnet,private-ip-address:10.0.0.2}]}" \ + network-interface-configuration="{subnet-id:/subscriptions/mySubscription/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVNet/subnets/mySubnet}" + ``` + + ```bash + az nginx 
deployment create --name myDeployment --resource-group \ + myResourceGroup --location eastus2 --sku \ + name="standardv2_Monthly" --network-profile \ + front-end-ip-configuration="{private-ip-addresses:[{private-ip-allocation-method:Dynamic,subnet-id:/subscriptions/mySubscription/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVNet/subnets/mySubnet,private-ip-address:10.0.0.2}]}" \ + network-interface-configuration="{subnet-id:/subscriptions/mySubscription/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVNet/subnets/mySubnet}" + ``` + +- Create a deployment with managed identity, storage account and scaling: + + ```bash + az nginx deployment create --deployment-name myDeployment --resource-group \ + myResourceGroup --location eastus2 --sku name=standardv2_Monthly \ + --network-profile \ + network-interface-configuration='{subnet-id:/subscriptions/subscriptionId/resourcegroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet-azclitest/subnets/mySubnet}' \ + front-end-ip-configuration='{public-ip-addresses:[{id:/subscriptions/subscriptionId/resourceGroups/myResourceGroup/providers/Microsoft.Network/publicIPAddresses/myPublicIP}]}' \ + --identity '{"type":"UserAssigned","userAssignedIdentities":{"/subscriptions/subscriptionId/resourcegroups/myResourceGroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myManagedIdentity":{}}}' \ + --logging storage-account='{"account-name":"myStorageAccount","container-name":"myContainer"}' \ + --scaling-properties capacity=10 + ``` + +See the [Azure CLI Deployment Create Documentation](https://learn.microsoft.com/en-us/cli/azure/nginx/deployment#az-nginx-deployment-create) for more details on the required and optional parameters. + +## Update a deployment + +To update an NGINXaaS for Azure resource use the `az nginx deployment update` command: + +```bash +az nginx deployment update [--add] + [--auto-upgrade-profile] + [--deployment-name] + [--enable-diagnostics {0, 1, f, false, n, no, t, true, y, yes}] + [--force-string {0, 1, f, false, n, no, t, true, y, yes}] + [--identity] + [--ids] + [--location] + [--logging] + [--network-profile] + [--no-wait {0, 1, f, false, n, no, t, true, y, yes}] + [--remove] + [--resource-group] + [--scaling-properties] + [--set] + [--sku] + [--subscription] + [--tags] + [--user-profile] +``` + +### Example + +- Update tags and enable diagnostics support for a deployment: + + ```bash + az nginx deployment update --name myDeployment --resource-group \ + myResourceGroup --location eastus2 --tags tag1="value1" \ + tag2="value2" --enable-diagnostics + ``` + +See the [Azure CLI Deployment Update Documentation](https://learn.microsoft.com/en-us/cli/azure/nginx/deployment#az-nginx-deployment-update) for more details on the required and optional parameters. + + +## Delete a deployment + +Use the `az nginx deployment delete` command to delete an NGINXaaS for Azure resource: + +```bash +az nginx deployment delete [--deployment-name] + [--ids] + [--no-wait {0, 1, f, false, n, no, t, true, y, yes}] + [--resource-group] + [--subscription] + [--yes] +``` + +### Example + +- Delete a deployment: + + ```bash + az nginx deployment delete --name myDeployment \ + --resource-group myResourceGroup + ``` + +See the [Azure CLI Deployment Delete Documentation](https://learn.microsoft.com/en-us/cli/azure/nginx/deployment#az-nginx-deployment-delete) for more details on the required and optional parameters. 
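+To verify the result of these operations, the extension also provides read commands. A short sketch, assuming the `list` and `show` subcommands are available in your installed extension version:
+
+```bash
+# List NGINXaaS deployments in a resource group.
+az nginx deployment list --resource-group myResourceGroup --output table
+
+# Show the properties of a single deployment, including its IP configuration.
+az nginx deployment show --name myDeployment --resource-group myResourceGroup
+```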
+ +## Additional resources + +- [Azure CLI Public IP Documentation](https://learn.microsoft.com/en-us/cli/azure/network/public-ip) +- [Azure CLI Storage Container Documentation](https://learn.microsoft.com/en-us/cli/azure/storage/container) diff --git a/content/nginxaas-azure/getting-started/create-deployment/deploy-azure-portal.md b/content/nginxaas-azure/getting-started/create-deployment/deploy-azure-portal.md new file mode 100644 index 000000000..9c283cd6a --- /dev/null +++ b/content/nginxaas-azure/getting-started/create-deployment/deploy-azure-portal.md @@ -0,0 +1,89 @@ +--- +title: "Deploy using the Azure portal" +weight: 100 +categories: ["tasks"] +toc: true +docs: "DOCS-878" +url: /nginxaas/azure/getting-started/create-deployment/deploy-azure-portal/ +--- + +## Overview + +This guide explains how to deploy F5 NGINX as a Service for Azure (NGINXaaS) using [Microsoft Azure portal](https://azure.microsoft.com/en-us/get-started/azure-portal). The deployment process involves creating a new deployment, configuring the deployment, and testing the deployment. + +## Find the NGINX as a Service for Azure offer in the Azure portal + +You can start the NGINXaaS deployment process by visiting the [Create NGINXaaS](https://portal.azure.com/#create/f5-networks.f5-nginx-for-azure) page or finding the NGINXaaS service in the Azure portal: + +1. [Sign in](https://portal.azure.com/) to the Azure portal with your Azure account. +1. Use the search field to find "NGINXaaS" in the Azure Portal. In the Services results, select **NGINXaaS**. +1. Select **+ Create** on the **NGINXaaS** page to start the deployment process. + +## Create a deployment + +### Basics tab + +1. On the Create NGINXaaS Deployment **Basics** page, provide the following information: + + {{}} + | Field | Description | + |---------------------------- | ---------------------------- | + | Subscription | Select the appropriate Azure subscription that you have access to.| + | Resource group | Specify whether you want to create a new resource group or use an existing one.
For more information, see [Azure Resource Group overview](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview). | + | Name | Provide a unique name for your deployment. | + | Region | Select the region you want to deploy to. | + | Pricing Plan | Select the Standard V2 plan. For more information, see [Pricing Plans]({{< ref "/nginxaas-azure/billing/overview.md#pricing-plans">}}) | + | Scaling | Select Manual to set the capacity of your deployment in NCUs or select Autoscale to automatically adjust the capacity of your deployment. Learn more about NCUs in [Scaling Guidance]({{< relref "/nginxaas-azure/quickstart/scaling.md" >}}). | + | Email | Provide an email address that can be notified about service alerts, maintenance data and activity reports. | + | Upgrade Channel | Select the desired upgrade channel for your deployment. For more information, see [Upgrade Channels]({{< relref "/nginxaas-azure/quickstart/upgrade-channels.md" >}}). | + + {{
}} + +1. Next, select **Networking**. + +### Networking tab + +1. On the Create NGINXaaS Deployment **Networking** page, provide the following information: + + {{}} + | Field | Description | + |---------------------------- | ---------------------------- | + | Virtual Network | A virtual network is required for communication between the resources you create.
You can create a new virtual network or use an existing one (for an existing one see note below).
Additionally, you can peer a new virtual network with existing ones (in any region) to create network access from NGINXaaS for Azure to your upstream servers. To peer the virtual network with another see [Create, change, or delete a virtual network peering](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-manage-peering).| + | Subnet | If you select an existing virtual network, you can select the existing subnet to be used. Before creating a deployment, the existing subnet needs to be delegated to `NGINX.NGINXPLUS/nginxDeployments`. To delegate a subnet to an Azure service, see [Delegate a subnet to an Azure service](https://learn.microsoft.com/en-us/azure/virtual-network/manage-subnet-delegation?source=recommendations#delegate-a-subnet-to-an-azure-service).

Otherwise, if you have chosen to create a new virtual network, a new subnet will be selected by default.

The minimum subnet size is `/27` and is sufficient for a single NGINXaaS deployment even at large scales. Multiple NGINXaaS deployments can be placed in a single delegated subnet, along with other resources. When doing so a larger subnet, e.g. a `/24`, is recommended. | + | Allow NGINX access to Virtual Network | Confirm that you allow:
- Registration of the NGINX provider to your Azure subscription.
- Delegation of the subnet to the NGINX provider.| + | IP address | Set the IP address (public or private) that the service listens to for requests:

If you select a public IP address:
- Create a new public IP or use an existing one (for an existing one see the note below).
- Set the resource name for your public IP address.
Newly created public IPs are [zone-redundant in supported regions](https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/public-ip-addresses#availability-zone).

If you select a private IP address:
- Provide a static IP address from the same subnet range set previously. | + | Inbound port rules | Select `None` to disallow inbound access on any port, or choose to allow traffic from one of these common http(s) ports.

**Note:** This option is only available when specifying a new virtual network as part of the create workflow. If you select an existing virtual network which is associated with a subnet and Network Security Group (NSG), you will need to edit the Inbound security rules to add access for the specific ports you want to allow (for example, ports 80 and 443).| + | Apply default NGINX configuration | Confirm that you want your NGINXaaS deployment to be bootstrapped with a default NGINX configuration and a browsable splash page. | + {{
}} + +1. Next, select **Tags**. + +### Tags tab + +1. Add custom tags for the new NGINXaaS Deployment. Each tag consists of a **name** and **value**. + +1. After adding the tags, select **Next: Review+Create** + +### Review + create tab + +1. On the Review + create tab, your configuration is validated. You can review the selections made in the previous screens. + +1. After validation has succeeded and you've reviewed the terms, select **Create** for Azure to start the deployment. + +1. After the deployment finishes, select the NGINX deployment from the list (with "Type: NGINXaaS") to view information about the deployed resource. + + {{< img src="nginxaas-azure/deployment-complete.png" alt="Resource Deployment Completed page showing the available deployments and the new NGINXaaS type deployment in the Deployment details section." >}} + + +## Test your deployment + +1. To test your deployment, you can go to the IP address noted on the overview page. The default NGINX welcome screen should load. + + {{}}You will not see the default NGINX welcome screen if you unchecked "Apply default NGINX configuration" in the [Networking Tab screen]({{< relref "create-deployment.md#networking-tab" >}}) above. You can proceed with providing your own NGINX configuration as outlined in the [NGINX configuration]({{< relref "nginx-configuration.md#networking-tab" >}}) section.{{}} + + {{< img src="nginxaas-azure/test-deployment.png" alt="NGINXaaS Overview page showing the IP address of the deployment in the Essentials section." >}} + + +## What's next + +[Assign Managed Identities]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}) diff --git a/content/nginxaas-azure/getting-started/create-deployment/deploy-terraform.md b/content/nginxaas-azure/getting-started/create-deployment/deploy-terraform.md new file mode 100644 index 000000000..901251450 --- /dev/null +++ b/content/nginxaas-azure/getting-started/create-deployment/deploy-terraform.md @@ -0,0 +1,42 @@ +--- +title: "Deploy using Terraform" +weight: 300 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/getting-started/create-deployment/deploy-terraform/ +--- + +## Overview + +F5 NGINX as a Service for Azure (NGINXaaS) deployments can be managed using Terraform. This document outlines common Terraform workflows for NGINXaaS. + +## Prerequisites + +{{< include "/nginxaas-azure/terraform-prerequisites.md" >}} + +## Create a deployment + +You can find examples of Terraform configurations in the [NGINXaaS for Azure Snippets GitHub repository](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/terraform/deployments/create-or-update) + +To create a deployment, use the following Terraform commands: + + ```bash + terraform init + terraform plan + terraform apply --auto-approve + ``` + +## Delete a deployment + +Once the deployment is no longer needed, run the following to clean up the deployment and related resources: + + ```bash + terraform destroy --auto-approve + ``` + +## Additional resources + +- If you're just starting with Terraform, you can learn more on their [official website](https://www.terraform.io/). 
+- [Terraform NGINX deployment documentation](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/nginx_deployment) + +{{< include "/nginxaas-azure/terraform-resources.md" >}} \ No newline at end of file diff --git a/content/nginxaas-azure/getting-started/managed-identity-portal.md b/content/nginxaas-azure/getting-started/managed-identity-portal.md new file mode 100644 index 000000000..a14db7a9d --- /dev/null +++ b/content/nginxaas-azure/getting-started/managed-identity-portal.md @@ -0,0 +1,84 @@ +--- +title: "Assign Managed Identities" +weight: 300 +categories: ["tasks"] +toc: true +docs: "DOCS-872" +url: /nginxaas/azure/getting-started/managed-identity-portal/ +--- + +## Overview + +F5 NGINX as a Service for Azure (NGINXaaS) leverages a user assigned and a system assigned managed identity for some of its integrations with Azure, such as: + +- Azure Key Vault (AKV): fetch SSL/TLS certificates from AKV to your NGINXaaS deployment, so that they can be referenced by your NGINX configuration. + +- Azure Monitor: publish metrics from your NGINX deployment to Azure Monitor. + +- Azure Storage: export logs from your NGINX deployment to Azure Blob Storage Container. + +## Prerequisites + +- A user assigned or a system assigned managed identity. If you are unfamiliar with managed identities for Azure resources, refer to the [Managed Identity documentation](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) from Microsoft. + +- Owner access on the resource group or subscription to assign the managed identity to the NGINX deployment. + +## Adding a user assigned managed identity + +1. Go to your NGINXaaS for Azure deployment. + +2. Select **Identity** in the left menu, select the **User Assigned** tab, and select **Add**. + +3. Select the appropriate **subscription** and **user assigned managed identity**, then select **Add**. + +
+ {{}}NGINXaaS supports adding a system assigned managed identity and a user assigned managed identity. Adding more than one user assigned managed identity is not supported.{{}} + +4. The added user assigned managed identity will show up in the main table. + +## Removing a user assigned managed identity + +1. Select the managed identity you want to remove from the list and then select **Remove**. + +2. Confirm the operation by selecting **Yes** on the confirmation prompt. + +## Adding a system assigned managed identity + +1. Go to your NGINXaaS for Azure deployment. + +2. Select **Identity** in the left menu, select the **System Assigned** tab, and then toggle the Status to **On**. + +3. Select **Save**. + +3. To confirm the operation, select **Yes** on the confirmation prompt. + + {{}}NGINXaaS supports using only one type of managed identity per deployment at a time. User assigned and system assigned identities cannot be present simultaneously.{{}} + +4. To provide the role assignments necessary for the deployment, Select **Azure Role Assignments** under Permissions. + +5. Select **Add Role Assignments** + +6. On the **Add role assignment (Preview)** panel, select the appropriate **Scope** and **Role**. Then select **Save**. + +7. The system assigned managed identity will be shown as enabled on the main Identity page. + +## Removing a system assigned managed identity + +1. Select **Identity** in the left menu, then select the **System assigned** tab. + +2. Toggle the Status to **Off** and select **Save**. + +3. Confirm the operation by selecting **Yes** on the confirmation prompt. + +{{}}Removing a Managed Identity from an NGINX deployment has the following effects: + +- If the NGINX deployment uses any SSL/TLS certificates, then any updates to the deployment (including deployment properties, certificates, and configuration) will result in a failure. If the configuration is updated not to use any certificates, then those requests will succeed. + +- If publishing metrics is enabled for the NGINX deployment, then the metrics will no longer be published to Azure Monitor for this deployment until a Managed Identity is added. 
+ +- If logging is enabled for the NGINX deployment, then the logs will no longer be exported to the Azure Blob Storage Container for this deployment until a Managed Identity is added.{{}} + + +## What's next + +[Add SSL/TLS Certificates]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md" >}}) diff --git a/content/nginxaas-azure/getting-started/nginx-configuration/_index.md b/content/nginxaas-azure/getting-started/nginx-configuration/_index.md new file mode 100644 index 000000000..c0c2f4c34 --- /dev/null +++ b/content/nginxaas-azure/getting-started/nginx-configuration/_index.md @@ -0,0 +1,8 @@ +--- +title: Upload an NGINX configuration +weight: 500 +url: /nginxaas/azure/getting-started/nginx-configuration/ +menu: + docs: + parent: NGINXaaS for Azure +--- \ No newline at end of file diff --git a/content/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-azure-cli.md b/content/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-azure-cli.md new file mode 100644 index 000000000..1b59b3930 --- /dev/null +++ b/content/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-azure-cli.md @@ -0,0 +1,227 @@ +--- +title: "Upload using the Azure CLI" +weight: 200 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/getting-started/nginx-configuration/nginx-configuration-azure-cli/ +--- + +## Overview + +F5 NGINX as a Service for Azure (NGINXaaS) configurations can be managed using the Azure CLI. This document outlines common Azure CLI workflows to validate, create, and update NGINX configurations. + +## Prerequisites + +- Install [Azure CLI with NGINXaaS extension]({{< relref "/nginxaas-azure/client-tools/cli.md" >}}) + +- If the NGINX configuration requires SSL/TLS certificates, then a managed identity and integration with Azure Key Vault is required. + +- A contributor role is required to apply the configuration to the deployment. 
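+The create and update commands below expect each configuration file's content to be base64 encoded. As a sketch, a local `nginx.conf` could be encoded and referenced like this (file names and paths are illustrative):
+
+```bash
+# Base64-encode a local NGINX configuration file for the --files parameter.
+CONTENT=$(base64 -i nginx.conf)
+
+# Reference the encoded content and its virtual path when creating the configuration.
+az nginx deployment configuration create --name default \
+    --deployment-name myDeployment --resource-group myResourceGroup \
+    --root-file /etc/nginx/nginx.conf \
+    --files "[{content:'$CONTENT',virtual-path:'/etc/nginx/nginx.conf'}]"
+```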
+ +## Create a configuration + +To create a new NGINX configuration, use the `az nginx deployment configuration create` command: + +```bash +az nginx deployment configuration create --configuration-name + --deployment-name + --resource-group + [--files] + [--location] + [--no-wait {0, 1, f, false, n, no, t, true, y, yes}] + [--package] + [--protected-files] + [--root-file] +``` + +### Validate your configuration + +You can use the `analyze` command to validate your configuration before submitting it to the deployment: + +```bash +az nginx deployment configuration analyze --deployment-name $DEPLOYMENT_NAME \ + --resource-group $RESOURCE_GROUP --root-file /etc/nginx/nginx.conf \ + --name default --files "$FILES_CONTENT" +```` + +### Examples + +- Create a single file configuration: + + ```bash + az nginx deployment configuration create --name default \ + --deployment-name myDeployment --resource-group myResourceGroup \ + --root-file /etc/nginx/nginx.conf \ + --files "[{content:'aHR0cCB7CiAgICB1cHN0cmVhbSBhcHAgewogICAgICAgIHpvbmUgYXBw \ + IDY0azsKICAgICAgICBsZWFzdF9jb25uOwogICAgICAgIHNlcnZlciAxMC4wLjEuNDo4 \ + MDAwOwogICAgfQoKICAgIHNlcnZlciB7CiAgICAgICAgbGlzdGVuIDgwOwogICAgICAg \ + IHNlcnZlcl9uYW1lICouZXhhbXBsZS5jb207CgogICAgICAgIGxvY2F0aW9uIC8gewog \ + ICAgICAgICAgICBwcm94eV9zZXRfaGVhZGVyIEhvc3QgJGhvc3Q7CiAgICAgICAgICAg \ + IHByb3h5X3NldF9oZWFkZXIgWC1SZWFsLUlQICRyZW1vdGVfYWRkcjsKICAgICAgICAg \ + ICAgcHJveHlfc2V0X2hlYWRlciBYLVByb3h5LUFwcCBhcHA7CiAgICAgICAgICAgIHBy \ + b3h5X3NldF9oZWFkZXIgR2l0aHViLVJ1bi1JZCAwMDAwMDA7CiAgICAgICAgICAgIHBy \ + b3h5X2J1ZmZlcmluZyBvbjsKICAgICAgICAgICAgcHJveHlfYnVmZmVyX3NpemUgNGs7 \ + CiAgICAgICAgICAgIHByb3h5X2J1ZmZlcnMgOCA4azsKICAgICAgICAgICAgcHJveHlf \ + cmVhZF90aW1lb3V0IDYwczsKICAgICAgICAgICAgcHJveHlfcGFzcyBodHRwOi8vYXBw \ + OwogICAgICAgICAgICBoZWFsdGhfY2hlY2s7CiAgICAgICAgfQogICAgICAgIAogICAg \ + fQp9',virtual-path:'/etc/nginx/nginx.conf'}]" + ``` + +- Create a multiple file configuration: + + ```bash + az nginx deployment configuration create --name default \ + --deployment-name myDeployment --resource-group myResourceGroup \ + --root-file /etc/nginx/nginx.conf \ + --files "[{'content':'aHR0cCB7CiAgICB1cHN0cmVhbSBhcHAgewogICAgICAgIHpvbmUg \ + YXBwIDY0azsKICAgICAgICBsZWFzdF9jb25uOwogICAgICAgIHNlcnZlciAxMC4wLjEu \ + NDo4MDAwOwogICAgfQoKICAgIHNlcnZlciB7CiAgICAgICAgbGlzdGVuIDgwOwogICAg \ + ICAgIHNlcnZlcl9uYW1lICouZXhhbXBsZS5jb207CgogICAgICAgIGxvY2F0aW9uIC8g \ + ewogICAgICAgICAgICBpbmNsdWRlIC9ldGMvbmdpbngvY29uZi5kL3Byb3h5LmNvbmY7 \ + CiAgICAgICAgICAgIHByb3h5X3Bhc3MgaHR0cDovL2FwcDsKICAgICAgICAgICAgaGVh \ + bHRoX2NoZWNrOwogICAgICAgIH0KICAgICAgICAKICAgIH0KfQ==', \ + 'virtual-path':'/etc/nginx/nginx.conf'}, \ + {'content':'cHJveHlfc2V0X2hlYWRlciBIb3N0ICRob3N0Owpwcm94eV9zZXRfaGVhZGVy \ + IFgtUmVhbC1JUCAkcmVtb3RlX2FkZHI7CnByb3h5X3NldF9oZWFkZXIgWC1Qcm94eS1B \ + cHAgYXBwOwpwcm94eV9zZXRfaGVhZGVyIEdpdGh1Yi1SdW4tSWQgMDAwMDAwOwpwcm94 \ + eV9idWZmZXJpbmcgb247CnByb3h5X2J1ZmZlcl9zaXplIDRrOwpwcm94eV9idWZmZXJz \ + IDggOGs7CnByb3h5X3JlYWRfdGltZW91dCA2MHM7', \ + 'virtual-path':'/etc/nginx/conf.d/proxy.conf'}]" + ``` + +- Upload package with config files: + + ```bash + $ tar -czf nginx.tar.gz nginx + $ tar -tzf nginx.tar.gz + nginx/ + nginx/nginx.conf + nginx/njs.js + nginx/servers + nginx/servers/ + nginx/servers/server1.conf + nginx/servers/server2.conf + ``` + + Where `nginx` is a directory with the following structure: + + ```bash + $ tree nginx + nginx + ├── nginx.conf + ├── njs.js + └── servers + ├── server1.conf + └── server2.conf + + 1 directory, 4 files + ``` + + Encode your 
tar.gz file and create your NGINXaaS configuration + + ```bash + TAR_DATA=$(base64 -i nginx.tar.gz) + az nginx deployment configuration create --deployment-name myDeployment \ + --resource-group myResourceGroup --root-file nginx.conf --name default \ + --package data="$TAR_DATA" + ``` + +- Multiple file configuration with protected files: + + ```bash + az nginx deployment configuration create --name default \ + --deployment-name 0102242023test --resource-group azclitest-geo \ + --root-file /etc/nginx/nginx.conf \ + --files "[{'content':'aHR0cCB7CiAgICB1cHN0cmVhbSBhcHAgewogICAgICAgIHpvbmUg \ + YXBwIDY0azsKICAgICAgICBsZWFzdF9jb25uOwogICAgICAgIHNlcnZlciAxMC4wLjEu \ + NDo4MDAwOwogICAgfQoKICAgIHNlcnZlciB7CiAgICAgICAgbGlzdGVuIDgwOwogICAg \ + ICAgIHNlcnZlcl9uYW1lICouZXhhbXBsZS5jb207CgogICAgICAgIGxvY2F0aW9uIC8g \ + ewogICAgICAgICAgICBpbmNsdWRlIC9ldGMvbmdpbngvY29uZi5kL3Byb3h5LmNvbmY7 \ + CiAgICAgICAgICAgIHByb3h5X3Bhc3MgaHR0cDovL2FwcDsKICAgICAgICAgICAgaGVh \ + bHRoX2NoZWNrOwogICAgICAgIH0KICAgICAgICAKICAgIH0KfQ==', \ + 'virtual-path':'/etc/nginx/nginx.conf'}, \ + {'content':'cHJveHlfc2V0X2hlYWRlciBIb3N0ICRob3N0Owpwcm94eV9zZXRfaGVhZGVy \ + IFgtUmVhbC1JUCAkcmVtb3RlX2FkZHI7CnByb3h5X3NldF9oZWFkZXIgWC1Qcm94eS1B \ + cHAgYXBwOwpwcm94eV9zZXRfaGVhZGVyIEdpdGh1Yi1SdW4tSWQgMDAwMDAwOwpwcm94 \ + eV9idWZmZXJpbmcgb247CnByb3h5X2J1ZmZlcl9zaXplIDRrOwpwcm94eV9idWZmZXJz \ + IDggOGs7CnByb3h5X3JlYWRfdGltZW91dCA2MHM7', \ + 'virtual-path':'/etc/nginx/conf.d/proxy.conf'}]" \ + --protected-files "[{'content':'aHR0cCB7CiAgICB1cHN0cmVhbSBhcHAgewogICAgICAgIHpvbmUg \ + YXBwIDY0azsKICAgICAgICBsZWFzdF9jb25uOwogICAgICAgIHNlcnZlciAxMC4wLjEu \ + NDo4MDAwOwogICAgfQoKICAgIHNlcnZlciB7CiAgICAgICAgbGlzdGVuIDgwOwogICAg \ + ICAgIHNlcnZlcl9uYW1lICouZXhhbXBsZS5jb207CgogICAgICAgIGxvY2F0aW9uIC8g \ + ewogICAgICAgICAgICBpbmNsdWRlIC9ldGMvbmdpbngvY29uZi5kL3Byb3h5LmNvbmY7 \ + CiAgICAgICAgICAgIHByb3h5X3Bhc3MgaHR0cDovL2FwcDsKICAgICAgICAgICAgaGVh \ + bHRoX2NoZWNrOwogICAgICAgIH0KICAgICAgICAKICAgIH0KfQ==', \ + 'virtual-path':'/etc/nginx/nginxprot.conf'}, \ + {'content':'cHJveHlfc2V0X2hlYWRlciBIb3N0ICRob3N0Owpwcm94eV9zZXRfaGVhZGVy \ + IFgtUmVhbC1JUCAkcmVtb3RlX2FkZHI7CnByb3h5X3NldF9oZWFkZXIgWC1Qcm94eS1B \ + cHAgYXBwOwpwcm94eV9zZXRfaGVhZGVyIEdpdGh1Yi1SdW4tSWQgMDAwMDAwOwpwcm94 \ + eV9idWZmZXJpbmcgb247CnByb3h5X2J1ZmZlcl9zaXplIDRrOwpwcm94eV9idWZmZXJz \ + IDggOGs7CnByb3h5X3JlYWRfdGltZW91dCA2MHM7', \ + 'virtual-path':'/etc/nginx/conf.d/proxyprot.conf'}]" + ``` + +See the [Azure CLI Configuration Create Documentation](https://learn.microsoft.com/en-us/cli/azure/nginx/deployment/configuration?view=azure-cli-latest#az-nginx-deployment-configuration-create) for more details on the available parameters. + + + +## Update a configuration + +Update a configuration for a deployment using a gzipped archive. 
+ +Use the `az nginx deployment configuration update` command to update an existing NGINX configuration: + +```bash +az nginx deployment configuration update [--add] + [--configuration-name] + [--deployment-name] + [--files] + [--force-string {0, 1, f, false, n, no, t, true, y, yes}] + [--ids] + [--location] + [--no-wait {0, 1, f, false, n, no, t, true, y, yes}] + [--remove] + [--resource-group] + [--root-file] + [--set] + [--subscription] +``` + +### Example + +- Update content of the first file in a configuration: + + ```bash + az nginx deployment configuration update --name default \ + --deployment-name myDeployment --resource-group myResourceGroup \ + --files [0].content="aHR0cCB7CiAgICB1cHN0cmVhbSBhcHAgewogICAgICAgIHpvbmUg \ + YXBwIDY0azsKICAgICAgICBsZWFzdF9jb25uOwogICAgICAgIHNlcnZlciAxMC4wLjEu \ + NDo4MDAwOwogICAgfQoKICAgIHNlcnZlciB7CiAgICAgICAgbGlzdGVuIDgwOwogICAg \ + ICAgIHNlcnZlcl9uYW1lICouZXhhbXBsZS5jb207CgogICAgICAgIGxvY2F0aW9uIC8g \ + ewogICAgICAgICAgICBwcm94eV9zZXRfaGVhZGVyIEhvc3QgJGhvc3Q7CiAgICAgICAg \ + ICAgIHByb3h5X3NldF9oZWFkZXIgWC1SZWFsLUlQICRyZW1vdGVfYWRkcjsKICAgICAg \ + ICAgICAgcHJveHlfc2V0X2hlYWRlciBYLVByb3h5LUFwcCBhcHA7CiAgICAgICAgICAg \ + IHByb3h5X3NldF9oZWFkZXIgR2l0aHViLVJ1bi1JZCAwMDAwMDA7CiAgICAgICAgICAg \ + IHByb3h5X2J1ZmZlcmluZyBvbjsKICAgICAgICAgICAgcHJveHlfYnVmZmVyX3NpemUg \ + NGs7CiAgICAgICAgICAgIHByb3h5X2J1ZmZlcnMgOCA4azsKICAgICAgICAgICAgcHJv \ + eHlfcmVhZF90aW1lb3V0IDYwczsKICAgICAgICAgICAgcHJveHlfcGFzcyBodHRwOi8v \ + YXBwOwogICAgICAgICAgICBoZWFsdGhfY2hlY2s7CiAgICAgICAgfQogICAgICAgIAog \ + ICAgfQp9" + ``` + +See the [Azure CLI Configuration Update Documentation](https://learn.microsoft.com/en-us/cli/azure/nginx/deployment/configuration?view=azure-cli-latest#az-nginx-deployment-configuration-update) for more details on the available parameters. + + +{{< tip >}} + +See the [NGINX connfiguration overview]({{< relref "overview.md" >}}) topic +to learn more about: + +- [NGINX configuration automation workflows]({{< relref "overview.md#nginx-configuration-automation-workflows" >}}) +- [NGINX filesystem restrictions]({{< relref "overview.md#nginx-filesystem-restrictions" >}}) +- [Disallowed configuration directives]({{< relref "overview.md#disallowed-configuration-directives" >}}) +- [Directives that cannot be overridden]({{< relref "overview.md#directives-that-cannot-be-overridden" >}}) +- [Configuration directives list]({{< relref "overview.md#configuration-directives-list" >}}) + +{{< /tip >}} diff --git a/content/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md b/content/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md new file mode 100644 index 000000000..f8b9dc620 --- /dev/null +++ b/content/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md @@ -0,0 +1,164 @@ +--- +title: "Upload using the Azure portal" +weight: 100 +categories: ["tasks"] +toc: true +docs: "DOCS-873" +url: /nginxaas/azure/getting-started/nginx-configuration/nginx-configuration-portal/ +--- + +An NGINX configuration can be applied to the deployment using the Azure portal in two different ways: + +- Create a new NGINX configuration from scratch or by pasting it in the Azure portal editor. +- Upload a gzip compressed tar archive containing your NGINX configuration. + +As part of applying your NGINX configuration, the service validates the configuration for syntax and compatibility with F5 NGINX as a Service for Azure (NGINXaaS). 
The use of certain directives and parameters is not allowed, to ensure the NGINX configuration’s compatibility with the IaaS deployment model in Azure. Validation errors are reported in the editor for you to correct. For more information, check the [NGINX Configuration Validation]({{< relref "nginx-configuration.md#nginx-configuration-validation" >}}) section. + +## Prerequisites + +- If the NGINX configuration requires SSL/TLS certificates, then a managed identity and integration with Azure Key Vault is required. + +- A contributor role is required to apply the configuration to the deployment. + +## Add an NGINX configuration + +1. Go to your NGINXaaS for Azure deployment. + +1. Select **NGINX configuration** in the left menu and you will see the default configuration that NGINXaaS provides. + + {{}}If you don't see the default configuration, it's likely the deployment was created through a client tool other than the portal (for example, Terraform), or the "Apply default NGINX configuration" option was unchecked during the deployment creation process in the portal. You can still proceed with the steps below to provide your own NGINX configuration for the deployment.{{}} + +1. Select {{< fa "fa fa-plus">}}**New File** to add a file path, then **Confirm**. + + {{}} + | Property | Description | + | -------- | ----------- | + | File path | Each NGINX configuration file can be uniquely identified by a file path (for example, nginx.conf or /etc/nginx/nginx.conf) to align with the intended NGINX configuration file structure. | + | Root file | The root file is the main NGINX configuration file.
  • The first file created will be the root file by default. You can designate a different root file if you have more than a single configuration file in your deployment.
  • The root file is designated with a {{< golden-star >}} icon on the portal.
| Protected file | Indicates that the file may contain sensitive data, such as passwords, or represent an SSL/TLS certificate.
  • To protect a file, enable the **Protected** {{}} toggle button.
  • Once a protected file is saved to the NGINX configuration, you can no longer access its contents, but you can view its metadata, such as the SHA-256 hash of the file contents.
  • You can provide new contents for an existing protected file using the **Overwrite** link, or resubmit the configuration without having to provide the file contents again.
  • To modify the file path of a protected file or convert it to a regular file, delete the original file and create a new one.
  • A protected file is designated with a {{}} icon on the portal.
| + {{
}} + + {{}}If specifying an absolute file path, see the [NGINX Filesystem Restrictions table]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/overview/#nginx-filesystem-restrictions" >}}) for the allowed directories the file can be written to.{{}} + +1. Provide your NGINX configuration in the configuration file. + +1. Files like SSL/TLS certificates can be added as well. However, we reccommend using Azure Key Vault to store your certificates. See [Add SSL/TLS certificates]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/overview.md#add-ssltls-certificates">}}) for more information. + +1. Select **Submit** to apply the new configuration. + +{{}}We currently only support more than 5 unique listen ports on the Standard V2 plan. NGINX configurations that specify more than 5 ports on other plans will be rejected. For more information on listen port limitations, see our [FAQ]({{< relref "/nginxaas-azure/faq" >}}).{{}} + +### NGINX configuration validation + +NGINX configuration is validated real-time to check for syntax and compatibility with the service. Validation errors are reported in the editor for you to correct. + +For example, if you create/update an NGINX config with a particular directive that is not allowed, the service will analyse your NGINX config and provide real-time feedback. + +{{< img src="nginxaas-azure/validation-error.png" alt="NGINX Configuration validation error" >}} + + +The editing experience consists of a single view for both editing and validation + +- If the config is invalid, then any errors are highlighted in-place in the config editor. Hover over the highlighted errors to learn more about them or check the problems section at the bottom. Corrections can be made in the same panel. + +- If the config is valid, then a green check mark appears next to NGINXaaS Analyzer at the bottom indicating that you can submit the config to deploy it. + +## Upload a GZIP NGINX configuration + +Given the example gzipped archive, + +```bash +$ tar -czf nginx.tar.gz nginx +$ tar -tzf nginx.tar.gz +nginx/ +nginx/nginx.conf +nginx/njs.js +nginx/servers +nginx/servers/ +nginx/servers/server1.conf +nginx/servers/server2.conf +``` + +where `nginx` is a directory with the following structure, + +```bash +$ tree nginx +nginx +├── nginx.conf +├── njs.js +└── servers + ├── server1.conf + └── server2.conf + +1 directory, 4 files +``` + +`nginx.tar.gz` can be uploaded using the following portal workflow. + +Before continuing, ensure the file paths in the archive match the includes in the NGINX config. +For example, + +```nginx +http { + include nginx/servers/server1.conf; + js_import nginx/njs.js; + # ... +} +``` + +1. Go to your NGINXaaS for Azure deployment. + +1. Select **NGINX configuration** from the left menu. + +1. Select **Upload config package**. + +1. Drag and drop or browse for the new gzip compressed archive file to upload. + +1. Specify the root file. + + {{}}Uploading a new file will replace all existing NGINX configuration files in your deployment. You must acknowledge this step before you proceed to upload.{{}} + +1. Select **Upload**. + +## Update an NGINX configuration + +1. Go to your NGINXaaS for Azure deployment. + +1. Select **NGINX configuration** in the left menu. + +1. Select the configuration file you want to update from the File path list. + +1. Make the necessary updates to the configuration. + + - You can also update the file path and/or assign the file as root. + +1. 
(Optional) Select any other configuration files to make additional updates. + +1. Submit your changes. + +## Delete NGINX configuration Files + +1. Go to your NGINXaaS for Azure deployment. + +1. Select **NGINX configuration** in the left menu. + +1. Select the configuration file you want to delete from the File path list. + +1. Select the delete icon {{< fa "fa fa-trash">}}. + +1. Confirm your action to delete the configuration file. + + {{}}Only non-root configuration files can be deleted.{{}} + +{{< tip >}} + +See the [NGINX connfiguration overview]({{< relref "overview.md" >}}) topic +to learn more about: + +- [NGINX configuration automation workflows]({{< relref "overview.md#nginx-configuration-automation-workflows" >}}) +- [NGINX filesystem restrictions]({{< relref "overview.md#nginx-filesystem-restrictions" >}}) +- [Disallowed configuration directives]({{< relref "overview.md#disallowed-configuration-directives" >}}) +- [Directives that cannot be overridden]({{< relref "overview.md#directives-that-cannot-be-overridden" >}}) +- [Configuration directives list]({{< relref "overview.md#configuration-directives-list" >}}) + +{{< /tip >}} diff --git a/content/nginxaas-azure/getting-started/nginx-configuration/nginx-configurations-terraform.md b/content/nginxaas-azure/getting-started/nginx-configuration/nginx-configurations-terraform.md new file mode 100644 index 000000000..9325bcf9f --- /dev/null +++ b/content/nginxaas-azure/getting-started/nginx-configuration/nginx-configurations-terraform.md @@ -0,0 +1,83 @@ +--- +title: "Upload using Terraform" +weight: 300 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/getting-started/nginx-configuration/nginx-configurations-terraform/ +--- + +## Overview + +F5 NGINX as a Service for Azure (NGINXaaS) configurations can be managed using Terraform. This document outlines common Terraform workflows for NGINXaaS. + +## Prerequisites + +{{< include "/nginxaas-azure/terraform-prerequisites.md" >}} + +## Upload an NGINX configuration + +You can find examples of Terraform configurations in the [NGINXaaS for Azure Snippets GitHub repository](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/terraform/configurations) + +To create a deployment and add a configuration, run the following commands: + + ```bash + terraform init + terraform plan + terraform apply --auto-approve + ``` + +## Manage an NGINX configuration + +NGINX configuration files are uploaded and returned as base64 encoded data. We recommend using git or other version control systems to view human-readable differences between configuration files during `terraform plan`. Alternatively, you can decode the file contents to view the whole file. For example, + +```bash +$ terraform plan +... +- config_file { + - content = "aHR0cCB7CiAgICBzZXJ2ZXIgewogICAgICAgIGxvY2F0aW9uIC8gewogICAgICAgICAgICByZXR1cm4gMjAwICJIZWxsbyI7CiAgICAgICAgfQogICAgfQoK" -> null + - virtual_path = "nginx.conf" -> null + } ++ config_file { + + content = "aHR0cCB7CiAgICBzZXJ2ZXIgewogICAgICAgIGxvY2F0aW9uIC8gewogICAgICAgICAgICByZXR1cm4gMjAwICJIZWxsbyBXb3JsZCEiOwogICAgICAgIH0KICAgIH0KfQoK" + + virtual_path = "nginx.conf" + } +... 
+``` + +``` +$ echo aHR0cCB7CiAgICBzZXJ2ZXIgewogICAgICAgIGxvY2F0aW9uIC8gewogICAgICAgICAgICByZXR1cm4gMjAwICJIZWxsbyBXb3JsZCEiOwogICAgICAgIH0KICAgIH0KfQoK | base64 --decode +http { + server { + location / { + return 200 "Hello World!"; + } + } +} +``` + +## Delete a deployment + +Once the deployment is no longer needed, run the following to clean up the deployment and related resources: + + ```bash + terraform destroy --auto-approve + ``` + +## Additional resources + +- [Terraform NGINX configuration documentation](https://registry.terraform.io/providers/hashicorp/azurerm/3.97.0/docs/resources/nginx_configuration) + +{{< include "/nginxaas-azure/terraform-resources.md" >}} + +{{< tip >}} + +See the [NGINX connfiguration overview]({{< relref "overview.md" >}}) topic +to learn more about: + +- [NGINX configuration automation workflows]({{< relref "overview.md#nginx-configuration-automation-workflows" >}}) +- [NGINX filesystem restrictions]({{< relref "overview.md#nginx-filesystem-restrictions" >}}) +- [Disallowed configuration directives]({{< relref "overview.md#disallowed-configuration-directives" >}}) +- [Directives that cannot be overridden]({{< relref "overview.md#directives-that-cannot-be-overridden" >}}) +- [Configuration directives list]({{< relref "overview.md#configuration-directives-list" >}}) + +{{< /tip >}} diff --git a/content/nginxaas-azure/getting-started/nginx-configuration/overview.md b/content/nginxaas-azure/getting-started/nginx-configuration/overview.md new file mode 100644 index 000000000..9b5d05939 --- /dev/null +++ b/content/nginxaas-azure/getting-started/nginx-configuration/overview.md @@ -0,0 +1,971 @@ +--- +title: "Overview" +weight: 50 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/getting-started/nginx-configuration/overview/ +--- + +This document provides details about using NGINX configuration files with your +F5 NGINX as a Service for Azure deployment, restrictions, and available directives. + +## NGINX configuration common user workflows + +NGINX configurations can be uploaded to your NGINXaaS for Azure deployment using the Azure portal, Azure CLI, or Terraform. The following documents provide detailed steps on how to upload NGINX configurations: + +- [Upload using the Azure portal]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md" >}}) +- [Upload using the Azure CLI]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-azure-cli" >}}) +- [Upload using Terraform]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configurations-terraform.md" >}}) + +The topics below provide information on NGINX configuration restrictions and directives that are supported by NGINXaaS for Azure when using any of the above workflows. + +## NGINX configuration automation workflows + +NGINX configurations stored in GitHub can be applied to existing NGINXaaS for Azure deployments using custom GitHub Action workflows. See [NGINXaaS for Azure Deployment Action](https://github.com/nginxinc/nginx-for-azure-deploy-action) for documentation and examples on how to incorporate these workflows in your GitHub Actions CI/CD pipelines. + +## NGINX filesystem restrictions +NGINXaaS for Azure places restrictions on the instance's filesystem; only a specific set of directories are allowed to be read from and written to. Below is a table describing what directories the NGINX worker process can read and write to and what directories files can be written to. 
These files include certificate files and any files uploaded to the deployment, excluding NGINX configuration files. + + {{}} + | Allowed Directory | NGINX worker process can read/write to | Files can be written to | + |------------------ | ----------------- | ----------------- | + | /etc/nginx | | ✓ | + | /opt | ✓ | ✓ | + | /srv | ✓ | ✓ | + | /tmp | ✓ | | + | /var/cache/nginx | ✓ | | + | /var/www | ✓ | ✓ | +{{}} + +Attempts to access other directories will be denied and result in a `5xx` error. + +## Disallowed configuration directives +Some directives are not supported because of specific limitations. If you include one of these directives in your NGINX configuration, you'll get an error. + + {{}} + | Disallowed Directive | Reason | + |------------------ | ----------------- | + | ssl_engine | No hardware SSL accelerator is available. | + | debug_points | NGINXaaS does not provide access to NGINX processes for debugging. | + | fastcgi_bind
grpc_bind
memcached_bind
proxy_bind
scgi_bind
uwsgi_bind | Source IP specification for active-active deployments is not allowed. | + | quic_bpf | QUIC connection migration is not currently supported for active-active deployments. | + +{{
}} + +You may find that a few directives are not listed here as either allowed or disallowed. Our team is working on getting these directives supported soon. + +## Directives that cannot be overridden +Some directives cannot be overridden by the user provided configuration. + + {{}} + | Persistent Directive | Value | Reason | + |------------------ | ----------------------- | -----------------| + | `user` | `nginx` | The `nginx` user has the correct permissions for accessing certificates, policy files and other auxfiles. | + | `worker_processes` | `auto` | Set to `auto` to automatically set `worker_processes` to the number of CPU cores. | + | `worker_connections` |
  • standard plan `4000`
  • basic plan `3000`
| To ensure reasonable performance of the NGINXaaS deployment for standard plan the `worker_connections` is fixed at 400/NCU; for basic plan this is set lower. | + | `pid` | `/run/nginx/nginx.pid` | Set to this value to allow NGINXaaS to automatically manage the NGINX master process. | + | `daemon` | `on` | Automatically set to `on` to allow NGINXaaS to manage the NGINX master process. | + | `master_process` | `on` | This directive is intended for NGINX developers. | + | `worker_cpu_affinity` | `auto` | The value `auto` allows binding worker processes automatically to available CPUs based on the current capacity of the deployment. | + +{{
}} + +## Configuration directives list + +
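For orientation, here is a minimal sketch of a configuration built only from directives the service accepts; it mirrors the configuration embedded (base64-encoded) in the Azure CLI update example, with `10.0.1.4:8000` standing in as a placeholder upstream address. Adding a disallowed directive such as `proxy_bind` or `ssl_engine` would instead result in a validation error, and persistent directives such as `user` and `worker_processes` are managed by the service and should not be set.

```nginx
http {
    upstream app {
        zone app 64k;              # shared memory zone, required for health_check
        least_conn;
        server 10.0.1.4:8000;      # placeholder backend address
    }

    server {
        listen 80;
        server_name *.example.com;

        location / {
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_buffering on;
            proxy_read_timeout 60s;
            proxy_pass http://app;
            health_check;
        }
    }
}
```

The `health_check` directive is placed inside a location that proxies to an upstream with a shared memory `zone`, which active health checks require.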
+Alphabetical index of directives + +NGINXaaS for Azure supports a limited set of NGINX directives. + +[absolute_redirect](https://nginx.org/en/docs/http/ngx_http_core_module.html#absolute_redirect)\ +[accept_mutex](https://nginx.org/en/docs/ngx_core_module.html#accept_mutex)\ +[accept_mutex_delay](https://nginx.org/en/docs/ngx_core_module.html#accept_mutex_delay)\ +[access_log (ngx_http_log_module)](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log)\ +[access_log (ngx_stream_log_module)](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#access_log)\ +[add_after_body](https://nginx.org/en/docs/http/ngx_http_addition_module.html#add_after_body)\ +[add_before_body](https://nginx.org/en/docs/http/ngx_http_addition_module.html#add_before_body)\ +[add_header](https://nginx.org/en/docs/http/ngx_http_headers_module.html#add_header)\ +[add_trailer](https://nginx.org/en/docs/http/ngx_http_headers_module.html#add_trailer)\ +[addition_types](https://nginx.org/en/docs/http/ngx_http_addition_module.html#addition_types)\ +[aio](https://nginx.org/en/docs/http/ngx_http_core_module.html#aio)\ +[aio_write](https://nginx.org/en/docs/http/ngx_http_core_module.html#aio_write)\ +[alias](https://nginx.org/en/docs/http/ngx_http_core_module.html#alias)\ +[allow (ngx_http_access_module)](https://nginx.org/en/docs/http/ngx_http_access_module.html#allow)\ +[allow (ngx_stream_access_module)](https://nginx.org/en/docs/stream/ngx_stream_access_module.html#allow)\ +[ancient_browser](https://nginx.org/en/docs/http/ngx_http_browser_module.html#ancient_browser)\ +[ancient_browser_value](https://nginx.org/en/docs/http/ngx_http_browser_module.html#ancient_browser_value)\ +[auth_basic](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic)\ +[auth_basic_user_file](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic_user_file)\ +[auth_delay](https://nginx.org/en/docs/http/ngx_http_core_module.html#auth_delay)\ +[auth_http](https://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#auth_http)\ +[auth_http_header](https://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#auth_http_header)\ +[auth_http_pass_client_cert](https://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#auth_http_pass_client_cert)\ +[auth_http_timeout](https://nginx.org/en/docs/mail/ngx_mail_auth_http_module.html#auth_http_timeout)\ +[auth_jwt](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt)\ +[auth_jwt_claim_set](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_claim_set)\ +[auth_jwt_header_set](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_header_set)\ +[auth_jwt_key_cache](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_cache)\ +[auth_jwt_key_file](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file)\ +[auth_jwt_key_request](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request)\ +[auth_jwt_leeway](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_leeway)\ +[auth_jwt_require](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_require)\ +[auth_jwt_type](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_type)\ +[auth_request](https://nginx.org/en/docs/http/ngx_http_auth_request_module.html#auth_request)\ +[auth_request_set](https://nginx.org/en/docs/http/ngx_http_auth_request_module.html#auth_request_set)\ 
+[autoindex](https://nginx.org/en/docs/http/ngx_http_autoindex_module.html#autoindex)\ +[autoindex_exact_size](https://nginx.org/en/docs/http/ngx_http_autoindex_module.html#autoindex_exact_size)\ +[autoindex_format](https://nginx.org/en/docs/http/ngx_http_autoindex_module.html#autoindex_format)\ +[autoindex_localtime](https://nginx.org/en/docs/http/ngx_http_autoindex_module.html#autoindex_localtime)\ +[break](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#break)\ +[connect_timeout](https://nginx.org/en/docs/ngx_mgmt_module.html#connect_timeout)\ +[charset](https://nginx.org/en/docs/http/ngx_http_charset_module.html#charset)\ +[charset_map](https://nginx.org/en/docs/http/ngx_http_charset_module.html#charset_map)\ +[charset_types](https://nginx.org/en/docs/http/ngx_http_charset_module.html#charset_types)\ +[chunked_transfer_encoding](https://nginx.org/en/docs/http/ngx_http_core_module.html#chunked_transfer_encoding)\ +[client_body_buffer_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size)\ +[client_body_in_file_only](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_in_file_only)\ +[client_body_in_single_buffer](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_in_single_buffer)\ +[client_body_temp_path](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_temp_path)\ +[client_body_timeout](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_timeout)\ +[client_header_buffer_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size)\ +[client_header_timeout](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_timeout)\ +[client_max_body_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size)\ +[connection_pool_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#connection_pool_size)\ +[create_full_put_path](https://nginx.org/en/docs/http/ngx_http_dav_module.html#create_full_put_path)\ +[daemon](https://nginx.org/en/docs/ngx_core_module.html#daemon)\ +[dav_access](https://nginx.org/en/docs/http/ngx_http_dav_module.html#dav_access)\ +[dav_methods](https://nginx.org/en/docs/http/ngx_http_dav_module.html#dav_methods)\ +[debug_connection](https://nginx.org/en/docs/ngx_core_module.html#debug_connection)\ +[default_type](https://nginx.org/en/docs/http/ngx_http_core_module.html#default_type)\ +[deny (ngx_http_access_module)](https://nginx.org/en/docs/http/ngx_http_access_module.html#deny)\ +[deny (ngx_stream_access_module)](https://nginx.org/en/docs/stream/ngx_stream_access_module.html#deny)\ +[directio](https://nginx.org/en/docs/http/ngx_http_core_module.html#directio)\ +[directio_alignment](https://nginx.org/en/docs/http/ngx_http_core_module.html#directio_alignment)\ +[disable_symlinks](https://nginx.org/en/docs/http/ngx_http_core_module.html#disable_symlinks)\ +[empty_gif](https://nginx.org/en/docs/http/ngx_http_empty_gif_module.html#empty_gif)\ +[env](https://nginx.org/en/docs/ngx_core_module.html#env)\ +[error_log](https://nginx.org/en/docs/ngx_core_module.html#error_log)\ +[error_page](https://nginx.org/en/docs/http/ngx_http_core_module.html#error_page)\ +[etag](https://nginx.org/en/docs/http/ngx_http_core_module.html#etag)\ +[events](https://nginx.org/en/docs/ngx_core_module.html#events)\ +[expires](https://nginx.org/en/docs/http/ngx_http_headers_module.html#expires)\ +[f4f](https://nginx.org/en/docs/http/ngx_http_f4f_module.html#f4f)\ 
+[f4f_buffer_size](https://nginx.org/en/docs/http/ngx_http_f4f_module.html#f4f_buffer_size)\ +[fastcgi_buffer_size](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_buffer_size)\ +[fastcgi_buffering](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_buffering)\ +[fastcgi_buffers](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_buffers)\ +[fastcgi_busy_buffers_size](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_busy_buffers_size)\ +[fastcgi_cache](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache)\ +[fastcgi_cache_background_update](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_background_update)\ +[fastcgi_cache_bypass](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_bypass)\ +[fastcgi_cache_key](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_key)\ +[fastcgi_cache_lock](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_lock)\ +[fastcgi_cache_lock_age](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_lock_age)\ +[fastcgi_cache_lock_timeout](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_lock_timeout)\ +[fastcgi_cache_max_range_offset](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_max_range_offset)\ +[fastcgi_cache_methods](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_methods)\ +[fastcgi_cache_min_uses](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_min_uses)\ +[fastcgi_cache_path](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_path)\ +[fastcgi_cache_revalidate](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_revalidate)\ +[fastcgi_cache_use_stale](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_use_stale)\ +[fastcgi_cache_valid](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_valid)\ +[fastcgi_catch_stderr](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_catch_stderr)\ +[fastcgi_connect_timeout](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_connect_timeout)\ +[fastcgi_force_ranges](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_force_ranges)\ +[fastcgi_hide_header](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_hide_header)\ +[fastcgi_ignore_client_abort](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_ignore_client_abort)\ +[fastcgi_ignore_headers](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_ignore_headers)\ +[fastcgi_index](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_index)\ +[fastcgi_intercept_errors](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_intercept_errors)\ +[fastcgi_keep_conn](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_keep_conn)\ +[fastcgi_limit_rate](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_limit_rate)\ +[fastcgi_max_temp_file_size](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_max_temp_file_size)\ +[fastcgi_next_upstream](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_next_upstream)\ +[fastcgi_next_upstream_timeout](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_next_upstream_timeout)\ 
+[fastcgi_next_upstream_tries](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_next_upstream_tries)\ +[fastcgi_no_cache](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_no_cache)\ +[fastcgi_param](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_param)\ +[fastcgi_pass](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_pass)\ +[fastcgi_pass_header](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_pass_header)\ +[fastcgi_pass_request_body](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_pass_request_body)\ +[fastcgi_pass_request_headers](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_pass_request_headers)\ +[fastcgi_read_timeout](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_read_timeout)\ +[fastcgi_request_buffering](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_request_buffering)\ +[fastcgi_send_lowat](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_send_lowat)\ +[fastcgi_send_timeout](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_send_timeout)\ +[fastcgi_socket_keepalive](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_socket_keepalive)\ +[fastcgi_split_path_info](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_split_path_info)\ +[fastcgi_store](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_store)\ +[fastcgi_store_access](http://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_store_access)\ +[fastcgi_temp_file_write_size](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_temp_file_write_size)\ +[fastcgi_temp_path](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_temp_path)\ +[flv](https://nginx.org/en/docs/http/ngx_http_flv_module.html#flv)\ +[geo (ngx_http_geo_module)](https://nginx.org/en/docs/http/ngx_http_geo_module.html#geo)\ +[geo (ngx_stream_geo_module)](https://nginx.org/en/docs/stream/ngx_stream_geo_module.html#geo)\ +[grpc_buffer_size](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_buffer_size)\ +[grpc_connect_timeout](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_connect_timeout)\ +[grpc_hide_header](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_hide_header)\ +[grpc_ignore_headers](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ignore_headers)\ +[grpc_intercept_errors](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_intercept_errors)\ +[grpc_next_upstream](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_next_upstream)\ +[grpc_next_upstream_timeout](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_next_upstream_timeout)\ +[grpc_next_upstream_tries](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_next_upstream_tries)\ +[grpc_pass](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_pass)\ +[grpc_pass_header](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_pass_header)\ +[grpc_read_timeout](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_read_timeout)\ +[grpc_send_timeout](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_send_timeout)\ +[grpc_set_header](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_set_header)\ +[grpc_socket_keepalive](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_socket_keepalive)\ 
+[grpc_ssl_certificate](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_certificate)\ +[grpc_ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_certificate_key)\ +[grpc_ssl_ciphers](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_ciphers)\ +[grpc_ssl_conf_command](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_conf_command)\ +[grpc_ssl_crl](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_crl)\ +[grpc_ssl_name](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_name)\ +[grpc_ssl_password_file](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_password_file)\ +[grpc_ssl_protocols](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_protocols)\ +[grpc_ssl_server_name](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_server_name)\ +[grpc_ssl_session_reuse](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_session_reuse)\ +[grpc_ssl_trusted_certificate](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_trusted_certificate)\ +[grpc_ssl_verify](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_verify)\ +[grpc_ssl_verify_depth](https://nginx.org/en/docs/http/ngx_http_grpc_module.html#grpc_ssl_verify_depth)\ +[gunzip](https://nginx.org/en/docs/http/ngx_http_gunzip_module.html#gunzip)\ +[gunzip_buffers](https://nginx.org/en/docs/http/ngx_http_gunzip_module.html#gunzip_buffers)\ +[gzip](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip)\ +[gzip_buffers](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_buffers)\ +[gzip_comp_level](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level)\ +[gzip_disable](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_disable)\ +[gzip_http_version](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_http_version)\ +[gzip_min_length](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_min_length)\ +[gzip_proxied](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_proxied)\ +[gzip_static](https://nginx.org/en/docs/http/ngx_http_gzip_static_module.html#gzip_static)\ +[gzip_types](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_types)\ +[gzip_vary](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_vary)\ +[hash (ngx_http_upstream_module)](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#hash)\ +[hash (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#hash)\ +[health_check (ngx_http_upstream_hc_module)](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check)\ +[health_check (ngx_stream_upstream_hc_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check)\ +[health_check_timeout](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#health_check_timeout)\ +[hls](https://nginx.org/en/docs/http/ngx_http_hls_module.html#hls)\ +[hls_buffers](https://nginx.org/en/docs/http/ngx_http_hls_module.html#hls_buffers)\ +[hls_forward_args](https://nginx.org/en/docs/http/ngx_http_hls_module.html#hls_forward_args)\ +[hls_fragment](https://nginx.org/en/docs/http/ngx_http_hls_module.html#hls_fragment)\ +[hls_mp4_buffer_size](https://nginx.org/en/docs/http/ngx_http_hls_module.html#hls_mp4_buffer_size)\ +[hls_mp4_max_buffer_size](https://nginx.org/en/docs/http/ngx_http_hls_module.html#hls_mp4_max_buffer_size)\ 
+[http](https://nginx.org/en/docs/http/ngx_http_core_module.html#http)\ +[http2_body_preread_size](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_body_preread_size)\ +[http2_chunk_size](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_chunk_size)\ +[http2_idle_timeout](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_idle_timeout)\ +[http2_max_concurrent_pushes](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_concurrent_pushes)\ +[http2_max_concurrent_streams](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_concurrent_streams)\ +[http2_max_field_size](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_field_size)\ +[http2_max_header_size](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_header_size)\ +[http2_max_requests](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_max_requests)\ +[http2_push](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_push)\ +[http2_push_preload](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_push_preload)\ +[http2_recv_buffer_size](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_recv_buffer_size)\ +[http2_recv_timeout](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_recv_timeout)\ +[http3](http://nginx.org/en/docs/http/ngx_http_v3_module.html#http3)\ +[http3_hq](http://nginx.org/en/docs/http/ngx_http_v3_module.html#http3_hq)\ +[http3_max_concurrent_streams](http://nginx.org/en/docs/http/ngx_http_v3_module.html#http3_max_concurrent_streams)\ +[http3_stream_buffer_size](http://nginx.org/en/docs/http/ngx_http_v3_module.html#http3_stream_buffer_size)\ +[if](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#if)\ +[if_modified_since](https://nginx.org/en/docs/http/ngx_http_core_module.html#if_modified_since)\ +[ignore_invalid_headers](https://nginx.org/en/docs/http/ngx_http_core_module.html#ignore_invalid_headers)\ +[image_filter](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter)\ +[image_filter_buffer](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter_buffer)\ +[image_filter_interlace](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter_interlace)\ +[image_filter_jpeg_quality](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter_jpeg_quality)\ +[image_filter_sharpen](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter_sharpen)\ +[image_filter_transparency](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter_transparency)\ +[image_filter_webp_quality](http://nginx.org/en/docs/http/ngx_http_image_filter_module.html#image_filter_webp_quality)\ +[imap_auth](https://nginx.org/en/docs/mail/ngx_mail_imap_module.html#imap_auth)\ +[imap_capabilities](https://nginx.org/en/docs/mail/ngx_mail_imap_module.html#imap_capabilities)\ +[imap_client_buffer](https://nginx.org/en/docs/mail/ngx_mail_imap_module.html#imap_client_buffer)\ +[include](https://nginx.org/en/docs/ngx_core_module.html#include)\ +[index](https://nginx.org/en/docs/http/ngx_http_index_module.html#index)\ +[internal](https://nginx.org/en/docs/http/ngx_http_core_module.html#internal)\ +[internal_redirect](http://nginx.org/en/docs/http/ngx_http_internal_redirect_module.html#internal_redirect)\ +[ip_hash](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ip_hash)\ +[js_access (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_access)\ 
+[js_body_filter](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_body_filter)\ +[js_content](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_content)\ +[js_fetch_buffer_size (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_buffer_size)\ +[js_fetch_buffer_size (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_buffer_size)\ +[js_fetch_ciphers (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_ciphers)\ +[js_fetch_ciphers (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_ciphers)\ +[js_fetch_max_response_buffer_size (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_max_response_buffer_size)\ +[js_fetch_max_response_buffer_size (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_max_response_buffer_size)\ +[js_fetch_protocols (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_protocols)\ +[js_fetch_protocols (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_protocols)\ +[js_fetch_timeout (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_timeout)\ +[js_fetch_timeout (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_timeout)\ +[js_fetch_trusted_certificate (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_trusted_certificate)\ +[js_fetch_trusted_certificate (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_trusted_certificate)\ +[js_fetch_verify (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_verify)\ +[js_fetch_verify (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_verify)\ +[js_fetch_verify_depth (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_fetch_verify_depth)\ +[js_fetch_verify_depth (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_fetch_verify_depth)\ +[js_filter (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_filter)\ +[js_header_filter](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_header_filter)\ +[js_import (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_import)\ +[js_import (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_import)\ +[js_include (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_include)\ +[js_include (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_include)\ +[js_path (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_path)\ +[js_path (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_path)\ +[js_periodic (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_periodic)\ +[js_periodic (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_periodic)\ +[js_preload_object (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_preload_object)\ +[js_preload_object (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_preload_object)\ +[js_preread 
(ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_preread)\ +[js_set (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_set)\ +[js_set (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_set)\ +[js_var (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_var)\ +[js_var (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_var)\ +[js_shared_dict_zone (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_shared_dict_zone)\ +[js_var (ngx_http_js_module)](https://nginx.org/en/docs/http/ngx_http_js_module.html#js_var)\ +[js_var (ngx_stream_js_module)](https://nginx.org/en/docs/stream/ngx_stream_js_module.html#js_var)\ +[keepalive](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive)\ +[keepalive_disable](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_disable)\ +[keepalive_requests (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_requests)\ +[keepalive_time (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_time)\ +[keepalive_timeout (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#keepalive_timeout)\ +[keyval (ngx_http_keyval_module)](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval)\ +[keyval (ngx_stream_keyval_module)](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval)\ +[keyval_zone (ngx_http_keyval_module)](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone)\ +[keyval_zone (ngx_stream_keyval_module)](https://nginx.org/en/docs/stream/ngx_stream_keyval_module.html#keyval_zone)\ +[large_client_header_buffers](https://nginx.org/en/docs/http/ngx_http_core_module.html#large_client_header_buffers)\ +[least_conn (ngx_http_upstream_module)](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#least_conn)\ +[least_conn (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#least_conn)\ +[least_time (ngx_http_upstream_module)](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#least_time)\ +[least_time (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#least_time)\ +[limit_conn (ngx_http_limit_conn_module)](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn)\ +[limit_conn (ngx_stream_limit_conn_module)](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn)\ +[limit_conn_dry_run (ngx_http_limit_conn_module)](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_dry_run)\ +[limit_conn_dry_run (ngx_stream_limit_conn_module)](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_dry_run)\ +[limit_conn_log_level (ngx_http_limit_conn_module)](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_log_level)\ +[limit_conn_log_level (ngx_stream_limit_conn_module)](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_log_level)\ +[limit_conn_status](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_status)\ +[limit_conn_zone (ngx_http_limit_conn_module)](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone)\ +[limit_conn_zone (ngx_stream_limit_conn_module)](https://nginx.org/en/docs/stream/ngx_stream_limit_conn_module.html#limit_conn_zone)\ 
+[limit_except](https://nginx.org/en/docs/http/ngx_http_core_module.html#limit_except)\ +[limit_rate](https://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate)\ +[limit_rate_after](https://nginx.org/en/docs/http/ngx_http_core_module.html#limit_rate_after)\ +[limit_req](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req)\ +[limit_req_dry_run](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_dry_run)\ +[limit_req_log_level](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_log_level)\ +[limit_req_status](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_status)\ +[limit_req_zone](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone)\ +[limit_zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_zone)\ +[lingering_close](https://nginx.org/en/docs/http/ngx_http_core_module.html#lingering_close)\ +[lingering_time](https://nginx.org/en/docs/http/ngx_http_core_module.html#lingering_time)\ +[lingering_timeout](https://nginx.org/en/docs/http/ngx_http_core_module.html#lingering_timeout)\ +[listen (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#listen)\ +[listen (ngx_mail_core_module)](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#listen)\ +[load_module](https://nginx.org/en/docs/ngx_core_module.html#load_module)\ +[location](https://nginx.org/en/docs/http/ngx_http_core_module.html#location)\ +[lock_file](http://nginx.org/en/docs/ngx_core_module.html#lock_file)\ +[log_format (ngx_http_log_module)](https://nginx.org/en/docs/http/ngx_http_log_module.html#log_format)\ +[log_format (ngx_stream_log_module)](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#log_format)\ +[log_not_found](https://nginx.org/en/docs/http/ngx_http_core_module.html#log_not_found)\ +[log_subrequest](https://nginx.org/en/docs/http/ngx_http_core_module.html#log_subrequest)\ +[mail](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#mail)\ +[map (ngx_http_map_module)](https://nginx.org/en/docs/http/ngx_http_map_module.html#map)\ +[map (ngx_stream_map_module)](https://nginx.org/en/docs/stream/ngx_stream_map_module.html#map)\ +[map_hash_bucket_size (ngx_http_map_module)](https://nginx.org/en/docs/http/ngx_http_map_module.html#map_hash_bucket_size)\ +[map_hash_bucket_size (ngx_stream_map_module)](https://nginx.org/en/docs/stream/ngx_stream_map_module.html#map_hash_bucket_size)\ +[map_hash_max_size (ngx_http_map_module)](https://nginx.org/en/docs/http/ngx_http_map_module.html#map_hash_max_size)\ +[map_hash_max_size (ngx_stream_map_module)](https://nginx.org/en/docs/stream/ngx_stream_map_module.html#map_hash_max_size)\ +[master_process](https://nginx.org/en/docs/ngx_core_module.html#master_process)\ +[match (ngx_http_upstream_hc_module)](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match)\ +[match (ngx_stream_upstream_hc_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_hc_module.html#match)\ +[max_errors](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#max_errors)\ +[max_ranges](https://nginx.org/en/docs/http/ngx_http_core_module.html#max_ranges)\ +[memcached_buffer_size](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_buffer_size)\ +[memcached_connect_timeout](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_connect_timeout)\ +[memcached_gzip_flag](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_gzip_flag)\ 
+[memcached_next_upstream](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_next_upstream)\ +[memcached_next_upstream_timeout](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_next_upstream_timeout)\ +[memcached_next_upstream_tries](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_next_upstream_tries)\ +[memcached_pass](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_pass)\ +[memcached_read_timeout](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_read_timeout)\ +[memcached_send_timeout](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_send_timeout)\ +[memcached_socket_keepalive](https://nginx.org/en/docs/http/ngx_http_memcached_module.html#memcached_socket_keepalive)\ +[merge_slashes](https://nginx.org/en/docs/http/ngx_http_core_module.html#merge_slashes)\ +[mgmt](https://nginx.org/en/docs/ngx_mgmt_module.html#mgmt)\ +[min_delete_depth](https://nginx.org/en/docs/http/ngx_http_dav_module.html#min_delete_depth)\ +[mirror](https://nginx.org/en/docs/http/ngx_http_mirror_module.html#mirror)\ +[mirror_request_body](https://nginx.org/en/docs/http/ngx_http_mirror_module.html#mirror_request_body)\ +[modern_browser](https://nginx.org/en/docs/http/ngx_http_browser_module.html#modern_browser)\ +[modern_browser_value](https://nginx.org/en/docs/http/ngx_http_browser_module.html#modern_browser_value)\ +[more_clear_headers](https://github.com/openresty/headers-more-nginx-module?tab=readme-ov-file#more_clear_headers)\ +[more_clear_input_headers](https://github.com/openresty/headers-more-nginx-module?tab=readme-ov-file#more_clear_input_headers)\ +[more_set_headers](https://github.com/openresty/headers-more-nginx-module?tab=readme-ov-file#more_set_headers)\ +[more_set_input_headers](https://github.com/openresty/headers-more-nginx-module?tab=readme-ov-file#more_set_input_headers)\ +[mp4](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4)\ +[mp4_buffer_size](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4_buffer_size)\ +[mp4_limit_rate](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4_limit_rate)\ +[mp4_limit_rate_after](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4_limit_rate_after)\ +[mp4_max_buffer_size](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4_max_buffer_size)\ +[mp4_start_key_frame](https://nginx.org/en/docs/http/ngx_http_mp4_module.html#mp4_start_key_frame)\ +[mqtt](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html#mqtt)\ +[mqtt_rewrite_buffer_size](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html#mqtt_rewrite_buffer_size)\ +[mqtt_set_connect](https://nginx.org/en/docs/stream/ngx_stream_mqtt_filter_module.html#mqtt_set_connect)\ +[msie_padding](https://nginx.org/en/docs/http/ngx_http_core_module.html#msie_padding)\ +[msie_refresh](https://nginx.org/en/docs/http/ngx_http_core_module.html#msie_refresh)\ +[multi_accept](https://nginx.org/en/docs/ngx_core_module.html#multi_accept)\ +[ntlm](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ntlm)\ +[open_file_cache](https://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache)\ +[open_file_cache_errors](https://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache_errors)\ +[open_file_cache_min_uses](https://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache_min_uses)\ +[open_file_cache_valid](https://nginx.org/en/docs/http/ngx_http_core_module.html#open_file_cache_valid)\ 
+[open_log_file_cache (ngx_http_log_module)](https://nginx.org/en/docs/http/ngx_http_log_module.html#open_log_file_cache)\ +[open_log_file_cache (ngx_stream_log_module)](https://nginx.org/en/docs/stream/ngx_stream_log_module.html#open_log_file_cache)\ +[otel_exporter](https://nginx.org/en/docs/ngx_otel_module.html#otel_exporter)\ +[otel_service_name](https://nginx.org/en/docs/ngx_otel_module.html#otel_service_name)\ +[otel_trace](https://nginx.org/en/docs/ngx_otel_module.html#otel_trace)\ +[otel_trace_context](https://nginx.org/en/docs/ngx_otel_module.html#otel_trace_context)\ +[otel_span_name](https://nginx.org/en/docs/ngx_otel_module.html#otel_span_name)\ +[otel_span_attr](https://nginx.org/en/docs/ngx_otel_module.html#otel_span_attr)\ +[output_buffers](https://nginx.org/en/docs/http/ngx_http_core_module.html#output_buffers)\ +[override_charset](https://nginx.org/en/docs/http/ngx_http_charset_module.html#override_charset)\ +[pass](https://nginx.org/en/docs/stream/ngx_stream_pass_module.html#pass)\ +[pid](https://nginx.org/en/docs/ngx_core_module.html#pid)\ +[pop3_auth](https://nginx.org/en/docs/mail/ngx_mail_pop3_module.html#pop3_auth)\ +[pop3_capabilities](https://nginx.org/en/docs/mail/ngx_mail_pop3_module.html#pop3_capabilities)\ +[port_in_redirect](https://nginx.org/en/docs/http/ngx_http_core_module.html#port_in_redirect)\ +[postpone_output](https://nginx.org/en/docs/http/ngx_http_core_module.html#postpone_output)\ +[preread_buffer_size (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#preread_buffer_size)\ +[preread_timeout (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#preread_timeout)\ +[protocol](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#protocol)\ +[proxy_buffer](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_buffer)\ +[proxy_buffer_size (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size)\ +[proxy_buffer_size (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_buffer_size)\ +[proxy_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering)\ +[proxy_buffers](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers)\ +[proxy_busy_buffers_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_busy_buffers_size)\ +[proxy_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache)\ +[proxy_cache_background_update](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_background_update)\ +[proxy_cache_bypass](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_bypass)\ +[proxy_cache_convert_head](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_convert_head)\ +[proxy_cache_key](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_key)\ +[proxy_cache_lock](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock)\ +[proxy_cache_lock_age](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock_age)\ +[proxy_cache_lock_timeout](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock_timeout)\ +[proxy_cache_max_range_offset](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_max_range_offset)\ +[proxy_cache_methods](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_methods)\ 
+[proxy_cache_min_uses](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_min_uses)\ +[proxy_cache_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path)\ +[proxy_cache_purge](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_purge)\ +[proxy_cache_revalidate](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_revalidate)\ +[proxy_cache_use_stale](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_use_stale)\ +[proxy_cache_valid](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_valid)\ +[proxy_connect_timeout (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_connect_timeout)\ +[proxy_connect_timeout (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_connect_timeout)\ +[proxy_cookie_domain](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_domain)\ +[proxy_cookie_flags](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_flags)\ +[proxy_cookie_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cookie_path)\ +[proxy_download_rate (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_download_rate)\ +[proxy_force_ranges](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_force_ranges)\ +[proxy_half_close (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_half_close)\ +[proxy_headers_hash_bucket_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_bucket_size)\ +[proxy_headers_hash_max_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_headers_hash_max_size)\ +[proxy_hide_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_hide_header)\ +[proxy_http_version](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_http_version)\ +[proxy_ignore_client_abort](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_client_abort)\ +[proxy_ignore_headers](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ignore_headers)\ +[proxy_intercept_errors](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_intercept_errors)\ +[proxy_limit_rate](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_limit_rate)\ +[proxy_max_temp_file_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_max_temp_file_size)\ +[proxy_method](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_method)\ +[proxy_next_upstream (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream)\ +[proxy_next_upstream (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_next_upstream)\ +[proxy_next_upstream_timeout (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream_timeout)\ +[proxy_next_upstream_timeout (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_next_upstream_timeout)\ +[proxy_next_upstream_tries (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream_tries)\ +[proxy_next_upstream_tries (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_next_upstream_tries)\ +[proxy_no_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_no_cache)\ 
+[proxy_pass (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass)\ +[proxy_pass (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_pass)\ +[proxy_pass_error_message](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_pass_error_message)\ +[proxy_pass_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass_header)\ +[proxy_pass_request_body](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass_request_body)\ +[proxy_pass_request_headers](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_pass_request_headers)\ +[proxy_protocol (ngx_mail_proxy_module)](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_protocol)\ +[proxy_protocol (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_protocol)\ +[proxy_protocol_timeout (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#proxy_protocol_timeout)\ +[proxy_read_timeout](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout)\ +[proxy_redirect](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_redirect)\ +[proxy_requests (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_requests)\ +[proxy_request_buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering)\ +[proxy_responses (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_responses)\ +[proxy_send_lowat](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_lowat)\ +[proxy_send_timeout](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_timeout)\ +[proxy_session_drop (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_session_drop)\ +[proxy_set_body](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_body)\ +[proxy_set_header](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_set_header)\ +[proxy_smtp_auth](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_smtp_auth)\ +[proxy_socket_keepalive (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_socket_keepalive)\ +[proxy_socket_keepalive (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_socket_keepalive)\ +[proxy_ssl (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl)\ +[proxy_ssl_certificate (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate)\ +[proxy_ssl_certificate (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_certificate)\ +[proxy_ssl_certificate_key (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_certificate_key)\ +[proxy_ssl_certificate_key (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_certificate_key)\ +[proxy_ssl_ciphers (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_ciphers)\ +[proxy_ssl_ciphers (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_ciphers)\ +[proxy_ssl_conf_command (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_conf_command)\ +[proxy_ssl_conf_command 
(ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_conf_command)\ +[proxy_ssl_crl (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_crl)\ +[proxy_ssl_crl (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_crl)\ +[proxy_ssl_name (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_name)\ +[proxy_ssl_name (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_name)\ +[proxy_ssl_password_file (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_password_file)\ +[proxy_ssl_password_file (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_password_file)\ +[proxy_ssl_protocols (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_protocols)\ +[proxy_ssl_protocols (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_protocols)\ +[proxy_ssl_server_name (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_server_name)\ +[proxy_ssl_server_name (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_server_name)\ +[proxy_ssl_session_reuse (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_session_reuse)\ +[proxy_ssl_session_reuse (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_session_reuse)\ +[proxy_ssl_trusted_certificate (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_trusted_certificate)\ +[proxy_ssl_trusted_certificate (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_trusted_certificate)\ +[proxy_ssl_verify (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify)\ +[proxy_ssl_verify (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_verify)\ +[proxy_ssl_verify_depth (ngx_http_proxy_module)](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify_depth)\ +[proxy_ssl_verify_depth (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_ssl_verify_depth)\ +[proxy_store](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_store)\ +[proxy_store_access](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_store_access)\ +[proxy_temp_file_write_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_temp_file_write_size)\ +[proxy_temp_path](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_temp_path)\ +[proxy_timeout (ngx_mail_proxy_module)](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#proxy_timeout)\ +[proxy_timeout (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_timeout)\ +[proxy_upload_rate (ngx_stream_proxy_module)](https://nginx.org/en/docs/stream/ngx_stream_proxy_module.html#proxy_upload_rate)\ +[queue](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#queue)\ +[quic_active_connection_id_limit](http://nginx.org/en/docs/http/ngx_http_v3_module.html#quic_active_connection_id_limit)\ +[quic_gso](http://nginx.org/en/docs/http/ngx_http_v3_module.html#quic_gso)\ 
+[quic_host_key](http://nginx.org/en/docs/http/ngx_http_v3_module.html#quic_host_key)\ +[quic_retry](http://nginx.org/en/docs/http/ngx_http_v3_module.html#quic_retry)\ +[random (ngx_http_upstream_module)](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#random)\ +[random (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#random)\ +[random_index](https://nginx.org/en/docs/http/ngx_http_random_index_module.html#random_index)\ +[read_ahead](https://nginx.org/en/docs/http/ngx_http_core_module.html#read_ahead)\ +[read_timeout](https://nginx.org/en/docs/ngx_mgmt_module.html#read_timeout)\ +[real_ip_header](https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header)\ +[real_ip_recursive](https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_recursive)\ +[recursive_error_pages](https://nginx.org/en/docs/http/ngx_http_core_module.html#recursive_error_pages)\ +[referer_hash_bucket_size](https://nginx.org/en/docs/http/ngx_http_referer_module.html#referer_hash_bucket_size)\ +[referer_hash_max_size](https://nginx.org/en/docs/http/ngx_http_referer_module.html#referer_hash_max_size)\ +[request_pool_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#request_pool_size)\ +[reset_timedout_connection](https://nginx.org/en/docs/http/ngx_http_core_module.html#reset_timedout_connection)\ +[resolver (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver)\ +[resolver (ngx_mail_core_module)](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#resolver)\ +[resolver (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#resolver)\ +[resolver (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#resolver)\ +[resolver (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#resolver)\ +[resolver_timeout (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#resolver_timeout)\ +[resolver_timeout (ngx_mail_core_module)](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#resolver_timeout)\ +[resolver_timeout (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#resolver_timeout)\ +[resolver_timeout (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#resolver_timeout)\ +[resolver_timeout (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#resolver_timeout)\ +[return (ngx_http_rewrite_module)](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#return)\ +[return (ngx_stream_return_module)](https://nginx.org/en/docs/stream/ngx_stream_return_module.html#return)\ +[rewrite](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#rewrite)\ +[rewrite_log](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#rewrite_log)\ +[root](https://nginx.org/en/docs/http/ngx_http_core_module.html#root)\ +[satisfy](https://nginx.org/en/docs/http/ngx_http_core_module.html#satisfy)\ +[scgi_buffer_size](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_buffer_size)\ +[scgi_buffering](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_buffering)\ +[scgi_buffers](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_buffers)\ +[scgi_busy_buffers_size](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_busy_buffers_size)\ +[scgi_cache](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache)\ 
+[scgi_cache_background_update](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_background_update)\ +[scgi_cache_bypass](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_bypass)\ +[scgi_cache_key](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_key)\ +[scgi_cache_lock](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_lock)\ +[scgi_cache_lock_age](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_lock_age)\ +[scgi_cache_lock_timeout](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_lock_timeout)\ +[scgi_cache_max_range_offset](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_max_range_offset)\ +[scgi_cache_methods](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_methods)\ +[scgi_cache_min_uses](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_min_uses)\ +[scgi_cache_path](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_path)\ +[scgi_cache_purge](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_purge)\ +[scgi_cache_revalidate](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_revalidate)\ +[scgi_cache_use_stale](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_use_stale)\ +[scgi_cache_valid](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_cache_valid)\ +[scgi_connect_timeout](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_connect_timeout)\ +[scgi_force_ranges](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_force_ranges)\ +[scgi_hide_header](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_hide_header)\ +[scgi_ignore_client_abort](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_ignore_client_abort)\ +[scgi_ignore_headers](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_ignore_headers)\ +[scgi_intercept_errors](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_intercept_errors)\ +[scgi_limit_rate](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_limit_rate)\ +[scgi_max_temp_file_size](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_max_temp_file_size)\ +[scgi_next_upstream](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_next_upstream)\ +[scgi_next_upstream_timeout](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_next_upstream_timeout)\ +[scgi_next_upstream_tries](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_next_upstream_tries)\ +[scgi_no_cache](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_no_cache)\ +[scgi_param](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_param)\ +[scgi_pass](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_pass)\ +[scgi_pass_header](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_pass_header)\ +[scgi_pass_request_body](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_pass_request_body)\ +[scgi_pass_request_headers](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_pass_request_headers)\ +[scgi_read_timeout](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_read_timeout)\ +[scgi_request_buffering](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_request_buffering)\ +[scgi_send_timeout](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_send_timeout)\ 
+[scgi_socket_keepalive](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_socket_keepalive)\ +[scgi_store](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_store)\ +[scgi_store_access](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_store_access)\ +[scgi_temp_file_write_size](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_temp_file_write_size)\ +[scgi_temp_path](https://nginx.org/en/docs/http/ngx_http_scgi_module.html#scgi_temp_path)\ +[secure_link](https://nginx.org/en/docs/http/ngx_http_secure_link_module.html#secure_link)\ +[secure_link_md5](https://nginx.org/en/docs/http/ngx_http_secure_link_module.html#secure_link_md5)\ +[secure_link_secret](https://nginx.org/en/docs/http/ngx_http_secure_link_module.html#secure_link_secret)\ +[send_lowat](https://nginx.org/en/docs/http/ngx_http_core_module.html#send_lowat)\ +[send_timeout (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#send_timeout)\ +[send_timeout (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#send_timeout)\ +[sendfile](https://nginx.org/en/docs/http/ngx_http_core_module.html#sendfile)\ +[sendfile_max_chunk](https://nginx.org/en/docs/http/ngx_http_core_module.html#sendfile_max_chunk)\ +[server (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#server)\ +[server (ngx_mail_core_module)](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#server)\ +[server (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#server)\ +[server (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#server)\ +[server_name (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name)\ +[server_name (ngx_mail_core_module)](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#server_name)\ +[server_name (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#server_name)\ +[server_name_in_redirect](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_name_in_redirect)\ +[server_tokens](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_tokens)\ +[session_log](https://nginx.org/en/docs/http/ngx_http_session_log_module.html#session_log)\ +[session_log_format](https://nginx.org/en/docs/http/ngx_http_session_log_module.html#session_log_format)\ +[session_log_zone](https://nginx.org/en/docs/http/ngx_http_session_log_module.html#session_log_zone)\ +[set (ngx_http_rewrite_module)](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#set)\ +[set (ngx_stream_set_module)](https://nginx.org/en/docs/stream/ngx_stream_set_module.html#set)\ +[set_real_ip_from (ngx_http_realip_module)](https://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from)\ +[set_real_ip_from (ngx_mail_realip_module)](https://nginx.org/en/docs/mail/ngx_mail_realip_module.html#set_real_ip_from)\ +[set_real_ip_from (ngx_stream_realip_module)](https://nginx.org/en/docs/stream/ngx_stream_realip_module.html#set_real_ip_from)\ +[slice](https://nginx.org/en/docs/http/ngx_http_slice_module.html#slice)\ +[smtp_auth](https://nginx.org/en/docs/mail/ngx_mail_smtp_module.html#smtp_auth)\ +[smtp_capabilities](https://nginx.org/en/docs/mail/ngx_mail_smtp_module.html#smtp_capabilities)\ +[smtp_client_buffer](https://nginx.org/en/docs/mail/ngx_mail_smtp_module.html#smtp_client_buffer)\ +[smtp_greeting_delay](https://nginx.org/en/docs/mail/ngx_mail_smtp_module.html#smtp_greeting_delay)\ 
+[source_charset](https://nginx.org/en/docs/http/ngx_http_charset_module.html#source_charset)\ +[spdy_chunk_size](https://nginx.org/en/docs/http/ngx_http_spdy_module.html#spdy_chunk_size)\ +[spdy_headers_comp](https://nginx.org/en/docs/http/ngx_http_spdy_module.html#spdy_headers_comp)\ +[split_clients (ngx_http_split_clients_module)](https://nginx.org/en/docs/http/ngx_http_split_clients_module.html#split_clients)\ +[split_clients (ngx_stream_split_clients_module)](https://nginx.org/en/docs/stream/ngx_stream_split_clients_module.html#split_clients)\ +[ssi](https://nginx.org/en/docs/http/ngx_http_ssi_module.html#ssi)\ +[ssi_last_modified](https://nginx.org/en/docs/http/ngx_http_ssi_module.html#ssi_last_modified)\ +[ssi_min_file_chunk](https://nginx.org/en/docs/http/ngx_http_ssi_module.html#ssi_min_file_chunk)\ +[ssi_silent_errors](https://nginx.org/en/docs/http/ngx_http_ssi_module.html#ssi_silent_errors)\ +[ssi_types](https://nginx.org/en/docs/http/ngx_http_ssi_module.html#ssi_types)\ +[ssi_value_length](https://nginx.org/en/docs/http/ngx_http_ssi_module.html#ssi_value_length)\ +[ssl (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl)\ +[ssl (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl)\ +[ssl (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl)\ +[ssl_buffer_size](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_buffer_size)\ +[ssl_certificate (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate)\ +[ssl_certificate (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_certificate)\ +[ssl_certificate (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_certificate)\ +[ssl_certificate (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_certificate)\ +[ssl_certificate_key (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_certificate_key)\ +[ssl_certificate_key (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_certificate_key)\ +[ssl_certificate_key (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_certificate_key)\ +[ssl_certificate_key (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_certificate_key)\ +[ssl_ciphers (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ciphers)\ +[ssl_ciphers (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_ciphers)\ +[ssl_ciphers (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_ciphers)\ +[ssl_client_certificate (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_client_certificate)\ +[ssl_client_certificate (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_client_certificate)\ +[ssl_client_certificate (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_client_certificate)\ +[ssl_conf_command (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_conf_command)\ +[ssl_conf_command (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_conf_command)\ +[ssl_conf_command (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_conf_command)\ +[ssl_crl (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_crl)\ +[ssl_crl 
(ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_crl)\ +[ssl_crl (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_crl)\ +[ssl_dhparam (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam)\ +[ssl_dhparam (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_dhparam)\ +[ssl_early_data](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_early_data)\ +[ssl_ecdh_curve (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ecdh_curve)\ +[ssl_ecdh_curve (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_ecdh_curve)\ +[ssl_ecdh_curve (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_ecdh_curve)\ +[ssl_handshake_timeout](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_handshake_timeout)\ +[ssl_name](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_name)\ +[ssl_ocsp](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ocsp)\ +[ssl_ocsp_cache](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ocsp_cache)\ +[ssl_ocsp_responder](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_ocsp_responder)\ +[ssl_password_file (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_password_file)\ +[ssl_password_file (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_password_file)\ +[ssl_password_file (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_password_file)\ +[ssl_prefer_server_ciphers (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_prefer_server_ciphers)\ +[ssl_prefer_server_ciphers (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_prefer_server_ciphers)\ +[ssl_prefer_server_ciphers (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_prefer_server_ciphers)\ +[ssl_preread (ngx_stream_ssl_preread_module)](http://nginx.org/en/docs/stream/ngx_stream_ssl_preread_module.html#var_ssl_preread_protocol)\ +[ssl_protocols (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols)\ +[ssl_protocols (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_protocols)\ +[ssl_protocols (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_protocols)\ +[ssl_protocols (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_protocols)\ +[ssl_reject_handshake](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_reject_handshake)\ +[ssl_server_name](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_server_name)\ +[ssl_session_cache (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache)\ +[ssl_session_cache (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_session_cache)\ +[ssl_session_cache (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_cache)\ +[ssl_session_ticket_key (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_ticket_key)\ +[ssl_session_ticket_key (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_session_ticket_key)\ +[ssl_session_ticket_key (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_ticket_key)\ 
+[ssl_session_tickets (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_tickets)\ +[ssl_session_tickets (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_session_tickets)\ +[ssl_session_tickets (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_tickets)\ +[ssl_session_timeout (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_timeout)\ +[ssl_session_timeout (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_session_timeout)\ +[ssl_session_timeout (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_session_timeout)\ +[ssl_stapling](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_stapling)\ +[ssl_stapling_file](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_stapling_file)\ +[ssl_stapling_responder](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_stapling_responder)\ +[ssl_stapling_verify](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_stapling_verify)\ +[ssl_trusted_certificate (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_trusted_certificate)\ +[ssl_trusted_certificate (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_trusted_certificate)\ +[ssl_trusted_certificate (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_trusted_certificate)\ +[ssl_trusted_certificate (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_trusted_certificate)\ +[ssl_verify](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_verify)\ +[ssl_verify_client (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_verify_client)\ +[ssl_verify_client (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_verify_client)\ +[ssl_verify_client (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_verify_client)\ +[ssl_verify_depth (ngx_http_ssl_module)](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_verify_depth)\ +[ssl_verify_depth (ngx_mail_ssl_module)](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#ssl_verify_depth)\ +[ssl_verify_depth (ngx_stream_ssl_module)](https://nginx.org/en/docs/stream/ngx_stream_ssl_module.html#ssl_verify_depth)\ +[ssl_verify_depth (ngx_mgmt_module)](https://nginx.org/en/docs/ngx_mgmt_module.html#ssl_verify_depth)\ +[starttls](https://nginx.org/en/docs/mail/ngx_mail_ssl_module.html#starttls)\ +[state (ngx_http_upstream_module)](http://nginx.org/en/docs/http/ngx_http_upstream_module.html#state)\ +[status_zone (ngx_http_api_module)](https://nginx.org/en/docs/http/ngx_http_api_module.html#status_zone)\ +[sticky](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky)\ +[sticky_cookie_insert](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky_cookie_insert)\ +[stream (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#stream)\ +[stub_status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html#stub_status)\ +[sub_filter](https://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter)\ +[sub_filter_last_modified](https://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter_last_modified)\ +[sub_filter_once](https://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter_once)\ 
+[sub_filter_types](https://nginx.org/en/docs/http/ngx_http_sub_module.html#sub_filter_types)\ +[subrequest_output_buffer_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#subrequest_output_buffer_size)\ +[tcp_nodelay (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#tcp_nodelay)\ +[tcp_nodelay (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#tcp_nodelay)\ +[tcp_nopush](https://nginx.org/en/docs/http/ngx_http_core_module.html#tcp_nopush)\ +[thread_pool](https://nginx.org/en/docs/ngx_core_module.html#thread_pool)\ +[timeout](https://nginx.org/en/docs/mail/ngx_mail_core_module.html#timeout)\ +[timer_resolution](https://nginx.org/en/docs/ngx_core_module.html#timer_resolution)\ +[try_files](https://nginx.org/en/docs/http/ngx_http_core_module.html#try_files)\ +[types](https://nginx.org/en/docs/http/ngx_http_core_module.html#types)\ +[types_hash_bucket_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#types_hash_bucket_size)\ +[types_hash_max_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#types_hash_max_size)\ +[underscores_in_headers](https://nginx.org/en/docs/http/ngx_http_core_module.html#underscores_in_headers)\ +[uninitialized_variable_warn](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#uninitialized_variable_warn)\ +[upstream (ngx_http_upstream_module)](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream)\ +[upstream (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#upstream)\ +[upstream_conf](https://nginx.org/en/docs/http/ngx_http_upstream_conf_module.html#upstream_conf)\ +[usage_report](https://nginx.org/en/docs/ngx_mgmt_module.html#usage_report)\ +[use](https://nginx.org/en/docs/ngx_core_module.html#use)\ +[user](https://nginx.org/en/docs/ngx_core_module.html#user)\ +[userid](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid)\ +[userid_domain](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_domain)\ +[userid_expires](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_expires)\ +[userid_flags](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_flags)\ +[userid_mark](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_mark)\ +[userid_name](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_name)\ +[userid_p3p](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_p3p)\ +[userid_path](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_path)\ +[userid_service](https://nginx.org/en/docs/http/ngx_http_userid_module.html#userid_service)\ +[uuid_file](https://nginx.org/en/docs/ngx_mgmt_module.html#uuid_file)\ +[uwsgi_buffer_size](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_buffer_size)\ +[uwsgi_buffering](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_buffering)\ +[uwsgi_buffers](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_buffers)\ +[uwsgi_busy_buffers_size](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_busy_buffers_size)\ +[uwsgi_cache](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache)\ +[uwsgi_cache_background_update](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_background_update)\ +[uwsgi_cache_bypass](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_bypass)\ 
+[uwsgi_cache_key](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_key)\ +[uwsgi_cache_lock](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_lock)\ +[uwsgi_cache_lock_age](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_lock_age)\ +[uwsgi_cache_lock_timeout](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_lock_timeout)\ +[uwsgi_cache_max_range_offset](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_max_range_offset)\ +[uwsgi_cache_methods](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_methods)\ +[uwsgi_cache_min_uses](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_min_uses)\ +[uwsgi_cache_path](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_path)\ +[uwsgi_cache_purge](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_purge)\ +[uwsgi_cache_revalidate](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_revalidate)\ +[uwsgi_cache_use_stale](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_use_stale)\ +[uwsgi_cache_valid](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_cache_valid)\ +[uwsgi_connect_timeout](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_connect_timeout)\ +[uwsgi_force_ranges](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_force_ranges)\ +[uwsgi_hide_header](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_hide_header)\ +[uwsgi_ignore_client_abort](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ignore_client_abort)\ +[uwsgi_ignore_headers](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ignore_headers)\ +[uwsgi_intercept_errors](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_intercept_errors)\ +[uwsgi_limit_rate](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_limit_rate)\ +[uwsgi_max_temp_file_size](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_max_temp_file_size)\ +[uwsgi_modifier1](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_modifier1)\ +[uwsgi_modifier2](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_modifier2)\ +[uwsgi_next_upstream](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_next_upstream)\ +[uwsgi_next_upstream_timeout](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_next_upstream_timeout)\ +[uwsgi_next_upstream_tries](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_next_upstream_tries)\ +[uwsgi_no_cache](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_no_cache)\ +[uwsgi_param](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_param)\ +[uwsgi_pass](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_pass)\ +[uwsgi_pass_header](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_pass_header)\ +[uwsgi_pass_request_body](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_pass_request_body)\ +[uwsgi_pass_request_headers](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_pass_request_headers)\ +[uwsgi_read_timeout](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_read_timeout)\ +[uwsgi_request_buffering](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_request_buffering)\ +[uwsgi_send_timeout](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_send_timeout)\ 
+[uwsgi_socket_keepalive](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_socket_keepalive)\ +[uwsgi_ssl_certificate](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_certificate)\ +[uwsgi_ssl_certificate_key](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_certificate_key)\ +[uwsgi_ssl_conf_command](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_conf_command)\ +[uwsgi_ssl_crl](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_crl)\ +[uwsgi_ssl_name](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_name)\ +[uwsgi_ssl_password_file](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_password_file)\ +[uwsgi_ssl_protocols](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_protocols)\ +[uwsgi_ssl_server_name](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_server_name)\ +[uwsgi_ssl_session_reuse](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_session_reuse)\ +[uwsgi_ssl_trusted_certificate](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_trusted_certificate)\ +[uwsgi_ssl_verify](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_verify)\ +[uwsgi_ssl_verify_depth](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_ssl_verify_depth)\ +[uwsgi_store](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_store)\ +[uwsgi_store_access](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_store_access)\ +[uwsgi_temp_file_write_size](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_temp_file_write_size)\ +[uwsgi_temp_path](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html#uwsgi_temp_path)\ +[valid_referers](https://nginx.org/en/docs/http/ngx_http_referer_module.html#valid_referers)\ +[variables_hash_bucket_size (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#variables_hash_bucket_size)\ +[variables_hash_bucket_size (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#variables_hash_bucket_size)\ +[variables_hash_max_size (ngx_http_core_module)](https://nginx.org/en/docs/http/ngx_http_core_module.html#variables_hash_max_size)\ +[variables_hash_max_size (ngx_stream_core_module)](https://nginx.org/en/docs/stream/ngx_stream_core_module.html#variables_hash_max_size)\ +[worker_aio_requests](https://nginx.org/en/docs/ngx_core_module.html#worker_aio_requests)\ +[worker_connections](https://nginx.org/en/docs/ngx_core_module.html#worker_connections)\ +[worker_cpu_affinity](https://nginx.org/en/docs/ngx_core_module.html#worker_cpu_affinity)\ +[worker_priority](https://nginx.org/en/docs/ngx_core_module.html#worker_priority)\ +[worker_processes](https://nginx.org/en/docs/ngx_core_module.html#worker_processes)\ +[worker_rlimit_core](https://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_core)\ +[worker_rlimit_nofile](https://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_nofile)\ +[worker_shutdown_timeout](https://nginx.org/en/docs/ngx_core_module.html#worker_shutdown_timeout)\ +[working_directory](https://nginx.org/en/docs/ngx_core_module.html#working_directory)\ +[xclient](https://nginx.org/en/docs/mail/ngx_mail_proxy_module.html#xclient)\ +[xml_entities](https://nginx.org/en/docs/http/ngx_http_xslt_module.html#xml_entities)\ +[xslt_last_modified](https://nginx.org/en/docs/http/ngx_http_xslt_module.html#xslt_last_modified)\ 
+[xslt_param](https://nginx.org/en/docs/http/ngx_http_xslt_module.html#xslt_param)\ +[xslt_string_param](http://nginx.org/en/docs/http/ngx_http_xslt_module.html#xslt_string_param)\ +[xslt_stylesheet](https://nginx.org/en/docs/http/ngx_http_xslt_module.html#xslt_stylesheet)\ +[xslt_types](https://nginx.org/en/docs/http/ngx_http_xslt_module.html#xslt_types)\ +[zone (ngx_http_upstream_module)](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone)\ +[zone (ngx_stream_upstream_module)](https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html#zone)\ +[zone_sync](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync)\ +[zone_sync_buffers](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_buffers)\ +[zone_sync_connect_retry_interval](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_connect_retry_interval)\ +[zone_sync_connect_timeout](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_connect_timeout)\ +[zone_sync_interval](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_interval)\ +[zone_sync_recv_buffer_size](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_recv_buffer_size)\ +[zone_sync_server](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_server)\ +[zone_sync_ssl](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl)\ +[zone_sync_ssl_certificate](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_certificate)\ +[zone_sync_ssl_certificate_key](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_certificate_key)\ +[zone_sync_ssl_ciphers](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_ciphers)\ +[zone_sync_ssl_conf_command](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_conf_command)\ +[zone_sync_ssl_crl](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_crl)\ +[zone_sync_ssl_name](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_name)\ +[zone_sync_ssl_password_file](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_password_file)\ +[zone_sync_ssl_protocols](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_protocols)\ +[zone_sync_ssl_server_name](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_server_name)\ +[zone_sync_ssl_trusted_certificate](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_trusted_certificate)\ +[zone_sync_ssl_verify](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_verify)\ +[zone_sync_ssl_verify_depth](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_verify_depth)\ +[zone_sync_timeout](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html#zone_sync_timeout) +
+ +
+Lua dynamic module directives + +[lua_load_resty_core](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_load_resty_core)\ +[lua_use_default_type](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_use_default_type)\ +[lua_malloc_trim](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_malloc_trim)\ +[lua_code_cache](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_code_cache)\ +[lua_thread_cache_max_entries](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_thread_cache_max_entries)\ +[lua_regex_cache_max_entries](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_regex_cache_max_entries)\ +[lua_regex_match_limit](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_regex_match_limit)\ +[lua_package_path](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_package_path)\ +[lua_package_cpath](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_package_cpath)\ +[init_by_lua](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#init_by_lua)\ +[init_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#init_by_lua_block)\ +[init_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#init_by_lua_file)\ +[init_worker_by_lua](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#init_worker_by_lua)\ +[init_worker_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#init_worker_by_lua_block)\ +[init_worker_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#init_worker_by_lua_file)\ +[exit_worker_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#exit_worker_by_lua_block)\ +[exit_worker_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#exit_worker_by_lua_file)\ +[set_by_lua](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#set_by_lua)\ +[set_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#set_by_lua_block)\ +[set_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#set_by_lua_file)\ +[content_by_lua](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#content_by_lua)\ +[content_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#content_by_lua_block)\ +[content_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#content_by_lua_file)\ +[server_rewrite_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#server_rewrite_by_lua_block)\ +[server_rewrite_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#server_rewrite_by_lua_file)\ +[rewrite_by_lua](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#rewrite_by_lua)\ +[rewrite_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#rewrite_by_lua_block)\ +[rewrite_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#rewrite_by_lua_file)\ +[access_by_lua](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#access_by_lua)\ +[access_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#access_by_lua_block)\ +[access_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#access_by_lua_file)\ +[header_filter_by_lua](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#header_filter_by_lua)\ 
+[header_filter_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#header_filter_by_lua_block)\ +[header_filter_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#header_filter_by_lua_file)\ +[body_filter_by_lua](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#body_filter_by_lua)\ +[body_filter_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#body_filter_by_lua_block)\ +[body_filter_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#body_filter_by_lua_file)\ +[log_by_lua](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#log_by_lua)\ +[log_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#log_by_lua_block)\ +[log_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#log_by_lua_file)\ +[balancer_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#balancer_by_lua_block)\ +[balancer_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#balancer_by_lua_file)\ +[lua_need_request_body](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_need_request_body)\ +[ssl_client_hello_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#ssl_client_hello_by_lua_block)\ +[ssl_client_hello_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#ssl_client_hello_by_lua_file)\ +[ssl_certificate_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#ssl_certificate_by_lua_block)\ +[ssl_certificate_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#ssl_certificate_by_lua_file)\ +[ssl_session_fetch_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#ssl_session_fetch_by_lua_block)\ +[ssl_session_fetch_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#ssl_session_fetch_by_lua_file)\ +[ssl_session_store_by_lua_block](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#ssl_session_store_by_lua_block)\ +[ssl_session_store_by_lua_file](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#ssl_session_store_by_lua_file)\ +[lua_shared_dict](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_shared_dict)\ +[lua_socket_connect_timeout](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_socket_connect_timeout)\ +[lua_socket_send_timeout](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_socket_send_timeout)\ +[lua_socket_send_lowat](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_socket_send_lowat)\ +[lua_socket_read_timeout](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_socket_read_timeout)\ +[lua_socket_buffer_size](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_socket_buffer_size)\ +[lua_socket_pool_size](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_socket_pool_size)\ +[lua_socket_keepalive_timeout](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_socket_keepalive_timeout)\ +[lua_socket_log_errors](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_socket_log_errors)\ +[lua_ssl_ciphers](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_ssl_ciphers)\ +[lua_ssl_crl](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_ssl_crl)\ 
+[lua_ssl_protocols](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_ssl_protocols)\ +[lua_ssl_trusted_certificate](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_ssl_trusted_certificate)\ +[lua_ssl_verify_depth](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_ssl_verify_depth)\ +[lua_ssl_conf_command](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_ssl_conf_command)\ +[lua_http10_buffering](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_http10_buffering)\ +[rewrite_by_lua_no_postpone](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#rewrite_by_lua_no_postpone)\ +[access_by_lua_no_postpone](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#access_by_lua_no_postpone)\ +[lua_transform_underscores_in_response_headers](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_transform_underscores_in_response_headers)\ +[lua_check_client_abort](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_check_client_abort)\ +[lua_max_pending_timers](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_max_pending_timers)\ +[lua_max_running_timers](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_max_running_timers)\ +[lua_sa_restart](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_sa_restart)\ +[lua_worker_thread_vm_pool_size](https://github.com/openresty/lua-nginx-module?tab=readme-ov-file#lua_worker_thread_vm_pool_size) +
+ + +
+GeoIP2 dynamic module directives + +[geoip2 (ngx_http_geoip2_module)](https://github.com/leev/ngx_http_geoip2_module#user-content-download-maxmind-geolite2-database-optional)\ +[geoip2 (ngx_stream_geoip2_module)](https://github.com/leev/ngx_http_geoip2_module#user-content-download-maxmind-geolite2-database-optional)\ +[geoip2_proxy (ngx_http_geoip2_module)](https://github.com/leev/ngx_http_geoip2_module#user-content-download-maxmind-geolite2-database-optional)\ +[geoip2_proxy_recursive (ngx_http_geoip2_module)](https://github.com/leev/ngx_http_geoip2_module#user-content-download-maxmind-geolite2-database-optional)\ +
diff --git a/content/nginxaas-azure/getting-started/prerequisites.md b/content/nginxaas-azure/getting-started/prerequisites.md new file mode 100644 index 000000000..0acc7a452 --- /dev/null +++ b/content/nginxaas-azure/getting-started/prerequisites.md @@ -0,0 +1,24 @@ +--- +title: "Prerequisites" +weight: 100 +categories: ["tasks"] +toc: true +docs: "DOCS-880" +url: /nginxaas/azure/getting-started/prerequisites/ +--- + +Before you deploy F5 NGINX as a Service for Azure (NGINXaaS) you need to meet the following prerequisites: + +- An Azure account with an active subscription (if you don’t have one, [create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F)). + +- [Confirm that you have the appropriate access](https://docs.microsoft.com/en-us/azure/role-based-access-control/check-access) before starting the setup: + + - The simplest approach is to use Azure’s built-in [Owner](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#owner) role on either a specific resource group or the subscription. + + - It's possible to complete a limited setup with the built-in [Contributor](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#contributor) role. + +For specific permissions check the [NGINXaaS for Azure Frequently Asked Questions]({{< relref "/nginxaas-azure/faq" >}}). + +## What's next + +[Create a Deployment]({{< relref "/nginxaas-azure/getting-started/create-deployment/" >}}) diff --git a/content/nginxaas-azure/getting-started/ssl-tls-certificates/_index.md b/content/nginxaas-azure/getting-started/ssl-tls-certificates/_index.md new file mode 100644 index 000000000..1064e0730 --- /dev/null +++ b/content/nginxaas-azure/getting-started/ssl-tls-certificates/_index.md @@ -0,0 +1,8 @@ +--- +title: "Add SSL-TLS certificates" +weight: 400 +url: /nginxaas/azure/getting-started/ssl-tls-certificates/ +menu: + docs: + parent: NGINXaaS for Azure +--- \ No newline at end of file diff --git a/content/nginxaas-azure/getting-started/ssl-tls-certificates/overview.md b/content/nginxaas-azure/getting-started/ssl-tls-certificates/overview.md new file mode 100644 index 000000000..bf17c242c --- /dev/null +++ b/content/nginxaas-azure/getting-started/ssl-tls-certificates/overview.md @@ -0,0 +1,321 @@ +--- +title: "Overview" +weight: 50 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/getting-started/ssl-tls-certificates/overview/ +--- + +F5 NGINX as a Service for Azure (NGINXaaS) enables customers to secure traffic by adding SSL/TLS certificates to a deployment. NGINXaaS can fetch certificates directly from Azure Key Vault, rotate certificates, and provide observability on the status of your certificates. + +This document provides details about using SSL/TLS certificates with your F5 NGINX as a Service for Azure deployment. 
+ +## Add SSL/TLS certificates + +Add a certificate from an Azure Key Vault to your NGINXaaS deployment using your preferred client tool: + +* [Add certificates using the Azure portal]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md">}}) +* [Add certificates using the Azure CLI]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-azure-cli.md">}}) +* [Add certificates using Terraform]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-terraform.md">}}) + +### Add SSL/TLS certificates bundled with NGINXaaS configuration + +You can also add your certificate as a file to your NGINX configuration filesystem; refer to [Upload an NGINX configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/overview.md">}}) to learn about the different options. Although this is a quick method for adding SSL/TLS certificates to your NGINXaaS deployment, we recommend adding certificates through Azure Key Vault (AKV) for enhanced security, certificate rotation, and monitoring. + +Once a certificate has been added, update your NGINX configuration to reference your SSL/TLS certificate and key file paths. + +```nginx +http { + server { + listen 443 ssl; + ssl_certificate /etc/nginx/certs/mycert.cert; + ssl_certificate_key /etc/nginx/certs/mycert.key; + # ... + } +} +``` + +## Certificate rotation + +NGINXaaS for Azure regularly polls the AKV to check if the certificate has been updated. If an updated certificate is found, it is automatically rotated on the deployment within 4 hours. Any change to the NGINX configuration will trigger all SSL/TLS certificates to be rotated immediately. + +For Azure client tools, such as the Azure CLI or Azure Resource Manager, the certificate is referenced from AKV using its Key Vault secret identifier. If the secret identifier specifies a version, NGINXaaS will not rotate the certificate. To enable certificate rotation, ensure the secret id does not contain a version, for example, `https://myvault.vault.azure.net/secrets/mysecret`. Certificates added using the Azure Portal will automatically be rotated. + +{{}}If any of your SSL/TLS certificates or your NGINX configuration has issues, the certificates will not be rotated.{{}} + +## Monitor certificates + +To view the status of your SSL/TLS certificates, [enable monitoring]({{< relref "/nginxaas-azure/monitoring/enable-monitoring.md" >}}) for your NGINXaaS deployment and navigate to the **Metrics** tab in the Azure portal. View the `nginxaas.certificates` metric under the `nginxaas statistics` metric namespace. The `nginxaas.certificates` metric allows you to filter by certificate name and the status of the certificate. The status dimension reports the health of your certificates through the following values: + + {{}} + + | Status | Description | + | ------------- | ------------- | + | `active` | The certificate was successfully fetched from AKV. | + | `unauthorized`| Azure returned a 401/403 error when fetching the certificate from AKV, which usually indicates an issue with the deployment's [Managed Identity]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}). | + | `not found` | Azure returned a 404 error when fetching the certificate from AKV. | + | `incompatible`| An error occurred while fetching or processing the certificate from AKV.

The possible reasons include: <br> • Error while downloading certificate and key <br> • Missing content type in certificate <br> • Missing content in certificate <br> • Unrecognized content type, certificate not in PEM or PKCS12 format |
+
+ {{
}} + + {{< img src="nginxaas-azure/azure-metrics-nginxaas.certificates.png" alt="Interface screenshot showing the Azure metric nginxaas.certificates" >}} + +## Common certificate errors + +The following section describes common errors you might encounter while adding SSL/TLS certificates to your NGINXaaS deployment and how to resolve them. + +#### Error code: `ForbiddenByRbac` + +**Description:** The [Managed Identity]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}) associated with the NGINXaaS deployment does not have permissions to fetch certificates from key vault. This error is returned when the key vault's permission model is set to [Azure role-based access control](https://learn.microsoft.com/en-us/azure/role-based-access-control/overview?WT.mc_id=Portal-Microsoft_Azure_KeyVault). + +**Resolution:** Assign the [Key Vault Secrets User](https://learn.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#key-vault-secrets-user) role to the managed identity associated with your NGINXaaS deployment. + +
+Create a role assignment - Azure CLI + +1. Get the principal ID of the user or system assigned managed identity. + + - **User assigned managed identity** + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `MI_NAME`: the name of the managed identity + - `MI_RESOURCE_GROUP`: the name of the resource group the managed identity is in + ```bash + mi_principal_id=$(az identity show --name $MI_NAME \ + --resource-group $MI_RESOURCE_GROUP \ + --query principalId --output tsv) + ``` + + - **System assigned managed identity** + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `DEP_NAME`: the name of the NGINXaaS deployment + - `DEP_RESOURCE_GROUP`: the name of the resource group the NGINXaaS deployment is in + ```bash + mi_principal_id=$(az nginx deployment show --name $DEP_NAME \ + --resource-group $DEP_RESOURCE_GROUP \ + --query identity.principalId --output tsv) + ``` +1. Get the resource ID of the key vault. + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `KV_NAME`: the name of the key vault + - `KV_RESOURCE_GROUP`: the name of the resource group the key vault is in + ```bash + key_vault_id=$(az keyvault show --name $KV_NAME \ + --resource-group $KV_RESOURCE_GROUP \ + --query id --output tsv) + ``` +1. Create the role assignment. + ```bash + az role assignment create --assignee $mi_principal_id \ + --role "Key Vault Secrets User" \ + --scope $key_vault_id + ``` +
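+
+After creating the role assignment, you can confirm it is in place before retrying your deployment. The following check is a minimal sketch; it assumes the `mi_principal_id` and `key_vault_id` variables from the previous steps are still set in your shell.
+
+```bash
+# List role assignments for the managed identity, scoped to the key vault
+az role assignment list --assignee $mi_principal_id \
+    --role "Key Vault Secrets User" \
+    --scope $key_vault_id \
+    --output table
+```
+
+If the output is empty, the assignment was not created or has not finished propagating; role assignments can take a few minutes to become effective.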
+ +#### Error code: `AccessDenied` + +**Description:** The [Managed Identity]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}) associated with the NGINXaaS deployment has not been assigned to an access policy on the key vault. This error is returned when the key vault's permission model is set to [Vault access policy](https://learn.microsoft.com/en-us/azure/key-vault/general/assign-access-policy?WT.mc_id=Portal-Microsoft_Azure_KeyVault&tabs=azure-portal). + +**Resolution:** Assign an access policy to the managed identity associated with your NGINXaaS deployment with *Get secrets* permissions or higher. If you are using the Azure portal, assign an additional access policy to your user with *List certificates* permissions or higher. + +
+Create an access policy - Azure CLI + +1. Get the principal ID of the user or system assigned managed identity. + + - **User assigned managed identity** + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `MI_NAME`: the name of the managed identity + - `MI_RESOURCE_GROUP`: the name of the resource group the managed identity is in + ```bash + mi_principal_id=$(az identity show --name $MI_NAME \ + --resource-group $MI_RESOURCE_GROUP \ + --query principalId --output tsv) + ``` + + - **System assigned managed identity** + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `DEP_NAME`: the name of the NGINXaaS deployment + - `DEP_RESOURCE_GROUP`: the name of the resource group the NGINXaaS deployment is in + ```bash + mi_principal_id=$(az nginx deployment show --name $DEP_NAME \ + --resource-group $DEP_RESOURCE_GROUP \ + --query identity.principalId --output tsv) + ``` + +1. Create the access policy. + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `KV_NAME`: the name of the key vault + - `KV_RESOURCE_GROUP`: the name of the resource group the key vault is in + ```bash + az keyvault set-policy --name $KV_NAME \ + --resource-group $KV_RESOURCE_GROUP \ + --object-id $mi_principal_id \ + --secret-permissions get + ``` +
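Optionally, you can confirm the policy was applied by inspecting the key vault's access policies for the managed identity's object ID, reusing the variables set above:

```bash
# Show the secret permissions granted to the managed identity.
# The output should include the "get" permission.
az keyvault show --name $KV_NAME \
    --resource-group $KV_RESOURCE_GROUP \
    --query "properties.accessPolicies[?objectId=='$mi_principal_id'].permissions.secrets" \
    --output json
```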
+ +#### Error code: `ForbiddenByFirewall` + +**Description:** The key vault's firewall is enabled and NGINXaaS is not authorized to fetch certificates. + +**Resolution:** [Configure Network Security Perimeter]({{< relref "/nginxaas-azure/quickstart/security-controls/certificates.md#configure-network-security-perimeter-nsp" >}}) to allow the subscription of the NGINXaaS deployment to access the key vault. + +
+Create a network security perimeter - Azure CLI + +1. Create a network security perimeter. + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `NSP_NAME`: the name of the network security perimeter + - `NSP_RESOURCE_GROUP`: the name of the resource group the network security perimeter will be in + ```bash + az network perimeter create --name $NSP_NAME --resource-group $NSP_RESOURCE_GROUP + ``` +1. Create a profile for the network security perimeter. + + Please ensure the following environment variable is set before copying the below Azure CLI command. + - `PROFILE_NAME`: the name of the network security perimeter profile + ```bash + az network perimeter profile create --name $PROFILE_NAME \ + --resource-group $NSP_RESOURCE_GROUP \ + --perimeter-name $NSP_NAME + ``` +1. Get the resource ID of the key vault. + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `KV_NAME`: the name of the key vault + - `KV_RESOURCE_GROUP`: the name of the resource group the key vault is in + ```bash + key_vault_id=$(az keyvault show --name $KV_NAME \ + --resource-group $KV_RESOURCE_GROUP \ + --query id --output tsv) + ``` +1. Get the resource ID of the network security profile. + ```bash + nsp_profile_id=$(az network perimeter profile show --name $PROFILE_NAME \ + --resource-group $NSP_RESOURCE_GROUP \ + --perimeter-name $NSP_NAME --query id --output tsv) + ``` +1. Associate the key vault with the network security perimeter + ```bash + az network perimeter association create --name key-vault-association \ + --perimeter-name $NSP_NAME \ + --resource-group $NSP_RESOURCE_GROUP \ + --private-link-resource "{id:$key_vault_id}" \ + --profile "{id:$nsp_profile_id}" + ``` +1. Add an inbound access rule to allow the NGINXaaS deployment's subscription. + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `RULE_NAME`: the name of the access rule + - `DEP_SUBSCRIPTION_ID`: the subscription ID of the NGINXaaS deployment + ```bash + az network perimeter profile access-rule create --name $RULE_NAME \ + --profile-name $PROFILE_NAME \ + --perimeter-name $NSP_NAME \ + --resource-group $NSP_RESOURCE_GROUP \ + --subscriptions [0].id="/subscriptions/$DEP_SUBSCRIPTION_ID" + ``` +
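As a quick sanity check before retrying the certificate operation, you can list the access rules on the profile. This sketch assumes your installed CLI version also provides the corresponding `list` subcommand, and it reuses the variables set above.

```bash
# The output should include the rule that allows the NGINXaaS deployment's subscription.
az network perimeter profile access-rule list --profile-name $PROFILE_NAME \
    --perimeter-name $NSP_NAME \
    --resource-group $NSP_RESOURCE_GROUP
```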
+ +#### Error code: `AnotherOperationInProgress` + +**Description:** Another operation on this, or a dependent resource, is in progress. + +**Resolution:** Retry the operation after the current operation reaches a terminal state. + +#### Error code: `SecretNotFound` + +**Description:** The certificate's key vault secret ID was not found in the key vault. + +**Resolution:** Ensure the specified key vault secret ID exists and has the correct format, for example, `https://myvault.vault.azure.net/secrets/abcd/v1`. + +#### Error code: `CertificateInUse` + +**Description:** The certificate being deleted or modified is referenced in the NGINX configuration. The attempted modification would prevent the NGINX config from being applied. + +**Resolution:** Remove references to the certificate in the NGINX config, or add a new certificate resource to the NGINXaaS deployment with the modified certificate and key paths. + +#### Error code: `ForbiddenByPolicy` + +**Description:** The [Managed Identity]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}) associated with the NGINXaaS deployment does not have permissions to fetch certificates from key vault. This error is returned when the key vault's permission model is set to [Vault access policy](https://learn.microsoft.com/en-us/azure/key-vault/general/assign-access-policy?WT.mc_id=Portal-Microsoft_Azure_KeyVault&tabs=azure-portal). + +**Resolution:** Assign an access policy to the managed identity associated with your NGINXaaS deployment with *Get secrets* permissions or higher. If you are using the Azure portal, assign an additional access policy to your user with *List certificates* permissions or higher. + +
+Create an access policy - Azure CLI + +1. Get the principal ID of the user or system assigned managed identity. + + - **User assigned managed identity** + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `MI_NAME`: the name of the managed identity + - `MI_RESOURCE_GROUP`: the name of the resource group the managed identity is in + ```bash + mi_principal_id=$(az identity show --name $MI_NAME \ + --resource-group $MI_RESOURCE_GROUP \ + --query principalId --output tsv) + ``` + + - **System assigned managed identity** + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `DEP_NAME`: the name of the NGINXaaS deployment + - `DEP_RESOURCE_GROUP`: the name of the resource group the NGINXaaS deployment is in + ```bash + mi_principal_id=$(az nginx deployment show --name $DEP_NAME \ + --resource-group $DEP_RESOURCE_GROUP \ + --query identity.principalId --output tsv) + ``` + +1. Create the access policy. + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `KV_NAME`: the name of the key vault + - `KV_RESOURCE_GROUP`: the name of the resource group the key vault is in + ```bash + az keyvault set-policy --name $KV_NAME \ + --resource-group $KV_RESOURCE_GROUP \ + --object-id $mi_principal_id \ + --secret-permissions get + ``` +
+ +#### Error code: `DuplicateFilePathError` + +**Description:** A file already exists on the instance's filesystem with the certificate or key's file path. + +**Resolution:** Rename the certificate or key path, so there are no collisions with existing certificate and NGINX config file paths. + +#### Error code: `SecretDisabled` + +**Description:** The certificate is set to disabled in the key vault. + +**Resolution:** Enable the certificate in the key vault. + +
+Enable a certificate in key vault - Azure CLI + +1. Get the resource ID of the certificate. + + Please ensure the following environment variables are set before copying the below Azure CLI command. + - `CERT_NAME`: the name of the certificate + - `KV_NAME`: the name of the key vault + ```bash + certificate_id=$(az keyvault certificate show --name $CERT_NAME \ + --vault-name $KV_NAME \ + --query id --output tsv) + ``` + +1. Enable the certificate. + ```bash + az keyvault certificate set-attributes --enabled true --id $certificate_id + ``` +
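To confirm the change took effect, you can read the attribute back, reusing the `$certificate_id` variable from the first step:

```bash
# Expect "true" once the certificate is enabled again.
az keyvault certificate show --id $certificate_id \
    --query attributes.enabled --output tsv
```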
diff --git a/content/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-azure-cli.md b/content/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-azure-cli.md new file mode 100644 index 000000000..14fbad6dd --- /dev/null +++ b/content/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-azure-cli.md @@ -0,0 +1,105 @@ +--- +title: "Add certificates using the Azure CLI" +weight: 200 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-azure-cli/ +--- + +You can use Azure Key Vault (AKV) to store SSL/TLS certificates and keys to use in your F5 NGINX as a Service for Azure (NGINXaaS) configuration. + +### Prerequisites + +{{< include "/nginxaas-azure/ssl-tls-prerequisites.md" >}} + +- Install [Azure CLI with NGINXaaS extension]({{< relref "/nginxaas-azure/client-tools/cli.md" >}}) + +## Create a certificate + +Create a certificate under a deployment. This references an existing certificate in an Azure Key Vault and makes it available to the NGINX configuration. + +To create a certificate, use the `az nginx deployment certificate create` command: + +```bash +az nginx deployment certificate create --certificate-name + --deployment-name + --resource-group + [--certificate-path] + [--key-path] + [--key-vault-secret-id] + [--location] + [--no-wait {0, 1, f, false, n, no, t, true, y, yes}] +``` + +### Example + +- Create a certificate with a certificate path, key path, and key vault secret ID: + + ```bash + az nginx deployment certificate create --certificate-name myCertificate \ + --deployment-name myDeployment --resource-group myResourceGroup \ + --certificate-path /etc/nginx/test.cert --key-path /etc/nginx/test.key \ + --key-vault-secret-id keyVaultSecretId + ``` + +See [Azure CLI Certificate Create Documentation](https://learn.microsoft.com/en-us/cli/azure/nginx/deployment/certificate#az-nginx-deployment-certificate-create) for more details on the available parameters. + +## Update a certificate + +To update a certificate, use the `az nginx deployment certificate update` command: + +```bash +az nginx deployment certificate update [--add] + [--certificate-name] + [--certificate-path] + [--deployment-name] + [--force-string {0, 1, f, false, n, no, t, true, y, yes}] + [--ids] + [--key-path] + [--key-vault-secret-id] + [--location] + [--no-wait {0, 1, f, false, n, no, t, true, y, yes}] + [--remove] + [--resource-group] + [--set] + [--subscription] +``` + +### Example + +- Update the certificate virtual path, key virtual path and certificate: + + ```bash + az nginx deployment certificate update --certificate-name myCertificate \ + --deployment-name myDeployment --resource-group myResourceGroup \ + --certificate-path /etc/nginx/testupdated.cert \ + --key-path /etc/nginx/testupdated.key \ + --key-vault-secret-id newKeyVaultSecretId + ``` + +See [Azure CLI Certificate Update Documentation](https://learn.microsoft.com/en-us/cli/azure/nginx/deployment/certificate#az-nginx-deployment-certificate-update) for more details on the available parameters.
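To confirm the result of a create or update, you can read the certificate resource back with the extension's `show` command, if your installed version provides it; the sketch below reuses the placeholder names from the examples above:

```bash
az nginx deployment certificate show --certificate-name myCertificate \
    --deployment-name myDeployment --resource-group myResourceGroup
```

The output includes the certificate's provisioning state, which is helpful when troubleshooting certificate errors.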
+ +## Delete a certificate + +To delete a certificate, use the `az nginx deployment certificate delete` command: + +```bash +az nginx deployment certificate delete [--certificate-name] + [--deployment-name] + [--ids] + [--no-wait {0, 1, f, false, n, no, t, true, y, yes}] + [--resource-group] + [--subscription] + [--yes] +``` + +### Example + +- Delete a certificate: + + ```bash + az nginx deployment certificate delete --certificate-name myCertificate \ + --deployment-name myDeployment --resource-group myResourceGroup + ``` + +See [Azure CLI Certificate Delete Documentation](https://learn.microsoft.com/en-us/cli/azure/nginx/deployment/certificate#az-nginx-deployment-certificate-delete) for more details on the available parameters. diff --git a/content/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md b/content/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md new file mode 100644 index 000000000..8b7dff63b --- /dev/null +++ b/content/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md @@ -0,0 +1,96 @@ +--- +title: "Add certificates using the Azure portal" +weight: 100 +categories: ["tasks"] +toc: true +docs: "DOCS-875" +url: /nginxaas/azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal/ +--- + +## Overview + +You can manage SSL/TLS certificates for F5 NGINX as a Service for Azure (NGINXaaS) using the Azure portal. + +## Prerequisites + +{{< include "/nginxaas-azure/ssl-tls-prerequisites.md" >}} + +### Adding an SSL/TLS certificate + +Before you begin, refer to the Azure documentation to [Import a certificate to your Key Vault](https://learn.microsoft.com/en-us/azure/key-vault/certificates/tutorial-import-certificate?tabs=azure-portal#import-a-certificate-to-your-key-vault). + +1. Go to your NGINXaaS for Azure deployment. + +1. Select **NGINX certificates** in the left menu. + +1. Select {{< fa "plus">}}**Add certificate**. + +1. Provide the required information: + + {{}} + | Field | Description | + |---------------------------- | ---------------------------- | + | Name | A unique name for the certificate. | + | Certificate path | This path can match one or more `ssl_certificate` directive file arguments in your NGINX configuration.
The certificate path must be unique within the same deployment. | + | Key path | This path can match one or more `ssl_certificate_key` directive file arguments in your NGINX configuration.
The key path must be unique within the same deployment.
The key path and certificate path can be the same within the certificate. | + {{
}} + + - The **Select certificate** button will take you to a new screen where you will need to provide the following information: + + {{}} + | Field | Description | + |----------------------- | ---------------------------- | + | Key vault | Select from the available key vaults. | + | Certificate | Select the certificate you want to add from the previously selected key vault. | + {{}} + + If you need to create a new key vault or certificate, you can do so by selecting **Create new key vault** or **Create new** under the **Key Vault** and **Certificate** fields, respectively. + + {{}}If specifying an absolute file path as the `Certificate path` or `Key path`, see the [NGINX Filesystem Restrictions table]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/overview/#nginx-filesystem-restrictions" >}}) for the allowed directories the file can be written to.{{}} + + {{}}A certificate added to an NGINXaaS for Azure deployment using the Azure Portal refers to an unversioned Azure Key Vault (AKV) secret identifier. To add a certificate with a versioned AKV secret identifier, follow the documented steps with alternative [Client tools]({{< relref "/nginxaas-azure/client-tools/_index.md" >}}) for NGINXaaS for Azure.{{}} + +1. Select **Add certificate**. + +1. Repeat the same steps to add as many certificates as needed. + +1. Now you can [provide an NGINX configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md" >}}) that references the certificate you just added by the **path** value. + +### View certificate details + +1. Go to your NGINXaaS for Azure deployment and select **NGINX certificates** in the left menu. + +1. Select the name of the certificate from the list. + +1. View the certificate details, including the certificate path, key path, thumbprint, and the certificate's status. + This view will also show in a red box any errors that occurred during the certificate fetch process. + +### Edit an SSL/TLS certificate + +1. Go to your NGINXaaS for Azure deployment and select **NGINX certificates** in the left menu. + +1. Select the checkbox next to the certificate you want to edit. + +1. Select {{< fa "pencil">}} **Edit**. + +1. Update the Name, Certificate path, Key path fields as needed. + +1. Use the **Select certificate** option to update the Key vault, and Certificate fields as needed. + +1. Select **Update**. + +### Delete an SSL/TLS certificate + +1. Go to your NGINXaaS for Azure deployment and select **NGINX certificates** in the left menu. + +1. Select the checkbox next to the certificate you want to delete. + +1. Select {{< fa "trash">}}**Delete**. + +1. Confirm the delete action. 
+ +{{}}Deleting a TLS/SSL certificate currently in use by the NGINXaaS for Azure deployment will cause an error.{{}} + +## What's next + +[Upload an NGINX Configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md" >}}) diff --git a/content/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-terraform.md b/content/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-terraform.md new file mode 100644 index 000000000..4d2ce72e4 --- /dev/null +++ b/content/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-terraform.md @@ -0,0 +1,41 @@ +--- +title: "Add certificates using Terraform" +weight: 300 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-terraform/ +--- + +## Overview + +You can manage SSL/TLS certificates for F5 NGINX as a Service for Azure (NGINXaaS) using Terraform. + +## Prerequisites + +{{< include "/nginxaas-azure/terraform-prerequisites.md" >}} + +## Upload and manage a certificate + +You can find examples of Terraform configurations in the [NGINXaaS for Azure Snippets GitHub repository](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/terraform/certificates). + +To create a deployment, add a certificate, and use it in a configuration, run the following commands: + + ```bash + terraform init + terraform plan + terraform apply --auto-approve + ``` + +## Delete a deployment + +Once the deployment is no longer needed, run the following to clean up the deployment and related resources: + + ```bash + terraform destroy --auto-approve + ``` + +## Additional resources + +- [Terraform NGINX certificate documentation](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/nginx_certificate) + +{{< include "/nginxaas-azure/terraform-resources.md" >}} \ No newline at end of file diff --git a/content/nginxaas-azure/known-issues.md b/content/nginxaas-azure/known-issues.md new file mode 100644 index 000000000..d84808867 --- /dev/null +++ b/content/nginxaas-azure/known-issues.md @@ -0,0 +1,193 @@ +--- +title: "Known issues" +weight: 1000 +toc: true +docs: "DOCS-871" +--- + +List of known issues in the latest release of F5 NGINX as a Service for Azure (NGINXaaS). + +### {{% icon-bug %}} Terraform fails to apply due to validation errors, but creates "Failed" resources in Azure (ID-4424) + +Some validation errors are caught later in the creation process, and can leave behind "Failed" resources in Azure. An example initial failure might look like: + +```shell +$ terraform apply + +│ Error: creating Nginx Deployment (Subscription: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" +│ Resource Group Name: "XXXXXXXX" +│ Nginx Deployment Name: "XXXXXXXX"): polling after DeploymentsCreateOrUpdate: polling failed: the Azure API returned the following +│ error: +│ +│ Status: "Failed" +│ Code: "NginxSaaSError" +│ Message: "{\"Content\":\"{\\\"error\\\":{\\\"code\\\":\\\"CapacityOutOfRange\\\",\\\"message\\\":\\\"The deployment's capacity must +│ be between 10 and 500 inclusive for marketplace plan standard. For more information about setting capacity see +│ https://docs.nginx.com/nginxaas/azure/quickstart/scaling/.\\\"}}\\n\",\"StatusCode\":400}" +``` + +The error message describes how to fix the validation problem. In the Azure portal, you'll be able to see your NGINXaaS, but it will have a "Failed" status. Future **terraform apply** runs will fail with **Error: A resource with the ID "..."
already exists**. + + +**Workaround**: Manually delete the "Failed" resource in the Azure portal before re-running **terraform apply**. **terraform import** will not work. + +### {{% icon-bug %}} Changing IP addresses in `listen` directives fails with "cannot reload nginx: timed out waiting for config to reload" (ID-4366) + +NGINXaaS uses NGINX's ["change configuration" feature](https://nginx.org/en/docs/control.html#reconfiguration) to update the configuration gracefully without dropping traffic. This starts new workers on the new configuration before shutting down the old workers on the old config. Some kinds of `listen` changes can block new workers from starting up. If you're changing from listening on all IPs to one (for example `listen 1234` -> `listen 127.0.0.1:1234` or vice versa), the config will fail to apply because the old workers and the new workers have an IP conflict. + +**Workaround**: Change the port as well as the IP address to avoid the conflict, and then make a second config change back to the desired port. + +### {{% icon-bug %}} Deploying NGINXaaS and Diagnostic Settings for NGINXaaS using an ARM Bicep or JSON template shows an error (ID-4326) + +While using a single template deployment to deploy both an NGINXaaS instance and a diagnostic setting for that instance, +you will see a validation error similar to: + +``` +{"code": "InvalidTemplateDeployment", "message": "The template deployment 'example' is not valid according to the validation procedure. The tracking id is '650afc1e-50d6-476c-bf94-9fc35ffeedd6'. See inner errors for details."} + +Inner Errors: +{"code": "OpenAPISpecValidationFailedForTemplateDeploymentResources", "message": "One or more resources in template deployment preflight validation request failed during OpenApi spec (swagger) validation. Please check error details for the resource identifiers."} + +Inner Errors: +{"code": "HttpPayloadAPISpecValidationFailed", "target": "/subscriptions/ee920d60-90f3-4a92-b5e7-bb284c3a6ce2/resourceGroups/testenv-1b791f58-workload/providers/NGINX.NGINXPLUS/nginxDeployments/myDeployment/providers/Microsoft.Insights/diagnosticSettings/myLoggingSetting", "message": "Failed during request payload validation against the API specification"} + +``` + +**Workaround**: Deploy your NGINXaaS instance and your diagnostic setting in separate templates. + +### {{% icon-bug %}} Not all NGINX Plus directives and use-cases are supported in NGINXaaS (ID-4331) + +NGINXaaS currently does not support all NGINX Plus directives and use-cases. We are continually adding new NGINX Plus capabilities into NGINXaaS to close the gap in functionality. You can follow the updates to the supported use-cases by visiting the [Changelog]({{< relref "./changelog.md" >}}). For a comprehensive list of currently allowed directives, please see the [Configuration Directives List]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#configuration-directives-list" >}}). + +### {{% icon-bug %}} Terraform errors using `package_data` (ID-2752) + +Specifying a configuration using a `tar.gz` file and the `package_data` directive fails. + +```text +│ Error: Insufficient config_file blocks +│ +│ on main.tf line 105, in resource "azurerm_nginx_configuration" "example": +│ 105: resource "azurerm_nginx_configuration" "example" { +│ +│ At least 1 "config_file" blocks are required.
+``` + +**Workaround:** Extract the files from your `tar.gz` and use the `config_file` directive instead of `package_data` + +### {{% icon-bug %}} Deployment responsiveness takes approximately 5-10 seconds. (ID-872) + +When creating a new deployment or exposing a new port for traffic, there might be a lag time of 5-10 seconds, during which the Azure Load Balancer does not recognize the new ports, thus preventing making new connections to the NGINX deployment. + +**Workaround:** Wait 10 seconds to make requests or make multiple requests to the instance with low connect timeout times after creating a new deployment or exposing a new port to the deployment for the first 10 seconds after the deployment reaches the Completed state. + +### {{% icon-bug %}} NGINXaaS for Azure charges do not render correctly in the Azure Portal cost center. (ID-1660) + +NGINXaaS for Azure resources appear with a random suffix, and clicking the link does not lead to the NGINXaaS for Azure resource overview page. The charge details show "Unassigned" for all fields, but the charge amount is accurate. + +### {{% icon-bug %}} Configuration update will not succeed with a failed certificate. (ID-1545) + +If a configuration update request uses a certificate that is in failed `provisioningState`, the configuration update is rejected. + +**Workaround:** Update the referenced certificate before updating the configuration. Make sure the certificate provisioning is successful and retry the configuration update. + +### {{% icon-bug %}} Known networking limitations (ID-625) + +- NGINXaaS deployments cannot access [Private Endpoints](https://learn.microsoft.com/en-us/azure/private-link/private-endpoint-overview) behind network security groups for private links. Attempts to do so will fail silently. +- NGINXaaS deployments cannot access [Private Endpoints](https://learn.microsoft.com/en-us/azure/private-link/private-endpoint-overview) in a globally peered VNET. Attempts to do so will fail silently. +- The resource group which contains the public IP resource attached to NGINXaaS deployment cannot be moved across subscriptions. Attempts to do so will result in a validation error. +- Creating an NGINXaaS deployment in a dual-stack subnet is not supported. Attempts to do so will result in a validation error. +- NGINXaaS deployments cannot be created with an IPv6 Public IP address. Attempts to do so will result in a validation error. +- [Network security group](https://learn.microsoft.com/en-us/azure/virtual-network/network-security-groups-overview) (NSG) flow logs will not be available for IP traffic flowing through an NGINXaaS deployment attached to a customer delegated subnet. Flow logs for other resources on the same subnet will be available as normal. + +### {{% icon-bug %}} Deployment locked when updating mutliple certificates at once. (ID-767) + +Attaching multiple certificates to a deployment quickly will result in a deployment conflict and error with a "409" status code. Certificates are a sub-resource of the deployment, and a user cannot attach multiple certificates to a deployment simultaneously. This issue is more likely to occur when attempting to configure multiple certificates using client tools such as Terraform and ARM templates. + +**Workaround:** If you want to add multiple certificates to a deployment, configure resource dependencies between the certificate resources, which will cause them to be added to the deployment one at a time. 
+ +**Terraform:** + +Use [depends_on](https://developer.hashicorp.com/terraform/language/meta-arguments/depends_on) to add a dependency between certificate resources: + +{{< highlight hcl "linenos=false,hl_lines=16" >}} +resource "azurerm_nginx_certificate" "cert1" { + name = "examplecert" + nginx_deployment_id = azurerm_nginx_deployment.test.id + key_virtual_path = "/src/cert/soservermekey.key" + certificate_virtual_path = "/src/cert/server.cert" + key_vault_secret_id = azurerm_key_vault_certificate.test.secret_id +} + +resource "azurerm_nginx_certificate" "cert2" { + name = "examplecert" + nginx_deployment_id = azurerm_nginx_deployment.test.id + key_virtual_path = "/src/cert/soservermekey.key" + certificate_virtual_path = "/src/cert/server.cert" + key_vault_secret_id = azurerm_key_vault_certificate.test.secret_id + + depends_on = [azurerm_nginx_certificate.cert1] +} +{{< / highlight >}} + +**ARM Template** + +Use [dependsOn](https://learn.microsoft.com/en-us/azure/azure-resource-manager/templates/resource-dependency) to add a dependency between certificate resources: + +{{< highlight json "linenos=false,hl_lines=21" >}} +{ + "type": "NGINX.NGINXPLUS/nginxDeployments/certificates", + "apiVersion": "2021-05-01-preview", + "name": "[concat(parameters('nginxDeploymentName'), '/', 'cert1')]", + "properties": { + "certificateVirtualPath": "[parameters('certificateVirtualPath')]", + "keyVirtualPath": "[parameters('keyVirtualPath')]", + "keyVaultSecretId": "[parameters('keyVaultSecretId')]" + } +} + +{ + "type": "NGINX.NGINXPLUS/nginxDeployments/certificates", + "apiVersion": "2021-05-01-preview", + "name": "[concat(parameters('nginxDeploymentName'), '/', 'cert2')]", + "properties": { + "certificateVirtualPath": "[parameters('certificateVirtualPath')]", + "keyVirtualPath": "[parameters('keyVirtualPath')]", + "keyVaultSecretId": "[parameters('keyVaultSecretId')]" + }, + "dependsOn": ["cert1"] +} +{{< / highlight >}} + +**Bicep Template** + +Use [dependsOn](https://learn.microsoft.com/en-us/azure/azure-resource-manager/bicep/resource-dependencies) to add a dependency between certificate resources: + +{{< highlight bicep "linenos=false,hl_lines=17" >}} +resource cert1 'NGINX.NGINXPLUS/nginxDeployments/certificates@2021-05-01-preview' = { + name: '${nginxDeploymentName}/cert1' + properties: { + certificateVirtualPath: certificateVirtualPath + keyVirtualPath: keyVirtualPath + keyVaultSecretId: keyVaultSecretId + } +} + +resource cert2 'NGINX.NGINXPLUS/nginxDeployments/certificates@2021-05-01-preview' = { + name: '${nginxDeploymentName}/cert2' + properties: { + certificateVirtualPath: certificateVirtualPath + keyVirtualPath: keyVirtualPath + keyVaultSecretId: keyVaultSecretId + } + dependsOn: [cert1] +} +{{< / highlight >}} + +### {{% icon-bug %}} Terraform errors around capacity for Basic plan deployments (ID-4880) + +Basic plans have no capacity, but older versions of [`azurerm`](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs) do not handle that well. You may see errors like: + +- `azurerm_nginx_deployment` falsely detecting capacity changes from 0 to 20 +- `UnsupportedOnBasicPlan: The Basic plan does not support scaling.` errors when running `terraform apply` + +**Solution:** Upgrade `azurerm` to version v3.116.0 or higher.
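One way to apply the fix consistently is to pin the provider version in your Terraform configuration. The following is a minimal sketch, assuming the standard `hashicorp/azurerm` provider source; run `terraform init -upgrade` afterwards so the newer provider is actually downloaded.

```hcl
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      # Basic plan capacity handling is fixed in v3.116.0 and later.
      version = ">= 3.116.0"
    }
  }
}
```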
diff --git a/content/nginxaas-azure/monitoring/_index.md b/content/nginxaas-azure/monitoring/_index.md new file mode 100644 index 000000000..62c0bb746 --- /dev/null +++ b/content/nginxaas-azure/monitoring/_index.md @@ -0,0 +1,8 @@ +--- +title: Logging and monitoring +weight: 300 +url: /nginxaas/azure/monitoring/ +menu: + docs: + parent: NGINXaaS for Azure +--- \ No newline at end of file diff --git a/content/nginxaas-azure/monitoring/configure-alerts.md b/content/nginxaas-azure/monitoring/configure-alerts.md new file mode 100644 index 000000000..0d67b5c24 --- /dev/null +++ b/content/nginxaas-azure/monitoring/configure-alerts.md @@ -0,0 +1,62 @@ +--- +title: "Configure alerts" +weight: 300 +categories: ["tasks"] +toc: true +draft: false +docs: "DOCS-985" +url: /nginxaas/azure/monitoring/configure-alerts/ +--- + +## Overview + +{{}}F5 NGINX as a Service for Azure (NGINXaaS) publishes custom metrics to Azure Monitor. To learn more about how to create and manage metrics-based alert rules, refer to the [Alerts section in Azure Monitor](https://learn.microsoft.com/en-us/azure/azure-monitor/alerts/alerts-create-new-alert-rule?tabs=metric) documentation from Microsoft. {{}} + +This guide explains how to create and configure metrics-based alerts for your NGINXaaS for Azure deployment using Azure Monitor. + + +## Prerequisites + +- Setup is complete for [NGINXaaS for Azure deployment]({{< relref "/nginxaas-azure/getting-started/create-deployment/" >}}). + +- To complete this setup, you must be an owner or user access administrator for the NGINX deployment resource. + +- To enable metrics, see [Enable Monitoring]({{< relref "/nginxaas-azure/monitoring/enable-monitoring.md" >}}). + +{{}} See [Azure monitor overview](https://docs.microsoft.com/en-us/azure/azure-monitor/overview) documentation to familiarize with Azure Monitor. {{}} + +## Create metrics-based alerts for proactive monitoring. + +1. Go to your NGINXaaS for Azure deployment. + +2. Select **Alerts** in the left menu. + +3. In the **Create** menu, select **Alert rule**. + +4. Select the **Scope** tab, and choose NGINX deployment as the scope of the alert. + +{{}} The scope is auto-selected as NGINX deployment. {{}} + +5. In the **Conditions** tab, select a **Signal name**, for example, "nginx.http.request.count". + + {{< img src="nginxaas-azure/alert-select-signal.png" alt="Screenshot of the Conditions tab showing how to select a Signal name from the list" >}} + +6. Define the **alert logic** such as: + + - Set the threshold and average as per your requirements. + - Set the frequency to evaluate alerts as per your requirements. + + {{< img src="nginxaas-azure/alert-logic.png" alt="Screenshot of the alert logic page showing how to set the threshold and frequency" >}} + +7. Define the **actions**: + + - Create an **action group** for future reference. See the [Configure basic action group settings](https://learn.microsoft.com/en-us/azure/azure-monitor/alerts/action-groups) section. + - Define the **notification settings**: whom to notify when the alert is triggered. See the [Configure notifications](https://learn.microsoft.com/en-us/azure/azure-monitor/alerts/action-groups) section. + - (Optional) Define an action to be performed when the alert is triggered, such as a runbook or azure function. + +8. Fill out the details of the alert: + + - Specify the **severity** of the alert, and the name of the rule. + - In the **advanced options** tab, you can turn on "Enable alert upon creation" and "Automatically resolve alerts". 
+ +{{}} [Standard Azure alert charges will apply](https://azure.microsoft.com/en-us/pricing/details/monitor/).{{}} \ No newline at end of file diff --git a/content/nginxaas-azure/monitoring/enable-logging/_index.md b/content/nginxaas-azure/monitoring/enable-logging/_index.md new file mode 100644 index 000000000..958a7c611 --- /dev/null +++ b/content/nginxaas-azure/monitoring/enable-logging/_index.md @@ -0,0 +1,8 @@ +--- +title: Enable NGINX logs +weight: 300 +url: /nginxaas/azure/monitoring/enable-logging/ +menu: + docs: + parent: NGINXaaS for Azure +--- diff --git a/content/nginxaas-azure/monitoring/enable-logging/logging-using-cli.md b/content/nginxaas-azure/monitoring/enable-logging/logging-using-cli.md new file mode 100644 index 000000000..df3740f7d --- /dev/null +++ b/content/nginxaas-azure/monitoring/enable-logging/logging-using-cli.md @@ -0,0 +1,71 @@ +--- +title: "Enable NGINX logs using CLI" +weight: 100 +categories: ["tasks"] +toc: true +docs: "DOCS-1369" +url: /nginxaas/azure/monitoring/enable-logging/logging-using-cli/ +--- + +## Overview + +F5 NGINX as a Service for Azure (NGINXaaS) supports integrating Azure Diagnostic Settings to collect NGINX error and access logs. + +{{}} +Enabling logs using the **NGINX Logs** blade on your NGINXaaS deployment is now deprecated. This feature will be removed in an upcoming update. If you have issues accessing your NGINX logs using the deprecated method, please follow the steps in this guide to access your NGINX logs. +{{}} + +## Configuring NGINX logs collection using diagnostic settings + +### Prerequisites + +- A valid NGINX configuration with log directives enabled. NGINX logs can be configured using [error_log](#setting-up-error-logs) and [access_log](#setting-up-access-logs) directives. + +- A system-assigned managed identity. +{{}}The system-assigned managed identity does not need any role assignments to enable the logging functionality described in this section. You will need to make sure that the managed identity has the appropriate role assignments to access other resources that it is attached to (for example, certificates stored in Azure Key Vault). +{{}} + +- User must be an owner or user access administrator for the NGINX deployment resource. + + ### Adding diagnostic settings + +Diagnostic settings for the NGINXaaS deployment resource can be managed using the Azure monitor diagnostic settings [commands](https://learn.microsoft.com/en-us/cli/azure/monitor/diagnostic-settings?view=azure-cli-latest). + +To add diagnostic settings to export NGINX logs to a storage account for an NGINXaaS deployment, the following command can be used: +```shell + az monitor diagnostic-settings create --resource --logs "[{category:NginxLogs,enabled:true,retention-policy:{enabled:false,days:0}}]" --name --storage-account +``` + +{{}}Due to limitations imposed by Azure, if the destination chosen is an Azure Storage account, the resource has to be in the same region as the NGINXaaS deployment resource. 
+{{}} + +To use a logs analytics workspace as the export destination, use the following command: +```shell + az monitor diagnostic-settings create --resource --logs "[{category:NginxLogs,enabled:true,retention-policy:{enabled:false,days:0}}]" --name --workspace + +``` + +To view the supported log categories for an NGINXaaS resource, use the following command: +```shell +az monitor diagnostic-settings list --resource +``` + +### Analyzing NGINX logs in Azure Storage + +{{< include "/nginxaas-azure/logging-analysis-azure-storage.md" >}} + +### Analyzing NGINX logs in Azure Log Analytics workspaces + +{{< include "/nginxaas-azure/logging-analysis-logs-analytics.md" >}} + +## Setting up error logs + +{{< include "/nginxaas-azure/logging-config-error-logs.md" >}} + +## Setting up access logs + +{{< include "/nginxaas-azure/logging-config-access-logs.md" >}} + +## Limitations + +{{< include "/nginxaas-azure/logging-limitations.md" >}} diff --git a/content/nginxaas-azure/monitoring/enable-logging/logging-using-portal.md b/content/nginxaas-azure/monitoring/enable-logging/logging-using-portal.md new file mode 100644 index 000000000..3f6e435ec --- /dev/null +++ b/content/nginxaas-azure/monitoring/enable-logging/logging-using-portal.md @@ -0,0 +1,85 @@ +--- +title: "Enable NGINX logs using Azure Portal" +weight: 100 +categories: ["tasks"] +toc: true +docs: "DOCS-1369" +url: /nginxaas/azure/monitoring/enable-logging/logging-using-portal/ +--- + +## Overview + +F5 NGINX as a Service for Azure (NGINXaaS) supports integrating Azure Diagnostic Settings to collect NGINX error and access logs. + +{{}} +Enabling logs using the **NGINX Logs** blade on your NGINXaaS deployment is now deprecated. This feature will be removed in an upcoming update. If you have issues accessing your NGINX logs using the deprecated method, please follow the steps in this guide to access your NGINX logs. +{{}} + +## Configuring NGINX logs collection using diagnostic settings + +### Prerequisites + +- A valid NGINX configuration with log directives enabled. NGINX logs can be configured using [error_log](#setting-up-error-logs) and [access_log](#setting-up-access-logs) directives. + +- A system-assigned managed identity. + +{{}}The system-assigned managed identity does not need any role assignments to enable the logging functionality described in this section. You will need to make sure that the managed identity has the appropriate role assignments to access other resources that it is attached to (for example, certificates stored in Azure Key Vault). +{{}} + +- User must be an owner or user access administrator for the NGINX deployment resource. + + ### Adding diagnostic settings + +1. Go to your NGINXaaS for Azure deployment. + +1. Select **Diagnostic Settings** in the left menu. + +1. Select **Add diagnostic setting**. + +1. Choose the **NGINX Logs** option and complete the details on the form, including the **Diagnostic setting name**. + +{{}}You will need to configure the system-assigned managed identity in order to see and select the **NGINX Logs** option. +{{}} + +1. Select preferred **Destination details**. + + {{< img src="nginxaas-azure/diagnostic-settings.png" alt="Screenshot of the Diagnostic Settings configuration page" >}} + +For more information about diagnostic settings destinations, please see the [Diagnostic Settings Destinations](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/diagnostic-settings#destinations) documentation. 
+ +{{}}Due to limitations imposed by Azure, if the destination chosen is an Azure Storage account, the resource has to be in the same region as the NGINXaaS deployment resource. +{{}} + +{{}}If you are a Terraform user, please refer to the [examples](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/terraform/deployments/with-diagnostic-setting-logging) provided to set up diagnostic settings for your NGINXaaS deployment.{{}} + +### Analyzing NGINX logs in Azure Storage + +{{< include "/nginxaas-azure/logging-analysis-azure-storage.md" >}} + +### Analyzing NGINX logs in Azure Log Analytics workspaces + +{{< include "/nginxaas-azure/logging-analysis-logs-analytics.md" >}} + +### Disable NGINX logs collection + +1. Go to your NGINXaaS for Azure deployment. + +1. Select **Diagnostic Settings** in the left menu. + +1. Edit the previously added Diagnostic Settings. + +1. Select **Delete**. + +{{}}It can take up to 90 minutes after removing the diagnostic settings for logs to stop publishing to the diagnostic destinations.{{}} + +## Setting up error logs + +{{< include "/nginxaas-azure/logging-config-error-logs.md" >}} + +## Setting up access logs + +{{< include "/nginxaas-azure/logging-config-access-logs.md" >}} + +## Limitations + +{{< include "/nginxaas-azure/logging-limitations.md" >}} diff --git a/content/nginxaas-azure/monitoring/enable-logging/logging-using-terraform.md b/content/nginxaas-azure/monitoring/enable-logging/logging-using-terraform.md new file mode 100644 index 000000000..3563ed72c --- /dev/null +++ b/content/nginxaas-azure/monitoring/enable-logging/logging-using-terraform.md @@ -0,0 +1,63 @@ +--- +title: "Enable NGINX logs using Terraform" +weight: 100 +categories: ["tasks"] +toc: true +docs: "DOCS-1369" +url: /nginxaas/azure/monitoring/enable-logging/logging-using-terraform/ +--- + +## Overview + +F5 NGINX as a Service for Azure (NGINXaaS) supports integrating Azure Diagnostic Settings to collect NGINX error and access logs. + +{{}} +Enabling logs using the **NGINX Logs** blade on your NGINXaaS deployment is now deprecated. This feature will be removed in an upcoming update. If you have issues accessing your NGINX logs using the deprecated method, please follow the steps in this guide to access your NGINX logs. +{{}} + +## Configuring NGINX logs collection using diagnostic settings + +### Prerequisites + +- A valid NGINX configuration with log directives enabled. NGINX logs can be configured using [error_log](#setting-up-error-logs) and [access_log](#setting-up-access-logs) directives. + +- A system-assigned managed identity. +{{}}The system-assigned managed identity does not need any role assignments to enable the logging functionality described in this section. You will need to make sure that the managed identity has the appropriate role assignments to access other resources that it is attached to (for example, certificates stored in Azure Key Vault). +{{}} + +- User must be an owner or user access administrator for the NGINX deployment resource. + + ### Adding diagnostic settings + +To set up diagnostic settings that enable logging to a storage account for your NGINXaaS deployment, please refer to the [examples](https://github.com/nginxinc/nginxaas-for-azure-snippets/tree/main/terraform/deployments/with-diagnostic-setting-logging) provided. + +After adapting the Terraform configurations from the repository above, run the following commands to update the deployment.
+ +```shell +terraform init +terraform plan -var="storage_account_resource_group=myresourcegroup" -var="storage_account_name=myaccountname" -out=plan.cache +terraform apply plan.cache +``` + +{{}}Due to limitations imposed by Azure, if the destination chosen is an Azure Storage account, the resource has to be in the same region as the NGINXaaS deployment resource. +{{}} + +### Analyzing NGINX logs in Azure Storage + +{{< include "/nginxaas-azure/logging-analysis-azure-storage.md" >}} + +### Analyzing NGINX logs in Azure Log Analytics workspaces + +{{< include "/nginxaas-azure/logging-analysis-logs-analytics.md" >}} + +## Setting up error logs + +{{< include "/nginxaas-azure/logging-config-error-logs.md" >}} + +## Setting up access logs + +{{< include "/nginxaas-azure/logging-config-access-logs.md" >}} + +## Limitations + +{{< include "/nginxaas-azure/logging-limitations.md" >}} diff --git a/content/nginxaas-azure/monitoring/enable-monitoring.md b/content/nginxaas-azure/monitoring/enable-monitoring.md new file mode 100644 index 000000000..833646c26 --- /dev/null +++ b/content/nginxaas-azure/monitoring/enable-monitoring.md @@ -0,0 +1,147 @@ +--- +title: "Enable monitoring" +weight: 200 +categories: ["tasks"] +toc: true +docs: "DOCS-876" +url: /nginxaas/azure/monitoring/enable-monitoring/ +--- + +Monitoring your application's performance is crucial for maintaining its reliability and efficiency. F5 NGINX as a Service for Azure (NGINXaaS) seamlessly integrates with Azure Monitor, allowing you to collect, correlate, and analyze metrics for a thorough understanding of your application's health and behavior. With Azure Monitor, you gain access to a wealth of information regarding your application's operations. You can: + +- Review Metrics: Examine the performance data collected from your application. +- Correlate Data: Connect different data points to gain insights into application performance trends. +- Analyze Performance: Dive deep into metrics to gain a comprehensive understanding of how your application operates. +- Create Alerts: Set up proactive monitoring by configuring alerts that notify you of potential issues before they escalate. + +{{}}NGINXaaS for Azure publishes *custom* metrics to Azure Monitor. To learn about the differences between standard and custom metrics, refer to the [Custom metrics in Azure Monitor overview](https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-custom-overview) documentation from Microsoft. Azure Monitor custom metrics are currently in public preview.{{}} + + +### Prerequisites + +- A user assigned managed identity or a system assigned managed identity with `Monitoring Metrics Publisher` role. + +{{}} When a user assigned managed identity or a system assigned managed identity is added to the deployment through portal, this role is automatically added.{{}} + +- User must be an owner or user access administrator for NGINX deployment resource to complete this set up. + +- If you're unfamiliar with Azure Monitor, refer to the [Azure monitor overview](https://docs.microsoft.com/en-us/azure/azure-monitor/overview) documentation from Microsoft. + +## Enable monitoring + +1. Log in to the Azure portal and navigate to your NGINXaaS for Azure deployment. +2. In the navigation pane under **Settings**, select the **NGINX monitoring** section. +3. Turn on the **Send metrics to Azure Monitor** setting. + +## View metrics with Azure Monitor metrics explorer + +1. 
In the navigation pane under **Monitoring**, select the **Metrics** section to access the Azure Monitor metrics explorer for your NGINXaaS deployment. +2. Refer to the [Azure Monitor metrics explorer](https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-getting-started) documentation from Microsoft to learn how you can create queries. + +{{}}Many of NGINX Plus's advanced statistics need to be enabled in the "nginx.conf" file before they will appear in the metrics explorer, for example "plus.http.request.bytes_*". Refer to [Gathering Data to Appear in Statistics](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#gathering-data-to-appear-in-statistics) to learn more.{{}} + +## Retrieve metrics through Azure Monitor API + +This section shows you how to effectively discover, gather and analyze NGINXaaS metrics through the Azure Monitor API. + +{{}}Refer to [Authenticate Azure Monitor requests](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/rest-api-walkthrough?tabs=portal#authenticate-azure-monitor-requests) for instructions on authenticating your API requests against the Azure Monitor API endpoint.{{}} + +1. **Retrieve metric namespaces:** Each metric belongs to a category, or "namespace", which groups similar types of metrics together. We recommend listing all namespaces for NGINXaaS to locate the metrics you're interested in. The following `curl` example shows how to retrieve all metrics namespaces on your NGINXaaS deployment: + + ```bash + curl --request GET --header "Authorization: Bearer $TOKEN" "https://management.azure.com/subscriptions/12345678-abcd-98765432-abcdef012345/resourceGroups/my-nginx-rg/providers/NGINX.NGINXPLUS/nginxDeployments/my-nginx-dep/providers/microsoft.insights/metricNamespaces?api-version=2024-02-01" + ``` + + The following JSON shows an example response body: + + ```json + { + "value": [ + ... + { + "id": "/subscriptions/12345678-abcd-98765432-abcdef012345/resourceGroups/my-nginx-rg/providers/NGINX.NGINXPLUS/nginxDeployments/my-nginx-dep/providers/microsoft.insights/metricNamespaces/NGINX Connections Statistics", + "name": "nginx connections statistics", + "type": "Microsoft.Insights/metricNamespaces", + "classification": "Custom", + "properties": { + "metricNamespaceName": "nginx connections statistics" + } + }, + ... + ] + } + ``` + +2. **Retrieve metric definitions:** Metrics definitions give you insights into the various metrics available for NGINXaaS within a namespace and what they represent. The following `curl` example shows how to retrieve all metrics definitions within the `nginx connections statistics` namespace for your NGINXaaS deployment: + + ```bash + curl --request GET --header "Authorization: Bearer $TOKEN" "https://management.azure.com/subscriptions/12345678-abcd-98765432-abcdef012345/resourceGroups/my-nginx-rg/providers/NGINX.NGINXPLUS/nginxDeployments/my-nginx-dep/providers/microsoft.insights/metricDefinitions?metricnamespace=nginx%20connections%20statistics&api-version=2024-02-01" + ``` + + The following JSON shows an example response body: + + ```json + { + "value": [ + ... 
+ { + "id": "/subscriptions/12345678-abcd-98765432-abcdef012345/resourceGroups/my-nginx-rg/providers/NGINX.NGINXPLUS/nginxDeployments/my-nginx-dep/providers/microsoft.insights/metricdefinitions/Nginx Connections Statistics/nginx.conn.current", + "resourceId": "/subscriptions/12345678-abcd-98765432-abcdef012345/resourceGroups/my-nginx-rg/providers/NGINX.NGINXPLUS/nginxDeployments/my-nginx-deployment", + "namespace": "NGINX Connections Statistics", + "name": { + "value": "nginx.conn.current", + "localizedValue": "nginx.conn.current" + }, + ... + }, + ... + ] + } + ``` + +3. **Metric values:** Finally, you can obtain the actual metric values which represent real-time or historical data points that tell you how your NGINXaaS is performing. The following `curl` example shows how to retrieve the value of metric `nginx.conn.current` within the `nginx connections statistics` namespace over a 10-minute time window averaged over 5 minute intervals: + + ```bash + curl --request GET --header "Authorization: Bearer $TOKEN" "https://management.azure.com/subscriptions/12345678-abcd-98765432-abcdef012345/resourceGroups/my-nginx-rg/providers/NGINX.NGINXPLUS/nginxDeployments/my-nginx-dep/providers/microsoft.insights/metrics?metricnamespace=nginx%20connections%20statistics&metricnames=nginx.conn.current×pan=2024-03-27T20:00:00Z/2024-03-27T20:10:00Z&aggregation=Average&interval=PT5M&api-version=2024-02-01" + ``` + + The following JSON shows an example response body: + + ```json + { + "cost": 9, + "timespan": "2024-03-27T20:00:00Z/2024-03-27T20:10:00Z", + "interval": "PT5M", + "value": [ + { + "id": "/subscriptions/12345678-abcd-98765432-abcdef012345/resourceGroups/my-nginx-rg/providers/NGINX.NGINXPLUS/nginxDeployments/my-nginx-dep/providers/Microsoft.Insights/metrics/nginx.conn.current", + "type": "Microsoft.Insights/metrics", + "name": { + "value": "nginx.conn.current", + "localizedValue": "nginx.conn.current" + }, + "unit": "Unspecified", + "timeseries": [ + { + "metadatavalues": [], + "data": [ + { + "timeStamp": "2024-03-27T20:00:00Z", + "average": 4 + }, + { + "timeStamp": "2024-03-27T20:05:00Z", + "average": 4 + } + ] + } + ], + "errorCode": "Success" + } + ], + "namespace": "nginx connections statistics", + "resourceregion": "eastus2" + } + ``` + +{{}} Refer to the [Metrics Catalog]({{< relref "/nginxaas-azure/monitoring/metrics-catalog.md" >}}) for a listing of available namespaces and metrics.{{}} diff --git a/content/nginxaas-azure/monitoring/metrics-catalog.md b/content/nginxaas-azure/monitoring/metrics-catalog.md new file mode 100644 index 000000000..791529841 --- /dev/null +++ b/content/nginxaas-azure/monitoring/metrics-catalog.md @@ -0,0 +1,303 @@ +--- +title: "Metrics catalog" +weight: 400 +categories: ["concepts"] +toc: false +docs: "DOCS-877" +url: /nginxaas/azure/monitoring/metrics-catalog/ +--- + +F5 NGINX as a Service for Azure (NGINXaaS) provides a rich set of metrics that you can use to monitor the health and performance of your NGINXaaS deployment. This document provides a catalog of the metrics that are available for monitoring NGINXaaS for Azure in Azure Monitor. 
+ +## Available metrics + +- [NGINXaaS Statistics](#nginxaas-statistics) +- [NGINX connections statistics](#nginx-connections-statistics) +- [NGINX requests and response statistics](#nginx-requests-and-response-statistics) +- [NGINX SSL Statistics](#nginx-ssl-statistics) +- [NGINX Cache Statistics](#nginx-cache-statistics) +- [NGINX Worker Statistics](#nginx-worker-statistics) +- [NGINX Upstream Statistics](#nginx-upstream-statistics) +- [NGINX System Statistics](#nginx-system-statistics) +- [NGINX Stream Statistics](#nginx-stream-statistics) +- [NGINX Resolver Statistics](#nginx-resolver-statistics) + +## Metrics + +The following metrics are reported on by NGINXaaS for Azure in Azure Monitor. +The metrics are categorized by the namespace used in Azure Monitor. The dimensions allow you to filter or split your queries in Azure Monitor providing you with a granular view over the metrics reported + +### NGINXaaS statistics + +{{}} + +| **Metric** | **Dimensions** | **Type** | **Description** | **Roll-up per** | +| --------------------- | -------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| ncu.provisioned | | count | The number of successfully provisioned NCUs during the aggregation interval. During scaling events, this may lag behind `ncu.requested` as the system works to achieve the request. Available for Standard plan deployments. | deployment | +| ncu.requested | | count | The requested number of NCUs during the aggregation interval. Describes the goal state of the system. Available for Standard plan deployments. | deployment | +| ncu.consumed | | count | The estimated number of NCUs used to handle the current traffic. This may burst above the `ncu.provisioned`. This can be used to guide scaling out or in to match your workload. See [Scaling Guidance]({{< relref "/nginxaas-azure/quickstart/scaling.md#iterative-approach" >}}) for details. Available for Standard plan deployments. | deployment | +| system.worker_connections | pid process_name | count | The number of nginx worker connections used on the dataplane. This metric is one of the factors which determines the deployment's consumed NCU value. | deployment | +| nginxaas.certificates | name status | count | The number of certificates added to the NGINXaaS deployment dimensioned by the name of the certificate and its status. Refer to [Certificate Health]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/overview.md#monitor-certificates" >}}) to learn more about the status dimension. | deployment | +| nginxaas.maxmind | status | count | The status of any MaxMind license in use for downloading geoip2 databases. Refer to [License Health]({{< relref "/nginxaas-azure/quickstart/geoip2.md#monitoring" >}}) to learn more about the status dimension. 
| deployment | + +{{}} + +### NGINX connections statistics + +{{}} + +| **Metric** | **Dimensions** | **Type** | **Description** | **Roll-up per** | +|------------------------------|----------------|----------|---------------------------------------------------------------------------------------------------------------|-----------------| +| nginx.conn.accepted | build version | count | Accepted Connections The total number of accepted client connections during the aggregation interval. | deployment | +| nginx.conn.dropped | build version | count | Dropped Connections The total number of dropped client connections during the aggregation interval. | deployment | +| nginx.conn.active | build version | avg | Active Connections The average number of active client connections during the aggregation interval. | deployment | +| nginx.conn.idle | build version | avg | Idle Connections The average number of idle client connections during the aggregation interval. | deployment | +| nginx.conn.current | build version | avg | Current Connections The average number of active and idle client connections during the aggregation interval. | deployment | + +{{}} + +### NGINX requests and response statistics + +{{}} + +| **Metric** | **Dimensions** | **Type** | **Description** | **Roll-up per** | +|----------------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| nginx.http.request.count | build version | count | HTTP Requests The total number of HTTP requests during the aggregation interval. | deployment | +| nginx.http.request.current | build version | avg | Current Requests The average number of current requests during the aggregation interval. | deployment | +| nginx.http.limit_conns.passed | build version limit_conn_zone | count | Limit Conn Zone Passed HTTP Connections The total number of connections that were neither limited nor accounted as limited during the aggregation interval. | limit conn zone | +| nginx.http.limit_conns.rejected | build version limit_conn_zone | count | Limit Conn Zone Rejected HTTP Connections The total number of connections that were rejected during the aggregation interval. | limit conn zone | +| nginx.http.limit_conns.rejected_dry_run| build version limit_conn_zone | count | Limit Conn Zone Rejected HTTP Connections In The Dry Run Mode The total number of connections accounted as rejected in the dry run mode during the aggregation interval. | limit conn zone | +| nginx.http.limit_reqs.passed | build version limit_req_zone | count | Limit Req Zone Passed HTTP Requests Rate The total number of requests that were neither limited nor accounted as limited during the aggregation interval. | limit req zone | +| nginx.http.limit_reqs.delayed | build version limit_req_zone | count | Limit Req Zone Delayed HTTP Requests Rate The total number of requests that were delayed during the aggregation interval. | limit req zone | +| nginx.http.limit_reqs.rejected | build version limit_req_zone | count | Limit Req Zone Rejected HTTP Requests Rate The total number of requests that were rejected during the aggregation interval. | limit req zone | +| nginx.http.limit_reqs.delayed_dry_run | build version limit_req_zone | count | Limit Req Zone Delayed HTTP Requests Rate In The Dry Run Mode The total number of requests accounted as delayed in the dry run mode during the aggregation interval. 
| limit req zone | +| nginx.http.limit_reqs.rejected_dry_run | build version limit_req_zone | count | Limit Req Zone Rejected HTTP Requests Rate In The Dry Run Mode The total number of requests accounted as rejected in the dry run mode during the aggregation interval. | limit req zone | +| plus.http.request.count | build version server_zone | count | Server Zone HTTP Requests The total number of HTTP requests during the aggregation interval. | server zone | +| plus.http.response.count | build version server_zone | count | Server Zone HTTP Responses The total number of HTTP responses during the aggregation interval. | server zone | +| plus.http.status.1xx | build version server_zone | count | Server Zone HTTP 1xx Responses The total number of HTTP responses with a 1xx status code during the aggregation interval. | server zone | +| plus.http.status.2xx | build version server_zone | count | Server Zone HTTP 2xx Responses The total number of HTTP responses with a 2xx status code during the aggregation interval. | server zone | +| plus.http.status.3xx | build version server_zone | count | Server Zone HTTP 3xx Responses The total number of HTTP responses with a 3xx status code during the aggregation interval. | server zone | +| plus.http.status.4xx | build version server_zone | count | Server Zone HTTP 4xx Responses The total number of HTTP responses with a 4xx status code during the aggregation interval. | server zone | +| plus.http.status.5xx | build version server_zone | count | Server Zone HTTP 5xx Responses The total number of HTTP responses with a 5xx status code during the aggregation interval. | server zone | +| plus.http.status.processing | build version server_zone | avg | Server Zone Status Processing The number of client requests that are currently being processed. | server zone | +| plus.http.request.bytes_rcvd | build version server_zone | count | Server Zone Bytes Received The total number of bytes received from clients during the aggregation interval. | server zone | +| plus.http.request.bytes_sent | build version server_zone | count | Server Zone Bytes Sent The total number of bytes sent to clients during the aggregation interval. | server zone | +| plus.http.request.count | build version location_zone | count | Location Zone HTTP Requests The total number of HTTP requests during the aggregation interval. | location zone | +| plus.http.response.count | build version location_zone | count | Location Zone HTTP Responses The total number of HTTP responses in the aggregation interval. | location zone | +| plus.http.status.1xx | build version location_zone | count | Location Zone HTTP 1xx Responses The total number of HTTP responses with a 1xx status code during the aggregation interval. | location zone | +| plus.http.status.2xx | build version location_zone | count | Location Zone HTTP 2xx Responses The total number of HTTP responses with a 2xx status code during the aggregation interval. | location zone | +| plus.http.status.3xx | build version location_zone | count | Location Zone HTTP 3xx Responses The total number of HTTP responses with a 3xx status code during the aggregation interval. | location zone | +| plus.http.status.4xx | build version location_zone | count | Location Zone HTTP 4xx Responses The total number of HTTP responses with a 4xx status code during the aggregation interval. 
| location zone | +| plus.http.status.5xx | build version location_zone | count | Location Zone HTTP 5xx Responses The total number of HTTP responses with a 5xx status code during the aggregation interval. | location zone | +| plus.http.request.bytes_rcvd | build version location_zone | count | Location Zone Bytes Received The total number of bytes received from clients during the aggregation interval. | location zone | +| plus.http.request.bytes_sent | build version location_zone | count | Location Zone Bytes Sent The total number of bytes sent to clients during the aggregation interval. | location zone | + +{{}} + +### NGINX SSL statistics + +{{}} + +| **Metric** | **Dimensions** | **Type** | **Description** | **Roll-up per** | +|----------------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| plus.ssl.failed | build version | count | The total number of failed SSL handshakes during the aggregation interval. | deployment | +| plus.ssl.handshakes | build version | count |The total number of successful SSL handshakes during the aggregation interval. | deployment | +| plus.ssl.reuses | build version | count |The total number of session reuses during SSL handshakes in the aggregation interval. | deployment | +| plus.ssl.no_common_protocol | build version | avg |The number of SSL handshakes failed because of no common protocol during the aggregation interval. | deployment | +| plus.ssl.no_common_cipher | build version | avg |The number of SSL handshakes failed because of no shared cipher during the aggregation interval. | deployment | +| plus.ssl.handshake_timeout | build version | avg | The number of SSL handshakes failed because of a timeout during the aggregation interval. | deployment | +| plus.ssl.peer_rejected_cert | build version | avg |The number of failed SSL handshakes when nginx presented the certificate to the client but it was rejected with a corresponding alert message during the aggregation interval. | deployment | +| plus.ssl.verify_failures.no_cert | build version | avg | SSL certificate verification errors - a client did not provide the required certificate during the aggregation interval. | deployment | +| plus.ssl.verify_failures.expired_cert | build version | avg |SSL certificate verification errors - an expired or not yet valid certificate was presented by a client during the aggregation interval. | deployment | +| plus.ssl.verify_failures.revoked_cert | build version | avg |SSL certificate verification errors - a revoked certificate was presented by a client during the aggregation interval. | deployment | +| plus.ssl.verify_failures.hostname_mismatch | build version | avg |SSL certificate verification errors - server's certificate doesn't match the hostname during the aggregation interval. | deployment| +| plus.ssl.verify_failures.other | build version | avg | SSL certificate verification errors - other SSL certificate verification errors during the aggregation interval. | deployment | +| plus.http.ssl.handshakes | build version server_zone | count |The total number of successful SSL handshakes during the aggregation interval. | server zone | +| plus.http.ssl.handshakes.failed | build version server_zone | count | The total number of failed SSL handshakes during the aggregation interval. 
| server zone | +| plus.http.ssl.session.reuses | build version server_zone | count |The total number of session reuses during SSL handshakes in the aggregation interval. | server zone | +| plus.http.ssl.no_common_protocol | build version server_zone | avg |The number of SSL handshakes failed because of no common protocol during the aggregation interval. | server zone | +| plus.http.ssl.no_common_cipher | build version server_zone | avg |The number of SSL handshakes failed because of no shared cipher during the aggregation interval. | server zone | +| plus.http.ssl.handshake_timeout | build version server_zone | avg | The number of SSL handshakes failed because of a timeout during the aggregation interval. | server zone | +| plus.http.ssl.peer_rejected_cert | build version server_zone | avg |The number of failed SSL handshakes when nginx presented the certificate to the client but it was rejected with a corresponding alert message during the aggregation interval. | server zone | +| plus.http.ssl.verify_failures.no_cert | build version server_zone | avg | SSL certificate verification errors - a client did not provide the required certificate during the aggregation interval. | server zone | +| plus.http.ssl.verify_failures.expired_cert | build version server_zone | avg |SSL certificate verification errors - an expired or not yet valid certificate was presented by a client during the aggregation interval. | server zone | +| plus.http.ssl.verify_failures.revoked_cert | build version server_zone | avg |SSL certificate verification errors - a revoked certificate was presented by a client during the aggregation interval. | server zone | +| plus.http.ssl.verify_failures.other | build version server_zone | avg | SSL certificate verification errors - other SSL certificate verification errors during the aggregation interval. | server zone | + +{{}} + +### NGINX cache statistics + +{{}} + +| **Metric** | **Dimensions** | **Type** | **Description** | **Roll-up per** | +|----------------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| plus.cache.hit.ratio | build version cache_zone | avg | Cache Hit Ratio The average ratio of cache hits to misses during the aggregation interval. | cache zone | + +{{}} + +### NGINX worker statistics + +{{}} + +| **Metric** | **Dimensions** | **Type** | **Description** | **Roll-up per** | +|----------------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| plus.worker.conn.accepted| build version worker_id | count |The total number of client connections accepted by the worker process during the aggregation interval. | worker | +| plus.worker.conn.dropped| build version worker_id | count |The total number of client connections dropped by the worker process during the aggregation interval. | worker | +| plus.worker.conn.active| build version worker_id | avg| The current number of active client connections that are currently being handled by the worker process during the aggregation interval. | worker | +| plus.worker.conn.idle| build version worker_id | avg|The number of idle client connections that are currently being handled by the worker process during the aggregation interval. 
| worker | +| plus.worker.http.request.total | build version worker_id | count | The total number of client requests received by the worker process during the aggregation interval. | worker | +| plus.worker.http.request.current | build version worker_id | avg| The current number of client requests that are currently being processed by the worker process during the aggregation interval. | worker| + +{{}} + +### NGINX upstream statistics + +{{}} + +| **Metric** | **Dimensions** | **Type** | **Description** | **Roll-up per** | +|-----------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| plus.http.upstream.peers.conn.active | build version upstream peer.address peer.name | avg | Upstream Server Active Connections The average number of active client connections during the aggregation interval. | upstream server | +| plus.http.upstream.peers.request.count | build version upstream peer.address peer.name | count | Upstream Server HTTP Requests The total number of HTTP requests during the aggregation interval. | upstream server | +| plus.http.upstream.peers.response.count | build version upstream peer.address peer.name | count | Upstream Server HTTP Responses The total number of HTTP responses during the aggregation interval. | upstream server | +| plus.http.upstream.peers.status.1xx | build version upstream peer.address peer.name | count | Upstream Server HTTP 1xx Responses The total number of HTTP responses with a 1xx status code during the aggregation interval. | upstream server | +| plus.http.upstream.peers.status.2xx | build version upstream peer.address peer.name | count | Upstream Server HTTP 2xx Responses The total number of HTTP responses with a 2xx status code during the aggregation interval. | upstream server | +| plus.http.upstream.peers.status.3xx | build version upstream peer.address peer.name | count | Upstream Server HTTP 3xx Responses The total number of HTTP responses with a 3xx status code during the aggregation interval. | upstream server | +| plus.http.upstream.peers.status.4xx | build version upstream peer.address peer.name | count | Upstream Server HTTP 4xx Responses The total number of HTTP responses with a 4xx status code during the aggregation interval. | upstream server | +| plus.http.upstream.peers.status.5xx | build version upstream peer.address peer.name | count | Upstream Server HTTP 5xx Responses The total number of HTTP responses with a 5xx status code during the aggregation interval. | upstream server | +| plus.http.upstream.peers.request.bytes_sent | build version upstream peer.address peer.name | count | | upstream server | +| plus.http.upstream.peers.request.bytes_rcvd | build version upstream peer.address peer.name | count | | upstream server | +| plus.http.upstream.peers.state.up | build version upstream peer.address peer.name | boolean | Upstream Server State Up Current state of upstream servers in deployment. If all upstream servers in the deployment are up, then the value will be 1. If any upstream server is not up, then the value will be 0. | upstream peer | +| plus.http.upstream.peers.state.draining | build version upstream peer.address peer.name | boolean | Upstream Server State Draining Current state of upstream servers in deployment. If any of the upstream servers in the deployment are draining, then the value will be 1. If no upstream server is draining, then the value will be 0. 
| upstream peer | +| plus.http.upstream.peers.state.down | build version upstream peer.address peer.name | boolean | Upstream Server State Down Current state of upstream servers in deployment. If any of the upstream servers in the deployment are down, then the value will be 1. If no upstream server is down, then the value will be 0. | upstream peer | +| plus.http.upstream.peers.state.unavail | build version upstream peer.address peer.name | boolean | Upstream Server State Unavailable Current state of upstream servers in deployment. If any of the upstream servers in the deployment are unavailable, then the value will be 1. If no upstream server is unavailable, then the value will be 0. | upstream peer | +| plus.http.upstream.peers.state.checking | build version upstream peer.address peer.name | boolean | Upstream Server State Check Current state of upstream servers in deployment. If any of the upstream servers in the deployment is being checked then the value will be 1. If no upstream server is being checked then the value will be 0. | upstream peer | +| plus.http.upstream.peers.state.unhealthy | build version upstream peer.address peer.name | boolean | Upstream Server State Unhealthy Current state of upstream servers in deployment. If any of the upstream servers in the deployment are unhealthy then the value will be 1. If no upstream server is unhealthy then the value will be 0. | upstream peer | +| plus.http.upstream.peers.fails | build version upstream peer.address peer.name | count | Upstream Server Fails The total number of unsuccessful attempts to communicate with the server during the aggregation interval. | upstream peer | +| plus.http.upstream.peers.unavail | build version upstream peer.address peer.name | count | Upstream Server Unavailable The number of times the server became unavailable for client requests (state “unavail”) due to the number of unsuccessful attempts reaching the [max_fails](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#max_fails) threshold during the aggregation interval. | upstream peer | +| plus.http.upstream.peers.health_checks.checks | build version upstream peer.address peer.name | count | Upstream Server Health Checks The total number of [health check](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#health_check) requests made during the aggregation interval. | upstream peer | +| plus.http.upstream.peers.health_checks.fails | build version upstream peer.address peer.name | count | Upstream Server Health Checks Fails The number of failed health checks during the aggregation interval. | upstream peer | +| plus.http.upstream.peers.health_checks.unhealthy | build version upstream peer.address peer.name | count | Upstream Server Health Checks Unhealthy How many times the server became unhealthy (state “unhealthy”) during the aggregation interval. | upstream peer | +| plus.http.upstream.peers.health_checks.last_passed | build version upstream peer.address peer.name | boolean | Upstream Server Health Checks Last Pass last_passed (boolean) indicating if the last health check request was successful and passed [tests](https://nginx.org/en/docs/http/ngx_http_upstream_hc_module.html#match). | upstream peer | +| plus.http.upstream.peers.downstart | build version upstream peer.address peer.name | timestamp | Upstream Server Downstart The time when the server became “unavail”, “checking”, or “unhealthy”, as a UTC timestamp. 
| upstream peer | +| plus.http.upstream.peers.response.time | build version upstream peer.address peer.name | avg | Upstream Server Response Time The average time to get the [full response](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_response_time) from the server during the aggregation interval. | upstream server | +| plus.http.upstream.peers.header.time | build version upstream peer.address peer.name | avg | Upstream Server Header Time The average time to get the [response header](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_header_time) from the server | upstream server | +| plus.http.upstream.zombies | build version | avg | Upstream Zombies The current number of servers removed from the group but still processing active client requests | deployment | +| plus.http.upstream.keepalives | build version | avg | Upstream Keepalive Connections The current number of idle [keepalive](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive) connections | deployment | +| plus.http.upstream.queue.maxsize | build version | avg | Upstream Queue Max Size The maximum number of requests that can be in the queue at the same time | deployment | +| plus.http.upstream.queue.overflows | build version | sum | Upstream Queue Overflows The total number of requests rejected due to the queue overflow | deployment | +| plus.http.upstream.queue.size | build version | avg | Upstream Queue Size The current number of requests in the queue | deployment | +| plus.http.upstream.peers.ssl.handshakes | build version upstream peer.address peer.name | count | The total number of successful SSL handshakes during the aggregation interval. | upstream peer | +| plus.http.upstream.peers.ssl.handshakes.failed | build version upstream peer.address peer.name | count |The total number of failed SSL handshakes during the aggregation interval. | upstream peer | +| plus.http.upstream.peers.ssl.session.reuses | build version upstream peer.address peer.name | count |The total number of session reuses during SSL handshake in the aggregation interval. | upstream peer | +| plus.http.upstream.peers.ssl.no_common_protocol | build version upstream peer.address peer.name | avg | The number of SSL handshakes failed because of no common protocol during the aggregation interval. | upstream peer | +| plus.http.upstream.peers.ssl.handshake_timeout | build version upstream peer.address peer.name | avg |The number of SSL handshakes failed because of a timeout during the aggregation interval. | upstream peer | +| plus.http.upstream.peers.ssl.peer_rejected_cert | build version upstream peer.address peer.name | avg | The number of failed SSL handshakes when nginx presented the certificate to the client but it was rejected with a corresponding alert message during the aggregation interval. | upstream peer | +| plus.http.upstream.peers.ssl.verify_failures.expired_cert | build version upstream peer.address peer.name | avg | SSL certificate verification errors - an expired or not yet valid certificate was presented by a client during the aggregation interval. | upstream peer | +| plus.http.upstream.peers.ssl.verify_failures.revoked_cert | build version upstream peer.address peer.name | avg | SSL certificate verification errors - a revoked certificate was presented by a client during the aggregation interval. 
| upstream peer | +| plus.http.upstream.peers.ssl.verify_failures.hostname_mismatch | build version upstream peer.address peer.name | avg | SSL certificate verification errors - server's certificate doesn't match the hostname during the aggregation interval. | upstream peer | +| plus.http.upstream.peers.ssl.verify_failures.other | build version upstream peer.address peer.name | avg |SSL certificate verification errors - other SSL certificate verification errors during the aggregation interval. | upstream peer | +| plus.stream.upstream.peers.ssl.handshakes | build version upstream peer.address peer.name | count |The total number of successful SSL handshakes during the aggregation interval. | upstream peer | +| plus.stream.upstream.peers.ssl.handshakes.failed | build version upstream peer.address peer.name | count | The total number of failed SSL handshakes during the aggregation interval. | upstream peer | +| plus.stream.upstream.peers.ssl.session.reuses | build version upstream peer.address peer.name | count | The total number of session reuses during SSL handshake in the aggregation interval. | upstream peer | +| plus.stream.upstream.peers.ssl.no_common_protocol | build version upstream peer.address peer.name | avg | The number of SSL handshakes failed because of no common protocol during the aggregation interval. | upstream peer | +| plus.stream.upstream.peers.ssl.handshake_timeout | build version upstream peer.address peer.name | avg | The number of SSL handshakes failed because of a timeout during the aggregation interval. | upstream peer | +| plus.stream.upstream.peers.ssl.peer_rejected_cert | build version upstream peer.address peer.name | avg | The number of failed SSL handshakes when nginx presented the certificate to the client but it was rejected with a corresponding alert message during the aggregation interval. | upstream peer | +| plus.stream.upstream.peers.ssl.verify_failures.expired_cert | build version upstream peer.address peer.name | avg | SSL certificate verification errors - an expired or not yet valid certificate was presented by a client during the aggregation interval. | upstream peer | +| plus.stream.upstream.peers.ssl.verify_failures.revoked_cert | build version upstream peer.address peer.name | avg | SSL certificate verification errors - a revoked certificate was presented by a client during the aggregation interval. | upstream peer | +| plus.stream.upstream.peers.ssl.verify_failures.hostname_mismatch | build version upstream peer.address peer.name | avg | SSL certificate verification errors - server's certificate doesn't match the hostname during the aggregation interval. | upstream peer | +| plus.stream.upstream.peers.ssl.verify_failures.other | build version upstream peer.address peer.name | avg | SSL certificate verification errors - other SSL certificate verification errors during the aggregation interval. | upstream peer | + +{{}} + +### NGINX system statistics + +{{}} + +| **Metric** | **Dimensions** | **Type** | **Description** | **Roll-up per** | +|----------------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| system.cpu| | count | System CPU Utilization. | deployment | +| system.interface.bytes_rcvd| interface | count | System Interface Bytes Received. | deployment | +| system.interface.bytes_sent| interface | count | System Interface Bytes Sent. 
| deployment | +| system.interface.packets_rcvd| interface | count | System Interface Packets Received. | deployment | +| system.interface.packets_sent| interface | count | System Interface Packets Sent. | deployment | +| system.interface.total_bytes| interface | count | System Interface Total Bytes, sum of bytes_sent and bytes_rcvd. | deployment | +| system.interface.egress_throughput| interface | count | System Interface Egress Throughput, i.e. bytes sent per second| deployment | + +{{}} + +### NGINX stream statistics + +{{}} + +| **Metric** | **Dimensions** | **Type** | **Description** | **Roll-up per** | +|----------------------------------------|-----------------------------|-------|-----------------------------------------------------------------------------------------------------------------------------|---------------| +| plus.stream.limit_conns.passed | build, version, limit_conn_zone | count | The total number of connections that were neither limited nor accounted as limited. | limit conn zone | +| plus.stream.limit_conns.rejected | build, version, limit_conn_zone | count | The total number of connections that were rejected. | limit conn zone | +| plus.stream.limit_conns.rejected_dry_run | build, version, limit_conn_zone | count | The total number of connections accounted as rejected in the dry run mode. | limit conn zone | +| plus.stream.request.bytes_rcvd | build, version, server_zone | count | The total number of bytes received from clients. | server zone | +| plus.stream.request.bytes_sent | build, version, server_zone | count | The total number of bytes sent to clients. | server zone | +| plus.stream.status.2xx | build, version, server_zone | count | The total number of sessions completed with status codes “2xx”. | server zone | +| plus.stream.status.4xx | build, version, server_zone | count | The total number of sessions completed with status codes “4xx”. | server zone | +| plus.stream.status.5xx | build, version, server_zone | count | The total number of sessions completed with status codes “5xx”. | server zone | +| plus.stream.status.connections | build, version, server_zone | avg | The total number of connections accepted from clients. | server zone | +| plus.stream.status.discarded | build, version, server_zone | avg | The total number of connections completed without creating a session. | server zone | +| plus.stream.status.processing | build, version, server_zone | avg | The number of client connections that are currently being processed. | server zone | +| plus.stream.upstream.peers.conn.active | build, version, upstream, peer.address, peer.name | avg | The current number of connections. | upstream peer | +| plus.stream.upstream.peers.downstart | build, version, upstream, peer.address, peer.name | timestamp | The time when the server became “unavail”, “checking”, or “unhealthy”, in the ISO 8601 format with millisecond resolution. | upstream peer | +| plus.stream.upstream.peers.downtime | build, version, upstream, peer.address, peer.name | count | Total time the server was in the “unavail”, “checking”, and “unhealthy” states. | upstream peer | +| plus.stream.upstream.peers.fails | build, version, upstream, peer.address, peer.name | count | The total number of unsuccessful attempts to communicate with the server. | upstream peer | +| plus.stream.upstream.peers.health_checks.checks | build, version, upstream, peer.address, peer.name | count | The total number of health check requests made. 
| upstream peer | +| plus.stream.upstream.peers.health_checks.fails | build, version, upstream, peer.address, peer.name | count | The number of failed health checks. | upstream peer | +| plus.stream.upstream.peers.health_checks.last_passed | build, version, upstream, peer.address, peer.name | boolean | Boolean indicating whether the last health check request was successful and passed tests. | upstream peer | +| plus.stream.upstream.peers.health_checks.unhealthy | build, version, upstream, peer.address, peer.name | count | How many times the server became unhealthy (state “unhealthy”). | upstream peer | +| plus.stream.upstream.peers.request.bytes_rcvd | build, version, upstream, peer.address, peer.name | count | The total number of bytes received from this server. | upstream peer | +| plus.stream.upstream.peers.request.bytes_sent | build, version, upstream, peer.address, peer.name | count | The total number of bytes sent to this server. | upstream peer | +| plus.stream.upstream.peers.response.time | build, version, upstream, peer.address, peer.name | avg | The average time to receive the last byte of data. | upstream peer | +| plus.stream.upstream.peers.state.checking | build, version, upstream, peer.address, peer.name | boolean | Boolean indicating if any of the upstream servers are being checked. | upstream peer | +| plus.stream.upstream.peers.state.down | build, version, upstream, peer.address, peer.name | boolean | Boolean indicating if any of the upstream servers are down. | upstream peer | +| plus.stream.upstream.peers.state.draining | build, version, upstream, peer.address, peer.name | boolean | Boolean indicating if any of the upstream servers are draining. | upstream peer | +| plus.stream.upstream.peers.state.unavail | build, version, upstream, peer.address, peer.name | boolean | Boolean indicating if any of the upstream servers are unavailable. | upstream peer | +| plus.stream.upstream.peers.state.unhealthy | build, version, upstream, peer.address, peer.name | boolean | Boolean indicating if any of the upstream servers are unhealthy. | upstream peer | +| plus.stream.upstream.peers.state.up | build, version, upstream, peer.address, peer.name | boolean | Boolean indicating if all upstream servers are up. | upstream peer | +| plus.stream.upstream.peers.unavail | build, version, upstream, peer.address, peer.name | count | How many times the server became unavailable for client connections (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold. | upstream peer | +| plus.stream.upstream.zombies | build, version | avg | The current number of servers removed from the group but still processing active client connections. | deployment | +| plus.stream.ssl.handshakes| build version server_zone| count | The total number of successful SSL handshakes during the aggregation interval. | server zone| +| plus.stream.ssl.handshakes.failed | build version server_zone | count | SSL Handshakes Failed The total number of failed SSL handshakes during the aggregation interval. | server zone | +| plus.stream.ssl.session.reuses | build version server_zone | count | The total number of session reuses during SSL handshakes in the aggregation interval. | server zone | +| plus.stream.ssl.no_common_protocol | build version server_zone| avg |The number of SSL handshakes failed because of no common protocol during the aggregation interval. 
|server zone | +| plus.stream.ssl.no_common_cipher| build version server_zone | avg | The number of SSL handshakes failed because of no shared cipher during the aggregation interval. | server zone | +| plus.stream.ssl.handshake_timeout | build version server_zone | avg | The number of SSL handshakes failed because of a timeout during the aggregation interval. | server zone | +| plus.stream.ssl.peer_rejected_cert | build version server_zone | avg | The number of failed SSL handshakes when nginx presented the certificate to the client but it was rejected with a corresponding alert message during the aggregation interval. | server zone | +| plus.stream.ssl.verify_failures.no_cert | build version server_zone | avg |SSL certificate verification errors - a client did not provide the required certificate during the aggregation interval. |server zone | +| plus.stream.ssl.verify_failures.expired_cert | build version server_zone | avg |SSL certificate verification errors - an expired or not yet valid certificate was presented by a client during the aggregation interval. |server zone | +| plus.stream.ssl.verify_failures.revoked_cert | build version server_zone | avg |SSL certificate verification errors - a revoked certificate was presented by a client during the aggregation interval. |server zone | +| plus.stream.ssl.verify_failures.other | build version server_zone | avg |SSL certificate verification errors - other SSL certificate verification errors during the aggregation interval. | server zone | +| plus.stream.zone_sync.status.bytes_in | build, version | count | The number of bytes received by all nodes during the aggregation interval. | deployment | +| plus.stream.zone_sync.status.bytes_out | build, version | count | The number of bytes sent by all nodes during the aggregation interval. | deployment | +| plus.stream.zone_sync.status.msgs_in | build, version | count | The number of messages received by all nodes during the aggregation interval. | deployment | +| plus.stream.zone_sync.status.msgs_out | build, version | count | The number of messages sent by all nodes during the aggregation interval. | deployment | +| plus.stream.zone_sync.zones.records_pending | build, version, shared_memory_zone | avg | The average number of records that need to be sent to the cluster during the aggregation interval. | shared memory zone | +| plus.stream.zone_sync.zones.records_total | build, version, shared_memory_zone | avg | The average number of records stored in the shared memory zone by all nodes during the aggregation interval. | shared memory zone | + +{{}} + +### NGINX resolver statistics + +{{}} + +| **Metric** | **Dimensions** | **Type** | **Description** | **Roll-up per** | +|---------------------------------------|--------------------------------|----------|--------------------------------------------------------------------------------------------|-----------------| +| plus.resolvers.requests.name | build, version, resolver_zone | count | The number of requests to resolve names to addresses during the aggregation interval. | resolver zone | +| plus.resolvers.requests.srv | build, version, resolver_zone | count | The number of requests to resolve SRV records during the aggregation interval. | resolver zone | +| plus.resolvers.requests.addr | build, version, resolver_zone | count | The number of requests to resolve addresses to names during the aggregation interval. 
| resolver zone | +| plus.resolvers.responses.noerror | build, version, resolver_zone | count | The number of successful responses during the aggregation interval. | resolver zone | +| plus.resolvers.responses.formerr | build, version, resolver_zone | count | The number of FORMERR (Format error) responses during the aggregation interval. | resolver zone | +| plus.resolvers.responses.servfail | build, version, resolver_zone | count | The number of SERVFAIL (Server failure) responses during the aggregation interval. | resolver zone | +| plus.resolvers.responses.nxdomain | build, version, resolver_zone | count | The number of NXDOMAIN (Host not found) responses during the aggregation interval. | resolver zone | +| plus.resolvers.responses.notimp | build, version, resolver_zone | count | The number of NOTIMP (Unimplemented) responses during the aggregation interval. | resolver zone | +| plus.resolvers.responses.refused | build, version, resolver_zone | count | The number of REFUSED (Operation refused) responses during the aggregation interval. | resolver zone | +| plus.resolvers.responses.timedout | build, version, resolver_zone | count | The number of timed out requests during the aggregation interval. | resolver zone | +| plus.resolvers.responses.unknown | build, version, resolver_zone | count | The number of requests completed with an unknown error during the aggregation interval. | resolver zone | + +{{}} diff --git a/content/nginxaas-azure/overview/_index.md b/content/nginxaas-azure/overview/_index.md new file mode 100644 index 000000000..324e9b56b --- /dev/null +++ b/content/nginxaas-azure/overview/_index.md @@ -0,0 +1,8 @@ +--- +title: Overview +weight: 100 +url: /nginxaas/azure/overview/ +menu: + docs: + parent: NGINXaaS for Azure +--- \ No newline at end of file diff --git a/content/nginxaas-azure/overview/feature-comparison.md b/content/nginxaas-azure/overview/feature-comparison.md new file mode 100644 index 000000000..6ee320a20 --- /dev/null +++ b/content/nginxaas-azure/overview/feature-comparison.md @@ -0,0 +1,70 @@ +--- +title: "Feature comparison" +weight: 300 +description: "Compare NGINXaaS for Azure with other NGINX offerings." +categories: ["concepts"] +toc: false +docs: "DOCS-1473" +url: /nginxaas/azure/overview/feature-comparison/ +--- + +{{}} + +|**Load Balancer**
   |**NGINX Open
Source** |**NGINX Plus
 ** |**F5 NGINXaaS
for Azure** | +|----------------------------------------|---------------------|---------------------|--------------------------| +|  [HTTP](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/) and [TCP/UDP](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/) support |{{}} |{{}} |{{}} | +|  [Layer 7 request routing](https://www.nginx.org/en/docs/http/ngx_http_core_module.html#location) |{{}} |{{}} |{{}} | +|  [Session persistence](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/#enabling-session-persistence) |{{}} |{{}} |{{}} | +|  [Active health checks](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-health-check/) | |{{}} |{{}} | +|  [DNS service-discovery integration](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#service) | |{{}} |{{}} | +|**Content Cache** |**NGINX Open
Source** |**NGINX Plus
 ** |**NGINXaaS
for Azure** | +|  [Static and dynamic content caching](https://docs.nginx.com/nginx/admin-guide/content-cache/content-caching/)|{{}} |{{}} |{{}} | +|  [Cache-purging API](https://docs.nginx.com/nginx/admin-guide/content-cache/content-caching/#purging-content-from-the-cache) | |{{}} | | +|  MQTT protocol support for IOT devices | |{{}} |{{}} | +|**Web Server and Reverse Proxy** |**NGINX Open
Source** |**NGINX Plus
 ** |**NGINXaaS
for Azure** | +|  Origin server for static content |{{}} |{{}} |{{}} | +|  Reverse proxy: [HTTP](https://nginx.org/en/docs/http/ngx_http_proxy_module.html), [FastCGl](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html),
  [memcached](https://nginx.org/en/docs/http/ngx_http_memcached_module.html), [SCGI](https://nginx.org/en/docs/http/ngx_http_scgi_module.html), [uwsgi](https://nginx.org/en/docs/http/ngx_http_uwsgi_module.html) |{{}} | {{}} |{{}} | +|  [HTTP/2 gateway](https://www.nginx.org/en/docs/http/ngx_http_v2_module.html) |{{}} |{{}} |{{}} | +|  [gRPC proxy](https://nginx.org/en/docs/http/ngx_http_grpc_module.html) |{{}} |{{}} |{{}} | +|  [HTTP/2 server push](https://nginx.org/en/docs/http/ngx_http_v2_module.html#http2_push) |{{}} |{{}} |{{}} | +|  [HTTP/3 over QUIC](https://nginx.org/en/docs/http/ngx_http_v3_module.html) |{{}} |{{}} |{{}} | +|**Security Controls** |**NGINX Open
Source** |**NGINX Plus
 ** |**NGINXaaS
for Azure** | +|  [HTTP basic authentication](https://www.nginx.org/en/docs/http/ngx_http_auth_basic_module.html) |{{}} |{{}} |{{}} | +|  [HTTP authentication subrequests](https://nginx.org/en/docs/http/ngx_http_auth_request_module.html) |{{}} |{{}} |{{}} | +|  [IP address-based access control lists](https://nginx.org/en/docs/http/ngx_http_access_module.html) |{{}}|{{}} |{{}} | +|  [Rate limiting](https://blog.nginx.org/blog/rate-limiting-nginx) |{{}} |{{}} |{{}} | +|  Dual-stack RSA/ECC SSL/TLS offload |{{}} |{{}} |{{}} | +|  TLS 1.3 support |{{}} |{{}} |{{}} | +|  [JWT authentication](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html) | |{{}} |{{}} | +|  OpenID Connect single sign-on
  (SSO) | |{{}} |{{}} | +|  Internal redirect | |{{}} | | +|  NGINX as a SAML Service Provider | |{{}} |{{}} | +|  [NGINX App Protect WAF](https://www.f5.com/products/nginx/nginx-app-protect) (additional cost) | |{{}} |{{}} | +|  [NGINX App Protect DoS](https://www.f5.com/products/nginx/nginx-app-protect) (additional cost) | |{{}} | | +|**Monitoring** |**NGINX Open
Source** |**NGINX Plus
 ** |**NGINXaaS
for Azure** | +|  Export to [external monitoring tools](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/) |{{}} |{{}} |Export metrics to
Azure Monitor | +|  Built-in dashboard | |{{}} |[Azure Monitor](https://learn.microsoft.com/en-us/azure/azure-monitor/overview)
and [Azure Portal](https://azure.microsoft.com/en-us/get-started/azure-portal) | +|  [Extended status with 100+
  additional metrics](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/) | |{{}} |{{}} | +|  Native Open Telemetry Tracing | |{{}} | | +|**High Availability (HA)** |**NGINX Open
Source** |**NGINX Plus
 ** |**NGINXaaS
for Azure** | +|  [Active-active](https://docs.nginx.com/nginx/admin-guide/high-availability/) | |{{}} |{{}} | +|  [Active-passive](https://docs.nginx.com/nginx/admin-guide/high-availability/) | |{{}} | Not Applicable | +|  [Configuration synchronization
  across cluster](https://docs.nginx.com/nginx/admin-guide/high-availability/configuration-sharing/) | |{{}} |{{}} | +|  [State sharing](https://docs.nginx.com/nginx/admin-guide/high-availability/zone_sync/): sticky-learn session
  persistence, rate limiting, key-value
  stores | |{{}} |{{}} | +|**Programmability** |**NGINX Open
Source** |**NGINX Plus
 ** |**NGINXaaS
for Azure** | +|  [NGINX JavaScript module](https://www.f5.com/company/blog/nginx/harnessing-power-convenience-of-javascript-for-each-request-with-nginx-javascript-module) |{{}} |{{}} |{{}} | +|  [NGINX Plus API for dynamic
  reconfiguration](https://docs.nginx.com/nginx/admin-guide/load-balancer/dynamic-configuration-api/) | |{{}} | | +|  [Key-value store](https://nginx.org/en/docs/http/ngx_http_keyval_module.html) | |{{}} |{{}} | +|  Dynamic reconfiguration without
  process reloads | |{{}} | | +|**Streaming Media** |**NGINX Open
Source** |**NGINX Plus
 ** |**NGINXaaS
for Azure** | +|  Live streaming: RTMP, HLS, DASH |{{}} |{{}} |{{}} | +|  VOD: Flash (FLV), MP4 |{{}} |{{}} |{{}} | +|  Adaptive bitrate VOD: [HLS](https://nginx.org/en/docs/http/ngx_http_hls_module.html), [HDS](https://nginx.org/en/docs/http/ngx_http_f4f_module.html) | |{{}} | | +|  [MP4 bandwidth controls](https://nginx.org/en/docs/http/ngx_http_mp4_module.html) | |{{}} | | +|**Third-party ecosystem** |**NGINX Open
Source** |**NGINX Plus
 ** |**NGINXaaS
for Azure** | +|  [Ingress controller](https://www.f5.com/products/nginx/nginx-ingress-controller) |{{}} |{{}} | | +|  OpenShift Router |{{}} |{{}} | | +|  [Dynamic modules repository](https://www.f5.com/go/product/nginx-modules) | |{{}} |[Image-Filter](https://nginx.org/en/docs/http/ngx_http_image_filter_module.html)
[njs](https://nginx.org/en/docs/njs/)
[OpenTelemetry](https://nginx.org/en/docs/ngx_otel_module.html)
[XSLT](https://nginx.org/en/docs/http/ngx_http_xslt_module.html) | +|  Deployable as a service | | |Microsoft Azure | +|  [Commercial support](https://my.f5.com/manage/s/article/K000140156/) | |{{}} |{{}} | +{{
}} diff --git a/content/nginxaas-azure/overview/overview.md b/content/nginxaas-azure/overview/overview.md new file mode 100644 index 000000000..4dcaee8f5 --- /dev/null +++ b/content/nginxaas-azure/overview/overview.md @@ -0,0 +1,68 @@ +--- +title: "Overview and architecture" +weight: 100 +categories: ["concepts"] +toc: true +docs: "DOCS-879" +url: /nginxaas/azure/overview/overview/ +--- + +## What Is F5 NGINX as a Service for Azure? + +NGINX as a Service for Azure is a service offering that is tightly integrated into Microsoft Azure public cloud and its ecosystem, making applications fast, efficient, and reliable with full lifecycle management of advanced NGINX traffic services. +NGINXaaS for Azure is available in the Azure Marketplace. + +NGINXaaS for Azure is powered by [NGINX Plus](https://www.nginx.com/products/nginx/), which extends NGINX Open Source with advanced functionality and provides customers with a complete application delivery solution. Initial use cases covered by NGINXaaS include L7 HTTP load balancing and reverse proxy which can be managed through various Azure management tools. +NGINXaaS allows you to provision distinct deployments as per your business or technical requirements. + +## Capabilities + +The key capabilities of NGINXaaS for Azure are: + +- Simplifies onboarding by leveraging NGINX as a service. +- Lowers operational overhead in running and optimizing NGINX. +- Simplifies NGINX deployments with fewer moving parts (edge routing is built into the service). +- Supports migration of existing NGINX configurations to the cloud with minimal effort. +- Integrates with the Azure ecosystem (Microsoft Entra, Azure Key Vault, and Azure Monitor). +- Addresses a wide range of deployment scenarios (HTTP reverse proxy, JWT authentication, etc). +- Adopts a consumption-based pricing to align infrastructure costs to actual usage by billing transactions using Azure. + +## Supported regions + +NGINXaaS for Azure is supported in the following regions: +{{< bootstrap-table "table table-striped table-bordered" >}} +| **North America** | **South America** | **Europe** | **Asia Pacific** | +|----------------------------------------------------------|--------------------------------------------|--------------------------------------------|-------------------------| +| West Central US
West US
East US 2
West US 2
West US 3
East US
Central US
North Central US
Canada Central | Brazil South | West Europe
North Europe
Sweden Central
Germany West Central | Australia East
Japan East
Korea Central
Southeast Asia
Central India
South India | +{{< /bootstrap-table >}} + + +## NGINXaaS architecture + +{{< img src="nginxaas-azure/n4a-architecture.png" alt="The diagram illustrates the architecture of F5 NGINXaaS for Azure within a Microsoft Azure environment. It shows admins using Azure API/SDK, Azure Portal, Azure CLI, and Terraform to interact with the NGINX Plus component in the IaaS layer for edge routing. The diagram also depicts subnet delegation from the NGINX Plus component to a customer subscription, which includes Azure Key Vault, Azure Monitor, other Azure services, and multiple application servers (App Server 1, App Server 2, App Server N)." >}} + +- Azure management tools (API, CLI, portal, terraform) work with NGINXaaS to create, update, and delete deployments +- Each NGINXaaS deployment has dedicated network and compute resources. There is no possibility of [noisy neighbor problems](https://learn.microsoft.com/en-us/azure/architecture/antipatterns/noisy-neighbor/noisy-neighbor) or data leakage between deployments + +### Redundancy + +With the Standard Plan, NGINXaaS uses the following redundancy features to keep your service available. + +- We run _at least_ two NGINX Plus instances for each deployment in an active-active pattern +- NGINX Plus is constantly monitored for health. Any unhealthy instances are replaced with new ones +- We use [Azure Availability Zones](https://learn.microsoft.com/en-us/azure/availability-zones/az-overview) + to protect your deployment from local failures within an Azure region. We balance NGINX instances across the possible availability zones in [supported regions](https://learn.microsoft.com/en-us/azure/availability-zones/az-overview#azure-regions-with-availability-zones) + +{{< note >}} If you are creating a public IP for your deployment, be sure to make them [zone redundant](https://learn.microsoft.com/en-us/azure/virtual-network/ip-services/public-ip-addresses#availability-zone) to get the best uptime. {{}} + +### Data plane traffic + +{{< img src="nginxaas-azure/n4a-data-plane-architecture.svg" alt="The diagram illustrates the architecture of F5 NGINXaaS for Azure, showing end users accessing a public IP that routes through a network security group within a customer's Azure subscription. This leads to a delegated subnet in a virtual network, which connects to a zone-redundant load balancer within the NGINXaaS subscription. The load balancer distributes traffic across NGINX Plus instances in multiple availability zones, ensuring scalability and redundancy." >}} + +NGINXaaS uses new Azure networking capabilities to keep end-user traffic private. Each NGINX Plus instance passes traffic to downstream services using an elastic network card (NIC) that exists inside your subscription. These NICs are injected into a delegated virtual network. A network security group controls traffic to your NGINX Plus instances. + +NGINX Plus instances are automatically upgraded to receive security patches and the latest stable NGINX Plus version. 
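+
+For reference, the zone-redundant public IP recommended in the Redundancy note above can be created ahead of time with the Azure CLI. This is a minimal sketch; the resource group, IP name, and region below are placeholders to replace with your own values.
+
+```bash
+# Create a Standard SKU public IP that spans availability zones 1, 2, and 3,
+# making it zone redundant. All names and the region are illustrative.
+az network public-ip create \
+  --resource-group myResourceGroup \
+  --name myNginxPublicIp \
+  --location eastus2 \
+  --sku Standard \
+  --zone 1 2 3
+```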
+ +## What's next + +To get started, check the [NGINX as a Service for Azure prerequisites]({{< relref "/nginxaas-azure/getting-started/prerequisites.md" >}}) diff --git a/content/nginxaas-azure/quickstart/_index.md b/content/nginxaas-azure/quickstart/_index.md new file mode 100644 index 000000000..2698489ac --- /dev/null +++ b/content/nginxaas-azure/quickstart/_index.md @@ -0,0 +1,8 @@ +--- +title: Quickstart guides +weight: 600 +url: /nginxaas/azure/quickstart/ +menu: + docs: + parent: NGINXaaS for Azure +--- \ No newline at end of file diff --git a/content/nginxaas-azure/quickstart/basic-caching.md b/content/nginxaas-azure/quickstart/basic-caching.md new file mode 100644 index 000000000..1a452f609 --- /dev/null +++ b/content/nginxaas-azure/quickstart/basic-caching.md @@ -0,0 +1,24 @@ +--- +title: "Enable content caching" +weight: 200 +categories: ["tasks"] +toc: true +docs: "DOCS-897" +url: /nginxaas/azure/quickstart/basic-caching/ +--- + +F5 NGINX as a Service for Azure (NGINXaaS) supports caching using the [ngx_http_proxy_module](https://nginx.org/en/docs/http/ngx_http_proxy_module.html) module, improving performance by allowing content to be served from cache without having to contact upstream servers. For more information on caching with NGINX, see [NGINX Content Caching](https://docs.nginx.com/nginx/admin-guide/content-cache/content-caching/). + +## Configuring caching +```nginx +http { + # ... + proxy_cache_path /var/cache/nginx keys_zone=mycache:10m; +} +``` + +NGINXaaS for Azure only supports caching to `/var/cache/nginx`. This is because data at `/var/cache/nginx` will be stored in a separate [Temporary Disk](https://docs.microsoft.com/en-us/azure/virtual-machines/managed-disks-overview#temporary-disk). The size of the temporary disk is 4GB. + +## Limitations + +Currently, `proxy_cache_purge` might not work as expected because NGINXaaS [deploys multiple instances of NGINX Plus]({{< relref "/nginxaas-azure/overview/overview.md#architecture" >}}) for high availability. The `PURGE` request will be routed to a single instance, and only the matched values on that instance will be purged. diff --git a/content/nginxaas-azure/quickstart/geoip2.md b/content/nginxaas-azure/quickstart/geoip2.md new file mode 100644 index 000000000..bc97c514f --- /dev/null +++ b/content/nginxaas-azure/quickstart/geoip2.md @@ -0,0 +1,60 @@ +--- +title: "GeoIP2" +weight: 700 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/quickstart/geoip2/ +--- + +## Overview + +F5 NGINX as a Service for Azure (NGINXaaS) supports GeoIP2 using the [`ngx_http_geoip2_module` or `ngx_stream_geoip2_module`](https://github.com/leev/ngx_http_geoip2_module) dynamic modules, enabling NGINXaaS to implement various user differentiation strategies. For more information on GeoIP2 with NGINX, see [NGINX GeoIP2](https://docs.nginx.com/nginx/admin-guide/dynamic-modules/geoip2/). + +NGINXaaS uses your MaxMind license to download GeoIP2 databases, puts them in the right place before NGINX starts, and updates the databases daily to reduce your operational overhead. All GeoIP2 data is deleted once you stop using GeoIP2 or delete your deployment. MaxMind provides a variety of [databases](https://www.maxmind.com/en/geoip-databases), including a lower accuracy [free option](https://www.maxmind.com/en/geolite2/signup). NGINXaaS uses a modified form of [MaxMind's `geoipupdate`](https://github.com/maxmind/geoipupdate). 
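+
+For illustration, a minimal `GeoIP.conf` (you generate the real one from your MaxMind account, as described in the Configure steps below) contains only the three required fields. The account ID, license key, and edition IDs shown here are placeholders, not real credentials.
+
+```
+AccountID 123456
+LicenseKey 0123456789abcdef
+EditionIDs GeoLite2-City GeoLite2-Country
+```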
+
+## Configure
+
+To enable GeoIP2, you [update your NGINX configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/overview.md">}}) to include your MaxMind license and the relevant NGINX directives.
+
+1. Log into MaxMind and [generate a `GeoIP.conf`](https://dev.maxmind.com/geoip/updating-databases/#2-obtain-geoipconf-with-account-information) file.
+2. Add the `GeoIP.conf` file to your NGINX configuration, using the exact path `/etc/nginx/GeoIP.conf`. The `GeoIP.conf` will be validated, and must include `AccountID`, `LicenseKey`, and `EditionIDs`. Other configuration options in `GeoIP.conf` are ignored. We recommend you enable the **Protected** {{}} toggle button to mark `GeoIP.conf` as a protected file, which will prevent the contents from being read via any Azure client tools.
+3. Add the `load_module` directive; the modules are available at `modules/ngx_http_geoip2_module.so` or `modules/ngx_stream_geoip2_module.so`.
+4. Add `geoip2` directives to your NGINX configuration as desired. The `EditionIDs` from your `GeoIP.conf` are available at `/usr/local/share/GeoIP`.
+
+{{}}NGINXaaS for Azure currently only supports the database directory at the path `/usr/local/share/GeoIP`.{{}}
+
+There are many different ways to use the `geoip2` directives. For example:
+
+```nginx
+load_module modules/ngx_http_geoip2_module.so;
+
+http {
+    # "GeoLite2-City" is one of the EditionIDs in /etc/nginx/GeoIP.conf
+    geoip2 /usr/local/share/GeoIP/GeoLite2-City.mmdb {
+        $geoip2_city_name city names en;
+    }
+
+    server {
+        listen 80;
+        server_name localhost;
+        location / {
+            return 200 "Hello $geoip2_city_name";
+        }
+    }
+}
+```
+
+## Monitoring
+
+All licenses are [validated with MaxMind](https://dev.maxmind.com/license-key-validation-api/) when initially added to your deployment, but MaxMind licenses can expire or be manually revoked.
+
+To view the status of your MaxMind license, [enable monitoring]({{< relref "/nginxaas-azure/monitoring/enable-monitoring.md" >}}) for your NGINXaaS deployment and navigate to the Metrics tab. View the `nginxaas.maxmind` metric under the `nginxaas statistics` metric namespace. The `nginxaas.maxmind` metric reports the health of your license through the `status` dimension:
+
+  {{}}
+
+  | Status | Description |
+  | -------------- | ------------------------------------------------------------------------------------------ |
+  | `active` | The license is valid and in use to update GeoIP2 databases. |
+  | `unauthorized` | MaxMind returned a license error, which usually indicates an issue with the `GeoIP.conf`. |
+
+  {{}}
diff --git a/content/nginxaas-azure/quickstart/hosting-static-content.md b/content/nginxaas-azure/quickstart/hosting-static-content.md
new file mode 100644
index 000000000..38ad5a5fe
--- /dev/null
+++ b/content/nginxaas-azure/quickstart/hosting-static-content.md
@@ -0,0 +1,66 @@
+---
+title: "Hosting static content"
+weight: 200
+categories: ["tasks"]
+toc: true
+docs: "DOCS-1344"
+url: /nginxaas/azure/quickstart/hosting-static-content/
+---
+
+F5 NGINX as a Service for Azure (NGINXaaS) supports hosting static content, which allows users to serve static websites from their deployment.
+
+## Uploading static files as a tarball
+
+Follow the steps listed below to upload static content and the relevant NGINX configuration using `tar`:
+
+1. Create an `nginx.conf` to configure your deployment to serve static content. 
The following is an example NGINX configuration:
+
+```nginx
+http {
+    server {
+        listen 80;
+        location / {
+            root /srv;
+            index index.html;
+        }
+    }
+}
+```
+
+2. Store your static files alongside the NGINX configuration.
+
+The following shows the structure of a directory containing an NGINX configuration and an `index.html` file that will be served from the deployment.
+
+```bash
+test-static-files $ tree .
+.
+├── nginx.conf
+└── srv
+    └── index.html
+
+2 directories, 2 files
+```
+
+{{}}`index.html` is placed under the `srv` directory. When using `tar` to upload static content, the static content has to be placed under one of the allowed paths listed in the [NGINX Filesystem Restrictions table]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/overview/#nginx-filesystem-restrictions" >}}).{{}}
+
+3. Create the tarball.
+
+```bash
+test-static-files $ tar -cvzf /test.tar.gz *
+```
+
+4. Upload the tarball by following the instructions in the [NGINX configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#upload-gzip-nginx-configuration" >}}) documentation.
+
+5. After uploading the configuration, you should see the following files in your deployment:
+   - `nginx.conf`
+   - `srv/index.html`
+
+6. Browse to the deployment IP address, and you will see `index.html` being served from the deployment.
+
+## Uploading static files directly to the deployment
+
+You can also upload static files directly to the deployment. See [Adding NGINX Configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#add-nginx-configuration" >}}) to upload individual files to your deployment. Refer to the [NGINX Filesystem Restrictions table]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/overview/#nginx-filesystem-restrictions" >}}) to see where files can be written to and read from.
+
+## Limitations
+
+NGINX configuration payloads larger than 3 MB are not supported.
diff --git a/content/nginxaas-azure/quickstart/loadbalancer-kubernetes.md b/content/nginxaas-azure/quickstart/loadbalancer-kubernetes.md
new file mode 100644
index 000000000..a2fe26070
--- /dev/null
+++ b/content/nginxaas-azure/quickstart/loadbalancer-kubernetes.md
@@ -0,0 +1,378 @@
+---
+title: "NGINXaaS Load Balancer for Kubernetes"
+weight: 250
+categories: ["tasks"]
+toc: true
+url: /nginxaas/azure/quickstart/loadbalancer-kubernetes/
+---
+
+## Overview
+
+F5 NGINXaaS for Azure simplifies advanced Layer 4 and Layer 7 load balancing for Kubernetes clusters. With the NGINX Load Balancer for Kubernetes (NLK) feature, you can enable multi-cluster load balancing, failover, disaster recovery, and blue-green or canary deployments.
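+
+As a rough sketch of the NGINXaaS side of the diagram below, the `tea` upstream only needs to declare a shared memory `zone`; NLK then adds and removes the server entries through the NGINX Plus API as pods come and go. The upstream name, port, and path here are illustrative, not required values.
+
+```nginx
+http {
+    upstream tea {
+        # No static servers: NLK populates this upstream dynamically,
+        # which requires a shared memory zone.
+        zone tea 64k;
+    }
+
+    server {
+        listen 80;
+
+        location /tea {
+            proxy_pass http://tea;
+        }
+    }
+}
+```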
+ + + +```mermaid +flowchart TB + + Users[웃 Users] -.-> |GET '/tea' | NGINXaaS{NGINXaaS} + NGINXaaS -.-> P1 + NLK --> |Update upstream 'tea'| NGINXaaS + + subgraph AK[Azure Kubernetes Cluster] + TeaSvc{Tea svc} -.-> P2(Pod) + TeaSvc -.-> P1(Pod) + k8sapi[K8s API] --> |watch| NLK(NLK controller) + end + + style Users color:orange,stroke:orange,fill:#faefd9 + linkStyle 0,1 color:orange,stroke:orange + style NLK color:green,stroke:green,stroke-width:4px,fill:#d9fade + style NGINXaaS color:green,stroke:green,stroke-width:4px,fill:#d9fade + linkStyle 2 color:green,stroke:green + style AK fill:#9bb1de,color:# + style k8sapi color:#3075ff,stroke:#3075ff,stroke-width:4px + linkStyle 5 color:#3075ff,stroke:#3075ff + + accDescr: A diagram showing users sending GET requests to NGINXaaS, which proxies traffic to a Kubernetes-based service named "TeaSvc" running multiple pods in an Azure Kubernetes Cluster, with upstream configurations dynamically managed via an NLK controller watching the Kubernetes API. +``` + +The NLK controller monitors [Kubernetes Services](https://kubernetes.io/docs/concepts/services-networking/service/) and updates an [NGINX Upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html) dynamically. NGINXaaS applies these updates immediately and keeps them in sync during scaling or upgrades. + +### Example use cases + +- You can use NGINXaaS for Azure to enforce rate limiting and application security with NGINX App Protect, then forward all accepted traffic to your Kubernetes applications. +- You can use NGINXaaS for Azure to receive traffic on `api.example.com` and route requests by URL path - for example, forwarding `/login` to a Kubernetes-based login service, `/graph` to a Kubernetes-hosted graph service, and `/process` to an application server on a standalone VM. + +## Getting Started + +This guide explains how to integrate NGINXaaS with an Azure Kubernetes Service (AKS) cluster. See [Advanced Configuration](#advanced-configuration) for options to customize the installation.. + +Before following the steps in this guide, you must: + +- Create an AKS cluster. +- Create an NGINXaaS deployment. See the [documentation]({{< relref "/nginxaas-azure/getting-started/create-deployment/deploy-azure-portal/" >}}) to deploy via the Azure portal. +- Ensure network connectivity between the subnet delegated to the NGINXaaS deployment and the subnet where AKS is deployed. For example, the AKS cluster and NGINXaaS deployment can run on the same Azure VNET or on peered VNETs. + +### Initial setup + +The steps in this section must be completed once for each new setup. We will install the NLK controller in the Kubernetes cluster and authorize that to send updates to the NGINXaaS deployment. + +1. Create an NGINXaaS data plane API key. +1. Look up the NGINXaaS data plane API endpoint. +1. Install the NLK controller. + +#### Create an NGINXaaS data plane API key + +{{}} +The data plane API key has the following requirements: +- The key should have an expiration date. The default expiration date is six months from the date of creation. The expiration date cannot be longer than two years from the date of creation. +- The key should be at least 12 characters long. +- The key requires three out of four of the following types of characters: + - lowercase characters. + - uppercase characters. + - symbols. + - numbers. + +A good example of an API key that will satisfy the requirements is UUIDv4. + +{{}} + +The data plane API key can be created using the Azure CLI or portal. 
+ +##### Create an NGINXaaS data plane API key using the Azure portal + +1. Go to your NGINXaaS for Azure deployment. +1. Select **NGINXaaS Loadbalancer for Kubernetes** on the left blade. +1. Select **New API Key**. +1. Provide a name for the new API key in the right panel, and select an expiration date. +1. Select the **Add API Key** button. +1. Copy the value of the new API key. + +{{}} +Make sure to write down the key value in a safe location after creation, as you cannot retrieve it again. If you lose the generated value, delete the existing key and create a new one. +{{}} + +##### Create an NGINXaaS data plane API key using the Azure CLI + +Set shell variables about the name of the NGINXaaS you've already created: + +```bash +## Customize this to provide the details about my already created NGINXaaS deployment +nginxName=myNginx +nginxGroup=myNginxGroup +``` + +Generate a new random data plane API key: + +```bash +# Generate a new random key or specify a value for it. +keyName=myKey +keyValue=$(uuidgen --random) +``` + +Create the key for your NGINXaaS deployment: + +```bash +az nginx deployment api-key create --name $keyName --secret-text $keyValue --deployment-name $nginxName --resource-group $nginxGroup +``` + +#### NGINXaaS data plane API endpoint + +The data plane API endpoint can be retrieved using the Azure CLI or portal. + +##### View NGINXaaS data plane API endpoint using the Azure portal + +1. Go to your NGINXaaS for Azure deployment. +1. Select **NGINXaaS Loadbalancer for Kubernetes** on the left blade. +1. The data plane API endpoint associated with the deployment is available at the top of the screen. + +##### View NGINXaaS data plane API endpoint using the Azure CLI + +```bash +dataplaneAPIEndpoint=$(az nginx deployment show -g "$nginxGroup" -n "$nginxName" --query properties.dataplaneApiEndpoint -o tsv) +``` + +#### Install the NLK controller + +The NLK controller can be installed in your Kubernetes cluster using either Helm or the official [AKS Extension](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/f5-networks.f5-nginx-for-azure-aks-extension?tab=overview) available on the Azure Marketplace. + +##### Install the NLK controller using Helm + +Install the NLK controller using `helm install`. Be sure your kubectl context is pointed at the desired cluster. + +```bash +helm install nlk oci://registry-1.docker.io/nginxcharts/nginxaas-loadbalancer-kubernetes --version 1.0.0 \ + --set "nlk.dataplaneApiKey=${keyValue}" \ + --set "nlk.config.nginxHosts=${dataplaneAPIEndpoint}nplus" +``` + +##### Install the AKS Extension using the Azure CLI + +Install the NLK controller using `az k8s-extension`. 
+ +```bash +## Customize this to provide the details about my already created AKS cluster +aksName=myCluster +aksGroup=myClusterGroup +az k8s-extension create \ + --name nlk \ + --extension-type "nginxinc.nginxaas-aks-extension" \ + --scope cluster \ + --cluster-name ${aksName} \ + --resource-group ${aksGroup} \ + --cluster-type managedClusters \ + --plan-name f5-nginx-for-azure-aks-extension \ + --plan-product f5-nginx-for-azure-aks-extension \ + --plan-publisher f5-networks \ + --release-namespace nlk \ + --config nlk.dataplaneApiKey=${keyValue} \ + --config nlk.config.nginxHosts=${dataplaneAPIEndpoint}nplus +``` + +##### Install the AKS Extension using the Azure portal + +You can also install the NLK controller AKS extension by navigating to [F5 NGINXaaS Loadbalancer for Kubernetes](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/f5-networks.f5-nginx-for-azure-aks-extension) in the Azure Marketplace and following the installation steps. + +- Select **Get it now**. +- Select **Continue** to proceed with the installation. +- On the **Basics** tab, provide the following information: + + {{}} + | Field | Description | + |---------------------------- | ---------------------------- | + | Subscription | Select the appropriate Azure subscription. | + | Resource group | Select the AKS cluster's resource group. | + {{}} + +- Select **Cluster Details**, and provide the AKS cluster name. You can select an existing AKS cluster or create a new one. +- Select **Application Details**, and provide the following information: + + {{}} + | Field | Description | + |---------------------------- | ---------------------------- | + | Cluster extension resource name | Provide a name for the NLK controller. | + | Installation namespace | Provide the AKS namespace for the NLK controller. | + | Allow minor version upgrades of extension | Select whether to allow the extension to be upgraded automatically to the latest minor version. | + | NGINXaaS Dataplane API Key | Provide the previously generated data plane API key value: `{keyValue}` | + | NGINXaaS Dataplane API Endpoint | Provide the previously retrieved data plane API endpoint value: `{dataplaneAPIEndpoint}nplus` | + {{}} + +- Select **Review + Create** to continue. +- Azure will validate the extension settings. This page will provide a summary of the provided information. Select **Create**. + +{{}} +The NGINXaaS data plane API that NLK uses is mounted at `${dataplaneAPIEndpoint}nplus`. For example, if the data plane API endpoint is `https://mynginx-75b3bf22a555.eastus2.nginxaas.net/` then the value for `nlk.config.nginxHosts` should be `https://mynginx-75b3bf22a555.eastus2.nginxaas.net/nplus`. +{{}} + +### Create an NGINX configuration with dynamic upstream + +You must define an [NGINX upstream](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#upstream) that satisfies the following requirements for it to be managed by the NLK controller: + +- The upstream cannot have any servers listed in it specified via the `server` directive. The controller will manage the servers dynamically. +- The upstream must have a shared memory [zone](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#zone) defined. +- The upstream must have a [state](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#state) file declared. + +The following is an example NGINX Configuration that can be used: + +```nginx +http { + upstream my-service { + # NOTE: There are no servers defined here as they will be managed dynamically by the controller. 
zone my-service 64K; # required
+ state /tmp/my-service.state; # required
+ }
+
+ server {
+ listen 80;
+ location / {
+ proxy_pass http://my-service;
+ }
+ }
+}
+```
+
+[Apply the NGINX configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/overview/" >}}) to your deployment after making the required changes.
+
+### Create a Kubernetes Service
+
+Expose a Kubernetes `Service` to route traffic to your workload. The `Service` has the following requirements:
+
+- Add the annotation: `nginx.com/nginxaas: nginxaas` to mark the service to be monitored by NLK.
+- Choose one of the following `Service` types:
+ - `NodePort`: To route external traffic into the cluster using a well-defined port exposed on each AKS worker node.
+ - `ClusterIP`: To route traffic to pods directly if you are running an Azure Container Networking Interface (CNI) that lets you expose the pods on the Azure VNET.
+- The port name must be formatted as `{{NGINX Context}}-{{NGINX upstream name}}`. For example:
+ - If the upstream is in the `http` context and named `my-service`, then the name is `http-my-service`
+ - If the upstream is in the `stream` context and named `jet`, then the port name is `stream-jet`
+
+The following example uses a service of type `NodePort`:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: my-service
+ annotations:
+ # Let the controller know to pay attention to this service.
+ # If you are connecting multiple controllers, the value can be used to distinguish them
+ nginx.com/nginxaas: nginxaas
+spec:
+ # expose a port on the nodes
+ type: NodePort
+ ports:
+ - targetPort: http
+ protocol: TCP
+ # The port name helps connect to NGINXaaS. It must be prefixed with either `http-` or `stream-`
+ # and the rest of the name must match the name of an upstream in that context.
+ name: http-my-service
+ selector:
+ app: awesome
+```
+
+## Advanced Configuration
+
+### Controller Configuration
+
+| Helm Value | Description | Value |
+|------------------------|----------------------------------------------------------|--------------------------------|
+| `nlk.config.logLevel` | How verbose the NLK controller logs should be. | Possible values are `debug`, `info`, `warn`, `error`. Default: `info`. |
+| `nlk.config.nginxHosts` | The NGINX Plus APIs to send upstream updates to. | Should be set to `{{dataplaneApiEndpoint}}nplus`.|
+| `nlk.config.serviceAnnotationMatch` | The value to match on a Service's `nginx.com/nginxaas` annotation. Useful when configuring multiple NLK controllers to update separate NGINXaaS deployments. | Default: `nginxaas`. |
+| `nlk.dataplaneApiKey` | The NGINXaaS data plane API key that will authorize the controller to talk to your NGINXaaS deployment. | |
+
+### Multiple AKS clusters
+
+A single NGINXaaS deployment can direct traffic to multiple AKS clusters. Each AKS cluster needs its own copy of NLK installed and connected to NGINXaaS.
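For example, assuming the kubectl context for each cluster is configured locally and the API key and endpoint variables from the initial setup are still available, the same Helm installation shown earlier can be repeated in each cluster. This is a sketch; the context name is a placeholder:

```bash
# Point kubectl at the second AKS cluster (placeholder context name), then
# install another copy of the NLK controller that reports to the same
# NGINXaaS deployment.
kubectl config use-context my-second-aks-cluster

helm install nlk oci://registry-1.docker.io/nginxcharts/nginxaas-loadbalancer-kubernetes --version 1.0.0 \
  --set "nlk.dataplaneApiKey=${keyValue}" \
  --set "nlk.config.nginxHosts=${dataplaneAPIEndpoint}nplus"
```

Each controller then manages the upstreams that correspond to the annotated Services in its own cluster, as illustrated below.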
+ +```mermaid +flowchart TB + + TeaUsers[웃 Users] -.-> |GET /tea | NGINXaaS{NGINXaaS} + CoffeeUsers[웃 Users] -.-> |GET /coffee | NGINXaaS + NGINXaaS -.-> |GET /tea| E + H --> |Update upstream 'tea'| NGINXaaS + NGINXaaS -.-> |GET /coffee| K + M --> |Update upstream 'coffee'| NGINXaaS + + subgraph SG2[Azure Kubernetes Cluster 2] + k8sapi2[K8s API] --> |watch| M(NLK controller) + I{Coffee svc} -.-> J(Pod) + I -.-> K(Pod) + end + + subgraph SG1[Azure Kubernetes Cluster 1] + k8sapi1[K8s API] --> |watch| H(NLK controller) + D{Tea svc} -.-> E(Pod) + D -.-> F(Pod) + end + + + style TeaUsers color:red,stroke:red,fill:#faefd9 + linkStyle 0,2 color:red,stroke:red + style CoffeeUsers color:orange,stroke:orange,fill:#faefd9 + linkStyle 1,4 color:orange,stroke:orange + style NGINXaaS color:green,stroke:green,stroke-width:4px,fill:#d9fade + linkStyle 3,5 color:green,stroke:green + style SG1 fill:#9bb1de,color:# + style SG2 fill:#9bb1de,color:# + style k8sapi1 color:#3075ff,stroke:#3075ff,stroke-width:4px + style k8sapi2 color:#3075ff,stroke:#3075ff,stroke-width:4px + linkStyle 6,9 color:#3075ff,stroke:#3075ff + style H color:green,stroke:green,stroke-width:4px,fill:#d9fade + style M color:green,stroke:green,stroke-width:4px,fill:#d9fade + + accDescr:A diagram showing NGINXaaS directing separate user GET requests for `/tea` and `/coffee` to respective Kubernetes-based services "TeaSvc" and "CoffeeSvc" that are running in separate Azure Kubernetes Clusters. An NLK controller in each cluster is independently updating the NGINXaaS with dynamic upstream configuration. +``` + +{{}} +- Configuring multiple NLK controllers to update the same upstream isn't supported and will result in unpredictable behavior. +{{}} + +### Multiple NGINXaaS deployments + +Multiple NLK controllers can be installed in the same AKS cluster to update separate NGINXaaS deployments. + +Each NLK needs a unique helm release name and needs a unique helm value for `nlk.config.serviceAnnotationMatch`. Each NLK will only watch services that have the matching annotation. + +{{}} +- Consider using `helm` to install multiple NLK controllers on an AKS cluster. Installing multiple copies of the controller on the same AKS cluster is not supported via the [AKS Extension](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/f5-networks.f5-nginx-for-azure-aks-extension?tab=overview). +{{}} + +## Troubleshooting + +NGINXaaS Loadbalancer for Kubernetes and NGINXaaS continually monitor and attempt to repair in case of error. However, if upstreams are not populated as expected, here are a few things you can look for. + +### NLK controller logs + +The controller reports status information about the requests it is making to NGINXaaS. This is a good place to look to ensure that the controller has picked up your service and that it is communicating with NGINXaaS correctly. + +Run the following command to view the controller logs: `kubectl logs deployment/nlk-nginxaas-loadbalancer-kubernetes`. + +The logs can be made more verbose by setting the Helm value `nlk.config.logLevel` (see [Controller Configuration](#controller-configuration)). + +### Enable NGINX Upstream Update Logs + +NGINXaaS supports exporting dynamic upstream update logs to an Azure Storage account or to a Log Analytics workspace. + +To setup logging: +1. Select **Diagnostic settings** under **Monitoring**. +1. Select **Add diagnostic setting**. +1. On the following panel, provide a **Diagnostic setting name**. +1. Enable the **NGINX Upstream Update Logs** category. +1. 
Select a destination. + +For more information on logging, see [Enable NGINX Logs]({{< relref "/nginxaas-azure/monitoring/enable-logging/">}}). + +### Metrics + +NGINXaaS has the following metrics that are useful to monitor upstream health: + +- `plus.http.upstream.peers.state.up` -- does the peer report being healthy. +- `plus.http.upstream.peers.request.count` -- which peers are handling requests. + +See the [metrics catalog](../../monitoring/metrics-catalog) for the entire list of NGINXaaS metrics. diff --git a/content/nginxaas-azure/quickstart/njs-support.md b/content/nginxaas-azure/quickstart/njs-support.md new file mode 100644 index 000000000..d5f9770f5 --- /dev/null +++ b/content/nginxaas-azure/quickstart/njs-support.md @@ -0,0 +1,42 @@ +--- +title: "Use the njs Scripting language" +weight: 400 +categories: ["tasks"] +toc: true +docs: "DOCS-874" +url: /nginxaas/azure/quickstart/njs-support/ +--- + +F5 NGINX as a Service for Azure (NGINXaaS) supports the open-source [njs module](https://nginx.org/en/docs/http/ngx_http_js_module.html), allowing the extension of NGINX functionality with a subset of the Javascript language. + +## Upload NGINX configuration with njs + +Create an njs script file by uploading a gzipped tar file or create the script file in the editor. See [NGINX Configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md" >}}) for a step-by-step guide. + +{{}}If specifying an absolute file path as your njs script's `File path`, see the [NGINX Filesystem Restrictions table]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/overview/#nginx-filesystem-restrictions" >}}) for the allowed directories the file can be written to.{{}} + +Switch between the language options to see syntax highlighting for NGINX configs or JavaScript. + +To use njs, enable the `ngx_http_js_module` module and specify the `js_import` directive with your njs file. + +```nginx +load_module modules/ngx_http_js_module.so; + +http { + js_import http.js; + + server { + location / { + js_content http.hello; + } + } +} +``` + +## njs validation + +NGINXaaS will not parse, evaluate, or run any provided njs scripts when validating the NGINX configuration. [Enable logging]({{< relref "/nginxaas-azure/monitoring/enable-logging/" >}}) to monitor errors caused by njs scripts. + +## "fs" module + +The njs [File System module](http://nginx.org/en/docs/njs/reference.html#njs_api_fs) provides operations with files. NGINXaaS only allows reading and writing from [specified directories]({{< relref "nginx-configuration.md#nginx-process-restrictions" >}}). diff --git a/content/nginxaas-azure/quickstart/rate-limiting.md b/content/nginxaas-azure/quickstart/rate-limiting.md new file mode 100644 index 000000000..80f8d908c --- /dev/null +++ b/content/nginxaas-azure/quickstart/rate-limiting.md @@ -0,0 +1,30 @@ +--- +title: "Enable rate limiting" +weight: 300 +categories: ["tasks"] +toc: true +docs: "DOCS-899" +url: /nginxaas/azure/quickstart/rate-limiting/ +--- + +F5 NGINX as a Service for Azure (NGINXaaS) supports rate limiting using the [ngx_http_limit_req_module](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html) module to limit the processing rate of requests. For more information on rate limiting with NGINX, see [NGINX Limiting Access to Proxied HTTP Resources](https://docs.nginx.com/nginx/admin-guide/security-controls/controlling-access-proxied-http/) and [Rate Limiting with NGINX and NGINX Plus](https://www.nginx.com/blog/rate-limiting-nginx/). 
+
+## Configuring basic rate limiting
+
+```nginx
+http {
+ #...
+
+ limit_req_zone $binary_remote_addr zone=mylimit:10m rate=1r/s;
+
+ server {
+ #...
+
+ location /login/ {
+ limit_req zone=mylimit;
+ }
+ }
+}
+```
+
+{{}}As a prerequisite to using the `sync` parameter with the `limit_req_zone` directive for rate limiting, enable [Runtime State Sharing with NGINXaaS for Azure]({{< relref "/nginxaas-azure/quickstart/runtime-state-sharing.md" >}}).{{}}
diff --git a/content/nginxaas-azure/quickstart/recreate.md b/content/nginxaas-azure/quickstart/recreate.md
new file mode 100644
index 000000000..62d322cb3
--- /dev/null
+++ b/content/nginxaas-azure/quickstart/recreate.md
@@ -0,0 +1,66 @@
+---
+title: "Recreating a deployment"
+weight: 500
+categories: ["tasks"]
+toc: true
+docs: "DOCS-1378"
+url: /nginxaas/azure/quickstart/recreate/
+---
+
+Learn how to recreate an existing F5 NGINX as a Service for Azure (NGINXaaS) deployment using an Azure Resource Manager (ARM) template.
+
+There are two ways to replicate a current NGINXaaS for Azure deployment using ARM templates. You can either delete and recreate the deployment, or you can update the DNS to smoothly transition to the new deployment.
+
+## Prerequisites
+
+- [Azure CLI Installation](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli)
+- If you are using the Azure CLI for template deployment, you need to be logged in to your Azure account; see [Azure CLI Authentication](https://learn.microsoft.com/en-us/cli/azure/authenticate-azure-cli)
+
+## Export ARM template from existing deployment
+
+To export an ARM template for an existing deployment:
+
+1. Navigate to your existing NGINXaaS deployment.
+1. Select **Export template** under **Automation** in the left menu.
+1. Wait for the template to generate.
+1. Select **Download**.
+
+## Delete and recreate strategy
+
+The simplest method to recreate a deployment is to delete the original deployment and then recreate it using the ARM template.
+
+The ARM template generated through the portal will include the VNET and public IP address used by the original deployment as dependencies. If you plan to change these for the new deployment, modify these entries in the template.
+
+To recreate the deployment:
+
+1. Export the template as instructed above.
+1. Modify the template if necessary, and verify the data for accuracy.
+1. Delete the original deployment.
+1. Use the exported ARM template to recreate the deployment using the Azure CLI:
+
+```bash
+az deployment group create \
+ --subscription= \
+ --resource-group= \
+ --template-file=
+```
+
+## DNS migration strategy
+
+If you control the DNS associated with the deployment's frontend and have flexibility over the IP address NGINXaaS uses, you can recreate a deployment with no downtime.
+
+1. Export the template, as instructed above.
+1. If you're using a public IP, create a new public IP resource (see the sketch after this list). If you're using a private IP address, select a new IP address from your VNET.
+1. Modify the ARM template to change the NGINXaaS deployment name and reference the new IP address.
+1. Use the Azure CLI as above to create a new deployment.
+1. Update DNS to refer to the new deployment's IP address.
+1. Monitor metrics of the old deployment to watch for requests dropping off as clients use the new IP address from DNS. Note that depending on your DNS settings this could take from a few minutes to a few days.
+1. Delete the old deployment.
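For step 2 of the DNS migration, a new public IP address can be created ahead of time with the Azure CLI and then referenced from the modified ARM template. This is a minimal sketch, assuming a Standard SKU static IP; the resource group and name are placeholders for your environment:

```bash
# Create a new static public IP to reference from the updated ARM template.
# The resource group and name below are placeholders.
az network public-ip create \
  --resource-group myNginxGroup \
  --name myNginxNewPublicIP \
  --sku Standard \
  --allocation-method Static
```

Once the new deployment is created with this IP address, DNS can be switched over and the old deployment monitored before deletion.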
+ +Remember to change your configuration on any firewall or Network Security Group associated with the deployment to allow access to your deployment's new IP address. + +## Further reading + +See the [Azure CLI Deployment Create](https://learn.microsoft.com/en-us/cli/azure/nginx/deployment#az-nginx-deployment-create) documentation for example commands to create deployment resources. + +You can find example code to manage NGINXaaS deployments and related objects in the NGINXaaS GitHub repository, [NGINXaaS Snippets](https://github.com/nginxinc/nginxaas-for-azure-snippets). diff --git a/content/nginxaas-azure/quickstart/runtime-state-sharing.md b/content/nginxaas-azure/quickstart/runtime-state-sharing.md new file mode 100644 index 000000000..1d880a4f5 --- /dev/null +++ b/content/nginxaas-azure/quickstart/runtime-state-sharing.md @@ -0,0 +1,115 @@ +--- +title: "Runtime State Sharing" +weight: 100 +categories: ["tasks"] +toc: true +docs: "DOCS-1499" +url: /nginxaas/azure/quickstart/runtime-state-sharing/ +--- + +F5 NGINX as a Service for Azure (NGINXaaS) supports runtime state sharing using the [Zone Synchronization module](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html) to synchronize shared memory zones across NGINXaaS instances. + +With runtime state sharing, NGINXaaS instances can share some state data between them, including: + +- [Sticky‑learn session persistence](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#sticky_learn) +- [Rate limiting](https://nginx.org/en/docs/http/ngx_http_limit_req_module.html#limit_req_zone) +- [Key‑value store](https://nginx.org/en/docs/http/ngx_http_keyval_module.html#keyval_zone) + +{{}}`sync` parameter with a directive describing shared memory zones, cannot be added to an existing memory zone that was not configured to sync and cannot be removed from an existing memory zone that was configured to sync. To switch, consider removing the directive before reapplying it with the desired parameters.{{}} + +For information on enabling synchronization for rate limiting with NGINXaaS for Azure, please visit the [Rate Limiting]({{< relref "/nginxaas-azure/quickstart/rate-limiting.md" >}}) documentation. + +## Configuring runtime state sharing among NGINXaaS for Azure deployment cluster instances + +To enable runtime state sharing, edit the NGINXaaS deployment's NGINX configuration to create a server with the `zone_sync` directive in the top-level `stream` block. The `stream` `server` block containing the `zone_sync` directive should use a local resolver at `127.0.0.1:49153` and provide a `listen` directive with only a port for the TCP server. The chosen port should match the port used with `zone_sync_server` directive. NGINXaaS cluster instances should be identified using domain name `internal.nginxaas.nginx.com` and resolved using `resolve` parameter of the `zone_sync_server` directive. + +```nginx +stream { + resolver 127.0.0.1:49153 valid=20s; + + server { + listen 9000; # should match the port specified with zone_sync_server + + zone_sync; + zone_sync_server internal.nginxaas.nginx.com:9000 resolve; + } +} +``` + +{{}}To enhance security, set up security rules for both incoming and outgoing traffic in the virtual network linked to the NSG of the subnet hosting NGINXaaS for Azure deployment. 
These rules should limit TCP traffic to the `zone_sync_server` port.{{}} + +## Enable the SSL/TLS protocol for connections to another cluster instance of the NGINXaaS for Azure deployment + + To allow SSL connections between cluster instances, edit the NGINXaaS deployment's NGINX configuration to enable the `zone_sync_ssl` directive along with `zone_sync` directive in the top-level `stream` block. The `stream` `server` block containing the `zone_sync_ssl` directive should specify the `ssl` parameter with the `listen` directive for the TCP server. `ssl_certificate` and `ssl_certificate_key` directives can reference a Key Vault certificate attached to the deployment. + +```nginx +stream { + resolver 127.0.0.1:49153 valid=20s; + + server { + listen 9000 ssl; + + ssl_certificate /opt/ssl/server.crt; + ssl_certificate_key /opt/ssl/server.key; + + zone_sync; + zone_sync_server internal.nginxaas.nginx.com:9000 resolve; + zone_sync_ssl on; + } +} +``` + +## Enable verification of certificate of another cluster instance of the NGINXaaS for Azure deployment + +To enable verification of the cluster instance certificate edit the NGINXaaS deployment's NGINX configuration to enable the `zone_sync_ssl_verify` directive along with `zone_sync` directive in the top-level `stream` block and provide the `zone_sync_ssl_trusted_certificate` directive. `zone_sync_ssl_trusted_certificate` directive can reference a Key Vault certificate attached to the deployment. The `zone_sync_ssl_name` directive if used, should provide the `name` parameter as `internal.nginxaas.nginx.com`. + +```nginx +stream { + resolver 127.0.0.1:49153 valid=20s; + + server { + listen 9000 ssl; + + ssl_certificate /opt/ssl/server.crt; + ssl_certificate_key /opt/ssl/server.key; + + zone_sync; + zone_sync_server internal.nginxaas.nginx.com:9000 resolve; + + zone_sync_ssl on; + zone_sync_ssl_verify on; + zone_sync_ssl_trusted_certificate /opt/ssl/server_ca.pem; + } +} +``` + +## Set up certificate-based authentication across cluster instances of the NGINXaaS for Azure deployment + +To set up certificate-based authentication across the cluster instances edit the NGINXaaS deployment's NGINX configuration to enable the `ssl_verify_client` directive along with `zone_sync` directive in the top-level `stream` block and provide the `ssl_client_certificate` directive. `zone_sync_ssl_certificate`, `zone_sync_ssl_certificate_key` and `ssl_client_certificate` directives can reference a Key Vault certificate attached to the deployment. 
+
+```nginx
+stream {
+ resolver 127.0.0.1:49153 valid=20s;
+
+ server {
+ listen 9000 ssl;
+
+ ssl_certificate /opt/ssl/zone_sync.crt;
+ ssl_certificate_key /opt/ssl/zone_sync.key;
+ ssl_verify_client on;
+ ssl_client_certificate /opt/ssl/zone_sync_ca.pem;
+
+ zone_sync;
+ zone_sync_server internal.nginxaas.nginx.com:9000 resolve;
+
+ zone_sync_ssl on;
+ zone_sync_ssl_verify on;
+ zone_sync_ssl_trusted_certificate /opt/ssl/zone_sync_ca.pem;
+
+ zone_sync_ssl_certificate /opt/ssl/zone_sync.crt;
+ zone_sync_ssl_certificate_key /opt/ssl/zone_sync.key;
+ }
+}
+```
+
+Refer to [Runtime State Sharing](https://docs.nginx.com/nginx/admin-guide/high-availability/zone_sync/) for guidance on using other directives from the [Zone Synchronization module](https://nginx.org/en/docs/stream/ngx_stream_zone_sync_module.html).
diff --git a/content/nginxaas-azure/quickstart/scaling.md b/content/nginxaas-azure/quickstart/scaling.md
new file mode 100644
index 000000000..bcdd955ec
--- /dev/null
+++ b/content/nginxaas-azure/quickstart/scaling.md
@@ -0,0 +1,136 @@
+---
+title: "Scaling guidance"
+weight: 100
+categories: ["tasks"]
+toc: true
+docs: "DOCS-989"
+url: /nginxaas/azure/quickstart/scaling/
+---
+
+F5 NGINX as a Service for Azure (NGINXaaS) supports manual and automatic scaling of your deployment, allowing you to adapt to application traffic demands while controlling cost.
+
+{{}}Scaling requires the Standard plan.{{}}
+
+An NGINXaaS deployment can be scaled out to increase the capacity (increasing the cost) or scaled in to decrease the capacity (reducing the cost). Capacity is measured in [NGINX Capacity Units (NCU)](#nginx-capacity-unit-ncu).
+
+In this document you will learn:
+
+* What an NGINX Capacity Unit (NCU) is
+* How to manually scale your deployment
+* How to enable autoscaling on your deployment
+* What capacity restrictions apply to your Marketplace plan
+* How to monitor capacity usage
+* How to estimate the amount of capacity to provision
+
+## NGINX Capacity Unit (NCU)
+
+{{< include "/nginxaas-azure/ncu-description.md" >}}
+
+## Manual scaling
+
+To update the capacity of your deployment using the Azure Portal:
+
+ 1. Select **NGINXaaS scaling** in the left menu.
+ 1. Select `Manual`.
+ 1. Set the desired number of NCUs.
+ 1. Click **Submit** to update your deployment.
+
+## Autoscaling
+
+With autoscaling enabled, the size of your NGINXaaS deployment will automatically adjust based on traffic requirements without the need to guess how many NCUs to provision. You must specify a minimum and maximum NCU count. NGINXaaS will maintain the size of the deployment, ensuring the number of provisioned NCUs does not fall below the set minimum NCUs and does not grow beyond the maximum NCUs. Refer to the [Capacity Restrictions](#capacity-restrictions) when setting the minimum and maximum capacity.
+
+When creating a new NGINXaaS deployment with autoscaling enabled, the initial size of the deployment will match the minimum NCU count.
+
+To enable autoscaling using the Azure Portal:
+
+ 1. Select **NGINXaaS scaling** in the left menu.
+ 1. Select `Autoscale`.
+ 1. Specify the minimum and maximum NCU count.
+ 1. Click **Submit** to enable NGINXaaS deployment autoscaling.
+
+### Scaling rules
+
+NGINXaaS automatically adjusts the number of NCUs based on "scaling rules." A scaling rule defines when to scale, what direction to scale, and how much to scale. NGINXaaS will evaluate the following scaling rules, in order, based on consumed and provisioned NCU metrics.
+
+ - *Moderate Increase Rule*: Over the last 5 minutes, if the average consumed NCUs is greater than or equal to 70% of the average provisioned NCUs, increase capacity by 20%.
+ - *Urgent Increase Rule*: Over the last minute, if the number of consumed NCUs is greater than or equal to 85% of the number of provisioned NCUs, increase capacity by 20%.
+ - *Decrease Rule*: Over the last 10 minutes, if the average consumed NCUs is less than or equal to 60% of the average provisioned NCUs, decrease capacity by 10%.
+
+To avoid creating a loop between scaling rules, NGINXaaS will not apply a scaling rule if it predicts that doing so would immediately trigger an opposing rule. For example, if the "Urgent Increase Rule" is triggered due to a sudden spike in traffic, but the new capacity will cause the "Decrease Rule" to trigger immediately after, the autoscaler will not increase capacity. This prevents the deployment's capacity from increasing and decreasing erratically.
+
+## Capacity restrictions
+
+The following table outlines constraints on the specified capacity based on the chosen Marketplace plan, including the minimum capacity required for a deployment to be highly available, the maximum capacity, and what value the capacity must be a multiple of. By default, an NGINXaaS for Azure deployment will be created with the corresponding minimum capacity.
+
+{{}}
+| **Marketplace Plan** | **Minimum Capacity (NCUs)** | **Maximum Capacity (NCUs)** | **Multiple of** |
+|------------------------------|-----------------------------|-----------------------------|----------------------------|
+| Standard | 10 | 500 | 10 |
+{{}}
+
+{{< note >}}If you need a higher maximum capacity, please [open a request](https://my.f5.com/manage/s/) and specify the Resource ID of your NGINXaaS deployment, the region, and the desired maximum capacity you wish to scale to.{{< /note >}}
+
+## Connection processing methods restrictions
+
+- NGINXaaS only supports the `epoll` connection processing method when using the `use` directive, as NGINXaaS is based on Linux.
+
+## Metrics
+
+NGINXaaS provides metrics for visibility into current and historical capacity values. These metrics, in the `NGINXaaS Statistics` namespace, include:
+
+* NCUs Requested: `ncu.requested` -- how many NCUs have been requested using the API. This is the goal state of the system at that point in time.
+* NCUs Provisioned: `ncu.provisioned` -- how many NCUs have been successfully provisioned by the service.
+ * This is the basis for [billing]({{< relref "/nginxaas-azure/billing/overview.md" >}}).
+ * This may differ from `ncu.requested` temporarily during scale-out/scale-in events or during automatic remediation for a hardware failure.
+* NCUs Consumed: `ncu.consumed` -- how many NCUs the current workload is using.
+ * If this is under 60% of the provisioned capacity, consider scaling in to reduce costs. If this is over 70% of the provisioned capacity, consider scaling out; otherwise, requests may fail or take longer than expected. Alternatively, enable autoscaling, so your deployment can automatically scale based on the consumed and provisioned capacity.
+ * This value may burst higher than `ncu.requested` due to variation in provisioned hardware. You will still only be billed for the minimum of `ncu.requested` and `ncu.provisioned`.
+
+See the [Metrics Catalog]({{< relref "/nginxaas-azure/monitoring/metrics-catalog.md" >}}) for a reference of all metrics.
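To check these values outside the Azure portal, the same metrics can be queried with the Azure CLI. This is a sketch, assuming the deployment's full resource ID is stored in `$nginxId` and that the metric namespace matches the `nginxaas statistics` name shown in the portal:

```bash
# Average consumed NCUs, sampled at 5-minute intervals (the CLI defaults to the last hour).
# Run again with --metric "ncu.provisioned" to compare against provisioned capacity.
az monitor metrics list \
  --resource "$nginxId" \
  --namespace "nginxaas statistics" \
  --metric "ncu.consumed" \
  --interval PT5M \
  --aggregation Average
```

Comparing consumed and provisioned NCUs over time indicates whether to scale in, scale out, or rely on autoscaling.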
+ +{{< note >}}These metrics aren't visible unless enabled, see how to [Enable Monitoring]({{< relref "/nginxaas-azure/monitoring/enable-monitoring.md" >}}) for details.{{< /note >}} + +## Estimating how many NCUs to provision + +To calculate how many NCUs to provision, take the highest value across the parameters that make up an NCU: + +* CPU +* Bandwidth +* Concurrent connections + +Example 1: "I need to support 2,000 concurrent connections but only 4 Mbps of traffic. I need 52 ACUs." You would need `Max(52/20, 4/60, 2000/400)` = `Max(2.6, 0.07, 5)` = At least 5 NCUs. + +Example 2: "I don't know any of these yet!" Either start with the minimum and [adjust capacity](#adjusting-capacity) with the [iterative approach](#iterative-approach) described below, or [enable autoscaling](#autoscaling). + +In addition to the maximum capacity needed, we recommend adding a 10% to 20% buffer of additional NCUs to account for unexpected spikes in traffic. Monitor the [NCUs Consumed metric](#metrics) over time to determine your peak usage levels and adjust your requested capacity accordingly. + +### Iterative approach + +1. Make an estimate by either: + * using the [Usage and Cost Estimator]({{< relref "/nginxaas-azure/billing/usage-and-cost-estimator.md" >}}) + * compare to a [reference workload](#reference-workloads) +2. Observe the `ncu.consumed` [metric](#metrics) in Azure Monitor of your workload +3. Decide what headroom factor you wish to have +4. Multiply the headroom factor by the consumed NCUs to get the target NCUs. +5. [Adjust capacity](#adjusting-capacity) to the target NCUs +6. repeat from step 2 -- it is always good to check back after making a change + +*Example*: + +1. I am really unsure what size I needed so I just specified the default capacity, `20NCUs`. +2. I observe that my `ncu.consumed` is currently at `18NCUs`. +3. This is early morning, traffic. I think midday traffic could be 3x what it is now. +4. `18 * 3 = 54` is my target capacity. +5. I can see that I need to scale by multiples of 10 so I'm going to scale out to `60NCUs`. +6. At midday I can see that I overestimated the traffic I would be getting and it was still a busy day. We peaked at `41NCUs`, let me scale in to `50NCUs` to reduce my cost. + +### Reference workloads + +These reference workloads were all measured with a simplistic NGINX config proxying requests to an upstream. Keepalive between NGINX and upstream is enabled. Minimal request matching or manipulation is done. 
+ +| **TLS?** | **Conn/s** | **Req/s** | **Response Size** | **Throughput** | **NCU** | +|:--------:|-----------:|----------:|------------------:|---------------:|--------:| +| no | 12830 | 13430 | 0KB | 23Mbps | 18.8 | +| no | 12080 | 13046 | 1KB | 125Mbps | 19 | +| no | 12215 | 12215 | 10KB | 953Mbps | 21 | +| no | 1960 | 1690 | 100KB | 1295Mbps | 23.6 | diff --git a/content/nginxaas-azure/quickstart/security-controls/_index.md b/content/nginxaas-azure/quickstart/security-controls/_index.md new file mode 100644 index 000000000..939fd72da --- /dev/null +++ b/content/nginxaas-azure/quickstart/security-controls/_index.md @@ -0,0 +1,9 @@ +--- +title: Security controls +weight: 500 +url: /nginxaas/azure/quicksart/security-controls/ +toc: true +menu: + docs: + parent: NGINXaaS for Azure +--- \ No newline at end of file diff --git a/content/nginxaas-azure/quickstart/security-controls/auth-basic.md b/content/nginxaas-azure/quickstart/security-controls/auth-basic.md new file mode 100644 index 000000000..add0a1d6b --- /dev/null +++ b/content/nginxaas-azure/quickstart/security-controls/auth-basic.md @@ -0,0 +1,37 @@ +--- +title: Restricting access with HTTP basic authentication +weight: 100 +categories: [tasks] +toc: true +docs: "DOCS-990" +url: /nginxaas/azure/security-controls/auth-basic/ +--- + +You can restrict access to resources by implementing username/password authentication using the "HTTP Basic Authentication" protocol. + +For more information on configuring HTTP Basic Authentication please refer to the [NGINX Plus Restricting Access with HTTP Basic Authentication](https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-http-basic-authentication/) documentation. + +## Uploading a password file + +F5 NGINX as a Service for Azure (NGINXaaS) accepts a file containing usernames and passwords using any of the password types specified in the [NGINX documentation](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic_user_file). The password file can be uploaded as a "protected file" when creating or updating your NGINX configuration to protect the file's contents from being read. The password file can alternatively be uploaded as a regular file. + +{{< img src="nginxaas-azure/auth-basic-htpasswd.png" alt="Screenshot of the Azure portal showing the password file upload" >}} + +## Configuring NGINX Plus for HTTP basic authentication + +Inside the location or server you are protecting, specify the [`auth_basic`](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic) directive giving a name to the password-protected area. Specify the [`auth_basic_user_file`](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html#auth_basic_user_file) directive referencing the password file. + +```nginx +location /protected { + auth_basic "Protected Area"; + auth_basic_user_file /opt/.htpasswd; +} +``` + +Submit the NGINX configuration to apply it. You should be prompted to log in when you access the protected location or server. + +{{}}The NGINX worker processes will open the password file. 
You must place the password file in a [directory the worker processes are allowed to read]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md#nginx-filesystem-restrictions" >}}) or else all authenticated requests will fail.{{}} + +- `/opt` +- `/srv` +- `/var/www` diff --git a/content/nginxaas-azure/quickstart/security-controls/certificates.md b/content/nginxaas-azure/quickstart/security-controls/certificates.md new file mode 100644 index 000000000..98b587117 --- /dev/null +++ b/content/nginxaas-azure/quickstart/security-controls/certificates.md @@ -0,0 +1,200 @@ +--- +title: "Use a certificate from Azure Key Vault" +weight: 50 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/quickstart/security-controls/certificates/ +--- + +## Overview + +This tutorial walks through a complete example of using SSL/TLS certificates from Azure Key Vault in an F5 NGINX as a Service for Azure (NGINXaaS) deployment to secure traffic. In this guide, you will create all necessary resources to add a certificate to an NGINXaaS deployment using the [Azure portal](https://portal.azure.com/). + +## Create an Azure Key Vault (AKV) + +NGINXaaS enables customers to securely store SSL/TLS certificates in Azure Key Vault. If you do not have a key vault, follow these steps to create one: + +1. From the Azure portal menu, or from the **Home** page, select **Create a resource**. +1. In the Search box, enter **Key Vault** and select the **Key Vault** service. +1. Select **Create**. +1. On the Create a key vault **Basics** tab, provide the following information: + + {{}} + | Field | Description | + |---------------------------- | ---------------------------- | + | Subscription | Select the appropriate Azure subscription that you have access to. | + | Resource group | Specify whether you want to create a new resource group or use an existing one.
For more information, see [Azure Resource Group overview](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview). | + | Key vault name | Provide a unique name for your key vault. For this tutorial, we use `nginxaas-kv`. | + | Region | Select the region you want to deploy to. | + {{
}}
+
+ For all other fields, you can leave them set to the default values.
+1. Select **Review + Create** and then **Create**.
+
+## Create an NGINXaaS deployment
+
+If you do not have an NGINXaaS deployment, follow the steps in [Deploy using the Azure portal]({{< relref "/nginxaas-azure/getting-started/create-deployment/deploy-azure-portal.md" >}}).
+
+{{}} Your NGINXaaS deployment and your key vault must be in the same subscription. {{}}
+
+## Add an SSL/TLS certificate to your key vault
+
+Next, you can add an SSL/TLS certificate to your key vault by following [Azure's documentation to import an existing certificate](https://learn.microsoft.com/en-us/azure/key-vault/certificates/tutorial-import-certificate?tabs=azure-portal), or you can generate a certificate. This tutorial will generate a self-signed certificate to quickly get started.
+
+1. Go to your key vault, `nginxaas-kv`.
+1. Select **Certificates** in the left menu.
+1. Select {{< fa "plus">}}**Generate/Import** and provide the following information:
+
+ {{}}
+ | Field | Description |
+ |---------------------------- | ---------------------------- |
+ | Method of Certificate Creation | Select **Generate**. |
+ | Certificate Name | Provide a unique name for your certificate. For this tutorial, we use `nginxaas-cert`. |
+ | Type of Certificate Authority (CA) | Select **Self-signed certificate**. |
+ | CN | Provide the IP address of your NGINXaaS deployment as the CN. For example, `CN=135.237.74.224` |
+ {{}}
+
+ For all other fields, you can leave them set to the default values.
+
+1. Select **Create**.
+
+## Assign a managed identity to your NGINXaaS deployment
+
+In order for your NGINXaaS deployment to access your key vault, it must have an assigned managed identity with the `Key Vault Secrets User` role. For more information, see [Assign Managed Identities]({{< relref "/nginxaas-azure/getting-started/managed-identity-portal.md" >}}) and [Prerequisites for adding SSL/TLS certificates]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md#prerequisites" >}}).
+
+1. Go to your NGINXaaS deployment.
+1. Select **Identity** in the left menu.
+1. Under **System assigned**, ensure the status is set to "On".
+ {{}} When you create a deployment through the Azure portal, a system-assigned managed identity is automatically enabled for your deployment. {{}}
+1. Under **System assigned**, select **Azure role assignments**.
+1. Select {{< fa "plus">}}**Add role assignment** and provide the following information:
+
+ {{}}
+ | Field | Description |
+ |---------------------------- | ---------------------------- |
+ | Scope | Select **Key Vault**. |
+ | Subscription | Select the Azure subscription your key vault is in. |
+ | Resource | Select your key vault, `nginxaas-kv`. |
+ | Role | Select **Key Vault Secrets User**. |
+ {{}}
+
+1. Select **Save**.
+
+## Add your certificate to your NGINXaaS deployment
+
+Now, you can add your SSL/TLS certificate from your key vault to your NGINXaaS deployment. For more information, see [Add certificates using the Azure portal]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md">}}).
+
+1. Go to your NGINXaaS deployment.
+1. Select **NGINX certificates** in the left menu.
+1. Select {{< fa "plus">}}**Add certificate** and provide the following information:
+ {{}}
+ | Field | Description |
+ |---------------------------- | ---------------------------- |
+ | Name | A unique name for the certificate.
For this tutorial, we use `my-cert`. | + | Certificate path | Set to `/etc/nginx/ssl/example.crt`. | + | Key path | Set to `/etc/nginx/ssl/example.key`. | + {{}} + +1. Select **Select certificate** and provide the following information: + + {{}} + | Field | Description | + |----------------------- | ---------------------------- | + | Key vault | Select `nginxaas-kv`. | + | Certificate | Select `nginxaas-cert`. | + {{}} + +1. Select **Add certificate**. + +## Reference your certificate in your NGINX configuration + +Once a certificate has been added to your NGINXaaS deployment, you can reference it in your NGINX configuration to secure traffic. Refer to [Upload an NGINX configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/overview.md">}}) to add and update NGINX configuration files to your NGINXaaS deployment. The following NGINX configurations show examples of different certificate use cases. + +### Use case 1: SSL/TLS termination + +NGINXaaS supports SSL/TLS termination by decrypting incoming encrypted traffic before forwarding it on to your upstream servers. + +```nginx +http { + upstream backend { + server backend1.example.com:8000; # replace with your backend server address and port + } + + server { + listen 443 ssl; + + ssl_certificate /etc/nginx/ssl/example.crt; # must match the Certificate path + ssl_certificate_key /etc/nginx/ssl/example.key; # must match the Key path + + location / { + proxy_pass http://backend; + } + } +} +``` + +For more information on using NGINX for SSL/TLS termination, see [NGINX SSL Termination](https://docs.nginx.com/nginx/admin-guide/security-controls/terminating-ssl-http/). + +### Use case 2: Securing traffic to upstream servers + +NGINXaaS supports backend encryption by encrypting traffic between your NGINXaaS deployment and your upstream servers. + +```nginx +http { + upstream backend { + server backend1.example.com:8443; # replace with your backend server address and port + } + + server { + listen 80; + + location / { + proxy_pass https://backend; + proxy_ssl_certificate /etc/nginx/ssl/client.crt; # must match the Certificate path + proxy_ssl_certificate_key /etc/nginx/ssl/client.key; # must match the Key path + } + } +} +``` + +For more information on using NGINX to secure traffic to upstream servers, refer to [Securing HTTP Traffic to Upstream Servers](https://docs.nginx.com/nginx/admin-guide/security-controls/securing-http-traffic-upstream/) and [Securing TCP Traffic to Upstream Servers](https://docs.nginx.com/nginx/admin-guide/security-controls/securing-tcp-traffic-upstream/). + +## Configure Network Security Perimeter (NSP) + +If you want to disable public access to your key vault, you can configure a [Network Security Perimeter (NSP)](https://learn.microsoft.com/en-us/azure/private-link/network-security-perimeter-concepts). This will allow you to configure access rules to allow NGINXaaS to fetch certificates from your key vault while ensuring all other public access is denied. + +{{}} Network Security Perimeter is currently in public preview. Refer to [Azure's NSP documentation](https://learn.microsoft.com/en-us/azure/private-link/network-security-perimeter-concepts) for details on its current capabilities. {{}} + +1. Follow [Azure's documentation on prerequisites](https://learn.microsoft.com/en-us/azure/private-link/create-network-security-perimeter-portal#prerequisites) to ensure you are registed to create an NSP. +1. 
In the Search box, enter **Network Security Perimeters** and select **Network Security Perimeters** from the search results. +1. Select {{< fa "plus">}}**Create**. +1. In the **Basics** tab, provide the following information: + {{}} + | Field | Description | + |---------------------------- | ---------------------------- | + | Subscription | Select the appropriate Azure subscription that you have access to. | + | Resource group | Specify whether you want to create a new resource group or use an existing one.
For more information, see [Azure Resource Group overview](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview). | + | Name | Provide a unique name for your network security perimeter. For this tutorial, we use `nginxaas-nsp`. | + | Region | Select the region you want to deploy to. Refer to any [regional limitations](https://learn.microsoft.com/en-us/azure/private-link/network-security-perimeter-concepts#regional-limitations) NSP has while in public preview. | + | Profile name | Leave the profile name as the default `defaultProfile`. | + {{
}} +1. In the **Resources** tab, select {{< fa "plus">}}**Add**. +1. Search for your key vault, `nginxaas-kv`, select it, and click **Select**. +1. In the **Inbound access rules** tab, select {{< fa "plus">}}**Add** and provide the following information: + {{}} + | Field | Description | + |---------------------------- | ---------------------------- | + | Rule Name | Set to `allow-nginxaas-deployment-sub`. | + | Source Type | Select **Subscriptions**. | + | Allowed sources | Select the subscription of your NGINXaaS deployment. | + {{}} +1. Select **Review + Create** and then **Create**. + +By default, the key vault will be associated to the NSP in [Learning mode](https://learn.microsoft.com/en-us/azure/private-link/network-security-perimeter-concepts#access-modes-in-network-security-perimeter). This means traffic will be evaluated first based on the NSP's access rules. If no rules apply, evaluation will fall back to the key vault's firewall configuration. To fully secure public access, it is reccommended to [transition to Enforced mode](https://learn.microsoft.com/en-us/azure/private-link/network-security-perimeter-transition#transition-to-enforced-mode-for-existing-resources). + +1. Go to resource `nginxaas-nsp`. +1. Select **Associated resources** in the left menu. +1. Select the `nginxaas-kv` resource association. +1. Select **Change access mode**, set to **Enforced**, and select **Apply**. + +{{}} If you are using the Azure portal to add certificates, you will also need to add an inbound access rule to allow your IP address, so the portal can list the certificates in your key vault. {{}} \ No newline at end of file diff --git a/content/nginxaas-azure/quickstart/security-controls/jwt.md b/content/nginxaas-azure/quickstart/security-controls/jwt.md new file mode 100644 index 000000000..a7f027859 --- /dev/null +++ b/content/nginxaas-azure/quickstart/security-controls/jwt.md @@ -0,0 +1,78 @@ +--- +title: Setting up JWT authentication +weight: 200 +categories: [tasks] +toc: true +docs: "DOCS-1101" +url: /nginxaas/azure/quickstart/security-controls/jwt/ +--- + +F5 NGINX as a Service for Azure (NGINXaaS) provides the option to control access to your resources using JWT authentication. With JWT authentication, a client provides a JSON Web Token, and the token will be validated against a local key file or a remote service. This document will explain how to validate tokens using Microsoft Entra as the remote service. + +For more information on JWT authentication with NGINX+, please refer to [ngx_http_auth_jwt_module](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html) and [NGINX Plus Setting up JWT Authentication](https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-jwt-authentication/). + +## Prerequisites + +- Set up a tenant, see [Quickstart: Set up a tenant](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-create-new-tenant). +- URL of the remote service (IdP). In this documentation, we are using Microsoft Entra Signing Keys URL: `https://login.microsoftonline.com/common/discovery/keys`. For more information, see [Signing key rollover in the Microsoft identity platform](https://learn.microsoft.com/en-us/entra/identity-platform/signing-key-rollover). + +## Configuring NGINX for JWT authentication + +1. To configure NGINX to use JWT for authentication, you will need to create a JWT that will be issued to a client. You can use your identity provider (IdP) or your own service to create JWTs. 
For testing purposes, you can create your own JWT, see [Get Microsoft Entra tokens for users by using MSAL](https://learn.microsoft.com/en-us/azure/databricks/dev-tools/app-aad-token) for details. If you wish to use your own local JSON Web Key (JWK) file for authentication, please upload it along with the NGINX configuration. Remember to respect the instance's filesystem restrictions and specify a location for the key file within one of the allowed directories. For details on uploading the configuration and file system restrictions, see [Upload an NGINX Configuration](https://docs.nginx.com/nginxaas/azure/getting-started/nginx-configuration/). + +2. Set up an NGINX `location` block that enables the JWT authentication and defines the authentication realm ("API" in the example) with the [auth_jwt](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt) directive. To verify the signature or decrypt the content of JWT, you will need to specify the JWT type using the [auth_jwt_type](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_type) directive, and provide the path to the corresponding JSON Web Key (JWK) file using the [auth_jwt_key_file](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_file) and/or [auth_jwt_key_request](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_request) directives. Specifying both directives simultaneously will allow you to specify more than one key source. If no directives are specified, JWS signature verification will be skipped. + +```nginx +server { + listen 80; + + location / { + auth_jwt "API"; + auth_jwt_key_file /srv/key.jwk; + auth_jwt_key_request /_jwks_uri; + } + + location = /_jwks_uri { + proxy_pass https://login.microsoftonline.com/common/discovery/keys; + subrequest_output_buffer_size 12k; + } +} +``` + +{{}} +When using the common Microsoft Entra signing keys you will need to increase the size of the subrequest output buffer as the key file will not fit in the default buffer. +If the buffer is not sized properly, requests will result in empty responses. If [error logging is enabled]({{< relref "/nginxaas-azure/monitoring/enable-logging/" >}}), you will see an error in the error log.{{}} + +Enabling JWT key caching is recommended to achieve optimal performance. This can be done with the [auth_jwt_key_cache](https://nginx.org/en/docs/http/ngx_http_auth_jwt_module.html#auth_jwt_key_cache) directive. Note that caching of keys obtained from variables is not supported. If you are using Microsoft Entra as an identity provider for JWT authentication, please be aware that [keys are rotated frequently](https://learn.microsoft.com/en-us/entra/identity-platform/signing-key-rollover), and it is recommended to take that into consideration before using it as a static file or caching the response from the subrequest. 
+ +The full example of getting JWT authentication from a subrequest: + +```nginx +http { + upstream my_backend { + server 10.0.0.1; + server 10.0.0.2; + } + + server { + listen 80; + + location / { + auth_jwt "API"; + auth_jwt_key_file conf/key.jwk; + auth_jwt_key_request /_jwks_uri; + auth_jwt_key_cache 1h; + proxy_pass http://my_backend; + } + + location = /_jwks_uri { + internal; + proxy_method GET; + proxy_pass https://login.microsoftonline.com/common/discovery/keys; + subrequest_output_buffer_size 12k; + } + } +} +``` + +To learn more about configuring JWT in more complex scenarios such as claims validation, see [Arbitrary JWT Claims Validation](https://docs.nginx.com/nginx/admin-guide/security-controls/configuring-jwt-authentication/#arbitrary-jwt-claims-validation). diff --git a/content/nginxaas-azure/quickstart/security-controls/oidc.md b/content/nginxaas-azure/quickstart/security-controls/oidc.md new file mode 100644 index 000000000..ff100be42 --- /dev/null +++ b/content/nginxaas-azure/quickstart/security-controls/oidc.md @@ -0,0 +1,172 @@ +--- +title: Set up OIDC authentication +weight: 300 +categories: ["tasks"] +toc: true +docs: "DOCS-1646" +url: /nginxaas/azure/quickstart/security-controls/oidc/ +--- + +## Overview + +Learn how to configure F5 NGINX as a Service (NGINXaaS) for Azure with OpenID Connect (OIDC) authentication. + +## Prerequisites + +1. Configure an NGINXaaS deployment with [SSL/TLS certificates]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/" >}}). + +2. Enable [Runtime State Sharing]({{< relref "/nginxaas-azure/quickstart/runtime-state-sharing.md" >}}) on the NGINXaaS deployment. + +3. [Configure the IdP](https://github.com/nginxinc/nginx-openid-connect/blob/main/README.md#configuring-your-idp). For example, you can [register a Microsoft Entra Web application](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-register-app) as the IdP. + + +## Configure NGINX as a Service for Azure with IdP + +Configuring NGINXaaS for Azure with OIDC is similar as [Configuring NGINX Plus](https://github.com/nginxinc/nginx-openid-connect/blob/main/README.md#configuring-nginx-plus) in [nginx-openid-connect](https://github.com/nginxinc/nginx-openid-connect) but it also has its own specific configurations that must be completed to work normally. + +1. If your IdP supports OpenID Connect Discovery (usually at the URI /.well-known/openid-configuration), use the `configure.sh` script in [nginx-openid-connect](https://github.com/nginxinc/nginx-openid-connect) to complete the configuration. Otherwise, follow [Configuring NGINX Plus](https://github.com/nginxinc/nginx-openid-connect/blob/main/README.md#configuring-nginx-plus) to complete the configuration. + +2. Configure NGINXaaS with specific configurations: + - `openid_connect_configuration.conf`: + + a. Set a proper timeout value for `map $host $zone_sync_leeway`. + + ```nginx + map $host $zone_sync_leeway { + # Specifies the maximum timeout for synchronizing ID tokens between cluster + # nodes when you use shared memory zone content sync. This option is only + # recommended for scenarios where cluster nodes can randomly process + # requests from user agents and there may be a situation where node "A" + # successfully received a token, and node "B" receives the next request in + # less than zone_sync_interval. + default 2000; # Time in milliseconds, e.g. (zone_sync_interval * 2 * 1000) + } + ``` + + b. 
Set a proper path for `proxy_cache_path`, see [Enable content caching]({{< relref "basic-caching.md" >}}). + + ```nginx + proxy_cache_path /var/cache/nginx/jwt levels=1 keys_zone=jwk:64k max_size=1m; + ``` + + c. Enable `sync` for the keyval memory zones and specify the state files to persist the current state across NGINX restarts. The state file paths are subject to [NGINX Filesystem Restrictions table]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/overview/#nginx-filesystem-restrictions" >}}) and must be placed in a directory accessible to the NGINX worker processes. + + ```nginx + keyval_zone zone=oidc_id_tokens:1M state=/opt/oidc_id_tokens.json timeout=1h sync; + keyval_zone zone=oidc_access_tokens:1M state=/opt/oidc_access_tokens.json timeout=1h sync; + keyval_zone zone=refresh_tokens:1M state=/opt/refresh_tokens.json timeout=8h sync; + keyval_zone zone=oidc_pkce:128K timeout=90s sync; # Temporary storage for PKCE code verifier. + ``` + + - `openid_connect.server_conf`: + + Remove the `location /api/` block, since NGINXaaS for Azure currently restricts access to the `api` directive. + ```nginx + location /api/ { + api write=on; + allow 127.0.0.1; # Only the NGINX host may call the NGINX Plus API + deny all; + access_log off; + } + ``` + + - Modify the root config file `nginx.conf` properly with `frontend.conf` content: + + a. Add `load_module modules/ngx_http_js_module.so;` near the top of the root config file, if it doesn't exist. + + b. Add `include conf.d/openid_connect_configuration.conf;` in the http block before the server block. + +
+ Example of nginx.conf using the localhost as an upstream server + + ```nginx + load_module modules/ngx_http_js_module.so; + + http { + + # This is the backend application we are protecting with OpenID Connect + upstream my_backend { + zone my_backend 64k; + # Reuse the localhost as an upstream server + # Change to your real upstream server address if you have one + server 127.0.0.1; + } + + # A local server block representing the upstream server for testing only + # Remove if you have the real upstream servers + server { + listen 80; + default_type text/html; + return 200 'This is a site protected by OIDC!
\n'; + } + + # Custom log format to include the 'sub' claim in the REMOTE_USER field + log_format main_jwt '$remote_addr - $jwt_claim_sub [$time_local] "$request" $status ' + '$body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for"'; + + # The frontend server - reverse proxy with OpenID Connect authentication + # + include conf.d/openid_connect_configuration.conf; + server { + include conf.d/openid_connect.server_conf; # Authorization code flow and Relying Party processing + error_log /var/log/nginx/error.log debug; # Reduce severity level as required + + listen 443 ssl; # Use SSL/TLS in production + ssl_certificate /etc/nginx/ssl/my-cert.crt; + ssl_certificate_key /etc/nginx/ssl/my-cert.key; + + location / { + # This site is protected with OpenID Connect + auth_jwt "" token=$session_jwt; + error_page 401 = @do_oidc_flow; + + #auth_jwt_key_file $oidc_jwt_keyfile; # Enable when using filename + auth_jwt_key_request /_jwks_uri; # Enable when using URL + + # Successfully authenticated users are proxied to the backend, + # with 'sub' claim passed as HTTP header + proxy_set_header username $jwt_claim_sub; + + # Bearer token is uses to authorize NGINX to access protected backend + #proxy_set_header Authorization "Bearer $access_token"; + + # Intercept and redirect "401 Unauthorized" proxied responses to nginx + # for processing with the error_page directive. Necessary if Access Token + # can expire before ID Token. + #proxy_intercept_errors on; + + proxy_pass http://my_backend; # The backend site/app + + access_log /var/log/nginx/access.log main_jwt; + } + } + } + + stream { + # Add localhost resolver for internal clustering hostname with resolver metrics collection + resolver 127.0.0.1:49153 valid=20s status_zone=stream_resolver_zone1; + + server { + listen 9000; + zone_sync; + zone_sync_server internal.nginxaas.nginx.com:9000 resolve; + } + } + ``` +
+ +3. Upload the NGINX configurations. See [Upload an NGINX configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/" >}}) for more details. + +4. In a web browser, open `https:///`. The browser will be redirected to the IdP server. After a successful login with the credentials of an authorized user, the protected URI can be accessed. For example, using the `nginx.conf` in this guide, open `https:///` and complete the authentication. The browser will show: + + ```text + This is a site protected by OIDC! + ``` + +## Troubleshooting + +To troubleshoot OIDC issues, [enable NGINX logs]({{< relref "/nginxaas-azure/monitoring/enable-logging/" >}}) and follow the [Troubleshooting](https://github.com/nginxinc/nginx-openid-connect/tree/main?tab=readme-ov-file#troubleshooting) guidance in the nginx-openid-connect repository. + +## Monitoring + +[Enable monitoring]({{< relref "/nginxaas-azure/monitoring/enable-monitoring.md" >}}), see [real time monitoring](https://github.com/nginxinc/nginx-openid-connect/blob/main/README.md#real-time-monitoring) for how OIDC metrics are collected, and use the "plus.http.*" metrics filtered by the location_zone dimension in [NGINX requests and response statistics]({{< relref "/nginxaas-azure/monitoring/metrics-catalog.md#nginx-requests-and-response-statistics" >}}) to check the OIDC metrics. diff --git a/content/nginxaas-azure/quickstart/security-controls/private-link-to-upstreams.md new file mode 100644 index 000000000..484eb565e --- /dev/null +++ b/content/nginxaas-azure/quickstart/security-controls/private-link-to-upstreams.md @@ -0,0 +1,233 @@ +--- +title: "Connect to upstreams with Azure Private Link" +weight: 400 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/quickstart/security-controls/private-link-to-upstreams/ +--- + +[Azure Private Link](https://learn.microsoft.com/en-us/azure/private-link/private-link-overview) eliminates exposure to the public internet by handling traffic over Microsoft's backbone network. This is especially useful if your NGINXaaS deployment and your upstreams are in different virtual networks. + +{{}}Depending on your use case, we recommend using [virtual network peering](https://learn.microsoft.com/en-us/azure/virtual-network/virtual-network-peering-overview) instead of a Private Link service to maintain NGINX's load balancing capabilities.{{}} + +To set up a Private Link between your NGINXaaS deployment and your upstreams, you'll need two resources: + +- [Private Link service](https://learn.microsoft.com/en-us/azure/private-link/private-link-service-overview) +- [private endpoint](https://learn.microsoft.com/en-us/azure/private-link/private-endpoint-overview) + +## Create a Private Link service + +A Private Link service is an Azure resource that enables Private Link access to your application. If your upstream is an Azure PaaS service (for example, Azure Storage), then you do not need a Private Link service. To create a Private Link service, + +1. Configure your upstream to run behind a Standard Load Balancer. +1. Add load balancing rules for each upstream server port. +1. Create a Private Link service and attach it to the Standard Load Balancer. + +The following example demonstrates this process using an existing virtual machine as the upstream. +
+Create a Private Link service - Azure CLI + +### Prerequisites + +- Resource Group +- Virtual Network +- Subnet +- Workload Virtual Machine + +Please ensure the following environment variables are exported before copying the below Azure CLI commands. + +{{}} + | Name | Description | + |------------------ | ----------------- | + | APP_LOCATION | Location of the resource group + | APP_RESOURCE_GROUP | Name of the resource group the virtual machine is in | + | APP_VNET_NAME | Name of the virtual network the virtual machine is in | + | APP_SUBNET_NAME | Name of the subnet the virtual machine is in | + | APP_VM_NAME | Name of the workload virtual machine | + | APP_NIC_NAME | Name of the network interface of the virtual machine | + | APP_IP_CONFIG_NAME | Name of the IP configuration associated with the NIC | +{{}} + +### Create a load balancer + +```bash +$ az network lb create \ + --resource-group $APP_RESOURCE_GROUP \ + --name load-balancer \ + --sku Standard \ + --vnet-name $APP_VNET_NAME \ + --subnet $APP_SUBNET_NAME \ + --frontend-ip-name frontend \ + --backend-pool-name backend-pool +``` + +### Create health probes and load balancing rules + +Depending on your NGINX configuration, you will need to add a load balancing rule and health probe for each port your upstream servers are listening on. For example, given the following NGINX configuration snippet, + +```nginx +upstream { + server 10.0.1.4:8000; +} +``` + +Create a health probe monitoring on port `8000`: + +```bash +$ az network lb probe create \ + --resource-group $APP_RESOURCE_GROUP \ + --lb-name load-balancer \ + --name 8000-probe \ + --protocol tcp \ + --port 8000 +``` + +Create a load balancing rule listening on port `8000`: + +```bash +$ az network lb rule create \ + --resource-group $APP_RESOURCE_GROUP \ + --lb-name load-balancer \ + --name 8000-rule \ + --protocol tcp \ + --frontend-port 8000 \ + --backend-port 8000 \ + --frontend-ip-name frontend \ + --backend-pool-name backend-pool \ + --probe-name 8000-probe \ + --idle-timeout 15 \ + --enable-tcp-reset true +``` + +### Configure the workload VM behind the load balancer + +```bash +$ az network nic ip-config address-pool add \ + --address-pool backend-pool \ + --ip-config-name $APP_IP_CONFIG_NAME \ + --nic-name $APP_NIC_NAME \ + --resource-group $APP_RESOURCE_GROUP \ + --lb-name load-balancer +``` + +### Disable network policy + +The `privateLinkServiceNetworkPolicies` setting must be disabled to add a private link service in a virtual network. + +```bash +$ az network vnet subnet update \ + --name $APP_SUBNET_NAME \ + --vnet-name $APP_VNET_NAME \ + --resource-group $APP_RESOURCE_GROUP \ + --disable-private-link-service-network-policies yes +``` + +### Create a private link service + +```bash +$ az network private-link-service create \ + --resource-group $APP_RESOURCE_GROUP \ + --name private-link-service \ + --vnet-name $APP_VNET_NAME \ + --subnet $APP_SUBNET_NAME \ + --lb-name load-balancer \ + --lb-frontend-ip-configs frontend \ + --location $APP_LOCATION +``` + +
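If you plan to connect a private endpoint to this service (covered in the next section), it is convenient to capture the service's resource ID now. The following is a minimal sketch using the resource names from the example above; it stores the ID in the `PRIVATE_LINK_SERVICE_ID` variable referenced later.

```bash
# Capture the Private Link service resource ID for the private endpoint example
export PRIVATE_LINK_SERVICE_ID=$(az network private-link-service show \
    --resource-group $APP_RESOURCE_GROUP \
    --name private-link-service \
    --query id \
    --output tsv)
```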
+ +## Create a private endpoint + +A private endpoint is a network interface that connects to a service powered by Azure Private Link. To connect your NGINXaaS to your upstreams using a private endpoint, + +1. Add a new, non-delegated subnet in your NGINXaaS deployment's virtual network. +1. Create a private endpoint. +1. Update your NGINX configuration to reference the private endpoint. + +The following example demonstrates this process using an existing NGINXaaS deployment and a Private Link service. + +
+Create a private endpoint - Azure CLI + +### Prerequisites + +- Resource Group +- Virtual Network +- NGINXaaS deployment +- Private Link service + +Please ensure the following environment variables are exported before copying the below Azure CLI commands. + +{{}} + | Name | Description | + |------------------ | ----------------- | + | DEP_RESOURCE_GROUP | Name of the resource group the NGINXaaS deployment is in | + | DEP_VNET_NAME | Name of the virtual network the NGINXaaS deployment is in | + | PRIVATE_ENDPOINT_SUBNET_ADDRESS_SPACE | Desired address space of the private endpoint's subnet | + | PRIVATE_LINK_SERVICE_ID | Resource ID of the Private Link service | +{{}} + +### Create a new subnet + +You must create a new subnet for the private endpoint because the existing NGINXaaS deployment's subnet is already delegated. + +```bash +$ az network vnet subnet create \ + --resource-group $DEP_RESOURCE_GROUP \ + --vnet-name $DEP_VNET_NAME \ + --name subnet-priv-endpoint \ + --address-prefix $PRIVATE_ENDPOINT_SUBNET_ADDRESS_SPACE +``` + +### Create a private endpoint + +```bash +$ az network private-endpoint create \ + --connection-name connection-1 \ + --name private-endpoint \ + --private-connection-resource-id $PRIVATE_LINK_SERVICE_ID \ + --resource-group $DEP_RESOURCE_GROUP \ + --subnet subnet-priv-endpoint \ + --manual-request false \ + --vnet-name $DEP_VNET_NAME +``` + +### Update your NGINXaaS configuration with the private endpoint's IP address + +First, get the IP address of the private endpoint: + +```bash +$ export nic_id=$(az network private-endpoint show \ + --resource-group $DEP_RESOURCE_GROUP \ + --name private-endpoint \ + --query "networkInterfaces[0].id" \ + --output tsv) + +$ az network nic show \ + --ids $nic_id \ + --query "ipConfigurations[0].privateIPAddress" \ + --output tsv +``` + +Then, reference it in your NGINX configuration's upstream servers. For example: + +```nginx +upstream { + server :8000; +} +``` + +
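Putting the pieces together, a minimal NGINX configuration that proxies traffic to the upstream through the private endpoint might look like the following sketch. The upstream name `private_backend` and the address `10.0.2.5` are placeholders; use the private endpoint IP address retrieved above and the port exposed by your load balancing rules.

```nginx
http {
    upstream private_backend {
        # Placeholder: private endpoint IP address and upstream port
        server 10.0.2.5:8000;
    }

    server {
        listen 80;

        location / {
            # Requests are forwarded to the upstream over the Private Link connection
            proxy_pass http://private_backend;
        }
    }
}
```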
+ + +## Additional Resources + +The following guides provide step-by-step instructions to create a Private Link service and a private endpoint with your preferred client tool: + + +* [Azure portal](https://learn.microsoft.com/en-us/azure/private-link/create-private-link-service-portal?tabs=dynamic-ip) +* [Azure CLI](https://learn.microsoft.com/en-us/azure/private-link/create-private-link-service-cli) +* [ARM template](https://learn.microsoft.com/en-us/azure/private-link/create-private-link-service-template) + diff --git a/content/nginxaas-azure/quickstart/security-controls/securing-upstream-traffic.md b/content/nginxaas-azure/quickstart/security-controls/securing-upstream-traffic.md new file mode 100644 index 000000000..897d37660 --- /dev/null +++ b/content/nginxaas-azure/quickstart/security-controls/securing-upstream-traffic.md @@ -0,0 +1,123 @@ +--- +title: "Securing upstream traffic" +weight: 300 +categories: ["tasks"] +toc: true +docs: "DOCS-1475" +url: /nginxaas/azure/quickstart/security-controls/securing-upstream-traffic/ +--- + +Learn how to encrypt HTTP traffic between F5 NGINX as a Service for Azure (NGINXaaS) and an upstream group or a proxied server. To secure TCP traffic to upstream servers, follow the [NGINX Plus guide](https://docs.nginx.com/nginx/admin-guide/security-controls/securing-tcp-traffic-upstream/). As with securing HTTP traffic, you will need to [add the SSL/TLS client certificate]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md">}}) to the NGINXaaS deployment. + +### Prerequisites + +- [Add a SSL/TLS Certificate]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md">}}) to the NGINXaaS deployment. +- Enable [njs module]({{< relref "/nginxaas-azure/quickstart/njs-support.md">}}) if configuration uses njs directives. + +### Configuring NGINX + +[Add the client certificate and the key]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md">}}) that will be used to authenticate NGINX to the NGINXaaS deployment. Make a note of the filepaths you assign to the `Certificate path` and `Key path`. + +Next, change the URL to an upstream group to support SSL connections. In the NGINX configuration file, specify the “https” protocol for the proxied server or an upstream group in the `proxy_pass` directive: + +```nginx +location /upstream { + proxy_pass https://backend.example.com; +} +``` + +Add the client certificate and key to the NGINX config to authenticate NGINX on each upstream server with `proxy_ssl_certificate` and `proxy_ssl_certificate_key` directives using the filepaths noted above. NGINXaaS for Azure expects the directive's file arguments to match the filepaths assigned to a certificate and key that have been added to the NGINXaaS Deployment. + +```nginx +location /upstream { + proxy_pass https://backend.example.com; + proxy_ssl_certificate /etc/nginx/client.pem; + proxy_ssl_certificate_key /etc/nginx/client.key; +} +``` + +If you use a self-signed certificate for an upstream or your own CA, you may include this file by adding it to the [NGINX configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md">}}) and including the `proxy_ssl_trusted_certificate` directive. The file must be in the PEM format. 
Optionally, include the [`proxy_ssl_verify`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify) and [`proxy_ssl_verify_depth`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_ssl_verify_depth) directives to have NGINX check the validity of the security certificates: + +```nginx +location /upstream { + # ... + proxy_ssl_trusted_certificate /etc/nginx/trusted_ca_cert.crt; + proxy_ssl_verify on; + proxy_ssl_verify_depth 2; + # ... +} +``` + +If your configuration uses the [njs module]({{< relref "/nginxaas-azure/quickstart/njs-support.md">}}), you can include the `js_fetch_trusted_certificate` directive to [verify](http://nginx.org/en/docs/njs/reference.html#fetch_verify) HTTPS certificates with the [Fetch API](http://nginx.org/en/docs/njs/reference.html#ngx_fetch). + +Toggle `yes` to include the CA file as a protected file when using the Azure portal, as shown below: + +{{< img src="nginxaas-azure/add-ca-as-protected-file.png" alt="Screenshot of the Azure portal showing the toggle for protected files" >}} + +### Configuring upstreams + +Each upstream server should be configured to accept HTTPS connections. For each upstream server, specify a path to the server certificate and the private key [added to the NGINXaaS Deployment]({{< relref "/nginxaas-azure/getting-started/ssl-tls-certificates/ssl-tls-certificates-portal.md">}}) with `ssl_certificate` and `ssl_certificate_key` directives: + +```nginx +server { + listen 443 ssl; + server_name backend1.example.com; + + ssl_certificate /etc/ssl/certs/server.crt; + ssl_certificate_key /etc/ssl/certs/server.key; + #... + location /upstream { + proxy_pass http://url_to_app.com; + # ... + } +} +``` + +Specify the path to a trusted client CA certificate added to the [NGINX configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md">}}) with the `ssl_client_certificate` or `ssl_trusted_certificate` directives. The file should be in PEM format. + +```nginx +server { + #... + ssl_client_certificate /etc/ssl/certs/ca.crt; + ssl_verify_client optional; + #... +} +``` + +A complete example of securing traffic between NGINX and upstream servers is available [here](https://docs.nginx.com/nginx/admin-guide/security-controls/securing-http-traffic-upstream/#complete-example). + +### Additional configuration + +If the key specified in `proxy_ssl_certificate_key` is protected by a passphrase, add a file containing the passphrases to the NGINX configuration and reference that file in `proxy_ssl_password_file`. It is recommended to use a protected file as the argument for this directive. + +```nginx +location /upstream { + proxy_pass https://backend.example.com; + proxy_ssl_certificate /etc/nginx/client.pem; + proxy_ssl_certificate_key /etc/nginx/client.key; + proxy_ssl_password_file pswd.txt; +} +``` + +You can also configure NGINX with a list of revoked certificates using the `proxy_ssl_crl` directive. Include this file in PEM format in your NGINX configuration. + +```nginx +location /upstream { + # ... + proxy_ssl_crl /etc/nginx/revoked.crt; + # ... +} +``` + +The `ssl_session_ticket_key` directive specifies a file containing the secret key used to encrypt and decrypt TLS session tickets. To use this directive, include a file containing 80 or 48 bytes of random data, generated with the `openssl` command, in your [NGINX configuration]({{< relref "/nginxaas-azure/getting-started/nginx-configuration/nginx-configuration-portal.md" >}}) bundle.
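One way to generate such a key file locally is with `openssl rand`; the file name `keys` used below is only an example and should match the file referenced by `ssl_session_ticket_key` in your configuration.

```bash
# Generate 80 bytes of random data for ssl_session_ticket_key
# (use 48 instead of 80 if you want a 48-byte key)
openssl rand -out keys 80
```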
For example, the following configuration references a key file named `keys`: + +```nginx +http { + server { + ssl_certificate /etc/nginx/client.pem; + ssl_certificate_key /etc/nginx/client.key; + ssl_client_certificate /etc/nginx/ca.pem; + ssl_session_ticket_key keys; + } +} +``` diff --git a/content/nginxaas-azure/quickstart/upgrade-channels.md new file mode 100644 index 000000000..19bb1d2fd --- /dev/null +++ b/content/nginxaas-azure/quickstart/upgrade-channels.md @@ -0,0 +1,43 @@ +--- +title: "Upgrade channels" +weight: 150 +categories: ["tasks"] +toc: true +docs: "DOCS-1480" +url: /nginxaas/azure/quickstart/upgrade-channels/ +--- + +## Overview + +Maintaining the latest versions of NGINX Plus, the operating system (OS), and other software dependencies is a key feature offered by F5 NGINX as a Service for Azure (NGINXaaS). The **Upgrade Channel** is an upgrade path to which you can subscribe your NGINXaaS deployment to control the timing of software upgrades. The following channels are available: + +{{}} +| Channel | Description | +|-------------|---------------------------| +| preview | Selecting this channel automatically upgrades your deployment to the latest supported version of NGINX Plus and its dependencies soon after they become available. We recommend using this setting to try out new capabilities in deployments running in your development, testing, and staging environments. | +| stable | A deployment running on this channel will receive updates to NGINX Plus and its dependencies at a slower rate than the **Preview** channel. We recommend using this setting for production deployments where you might want stable features instead of the latest ones. This is the **default channel** if you do not specify one for your deployment. | +{{}} + +{{}} All channels receive continuous updates for OS patches and security fixes. +{{}} + +## Availability of new features + +### NGINX Plus and related modules + +{{}} +| Channel | Availability of NGINX Plus and related modules | +|-------------|-----------------------------------------------| +| preview | No sooner than 14 days after a new NGINX Plus [release](https://docs.nginx.com/nginx/releases/). | +| stable | No sooner than 45 days after a new NGINX Plus [release](https://docs.nginx.com/nginx/releases/). | +{{}} + +A new version of NGINX Plus and its related modules is first introduced to the **preview** channel, where it goes through our acceptance testing. Once the software has baked in the **preview** channel for a reasonable time, it is promoted to the **stable** channel. The actual promotion timelines can vary, and you can view our [Changelog]({{< relref "/nginxaas-azure/changelog.md" >}}) for the latest updates. + +## Changing the upgrade channel + +To change the upgrade channel on your deployment using the Azure portal: + +1. Select **NGINX Upgrades** in the left menu. +1. Choose the desired **Upgrade Channel** from the dropdown menu. +1. Select **Submit**.
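If you manage your deployment with Terraform instead of the portal, the upgrade channel can also be set in code. The sketch below assumes an AzureRM provider version whose `azurerm_nginx_deployment` resource supports the `automatic_upgrade_channel` argument; check the provider documentation for your version before relying on it.

```
resource "azurerm_nginx_deployment" "example" {
  # ... your existing deployment arguments ...

  # Assumed argument name; set to "stable" or "preview"
  automatic_upgrade_channel = "stable"
}
```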
diff --git a/content/nginxaas-azure/troubleshooting/_index.md new file mode 100644 index 000000000..f10607b28 --- /dev/null +++ b/content/nginxaas-azure/troubleshooting/_index.md @@ -0,0 +1,8 @@ +--- +title: Troubleshooting +weight: 700 +url: /nginxaas/azure/troubleshooting/ +menu: + docs: + parent: NGINXaaS for Azure +--- \ No newline at end of file diff --git a/content/nginxaas-azure/troubleshooting/migrate-from-standard.md new file mode 100644 index 000000000..3bc578dd3 --- /dev/null +++ b/content/nginxaas-azure/troubleshooting/migrate-from-standard.md @@ -0,0 +1,52 @@ +--- +title: "Migrating from Standard to Standard V2" +weight: 200 +categories: ["tasks"] +toc: true +url: /nginxaas/azure/troubleshooting/migrate-from-standard/ +--- + +## Overview + +F5 NGINX as a Service for Azure (NGINXaaS) now supports in-place migration from the Standard plan to the Standard V2 plan. We encourage you to upgrade your deployment to the Standard V2 plan as soon as possible. **The Standard plan is scheduled for retirement on May 1, 2025**. If you fail to migrate by May 1, 2025, your NGINXaaS deployment will stop receiving automatic updates that address critical security issues. + +The Standard V2 plan maintains the same price as the Standard plan for existing capabilities. Enabling new capabilities such as NGINX App Protect WAF or additional listen ports that were added as part of Standard V2 will incur additional charges. + +{{< note >}} We currently only support in-place migration from the Standard plan to the Standard V2 plan. Please avoid updating your Basic plan deployments to the Standard V2 plan using this guide. {{< /note >}} + +## Migration Steps + +### Using the Portal + +1. Go to the **Overview** page of the NGINXaaS deployment in the Azure portal. +2. Under **Essentials**, find the **Pricing Tier** and select **Click to Upgrade**. +3. Select the Standard V2 plan and select **Submit**. + +### Using Terraform + +1. Update the Terraform AzureRM provider to 4.6.0 or above. + +``` +terraform { + required_version = "~> 1.3" + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 4.6.0" + } + } +} +``` + +2. Modify the SKU to `standardv2_Monthly` in the azurerm_nginx_deployment resource. +3. Run `terraform plan`. Review the output of `terraform plan` to ensure that your NGINXaaS deployment is not being replaced. +4. Run `terraform apply` to upgrade the deployment. + +### Using the Azure CLI + +Run the following command to update your NGINXaaS deployment. + +```bash + az nginx deployment update --name myDeployment --resource-group \ + myResourceGroup --sku name="standardv2_Monthly_gmz7xq9ge3py" +``` diff --git a/content/nginxaas-azure/troubleshooting/troubleshooting.md new file mode 100644 index 000000000..9f35a160b --- /dev/null +++ b/content/nginxaas-azure/troubleshooting/troubleshooting.md @@ -0,0 +1,54 @@ +--- +title: "Get help" +weight: 100 +categories: ["tasks"] +toc: true +docs: "DOCS-882" +url: /nginxaas/azure/troubleshooting/troubleshooting/ +--- + +## Contact NGINX support + +To contact support about F5 NGINX as a Service for Azure (NGINXaaS): + +1. Go to your NGINXaaS deployment. + +2. Select **New Support request** in the left menu. + +3. Select **Raise a Support ticket**. + + {{< img src="nginxaas-azure/raise-ticket.png" alt="Screenshot of the Azure portal showing the Raise support ticket button" >}} + +4. 
You will be redirected to **MyF5** to create a new case. Log in to MyF5 with your F5 account. + +{{< note >}}If you can't complete a deployment successfully, the "New support request" option won't be available on the left-hand navigation menu. To raise a support ticket, go to the [MyF5 portal](https://my.f5.com). {{< /note >}} + +5. Go to the **Case Management** section and select **Create new case**. + + {{< img src="nginxaas-azure/new-case.png" alt="Screenshot of the MyF5 portal showing the Create new case button" >}} + +6. Select **NGINXaaS** in the Product dropdown. + + {{< img src="nginxaas-azure/create-case.png" alt="MyF5 Case form" >}} + +7. Complete the request with the relevant information about your issue, bug report, or feedback. If you are contacting us to report an issue, please include the following information, available in the **Properties** section of your deployment, for the support team to begin their investigation: + +- Location +- Date and time of the issue +- Resource ID + +{{< img src="nginxaas-azure/properties.png" alt="Screenshot of the Azure portal showing the Properties section" >}} + +8. Complete the **Additional information** and **Contact details** sections of your case and select **Submit**. + +## Update support contact information + +To provide or update the preferred support contact email: + +1. Go to your NGINX as a Service (NGINXaaS) for Azure deployment. + +2. Select **New Support request** in the left menu. + +3. Select the `Edit` Button next to the **Support Contact Email** field. + +4. Provide a new email address in the field and select **Submit**. diff --git a/content/nim/deploy/kubernetes/deploy-using-helm.md b/content/nim/deploy/kubernetes/deploy-using-helm.md index 0bc12fdcd..2e17d59b1 100644 --- a/content/nim/deploy/kubernetes/deploy-using-helm.md +++ b/content/nim/deploy/kubernetes/deploy-using-helm.md @@ -132,27 +132,31 @@ The `values.yaml` file customizes the Helm chart installation without modifying - name: regcred apigw: image: - repository: private-registry.nginx.com/nms-apigw + repository: private-registry.nginx.com/nms/apigw tag: core: image: - repository: private-registry.nginx.com/nms-core + repository: private-registry.nginx.com/nms/core tag: dpm: image: - repository: private-registry.nginx.com/nms-dpm + repository: private-registry.nginx.com/nms/dpm tag: ingestion: image: - repository: private-registry.nginx.com/nms-ingestion + repository: private-registry.nginx.com/nms/ingestion tag: integrations: image: - repository: private-registry.nginx.com/nms-integrations + repository: private-registry.nginx.com/nms/integrations + tag: + secmon: + image: + repository: private-registry.nginx.com/nms/secmon tag: utility: image: - repository: private-registry.nginx.com/nms-utility + repository: private-registry.nginx.com/nms/utility tag: ``` @@ -212,6 +216,7 @@ By default, the following network policies will be created in the release namesp dpm app.kubernetes.io/name=dpm 4m47s ingestion app.kubernetes.io/name=ingestion 4m47s integrations app.kubernetes.io/name=integrations 4m47s + secmon app.kubernetes.io/name=secmon 4m47s utility app.kubernetes.io/name=integrations 4m47s ``` diff --git a/content/nim/nginx-app-protect/setup-waf-config-management.md b/content/nim/nginx-app-protect/setup-waf-config-management.md index 7a1e73f0f..60a975537 100644 --- a/content/nim/nginx-app-protect/setup-waf-config-management.md +++ b/content/nim/nginx-app-protect/setup-waf-config-management.md @@ -60,10 +60,12 @@ The following table shows the NGINX App Protect WAF 
Release version and its corr | NGINX App Protect WAF Release version | WAF Compiler | |---------------------------------------|----------------------------| +| NGINX App Protect WAF 5.5.0 | nms-nap-compiler-v5.264.0 | | NGINX App Protect WAF 5.4.0 | nms-nap-compiler-v5.210.0 | | NGINX App Protect WAF 5.3.0 | nms-nap-compiler-v5.144.0 | | NGINX App Protect WAF 5.2.0 | nms-nap-compiler-v5.48.0 | | NGINX App Protect WAF 5.1.0 | nms-nap-compiler-v5.17.0 | +| NGINX App Protect WAF 4.13.0 | nms-nap-compiler-v5.264.0 | | NGINX App Protect WAF 4.12.0 | nms-nap-compiler-v5.210.0 | | NGINX App Protect WAF 4.11.0 | nms-nap-compiler-v5.144.0 | | NGINX App Protect WAF 4.10.0 | nms-nap-compiler-v5.48.0 | @@ -100,7 +102,7 @@ The following table shows the NGINX App Protect WAF Release version and its corr Install the WAF compiler, then restart the `nms-integrations` service: ```bash -sudo apt-get install nms-nap-compiler-v5.210.0 +sudo apt-get install nms-nap-compiler-v5.264.0 ``` {{}} @@ -108,7 +110,7 @@ sudo apt-get install nms-nap-compiler-v5.210.0 - If you want to have more than one version of the `nms-nap-compiler` installed on your system at once, you'll need to append `-o Dpkg::Options::="--force-overwrite"` to the `nms-nap-compiler` installation commands after your initial `nms-nap-compiler` installation. For example, the installation command would look like this: ```bash -sudo apt-get install nms-nap-compiler-v5.210.0 -o Dpkg::Options::="--force-overwrite" +sudo apt-get install nms-nap-compiler-v5.264.0 -o Dpkg::Options::="--force-overwrite" ``` {{}} @@ -120,7 +122,7 @@ Download the file dependencies.repo to `/etc/yum.repos.d`, enable the `codeready ```bash sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo sudo subscription-manager repos --enable codeready-builder-for-rhel-8-x86_64-rpms -sudo yum install nms-nap-compiler-v5.210.0 +sudo yum install nms-nap-compiler-v5.264.0 ``` ### RHEL 7.4 or later; CentOS @@ -129,7 +131,7 @@ Download the file `dependencies.repo` to `/etc/yum.repos.d`, enable the RHEL 7 s ```bash sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo sudo yum-config-manager --enable rhui-REGION-rhel-server-optional rhui-REGION-rhel-server-releases rhel-7-server-optional-rpms -sudo yum install nms-nap-compiler-v5.210.0 +sudo yum install nms-nap-compiler-v5.264.0 ``` ### Amazon Linux 2 LTS @@ -141,7 +143,7 @@ sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/app-protect-7.re sudo amazon-linux-extras enable epel sudo yum clean metadata sudo yum install epel-release -sudo yum install nms-nap-compiler-v5.210.0 +sudo yum install nms-nap-compiler-v5.264.0 ``` ### Oracle Linux 7.4 or later @@ -150,7 +152,7 @@ Download the file `dependencies.repo` to `/etc/yum.repos.d`, enable the `ol8_cod ```bash sudo wget -P /etc/yum.repos.d https://cs.nginx.com/static/files/dependencies.repo sudo yum-config-manager --enable ol8_codeready_builder -sudo yum install nms-nap-compiler-v5.210.0 +sudo yum install nms-nap-compiler-v5.264.0 ``` ### Download from MyF5 @@ -1093,24 +1095,24 @@ Check if the WAF compiler has been installed and is working properly by viewing sudo /opt/nms-nap-compiler/app_protect-/bin/apcompile -h ``` -For example, to view the help description for WAF compiler 5.210.0, run the following command: +For example, to view the help description for WAF compiler 5.264.0, run the following command: ``` bash -sudo /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile -h +sudo 
/opt/nms-nap-compiler/app_protect-5.264.0/bin/apcompile -h ``` The output looks similar to the following example: ```text USAGE: - /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile + /opt/nms-nap-compiler/app_protect-5.264.0/bin/apcompile Examples: - /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile -p /path/to/policy.json -o mypolicy.tgz - /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile -p policyA.json -g myglobal.json -o /path/to/policyA_bundle.tgz - /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile -g myglobalsettings.json --global-state-outfile /path/to/myglobalstate.tgz - /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile -b /path/to/policy_bundle.tgz --dump - /opt/nms-nap-compiler/app_protect-5.210.0/bin/apcompile -l logprofA.json -o /path/to/logprofA_bundle.tgz + /opt/nms-nap-compiler/app_protect-5.264.0/bin/apcompile -p /path/to/policy.json -o mypolicy.tgz + /opt/nms-nap-compiler/app_protect-5.264.0/bin/apcompile -p policyA.json -g myglobal.json -o /path/to/policyA_bundle.tgz + /opt/nms-nap-compiler/app_protect-5.264.0/bin/apcompile -g myglobalsettings.json --global-state-outfile /path/to/myglobalstate.tgz + /opt/nms-nap-compiler/app_protect-5.264.0/bin/apcompile -b /path/to/policy_bundle.tgz --dump + /opt/nms-nap-compiler/app_protect-5.264.0/bin/apcompile -l logprofA.json -o /path/to/logprofA_bundle.tgz ... ``` diff --git a/layouts/index.html b/layouts/index.html index 1057533b2..37486508e 100644 --- a/layouts/index.html +++ b/layouts/index.html @@ -60,7 +60,7 @@

NGINX Gateway Fabric
@@ -69,7 +69,7 @@
NGINX Open Source
diff --git a/layouts/shortcodes/golden-star.html b/layouts/shortcodes/golden-star.html new file mode 100644 index 000000000..4f7eecce9 --- /dev/null +++ b/layouts/shortcodes/golden-star.html @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/layouts/shortcodes/icon-warning.html b/layouts/shortcodes/icon-warning.html new file mode 100644 index 000000000..e37033d75 --- /dev/null +++ b/layouts/shortcodes/icon-warning.html @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/static/img/aws/aws-generic-create-image-menu.png b/static/img/aws/aws-generic-create-image-menu.png new file mode 100644 index 000000000..2f21a8c16 Binary files /dev/null and b/static/img/aws/aws-generic-create-image-menu.png differ diff --git a/static/img/aws/aws-generic-instance-details.png b/static/img/aws/aws-generic-instance-details.png new file mode 100644 index 000000000..f9e40bed8 Binary files /dev/null and b/static/img/aws/aws-generic-instance-details.png differ diff --git a/static/img/aws/aws-generic-instance-display-first.png b/static/img/aws/aws-generic-instance-display-first.png new file mode 100644 index 000000000..6d5e04252 Binary files /dev/null and b/static/img/aws/aws-generic-instance-display-first.png differ diff --git a/static/img/aws/aws-generic-instance-security-outbound.png b/static/img/aws/aws-generic-instance-security-outbound.png new file mode 100644 index 000000000..bf84a7020 Binary files /dev/null and b/static/img/aws/aws-generic-instance-security-outbound.png differ diff --git a/static/img/aws/aws-instance-launch-success.png b/static/img/aws/aws-instance-launch-success.png new file mode 100644 index 000000000..294621ab1 Binary files /dev/null and b/static/img/aws/aws-instance-launch-success.png differ diff --git a/static/img/aws/aws-instance-select-sg.png b/static/img/aws/aws-instance-select-sg.png new file mode 100644 index 000000000..859fbf4d4 Binary files /dev/null and b/static/img/aws/aws-instance-select-sg.png differ diff --git a/static/img/aws/aws-instance-summary.png b/static/img/aws/aws-instance-summary.png new file mode 100644 index 000000000..0bbf920b0 Binary files /dev/null and b/static/img/aws/aws-instance-summary.png differ diff --git a/static/img/aws/aws-nlb-instance-choose-ami.png b/static/img/aws/aws-nlb-instance-choose-ami.png new file mode 100644 index 000000000..775a36c0c Binary files /dev/null and b/static/img/aws/aws-nlb-instance-choose-ami.png differ diff --git a/static/img/aws/aws-nlb-instance-choose-type.png b/static/img/aws/aws-nlb-instance-choose-type.png new file mode 100644 index 000000000..c74c9c38a Binary files /dev/null and b/static/img/aws/aws-nlb-instance-choose-type.png differ diff --git a/static/img/aws/aws-nlb-instance-connect.png b/static/img/aws/aws-nlb-instance-connect.png new file mode 100644 index 000000000..af7d79cd0 Binary files /dev/null and b/static/img/aws/aws-nlb-instance-connect.png differ diff --git a/static/nginx/images/aws-generic-create-image-menu.png b/static/nginx/images/aws-generic-create-image-menu.png deleted file mode 100644 index 97bdb97fa..000000000 Binary files a/static/nginx/images/aws-generic-create-image-menu.png and /dev/null differ diff --git a/static/nginx/images/aws-generic-create-image-popup.png b/static/nginx/images/aws-generic-create-image-popup.png deleted file mode 100644 index 3a5823f92..000000000 Binary files a/static/nginx/images/aws-generic-create-image-popup.png and /dev/null differ diff --git a/static/nginx/images/aws-generic-instance-details.png b/static/nginx/images/aws-generic-instance-details.png 
deleted file mode 100644 index 784cd3c81..000000000 Binary files a/static/nginx/images/aws-generic-instance-details.png and /dev/null differ diff --git a/static/nginx/images/aws-generic-instance-display-first.png b/static/nginx/images/aws-generic-instance-display-first.png deleted file mode 100644 index 1cbe08821..000000000 Binary files a/static/nginx/images/aws-generic-instance-display-first.png and /dev/null differ diff --git a/static/nginx/images/aws-generic-instance-launch.png b/static/nginx/images/aws-generic-instance-launch.png deleted file mode 100644 index 8b63f1fd1..000000000 Binary files a/static/nginx/images/aws-generic-instance-launch.png and /dev/null differ diff --git a/static/nginx/images/aws-generic-instance-review-launch.png b/static/nginx/images/aws-generic-instance-review-launch.png deleted file mode 100644 index f88086f85..000000000 Binary files a/static/nginx/images/aws-generic-instance-review-launch.png and /dev/null differ diff --git a/static/nginx/images/aws-generic-instance-security-group.png b/static/nginx/images/aws-generic-instance-security-group.png deleted file mode 100644 index 227626066..000000000 Binary files a/static/nginx/images/aws-generic-instance-security-group.png and /dev/null differ diff --git a/static/nginx/images/aws-generic-instance-security-inbound.png b/static/nginx/images/aws-generic-instance-security-inbound.png deleted file mode 100644 index 50c9889bd..000000000 Binary files a/static/nginx/images/aws-generic-instance-security-inbound.png and /dev/null differ diff --git a/static/nginx/images/aws-generic-instance-security-outbound.png b/static/nginx/images/aws-generic-instance-security-outbound.png deleted file mode 100644 index dac80f6e8..000000000 Binary files a/static/nginx/images/aws-generic-instance-security-outbound.png and /dev/null differ diff --git a/static/nginx/images/aws-generic-instance-select-one.png b/static/nginx/images/aws-generic-instance-select-one.png deleted file mode 100644 index b71e9bb28..000000000 Binary files a/static/nginx/images/aws-generic-instance-select-one.png and /dev/null differ diff --git a/static/nginx/images/aws-instance-add-tags-name.png b/static/nginx/images/aws-instance-add-tags-name.png deleted file mode 100644 index 414783a77..000000000 Binary files a/static/nginx/images/aws-instance-add-tags-name.png and /dev/null differ diff --git a/static/nginx/images/aws-nlb-instance-add-storage.png b/static/nginx/images/aws-nlb-instance-add-storage.png deleted file mode 100644 index 23a095258..000000000 Binary files a/static/nginx/images/aws-nlb-instance-add-storage.png and /dev/null differ diff --git a/static/nginx/images/aws-nlb-instance-choose-ami.png b/static/nginx/images/aws-nlb-instance-choose-ami.png deleted file mode 100644 index b630f6774..000000000 Binary files a/static/nginx/images/aws-nlb-instance-choose-ami.png and /dev/null differ diff --git a/static/nginx/images/aws-nlb-instance-choose-type.png b/static/nginx/images/aws-nlb-instance-choose-type.png deleted file mode 100644 index be139720f..000000000 Binary files a/static/nginx/images/aws-nlb-instance-choose-type.png and /dev/null differ diff --git a/static/nginxaas-azure/add-ca-as-protected-file.png b/static/nginxaas-azure/add-ca-as-protected-file.png new file mode 100644 index 000000000..442f63bdd Binary files /dev/null and b/static/nginxaas-azure/add-ca-as-protected-file.png differ diff --git a/static/nginxaas-azure/alert-logic.png b/static/nginxaas-azure/alert-logic.png new file mode 100644 index 000000000..7719571d8 Binary files /dev/null and 
b/static/nginxaas-azure/alert-logic.png differ diff --git a/static/nginxaas-azure/alert-select-signal.png b/static/nginxaas-azure/alert-select-signal.png new file mode 100644 index 000000000..c3c9fa5a9 Binary files /dev/null and b/static/nginxaas-azure/alert-select-signal.png differ diff --git a/static/nginxaas-azure/auth-basic-htpasswd.png b/static/nginxaas-azure/auth-basic-htpasswd.png new file mode 100644 index 000000000..b152fd04c Binary files /dev/null and b/static/nginxaas-azure/auth-basic-htpasswd.png differ diff --git a/static/nginxaas-azure/azure-metrics-nginxaas.certificates.png b/static/nginxaas-azure/azure-metrics-nginxaas.certificates.png new file mode 100644 index 000000000..f909392ab Binary files /dev/null and b/static/nginxaas-azure/azure-metrics-nginxaas.certificates.png differ diff --git a/static/nginxaas-azure/create-case.png b/static/nginxaas-azure/create-case.png new file mode 100644 index 000000000..9edafb5b6 Binary files /dev/null and b/static/nginxaas-azure/create-case.png differ diff --git a/static/nginxaas-azure/css/cost-calculator_v2.css b/static/nginxaas-azure/css/cost-calculator_v2.css new file mode 100644 index 000000000..62e5efa9a --- /dev/null +++ b/static/nginxaas-azure/css/cost-calculator_v2.css @@ -0,0 +1,236 @@ +#calculator .form-field { + margin: 8px 0 20px 0; +} +#calculator .form-field label { + display: block; + font-size: 14px; + font-weight: 300; + color: #000000; + + line-height: 20px; +} +#calculator .form-field label span.label-details, .subtitle { + font-size: 12px; + color: #6C778C; +} + +#calculator input, select { + width: 300px; + height: 24px; + + margin-top: 4px; + color: #000000; + +} + +#calculator input[type=radio], #calculator input[type=checkbox] { + width: 24px; +} + +#calculator .section { + margin: 0 auto 24px auto; + max-width: 900px; +} + +#calculator #totals-section { + padding: 20px 0; + +} +#calculator #totals-section .total-text { + font-weight: 700; + margin-right: 50px; +} + +#calculator details { + position: relative; +} + +#calculator details > summary { + list-style: none; + padding-left: 20px; + margin: 20px 2px; + text-decoration: underline; + color: #515357; +} +#calculator details > summary::before { + content: "\FF0B"; + border: 2px solid gray; + display: inline-flex; + justify-content: center; + align-items: center; + position: absolute; + top: 2px; + left: 0; + height: 11px; + width: 11px; + font-size: 10px; + font-weight: 600; + border-radius: 50%; + text-decoration: none; +} +#calculator details[open] > summary::before { + content: "\FF0D"; +} + +#calculator .details-content { + padding: 10px; + display: flex; + + width: 100%; +} + +#calculator .details-section { + padding: 10px; + background-color: rgba(217, 217, 217, 0.1); + margin-right: 10px; + flex: 1; +} +#calculator .details-section:first-child { + flex: 2; +} + +#calculator .bandwidth-input-container::after { + content: "Mbps"; + color: #6C778C; + font-size: 12px; + margin-left: -60px; +} +#calculator .avg-conn-duration-container::after { + content: "seconds"; + color: #6C778C; + font-size: 12px; + margin-left: -70px; +} + +#calculator #ncuEstimateValue { + /* display: flex; */ + /* align-items: center; */ + /* justify-content: center; */ + font-weight: 700; +} + +#calculator input:invalid + #ncuValidation::after { + content: "Must be a multiple of 10, with a minimum of 10"; + color: #aa0000; + padding-left: 5px; + font-size: 12px; +} + +#calculator .totals { + display: flex; + justify-content: left; +} +#calculator .totals > span { + margin-right: 
20px; +} + + +#calculator table { + margin: 0 auto; + border-collapse: collapse; +} + +#calculator th { + padding-bottom: 16px; +} + +#calculator tr.selected { + background-color: #e9f3ea; +} + + +#calculator .math { + font-weight: 300; + font-size: 12px; + background-color: #f7f7f7; + + margin: 8px 0 4px 0; + padding: 8px; +} + +#calculator pre { + font-family: inherit; +} + +#calculator var { + font-family: inherit; + font-weight: bolder; +} + +#calculator .titleCol { + font-weight: 400; + vertical-align: bottom; +} + +#calculator dt { + margin: 24px 0 8px 0; +} +#calculator dd { + margin-bottom: 8px; +} + +#calculator h3 { + display: flex; + justify-content: space-between; +} + +#calculator h3 label { + font-size: 14px; + width: 40%; + text-align: right; +} +#calculator h3 label input { + width: 20px; + vertical-align: middle; +} + +/* added for iteration 3 */ + +#calculator .form-section { + border: 1px solid #B7B7B7; + border-radius: 4px; + margin-bottom: 20px; +} +#calculator .form-section h4 { + font-weight: 600; +} +#calculator .form-section-content { + padding: 20px 18px; +} +#calculator .form-section-content:last-child { + padding-bottom: 18px; + border-top: 1px solid #B7B7B7; +} + +@media print { + body { + visibility: hidden; + height: 0; + } + + footer { + display: none; + } + + #calculator { + visibility: visible; + position: absolute; + top: 20px; + + margin: 0 auto; + + width: 100%; + } + + #calculator h3#calculator-section-heading { + justify-content: left; + } + + #calculator h3#calculator-section-heading button { + visibility: hidden; + } + + #calculator-section-heading::before { + content: "NGINX as a Service for Azure\00a0" + } +} diff --git a/static/nginxaas-azure/deployment-complete.png b/static/nginxaas-azure/deployment-complete.png new file mode 100644 index 000000000..09feb295f Binary files /dev/null and b/static/nginxaas-azure/deployment-complete.png differ diff --git a/static/nginxaas-azure/diagnostic-settings.png b/static/nginxaas-azure/diagnostic-settings.png new file mode 100644 index 000000000..ece806c47 Binary files /dev/null and b/static/nginxaas-azure/diagnostic-settings.png differ diff --git a/static/nginxaas-azure/faq-ip-location-one.png b/static/nginxaas-azure/faq-ip-location-one.png new file mode 100644 index 000000000..30926df52 Binary files /dev/null and b/static/nginxaas-azure/faq-ip-location-one.png differ diff --git a/static/nginxaas-azure/faq-ip-location-two.png b/static/nginxaas-azure/faq-ip-location-two.png new file mode 100644 index 000000000..3222f51fc Binary files /dev/null and b/static/nginxaas-azure/faq-ip-location-two.png differ diff --git a/static/nginxaas-azure/js/cost-calculator_v2.js b/static/nginxaas-azure/js/cost-calculator_v2.js new file mode 100644 index 000000000..361a69e42 --- /dev/null +++ b/static/nginxaas-azure/js/cost-calculator_v2.js @@ -0,0 +1,488 @@ +// todo [heftel] - if we are going to live with this for a while then this file should be broken up with +// modules. Browser support shouldn't be a concern anymore? 
- https://caniuse.com/es6-module + +(function () { + /** + * @typedef {typeof costs} Costs + * @constant + * @default + */ + const costs = { + regionsTiers: { + eastus2: { label: "East US 2", tier: 1 }, + northeurope: { label: "North Europe", tier: 1 }, + westcentralus: { label: "West Central US", tier: 1 }, + westus2: { label: "West US 2", tier: 1 }, + westus3: { label: "West US 3", tier: 1 }, + canadacentral: { label: "Canada Central", tier: 2 }, + centralindia: { label: "Central India", tier: 2 }, + centralus: { label: "Central US", tier: 2 }, + eastus: { label: "East US", tier: 2 }, + germanywestcentral: { label: "Germany West Central", tier: 2 }, + koreacentral: { label: "Korea Central", tier: 2 }, + northcentralus: { label: "North Central US", tier: 2 }, + southeastasia: { label: "Southeast Asia", tier: 2 }, + swedencentral: { label: "Sweden Central", tier: 2 }, + westeurope: { label: "West Europe", tier: 2 }, + westus: { label: "West US", tier: 2 }, + australiaeast: { label: "Australia East", tier: 3 }, + brazilsouth: { label: "Brazil South", tier: 3 }, + japaneast: { label: "Japan East", tier: 3 }, + southindia: { label: "South India", tier: 3 }, + }, + // cost per NCU + tiersCosts: { + 1: 0.03, + 2: 0.04, + 3: 0.05, + }, + WAF: 0.015, + listenPorts: 0.01, + numFreeListenPorts: 5, + }; + + /** + * @typedef {typeof ncuParameterVals} NcuParameterVals + * @constant + * @default + */ + const ncuParameterVals = { + connsPerSecPerAcu: 2.64, + acusPerNcu: 20, + connsPerNcu: 400, + mbpsPerNcu: 60, + }; + + const utils = { + /** + * + * @param {Costs} costs + * @param {CostCalculatorValuesState} values + * @returns {number} total - The total estimated cost + */ + calculateCost: (costs, values) => { + const regionCost = + costs.tiersCosts[costs.regionsTiers[values.region].tier]; + + const total = + values.numHours * + (values.numNcus * (regionCost + (values.isWAF ? costs.WAF : 0)) + + (values.numListenPorts > costs.numFreeListenPorts + ? 
(values.numListenPorts - costs.numFreeListenPorts) * + costs.listenPorts + : 0)); + + return total; + }, + + /** + * NcuValues needed to show values and calculations + * @typedef {Object} NcuValues + * @property {number} avgConcurrentConnections - the average concurrent connections + * @property {number} exactNcusNeeded - the number of NCUs needed as an integer + * @property {number} bundledNcusNeeded - the number of NCUs in bundles of tens + */ + /** + * @param {NcuEstimateValuesState} ncuEstimateFormValues + * @return {NcuValues} + */ + calculateNcuValues: (ncuEstimateFormValues) => { + // new connections per second avg duration in seconds + const avgConcurrentConnections = + ncuEstimateFormValues.avgNewConnsPerSec * + ncuEstimateFormValues.avgConnDuration; + + // Include 0s as default values in case of unexpected NaN + const minNcus = Math.max( + ncuEstimateFormValues.avgNewConnsPerSec / + (ncuParameterVals.connsPerSecPerAcu * ncuParameterVals.acusPerNcu) || + 0, + avgConcurrentConnections / ncuParameterVals.connsPerNcu || 0, + ncuEstimateFormValues.totalBandwidth / ncuParameterVals.mbpsPerNcu || 0 + ); + + return { + avgConcurrentConnections, + min: minNcus, + total: Math.max(10, Math.ceil(minNcus / 10) * 10), + }; + }, + + // async so it could call out to an API + loadCosts: async () => { + return costs; + }, + + /** + * Formats numbers to USD currency string + * @param {number} n + * @param {number} significantDigits + * @returns {string} + */ + currencyFormatter: (n, significantDigits) => { + return new Intl.NumberFormat("en-US", { + style: "currency", + currency: "USD", + maximumSignificantDigits: significantDigits + }).format(n); + }, + }; + + //////// + // "state" objects that keep form values + //////// + + /** + * @typedef {Object} CostCalculatorValuesState + * @property {string} region + * @property {number} numNcus + * @property {number} numHours + */ + /** + * @type {CostCalculatorValuesState} + */ + const calculatorValuesState = { + region: "westus2", + numNcus: 20, + numHours: 730, + numListenPorts: 5, + isWAF: false, + }; + + /** + * @typedef {Object} NcuEstimateValuesState + * @property {number} avgNewConnsPerSec + * @property {number} avgConnDuration + * @property {number} totalBandwidth + */ + /** + * @type {NcuEstimateValuesState} + */ + const ncuEstimateValuesState = { + avgNewConnsPerSec: 10, + avgConnDuration: 10, + totalBandwidth: 500, + }; + + //////// + // Keep element refs with hugo global jquery + //////// + + /** + * @type {Object.} + */ + const costFormElements = { + region: $("#region"), + numNcus: $("#numNcus"), + numHours: $("#numHours"), + numListenPorts: $("#numListenPorts"), + isWAF: $("#isWAF"), + }; + + /** + * @type {Object.} + */ + const costFormLabelElements = { + numNcusEstVal: $("#numNcusEstVal"), + }; + + /** + * @type {Object.} + */ + const ncuFormElements = { + avgNewConnsPerSec: $("#avgNewConnsPerSec"), + avgConnDuration: $("#avgConnDuration"), + totalBandwidth: $("#totalBandwidth"), + }; + + /** + * @type {Object.} + */ + const ncuEstimateElements = { + ncuEstConnRate: $("#ncuEstConnRate"), + ncuEstConnDuration: $("#ncuEstConnDuration"), + ncuEstAvgConn: $("#ncuEstAvgConn"), + ncuEstAvgConn2: $("#ncuEstAvgConn2"), + ncuEstConnRate2: $("#ncuEstConnRate2"), + ncuEstDataRate: $("#ncuEstDataRate"), + ncuEstMin1: $("#ncuEstMin1"), + ncuEstMin: $("#ncuEstMin"), + ncuEstTotal: $("#ncuEstTotal"), + ncuEstConnsPerNcu: $("#ncuEstConnsPerNcu"), + ncuEstConnsPerSecondPerNcu: $("#ncuEstConnsPerSecondPerNcu"), + ncuEstMbpsPerNcu: $("#ncuEstMbpsPerNcu"), 
+ }; + + /** + * @type {Object.} + */ + const totalCostDetailElements = { + ncus: $("#cost-detail-ncus"), + hours: $("#cost-detail-hours"), + tierCost: $("#cost-detail-tier-cost"), + listenPorts: $("#cost-detail-listen-ports"), + listenPortsCost: $("#cost-detail-listen-ports-cost"), + waf: $("#cost-detail-waf"), + total: $("#cost-detail-total"), + tiersCostsTable: $("#tiers-costs-table"), + }; + + /////// + // Setup change and click listeners + /////// + + /** + * + * @param {Costs} costs + * @param {CostCalculatorValuesState} values + * @param {NcuEstimateValuesState} ncuEstimateValues + */ + const setupChangeListeners = ( + costs, + values = calculatorValuesState, + ncuEstimateValues = ncuEstimateValuesState + ) => { + Object.keys(costFormElements).map((elName) => { + costFormElements[elName].on("change", (evt) => { + if (elName === "isWAF") { + values[elName] = evt.target.checked; + } else { + values[elName] = evt.target.value; + } + updateCost(costs); + }); + }); + + Object.keys(ncuFormElements).map((elName) => { + ncuFormElements[elName].on("change", (evt) => { + ncuEstimateValues[elName] = evt.target.value; + updateNcuEstimate(ncuEstimateValues); + }); + }); + + $("#printButton").click(() => { + printCostEstimate(); + }); + }; + + ////// + // Element and form value initialization functions + ////// + + /** + * + * @param {Costs["regionsTiers"]} regionsTiers + */ + const populateTierSelect = (regionsTiers) => { + const $selectTarget = $("#region"); + + Object.keys(regionsTiers).forEach((tierKey) => { + const option = document.createElement("option"); + option.setAttribute("value", tierKey); + option.innerText = `${regionsTiers[tierKey].label} (tier ${regionsTiers[tierKey].tier})`; + $selectTarget.append(option); + }); + }; + + /** + * @param {Costs["regionsTiers"]} regionsTiers + * @param {Costs["tiersCosts"]} tiersCosts + */ + const populateTierCostTable = (regionsTiers, tiersCosts) => { + const $tableTarget = totalCostDetailElements.tiersCostsTable; + + Object.keys(regionsTiers).forEach((tierKey) => { + const row = document.createElement("tr"); + const col1 = document.createElement("td"); + const col2 = document.createElement("td"); + const col3 = document.createElement("td"); + col1.innerText = `${regionsTiers[tierKey].label}`; + col2.innerText = `${regionsTiers[tierKey].tier}`; + col3.innerText = `${utils.currencyFormatter( + tiersCosts[regionsTiers[tierKey].tier] + )}`; + + row.appendChild(col1); + row.appendChild(col2); + row.appendChild(col3); + + $tableTarget.append(row); + }); + }; + + /** + * Sets DOM elements with initial values from cost form state + * + * @param {CostCalculatorValuesState} values + */ + const initializeValues = (values = calculatorValuesState) => { + Object.keys(costFormElements).map((elName) => { + const curEl = costFormElements[elName]; + if (curEl.is("input") || curEl.is("select")) { + curEl.val(values[elName]); + } else { + $(curEl).children("input").first().val(values[elName]); + } + }); + }; + + /** + * Sets DOM elements with initial values from NCU form state + * + * @param {NcuEstimateValuesState} values + */ + const initializeNcuEstimateValues = (values = ncuEstimateValuesState) => { + Object.keys(ncuFormElements).map((elName) => { + const curEl = ncuFormElements[elName]; + if (curEl.is("input") || curEl.is("select")) { + curEl.val(values[elName]); + } else { + $(curEl).children("input").first().val(values[elName]); + } + }); + + updateNcuEstimate(ncuEstimateValuesState); + + 
ncuEstimateElements.ncuEstConnsPerNcu.text(ncuParameterVals.connsPerNcu); + ncuEstimateElements.ncuEstConnsPerSecondPerNcu.text( + ( + ncuParameterVals.connsPerSecPerAcu * ncuParameterVals.acusPerNcu + ).toFixed(2) + ); + ncuEstimateElements.ncuEstMbpsPerNcu.text(ncuParameterVals.mbpsPerNcu); + }; + + ////// + // Update values functions + ////// + + /** + * Calculates new NCU usage estimate and updates DOM elements that show values + * + * @param {NcuEstimateValuesState} ncuValues + */ + const updateNcuEstimate = (ncuValues) => { + const updatedNcuValues = utils.calculateNcuValues(ncuValues); + + $("#ncuEstimateValue").text(`${updatedNcuValues.total} NCUs`); + + // update cost estimate form when estimated number of NCUs changes + if (calculatorValuesState.numNcus !== updatedNcuValues.total) { + costFormElements.numNcus.val(updatedNcuValues.total).trigger("change"); + } + + ncuEstimateElements.ncuEstConnRate.text(ncuValues.avgNewConnsPerSec); + ncuEstimateElements.ncuEstConnDuration.text(ncuValues.avgConnDuration); + ncuEstimateElements.ncuEstAvgConn.text( + updatedNcuValues.avgConcurrentConnections + ); + + ncuEstimateElements.ncuEstAvgConn2.text( + updatedNcuValues.avgConcurrentConnections + ); + ncuEstimateElements.ncuEstConnRate2.text(ncuValues.avgNewConnsPerSec); + ncuEstimateElements.ncuEstDataRate.text(ncuValues.totalBandwidth); + + ncuEstimateElements.ncuEstMin1.text((updatedNcuValues.min ?? 0).toFixed(2)); + ncuEstimateElements.ncuEstMin.text((updatedNcuValues.min ?? 0).toFixed(2)); + ncuEstimateElements.ncuEstTotal.text(updatedNcuValues.total); + + costFormLabelElements.numNcusEstVal.text(updatedNcuValues.total); + }; + + /** + * Calculates new estimated cost based on base costs and + * form values, and updates the DOM elements that show values + * + * @param {Costs} costs + * @param {CostCalculatorValuesState} values + */ + const updateCost = (costs, values = calculatorValuesState) => { + const updatedTotalCost = utils.calculateCost(costs, values); + + $("#total-value").text(utils.currencyFormatter(updatedTotalCost)); + updateTotalCostDetails(values, updatedTotalCost); + }; + + /** + * @param {CostCalculatorValuesState} formValues + * @param {number} totalCost + */ + const updateTotalCostDetails = (formValues, totalCost) => { + totalCostDetailElements.hours.text(formValues.numHours); + totalCostDetailElements.ncus.text(formValues.numNcus); + totalCostDetailElements.listenPorts.text( + Math.max(formValues.numListenPorts - 5, 0) + ); + totalCostDetailElements.listenPortsCost.text( + utils.currencyFormatter(costs.listenPorts) + ); + + if (formValues.isWAF) { + totalCostDetailElements.tierCost.text( + `(${utils.currencyFormatter( + costs.tiersCosts[costs.regionsTiers[formValues.region].tier] + )} region cost + ${utils.currencyFormatter(costs.WAF, 3)} WAF cost)` + ); + } else { + totalCostDetailElements.tierCost.text( + utils.currencyFormatter( + costs.tiersCosts[costs.regionsTiers[formValues.region].tier] + ) + ); + } + + totalCostDetailElements.total.text(utils.currencyFormatter(totalCost)); + + // update highlighted tier cost + const rowIndex = + Object.keys(costs.regionsTiers).indexOf(formValues.region) + 1; + + totalCostDetailElements.tiersCostsTable.find("tr")?.each((index, rowEl) => { + if (index === rowIndex) { + $(rowEl).addClass("selected"); + } else { + $(rowEl).removeClass("selected"); + } + }); + }; + + /** + * Opens collapsed sections and uses window.print to open the + * browser default print window + */ + function printCostEstimate() { + // expand the total price 
details if they aren't already
+    const totalDetails = $("#total-cost-details");
+    const detailsOpen = totalDetails.attr("open");
+    if (!detailsOpen) {
+      totalDetails.attr("open", "true");
+    }
+    const ncuDetails = $("#ncu-usage-details");
+    const ncuDetailsOpen = ncuDetails.attr("open");
+    if (!ncuDetailsOpen) {
+      ncuDetails.attr("open", "true");
+    }
+
+    window.print();
+
+    // collapse the total price details if it was closed initially
+    if (!detailsOpen) {
+      totalDetails.attr("open", null);
+    }
+    if (!ncuDetailsOpen) {
+      ncuDetails.attr("open", null);
+    }
+  }
+
+  const start = async () => {
+    const costs = await utils.loadCosts();
+    setupChangeListeners(costs);
+    initializeValues(calculatorValuesState);
+    initializeNcuEstimateValues(ncuEstimateValuesState);
+    populateTierSelect(costs.regionsTiers);
+    populateTierCostTable(costs.regionsTiers, costs.tiersCosts);
+    updateCost(costs);
+  };
+  start();
+})();
diff --git a/static/nginxaas-azure/log-analytics-security.png b/static/nginxaas-azure/log-analytics-security.png
new file mode 100644
index 000000000..3a1b10ef2
Binary files /dev/null and b/static/nginxaas-azure/log-analytics-security.png differ
diff --git a/static/nginxaas-azure/n4a-architecture.png b/static/nginxaas-azure/n4a-architecture.png
new file mode 100644
index 000000000..a95570b74
Binary files /dev/null and b/static/nginxaas-azure/n4a-architecture.png differ
diff --git a/static/nginxaas-azure/n4a-data-plane-architecture.svg b/static/nginxaas-azure/n4a-data-plane-architecture.svg
new file mode 100644
index 000000000..59ce334e4
--- /dev/null
+++ b/static/nginxaas-azure/n4a-data-plane-architecture.svg
@@ -0,0 +1,2304 @@
[2,304 lines of SVG markup for the new data plane architecture diagram; XML content omitted]
diff --git a/static/nginxaas-azure/new-case.png b/static/nginxaas-azure/new-case.png
new file mode 100644
index 000000000..5f144b61f
Binary files /dev/null and b/static/nginxaas-azure/new-case.png differ
diff --git a/static/nginxaas-azure/properties.png b/static/nginxaas-azure/properties.png
new file mode 100644
index 000000000..215d69fac
Binary files /dev/null and b/static/nginxaas-azure/properties.png differ
diff --git a/static/nginxaas-azure/raise-ticket.png b/static/nginxaas-azure/raise-ticket.png
new file mode 100644
index 000000000..9aca0a435
Binary files /dev/null and b/static/nginxaas-azure/raise-ticket.png differ
diff --git a/static/nginxaas-azure/security-diagnostic-setting.png b/static/nginxaas-azure/security-diagnostic-setting.png
new file mode 100644
index 000000000..d9e3d7ce0
Binary files /dev/null and b/static/nginxaas-azure/security-diagnostic-setting.png differ
diff --git a/static/nginxaas-azure/test-deployment.png b/static/nginxaas-azure/test-deployment.png
new file mode 100644
index 000000000..b4f2082f7
Binary files /dev/null and b/static/nginxaas-azure/test-deployment.png differ
diff --git a/static/nginxaas-azure/validation-error.png b/static/nginxaas-azure/validation-error.png
new file mode 100644
index 000000000..bf94c269c
Binary files /dev/null and b/static/nginxaas-azure/validation-error.png differ
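Reviewers who want to sanity-check the NCU math in `utils.calculateNcuValues` outside the page can use the minimal sketch below. The `params` numbers are illustrative placeholders only; the real `ncuParameterVals` constants are defined earlier in the calculator script and are not shown in this hunk.

```js
// Standalone sketch of the NCU estimate performed by calculateNcuValues.
// NOTE: these parameter values are placeholders, not the real ncuParameterVals.
const params = {
  connsPerSecPerAcu: 100, // placeholder
  acusPerNcu: 10,         // placeholder
  connsPerNcu: 4000,      // placeholder
  mbpsPerNcu: 60,         // placeholder
};

function estimateNcus({ avgNewConnsPerSec, avgConnDuration, totalBandwidth }) {
  // Average concurrent connections = new connections per second * average duration (s)
  const avgConcurrentConnections = avgNewConnsPerSec * avgConnDuration;

  // An NCU count must cover whichever dimension is the bottleneck:
  // connection rate, concurrent connections, or bandwidth.
  const min = Math.max(
    avgNewConnsPerSec / (params.connsPerSecPerAcu * params.acusPerNcu) || 0,
    avgConcurrentConnections / params.connsPerNcu || 0,
    totalBandwidth / params.mbpsPerNcu || 0
  );

  // NCUs are rounded up to bundles of 10, with a floor of 10.
  const total = Math.max(10, Math.ceil(min / 10) * 10);

  return { avgConcurrentConnections, min, total };
}

// With the form defaults (10 conns/s, 10 s duration, 500 Mbps) and the placeholder
// parameters above, bandwidth is the limiting dimension: 500 / 60 ≈ 8.33 NCUs,
// which rounds up to a 10-NCU bundle.
console.log(estimateNcus({ avgNewConnsPerSec: 10, avgConnDuration: 10, totalBandwidth: 500 }));
```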
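The body of `utils.calculateCost` begins above this hunk, so the sketch below is only an assumed roll-up inferred from what `updateTotalCostDetails` displays: a per-NCU-hour regional rate (plus an optional WAF add-on) multiplied by NCUs and hours, plus a charge for listen ports beyond the free allowance. All rates are placeholders, not real NGINXaaS for Azure pricing.

```js
// Placeholder cost table shaped like the Costs object used by the calculator.
const exampleCosts = {
  regionsTiers: { westus2: { label: "West US 2", tier: 1 } },
  tiersCosts: { 1: 0.015 },   // placeholder $/NCU-hour for tier 1
  WAF: 0.005,                 // placeholder WAF $/NCU-hour
  numFreeListenPorts: 5,
  listenPorts: 0.01,          // placeholder charge per extra listen port
};

function estimateMonthlyCost(costs, values) {
  const tierRate = costs.tiersCosts[costs.regionsTiers[values.region].tier];
  const perNcuHour = values.isWAF ? tierRate + costs.WAF : tierRate;
  const chargeablePorts = Math.max(values.numListenPorts - costs.numFreeListenPorts, 0);

  // Assumed roll-up; the real calculateCost in the script above is authoritative.
  return perNcuHour * values.numNcus * values.numHours +
         chargeablePorts * costs.listenPorts;
}

// With the form defaults (20 NCUs, 730 hours, 5 listen ports, WAF off) and the
// placeholder rates above: 0.015 * 20 * 730 = $219.00, with no extra port charge.
console.log(estimateMonthlyCost(exampleCosts, {
  region: "westus2",
  numNcus: 20,
  numHours: 730,
  numListenPorts: 5,
  isWAF: false,
}));
```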
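One detail worth calling out for review: `updateNcuEstimate` pushes the estimated NCU count into the cost form with `.val(updatedNcuValues.total).trigger("change")`. The explicit `trigger` is needed because jQuery does not fire `change` handlers when a value is set programmatically, as this small illustration shows.

```js
// jQuery does not fire "change" handlers for programmatic .val() calls, so the
// calculator re-triggers the event to keep the cost estimate in sync.
const $numNcus = $("#numNcus");
$numNcus.on("change", (evt) => console.log("recalculate with", evt.target.value));

$numNcus.val(30);                    // updates the input, but no handler runs
$numNcus.val(30).trigger("change");  // updates the input and recalculates
```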