diff --git a/.travis.yml b/.travis.yml index 6a8b3885b..26fd84e83 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,15 @@ # https://docs.travis-ci.com/user/languages/go/ language: go -go: ["1.10"] + +# Use ".x" to ask gimme to choose latest stable minor version of each Go +# release: https://github.com/travis-ci/gimme +go: + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + +env: + # Necessary for Go 1.11 and 1.12, so they'll respect the version constraints + # in go.mod. + - GO111MODULE=on diff --git a/LICENSE b/LICENSE index b397cbe1b..d64569567 100644 --- a/LICENSE +++ b/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2018 Google LLC + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index 0aeea4ee4..9dcf87af6 100644 --- a/README.md +++ b/README.md @@ -34,10 +34,29 @@ own and can obtain certificates for. 1. Install Go version 1.10 or higher. Optionally, set [$GOPATH](https://github.com/golang/go/wiki/GOPATH) to something (default is `~/go`) and/or add `$GOPATH/bin` to `$PATH`. - 2. `go get -u -mod=vendor github.com/ampproject/amppackager/cmd/amppkg` - - Optionally, move the built `~/go/bin/amppkg` wherever you like. - 3. Create a file `amppkg.toml`. A minimal config looks like this: + 1. Get amppackager. + + Check your Go version by running `go version`. + + For Go 1.14 and higher versions run: + + ``` + go get -u github.com/ampproject/amppackager/cmd/amppkg + ``` + + For Go 1.13 and earlier versions run: + + ``` + go get -u -mod=vendor github.com/ampproject/amppackager/cmd/amppkg + ``` + + 1. Optionally, move the built `~/go/bin/amppkg` wherever you like. + 1. Prepare a temporary certificate and private key pair to use for signing the + exchange when testing your config. 
Follow WICG + [instructions](https://github.com/WICG/webpackage/tree/master/go/signedexchange#creating-our-first-signed-exchange) + to ensure compliance with the [WICG certificate + requirements](https://wicg.github.io/webpackage/draft-yasskin-httpbis-origin-signed-exchanges-impl.html#cross-origin-cert-req). + 1. Create a file `amppkg.toml`. A minimal config looks like this: ``` LocalOnly = true CertFile = 'path/to/fullchain.pem' @@ -49,7 +68,7 @@ own and can obtain certificates for. Domain = "amppackageexample.com" ``` More details can be found in [amppkg.example.toml](amppkg.example.toml). - 4. `amppkg -development` + 1. `amppkg -development` If `amppkg.toml` is not in the current working directory, pass `-config=/path/to/amppkg.toml`. @@ -61,12 +80,17 @@ container. #### Test your config - 1. Run Chrome with the following commandline flags: + 1. Run Chrome with the following command line flags: ``` - --user-data-dir=/tmp/udd - --ignore-certificate-errors-spki-list=$(openssl x509 -pubkey -noout -in path/to/fullchain.pem | openssl pkey -pubin -outform der | openssl dgst -sha256 -binary | base64) - --enable-features=SignedHTTPExchange - 'data:text/html,click me' + alias chrome = [FULL PATH TO CHROME BINARY] + PATH_TO_FULLCHAIN_PEM = [FULL PATH TO fullchain.pem] + chrome --user-data-dir=/tmp/udd\ + --ignore-certificate-errors-spki-list=$(\ + openssl x509 -pubkey -noout -in $PATH_TO_FULLCHAIN_PEM |\ + openssl pkey -pubin -outform der |\ + openssl dgst -sha256 -binary | base64)\ + --enable-features=SignedHTTPExchange\ + 'data:text/html,click me' ``` 2. Open DevTools. Check 'Preserve log'. 3. Click the `click me` link. @@ -148,13 +172,15 @@ You may also want to: before publication, or with a regular audit of a sample of documents. The [transforms](transformer/) are designed to work on valid AMP pages, and may break invalid AMP in small ways. + 4. 
Setup + [monitoring](#monitoring-amppackager-in-production-via-its-prometheus-endpoints) + of `amppackager` and related requests to AMP document server. Once you've done the above, you should be able to test by launching Chrome -without any comamndline flags; just make sure -chrome://flags/#enable-signed-http-exchange is enabled. To test by visiting the -packager URL directly, first add a Chrome extension to send an -`AMP-Cache-Transform: any` request header. Otherwise, follow the above -"Demonstrate privacy-preserving prefetch" instructions. +without any command line flags. To test by visiting the packager URL directly, +first add a Chrome extension to send an `AMP-Cache-Transform: any` request +header. Otherwise, follow the above "Demonstrate privacy-preserving prefetch" +instructions. ##### Security Considerations @@ -175,8 +201,9 @@ that: It is possible to test an otherwise fully production configuration without obtaining a certificate with the `CanSignHttpExchanges` extension. `amppkg` -still needs to perform OCSP verification, so the Issuer CA must be valid (i.e. no -self-signed certificates). e.g. You can use a certificate from [Let's Encrypt](https://letsencrypt.org/). +still needs to perform OCSP verification, so the Issuer CA must be valid (i.e. +no self-signed certificates). e.g. You can use a certificate from [Let's +Encrypt](https://letsencrypt.org/). Running `amppkg` with the `-invalidcert` flag will skip the check for `CanSignHttpExchanges`. This flag is not necessary when using the @@ -204,8 +231,9 @@ eligible for use in the AMP viewer in other browsers. ### Limitations -Currently, the packager will refuse to sign any AMP documents larger than 4 MB. -Patches that allow for streamed signing are welcome. +Currently, the packager will refuse to sign any AMP documents that hit the size +limit of 4MB. You can [monitor](monitoring.md#available-metrics) the size of +your documents that have been signed, to see how close you are to the limit. 
The packager refuses to sign any URL that results in a redirect. This is by design, as neither the original URL nor the final URL makes sense as the signed @@ -225,6 +253,65 @@ amp-install-serviceworker will still succeed in the unsigned AMP viewer case, and crawlers may reuse the contents of the signed exchange when displaying an AMP viewer to browser versions that don't support SXG. +#### `` + +If you have any inline ``s (those with a `script` attribute), then +the expiration of the SXG will be set based on the minimum `max-age` of those +``s, minus one day (due to +[backdating](https://github.com/ampproject/amppackager/issues/397)). If +possible, prefer external ``s (those with a `src` attribute), which +do not have this limitation. + +If inline is necessary, you will need to weigh the [security +risks](https://wicg.github.io/webpackage/draft-yasskin-http-origin-signed-responses.html#seccons-downgrades) +against the [AMP Cache requirement](docs/cache_requirements.md) for a minimum +`max-age` of `345600` (4 days). For SXGs shorter than that, the Google AMP Cache +will treat them as if unsigned (by showing an AMP Viewer). + +#### How does `amppackager` process a document it cannot sign? + +Packager will respond to every request with either a signed document, an +unsigned document, or an error. + +It will sign every document it can. It may, however, decide not to, +for a number of reasons: the certificate may be invalid, the page may not be a +valid AMP page, the page may not be an AMP page at all, the page may be 4MB or +larger, etc. + +If packager cannot sign the document but can fetch it, it will proxy the +document unsigned. + +If there was a problem with the gateway fetch request, or with the original +request, packager will respond with an HTTP error, and log the problem to +stdout. + +You can monitor the packager's error rates, as well as the rates of signed +vs unsigned documents, via the tools discussed in the next section. 
+ +Specifically, you can monitor the requests that resulted in a signed or an +unsigned document via `documents_signed_vs_unsigned` metric, and the ones that +resulted in an error - via `total_requests_by_code_and_url` metric. + +#### Monitoring `amppackager` in production via its Prometheus endpoints + +Once you've run the `amppackager` server in production, you may want to +[monitor](monitoring.md) its health and performance. You may also monitor the +performance of the underlying requests to the AMP document server. You can +monitor both servers via the [Prometheus](https://prometheus.io/) endpoints +provided by `amppackager`. A few examples of questions you can answer: + +* Is `amppackager` up and running? +* How many requests has it processed since it's been up? +* What was the 0.9 percentile latency of handling those request? +* How many of those requests have triggered a gateway request to the + AMP document server? +* For those gateway requests, what was the 0.9 percentile latency of + the AMP document server? + +You can perform one-off manual health inspections, visualize the real-time +stats, set up alerts, and more. To learn what are all the things you can +monitor, and how to do it, check the [monitoring manual](monitoring.md). + ## Local Transformer The local transformer is a library within the AMP Packager that transforms AMP diff --git a/amppkg.example.toml b/amppkg.example.toml index 2431bcc3a..3277038cb 100644 --- a/amppkg.example.toml +++ b/amppkg.example.toml @@ -255,6 +255,7 @@ ForwardedRequestHeaders = [] # For the DNS challenge, go-acme/lego, there are certain environment variables that need to be set up which depends on # the DNS provider that you use to fulfill the DNS challenge. See: # https://go-acme.github.io/lego/dns/ + # To use this, build amppkg with `go build -tags dns01`; it is disabled by default because it bloats the binary. 
# DnsProvider = "gcloud" # This config will be used if 'autorenewcert' is turned on and 'development' is turned on. diff --git a/cmd/amppkg/main.go b/cmd/amppkg/main.go index d5e8a5bb9..0685b1e38 100644 --- a/cmd/amppkg/main.go +++ b/cmd/amppkg/main.go @@ -28,6 +28,7 @@ import ( "time" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/ampproject/amppackager/packager/certcache" "github.com/ampproject/amppackager/packager/certloader" @@ -42,6 +43,7 @@ import ( var flagConfig = flag.String("config", "amppkg.toml", "Path to the config toml file.") var flagDevelopment = flag.Bool("development", false, "True if this is a development server.") var flagInvalidCert = flag.Bool("invalidcert", false, "True if invalid certificate intentionally used in production.") +var flagStaging = flag.String("staging", "", "URL that overrides the base URL used to host certs, used for testing. Can only be used with -development flag.") // IMPORTANT: do not turn on this flag for now, it's still under development. 
var flagAutoRenewCert = flag.Bool("autorenewcert", false, "True if amppackager is to attempt cert auto-renewal.") @@ -104,7 +106,7 @@ func main() { } else { die(errors.Wrap(err, "initializing cert cache")) } - } + } healthz, err := healthz.New(certCache) if err != nil { @@ -119,15 +121,21 @@ func main() { defer rtvCache.StopCron() var overrideBaseURL *url.URL - if *flagDevelopment { + if *flagStaging != "" { + overrideBaseURL, err = url.Parse(*flagStaging) + if err != nil { + die(errors.Wrap(err, "parsing staging URL")) + } + } else if *flagDevelopment { overrideBaseURL, err = url.Parse(fmt.Sprintf("https://localhost:%d/", config.Port)) if err != nil { die(errors.Wrap(err, "parsing development base URL")) } } + signerRequireHeaders := !*flagDevelopment signer, err := signer.New(certCache, key, config.URLSet, rtvCache, certCache.IsHealthy, - overrideBaseURL, /*requireHeaders=*/!*flagDevelopment, config.ForwardedRequestHeaders) + overrideBaseURL, signerRequireHeaders, config.ForwardedRequestHeaders, time.Now) if err != nil { die(errors.Wrap(err, "building signer")) } @@ -143,7 +151,7 @@ func main() { Addr: addr, // Don't use DefaultServeMux, per // https://blog.cloudflare.com/exposing-go-on-the-internet/. - Handler: logIntercept{mux.New(certCache, signer, validityMap, healthz)}, + Handler: logIntercept{mux.New(certCache, signer, validityMap, healthz, promhttp.Handler())}, ReadTimeout: 10 * time.Second, ReadHeaderTimeout: 5 * time.Second, // If needing to stream the response, disable WriteTimeout and diff --git a/cmd/gateway_server/server.go b/cmd/gateway_server/server.go index 7d1340ed2..20c20a687 100644 --- a/cmd/gateway_server/server.go +++ b/cmd/gateway_server/server.go @@ -74,7 +74,7 @@ func (s *gatewayServer) GenerateSXG(ctx context.Context, request *pb.SXGRequest) } // Note: do not initialize certCache, we just want it to hold the certs for now. 
- certCache := certcache.New(certs, nil, []string{""}, "", "", "", nil); + certCache := certcache.New(certs, nil, []string{""}, "", "", "", nil, time.Now) privateKey, err := util.ParsePrivateKey(request.PrivateKey) if err != nil { @@ -116,7 +116,7 @@ func (s *gatewayServer) GenerateSXG(ctx context.Context, request *pb.SXGRequest) }, } - packager, err := signer.New(certCache, privateKey, urlSets, s.rtvCache, shouldPackage, signUrl, false, []string{}) + packager, err := signer.New(certCache, privateKey, urlSets, s.rtvCache, shouldPackage, signUrl, false, []string{}, time.Now) if err != nil { return errorToSXGResponse(err), nil diff --git a/deploy/gcloud/Dockerfile.consumer b/deploy/gcloud/Dockerfile.consumer new file mode 100644 index 000000000..4f056cda2 --- /dev/null +++ b/deploy/gcloud/Dockerfile.consumer @@ -0,0 +1,78 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Used https://medium.com/@chemidy/create-the-smallest-and-secured-golang-docker-image-based-on-scratch-4752223b7324 +# as a guide. + +# Use an official Go runtime as a parent image +FROM golang:1.13 as builder + +ENV GO111MODULE=on + +# Install git. +# Git is required for fetching the dependencies. +RUN apt-get update \ + && apt-get install -y git + +WORKDIR /data + +# Run this if you clone from master branch. 
+RUN git clone -b master https://github.com/ampproject/amppackager.git /data/amppackager +# RUN git clone https://github.com/ampproject/amppackager.git /data/amppackager + +WORKDIR /data/amppackager/cmd/amppkg + +# Build the binary. +# See: https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b +# https://github.com/kelseyhightower/contributors +# Avoid "x509: failed to load system roots and no roots provided" by bundling root certificates. +# Avoid dynamic linking by using the pure Go net package (-tags netgo) +# Avoid dynamic linking by disabling cgo (CGO_ENABLED=0) +# Reduce binary size by omitting dwarf information (-ldflags '-w') +RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w' -o /go/bin/amppkg + +FROM alpine:latest as certs + +RUN apk --update add ca-certificates + +# Build a small executable from docker alpine. Docker alpine is needed because +# bash is required to be present by the deployer as it prints the env variables +# listed below for verifiying the docker image. +FROM alpine:latest + +# Copy the certs from certs image. +ENV PATH=/bin +COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt + +# Copy the AMP packager binary into our app dir inside the container. +COPY --from=builder /go/bin/amppkg . + +# Make port 8080 available to the world outside this container. This +# port must match the AMP Packager port configured in the toml file. +EXPOSE 8080 + +ENV PATH=$PATH:. + +# This env var is used by click-to-deploy deployer, right now needs to be +# manually synced with C2D_RELEASE in the other Docker images for AMP Packager +# Deployer currently in https://github.com/banaag/click-to-deploy but will move +# to https://github.com/GoogleCloudPlatform/click-to-deploy. +ENV C2D_RELEASE=0.0.1 + +# Start the AMP Packager +ENTRYPOINT ["amppkg"] + +# Set default flags to run in development mode. 
+CMD ["-config=/consumer/amppkg_consumer.toml"] + diff --git a/deploy/gcloud/Dockerfile.init b/deploy/gcloud/Dockerfile.init new file mode 100644 index 000000000..23c4cd21b --- /dev/null +++ b/deploy/gcloud/Dockerfile.init @@ -0,0 +1,40 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM alpine:latest as certs + +RUN apk --update add ca-certificates + +FROM debian:buster-slim as builder + +# Git is required for fetching the dependencies. +RUN apt-get update \ + && apt-get install -y openssl \ + && apt-get install -y git + +WORKDIR /data + +COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt + +COPY deploy.sh ./deploy.sh + +COPY san_template.cnf ./san_template.cnf + +COPY *.toml ./ + +# This env var is used by click-to-deploy deployer, right now needs to be +# manually synced with C2D_RELEASE in the other Docker images for AMP Packager +ENV C2D_RELEASE=0.0.1 + +CMD ["/bin/bash", "-c", "./deploy.sh"] diff --git a/deploy/gcloud/Dockerfile.renewer b/deploy/gcloud/Dockerfile.renewer new file mode 100644 index 000000000..27c9bd6fa --- /dev/null +++ b/deploy/gcloud/Dockerfile.renewer @@ -0,0 +1,79 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Used https://medium.com/@chemidy/create-the-smallest-and-secured-golang-docker-image-based-on-scratch-4752223b7324 +# as a guide. + +# Use an official Go runtime as a parent image +FROM golang:1.13 as builder + +ENV GO111MODULE=on + +# Install git. +# Git is required for fetching the dependencies. +RUN apt-get update \ + && apt-get install -y git + +WORKDIR /data + +# Run this if you clone from master branch. +RUN git clone -b master https://github.com/ampproject/amppackager.git /data/amppackager +# RUN git clone https://github.com/ampproject/amppackager.git /data/amppackager + +WORKDIR /data/amppackager/cmd/amppkg + +# Build the binary. +# See: https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b +# https://github.com/kelseyhightower/contributors +# Avoid "x509: failed to load system roots and no roots provided" by bundling root certificates. +# Avoid dynamic linking by using the pure Go net package (-tags netgo) +# Avoid dynamic linking by disabling cgo (CGO_ENABLED=0) +# Reduce binary size by omitting dwarf information (-ldflags '-w') +RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w' -o /go/bin/amppkg + +FROM alpine:latest as certs + +RUN apk --update add ca-certificates + +# Build a small executable from docker alpine. Docker alpine is needed because +# bash is required to be present by the deployer as it prints the env variables +# listed below for verifiying the docker image +FROM alpine:latest + +ENV PATH=/bin + +# Copy the certs from the certs image. 
+COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt + +# Copy the AMP packager binary into our app dir inside the container. +COPY --from=builder /go/bin/amppkg . + +# Make port 8080 available to the world outside this container. This +# port must match the AMP Packager port configured in the toml file. +EXPOSE 8080 + +ENV PATH=$PATH:. + +# This env var is used by click-to-deploy deployer, right now needs to be +# manually synced with C2D_RELEASE in the other Docker images for AMP Packager +# Deployer currently in https://github.com/banaag/click-to-deploy but will move +# to https://github.com/GoogleCloudPlatform/click-to-deploy. +ENV C2D_RELEASE=0.0.1 + +# Start the AMP Packager +ENTRYPOINT ["amppkg"] + +# Set default flags to run in autorenew cert mode. +CMD ["-autorenewcert", "-config=/renewer/amppkg_renewer.toml"] + diff --git a/deploy/gcloud/README.md b/deploy/gcloud/README.md new file mode 100644 index 000000000..04bb1082f --- /dev/null +++ b/deploy/gcloud/README.md @@ -0,0 +1,127 @@ +# AMP Packager Google Cloud Deployment + +This page shows you how to deploy a [Kubernetes](https://kubernetes.io/) cluster of + [AMP Packager](https://github.com/ampproject/amppackager#amp-packager) in a Google Cloud environment: + + 1. Setup your production or development environment in preparation for the cloud deployment. + 2. Modify the setup.sh script to enter all information relevant to your cloud + deployment. + 3. Run the gcloud_up.sh to initiate the deployment. + 4. Run the gcloud_down.sh to tear down the deployment, if desired. + +## Before you begin + +These instructions are mostly lifted from the [Google Kubernetes Engine tutorial](https://cloud.google.com/kubernetes-engine/docs/tutorials/hello-app). You may want to consult that page for further reference. The following items are required before you can begin your deployment: + + 1. 
Visit the [Kubernetes Engine page](https://console.cloud.google.com/kubernetes/list) in the Google Cloud Console. + + 2. Create or select an existing [Google Cloud Project](https://cloud.google.com/appengine/docs/standard/nodejs/building-app/creating-project) + + 3. Wait for the API and related services to be enabled. This can take several minutes. + + 4. Make sure that billing is enabled for your Google Cloud project. [Learn how to confirm billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project). + + 5. Install the [Google Cloud SDK](https://cloud.google.com/sdk/docs/quickstarts), which includes the gcloud command-line tool. + + 6. Using the gcloud command line tool, install the Kubernetes command-line tool. kubectl is used to communicate with Kubernetes, which is the cluster orchestration system of GKE clusters: + + gcloud components install kubectl + + 7. Install [Docker Community Edition (CE)](https://docs.docker.com/install/) on your workstation. You will use this to build a container image for the application. + + 8. Install the [Git source control](https://git-scm.com/downloads) tool to fetch the sample application from GitHub. + +## Enter your project setup information into setup.sh + +The following information is required to be entered into setup.sh: + + 1. PROJECT_ID. This is your Google Cloud Project ID where you want your cluster to reside. Note that if your project ID is scoped by a domain, you need to replace the ':' with a '/' in the project id name. See: [Google Cloud domain scoped projects](https://cloud.google.com/container-registry/docs/overview#domain-scoped_projects). + + 2. COMPUTE_ENGINE_ZONE. Select a region where you want your app's computing resources located. See: [Compute Zone Locations](https://console.cloud.google.com/compute/zones) + + 3. AMP_PACKAGER_DOMAIN. The domain you want to use for the [Certificate Signing Request](https://www.digicert.com/ecc-csr-creation-ssl-installation-apache.htm). 
+ + 4. AMP_PACKAGER_COUNTRY. The country you want to use for the [Certificate Signing Request](https://www.digicert.com/ecc-csr-creation-ssl-installation-apache.htm). + + 5. AMP_PACKAGER_STATE. The state you want to use for the [Certificate Signing Request](https://www.digicert.com/ecc-csr-creation-ssl-installation-apache.htm). + + 6. AMP_PACKAGER_LOCALITY. The locality you want to use for the [Certificate Signing Request](https://www.digicert.com/ecc-csr-creation-ssl-installation-apache.htm). + + 7. AMP_PACKAGER_ORGANIZATION. The organization you want to use for the [Certificate Signing Request](https://www.digicert.com/ecc-csr-creation-ssl-installation-apache.htm). + + 8. ACME_EMAIL_ADDRESS. The email address you used for your [Digicert ACME Account](https://docs.digicert.com/manage-certificates/certificate-profile-options/get-your-signed-http-exchange-certificate/). + + 9. ACME_DIRECTORY_URL. The [ACME API Directory URL](https://docs.digicert.com/certificate-tools/acme-user-guide/acme-directory-urls-signed-http-exchange-certificates/). Note that this URL is security sensitive so do not check in any files that contain this into Github. + +The following information can be customized in setup.sh, but the default also works fine: + + 1. AMP_PACKAGER_VERSION_TAG. The version tag attached to your docker image when it's built and uploaded into the [Container Registry](https://cloud.google.com/container-registry). + + 2. AMP_PACKAGER_NUM_REPLICAS. Number of AMP Packager replicas to run. Default is 2, you can scale up to 8 instances. + + 3. AMP_PACKAGER_CERT_FILENAME. The name of the amppackager certificate. + + 4. AMP_PACKAGER_CSR_FILENAME. The name of the amppackager [Certificate Signing Request](https://www.digicert.com/ecc-csr-creation-ssl-installation-apache.htm). + + 5. AMP_PACKAGER_PRIV_KEY_FILENAME. The name of the amppackager private key (see this [article](https://www.ericlight.com/using-ecdsa-certificates-with-lets-encrypt)). 
+ + Note that the cert, CSR and private key all go together. You may choose to + manually create these and [request the certificate from Digicert by email](https://docs.digicert.com/manage-certificates/certificate-profile-options/get-your-signed-http-exchange-certificate/). If you choose to go this route, you may simply copy these files into your + amppackager/deploy/gcloud/generated directory and they will not be auto-generated and + fulfilled by amppackager using ACME. By default, these files will be + automatically created given the information you supplied in setup.sh, and the + certificate will be requested using ACME automatically. + +## Run the deployment script (gcloud_up.sh) + + 1. Go to the directory where you installed amppackager. + 2. cd deploy/gcloud + 3. ./gcloud_up.sh. The script may pause and prompt you for a response at certain points before it + continues. + 4. Wait for script to finish. + +## Make sure the deployment is up and ready + + 1. Check that all the components of the deployment are up in the Kubernetes + page. If everything is alright, everything should have a green check mark + in the console. Check everything under Cluster, Workloads and "Service & + Ingress". + + Clusters will have "amppackager-cluster". + Workloads will have "amppackager-cert-renewer-pd", + "amppackager-nfs-server", and user-specified number of replicas of + "amppackager-cert-consumer-deployment". + Service & Ingress will have "amppackager-nfs-service and + amppackager-service". + + 2. Issue curl command to check on the health of the amppackager. It should + return "ok". The amppackager service IP address could be found using "kubectl get + service", it will be under the EXTERNAL-IP column. + + curl http://AMP_PACKAGER_SERVICE_IP_ADDRESS:AMP_PACKAGER_SERVICE_PORT/healthz + + 3. Issue curl command to test if you can download the sample signed exchange + in your domain ($AMP_PACKAGER_DOMAIN in setup.sh). 
+ + curl -H 'Accept: application/signed-exchange;v=b3' -H 'AMP-Cache-Transform: google;v="1..2"' -i http://AMP_PACKAGER_SERVICE_IP_ADDRESS:AMP_PACKAGER_SERVICE_PORT/priv/doc/https://$AMP_PACKAGER_DOMAIN/ + + 4. After you finish testing, lock down the amppackager service so that it's + only visible to your frontend server. You do this by modifying the + following section of amppackage_service.yaml: + + loadBalancerSourceRanges: + - YOUR_FRONTEND_SERVER_IP_ADDRESS_HERE in CIDR format. CIDR is explained + in the comments before this section in the yaml file. + + 5. After you make the modifications in step 4, issue the following command to + apply changes to you cluster: + + kubectl apply -f amppackager_service.yaml + +## Optionally, run the teardown script (gcloud_down.sh) + + 1. Go to the directory where you installed amppackager. + 2. cd deploy/gcloud + 3. ./gcloud_down.sh + 4. Wait for script to finish. + diff --git a/deploy/gcloud/amppackager_cert_consumer_template.yaml b/deploy/gcloud/amppackager_cert_consumer_template.yaml new file mode 100644 index 000000000..a49782017 --- /dev/null +++ b/deploy/gcloud/amppackager_cert_consumer_template.yaml @@ -0,0 +1,37 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: amppackager-cert-consumer-deployment + labels: + app: amppackager-consumer +spec: + replicas: $(AMP_PACKAGER_NUM_REPLICAS) + selector: + matchLabels: + app: amppackager-consumer + template: + metadata: + labels: + app: amppackager-consumer + spec: + containers: + - image: gcr.io/$(PROJECT_ID)/amppackager:$(AMP_PACKAGER_VERSION_TAG) + imagePullPolicy: IfNotPresent + name: amppackager-cert-consumer-pd + ports: + - containerPort: 8080 + protocol: TCP + imagePullPolicy: Always + volumeMounts: + # name should match from volumes section + - name: nfs-volume-renewer + mountPath: "/renewer" + - name: nfs-volume-consumer + mountPath: "/consumer" + volumes: + - name: nfs-volume-renewer + persistentVolumeClaim: + claimName: nfs-renewer-pvc + - name: 
nfs-volume-consumer + persistentVolumeClaim: + claimName: nfs-consumer-pvc diff --git a/deploy/gcloud/amppackager_cert_renewer_template.yaml b/deploy/gcloud/amppackager_cert_renewer_template.yaml new file mode 100644 index 000000000..39726bb37 --- /dev/null +++ b/deploy/gcloud/amppackager_cert_renewer_template.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: amppackager-cert-renewer-pd +spec: + replicas: 1 + selector: + matchLabels: + app: amppackager-cert-renewer-pd + template: + metadata: + name: amppackager-cert-renewer-pd + labels: + app: amppackager-cert-renewer-pd + spec: + containers: + - image: gcr.io/$(PROJECT_ID)/amppackager_renewer:$(AMP_PACKAGER_VERSION_TAG) + name: amppackager-cert-renewer-container + imagePullPolicy: Always + name: amppackager + volumeMounts: + # name should match from volumes section + - name: nfs-volume-renewer + mountPath: "/renewer" + - name: nfs-volume-consumer + mountPath: "/consumer" + volumes: + - name: nfs-volume-renewer + persistentVolumeClaim: + claimName: nfs-renewer-pvc + - name: nfs-volume-consumer + persistentVolumeClaim: + claimName: nfs-consumer-pvc + diff --git a/deploy/gcloud/amppackager_service.yaml b/deploy/gcloud/amppackager_service.yaml new file mode 100644 index 000000000..a8b5d2947 --- /dev/null +++ b/deploy/gcloud/amppackager_service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: amppackager-service +spec: + type: LoadBalancer + selector: + app: amppackager-consumer + ports: + - protocol: TCP + port: 60000 + targetPort: 8080 + # Uncomment the next 2 lines if you want to limit the ip addresses that can access this + # service. Replace the IP address with the IP you desire to have access to the load balancer. 
+ # See: + # https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ + # https://mxtoolbox.com/subnetcalculator.aspx + # https://www.digitalocean.com/community/tutorials/understanding-ip-addresses-subnets-and-cidr-notation-for-networking + + # loadBalancerSourceRanges: + # - 35.212.176.108/32 diff --git a/deploy/gcloud/amppkg_consumer_template.toml b/deploy/gcloud/amppkg_consumer_template.toml new file mode 100644 index 000000000..c20196c8e --- /dev/null +++ b/deploy/gcloud/amppkg_consumer_template.toml @@ -0,0 +1,9 @@ +CertFile = '/consumer/$(AMP_PACKAGER_CERT_FILENAME)' +NewCertFile = '/consumer/new.cert' +KeyFile = '/consumer/$(AMP_PACKAGER_PRIV_KEY_FILENAME)' +OCSPCache = '/consumer/amppkg-ocsp' + +[[URLSet]] + [URLSet.Sign] + Domain = "$(AMP_PACKAGER_DOMAIN)" + diff --git a/deploy/gcloud/amppkg_renewer_template.toml b/deploy/gcloud/amppkg_renewer_template.toml new file mode 100644 index 000000000..97079581b --- /dev/null +++ b/deploy/gcloud/amppkg_renewer_template.toml @@ -0,0 +1,19 @@ +CertFile = '/renewer/$(AMP_PACKAGER_CERT_FILENAME)' +NewCertFile = '/renewer/new.cert' +CSRFile = '/renewer/$(AMP_PACKAGER_CSR_FILENAME)' +KeyFile = '/renewer/$(AMP_PACKAGER_PRIV_KEY_FILENAME)' +OCSPCache = '/renewer/amppkg-ocsp' + +[[URLSet]] + [URLSet.Sign] + Domain = "$(AMP_PACKAGER_DOMAIN)" + +[ACMEConfig] + [ACMEConfig.Production] + DiscoURL = "$(ACME_DIRECTORY_URL)" + EmailAddress = "$(ACME_EMAIL_ADDRESS)" + HttpChallengePort = 5002 + HttpWebRootDir = '/renewer/www' + TlsChallengePort = 5003 + # Uncomment if you need wildcard domains in your certs + # DnsProvider = "gcloud" diff --git a/deploy/gcloud/build.sh b/deploy/gcloud/build.sh new file mode 100755 index 000000000..8771f2a6b --- /dev/null +++ b/deploy/gcloud/build.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# Build docker images for the AMP packager renewer, consumer and init. +# Renewer and consumer are the same binaries, passed different command line +# arguments. 
These images can be used for testing the AMP Packager deployer and +# may also be installed in the gcloud marketplace. +export PROJECT_ID="YOUR_GCLOUD_PROJECT_ID" +export AMP_PACKAGER_VERSION_TAG="0.0" + +docker build -f Dockerfile.consumer -t gcr.io/${PROJECT_ID}/amppackager:${AMP_PACKAGER_VERSION_TAG} . +docker build -f Dockerfile.renewer -t gcr.io/${PROJECT_ID}/amppackager_renewer:${AMP_PACKAGER_VERSION_TAG} . +docker build -f Dockerfile.init -t gcr.io/${PROJECT_ID}/amppackager_init:${AMP_PACKAGER_VERSION_TAG} . + +docker push gcr.io/${PROJECT_ID}/amppackager:${AMP_PACKAGER_VERSION_TAG} +docker push gcr.io/${PROJECT_ID}/amppackager_renewer:${AMP_PACKAGER_VERSION_TAG} +docker push gcr.io/${PROJECT_ID}/amppackager_init:${AMP_PACKAGER_VERSION_TAG} diff --git a/deploy/gcloud/clean.sh b/deploy/gcloud/clean.sh new file mode 100755 index 000000000..8249175d0 --- /dev/null +++ b/deploy/gcloud/clean.sh @@ -0,0 +1,3 @@ +#!/bin/bash +# Clean out all autogenerated files in this directory. +rm -rf ./generated diff --git a/deploy/gcloud/deploy.sh b/deploy/gcloud/deploy.sh new file mode 100755 index 000000000..9160dd911 --- /dev/null +++ b/deploy/gcloud/deploy.sh @@ -0,0 +1,107 @@ +# This shell script is used by the docker init container used in the +# click-to-deploy deployer for AMP Packager. + +export GENFILES_DIR="$1" +export AMP_PACKAGER_NUM_REPLICAS=$2 +export AMP_PACKAGER_DOMAIN=$3 +export AMP_PACKAGER_COUNTRY="$4" +export AMP_PACKAGER_STATE="$5" +export AMP_PACKAGER_LOCALITY="$6" +export AMP_PACKAGER_ORGANIZATION="$7" +export AMP_PACKAGER_CERT_FILENAME="$8" +export AMP_PACKAGER_CSR_FILENAME="$9" +export AMP_PACKAGER_PRIV_KEY_FILENAME=${10} +export ACME_EMAIL_ADDRESS=${11} +export ACME_DIRECTORY_URL=${12} + +echo "Env variables:" +printenv +echo "End Env variables:" + +if [ ! -d "$GENFILES_DIR" ]; then + echo "Creating generated/ directory" + mkdir -p $GENFILES_DIR +fi + +# All user/project specific information will be setup in ./setup.sh. 
+# source $CURRENT_DIR/setup.sh + +# Note that PRIVATE KEY, SAN Config and CSR are optional steps. If you have +# these files generated already, you can copy them into this directory, using +# the naming convention you specifed in setup.sh. +# IMPORTANT: the private key, SAN, CSR and the certificate all go together, +# you cannot mix and match a new private key with an existing certificate and +# so on. + +# *** PRIVATE KEY +# Generate prime256v1 ecdsa private key. If you already have a key, +# copy it to amppkg.privkey. +if [ -f "$GENFILES_DIR/$AMP_PACKAGER_PRIV_KEY_FILENAME" ]; then + echo "$GENFILES_DIR/$AMP_PACKAGER_PRIV_KEY_FILENAME exists. Skipping generation." +else + echo "Generating key ..." + openssl ecparam -out "$GENFILES_DIR/$AMP_PACKAGER_PRIV_KEY_FILENAME" -name prime256v1 -genkey +fi + +# *** SAN Config +# Generate the SAN file needed for CSR generation with the project specific values. +# See: https://ethitter.com/2016/05/generating-a-csr-with-san-at-the-command-line/ +if [ -f "$GENFILES_DIR/san.cnf" ]; then + echo "$GENFILES_DIR/san.cnf exists. Skipping generation." +else + echo "Generating SAN file ..." + cat san_template.cnf \ + | sed 's/$(AMP_PACKAGER_COUNTRY)/'"$AMP_PACKAGER_COUNTRY"'/g' \ + | sed 's/$(AMP_PACKAGER_STATE)/'"$AMP_PACKAGER_STATE"'/g' \ + | sed 's/$(AMP_PACKAGER_LOCALITY)/'"$AMP_PACKAGER_LOCALITY"'/g' \ + | sed 's/$(AMP_PACKAGER_ORGANIZATION)/'"$AMP_PACKAGER_ORGANIZATION"'/g' \ + | sed 's/$(AMP_PACKAGER_DOMAIN)/'"$AMP_PACKAGER_DOMAIN"'/g' \ + > $GENFILES_DIR/san.cnf +fi + +# *** CSR +# Create a certificate signing request for the private key using the SAN config +# generated above. Copy the CSR to a safe place. If you already have a CSR, +# copy into amppkg.csr (or whatever you named it in setup.sh). +# To print 'openssl req -text -noout -verify -in amppkg.csr' +if [ -f "$GENFILES_DIR/$AMP_PACKAGER_CSR_FILENAME" ]; then + echo "$GENFILES_DIR/$AMP_PACKAGER_CSR_FILENAME exists. Skipping generation." +else + echo "Generating CSR ..." 
+ openssl req -new -sha256 -key "$GENFILES_DIR/$AMP_PACKAGER_PRIV_KEY_FILENAME" \ + -subj "/C=$AMP_PACKAGER_COUNTRY/ST=$AMP_PACKAGER_STATE/O=$AMP_PACKAGER_ORGANIZATION/CN=$AMP_PACKAGER_DOMAIN" \ + -nodes -out "$GENFILES_DIR/$AMP_PACKAGER_CSR_FILENAME" -outform pem -config $GENFILES_DIR/san.cnf +fi + +# Generate the TOML files with the project specific values. +if [ -f "$GENFILES_DIR/amppkg_consumer.toml" ]; then + echo "$GENFILES_DIR/amppkg_consumer.toml exists. Skipping generation." +else + echo "Generating TOML files ..." + cat amppkg_consumer_template.toml \ + | sed 's/$(AMP_PACKAGER_CERT_FILENAME)/'"$AMP_PACKAGER_CERT_FILENAME"'/g' \ + | sed 's/$(AMP_PACKAGER_CSR_FILENAME)/'"$AMP_PACKAGER_CSR_FILENAME"'/g' \ + | sed 's/$(AMP_PACKAGER_PRIV_KEY_FILENAME)/'"$AMP_PACKAGER_PRIV_KEY_FILENAME"'/g' \ + | sed 's/$(AMP_PACKAGER_DOMAIN)/'"$AMP_PACKAGER_DOMAIN"'/g' \ + > $GENFILES_DIR/amppkg_consumer.toml +fi + +if [ -f "$GENFILES_DIR/amppkg_renewer.toml" ]; then + echo "$GENFILES_DIR/amppkg_renewer.toml exists. Skipping generation." +else + cat amppkg_renewer_template.toml \ + | sed 's/$(AMP_PACKAGER_CERT_FILENAME)/'"$AMP_PACKAGER_CERT_FILENAME"'/g' \ + | sed 's/$(AMP_PACKAGER_CSR_FILENAME)/'"$AMP_PACKAGER_CSR_FILENAME"'/g' \ + | sed 's/$(AMP_PACKAGER_PRIV_KEY_FILENAME)/'"$AMP_PACKAGER_PRIV_KEY_FILENAME"'/g' \ + | sed 's/$(AMP_PACKAGER_DOMAIN)/'"$AMP_PACKAGER_DOMAIN"'/g' \ + | sed 's/$(ACME_EMAIL_ADDRESS)/'"$ACME_EMAIL_ADDRESS"'/g' \ + | sed 's,$(ACME_DIRECTORY_URL),'"$ACME_DIRECTORY_URL"',g' \ + > $GENFILES_DIR/amppkg_renewer.toml +fi + +# This assumes current working directory is amppackager/docker/gcloud +# default is the default namespace for the gcloud project +echo "Copying files to NFS mount ..." 
+ +mkdir -p $1/www + diff --git a/deploy/gcloud/gcloud_down.sh b/deploy/gcloud/gcloud_down.sh new file mode 100755 index 000000000..af89d8663 --- /dev/null +++ b/deploy/gcloud/gcloud_down.sh @@ -0,0 +1,8 @@ +#!/bin/bash +source $(dirname $0)/setup.sh + +gcloud config set project $PROJECT_ID +gcloud config set compute/zone $COMPUTE_ENGINE_ZONE + +gcloud container clusters delete amppackager-cluster --quiet --verbosity=none +gcloud compute disks delete amppackager-nfs-disk --quiet --verbosity=none diff --git a/deploy/gcloud/gcloud_up.sh b/deploy/gcloud/gcloud_up.sh new file mode 100755 index 000000000..877654dc2 --- /dev/null +++ b/deploy/gcloud/gcloud_up.sh @@ -0,0 +1,232 @@ +# This script deploys 3 instances of amppackagers into a gcloud kubernetes +# cluster. 1 instance (renewer) will be responsible for automatically renewing +# certificates using the ACME protocol. The remaining 2 instances (consumer) will be made +# accessible to a web-server that can be configured to forward signed exchange +# requests to them for processing. + +# git clone https://github.com/ampproject/amppackager.git +# cd amppackager/deploy/gcloud +# To start: ./gcloud_up.sh +# To shutdown: ./gcloud_down.sh +# To clean out all of the autogenerated files in this directory: ./clean.sh + +export CURRENT_DIR=$(dirname $0) +export GENFILES_DIR="$CURRENT_DIR/generated" + +if [ ! -d "$GENFILES_DIR" ]; then + echo "Creating generated/ directory" + mkdir -p $GENFILES_DIR +fi + +# All user/project specific information will be setup in ./setup.sh. +source $CURRENT_DIR/setup.sh + +# Note that PRIVATE KEY, SAN Config and CSR are optional steps. If you have +# these files generated already, you can copy them into this directory, using +# the naming convention you specifed in setup.sh. +# IMPORTANT: the private key, SAN, CSR and the certificate all go together, +# you cannot mix and match a new private key with an existing certificate and +# so on. 
+ +# *** PRIVATE KEY +# Generate prime256v1 ecdsa private key. If you already have a key, +# copy it to amppkg.privkey. +if [ -f "$GENFILES_DIR/$AMP_PACKAGER_PRIV_KEY_FILENAME" ]; then + echo "$GENFILES_DIR/$AMP_PACKAGER_PRIV_KEY_FILENAME exists. Skipping generation." +else + echo "Generating key ..." + openssl ecparam -out "$GENFILES_DIR/$AMP_PACKAGER_PRIV_KEY_FILENAME" -name prime256v1 -genkey +fi + +# *** SAN Config +# Generate the SAN file needed for CSR generation with the project specific values. +# See: https://ethitter.com/2016/05/generating-a-csr-with-san-at-the-command-line/ +if [ -f "$GENFILES_DIR/san.cnf" ]; then + echo "$GENFILES_DIR/san.cnf exists. Skipping generation." +else + echo "Generating SAN file ..." + cat san_template.cnf \ + | sed 's/$(AMP_PACKAGER_COUNTRY)/'"$AMP_PACKAGER_COUNTRY"'/g' \ + | sed 's/$(AMP_PACKAGER_STATE)/'"$AMP_PACKAGER_STATE"'/g' \ + | sed 's/$(AMP_PACKAGER_LOCALITY)/'"$AMP_PACKAGER_LOCALITY"'/g' \ + | sed 's/$(AMP_PACKAGER_ORGANIZATION)/'"$AMP_PACKAGER_ORGANIZATION"'/g' \ + | sed 's/$(AMP_PACKAGER_DOMAIN)/'"$AMP_PACKAGER_DOMAIN"'/g' \ + > $GENFILES_DIR/san.cnf +fi + +# *** CSR +# Create a certificate signing request for the private key using the SAN config +# generated above. Copy the CSR to a safe place. If you already have a CSR, +# copy into amppkg.csr (or whatever you named it in setup.sh). +# To print 'openssl req -text -noout -verify -in amppkg.csr' +if [ -f "$GENFILES_DIR/$AMP_PACKAGER_CSR_FILENAME" ]; then + echo "$GENFILES_DIR/$AMP_PACKAGER_CSR_FILENAME exists. Skipping generation." +else + echo "Generating CSR ..." + openssl req -new -sha256 -key "$GENFILES_DIR/$AMP_PACKAGER_PRIV_KEY_FILENAME" \ + -subj "/C=$AMP_PACKAGER_COUNTRY/ST=$AMP_PACKAGER_STATE/O=$AMP_PACKAGER_ORGANIZATION/CN=$AMP_PACKAGER_DOMAIN" \ + -nodes -out "$GENFILES_DIR/$AMP_PACKAGER_CSR_FILENAME" -outform pem -config $GENFILES_DIR/san.cnf +fi + +# Generate the TOML files with the project specific values. 
+if [ -f "$GENFILES_DIR/amppkg_consumer.toml" ]; then + echo "$GENFILES_DIR/amppkg_consumer.toml exists. Skipping generation." +else + echo "Generating TOML files ..." + cat amppkg_consumer_template.toml \ + | sed 's/$(AMP_PACKAGER_CERT_FILENAME)/'"$AMP_PACKAGER_CERT_FILENAME"'/g' \ + | sed 's/$(AMP_PACKAGER_CSR_FILENAME)/'"$AMP_PACKAGER_CSR_FILENAME"'/g' \ + | sed 's/$(AMP_PACKAGER_PRIV_KEY_FILENAME)/'"$AMP_PACKAGER_PRIV_KEY_FILENAME"'/g' \ + | sed 's/$(AMP_PACKAGER_DOMAIN)/'"$AMP_PACKAGER_DOMAIN"'/g' \ + > $GENFILES_DIR/amppkg_consumer.toml +fi + +if [ -f "$GENFILES_DIR/amppkg_renewer.toml" ]; then + echo "$GENFILES_DIR/amppkg_renewer.toml exists. Skipping generation." +else + cat amppkg_renewer_template.toml \ + | sed 's/$(AMP_PACKAGER_CERT_FILENAME)/'"$AMP_PACKAGER_CERT_FILENAME"'/g' \ + | sed 's/$(AMP_PACKAGER_CSR_FILENAME)/'"$AMP_PACKAGER_CSR_FILENAME"'/g' \ + | sed 's/$(AMP_PACKAGER_PRIV_KEY_FILENAME)/'"$AMP_PACKAGER_PRIV_KEY_FILENAME"'/g' \ + | sed 's/$(AMP_PACKAGER_DOMAIN)/'"$AMP_PACKAGER_DOMAIN"'/g' \ + | sed 's/$(ACME_EMAIL_ADDRESS)/'"$ACME_EMAIL_ADDRESS"'/g' \ + | sed 's,$(ACME_DIRECTORY_URL),'"$ACME_DIRECTORY_URL"',g' \ + > $GENFILES_DIR/amppkg_renewer.toml +fi + +# Generate the yaml files that have the project id and docker image version tag with proper values. +if [ -f "$GENFILES_DIR/amppackager_cert_renewer.yaml" ]; then + echo "$GENFILES_DIR/amppackager_cert_renewer.toml exists. Skipping generation." +else + echo "Generating renewer YAML file ..." + cat amppackager_cert_renewer_template.yaml \ + | sed 's/$(PROJECT_ID)/'$PROJECT_ID'/g' \ + | sed 's/$(AMP_PACKAGER_VERSION_TAG)/'$AMP_PACKAGER_VERSION_TAG'/g' \ + > $GENFILES_DIR/amppackager_cert_renewer.yaml +fi + +if [ -f "$GENFILES_DIR/amppackager_cert_consumer.yaml" ]; then + echo "$GENFILES_DIR/amppackager_cert_consumer.toml exists. Skipping generation." +else + echo "Generating consumer YAML file ..." 
+ cat amppackager_cert_consumer_template.yaml \ + | sed 's/$(PROJECT_ID)/'$PROJECT_ID'/g' \ + | sed 's/$(AMP_PACKAGER_NUM_REPLICAS)/'$AMP_PACKAGER_NUM_REPLICAS'/g' \ + | sed 's/$(AMP_PACKAGER_VERSION_TAG)/'$AMP_PACKAGER_VERSION_TAG'/g' \ + > $GENFILES_DIR/amppackager_cert_consumer.yaml +fi + +# Build docker images for the Amppackager renewer and consumer, if necessary. +# Renewer and consumer are the same binaries, passed different command line +# arguments. If you don't want to rebuild the binaries, you can comment out the +# next 2 lines as well as the docker images and push commands below. +docker build -f Dockerfile.consumer -t gcr.io/${PROJECT_ID}/amppackager:${AMP_PACKAGER_VERSION_TAG} . +docker build -f Dockerfile.renewer -t gcr.io/${PROJECT_ID}/amppackager_renewer:${AMP_PACKAGER_VERSION_TAG} . + +# To check that it succeeded, list the images that got built. +docker images +echo "Does the above look correct? If so, hit enter; else Ctrl-C." +read + +# https://cloud.google.com/container-registry/docs/advanced-authentication#gcloud-helper +gcloud auth configure-docker + +# Push the docker image into the cloud container registry. +# See: https://cloud.google.com/container-registry/docs/overview +# See: https://cloud.google.com/container-registry/docs/pushing-and-pulling +docker push gcr.io/${PROJECT_ID}/amppackager:${AMP_PACKAGER_VERSION_TAG} +docker push gcr.io/${PROJECT_ID}/amppackager_renewer:${AMP_PACKAGER_VERSION_TAG} + +# NOTE: if you have other gcloud projects, you should create/activate an amppkg +# named configuration before calling this command (also gcloud_{up/down}.sh). +# Otherwise it'll muck with your global state. +gcloud config set project $PROJECT_ID +gcloud config set compute/zone $COMPUTE_ENGINE_ZONE + +# Allow 10 nodes maximum for this cluster. 
+echo "Creating kubernetes cluster" +gcloud container clusters create amppackager-cluster --num-nodes=10 --enable-ip-alias --metadata disable-legacy-endpoints=true + +# Setup your credentials +# https://cloud.google.com/sdk/gcloud/reference/container/clusters/get-credentials +gcloud container clusters get-credentials amppackager-cluster + +# Create the NFS disk for RW sharing amongst the kubernetes deployments +# cert-renewer and cert-consumer. +# https://medium.com/@Sushil_Kumar/readwritemany-persistent-volumes-in-google-kubernetes-engine-a0b93e203180 +echo "Creating NFS disk" +gcloud compute disks create --size=10GB --zone=${COMPUTE_ENGINE_ZONE} amppackager-nfs-disk +kubectl apply -f nfs_renewer_pvc.yaml +kubectl apply -f nfs_consumer_pvc.yaml +kubectl apply -f nfs_clusterip_service.yaml +kubectl apply -f nfs_server_deployment.yaml +export AMPPACKAGER_NFS_SERVER=$(kubectl get pods | grep amppackager-nfs | awk '{print $1}') + +# Sleep for a few minutes, waiting for NFS disk to be deployed. +sleep 4m + +# This assumes current working directory is amppackager/docker/gcloud +# default is the default namespace for the gcloud project +echo "Copying files to NFS mount ..." 
+kubectl cp www default/"$AMPPACKAGER_NFS_SERVER":/exports/ + +if [ -f "$GENFILES_DIR/amppkg_consumer.toml" ]; then + echo "Copying amppkg_consumer.toml to NFS mount" + kubectl cp $GENFILES_DIR/amppkg_consumer.toml default/"$AMPPACKAGER_NFS_SERVER":/exports +fi + +if [ -f "$GENFILES_DIR/amppkg_renewer.toml" ]; then + echo "Copying amppkg_renewer.toml to NFS mount" + kubectl cp $GENFILES_DIR/amppkg_renewer.toml default/"$AMPPACKAGER_NFS_SERVER":/exports +fi + +if [ -f "$GENFILES_DIR/$AMP_PACKAGER_PRIV_KEY_FILENAME" ]; then + echo "Copying $GENFILES_DIR/$AMP_PACKAGER_PRIV_KEY_FILENAME to NFS mount" + kubectl cp "$GENFILES_DIR/$AMP_PACKAGER_PRIV_KEY_FILENAME" default/"$AMPPACKAGER_NFS_SERVER":/exports +fi + +if [ -f "$GENFILES_DIR/$AMP_PACKAGER_CSR_FILENAME" ]; then + echo "Copying $GENFILES_DIR/$AMP_PACKAGER_CSR_FILENAME to NFS mount" + kubectl cp "$GENFILES_DIR/$AMP_PACKAGER_CSR_FILENAME" default/"$AMPPACKAGER_NFS_SERVER":/exports +fi + +if [ -f "$GENFILES_DIR/$AMP_PACKAGER_CERT_FILENAME" ]; then + echo "Copying $GENFILES_DIR/$AMP_PACKAGER_CERT_FILENAME to NFS mount" + kubectl cp "$GENFILES_DIR/$AMP_PACKAGER_CERT_FILENAME" default/"$AMPPACKAGER_NFS_SERVER":/exports +fi + +kubectl apply -f $GENFILES_DIR/amppackager_cert_renewer.yaml + +# Wait until either the cert is present on disk in the NFS mount or X number +# of retries are finished. If cert is being requested via ACME, this may take +# some time. +result=1 +retries=0 +while true +do + if [ $result -eq 0 ]; then + echo "Cert is available!" + break + else + sleep 60 + retries=$((retries+1)) + if [ "$retries" -ge 10 ]; then + echo "Cert not present, giving up." + break + else + echo "Waiting for cert ..." + fi + # TODO(banaag): need to fix hardcoded /exports/amppkg.cert after you + # decipher kubectl set env craziness. + kubectl exec -it $AMPPACKAGER_NFS_SERVER -- test -f /exports/amppkg.cert 2> /dev/null + result=$? 
+ fi +done + +kubectl apply -f $GENFILES_DIR/amppackager_cert_consumer.yaml +kubectl apply -f amppackager_service.yaml + +# List the service that got started. +kubectl get service + +echo "Setup complete." diff --git a/deploy/gcloud/nfs_clusterip_service.yaml b/deploy/gcloud/nfs_clusterip_service.yaml new file mode 100644 index 000000000..038421f48 --- /dev/null +++ b/deploy/gcloud/nfs_clusterip_service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: amppackager-nfs-server +spec: + ports: + - name: nfs + port: 2049 + - name: mountd + port: 20048 + - name: rpcbind + port: 111 + selector: + role: nfs-server diff --git a/deploy/gcloud/nfs_consumer_pvc.yaml b/deploy/gcloud/nfs_consumer_pvc.yaml new file mode 100644 index 000000000..e28832253 --- /dev/null +++ b/deploy/gcloud/nfs_consumer_pvc.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: nfs-consumer-pv +spec: + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + nfs: + server: amppackager-nfs-server.default.svc.cluster.local + path: "/" + +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: nfs-consumer-pvc +spec: + accessModes: + - ReadWriteMany + storageClassName: "" + resources: + requests: + storage: 5Gi diff --git a/deploy/gcloud/nfs_renewer_pvc.yaml b/deploy/gcloud/nfs_renewer_pvc.yaml new file mode 100644 index 000000000..4d001dbc6 --- /dev/null +++ b/deploy/gcloud/nfs_renewer_pvc.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: nfs-renewer-pv +spec: + capacity: + storage: 5Gi + accessModes: + - ReadWriteMany + nfs: + server: amppackager-nfs-server.default.svc.cluster.local + path: "/" + +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: nfs-renewer-pvc +spec: + accessModes: + - ReadWriteMany + storageClassName: "" + resources: + requests: + storage: 5Gi diff --git a/deploy/gcloud/nfs_server_deployment.yaml b/deploy/gcloud/nfs_server_deployment.yaml new file mode 100644 index 
000000000..a49772ac2 --- /dev/null +++ b/deploy/gcloud/nfs_server_deployment.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: amppackager-nfs-server +spec: + replicas: 1 + selector: + matchLabels: + role: nfs-server + template: + metadata: + labels: + role: nfs-server + spec: + containers: + - name: amppackager-nfs-server + image: gcr.io/google_containers/volume-nfs:0.8 + ports: + - name: nfs + containerPort: 2049 + - name: mountd + containerPort: 20048 + - name: rpcbind + containerPort: 111 + securityContext: + privileged: true + volumeMounts: + - mountPath: /exports + name: nfs-renewer-pvc + volumes: + - name: nfs-renewer-pvc + gcePersistentDisk: + pdName: amppackager-nfs-disk + fsType: ext4 diff --git a/deploy/gcloud/san_template.cnf b/deploy/gcloud/san_template.cnf new file mode 100644 index 000000000..a95109943 --- /dev/null +++ b/deploy/gcloud/san_template.cnf @@ -0,0 +1,13 @@ +[ req ] +distinguished_name = req_distinguished_name +req_extensions = req_ext +[ req_distinguished_name ] +countryName = $(AMP_PACKAGER_COUNTRY) +stateOrProvinceName = $(AMP_PACKAGER_STATE) +localityName = $(AMP_PACKAGER_LOCALITY) +organizationName = $(AMP_PACKAGER_ORGANIZATION) +commonName = $(AMP_PACKAGER_DOMAIN) +[ req_ext ] +subjectAltName = @alt_names +[alt_names] +DNS.1 = $(AMP_PACKAGER_DOMAIN) diff --git a/deploy/gcloud/setup.sh b/deploy/gcloud/setup.sh new file mode 100644 index 000000000..0cf20317e --- /dev/null +++ b/deploy/gcloud/setup.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +# AMPPackager requires that you have 2 items so that auto-renewal of +# certificates work: +# - Certificate Signing Request (CSR) +# - Private key +# This can be autogenerated for you by the ./gcloud_up.sh script or you +# can manually create these by following the instructions in this link: +# https://docs.digicert.com/manage-certificates/certificate-profile-options/get-your-signed-http-exchange-certificate/ +# If you manually generate these files, make sure to use the 
filenames that you +# specified below (e.g. AMP_PACKAGER_CSR_FILENAME value should be the name used for +# the CSR file). + +# GCloud project information +# Note that if your project id is scoped by a domain, you need to replace the +# ':' with a '/'. See: https://cloud.google.com/container-registry/docs/overview#domain-scoped_projects +# REQUIRED +export PROJECT_ID="YOUR_GCLOUD_PROJECT_ID" +# For compute zones, see: https://console.cloud.google.com/compute/zones +export COMPUTE_ENGINE_ZONE="YOUR_COMPUTE_ENGINE_ZONE" + +# The version tag of the docker build of amppackager you want to build/use. +# You can leave it as the default unless you want to use a specific version +# of amppackager. +# OPTIONAL +export AMP_PACKAGER_VERSION_TAG="latest" + +# The number of amppackager consumer replicas you'd like to run to which the +# load balancer can forward requests. +# OPTIONAL +export AMP_PACKAGER_NUM_REPLICAS=2 + +# The domain you want to use for the signed exchange. +# REQUIRED +export AMP_PACKAGER_DOMAIN="YOUR_AMP_PACKAGER_DOMAIN" +export AMP_PACKAGER_COUNTRY="YOUR_COUNTRY" +export AMP_PACKAGER_STATE="YOUR_STATE" +export AMP_PACKAGER_LOCALITY="YOUR_LOCALITY" +export AMP_PACKAGER_ORGANIZATION="YOUR_ORGANIZATION" + +# ACME API related information +# REQUIRED +export ACME_EMAIL_ADDRESS="YOUR_ACME_EMAIL_ADDRESS" +export ACME_DIRECTORY_URL="YOUR_ACME_DIRECTORY_URL" + +# The Signed Exchange Certificate you got from your certificate authority +# (Digicert) using the domain/location information above. +# OPTIONAL (if file is not present, amppackager renewer instance will try to +# retrieve one for you). +# DO NOT CHECK IN INTO GITHUB. +export AMP_PACKAGER_CERT_FILENAME="amppkg.cert" + +# These files can be autogenerated. You can choose replace it with your own +# file if desired but each file has to be present in your +# local amppackager/deploy/gcloud directory. +# OPTIONAL (if files are not present, script will autogenerate them for you) +# DO NOT CHECK THEM INTO GITHUB. 
+export AMP_PACKAGER_CSR_FILENAME="amppkg.csr" +export AMP_PACKAGER_PRIV_KEY_FILENAME="amppkg.privkey" + diff --git a/deploy/gcloud/www/README b/deploy/gcloud/www/README new file mode 100644 index 000000000..d22fc8f32 --- /dev/null +++ b/deploy/gcloud/www/README @@ -0,0 +1 @@ +Directory is intentionally empty. diff --git a/docs/cache_requirements.md b/docs/cache_requirements.md index 5f122d094..11ff30406 100644 --- a/docs/cache_requirements.md +++ b/docs/cache_requirements.md @@ -20,21 +20,17 @@ These include: * Parameter values of type string, binary, or identifier. * The payload must be: * non-empty. - * well-formed UTF-8 that doesn't contain: - * any characters that cause a parse-error during [HTML preprocessing](https://html.spec.whatwg.org/multipage/parsing.html#preprocessing-the-input-stream) - * U+0000 NULL * valid transformed AMP. The canonical definition of transformed AMP is the return value of [`transform.Process()`](https://github.com/ampproject/amppackager/blob/e4bf0430ba152cfe82ccf063df92021dfc0f26a5/transformer/transformer.go#L219). If given a [valid AMP](https://github.com/ampproject/amphtml/tree/master/validator) doc as input, it should produce a valid transformed AMP doc. There may be other ways of achieving this, but they are unsupported (i.e. may arbitrarily break in the future). - * unchanged after calling [`transform -config NONE`](https://github.com/ampproject/amppackager/tree/releases/transformer#how-to-use). * matching one of the versions requested by the `AMP-Cache-Transform` header. Note that this version range will increase over time, at a cadence TBD (likely 6-8 weeks with 2 or 3 supported latest versions). - * If the signed `cache-control` header has a `no-cache` directive, it cannot - have a value (i.e. `no-cache=some-header` is disallowed). + * If the signed `cache-control` header has a `no-cache` or `private` directive, + it cannot have a value (i.e. `no-cache=some-header` is disallowed). 
* The signed `content-security-policy` header must be present and comply with these rules: * `default-src`, `script-src`, `object-src`, `style-src`, and `report-uri` @@ -52,7 +48,9 @@ These include: (e.g. max 20 urls, rel=preload only, as=script|style only). URLs must be limited to `cdn.ampproject.org` and the allowlisted [font provider URLs](https://github.com/ampproject/amphtml/blob/b0ff92429923c86f3973009a84ff02f4f1868b4d/validator/validator-main.protoascii#L310). * There must not be a signed `variant-key-04` or `variants-04` header. - * The signature's duration (expiry minus date) must be >= 4 days. + * The signature's lifetime (`expires` minus request time) must be >= 3 days; + given AMP Packager's behavior of [backdating by 1 day](https://github.com/ampproject/amppackager/blob/cc38c5fad40fc603119a298700820b97a4f0c54f/packager/signer/signer.go#L497), + this effectively means a minimum duration (`expires` minus `date`) of 4 days. The above is an attempt at a complete list of SXG-related requirements, but it is not guaranteed to be complete. 
diff --git a/go.mod b/go.mod index 4e1f85e67..416452a24 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/pelletier/go-toml v1.1.0 github.com/pkg/errors v0.8.1 github.com/pquerna/cachecontrol v0.0.0-20180306154005-525d0eb5f91d + github.com/prometheus/client_golang v1.1.0 github.com/stretchr/testify v1.4.0 golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3 diff --git a/go.sum b/go.sum index 69f531285..280de9917 100644 --- a/go.sum +++ b/go.sum @@ -51,6 +51,7 @@ github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= @@ -180,6 +181,7 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= 
github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -230,16 +232,23 @@ github.com/pquerna/cachecontrol v0.0.0-20180306154005-525d0eb5f91d/go.mod h1:prY github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.6.0 h1:YVPodQOcK15POxhgARIvnDRVpLcuK8mglnMrWfyrw6A= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs 
v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/rainycape/memcache v0.0.0-20150622160815-1031fa0ce2f2/go.mod h1:7tZKcyumwBO6qip7RNQ5r77yrssm9bfCowcLEBcU5IA= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= diff --git a/internal/url/url_test.go b/internal/url/url_test.go index 51e48cfc3..29c5fbb6d 100644 --- a/internal/url/url_test.go +++ b/internal/url/url_test.go @@ -33,13 +33,6 @@ func TestString(t *testing.T) { "https://foo.com/i haz spaces?q=i haz spaces", "https://foo.com/i%20haz%20spaces?q=i%20haz%20spaces", }, - { - // TODO(b/123017837): Go escapes only certain chars in fragments. - "fragment not entirely reescaped", // This is intrinsic Go URL behavior. - "https://example.com/amp.html#htmlURL=http%3A%2F%2Fbar.com%2Fbaz", - //does not produce: "https://example.com/amp.html#htmlURL=http%3A%2F%2Fbar.com%2Fbaz", - "https://example.com/amp.html#htmlURL=http://bar.com/baz", - }, { "fragment with space and quote reescaped", "https://example.com/amp.html#fragment-\" ", diff --git a/monitoring.md b/monitoring.md new file mode 100644 index 000000000..eddd7c617 --- /dev/null +++ b/monitoring.md @@ -0,0 +1,228 @@ +# Monitoring `amppackager` in production + +Once you've run `amppackager` server in production, you may want to monitor its +health and performance, as well as the performance of the underlying requests to +the AMP document server. + +`amppackager` provides a few [Prometheus](https://prometheus.io/) metrics for such monitoring. They are available via the `/metrics` endpoint. + +Prometheus is a powerful monitoring framework. 
We encourage you to fully utilize +it to make your monitoring convenient, scalable and automated. A few things we +recommend you do: +1. Explore the metrics `amppackager` provides by following or skimming through + the tutorial below. Pick the essential metrics you'd like to monitor. +1. [Set up a Prometheus server](https://prometheus.io/docs/prometheus/latest/getting_started/). +1. Set up all `amppackager` replicas as targets for the Prometheus server. Use + the [multi-target exporter + pattern](https://prometheus.io/docs/guides/multi-target-exporter/#understanding-and-using-the-multi-target-exporter-pattern). +1. Try + [querying](https://prometheus.io/docs/prometheus/latest/querying/basics/) + the essential metrics you chose. +1. Visualize the metrics via + [grafana](https://prometheus.io/docs/visualization/grafana/). +1. Setup [alerts](https://prometheus.io/docs/alerting/latest/overview/) that + will notify you of abnormal behavior (e.g. latencies growing beyond 60 + seconds). + +The sections below walk you through the available metrics, explain how to +manually check them via command line, and how to interpret the results. All the +command line examples are for Linux OS. + +## Monitoring health + +To check `amppackager`'s health, `curl` its `/healthz` endpoint: + +```console +$ curl https://localhost:8080/healthz +``` + +If the server is up and has a fresh, valid certificate, it will respond with +`ok`. If not, it will provide an error message. + +## Monitoring performance + +You can take a step further and check a few performance metrics, both for +requests handled by `amppackager`, and for the underlying gateway requests that +`amppackager` sends to the AMP document server. Get all the metrics by `curl`ing +the `/metrics` endpoint. + +## Example: monitoring total requests count + +The example command below fetches all the available metrics. It then greps the report for `total_requests_by_code_and_url` metric. 
This metric counts the HTTP requests the `amppackager` server has processed since it's been up. + +```console +$ curl https://127.0.0.1:8080/metrics | grep total_requests_by_code_and_url + +# HELP total_requests_by_code_and_url Total number of requests by HTTP code and URL. +# TYPE total_requests_by_code_and_url counter +total_requests_by_code_and_url{code="200",handler="healthz"} 3 +total_requests_by_code_and_url{code="200",handler="validityMap"} 5 +total_requests_by_code_and_url{code="200",handler="signer"} 6 +total_requests_by_code_and_url{code="502",handler="signer"} 4 +total_requests_by_code_and_url{code="404",handler="handler_not_assigned"} 1 +``` + +The example stats above are broken down by response HTTP code, and by the +internal amppackager's module (handler) that has handled the request. The stats +report 3 requests to the `healthz` handler that got a 200 response (OK), 4 +requests to the `signer` handler that got a 502 response (Bad Gateway) etc. + +## `amppackager`'s handlers + +The table below lists `amppackager`'s handlers accounted for by the metrics: + +| Handler | Responsibility | +|-|-| +| signer | Handles `/priv/doc` requests. Fetches the AMP document, signs it and returns it. | +| certCache | Handles `/amppkg/cert` requests. Returns your Signed Exchange certificate. | +| validityMap | Handles `/amppkg/validity` requests. Returns the [validity data](https://tools.ietf.org/html/draft-yasskin-httpbis-origin-signed-exchanges-impl-00#section-3.6) referred by `amppackager`'s signatures. | +| healthz | Handles `/healthz` requests. Checks if `amppackager` is running and has a valid, fresh certificate. | +| metrics | Handles `/metrics` requests. Reports performance metrics for `amppackager` and for the underlying gateway requests to the AMP document server. 
| + +## Metrics labels: breakdown by handler and response code + +For some metrics like `total_requests_by_code_and_url` the stats in the +`/metrics` response are grouped into buckets by two dimensions: the handler +name, and the HTTP response code. E.g. note the buckets in the example +above: + +```console +total_requests_by_code_and_url{code="200",handler="healthz"} 3 +total_requests_by_code_and_url{code="200",handler="signer"} 6 +total_requests_by_code_and_url{code="502",handler="signer"} 4 +``` + +Invalid requests that were not routed by `amppackager` to any handler are +assigned a special label `handler_not_assigned`: + +```console +total_requests_by_code_and_url{code="404",handler="handler_not_assigned"} 1 +``` + +Some metrics only make sense for a particular handler. E.g. +`gateway_request_latencies_in_seconds` and other metrics related to gateway +requests are only related to `signer` handler's operation. Such metrics are only +broken down into buckets by the response code, not by the handler. + +__Labels__ are key-value properties of buckets that indicate the specific +values of the breakdown dimensions, e.g. `code="200"` or `handler="healthz"`. + +## Metrics types: counters and summaries + +The two types of metrics are *counters* and *summaries*. + +For some metrics like `total_requests_by_code_and_url`, each request increases +the total by 1, so the metric is a *counter* that has no historical data, just +the accumulated total. + +For other metrics like `request_latencies_in_seconds`, a distribution of +request latencies is stored, and a few historical percentiles are reported - +0.5 percentile, 0.9 percentile, 0.99 percentile. Such metrics are *summaries*. + +## Available metrics + +The table below lists the key available metrics, along with their types and +labels. + +| Metric | Metric type | Explanation | Broken down by HTTP response code? 
| Broken down by [handler](#amppackagers-handlers)? | +|--|--|--|--|--| +| total_requests_by_code_and_url | Counter | Total number of requests handled by `amppackager` since it's been up. | Yes | Yes | +| request_latencies_in_seconds | [Summary](#metrics-types-counters-and-summaries) | `amppackager`'s handlers latencies in seconds, measured from the moment the handler starts processing the request, to the moment the response is returned. | Yes | Yes | +| total_gateway_requests_by_code | Counter | Total number of underlying requests sent by `signer` handler to the AMP document server. | Yes | No, specific to [`signer` handler](#amppackagers-handlers). | +| gateway_request_latencies_in_seconds | [Summary](#metrics-types-counters-and-summaries) | Latencies (in seconds) of gateway requests to the AMP document server. | Yes | No, specific to [`signer` handler](#amppackagers-handlers). | +| signed_amp_documents_size_in_bytes | [Summary](#metrics-types-counters-and-summaries) | Actual size (in bytes) of gateway response body from AMP document server. Reported only if signer decided to sign, not return an error or proxy unsigned. | No, specific to 200 (OK) responses. | No, specific to [`signer` handler](#amppackagers-handlers). | +| documents_signed_vs_unsigned | Counter | Total number of successful underlying requests to AMP document server, broken down by status based on the action signer has taken: sign or proxy unsigned. Does not account for requests to `amppackager` that resulted in an HTTP error. | No, specific to 200 (OK) responses. | No, specific to [`signer` handler](#amppackagers-handlers). | + +## Understanding percentiles reported by Summaries + +A [percentile](https://en.wikipedia.org/wiki/Percentile) indicates the value +below which a given percentage of observations in a group of observations falls. +E.g. 
if in a room of 100 people, 80% are shorter than you, then you are at the 80th +percentile. The 50th percentile is also known as +[median](https://en.wikipedia.org/wiki/Median). + +For summary metrics like `request_latencies_in_seconds`, the `/metrics` endpoint provides three percentiles: 0.5, 0.9, 0.99. + +To get an idea of how long it __usually__ takes `amppackager` to handle a +request, look at the respective 0.5 percentile. To check the rare, __worst case +scenarios__, look at 0.9 and 0.99 percentiles. + +Consider the following example. Let's say you're interested in the stats for the `request_latencies_in_seconds` metric, specifically for requests that got an OK response (200) from the `signer` handler: + +```console +$ curl https://127.0.0.1:8080/metrics | grep request_latencies_in_seconds | grep signer | grep code=\"200\" + +request_latencies_in_seconds{code="200",handler="signer",quantile="0.5"} 0.023 +request_latencies_in_seconds{code="200",handler="signer",quantile="0.9"} 0.237 +request_latencies_in_seconds{code="200",handler="signer",quantile="0.99"} 0.238 +request_latencies_in_seconds_sum{code="200",handler="signer"} 661.00 +request_latencies_in_seconds_count{code="200",handler="signer"} 10000 +``` + +According to the example stats above, `signer` has handled 10000 requests. +Consider the respective 10000 latencies ranked from smallest to largest. The +latency of the request handled the fastest gets ranked 1, and of the one handled +the slowest - 10000. According to the stats above, the latency ranked 5001 is +0.023s, the latency ranked 9001 is 0.237s, and the latency ranked 9901 is +0.238s. + +__The conclusion for the example above__: successful signer requests are usually handled within 0.023s, but occasionally may take up to 0.238s. + +Note that the results provided for latencies and other summaries may be off by a +few rank positions in the ranking. 
This is due to metrics engine optimizations +that avoid storing all the historical data, therefore saving RAM +significantly. The results are still accurate enough for performance monitoring. + + +Also note that every stat (e.g. 0.9 percentile latency) provided by the metrics +is an actual historical value that has been seen by the server, not an +approximation. + +### Mean vs percentiles + +In the example above all the 10000 requests were handled in 661 seconds, which +means the mean (average) latency was 0.0661s. This value is ~3 times larger than +the median. So which one more accurately represents the "typical" scenario? Why +not look at mean instead of looking at percentiles? + +Median (0.5 percentile) is [more stable against +outliers](https://en.wikipedia.org/wiki/Median) than mean, and therefore gives a +better understanding of the typical response time. At the same time the 0.9 and +0.99 percentiles give you a good idea about the large outliers, i.e. abnormally +slow response times. 
+ +## More examples + +Check all the stats (note - this command produces numerous metrics that are self-documented; the key ones are also explained in the table above): + +```console +$ curl https://127.0.0.1:8080/metrics +``` + +Check stats for the `total_gateway_requests_by_code` metric: + +```console +$ curl https://127.0.0.1:8080/metrics | grep total_gateway_requests_by_code +``` + +Check stats for the `gateway_request_latencies_in_seconds` metric, for requests +that got an OK response (200) : + +```console +$ curl https://127.0.0.1:8080/metrics | grep gateway_request_latencies_in_seconds | grep code=\"200\" +``` + +Check the 0.9 percentile latency for the `request_latencies_in_seconds` metric, +for requests that got an OK response (200) from the `signer` handler: + +```console +$ curl https://127.0.0.1:8080/metrics | grep request_latencies_in_seconds | grep signer | grep code=\"200\" | grep quantile=\"0.9\" +``` + +## Performance metrics lifetime + +The metrics provided by the `/metrics` endpoint are reset upon the execution of +the `amppackager` server binary. Every request to `/metrics` is served with the +stats accumulated since the server's been up, up to the time of the request, but +not including the request itself. + diff --git a/packager/certcache/certcache.go b/packager/certcache/certcache.go index 80bfd6a23..2638eb6bb 100644 --- a/packager/certcache/certcache.go +++ b/packager/certcache/certcache.go @@ -109,6 +109,7 @@ type CertCache struct { extractOCSPServer func(*x509.Certificate) (string, error) // Given an HTTP request/response, returns its cache expiry. httpExpiry func(*http.Request, *http.Response) time.Time + timeNow func() time.Time } // Callers need to call Init() on the returned CertCache before the cache can auto-renew certs. @@ -121,7 +122,7 @@ type CertCache struct { // An alternative pattern would be to create an IsInitialized() bool or similarly named function that verifies all of the required fields have // been set. 
Then callers can just set fields in the struct by name and assert IsInitialized before doing anything with it. func New(certs []*x509.Certificate, certFetcher *certfetcher.CertFetcher, domains []string, - certFile string, newCertFile string, ocspCache string, generateOCSPResponse OCSPResponder) *CertCache { + certFile string, newCertFile string, ocspCache string, generateOCSPResponse OCSPResponder, timeNow func() time.Time) *CertCache { certName := "" if len(certs) > 0 && certs[0] != nil { certName = util.CertName(certs[0]) @@ -140,11 +141,11 @@ func New(certs []*x509.Certificate, certFetcher *certfetcher.CertFetcher, domain // certificate, all needing to staple an OCSP response. You don't // want to have all of them hammering the OCSP server - ideally, // you'd have one request, in the backend, and updating them all. - ocspFile: &Chained{first: &InMemory{}, second: &LocalFile{path: ocspCache}}, - ocspFilePath: ocspCache, - stop: make(chan struct{}), + ocspFile: &Chained{first: &InMemory{}, second: &LocalFile{path: ocspCache}}, + ocspFilePath: ocspCache, + stop: make(chan struct{}), generateOCSPResponse: generateOCSPResponse, - client: http.Client{Timeout: 60 * time.Second}, + client: http.Client{Timeout: 60 * time.Second}, extractOCSPServer: func(cert *x509.Certificate) (string, error) { if cert == nil || len(cert.OCSPServer) < 1 { return "", errors.New("Cert missing OCSPServer.") @@ -164,6 +165,7 @@ func New(certs []*x509.Certificate, certFetcher *certfetcher.CertFetcher, domain CertFile: certFile, NewCertFile: newCertFile, isInitialized: false, + timeNow: timeNow, } } @@ -223,7 +225,7 @@ func (this *CertCache) GetLatestCert() *x509.Certificate { return nil } - d, err := util.GetDurationToExpiry(this.getCert(), time.Now()) + d, err := util.GetDurationToExpiry(this.getCert(), this.timeNow()) if err != nil { // Current cert is already invalid. Check if renewal is available. 
log.Println("Current cert is expired, attempting to renew: ", err) @@ -259,12 +261,16 @@ func (this *CertCache) createCertChainCBOR(ocsp []byte) ([]byte, error) { return buf.Bytes(), nil } -func (this *CertCache) ocspMidpoint(bytes []byte, issuer *x509.Certificate) (time.Time, error) { +func (this *CertCache) parseOCSP(bytes []byte, issuer *x509.Certificate) (*ocsp.Response, error) { resp, err := ocsp.ParseResponseForCert(bytes, this.getCert(), issuer) if err != nil { - return time.Time{}, errors.Wrap(err, "Parsing OCSP") + return nil, errors.Wrap(err, "Parsing OCSP") } - return resp.ThisUpdate.Add(resp.NextUpdate.Sub(resp.ThisUpdate) / 2), nil + return resp, nil +} + +func (this *CertCache) ocspMidpoint(resp *ocsp.Response) time.Time { + return resp.ThisUpdate.Add(resp.NextUpdate.Sub(resp.ThisUpdate) / 2) } func (this *CertCache) ServeHTTP(resp http.ResponseWriter, req *http.Request) { @@ -285,13 +291,15 @@ func (this *CertCache) ServeHTTP(resp http.ResponseWriter, req *http.Request) { util.NewHTTPError(http.StatusInternalServerError, "Error reading OCSP: ", err).LogAndRespond(resp) return } - midpoint, err := this.ocspMidpoint(ocsp, this.findIssuer()) + ocspResp, err := this.parseOCSP(ocsp, this.findIssuer()) if err != nil { - util.NewHTTPError(http.StatusInternalServerError, "Error computing OCSP midpoint: ", err).LogAndRespond(resp) + log.Println("Invalid OCSP:", err) + util.NewHTTPError(http.StatusInternalServerError, "Invalid OCSP: ", err).LogAndRespond(resp) return } + midpoint := this.ocspMidpoint(ocspResp) // int is large enough to represent 24855 days in seconds. 
- expiry := int(midpoint.Sub(time.Now()).Seconds()) + expiry := int(midpoint.Sub(this.timeNow()).Seconds()) if expiry < 0 { expiry = 0 } @@ -340,7 +348,7 @@ func (this *CertCache) isHealthy(ocspResp []byte) error { if err != nil { return errors.Wrap(err, "Error parsing OCSP response") } - if resp.NextUpdate.Before(time.Now()) { + if resp.NextUpdate.Before(this.timeNow()) { return errors.Errorf("Cached OCSP is stale, NextUpdate: %v", resp.NextUpdate) } return nil @@ -395,15 +403,15 @@ func (this *CertCache) readOCSP(allowRetries bool) ([]byte, time.Time, error) { } for numTries := 0; numTries < maxTries; { - ocsp, ocspUpdateAfter, err = this.readOCSPHelper(numTries, numTries >= maxTries - 1) + ocsp, ocspUpdateAfter, err = this.readOCSPHelper(numTries, numTries >= maxTries-1) if err != nil { return nil, ocspUpdateAfter, err } if !this.shouldUpdateOCSP(ocsp) { - break; + break } // Wait only if are not on our last try. - if numTries < maxTries - 1 { + if numTries < maxTries-1 { waitTimeInMinutes = waitForSpecifiedTime(waitTimeInMinutes, numTries) } numTries++ @@ -475,13 +483,17 @@ func (this *CertCache) shouldUpdateOCSP(ocsp []byte) bool { // This is a permanent error; do not attempt OCSP update. return false } - // Compute the midpoint per sleevi #3 (see above). - midpoint, err := this.ocspMidpoint(ocsp, issuer) + ocspResp, err := this.parseOCSP(ocsp, issuer) if err != nil { - log.Println("Error computing OCSP midpoint:", err) + // An old ocsp cache causes a parse error in case of cert renewal. Do not log it. + if this.isInitialized { + log.Println("Invalid OCSP:", err) + } return true } - if time.Now().After(midpoint) { + // Compute the midpoint per sleevi #3 (see above). + midpoint := this.ocspMidpoint(ocspResp) + if this.timeNow().After(midpoint) { // TODO(twifkak): Use a logging framework with support for debug-only statements. 
log.Println("Updating OCSP; after midpoint: ", midpoint) return true @@ -493,7 +505,7 @@ func (this *CertCache) shouldUpdateOCSP(ocsp []byte) bool { // possible, and observe HTTP cache semantics." this.ocspUpdateAfterMu.RLock() defer this.ocspUpdateAfterMu.RUnlock() - if time.Now().After(this.ocspUpdateAfter) { + if this.timeNow().After(this.ocspUpdateAfter) { // TODO(twifkak): Use a logging framework with support for debug-only statements. log.Println("Updating OCSP; expired by HTTP cache headers: ", this.ocspUpdateAfter) return true @@ -630,11 +642,11 @@ func (this *CertCache) fetchOCSP(orig []byte, certs []*x509.Certificate, ocspUpd log.Println("Invalid OCSP status:", resp.Status) return orig } - if resp.ThisUpdate.After(time.Now()) { + if resp.ThisUpdate.After(this.timeNow()) { log.Println("OCSP thisUpdate in the future:", resp.ThisUpdate) return orig } - if resp.NextUpdate.Before(time.Now()) { + if resp.NextUpdate.Before(this.timeNow()) { log.Println("OCSP nextUpdate in the past:", resp.NextUpdate) return orig } @@ -750,7 +762,7 @@ func (this *CertCache) updateCertIfNecessary() { d := time.Duration(0) err := errors.New("") if this.hasCert() { - d, err = util.GetDurationToExpiry(this.getCert(), time.Now()) + d, err = util.GetDurationToExpiry(this.getCert(), this.timeNow()) } if err != nil { this.renewedCertsMu.Lock() @@ -817,8 +829,10 @@ func (this *CertCache) updateCertIfNecessary() { } func (this *CertCache) doesCertNeedReloading() bool { - if !this.hasCert() { return true } - d, err := util.GetDurationToExpiry(this.getCert(), time.Now()) + if !this.hasCert() { + return true + } + d, err := util.GetDurationToExpiry(this.getCert(), this.timeNow()) return err != nil || d < certRenewalInterval } @@ -882,7 +896,7 @@ func PopulateCertCache(config *util.Config, key crypto.PrivateKey, generateOCSPR if err != nil { return nil, errors.Wrap(err, "creating cert fetcher from config") } - certCache := New(certs, certFetcher, []string{domain}, config.CertFile, 
config.NewCertFile, config.OCSPCache, generateOCSPResponse) + certCache := New(certs, certFetcher, []string{domain}, config.CertFile, config.NewCertFile, config.OCSPCache, generateOCSPResponse, time.Now) return certCache, nil } diff --git a/packager/certcache/certcache_test.go b/packager/certcache/certcache_test.go index b8ca00ab0..567a45929 100644 --- a/packager/certcache/certcache_test.go +++ b/packager/certcache/certcache_test.go @@ -69,6 +69,7 @@ type CertCacheSuite struct { ocspHandler func(w http.ResponseWriter, req *http.Request) tempDir string handler *CertCache + fakeClock *pkgt.FakeClock } func stringPtr(s string) *string { @@ -79,8 +80,10 @@ func (this *CertCacheSuite) New() (*CertCache, error) { // TODO(twifkak): Stop the old CertCache's goroutine. // TODO(banaag): Consider adding a test with certfetcher set. // For now, this tests certcache without worrying about certfetcher. + // certCache := New(pkgt.B3Certs, nil, []string{"example.com"}, "cert.crt", "newcert.crt", + // filepath.Join(this.tempDir, "ocsp"), nil, time.Now) certCache := New(pkgt.B3Certs, nil, []string{"example.com"}, "cert.crt", "newcert.crt", - filepath.Join(this.tempDir, "ocsp"), nil) + filepath.Join(this.tempDir, "ocsp"), nil, this.fakeClock.Now) certCache.extractOCSPServer = func(*x509.Certificate) (string, error) { return this.ocspServer.URL, nil } @@ -107,8 +110,9 @@ func (this *CertCacheSuite) TearDownSuite() { } func (this *CertCacheSuite) SetupTest() { + this.fakeClock = pkgt.NewFakeClock() var err error - this.fakeOCSP, err = FakeOCSPResponse(time.Now()) + this.fakeOCSP, err = FakeOCSPResponse(this.fakeClock.Now()) this.Require().NoError(err, "creating fake OCSP response") this.ocspHandler = func(resp http.ResponseWriter, req *http.Request) { @@ -138,7 +142,7 @@ func (this *CertCacheSuite) TearDownTest() { } func (this *CertCacheSuite) mux() http.Handler { - return mux.New(this.handler, nil, nil, nil) + return mux.New(this.handler, nil, nil, nil, nil) } func (this 
*CertCacheSuite) ocspServerCalled(f func()) bool { @@ -176,7 +180,7 @@ func (this *CertCacheSuite) DecodeCBOR(r io.Reader) map[string][]byte { } func (this *CertCacheSuite) TestServesCertificate() { - resp := pkgt.Get(this.T(), this.mux(), "/amppkg/cert/"+pkgt.CertName) + resp := pkgt.NewRequest(this.T(), this.mux(), "/amppkg/cert/"+pkgt.CertName).Do() this.Assert().Equal(http.StatusOK, resp.StatusCode, "incorrect status: %#v", resp) this.Assert().Equal("nosniff", resp.Header.Get("X-Content-Type-Options")) cbor := this.DecodeCBOR(resp.Body) @@ -193,7 +197,7 @@ func (this *CertCacheSuite) TestCertCacheIsNotHealthy() { // Prime memory cache with a past-midpoint OCSP: err := os.Remove(filepath.Join(this.tempDir, "ocsp")) this.Require().NoError(err, "deleting OCSP tempfile") - this.fakeOCSP, err = FakeOCSPResponse(time.Now().Add(-4 * 24 * time.Hour)) + this.fakeOCSP, err = FakeOCSPResponse(this.fakeClock.Now().Add(-4 * 24 * time.Hour)) this.Require().NoError(err, "creating stale OCSP response") this.Require().True(this.ocspServerCalled(func() { this.handler, err = this.New() @@ -214,7 +218,7 @@ func (this *CertCacheSuite) TestCertCacheIsNotHealthy() { } func (this *CertCacheSuite) TestServes404OnMissingCertificate() { - resp := pkgt.Get(this.T(), this.mux(), "/amppkg/cert/lalala") + resp := pkgt.NewRequest(this.T(), this.mux(), "/amppkg/cert/lalala").Do() this.Assert().Equal(http.StatusNotFound, resp.StatusCode, "incorrect status: %#v", resp) body, _ := ioutil.ReadAll(resp.Body) // Small enough not to fit a cert or key: @@ -223,11 +227,10 @@ func (this *CertCacheSuite) TestServes404OnMissingCertificate() { func (this *CertCacheSuite) TestOCSP() { // Verify it gets included in the cert-chain+cbor payload. - resp := pkgt.Get(this.T(), this.mux(), "/amppkg/cert/"+pkgt.CertName) + resp := pkgt.NewRequest(this.T(), this.mux(), "/amppkg/cert/"+pkgt.CertName).Do() this.Assert().Equal(http.StatusOK, resp.StatusCode, "incorrect status: %#v", resp) // 302400 is 3.5 days. 
max-age is slightly less because of the time between fake OCSP generation and cert-chain response. - // TODO(twifkak): Make this less flaky, by injecting a fake clock. - this.Assert().Equal("public, max-age=302399", resp.Header.Get("Cache-Control")) + this.Assert().Equal("public, max-age=302387", resp.Header.Get("Cache-Control")) cbor := this.DecodeCBOR(resp.Body) this.Assert().Equal(this.fakeOCSP, cbor["ocsp"]) } @@ -250,7 +253,7 @@ func (this *CertCacheSuite) TestOCSPExpiry() { // Prime memory and disk cache with a past-midpoint OCSP: err := os.Remove(filepath.Join(this.tempDir, "ocsp")) this.Require().NoError(err, "deleting OCSP tempfile") - this.fakeOCSP, err = FakeOCSPResponse(time.Now().Add(-4 * 24 * time.Hour)) + this.fakeOCSP, err = FakeOCSPResponse(this.fakeClock.Now().Add(-4 * 24 * time.Hour)) this.Require().NoError(err, "creating expired OCSP response") this.Require().True(this.ocspServerCalled(func() { this.handler, err = this.New() @@ -258,7 +261,7 @@ func (this *CertCacheSuite) TestOCSPExpiry() { })) // Verify HTTP response expires immediately: - resp := pkgt.Get(this.T(), this.mux(), "/amppkg/cert/"+pkgt.CertName) + resp := pkgt.NewRequest(this.T(), this.mux(), "/amppkg/cert/"+pkgt.CertName).Do() this.Assert().Equal("public, max-age=0", resp.Header.Get("Cache-Control")) // On update, verify network is called: @@ -272,7 +275,7 @@ func (this *CertCacheSuite) TestOCSPUpdateFromDisk() { // Prime memory cache with a past-midpoint OCSP: err := os.Remove(filepath.Join(this.tempDir, "ocsp")) this.Require().NoError(err, "deleting OCSP tempfile") - this.fakeOCSP, err = FakeOCSPResponse(time.Now().Add(-4 * 24 * time.Hour)) + this.fakeOCSP, err = FakeOCSPResponse(this.fakeClock.Now().Add(-4 * 24 * time.Hour)) this.Require().NoError(err, "creating stale OCSP response") this.Require().True(this.ocspServerCalled(func() { this.handler, err = this.New() @@ -280,7 +283,7 @@ func (this *CertCacheSuite) TestOCSPUpdateFromDisk() { })) // Prime disk cache with a fresh 
OCSP. - freshOCSP, err := FakeOCSPResponse(time.Now()) + freshOCSP, err := FakeOCSPResponse(this.fakeClock.Now()) this.Require().NoError(err, "creating fresh OCSP response") err = ioutil.WriteFile(filepath.Join(this.tempDir, "ocsp"), freshOCSP, 0644) this.Require().NoError(err, "writing fresh OCSP response to disk") @@ -315,7 +318,7 @@ func (this *CertCacheSuite) TestOCSPIgnoreInvalidUpdate() { // Prime memory and disk cache with a past-midpoint OCSP: err := os.Remove(filepath.Join(this.tempDir, "ocsp")) this.Require().NoError(err, "deleting OCSP tempfile") - staleOCSP, err := FakeOCSPResponse(time.Now().Add(-4 * 24 * time.Hour)) + staleOCSP, err := FakeOCSPResponse(this.fakeClock.Now().Add(-4 * 24 * time.Hour)) this.Require().NoError(err, "creating stale OCSP response") this.fakeOCSP = staleOCSP this.Require().True(this.ocspServerCalled(func() { @@ -324,7 +327,7 @@ func (this *CertCacheSuite) TestOCSPIgnoreInvalidUpdate() { })) // Try to update with an invalid OCSP: - this.fakeOCSP, err = FakeOCSPResponse(time.Now().Add(-8 * 24 * time.Hour)) + this.fakeOCSP, err = FakeOCSPResponse(this.fakeClock.Now().Add(-8 * 24 * time.Hour)) this.Require().NoError(err, "creating expired OCSP response") this.Assert().True(this.ocspServerCalled(func() { _, _, err := this.handler.readOCSP(true) diff --git a/packager/certfetcher/certfetcher.go b/packager/certfetcher/certfetcher.go index aa9447599..7bc513590 100644 --- a/packager/certfetcher/certfetcher.go +++ b/packager/certfetcher/certfetcher.go @@ -24,7 +24,6 @@ import ( "github.com/go-acme/lego/v3/challenge/http01" "github.com/go-acme/lego/v3/challenge/tlsalpn01" "github.com/go-acme/lego/v3/lego" - "github.com/go-acme/lego/v3/providers/dns" "github.com/go-acme/lego/v3/providers/http/webroot" "github.com/go-acme/lego/v3/registration" "github.com/pkg/errors" @@ -110,7 +109,7 @@ func New(email string, certSignRequest *x509.CertificateRequest, privateKey cryp } if dnsProvider != "" { - provider, err := 
dns.NewDNSChallengeProviderByName(dnsProvider) + provider, err := DNSProvider(dnsProvider) if err != nil { return nil, errors.Wrap(err, "Getting DNS01 challenge provider.") } diff --git a/packager/certfetcher/dns.go b/packager/certfetcher/dns.go new file mode 100644 index 000000000..2f73c3c95 --- /dev/null +++ b/packager/certfetcher/dns.go @@ -0,0 +1,12 @@ +// +build dns01 + +package certfetcher + +import ( + "github.com/go-acme/lego/v3/challenge" + "github.com/go-acme/lego/v3/providers/dns" +) + +func DNSProvider(name string) (challenge.Provider, error) { + return dns.NewDNSChallengeProviderByName(name) +} diff --git a/packager/certfetcher/no_dns.go b/packager/certfetcher/no_dns.go new file mode 100644 index 000000000..d2e3469e3 --- /dev/null +++ b/packager/certfetcher/no_dns.go @@ -0,0 +1,12 @@ +// +build !dns01 + +package certfetcher + +import ( + "github.com/go-acme/lego/v3/challenge" + "github.com/pkg/errors" +) + +func DNSProvider(name string) (challenge.Provider, error) { + return nil, errors.New("amppkg was built without DNS-01 support; please rebuild with `-tags dns01`") +} diff --git a/packager/healthz/healthz_test.go b/packager/healthz/healthz_test.go index 3d25cb143..9ff76d794 100644 --- a/packager/healthz/healthz_test.go +++ b/packager/healthz/healthz_test.go @@ -52,13 +52,13 @@ func (this fakeNotHealthyCertHandler) IsHealthy() error { func TestHealthzOk(t *testing.T) { handler, err := New(fakeHealthyCertHandler{}) require.NoError(t, err) - resp := pkgt.Get(t, mux.New(nil, nil, nil, handler), "/healthz") + resp := pkgt.NewRequest(t, mux.New(nil, nil, nil, handler, nil), "/healthz").Do() assert.Equal(t, http.StatusOK, resp.StatusCode, "ok", resp) } func TestHealthzFail(t *testing.T) { handler, err := New(fakeNotHealthyCertHandler{}) require.NoError(t, err) - resp := pkgt.Get(t, mux.New(nil, nil, nil, handler), "/healthz") + resp := pkgt.NewRequest(t, mux.New(nil, nil, nil, handler, nil), "/healthz").Do() assert.Equal(t, http.StatusInternalServerError, 
resp.StatusCode, "error", resp) } diff --git a/packager/mux/mux.go b/packager/mux/mux.go index e5696c272..3ec62f66e 100644 --- a/packager/mux/mux.go +++ b/packager/mux/mux.go @@ -39,37 +39,123 @@ import ( "strings" "github.com/ampproject/amppackager/packager/util" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/client_golang/prometheus/promhttp" ) +// routingRule maps a URL path prefix to three entities: +// * suffixValidatorFunc - a function that validates the suffix of URL path, +// * handler - an http.Handler that should handle such prefix, +// * handlerPrometheusLabel - a label (dimension) to be used in +// handler-agnostic Prometheus metrics like requests count. +// Must adhere to the Prometheus data model: +// https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels +type routingRule struct { + urlPathPrefix string + suffixValidatorFunc func(suffix string, req *http.Request, params *map[string]string, errorMsg *string, errorCode *int) + handler http.Handler + handlerPrometheusLabel string +} + +// mux stores a routingMatrix, an array of routing rules that define the mux' +// routing logic. Note that the order of rules in the matrix is important: the +// first matching rule will be applied by ServeHTTP, so the rule for a +// particular prefix should precede the rule for it's sub-prefix. E.g. the rule +// for routing requests prefixed with “/priv/doc/” (w/ trailing slash) must go +// before the rule for routing requests prefixed with “/priv/doc” (w/o trailing +// slash). +// The last rule in the matrix must have empty urlPathPrefix, so it would match +// any URL. This ensures there's at least one matching rule for any URL. type mux struct { - certCache http.Handler - signer http.Handler - validityMap http.Handler - healthz http.Handler + routingMatrix []routingRule + defaultRule routingRule } -// The main entry point. 
Use the return value for http.Server.Handler. -func New(certCache http.Handler, signer http.Handler, validityMap http.Handler, healthz http.Handler) http.Handler { - return &mux{certCache, signer, validityMap, healthz} +// return404 is a URL Path Suffix Validator that always returns 404. +func return404(suffix string, req *http.Request, params *map[string]string, errorMsg *string, errorCode *int) { + *errorMsg, *errorCode = "404 page not found", http.StatusNotFound +} + +// expectNoSuffix is a URL Path Suffix Validator that expects an empty suffix. +func expectNoSuffix(suffix string, req *http.Request, params *map[string]string, errorMsg *string, errorCode *int) { + if suffix != "" { + return404(suffix, req, params, errorMsg, errorCode) + } } +// expectSignerQuery is a URL Path Suffix Validator specific to signer requests. +func expectSignerQuery(suffix string, req *http.Request, params *map[string]string, errorMsg *string, errorCode *int) { + (*params)["signURL"] = suffix + if req.URL.RawQuery != "" { + (*params)["signURL"] += "?" + req.URL.RawQuery + } +} + +// expectCertQuery is a URL Path Suffix Validator specific to cert requests. +func expectCertQuery(suffix string, req *http.Request, params *map[string]string, errorMsg *string, errorCode *int) { + unescaped, err := url.PathUnescape(suffix) + if err != nil { + *errorMsg, *errorCode = "400 bad request - bad URL encoding", http.StatusBadRequest + } else { + (*params)["certName"] = unescaped + } +} + +// New is the main entry point. Use the return value for http.Server.Handler. +func New(certCache http.Handler, signer http.Handler, validityMap http.Handler, healthz http.Handler, metrics http.Handler) http.Handler { + return &mux{ + // Note that the order of rules in the matrix matters: the first + // matching rule will be applied, so the rule for “/priv/doc/” precedes + // the rule for “/priv/doc” (note that SignerURLPrefix is "/priv/doc"). + // Also note that the last rule matches any URL. 
+ []routingRule{ + {util.SignerURLPrefix + "/", expectSignerQuery, signer, "signer"}, + {util.SignerURLPrefix, expectNoSuffix, signer, "signer"}, + {util.CertURLPrefix + "/", expectCertQuery, certCache, "certCache"}, + {util.ValidityMapPath, expectNoSuffix, validityMap, "validityMap"}, + {util.HealthzPath, expectNoSuffix, healthz, "healthz"}, + {util.MetricsPath, expectNoSuffix, metrics, "metrics"}, + }, + /* defaultRule= */ routingRule{"", return404, nil, "handler_not_assigned"}, + } +} + +// promRequestsTotal is a Prometheus counter that observes total requests count. +var promRequestsTotal = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "total_requests_by_code_and_url", + Help: "Total number of requests by HTTP code and URL.", + }, + []string{"code", "handler"}, +) + +// promRequestsLatency is a Prometheus summary that observes requests latencies. +// Objectives key value pairs set target quantiles and respective allowed rank variance. +// Upon query, for each Objective quantile (0.5, 0.9, 0.99) the summary returns +// an actual observed latency value that is ranked close to the Objective value. +// For more intuition on the Objectives see http://alexandrutopliceanu.ro/post/targeted-quantiles/. +var promRequestsLatency = promauto.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "request_latencies_in_seconds", + Help: "Requests latencies in seconds, from the moment the routing is complete, to the moment the response is returned.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"code", "handler"}, +) + +// tryTrimPrefix trims prefix off s if s starts with prefix, and keeps s as is +// otherwise. It returns the remaining suffix and a boolean success indicator. +// If prefix is an empty string, returns s and "true". 
func tryTrimPrefix(s, prefix string) (string, bool) { sLen := len(s) trimmed := strings.TrimPrefix(s, prefix) - return trimmed, len(trimmed) != sLen + return trimmed, len(prefix)+len(trimmed) == sLen } var allowedMethods = map[string]bool{http.MethodGet: true, http.MethodHead: true} func (this *mux) ServeHTTP(resp http.ResponseWriter, req *http.Request) { - if !allowedMethods[req.Method] { - http.Error(resp, "405 method not allowed", http.StatusMethodNotAllowed) - return - } - - params := map[string]string{} - req = WithParams(req, params) - // Use EscapedPath rather than RequestURI because the latter can take // absolute-form, per https://tools.ietf.org/html/rfc7230#section-5.3. // @@ -78,41 +164,55 @@ func (this *mux) ServeHTTP(resp http.ResponseWriter, req *http.Request) { // https://wicg.github.io/webpackage/draft-yasskin-http-origin-signed-responses.html#application-signed-exchange // item 3. path := req.URL.EscapedPath() - if suffix, ok := tryTrimPrefix(path, "/priv/doc"); ok { - if suffix == "" { - this.signer.ServeHTTP(resp, req) - } else if suffix[0] == '/' { - params["signURL"] = suffix[1:] - if req.URL.RawQuery != "" { - params["signURL"] += "?" + req.URL.RawQuery - } - this.signer.ServeHTTP(resp, req) - } else { - http.NotFound(resp, req) - } - } else if suffix, ok := tryTrimPrefix(path, util.CertURLPrefix+"/"); ok { - unescaped, err := url.PathUnescape(suffix) - if err != nil { - http.Error(resp, "400 bad request - bad URL encoding", http.StatusBadRequest) - } else { - params["certName"] = unescaped - this.certCache.ServeHTTP(resp, req) + + // Find the first matching routing rule. 
+	var matchingRule *routingRule
+	var suffix string
+	for _, currentRule := range this.routingMatrix {
+		if currentSuffix, isMatch := tryTrimPrefix(path, currentRule.urlPathPrefix); isMatch {
+			matchingRule = &currentRule
+			suffix = currentSuffix
+			break
 		}
-	} else if path == util.HealthzPath {
-		this.healthz.ServeHTTP(resp, req)
-	} else if path == util.ValidityMapPath {
-		this.validityMap.ServeHTTP(resp, req)
+	}
+
+	if matchingRule == nil {
+		matchingRule = &this.defaultRule
+	}
+
+	errorMsg := ""
+	errorCode := 0
+	// Validate HTTP method and params, parse params and attach them to req.
+	if !allowedMethods[req.Method] {
+		errorMsg, errorCode = "405 method not allowed", http.StatusMethodNotAllowed
 	} else {
-		http.NotFound(resp, req)
+		params := map[string]string{}
+		req = WithParams(req, params)
+		matchingRule.suffixValidatorFunc(suffix, req, &params, &errorMsg, &errorCode)
 	}
+
+	// Prepare the handler.
+	var handlerFunc http.Handler
+	if errorCode == 0 {
+		handlerFunc = matchingRule.handler
+	} else {
+		handlerFunc = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, errorMsg, errorCode) })
+	}
+
+	// Decorate the call to handlerFunc with Prometheus measurers of requests
+	// count and latency, pre-labelled (curried) with the right handler label.
+	label := prometheus.Labels{"handler": matchingRule.handlerPrometheusLabel}
+	promhttp.InstrumentHandlerDuration(promRequestsLatency.MustCurryWith(label),
+		promhttp.InstrumentHandlerCounter(promRequestsTotal.MustCurryWith(label),
+			handlerFunc)).ServeHTTP(resp, req)
 }

 type paramsKeyType struct{}

 var paramsKey = paramsKeyType{}

-// Gets the params from the request context, injected by the mux. Guaranteed to
-// be non-nil. Call from the handlers.
+// Params gets the params from the request context, injected by the mux.
+// Guaranteed to be non-nil. Call from the handlers.
func Params(req *http.Request) map[string]string { params := req.Context().Value(paramsKey) switch v := params.(type) { @@ -124,9 +224,9 @@ func Params(req *http.Request) map[string]string { } } -// Returns a copy of req annotated with the given params. (Params is stored by -// reference, and so may be mutated afterwards.) To be called only by this -// library itself or by tests. +// WithParams returns a copy of req annotated with the given params. (Params is +// stored by reference, and so may be mutated afterwards.) To be called only by +// this library itself or by tests. func WithParams(req *http.Request, params map[string]string) *http.Request { return req.WithContext(context.WithValue(req.Context(), paramsKey, params)) } diff --git a/packager/mux/mux_test.go b/packager/mux/mux_test.go new file mode 100644 index 000000000..4b67e723b --- /dev/null +++ b/packager/mux/mux_test.go @@ -0,0 +1,514 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mux + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + + pkgt "github.com/ampproject/amppackager/packager/testing" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + promtest "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +// expand propagates hardcoded values into template test url. +func expand(templateURL string) string { + templateURL = strings.Replace(templateURL, "$HOST", "http://www.publisher_amp_server.com", 1) + templateURL = strings.Replace(templateURL, "$FETCH", "http://www.publisher_main_server.com/some_page", 1) + templateURL = strings.Replace(templateURL, "$SIGN", "https://www.publisher_main_server.com/some_page", 1) + templateURL = strings.Replace(templateURL, "$CERT", pkgt.CertName, 1) + return templateURL +} + +// mockedHandler mocks mux' underlying http handlers - signer, cert etc. 
+type mockedHandler struct { + mock.Mock +} + +func (m *mockedHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + m.Called(Params(req)) +} + +func TestServeHTTPSuccess(t *testing.T) { + templateTests := []struct { + testName string + testURL string + expectHandler string + expectParams map[string]string + }{ + { + testName: `Signer - empty`, + testURL: `$HOST/priv/doc`, + expectHandler: `signer`, + expectParams: map[string]string{}, + }, { + testName: `Signer - with query, empty`, + testURL: `$HOST/priv/doc?`, + expectHandler: `signer`, + expectParams: map[string]string{}, + }, { + testName: `Signer - with query, regular`, + testURL: `$HOST/priv/doc?fetch=$FETCH&sign=$SIGN`, + expectHandler: `signer`, + expectParams: map[string]string{}, + }, { + testName: `Signer - with query, escaping`, + testURL: `$HOST/priv/doc?fetch=$FETCH&sign=$SIGN%2A\`, + expectHandler: `signer`, + expectParams: map[string]string{}, + }, { + testName: `Signer - with path, empty`, + testURL: `$HOST/priv/doc/`, + expectHandler: `signer`, + expectParams: map[string]string{`signURL`: ``}, + }, { + testName: `Signer - with path, regular`, + testURL: `$HOST/priv/doc/$FETCH`, + expectHandler: `signer`, + expectParams: map[string]string{`signURL`: `$FETCH`}, + }, { + testName: `Signer - with path, escaping`, + testURL: `$HOST/priv/doc/$FETCH%2A\`, + expectHandler: `signer`, + expectParams: map[string]string{`signURL`: `$FETCH%2A%5C`}, + }, { + testName: `Signer - with path and query, regular`, + testURL: `$HOST/priv/doc/$FETCH?amp=1`, + expectHandler: `signer`, + expectParams: map[string]string{`signURL`: `$FETCH?amp=1`}, + }, { + testName: `Signer - with path and query, escaping`, + testURL: `$HOST/priv/doc/$FETCH%2A\?amp=1%2A\`, + expectHandler: `signer`, + expectParams: map[string]string{`signURL`: `$FETCH%2A%5C?amp=1%2A\`}, + }, { + testName: `Cert - empty`, + testURL: `$HOST/amppkg/cert/`, + expectHandler: `cert`, + expectParams: map[string]string{`certName`: ``}, + }, { + 
testName: `Cert - regular`, + testURL: `$HOST/amppkg/cert/$CERT`, + expectHandler: `cert`, + expectParams: map[string]string{`certName`: `$CERT`}, + }, { + testName: `Cert - escaping`, + testURL: `$HOST/amppkg/cert/$CERT%2A\`, + expectHandler: `cert`, + expectParams: map[string]string{`certName`: `$CERT*\`}, + }, { + testName: `ValidityMap - regular`, + testURL: `$HOST/amppkg/validity`, + expectHandler: `validityMap`, + expectParams: map[string]string{}, + }, { + testName: `Healthz - regular`, + testURL: `$HOST/healthz`, + expectHandler: `healthz`, + expectParams: map[string]string{}, + }, { + testName: `Metrics - regular`, + testURL: `$HOST/metrics`, + expectHandler: `metrics`, + expectParams: map[string]string{}, + }, + } + for _, tt := range templateTests { + testName := tt.testName + t.Run(testName, func(t *testing.T) { + // Defer validation to ensure it does happen. + mocks := map[string](*mockedHandler){"signer": &mockedHandler{}, "healthz": &mockedHandler{}, "cert": &mockedHandler{}, "validityMap": &mockedHandler{}, "metrics": &mockedHandler{}} + var actualResp *http.Response + defer func() { + // Expect no errors. + assert.Equal(t, 200, actualResp.StatusCode, "No error expected: %#v", actualResp) + + // Expect the right call to the right handler, and no calls to the rest. + for _, mockedHandler := range mocks { + mockedHandler.AssertExpectations(t) + } + }() + + // expand template URL and expectParams. + tt.testURL = expand(tt.testURL) + for v, k := range tt.expectParams { + tt.expectParams[v] = expand(k) + } + + // Set expectation. + expectMockedHandler := mocks[tt.expectHandler] + expectMockedHandler.On("ServeHTTP", tt.expectParams) + + // Run. 
+ mux := New(mocks["cert"], mocks["signer"], mocks["validityMap"], mocks["healthz"], mocks["metrics"]) + actualResp = pkgt.NewRequest(t, mux, tt.testURL).Do() + }) + } +} + +func expectError(t *testing.T, url string, expectErrorMessage string, expectErrorCode int, body io.Reader) { + // Defer validation to ensure it does happen. + mockedHandler := new(mockedHandler) + var actualResp *http.Response + var actualErrorMessage string + defer func() { + // Expect the right error. + assert.Equal(t, expectErrorCode, actualResp.StatusCode) + assert.Equal(t, expectErrorMessage, actualErrorMessage) + + // Expect no calls to mocks. + mockedHandler.AssertExpectations(t) + }() + + // Initialize mux with 4 identical mocked handlers, because no calls are expect to any of them. + mux := New(mockedHandler, mockedHandler, mockedHandler, mockedHandler, mockedHandler) + + // Run and extract error. + actualResp = pkgt.NewRequest(t, mux, url).SetBody(body).Do() + actualErrorMessageBuffer, _ := ioutil.ReadAll(actualResp.Body) + actualErrorMessage = fmt.Sprintf("%s", actualErrorMessageBuffer) + +} + +func TestServeHTTPexpect404s(t *testing.T) { + templateTests := []struct { + testName string + URL string + }{ + {"No such endpoint ", "$HOST/abc"}, + {"Signer - unexpected extra char ", "$HOST/priv/doc1"}, + {"Cert - no closing slash ", "$HOST/amppkg/cert"}, + {"ValidityMap - unexpected closing slash", "$HOST/amppkg/validity/"}, + {"Healthz - unexpected closing slash ", "$HOST/healthz/"}, + {"Healthz - unexpected extra char ", "$HOST/healthz1"}, + {"Metrics - unexpected closing slash ", "$HOST/metrics/"}, + } + for _, tt := range templateTests { + t.Run(tt.testName, func(t *testing.T) { + expectError(t, expand(tt.URL), "404 page not found\n", http.StatusNotFound, nil) + }) + } +} + +func TestServeHTTPexpect405(t *testing.T) { + body := strings.NewReader("Non empty body so this sends a POST request") + expectError(t, expand("$HOST/healthz"), "405 method not allowed\n", 
http.StatusMethodNotAllowed, body) +} + +func TestParamsIncorrectValueType(t *testing.T) { + req := httptest.NewRequest("", "http://abc.com", nil) + + // Pass string instead of expected map[string]string. + req = req.WithContext(context.WithValue(req.Context(), paramsKey, "Some string")) + + // Expect Params to handle invalid input gracefully. + assert.Equal(t, Params(req), map[string]string{}) +} + +const promExpectedHeaderRequestsTotal = ` + # HELP total_requests_by_code_and_url Total number of requests by HTTP code and URL. + # TYPE total_requests_by_code_and_url counter + ` + +// TestPrometheusMetricRequestsTotal tests the respective Prometheus metric. +// Test each scenario in isolation to make sure each of them works, then test +// them altogether to make sure they don't interfere with each other. +func TestPrometheusMetricRequestsTotal(t *testing.T) { + nopHandler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + + tests := []struct { + testName string + testHint string + testFunc func() + expectedMetrics string + }{ + { + /* testName= */ `AllHandlersNOP200`, + /* testHint= */ ` + Make requests to mux with all handlers being NOPs returning 200. + Request Healthz twice to test aggregation of identical requests. 
+ `, + /* testFunc= */ func() { + mux := New(nopHandler, nopHandler, nopHandler, nopHandler, nopHandler) + pkgt.NewRequest(t, mux, expand(`$HOST/priv/doc?fetch=$FETCH&sign=$SIGN`)).Do() + pkgt.NewRequest(t, mux, expand(`$HOST/amppkg/cert/$CERT`)).Do() + pkgt.NewRequest(t, mux, expand(`$HOST/amppkg/validity`)).Do() + pkgt.NewRequest(t, mux, expand(`$HOST/healthz`)).Do() + pkgt.NewRequest(t, mux, expand(`$HOST/healthz`)).Do() + }, + /* expectedMetrics = */ ` + total_requests_by_code_and_url{code="200",handler="signer"} 1 + total_requests_by_code_and_url{code="200",handler="certCache"} 1 + total_requests_by_code_and_url{code="200",handler="validityMap"} 1 + total_requests_by_code_and_url{code="200",handler="healthz"} 2 + `, + }, + { + /* testName= */ `ErrorsReturnedByMuxDirectly`, + /* testHint= */ ` + Test counting requests to same handler that returned different codes. + Trigger a 404 attributed to healthz by adding an unexpected suffix to path. + `, + /* testFunc= */ func() { + mux := New(nopHandler, nopHandler, nopHandler, nopHandler, nopHandler) + pkgt.NewRequest(t, mux, expand(`$HOST/healthzSOME_SUFFIX`)).Do() + }, + /* expectedMetrics = */ ` + total_requests_by_code_and_url{code="404",handler="healthz"} 1 + `, + }, + { + /* testName= */ `UnassignedRequests`, + /* testHint= */ ` + Test counting request not assigned to a handler. + `, + /* testFunc= */ func() { + mux := New(nopHandler, nopHandler, nopHandler, nopHandler, nopHandler) + pkgt.NewRequest(t, mux, expand(`$HOST/abc`)).Do() + pkgt.NewRequest(t, mux, expand(`$HOST/def`)).Do() + pkgt.NewRequest(t, mux, expand(`$HOST/ghi`)).Do() + }, + /* expectedMetrics = */ ` + total_requests_by_code_and_url{code="404",handler="handler_not_assigned"} 3 + `, + }, + { + /* testName= */ `ForbiddenMethod`, + /* testHint= */ ` + Special case: forbidden method. 
+ `, + /* testFunc= */ func() { + mux := New(nopHandler, nopHandler, nopHandler, nopHandler, nopHandler) + body := strings.NewReader("Non empty body so this will be a POST request") + pkgt.NewRequest(t, mux, expand("$HOST/healthz")).SetBody(body).Do() + }, + /* expectedMetrics = */ ` + total_requests_by_code_and_url{code="405",handler="healthz"} 1 + `, + }, + { + /* testName= */ `ErrorReturnedByHandler`, + /* testHint= */ ` + Some of the above requests generated errors, but those errors were thrown + by mux, not by handlers. Handlers were no-ops. Now let's simulate a + request that triggers a handler-generated error. + Specifically let's simulate signer returning a 400. + `, + /* testFunc= */ func() { + signerMockReturning400 := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, "Bad Request", 400) })) + mux := New(nopHandler, signerMockReturning400, nopHandler, nopHandler, nopHandler) + pkgt.NewRequest(t, mux, expand("$HOST/priv/doc/abc")).Do() + }, + /* expectedMetrics = */ ` + total_requests_by_code_and_url{code="400",handler="signer"} 1 + `, + }, + { + /* testName= */ `FetchMetricsEndpoint`, + /* testHint= */ ` + Special case: send a request to "metrics" endpoint, which results in two actions: + 1) Previously collected metrics are returned in response with status 200. + 2) Prometheus requests counter is incremented for respective handler and code ("metric", 200). + Let's test that these actions work fine together. + Let's send a "metric" request to a new mux instance that has a real, non-mocked + metric handler. Such request, along with downstream validation, checks + that the "metric" endpoint's underlying logic doesn't interfere + with accounting for the actual metric request. 
+ `, + /* testFunc= */ func() { + mux := New(nopHandler, nopHandler, nopHandler, nopHandler, promhttp.Handler()) + pkgt.NewRequest(t, mux, expand(`$HOST/metrics`)).Do() + }, + /* expectedMetrics = */ ` + total_requests_by_code_and_url{code="200",handler="metrics"} 1 + `, + }, + } + + // Test each scenario in isolation. + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + promRequestsTotal.Reset() + expectedMetrics := promExpectedHeaderRequestsTotal + tt.expectedMetrics + tt.testFunc() + expectation := strings.NewReader(expectedMetrics) + if err := promtest.CollectAndCompare(promRequestsTotal, expectation, "total_requests_by_code_and_url"); err != nil { + t.Errorf("TestPrometheusMetricRequestsTotal - "+tt.testName+": unexpected collecting result:\n%s", err) + } + }) + } + + // Test all scenarios together. + promRequestsTotal.Reset() + expectedMetrics := promExpectedHeaderRequestsTotal + for _, tt := range tests { + expectedMetrics += tt.expectedMetrics + tt.testFunc() + } + expectation := strings.NewReader(expectedMetrics) + if err := promtest.CollectAndCompare(promRequestsTotal, expectation, "total_requests_by_code_and_url"); err != nil { + t.Errorf("TestPrometheusMetricRequestsTotal - all scenarios in single run: unexpected collecting result:\n%s", err) + } +} + +// TestPrometheusMetricRequestsLatency tests the end-to-end latencies metrics. +// It checks that the right error codes and handlers are accounted for. It also +// checks that the latencies are positive, but doesn't expect exact values, +// because latencies are non-deterministic. +// It would be nice to mock time (e.g. patch the time.Since function) to test +// the exact latencies values produced, and to simulate slow execution, too. +// However, seems like there's no "native" way to monkey-patch in Go. +// There is an option that doesn't look safe enough: +// https://www.reddit.com/r/golang/comments/30try1/monkey_patching_in_go/ +// https://news.ycombinator.com/item?id=22442170. 
+func TestPrometheusMetricRequestsLatency(t *testing.T) { + hintPrefix := "TestPrometheusMetricRequestsLatency" + + type metricRecordKey struct { + codeLabelPair, handlerLabelPair string + } + + type scenarioRequests []struct { + urlTemplate string + mockHandlerThrows404 bool + } + + type scenarioExpectedSampleCountMap map[metricRecordKey]uint64 + + scenarios := []struct { + requests scenarioRequests + expectedSampleCountMap scenarioExpectedSampleCountMap + }{ + { + scenarioRequests{ + {urlTemplate: `$HOST/priv/doc?fetch=$FETCH&sign=$SIGN`, mockHandlerThrows404: true}, + {urlTemplate: `$HOST/priv/doc?fetch=$FETCH&sign=$SIGN`, mockHandlerThrows404: false}, + }, + scenarioExpectedSampleCountMap{ + {`name:"code" value:"404" `, `name:"handler" value:"signer" `}: 1, + {`name:"code" value:"200" `, `name:"handler" value:"signer" `}: 1, + }, + }, + { + scenarioRequests{ + {urlTemplate: `$HOST/amppkg/cert/$CERT`, mockHandlerThrows404: false}, + }, + scenarioExpectedSampleCountMap{ + {`name:"code" value:"200" `, `name:"handler" value:"certCache" `}: 1, + }, + }, + { + scenarioRequests{ + {urlTemplate: `$HOST/amppkg/validity`, mockHandlerThrows404: false}, + }, + scenarioExpectedSampleCountMap{ + {`name:"code" value:"200" `, `name:"handler" value:"validityMap" `}: 1, + }, + }, + { + scenarioRequests{ + {urlTemplate: `$HOST/healthz`, mockHandlerThrows404: false}, + {urlTemplate: `$HOST/healthz`, mockHandlerThrows404: false}, + {urlTemplate: `$HOST/healthz`, mockHandlerThrows404: false}, + }, + scenarioExpectedSampleCountMap{ + {`name:"code" value:"200" `, `name:"handler" value:"healthz" `}: 3, + }, + }, + { + scenarioRequests{ + {urlTemplate: `$HOST/metrics`, mockHandlerThrows404: false}, + }, + scenarioExpectedSampleCountMap{ + {`name:"code" value:"200" `, `name:"handler" value:"metrics" `}: 1, + }, + }, + { + scenarioRequests{ + {urlTemplate: `$HOST/abc`, mockHandlerThrows404: false}, + {urlTemplate: `$HOST/def`, mockHandlerThrows404: false}, + }, + 
scenarioExpectedSampleCountMap{ + {`name:"code" value:"404" `, `name:"handler" value:"handler_not_assigned" `}: 2, + }, + }, + } + + for _, scenario := range scenarios { + promRequestsLatency.Reset() + + for _, req := range scenario.requests { + mockHandler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if req.mockHandlerThrows404 { + http.Error(w, "404 page not found", 404) + } + })) + mux := New(mockHandler, mockHandler, mockHandler, mockHandler, mockHandler) + pkgt.NewRequest(t, mux, expand(req.urlTemplate)).Do() + + } + + expectedSampleCountMap := scenario.expectedSampleCountMap + + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(promRequestsLatency); err != nil { + t.Errorf(hintPrefix+" - registering collector failed: %s", err) + } + + actualMetricFamilyArr, err := reg.Gather() + if err != nil { + t.Errorf(hintPrefix+" - gathering metrics failed: %s", err) + } + + assert.Equal(t, 1, len(actualMetricFamilyArr), + hintPrefix+" expects exactly one metric family.") + + assert.Equal(t, "request_latencies_in_seconds", *actualMetricFamilyArr[0].Name, + hintPrefix+" expects the right metric name.") + + assert.Equal(t, len(expectedSampleCountMap), len(actualMetricFamilyArr[0].Metric), + hintPrefix+" expects the right amount of metrics collected and gathered.") + + for _, actualMetric := range actualMetricFamilyArr[0].Metric { + // Expect the right sample count. + code := actualMetric.Label[0].String() + handler := actualMetric.Label[1].String() + expectedSampleCount := expectedSampleCountMap[metricRecordKey{code, handler}] + actualSampleCount := actualMetric.Summary.GetSampleCount() + assert.Equal(t, expectedSampleCount, actualSampleCount, hintPrefix+" expects the right sample count for "+code+" "+handler) + + // Expect the right number of quantiles. + assert.Equal(t, 3, len(actualMetric.Summary.Quantile), hintPrefix+" expects the right number of quantiles.") + + // Expect the right quantiles. 
+ // Expect positive quantile values, because latencies are non-zero. + // Don't check the exact values, because latencies are non-deterministic. + expectedQuantileKeys := []float64{0.5, 0.9, 0.99} + for i, quantile := range actualMetric.Summary.Quantile { + assert.Equal(t, expectedQuantileKeys[i], quantile.GetQuantile(), hintPrefix+" expects the right quantile.") + assert.True(t, quantile.GetValue() > .0, hintPrefix+" expects non-zero quantile value (latency).") + } + } + } +} diff --git a/packager/signer/signer.go b/packager/signer/signer.go index 5b3c54c37..fa1659299 100644 --- a/packager/signer/signer.go +++ b/packager/signer/signer.go @@ -39,6 +39,8 @@ import ( "github.com/ampproject/amppackager/transformer" rpb "github.com/ampproject/amppackager/transformer/request" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" ) // The user agent to send when issuing fetches. Should look like a mobile device. @@ -78,14 +80,6 @@ var statusNotModifiedHeaders = map[string]bool{ "Vary": true, } -// MICE requires the sender process its payload in reverse order -// (https://tools.ietf.org/html/draft-thomson-http-mice-03#section-2.1). -// In an HTTP reverse proxy, this could be done using range requests, but would -// be inefficient. Therefore, the signer requires the whole payload in memory. -// To prevent DoS, a memory limit is set. This limit is mostly arbitrary, -// though there's no benefit to having a limit greater than that of AMP Caches. 
-const maxBodyLength = 4 * 1 << 20 - // The current maximum is defined at: // https://cs.chromium.org/chromium/src/content/browser/loader/merkle_integrity_source_stream.cc?l=18&rcl=591949795043a818e50aba8a539094c321a4220c // The maximum is cheapest in terms of network usage, and probably CPU on both @@ -132,6 +126,7 @@ type Signer struct { overrideBaseURL *url.URL requireHeaders bool forwardedRequestHeaders []string + timeNow func() time.Time } func noRedirects(req *http.Request, via []*http.Request) error { @@ -140,14 +135,14 @@ func noRedirects(req *http.Request, via []*http.Request) error { func New(certHandler certcache.CertHandler, key crypto.PrivateKey, urlSets []util.URLSet, rtvCache *rtv.RTVCache, shouldPackage func() error, overrideBaseURL *url.URL, - requireHeaders bool, forwardedRequestHeaders []string) (*Signer, error) { + requireHeaders bool, forwardedRequestHeaders []string, timeNow func() time.Time) (*Signer, error) { client := http.Client{ CheckRedirect: noRedirects, // TODO(twifkak): Load-test and see if default transport settings are okay. Timeout: 60 * time.Second, } - return &Signer{certHandler, key, &client, urlSets, rtvCache, shouldPackage, overrideBaseURL, requireHeaders, forwardedRequestHeaders}, nil + return &Signer{certHandler, key, &client, urlSets, rtvCache, shouldPackage, overrideBaseURL, requireHeaders, forwardedRequestHeaders, timeNow}, nil } func (this *Signer) fetchURL(fetch *url.URL, serveHTTPReq *http.Request) (*http.Request, *http.Response, *util.HTTPError) { @@ -181,7 +176,7 @@ func (this *Signer) fetchURL(fetch *url.URL, serveHTTPReq *http.Request) (*http. // TODO(twifkak): Extract host from upstream Forwarded header // and concatenate. (Do not include any other parameters, as // they may lead to over-signing.) 
- req.Header.Set("Forwarded", `host=` + quotedHost) + req.Header.Set("Forwarded", `host=`+quotedHost) xfh := serveHTTPReq.Host if oldXFH := serveHTTPReq.Header.Get("X-Forwarded-Host"); oldXFH != "" { xfh = oldXFH + "," + xfh @@ -247,7 +242,7 @@ func MutateFetchedContentSecurityPolicy(fetched string) string { // Add missing directives or replace the ones that were removed in some cases newCsp.WriteString( "default-src * blob: data:;" + - "report-uri https://csp-collector.appspot.com/csp/amp;" + + "report-uri https://csp.withgoogle.com/csp/amp;" + "script-src blob: https://cdn.ampproject.org/rtv/ " + "https://cdn.ampproject.org/v0.js " + "https://cdn.ampproject.org/v0/ " + @@ -277,6 +272,52 @@ func (this *Signer) genCertURL(cert *x509.Certificate, signURL *url.URL) (*url.U return ret, nil } +// promGatewayRequestsTotal is a Prometheus counter that observes total gateway requests count. +var promGatewayRequestsTotal = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "total_gateway_requests_by_code", + Help: "Total number of underlying requests to AMP document server - by HTTP response status code.", + }, + []string{"code"}, +) + +// promGatewayRequestsLatency is a Prometheus summary that observes requests latencies. +// Objectives key value pairs set target quantiles and respective allowed rank variance. +// Upon query, for each Objective quantile (0.5, 0.9, 0.99) the summary returns +// an actual observed latency value that is ranked close to the Objective value. +// For more intuition on the Objectives see http://alexandrutopliceanu.ro/post/targeted-quantiles/. 
+var promGatewayRequestsLatency = promauto.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "gateway_request_latencies_in_seconds", + Help: "Latencies (in seconds) of gateway requests to AMP document server - by HTTP response status code.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"code"}, +) + +func (this *Signer) fetchURLAndMeasure(fetch *url.URL, serveHTTPReq *http.Request) (*http.Request, *http.Response, *util.HTTPError) { + startTime := this.timeNow() + + fetchReq, fetchResp, httpErr := this.fetchURL(fetch, serveHTTPReq) + if httpErr == nil { + // httpErr is nil, i.e. the gateway request did succeed. Let Prometheus + // observe the gateway request and its latency - along with the response code. + label := prometheus.Labels{"code": strconv.Itoa(fetchResp.StatusCode)} + promGatewayRequestsTotal.With(label).Inc() + + latency := this.timeNow().Sub(startTime) + promGatewayRequestsLatency.With(label).Observe(latency.Seconds()) + } else { + // httpErr can have a non-nil value. E.g. http.StatusBadGateway (502) + // is the most probable error fetchURL returns if failed. In case of + // non-nil httpErr don't observe the request. Instead do nothing and let + // mux's promRequestsTotal observe the top level non-gateway request (along + // with the response code e.g. 502) once signer has completed handling it. 
+ } + + return fetchReq, fetchResp, httpErr +} + func (this *Signer) ServeHTTP(resp http.ResponseWriter, req *http.Request) { resp.Header().Add("Vary", "Accept, AMP-Cache-Transform") @@ -306,7 +347,7 @@ func (this *Signer) ServeHTTP(resp http.ResponseWriter, req *http.Request) { return } - fetchReq, fetchResp, httpErr := this.fetchURL(fetchURL, req) + fetchReq, fetchResp, httpErr := this.fetchURLAndMeasure(fetchURL, req) if httpErr != nil { httpErr.LogAndRespond(resp) return @@ -320,7 +361,7 @@ func (this *Signer) ServeHTTP(resp http.ResponseWriter, req *http.Request) { if err := this.shouldPackage(); err != nil { log.Println("Not packaging because server is unhealthy; see above log statements.", err) - proxy(resp, fetchResp, nil) + proxyUnconsumed(resp, fetchResp) return } var act string @@ -330,7 +371,7 @@ func (this *Signer) ServeHTTP(resp http.ResponseWriter, req *http.Request) { act, transformVersion = amp_cache_transform.ShouldSendSXG(header_value) if act == "" { log.Println("Not packaging because AMP-Cache-Transform request header is invalid:", header_value) - proxy(resp, fetchResp, nil) + proxyUnconsumed(resp, fetchResp) return } } else { @@ -338,12 +379,13 @@ func (this *Signer) ServeHTTP(resp http.ResponseWriter, req *http.Request) { transformVersion, err = transformer.SelectVersion(nil) if err != nil { log.Println("Not packaging because of internal SelectVersion error:", err) - proxy(resp, fetchResp, nil) + proxyUnconsumed(resp, fetchResp) + return } } if this.requireHeaders && !accept.CanSatisfy(GetJoined(req.Header, "Accept")) { log.Printf("Not packaging because Accept request header lacks application/signed-exchange;v=%s.\n", accept.AcceptedSxgVersion) - proxy(resp, fetchResp, nil) + proxyUnconsumed(resp, fetchResp) return } @@ -352,13 +394,13 @@ func (this *Signer) ServeHTTP(resp http.ResponseWriter, req *http.Request) { // If fetchURL returns an OK status, then validate, munge, and package. 
if err := validateFetch(fetchReq, fetchResp); err != nil { log.Println("Not packaging because of invalid fetch: ", err) - proxy(resp, fetchResp, nil) + proxyUnconsumed(resp, fetchResp) return } for header := range statefulResponseHeaders { if errorOnStatefulHeaders && GetJoined(fetchResp.Header, header) != "" { log.Println("Not packaging because ErrorOnStatefulHeaders = True and fetch response contains stateful header: ", header) - proxy(resp, fetchResp, nil) + proxyUnconsumed(resp, fetchResp) return } } @@ -369,11 +411,11 @@ func (this *Signer) ServeHTTP(resp http.ResponseWriter, req *http.Request) { // Variants headers (https://tools.ietf.org/html/draft-ietf-httpbis-variants-04) are disallowed by AMP Cache. // We could delete the headers, but it's safest to assume they reflect the downstream server's intent. log.Println("Not packaging because response contains a Variants header.") - proxy(resp, fetchResp, nil) + proxyUnconsumed(resp, fetchResp) return } - this.serveSignedExchange(resp, fetchResp, signURL, act, transformVersion) + this.consumeAndSign(resp, fetchResp, &SXGParams{signURL, act, transformVersion}) case 304: // If fetchURL returns a 304, then also return a 304 with appropriate headers. @@ -386,7 +428,7 @@ func (this *Signer) ServeHTTP(resp http.ResponseWriter, req *http.Request) { default: log.Printf("Not packaging because status code %d is unrecognized.\n", fetchResp.StatusCode) - proxy(resp, fetchResp, nil) + proxyUnconsumed(resp, fetchResp) } } @@ -415,22 +457,83 @@ func formatLinkHeader(preloads []*rpb.Metadata_Preload) (string, error) { return strings.Join(values, ","), nil } -// serveSignedExchange does the actual work of transforming, packaging and signed and writing to the response. -func (this *Signer) serveSignedExchange(resp http.ResponseWriter, fetchResp *http.Response, signURL *url.URL, act string, transformVersion int64) { - // After this, fetchResp.Body is consumed, and attempts to read or proxy it will result in an empty body. 
- fetchBody, err := ioutil.ReadAll(io.LimitReader(fetchResp.Body, maxBodyLength)) +type SXGParams struct { + signURL *url.URL + ampCacheTransformHeader string + transformVersion int64 +} + +// consumedFetchResp stores the fetch response in memory - including the +// consumed body, not a stream reader. Signer loads the whole payload in memory +// in order to be able to sign it, because it's required by signer's +// serveSignedExchange method, specifically by its underlying calls to +// transformer.Process and to signedexchange.NewExchange. The former performs +// AMP HTML transforms, which depend on a non-streaming HTML parser. The latter +// signs it, which requires the whole payload in memory, because MICE requires +// the sender to process its payload in reverse order +// (https://tools.ietf.org/html/draft-thomson-http-mice-03#section-2.1). In an +// HTTP reverse proxy, this could be done using range requests, but would be +// inefficient. +type consumedFetchResp struct { + body []byte + StatusCode int + Header http.Header +} + +// maxSignableBodyLength is the signable payload length limit. If not hit, the +// signer will load the payload into consumedFetchResp and sign it. If hit, the +// signer won't sign the payload, but will proxy it in full by streaming it. +// This way the signer limits per-request memory usage, making amppackager more +// predictable provisioning-wise. The limit is mostly arbitrary, though there's +// no benefit to having a limit greater than that of AMP Caches. +const maxSignableBodyLength = 4 * 1 << 20 + +func (this *Signer) consumeAndSign(resp http.ResponseWriter, fetchResp *http.Response, params *SXGParams) { + // Cap in order to limit per-request memory usage. + fetchBodyMaybeCapped, err := ioutil.ReadAll(io.LimitReader(fetchResp.Body, maxSignableBodyLength)) if err != nil { util.NewHTTPError(http.StatusBadGateway, "Error reading body: ", err).LogAndRespond(resp) return } - // Perform local transformations. 
- r := getTransformerRequest(this.rtvCache, string(fetchBody), signURL.String()) - r.Version = transformVersion + if len(fetchBodyMaybeCapped) == maxSignableBodyLength { + // Body was too long and has been capped. Fallback to proxying. + log.Println("Not packaging because the document size hit the limit of ", strconv.Itoa(maxSignableBodyLength), " bytes.") + proxyPartiallyConsumed(resp, fetchResp, fetchBodyMaybeCapped) + } else { + // Body has been consumed fully. OK to proceed. + this.serveSignedExchange(resp, consumedFetchResp{fetchBodyMaybeCapped, fetchResp.StatusCode, fetchResp.Header}, params) + } + +} + +var promSignedAmpDocumentsSize = promauto.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "signed_amp_documents_size_in_bytes", + Help: "Actual size (in bytes) of gateway response body from AMP document server. Reported only if signer decided to sign, not return an error or proxy unsigned.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{}, +) + +var promDocumentsSignedVsUnsigned = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "documents_signed_vs_unsigned", + Help: "Total number of successful underlying requests to AMP document server, broken down by status based on the action signer has taken: sign or proxy unsigned.", + }, + []string{"status"}, +) + +// serveSignedExchange does the actual work of transforming, packaging, signing and writing to the response. +func (this *Signer) serveSignedExchange(resp http.ResponseWriter, fetchResp consumedFetchResp, params *SXGParams) { + // Perform local transformations, as required by AMP SXG caches, per + // docs/cache_requirements.md. 
+ r := getTransformerRequest(this.rtvCache, string(fetchResp.body), params.signURL.String()) + r.Version = params.transformVersion transformed, metadata, err := transformer.Process(r) if err != nil { log.Println("Not packaging due to transformer error:", err) - proxy(resp, fetchResp, fetchBody) + proxyConsumed(resp, fetchResp) return } @@ -438,7 +541,7 @@ func (this *Signer) serveSignedExchange(resp http.ResponseWriter, fetchResp *htt linkHeader, err := formatLinkHeader(metadata.Preloads) if err != nil { log.Println("Not packaging due to Link header error:", err) - proxy(resp, fetchResp, fetchBody) + proxyConsumed(resp, fetchResp) return } @@ -471,22 +574,29 @@ func (this *Signer) serveSignedExchange(resp http.ResponseWriter, fetchResp *htt fetchResp.Header.Get("Content-Security-Policy"))) exchange := signedexchange.NewExchange( - accept.SxgVersion /*uri=*/, signURL.String() /*method=*/, "GET", + accept.SxgVersion, + /*uri=*/ params.signURL.String(), + /*method=*/ "GET", http.Header{}, fetchResp.StatusCode, fetchResp.Header, []byte(transformed)) if err := exchange.MiEncodePayload(miRecordSize); err != nil { - util.NewHTTPError(http.StatusInternalServerError, "Error MI-encoding: ", err).LogAndRespond(resp) + log.Printf("Error MI-encoding: %s\n", err) + proxyConsumed(resp, fetchResp) return } cert := this.certHandler.GetLatestCert() - certURL, err := this.genCertURL(cert, signURL) + certURL, err := this.genCertURL(cert, params.signURL) if err != nil { - util.NewHTTPError(http.StatusInternalServerError, "Error building cert URL: ", err).LogAndRespond(resp) + log.Printf("Error building cert URL: %s\n", err) + proxyConsumed(resp, fetchResp) return } now := time.Now() validityHRef, err := url.Parse(util.ValidityMapPath) if err != nil { - util.NewHTTPError(http.StatusInternalServerError, "Error building validity href: ", err).LogAndRespond(resp) + // Won't ever happen because util.ValidityMapPath is a constant. 
+ log.Printf("Error building validity href: %s\n", err) + proxyConsumed(resp, fetchResp) + return } // Expires - Date must be <= 604800 seconds, per // https://tools.ietf.org/html/draft-yasskin-httpbis-origin-signed-exchanges-impl-00#section-3.5. @@ -495,31 +605,40 @@ func (this *Signer) serveSignedExchange(resp http.ResponseWriter, fetchResp *htt duration = maxAge } date := now.Add(-24 * time.Hour) + expires := date.Add(duration) + if !expires.After(now) { + log.Printf("Not packaging because computed max-age %d places expiry in the past\n", metadata.MaxAgeSecs) + proxyConsumed(resp, fetchResp) + return + } signer := signedexchange.Signer{ Date: date, - Expires: date.Add(duration), + Expires: expires, Certs: []*x509.Certificate{cert}, CertUrl: certURL, - ValidityUrl: signURL.ResolveReference(validityHRef), + ValidityUrl: params.signURL.ResolveReference(validityHRef), PrivKey: this.key, // TODO(twifkak): Should we make Rand user-configurable? The // default is to use getrandom(2) if available, else // /dev/urandom. 
} if err := exchange.AddSignatureHeader(&signer); err != nil { - util.NewHTTPError(http.StatusInternalServerError, "Error signing exchange: ", err).LogAndRespond(resp) + log.Printf("Error signing exchange: %s\n", err) + proxyConsumed(resp, fetchResp) return } var body bytes.Buffer if err := exchange.Write(&body); err != nil { - util.NewHTTPError(http.StatusInternalServerError, "Error serializing exchange: ", err).LogAndRespond(resp) + log.Printf("Error serializing exchange: %s\n", err) + proxyConsumed(resp, fetchResp) + return } // If requireHeaders was true when constructing signer, the // AMP-Cache-Transform outer response header is required (and has already // been validated) - if act != "" { - resp.Header().Set("AMP-Cache-Transform", act) + if params.ampCacheTransformHeader != "" { + resp.Header().Set("AMP-Cache-Transform", params.ampCacheTransformHeader) } resp.Header().Set("Content-Type", accept.SxgContentType) @@ -538,20 +657,44 @@ func (this *Signer) serveSignedExchange(resp http.ResponseWriter, fetchResp *htt log.Println("Error writing response:", err) return } + + promSignedAmpDocumentsSize.WithLabelValues().Observe(float64(len(fetchResp.body))) + promDocumentsSignedVsUnsigned.WithLabelValues("signed").Inc() } -// Proxy the content unsigned. If body is non-nil, it is used in place of fetchResp.Body. -// TODO(twifkak): Take a look at the source code to httputil.ReverseProxy and -// see what else needs to be implemented. 
-func proxy(resp http.ResponseWriter, fetchResp *http.Response, body []byte) { - for k, v := range fetchResp.Header { +func proxyUnconsumed(resp http.ResponseWriter, fetchResp *http.Response) { + proxyImpl(resp, fetchResp.Header, fetchResp.StatusCode, + /* consumedPrefix= */ nil, + /* unconsumedSuffix = */ fetchResp.Body) +} + +func proxyPartiallyConsumed(resp http.ResponseWriter, fetchResp *http.Response, consumedBodyPrefix []byte) { + proxyImpl(resp, fetchResp.Header, fetchResp.StatusCode, + /* consumedPrefix= */ consumedBodyPrefix, + /* unconsumedSuffix = */ fetchResp.Body) +} + +func proxyConsumed(resp http.ResponseWriter, consumedFetchResp consumedFetchResp) { + proxyImpl(resp, consumedFetchResp.Header, consumedFetchResp.StatusCode, + /* consumedPrefix= */ consumedFetchResp.body, + /* unconsumedSuffix = */ nil) +} + +// Proxy the content unsigned. The body may be already partially or fully +// consumed. TODO(twifkak): Take a look at the source code to +// httputil.ReverseProxy and see what else needs to be implemented. 
+func proxyImpl(resp http.ResponseWriter, header http.Header, statusCode int, consumedPrefix []byte, unconsumedSuffix io.ReadCloser) { + for k, v := range header { resp.Header()[k] = v } - resp.WriteHeader(fetchResp.StatusCode) - if body != nil { - resp.Write(body) - } else { - bytesCopied, err := io.Copy(resp, fetchResp.Body) + resp.WriteHeader(statusCode) + + if consumedPrefix != nil { + resp.Write(consumedPrefix) + } + + if unconsumedSuffix != nil { + bytesCopied, err := io.Copy(resp, unconsumedSuffix) if err != nil { if bytesCopied == 0 { util.NewHTTPError(http.StatusInternalServerError, "Error copying response body").LogAndRespond(resp) @@ -560,4 +703,6 @@ func proxy(resp http.ResponseWriter, fetchResp *http.Response, body []byte) { } } } + + promDocumentsSignedVsUnsigned.WithLabelValues("proxied unsigned").Inc() } diff --git a/packager/signer/signer_test.go b/packager/signer/signer_test.go index 67add030b..bd3815ddb 100644 --- a/packager/signer/signer_test.go +++ b/packager/signer/signer_test.go @@ -27,6 +27,7 @@ import ( "sort" "strings" "testing" + "time" "github.com/WICG/webpackage/go/signedexchange" "github.com/WICG/webpackage/go/signedexchange/structuredheader" @@ -38,12 +39,14 @@ import ( "github.com/ampproject/amppackager/transformer" rpb "github.com/ampproject/amppackager/transformer/request" "github.com/pkg/errors" + promtest "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/suite" ) var fakePath = "/amp/secret-life-of-pine-trees.html" var fakeBody = []byte("They like to OPINE. Get it? (Is he fir real? Yew gotta be kidding me.)") var transformedBody = []byte("They like to OPINE. Get it? (Is he fir real? 
Yew gotta be kidding me.)") +var header = http.Header{"AMP-Cache-Transform": {"google"}, "Accept": {"application/signed-exchange;v=" + accept.AcceptedSxgVersion}} func headerNames(headers http.Header) []string { names := make([]string, len(headers)) @@ -74,29 +77,17 @@ type SignerSuite struct { shouldPackage error fakeHandler func(resp http.ResponseWriter, req *http.Request) lastRequest *http.Request + fakeClock *pkgt.FakeClock } func (this *SignerSuite) new(urlSets []util.URLSet) http.Handler { forwardedRequestHeaders := []string{"Host", "X-Foo"} - handler, err := New(fakeCertHandler{}, pkgt.Key, urlSets, &rtv.RTVCache{}, func() error { return this.shouldPackage }, nil, true, forwardedRequestHeaders) + this.fakeClock = pkgt.NewFakeClock() + handler, err := New(fakeCertHandler{}, pkgt.Key, urlSets, &rtv.RTVCache{}, func() error { return this.shouldPackage }, nil, true, forwardedRequestHeaders, this.fakeClock.Now) this.Require().NoError(err) // Accept the self-signed certificate generated by the test server. 
handler.client = this.httpsClient - return mux.New(nil, handler, nil, nil) -} - -func (this *SignerSuite) get(t *testing.T, handler http.Handler, target string) *http.Response { - return pkgt.GetH(t, handler, target, http.Header{ - "AMP-Cache-Transform": {"google"}, "Accept": {"application/signed-exchange;v=" + accept.AcceptedSxgVersion}}) -} - -func (this *SignerSuite) getFRH(t *testing.T, handler http.Handler, target string, host string, header http.Header) *http.Response { - return pkgt.GetHH(t, handler, target, host, header) -} - -func (this *SignerSuite) getB(t *testing.T, handler http.Handler, target string, body string) *http.Response { - return pkgt.GetBHH(t, handler, target, "", strings.NewReader(body), http.Header{ - "AMP-Cache-Transform": {"google"}, "Accept": {"application/signed-exchange;v=" + accept.AcceptedSxgVersion}}) + return mux.New(nil, handler, nil, nil, nil) } func (this *SignerSuite) httpURL() string { @@ -176,9 +167,8 @@ func (this *SignerSuite) TestSimple() { Sign: &util.URLPattern{[]string{"https"}, "", this.httpHost(), stringPtr("/amp/.*"), []string{}, stringPtr(""), false, 2000, nil}, Fetch: &util.URLPattern{[]string{"http"}, "", this.httpHost(), stringPtr("/amp/.*"), []string{}, stringPtr(""), false, 2000, boolPtr(true)}, }} - resp := this.get(this.T(), this.new(urlSets), - "/priv/doc?fetch="+url.QueryEscape(this.httpURL()+fakePath)+ - "&sign="+url.QueryEscape(this.httpSignURL()+fakePath)) + target := "/priv/doc?fetch=" + url.QueryEscape(this.httpURL()+fakePath) + "&sign=" + url.QueryEscape(this.httpSignURL()+fakePath) + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("", header).Do() this.Assert().Equal(fakePath, this.lastRequest.URL.String()) this.Assert().Equal(userAgent, this.lastRequest.Header.Get("User-Agent")) @@ -242,9 +232,8 @@ func (this *SignerSuite) TestFetchSignWithForwardedRequestHeaders() { // Request with ForwardedRequestHeaders header := http.Header{"AMP-Cache-Transform": {"google"}, "Accept": 
{"application/signed-exchange;v=" + accept.AcceptedSxgVersion}, "X-Foo": {"foo"}, "X-Bar": {"bar"}} - resp := this.getFRH(this.T(), this.new(urlSets), - "/priv/doc?fetch="+url.QueryEscape(this.httpURL()+fakePath)+"&sign="+url.QueryEscape(this.httpSignURL_CertSubjectCN()+fakePath), - "www.example.com", header) + target := "/priv/doc?fetch=" + url.QueryEscape(this.httpURL()+fakePath) + "&sign=" + url.QueryEscape(this.httpSignURL_CertSubjectCN()+fakePath) + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("www.example.com", header).Do() this.Assert().Equal(fakePath, this.lastRequest.URL.String()) this.Assert().Equal(userAgent, this.lastRequest.Header.Get("User-Agent")) this.Assert().Equal("1.1 amppkg", this.lastRequest.Header.Get("Via")) @@ -281,13 +270,12 @@ func (this *SignerSuite) TestForwardedHost() { }} header := http.Header{ "AMP-Cache-Transform": {"google"}, "Accept": {"application/signed-exchange;v=" + accept.AcceptedSxgVersion}, - "Forwarded": {`host="www.example.com";for=192.0.0.1`}, - "X-Forwarded-For": {"192.0.0.1"}, + "Forwarded": {`host="www.example.com";for=192.0.0.1`}, + "X-Forwarded-For": {"192.0.0.1"}, "X-Forwarded-Host": {"www.example.com"}} - this.getFRH(this.T(), this.new(urlSets), - "/priv/doc?fetch="+url.QueryEscape(this.httpURL()+fakePath)+ - "&sign="+url.QueryEscape(this.httpSignURL()+fakePath), - "example.com", header) + + target := "/priv/doc?fetch=" + url.QueryEscape(this.httpURL()+fakePath) + "&sign=" + url.QueryEscape(this.httpSignURL()+fakePath) + pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("example.com", header).Do() this.Assert().Equal(`host="example.com"`, this.lastRequest.Header.Get("Forwarded")) this.Assert().Equal("www.example.com,example.com", this.lastRequest.Header.Get("X-Forwarded-Host")) @@ -298,9 +286,8 @@ func (this *SignerSuite) TestEscapeQueryParamsInFetchAndSign() { Sign: &util.URLPattern{[]string{"https"}, "", this.httpHost(), stringPtr("/amp/.*"), []string{}, stringPtr(".*"), 
false, 2000, nil}, Fetch: &util.URLPattern{[]string{"http"}, "", this.httpHost(), stringPtr("/amp/.*"), []string{}, stringPtr(".*"), false, 2000, boolPtr(true)}, }} - resp := this.get(this.T(), this.new(urlSets), - "/priv/doc?fetch="+url.QueryEscape(this.httpURL()+fakePath+"?")+ - "&sign="+url.QueryEscape(this.httpSignURL()+fakePath+"?")) + target := "/priv/doc?fetch=" + url.QueryEscape(this.httpURL()+fakePath+"?") + "&sign=" + url.QueryEscape(this.httpSignURL()+fakePath+"?") + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("", header).Do() this.Assert().Equal(http.StatusOK, resp.StatusCode, "incorrect status: %#v", resp) this.Assert().Equal(fakePath+"?%3Chi%3E", this.lastRequest.URL.String()) @@ -309,19 +296,40 @@ func (this *SignerSuite) TestEscapeQueryParamsInFetchAndSign() { this.Assert().Equal(this.httpSignURL()+fakePath+"?%3Chi%3E", exchange.RequestURI) } +func (this *SignerSuite) TestMissingFetchParam() { + urlSets := []util.URLSet{{ + Sign: &util.URLPattern{[]string{"https"}, "", this.httpHost(), stringPtr("/amp/.*"), []string{}, stringPtr(""), false, 2000, nil}, + Fetch: &util.URLPattern{[]string{"http"}, "", this.httpHost(), stringPtr("/amp/.*"), []string{}, stringPtr(""), false, 2000, boolPtr(true)}, + }} + target := "/priv/doc?sign=" + url.QueryEscape(this.httpSignURL()+fakePath) + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("", header).Do() + this.Assert().Equal(http.StatusBadRequest, resp.StatusCode, "incorrect status: %#v", resp) +} + +func (this *SignerSuite) TestMissingSignParam() { + urlSets := []util.URLSet{{ + Sign: &util.URLPattern{[]string{"https"}, "", this.httpHost(), stringPtr("/amp/.*"), []string{}, stringPtr(""), false, 2000, nil}, + Fetch: &util.URLPattern{[]string{"http"}, "", this.httpHost(), stringPtr("/amp/.*"), []string{}, stringPtr(""), false, 2000, boolPtr(true)}, + }} + target := "/priv/doc?fetch=" + url.QueryEscape(this.httpURL()+fakePath) + resp := pkgt.NewRequest(this.T(), 
this.new(urlSets), target).SetHeaders("", header).Do() + this.Assert().Equal(http.StatusBadRequest, resp.StatusCode, "incorrect status: %#v", resp) +} + func (this *SignerSuite) TestDisallowInvalidCharsSign() { urlSets := []util.URLSet{{ Sign: &util.URLPattern{[]string{"https"}, "", this.httpsHost(), stringPtr("/amp/.*"), []string{}, stringPtr(""), false, 2000, nil}, }} - resp := this.get(this.T(), this.new(urlSets), - "/priv/doc?&sign="+url.QueryEscape(this.httpSignURL()+fakePath+"")) + target := "/priv/doc?&sign=" + url.QueryEscape(this.httpSignURL()+fakePath+"") + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("", header).Do() this.Assert().Equal(http.StatusBadRequest, resp.StatusCode, "incorrect status: %#v", resp) } func (this *SignerSuite) TestNoFetchParam() { urlSets := []util.URLSet{{ Sign: &util.URLPattern{[]string{"https"}, "", this.httpsHost(), stringPtr("/amp/.*"), []string{}, stringPtr(""), false, 2000, nil}}} - resp := this.get(this.T(), this.new(urlSets), "/priv/doc?sign="+url.QueryEscape(this.httpsURL()+fakePath)) + target := "/priv/doc?sign=" + url.QueryEscape(this.httpsURL()+fakePath) + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("", header).Do() this.Assert().Equal(http.StatusOK, resp.StatusCode, "incorrect status: %#v", resp) exchange, err := signedexchange.ReadExchange(resp.Body) @@ -334,7 +342,8 @@ func (this *SignerSuite) TestSignAsPathParam() { urlSets := []util.URLSet{{ Sign: &util.URLPattern{[]string{"https"}, "", this.httpsHost(), stringPtr("/amp/.*"), []string{}, stringPtr(""), false, 2000, nil}, }} - resp := this.get(this.T(), this.new(urlSets), `/priv/doc/`+this.httpsURL()+fakePath) + target := `/priv/doc/` + this.httpsURL() + fakePath + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("", header).Do() this.Assert().Equal(http.StatusOK, resp.StatusCode, "incorrect status: %#v", resp) exchange, err := signedexchange.ReadExchange(resp.Body) @@ -347,7 +356,8 @@ 
func (this *SignerSuite) TestSignAsPathParamWithQuery() { urlSets := []util.URLSet{{ Sign: &util.URLPattern{[]string{"https"}, "", this.httpsHost(), stringPtr("/amp/.*"), []string{}, stringPtr(".*"), false, 2000, nil}, }} - resp := this.get(this.T(), this.new(urlSets), `/priv/doc/`+this.httpsURL()+fakePath+"?amp=1") + target := `/priv/doc/` + this.httpsURL() + fakePath + "?amp=1" + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("", header).Do() this.Assert().Equal(http.StatusOK, resp.StatusCode, "incorrect status: %#v", resp) exchange, err := signedexchange.ReadExchange(resp.Body) @@ -361,7 +371,8 @@ func (this *SignerSuite) TestSignAsPathParamWithUnusualPctEncoding() { urlSets := []util.URLSet{{ Sign: &util.URLPattern{[]string{"https"}, "", this.httpsHost(), stringPtr("/amp/.*"), []string{}, stringPtr(""), false, 2000, nil}, }} - resp := this.get(this.T(), this.new(urlSets), `/priv/doc/`+this.httpsURL()+fakePath+`%2A`) + target := `/priv/doc/` + this.httpsURL() + fakePath + `%2A` + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("", header).Do() this.Assert().Equal(http.StatusOK, resp.StatusCode, "incorrect status: %#v", resp) exchange, err := signedexchange.ReadExchange(resp.Body) @@ -377,7 +388,8 @@ func (this *SignerSuite) TestPreservesContentType() { resp.Header().Set("Content-Type", "text/html;charset=utf-8;v=5") resp.Write(fakeBody) } - resp := this.get(this.T(), this.new(urlSets), "/priv/doc?sign="+url.QueryEscape(this.httpsURL()+fakePath)) + target := "/priv/doc?sign=" + url.QueryEscape(this.httpsURL()+fakePath) + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("", header).Do() this.Assert().Equal(http.StatusOK, resp.StatusCode, "incorrect status: %#v", resp) exchange, err := signedexchange.ReadExchange(resp.Body) @@ -393,7 +405,8 @@ func (this *SignerSuite) TestRemovesLinkHeaders() { resp.Header().Set("Link", "rel=preload;") resp.Write(fakeBody) } - resp := this.get(this.T(), 
this.new(urlSets), "/priv/doc?sign="+url.QueryEscape(this.httpsURL()+fakePath)) + target := "/priv/doc?sign=" + url.QueryEscape(this.httpsURL()+fakePath) + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("", header).Do() this.Assert().Equal(http.StatusOK, resp.StatusCode, "incorrect status: %#v", resp) exchange, err := signedexchange.ReadExchange(resp.Body) @@ -409,7 +422,8 @@ func (this *SignerSuite) TestRemovesStatefulHeaders() { resp.Header().Set("Set-Cookie", "yum yum yum") resp.Write(fakeBody) } - resp := this.get(this.T(), this.new(urlSets), "/priv/doc?sign="+url.QueryEscape(this.httpsURL()+fakePath)) + target := "/priv/doc?sign=" + url.QueryEscape(this.httpsURL()+fakePath) + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("", header).Do() this.Assert().Equal(http.StatusOK, resp.StatusCode, "incorrect status: %#v", resp) exchange, err := signedexchange.ReadExchange(resp.Body) @@ -443,10 +457,8 @@ func (this *SignerSuite) TestMutatesCspHeaders() { resp.Write(fakeBody) } - resp := this.get( - this.T(), - this.new(urlSets), - "/priv/doc?sign="+url.QueryEscape(this.httpsURL()+fakePath)) + target := "/priv/doc?sign=" + url.QueryEscape(this.httpsURL()+fakePath) + resp := pkgt.NewRequest(this.T(), this.new(urlSets), target).SetHeaders("", header).Do() this.Assert().Equal( http.StatusOK, resp.StatusCode, "incorrect status: %#v", resp) exchange, err := signedexchange.ReadExchange(resp.Body) @@ -455,7 +467,7 @@ func (this *SignerSuite) TestMutatesCspHeaders() { "base-uri http://*.example.com;"+ "block-all-mixed-content;"+ "default-src * blob: data:;"+ - "report-uri https://csp-collector.appspot.com/csp/amp;"+ + "report-uri https://csp.withgoogle.com/csp/amp;"+ "script-src blob: https://cdn.ampproject.org/rtv/ https://cdn.ampproject.org/v0.js https://cdn.ampproject.org/v0/ https://cdn.ampproject.org/viewer/;"+ "style-src 'unsafe-inline' https://cdn.materialdesignicons.com https://cloud.typography.com https://fast.fonts.net 
https://fonts.googleapis.com https://maxcdn.bootstrapcdn.com https://p.typekit.net https://pro.fontawesome.com https://use.fontawesome.com https://use.typekit.net;"+ "object-src 'none'", @@ -469,7 +481,8 @@ func (this *SignerSuite) TestAddsLinkHeaders() { resp.Header().Set("Content-Type", "text/html; charset=utf-8") resp.Write([]byte("