diff --git a/.github/workflows/mdbook.yml b/.github/workflows/mdbook.yml
new file mode 100644
index 0000000..b44b279
--- /dev/null
+++ b/.github/workflows/mdbook.yml
@@ -0,0 +1,55 @@
# Sample workflow for building and deploying an mdBook site to GitHub Pages
#
# To get started with mdBook see: https://rust-lang.github.io/mdBook/index.html
#
name: Deploy mdBook site to Pages
on:
  # Runs on pushes targeting the default branch
  push:
    branches: ["main"]
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
  contents: read
  pages: write
  id-token: write
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
  group: "pages"
  cancel-in-progress: false
jobs:
  # Build job
  build:
    runs-on: ubuntu-latest
    env:
      MDBOOK_VERSION: 0.4.43
    steps:
      - uses: actions/checkout@v4
      - name: Install mdBook
        run: |
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
          rustup update
          cargo install --version ${MDBOOK_VERSION} mdbook
          cargo install mdbook-toc mdbook-mermaid
      - name: Setup Pages
        id: pages
        uses: actions/configure-pages@v5
      - name: Build with mdBook
        run: mdbook build
      - name: Upload artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: ./book
  # Deployment job
  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..7585238
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
book
diff --git a/.img/import-orders-from-schema.png b/.img/import-orders-from-schema.png
new file mode 100644
index 0000000..31c2510
Binary files /dev/null and b/.img/import-orders-from-schema.png differ
diff --git a/.img/import-orders.png b/.img/import-orders.png
new file mode 100644
index 0000000..a51b443
Binary files /dev/null and b/.img/import-orders.png differ
diff --git a/book.toml b/book.toml
new file mode 100644
index 0000000..fa8be27
--- /dev/null
+++ b/book.toml
@@ -0,0 +1,10 @@
[book]
authors = ["dylan"]
language = "en"
multilingual = false
src = "src"
title = "Dylan's Blog"

[preprocessor.toc]
command = "mdbook-toc"
renderer = ["html"]
diff --git a/run.sh b/run.sh
new file mode 100755
index 0000000..a5a533e
--- /dev/null
+++ b/run.sh
@@ -0,0 +1,5 @@
#!/bin/bash

mdbook serve

# open localhost:3000
diff --git a/src/SUMMARY.md b/src/SUMMARY.md
new file mode 100644
index 0000000..0962194
--- /dev/null
+++ b/src/SUMMARY.md
@@ -0,0 +1,96 @@
# Summary

# Solana

- [Quickstart]()
  - [Create an Account Using TypeScript](solana/quickstart/使用TypeScript创建账户.md)
  - [Develop a Solana Program Without Anchor (Native Program)](solana/quickstart/不使用Anchor开发solana的program.md)

# React

- [Functional Component]()
  - [Render all chartjs charts in react typescript tailwindcss projects](react/functional-component/render-all-chartjs-charts.md)

# TypeScript

- [Redux]()
  - [Redux + TypeScript in Large Projects](typescript/redux/redux-typescript-大型项目实践.md)

# Rust

- [GRPC]()
  - [Rust grpc helloworld](rust/grpc/rust-grpc-helloworld.md)
- [Diesel]()
  - [Upgrade diesel to 2.0](rust/diesel/upgrade-diesel-to-2.0.md)
  - [Use jsonb in diesel](rust/diesel/use-jsonb-in-diesel.md)
- [Tokio]()
  - [Tokio Codec](rust/tokio/tokio-codec.md)
  - [Async Healthcheck Multiple Endpoints](rust/tokio/async-healthcheck-multiple-endpoints.md)
- [Actix]()
  - [prometheus support for actix-web project](rust/actix/prometheus-support-to-actix-web.md)
  - [Send Http Request in Handle function And started function When Using Actix crate](rust/actix/send-http-request-in-handle-function-and-started-function-when-using-actix-crate.md)
- [Serde]()
  - [Serialize time::OffsetDateTime type using serde_as in serde_with crate](rust/serde/serialize-time-offsetdatetime-type-using-serde-as-in-serde-with-crate.md)
- [Error]()
  - [How to organise application Error in actix-web application](rust/error/how-to-organise-application-error-in-actix-web-application.md)
  - [Return error when unwrap Option when None](rust/error/return-error-when-unwrap-option-when-none.md)

# Golang

- [GRPC]()
  - [go code output path](golang/grpc/go_opt.md)
- [Go Modules]()
  - [Go get gitlab subgroup module](golang/gomod/go-get-gitlab-subgroup-module.md)
- [Password]()
  - [Write password encryption package using ChatGPT](golang/password/write-password-encryption-package-using-chatgpt.md)

# MUI

- [Upgrade]()
  - [Upgrade from v4 to v5](mui/upgrade/upgrade-from-v4-to-v5.md)

# Gitlab

- [Gitlab Runner]()
  - [Register gitlab runner on Amazon Linux 2](gitlab/runner/register-gitlab-runner-on-amazon-linux-2.md)
  - [Install gitlab runner on ubuntu](gitlab/runner/install-gitlab-runner-on-ubuntu.md)
- [CI]()
  - [Build multi-arch docker image using buildx](gitlab/ci/build-multi-arch-docker-image-using-buildx.md)

# Terraform

- [Gitlab]()
  - [Start Gitlab using Terraform](terraform/gitlab/start-gitlab-using-terraform.md)
- [Import]()
  - [Manage DMS resource using terraform](terraform/import/terraform-import-dms-replication-instance-blog.md)
  - [Review terraform import](terraform/import/review-terraform-import.md)
- [S3]()
  - [Managing S3 Bucket Permissions with Terraform](terraform/s3/s3-bucket-permission-management-using-terraform.md)
- [Management]()
  - [A Good Developer knows management](terraform/management/a-good-developer-knows-management.md)

# Kubernetes

- [CronJob]()
  - [CronJob To Restart Deployment](kubernetes/cronjob/cronjob-to-restart-deployment.md)
- [Security]()
  - [Access s3 files in pod](kubernetes/security/access-s3-files-in-pod.md)
- [Kubebuilder]()
  - [kubebuilder quickstart](kubernetes/kubebuilder/kubebuilder-quickstart.md)

# Flink

- [DataStream]()
  - [DataStream Socket Source and Print Sink](flink/datastream/datastream-socket-source-and-print-sink.md)

# Linkerd

- [Quickstart]()
  - [linkerd quickstart](linkerd/quickstart/linkerd-quickstart.md)

# Kong

- [GRPC]()
  - [Multiple GRPC Service Routing in Kong](kong/grpc/multiple-grpc-service-routing.md)
diff --git a/src/flink/datastream/datastream-socket-source-and-print-sink.md b/src/flink/datastream/datastream-socket-source-and-print-sink.md
new file mode 100644
index 0000000..451d037
--- /dev/null
+++ b/src/flink/datastream/datastream-socket-source-and-print-sink.md
@@ -0,0 +1,387 @@
# Flink DataStream Socket Source and Print Sink

- [Intro](#intro)
- [Setup](#setup)
- [Walkthrough for flink datastream api, socket as source and print as sink](#walkthrough-for-flink-datastream-api-socket-as-source-and-print-as-sink)
- [Tests](#tests)
  - [Simple test](#simple-test)
  - [Random string](#random-string)
  - [Shakespeare](#shakespeare)
  - [Long string](#long-string)
# Intro

In this tutorial, we will walk through how to use the Flink DataStream API to read data from a socket and print the result.

# Setup

First, let's set up a new flink project. We can use the official flink quickstart script to create a new project.

```bash
bash -c "$(curl https://flink.apache.org/q/gradle-quickstart.sh)" -- 1.15.0 _2.12
```

# Walkthrough for flink datastream api, socket as source and print as sink

Let's see how to initialize the `StreamExecutionEnvironment` and set up the print sink.

We start by creating a `StreamExecutionEnvironment`, which is the main entry point for all Flink applications. We then create a `DataStream` by adding a source function that reads from a socket and a sink function that prints its input to the console.

```java
public class DataStreamJob {

    public static void windowWordCount() throws Exception {
        // Set up the execution environment, which is the main entry point
        // to building Flink applications.
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<Tuple2<String, Integer>> dataStream = env
                .socketTextStream("localhost", 9999)
                .flatMap(new Splitter())
                .keyBy(value -> value.f0)
                .window(TumblingProcessingTimeWindows.of(Time.seconds(5)))
                .sum(1);

        dataStream.print();

        env.execute("Window WordCount");
    }

    public static void main(String[] args) throws Exception {
        windowWordCount();
    }
}
```

The `windowWordCount` method first creates a datastream from a socket, then splits the text into (word, 1) tuples and groups them by word. It then creates a 5-second tumbling window and sums the count of each word in the window. The result is printed to the console.

Run a local socket using nc:

```bash
nc -lk 9999
```

Build the flink job:

```bash
gradle clean installShadowDist
```

Submit the flink job:

```bash
FLINK_HOME=~/flink/flink-1.15.1
$FLINK_HOME/bin/flink run -c org.myorg.quickstart.DataStreamJob build/install/quickstart-shadow/lib/quickstart-0.1-SNAPSHOT-all.jar
```

The output will look like:

```
WARNING: An illegal reflective access operation has occurred
WARNING: Illegal reflective access by org.apache.flink.api.java.ClosureCleaner (file:/home/ec2-user/flink/flink-1.15.1/lib/flink-dist-1.15.1.jar) to field java.lang.String.value
WARNING: Please consider reporting this to the maintainers of org.apache.flink.api.java.ClosureCleaner
WARNING: Use --illegal-access=warn to enable warnings of further illegal reflective access operations
WARNING: All illegal access operations will be denied in a future release
Job has been submitted with JobID 674851e1ff9da68eb742f93d6d874ca6
```

# Tests

Let's see how to test the job.

## Simple test

We can test it by using the `nc` command.
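One thing the walkthrough glossed over: the job references a `Splitter` class that isn't shown in this post. A minimal sketch of what it might look like (hypothetical, not taken from this repo; it assumes the standard Flink `FlatMapFunction`, `Tuple2`, and `Collector` types) is:

```java
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

// Splits each incoming line into words and emits a (word, 1) tuple per word
public class Splitter implements FlatMapFunction<String, Tuple2<String, Integer>> {
    @Override
    public void flatMap(String sentence, Collector<Tuple2<String, Integer>> out) {
        for (String word : sentence.split(" ")) {
            out.collect(new Tuple2<>(word, 1));
        }
    }
}
```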
+ +Type in terminal running `nc` command: + +```bash +123 555 +hello world +hello world +hi jon +hi non +hello 123 +``` + +Check log in `$FLINK_HOME/log` directory, file `flink-ec2-user-taskexecutor-1-ip-172-24-145-118.cn-northwest-1.compute.internal.out`: + +```bash +==> flink-ec2-user-taskexecutor-1-ip-172-24-145-118.cn-northwest-1.compute.internal.out <== +(world,1) +(hello,1) +(555,1) +(hello,1) +(world,1) +(hi,2) +(non,1) +(jon,1) +(hello,1) +(123,1) +``` + +## Random string + +`nc`: + +``` +a b c a b c a b c hello non hello k +``` + +flink log: + +``` +==> flink-ec2-user-taskexecutor-1-ip-172-24-145-118.cn-northwest-1.compute.internal.out <== +(a,3) +(k,1) +(non,1) +(hello,2) +(c,3) +(b,3) +``` + +## Shakespeare + +input + +``` +To be, or not to be, that is the question +``` + +log + +``` +==> flink-ec2-user-taskexecutor-1-ip-172-24-145-118.cn-northwest-1.compute.internal.out <== +(To,1) +(question,1) +(the,1) +(is,1) +(that,1) +(to,1) +(not,1) +(or,1) +(be,,2) +``` + +## Long string + +input + +``` +To be, or not to be, that is the question: +Whether 'tis nobler in the mind to suffer +The slings and arrows of outrageous fortune, +Or to take arms against a sea of troubles +And by opposing end them. To die—to sleep, +No more; and by a sleep to say we end +The heart-ache and the thousand natural shocks +That flesh is heir to: 'tis a consummation +Devoutly to be wish'd. To die, to sleep; +To sleep, perchance to dream—ay, there's the rub: +For in that sleep of death what dreams may come, +When we have shuffled off this mortal coil, +Must give us pause—there's the respect +That makes calamity of so long life. +For who would bear the whips and scorns of time, +Th'oppressor's wrong, the proud man's contumely, +The pangs of dispriz'd love, the law's delay, +The insolence of office, and the spurns +That patient merit of th'unworthy takes, +When he himself might his quietus make +With a bare bodkin? Who would fardels bear, +To grunt and sweat under a weary life, +But that the dread of something after death, +The undiscovere'd country, from whose bourn +No traveller returns, puzzles the will, +And makes us rather bear those ills we have +Than fly to others that we know not of? 
+Thus conscience doth make cowards of us all, +And thus the native hue of resolution +Is sicklied o'er with the pale cast of thought, +And enterprises of great pith and moment +With this regard their currents turn awry +And lose the name of action +``` + +log + +``` +==> flink-ec2-user-taskexecutor-1-ip-172-24-145-118.cn-northwest-1.compute.internal.out <== +(,2) +(action,1) +(name,1) +(lose,1) +(awry,1) +(turn,1) +(currents,1) +(their,1) +(regard,1) +(moment,1) +(pith,1) +(great,1) +(enterprises,1) +(thought,,1) +(cast,1) +(pale,1) +(with,1) +(o'er,1) +(sicklied,1) +(Is,1) +(resolution,1) +(hue,1) +(native,1) +(thus,1) +(all,,1) +(cowards,1) +(doth,1) +(conscience,1) +(Thus,1) +(of?,1) +(know,1) +(others,1) +(fly,1) +(Than,1) +(ills,1) +(those,1) +(rather,1) +(will,,1) +(puzzles,1) +(returns,,1) +(traveller,1) +(bourn,1) +(whose,1) +(from,1) +(country,,1) +(undiscovere'd,1) +(death,,1) +(after,1) +(something,1) +(dread,1) +(But,1) +(life,,1) +(weary,1) +(under,1) +(sweat,1) +(grunt,1) +(bear,,1) +(fardels,1) +(Who,1) +(bodkin?,1) +(bare,1) +(With,2) +(make,2) +(quietus,1) +(his,1) +(might,1) +(himself,1) +(he,1) +(takes,,1) +(th'unworthy,1) +(merit,1) +(patient,1) +(spurns,1) +(office,,1) +(insolence,1) +(delay,,1) +(law's,1) +(love,,1) +(dispriz'd,1) +(pangs,1) +(contumely,,1) +(man's,1) +(proud,1) +(wrong,,1) +(Th'oppressor's,1) +(time,,1) +(scorns,1) +(whips,1) +(bear,2) +(would,2) +(who,1) +(life.,1) +(long,1) +(so,1) +(calamity,1) +(makes,2) +(respect,1) +(pause—there's,1) +(us,3) +(give,1) +(Must,1) +(coil,,1) +(mortal,1) +(this,2) +(off,1) +(shuffled,1) +(have,2) +(When,2) +(come,,1) +(may,1) +(dreams,1) +(what,1) +(death,1) +(For,2) +(rub:,1) +(there's,1) +(dream—ay,,1) +(perchance,1) +(sleep;,1) +(die,,1) +(wish'd.,1) +(be,1) +(Devoutly,1) +(consummation,1) +(to:,1) +(heir,1) +(flesh,1) +(That,3) +(shocks,1) +(natural,1) +(thousand,1) +(heart-ache,1) +(we,4) +(say,1) +(sleep,2) +(more;,1) +(No,2) +(sleep,,2) +(die—to,1) +(them.,1) +(end,2) +(opposing,1) +(by,2) +(And,5) +(troubles,1) +(sea,1) +(a,5) +(against,1) +(arms,1) +(take,1) +(Or,1) +(fortune,,1) +(outrageous,1) +(of,14) +(arrows,1) +(and,7) +(slings,1) +(The,5) +(suffer,1) +(mind,1) +(in,2) +(nobler,1) +('tis,2) +(Whether,1) +(question:,1) +(the,14) +(is,2) +(that,4) +(to,8) +(not,2) +(or,1) +(be,,2) +(To,5) +``` diff --git a/src/gitlab/ci/build-multi-arch-docker-image-using-buildx.md b/src/gitlab/ci/build-multi-arch-docker-image-using-buildx.md new file mode 100644 index 0000000..a8469f0 --- /dev/null +++ b/src/gitlab/ci/build-multi-arch-docker-image-using-buildx.md @@ -0,0 +1,133 @@ +# Build multi-arch docker image using buildx + +- [Setup simple golang project using go mod](#setup-simple-golang-project-using-go-mod) +- [.gitlab-ci.yml](#gitlab-ciyml) +- [Setup CI variable to authenticate gitlab to aws ecr](#setup-ci-variable-to-authenticate-gitlab-to-aws-ecr) +- [Run ci](#run-ci) +- [Inspect the image](#inspect-the-image) + +# Setup simple golang project using go mod + +Iniit golang project using `go mod`: + +```bash +go mod init gitlab.com/aoaojiaoaoaojiao/go-multi-arch +``` + +Write `main.go`: + +```go +package main + +import "fmt" + +func main() { + fmt.Println("vim-go") +} +``` + +# .gitlab-ci.yml + +```yaml +# You can override the included template(s) by including variable overrides +# SAST customization: https://docs.gitlab.com/ee/user/application_security/sast/#customizing-the-sast-settings +# Secret Detection customization: 
https://docs.gitlab.com/ee/user/application_security/secret_detection/#customizing-settings
# Dependency Scanning customization: https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-the-dependency-scanning-settings
# Container Scanning customization: https://docs.gitlab.com/ee/user/application_security/container_scanning/#customizing-the-container-scanning-settings
# Note that environment variables can be set in several places
# See https://docs.gitlab.com/ee/ci/variables/#cicd-variable-precedence
# stages:
#   - test
# sast:
#   stage: test
# include:
#   - template: Security/SAST.gitlab-ci.yml

image: docker:20.10.8

stages:
  - build-push

variables:
  DOCKER_DRIVER: overlay2
  BUILDX_VERSION: "v0.6.1"
  BUILDX_ARCH: "linux-amd64"
  AWS_DEFAULT_REGION: us-east-1
  AWS_ECR_NAME: 444333555686.dkr.ecr.us-east-1.amazonaws.com/orders/orders
  AWS_ACCOUNT_ID: 444333555686
  #DOCKER_IMAGE_NAME: your-docker-image-name
  #DOCKER_USERNAME: AWS
  GIT_COMMIT_SHA: ${CI_COMMIT_SHORT_SHA}

build and push:
  stage: build-push
  #image: docker:dind
  image: docker:20.10.8-dind
  services:
    - docker:dind
  before_script:
    - apk update
    - apk add --no-cache curl python3 py3-pip git
    - pip3 install awscli
    - wget -O /usr/bin/docker-buildx https://github.com/docker/buildx/releases/download/${BUILDX_VERSION}/buildx-${BUILDX_VERSION}.${BUILDX_ARCH}
    - chmod +x /usr/bin/docker-buildx
    - aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com
  script:
    #- docker buildx create --use
    #- docker buildx build --platform linux/amd64,linux/arm64 -t $AWS_ECR_NAME:$GIT_COMMIT_SHA --push .
    - docker-buildx create --use
    - docker-buildx build --platform linux/amd64,linux/arm64 -t $AWS_ECR_NAME:$GIT_COMMIT_SHA --push .
```

# Setup CI variable to authenticate gitlab to aws ecr

In your GitLab project, go to `Settings` > `CI/CD`. Set the following CI/CD variables:

- `AWS_ACCESS_KEY_ID`: your access key ID.
- `AWS_SECRET_ACCESS_KEY`: your secret access key.
- `AWS_DEFAULT_REGION`: your region code. You might want to confirm that the AWS service you intend to use is available in the chosen region.

Variables are protected by default. To use GitLab CI/CD with branches or tags that are not protected, clear the **Protect variable** checkbox.
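For the `docker login` step above to succeed, the IAM user behind `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` also needs permission to push to the ECR repository. A minimal policy sketch (my assumption, not part of the original setup; adapt the repository ARN to your account):

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "AllowEcrLogin",
      "Effect": "Allow",
      "Action": "ecr:GetAuthorizationToken",
      "Resource": "*"
    },
    {
      "Sid": "AllowPushPull",
      "Effect": "Allow",
      "Action": [
        "ecr:BatchCheckLayerAvailability",
        "ecr:BatchGetImage",
        "ecr:InitiateLayerUpload",
        "ecr:UploadLayerPart",
        "ecr:CompleteLayerUpload",
        "ecr:PutImage"
      ],
      "Resource": "arn:aws:ecr:us-east-1:444333555686:repository/orders/orders"
    }
  ]
}
```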
# Run ci

Any time you push code to the repo, gitlab will run the CI pipeline, and images for both platforms will be pushed to AWS ECR, based on the `--platform linux/amd64,linux/arm64` argument of `docker-buildx build`.

# Inspect the image

Inspect the image:

```bash
docker manifest inspect 444333555686.dkr.ecr.us-east-1.amazonaws.com/orders/orders:71db070c
```

Output:

```json
{
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
  "manifests": [
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "size": 1682,
      "digest": "sha256:65666a6rbcccc7er7we7w7238238d7ds7fd7sdfs7fs7ds7s7s7",
      "platform": {
        "architecture": "amd64",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "size": 1681,
      "digest": "sha256:i23iwejfdsadfasjdsapfsdaphfdasusdahudshisadhoasdodshd",
      "platform": {
        "architecture": "arm64",
        "os": "linux"
      }
    }
  ]
}
```
diff --git a/src/gitlab/runner/install-gitlab-runner-on-ubuntu.md b/src/gitlab/runner/install-gitlab-runner-on-ubuntu.md
new file mode 100644
index 0000000..db43d56
--- /dev/null
+++ b/src/gitlab/runner/install-gitlab-runner-on-ubuntu.md
@@ -0,0 +1,222 @@
# Install gitlab runner on ubuntu

- [Intro](#intro)
  - [Check GPU info on instance](#check-gpu-info-on-instance)
  - [Install gitlab runner](#install-gitlab-runner)
  - [Register gitlab runner](#register-gitlab-runner)
  - [Uninstall GitLab Runner](#uninstall-gitlab-runner)
- [Start gitlab-runner in container](#start-gitlab-runner-in-container)
  - [Gitlab runner token](#gitlab-runner-token)
  - [Use local volume](#use-local-volume)
  - [docker volume](#docker-volume)
- [Register gitlab runner in non-interactive mode](#register-gitlab-runner-in-non-interactive-mode)
- [stop gitlab-runner](#stop-gitlab-runner)
- [Refs](#refs)

# Intro

I've been using GitLab CI/CD for a while now, and I have to say that it's an amazing tool for managing code and automating builds and deployments.

In this blog post, I'll share my experience installing and using GitLab Runner on an Ubuntu GPU instance.

## Check GPU info on instance

First, check the GPU info on the instance:

```bash
(base) smolai@smolai-Z790-UD-AX:~$ nvidia-smi
Mon Sep 18 15:57:06 2023
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 525.116.04   Driver Version: 525.116.04   CUDA Version: 12.0     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  NVIDIA GeForce ...  Off  | 00000000:01:00.0 Off |                  Off |
|  0%   35C    P8     5W / 450W |   3754MiB / 24564MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
|    0   N/A  N/A     13290      C   python                           3752MiB |
+-----------------------------------------------------------------------------+
```

## Install gitlab runner

Next, we can install gitlab runner using the following commands:

```bash
wget -qO - https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh | sudo bash
sudo apt install -y gitlab-runner
```

## Register gitlab runner

After installation, we can register the gitlab runner using the following command:

```bash
# register gitlab runner
sudo gitlab-runner register \
  --non-interactive \
  --url "https://gitlab.planetsmol.com" \
  --registration-token "glrt-ooooooiiiiiiiiii" \
  --description "docker-runner" \
  --executor "docker" \
  --docker-image ubuntu:latest
```

Here is the output of the command:

```bash
Runtime platform                                    arch=amd64 os=linux pid=74044 revision=f5dfa4d1 version=16.3.1
Running in system-mode.

Verifying runner... is valid                        runner=eooovvviii
Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!

Configuration (with the authentication token) was saved in "/etc/gitlab-runner/config.toml"
root@smolai:/tmp# docker images
REPOSITORY                                                          TAG               IMAGE ID       CREATED          SIZE
registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper   x86_64-f5dfa4d1   4af7e8dd8eb7   18 seconds ago   64.1MB
meet                                                                latest            7d62cb955a7f   5 weeks ago      915MB
busybox                                                             latest            a416a98b71e2   2 months ago     4.26MB
```

## Uninstall GitLab Runner

If you want to completely remove GitLab Runner, run the following commands:

```bash
# If you want to completely remove GitLab Runner, run the following command:
sudo apt purge --autoremove -y gitlab-runner

# Remove GPG key and repository:
sudo apt-key del 513111FF
sudo rm -rf /etc/apt/sources.list.d/runner_gitlab-runner.list

# Remove GitLab Runner user:
sudo deluser --remove-home gitlab-runner

# You can also remove GitLab Runner configuration:
sudo rm -rf /etc/gitlab-runner
```

# Start gitlab-runner in container

You can also run gitlab-runner in a container. There are two options for storing the gitlab-runner config: a local system volume mount or a docker volume; both are shown below.

Note that after starting gitlab-runner in a container, you still have to register it. As the docs say:

> Runner registration is the process that links the runner with one or more GitLab instances. You must register the runner so that it can pick up jobs from the GitLab instance.

## Gitlab runner token

You have to obtain a gitlab runner token from gitlab to register the runner.

Here is how to obtain the token from gitlab:

1. Log in to gitlab
2. Click on the "Runners" button
3. Click on the "Tokens" button
4. Click on the "Create token" button
5. Copy the token

Note that gitlab runner authentication tokens have the prefix `glrt-`.

## Use local volume

Let's see how to use local system volume mounts to start the Runner container.
```bash
# Create the directory to mount the docker volume
mkdir -p /srv/gitlab-runner/config

# Start the GitLab Runner container
docker run -d --name gitlab-runner --restart always \
    -v /srv/gitlab-runner/config:/etc/gitlab-runner \
    -v /var/run/docker.sock:/var/run/docker.sock \
    gitlab/gitlab-runner:latest
```

Now that the runner is started, let's see how to register it:

```bash
docker run --rm -it -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register
```

## docker volume

If you want to use a docker volume to start the Runner container, you can use the following commands:

```bash
# Create the Docker volume
docker volume create gitlab-runner-config

# Start the GitLab Runner container using the volume we just created
docker run -d --name gitlab-runner --restart always \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v gitlab-runner-config:/etc/gitlab-runner \
    gitlab/gitlab-runner:latest
```

# Register gitlab runner in non-interactive mode

You can use non-interactive mode to register the runner; refer to https://docs.gitlab.com/runner/commands/index.html#non-interactive-registration for more details.

If you want to register the runner on linux, you can use the following command:

```bash
sudo gitlab-runner register \
  --non-interactive \
  --url "https://gitlab.com/" \
  --token "$RUNNER_TOKEN" \
  --executor "docker" \
  --docker-image alpine:latest \
  --description "docker-runner"
```

If you want to register the runner through docker, you can use the following command:

```bash
docker run --rm -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register \
  --non-interactive \
  --executor "docker" \
  --docker-image alpine:latest \
  --url "https://gitlab.com/" \
  --token "$RUNNER_TOKEN" \
  --description "docker-runner"
```

# stop gitlab-runner

To stop the gitlab-runner container, you can use the following command:

```bash
docker stop gitlab-runner && docker rm gitlab-runner
```

# Refs

Install
https://lindevs.com/install-gitlab-runner-on-ubuntu

Install gitlab runner
https://docs.gitlab.com/runner/install/docker.html

Another install doc
https://docs.gitlab.com/runner/install/linux-repository.html

Register runner
https://docs.gitlab.com/runner/register/index.html#docker
diff --git a/src/gitlab/runner/register-gitlab-runner-on-amazon-linux-2.md b/src/gitlab/runner/register-gitlab-runner-on-amazon-linux-2.md
new file mode 100644
index 0000000..7bbb89d
--- /dev/null
+++ b/src/gitlab/runner/register-gitlab-runner-on-amazon-linux-2.md
@@ -0,0 +1,162 @@
# Register gitlab runner on Amazon Linux 2

- [Intro](#intro)
- [Start AWS EC2 Instance](#start-aws-ec2-instance)
- [Install gitlab runner on Amazon Linux 2](#install-gitlab-runner-on-amazon-linux-2)
- [Register gitlab runner](#register-gitlab-runner)
- [Set GITLAB_PORT to 443](#set-gitlab_port-to-443)
- [Write .gitlab-ci.yml for go project](#write-gitlab-ciyml-for-go-project)
- [Test the runner](#test-the-runner)

# Intro

In this article, we will show you how to register a gitlab runner on Amazon Linux 2 and set up gitlab ci for a go project.

# Start AWS EC2 Instance

You can create an aws ec2 instance either in the aws console or using the aws cli.

Notice that you should enable a public ip address if your aws ec2 instance is in a public subnet of your VPC, otherwise you will not be able to access the internet!

# Install gitlab runner on Amazon Linux 2

Next, we'll install gitlab runner on Amazon Linux 2. 
Run the following bash script:

```bash
# install on amazon linux 2
# refs: https://github.com/beda-software/FAQ/blob/master/aws-ec2-gitlab-runner.md
curl -L "https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh" | sudo bash
sudo -E yum install gitlab-runner
sudo amazon-linux-extras install docker
sudo service docker start
sudo usermod -a -G docker ec2-user
sudo systemctl enable docker.service
sudo systemctl enable containerd.service
sudo usermod -a -G docker ec2-user
sudo yum install -y git
```

# Register gitlab runner

You can register the gitlab runner using the `gitlab-ci-multi-runner` command:

```bash
# register runner
# sudo gitlab-ci-multi-runner register -n --url GITLAB_URL --registration-token "TOKEN" --executor docker --description "Name of docker runner" --docker-image "docker:latest" --docker-privileged
```

Replace `GITLAB_URL` and `TOKEN` with your self-hosted gitlab url and gitlab token. You can go to `https://gitlab.mycompany.com/admin/runners` and click the `Register an instance runner` button, and you will see the `TOKEN` in the popup.

```bash
# register runner in gitlab ccc
sudo gitlab-ci-multi-runner register -n --url https://gitlab.mycompany.com/ --registration-token "A____TOKEN_____A" --executor docker --description "Name of docker runner" --docker-image "docker:latest" --docker-privileged
```

Output:

```
Runtime platform                                    arch=amd64 os=linux pid=8095 revision=865283c5 version=16.1.0
Running in system-mode.

WARNING: Support for registration tokens and runner parameters in the 'register' command has been deprecated in GitLab Runner 15.6 and will be replaced with support for authentication tokens. For more information, see https://gitlab.com/gitlab-org/gitlab/-/issues/380872
Registering runner... succeeded                     runner=paaaaaaa
Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!

Configuration (with the authentication token) was saved in "/etc/gitlab-runner/config.toml"
```

You will see that a runner named `paaaaaaa` has been registered successfully.

You can also configure the runner in `/etc/gitlab-runner/config.toml`.

Go to the Runners page in the gitlab Admin area to check that the runner is registered successfully.

# Set GITLAB_PORT to 443

Even though the runner is registered successfully, you may encounter an error like this:

```
gitlab-runner version 14.10.1 fails to clone a repo configured with a https repo URL, stating HTTP Basic: Access denied.
```

As we deploy gitlab using `https://github.com/sameersbn/docker-gitlab`, you may need to set `GITLAB_PORT` to `443`, depending on how the load balancer in front of the gitlab service is configured, or add `clone_url` to the runner config file `/etc/gitlab-runner/config.toml`. Also, don't forget to restart the docker service: `sudo service docker restart`. 
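A working config with `clone_url` set looks like the following (the token shown is redacted):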
```toml
concurrent = 1
check_interval = 0
shutdown_timeout = 0

[session_server]
  session_timeout = 1800

[[runners]]
  name = "Name of docker runner"
  url = "https://gitlab.mycompany.com/"
  clone_url = "https://gitlab.mycompany.com/"
  id = 1
  token = "s_____________n"
  token_obtained_at = 2023-07-22T01:20:41Z
  token_expires_at = 0001-01-01T00:00:00Z
  executor = "docker"
  [runners.cache]
    MaxUploadedArchiveSize = 0
  [runners.docker]
    tls_verify = false
    image = "docker:latest"
    privileged = true
    disable_entrypoint_overwrite = false
    oom_kill_disable = false
    disable_cache = false
    volumes = ["/cache"]
    shm_size = 0
```

# Write .gitlab-ci.yml for go project

For a go project, you can write a gitlab ci file and test the runner.

Here is a simple gitlab ci file `.gitlab-ci.yml` for a go project:

```yaml
# You can copy and paste this template into a new `.gitlab-ci.yml` file.
# You should not add this template to an existing `.gitlab-ci.yml` file by using the `include:` keyword.
#
# To contribute improvements to CI/CD templates, please follow the Development guide at:
# https://docs.gitlab.com/ee/development/cicd/templates.html
# This specific template is located at:
# https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Go.gitlab-ci.yml

image: golang:latest

stages:
  - test
  - build
  - deploy

format:
  stage: test
  script:
    - go fmt $(go list ./... | grep -v /vendor/)
    - go vet $(go list ./... | grep -v /vendor/)
    - go test -race $(go list ./... | grep -v /vendor/)

compile:
  stage: build
  script:
    - mkdir -p mybinaries
    - go build -o mybinaries ./...
  artifacts:
    paths:
      - mybinaries

deploy:
  stage: deploy
  script: echo "Define your deployment script!"
  environment: production
```

# Test the runner

You can trigger CI either by committing on the `main` branch or by clicking `Run Pipeline` on your project's pipeline page, e.g. https://gitlab.mycompany.com/myname/go-ci-test/-/pipelines

If the pipeline passes, your gitlab runner is running successfully.
diff --git a/src/golang/gomod/go-get-gitlab-subgroup-module.md b/src/golang/gomod/go-get-gitlab-subgroup-module.md
new file mode 100644
index 0000000..691a7a7
--- /dev/null
+++ b/src/golang/gomod/go-get-gitlab-subgroup-module.md
@@ -0,0 +1,120 @@
# Go mod gitlab subgroup module

- [Intro](#intro)
- [Create go mod in gitlab subgroup](#create-go-mod-in-gitlab-subgroup)
- [Test use go mod in gitlab subgroup](#test-use-go-mod-in-gitlab-subgroup)
- [Config ~/.netrc](#config-netrc)
- [Add go module in gitlab subgroup](#add-go-module-in-gitlab-subgroup)
- [Test in main.go](#test-in-maingo)

# Intro

In this article, we will learn how to use go mod to get a private gitlab subgroup module.

# Create go mod in gitlab subgroup

First, let's create a subgroup named `group1` in the `aoaojiaoaoaojiao` gitlab group.

Next, we create a new project named `math` under subgroup `group1` in gitlab.

After that, we initialize the project by running `go mod init gitlab.com/aoaojiaoaoaojiao/group1/math` inside the `math` project. 
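Concretely, the local steps look like this (hypothetical paths; the group and subgroup themselves are created in the GitLab UI):

```bash
git clone git@gitlab.com:aoaojiaoaoaojiao/group1/math.git
cd math
go mod init gitlab.com/aoaojiaoaoaojiao/group1/math
```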
Now, here is the `go.mod` file:

```
module gitlab.com/aoaojiaoaoaojiao/group1/math

go 1.20
```

We add an `Add` function in `add.go`:

`add.go`:

```go
package math

func Add(a int, b int) int {
	return a + b
}
```

# Test use go mod in gitlab subgroup

Create a golang project:

```bash
mkdir -p test-go-module-in-gitlab-subgroup
cd test-go-module-in-gitlab-subgroup
go mod init test
```

# Config ~/.netrc

Before you can pull the dependency using `go get`, you need to add a configuration like this to `~/.netrc` (the placeholders stand for your gitlab username and a personal access token):

```
machine gitlab.com login <username> password <personal_access_token>
```

# Add go module in gitlab subgroup

Then add `math` as a dependency:

```bash
go get gitlab.com/aoaojiaoaoaojiao/group1/math
```

You'd expect everything to go well, but it doesn't. An error occurs:

```
gitlab.com/aoaojiaoaoaojiao/group1/math@v0.0.0-20230530092926-88bf01cac6da: verifying module: gitlab.com/aoaojiaoaoaojiao/group1/math@v0.0.0-20230530092926-88bf01cac6da: reading https://goproxy.io/sumdb/sum.golang.org/lookup/gitlab.com/aoaojiaoaoaojiao/group1/math@v0.0.0-20230530092926-88bf01cac6da: 404 Not Found
	server response:
	not found: gitlab.com/aoaojiaoaoaojiao/group1/math@v0.0.0-20230530092926-88bf01cac6da: invalid version: git ls-remote -q origin in /tmp/gopath/pkg/mod/cache/vcs/b401f5b06f1a57210edcb631d77909880fab25833fcdeab7b9341e5d4617599b: exit status 128:
	fatal: could not read Username for 'https://gitlab.com': terminal prompts disabled
	Confirm the import path was entered correctly.
	If this is a private repository, see https://golang.org/doc/faq#git_https for additional information.
```

You need to tell Go that this is a private module by using `export GOPRIVATE='gitlab.com'` or `go env -w GOPRIVATE=gitlab.com`:

```bash
export GOPRIVATE='gitlab.com'
go get gitlab.com/aoaojiaoaoaojiao/group1/math
```

> Why this happens is that go get tries to discover the modules at a given path in order to find the requested Go module repository. Only after the repository is found, the tools will do git clone or git checkout and the SSH keys will be used for authentication. The issue comes down to the fact that private Gitlab subgroups cannot be listed/viewed without a Gitlab Access Token.

Output:

```
go: added gitlab.com/aoaojiaoaoaojiao/group1/math v0.0.0-20230530092926-88bf01cac6da
```

# Test in main.go

Write code in `main.go` to call the `Add` function in the `math` module.

`main.go`

```go
package main

import (
	"fmt"

	"gitlab.com/aoaojiaoaoaojiao/group1/math"
)

func main() {
	res := math.Add(1, 2)
	fmt.Printf("1 + 2 = %d\n", res)
}
```

Output:

```bash
1 + 2 = 3
```
diff --git a/src/golang/grpc/go_opt.md b/src/golang/grpc/go_opt.md
new file mode 100644
index 0000000..e7e87d2
--- /dev/null
+++ b/src/golang/grpc/go_opt.md
@@ -0,0 +1,164 @@
# GRPC golang output path

- [go code output path](#go-code-output-path)
  - [code structure](#code-structure)
  - [--go_out=paths=import](#--go_outpathsimport)
  - [--go_out=module=$PREFIX](#--go_outmoduleprefix)
  - [--go_out=paths=source_relative](#--go_outpathssource_relative)

# go code output path

The protocol buffer compiler produces Go output when invoked with the go_out flag. The argument to the go_out flag is the directory where you want the compiler to write your Go output. The compiler creates a single source file for each `.proto` file input. The name of the output file is created by replacing the `.proto` extension with `.pb.go`. 
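For reference, the examples below assume a proto file roughly like the following sketch (hypothetical; the repo's actual `protos/greeting.proto` isn't shown in this post). The `go_package` option is what gives the file the Go import path `github.com/grpc-greeting/greeting` used in the output trees:

```protobuf
syntax = "proto3";

package greeting;

// go_package determines the Go import path of the generated code
option go_package = "github.com/grpc-greeting/greeting";

service Greeter {
  rpc Greet (GreetRequest) returns (GreetReply);
}

message GreetRequest {
  string name = 1;
}

message GreetReply {
  string message = 1;
}
```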
+ +Where in the output directory the generated `.pb.go` file is placed depends on the compiler flags. There are several output modes: + +- If the `paths=import` flag is specified, the output file is placed in a directory named after the Go package’s import path. For example, an input file protos/buzz.proto with a Go import path of `example.com/project/protos/fizz` results in an output file at `example.com/project/protos/fizz/buzz.pb.go`. This is the default output mode if a paths flag is not specified. +- If the `module=$PREFIX` flag is specified, the output file is placed in a directory named after the Go package’s import path, but with the specified directory prefix removed from the output filename. For example, an input file `protos/buzz.proto` with a Go import path of `example.com/project/protos/fizz` and `example.com/project` specified as the module prefix results in an output file at `protos/fizz/buzz.pb.go`. Generating any Go packages outside the module path results in an error. This mode is useful for outputting generated files directly into a Go module. +- If the `paths=source_relative` flag is specified, the output file is placed in the same relative directory as the input file. For example, an input file `protos/buzz.proto` results in an output file at `protos/buzz.pb.go` + +## code structure + +The directory tree looks like this: + +```bash +❯ exa -l --tree +drwxr-xr-x - dylan 15 Feb 10:49 . +drwxr-xr-x - dylan 15 Feb 10:27 ├── cmd +drwxr-xr-x - dylan 15 Feb 10:28 │ └── greeting-server +.rw-r--r-- 30 dylan 15 Feb 10:28 │ └── main.go +.rw-r--r-- 250 dylan 15 Feb 10:49 ├── Makefile +drwxr-xr-x - dylan 15 Feb 10:49 ├── protos +.rw-r--r-- 365 dylan 15 Feb 10:48 │ └── greeting.proto +.rw-r--r-- 1.4k dylan 15 Feb 10:40 └── README.md +``` + +## --go_out=paths=import + +Using `--go_out=paths=import` to generate code into import path. + +```bash +❯ protoc --go_out=. --go_opt=paths=import --go-grpc_out=. --go-grpc_opt=paths=import protos/*.proto +``` + +```bash +❯ exa -l --tree +drwxr-xr-x - dylan 15 Feb 11:21 . +drwxr-xr-x - dylan 15 Feb 10:27 ├── cmd +drwxr-xr-x - dylan 15 Feb 10:28 │ └── greeting-server +.rw-r--r-- 30 dylan 15 Feb 10:28 │ └── main.go +drwxr-xr-x - dylan 15 Feb 11:21 ├── github.com +drwxr-xr-x - dylan 15 Feb 11:21 │ └── grpc-greeting +drwxr-xr-x - dylan 15 Feb 11:21 │ └── greeting +.rw-r--r-- 7.2k dylan 15 Feb 11:21 │ ├── greeting.pb.go +.rw-r--r-- 3.7k dylan 15 Feb 11:21 │ └── greeting_grpc.pb.go +.rw-r--r-- 250 dylan 15 Feb 10:49 ├── Makefile +drwxr-xr-x - dylan 15 Feb 11:20 ├── protos +.rw-r--r-- 365 dylan 15 Feb 10:48 │ └── greeting.proto +.rw-r--r-- 1.4k dylan 15 Feb 10:40 └── README.md +``` + +You can write generated code into different places. Here we put in `whatever` folder. + +```bash +mkdir -p whatever +protoc --go_out=whatever --go_opt=paths=import --go-grpc_out=. --go-grpc_opt=paths=import protos/*.proto +``` + +The directory tree looks like this: + +```bash +drwxr-xr-x - dylan 15 Feb 11:22 . 
drwxr-xr-x - dylan 15 Feb 10:27 ├── cmd
drwxr-xr-x - dylan 15 Feb 10:28 │ └── greeting-server
.rw-r--r-- 30 dylan 15 Feb 10:28 │ └── main.go
drwxr-xr-x - dylan 15 Feb 11:21 ├── github.com
drwxr-xr-x - dylan 15 Feb 11:21 │ └── grpc-greeting
drwxr-xr-x - dylan 15 Feb 11:21 │ └── greeting
.rw-r--r-- 7.2k dylan 15 Feb 11:21 │ ├── greeting.pb.go
.rw-r--r-- 3.7k dylan 15 Feb 11:22 │ └── greeting_grpc.pb.go
.rw-r--r-- 250 dylan 15 Feb 10:49 ├── Makefile
drwxr-xr-x - dylan 15 Feb 11:20 ├── protos
.rw-r--r-- 365 dylan 15 Feb 10:48 │ └── greeting.proto
.rw-r--r-- 1.4k dylan 15 Feb 10:40 ├── README.md
drwxr-xr-x - dylan 15 Feb 11:22 └── whatever
drwxr-xr-x - dylan 15 Feb 11:22 └── github.com
drwxr-xr-x - dylan 15 Feb 11:22 └── grpc-greeting
drwxr-xr-x - dylan 15 Feb 11:22 └── greeting
.rw-r--r-- 7.2k dylan 15 Feb 11:22 └── greeting.pb.go
```

## --go_out=module=$PREFIX

We can place the generated go code and grpc code under a path with a specific directory prefix removed. Here we remove the `github.com` directory and use `grpc-greeting` as the root path (note that the `module=` option must be passed to both plugins to produce the tree below):

```bash
protoc --go_out=. --go_opt=module=github.com --go-grpc_out=. --go-grpc_opt=module=github.com protos/*.proto
```

The directory tree looks like this:

```
drwxr-xr-x - dylan 15 Feb 11:31 .
drwxr-xr-x - dylan 15 Feb 10:27 ├── cmd
drwxr-xr-x - dylan 15 Feb 10:28 │ └── greeting-server
.rw-r--r-- 30 dylan 15 Feb 10:28 │ └── main.go
drwxr-xr-x - dylan 15 Feb 11:31 ├── grpc-greeting
drwxr-xr-x - dylan 15 Feb 11:31 │ └── greeting
.rw-r--r-- 7.2k dylan 15 Feb 11:31 │ ├── greeting.pb.go
.rw-r--r-- 3.7k dylan 15 Feb 11:31 │ └── greeting_grpc.pb.go
.rw-r--r-- 250 dylan 15 Feb 10:49 ├── Makefile
drwxr-xr-x - dylan 15 Feb 11:20 ├── protos
.rw-r--r-- 365 dylan 15 Feb 10:48 │ └── greeting.proto
.rw-r--r-- 1.4k dylan 15 Feb 10:40 └── README.md
```

We can also put the generated go code and grpc code into different paths, for example stripping the prefix only for the go code:

```bash
protoc --go_out=. --go_opt=module=github.com --go-grpc_out=. --go-grpc_opt=paths=import protos/*.proto
```

The directory tree looks like this:

```bash
❯ exa -l --tree
drwxr-xr-x - dylan 15 Feb 11:27 .
drwxr-xr-x - dylan 15 Feb 10:27 ├── cmd
drwxr-xr-x - dylan 15 Feb 10:28 │ └── greeting-server
.rw-r--r-- 30 dylan 15 Feb 10:28 │ └── main.go
drwxr-xr-x - dylan 15 Feb 11:27 ├── github.com
drwxr-xr-x - dylan 15 Feb 11:27 │ └── grpc-greeting
drwxr-xr-x - dylan 15 Feb 11:27 │ └── greeting
.rw-r--r-- 3.7k dylan 15 Feb 11:27 │ └── greeting_grpc.pb.go
drwxr-xr-x - dylan 15 Feb 11:27 ├── grpc-greeting
drwxr-xr-x - dylan 15 Feb 11:27 │ └── greeting
.rw-r--r-- 7.2k dylan 15 Feb 11:27 │ └── greeting.pb.go
.rw-r--r-- 250 dylan 15 Feb 10:49 ├── Makefile
drwxr-xr-x - dylan 15 Feb 11:20 ├── protos
.rw-r--r-- 365 dylan 15 Feb 10:48 │ └── greeting.proto
.rw-r--r-- 1.4k dylan 15 Feb 10:40 └── README.md
```

## --go_out=paths=source_relative

Using `--go_out=paths=source_relative` to generate code into the same relative directory as the input path.

```bash
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative protos/*.proto
```

The directory tree looks like this:

```bash
❯ exa -l --tree
drwxr-xr-x - dylan 15 Feb 10:49 . 
+drwxr-xr-x - dylan 15 Feb 10:27 ├── cmd +drwxr-xr-x - dylan 15 Feb 10:28 │ └── greeting-server +.rw-r--r-- 30 dylan 15 Feb 10:28 │ └── main.go +.rw-r--r-- 250 dylan 15 Feb 10:49 ├── Makefile +drwxr-xr-x - dylan 15 Feb 11:19 ├── protos +.rw-r--r-- 7.2k dylan 15 Feb 11:19 │ ├── greeting.pb.go +.rw-r--r-- 365 dylan 15 Feb 10:48 │ ├── greeting.proto +.rw-r--r-- 3.7k dylan 15 Feb 11:19 │ └── greeting_grpc.pb.go +.rw-r--r-- 1.4k dylan 15 Feb 10:40 └── README.md +``` diff --git a/src/golang/password/write-password-encryption-package-using-chatgpt.md b/src/golang/password/write-password-encryption-package-using-chatgpt.md new file mode 100644 index 0000000..01a0d04 --- /dev/null +++ b/src/golang/password/write-password-encryption-package-using-chatgpt.md @@ -0,0 +1,5 @@ +# Write password encryption package using ChatGPT + +The full chat history with chatgpt is here: + +https://docs.google.com/document/d/e/2PACX-1vSiMi4cbOwsHbBChlIum8yrq80h6K1rYRll2v0pYlzIwKM7ugTiMN8tnUkSItCFeSWrISYsjsDmDfYq/pub diff --git a/src/kong/grpc/multiple-grpc-service-routing.md b/src/kong/grpc/multiple-grpc-service-routing.md new file mode 100644 index 0000000..a792927 --- /dev/null +++ b/src/kong/grpc/multiple-grpc-service-routing.md @@ -0,0 +1 @@ +# Multiple GRPC Service Routing in Kong diff --git a/src/kubernetes/cronjob/cronjob-to-restart-deployment.md b/src/kubernetes/cronjob/cronjob-to-restart-deployment.md new file mode 100644 index 0000000..4fa9d26 --- /dev/null +++ b/src/kubernetes/cronjob/cronjob-to-restart-deployment.md @@ -0,0 +1,325 @@ +# CronJob To Restart Deployment + +- [CronJob To Restart Deployment](#cronjob-to-restart-deployment) + - [Explain line by line](#explain-line-by-line) + - [ServiceAccount](#serviceaccount) + - [Role](#role) + - [RoleBinding](#rolebinding) + - [CronJob](#cronjob) + +# CronJob To Restart Deployment + +Here is an example of how to restart deployment using cronjob. + +```yaml +--- +# Service account the client will use to reset the deployment, +# by default the pods running inside the cluster can do no such things. 
kind: ServiceAccount
apiVersion: v1
metadata:
  name: deployment-restart
  namespace: default
---
# allow getting status and patching only the one deployment you want to restart
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: deployment-restart
  namespace: default
rules:
  - apiGroups: ["apps", "extensions"]
    resources: ["deployments"]
    resourceNames: ["my-fast-and-robust-service"]
    verbs:
      # "list" and "watch" are only needed if you want to use `rollout status`
      ["get", "patch", "list", "watch"]
---
# bind the role to the service account
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: deployment-restart
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: deployment-restart
subjects:
  - kind: ServiceAccount
    name: deployment-restart
    namespace: default
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: deployment-restart
  namespace: default
spec:
  concurrencyPolicy: Forbid
  # cron spec of time; here, 1 AM every day
  schedule: "0 1 * * *"
  jobTemplate:
    spec:
      backoffLimit:
        # this has very low chance of failing, as all this does
        # is prompt kubernetes to schedule new replica set for
        # the deployment
        2
      activeDeadlineSeconds:
        # timeout, makes most sense with "waiting for rollout" variant specified below
        600
      template:
        spec:
          serviceAccountName:
            # name of the service account configured above
            deployment-restart
          restartPolicy: Never
          containers:
            - name: kubectl
              image:
                # probably any kubectl image will do,
                # optionally specify a version, but this
                # should not be necessary, as long as the
                # version of kubectl is new enough to
                # have `rollout restart`
                bitnami/kubectl
              command:
                - "kubectl"
                - "rollout"
                - "restart"
                - "deployment/my-fast-and-robust-service"
```

This Kubernetes YAML file creates a CronJob that periodically restarts a Deployment with a given name. It does four things:

1. Creates a ServiceAccount for the restart job to run as; by default, pods running inside the cluster are not allowed to do this kind of thing.

2. Creates an RBAC Role that allows the `get`, `patch`, `list`, and `watch` verbs, but only on the specified Deployment (`my-fast-and-robust-service`).

3. Creates an RBAC RoleBinding that binds the Role above to the ServiceAccount created earlier.

4. Creates a CronJob that defines the task to run and its schedule. On each run it starts a Pod that uses the kubectl command to restart the specified Deployment.

In short, this file implements a `CronJob` that periodically restarts a given `Deployment` to keep it stable and available. That can help work around resource exhaustion, performance problems, and other conditions that would otherwise cause the `Deployment` to fail. Note that this should be treated as a stopgap measure and used with caution until you are sure there is no better solution.

## Explain line by line

### ServiceAccount

Here is an explanation of each line of this YAML:

```
---
# Service account the client will use to reset the deployment,
# by default the pods running inside the cluster can do no such things.
kind: ServiceAccount
apiVersion: v1
metadata:
  name: deployment-restart
  namespace: default
```

- `---`: marks the start of a YAML document.

- `# Service account the client will use to reset the deployment,`: a comment describing the ServiceAccount being created.

- `kind: ServiceAccount`: the type of Kubernetes object; here, a ServiceAccount.

- `apiVersion: v1`: the API version; here, v1.

- `metadata:`: the object's metadata, including its name and namespace.

- `name: deployment-restart`: names the ServiceAccount deployment-restart.

- `namespace: default`: places it in the default namespace.

### Role

```
---
# allow getting status and patching only the one deployment you want to restart
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: deployment-restart
  namespace: default
rules:
  - apiGroups: ["apps", "extensions"]
    resources: ["deployments"]
    resourceNames: ["my-fast-and-robust-service"]
    verbs:
      # "list" and "watch" are only needed if you want to use `rollout status`
      ["get", "patch", "list", "watch"]
```

- `---`: marks the start of a YAML document.

- `# allow getting status and patching only the one deployment you want to restart`: a comment describing the Role being created.

- `apiVersion: rbac.authorization.k8s.io/v1`: the RBAC API version; here, v1.

- `kind: Role`: the Kubernetes object type; here, a Role.

- `metadata:`: the object's metadata, including its name and namespace.

- `name: deployment-restart`: names the Role deployment-restart.

- `namespace: default`: places it in the default namespace.

- `rules:`: defines the permission rules.

- `- apiGroups: ["apps", "extensions"]`: the API groups; here, apps and extensions.

- `resources: ["deployments"]`: the resource kind; here, deployments.

- `resourceNames: ["my-fast-and-robust-service"]`: the resource name; here, my-fast-and-robust-service.

- `verbs: ["get", "patch", "list", "watch"]`: the allowed operations; here, get, patch, list, and watch.

### RoleBinding

```
---
# bind the role to the service account
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: deployment-restart
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: deployment-restart
subjects:
  - kind: ServiceAccount
    name: deployment-restart
    namespace: default
```

- `---`: marks the start of a YAML document.

- `# bind the role to the service account`: a comment describing the RoleBinding.

- `apiVersion: rbac.authorization.k8s.io/v1`: the RBAC API version; here, v1.

- `kind: RoleBinding`: the Kubernetes object type; here, a RoleBinding.

- `metadata:`: the object's metadata, including its name and namespace.

- `name: deployment-restart`: names the RoleBinding deployment-restart.

- `namespace: default`: places it in the default namespace.

- `roleRef:`: references the role to bind.

- `apiGroup: rbac.authorization.k8s.io`: the RBAC API group.

- `kind: Role`: the referenced role's type; here, Role.

- `name: deployment-restart`: the name of the role to bind; here, deployment-restart.

- `subjects:`: the subjects (e.g., a ServiceAccount) the role is bound to.

- `- kind: ServiceAccount`: the subject's type; here, ServiceAccount.

- `name: deployment-restart`: the subject's name; here, deployment-restart.

- `namespace: default`: the subject's namespace; here, default.

### CronJob

```
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: deployment-restart
  namespace: default
spec:
  concurrencyPolicy: Forbid
  # cron spec of time; here, 1 AM every day
  schedule: "0 1 * * *"
  jobTemplate:
    spec:
      backoffLimit:
        # this has very low chance of failing, as all this does
        # is prompt kubernetes to schedule new replica set for
        # the deployment
        2
      activeDeadlineSeconds:
        # timeout, makes most sense with "waiting for rollout" variant specified below
        600
      template:
        spec:
          serviceAccountName:
            # name of the service account configured above
            deployment-restart
          restartPolicy: Never
          containers:
            - name: kubectl
              image:
                # probably any kubectl image will do,
                # optionally specify a version, but this
                # should not be necessary, as long as the
                # version of kubectl is new enough to
                # have `rollout restart`
                bitnami/kubectl
              command:
                - "kubectl"
                - "rollout"
                - "restart"
                - "deployment/my-fast-and-robust-service"
```

- `---`: marks the start of a YAML document.

- `apiVersion: batch/v1beta1`: the batch API version; here, v1beta1.

- `kind: CronJob`: the Kubernetes object type; here, a CronJob.

- `metadata:`: the object's metadata, including its name and namespace.

- `name: deployment-restart`: names the `CronJob` deployment-restart.

- `namespace: default`: places it in the `default` namespace.

- `spec:`: defines the CronJob's spec.

- `concurrencyPolicy: Forbid`: the concurrency policy; Forbid means a new run will not start while the previous one is still in progress.

- `schedule: "0 1 * * *"`: the CronJob's schedule; here, 1 AM every day.

- `jobTemplate:`: defines the job template to execute.

- `spec:`: the job's spec.

- `backoffLimit:`: how many times to retry the job after a failure; here, 2.

- `activeDeadlineSeconds:`: the deadline for the job run, in seconds; here, 600.

- `template:`: the job's Pod template.

- `spec:`: the Pod's spec.

- `serviceAccountName:`: the name of the ServiceAccount the Pod uses; here, deployment-restart.

- `restartPolicy: Never`: the Pod's restart policy; Never means the Pod is not restarted when it terminates.

- `containers:`: the list of containers in the Pod.

- `- name: kubectl`: names the container kubectl.

- `image: bitnami/kubectl`: the kubectl container image to use.

- `command:`: the command to run in the container, as a list of tokens.

- `- "kubectl"`: the first token, kubectl.

- `- "rollout"`: the second token, rollout.

- `- "restart"`: the third token, restart.

- `- "deployment/my-fast-and-robust-service"`: the Deployment to restart; here, `my-fast-and-robust-service`.

In short, this `YAML` defines a `CronJob` that uses the kubectl command to restart the specified Deployment. The CronJob runs on a schedule and keeps the Deployment stable and available over time.
diff --git a/src/kubernetes/kubebuilder/kubebuilder-quickstart.md b/src/kubernetes/kubebuilder/kubebuilder-quickstart.md
new file mode 100644
index 0000000..5bfbe88
--- /dev/null
+++ b/src/kubernetes/kubebuilder/kubebuilder-quickstart.md
@@ -0,0 +1,413 @@
# Kubebuilder quickstart

- [Intro](#intro)
- [kubebuilder](#kubebuilder)
  - [Install kubebuilder](#install-kubebuilder)
  - [Create a kubebuilder project](#create-a-kubebuilder-project)
  - [Create the group, version, and kind](#create-the-group-version-and-kind)
  - [Define the CRD Spec](#define-the-crd-spec)
  - [Implement Reconcile for the custom CRD](#implement-reconcile-for-the-custom-crd)
  - [Inside Reconcile](#inside-reconcile)
    - [Reconcile logic](#reconcile-logic)
    - [Handling the CRD](#handling-the-crd)
    - [finalizer](#finalizer)
    - [deployment](#deployment)
    - [service](#service)
    - [ingress](#ingress)
- [Testing](#testing)

# Intro

Deploying an application on k8s is tedious: you configure a `deployment` and a `service`, and bringing in external traffic also requires an `ingress`, so you end up maintaining three files at once. A kubernetes operator can simplify deployment dramatically.

We'll use the `kubebuilder` framework to develop a kubernetes operator whose goal is **a single config file that creates all three resources with one `kubectl apply`**.

# kubebuilder

## Install kubebuilder

```shell
os=$(go env GOOS)
arch=$(go env GOARCH)
v=2.3.1
# download kubebuilder and extract it to tmp
curl -L https://go.kubebuilder.io/dl/${v}/${os}/${arch} | tar -xz -C /tmp/

# move to a long-term location and put it on your path
# (you'll need to set the KUBEBUILDER_ASSETS env var if you put it somewhere else)
sudo mv /tmp/kubebuilder_${v}_${os}_${arch} /usr/local/kubebuilder
export PATH=$PATH:/usr/local/kubebuilder/bin
```

## Create a kubebuilder project

```shell
kubebuilder init --domain example.com CustomImageDeploy
```

## Create the group, version, and kind
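Next we scaffold the new API. In kubebuilder's default layout this generates the `CustomImageDeploy` type under `api/v1` and a controller skeleton under `controllers/` (the exact paths depend on the kubebuilder version):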
+```shell +kubebuilder create api --group customimagedeploy --version v1 --kind CustomImageDeploy +``` + +## 定义 CRD Spec + +接下来我们需要定义 CRD 的 Spec + +```go +// CustomImageDeploySpec defines the desired state of CustomImageDeploy +type CustomImageDeploySpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Image is the docker image with version info of CustomImageDeploy. + Image string `json:"image,omitempty"` + + // Size is the number of pods to run + Size int32 `json:"size"` + + // Port is the port of container + Port int32 `json:"port"` +} +``` + +这里定义了要运行的 docker image(`Image`),数量(`Size`)和 container 的端口(`Port`) + +## 实现自定义 CRD 的 Reconcile + +```go +func (r *CustomImageDeployReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { +} +``` + +Reconcile 函数实现了 operator 的功能。 + +## Reconcile 内部实现 + +### Reconcile logic + +- 获取 CRD,处理 `client.IgnoreNotFound(err)`,此时表示 CRD 被删除,Reconcile 返回 `ctrl.Result{}, nil` ,Reconcile loop 结束 + +- 处理 `finalizer`,如果`ObjectMeta.DeletionTimestamp.IsZero()` 则表示未正在被删除,我们需要给 CRD 的 `ObjectMeta` 添加 `finalizer`;否则我们判断 CRD 的 ObjectMeta 是否包含 finalizer,并删除其他外部资源,删除成功之后清除 ObjectMeta 中的 `finalizer`,剩下删除的工作交给 `kubernetes` 去处理 + +- 获取 `ingress`,处理 `client.IgnoreNotFound(err)`,此时表示 `ingress` 尚未被创建,则需要调用 `r.Client.Create` 来创建 + +- 获取 `deployment`,处理 `client.IgnoreNotFound(err)`,此时表示 `deployment` 尚未被创建,则需要调用 `r.Client.Create` 来创建 + +- 获取 `service`,处理 `client.IgnoreNotFound(err)`,此时表示 `service` 尚未被创建,则需要调用 `r.Client.Create` 来创建 + +- 其他外部资源的处理,由于我们未使用其他外部资源,这里忽略 + +### 处理 CRD + +```go +log := r.Log.WithValues("customimagedeploy", req.NamespacedName) + +log.Info("[CustomImageDeployReconciler::Reconsile]", "req: ", req) + +cid := &customimagedeployv1.CustomImageDeploy{} +err := r.Client.Get(context.TODO(), req.NamespacedName, cid) +log.Info("Begin to use finalizer", "cid : ", cid) + +if err != nil { + //if errors.IsNotFound(err) { + // // Request object not found, could have been deleted after reconcile req. + // // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // // Return and don't requeue + // log.Info("CustomImageDeploy resource not found. Ignoring since object must be deleted.") + // return ctrl.Result{}, nil + //} + //return ctrl.Result{}, err + log.Info("[CustomImageDeployReconciler::Reconsile] get err != nil", "err: ", err) + return ctrl.Result{}, client.IgnoreNotFound(err) +} +``` + +### finalizer + +```go +f := "customimagedeploy.finalizers.example.com" +if cid.ObjectMeta.DeletionTimestamp.IsZero() { + log.Info("DeletionTimestamp.IsZero") + + // The object is not being deleted, so if it does not have our finalizer, + // then lets add the finalizer and update the object. + if !containsString(cid.ObjectMeta.Finalizers, f) { + cid.ObjectMeta.Finalizers = append(cid.ObjectMeta.Finalizers, f) + if err := r.Update(context.Background(), cid); err != nil { + return reconcile.Result{}, err + } + } +} else { + // The object is being deleted + if containsString(cid.ObjectMeta.Finalizers, f) { + // our finalizer is present, so lets handle our external dependency + if err := r.deleteExternalDependency(cid); err != nil { + // if fail to delete the external dependency here, return with error + // so that it can be retried + return reconcile.Result{}, err + } + + // remove our finalizer from the list and update it. 
+		cid.ObjectMeta.Finalizers = removeString(cid.ObjectMeta.Finalizers, f)
+		if err := r.Update(context.Background(), cid); err != nil {
+			return reconcile.Result{}, err
+		}
+	}
+
+	// Our finalizer has finished, so the reconciler can do nothing.
+	return reconcile.Result{}, nil
+}
+```
+
+The `containsString` helper checks whether the `finalizer` slice contains our predefined `finalizer`:
+
+```go
+func containsString(slice []string, s string) bool {
+	for _, item := range slice {
+		if item == s {
+			return true
+		}
+	}
+	return false
+}
+```
+
+### deployment
+
+```go
+// check if Deployment already exists, if not create a new one
+deployment := &appsv1.Deployment{}
+log.Info("Getting the deployment.", "cid: ", cid)
+err = r.Client.Get(context.Background(), types.NamespacedName{Name: cid.Name, Namespace: cid.Namespace}, deployment)
+if errors.IsNotFound(err) {
+	dep := r.deploymentForCustomImageDeploy(cid)
+	log.Info("Creating a new deployment.", "Namespace: ", dep.Namespace, "Name: ", dep.Name)
+	err = r.Client.Create(context.Background(), dep)
+	if err != nil {
+		log.Error(err, "Failed to create a new deployment", "Namespace: ", dep.Namespace, "Name: ", dep.Name)
+		return ctrl.Result{}, err
+	}
+}
+if err != nil {
+	log.Error(err, "Failed to create a new deployment")
+	return ctrl.Result{}, err
+}
+
+// ensure the size
+size := cid.Spec.Size
+if deployment.Spec.Replicas == nil {
+	// replicas is nil, requeue
+	log.Info("deployment.Spec.Replicas is nil")
+	return ctrl.Result{RequeueAfter: time.Second * 5}, nil
+}
+
+if *deployment.Spec.Replicas != size {
+	deployment.Spec.Replicas = &size
+	err = r.Client.Update(context.Background(), deployment)
+	if err != nil {
+		log.Error(err, "Failed to update deployment", "Namespace: ", deployment.Namespace, "Name: ", deployment.Name)
+		return ctrl.Result{}, err
+	}
+	// size not match, requeue
+	return ctrl.Result{RequeueAfter: time.Second * 5}, nil
+}
+```
+
+The `deploymentForCustomImageDeploy` helper prepares the Spec of the `deployment` to create:
+
+```go
+// deploymentForCustomImageDeploy returns a Deployment object
+func (r *CustomImageDeployReconciler) deploymentForCustomImageDeploy(c *customimagedeployv1.CustomImageDeploy) *appsv1.Deployment {
+	replicas := c.Spec.Size
+	image := c.Spec.Image
+	name := c.Name
+	port := c.Spec.Port
+
+	ls := labelsForCustomImageDeploy(name)
+
+	dep := &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      c.Name,
+			Namespace: c.Namespace,
+			Labels:    ls,
+		},
+		Spec: appsv1.DeploymentSpec{
+			Replicas: &replicas,
+			Selector: &metav1.LabelSelector{
+				MatchLabels: ls,
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: ls,
+				},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{{
+						Image: image,
+						Name:  name,
+						Ports: []corev1.ContainerPort{{
+							ContainerPort: port,
+							// Name: name, // Name is optional, no more than 15 characters
+						}},
+					}},
+				},
+			},
+		},
+	}
+
+	log := r.Log.WithValues("CustomImageDeployReconciler", "deploymentForCustomImageDeploy")
+
+	// Set the CustomImageDeploy instance as the owner of the Deployment.
+	if err := ctrl.SetControllerReference(c, dep, r.Scheme); err != nil {
+		log.Info("SetControllerReference", "error : ", err)
+	} //todo check how to get the schema
+
+	return dep
+}
+```
+
+The helper `labelsForCustomImageDeploy` generates the labels:
+
+```go
+// labelsForCustomImageDeploy returns the labels for selecting the resources
+// belonging to the given custom-image-deploy CR name.
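+// The managed_by label also lets us list everything the operator owns,
+// e.g. `kubectl get pod,svc,deploy -l managed_by=custom-image-deploy`.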
+func labelsForCustomImageDeploy(name string) map[string]string {
+	return map[string]string{"app": name, "managed_by": "custom-image-deploy"}
+}
+```
+
+### service
+
+```go
+// check if Service already exists, if not create a new one
+service := &corev1.Service{}
+log.Info("Getting the service.", "cid: ", cid)
+err = r.Client.Get(context.Background(), types.NamespacedName{Name: cid.Name, Namespace: cid.Namespace}, service)
+if errors.IsNotFound(err) {
+	svc := r.serviceForCustomImageDeploy(cid)
+	log.Info("Creating a new service.", "Namespace: ", svc.Namespace, "Name: ", svc.Name)
+	err = r.Client.Create(context.Background(), svc)
+	if err != nil {
+		log.Error(err, "Failed to create a new service", "Namespace: ", svc.Namespace, "Name: ", svc.Name)
+		return ctrl.Result{}, err
+	}
+}
+if err != nil {
+	log.Error(err, "Failed to create a new service")
+	return ctrl.Result{}, err
+}
+
+// make sure service is created(has a clusterip)
+if service.Spec.ClusterIP == "" {
+	return ctrl.Result{RequeueAfter: time.Second * 5}, nil
+}
+```
+
+### ingress
+
+```go
+// check if Ingress already exists, if not create a new one
+ing := &networking.Ingress{}
+err = r.Client.Get(context.TODO(), types.NamespacedName{Name: cid.Name, Namespace: cid.Namespace}, ing)
+if errors.IsNotFound(err) {
+	log.Info("Creating a new ingress.", "cid: ", cid)
+	ing := r.ingressForCustomImageDeploy(cid)
+	log.Info("Creating a new ingress.", "Namespace: ", ing.Namespace, "Name: ", ing.Name)
+	err = r.Client.Create(context.TODO(), ing)
+	if err != nil {
+		log.Error(err, "Failed to create a new ingress", "Namespace: ", ing.Namespace, "Name: ", ing.Name)
+		return ctrl.Result{}, err
+	}
+}
+
+if err != nil {
+	log.Error(err, "Failed to create a new ingress", "Namespace: ", ing.Namespace, "Name: ", ing.Name)
+	return ctrl.Result{}, err
+}
+
+if len(ing.Status.LoadBalancer.Ingress) == 0 {
+	return ctrl.Result{RequeueAfter: time.Second * 5}, nil
+}
+```
+
+# Test
+
+Take `nginx` as an example: we need an nginx deployment, an nginx service, and an nginx ingress. Previously we would prepare three files: `deployment.yaml`, `service.yaml`, `ingress.yaml`; now a single file, `nginx.yaml`, is enough.
+
+```yaml
+apiVersion: customimagedeploy.example.com/v1
+kind: CustomImageDeploy
+metadata:
+  name: customimagedeploy-nginx
+spec:
+  # Add fields here
+  size: 1
+  port: 80
+  image: "nginx:1.17"
+```
+
+Deploy with one command:
+
+```shell
+kubectl apply -f nginx.yaml
+```
+
+Check the result:
+
+```shell
+$ k get pod,svc,deploy,rs,ing -l managed_by=custom-image-deploy
+NAME                                           READY   STATUS    RESTARTS   AGE
+pod/customimagedeploy-nginx-7f55f7c585-pb9bm   1/1     Running   0          6m20s
+
+NAME                              TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
+service/customimagedeploy-nginx   ClusterIP   172.20.50.80   <none>        80/TCP    6m20s
+
+NAME                                      READY   UP-TO-DATE   AVAILABLE   AGE
+deployment.apps/customimagedeploy-nginx   1/1     1            1           6m20s
+
+NAME                                                 DESIRED   CURRENT   READY   AGE
+replicaset.apps/customimagedeploy-nginx-7f55f7c585   1         1         1       6m20s
+
+NAME                                        HOSTS                             ADDRESS                                                                           PORTS   AGE
+ingress.extensions/customimagedeploy-nginx   customimagedeploy-nginx.default   a123456789012345623242424424-1314151151515.elb.cn-northwest-1.amazonaws.com.cn   80      84s
+```
+
+Since we deployed kong as the api gateway, we can hit the load balancer address to verify that the newly deployed nginx service responds correctly:
+
+```shell
+curl -H "Host: customimagedeploy-nginx.default" \
+a123456789012345623242424424-1314151151515.elb.cn-northwest-1.amazonaws.com.cn
+```
+
+```html
+<!DOCTYPE html>
+<html>
+<head>
+<title>Welcome to nginx!</title>
+<style>
+    body {
+        width: 35em;
+        margin: 0 auto;
+        font-family: Tahoma, Verdana, Arial, sans-serif;
+    }
+</style>
+</head>
+<body>

+<h1>Welcome to nginx!</h1>
+<p>If you see this page, the nginx web server is successfully installed and
+working. Further configuration is required.</p>
+
+<p>For online documentation and support please refer to
+<a href="http://nginx.org/">nginx.org</a>.<br/>
+Commercial support is available at
+<a href="http://nginx.com/">nginx.com</a>.</p>
+
+<p><em>Thank you for using nginx.</em></p>

+</body>
+</html>
+```
diff --git a/src/kubernetes/security/access-s3-files-in-pod.md b/src/kubernetes/security/access-s3-files-in-pod.md
new file mode 100644
index 0000000..e66bb23
--- /dev/null
+++ b/src/kubernetes/security/access-s3-files-in-pod.md
@@ -0,0 +1,130 @@
+# Pod security to access aws s3 files
+
+- [Intro](#intro)
+- [Create the `IAM policy`](#create-the-iam-policy)
+- [Create a service account and bind the `IAM policy`](#create-a-service-account-and-bind-the-iam-policy)
+- [Update the Deployment's spec.template.spec.serviceAccountName](#update-the-deployments-spectemplatespecserviceaccountname)
+- [Refs](#refs)
+
+# Intro
+
+The security of applications deployed on `kubernetes` is worth careful thought. To improve it, we can secure the application in ways such as:
+
+- Attach an `IAM policy` to the `kubernetes` worker nodes, so pods running on a worker node can access the application's resources with the worker node's `IAM policy` permissions
+- Bind an `IAM policy` to a `kubernetes` service account, so the service account can access the application's resources with the `IAM policy` permissions
+
+That said, attaching an `IAM policy` at the worker-node level is coarse-grained and not recommended; we can secure the application at a finer granularity.
+
+# Create the `IAM policy`
+
+The following script creates an `IAM policy` that can be bound to a `kubernetes` service account, so that the service account can access the application's resources with the policy's permissions.
+
+```bash
+#!/bin/bash
+POLICY_NAME="my-very-sensitive-bucket-service-account-policy"
+K8S_SERVICE_ACCOUNT_ACCESS_S3_BUCKET_POLICY_FILE="k8s-service-account-access-s3-bucket-policy.json"
+
+cat > $K8S_SERVICE_ACCOUNT_ACCESS_S3_BUCKET_POLICY_FILE < linkerd.yaml
+# replace gcr.io image with docker hub; for linux, replace `sed -i ""` with `sed -i`
+sed -i "" -e 's/gcr.io\/linkerd-io\//woodsmur\/gcr.io_linkerd-io_/g' linkerd.yaml
+```
+
+Then install it via kubectl:
+
+```bash
+kubectl apply -f linkerd.yaml
+```
+
+After a cup of coffee linkerd is installed; if not, make it two.
+
+# 4. Open the dashboard
+
+```bash
+linkerd dashboard &
+```
+
+By default a random port such as 50750 is assigned. Open http://127.0.0.1:50750/overview; the dashboard lets you explore the internal components of the linkerd mesh.
+
+# 5. Install the sample project emojivoto
+
+```bash
+curl -sL https://run.linkerd.io/emojivoto.yml | kubectl apply -f -
+```
+
+Forward container port 80 to local port 8080, so the emojivoto service is reachable at localhost:8080.
+
+```bash
+kubectl -n emojivoto port-forward svc/web-svc 8080:80 &
+```
+
+Because it was installed via plain kubectl, no linkerd sidecar is injected yet, so Meshed shows 0/1.
+
+# 6. Inject linkerd into emojivoto
+
+```bash
+kubectl get -n emojivoto deploy -o yaml \
+| linkerd inject - \
+| kubectl apply -f -
+```
+
+Deployments injected with linkerd show up as `Meshed` on the dashboard.
+
+Injecting linkerd operates on deployments, e.g.:
+
+```bash
+linkerd inject deployment.yml \
+| kubectl apply -f -
+```
+
+It injects linkerd into the pod as a sidecar and configures iptables.
+
+```bash
+$ kubectl get -n emojivoto deploy -o yaml | linkerd inject - | kubectl apply -f -
+
+deployment "emoji" injected
+deployment "vote-bot" injected
+deployment "voting" injected
+deployment "web" injected
+
+deployment.extensions/emoji configured
+deployment.extensions/vote-bot configured
+deployment.extensions/voting configured
+deployment.extensions/web configured
+```
+
+# 7. View metrics in grafana
+
+Visit `emojivoto` a few times; the grafana dashboard at http://127.0.0.1:50750/grafana then shows the service's history, such as Success rate, Request rate, and Latency distribution percentiles.
+
+# 8. Debugging
+
+For demonstration purposes, the demo deliberately plants some bugs in the code.
+
+Open `deployment/web`: `deploy/web` receives requests from `deploy/vote-bot`, and in turn sends requests to `deploy/emoji` and `deploy/voting`.
+
+TODO:
+
+Notably, the success rates of `deploy/vote-bot` and `deploy/voting` are both below 100%. Since `deploy/vote-bot` calls `deploy/web`, which in turn calls `deploy/voting`,
+we can also see that, from web's point of view, `deploy/vote-bot` (PATH `/api/vote`) is the inbound edge while `deploy/voting` (PATH `/emojivoto.v1.VotingService/VoteDoughnut`) is the outbound edge, so we can suspect the error originates from `deploy/voting`.
+
+We can also click tap and narrow down to requests against just this endpoint
+`/emojivoto.v1.VotingService/VoteDoughnut` to localize the error further.
+
+From the Unknown messages in the graph and grpc's documentation for the Unknown code (https://godoc.org/google.golang.org/grpc/codes#Code), we can tell something is wrong with this endpoint.
+
+We can verify this in the code: https://github.com/BuoyantIO/emojivoto/blob/master/emojivoto-voting-svc/api/api.go#L22, where the doughnut vote returns an error, pinpointing the trap.
+
+# Refs
+
+https://linkerd.io/2/tasks/debugging-your-service/
diff --git a/src/mui/upgrade/upgrade-from-v4-to-v5.md b/src/mui/upgrade/upgrade-from-v4-to-v5.md
new file mode 100644
index 0000000..4a842db
--- /dev/null
+++ b/src/mui/upgrade/upgrade-from-v4-to-v5.md
@@ -0,0 +1,81 @@
+# Title: Upgrading Material-UI from v4 to v5: A Comprehensive Guide
+
+
+
+- [Introduction:](#introduction)
+- [Step 1: Upgrade React to 17.0.0:](#step-1-upgrade-react-to-1700)
+- [Step 2: Update MUI packages and peer dependencies:](#step-2-update-mui-packages-and-peer-dependencies)
+- [Step 3: Run codemods:](#step-3-run-codemods)
+- [Step 4: Fix broken code:](#step-4-fix-broken-code)
+- [Step 5: Replace all imports:](#step-5-replace-all-imports)
+- [Step 6: Test and finalize:](#step-6-test-and-finalize)
+- [References:](#references)
+
+
+
+# Introduction:
+
+Material-UI is a popular React component library that provides a set of pre-built UI components for building modern and responsive web applications. With the release of Material-UI v5, there have been significant changes and improvements, making it essential for developers to upgrade from v4 to v5. In this blog post, we will walk you through the step-by-step process of upgrading Material-UI to its latest version.
+
+# Step 1: Upgrade React to 17.0.0:
+
+To start the upgrade process, it is necessary to update React to version 17.0.0 or above. This can be done using the following command:
+
+```bash
+yarn upgrade @material-ui/core@^4.11.2 react@^17.0.0
+```
+
+# Step 2: Update MUI packages and peer dependencies:
+
+Next, we need to update the Material-UI packages and their peer dependencies. Run the following commands to install the required packages:
+
+```bash
+yarn add @mui/material @mui/styles
+yarn add @mui/lab
+yarn add @mui/icons-material
+yarn add @emotion/react @emotion/styled
+```
+
+# Step 3: Run codemods:
+
+Material-UI provides codemods that automatically adjust your code to account for breaking changes in v5. These codemods help in migrating your codebase efficiently. Run the following command to apply the preset-safe codemod:
+
+```bash
+npx @mui/codemod v5.0.0/preset-safe
+```
+
+Additionally, you can run specific codemods for individual components or pages if needed. For example:
+
+```bash
+npx @mui/codemod v5.0.0/preset-safe components
+npx @mui/codemod v5.0.0/preset-safe pages
+```
+
+# Step 4: Fix broken code:
+
+After running the codemods, it's important to review your codebase for any broken code. One common issue is the usage of the `theme.spacing()` function, which has changed in v5. Replace instances of `theme.spacing(2)` with `2`, `theme.spacing(4)` with `4`, and so on, to fix this issue.
+
+# Step 5: Replace all imports:
+
+With the release of v5, the package names have changed from `@material-ui/*` to `@mui/*`. To ensure compatibility with the latest version, replace all imports accordingly.
Here are some examples: + +```bash +yarn remove @material-ui/core +yarn remove @material-ui/icons +yarn remove @material-ui/lab +yarn remove @material-ui/pickers + +yarn remove @mui/x-data-grid +yarn add @mui/x-data-grid +``` + +# Step 6: Test and finalize: + +After completing the above steps, thoroughly test your application to ensure that it runs without any errors. Make any necessary adjustments or fixes as required. Once you are confident that your application is functioning correctly, commit the changes and finalize the upgrade process. + +Conclusion: +Upgrading Material-UI from v4 to v5 is an important step to take advantage of the latest features, bug fixes, and improvements. By following the steps outlined in this guide, you can smoothly upgrade your Material-UI-based application to the latest version. Remember to thoroughly test your application after the upgrade to ensure everything is functioning as expected. Happy coding with Material-UI v5! + +# References: + +- Material-UI Migration Guide: [Migrating to v5: getting started](https://mui.com/material-ui/migration/migration-v4/) diff --git a/src/react/functional-component/render-all-chartjs-charts.md b/src/react/functional-component/render-all-chartjs-charts.md new file mode 100644 index 0000000..d1b03c9 --- /dev/null +++ b/src/react/functional-component/render-all-chartjs-charts.md @@ -0,0 +1,448 @@ +# Render all chartjs charts in react typescript tailwindcss projects + +In this tutorial, we'll show you how to render all chartjs charts in React project. + +# Table of Contents + + + +- [Define all chartjs charts](#define-all-chartjs-charts) +- [Write a funcitonal component](#write-a-funcitonal-component) +- [Write a chartjs wrapper](#write-a-chartjs-wrapper) +- [Final Code](#final-code) +- [Demo](#demo) +- [Optimize](#optimize) + * [chatgpt says](#chatgpt-says) + + + +# Define all chartjs charts + +We copy all chartjs official example codes and put them in `component/chartjsofficial` directory. We also define title and component for each example chart. 
+ +```jsx +import { VerticalBarChart } from "@/component/chartjsofficial/verticalbarchart"; +import { HorizontalBarChart } from "@/component/chartjsofficial/horizontalbarchart"; +import { StackedBarChart } from "@/component/chartjsofficial/stackedbarchart"; +import { GroupedBarChart } from "@/component/chartjsofficial/groupedbarchart"; +import { AreaChart } from "@/component/chartjsofficial/areachart"; +import { LineChart } from "@/component/chartjsofficial/linechart"; +import { MultiAxisLineChart } from "@/component/chartjsofficial/multiaxislinechart"; +import { PieChart } from "@/component/chartjsofficial/piechart"; +import { DoughnutChart } from "@/component/chartjsofficial/doughnutchart"; +import { PolarAreaChart } from "@/component/chartjsofficial/polarareachart"; +import { RadarChart } from "@/component/chartjsofficial/radarchart"; +import { ScatterChart } from "@/component/chartjsofficial/scatterchart"; +import { BubbleChart } from "@/component/chartjsofficial/bubblechart"; +import { MultiTypeChart } from "@/component/chartjsofficial/multitypechart"; +import { ChartEvents } from "@/component/chartjsofficial/chartevents"; +import { ChartRef } from "@/component/chartjsofficial/chartref"; +import { GradientChart } from "@/component/chartjsofficial/gradientchart"; +import { ChartEventsSingleDataset } from "@/component/chartjsofficial/charteventssingledataset"; +import { ChartEventsSingleDatasetOutsideDatasource } from "@/component/chartjsofficial/charteventssingledatasetoutsidedatasource"; + +const components = [ + { + title: "VerticalBarChart", + component: VerticalBarChart, + }, + { + title: "HorizontalBarChart", + component: HorizontalBarChart, + }, + { + title: "StackedBarChart", + component: StackedBarChart, + }, + { + title: "GroupedBarChart", + component: GroupedBarChart, + }, + { + title: "AreaChart", + component: AreaChart, + }, + { + title: "LineChart", + component: LineChart, + }, + { + title: "MultiAxisLineChart", + component: MultiAxisLineChart, + }, + { + title: "DoughnutChart", + component: DoughnutChart, + }, + { + title: "PolarAreaChart", + component: PolarAreaChart, + }, + { + title: "RadarChart", + component: RadarChart, + }, + { + title: "ScatterChart", + component: ScatterChart, + }, + { + title: "BubbleChart", + component: BubbleChart, + }, + { + title: "ScatterChart", + component: ScatterChart, + }, + { + title: "MultiTypeChart", + component: MultiTypeChart, + }, + { + title: "ChartEvents", + component: ChartEvents, + }, + { + title: "ChartRef", + component: ChartRef, + }, + { + title: "GradientChart", + component: GradientChart, + }, + { + title: "ChartEventsSingleDataset", + component: ChartEventsSingleDataset, + }, +]; +``` + +# Write a funcitonal component + +In order to render all components, we write a functional component to take array of component with title and render them in one place. + +```jsx +type Component = { + title: string, + component: React.FunctionComponent, +}; +type ChartProps = { + components: Component[], +}; + +const ComponentWrapper: React.FC = ({ components }) => { + return ( +
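+    // render each chart inside a titled ChartWrapper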
+    <div>
+      {components.map((component, index) => {
+        return (
+          <ChartWrapper key={index} title={component.title}>
+            <component.component />
+          </ChartWrapper>
+        );
+      })}
+    </div>
+  );
+};
+```
+
+# Write a chartjs wrapper
+
+In order to add a title to each chart, we write a functional component that wraps every chart with an `h1`. Note we define the `h1` style using tailwindcss, setting the div size (`h-96`), text size (`text-3xl`), etc.
+
+```jsx
+export const ChartWrapper: React.FC<{
+  title: string,
+  children: React.ReactNode,
+}> = ({ title, children }) => {
+  return (
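+    // fixed-height (h-96) wrapper with a text-3xl title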
+    <div className="h-96">
+      <h1 className="text-3xl">{title}</h1>
+      {children}
+    </div>
+ ); +}; +``` + +# Final Code + +First, import all chartjs components and define components to render. + +```jsx +import { VerticalBarChart } from "@/component/chartjsofficial/verticalbarchart"; +import { HorizontalBarChart } from "@/component/chartjsofficial/horizontalbarchart"; +import { StackedBarChart } from "@/component/chartjsofficial/stackedbarchart"; +import { GroupedBarChart } from "@/component/chartjsofficial/groupedbarchart"; +import { AreaChart } from "@/component/chartjsofficial/areachart"; +import { LineChart } from "@/component/chartjsofficial/linechart"; +import { MultiAxisLineChart } from "@/component/chartjsofficial/multiaxislinechart"; +import { PieChart } from "@/component/chartjsofficial/piechart"; +import { DoughnutChart } from "@/component/chartjsofficial/doughnutchart"; +import { PolarAreaChart } from "@/component/chartjsofficial/polarareachart"; +import { RadarChart } from "@/component/chartjsofficial/radarchart"; +import { ScatterChart } from "@/component/chartjsofficial/scatterchart"; +import { BubbleChart } from "@/component/chartjsofficial/bubblechart"; +import { MultiTypeChart } from "@/component/chartjsofficial/multitypechart"; +import { ChartEvents } from "@/component/chartjsofficial/chartevents"; +import { ChartRef } from "@/component/chartjsofficial/chartref"; +import { GradientChart } from "@/component/chartjsofficial/gradientchart"; +import { ChartEventsSingleDataset } from "@/component/chartjsofficial/charteventssingledataset"; +import { ChartEventsSingleDatasetOutsideDatasource } from "@/component/chartjsofficial/charteventssingledatasetoutsidedatasource"; + +const components = [ + { + title: "VerticalBarChart", + component: VerticalBarChart, + }, + { + title: "HorizontalBarChart", + component: HorizontalBarChart, + }, + { + title: "StackedBarChart", + component: StackedBarChart, + }, + { + title: "GroupedBarChart", + component: GroupedBarChart, + }, + { + title: "AreaChart", + component: AreaChart, + }, + { + title: "LineChart", + component: LineChart, + }, + { + title: "MultiAxisLineChart", + component: MultiAxisLineChart, + }, + { + title: "DoughnutChart", + component: DoughnutChart, + }, + { + title: "PolarAreaChart", + component: PolarAreaChart, + }, + { + title: "RadarChart", + component: RadarChart, + }, + { + title: "ScatterChart", + component: ScatterChart, + }, + { + title: "BubbleChart", + component: BubbleChart, + }, + { + title: "ScatterChart", + component: ScatterChart, + }, + { + title: "MultiTypeChart", + component: MultiTypeChart, + }, + { + title: "ChartEvents", + component: ChartEvents, + }, + { + title: "ChartRef", + component: ChartRef, + }, + { + title: "GradientChart", + component: GradientChart, + }, + { + title: "ChartEventsSingleDataset", + component: ChartEventsSingleDataset, + }, +]; +``` + +Next, write a function called `ChartWrapper` to wrapper up react chartjs chart components with a `h1` as the title. + +```jsx +export const ChartWrapper: React.FC<{ + title: string, + children: React.ReactNode, +}> = ({ title, children }) => { + return ( +
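+    // fixed-height (h-96) wrapper with a text-3xl title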
+    <div className="h-96">
+      <h1 className="text-3xl">{title}</h1>
+      {children}
+    </div>
+  );
+};
+```
+
+Then, write a functional component that takes an array of components and renders them using the `ChartWrapper` component.
+
+```jsx
+type Component = {
+  title: string,
+  component: React.FunctionComponent,
+};
+type ChartProps = {
+  components: Component[],
+};
+
+const ComponentWrapper: React.FC<ChartProps> = ({ components }) => {
+  return (
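+    // render each chart inside a titled ChartWrapper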
+    <div>
+      {components.map((component, index) => {
+        return (
+          <ChartWrapper key={index} title={component.title}>
+            <component.component />
+          </ChartWrapper>
+        );
+      })}
+    </div>
+  );
+};
+```
+
+Finally, in the `App` component, we iterate over the components and render them in a div with the `grid` class.
+
+```jsx
+export default function App() {
+  return (
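+    // page layout: a title header plus the grid of charts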
+    <div>
+      <Head>
+        <title>ChartJS + NextJS + TailwindCSS</title>
+      </Head>
+      <main>
+        <div>
+          <h1 className="text-3xl">
+            ChartJS + NextJS + TailwindCSS
+          </h1>
+        </div>
+        <div className="grid">
+          <ComponentWrapper components={components} />
+        </div>
+      </main>
+    </div>
+  );
+}
+```
+
+# Demo
+
+![chartjs-react-chartjs-2-nextjs-tailwind-typescript-using-grid](https://user-images.githubusercontent.com/74223747/224673738-9edb7c6d-5e49-44a2-9bb0-00fa9eb5f608.png)
+
+# Optimize
+
+Note that the `ComponentWrapper` function component above is rather verbose. We could write it in another way.
+
+Here is the old way.
+
+```jsx
+// NOTE: OK but verbose
+const ComponentWrapper: React.FC<ChartProps> = ({ components }) => {
+  return (
+    <div>
+      {components.map((c, index) => {
+        return (
+          <ChartWrapper key={index} title={c.title}>
+            <c.component />
+          </ChartWrapper>
+        );
+      })}
+    </div>
+  );
+};
+```
+
+The new way:
+
+```jsx
+// NOTE: concise
+const ComponentWrapperChatGPTAgain: React.FC<ChartProps> = ({ components }) => {
+  return (
+    <div>
+      {components.map((c, index) => {
+        const { title, component: C } = c;
+
+        return (
+          <ChartWrapper key={index} title={title}>
+            <C />
+          </ChartWrapper>
+        );
+      })}
+    </div>
+  );
+};
+```
+
+To render the list of chart components with titles and properties, we wrote two React component functions. While both functions produce the same output, their implementation approaches are different.
+
+The first function, `ComponentWrapper`, works, but the downside is that it is quite verbose. It uses curly braces and a return statement to wrap the component inside `<ChartWrapper>`, and it also references `c.title` and `c.component` multiple times within the function body.
+
+The second function, `ComponentWrapperChatGPTAgain`, is a more concise implementation. It uses ES6 destructuring syntax to get title and component from the components object, which avoids repeated use of `c.title` and `c.component`. Additionally, it uses a more descriptive variable name `C` for the component.
+
+Clearly, the second implementation is more succinct, easier to read and maintain, and also makes it easier to add new properties or methods. Therefore, we recommend using the second function, `ComponentWrapperChatGPTAgain`, as a more efficient programming practice.
+
+To summarize, the primary difference between these two implementations is that the first one is verbose, which makes it harder to read and maintain, while the second one is more concise and easier to read and maintain. The second function also uses destructuring, a useful syntax for improving readability by eliminating repetition. A concise, readable implementation streamlines your code and makes it more efficient.
+
+## chatgpt says
+
+Comparing Two React Component Functions
+
+In this post, we will compare and contrast two React component functions that are used to render a list of chart components with titles and properties. While both functions ultimately produce the same result, their implementation approaches are different.
+
+Function 1: ComponentWrapper
+
+```jsx
+const ComponentWrapper: React.FC<ChartProps> = ({ components }) => {
+  return (
+    <div>
+      {components.map((c, index) => {
+        return (
+          <ChartWrapper key={index} title={c.title}>
+            <c.component />
+          </ChartWrapper>
+        );
+      })}
+    </div>
+  );
+};
+```
+
+This function is a good approach, but it is quite verbose. It uses curly braces and a return statement to wrap the component inside `<ChartWrapper>`, and it also references `c.title` and `c.component` multiple times within the function body. It might be a bit difficult to read and maintain for developers, especially with more complex components.
+
+Function 2: ComponentWrapperChatGPTAgain
+
+```jsx
+const ComponentWrapperChatGPTAgain: React.FC<ChartProps> = ({ components }) => {
+  return (
+    <div>
+      {components.map((c, index) => {
+        const { title, component: C } = c;
+
+        return (
+          <ChartWrapper key={index} title={title}>
+            <C />
+          </ChartWrapper>
+        );
+      })}
+    </div>
+ ); +}; +``` + +This function, on the other hand, is a more concise implementation. It uses ES6 destructuring syntax to get title and component from the components object, which avoids repeated use of `c.title` and `c.component`. Additionally, it uses a more descriptive variable name `C` for the component. + +The second implementation is more succinct, easier to read and maintain, and also makes it easier to add new properties or methods in the future. Therefore, we recommend using the second function `ComponentWrapperChatGPTAgain` as a more efficient programming practice. + +Conclusion + +The primary difference between these two implementation methods is that the first one is verbose, which makes it difficult to read and maintain, while the second one is more concise and easier to read and maintain. The second function also uses destructuring, which is a useful syntax for improving readability by eliminating repetition. By using a concise, readable implementation, code can be streamlined and more efficient. diff --git a/src/rust/actix/prometheus-support-to-actix-web.md b/src/rust/actix/prometheus-support-to-actix-web.md new file mode 100644 index 0000000..e284235 --- /dev/null +++ b/src/rust/actix/prometheus-support-to-actix-web.md @@ -0,0 +1,163 @@ +# prometheus support for actix-web project + +- [Init an empty actix-web project with tokio runtime](#init-an-empty-actix-web-project-with-tokio-runtime) +- [Add prometheus support actix-web project](#add-prometheus-support-actix-web-project) +- [Enable process features](#enable-process-features) +- [How to use](#how-to-use) +- [Run actix-web server](#run-actix-web-server) +- [Request metrics endpoint](#request-metrics-endpoint) + +## Init an empty actix-web project with tokio runtime + +```bash +# init project +cargo init actix-web-t +# add dependencies +cargo add actix-web +cargo add tokio --features full +``` + +## Add prometheus support actix-web project + +```bash +cargo add actix_web_prometheus +``` + +`actix-web-prometheus` is a middleware inspired by and forked from actix-web-prom. By default three metrics are tracked (this assumes the namespace `actix_web_prometheus`): + +- `actix_web_prometheus_incoming_requests` (labels: endpoint, method, status): the total number of HTTP requests handled by the actix `HttpServer`. +- `actix_web_prometheus_response_code` (labels: endpoint, method, statuscode, type): Response codes of all HTTP requests handled by the actix `HttpServer`. +- `actix_web_prometheus_response_time` (labels: endpoint, method, status): Total the request duration of all HTTP requests handled by the actix `HttpServer`. + +## Enable process features + +You could also enable `process` features when adding `actix_web_prometheus` crate, which means process metrics will also be collected. + +```bash +cargo add actix_web_prometheus --features process +``` + +Output: + +```bash + Updating crates.io index +warning: translating `actix_web_prometheus` to `actix-web-prometheus` + Adding actix-web-prometheus v0.1.2 to dependencies. + Features: + + process +``` + +## How to use + +Here is an simple example of how to integrate this middleware into `actix-web` project. 
+ +`main.rs` + +```rust +use actix_web::{http, web, App, HttpServer, Responder, Result, HttpResponse}; +use actix_web_prometheus::PrometheusMetricsBuilder; +use serde::{Deserialize, Serialize}; + + +#[actix_web::main] +async fn main() -> std::io::Result<()> { + let prometheus = PrometheusMetricsBuilder::new("api") + .endpoint("/metrics") + .build() + .unwrap(); + + HttpServer::new(move || { + App::new() + .wrap(prometheus.clone()) + }) + .bind(("127.0.0.1", 8080))? + .run() + .await +} +``` + +## Run actix-web server + +```bash +cargo run +``` + +Output: + +```bash +warning: unused imports: `HttpResponse`, `Responder`, `Result`, `http`, `web` + --> src/main.rs:1:17 + | +1 | use actix_web::{http, web, App, HttpServer, Responder, Result, HttpResponse}; + | ^^^^ ^^^ ^^^^^^^^^ ^^^^^^ ^^^^^^^^^^^^ + | + = note: `#[warn(unused_imports)]` on by default + +warning: unused imports: `Deserialize`, `Serialize` + --> src/main.rs:3:13 + | +3 | use serde::{Deserialize, Serialize}; + | ^^^^^^^^^^^ ^^^^^^^^^ + +warning: `actix-web-t` (bin "actix-web-t") generated 2 warnings + Finished dev [unoptimized + debuginfo] target(s) in 0.26s + Running `target/debug/actix-web-t` +``` + +## Request metrics endpoint + +Build and run actix-web project, we can send request to `/metrics` endpoint. + +```bash +curl 0:8080/metrics +``` + +Ouput: + +```bash +# HELP api_incoming_requests Incoming Requests +# TYPE api_incoming_requests counter +api_incoming_requests{endpoint="/metrics",method="GET",status="200"} 28 +# HELP api_response_code Response Codes +# TYPE api_response_code counter +api_response_code{endpoint="/metrics",method="GET",statuscode="200",type="200"} 28 +# HELP api_response_time Response Times +# TYPE api_response_time histogram +api_response_time_bucket{endpoint="/metrics",method="GET",status="200",le="0.005"} 28 +api_response_time_bucket{endpoint="/metrics",method="GET",status="200",le="0.01"} 28 +api_response_time_bucket{endpoint="/metrics",method="GET",status="200",le="0.025"} 28 +api_response_time_bucket{endpoint="/metrics",method="GET",status="200",le="0.05"} 28 +api_response_time_bucket{endpoint="/metrics",method="GET",status="200",le="0.1"} 28 +api_response_time_bucket{endpoint="/metrics",method="GET",status="200",le="0.25"} 28 +api_response_time_bucket{endpoint="/metrics",method="GET",status="200",le="0.5"} 28 +api_response_time_bucket{endpoint="/metrics",method="GET",status="200",le="1"} 28 +api_response_time_bucket{endpoint="/metrics",method="GET",status="200",le="2.5"} 28 +api_response_time_bucket{endpoint="/metrics",method="GET",status="200",le="5"} 28 +api_response_time_bucket{endpoint="/metrics",method="GET",status="200",le="10"} 28 +api_response_time_bucket{endpoint="/metrics",method="GET",status="200",le="+Inf"} 28 +api_response_time_sum{endpoint="/metrics",method="GET",status="200"} 0.03155173499999999 +api_response_time_count{endpoint="/metrics",method="GET",status="200"} 28 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 66 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 50000 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 23 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 6410240 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. 
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1677656707
+# HELP process_threads Number of OS threads in the process.
+# TYPE process_threads gauge
+process_threads 3
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 165015552
+```
+
+Notice, on MacOS, process metrics are not exported.
diff --git a/src/rust/actix/send-http-request-in-handle-function-and-started-function-when-using-actix-crate.md b/src/rust/actix/send-http-request-in-handle-function-and-started-function-when-using-actix-crate.md
new file mode 100644
index 0000000..2396bc5
--- /dev/null
+++ b/src/rust/actix/send-http-request-in-handle-function-and-started-function-when-using-actix-crate.md
@@ -0,0 +1,879 @@
+# Send Http Request in Handle function or in started function When Using Actix crate
+
+- [Send Http Request in Handle function](#send-http-request-in-handle-function)
+  - [Return type `Result<(), ()>`](#return-type-result-)
+  - [Return type `Result<usize, ()>`](#return-type-resultusize-)
+  - [Return type `Result<u16, ()>`](#return-type-resultu16-)
+  - [Return type `Result<String, ()>`](#return-type-resultstring-)
+- [Send Http Request in started function](#send-http-request-in-started-function)
+- [Full source code](#full-source-code)
+  - [Send http request in `handle` function](#send-http-request-in-handle-function)
+  - [Send http request in `started` function](#send-http-request-in-started-function)
+- [Appendix](#appendix)
+  - [Actor trait](#actor-trait)
+
+# Send Http Request in Handle function
+
+When using actors to develop concurrent applications, you may need to run asynchronous functions, such as sending HTTP requests, when an actor is started or when handling specific messages.
+
+We know there's a method called `started` when implementing the `Actor` trait. The `Actor` trait is defined as follows:
+
+```rust
+pub trait Actor: Sized + Unpin + 'static {
+    /// Actor execution context type
+    type Context: ActorContext;
+
+    /// Called when an actor gets polled the first time.
+    fn started(&mut self, ctx: &mut Self::Context) {}
+
+    /// Called after an actor is in `Actor::Stopping` state.
+    ///
+    /// There can be several reasons for stopping:
+    ///
+    /// - `Context::stop` gets called by the actor itself.
+    /// - All addresses to the current actor get dropped and no more
+    ///   evented objects are left in the context.
+    ///
+    /// An actor can return from the stopping state to the running
+    /// state by returning `Running::Continue`.
+    fn stopping(&mut self, ctx: &mut Self::Context) -> Running {
+        Running::Stop
+    }
+
+    /// Called after an actor is stopped.
+    ///
+    /// This method can be used to perform any needed cleanup work or
+    /// to spawn more actors. This is the final state, after this
+    /// method got called, the actor will be dropped.
+    fn stopped(&mut self, ctx: &mut Self::Context) {}
+
+    /// Start a new asynchronous actor, returning its address.
+    fn start(self) -> Addr<Self>
+    where
+        Self: Actor<Context = Context<Self>>,
+    {
+        Context::new().run(self)
+    }
+
+    /// Construct and start a new asynchronous actor, returning its
+    /// address.
+    ///
+    /// This is constructs a new actor using the `Default` trait, and
+    /// invokes its `start` method.
+    fn start_default() -> Addr<Self>
+    where
+        Self: Actor<Context = Context<Self>> + Default,
+    {
+        Self::default().start()
+    }
+
+    /// Start new actor in arbiter's thread.
+    fn start_in_arbiter<F>(wrk: &ArbiterHandle, f: F) -> Addr<Self>
+    where
+        Self: Actor<Context = Context<Self>>,
+        F: FnOnce(&mut Context<Self>) -> Self + Send + 'static,
+    {
+        let (tx, rx) = channel::channel(DEFAULT_CAPACITY);
+
+        // create actor
+        wrk.spawn_fn(move || {
+            let mut ctx = Context::with_receiver(rx);
+            let act = f(&mut ctx);
+            let fut = ctx.into_future(act);
+
+            actix_rt::spawn(fut);
+        });
+
+        Addr::new(tx)
+    }
+
+    /// Start a new asynchronous actor given a `Context`.
+    ///
+    /// Use this method if you need the `Context` object during actor
+    /// initialization.
+    fn create<F>(f: F) -> Addr<Self>
+    where
+        Self: Actor<Context = Context<Self>>,
+        F: FnOnce(&mut Context<Self>) -> Self,
+    {
+        let mut ctx = Context::new();
+        let act = f(&mut ctx);
+        ctx.run(act)
+    }
+}
+```
+
+The `started` function is called when the actor starts, but if we call an async function in `started` (e.g. sending an http request), we'll get an error:
+
+```rust
+error[E0728]: `await` is only allowed inside `async` functions and blocks
+  --> src/bin/call-async-in-non-async-function.rs:25:57
+   |
+22 | /     fn handle(&mut self, _: Msg, _: &mut Context<Self>) -> Self::Result {
+23 | |         // async move { Ok(()) }
+24 | |
+25 | |         let response = reqwest::get("https://hyper.rs").await.unwrap();
+   | |                                                         ^^^^^ only allowed inside `async` functions and blocks
+...  |
+35 | |         // })
+36 | |     }
+   | |_____- this is not `async`
+
+For more information about this error, try `rustc --explain E0728`.
+warning: `actix_example` (bin "call-async-in-non-async-function") generated 6 warnings
+error: could not compile `actix_example` (bin "call-async-in-non-async-function") due to previous error; 6 warnings emitted
+```
+
+In Rust, `await` can only be used _within_ an async function or an async block. You can refer to the [Async book](https://rust-lang.github.io/async-book/03_async_await/01_chapter.html) for more details.
+
+The solution is easy; I'll explain it step by step.
+
+## Return type `Result<(), ()>`
+
+Let's start with calling an async function or async block in the `handle` method.
+
+We can specify the result to be a `ResponseFuture<Result<(), ()>>` and wrap the async block with `Box::pin`.
+
+```rust
+#[derive(Message)]
+#[rtype(result = "Result<(), ()>")]
+struct Msg;
+
+struct MyActor2;
+
+impl Actor for MyActor2 {
+    type Context = Context<Self>;
+}
+
+impl Handler<Msg> for MyActor2 {
+    type Result = ResponseFuture<Result<(), ()>>;
+
+    fn handle(&mut self, _: Msg, _: &mut Context<Self>) -> Self::Result {
+        Box::pin(async move {
+            // Some async computation
+            println!("Box::pin called");
+            Ok(())
+        })
+    }
+}
+```
+
+As we use the `ResponseFuture<Result<(), ()>>` type for the `Handler` trait's associated type `Result`, we can return a boxed future using the `Box::pin` function in the `handle` method.
+
+## Return type `Result<usize, ()>`
+
+Now, let's change the return type from `Result<(), ()>` to `Result<usize, ()>`, which will return a `usize` from the async block.
+
+```rust
+#[derive(Message)]
+#[rtype(result = "Result<usize, ()>")]
+struct Msg3;
+
+struct MyActor3;
+
+impl Actor for MyActor3 {
+    type Context = Context<Self>;
+}
+
+impl Handler<Msg3> for MyActor3 {
+    type Result = ResponseActFuture<Self, Result<usize, ()>>;
+
+    fn handle(&mut self, _: Msg3, _: &mut Context<Self>) -> Self::Result {
+        Box::pin(
+            async {
+                println!("will return 42");
+                // Some async computation
+                42
+            }
+            .into_actor(self) // converts future to ActorFuture
+            .map(|res, _act, _ctx| {
+                println!("map");
+                // Do some computation with actor's state or context
+                Ok(res)
+            }),
+        )
+    }
+}
+```
+
+We need changes in 3 places:
+
+- Use the `#[rtype(result = "Result<usize, ()>")]` macro on `struct Msg3`
+- Change the associated type from `ResponseFuture<Result<(), ()>>` to `ResponseActFuture<Self, Result<usize, ()>>`
+- Change the async block to return a `usize` value
+
+## Return type `Result<u16, ()>`
+
+If we care about the status code of the http response, what should we do? Obviously, we can declare a `Result<u16, ()>` type. Here `u16` represents the status code of the http response.
+
+```rust
+#[derive(Message)]
+#[rtype(result = "Result<u16, ()>")]
+// return http status code
+struct Msg4;
+
+struct MyActor4;
+
+impl Actor for MyActor4 {
+    type Context = Context<Self>;
+}
+
+impl Handler<Msg4> for MyActor4 {
+    // type Result = ResponseActFuture<Self, Result<usize, ()>>;
+    type Result = ResponseActFuture<Self, Result<u16, ()>>;
+
+    fn handle(&mut self, _: Msg4, _: &mut Context<Self>) -> Self::Result {
+        // let res = reqwest::get("https://hyper.rs").await?;
+        // println!("Status: {}", res.status());
+        // let body = res.text().await?;
+
+        Box::pin(
+            async {
+                println!("will return 42");
+                let status_code = match reqwest::get("https://hyper.rs").await {
+                    Ok(response) => {
+                        println!("Got status from hyper.rs {}", response.status());
+                        response.status().as_u16()
+                    },
+                    Err(err) => {
+                        println!("get response error : {err}");
+                        42 as u16
+                    },
+                };
+                status_code
+            }
+            .into_actor(self) // converts future to ActorFuture
+            .map(|res, _act, _ctx| {
+                println!("result in map process : {res}");
+                // Do some computation with actor's state or context
+                Ok(res)
+            }),
+        )
+    }
+}
+```
+
+In the async block, we return the status code using `response.status().as_u16()`.
+
+## Return type `Result<String, ()>`
+
+What if we want to use the response body? It's quite easy to change from `u16` to `String`. The code looks like this:
+
+```rust
+#[derive(Message)]
+#[rtype(result = "Result<String, ()>")]
+// return http response body
+struct Msg5;
+
+struct MyActor5;
+
+impl Actor for MyActor5 {
+    type Context = Context<Self>;
+}
+
+impl Handler<Msg5> for MyActor5 {
+    // type Result = ResponseActFuture<Self, Result<u16, ()>>;
+    type Result = ResponseActFuture<Self, Result<String, ()>>;
+
+    fn handle(&mut self, _: Msg5, _: &mut Context<Self>) -> Self::Result {
+        // let res = reqwest::get("https://hyper.rs").await?;
+        // println!("Status: {}", res.status());
+        // let body = res.text().await?;
+
+        Box::pin(
+            async {
+                let body = match reqwest::get("https://hyper.rs").await {
+                    Ok(response) => {
+                        println!("Response Ok from hyper.rs {}", response.status());
+                        match response.text().await {
+                            Ok(body) => body,
+                            Err(err) => {
+                                format!("Convert Response to string error : {err}")
+                            }
+                        }
+                    },
+                    Err(err) => {
+                        format!("Response error from hyper.rs, error : {err}")
+                    },
+                };
+                body
+            }
+            .into_actor(self) // converts future to ActorFuture
+            .map(|res, _act, _ctx| {
+                println!("result in map process : {res}");
+                // Do some computation with actor's state or context
+                Ok(res)
+            }),
+        )
+    }
+}
+```
+
+Now, we use `response.text().await` to convert the response to a string and return the response body for later use.
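+
+Unlike `do_send`, `send` returns a future that resolves to the handler's result, so a caller can await the response body. Here is a minimal usage sketch, assuming the `MyActor5`/`Msg5` definitions above and the same `System` setup as the full source code below:
+
+```rust
+// Minimal sketch: start MyActor5, send Msg5, and await the response body.
+let sys = actix::System::new();
+sys.block_on(async {
+    let addr = MyActor5 {}.start();
+    match addr.send(Msg5 {}).await {
+        Ok(Ok(body)) => println!("body length: {}", body.len()),
+        Ok(Err(())) => eprintln!("handler returned an error"),
+        Err(err) => eprintln!("mailbox error: {err}"),
+    }
+    actix::System::current().stop();
+});
+```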
+ +# Send Http Request in started function + +If we want to store some state in actor and initialize it when actor is started, we can use `context.wait` to wait an async block, turn it into an actor through `into_actor` and store the return value of async block in `then` method. + +```rust +#[derive(Clone)] +struct MyActor { + status_code: Option, +} + +impl MyActor { + fn print_status_code(&mut self, context: &mut Context) { + println!("status code: {:?}", self.status_code); + } +} + +impl Actor for MyActor { + type Context = Context; + + fn started(&mut self, context: &mut Context) { + println!("In started"); + // ✅NOTE: This will run + context.wait( + async move { + // send http reqwest + let status_code = match reqwest::get("https://hyper.rs").await { + Ok(response) => { + println!( + "Got status from hyper.rs {}", + response.status() + ); + response.status().as_u16() + } + Err(err) => { + println!("get response error : {err}"); + 42 as u16 + } + }; + println!("status code: {status_code}"); + + status_code + } + .into_actor(self) + .then(|output, s, ctx| { + s.status_code = Some(output); + fut::ready(()) + }), + ); + + IntervalFunc::new(Duration::from_millis(5000), Self::print_status_code) + .finish() + .spawn(context); + + context.run_later(Duration::from_millis(20000), |_, _| { + System::current().stop() + }); + } +} +``` + +In this example, we store status code as `Option` in `MyActor` and save it then method from `ActorFutureExt` trait: + +```rust +fn started(&mut self, context: &mut Context) { + context.wait( + async move { + // send http reqwest + let status_code = match reqwest::get("https://hyper.rs").await { + Ok(response) => { + response.status().as_u16() + } + Err(err) => { + 42 as u16 + } + }; + status_code + } + .into_actor(self) + .then(|output, s, ctx| { + s.status_code = Some(output); + fut::ready(()) + }), + ); +} +``` + +Here is the definition of `ActorFutureExt` trait. + +```rust +pub trait ActorFutureExt: ActorFuture { + /// Map this future's result to a different type, returning a new future of + /// the resulting type. + fn map(self, f: F) -> Map + where + F: FnOnce(Self::Output, &mut A, &mut A::Context) -> U, + Self: Sized, + { + Map::new(self, f) + } + + /// Chain on a computation for when a future finished, passing the result of + /// the future to the provided closure `f`. + fn then(self, f: F) -> Then + where + F: FnOnce(Self::Output, &mut A, &mut A::Context) -> Fut, + Fut: ActorFuture, + Self: Sized, + { + then::new(self, f) + } + + /// Add timeout to futures chain. + /// + /// `Err(())` returned as a timeout error. + fn timeout(self, timeout: Duration) -> Timeout + where + Self: Sized, + { + Timeout::new(self, timeout) + } + + /// Wrap the future in a Box, pinning it. + /// + /// A shortcut for wrapping in [`Box::pin`]. 
+ fn boxed_local(self) -> LocalBoxActorFuture + where + Self: Sized + 'static, + { + Box::pin(self) + } +} +``` + +# Full source code + +## Send http request in `handle` function + +```rust +use actix::prelude::*; +use anyhow::Result; +use futures::prelude::*; +use tokio::time::{sleep, Duration}; + +#[derive(Message)] +#[rtype(result = "Result<(), ()>")] +struct Msg; + +struct MyActor2; + +impl Actor for MyActor2 { + type Context = Context; +} + +impl Handler for MyActor2 { + type Result = ResponseFuture>; + + fn handle(&mut self, _: Msg, _: &mut Context) -> Self::Result { + Box::pin(async move { + // Some async computation + println!("Box::pin called"); + Ok(()) + }) + } +} + +#[derive(Message)] +#[rtype(result = "Result")] +struct Msg3; + +struct MyActor3; + +impl Actor for MyActor3 { + type Context = Context; +} + +impl Handler for MyActor3 { + type Result = ResponseActFuture>; + + fn handle(&mut self, _: Msg3, _: &mut Context) -> Self::Result { + Box::pin( + async { + println!("will return 42"); + // Some async computation + 42 + } + .into_actor(self) // converts future to ActorFuture + .map(|res, _act, _ctx| { + println!("map"); + // Do some computation with actor's state or context + Ok(res) + }), + ) + } +} + +#[derive(Message)] +#[rtype(result = "Result")] +// return http status code +struct Msg4; + +struct MyActor4; + +impl Actor for MyActor4 { + type Context = Context; +} + +impl Handler for MyActor4 { + // type Result = ResponseActFuture>; + type Result = ResponseActFuture>; + + fn handle(&mut self, _: Msg4, _: &mut Context) -> Self::Result { + // let res = reqwest::get("https://hyper.rs").await?; + // println!("Status: {}", res.status()); + // let body = res.text().await?; + + Box::pin( + async { + println!("will return 42"); + let status_code = match reqwest::get("https://hyper.rs").await { + Ok(response) => { + println!("Got status from hyper.rs {}", response.status()); + response.status().as_u16() + }, + Err(err) => { + println!("get response error : {err}"); + 42 as u16 + }, + }; + status_code + } + .into_actor(self) // converts future to ActorFuture + .map(|res, _act, _ctx| { + println!("result in map process : {res}"); + // Do some computation with actor's state or context + Ok(res) + }), + ) + } +} + +#[derive(Message)] +#[rtype(result = "Result")] +// return http reponse body +struct Msg5; + +struct MyActor5; + +impl Actor for MyActor5 { + type Context = Context; +} + +impl Handler for MyActor5 { + // type Result = ResponseActFuture>; + type Result = ResponseActFuture>; + + fn handle(&mut self, _: Msg5, _: &mut Context) -> Self::Result { + // let res = reqwest::get("https://hyper.rs").await?; + // println!("Status: {}", res.status()); + // let body = res.text().await?; + + Box::pin( + async { + let status_code = match reqwest::get("https://hyper.rs").await { + Ok(response) => { + println!("Reponse Ok from hyper.rs {}", response.status()); + match response.text().await { + Ok(body) => body, + Err(err) => { + format!("Convert Reposne to string error : {err}") + } + } + }, + Err(err) => { + format!("Reposne error from hyper.rs, error : {err}") + }, + }; + status_code + } + .into_actor(self) // converts future to ActorFuture + .map(|res, _act, _ctx| { + println!("result in map process : {res}"); + // Do some computation with actor's state or context + Ok(res) + }), + ) + } +} + +fn main() -> Result<()> { + let mut sys = actix::System::new(); + + sys.block_on(async { + // let _addr = MyActor {}.start(); + // let _addr = MyActor2 {}.start(); + // let addr = MyActor3 
{}.start(); + // addr.do_send(Msg3 {}) + // OK + // let addr = MyActor4 {}.start(); + // addr.do_send(Msg4 {}) + // OK + let addr = MyActor5 {}.start(); + addr.do_send(Msg5 {}) + }); + sys.run()?; + + Ok(()) +} +``` + +## Send http request in `started` function + +```rust +use actix::prelude::*; +use actix::utils::IntervalFunc; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::oneshot::channel; +use tokio::sync::Mutex; + +#[derive(Clone)] +struct MyActor { + status_code: Option, +} + +impl MyActor { + fn tick(&mut self, context: &mut Context) { + println!("tick"); + } + + fn print_status_code(&mut self, context: &mut Context) { + println!("status code: {:?}", self.status_code); + } +} + +impl Actor for MyActor { + type Context = Context; + + fn started(&mut self, context: &mut Context) { + println!("In started"); + // ✅NOTE: This will run + context.wait( + async move { + // send http reqwest + let status_code = match reqwest::get("https://hyper.rs").await { + Ok(response) => { + println!( + "Got status from hyper.rs {}", + response.status() + ); + response.status().as_u16() + } + Err(err) => { + println!("get response error : {err}"); + 42 as u16 + } + }; + println!("status code: {status_code}"); + + status_code + } + .into_actor(self) + .then(|output, s, ctx| { + s.status_code = Some(output); + fut::ready(()) + }), + ); + + IntervalFunc::new(Duration::from_millis(5000), Self::print_status_code) + .finish() + .spawn(context); + + context.run_later(Duration::from_millis(20000), |_, _| { + System::current().stop() + }); + } +} + +fn main() { + let mut sys = System::new(); + let addr = sys.block_on(async { MyActor { status_code: None }.start() }); + sys.run(); +} +``` + +# Appendix + +## Actor trait + +````rust +/// Actors are objects which encapsulate state and behavior. +/// +/// Actors run within a specific execution context +/// [`Context`](struct.Context.html). The context object is available +/// only during execution. Each actor has a separate execution +/// context. The execution context also controls the lifecycle of an +/// actor. +/// +/// Actors communicate exclusively by exchanging messages. The sender +/// actor can wait for a response. Actors are not referenced directly, +/// but by address [`Addr`](struct.Addr.html) To be able to handle a +/// specific message actor has to provide a +/// [`Handler`](trait.Handler.html) implementation for this +/// message. All messages are statically typed. A message can be +/// handled in asynchronous fashion. An actor can spawn other actors +/// or add futures or streams to the execution context. The actor +/// trait provides several methods that allow controlling the actor +/// lifecycle. +/// +/// # Actor lifecycle +/// +/// ## Started +/// +/// An actor starts in the `Started` state, during this state the +/// `started` method gets called. +/// +/// ## Running +/// +/// After an actor's `started` method got called, the actor +/// transitions to the `Running` state. An actor can stay in the +/// `running` state for an indefinite amount of time. +/// +/// ## Stopping +/// +/// The actor's execution state changes to `stopping` in the following +/// situations: +/// +/// * `Context::stop` gets called by actor itself +/// * all addresses to the actor get dropped +/// * no evented objects are registered in its context. +/// +/// An actor can return from the `stopping` state to the `running` +/// state by creating a new address or adding an evented object, like +/// a future or stream, in its `Actor::stopping` method. 
+/// +/// If an actor changed to a `stopping` state because +/// `Context::stop()` got called, the context then immediately stops +/// processing incoming messages and calls the `Actor::stopping()` +/// method. If an actor does not return back to a `running` state, +/// all unprocessed messages get dropped. +/// +/// ## Stopped +/// +/// If an actor does not modify execution context while in stopping +/// state, the actor state changes to `Stopped`. This state is +/// considered final and at this point the actor gets dropped. +#[allow(unused_variables)] +pub trait Actor: Sized + Unpin + 'static { + /// Actor execution context type + type Context: ActorContext; + + /// Called when an actor gets polled the first time. + fn started(&mut self, ctx: &mut Self::Context) {} + + /// Called after an actor is in `Actor::Stopping` state. + /// + /// There can be several reasons for stopping: + /// + /// - `Context::stop` gets called by the actor itself. + /// - All addresses to the current actor get dropped and no more + /// evented objects are left in the context. + /// + /// An actor can return from the stopping state to the running + /// state by returning `Running::Continue`. + fn stopping(&mut self, ctx: &mut Self::Context) -> Running { + Running::Stop + } + + /// Called after an actor is stopped. + /// + /// This method can be used to perform any needed cleanup work or + /// to spawn more actors. This is the final state, after this + /// method got called, the actor will be dropped. + fn stopped(&mut self, ctx: &mut Self::Context) {} + + /// Start a new asynchronous actor, returning its address. + /// + /// # Examples + /// + /// ``` + /// use actix::prelude::*; + /// + /// struct MyActor; + /// impl Actor for MyActor { + /// type Context = Context; + /// } + /// + /// #[actix::main] + /// async fn main() { + /// // start actor and get its address + /// let addr = MyActor.start(); + /// # System::current().stop(); + /// } + /// ``` + fn start(self) -> Addr + where + Self: Actor>, + { + Context::new().run(self) + } + + /// Construct and start a new asynchronous actor, returning its + /// address. + /// + /// This is constructs a new actor using the `Default` trait, and + /// invokes its `start` method. + fn start_default() -> Addr + where + Self: Actor> + Default, + { + Self::default().start() + } + + /// Start new actor in arbiter's thread. + fn start_in_arbiter(wrk: &ArbiterHandle, f: F) -> Addr + where + Self: Actor>, + F: FnOnce(&mut Context) -> Self + Send + 'static, + { + let (tx, rx) = channel::channel(DEFAULT_CAPACITY); + + // create actor + wrk.spawn_fn(move || { + let mut ctx = Context::with_receiver(rx); + let act = f(&mut ctx); + let fut = ctx.into_future(act); + + actix_rt::spawn(fut); + }); + + Addr::new(tx) + } + + /// Start a new asynchronous actor given a `Context`. + /// + /// Use this method if you need the `Context` object during actor + /// initialization. 
+ /// + /// # Examples + /// + /// ``` + /// use actix::prelude::*; + /// + /// struct MyActor { + /// val: usize, + /// } + /// impl Actor for MyActor { + /// type Context = Context; + /// } + /// + /// #[actix::main] + /// async fn main() { + /// let addr = MyActor::create(|ctx: &mut Context| MyActor { val: 10 }); + /// # System::current().stop(); + /// } + /// ``` + fn create(f: F) -> Addr + where + Self: Actor>, + F: FnOnce(&mut Context) -> Self, + { + let mut ctx = Context::new(); + let act = f(&mut ctx); + ctx.run(act) + } +} +```` diff --git a/src/rust/diesel/upgrade-diesel-to-2.0.md b/src/rust/diesel/upgrade-diesel-to-2.0.md new file mode 100644 index 0000000..276cf28 --- /dev/null +++ b/src/rust/diesel/upgrade-diesel-to-2.0.md @@ -0,0 +1,346 @@ +# Upgrade diesel to 2.0 + +- [An introduction to diesel 2.0](#an-introduction-to-diesel-20) +- [Upgrade to diesel 2.0](#upgrade-to-diesel-20) + - [Upgrade diesel version in Cargo.toml](#upgrade-diesel-version-in-cargotoml) + - [Add mut to PgConnection and dao functions](#add-mut-to-pgconnection-and-dao-functions) + - [Derive attributes](#derive-attributes) +- [Refs](#refs) + +# An introduction to diesel 2.0 + +Diesel 2.0 has breaking changes compared to 1.4.x. + +Any code base migrating from Diesel 1.4.x to Diesel 2.0 is expected to be affected at least by the following changes: + +- [Diesel now requires a mutable reference to the connection](https://diesel.rs/guides/migration_guide.html#2-0-0-mutable-connection) +- [Changed derive attributes](https://diesel.rs/guides/migration_guide.html#2-0-0-derive-attributes) + +# Upgrade to diesel 2.0 + +## Upgrade diesel version in Cargo.toml + +In order to upgrade diesel to 2.0, we need to change dependencies in `Cargo.toml`. + +```diff +diff --git a/rust/projects/diesel_2.0_example/Cargo.toml b/rust/projects/diesel_2.0_example/Cargo.toml +index 8f5a1aa..5164334 100644 +--- a/rust/projects/diesel_2.0_example/Cargo.toml ++++ b/rust/projects/diesel_2.0_example/Cargo.toml +@@ -5,12 +5,13 @@ authors = ["lichuan "] + edition = "2018" + + [dependencies] +-diesel = { version = "1.4", features = [ ++diesel = { version = "2.1.2", features = [ + "postgres", + "serde_json", + "chrono", + "numeric", + "64-column-tables", ++ "r2d2", + ] } + # r2d2 = "0.8" + # r2d2-diesel = "1.0" + dotenv = "0.14" + # actix-web = "1.0.3" + chrono = { version = "0.4.7", features = ["serde"] } +``` + +We change diesel version to `2.1.2` and remove `r2d2` and `r2d2-diesel` dependencies, as they are included in `diesel` crate, by specifying the `r2d2` feature in `diesel` crate `diesel = { version = "2.1.2", features = [ "r2d2" ] }`. + +## Add mut to PgConnection and dao functions + +Diesel now requires mutable access to the Connection to perform any database interaction. 
The following changes are required for all usages of any Connection type:
+
+```diff
+- let connection = PgConnection::establish("…")?;
+- let result = some_query.load(&connection)?;
++ let mut connection = PgConnection::establish("…")?;
++ let result = some_query.load(&mut connection)?;
+```
+
+Here are the changes for our own code:
+
+```diff
+diff --git a/rust/projects/diesel_2.0_example/src/bin/contacts.rs b/rust/projects/diesel_2.0_example/src/bin/contacts.rs
+index a74e29397..6efc8ef4f 100644
+--- a/rust/projects/diesel_2.0_example/src/bin/contacts.rs
++++ b/rust/projects/diesel_2.0_example/src/bin/contacts.rs
+@@ -16,11 +16,15 @@ fn test_contacts() {
+         env::var("DATABASE_URL").unwrap_or(LOCAL_DATABASE_URL.into());
+     let pool = db::init_pool(database_url);
+     // https://github.com/sfackler/r2d2/issues/37
+-    let connection = pool.get().unwrap();
++    let mut conn = pool.get().unwrap();
+
+-    let conn: &PgConnection = &connection;
+-    conn.execute("TRUNCATE TABLE contacts").unwrap();
+-    conn.execute("alter sequence contacts_id_seq restart;").unwrap();
++    diesel::sql_query("TRUNCATE TABLE contacts").execute(&mut conn).unwrap();
++    diesel::sql_query("alter sequence contacts_id_seq restart;")
++        .execute(&mut conn)
++        .unwrap();
++
++    // conn.execute("TRUNCATE TABLE contacts").unwrap();
++    // conn.execute("alter sequence contacts_id_seq restart;").unwrap();
+
+     let santas_address: serde_json::Value = serde_json::from_str(
+         r#"{
+@@ -61,7 +65,7 @@ fn test_contacts() {
+         },
+     ];
+
+-    let contacts = create_contracts(&conn, &new_contacts).unwrap();
++    let contacts = create_contracts(&mut conn, &new_contacts).unwrap();
+     println!("{:?}", contacts);
+
+     // let inserted_address = insert_into(contacts)
+@@ -75,9 +79,7 @@ fn get_contacts() {
+     let database_url =
+         env::var("DATABASE_URL").unwrap_or(LOCAL_DATABASE_URL.into());
+     let pool = db::init_pool(database_url);
+-    let connection = pool.get().unwrap();
+-
+-    let conn: &PgConnection = &connection;
++    let mut conn = pool.get().unwrap();
+
+     let santas_address: serde_json::Value = serde_json::from_str(
+         r#"{
+@@ -86,12 +88,13 @@ fn get_contacts() {
+     )
+     .unwrap();
+
+-    let contacts = get_contacts_by_address(&conn, &santas_address).unwrap();
++    let contacts = get_contacts_by_address(&mut conn, &santas_address).unwrap();
+     println!("{:?}", contacts);
+
+     let santas_address2: serde_json::Value = json!(true);
+
+-    let contacts = get_contacts_by_address(&conn, &santas_address2).unwrap();
++    let contacts =
++        get_contacts_by_address(&mut conn, &santas_address2).unwrap();
+     println!("{:?}", contacts);
+ }
+
+diff --git a/rust/projects/diesel_2.0_example/src/bin/select-limit-offset.rs b/rust/projects/diesel_2.0_example/src/bin/select-limit-offset.rs
+index a9c583039..a69489b7a 100644
+--- a/rust/projects/diesel_2.0_example/src/bin/select-limit-offset.rs
++++ b/rust/projects/diesel_2.0_example/src/bin/select-limit-offset.rs
+@@ -10,13 +10,11 @@ fn select_limit_offset() {
+         env::var("DATABASE_URL").unwrap_or(local_database_url.into());
+     let pool = db::init_pool(database_url);
+     // https://github.com/sfackler/r2d2/issues/37
+-    let connection = pool.get().unwrap();
+-
+-    let _conn: &PgConnection = &connection;
++    let mut conn = pool.get().unwrap();
+
+     let limit = 2;
+     let offset = 2;
+-    let all = get_select_limit_offset(&connection, limit, offset).unwrap();
++    let all = get_select_limit_offset(&mut conn, limit, offset).unwrap();
+     println!("select : {:?} records", all);
+ }
+
+@@ -26,17 +24,15 @@ fn select_limit_offset_loop() {
+         env::var("DATABASE_URL").unwrap_or(local_database_url.into());
+     let pool = db::init_pool(database_url);
+     // https://github.com/sfackler/r2d2/issues/37
+-    let connection = pool.get().unwrap();
+-
+-    let _conn: &PgConnection = &connection;
++    let mut conn = pool.get().unwrap();
+
+     let limit = 2;
+     let mut offset = 0;
+-    let all = get_select_limit_offset(&connection, limit, offset).unwrap();
++    let all = get_select_limit_offset(&mut conn, limit, offset).unwrap();
+     println!("select : {:?} records", all);
+
+     loop {
+-        if let Ok(res) = get_select_limit_offset(&connection, limit, offset) {
++        if let Ok(res) = get_select_limit_offset(&mut conn, limit, offset) {
+             if res.len() == 0 {
+                 break;
+             }
+diff --git a/rust/projects/diesel_2.0_example/src/bin/test-connect-r2d2-pool-actix.rs b/rust/projects/diesel_2.0_example/src/bin/test-connect-r2d2-pool-actix.rs
+index 1010a2380..44bce1eb4 100644
+--- a/rust/projects/diesel_2.0_example/src/bin/test-connect-r2d2-pool-actix.rs
++++ b/rust/projects/diesel_2.0_example/src/bin/test-connect-r2d2-pool-actix.rs
+@@ -2,15 +2,22 @@ extern crate diesel;
+
+ use std::env;
+
+-use actix_web::{web, App, HttpRequest, HttpServer, Responder};
++use actix_web::{web, App, HttpServer, Responder};
+ use diesel_example::db;
+
+-fn greet(req: HttpRequest) -> impl Responder {
+-    let name = req.match_info().get("name").unwrap_or("World");
+-    format!("Hello {}!", &name)
++// TODO: why compile error for actix-web 4?
++// fn greet(req: HttpRequest) -> impl Responder {
++//     let name = req.match_info().get("name").unwrap_or("World");
++//     format!("Hello {}!", &name)
++// }
++
++// #[get("/hello/{name}")]
++async fn greet(name: web::Path<String>) -> impl Responder {
++    format!("Hello {}!", name)
+ }
+
+-fn main() {
++#[actix_web::main]
++async fn main() -> std::io::Result<()> {
+     let database_url = env::var("DATABASE_URL").expect("set DATABASE_URL");
+     let pool = db::init_pool(database_url);
+     // https://github.com/sfackler/r2d2/issues/37
+@@ -26,5 +33,5 @@ fn main() {
+         .bind("127.0.0.1:8000")
+         .expect("Can not bind to port 8000")
+         .run()
+-        .unwrap();
++        .await
+ }
+diff --git a/rust/projects/diesel_2.0_example/src/bin/test-partial-inserts.rs b/rust/projects/diesel_2.0_example/src/bin/test-partial-inserts.rs
+index 1819a9677..0fa4e2650 100644
+--- a/rust/projects/diesel_2.0_example/src/bin/test-partial-inserts.rs
++++ b/rust/projects/diesel_2.0_example/src/bin/test-partial-inserts.rs
+@@ -1,4 +1,3 @@
+-
+ use diesel_example::dao::partial_inserts::create_partial_inserts;
+ use diesel_example::db;
+ use diesel_example::model::partial_inserts::NewPartialInsert;
+@@ -10,7 +9,7 @@ fn main() {
+         env::var("DATABASE_URL").unwrap_or(local_database_url.into());
+     let pool = db::init_pool(database_url);
+     // https://github.com/sfackler/r2d2/issues/37
+-    let connection = pool.get().unwrap();
++    let mut conn = pool.get().unwrap();
+
+     let v = vec![
+         NewPartialInsert { user_id: 5, name: Some("3".to_string()) },
+@@ -20,7 +19,7 @@ fn main() {
+     // If multiple rows are inserted at once and one of them errors (e.g. user_id = 2 above), the whole insert fails (5 is not inserted)
+     // Err(DatabaseError(UniqueViolation, "duplicate key value violates unique constraint \"ui_partial_inserts_user_id\""))
+
+-    let r = create_partial_inserts(&connection, &v);
++    let r = create_partial_inserts(&mut conn, &v);
+
+     println!("{:?}", r);
+ }
+diff --git a/rust/projects/diesel_2.0_example/src/bin/timestamp-with-zone.rs b/rust/projects/diesel_2.0_example/src/bin/timestamp-with-zone.rs
+index e5a793222..1d3b3e72d 100644
+--- a/rust/projects/diesel_2.0_example/src/bin/timestamp-with-zone.rs
++++ 
b/rust/projects/diesel_2.0_example/src/bin/timestamp-with-zone.rs +@@ -1,7 +1,5 @@ +-use chrono::{DateTime}; +-use diesel_example::dao::timestamp_with_zone::{ +- create_timestamp_with_zones, +-}; ++use chrono::DateTime; ++use diesel_example::dao::timestamp_with_zone::create_timestamp_with_zones; + use diesel_example::db; + use diesel_example::model::timestamp_with_zone::NewTimestampWithZone; + use std::env; +@@ -12,7 +10,7 @@ fn main() { + env::var("DATABASE_URL").unwrap_or(local_database_url.into()); + let pool = db::init_pool(database_url); + // https://github.com/sfackler/r2d2/issues/37 +- let connection = pool.get().unwrap(); ++ let mut conn = pool.get().unwrap(); + + /* + let mut user_id = 123; +@@ -45,7 +43,7 @@ fn main() { + NewTimestampWithZone { user_id: 109, created_at: dt.into() }, + ]; + +- let inserted = create_timestamp_with_zones(&connection, &zones); ++ let inserted = create_timestamp_with_zones(&mut conn, &zones); + match inserted { + Ok(ref v) => { + println!( +diff --git a/rust/projects/diesel_2.0_example/src/bin/updated-at.rs b/rust/projects/diesel_2.0_example/src/bin/updated-at.rs +index c536b6a9c..27c5a37da 100644 +--- a/rust/projects/diesel_2.0_example/src/bin/updated-at.rs ++++ b/rust/projects/diesel_2.0_example/src/bin/updated-at.rs +@@ -10,7 +10,7 @@ fn main() { + env::var("DATABASE_URL").unwrap_or(local_database_url.into()); + let pool = db::init_pool(database_url); + // https://github.com/sfackler/r2d2/issues/37 +- let connection = pool.get().unwrap(); ++ let mut conn = pool.get().unwrap(); + + let user_id = 123; + let n = NewWeiboFeedCrawlStatus { +@@ -20,11 +20,8 @@ fn main() { + created_at: Local::now().naive_local(), + updated_at: None, + }; +- create_weibo_feed_crawl_status(&connection, &n); ++ create_weibo_feed_crawl_status(&mut conn, &n); + update_total_page_count_and_next_page_index_by_user_id( +- user_id, +- &connection, +- 1, +- 1, ++ user_id, &mut conn, 1, 1, + ); + } +diff --git a/rust/projects/diesel_2.0_example/src/bin/upsert-if-condition.rs b/rust/projects/diesel_2.0_example/src/bin/upsert-if-condition.rs +index ca7f19d77..807f74272 100644 +--- a/rust/projects/diesel_2.0_example/src/bin/upsert-if-condition.rs ++++ b/rust/projects/diesel_2.0_example/src/bin/upsert-if-condition.rs +@@ -10,11 +10,15 @@ fn upsert_on_conflict_id() { + env::var("DATABASE_URL").unwrap_or(local_database_url.into()); + let pool = db::init_pool(database_url); + // https://github.com/sfackler/r2d2/issues/37 +- let connection = pool.get().unwrap(); ++ let mut conn = pool.get().unwrap(); + +- let conn: &PgConnection = &connection; +- conn.execute("TRUNCATE TABLE upserts_if_condition").unwrap(); +- conn.execute("alter sequence upserts_if_condition_id_seq restart;") ++ // let conn: &mut PgConnection = &mut connection; ++ // conn.execute("TRUNCATE TABLE upserts_if_condition").unwrap(); ++ // conn.execute("alter sequence upserts_if_condition_id_seq restart;") ++ // .unwrap(); ++ ++ diesel::sql_query("TRUNCATE TABLE upserts_if_condition") ++ .execute(&mut conn) + .unwrap(); + + let u = NewUpsertIfCondition { +@@ -27,7 +31,7 @@ fn upsert_on_conflict_id() { + next: Some(9), + email: Some("e".into()), + }; +- let inserted = create_upserts_if_condition_by_sql(&connection, &u).unwrap(); ++ let inserted = create_upserts_if_condition_by_sql(&mut conn, &u).unwrap(); + println!("{} records inserted", inserted.len()); + } +``` + +## Derive attributes + +You should add `#[diesel()]` when using derive attributes. 
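+
+Besides `table_name` (an example follows below), the same `#[diesel(...)]` wrapping applies to the other 1.4-style derive attributes. Here is a hypothetical sketch based on the migration guide; the table and struct names are made up for illustration:
+
+```rust
+use diesel::prelude::*;
+
+diesel::table! {
+    crawl_status (user_id) {
+        user_id -> Int8,
+        total_page_count -> Nullable<Int4>,
+    }
+}
+
+#[derive(AsChangeset)]
+// diesel 1.4.x:
+//   #[table_name = "crawl_status"]
+//   #[changeset_options(treat_none_as_null = "true")]
+// diesel 2.x: the attributes move inside #[diesel(...)]:
+#[diesel(table_name = crawl_status)]
+#[diesel(treat_none_as_null = true)]
+pub struct CrawlStatusChangeset {
+    pub total_page_count: Option<i32>,
+}
+```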
+
+Below is an example of using the `table_name` attribute to define the `NewTimestampWithZone` struct.
+
+```rust
+#[derive(Insertable, Debug)]
+// NOTE: For diesel 1.4.x, we used the bare attribute:
+// #[table_name = "timestamp_with_zone"]
+#[diesel(table_name = timestamp_with_zone)]
+pub struct NewTimestampWithZone {
+    pub user_id: i64,
+    pub created_at: DateTime<Utc>,
+}
+```
+
+# Refs
+
+Diesel 2.0 migration guide
+https://diesel.rs/guides/migration_guide.html
diff --git a/src/rust/diesel/use-jsonb-in-diesel.md b/src/rust/diesel/use-jsonb-in-diesel.md
new file mode 100644
index 0000000..85035dc
--- /dev/null
+++ b/src/rust/diesel/use-jsonb-in-diesel.md
@@ -0,0 +1,1851 @@
+# Diesel JSONB Example
+
+- [Intro](#intro)
+- [Install dependencies](#install-dependencies)
+- [Write sql for migration](#write-sql-for-migration)
+- [Run migration](#run-migration)
+- [Create database models](#create-database-models)
+- [Add more dependencies](#add-more-dependencies)
+- [Modify main.rs](#modify-mainrs)
+- [Build](#build)
+  - [Build and fail](#build-and-fail)
+  - [Fix: import `orders` from `schema` module](#fix-import-orders-from-schema-module)
+  - [Fix: change `total_amount` type from `f64` to `BigDecimal`](#fix-change-total_amount-type-from-f64-to-bigdecimal)
+  - [Fix: Change `metadata` type from `Value` to `Option<Value>`](#fix-change-metadata-type-from-value-to-optionvalue)
+- [Create orders](#create-orders)
+- [Query orders](#query-orders)
+- [Update order](#update-order)
+- [Delete order](#delete-order)
+- [Summary](#summary)
+- [Refs](#refs)
+
+# Intro
+
+This is a simple example of using Diesel with JSONB in a Rust project.
+
+# Install dependencies
+
+First, let's create a new project and add dependencies:
+
+```bash
+# Init project
+cargo init order-diesel-jsonb-example
+
+# Add dependencies
+cd order-diesel-jsonb-example
+cargo add diesel -F postgres
+cargo add dotenvy
+
+# Install diesel cli
+cargo install diesel_cli
+
+# Tell diesel where to find the database
+# echo DATABASE_URL=postgres://username:password@localhost/diesel_demo > .env
+echo DATABASE_URL=postgres://localhost/diesel_demo > .env
+
+# Create postgres database
+createdb diesel_demo
+psql diesel_demo
+
+# setup diesel and run migrations
+diesel setup
+diesel migration generate create_orders
+
+# Output:
+# Creating migrations/2024-12-16-120623_create_orders/up.sql
+# Creating migrations/2024-12-16-120623_create_orders/down.sql
+```
+
+# Write sql for migration
+
+As the diesel documentation says:
+
+> Migrations allow us to evolve the database schema over time. Each migration consists of an up.sql file to apply the changes and a down.sql file to revert them.
+
+diesel created the `migrations` directory when we ran `diesel migration generate create_orders`, along with empty `up.sql` and `down.sql` files.
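+
+Before writing the SQL, note that a migration pair can be applied and reverted from the CLI at any time; these are standard diesel_cli commands, nothing specific to this project:
+
+```bash
+diesel migration run     # apply pending migrations (runs up.sql)
+diesel migration revert  # roll back the latest migration (runs down.sql)
+diesel migration redo    # revert and re-apply, handy while iterating on the SQL
+```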
+
+Let's write some sql in `migrations/2024-12-16-120623_create_orders/up.sql`:
+
+```sql
+-- Your SQL goes here
+CREATE TABLE orders (
+    id SERIAL PRIMARY KEY,
+    user_id INTEGER NOT NULL,
+    total_amount DECIMAL(10, 2) NOT NULL,
+    order_date TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    metadata JSONB
+);
+
+CREATE INDEX idx_order_metadata ON orders USING gin (metadata);
+```
+
+And `migrations/2024-12-16-120623_create_orders/down.sql`:
+
+```sql
+DROP TABLE orders;
+```
+
+# Run migration
+
+Running `diesel migration run` creates the table in postgres:
+
+```
+dylan@/tmp:diesel_demo> \d orders;
++--------------+-----------------------------+------------------------------------------------------+
+| Column       | Type                        | Modifiers                                            |
+|--------------+-----------------------------+------------------------------------------------------|
+| id           | integer                     | not null default nextval('orders_id_seq'::regclass)  |
+| user_id      | integer                     | not null                                             |
+| total_amount | numeric(10,2)               | not null                                             |
+| order_date   | timestamp without time zone | not null default CURRENT_TIMESTAMP                   |
+| metadata     | jsonb                       |                                                      |
++--------------+-----------------------------+------------------------------------------------------+
+Indexes:
+    "orders_pkey" PRIMARY KEY, btree (id)
+    "idx_order_metadata" gin (metadata)
+
+Time: 0.034s
+```
+
+Also, running the migration regenerates `src/schema.rs` with the following code:
+
+```
+// @generated automatically by Diesel CLI.
+
+diesel::table! {
+    orders (id) {
+        id -> Int4,
+        user_id -> Int4,
+        total_amount -> Numeric,
+        order_date -> Timestamp,
+        metadata -> Nullable<Jsonb>,
+    }
+}
+```
+
+The schema defines a Rust module representing the table structure.
+
+The key components of the schema are:
+
+- Table Structure: Each table is represented by a struct, typically referenced as `orders::table` in this example.
+- Column Definitions: Each column is represented by a struct implementing the `Expression` trait to specify SQL types.
+- DSL Module: Provides a convenient syntax for queries, making them less verbose than writing SQL directly.
+
+Building on these pieces, we can wire up a connection pool and a small data-access layer. Here is `src/db.rs` (note that it uses diesel's built-in r2d2 pooling, which requires the `r2d2` feature flag):
+
+```rust
+use diesel::pg::PgConnection;
+use diesel::prelude::*;
+use diesel::r2d2::ConnectionManager;
+use diesel::r2d2::Pool;
+use dotenvy::dotenv;
+
+use crate::models::{NewOrder, Order};
+use crate::schema::orders;
+
+pub type PgPool = Pool<ConnectionManager<PgConnection>>;
+
+pub fn establish_connection() -> PgPool {
+    dotenv().ok();
+
+    let database_url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set");
+    let manager = ConnectionManager::<PgConnection>::new(database_url);
+    Pool::builder()
+        .build(manager)
+        .expect("Failed to create pool.")
+}
+
+pub fn create_order(conn: &mut PgConnection, new_order: NewOrder) -> Result<Order, diesel::result::Error> {
+    diesel::insert_into(orders::table)
+        .values(&new_order)
+        .get_result(conn)
+}
+```
+
+# Create database models
+
+Now, let's create database models in `src/models.rs`.
+
+```rust
+use diesel::prelude::*;
+use serde::{Deserialize, Serialize};
+use chrono::NaiveDateTime;
+use serde_json::Value;
+
+#[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)]
+#[diesel(table_name = orders)]
+#[diesel(check_for_backend(diesel::pg::Pg))]
+pub struct Order {
+    pub id: i32,
+    pub user_id: i32,
+    pub total_amount: f64,
+    pub order_date: NaiveDateTime,
+    pub metadata: Value, // This will use JSONB
+}
+
+#[derive(Insertable, Deserialize)]
+#[diesel(table_name = orders)]
+pub struct NewOrder {
+    pub user_id: i32,
+    pub total_amount: f64,
+    pub metadata: Value,
+}
+```
+
+Here are some notes about the code:
+
+- `#[derive(Queryable)]` will generate all of the code needed to load an `Order` struct from a SQL query.
+
+- `#[derive(Selectable)]` will generate code to construct a matching select clause for your model type, based on the table defined via `#[diesel(table_name = orders)]`.
+
+- `#[diesel(check_for_backend(diesel::pg::Pg))]` (or `sqlite::SQLite` or `mysql::MySQL`) adds additional compile-time checks to verify that all field types in your struct are compatible with their corresponding SQL-side expressions. This part is optional, but it greatly improves the generated compiler error messages.
+
+If any field type is incompatible with its column type in the database, Diesel will emit a compile-time error.
+
+# Add more dependencies
+
+```bash
+cargo add anyhow serde_json
+cargo add chrono -F serde
+cargo add serde -F derive
+```
+
+Here is the output:
+
+```bash
+> cargo add anyhow serde_json
+    Updating crates.io index
+      Adding anyhow v1.0.94 to dependencies
+             Features:
+             + std
+             - backtrace
+      Adding serde_json v1.0.133 to dependencies
+             Features:
+             + std
+             - alloc
+             - arbitrary_precision
+             - float_roundtrip
+             - indexmap
+             - preserve_order
+             - raw_value
+             - unbounded_depth
+    Updating crates.io index
+    Blocking waiting for file lock on package cache
+     Locking 6 packages to latest compatible versions
+      Adding anyhow v1.0.94
+      Adding memchr v2.7.4
+      Adding ryu v1.0.18
+      Adding serde v1.0.216
+      Adding serde_derive v1.0.216
+      Adding serde_json v1.0.133
+> cargo add chrono -F serde
+    Updating crates.io index
+      Adding chrono v0.4.39 to dependencies
+             Features:
+             + alloc
+             + android-tzdata
+             + clock
+             + iana-time-zone
+             + js-sys
+             + now
+             + oldtime
+             + serde
+             + std
+             + wasm-bindgen
+             + wasmbind
+             + winapi
+             + windows-targets
+             - __internal_bench
+             - arbitrary
+             - libc
+             - pure-rust-locales
+             - rkyv
+             - rkyv-16
+             - rkyv-32
+             - rkyv-64
+             - rkyv-validation
+             - unstable-locales
+    Updating crates.io index
+    Blocking waiting for file lock on package cache
+     Locking 31 packages to latest compatible versions
+      Adding android-tzdata v0.1.1
+      Adding android_system_properties v0.1.5
+      Adding autocfg v1.4.0
+      Adding bumpalo v3.16.0
+      Adding cc v1.2.4
+      Adding cfg-if v1.0.0
+      Adding chrono v0.4.39
+      Adding core-foundation-sys v0.8.7
+      Adding iana-time-zone v0.1.61
+      Adding iana-time-zone-haiku v0.1.2
+      Adding js-sys v0.3.76
+      Adding libc v0.2.168
+      Adding log v0.4.22
+      Adding num-traits v0.2.19
+      Adding once_cell v1.20.2
+      Adding shlex v1.3.0
+      Adding wasm-bindgen v0.2.99
+      Adding wasm-bindgen-backend v0.2.99
+      Adding wasm-bindgen-macro v0.2.99
+      Adding wasm-bindgen-macro-support v0.2.99
+      Adding wasm-bindgen-shared v0.2.99
+      Adding windows-core v0.52.0
+      Adding windows-targets v0.52.6
+      Adding windows_aarch64_gnullvm v0.52.6
+      Adding windows_aarch64_msvc v0.52.6
+      Adding windows_i686_gnu v0.52.6
+      Adding windows_i686_gnullvm v0.52.6
+      Adding windows_i686_msvc v0.52.6
+ 
Adding windows_x86_64_gnu v0.52.6 + Adding windows_x86_64_gnullvm v0.52.6 + Adding windows_x86_64_msvc v0.52.6 +> cargo add serde -F derive + Updating crates.io index + Adding serde v1.0.216 to dependencies + Features: + + derive + + serde_derive + + std + - alloc + - rc + - unstable + Blocking waiting for file lock on package cache + Blocking waiting for file lock on package cache +``` + +# Modify main.rs + +Now, let's modify `main.rs` to create a new order and print the result: + +```rust +mod models; +mod schema; +mod db; + +use diesel::pg::PgConnection; +use diesel::Connection; +use dotenvy::dotenv; +use std::env; + +fn establish_connection() -> PgConnection { + dotenv().ok(); + let database_url = env::var("DATABASE_URL") + .expect("DATABASE_URL must be set"); + PgConnection::establish(&database_url) + .expect(&format!("Error connecting to {}", database_url)) +} + +fn main() { + let conn = &mut establish_connection(); + + // Example usage + let new_order = models::NewOrder { + user_id: 1, + total_amount: 99.99, + metadata: serde_json::json!({ + "items": ["book", "pen"], + "shipping_method": "express", + "gift_wrap": true + }), + }; + + match db::create_order(conn, new_order) { + Ok(order) => println!("Created order: {:?}", order), + Err(e) => eprintln!("Error creating order: {}", e), + } +} +``` + +# Build + +## Build and fail + +Now, let's build the project and see the error: + +```bash +> cargo build + Compiling core-foundation-sys v0.8.7 + Compiling itoa v1.0.14 + Compiling memchr v2.7.4 + Compiling pq-sys v0.6.3 + Compiling num-traits v0.2.19 + Compiling byteorder v1.5.0 + Compiling bitflags v2.6.0 + Compiling serde v1.0.216 + Compiling ryu v1.0.18 + Compiling anyhow v1.0.94 + Compiling dotenvy v0.15.7 + Compiling iana-time-zone v0.1.61 + Compiling diesel v2.2.6 + Compiling serde_json v1.0.133 + Compiling chrono v0.4.39 + Compiling order-diesel-jsonb-example v0.1.0 ( +error[E0433]: failed to resolve: use of undeclared crate or module `orders` + --> src/models.rs:7:23 + | +7 | #[diesel(table_name = orders)] + | ^^^^^^ use of undeclared crate or module `orders` + | +help: a struct with a similar name exists + | +7 | #[diesel(table_name = Order)] + | ~~~~~ +help: consider importing this struct through its public re-export + | +1 + use crate::schema::orders::dsl::orders; + | + +error[E0433]: failed to resolve: use of undeclared crate or module `orders` + --> src/models.rs:18:23 + | +18 | #[diesel(table_name = orders)] + | ^^^^^^ use of undeclared crate or module `orders` + | +help: a struct with a similar name exists + | +18 | #[diesel(table_name = Order)] + | ~~~~~ +help: consider importing this struct through its public re-export + | +1 + use crate::schema::orders::dsl::orders; + | + +warning: unused import: `serde_json::json` + --> src/db.rs:3:5 + | +3 | use serde_json::json; + | ^^^^^^^^^^^^^^^^ + | + = note: `#[warn(unused_imports)]` on by default + +error[E0277]: the trait bound `&NewOrder: diesel::Insertable` is not satisfied + --> src/db.rs:10:17 + | +10 | .values(&new_order) + | ------ ^^^^^^^^^^ the trait `diesel::Insertable
` is not implemented for `&NewOrder` + | | + | required by a bound introduced by this call + | +note: required by a bound in `IncompleteInsertStatement::::values` + --> /Users/dylan/.cargo/registry/src/index.crates.io-6f17d22bba15001f/diesel-2.2.6/src/query_builder/insert_statement/mod.rs:115:12 + | +113 | pub fn values(self, records: U) -> InsertStatement + | ------ required by a bound in this associated function +114 | where +115 | U: Insertable, + | ^^^^^^^^^^^^^ required by this bound in `IncompleteInsertStatement::::values` + +error[E0277]: the trait bound `(i32, i32, f64, NaiveDateTime, Value): FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not satisfied + --> src/db.rs:11:21 + | +11 | .get_result(conn) + | ---------- ^^^^ the trait `FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not implemented for `(i32, i32, f64, NaiveDateTime, Value)`, which is required by `InsertStatement: LoadQuery<'_, _, _>` + | | + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromStaticSqlRow`: + `(T0,)` implements `FromStaticSqlRow<(ST0,), __DB>` + `(T1, T0)` implements `FromStaticSqlRow<(ST1, ST0), __DB>` + `(T1, T2, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST0), __DB>` + `(T1, T2, T3, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST0), __DB>` + `(T1, T2, T3, T4, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST0), __DB>` + `(T1, T2, T3, T4, T5, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T7, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST7, ST0), __DB>` + and 24 others +note: required for `models::Order` to implement `diesel::Queryable<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + --> src/models.rs:6:10 + | +6 | #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +... 
+9 | pub struct Order { + | ^^^^^ + = note: required for `models::Order` to implement `FromSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + = note: required for `(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable)` to implement `load_dsl::private::CompatibleType` + = note: required for `InsertStatement` to implement `LoadQuery<'_, diesel::PgConnection, models::Order>` +note: required by a bound in `get_result` + --> /Users/dylan/.cargo/registry/src/index.crates.io-6f17d22bba15001f/diesel-2.2.6/src/query_dsl/mod.rs:1722:15 + | +1720 | fn get_result<'query, U>(self, conn: &mut Conn) -> QueryResult + | ---------- required by a bound in this associated function +1721 | where +1722 | Self: LoadQuery<'query, Conn, U>, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RunQueryDsl::get_result` + = note: this error originates in the derive macro `Queryable` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `(i32, i32, f64, NaiveDateTime, Value): FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not satisfied + --> src/db.rs:17:16 + | +17 | .first(conn) + | ----- ^^^^ the trait `FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not implemented for `(i32, i32, f64, NaiveDateTime, Value)`, which is required by `SelectStatement, query_builder::select_clause::DefaultSelectClause>, query_builder::distinct_clause::NoDistinctClause, query_builder::where_clause::WhereClause>>>, query_builder::order_clause::NoOrderClause, LimitOffsetClause>, NoOffsetClause>>: LoadQuery<'_, _, _>` + | | + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromStaticSqlRow`: + `(T0,)` implements `FromStaticSqlRow<(ST0,), __DB>` + `(T1, T0)` implements `FromStaticSqlRow<(ST1, ST0), __DB>` + `(T1, T2, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST0), __DB>` + `(T1, T2, T3, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST0), __DB>` + `(T1, T2, T3, T4, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST0), __DB>` + `(T1, T2, T3, T4, T5, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T7, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST7, ST0), __DB>` + and 24 others +note: required for `models::Order` to implement `diesel::Queryable<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + --> src/models.rs:6:10 + | +6 | #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +... 
+9 | pub struct Order { + | ^^^^^ + = note: required for `models::Order` to implement `FromSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + = note: required for `(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable)` to implement `load_dsl::private::CompatibleType` + = note: required for `SelectStatement, DefaultSelectClause>, NoDistinctClause, ..., ..., ...>` to implement `LoadQuery<'_, diesel::PgConnection, models::Order>` +note: required by a bound in `first` + --> /Users/dylan/.cargo/registry/src/index.crates.io-6f17d22bba15001f/diesel-2.2.6/src/query_dsl/mod.rs:1779:22 + | +1776 | fn first<'query, U>(self, conn: &mut Conn) -> QueryResult + | ----- required by a bound in this associated function +... +1779 | Limit: LoadQuery<'query, Conn, U>, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RunQueryDsl::first` + = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-0af85acd9e7b4e2f.long-type-11474480310476070344.txt' + = note: consider using `--verbose` to print the full type name to the console + = note: this error originates in the derive macro `Queryable` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `(i32, i32, f64, NaiveDateTime, Value): FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not satisfied + --> src/db.rs:24:15 + | +24 | .load(conn) + | ---- ^^^^ the trait `FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not implemented for `(i32, i32, f64, NaiveDateTime, Value)`, which is required by `SelectStatement, query_builder::select_clause::DefaultSelectClause>, query_builder::distinct_clause::NoDistinctClause, query_builder::where_clause::WhereClause>>>>: LoadQuery<'_, _, _>` + | | + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromStaticSqlRow`: + `(T0,)` implements `FromStaticSqlRow<(ST0,), __DB>` + `(T1, T0)` implements `FromStaticSqlRow<(ST1, ST0), __DB>` + `(T1, T2, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST0), __DB>` + `(T1, T2, T3, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST0), __DB>` + `(T1, T2, T3, T4, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST0), __DB>` + `(T1, T2, T3, T4, T5, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T7, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST7, ST0), __DB>` + and 24 others +note: required for `models::Order` to implement `diesel::Queryable<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + --> src/models.rs:6:10 + | +6 | #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +... 
+9  | pub struct Order {
+   |            ^^^^^
+   = note: required for `models::Order` to implement `FromSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>`
+   = note: required for `(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable)` to implement `load_dsl::private::CompatibleType`
+   = note: required for `SelectStatement, DefaultSelectClause>, NoDistinctClause, WhereClause<...>>` to implement `LoadQuery<'_, diesel::PgConnection, models::Order>`
+note: required by a bound in `diesel::RunQueryDsl::load`
+  --> /Users/dylan/.cargo/registry/src/index.crates.io-6f17d22bba15001f/diesel-2.2.6/src/query_dsl/mod.rs:1542:15
+   |
+1540 | fn load<'query, U>(self, conn: &mut Conn) -> QueryResult>
+   |    ---- required by a bound in this associated function
+1541 | where
+1542 | Self: LoadQuery<'query, Conn, U>,
+   |       ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RunQueryDsl::load`
+   = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-0af85acd9e7b4e2f.long-type-14102635225686123243.txt'
+   = note: consider using `--verbose` to print the full type name to the console
+   = note: this error originates in the derive macro `Queryable` (in Nightly builds, run with -Z macro-backtrace for more info)
+
+Some errors have detailed explanations: E0277, E0433.
+For more information about an error, try `rustc --explain E0277`.
+warning: `order-diesel-jsonb-example` (bin "order-diesel-jsonb-example") generated 1 warning
+error: could not compile `order-diesel-jsonb-example` (bin "order-diesel-jsonb-example") due to 6 previous errors; 1 warning emitted
+```
+
+## Fix: import `orders` from `schema` module
+
+Although there are a lot of errors, the Rust compiler pinpoints the problem and gives us some hints.
+
+```bash
+error[E0433]: failed to resolve: use of undeclared crate or module `orders`
+ --> src/models.rs:7:23
+  |
+7 | #[diesel(table_name = orders)]
+  |                        ^^^^^^ use of undeclared crate or module `orders`
+  |
+help: a struct with a similar name exists
+  |
+7 | #[diesel(table_name = Order)]
+  |                        ~~~~~
+help: consider importing this struct through its public re-export
+  |
+1 + use crate::schema::orders::dsl::orders;
+  |
+```
+
+However, the hint is not correct. We should import `orders` from the `schema` module (`use crate::schema::orders`), not `use crate::schema::orders::dsl::orders`.
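+
+The distinction is that `#[diesel(table_name = ...)]` expects the path of the generated schema *module*, while the `dsl` re-export is the table *value* used when building queries. A small illustrative sketch (not code from this project):
+
+```rust
+use diesel::prelude::*;
+
+// The schema module: this is what #[diesel(table_name = ...)] resolves against.
+use crate::schema::orders;
+
+// The value the compiler hint suggested; it is meant for query building,
+// e.g. `orders.limit(5)`, and does not work as a `table_name` argument.
+// use crate::schema::orders::dsl::orders;
+
+#[derive(Insertable)]
+#[diesel(table_name = orders)]
+pub struct NewOrderSketch {
+    pub user_id: i32,
+}
+```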
+
+Let's import `orders` from the `schema` module:
+
+```rust
+use crate::schema::orders;
+```
+
+Or follow the IDE's suggestion:
+
+![Import orders](https://github.com/dylankyc/dylankyc.github.io/blob/main/.img/import-orders.png?raw=true)
+
+![Import orders](https://github.com/dylankyc/dylankyc.github.io/blob/main/.img/import-orders-from-schema.png?raw=true)
+
+Also, we should add the `chrono` and `serde_json` features to the `diesel` dependency:
+
+```bash
+cargo add diesel -F chrono,serde_json
+    Updating crates.io index
+      Adding diesel v2.2.6 to dependencies
+             Features:
+             + 32-column-tables
+             + chrono
+             + postgres
+             + postgres_backend
+             + serde_json
+             + with-deprecated
+             - 128-column-tables
+             - 64-column-tables
+             - __with_asan_tests
+             - extras
+             - huge-tables
+             - i-implement-a-third-party-backend-and-opt-into-breaking-changes
+             - ipnet-address
+             - large-tables
+             - mysql
+             - mysql_backend
+             - mysqlclient-src
+             - network-address
+             - numeric
+             - pq-src
+             - quickcheck
+             - r2d2
+             - returning_clauses_for_sqlite_3_35
+             - sqlite
+             - time
+             - unstable
+             - uuid
+             - without-deprecated
+```
+
+Let's build it again.
+
+```bash
+> cargo build
+   Compiling diesel v2.2.6
+   Compiling order-diesel-jsonb-example v0.1.0
+warning: unused import: `serde_json::json`
+ --> src/db.rs:3:5
+  |
+3 | use serde_json::json;
+  |     ^^^^^^^^^^^^^^^^
+  |
+  = note: `#[warn(unused_imports)]` on by default
+
+error[E0277]: the trait bound `Value: FromSqlRow, Pg>` is not satisfied
+  --> src/models.rs:16:19
+   |
+16 |     pub metadata: Value, // This will use JSONB
+   |                   ^^^^^ the trait `FromSql, Pg>` is not implemented for `Value`, which is required by `Value: FromSqlRow, Pg>`
+   |
+   = note: double check your type mappings via the documentation of `diesel::sql_types::Nullable`
+   = note: `diesel::sql_query` requires the loading target to column names for loading values.
+ You need to provide a type that explicitly derives `diesel::deserialize::QueryableByName` + = help: the trait `FromSql` is implemented for `f64` + = help: for that trait implementation, expected `Double`, found `diesel::sql_types::Numeric` + = note: required for `f64` to implement `diesel::Queryable` + = note: required for `f64` to implement `FromSqlRow` + = help: see issue #48214 + +error[E0277]: the trait bound `f64: diesel::Expression` is not satisfied + --> src/models.rs:8:33 + | +8 | #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)] + | ^^^^^^^^^^ the trait `diesel::Expression` is not implemented for `f64`, which is required by `f64: AsExpression` + | + = help: the following other types implement trait `diesel::Expression`: + &'a T + (T0, T1) + (T0, T1, T2) + (T0, T1, T2, T3) + (T0, T1, T2, T3, T4) + (T0, T1, T2, T3, T4, T5) + (T0, T1, T2, T3, T4, T5, T6) + (T0, T1, T2, T3, T4, T5, T6, T7) + and 137 others + = note: required for `f64` to implement `AsExpression` + = note: this error originates in the derive macro `Insertable` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `f64: diesel::Expression` is not satisfied + --> src/models.rs:8:33 + | +8 | #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)] + | ^^^^^^^^^^ the trait `diesel::Expression` is not implemented for `f64`, which is required by `&'insert f64: AsExpression` + | + = help: the following other types implement trait `diesel::Expression`: + &'a T + (T0, T1) + (T0, T1, T2) + (T0, T1, T2, T3) + (T0, T1, T2, T3, T4) + (T0, T1, T2, T3, T4, T5) + (T0, T1, T2, T3, T4, T5, T6) + (T0, T1, T2, T3, T4, T5, T6, T7) + and 137 others + = note: required for `&'insert f64` to implement `diesel::Expression` + = note: required for `&'insert f64` to implement `AsExpression` + = note: this error originates in the derive macro `Insertable` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `f64: diesel::Expression` is not satisfied + --> src/models.rs:19:10 + | +19 | #[derive(Insertable, Deserialize)] + | ^^^^^^^^^^ the trait `diesel::Expression` is not implemented for `f64`, which is required by `f64: AsExpression` + | + = help: the following other types implement trait `diesel::Expression`: + &'a T + (T0, T1) + (T0, T1, T2) + (T0, T1, T2, T3) + (T0, T1, T2, T3, T4) + (T0, T1, T2, T3, T4, T5) + (T0, T1, T2, T3, T4, T5, T6) + (T0, T1, T2, T3, T4, T5, T6, T7) + and 137 others + = note: required for `f64` to implement `AsExpression` + = note: this error originates in the derive macro `Insertable` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `f64: diesel::Expression` is not satisfied + --> src/models.rs:19:10 + | +19 | #[derive(Insertable, Deserialize)] + | ^^^^^^^^^^ the trait `diesel::Expression` is not implemented for `f64`, which is required by `&'insert f64: AsExpression` + | + = help: the following other types implement trait `diesel::Expression`: + &'a T + (T0, T1) + (T0, T1, T2) + (T0, T1, T2, T3) + (T0, T1, T2, T3, T4) + (T0, T1, T2, T3, T4, T5) + (T0, T1, T2, T3, T4, T5, T6) + (T0, T1, T2, T3, T4, T5, T6, T7) + and 137 others + = note: required for `&'insert f64` to implement `diesel::Expression` + = note: required for `&'insert f64` to implement `AsExpression` + = note: this error originates in the derive macro `Insertable` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `f64: diesel::Expression` 
is not satisfied + --> src/models.rs:14:9 + | +14 | pub total_amount: f64, + | ^^^^^^^^^^^^ the trait `diesel::Expression` is not implemented for `f64`, which is required by `f64: AsExpression` + | + = help: the following other types implement trait `diesel::Expression`: + &'a T + (T0, T1) + (T0, T1, T2) + (T0, T1, T2, T3) + (T0, T1, T2, T3, T4) + (T0, T1, T2, T3, T4, T5) + (T0, T1, T2, T3, T4, T5, T6) + (T0, T1, T2, T3, T4, T5, T6, T7) + and 137 others + = note: required for `f64` to implement `AsExpression` + +error[E0277]: the trait bound `f64: diesel::Expression` is not satisfied + --> src/models.rs:14:9 + | +14 | pub total_amount: f64, + | ^^^^^^^^^^^^ the trait `diesel::Expression` is not implemented for `f64`, which is required by `&'insert f64: AsExpression` + | + = help: the following other types implement trait `diesel::Expression`: + &'a T + (T0, T1) + (T0, T1, T2) + (T0, T1, T2, T3) + (T0, T1, T2, T3, T4) + (T0, T1, T2, T3, T4, T5) + (T0, T1, T2, T3, T4, T5, T6) + (T0, T1, T2, T3, T4, T5, T6, T7) + and 137 others + = note: required for `&'insert f64` to implement `diesel::Expression` + = note: required for `&'insert f64` to implement `AsExpression` + +error[E0277]: the trait bound `f64: diesel::Expression` is not satisfied + --> src/models.rs:23:9 + | +23 | pub total_amount: f64, + | ^^^^^^^^^^^^ the trait `diesel::Expression` is not implemented for `f64`, which is required by `f64: AsExpression` + | + = help: the following other types implement trait `diesel::Expression`: + &'a T + (T0, T1) + (T0, T1, T2) + (T0, T1, T2, T3) + (T0, T1, T2, T3, T4) + (T0, T1, T2, T3, T4, T5) + (T0, T1, T2, T3, T4, T5, T6) + (T0, T1, T2, T3, T4, T5, T6, T7) + and 137 others + = note: required for `f64` to implement `AsExpression` + +error[E0277]: the trait bound `f64: diesel::Expression` is not satisfied + --> src/models.rs:23:9 + | +23 | pub total_amount: f64, + | ^^^^^^^^^^^^ the trait `diesel::Expression` is not implemented for `f64`, which is required by `&'insert f64: AsExpression` + | + = help: the following other types implement trait `diesel::Expression`: + &'a T + (T0, T1) + (T0, T1, T2) + (T0, T1, T2, T3) + (T0, T1, T2, T3, T4) + (T0, T1, T2, T3, T4, T5) + (T0, T1, T2, T3, T4, T5, T6) + (T0, T1, T2, T3, T4, T5, T6, T7) + and 137 others + = note: required for `&'insert f64` to implement `diesel::Expression` + = note: required for `&'insert f64` to implement `AsExpression` + +error[E0277]: the trait bound `f64: diesel::Expression` is not satisfied + --> src/db.rs:10:10 + | +10 | .values(&new_order) + | ^^^^^^ the trait `diesel::Expression` is not implemented for `f64`, which is required by `&f64: AsExpression` + | + = help: the following other types implement trait `diesel::Expression`: + &'a T + (T0, T1) + (T0, T1, T2) + (T0, T1, T2, T3) + (T0, T1, T2, T3, T4) + (T0, T1, T2, T3, T4, T5) + (T0, T1, T2, T3, T4, T5, T6) + (T0, T1, T2, T3, T4, T5, T6, T7) + and 137 others + = note: required for `&f64` to implement `diesel::Expression` + = note: required for `&f64` to implement `AsExpression` + +error[E0277]: the trait bound `f64: AppearsOnTable` is not satisfied + --> src/db.rs:11:21 + | +11 | .get_result(conn) + | ---------- ^^^^ the trait `AppearsOnTable` is not implemented for `f64`, which is required by `InsertStatement>>, DefaultableColumnInsertValue>, DefaultableColumnInsertValue, &Value>>>), table>>: LoadQuery<'_, _, _>` + | | + | required by a bound introduced by this call + | + = help: the following other types implement trait `AppearsOnTable`: + `&'a T` implements `AppearsOnTable` 
+ `(T0, T1)` implements `AppearsOnTable` + `(T0, T1, T2)` implements `AppearsOnTable` + `(T0, T1, T2, T3)` implements `AppearsOnTable` + `(T0, T1, T2, T3, T4)` implements `AppearsOnTable` + `(T0, T1, T2, T3, T4, T5)` implements `AppearsOnTable` + `(T0, T1, T2, T3, T4, T5, T6)` implements `AppearsOnTable` + `(T0, T1, T2, T3, T4, T5, T6, T7)` implements `AppearsOnTable` + and 137 others + = note: required for `&f64` to implement `AppearsOnTable` + = note: required for `DefaultableColumnInsertValue>` to implement `InsertValues<_, table>` + = note: 1 redundant requirement hidden + = note: required for `(DefaultableColumnInsertValue>>, ..., ...)` to implement `InsertValues<_, table>` + = note: required for `ValuesClause<(DefaultableColumnInsertValue>>, ..., ...), ...>` to implement `QueryFragment<_>` + = note: 1 redundant requirement hidden + = note: required for `InsertStatement, ..., ...), ...>, ..., ...>` to implement `QueryFragment<_>` + = note: required for `InsertStatement>, ..., ...), ...>>` to implement `LoadQuery<'_, _, _>` +note: required by a bound in `get_result` + --> /Users/dylan/.cargo/registry/src/index.crates.io-6f17d22bba15001f/diesel-2.2.6/src/query_dsl/mod.rs:1722:15 + | +1720 | fn get_result<'query, U>(self, conn: &mut Conn) -> QueryResult + | ---------- required by a bound in this associated function +1721 | where +1722 | Self: LoadQuery<'query, Conn, U>, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RunQueryDsl::get_result` + = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-f4ea1891b9fe6645.long-type-12046924592425831098.txt' + = note: consider using `--verbose` to print the full type name to the console + = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-f4ea1891b9fe6645.long-type-16463581489430962252.txt' + = note: consider using `--verbose` to print the full type name to the console + = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-f4ea1891b9fe6645.long-type-8702231718444061151.txt' + = note: consider using `--verbose` to print the full type name to the console + +error[E0271]: type mismatch resolving `::InsertWithDefaultKeyword == NotSpecialized` + --> src/db.rs:11:21 + | +11 | .get_result(conn) + | ---------- ^^^^ expected `NotSpecialized`, found `IsoSqlDefaultKeyword` + | | + | required by a bound introduced by this call + | + = note: required for `DefaultableColumnInsertValue>` to implement `QueryFragment` + = note: required for `DefaultableColumnInsertValue>` to implement `InsertValues` + = note: 3 redundant requirements hidden + = note: required for `InsertStatement, ..., ...), ...>, ..., ...>` to implement `QueryFragment` + = note: required for `InsertStatement>, ..., ...), ...>>` to implement `LoadQuery<'_, diesel::PgConnection, _>` +note: required by a bound in `get_result` + --> /Users/dylan/.cargo/registry/src/index.crates.io-6f17d22bba15001f/diesel-2.2.6/src/query_dsl/mod.rs:1722:15 + | +1720 | fn get_result<'query, U>(self, conn: &mut Conn) -> QueryResult + | ---------- required by a bound in this associated function +1721 | where +1722 | Self: LoadQuery<'query, Conn, U>, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RunQueryDsl::get_result` + = note: the full name for the type has been written to 
'/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-f4ea1891b9fe6645.long-type-12046924592425831098.txt' + = note: consider using `--verbose` to print the full type name to the console + = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-f4ea1891b9fe6645.long-type-9860913280699406432.txt' + = note: consider using `--verbose` to print the full type name to the console + +error[E0277]: the trait bound `(i32, i32, f64, NaiveDateTime, Value): FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not satisfied + --> src/db.rs:11:21 + | +11 | .get_result(conn) + | ---------- ^^^^ the trait `FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not implemented for `(i32, i32, f64, NaiveDateTime, Value)`, which is required by `InsertStatement>>, DefaultableColumnInsertValue>, DefaultableColumnInsertValue, &Value>>>), table>>: LoadQuery<'_, _, _>` + | | + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromStaticSqlRow`: + `(T0,)` implements `FromStaticSqlRow<(ST0,), __DB>` + `(T1, T0)` implements `FromStaticSqlRow<(ST1, ST0), __DB>` + `(T1, T2, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST0), __DB>` + `(T1, T2, T3, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST0), __DB>` + `(T1, T2, T3, T4, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST0), __DB>` + `(T1, T2, T3, T4, T5, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T7, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST7, ST0), __DB>` + and 24 others +note: required for `models::Order` to implement `diesel::Queryable<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + --> src/models.rs:8:10 + | +8 | #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +... 
+11 | pub struct Order { + | ^^^^^ + = note: required for `models::Order` to implement `FromSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + = note: required for `(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable)` to implement `load_dsl::private::CompatibleType` + = note: required for `InsertStatement>, ..., ...), ...>>` to implement `LoadQuery<'_, diesel::PgConnection, models::Order>` +note: required by a bound in `get_result` + --> /Users/dylan/.cargo/registry/src/index.crates.io-6f17d22bba15001f/diesel-2.2.6/src/query_dsl/mod.rs:1722:15 + | +1720 | fn get_result<'query, U>(self, conn: &mut Conn) -> QueryResult + | ---------- required by a bound in this associated function +1721 | where +1722 | Self: LoadQuery<'query, Conn, U>, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RunQueryDsl::get_result` + = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-f4ea1891b9fe6645.long-type-12046924592425831098.txt' + = note: consider using `--verbose` to print the full type name to the console + = note: this error originates in the derive macro `Queryable` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `(i32, i32, f64, NaiveDateTime, Value): FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not satisfied + --> src/db.rs:17:16 + | +17 | .first(conn) + | ----- ^^^^ the trait `FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not implemented for `(i32, i32, f64, NaiveDateTime, Value)`, which is required by `SelectStatement, query_builder::select_clause::DefaultSelectClause>, query_builder::distinct_clause::NoDistinctClause, query_builder::where_clause::WhereClause>>>, query_builder::order_clause::NoOrderClause, LimitOffsetClause>, NoOffsetClause>>: LoadQuery<'_, _, _>` + | | + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromStaticSqlRow`: + `(T0,)` implements `FromStaticSqlRow<(ST0,), __DB>` + `(T1, T0)` implements `FromStaticSqlRow<(ST1, ST0), __DB>` + `(T1, T2, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST0), __DB>` + `(T1, T2, T3, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST0), __DB>` + `(T1, T2, T3, T4, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST0), __DB>` + `(T1, T2, T3, T4, T5, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T7, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST7, ST0), __DB>` + and 24 others +note: required for `models::Order` to implement `diesel::Queryable<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + --> src/models.rs:8:10 + | +8 | #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +... 
+11 | pub struct Order { + | ^^^^^ + = note: required for `models::Order` to implement `FromSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + = note: required for `(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable)` to implement `load_dsl::private::CompatibleType` + = note: required for `SelectStatement, DefaultSelectClause>, NoDistinctClause, ..., ..., ...>` to implement `LoadQuery<'_, diesel::PgConnection, models::Order>` +note: required by a bound in `first` + --> /Users/dylan/.cargo/registry/src/index.crates.io-6f17d22bba15001f/diesel-2.2.6/src/query_dsl/mod.rs:1779:22 + | +1776 | fn first<'query, U>(self, conn: &mut Conn) -> QueryResult + | ----- required by a bound in this associated function +... +1779 | Limit: LoadQuery<'query, Conn, U>, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RunQueryDsl::first` + = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-f4ea1891b9fe6645.long-type-13104014775257459898.txt' + = note: consider using `--verbose` to print the full type name to the console + = note: this error originates in the derive macro `Queryable` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `(i32, i32, f64, NaiveDateTime, Value): FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not satisfied + --> src/db.rs:24:15 + | +24 | .load(conn) + | ---- ^^^^ the trait `FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not implemented for `(i32, i32, f64, NaiveDateTime, Value)`, which is required by `SelectStatement, query_builder::select_clause::DefaultSelectClause>, query_builder::distinct_clause::NoDistinctClause, query_builder::where_clause::WhereClause>>>>: LoadQuery<'_, _, _>` + | | + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromStaticSqlRow`: + `(T0,)` implements `FromStaticSqlRow<(ST0,), __DB>` + `(T1, T0)` implements `FromStaticSqlRow<(ST1, ST0), __DB>` + `(T1, T2, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST0), __DB>` + `(T1, T2, T3, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST0), __DB>` + `(T1, T2, T3, T4, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST0), __DB>` + `(T1, T2, T3, T4, T5, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T7, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST7, ST0), __DB>` + and 24 others +note: required for `models::Order` to implement `diesel::Queryable<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + --> src/models.rs:8:10 + | +8 | #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +... 
+11 | pub struct Order {
+   |            ^^^^^
+   = note: required for `models::Order` to implement `FromSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>`
+   = note: required for `(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable)` to implement `load_dsl::private::CompatibleType`
+   = note: required for `SelectStatement, DefaultSelectClause>, NoDistinctClause, WhereClause<...>>` to implement `LoadQuery<'_, diesel::PgConnection, models::Order>`
+note: required by a bound in `diesel::RunQueryDsl::load`
+  --> /Users/dylan/.cargo/registry/src/index.crates.io-6f17d22bba15001f/diesel-2.2.6/src/query_dsl/mod.rs:1542:15
+   |
+1540 | fn load<'query, U>(self, conn: &mut Conn) -> QueryResult>
+   |    ---- required by a bound in this associated function
+1541 | where
+1542 | Self: LoadQuery<'query, Conn, U>,
+   |       ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RunQueryDsl::load`
+   = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-f4ea1891b9fe6645.long-type-451606845660155404.txt'
+   = note: consider using `--verbose` to print the full type name to the console
+   = note: this error originates in the derive macro `Queryable` (in Nightly builds, run with -Z macro-backtrace for more info)
+
+Some errors have detailed explanations: E0271, E0277.
+For more information about an error, try `rustc --explain E0271`.
+warning: `order-diesel-jsonb-example` (bin "order-diesel-jsonb-example") generated 1 warning
+error: could not compile `order-diesel-jsonb-example` (bin "order-diesel-jsonb-example") due to 24 previous errors; 1 warning emitted
+```
+
+## Fix: change `total_amount` type from `f64` to `BigDecimal`
+
+That's a lot of errors; let's fix them step by step.
+
+First, let's look at the errors for the `total_amount` field:
+
+```bash
+error[E0277]: the trait bound `f64: FromSqlRow` is not satisfied
+  --> src/models.rs:14:23
+   |
+14 |     pub total_amount: f64,
+   |                       ^^^ the trait `FromSql` is not implemented for `f64`, which is required by `f64: FromSqlRow`
+   |
+   = note: double check your type mappings via the documentation of `diesel::sql_types::Numeric`
+   = note: `diesel::sql_query` requires the loading target to column names for loading values.
+ You need to provide a type that explicitly derives `diesel::deserialize::QueryableByName` + = help: the trait `FromSql` is implemented for `f64` + = help: for that trait implementation, expected `Double`, found `diesel::sql_types::Numeric` + = note: required for `f64` to implement `diesel::Queryable` + = note: required for `f64` to implement `FromSqlRow` + = help: see issue #48214 + +error[E0277]: the trait bound `f64: diesel::Expression` is not satisfied + --> src/models.rs:8:33 + | +8 | #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)] + | ^^^^^^^^^^ the trait `diesel::Expression` is not implemented for `f64`, which is required by `f64: AsExpression` + | + = help: the following other types implement trait `diesel::Expression`: + &'a T + (T0, T1) + (T0, T1, T2) + (T0, T1, T2, T3) + (T0, T1, T2, T3, T4) + (T0, T1, T2, T3, T4, T5) + (T0, T1, T2, T3, T4, T5, T6) + (T0, T1, T2, T3, T4, T5, T6, T7) + and 137 others + = note: required for `f64` to implement `AsExpression` + = note: this error originates in the derive macro `Insertable` (in Nightly builds, run with -Z macro-backtrace for more info) +``` + +Notice, in migration file, `total_amount` column type is `Decimal`, and diesel will use `Numeric` type in it's SQL query. + +Here is the source code of `diesel::sql_types::Numeric` + +```rust +/// The arbitrary precision numeric SQL type. +/// +/// This type is only supported on PostgreSQL and MySQL. +/// On SQLite, [`Double`] should be used instead. +/// +/// ### [`ToSql`](crate::serialize::ToSql) impls +/// +/// - [`bigdecimal::BigDecimal`] with `feature = ["numeric"]` +/// +/// ### [`FromSql`](crate::deserialize::FromSql) impls +/// +/// - [`bigdecimal::BigDecimal`] with `feature = ["numeric"]` +/// +/// [`bigdecimal::BigDecimal`]: /bigdecimal/struct.BigDecimal.html +#[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] +#[diesel(postgres_type(oid = 1700, array_oid = 1231))] +#[diesel(mysql_type(name = "Numeric"))] +#[diesel(sqlite_type(name = "Double"))] +pub struct Numeric; +``` + +We should tell diesel to use `numeric` type. + +```sql +CREATE TABLE orders ( + id SERIAL PRIMARY KEY, + user_id INTEGER NOT NULL, + total_amount DECIMAL(10, 2) NOT NULL, -- 👈👈👈👈👈👈 🙋 + order_date TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + metadata JSONB +); +``` + +Let's enable `numeric` feature to `diesel` dependency to correctly deserialize `total_amount` field. + +```bash +cargo add diesel -F numeric + Updating crates.io index + Adding diesel v2.2.6 to dependencies + Features: + + 32-column-tables + + chrono + + numeric + + postgres + + postgres_backend + + serde_json + + with-deprecated + - 128-column-tables + - 64-column-tables + - __with_asan_tests + - extras + - huge-tables + - i-implement-a-third-party-backend-and-opt-into-breaking-changes + - ipnet-address + - large-tables + - mysql + - mysql_backend + - mysqlclient-src + - network-address + - pq-src + - quickcheck + - r2d2 + - returning_clauses_for_sqlite_3_35 + - sqlite + - time + - unstable + - uuid + - without-deprecated + Locking 4 packages to latest compatible versions + Adding bigdecimal v0.4.7 + Adding libm v0.2.11 + Adding num-bigint v0.4.6 + Adding num-integer v0.1.46 +``` + +Now, let's build it again. 
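+Note that rebuilding only makes sense once the field itself has been switched over. The full, final struct appears in the next section, but at this point the model looks roughly like this (a sketch; the derives are omitted, and `bigdecimal` is assumed to be available as a direct dependency):
+
+```rust
+use bigdecimal::BigDecimal;
+use chrono::NaiveDateTime;
+use serde_json::Value;
+
+pub struct Order {
+    pub id: i32,
+    pub user_id: i32,
+    // was `f64`; BigDecimal implements FromSql<Numeric, Pg> once the `numeric` feature is on
+    pub total_amount: BigDecimal,
+    pub order_date: NaiveDateTime,
+    pub metadata: Value, // still a bare `Value` at this point -- the source of the next error
+}
+```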
+ +Still lots of errors: + +```bash + Compiling order-diesel-jsonb-example v0.1.0 +warning: unused import: `serde_json::json` + --> src/db.rs:3:5 + | +3 | use serde_json::json; + | ^^^^^^^^^^^^^^^^ + | + = note: `#[warn(unused_imports)]` on by default + +error[E0277]: the trait bound `Value: FromSqlRow, Pg>` is not satisfied + --> src/models.rs:18:19 + | +18 | pub metadata: Value, // This will use JSONB + | ^^^^^ the trait `FromSql, Pg>` is not implemented for `Value`, which is required by `Value: FromSqlRow, Pg>` + | + = note: double check your type mappings via the documentation of `diesel::sql_types::Nullable` + = note: `diesel::sql_query` requires the loading target to column names for loading values. + You need to provide a type that explicitly derives `diesel::deserialize::QueryableByName` + = help: the following other types implement trait `FromSql`: + `Value` implements `FromSql` + `Value` implements `FromSql` + = note: required for `Value` to implement `diesel::Queryable, Pg>` + = note: required for `Value` to implement `FromSqlRow, Pg>` + = help: see issue #48214 + +error[E0277]: the trait bound `(i32, i32, BigDecimal, NaiveDateTime, Value): FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not satisfied + --> src/db.rs:11:21 + | +11 | .get_result(conn) + | ---------- ^^^^ the trait `FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not implemented for `(i32, i32, BigDecimal, NaiveDateTime, Value)`, which is required by `InsertStatement>>, DefaultableColumnInsertValue>>, DefaultableColumnInsertValue, &Value>>>), table>>: LoadQuery<'_, _, _>` + | | + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromStaticSqlRow`: + `(T0,)` implements `FromStaticSqlRow<(ST0,), __DB>` + `(T1, T0)` implements `FromStaticSqlRow<(ST1, ST0), __DB>` + `(T1, T2, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST0), __DB>` + `(T1, T2, T3, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST0), __DB>` + `(T1, T2, T3, T4, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST0), __DB>` + `(T1, T2, T3, T4, T5, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T7, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST7, ST0), __DB>` + and 24 others +note: required for `models::Order` to implement `diesel::Queryable<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + --> src/models.rs:9:10 + | +9 | #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +... 
+12 | pub struct Order { + | ^^^^^ + = note: required for `models::Order` to implement `FromSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + = note: required for `(Integer, Integer, Numeric, Timestamp, Nullable)` to implement `load_dsl::private::CompatibleType` + = note: required for `InsertStatement>` to implement `LoadQuery<'_, diesel::PgConnection, models::Order>` +note: required by a bound in `get_result` + --> /Users/dylan/.cargo/registry/src/index.crates.io-6f17d22bba15001f/diesel-2.2.6/src/query_dsl/mod.rs:1722:15 + | +1720 | fn get_result<'query, U>(self, conn: &mut Conn) -> QueryResult + | ---------- required by a bound in this associated function +1721 | where +1722 | Self: LoadQuery<'query, Conn, U>, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RunQueryDsl::get_result` + = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-cdb3b656aa15b263.long-type-9790004947608049719.txt' + = note: consider using `--verbose` to print the full type name to the console + = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-cdb3b656aa15b263.long-type-2452195376562117312.txt' + = note: consider using `--verbose` to print the full type name to the console + = note: this error originates in the derive macro `Queryable` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `(i32, i32, BigDecimal, NaiveDateTime, Value): FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not satisfied + --> src/db.rs:17:16 + | +17 | .first(conn) + | ----- ^^^^ the trait `FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not implemented for `(i32, i32, BigDecimal, NaiveDateTime, Value)`, which is required by `SelectStatement, query_builder::select_clause::DefaultSelectClause>, query_builder::distinct_clause::NoDistinctClause, query_builder::where_clause::WhereClause>>>, query_builder::order_clause::NoOrderClause, LimitOffsetClause>, NoOffsetClause>>: LoadQuery<'_, _, _>` + | | + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromStaticSqlRow`: + `(T0,)` implements `FromStaticSqlRow<(ST0,), __DB>` + `(T1, T0)` implements `FromStaticSqlRow<(ST1, ST0), __DB>` + `(T1, T2, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST0), __DB>` + `(T1, T2, T3, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST0), __DB>` + `(T1, T2, T3, T4, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST0), __DB>` + `(T1, T2, T3, T4, T5, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T7, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST7, ST0), __DB>` + and 24 others +note: required for `models::Order` to implement `diesel::Queryable<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + --> src/models.rs:9:10 + | +9 | #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +... 
+12 | pub struct Order { + | ^^^^^ + = note: required for `models::Order` to implement `FromSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + = note: required for `(Integer, Integer, Numeric, Timestamp, Nullable)` to implement `load_dsl::private::CompatibleType` + = note: required for `SelectStatement, DefaultSelectClause<...>, ..., ..., ..., ...>` to implement `LoadQuery<'_, diesel::PgConnection, models::Order>` +note: required by a bound in `first` + --> /Users/dylan/.cargo/registry/src/index.crates.io-6f17d22bba15001f/diesel-2.2.6/src/query_dsl/mod.rs:1779:22 + | +1776 | fn first<'query, U>(self, conn: &mut Conn) -> QueryResult + | ----- required by a bound in this associated function +... +1779 | Limit: LoadQuery<'query, Conn, U>, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RunQueryDsl::first` + = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-cdb3b656aa15b263.long-type-12865645728958808655.txt' + = note: consider using `--verbose` to print the full type name to the console + = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-cdb3b656aa15b263.long-type-2452195376562117312.txt' + = note: consider using `--verbose` to print the full type name to the console + = note: this error originates in the derive macro `Queryable` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `(i32, i32, BigDecimal, NaiveDateTime, Value): FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not satisfied + --> src/db.rs:24:15 + | +24 | .load(conn) + | ---- ^^^^ the trait `FromStaticSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` is not implemented for `(i32, i32, BigDecimal, NaiveDateTime, Value)`, which is required by `SelectStatement, query_builder::select_clause::DefaultSelectClause>, query_builder::distinct_clause::NoDistinctClause, query_builder::where_clause::WhereClause>>>>: LoadQuery<'_, _, _>` + | | + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromStaticSqlRow`: + `(T0,)` implements `FromStaticSqlRow<(ST0,), __DB>` + `(T1, T0)` implements `FromStaticSqlRow<(ST1, ST0), __DB>` + `(T1, T2, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST0), __DB>` + `(T1, T2, T3, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST0), __DB>` + `(T1, T2, T3, T4, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST0), __DB>` + `(T1, T2, T3, T4, T5, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST0), __DB>` + `(T1, T2, T3, T4, T5, T6, T7, T0)` implements `FromStaticSqlRow<(ST1, ST2, ST3, ST4, ST5, ST6, ST7, ST0), __DB>` + and 24 others +note: required for `models::Order` to implement `diesel::Queryable<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable), Pg>` + --> src/models.rs:9:10 + | +9 | #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +... 
+12 | pub struct Order {
+   |            ^^^^^
+   = note: required for `models::Order` to implement `FromSqlRow<(Integer, Integer, diesel::sql_types::Numeric, diesel::sql_types::Timestamp, diesel::sql_types::Nullable<diesel::sql_types::Jsonb>), Pg>`
+   = note: required for `(Integer, Integer, Numeric, Timestamp, Nullable<Jsonb>)` to implement `load_dsl::private::CompatibleType`
+   = note: required for `SelectStatement, DefaultSelectClause>, ..., ...>` to implement `LoadQuery<'_, diesel::PgConnection, models::Order>`
+note: required by a bound in `diesel::RunQueryDsl::load`
+  --> /Users/dylan/.cargo/registry/src/index.crates.io-6f17d22bba15001f/diesel-2.2.6/src/query_dsl/mod.rs:1542:15
+   |
+1540 | fn load<'query, U>(self, conn: &mut Conn) -> QueryResult<Vec<U>>
+   |    ---- required by a bound in this associated function
+1541 | where
+1542 |     Self: LoadQuery<'query, Conn, U>,
+   |           ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `RunQueryDsl::load`
+   = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-cdb3b656aa15b263.long-type-17907481964813576637.txt'
+   = note: consider using `--verbose` to print the full type name to the console
+   = note: the full name for the type has been written to '/Users/dylan/projects/order-diesel-jsonb-example/target/debug/deps/order_diesel_jsonb_example-cdb3b656aa15b263.long-type-2452195376562117312.txt'
+   = note: consider using `--verbose` to print the full type name to the console
+   = note: this error originates in the derive macro `Queryable` (in Nightly builds, run with -Z macro-backtrace for more info)
+
+For more information about this error, try `rustc --explain E0277`.
+warning: `order-diesel-jsonb-example` (bin "order-diesel-jsonb-example") generated 1 warning
+error: could not compile `order-diesel-jsonb-example` (bin "order-diesel-jsonb-example") due to 4 previous errors; 1 warning emitted
+```
+
+## Fix: Change `metadata` type from `Value` to `Option<Value>`
+
+Next, let's look at the error for the `metadata` field.
+
+```bash
+> cargo build
+   Compiling order-diesel-jsonb-example v0.1.0
+warning: unused import: `serde_json::json`
+ --> src/db.rs:3:5
+  |
+3 | use serde_json::json;
+  |     ^^^^^^^^^^^^^^^^
+  |
+  = note: `#[warn(unused_imports)]` on by default
+
+error[E0277]: the trait bound `Value: FromSqlRow<Nullable<Jsonb>, Pg>` is not satisfied
+  --> src/models.rs:18:19
+   |
+18 |     pub metadata: Value, // This will use JSONB
+   |                   ^^^^^ the trait `FromSql<Nullable<Jsonb>, Pg>` is not implemented for `Value`, which is required by `Value: FromSqlRow<Nullable<Jsonb>, Pg>`
+   |
+   = note: double check your type mappings via the documentation of `diesel::sql_types::Nullable<Jsonb>`
+   = note: `diesel::sql_query` requires the loading target to column names for loading values.
+           You need to provide a type that explicitly derives `diesel::deserialize::QueryableByName`
+   = help: the following other types implement trait `FromSql<A, DB>`:
+             `Value` implements `FromSql<Json, Pg>`
+             `Value` implements `FromSql<Jsonb, Pg>`
+   = note: required for `Value` to implement `diesel::Queryable<Nullable<Jsonb>, Pg>`
+   = note: required for `Value` to implement `FromSqlRow<Nullable<Jsonb>, Pg>`
+   = help: see issue #48214
+```
+
+The reason for this error is that the `metadata` column in the migration SQL file is nullable (it has no `NOT NULL` constraint), so diesel maps it to `Nullable<Jsonb>`, which does not line up with the non-optional `Value` type in the model definition. The fix is easy: change the data type of `metadata` from `Value` to `Option<Value>`.
+
+```rust
+#[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Debug)]
+#[diesel(table_name = orders)]
+#[diesel(check_for_backend(diesel::pg::Pg))]
+pub struct Order {
+    pub id: i32,
+    pub user_id: i32,
+    // pub total_amount: f64,
+    pub total_amount: BigDecimal,
+    pub order_date: NaiveDateTime,
+    // pub metadata: Value, // This will use JSONB ❌
+    pub metadata: Option<Value>, // This will use JSONB ✅
+}
+```
+
+`metadata` column type:
+
+```sql
+CREATE TABLE orders (
+    id SERIAL PRIMARY KEY,
+    user_id INTEGER NOT NULL,
+    total_amount DECIMAL(10, 2) NOT NULL,
+    order_date TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    metadata JSONB -- 👈👈👈👈👈👈 🙋
+);
+```
+
+Now, let's build it.
+
+```bash
+   Compiling order-diesel-jsonb-example v0.1.0
+warning: unused import: `serde_json::json`
+ --> src/db.rs:3:5
+  |
+3 | use serde_json::json;
+  |     ^^^^^^^^^^^^^^^^
+  |
+  = note: `#[warn(unused_imports)]` on by default
+
+warning: function `get_order_by_id` is never used
+  --> src/db.rs:15:8
+   |
+15 | pub fn get_order_by_id(conn: &mut PgConnection, order_id: i32) -> Result<Order, Error> {
+   |        ^^^^^^^^^^^^^^^
+   |
+   = note: `#[warn(dead_code)]` on by default
+
+warning: function `get_orders_by_user` is never used
+  --> src/db.rs:21:8
+   |
+21 | pub fn get_orders_by_user(conn: &mut PgConnection, user_id_param: i32) -> Result<Vec<Order>, Error> {
+   |        ^^^^^^^^^^^^^^^^^^
+
+warning: `order-diesel-jsonb-example` (bin "order-diesel-jsonb-example") generated 3 warnings (run `cargo fix --bin "order-diesel-jsonb-example"` to apply 1 suggestion)
+```
+
+🎉🎉🎉
+
+We could also add `NOT NULL` to the `metadata` column and keep `metadata` as `Value` in the model definition.
+
+# Create orders
+
+Finally, let's create some orders and see the result in the database.
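+
+The `NewOrder` insertable struct and the `create_order` helper are not shown in this post; below is a minimal sketch consistent with how they are used in `main.rs` later (the names come from that usage, everything else is an assumption):
+
+```rust
+use bigdecimal::BigDecimal;
+use diesel::prelude::*;
+use serde_json::Value;
+
+use crate::models::Order;
+use crate::schema::orders;
+
+// Insertable companion struct: no `id`/`order_date`, the database fills those in
+#[derive(Insertable)]
+#[diesel(table_name = orders)]
+pub struct NewOrder {
+    pub user_id: i32,
+    pub total_amount: BigDecimal, // requires the `numeric` feature enabled above
+    pub metadata: Value,
+}
+
+// INSERT ... RETURNING *, deserialized back into `Order`
+pub fn create_order(conn: &mut PgConnection, new_order: NewOrder) -> QueryResult<Order> {
+    diesel::insert_into(orders::table)
+        .values(&new_order)
+        .get_result(conn)
+}
+```
+
+With these in place, we can run the example: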
+
+```bash
+cargo run --bin order-diesel-jsonb-example
+```
+
+Connect to the database using `psql diesel_demo` and see the result:
+
+```sql
+dylan@/tmp:diesel_demo> \d
++--------+----------------------------+----------+-------+
+| Schema | Name                       | Type     | Owner |
+|--------+----------------------------+----------+-------|
+| public | __diesel_schema_migrations | table    | dylan |
+| public | orders                     | table    | dylan |
+| public | orders_id_seq              | sequence | dylan |
++--------+----------------------------+----------+-------+
+SELECT 3
+Time: 0.008s
+dylan@/tmp:diesel_demo> select * from orders;
++----+---------+--------------+----------------------------+------------------------------------------------------------------------------+
+| id | user_id | total_amount | order_date                 | metadata                                                                     |
+|----+---------+--------------+----------------------------+------------------------------------------------------------------------------|
+| 1  | 1       | 0.80         | 2024-12-17 03:05:10.732408 | {"items": ["book", "pen"], "gift_wrap": true, "shipping_method": "express"} |
++----+---------+--------------+----------------------------+------------------------------------------------------------------------------+
+SELECT 1
+Time: 0.006s
+```
+
+# Query orders
+
+As a next step, let's see how to query orders.
+
+Below is the query result for `SELECT * FROM orders WHERE metadata @> '{"address": "Article Circle Expressway 2"}'`:
+
+```sql
+dylan@/tmp:diesel_demo> select * from orders;
++----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------+
+| id | user_id | total_amount | order_date                 | metadata                                                                                                                |
+|----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------|
+| 1  | 1       | 0.80         | 2024-12-17 03:05:10.732408 | {"items": ["book", "pen"], "gift_wrap": true, "shipping_method": "express"}                                            |
+| 2  | 1       | 0.80         | 2024-12-17 03:08:16.591275 | {"items": ["book", "pen"], "gift_wrap": true, "shipping_method": "express"}                                            |
+| 3  | 1       | 0.80         | 2024-12-17 05:46:41.173109 | {"items": ["book", "pen"], "address": "123 Main St, Anytown, USA", "gift_wrap": true, "shipping_method": "express"}    |
+| 4  | 1       | 0.80         | 2024-12-17 05:47:40.956483 | {"items": ["book", "pen"], "address": "Article Circle Expressway 2", "gift_wrap": true, "shipping_method": "express"}  |
++----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------+
+SELECT 4
+Time: 0.006s
+dylan@/tmp:diesel_demo>
+Time: 0.000s
+dylan@/tmp:diesel_demo>
+Time: 0.000s
+dylan@/tmp:diesel_demo> SELECT * FROM orders WHERE metadata @> '{"address": "Article Circle Expressway 2"}';
++----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------+
+| id | user_id | total_amount | order_date                 | metadata                                                                                                                |
+|----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------|
+| 4  | 1       | 0.80         | 2024-12-17 05:47:40.956483 | {"items": ["book", "pen"], "address": "Article Circle Expressway 2", "gift_wrap": true, "shipping_method": "express"}  |
++----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------+
+SELECT 1
+Time: 0.013s
+```
+
+Let's see how we can achieve the same result using Diesel.
+
+We can use the operators for jsonb types:
+
+```rust
+pub fn get_orders_by_address(
+    conn: &mut PgConnection, metadata: &serde_json::Value,
+) -> QueryResult<Vec<Order>> {
+    use crate::schema::orders::dsl::{metadata as orders_metadata, orders};
+    let query = orders.filter(orders_metadata.contains(metadata));
+    let debug = diesel::debug_query::<diesel::pg::Pg, _>(&query);
+    // (the label says "insert", but this is the SELECT being debugged)
+    println!("The insert query: {:#?}", debug);
+    query.get_results(conn)
+}
+```
+
+The code above uses the `contains` jsonb operator (`@>`) to query the orders by metadata. You can use `{"address": "Article Circle Expressway 2"}` as the criteria.
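+
+Containment is not the only jsonb operator diesel exposes: for example, `has_key` maps to PostgreSQL's `?` operator and checks for a top-level key regardless of its value. A hedged sketch along the same lines as `get_orders_by_address` (the function name here is illustrative, not from the project):
+
+```rust
+use diesel::prelude::*;
+
+use crate::models::Order;
+
+// Fetch orders whose metadata object has a top-level "address" key (jsonb `?`)
+pub fn get_orders_with_address_key(conn: &mut PgConnection) -> QueryResult<Vec<Order>> {
+    use crate::schema::orders::dsl::{metadata, orders};
+    orders.filter(metadata.has_key("address")).get_results(conn)
+}
+```
+
+Now, let's modify `main.rs` to call the `get_orders_by_address` method.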
+ +```rust + +fn main() { + let conn = &mut establish_connection(); + + // // Example usage + // let new_order = models::NewOrder { + // user_id: 1, + // // total_amount: 99.99, + // total_amount: BigDecimal::from_str("0.80").unwrap(), + // metadata: serde_json::json!({ + // "items": ["book", "pen"], + // "shipping_method": "express", + // "gift_wrap": true, + // "address": "Article Circle Expressway 2", + // // "address": "123 Main St, Anytown, USA", + // }), + // }; + + // match db::create_order(conn, new_order) { + // Ok(order) => println!("Created order: {:?}", order), + // Err(e) => eprintln!("Error creating order: {}", e), + // } + + let metadata_address: serde_json::Value = serde_json::json!({"address": "Article Circle Expressway 2"}); + match db::get_orders_by_address(conn, &metadata_address) { + Ok(orders) => println!("Orders by address: {:#?}", orders), + Err(e) => eprintln!("Error getting orders by address: {}", e), + } +} +``` + +Running the query using `contains` jsonb operator(`@>`) gives us the same result. Notice the `@>` operator in the SQL query. + +```bash + Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.46s + Running `target/debug/order-diesel-jsonb-example` +The insert query: Query { + sql: "SELECT \"orders\".\"id\", \"orders\".\"user_id\", \"orders\".\"total_amount\", \"orders\".\"order_date\", \"orders\".\"metadata\" FROM \"orders\" WHERE (\"orders\".\"metadata\" @> $1)", + binds: [ + Object { + "address": String("Article Circle Expressway 2"), + }, + ], +} +Orders by address: [ + Order { + id: 4, + user_id: 1, + total_amount: BigDecimal("80e-2"), + order_date: 2024-12-17T05:47:40.956483, + metadata: Some( + Object { + "address": String("Article Circle Expressway 2"), + "gift_wrap": Bool(true), + "items": Array [ + String("book"), + String("pen"), + ], + "shipping_method": String("express"), + }, + ), + }, +] +``` + +Below is the source code for `contains` method in [diesel source code](https://github.com/diesel-rs/diesel/blob/b705023d85e6ef76292a427aa70f21c28a5902ff/diesel/src/pg/expression/expression_methods.rs#L2771). + +````rust +/// PostgreSQL specific methods present on JSONB expressions. +#[cfg(feature = "postgres_backend")] +pub trait PgJsonbExpressionMethods: Expression + Sized { + /// Creates a PostgreSQL `@>` expression. + /// + /// This operator checks whether left hand side JSONB value contains right hand side JSONB value + /// + /// # Example + /// + /// ```rust + /// # include!("../../doctest_setup.rs"); + /// # + /// # table! 
{
+    /// #     contacts {
+    /// #         id -> Integer,
+    /// #         name -> VarChar,
+    /// #         address -> Jsonb,
+    /// #     }
+    /// # }
+    /// #
+    /// # fn main() {
+    /// #     run_test().unwrap();
+    /// # }
+    /// #
+    /// # #[cfg(feature = "serde_json")]
+    /// # fn run_test() -> QueryResult<()> {
+    /// #     use self::contacts::dsl::*;
+    /// #     let conn = &mut establish_connection();
+    /// #     diesel::sql_query("DROP TABLE IF EXISTS contacts").execute(conn).unwrap();
+    /// #     diesel::sql_query("CREATE TABLE contacts (
+    /// #         id SERIAL PRIMARY KEY,
+    /// #         name VARCHAR NOT NULL,
+    /// #         address JSONB NOT NULL
+    /// #     )").execute(conn).unwrap();
+    /// #
+    /// let easter_bunny_address: serde_json::Value = serde_json::json!({
+    ///     "street": "123 Carrot Road",
+    ///     "province": "Easter Island",
+    ///     "region": "Valparaíso",
+    ///     "country": "Chile",
+    ///     "postcode": "88888",
+    /// });
+    /// diesel::insert_into(contacts)
+    ///     .values((name.eq("Bunny"), address.eq(&easter_bunny_address)))
+    ///     .execute(conn)?;
+    ///
+    /// let country_chile: serde_json::Value = serde_json::json!({"country": "Chile"});
+    /// let contains_country_chile = contacts.select(address.contains(&country_chile)).get_result::<bool>(conn)?;
+    /// assert!(contains_country_chile);
+    /// # Ok(())
+    /// # }
+    /// # #[cfg(not(feature = "serde_json"))]
+    /// # fn run_test() -> QueryResult<()> {
+    /// #     Ok(())
+    /// # }
+    /// ```
+    fn contains<T>(self, other: T) -> dsl::Contains<Self, T>
+    where
+        Self::SqlType: SqlType,
+        T: AsExpression<Self::SqlType>,
+    {
+        Grouped(Contains::new(self, other.as_expression()))
+    }
+}
+````
+
+# Update order
+
+Now, let's update the order by filtering on the address.
+
+```rust
+// write update order by address function
+pub fn update_order_by_address(
+    conn: &mut PgConnection, address: &str, new_amount: BigDecimal,
+) -> QueryResult<usize> {
+    use crate::schema::orders::dsl::{metadata, orders, total_amount};
+
+    let query = diesel::update(orders)
+        .filter(metadata.contains(json!({ "address": address })))
+        .set(total_amount.eq(new_amount));
+
+    let debug = diesel::debug_query::<diesel::pg::Pg, _>(&query);
+    println!("The update query: {:#?}", debug);
+
+    query.execute(conn)
+}
+```
+
+This updates every order whose `metadata` contains the given address and returns the number of affected rows.
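+
+A side note: `execute` only returns the affected-row count. If you want the updated rows back, PostgreSQL's `RETURNING` clause is available by ending the same statement with `get_results` instead; a hedged sketch (the function name is illustrative):
+
+```rust
+use bigdecimal::BigDecimal;
+use diesel::prelude::*;
+use serde_json::json;
+
+use crate::models::Order;
+
+// Same update, but RETURNING the affected rows instead of a row count
+pub fn update_order_by_address_returning(
+    conn: &mut PgConnection, address: &str, new_amount: BigDecimal,
+) -> QueryResult<Vec<Order>> {
+    use crate::schema::orders::dsl::{metadata, orders, total_amount};
+
+    diesel::update(orders)
+        .filter(metadata.contains(json!({ "address": address })))
+        .set(total_amount.eq(new_amount))
+        .get_results(conn)
+}
+```
+
+Now, let's modify `main.rs` to call the `update_order_by_address` method.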
+
+```rust
+fn main() {
+    let conn = &mut establish_connection();
+    // // Example usage
+    // let new_order = models::NewOrder {
+    //     user_id: 1,
+    //     // total_amount: 99.99,
+    //     total_amount: BigDecimal::from_str("0.80").unwrap(),
+    //     metadata: serde_json::json!({
+    //         "items": ["book", "pen"],
+    //         "shipping_method": "express",
+    //         "gift_wrap": true,
+    //         "address": "Article Circle Expressway 2",
+    //         // "address": "123 Main St, Anytown, USA",
+    //     }),
+    // };
+
+    // match db::create_order(conn, new_order) {
+    //     Ok(order) => println!("Created order: {:?}", order),
+    //     Err(e) => eprintln!("Error creating order: {}", e),
+    // }
+
+
+    // Query
+    // let metadata_address: serde_json::Value = serde_json::json!({"address": "Article Circle Expressway 2"});
+    // match db::get_orders_by_address(conn, &metadata_address) {
+    //     Ok(orders) => println!("Orders by address: {:#?}", orders),
+    //     Err(e) => eprintln!("Error getting orders by address: {}", e),
+    // }
+
+    // Update
+    let address = "Article Circle Expressway 2";
+    let new_amount = BigDecimal::from_f64(1234.56).unwrap();
+    match db::update_order_by_address(conn, address, new_amount) {
+        Ok(updated) => println!("Updated orders: {}", updated),
+        Err(e) => eprintln!("Error updating orders: {}", e),
+    }
+}
+```
+
+Below are the data changes after the update:
+
+```sql
+dylan@/tmp:diesel_demo> SELECT * FROM orders WHERE metadata @> '{"address": "Article Circle Expressway 2"}';
++----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------+
+| id | user_id | total_amount | order_date                 | metadata                                                                                                                |
+|----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------|
+| 4  | 1       | 0.80         | 2024-12-17 05:47:40.956483 | {"items": ["book", "pen"], "address": "Article Circle Expressway 2", "gift_wrap": true, "shipping_method": "express"}  |
++----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------+
+SELECT 1
+Time: 0.017s
+dylan@/tmp:diesel_demo>
+Time: 0.000s
+dylan@/tmp:diesel_demo>
+Time: 0.000s
+dylan@/tmp:diesel_demo>
+Time: 0.000s
+dylan@/tmp:diesel_demo> SELECT * FROM orders WHERE metadata @> '{"address": "Article Circle Expressway 2"}';
++----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------+
+| id | user_id | total_amount | order_date                 | metadata                                                                                                                |
+|----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------|
+| 4  | 1       | 1234.56      | 2024-12-17 05:47:40.956483 | {"items": ["book", "pen"], "address": "Article Circle Expressway 2", "gift_wrap": true, "shipping_method": "express"}  |
++----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------+
+SELECT 1
+Time: 0.007s
+```
+
+# Delete order
+
+Let's write the code to delete the order, again filtering by address.
+
+```rust
+// write delete order by address function
+pub fn delete_order_by_address(
+    conn: &mut PgConnection, address: &str,
+) -> QueryResult<usize> {
+    use crate::schema::orders::dsl::{metadata, orders};
+
+    let query = diesel::delete(orders)
+        .filter(metadata.contains(json!({ "address": address })));
+
+    let debug = diesel::debug_query::<diesel::pg::Pg, _>(&query);
+    println!("The delete query: {:#?}", debug);
+
+    query.execute(conn)
+}
+```
+
+Let's modify `main.rs` to call the `delete_order_by_address` method.
+
+```rust
+fn main() {
+    let conn = &mut establish_connection();
+
+    // // Example usage
+    // let new_order = models::NewOrder {
+    //     user_id: 1,
+    //     // total_amount: 99.99,
+    //     total_amount: BigDecimal::from_str("0.80").unwrap(),
+    //     metadata: serde_json::json!({
+    //         "items": ["book", "pen"],
+    //         "shipping_method": "express",
+    //         "gift_wrap": true,
+    //         "address": "Article Circle Expressway 2",
+    //         // "address": "123 Main St, Anytown, USA",
+    //     }),
+    // };
+
+    // match db::create_order(conn, new_order) {
+    //     Ok(order) => println!("Created order: {:?}", order),
+    //     Err(e) => eprintln!("Error creating order: {}", e),
+    // }
+
+
+    // Query
+    // let metadata_address: serde_json::Value = serde_json::json!({"address": "Article Circle Expressway 2"});
+    // match db::get_orders_by_address(conn, &metadata_address) {
+    //     Ok(orders) => println!("Orders by address: {:#?}", orders),
+    //     Err(e) => eprintln!("Error getting orders by address: {}", e),
+    // }
+
+    // Update
+    // let address = "Article Circle Expressway 2";
+    // let new_amount = BigDecimal::from_f64(1234.56).unwrap();
+    // match db::update_order_by_address(conn, address, new_amount) {
+    //     Ok(orders) => println!("Orders by address: {:#?}", orders),
+    //     Err(e) => eprintln!("Error getting orders by address: {}", e),
+    // }
+
+    // Delete
+    let address = "Article Circle Expressway 2";
+    match db::delete_order_by_address(conn, address) {
+        Ok(deleted) => println!("Deleted orders: {}", deleted),
+        Err(e) => eprintln!("Error deleting orders: {}", e),
+    }
+}
+```
+
+Below are the data changes after the delete:
+
+```sql
+dylan@/tmp:diesel_demo> SELECT * FROM orders WHERE metadata @> '{"address": "Article Circle Expressway 2"}';
++----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------+
+| id | user_id | total_amount | order_date                 | metadata                                                                                                                |
+|----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------|
+| 4  | 1       | 1234.56      | 2024-12-17 05:47:40.956483 | {"items": ["book", "pen"], "address": "Article Circle Expressway 2", "gift_wrap": true, "shipping_method": "express"}  |
++----+---------+--------------+----------------------------+-----------------------------------------------------------------------------------------------------------------------+
+SELECT 1
+Time: 0.007s
+dylan@/tmp:diesel_demo>
+Time: 0.000s
+dylan@/tmp:diesel_demo>
+Time: 0.000s
+dylan@/tmp:diesel_demo>
+Time: 0.000s
+dylan@/tmp:diesel_demo> SELECT * FROM orders WHERE metadata @> '{"address": "Article Circle Expressway 2"}';
++----+---------+--------------+------------+----------+
+| id | user_id | total_amount | order_date | metadata |
+|----+---------+--------------+------------+----------|
++----+---------+--------------+------------+----------+
+SELECT 0
+Time: 0.006s
+```
+
+# Summary
+
+In this demo, we covered how to set up diesel, write the migration SQL, run migrations, and create, query, update, and delete orders.
+
+We learned how to use Diesel to query orders by metadata using the `@>` jsonb operator.
+
+We also saw how to update and delete orders by filtering on the address inside the jsonb metadata.
+
+# Refs
+
+Diesel schema
+https://diesel.rs/guides/schema-in-depth.html
+
+Diesel getting started
+https://diesel.rs/guides/getting-started
diff --git a/src/rust/error/how-to-organise-application-error-in-actix-web-application.md b/src/rust/error/how-to-organise-application-error-in-actix-web-application.md
new file mode 100644
index 0000000..97b0d71
--- /dev/null
+++ b/src/rust/error/how-to-organise-application-error-in-actix-web-application.md
@@ -0,0 +1,341 @@
+# How to organise application Error in actix-web application
+
+- [Intro](#intro)
+- [Actix-web http layer](#actix-web-http-layer)
+- [Solution](#solution)
+  - [Solution 1: implement `From` trait manually](#solution-1-implement-from-trait-manually)
+  - [Solution 2: use thiserror crate](#solution-2-use-thiserror-crate)
+
+# Intro
+
+In this article, we will learn how to organise the application `Error` type in an actix-web application.
+
+Every backend application has a DAO (data access object) layer and an HTTP layer.
+
+For the DAO layer, we usually use the library's `Result` as the return type. Below is an example of fetching all users using the `clickhouse-rs` crate:
+
+```rust
+use clickhouse::error::{Error, Result};
+
+pub async fn get_all_users(client: &Client) -> Result<Vec<User>> {
+    let users = client
+        .query("SELECT ?fields FROM users")
+        .fetch_all::<User>()
+        .await?;
+
+    Ok(users)
+}
+```
+
+Notice that this `Result` is not the plain `std::result::Result` type; it is an alias that uses `clickhouse::error::Error` as the default error type:
+
+```rust
+use std::{error::Error as StdError, fmt, io, result, str::Utf8Error};
+
+use serde::{de, ser};
+
+/// A result with a specified [`Error`] type.
+pub type Result<T, E = Error> = result::Result<T, E>;
+
+
+/// Represents all possible errors.
+#[derive(Debug, thiserror::Error)]
+#[non_exhaustive]
+#[allow(missing_docs)]
+pub enum Error {
+    #[error("invalid params: {0}")]
+    InvalidParams(#[source] Box<dyn StdError + Send + Sync>),
+    #[error("network error: {0}")]
+    Network(#[source] Box<dyn StdError + Send + Sync>),
+    #[error("compression error: {0}")]
+    Compression(#[source] Box<dyn StdError + Send + Sync>),
+    #[error("decompression error: {0}")]
+    Decompression(#[source] Box<dyn StdError + Send + Sync>),
+    #[error("no rows returned by a query that expected to return at least one row")]
+    RowNotFound,
+    #[error("sequences must have a known size ahead of time")]
+    SequenceMustHaveLength,
+    #[error("`deserialize_any` is not supported")]
+    DeserializeAnyNotSupported,
+    #[error("not enough data, probably a row type mismatches a database schema")]
+    NotEnoughData,
+    #[error("string is not valid utf8")]
+    InvalidUtf8Encoding(#[from] Utf8Error),
+    #[error("tag for enum is not valid")]
+    InvalidTagEncoding(usize),
+    #[error("a custom error message from serde: {0}")]
+    Custom(String),
+    #[error("bad response: {0}")]
+    BadResponse(String),
+    #[error("timeout expired")]
+    TimedOut,
+
+    // Internally handled errors, not part of public API.
+    // XXX: move to another error?
+    #[error("internal error: too small buffer, need another {0} bytes")]
+    #[doc(hidden)]
+    TooSmallBuffer(usize),
+}
+```
+
+# Actix-web http layer
+
+The actix-web framework requires the error type to be `actix_web::error::Error` when `actix_web::Result` is used as the return type.
+
+The `Result` type in actix-web:
+
+```rust
+pub use self::error::Error;
+pub use self::internal::*;
+pub use self::response_error::ResponseError;
+pub(crate) use macros::{downcast_dyn, downcast_get_type_id};
+
+/// A convenience [`Result`](std::result::Result) for Actix Web operations.
+///
+/// This type alias is generally used to avoid writing out `actix_http::Error` directly.
+pub type Result<T, E = Error> = std::result::Result<T, E>;
+```
+
+The `Error` type in actix-web:
+
+```rust
+/// General purpose Actix Web error.
+///
+/// An Actix Web error is used to carry errors from `std::error` through actix in a convenient way.
+/// It can be created through converting errors with `into()`.
+///
+/// Whenever it is created from an external object a response error is created for it that can be
+/// used to create an HTTP response from it this means that if you have access to an actix `Error`
+/// you can always get a `ResponseError` reference from it.
+pub struct Error {
+    cause: Box<dyn ResponseError>,
+}
+```
+
+Normally, we write an actix-web handler and call DAO functions like this:
+
+```rust
+pub async fn get_all_users(data: web::Data<AppState>) -> actix_web::Result<impl Responder> {
+    let db = &data.db;
+
+    // call dao function to fetch data
+    let users = users::get_all_users(db).await?;
+    Ok(web::Json(users))
+}
+```
+
+If you write an http handler like this and call a DAO function (i.e. `dao::get_all_users`) that returns the DAO crate's error type, compilation fails:
+
+```rust
+  --> src/user/http.rs:348:54
+   |
+348 |     let users = users::get_all(db).await?;
+   |                                          ^ the trait `ResponseError` is not implemented for `clickhouse::error::Error`
+   |
+   = help: the following other types implement trait `ResponseError`:
+             AppError
+             BlockingError
+             Box<(dyn StdError + 'static)>
+             HttpError
+             Infallible
+             InvalidHeaderValue
+             JsonPayloadError
+             PathError
+           and 17 others
+   = note: required for `actix_web::Error` to implement `std::convert::From<clickhouse::error::Error>`
+   = note: required for `Result<_, actix_web::Error>` to implement `FromResidual<Result<Infallible, clickhouse::error::Error>>`
+```
+
+It means that you cannot convert `clickhouse::error::Error` to `actix_web::Error`.
+
+The reason is that the error type in the DAO function is `clickhouse::error::Error`, while in the http handler it is `actix_web::Error`. You have to bridge the two error types yourself, as the compiler output suggests.
+
+# Solution
+
+## Solution 1: implement `From` trait manually
+
+One possible solution is to define an application error and implement the `From` trait to convert `clickhouse::error::Error` into `AppError`:
+
+```rust
+// Define your application error in enum
+#[derive(Debug)]
+pub enum AppError {
+    ClickhouseError(ClickhouseError),
+    // ... other error variants
+}
+
+impl ResponseError for AppError {
+    fn error_response(&self) -> HttpResponse {
+        HttpResponse::InternalServerError().body(self.to_string())
+        // We can also use match to handle specific errors defined in clickhouse::error::Error
+        // match *self {
+        //     AppError::ClickhouseError(ref err) => match err {
+        //         ClickhouseError::Timeout(err) => HttpResponse::InternalServerError()
+        //             .body(format!("Clickhouse server error: {}", err)),
+        //         ClickhouseError::Network(err) => {
+        //             HttpResponse::BadRequest().body(format!("Clickhouse client error: {}", err))
+        //         }
+        //         _ => HttpResponse::InternalServerError().body("Unknown error"),
+        //     }, // ... handle other error variants
+        // }
+    }
+}
+
+use std::fmt;
+
+impl fmt::Display for AppError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            AppError::ClickhouseError(ref err) => {
+                write!(f, "Clickhouse error: {}", err)
+            } // ... handle other error variants
+        }
+    }
+}
+
+impl From<ClickhouseError> for AppError {
+    fn from(error: ClickhouseError) -> Self {
+        AppError::ClickhouseError(error)
+    }
+}
+```
+
+Finally, you need to modify the actix-web handler:
+
+```rust
+use clickhouse::error::{Error, Result};
+
+// Previous function signature
+// pub async fn get_all_users(data: web::Data<AppState>) -> actix_web::Result<impl Responder> {
+// Pass AppError to the handler's return type
+pub async fn get_all_users(data: web::Data<AppState>) -> actix_web::Result<impl Responder, AppError> {
+    let db = &data.db;
+
+    // call dao function to fetch data
+    let users = users::get_all_users(db).await?;
+    Ok(web::Json(users))
+}
+```
+
+🎉🎉🎉
+
+## Solution 2: use thiserror crate
+
+Another solution is to use the thiserror crate.
+
+This crate will automatically implement the `From` trait, which will do the conversion.
+
+```rust
+use thiserror::Error;
+use clickhouse::error::Error as ClickhouseError;
+use actix_web::{HttpResponse, ResponseError};
+
+#[derive(Debug, Error)]
+pub enum AppError {
+    #[error("Clickhouse error: {0}")]
+    ClickhouseError(#[from] ClickhouseError),
+
+    // You can add more error variants as needed
+    #[error("Database connection error")]
+    DatabaseConnectionError,
+
+    #[error("Internal server error: {0}")]
+    InternalError(String),
+}
+
+impl ResponseError for AppError {
+    fn error_response(&self) -> HttpResponse {
+        match self {
+            AppError::ClickhouseError(_) => {
+                HttpResponse::InternalServerError().body(self.to_string())
+            }
+            AppError::DatabaseConnectionError => {
+                HttpResponse::ServiceUnavailable().body(self.to_string())
+            }
+            AppError::InternalError(_) => {
+                HttpResponse::InternalServerError().body(self.to_string())
+            }
+        }
+    }
+}
+```
+
+Here are the key improvements with `thiserror`:
+
+- The `#[derive(Error)]` automatically implements `std::error::Error`.
+- `#[error("...")]` provides a convenient way to implement the `Display` trait.
+- The `#[from]` attribute automatically implements the `From` trait for error conversion.
+- The code is more concise and readable.
+- You can easily add more error variants with custom error messages.
+
+Benefits of this approach:
+
+- Automatic error conversion
+- Clear, descriptive error messages
+- Easy to extend with new error types
+- Consistent error handling across the application
+
+The `ResponseError` implementation allows you to:
+
+- Map different error types to appropriate HTTP status codes
+- Provide meaningful error responses
+- Easily customize error handling for different error variants
+
+Note: Make sure to import the necessary types and traits from the appropriate modules (actix-web, thiserror, etc.).
+
+Below is a full example using the thiserror crate:
+
+```rust
+// use actix_web::{get, post, web, App, HttpRequest, HttpResponse, HttpServer, Responder};
+use actix_web::{web, App, HttpServer, Responder};
+use clickhouse::Client;
+use clickhouse_example::{dao::get_all_users, error::AppError};
+
+// This struct represents state
+pub(crate) struct AppState {
+    pub app_name: String,
+    pub db: Client,
+}
+
+// NOTE: This function is not working because of the error type mismatch
+// async fn get_users(
+//     data: web::Data<AppState>,
+// ) -> actix_web::Result<impl Responder> {
+//     let db = &data.db;
+
+//     // call dao function to fetch data
+//     let users = get_all_users(db).await?;
+//     Ok(web::Json(users))
+// }
+
+// Handler function
+pub(crate) async fn get_users(data: web::Data<AppState>) -> actix_web::Result<impl Responder, AppError> {
+    let db = &data.db;
+
+    // call dao function to fetch data
+    let users = get_all_users(db).await?;
+    Ok(web::Json(users))
+}
+
+#[actix_web::main]
+async fn main() -> std::io::Result<()> {
+    let url = "http://localhost:8123";
+    let database = "default";
+    let user = "test";
+    let password = "secret";
+    let client = Client::default()
+        .with_url(url)
+        .with_user(user)
+        .with_password(password)
+        .with_database(database);
+
+    HttpServer::new(move || {
+        App::new()
+            .app_data(web::Data::new(AppState { db: client.clone(), app_name: "My App".into() }))
+            .route("/users", web::get().to(get_users))
+    })
+    .bind(("127.0.0.1", 8080))?
+    .run()
+    .await
+}
+```
diff --git a/src/rust/error/return-error-when-unwrap-option-when-none.md b/src/rust/error/return-error-when-unwrap-option-when-none.md
new file mode 100644
index 0000000..7765822
--- /dev/null
+++ b/src/rust/error/return-error-when-unwrap-option-when-none.md
@@ -0,0 +1,161 @@
+# Return error when unwrap Option when None
+
+- [Intro](#intro)
+- [Application error](#application-error)
+- [Function returns Result](#function-returns-result)
+- [Solution](#solution)
+  - [Use `match`](#use-match)
+  - [Use `ok_or_else`](#use-ok_or_else)
+
+# Intro
+
+In this blog, we will learn how to handle optional values and return errors in actix-web handlers.
+
+In Rust, dealing with optional values (`Option<T>`) and converting them to errors in web handlers is a common task. This blog explores different strategies for handling cases where an expected value is `None`.
+
+When working with optional values, you have several idiomatic Rust approaches:
+
+- Using `match` to Convert `None` to an Error
+
+  - Explicitly match on the `Option` type
+  - Explicitly return an error when the value is `None`
+
+- Using `ok_or_else()` Method
+  - Provides a concise way to convert `Option<T>` to `Result<T, E>`
+  - Allows lazy error generation
+  - Avoids unnecessary error creation if not needed
+
+Let's explore these approaches with practical examples.
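+
+As a quick, self-contained illustration of the two approaches before the actix-web specifics, here is a sketch using a hypothetical `MyError` type (everything in it is illustrative):
+
+```rust
+#[derive(Debug)]
+struct MyError(String);
+
+// Approach 1: explicitly match on the Option and turn None into an error
+fn with_match(opt: Option<&str>) -> Result<&str, MyError> {
+    match opt {
+        Some(v) => Ok(v),
+        None => Err(MyError("value is empty".to_string())),
+    }
+}
+
+// Approach 2: ok_or_else converts Option -> Result, building the error lazily
+fn with_ok_or_else(opt: Option<&str>) -> Result<&str, MyError> {
+    opt.ok_or_else(|| MyError("value is empty".to_string()))
+}
+
+fn main() {
+    assert_eq!(with_match(Some("x")).unwrap(), "x");
+    assert!(with_ok_or_else(None).is_err());
+}
+```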
+
+# Application error
+
+Suppose we define our application error like this:
+
+```rust
+use actix_web::{HttpResponse, ResponseError};
+use clickhouse::error::Error as ClickhouseError;
+use std::fmt;
+
+#[derive(Debug)]
+pub enum AppError {
+    ClickhouseError(ClickhouseError),
+    ScheduleError(String),
+    SQLGenError(String),
+}
+
+impl ResponseError for AppError {
+    fn error_response(&self) -> HttpResponse {
+        HttpResponse::InternalServerError().body(self.to_string())
+        // match *self {
+        //     AppError::ClickhouseError(ref err) => match err {
+        //         ClickhouseError::Server(err) => HttpResponse::InternalServerError()
+        //             .body(format!("Clickhouse server error: {}", err)),
+        //         ClickhouseError::Client(err) => {
+        //             HttpResponse::BadRequest().body(format!("Clickhouse client error: {}", err))
+        //         }
+        //         _ => HttpResponse::InternalServerError().body("Unknown error"),
+        //     }, // ... handle other error variants
+        // }
+    }
+}
+
+impl fmt::Display for AppError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            AppError::ClickhouseError(ref err) => {
+                write!(f, "Clickhouse error: {}", err)
+            }
+            AppError::ScheduleError(ref err) => {
+                write!(f, "Schedule error: {}", err)
+            }
+            AppError::SQLGenError(ref err) => {
+                write!(f, "SQLGen error: {}", err)
+            }
+        }
+    }
+}
+
+impl From<ClickhouseError> for AppError {
+    fn from(error: ClickhouseError) -> Self {
+        AppError::ClickhouseError(error)
+    }
+}
+```
+
+# Function returns Result
+
+If we have a function which returns a `Result` type:
+
+```rust
+fn sql_gen_visualization_barchart(
+    query: &VisualizationQuery,
+    table_name: &str,
+    //) -> Result<String, Box<dyn Error>> {
+    //) -> anyhow::Result<String> {
+) -> anyhow::Result<String, AppError> {
+    // gen sql for visualization
+    let mut sql = String::new();
+    let field = query.field.as_ref();
+    // ...
+}
+```
+
+# Solution
+
+If we would like to return an error when one of the parameters is empty, we can do it as follows.
+
+## Use `match`
+
+```rust
+let field = match field {
+    Some(field) => field,
+    None => return Err(AppError::SQLGenError("Field is empty".to_string())),
+};
+```
+
+By using `match`, we can easily return an error when `field` is `None`.
+
+## Use `ok_or_else`
+
+If we don't like the `match`, we can leverage the `ok_or_else` method, which achieves the same thing:
+
+```rust
+let field = query
+    .field
+    .as_ref()
+    .ok_or_else(|| AppError::SQLGenError("Field is empty".to_string()))?;
+```
+
+Below is the source code of the `ok_or_else` method:
+
+````rust
+impl<T> Option<T> {
+    /// Transforms the `Option<T>` into a [`Result<T, E>`], mapping [`Some(v)`] to
+    /// [`Ok(v)`] and [`None`] to [`Err(err())`].
+    ///
+    /// [`Ok(v)`]: Ok
+    /// [`Err(err())`]: Err
+    /// [`Some(v)`]: Some
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let x = Some("foo");
+    /// assert_eq!(x.ok_or_else(|| 0), Ok("foo"));
+    ///
+    /// let x: Option<&str> = None;
+    /// assert_eq!(x.ok_or_else(|| 0), Err(0));
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn ok_or_else<E, F>(self, err: F) -> Result<T, E>
+    where
+        F: FnOnce() -> E,
+    {
+        match self {
+            Some(v) => Ok(v),
+            None => Err(err()),
+        }
+    }
+}
+````
diff --git a/src/rust/grpc/rust-grpc-helloworld.md b/src/rust/grpc/rust-grpc-helloworld.md
new file mode 100644
index 0000000..b372ec1
--- /dev/null
+++ b/src/rust/grpc/rust-grpc-helloworld.md
@@ -0,0 +1,531 @@
+# Rust grpc helloworld
+
+- [Init the project](#init-the-project)
+- [Write protocol file](#write-protocol-file)
+- [Write build.rs](#write-buildrs)
+- [Write helloworld grpc server](#write-helloworld-grpc-server)
+- [Write helloworld grpc client](#write-helloworld-grpc-client)
+- [Run helloworld grpc server](#run-helloworld-grpc-server)
+- [Run helloworld grpc client](#run-helloworld-grpc-client)
+- [Generated helloworld.rs](#generated-helloworldrs)
+
+In this tutorial, we'll walk you through how to set up a gRPC server in Rust using the `tonic` crate.
+
+# Init the project
+
+First, we'll create a new project using `cargo init`.
+
+```bash
+cargo init tonic_example
+```
+
+Let's see the structure of the directory.
+
+```bash
+❯ tree
+.
+├── Cargo.toml
+└── src
+    └── main.rs
+
+2 directories, 2 files
+
+~/tmp/tonic_example master*
+
+❯ ct Cargo.toml
+[package]
+name = "tonic_example"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+```
+
+As we'll build a grpc service, we can use the `tonic` crate, which is a Rust implementation of gRPC.
+
+# Write protocol file
+
+We'll create a simple greeter service and put the proto file in the `proto` directory.
+
+```bash
+mkdir proto
+touch proto/helloworld.proto
+```
+
+Here's the content of the `helloworld.proto` file, in which we define the `HelloRequest` and `HelloReply` types and the `Greeter` service.
+
+```proto
+syntax = "proto3";
+package helloworld;
+
+service Greeter {
+    // Our SayHello rpc accepts HelloRequests and returns HelloReplies
+    rpc SayHello (HelloRequest) returns (HelloReply);
+}
+
+message HelloRequest {
+    // Request message contains the name to be greeted
+    string name = 1;
+}
+
+message HelloReply {
+    // Reply contains the greeting message
+    string message = 1;
+}
+```
+
+Here's an explanation of the code:
+
+- `syntax = "proto3";`: This line indicates that you are using version 3 of the protobuf language.
+- `package helloworld;`: This line defines the package name for the service. It helps to prevent name clashes between protobuf messages.
+
+The service definition starts with this line: `service Greeter {`. Here are the key points:
+
+- `Greeter`: This is the name of the service (essentially an API) you are defining.
+- The service `Greeter` has a single method `SayHello` which is defined as: `rpc SayHello (HelloRequest) returns (HelloReply);`
+  - `SayHello`: This is the name of the function that will be exposed to clients on the gRPC server.
+  - `(HelloRequest)`: This denotes the input parameters of the method. It takes in a single parameter of the type `HelloRequest`.
+  - `returns (HelloReply)`: This shows that the function returns a `HelloReply` message.
+
+The protocol buffer message types are defined with this code:
+
+- `.message HelloRequest`: The `HelloRequest` message has a single field `name` of type `string`.
The `= 1;` bit is a unique number used to identify the field in the message binary format. +- `.message HelloReply`: The `HelloReply` message also has a single field message also of type `string`. + +In a nutshell, you have defined a `Greeter` service that has a `SayHello` method expecting a `HelloRequest` that contains a name and returns a `HelloReply` containing the message. It's analogous to defining a REST API endpoint but in the gRPC and protocol buffers context. + +# Write build.rs + +In order to build protobuf file, we need to install the `protoc` protocol buffers compiler. On MacOS, we can install by using this command: + +```bash +brew install protobuf +``` + +# Write helloworld grpc server + +Now, let's write server side code. + +Create a file `helloworld-server.rs` in `src/bin` directory: `touch src/bin/helloworld-server.rs`. + +```rust +use tonic::{transport::Server, Request, Response, Status}; + +use hello_world::{ + greeter_server::{Greeter, GreeterServer}, + HelloReply, HelloRequest, +}; + +pub mod hello_world { + tonic::include_proto!("helloworld"); +} + +#[derive(Debug, Default)] +pub struct MyGreeter {} + +#[tonic::async_trait] +impl Greeter for MyGreeter { + async fn say_hello( + &self, request: Request, + ) -> Result, Status> { + // println!("Got a request: {:?}", request); + + let reply = hello_world::HelloReply { + message: format!("Hello {}!", request.into_inner().name).into(), + }; + + Ok(Response::new(reply)) + } +} + +#[tokio::main] +// #[tokio::main(core_threads = 16, max_threads = 32)] +async fn main() -> Result<(), Box> { + // NOTE: This works! + let addr = "[::1]:50051".parse()?; + // NOTE❌: This does NOT works! ConnectionRefused error + // let addr = "0.0.0.0:50051".parse()?; + let greeter = MyGreeter::default(); + + Server::builder() + .add_service(GreeterServer::new(greeter)) + .serve(addr) + .await?; + + Ok(()) +} +``` + +# Write helloworld grpc client + +Now, let's write client side code. + +Create a file `helloworld-client.rs` in `src/bin` directory: `touch src/bin/helloworld-client.rs`. + +```rust +use hello_world::greeter_client::GreeterClient; +use hello_world::HelloRequest; + +pub mod hello_world { + tonic::include_proto!("helloworld"); +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let mut client = GreeterClient::connect("http://[::1]:50051").await?; + + let request = tonic::Request::new(HelloRequest { name: "Tonic".into() }); + + let response = client.say_hello(request).await?; + + println!("RESPONSE={:?}", response); + + Ok(()) +} +``` + +# Run helloworld grpc server + +Now, let's run the grpc server. + +```bash +cargo run --bin helloworld-server +``` + +# Run helloworld grpc client + +While the server is up and running, we can run the grpc client to send request to the server. + +```bash +cargo run --bin helloworld-client +``` + +Output: + +```bash + Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.11s + Running `target/debug/helloworld-client` +RESPONSE=Response { + metadata: MetadataMap { + headers: { + "content-type": "application/grpc", + "date": "Wed, 03 Apr 2024 17:56:21 GMT", + "grpc-status": "0", + }, + }, + message: HelloReply { + message: "Hello Tonic!", + }, + extensions: Extensions, +} +``` + +# Generated helloworld.rs + +If you're interested in what the generated file looks like, you can refer to `helloworld.rs` file which is located in `target/debug/build/tonic_example-4094918d1c86be5c/out` directory. + +Below is the contents of the file. 
+ +```rust +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HelloRequest { + /// Request message contains the name to be greeted + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HelloReply { + /// Reply contains the greeting message + #[prost(string, tag = "1")] + pub message: ::prost::alloc::string::String, +} +/// Generated client implementations. +pub mod greeter_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct GreeterClient { + inner: tonic::client::Grpc, + } + impl GreeterClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl GreeterClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> GreeterClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + GreeterClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Our SayHello rpc accepts HelloRequests and returns HelloReplies + pub async fn say_hello( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/helloworld.Greeter/SayHello", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("helloworld.Greeter", "SayHello")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. 
+pub mod greeter_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with GreeterServer. + #[async_trait] + pub trait Greeter: Send + Sync + 'static { + /// Our SayHello rpc accepts HelloRequests and returns HelloReplies + async fn say_hello( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + } + #[derive(Debug)] + pub struct GreeterServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl GreeterServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for GreeterServer + where + T: Greeter, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/helloworld.Greeter/SayHello" => { + #[allow(non_camel_case_types)] + struct SayHelloSvc(pub Arc); + impl tonic::server::UnaryService + for SayHelloSvc { + type Response = super::HelloReply; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::say_hello(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = SayHelloSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for GreeterServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for GreeterServer { + const NAME: &'static str = "helloworld.Greeter"; + } +} +``` diff --git a/src/rust/serde/serialize-time-offsetdatetime-type-using-serde-as-in-serde-with-crate.md b/src/rust/serde/serialize-time-offsetdatetime-type-using-serde-as-in-serde-with-crate.md new file mode 100644 index 0000000..afe1970 --- /dev/null +++ b/src/rust/serde/serialize-time-offsetdatetime-type-using-serde-as-in-serde-with-crate.md @@ -0,0 +1,397 @@ +# Serialize time::OffsetDataTime type using serde_as in serde_with crate + +- [Table design](#table-design) +- [Database management using sqlx](#database-management-using-sqlx) +- [Writing Data Access Layer](#writing-data-access-layer) +- [Write actix handler](#write-actix-handler) + - [AppState](#appstate) + - [Actix handler](#actix-handler) + - [HttpServer setup](#httpserver-setup) + 
- [Config routes](#config-routes)
+  - [Run the application](#run-the-application)
+- [Request data through api](#request-data-through-api)
+- [Choose correct serialize method](#choose-correct-serialize-method)
+- [Request data through api after using serde_as](#request-data-through-api-after-using-serde_as)
+
+# Table design
+
+When we develop a backend API using Postgres and sqlx, we will almost certainly use dates in the database design. Take a `users` table as an example:
+
+```sql
+-- Add migration script here
+CREATE TYPE gender AS ENUM ('male', 'female', 'other');
+
+-- Table `users`
+CREATE TABLE
+    IF NOT EXISTS users (
+        id BIGSERIAL PRIMARY KEY,
+        username TEXT UNIQUE NOT NULL,
+        gender GENDER NOT NULL,
+        disabled BOOLEAN NOT NULL DEFAULT FALSE,
+        created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+    );
+```
+
+In the provided example, the `users` table includes a `created_at` column of type `TIMESTAMPTZ`.
+
+In PostgreSQL, the `TIMESTAMPTZ` data type stands for "timestamp with time zone."
+
+# Database management using sqlx
+
+We can also use `sqlx` to generate a migration and add the SQL above to the migration script.
+
+The following example uses `sqlx`, which is [SQLx](https://github.com/launchbadge/sqlx)'s associated command-line utility for managing databases and migrations, to create the database and generate the migration.
+
+```bash
+# create database
+DATABASE_URL=postgres://localhost/test sqlx database create
+
+# create migration
+DATABASE_URL=postgres://localhost/test sqlx migrate add user
+```
+
+We can list all migration scripts in the `migrations` directory, which is generated by the `sqlx migrate add user` command.
+
+```
+drwxr-xr-x    - username 22 Aug 14:22 migrations
+.rw-r--r--  334 username 22 Aug 14:22 └── 20230822062052_user.sql
+```
+
+We add the SQL above to the file `20230822062052_user.sql`, and `sqlx` will handle the migration (e.g. when running `sqlx migrate run`).
+
+# Writing Data Access Layer
+
+We can write an `all_users` function to fetch data from the database using the `sqlx` crate.
+
+```rust
+use serde::Serialize;
+use sqlx::PgPool;
+use time::OffsetDateTime;
+
+#[derive(Debug, Serialize)]
+pub struct User {
+    pub id: i64,
+    pub username: String,
+    pub disabled: bool,
+    pub gender: Gender,
+    pub created_at: OffsetDateTime,
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Serialize, sqlx::Type, Debug)]
+#[sqlx(type_name = "gender")]
+#[sqlx(rename_all = "lowercase")]
+pub enum Gender {
+    Male,
+    Female,
+    Other,
+}
+
+impl User {
+    pub async fn all(connection: &PgPool) -> Result<Vec<User>, sqlx::Error> {
+        let users = sqlx::query_as!(
+            User,
+            r#"
+            SELECT
+                id,
+                username,
+                gender as "gender: _",
+                disabled,
+                created_at
+            FROM users
+            "#
+        )
+        .fetch_all(connection)
+        .await?;
+
+        Ok(users)
+    }
+}
+```
+
+The code snippet above uses the `sqlx` crate to interact with a PostgreSQL database and retrieve user data.
+
+- The `User` struct represents a user entity and is serialized using the serde crate. It contains the fields `id`, `username`, `disabled`, `gender`, and `created_at`, representing the corresponding columns in the database table.
+- The `Gender` enum represents the possible genders a user can have. It derives Clone, PartialEq, PartialOrd, and Serialize. The `sqlx::Type` trait is implemented to specify that this `enum` should be treated as a PostgreSQL custom type named "gender". The `sqlx(rename_all)` attribute specifies that the enum variants should be serialized in lowercase. You can refer to [rename_all](https://docs.rs/sqlx/latest/sqlx/trait.FromRow.html#rename_all) for more details. If you don't specify `sqlx(rename_all)`, an error will occur:
+
+```
+thread 'actix-rt|system:0|arbiter:0' panicked at 'called `Result::unwrap()` on an `Err` value: ColumnDecode { index: "2", source: "invalid value \"male\" for enum Gender" }', enum-example/src/bin/enum.rs:33:45
+```
+
+- The `User` struct also contains an `all` function that retrieves all users from the database. It takes a reference to a `PgPool` connection pool as a parameter and returns a `Result` with a vector of `User` instances, or an `sqlx::Error` if an error occurs.
+- Inside the `all` function, a SQL query is defined using the `sqlx::query_as!` macro. It selects the necessary columns from the `users` table, mapping the `gender` column to the `Gender` enum using the `as "gender: _"` syntax.
+- Finally, the `fetch_all` method is called on the query to execute it and retrieve all rows as a vector of `User` instances. The result is then returned as a `Result`.
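+
+Before wiring this into a web framework, the data access layer can be exercised on its own. Below is a minimal sketch, assuming the local `test` database from the migration section above exists and has been migrated:
+
+```rust
+use sqlx::postgres::PgPoolOptions;
+
+#[tokio::main]
+async fn main() -> Result<(), sqlx::Error> {
+    // Connect to the local database created earlier (assumed URL).
+    let pool = PgPoolOptions::new()
+        .max_connections(1)
+        .connect("postgres://localhost/test")
+        .await?;
+
+    // Fetch and print all users via the data access layer.
+    let users = User::all(&pool).await?;
+    println!("{users:?}");
+    Ok(())
+}
+```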
+# Write actix handler
+
+## AppState
+
+Once we have the code implemented, let's see how we can use it to retrieve user data from a PostgreSQL database.
+
+First, we define an `AppState` struct to represent the server's state. It contains two fields: `app_name`, a string representing the application name, and `pool`, a `PgPool` instance representing the connection pool to the PostgreSQL database.
+
+You can also add more to `AppState`, e.g. a Redis client to exchange data with Redis, or a Kafka client to send or receive messages from Kafka.
+
+```rust
+// This struct represents state
+struct AppState {
+    app_name: String,
+    pool: PgPool,
+}
+```
+
+## Actix handler
+
+Then, we define a handler for retrieving all users.
+
+```rust
+async fn all_users(data: web::Data<AppState>) -> Result<impl Responder> {
+    let connection = &data.pool;
+    let users = User::all(connection).await.unwrap();
+    Ok(web::Json(users))
+}
+```
+
+The `all_users` function is an asynchronous handler that retrieves all users from the database. It takes a `web::Data<AppState>` parameter containing the shared `AppState` data. Inside the function, it accesses the `PgPool` instance from the shared data and uses the `User` model to fetch all users from the database asynchronously.
+
+## HttpServer setup
+
+Next, we will create a `PgPool` instance, store the pool in the application state, and pass a `Data::new(AppState { ... })` instance in via the `app_data` method.
+
+```rust
+use std::env;
+
+use sqlx::postgres::{PgPool, PgPoolOptions};
+// (HttpServer, App, Data, web and friends are imported in the routes section below)
+
+#[actix_web::main] // required so that the async main runs on actix-web's runtime
+async fn main() -> std::io::Result<()> {
+    env::set_var("RUST_LOG", "info");
+    env_logger::init();
+
+    let db_url = "postgres://localhost/test";
+    let pool = connect(db_url).await.unwrap();
+    HttpServer::new(move || {
+        App::new()
+            // .app_data(pool.clone())
+            .app_data(Data::new(AppState {
+                app_name: "enum".into(),
+                pool: pool.clone(),
+            }))
+            .service(web::scope("/api/v1").configure(config))
+            .route("/health", web::get().to(health))
+    })
+    .bind(("0.0.0.0", 8080))?
+    .run()
+    .await
+}
+
+/// Open a connection to a database
+pub async fn connect(db_url: &str) -> sqlx::Result<PgPool> {
+    // NOTE: older versions of sqlx used PgPool directly; for newer versions use
+    // PgPoolOptions::new to create a pool
+    //
+    // let pool = PgPool::new(db_url).await?;
+
+    // Create a connection pool
+    let pool = PgPoolOptions::new()
+        .max_connections(5)
+        // .connect("postgres://localhost/test")
+        // .connect(&env::var("DATABASE_URL")?)
+        .connect(db_url)
+        .await?;
+    Ok(pool)
+}
+```
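+
+The `/health` route above points at a `health` handler that isn't shown in this post. A minimal sketch of what such a handler could look like (the name `health` is simply what the route table expects):
+
+```rust
+use actix_web::{HttpResponse, Responder};
+
+// Minimal liveness endpoint for the `/health` route.
+async fn health() -> impl Responder {
+    HttpResponse::Ok().body("OK")
+}
+```
+
+## Config routes
+
+Finally, we will configure routes for the application.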
+
+We can use the `configure` method to configure routes by passing a function with the `F: FnOnce(&mut ServiceConfig)` trait bound, like this:
+
+```rust
+HttpServer::new(move || {
+    App::new()
+        // .app_data(pool.clone())
+        .app_data(Data::new(AppState {
+            app_name: "enum".into(),
+            pool: pool.clone(),
+        }))
+        // config routers
+        .service(web::scope("/api/v1").configure(config))
+        .route("/health", web::get().to(health))
+})
+.bind(("0.0.0.0", 8080))?
+.run()
+.await
+```
+
+Here is the signature of the `configure` method:
+
+```rust
+    pub fn configure<F>(mut self, cfg_fn: F) -> Self
+    where
+        F: FnOnce(&mut ServiceConfig);
+```
+
+And our config method:
+
+```rust
+use actix_web::{
+    web::{self, Data, ServiceConfig},
+    web::{get, post, resource as r, scope},
+    App, Error, HttpRequest, HttpResponse, HttpServer, Responder, Result,
+};
+
+// this function could be located in a different module
+pub fn config(cfg: &mut ServiceConfig) {
+    cfg
+        // users
+        .service(scope("/users").service(
+            r("").route(get().to(all_users)), // .route(post().to(delete_user)),
+        ));
+}
+```
+
+## Run the application
+
+The application is configured with routes using the `service` and `route` methods. It includes a scope for API versioning with `/api/v1` and sets up a route for a health check endpoint (`/health`) and a route to retrieve all users (`/users`).
+
+With all things tied up, we can run the application using `cargo run`, or `cargo run --bin <name>` if you have multiple binaries in your project:
+
+```
+    Finished dev [unoptimized + debuginfo] target(s) in 2.73s
+     Running `target/debug/enum`
+[2023-08-23T02:00:37Z INFO  actix_server::builder] starting 10 workers
+[2023-08-23T02:00:37Z INFO  actix_server::server] Actix runtime found; starting in Actix runtime
+```
+
+# Request data through api
+
+Now it's time to test the API.
+
+We can request the user data through the `/api/v1/users` endpoint:
+
+```bash
+curl '0:8080/api/v1/users' | jq
+```
+
+Output:
+
+```json
+[
+  {
+    "id": 1,
+    "username": "john_doe",
+    "disabled": false,
+    "gender": "Male",
+    "created_at": [2023, 234, 15, 3, 34, 422482000, 0, 0, 0]
+  },
+  {
+    "id": 2,
+    "username": "jane_smith",
+    "disabled": true,
+    "gender": "Female",
+    "created_at": [2023, 234, 15, 3, 34, 422482000, 0, 0, 0]
+  },
+  {
+    "id": 3,
+    "username": "alex_jones",
+    "disabled": false,
+    "gender": "Other",
+    "created_at": [2023, 234, 15, 3, 34, 422482000, 0, 0, 0]
+  }
+]
+```
+
+There is a problem: `created_at` is returned as an array, when it should be a string like `2023-08-22T15:03:34.422482Z`. How do we solve this?
+
+# Choose correct serialize method
+
+To fix the serialization of the `OffsetDateTime` type in the `User` struct, we need to specify the correct serialization method for the `created_at` field.
+
+We can use the `serde_with` crate and use `Rfc3339` in the `serde_as` macro, which will serialize an `OffsetDateTime` like `1985-04-12T23:20:50.52Z` instead of as an array of integers such as `[2023, 234, 15, 3, 34, 422482000, 0, 0, 0]`.
+
+```rust
+/// Well-known formats, typically standards.
+pub mod well_known {
+    pub mod iso8601;
+    mod rfc2822;
+    mod rfc3339;
+
+    #[doc(inline)]
+    pub use iso8601::Iso8601;
+    pub use rfc2822::Rfc2822;
+    pub use rfc3339::Rfc3339;
+}
+```
+
+You can use the `serde_with` crate as follows:
+
+- Place the `#[serde_as]` attribute before the `#[derive]` attribute.
+- Use `#[serde_as(as = "...")]` instead of `#[serde(with = "...")]` to annotate fields in the struct.
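+
+To see the effect in isolation, here is a minimal, self-contained sketch. It is independent of the actix project above and assumes `serde_with` is built with its `time_0_3` feature, `time` with its `macros` feature, and that `serde_json` is a dependency:
+
+```rust
+use serde::Serialize;
+use serde_with::serde_as;
+use time::format_description::well_known::Rfc3339;
+use time::macros::datetime;
+use time::OffsetDateTime;
+
+#[serde_as]
+#[derive(Serialize)]
+struct Event {
+    // Without the annotation, `time` serializes this as an array of integers;
+    // with it, we get an RFC 3339 string.
+    #[serde_as(as = "Rfc3339")]
+    at: OffsetDateTime,
+}
+
+fn main() {
+    let event = Event { at: datetime!(2023-08-22 15:03:34.422482 UTC) };
+    // Prints: {"at":"2023-08-22T15:03:34.422482Z"}
+    println!("{}", serde_json::to_string(&event).unwrap());
+}
+```
+
+Below is an example of using `serde_with` together with `serde_as` for our `User` struct.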
+
+```rust
+use time::format_description::well_known::Rfc3339;
+
+#[serde_with::serde_as]
+#[derive(Debug, Serialize)]
+pub struct User {
+    pub id: i64,
+    pub username: String,
+    pub disabled: bool,
+    pub gender: Gender,
+    #[serde_as(as = "Rfc3339")]
+    pub created_at: OffsetDateTime,
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Serialize, sqlx::Type, Debug)]
+#[sqlx(type_name = "gender")]
+#[sqlx(rename_all = "lowercase")]
+pub enum Gender {
+    Male,
+    Female,
+    Other,
+}
+```
+
+Notice that we use `#[serde_as(as = "Rfc3339")]` to annotate the `created_at` field of type `OffsetDateTime`.
+
+It's quite convenient to use.
+
+# Request data through api after using serde_as
+
+Now, when we request the data, we get the datetime as we wanted.
+
+```bash
+curl '0:8080/api/v1/users' | jq
+```
+
+Output:
+
+```json
+[
+  {
+    "id": 1,
+    "username": "john_doe",
+    "disabled": false,
+    "gender": "Male",
+    "created_at": "2023-08-22T15:03:34.422482Z"
+  },
+  {
+    "id": 2,
+    "username": "jane_smith",
+    "disabled": true,
+    "gender": "Female",
+    "created_at": "2023-08-22T15:03:34.422482Z"
+  },
+  {
+    "id": 3,
+    "username": "alex_jones",
+    "disabled": false,
+    "gender": "Other",
+    "created_at": "2023-08-22T15:03:34.422482Z"
+  }
+]
+```
+
+🎉🎉🎉
diff --git a/src/rust/tokio/async-healthcheck-multiple-endpoints.md b/src/rust/tokio/async-healthcheck-multiple-endpoints.md
new file mode 100644
index 0000000..9400822
--- /dev/null
+++ b/src/rust/tokio/async-healthcheck-multiple-endpoints.md
@@ -0,0 +1,144 @@
+# Async Healthcheck Multiple Endpoints
+
+- [Intro](#intro)
+- [Code](#code)
+- [Code explain](#code-explain)
+
+## Intro
+
+Today, I'll show you how to use tokio to run health checks against multiple endpoints.
+
+The architecture is simple:
+
+- Initialize a vector of health-check endpoint configs
+- Spawn one future per endpoint to do the health checks
+
+## Code
+
+```rust
+// use tokio::time::sleep;
+// NOTE: some imports below are only used by the commented-out variants
+use serde::{Deserialize, Serialize};
+use std::fs::File;
+use std::io::BufReader;
+use std::time::Duration;
+use tokio::select;
+use tokio::sync::mpsc;
+use tokio::time;
+use tokio::time::{interval, sleep};
+
+#[derive(Debug, Deserialize, Serialize, Clone)]
+struct Config {
+    interval: u64,
+    url: String,
+}
+
+async fn check_url(config: Config) {
+    loop {
+        println!("In check_url loop");
+        let url = &config.url;
+        match reqwest::get(url).await {
+            Err(e) => println!("Error: Failed to access {}: {}", config.url, e),
+            Ok(response) => {
+                // println!("{response:?}");
+                if !response.status().is_success() {
+                    println!(
+                        "Error: {} returned status code {}",
+                        config.url,
+                        response.status()
+                    );
+                } else {
+                    println!("check for {url} OK");
+                }
+            }
+        }
+        sleep(Duration::from_secs(config.interval)).await;
+    }
+}
+
+#[tokio::main]
+async fn main() {
+    // Load configuration from file
+    // let file = File::open("config.json").expect("Failed to open config file");
+    // let reader = BufReader::new(file);
+    // let configs: Vec<Config> =
+    //     serde_json::from_reader(reader).expect("Failed to parse config file");
+
+    let configs = vec![
+        Config { interval: 10, url: "http://www.baidu.com".to_string() },
+        Config { interval: 10, url: "http://www.qq.com".to_string() },
+    ];
+
+    // Create a shared timer
+    // let mut ticker = interval(Duration::from_secs(1));
+
+    // let mut interval =
+    //     time::interval(time::Duration::from_millis(consume_interval));
+
+    // Create a task for each URL and spawn it
+    //
+    // NOTE: we don't need to run a loop in spawn, check_url already has a loop
+    // for config in configs {
+    //     // let mut tick = ticker.tick();
+    //     tokio::spawn(async move {
+    //         let mut
ticker = interval(Duration::from_secs(1)); + // loop { + // select! { + // // _ = tick => { + // _ = ticker.tick() => { + // println!("1s ..."); + // check_url(config.clone()).await; + // } + // } + // } + // }); + // } + + for config in configs { + tokio::spawn(async move { + println!("spawn check future ..."); + check_url(config.clone()).await; + }); + } + + println!("Infinite loop"); + // Keep the program running so that other tasks can continue to run + time::sleep(Duration::from_secs(2000)).await; + // loop {} +} +``` + +## Code explain + +1. Load configuration from file or hard code the configuration + +We can hard code the configuration or load configuration from file. + +```rust +// let file = File::open("config.json").expect("Failed to open config file"); +// let reader = BufReader::new(file); +// let configs: Vec = +// serde_json::from_reader(reader).expect("Failed to parse config file"); + +let configs = vec![ + Config { interval: 10, url: "http://www.baidu.com".to_string() }, + Config { interval: 10, url: "http://www.qq.com".to_string() }, +]; +``` + +2. Create a task for each URL and spawn it + +```rust +for config in configs { + tokio::spawn(async move { + println!("spawn check future ..."); + check_url(config.clone()).await; + }); +} +``` + +3. Keep the program running so that other tasks can continue to run + +```rust +time::sleep(Duration::from_secs(2000)).await; +// loop {} // Keep the program running so that other tasks can continue to run +``` diff --git a/src/rust/tokio/tokio-codec.md b/src/rust/tokio/tokio-codec.md new file mode 100644 index 0000000..35dab23 --- /dev/null +++ b/src/rust/tokio/tokio-codec.md @@ -0,0 +1,1184 @@ +# Tokio Codec + +- [Intro](#intro) + - [EchoCodec](#echocodec) + - [Echo using io::Copy](#echo-using-iocopy) +- [Stream Sink trait](#stream-sink-trait) + +# Intro + +今天来讲讲 `tokio` 的 `codec`。 + +顾名思义,`codec` 是一个编码解码器,用于将原始字节解码为 `rust` 的数据类型。 + +首先,我们来看看 `codec` 的基本用法。 + +## EchoCodec + +我们首先来看一个简单的例子,我们将 `tokio` 中的 `TcpStream` 进行编码和解码。 + +实现 `Decoder` 和 `Encoder` 这两个 `trait` 即可拥有编解码的功能。 + +在实现这两个 `trait` 之前,我们首先定义错误类型,这里使用 `enum ConnectionError`,使用 `enum` 也是最常见的定义错误类型的方式。 + +为什么要先定义错误类型呢?因为 `Decoder` 和 `Encoder` 这两个 `trait` 都定义了一个叫做 `Error` 的关联类型(`associated type`),所以为了实现这两个 `trait`,我们也需要定义一个错误类型。 + +下面是 `tokio_util::codec` 这个包(`package`)里的 `Decoder` 和 `Encoder` 的定义。 + +`Decoder` `trait` 的定义: + +```rust +/// Decoding of frames via buffers. +/// +/// This trait is used when constructing an instance of [`Framed`] or +/// [`FramedRead`]. An implementation of `Decoder` takes a byte stream that has +/// already been buffered in `src` and decodes the data into a stream of +/// `Self::Item` frames. +/// +/// Implementations are able to track state on `self`, which enables +/// implementing stateful streaming parsers. In many cases, though, this type +/// will simply be a unit struct (e.g. `struct HttpDecoder`). +/// +/// For some underlying data-sources, namely files and FIFOs, +/// it's possible to temporarily read 0 bytes by reaching EOF. +/// +/// In these cases `decode_eof` will be called until it signals +/// fullfillment of all closing frames by returning `Ok(None)`. +/// After that, repeated attempts to read from the [`Framed`] or [`FramedRead`] +/// will not invoke `decode` or `decode_eof` again, until data can be read +/// during a retry. 
+/// +/// It is up to the Decoder to keep track of a restart after an EOF, +/// and to decide how to handle such an event by, for example, +/// allowing frames to cross EOF boundaries, re-emitting opening frames, or +/// resetting the entire internal state. +/// +/// [`Framed`]: crate::codec::Framed +/// [`FramedRead`]: crate::codec::FramedRead +pub trait Decoder { + /// The type of decoded frames. + type Item; + + /// The type of unrecoverable frame decoding errors. + /// + /// If an individual message is ill-formed but can be ignored without + /// interfering with the processing of future messages, it may be more + /// useful to report the failure as an `Item`. + /// + /// `From` is required in the interest of making `Error` suitable + /// for returning directly from a [`FramedRead`], and to enable the default + /// implementation of `decode_eof` to yield an `io::Error` when the decoder + /// fails to consume all available data. + /// + /// Note that implementors of this trait can simply indicate `type Error = + /// io::Error` to use I/O errors as this type. + /// + /// [`FramedRead`]: crate::codec::FramedRead + type Error: From; + + /// Attempts to decode a frame from the provided buffer of bytes. + /// + /// This method is called by [`FramedRead`] whenever bytes are ready to be + /// parsed. The provided buffer of bytes is what's been read so far, and + /// this instance of `Decode` can determine whether an entire frame is in + /// the buffer and is ready to be returned. + /// + /// If an entire frame is available, then this instance will remove those + /// bytes from the buffer provided and return them as a decoded + /// frame. Note that removing bytes from the provided buffer doesn't always + /// necessarily copy the bytes, so this should be an efficient operation in + /// most circumstances. + /// + /// If the bytes look valid, but a frame isn't fully available yet, then + /// `Ok(None)` is returned. This indicates to the [`Framed`] instance that + /// it needs to read some more bytes before calling this method again. + /// + /// Note that the bytes provided may be empty. If a previous call to + /// `decode` consumed all the bytes in the buffer then `decode` will be + /// called again until it returns `Ok(None)`, indicating that more bytes need to + /// be read. + /// + /// Finally, if the bytes in the buffer are malformed then an error is + /// returned indicating why. This informs [`Framed`] that the stream is now + /// corrupt and should be terminated. + /// + /// [`Framed`]: crate::codec::Framed + /// [`FramedRead`]: crate::codec::FramedRead + /// + /// # Buffer management + /// + /// Before returning from the function, implementations should ensure that + /// the buffer has appropriate capacity in anticipation of future calls to + /// `decode`. Failing to do so leads to inefficiency. + /// + /// For example, if frames have a fixed length, or if the length of the + /// current frame is known from a header, a possible buffer management + /// strategy is: + /// + /// # use std::io; + /// # + /// # use bytes::BytesMut; + /// # use tokio_util::codec::Decoder; + /// # + /// # struct MyCodec; + /// # + /// impl Decoder for MyCodec { + /// // ... + /// # type Item = BytesMut; + /// # type Error = io::Error; + /// + /// fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + /// // ... + /// + /// // Reserve enough to complete decoding of the current frame. + /// let current_frame_len: usize = 1000; // Example. + /// // And to start decoding the next frame. 
+ /// let next_frame_header_len: usize = 10; // Example. + /// src.reserve(current_frame_len + next_frame_header_len); + /// + /// return Ok(None); + /// } + /// } + /// + /// An optimal buffer management strategy minimizes reallocations and + /// over-allocations. + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error>; + + /// A default method available to be called when there are no more bytes + /// available to be read from the underlying I/O. + /// + /// This method defaults to calling `decode` and returns an error if + /// `Ok(None)` is returned while there is unconsumed data in `buf`. + /// Typically this doesn't need to be implemented unless the framing + /// protocol differs near the end of the stream, or if you need to construct + /// frames _across_ eof boundaries on sources that can be resumed. + /// + /// Note that the `buf` argument may be empty. If a previous call to + /// `decode_eof` consumed all the bytes in the buffer, `decode_eof` will be + /// called again until it returns `None`, indicating that there are no more + /// frames to yield. This behavior enables returning finalization frames + /// that may not be based on inbound data. + /// + /// Once `None` has been returned, `decode_eof` won't be called again until + /// an attempt to resume the stream has been made, where the underlying stream + /// actually returned more data. + fn decode_eof(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { + match self.decode(buf)? { + Some(frame) => Ok(Some(frame)), + None => { + if buf.is_empty() { + Ok(None) + } else { + Err(io::Error::new(io::ErrorKind::Other, "bytes remaining on stream").into()) + } + } + } + } + + /// Provides a [`Stream`] and [`Sink`] interface for reading and writing to this + /// `Io` object, using `Decode` and `Encode` to read and write the raw data. + /// + /// Raw I/O objects work with byte sequences, but higher-level code usually + /// wants to batch these into meaningful chunks, called "frames". This + /// method layers framing on top of an I/O object, by using the `Codec` + /// traits to handle encoding and decoding of messages frames. Note that + /// the incoming and outgoing frame types may be distinct. + /// + /// This function returns a *single* object that is both `Stream` and + /// `Sink`; grouping this into a single object is often useful for layering + /// things like gzip or TLS, which require both read and write access to the + /// underlying object. + /// + /// If you want to work more directly with the streams and sink, consider + /// calling `split` on the [`Framed`] returned by this method, which will + /// break them into separate objects, allowing them to interact more easily. + /// + /// [`Stream`]: futures_core::Stream + /// [`Sink`]: futures_sink::Sink + /// [`Framed`]: crate::codec::Framed + fn framed(self, io: T) -> Framed + where + Self: Sized, + { + Framed::new(io, self) + } +} +``` + +`Encoder` `trait` 的定义: + +```rust +/// Trait of helper objects to write out messages as bytes, for use with +/// [`FramedWrite`]. +/// +/// [`FramedWrite`]: crate::codec::FramedWrite +pub trait Encoder { + /// The type of encoding errors. + /// + /// [`FramedWrite`] requires `Encoder`s errors to implement `From` + /// in the interest letting it return `Error`s directly. + /// + /// [`FramedWrite`]: crate::codec::FramedWrite + type Error: From; + + /// Encodes a frame into the buffer provided. + /// + /// This method will encode `item` into the byte buffer provided by `dst`. 
+ /// The `dst` provided is an internal buffer of the [`FramedWrite`] instance and + /// will be written out when possible. + /// + /// [`FramedWrite`]: crate::codec::FramedWrite + fn encode(&mut self, item: Item, dst: &mut BytesMut) -> Result<(), Self::Error>; +} +``` + +由于还不知道未来会有几种类型的错误,我们先随意定义两个: `Disconnected` 和 `Io(io::IoError)`,分别代表 `网络连接出错(断开)`以及`读取 socket 时发生的 io 错误`,当然实际场景的错误更加复杂和多样。 + +```rust +// 因为 codecs 的 Encoder trait 有个 associate type ,所以需要 Error 定义 +#[derive(Debug)] +pub enum ConnectionError { + Io(io::Error), + Disconnected, +} + +impl From for ConnectionError { + fn from(err: io::Error) -> Self { + ConnectionError::Io(err) + } +} +``` + +其次定义 `EchoCodec`,它实现了 `Decoder` 和 `Encoder` `trait`,其中 `Decoder` 和 `Encoder` 的 `associated type` 都是 `ConnectionError`。 + +实现 `From` 的原因是 `Encoder` 的关联类型的类型约束: `type Error: From`,即我们必须能够将 `io::Error` 转换为 `ConnectionError`。 + +接下来我们定义需要被编码的消息类型 `Message`,它是一个 `String` 类型,并为此实现 `Encoder` 和 `Decoder` `trait`。 + +被编码(`Encode`)的意思是,将 `Message` 类型转换为 `BytesMut`,然后写入到 `TcpStream` 中。 +被解码(`Decode`)的意思是,从 `FramedRead` 中读取 `BytesMut`,然后解码为 `Message` 供应用程序使用。 + +```rust +use tokio::codec::{Decoder, Encoder}; + +type Message = String; + +struct EchoCodec; + +// 给 EchoCodec 实现 Encoder trait +impl Encoder for EchoCodec { + type Error = ConnectionError; + + fn encode( + &mut self, item: Message, dst: &mut BytesMut, + ) -> Result<(), Self::Error> { + // 将 Message 写入 dst + dst.extend(item.as_bytes()); + Ok(()) + } +} + +// 给 EchoCodec 实现 Decoder trait +impl Decoder for EchoCodec { + type Item = Message; + + type Error = ConnectionError; + + fn decode( + &mut self, src: &mut BytesMut, + ) -> Result, Self::Error> { + // 将 src 中的数据转换为 String + if src.is_empty() { + return Ok(None); + } + // 将 src 中的数据移除 + let data = src.split(); + let data = String::from_utf8_lossy(&data[..]).to_string(); + + // 将 line 转换为 Message + Ok(Some(data)) + } +} +``` + +上面可以看出,`encode` 方法就是将 `Message` 转换为 `bytes` 并写入 `BytesMut`(通过 `BytesMut` 的 `extend` 方法),而 `decode` 方法就是将 `BytesMut` 转换为 `Message`。 + +最后,在 `main` 函数里是这么使用的: + +```rust +#[tokio::main] +async fn main() -> Result<(), Box> { + // start listening on 50007 + let listener = TcpListener::bind("127.0.0.1:50007").await?; + println!("echo server started!"); + + loop { + let (socket, addr) = listener.accept().await?; + + println!("accepted connection from: {}", addr); + + tokio::spawn(async move { + let codec = EchoCodec {}; + let mut conn = codec.framed(socket); + while let Some(message) = conn.next().await { + if let Ok(message) = message { + println!("received: {:?}", message); + conn.send(message).await.unwrap(); + } + } + }); + } +} +``` + +值得注意的是,`codec` 的 `framed` 方法(`codec.framed(socket)`)将 `TcpStream` 转换为 `Framed`,这个 `Framed` 就是实现了 `tokio` 中的 `Stream` 和 `Sink` 这两个 `trait`,因而具有了接收(通过 `Stream`)和发送(通过 `Sink`)数据的功能,关于这两个 `trait`,后面会提到。 + +上述 `framed` 方法是 `Decoder` `trait` 的方法,它第一个参数是 `TcpStream`,第二个参数是 `EchoCodec`,这个 `EchoCodec` 就是我们定义的 `EchoCodec`。 + +`Decoder::framed` 方法的定义如下: + +```rust +fn framed(self, codec: U) -> Framed +where + T: AsyncRead + AsyncWrite, + U: Decoder + Encoder, +{ + Framed::new(self, codec) +} +``` + +`Framed::new` 方法创建一个 `Framed` 实例,将 `TcpStream` 和 `EchoCodec` 保存在 `FramedImpl` 中。 + +`Framed` struct 的定义以及 `new` 方法的定义: + +```rust +pin_project! { + /// A unified [`Stream`] and [`Sink`] interface to an underlying I/O object, using + /// the `Encoder` and `Decoder` traits to encode and decode frames. 
+ /// + /// You can create a `Framed` instance by using the [`Decoder::framed`] adapter, or + /// by using the `new` function seen below. + /// [`Stream`]: futures_core::Stream + /// [`Sink`]: futures_sink::Sink + /// [`AsyncRead`]: tokio::io::AsyncRead + /// [`Decoder::framed`]: crate::codec::Decoder::framed() + pub struct Framed { + #[pin] + inner: FramedImpl + } +} + +impl Framed +where + T: AsyncRead + AsyncWrite, +{ + /// Provides a [`Stream`] and [`Sink`] interface for reading and writing to this + /// I/O object, using [`Decoder`] and [`Encoder`] to read and write the raw data. + /// + /// Raw I/O objects work with byte sequences, but higher-level code usually + /// wants to batch these into meaningful chunks, called "frames". This + /// method layers framing on top of an I/O object, by using the codec + /// traits to handle encoding and decoding of messages frames. Note that + /// the incoming and outgoing frame types may be distinct. + /// + /// This function returns a *single* object that is both [`Stream`] and + /// [`Sink`]; grouping this into a single object is often useful for layering + /// things like gzip or TLS, which require both read and write access to the + /// underlying object. + /// + /// If you want to work more directly with the streams and sink, consider + /// calling [`split`] on the `Framed` returned by this method, which will + /// break them into separate objects, allowing them to interact more easily. + /// + /// Note that, for some byte sources, the stream can be resumed after an EOF + /// by reading from it, even after it has returned `None`. Repeated attempts + /// to do so, without new data available, continue to return `None` without + /// creating more (closing) frames. + /// + /// [`Stream`]: futures_core::Stream + /// [`Sink`]: futures_sink::Sink + /// [`Decode`]: crate::codec::Decoder + /// [`Encoder`]: crate::codec::Encoder + /// [`split`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.split + pub fn new(inner: T, codec: U) -> Framed { + Framed { + inner: FramedImpl { + inner, + codec, + state: Default::default(), + }, + } + } +} +``` + +`FramedImpl` 除了保存了 `TcpStream` 和 `EchoCodec`,它还保存了一个 `State`,这个 `State` 是一个 `RWFrames` 实例,相当于一个缓冲区。 + +`FramedImpl` struct 的定义: + +```rust +pin_project! 
{ + #[derive(Debug)] + pub(crate) struct FramedImpl { + #[pin] + pub(crate) inner: T, + pub(crate) state: State, + pub(crate) codec: U, + } +} +``` + +`RWFrames` 、`ReadFrame` 和 `WriteFrame` 的定义。 + +```rust +#[derive(Debug)] +pub(crate) struct ReadFrame { + pub(crate) eof: bool, + pub(crate) is_readable: bool, + pub(crate) buffer: BytesMut, + pub(crate) has_errored: bool, +} + +pub(crate) struct WriteFrame { + pub(crate) buffer: BytesMut, +} + +#[derive(Default)] +pub(crate) struct RWFrames { + pub(crate) read: ReadFrame, + pub(crate) write: WriteFrame, +} +``` + +`RWFrames` 实现了 `Borrow` 和 `BorrowMut` 这两个 `trait`,能分别返回 `ReadFrame` 和 `WriteFrame` 用来作为读写数据的缓冲。 + +```rust +impl Borrow for RWFrames { + fn borrow(&self) -> &ReadFrame { + &self.read + } +} +impl BorrowMut for RWFrames { + fn borrow_mut(&mut self) -> &mut ReadFrame { + &mut self.read + } +} +impl Borrow for RWFrames { + fn borrow(&self) -> &WriteFrame { + &self.write + } +} +impl BorrowMut for RWFrames { + fn borrow_mut(&mut self) -> &mut WriteFrame { + &mut self.write + } +} +``` + +`RWFrames` 实现 `Borrow` `BorrowMut` 也比较有意思,当需要从 `Stream` `读` 数据(这里指异步读取 `AsyncRead`)的时候,会调用 `BorrowMut` 方法,返回内部的 `ReadFrame` 的引用,作为读数据的缓冲。当需要向 `Sink` `写` 数据(这里指异步写入 `AsyncWrite`)的时候,会调用 `BorrowMut` 方法,返回内部的 `WriteFrame` 的引用,作为写数据的缓冲。 + +而 `FramedImpl` 实现了 `Stream` 和 `Sink` 这两个 `trait`。`Stream` 代表读数据,`Sink` 代表写数据。实现 `Stream` 时,`FramedImpl` 的泛型参数的约束是 `T: AsyncRead` 和 `R: BorrowMut`,表示 `FramedImpl::inner` 只需满足 `AsyncRead`,而且读取操作时会用到 `ReadFrame`。 + +实现 `Sink` 时,`FramedImpl` 的泛型参数的约束是 `T: AsyncWrite` 和 `R: BorrowMut`。实现 `Sink` 时,表示 `FramedImpl::inner` 只需满足 `AsyncWrite`,而且写入操作时会用到 `WriteFrame`。 + +另外,比较有趣的是,`FramedImpl` 实现 `Stream` 时,`poll_next` 方法有个状态机,体现了读取数据流时复杂的流程。 + +```rust +impl Stream for FramedImpl +where + T: AsyncRead, + U: Decoder, + R: BorrowMut, +{ + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + use crate::util::poll_read_buf; + + let mut pinned = self.project(); + let state: &mut ReadFrame = pinned.state.borrow_mut(); + // The following loops implements a state machine with each state corresponding + // to a combination of the `is_readable` and `eof` flags. States persist across + // loop entries and most state transitions occur with a return. + // + // The initial state is `reading`. 
+ // + // | state | eof | is_readable | has_errored | + // |---------|-------|-------------|-------------| + // | reading | false | false | false | + // | framing | false | true | false | + // | pausing | true | true | false | + // | paused | true | false | false | + // | errored | | | true | + // `decode_eof` returns Err + // ┌────────────────────────────────────────────────────────┐ + // `decode_eof` returns │ │ + // `Ok(Some)` │ │ + // ┌─────┐ │ `decode_eof` returns After returning │ + // Read 0 bytes ├─────▼──┴┐ `Ok(None)` ┌────────┐ ◄───┐ `None` ┌───▼─────┐ + // ┌────────────────►│ Pausing ├───────────────────────►│ Paused ├─┐ └───────────┤ Errored │ + // │ └─────────┘ └─┬──▲───┘ │ └───▲───▲─┘ + // Pending read │ │ │ │ │ │ + // ┌──────┐ │ `decode` returns `Some` │ └─────┘ │ │ + // │ │ │ ┌──────┐ │ Pending │ │ + // │ ┌────▼──┴─┐ Read n>0 bytes ┌┴──────▼─┐ read n>0 bytes │ read │ │ + // └─┤ Reading ├───────────────►│ Framing │◄────────────────────────┘ │ │ + // └──┬─▲────┘ └─────┬──┬┘ │ │ + // │ │ │ │ `decode` returns Err │ │ + // │ └───decode` returns `None`──┘ └───────────────────────────────────────────────────────┘ │ + // │ read returns Err │ + // └────────────────────────────────────────────────────────────────────────────────────────────┘ + loop { + // too long, omit + } + } +``` + +`FramedImpl` 实现了 `Stream`,我们就能够从它那里读取数据了。 + +读取数据的过程是通过 `StreamExt::next` 方法实现的,它是对 `Stream` `trait` 的扩展,提供了很多实用方法,其中 `next` 就是其中一个。 + +`StreamExt::next` 方法的定义: + +```rust +/// An extension trait for `Stream`s that provides a variety of convenient +/// combinator functions. +pub trait StreamExt: Stream { + /// Creates a future that resolves to the next item in the stream. + /// + /// Note that because `next` doesn't take ownership over the stream, + /// the [`Stream`] type must be [`Unpin`]. If you want to use `next` with a + /// [`!Unpin`](Unpin) stream, you'll first have to pin the stream. This can + /// be done by boxing the stream using [`Box::pin`] or + /// pinning it to the stack using the `pin_mut!` macro from the `pin_utils` + /// crate. + /// + /// # Examples + /// + /// # futures::executor::block_on(async { + /// use futures::stream::{self, StreamExt}; + /// + /// let mut stream = stream::iter(1..=3); + /// + /// assert_eq!(stream.next().await, Some(1)); + /// assert_eq!(stream.next().await, Some(2)); + /// assert_eq!(stream.next().await, Some(3)); + /// assert_eq!(stream.next().await, None); + /// # }); + fn next(&mut self) -> Next<'_, Self> + where + Self: Unpin, + { + assert_future::, _>(Next::new(self)) + } + // other methods... +} +``` + +`StreamExt::next` 方法创建一个对自身的引用,并且返回一个 `Next` 对象,这个对象实现了 `Future` `trait`,所以我们可以通过 `await` 来读取数据。 + +`Next` struct 的定义: + +```rust +/// Future for the [`next`](super::StreamExt::next) method. +#[derive(Debug)] +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct Next<'a, St: ?Sized> { + stream: &'a mut St, +} + +impl Unpin for Next<'_, St> {} + +impl<'a, St: ?Sized + Stream + Unpin> Next<'a, St> { + pub(super) fn new(stream: &'a mut St) -> Self { + Self { stream } + } +} + +impl FusedFuture for Next<'_, St> { + fn is_terminated(&self) -> bool { + self.stream.is_terminated() + } +} + +impl Future for Next<'_, St> { + type Output = Option; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.stream.poll_next_unpin(cx) + } +} +``` + +在得知 `FramedImpl` 如何读取数据之后,那么 `FramedImpl` 是如何实现向 `Sink` 写入数据的呢? 
+ +`FramedImpl` 实现了 `Sink` `trait`,可以看到主要是调用了 `FramedImpl::poll_flush` 方法将 `Encoder` 编码的数据通过字节流发送出去。 + +```rust +impl Sink for FramedImpl +where + T: AsyncWrite, + U: Encoder, + U::Error: From, + W: BorrowMut, +{ + type Error = U::Error; + + fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.state.borrow().buffer.len() >= BACKPRESSURE_BOUNDARY { + self.as_mut().poll_flush(cx) + } else { + Poll::Ready(Ok(())) + } + } + + fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { + let pinned = self.project(); + pinned + .codec + .encode(item, &mut pinned.state.borrow_mut().buffer)?; + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + use crate::util::poll_write_buf; + trace!("flushing framed transport"); + let mut pinned = self.project(); + + while !pinned.state.borrow_mut().buffer.is_empty() { + let WriteFrame { buffer } = pinned.state.borrow_mut(); + trace!("writing; remaining={}", buffer.len()); + + let n = ready!(poll_write_buf(pinned.inner.as_mut(), cx, buffer))?; + + if n == 0 { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::WriteZero, + "failed to \ + write frame to transport", + ) + .into())); + } + } + + // Try flushing the underlying IO + ready!(pinned.inner.poll_flush(cx))?; + + trace!("framed transport flushed"); + Poll::Ready(Ok(())) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + ready!(self.as_mut().poll_flush(cx))?; + ready!(self.project().inner.poll_shutdown(cx))?; + + Poll::Ready(Ok(())) + } +} +``` + +而我们能通过 `send` 方法(参看 `main` 函数中的 `conn.send(message).await.unwrap();`)将编码的 `Message` 发送出去,是因为 `SinkExt` 是对 `Sink` `trait` 的扩展,它提供了 `send` 方法。 + +```rust +impl SinkExt for T where T: Sink {} + +/// An extension trait for `Sink`s that provides a variety of convenient +/// combinator functions. +pub trait SinkExt: Sink { + /// A future that completes after the given item has been fully processed + /// into the sink, including flushing. + /// + /// Note that, **because of the flushing requirement, it is usually better + /// to batch together items to send via `feed` or `send_all`, + /// rather than flushing between each item.** + fn send(&mut self, item: Item) -> Send<'_, Self, Item> + where + Self: Unpin, + { + assert_future::, _>(Send::new(self, item)) + } + + // other methods... +} +``` + +这个方法返回一个 `Send` struct,它是对 `Feed` 的一个简单 wrapper,它的作用是将 `item` 发送出去,发送功能交给 `Feed::sink_pin_mut::poll_flush` 来实现。 + +```rust +/// Future for the [`send`](super::SinkExt::send) method. 
+#[derive(Debug)] +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct Send<'a, Si: ?Sized, Item> { + feed: Feed<'a, Si, Item>, +} + +// Pinning is never projected to children +impl Unpin for Send<'_, Si, Item> {} + +impl<'a, Si: Sink + Unpin + ?Sized, Item> Send<'a, Si, Item> { + pub(super) fn new(sink: &'a mut Si, item: Item) -> Self { + Self { feed: Feed::new(sink, item) } + } +} + +impl + Unpin + ?Sized, Item> Future for Send<'_, Si, Item> { + type Output = Result<(), Si::Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = &mut *self; + + if this.feed.is_item_pending() { + ready!(Pin::new(&mut this.feed).poll(cx))?; + debug_assert!(!this.feed.is_item_pending()); + } + + // we're done sending the item, but want to block on flushing the + // sink + ready!(this.feed.sink_pin_mut().poll_flush(cx))?; + + Poll::Ready(Ok(())) + } +} +``` + +这里是 `Feed` struct 的定义: + +```rust +/// Future for the [`feed`](super::SinkExt::feed) method. +#[derive(Debug)] +#[must_use = "futures do nothing unless you `.await` or poll them"] +pub struct Feed<'a, Si: ?Sized, Item> { + sink: &'a mut Si, + item: Option, +} + +// Pinning is never projected to children +impl Unpin for Feed<'_, Si, Item> {} + +impl<'a, Si: Sink + Unpin + ?Sized, Item> Feed<'a, Si, Item> { + pub(super) fn new(sink: &'a mut Si, item: Item) -> Self { + Feed { sink, item: Some(item) } + } + + pub(super) fn sink_pin_mut(&mut self) -> Pin<&mut Si> { + Pin::new(self.sink) + } + + pub(super) fn is_item_pending(&self) -> bool { + self.item.is_some() + } +} + +impl + Unpin + ?Sized, Item> Future for Feed<'_, Si, Item> { + type Output = Result<(), Si::Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + let mut sink = Pin::new(&mut this.sink); + ready!(sink.as_mut().poll_ready(cx))?; + let item = this.item.take().expect("polled Feed after completion"); + sink.as_mut().start_send(item)?; + Poll::Ready(Ok(())) + } +} +``` + +`Feed` 实现了 `Future` `trait`,其 `poll` 方法首先调用 `poll_ready` 方法,如果 `poll_ready` 返回 `Ready`,则调用 `start_send` 方法,将 `item` 发送出去,如果 `start_send` 返回 `Ready`,则返回 `Ready`,否则继续调用 `poll_ready` 方法。 + +`poll_ready` 存在的意义是对是否能够发送 `item` 做出判断,如果不能发送,则需要等待(`poll_ready` 返回 `Poll::Pending` 等待被唤醒,具体实现是通过调用 `cx.waker().wake_by_ref()` 将异步任务注册,等待下一次被调度,`poll_ready` 的文档说明了这个过程,见下面 👇),直到能够发送。举个例子,在 `FramedImpl` 实现 `Sink` `trait` 时,采用了底层缓冲区(`WriteFrame`)的方式来存储待发送的数据,如果缓冲区满了,则调用 `poll_flush` 方法,否则表示可以开始发送数据(调用 `start_send` 方法)。 + +`FramedImpl::poll_ready` 方法的实现如下: + +```rust +/// Attempts to prepare the `Sink` to receive a value. +/// +/// This method must be called and return `Poll::Ready(Ok(()))` prior to +/// each call to `start_send`. +/// +/// This method returns `Poll::Ready` once the underlying sink is ready to +/// receive data. If this method returns `Poll::Pending`, the current task +/// is registered to be notified (via `cx.waker().wake_by_ref()`) when `poll_ready` +/// should be called again. +/// +/// In most cases, if the sink encounters an error, the sink will +/// permanently be unable to receive items. 
+fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.state.borrow().buffer.len() >= BACKPRESSURE_BOUNDARY { + self.as_mut().poll_flush(cx) + } else { + Poll::Ready(Ok(())) + } +} +``` + +通过分析 `Feed` 的 `poll` 方法,我们得知数据最终是如何发送出去的了。 + +至于待发送的数据何时被编码,我们可以看到是在 `FramedImpl::start_send` 方法来做的。 + +`FramedImpl::start_end` 方法的实现如下: + +```rust +fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> { + let pinned = self.project(); + pinned + .codec + .encode(item, &mut pinned.state.borrow_mut().buffer)?; + Ok(()) +} +``` + +所以,我们能通过 `next` 来从数据流中接收并解析成 `Message` 结构,然后又通过 `send` 方法来将接收到的数据发送出去了。 + +`next` 接收数据 `send` 发送数据: + +```rust +while let Some(message) = conn.next().await { + if let Ok(message) = message { + println!("received: {:?}", message); + conn.send(message).await.unwrap(); + } +} +``` + +总结一下就是: + +- `SinkExt` 提供了 `send` 方法,用于将接收到的数据发送出去 +- `SinkExt::send` 方法通过 `Send::new` 返回一个实现了 `Future` 的 `Send` struct +- `Send` 内部采用 `Feed` 实现,目的是防止重复发送(将带发送的 `Item` 放入 `Option`,在 `poll` 被调用前检查是否已经被编码发送出去,如果已经被编码并发送,则 `Option` 为 `None`),`Feed` 也实现了 `Future` `trait` +- `Send` 方法首先检查 `Feed` 的 `is_item_pending` 方法,如果 `Feed` 的 `item` 为 `None`,则表示 `Feed` 已经被编码并发送出去,如果 `Feed` 的 `item` 为 `Some`,则表示 `Feed` 还未被编码并发送出去,需要调用 `Feed` 的 `poll` 方法。 +- `Feed::poll` 方法完成发送逻辑 + - 调用 `poll_ready` 判断是否可以发送,缓冲区 `BACKPRESSURE_BOUNDARY` 大小为 8k,满了则无法发送 + - 调用 `self.item.take` 将待发送的 `Item` 取出 + - 调用 `start_send` 对 `Item` 进行编码 +- `Send` 最后调用 `poll_flush` (此时是 `FramedImpl::poll_flush`)刷新写缓冲区 + +客户端测试: + +首先运行 server 端: + +```rust +echo server started! +accepted connection from: 127.0.0.1:60105 +received: "1\r\n" +received: "2\r\n" +received: "3\r\n" +received: "44\r\n" +received: "55\r\n" +received: "66\r\n" +received: "777\r\n" +received: "888\r\n" +received: "999\r\n" +``` + +其次使用 `telnet` 来连接 `server` 端,并输入数字,然后按回车键,这些数字会被转换成字符串,然后会被发送到 `server` 端。 + +客户端的连接: + +``` +telnet localhost 50007 +Trying 127.0.0.1... +Connected to localhost. +Escape character is '^]'. +1 +1 +2 +2 +3 +3 +44 +44 +55 +55 +66 +66 +777 +777 +888 +888 +999 +999 +``` + +可以看到,`server` 端接收到的数据是 `1\r\n`, `2\r\n`, `3\r\n`, `44\r\n`, `55\r\n`, `66\r\n`, `777\r\n`, `888\r\n`, `999\r\n`,成功接受到来自 `client` 端的数据。 + +## Echo using io::Copy + +手动实现 `EchoCodec` 比较繁琐,为了方便,我们可以使用 `io::copy` 来实现 `EchoCodec` 的功能,它的实现如下: + +首先,`socket.split()` 将 `socket` 分成两个部分,一个是接收数据(这个在 `tokio` 里叫做 `ReadHalf`),一个是发送数据(这个在 `tokio` 里叫做 `WriteHalf`)。`io::copy` 将接收数据(`ReadHalf`)的部分拷贝到发送数据(`WritHalf`)的部分,这样就实现了数据的双向传输。 + +```rust +// 使用 io::copy 自动拷贝数据,需要调用 tokio::io::split 分割成 reader 和 writer +let (mut rd, mut wr) = socket.split(); +if io::copy(&mut rd, &mut wr).await.is_err() { + eprintln!("failed to copy"); +} +``` + +完整的实现如下: + +```rust +#[tokio::main] +async fn main() -> Result<(), Box> { + // start listening on 50007 + let listener = TcpListener::bind("127.0.0.1:50007").await?; + println!("echo server started!"); + + loop { + let (mut socket, addr) = listener.accept().await?; + + println!("accepted connection from: {}", addr); + + tokio::spawn(async move { + // 方法1: + // 使用 io::copy 自动拷贝数据,需要调用 tokio::io::split 分割成 reader 和 writer + let (mut rd, mut wr) = socket.split(); + if io::copy(&mut rd, &mut wr).await.is_err() { + eprintln!("failed to copy"); + } + }); + } + Ok(()) +} +``` + +同样的,我们用客户端来进行测试: + +首先运行 server 端: + +```rust +echo server started! 
+accepted connection from: 127.0.0.1:60205
+received: "1\r\n"
+received: "2\r\n"
+received: "3\r\n"
+received: "44\r\n"
+received: "55\r\n"
+received: "66\r\n"
+received: "777\r\n"
+received: "888\r\n"
+received: "999\r\n"
+```
+
+其次使用 `telnet` 连接 `server` 端,输入数字并按回车,这些数字会以字符串的形式发送到 `server` 端,随后被原样回显。
+
+客户端的连接:
+
+```
+telnet localhost 50007
+Trying 127.0.0.1...
+Connected to localhost.
+Escape character is '^]'.
+1
+1
+2
+2
+3
+3
+44
+44
+55
+55
+66
+66
+777
+777
+888
+888
+999
+999
+```
+
+可以看到,利用 `io::copy` 和手动实现 `EchoCodec` 的输出一致。
+
+# Stream Sink trait
+
+最后,附赠一下 `Stream` 和 `Sink` 这两个 `trait` 的定义(它们定义在 `futures` 系列 crate 中,`tokio` 生态直接复用)。
+
+实现了 `Stream` 和 `Sink`,就能从数据流(如 `TcpStream` 或 `File`)中获取数据,并且能够将数据写回到数据流中。
+
+`Stream` `trait` 的定义:
+
+```rust
+/// A stream of values produced asynchronously.
+///
+/// If `Future<Output = T>` is an asynchronous version of `T`, then
+/// `Stream<Item = T>` is an asynchronous version of `Iterator<Item = T>`. A stream
+/// represents a sequence of value-producing events that occur asynchronously to
+/// the caller.
+///
+/// The trait is modeled after `Future`, but allows `poll_next` to be called
+/// even after a value has been produced, yielding `None` once the stream has
+/// been fully exhausted.
+#[must_use = "streams do nothing unless polled"]
+pub trait Stream {
+    /// Values yielded by the stream.
+    type Item;
+
+    /// Attempt to pull out the next value of this stream, registering the
+    /// current task for wakeup if the value is not yet available, and returning
+    /// `None` if the stream is exhausted.
+    ///
+    /// # Return value
+    ///
+    /// There are several possible return values, each indicating a distinct
+    /// stream state:
+    ///
+    /// - `Poll::Pending` means that this stream's next value is not ready
+    /// yet. Implementations will ensure that the current task will be notified
+    /// when the next value may be ready.
+    ///
+    /// - `Poll::Ready(Some(val))` means that the stream has successfully
+    /// produced a value, `val`, and may produce further values on subsequent
+    /// `poll_next` calls.
+    ///
+    /// - `Poll::Ready(None)` means that the stream has terminated, and
+    /// `poll_next` should not be invoked again.
+    ///
+    /// # Panics
+    ///
+    /// Once a stream has finished (returned `Ready(None)` from `poll_next`), calling its
+    /// `poll_next` method again may panic, block forever, or cause other kinds of
+    /// problems; the `Stream` trait places no requirements on the effects of
+    /// such a call. However, as the `poll_next` method is not marked `unsafe`,
+    /// Rust's usual rules apply: calls must never cause undefined behavior
+    /// (memory corruption, incorrect use of `unsafe` functions, or the like),
+    /// regardless of the stream's state.
+    ///
+    /// If this is difficult to guard against then the [`fuse`] adapter can be used
+    /// to ensure that `poll_next` always returns `Ready(None)` in subsequent
+    /// calls.
+    ///
+    /// [`fuse`]: https://docs.rs/futures/0.3/futures/stream/trait.StreamExt.html#method.fuse
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>>;
+
+    /// Returns the bounds on the remaining length of the stream.
+    ///
+    /// Specifically, `size_hint()` returns a tuple where the first element
+    /// is the lower bound, and the second element is the upper bound.
+    ///
+    /// The second half of the tuple that is returned is an [`Option`]`<`[`usize`]`>`.
+    /// A [`None`] here means that either there is no known upper bound, or the
+    /// upper bound is larger than [`usize`].
+    ///
+    /// # Implementation notes
+    ///
+    /// It is not enforced that a stream implementation yields the declared
+    /// number of elements. A buggy stream may yield less than the lower bound
+    /// or more than the upper bound of elements.
+    ///
+    /// `size_hint()` is primarily intended to be used for optimizations such as
+    /// reserving space for the elements of the stream, but must not be
+    /// trusted to e.g., omit bounds checks in unsafe code. An incorrect
+    /// implementation of `size_hint()` should not lead to memory safety
+    /// violations.
+    ///
+    /// That said, the implementation should provide a correct estimation,
+    /// because otherwise it would be a violation of the trait's protocol.
+    ///
+    /// The default implementation returns `(0, `[`None`]`)` which is correct for any
+    /// stream.
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (0, None)
+    }
+}
+```
+
+`Sink` `trait` 的定义:
+
+```rust
+/// A `Sink` is a value into which other values can be sent, asynchronously.
+///
+/// Basic examples of sinks include the sending side of:
+///
+/// - Channels
+/// - Sockets
+/// - Pipes
+///
+/// In addition to such "primitive" sinks, it's typical to layer additional
+/// functionality, such as buffering, on top of an existing sink.
+///
+/// Sending to a sink is "asynchronous" in the sense that the value may not be
+/// sent in its entirety immediately. Instead, values are sent in a two-phase
+/// way: first by initiating a send, and then by polling for completion. This
+/// two-phase setup is analogous to buffered writing in synchronous code, where
+/// writes often succeed immediately, but internally are buffered and are
+/// *actually* written only upon flushing.
+///
+/// In addition, the `Sink` may be *full*, in which case it is not even possible
+/// to start the sending process.
+///
+/// As with `Future` and `Stream`, the `Sink` trait is built from a few core
+/// required methods, and a host of default methods for working in a
+/// higher-level way. The `Sink::send_all` combinator is of particular
+/// importance: you can use it to send an entire stream to a sink, which is
+/// the simplest way to ultimately consume a stream.
+#[must_use = "sinks do nothing unless polled"]
+pub trait Sink<Item> {
+    /// The type of value produced by the sink when an error occurs.
+    type Error;
+
+    /// Attempts to prepare the `Sink` to receive a value.
+    ///
+    /// This method must be called and return `Poll::Ready(Ok(()))` prior to
+    /// each call to `start_send`.
+    ///
+    /// This method returns `Poll::Ready` once the underlying sink is ready to
+    /// receive data. If this method returns `Poll::Pending`, the current task
+    /// is registered to be notified (via `cx.waker().wake_by_ref()`) when `poll_ready`
+    /// should be called again.
+    ///
+    /// In most cases, if the sink encounters an error, the sink will
+    /// permanently be unable to receive items.
+    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;
+
+    /// Begin the process of sending a value to the sink.
+    /// Each call to this function must be preceded by a successful call to
+    /// `poll_ready` which returned `Poll::Ready(Ok(()))`.
+    ///
+    /// As the name suggests, this method only *begins* the process of sending
+    /// the item. If the sink employs buffering, the item isn't fully processed
+    /// until the buffer is fully flushed. Since sinks are designed to work with
+    /// asynchronous I/O, the process of actually writing out the data to an
+    /// underlying object takes place asynchronously. **You *must* use
+    /// `poll_flush` or `poll_close` in order to guarantee completion of a
+    /// send**.
+    ///
+    /// Implementations of `poll_ready` and `start_send` will usually involve
+    /// flushing behind the scenes in order to make room for new messages.
+    /// It is only necessary to call `poll_flush` if you need to guarantee that
+    /// *all* of the items placed into the `Sink` have been sent.
+    ///
+    /// In most cases, if the sink encounters an error, the sink will
+    /// permanently be unable to receive items.
+    fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error>;
+
+    /// Flush any remaining output from this sink.
+    ///
+    /// Returns `Poll::Ready(Ok(()))` when no buffered items remain. If this
+    /// value is returned then it is guaranteed that all previous values sent
+    /// via `start_send` have been flushed.
+    ///
+    /// Returns `Poll::Pending` if there is more work left to do, in which
+    /// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to wake up when
+    /// `poll_flush` should be called again.
+    ///
+    /// In most cases, if the sink encounters an error, the sink will
+    /// permanently be unable to receive items.
+    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;
+
+    /// Flush any remaining output and close this sink, if necessary.
+    ///
+    /// Returns `Poll::Ready(Ok(()))` when no buffered items remain and the sink
+    /// has been successfully closed.
+    ///
+    /// Returns `Poll::Pending` if there is more work left to do, in which
+    /// case the current task is scheduled (via `cx.waker().wake_by_ref()`) to wake up when
+    /// `poll_close` should be called again.
+    ///
+    /// If this function encounters an error, the sink should be considered to
+    /// have failed permanently, and no more `Sink` methods should be called.
+    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;
+}
+```
diff --git a/src/solana/README.md b/src/solana/README.md
new file mode 100644
index 0000000..481331b
--- /dev/null
+++ b/src/solana/README.md
@@ -0,0 +1 @@
+# Solana
diff --git "a/src/solana/quickstart/\344\270\215\344\275\277\347\224\250Anchor\345\274\200\345\217\221solana\347\232\204program.md" "b/src/solana/quickstart/\344\270\215\344\275\277\347\224\250Anchor\345\274\200\345\217\221solana\347\232\204program.md"
new file mode 100644
index 0000000..cf3403a
--- /dev/null
+++ "b/src/solana/quickstart/\344\270\215\344\275\277\347\224\250Anchor\345\274\200\345\217\221solana\347\232\204program.md"
@@ -0,0 +1,1802 @@
+# 不使用 Anchor 开发 solana program
+
+<!-- toc -->
+
+# 初始化工程
+
+## 使用 Cargo 初始化工程
+
+我们可以使用 cargo 来初始化工程。
+
+```bash
+cargo init hello_world --lib
+```
+
+# 编写代码
+
+## 程序入口 entrypoint
+
+下面利用 `entrypoint` 来编写程序入口。
+
+`entrypoint` macro 需要一个函数作为参数,这个函数就是 solana program 的入口函数。
+
+```rust
+pub fn process_instruction() -> ProgramResult {
+    msg!("Hello, world!");
+    Ok(())
+}
+```
+
+如果传递给 `entrypoint` macro 的函数签名不符合要求,编译时会报错:
+
+```bash
+    Checking hello_world v0.1.0 (/Users/dylan/Code/solana/projects/hello_world)
+error[E0061]: this function takes 0 arguments but 3 arguments were supplied
+  --> src/lib.rs:6:1
+   |
+6  | entrypoint!(process_instruction);
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   | |
+   | unexpected argument #1 of type `&Pubkey`
+   | unexpected argument #2 of type `&Vec<AccountInfo<'_>>`
+   | unexpected argument #3 of type `&[u8]`
+   |
+note: function defined here
+  --> src/lib.rs:8:8
+   |
+8  | pub fn process_instruction() -> ProgramResult {
+   |        ^^^^^^^^^^^^^^^^^^^
+   = note: this error originates in the macro `entrypoint` (in Nightly builds, run with -Z macro-backtrace for more info)
+
+For more information about this error, try `rustc --explain E0061`.
+error: could not compile `hello_world` (lib) due to 1 previous error +``` + +## 修改 process_instruction 函数的签名 + +给 `process_instruction` 函数添加三个参数: + +- `program_id`: `&Pubkey` 类型,表示当前程序的公钥地址 +- `accounts`: `&[AccountInfo]` 类型,是一个 AccountInfo 数组的引用,包含了交易涉及的所有账户信息 +- `instruction_data`: `&[u8]` 类型,是指令的输入数据,以字节数组的形式传入 + +这三个参数是 Solana 程序执行时的基本要素: + +- `program_id` 用于验证程序身份和权限 +- `accounts` 包含了程序需要读取或修改的所有账户数据 +- `instruction_data` 携带了调用程序时传入的具体指令数据 + +```rust +pub fn process_instruction( + _program_id: &Pubkey, + _accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + msg!("Hello, world!"); + Ok(()) +} +``` + +注意这里参数名前加了下划线前缀(`_`),是因为在这个简单的示例中我们暂时没有使用这些参数,这样可以避免编译器的未使用变量警告。在实际开发中,这些参数都是非常重要的,我们会在后续的示例中详细介绍如何使用它们。 + +关于函数签名,我们也可以[参考 solana_program_entrypoint 这个 crate 的文档](https://docs.rs/solana-program-entrypoint/latest/solana_program_entrypoint/macro.entrypoint.html): + +```rust +/// fn process_instruction( +/// program_id: &Pubkey, // Public key of the account the program was loaded into +/// accounts: &[AccountInfo], // All accounts required to process the instruction +/// instruction_data: &[u8], // Serialized instruction-specific data +/// ) -> ProgramResult; +``` + +# 构建程序 + +## 使用 cargo build-sbf 构建程序 + +为了构建 solana program,我们需要使用 `cargo build-sbf` 程序。 + +```bash +cargo build-sbf +``` + +构建失败了,以下是报错信息。 + +``` +dylan@smalltown ~/Code/solana/projects/hello_world (master)> cargo build-sbf +error: package `solana-program v2.1.4` cannot be built because it requires rustc 1.79.0 or newer, while the currently active rustc version is 1.75.0-dev +Either upgrade to rustc 1.79.0 or newer, or use +cargo update solana-program@2.1.4 --precise ver +where `ver` is the latest version of `solana-program` supporting rustc 1.75.0-dev +``` + +我们可以通过 `--version` 参数来查看 `rustc` 的版本信息。 + +```bash +cargo-build-sbf --version +``` + +输出: + +``` +solana-cargo-build-sbf 1.18.25 +platform-tools v1.41 +rustc 1.75.0 +``` + +关于系统版本的 rust compiler 和 build-sbf 使用的 rust compiler 不对应的问题,可以参考这个 issue。 +https://github.com/solana-labs/solana/issues/34987 + +## 解决 build-sbf 编译失败问题 + +一种方式是使用旧版本的 `solana-program`,如 `=1.17.0` 版本。 + +```toml +[package] +name = "hello_world" +version = "0.1.0" +edition = "2021" + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +solana-program = "=1.17.0" +# solana-program = "=1.18.0" +``` + +但是运行 `cargo build-sbf` 之后,出现了另外的错误。 + +```bash +error: failed to parse lock file at: /Users/dylan/Code/solana/projects/hello_world/Cargo.lock + +Caused by: + lock file version 4 requires `-Znext-lockfile-bump` +``` + +猜测可能是 `build-sbf` 使用的 cargo 版本不支持 version = 4 版本的 `Cargo.lock` 文件,而这个是编辑器(vscode/cursor)打开的状态下,rust-analyser 自动生成的。 + +安装 `stable` 版本的 solana cli 工具链: `sh -c "$(curl -sSfL https://release.anza.xyz/stable/install)"`,发现还是无法编译,报错如下: + +```bash +dylan@smalltown ~/Code/solana/projects/hello_world (master)> sh -c "$(curl -sSfL https://release.anza.xyz/stable/install)" +downloading stable installer + ✨ stable commit 7104d71 initialized +dylan@smalltown ~/Code/solana/projects/hello_world (master)> cargo build-sbf --version +solana-cargo-build-sbf 2.0.17 +platform-tools v1.42 + +dylan@smalltown ~/Code/solana/projects/hello_world (master)> cargo build-sbf +[2024-12-04T11:14:48.052020000Z ERROR cargo_build_sbf] Failed to install platform-tools: HTTP status client error (404 Not Found) for url (https://github.com/anza-xyz/platform-tools/releases/download/v1.42/platform-tools-osx-x86_64.tar.bz2) +``` + +在进行 `cargo build-sbf` 编译的时候,需要下载对应版本的 `platform-tools`,因为未发布针对 Mac(Intel) 的 
`v1.42` 版本 的 `platform-tools`,所以上述命令运行失败。 + +```bash +dylan@smalltown ~/Code/solana/projects/hello_world (master)> cargo build-sbf + Compiling cc v1.2.2 + Compiling serde v1.0.215 + Compiling solana-frozen-abi-macro v1.17.0 + Compiling ahash v0.7.8 + Compiling solana-frozen-abi v1.17.0 + Compiling either v1.13.0 + Compiling bs58 v0.4.0 + Compiling log v0.4.22 + Compiling hashbrown v0.11.2 + Compiling itertools v0.10.5 + Compiling solana-sdk-macro v1.17.0 + Compiling bytemuck v1.20.0 + Compiling borsh v0.9.3 + Compiling num-derive v0.3.3 + Compiling blake3 v1.5.5 + Compiling solana-program v1.17.0 + Compiling bv v0.11.1 + Compiling serde_json v1.0.133 + Compiling serde_bytes v0.11.15 + Compiling bincode v1.3.3 +Error: Function _ZN112_$LT$solana_program..instruction..InstructionError$u20$as$u20$solana_frozen_abi..abi_example..AbiEnumVisitor$GT$13visit_for_abi17hc69c00f4c61717f8E Stack offset of 6640 exceeded max offset of 4096 by 2544 bytes, please minimize large stack variables. Estimated function frame size: 6680 bytes. Exceeding the maximum stack offset may cause undefined behavior during execution. + + Compiling hello_world v0.1.0 (/Users/dylan/Code/solana/projects/hello_world) + Finished `release` profile [optimized] target(s) in 25.19s ++ ./platform-tools/rust/bin/rustc --version ++ ./platform-tools/rust/bin/rustc --print sysroot ++ set +e ++ rustup toolchain uninstall solana +info: uninstalling toolchain 'solana' +info: toolchain 'solana' uninstalled ++ set -e ++ rustup toolchain link solana platform-tools/rust ++ exit 0 +⏎ + +dylan@smalltown ~/Code/solana/projects/hello_world (master)> ls target/deploy/ +hello_world-keypair.json hello_world.so +dylan@smalltown ~/Code/solana/projects/hello_world (master)> cargo build-sbf --version +solana-cargo-build-sbf 2.1.4 +platform-tools v1.43 +rustc 1.79.0 + +dylan@smalltown ~/Code/solana/projects/hello_world (master) [1]> sh -c "$(curl -sSfL https://release.anza.xyz/beta/install)" +downloading beta installer + ✨ beta commit 024d047 initialized +dylan@smalltown ~/Code/solana/projects/hello_world (master)> cargo build-sbf --version +solana-cargo-build-sbf 2.1.4 +platform-tools v1.43 +rustc 1.79.0 +dylan@smalltown ~/Code/solana/projects/hello_world (master)> cargo build-sbf +Error: Function _ZN112_$LT$solana_program..instruction..InstructionError$u20$as$u20$solana_frozen_abi..abi_example..AbiEnumVisitor$GT$13visit_for_abi17hc69c00f4c61717f8E Stack offset of 6640 exceeded max offset of 4096 by 2544 bytes, please minimize large stack variables. Estimated function frame size: 6680 bytes. Exceeding the maximum stack offset may cause undefined behavior during execution. + + Finished `release` profile [optimized] target(s) in 0.23s +``` + +使用 `beta` 版本的 solana cli tool suites 虽然能够编译,但是遇到了这个错误: + +`Exceeding the maximum stack offset may cause undefined behavior during execution.` + +``` + Compiling bincode v1.3.3 +Error: Function _ZN112_$LT$solana_program..instruction..InstructionError$u20$as$u20$solana_frozen_abi..abi_example..AbiEnumVisitor$GT$13visit_for_abi17hc69c00f4c61717f8E Stack offset of 6640 exceeded max offset of 4096 by 2544 bytes, please minimize large stack variables. Estimated function frame size: 6680 bytes. Exceeding the maximum stack offset may cause undefined behavior during execution. 
+``` + +具体原因依旧是老生常谈的版本问题,原因分析可以参考: +https://solana.stackexchange.com/questions/16443/error-function-stack-offset-of-7256-exceeded-max-offset-of-4096-by-3160-bytes + +尝试更新 `solana-program` 的版本到 `2.1.4` 之后(运行 `sh -c "$(curl -sSfL https://release.anza.xyz/v2.1.4/install)"`),用以下版本的工具链进行编译: + +```bash +> cargo build-sbf --version +solana-cargo-build-sbf 2.1.4 +platform-tools v1.43 +rustc 1.79.0 + +# solana-cargo-build-sbf 2.2.0 +# platform-tools v1.43 +# rustc 1.79.0 +``` + +运行 `cargo build-sbf`: + +```bash +> cargo build-sbf + Compiling serde v1.0.215 + Compiling equivalent v1.0.1 + Compiling hashbrown v0.15.2 + Compiling toml_datetime v0.6.8 + Compiling syn v2.0.90 + Compiling winnow v0.6.20 + Compiling cfg_aliases v0.2.1 + Compiling once_cell v1.20.2 + Compiling borsh v1.5.3 + Compiling solana-define-syscall v2.1.4 + Compiling solana-sanitize v2.1.4 + Compiling solana-atomic-u64 v2.1.4 + Compiling bs58 v0.5.1 + Compiling bytemuck v1.20.0 + Compiling five8_core v0.1.1 + Compiling five8_const v0.1.3 + Compiling solana-decode-error v2.1.4 + Compiling solana-msg v2.1.4 + Compiling cc v1.2.2 + Compiling solana-program-memory v2.1.4 + Compiling log v0.4.22 + Compiling solana-native-token v2.1.4 + Compiling solana-program-option v2.1.4 + Compiling indexmap v2.7.0 + Compiling blake3 v1.5.5 + Compiling toml_edit v0.22.22 + Compiling serde_derive v1.0.215 + Compiling bytemuck_derive v1.8.0 + Compiling solana-sdk-macro v2.1.4 + Compiling thiserror-impl v1.0.69 + Compiling num-derive v0.4.2 + Compiling proc-macro-crate v3.2.0 + Compiling borsh-derive v1.5.3 + Compiling thiserror v1.0.69 + Compiling solana-secp256k1-recover v2.1.4 + Compiling solana-borsh v2.1.4 + Compiling solana-hash v2.1.4 + Compiling bincode v1.3.3 + Compiling bv v0.11.1 + Compiling solana-serde-varint v2.1.4 + Compiling serde_bytes v0.11.15 + Compiling solana-fee-calculator v2.1.4 + Compiling solana-short-vec v2.1.4 + Compiling solana-sha256-hasher v2.1.4 + Compiling solana-pubkey v2.1.4 + Compiling solana-instruction v2.1.4 + Compiling solana-sysvar-id v2.1.4 + Compiling solana-slot-hashes v2.1.4 + Compiling solana-clock v2.1.4 + Compiling solana-epoch-schedule v2.1.4 + Compiling solana-last-restart-slot v2.1.4 + Compiling solana-rent v2.1.4 + Compiling solana-program-error v2.1.4 + Compiling solana-stable-layout v2.1.4 + Compiling solana-serialize-utils v2.1.4 + Compiling solana-account-info v2.1.4 + Compiling solana-program-pack v2.1.4 + Compiling solana-bincode v2.1.4 + Compiling solana-slot-history v2.1.4 + Compiling solana-program-entrypoint v2.1.4 + Compiling solana-cpi v2.1.4 + Compiling solana-program v2.1.4 + Compiling hello_world v0.1.0 (/Users/dylan/Code/solana/projects/hello_world) + Finished `release` profile [optimized] target(s) in 50.87s +``` + +总算编译成功了,开瓶香槟庆祝一下吧! 
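+为了避免再次踩坑,可以在编译前先核对工具链与依赖版本是否对齐。下面是一个简单的核对示意(`cargo tree` 是 cargo 自带命令,输出格式可能随版本略有不同):
+
+```bash
+# 核对 solana CLI 与 cargo build-sbf 携带的工具链版本
+solana --version
+cargo build-sbf --version
+
+# 核对工程实际解析到的 solana-program 版本,预期显示 solana-program v2.1.4
+cargo tree -p solana-program | head -n 1
+```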
+ +这里是 Cargo.toml 文件: + +```toml +[package] +name = "hello_world" +version = "0.1.0" +edition = "2021" + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +solana-program = "2.1.4" +# solana-program = "=1.17.0" +``` + +# 部署 + +当我们通过运行 `solana program deploy` 命令来部署程序的时候,部署失败了。 + +```bash +dylan@smalltown ~/Code/solana/projects/helloworld (master)> solana program deploy ./target/deploy/helloworld.so +⠁ 0.0% | Sending 1/173 transactions [block height 2957; re-sign in 150 blocks] + thread 'main' panicked at quic-client/src/nonblocking/quic_client.rs:142:14: +QuicLazyInitializedEndpoint::create_endpoint bind_in_range: Os { code: 55, kind: Uncategorized, message: "No buffer space available" } +note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace +``` + +那么这个 `No buffer space available` 是什么意思呢? + +排查了很久终于无果,凭借多年的经验,大概率应该是 **版本** 的问题,因为通过 `Anchor` 创建的工程是能够正常部署的。 + +这里记录一下 `solana` 命令的版本信息: + +```bash +> solana --version +solana-cli 2.2.0 (src:67704836; feat:1081947060, client:Agave) +``` + +## 回到 Anchor 工程验证部署失败源自版本的问题 + +我们可以通过 `anchor init helloworld` 新建工程,并通过 `anchor build` 和 `anchor deploy` 来部署程序。 + +```bash +anchor init helloworld +cd helloworld +anchor build +anchor deploy +``` + +从出错信息了解到,全新生成的 anchor 工程部署的时候会发生同样的错误:`No buffer space available` + +```bash +dylan@smalltown ~/tmp/helloworld (main)> anchor deploy +Deploying cluster: https://api.devnet.solana.com +Upgrade authority: /Users/dylan/.config/solana/id.json +Deploying program "helloworld"... +Program path: /Users/dylan/tmp/helloworld/target/deploy/helloworld.so... +⠁ 0.0% | Sending 1/180 transactions [block height 332937196; re-sign in 150 blocks] thread 'main' panicked at quic-client/src/nonblocking/quic_client.rs:142:14: +QuicLazyInitializedEndpoint::create_endpoint bind_in_range: Os { code: 55, kind: Uncategorized, message: "No buffer space available" } +note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace +There was a problem deploying: Output { status: ExitStatus(unix_wait_status(25856)), stdout: "", stderr: "" }. +``` + +检查下 anchor 的版本: + +```bash +dylan@smalltown ~/tmp/helloworld (main)> anchor deploy --help +Deploys each program in the workspace + +Usage: anchor-0.30.1 deploy [OPTIONS] [-- ...] + +Arguments: + [SOLANA_ARGS]... 
Arguments to pass to the underlying `solana program deploy` command + +Options: + -p, --program-name Only deploy this program + --provider.cluster Cluster override + --program-keypair Keypair of the program (filepath) (requires program-name) + --provider.wallet Wallet override + -v, --verifiable If true, deploy from path target/verifiable + -h, --help Print help +``` + +检查下 solana 的版本: + +```bash +> solana --version +solana-cli 2.2.0 (src:67704836; feat:1081947060, client:Agave) +``` + +这个 `2.2.0` 的版本看着有些奇怪,忽然想到为了编译 solana 程序,我安装了 edge 版本的 solana cli,其携带的 solana cli 的版本是 `2.2.0`: + +```bash +sh -c "$(curl -sSfL https://release.anza.xyz/edge/install)" +``` + +于是换回了 `stable` 版本: + +```bash +> sh -c "$(curl -sSfL https://release.anza.xyz/stable/install)" +downloading stable installer + ✨ stable commit fbead11 initialized +``` + +而 stable 版本的 solana 是 `2.0.19`。 + +```bash +> solana --version +solana-cli 2.0.19 (src:fbead118; feat:607245837, client:Agave) +``` + +重新部署程序之前,我们先来清理下之前部署失败的程序的 `buffers`,也就是 buffer accounts。关于什么是 buffer accounts,请参考 Tips 3。 + +- 查看所有的 buffer accounts: `solana program show --buffers` +- 关闭所有的 buffer accounts: `solana program close --buffers` + - 关闭 buffer accounts 可以回收存储在 buffer accounts 里的 SOL + +```bash +Error: error sending request for url (https://api.devnet.solana.com/): operation timed out +dylan@smalltown ~/tmp/helloworld (main)> solana program show --buffers + +Buffer Address | Authority | Balance +CcKFVBzcsrcReZHBLnwzkQbNGXoK4hUee7hkgtbHCKtL | FCxBXdduz9HqTEPvEBSuFLLAjbVYh9a5ZgEnZwKyN2ZH | 0.12492504 SOL +62wFzMYBhxWg4ntEJmFZcQ3P3Qtm9SbaBcbTmV8o8yPk | FCxBXdduz9HqTEPvEBSuFLLAjbVYh9a5ZgEnZwKyN2ZH | 0.12492504 SOL +9q88jzvR5AdPdNTihxWroxRL7cBWQ5xXepNfDdaqmMTv | FCxBXdduz9HqTEPvEBSuFLLAjbVYh9a5ZgEnZwKyN2ZH | 1.26224472 SOL +3nqzHv9vUphsmAjoR1C5ShgZ54muTzkZZ6Z4NKfqrKqt | FCxBXdduz9HqTEPvEBSuFLLAjbVYh9a5ZgEnZwKyN2ZH | 1.26224472 SOL +8tZ8YYA1WS6WFVyEbJAdgnszXYZwwq7b9RLdoiry2Fb1 | FCxBXdduz9HqTEPvEBSuFLLAjbVYh9a5ZgEnZwKyN2ZH | 0.12492504 SOL + +dylan@smalltown ~/tmp/helloworld (main)> solana program close --buffers + +Buffer Address | Authority | Balance +CcKFVBzcsrcReZHBLnwzkQbNGXoK4hUee7hkgtbHCKtL | FCxBXdduz9HqTEPvEBSuFLLAjbVYh9a5ZgEnZwKyN2ZH | 0.12492504 SOL +62wFzMYBhxWg4ntEJmFZcQ3P3Qtm9SbaBcbTmV8o8yPk | FCxBXdduz9HqTEPvEBSuFLLAjbVYh9a5ZgEnZwKyN2ZH | 0.12492504 SOL +9q88jzvR5AdPdNTihxWroxRL7cBWQ5xXepNfDdaqmMTv | FCxBXdduz9HqTEPvEBSuFLLAjbVYh9a5ZgEnZwKyN2ZH | 1.26224472 SOL +3nqzHv9vUphsmAjoR1C5ShgZ54muTzkZZ6Z4NKfqrKqt | FCxBXdduz9HqTEPvEBSuFLLAjbVYh9a5ZgEnZwKyN2ZH | 1.26224472 SOL +8tZ8YYA1WS6WFVyEbJAdgnszXYZwwq7b9RLdoiry2Fb1 | FCxBXdduz9HqTEPvEBSuFLLAjbVYh9a5ZgEnZwKyN2ZH | 0.12492504 SOL +``` + +好了 buffer accounts 清理完毕,此时我们也换回了 `stable` 版本的 solana cli,我们再尝试部署程序: + +```bash +> anchor deploy +Deploying cluster: https://api.devnet.solana.com +Upgrade authority: /Users/dylan/.config/solana/id.json +Deploying program "helloworld"... +Program path: /Users/dylan/tmp/helloworld/target/deploy/helloworld.so... +Program Id: DiSGTiXGq4HXCxq1pAibuGZjSpKT4Av8WShvuuYhTks9 + +Signature: 2EXHmU68k9SmJ5mXuM61pFDnUgozbJZ5ihHChPqFMVgjRJy4zCqnq6NAbvDkfiHd29xsmW4Vr3Kk6wHFbLEdCEZb + +Deploy success +``` + +成功了 🎉,再开一瓶香槟庆祝下吧! + +这更加深了我们的猜测:版本问题导致程序无法部署。 + +## 再回来部署我们的 hello_world 工程 + +好了,验证了部署失败不是工程类型(anchor project or cargo projct)导致的原因之后,我们再回到 `cargo init` 创建的工程:`hello_world`. 
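+在进行 localnet 部署之前,本地还需要有一个正在运行的测试验证器。下面是一个最小的操作示意(使用官方 CLI 自带的 `solana-test-validator`;后文出现的 `solana_local`、`solana_devnet` 则是笔者本地切换 RPC 配置的别名,并非必需):
+
+```bash
+# 在单独的终端里启动本地测试验证器
+solana-test-validator
+
+# 将 RPC 切换到本地,再执行部署
+solana config set --url localhost
+solana program deploy ./target/deploy/hello_world.so
+```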
+ +我们可以通过 `solana` 的子命令来部署程序: 运行 `solana program deploy ./target/deploy/helloworld.so` 部署程序。 + +我们会分别在 `localnet` 和 `devnet` 部署。 + +### localnet 部署 + +首先是 `localnet` 部署。 + +切换环境到 localnet: + +```bash +dylan@smalltown ~/Code/solana/projects/hello_world (master)> solana_local +Config File: /Users/dylan/.config/solana/cli/config.yml +RPC URL: http://localhost:8899 +WebSocket URL: ws://localhost:8900/ (computed) +Keypair Path: /Users/dylan/.config/solana/id.json +Commitment: confirmed +dylan@smalltown ~/Code/solana/projects/hello_world (master)> solana config get +Config File: /Users/dylan/.config/solana/cli/config.yml +RPC URL: http://localhost:8899 +WebSocket URL: ws://localhost:8900/ (computed) +Keypair Path: /Users/dylan/.config/solana/id.json +Commitment: confirmed +``` + +部署程序: + +```bash +dylan@smalltown ~/Code/solana/projects/hello_world (master)> solana program deploy ./target/deploy/hello_world.so +Program Id: DhQr1KGGQcf8BeU5uQvR35p2kgKqEinD45PRTDDRqx7z + +Signature: 3WVEWN4NUodsb8ZDjbjrTWXLikZ7wbWCuzuRZtSBmyKL4kVvESSeLwKZ3cJo1At4vDcaBs5iEcHhdteyXCwqwmDw +``` + +### devnet 部署 + +下面是 `devnet` 部署。 + +切换环境到 localnet: + +```bash +dylan@smalltown ~/Code/solana/projects/hello_world (master)> solana_devnet +Config File: /Users/dylan/.config/solana/cli/config.yml +RPC URL: https://api.devnet.solana.com +WebSocket URL: wss://api.devnet.solana.com/ (computed) +Keypair Path: /Users/dylan/.config/solana/id.json +Commitment: confirmed + +dylan@smalltown ~/Code/solana/projects/hello_world (master)> solana config get +Config File: /Users/dylan/.config/solana/cli/config.yml +RPC URL: https://api.devnet.solana.com +WebSocket URL: wss://api.devnet.solana.com/ (computed) +Keypair Path: /Users/dylan/.config/solana/id.json +Commitment: confirmed + +dylan@smalltown ~/Code/solana/projects/hello_world (master)> solana program deploy ./target/deploy/hello_world.so +Program Id: DhQr1KGGQcf8BeU5uQvR35p2kgKqEinD45PRTDDRqx7z + +Signature: 4P89gHNUNccQKJAsE3aXJVpFrWeqLxcmk9SYHbQCX7T1sEvyPrxcbrAeJbk8F8YKwWT79nTswSZkz7mtSb55nboF +``` + +我们可以通过 solana balance 来查询下部署前后的余额 + +```bash +# 部署之前余额 +(base) dylan@smalltown ~/Code/solana/projects/hello_world (master)> solana balance +75.153619879 SOL + +# 部署之后余额 +(base) dylan@smalltown ~/Code/solana/projects/hello_world (master)> solana balance +75.152378439 SOL +``` + +而此时的版本: + +```bash +dylan@smalltown ~/Code/solana/projects/helloworld (master)> solana --version +solana-cli 2.0.19 (src:fbead118; feat:607245837, client:Agave) +``` + +由此可见,不要尝鲜用最新的版本(solana-cli `2.2.0`),否则会弄巧成拙。 + +# Tips + +## Tip 1: solana cli 的版本和 Cargo.toml 里的版本保持一致 + +在 [solana 的官方教程](https://solana.com/developers/guides/getstarted/local-rust-hello-world#create-a-new-rust-library-with-cargo)里提到这个 Tip: + +> It is highly recommended to keep your solana-program and other Solana Rust dependencies in-line with your installed version of the Solana CLI. For example, if you are running Solana CLI 2.0.3, you can instead run: + +```bash +cargo add solana-program@"=2.0.3" +``` + +> This will ensure your crate uses only 2.0.3 and nothing else. If you experience compatibility issues with Solana dependencies, check out the + +## Tip 2: 不要在 dependencies 里添加 solana-sdk,因为这是 offchain 使用的 + +参考这里的说明: +https://solana.stackexchange.com/questions/9109/cargo-build-bpf-failed + +> I have identified the issue. The solana-sdk is designed for off-chain use only, so it should be removed from the dependencies. 
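+换句话说,链上 crate 的 `[dependencies]` 里只保留 `solana-program`,而 `solana-sdk`、`solana-program-test` 这类链下/测试用的依赖应放进 `[dev-dependencies]`。下面是一个布局示意(与后文测试章节的 Cargo.toml 一致,版本号仅为示例):
+
+```toml
+[dependencies]
+solana-program = "1.18.26"
+
+[dev-dependencies]
+# 仅用于链下客户端与测试,不会被编译进链上的 SBF 程序
+solana-sdk = "1.18.26"
+solana-program-test = "1.18.26"
+tokio = "1"
+```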
+ +错误将 `solana-sdk` 添加到 dependencies 报错: + +```bash + Compiling autocfg v1.4.0 + Compiling jobserver v0.1.32 +error: target is not supported, for more information see: https://docs.rs/getrandom/#unsupported-targets + --> src/lib.rs:267:9 + | +267 | / compile_error!("\ +268 | | target is not supported, for more information see: \ +269 | | https://docs.rs/getrandom/#unsupported-targets\ +270 | | "); + | |__________^ + +error[E0433]: failed to resolve: use of undeclared crate or module `imp` + --> src/lib.rs:291:5 + | +291 | imp::getrandom_inner(dest) + | ^^^ use of undeclared crate or module `imp` + +For more information about this error, try `rustc --explain E0433`. +error: could not compile `getrandom` (lib) due to 2 previous errors +warning: build failed, waiting for other jobs to finish... +``` + +## Tip 3: 关于 buffer accounts + +在 Solana 中,buffer accounts 是用于程序部署过程中的一种临时账户,它是 Solana 部署程序时的一个重要机制。由于 Solana 的交易大小限制为 `1232` 字节,部署程序时通常需要多个交易步骤。在这个过程中,buffer account 的作用是存储程序的字节码,直到部署完成。 + +buffer account 的关键点: + +- 临时存储:buffer account 用于存放程序的字节码,确保在部署过程中能够处理较大的程序。 +- 自动关闭:一旦程序成功部署,相关的 buffer account 会自动关闭,从而释放占用的资源。 +- 失败处理:如果部署失败,buffer account 不会自动删除,用户可以选择: + - 继续使用现有的 buffer account 来完成部署。 + - 关闭 buffer account,以便回收已分配的 SOL(租金)。 +- 检查 buffer accounts:可以通过命令 `solana program show --buffers` 来检查当前是否存在未关闭的 buffer accounts。 +- 关闭 buffer accounts:可以通过命令 `solana program close --buffers` 来关闭 buffer accounts。 + +关于 solana 程序部署的过程的解释,可以查考官方文档: https://solana.com/docs/programs/deploying#program-deployment-process + +# 重新部署 + +重新部署只需要编辑代码之后运行 `cargo build-sbf` 编译代码,再通过 `solana program deply ./target/deploy/hello_world.so` 部署即可。 + +```bash +cargo build-sbf +solana program deploy ./target/deploy/hello_world.so +``` + +可以通过运行测试和 client 脚本来验证运行的是新版本的 program。 + +```bash +# 运行测试 +cargo test-sbf +# 运行 client 脚本 +cargo run --example client +``` + +比如,我修改 `msg!` 输入内容为 `Hello, world! GM!GN!`,运行测试和 client 脚本能够看到 log 里有这个输出。 + +```rust +pub fn process_instruction( + _program_id: &Pubkey, + _accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + msg!("Hello, world! GM!GN!"); + Ok(()) +} +``` + +运行测试: + +``` +(base) dylan@smalltown ~/Code/solana/projects/hello_world (master)> cargo test-sbf + Compiling hello_world v0.1.0 (/Users/dylan/Code/solana/projects/hello_world) + Finished release [optimized] target(s) in 1.76s + Compiling hello_world v0.1.0 (/Users/dylan/Code/solana/projects/hello_world) + Finished `test` profile [unoptimized + debuginfo] target(s) in 13.92s + Running unittests src/lib.rs (target/debug/deps/hello_world-ee1a919556768e26) + +running 1 test +[2024-12-06T08:06:57.714248000Z INFO solana_program_test] "hello_world" SBF program from /Users/dylan/Code/solana/projects/hello_world/target/deploy/hello_world.so, modified 19 seconds, 228 ms, 255 µs and 392 ns ago +[2024-12-06T08:06:57.947344000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM invoke [1] +[2024-12-06T08:06:57.947695000Z DEBUG solana_runtime::message_processor::stable_log] Program log: Hello, world! GM!GN! +[2024-12-06T08:06:57.947738000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM consumed 140 of 200000 compute units +[2024-12-06T08:06:57.947897000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM success +test test::test_hello_world ... ok + +test result: ok. 
1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.24s + + Doc-tests hello_world + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s +``` + +TODO: image + +# 最佳实践 + +## 安装 solana-cli 的最佳实践 + +最好的方式是安装指定版本的 solana cli,如可以用以下方式安装 `2.0.3` 的版本: + +```bash +# 安装 stable 和 beta 都不推荐 +# sh -c "$(curl -sSfL https://release.anza.xyz/stable/install)" +# sh -c "$(curl -sSfL https://release.anza.xyz/beta/install)" +# 推荐安装指定版本 +sh -c "$(curl -sSfL https://release.anza.xyz/v2.0.3/install)" +``` + +输出: + +``` +downloading v2.0.3 installer + ✨ 2.0.3 initialized +``` + +运行 `cargo build-sbf --version` 查看下 `cargo build-sbf` 的版本: + +```bash +(base) dylan@smalltown ~/Code/solana/projects/hello_world (master) [1]> cargo build-sbf --version +solana-cargo-build-sbf 2.0.3 +platform-tools v1.41 +rustc 1.75.0 +``` + +可以看到,这里的 rustc 版本是 `1.75.0`,比较老旧,编译的时候必须带上 `-Znext-lockfile-bump` 参数,否则编译出错: + +```bash +(base) dylan@smalltown ~/Code/solana/projects/hello_world (master)> cargo build-sbf +info: uninstalling toolchain 'solana' +info: toolchain 'solana' uninstalled +error: failed to parse lock file at: /Users/dylan/Code/solana/projects/hello_world/Cargo.lock + +Caused by: + lock file version 4 requires `-Znext-lockfile-bump` +``` + +以下是传递 `-Znext-lockfile-bump` 参数之后,完整的编译过程: + +```bash +(base) dylan@smalltown ~/Code/solana/projects/hello_world (master)> cargo build-sbf -- -Znext-lockfile-bump + Compiling proc-macro2 v1.0.92 + Compiling unicode-ident v1.0.14 + Compiling version_check v0.9.5 + Compiling typenum v1.17.0 + Compiling autocfg v1.4.0 + Compiling serde v1.0.215 + Compiling syn v1.0.109 + Compiling cfg-if v1.0.0 + Compiling equivalent v1.0.1 + Compiling hashbrown v0.15.2 + Compiling semver v1.0.23 + Compiling generic-array v0.14.7 + Compiling ahash v0.8.11 + Compiling winnow v0.6.20 + Compiling indexmap v2.7.0 + Compiling toml_datetime v0.6.8 + Compiling shlex v1.3.0 + Compiling quote v1.0.37 + Compiling subtle v2.6.1 + Compiling cc v1.2.2 + Compiling syn v2.0.90 + Compiling once_cell v1.20.2 + Compiling rustversion v1.0.18 + Compiling feature-probe v0.1.1 + Compiling zerocopy v0.7.35 + Compiling cfg_aliases v0.2.1 + Compiling borsh v1.5.3 + Compiling bv v0.11.1 + Compiling rustc_version v0.4.1 + Compiling num-traits v0.2.19 + Compiling memoffset v0.9.1 + Compiling thiserror v1.0.69 + Compiling toml_edit v0.22.22 + Compiling blake3 v1.5.5 + Compiling block-buffer v0.10.4 + Compiling crypto-common v0.1.6 + Compiling solana-program v2.0.3 + Compiling digest v0.10.7 + Compiling hashbrown v0.13.2 + Compiling constant_time_eq v0.3.1 + Compiling bs58 v0.5.1 + Compiling arrayvec v0.7.6 + Compiling arrayref v0.3.9 + Compiling keccak v0.1.5 + Compiling sha2 v0.10.8 + Compiling toml v0.5.11 + Compiling sha3 v0.10.8 + Compiling proc-macro-crate v3.2.0 + Compiling borsh-derive-internal v0.10.4 + Compiling borsh-schema-derive-internal v0.10.4 + Compiling getrandom v0.2.15 + Compiling lazy_static v1.5.0 + Compiling bytemuck v1.20.0 + Compiling log v0.4.22 + Compiling proc-macro-crate v0.1.5 + Compiling serde_derive v1.0.215 + Compiling thiserror-impl v1.0.69 + Compiling num-derive v0.4.2 + Compiling solana-sdk-macro v2.0.3 + Compiling bytemuck_derive v1.8.0 + Compiling borsh-derive v1.5.3 + Compiling borsh-derive v0.10.4 + Compiling borsh v0.10.4 + Compiling serde_bytes v0.11.15 + Compiling bincode v1.3.3 + Compiling hello_world v0.1.0 (/Users/dylan/Code/solana/projects/hello_world) + Finished release [optimized] target(s) in 2m 28s ++ 
./platform-tools/rust/bin/rustc --version ++ ./platform-tools/rust/bin/rustc --print sysroot ++ set +e ++ rustup toolchain uninstall solana +info: uninstalling toolchain 'solana' +info: toolchain 'solana' uninstalled ++ set -e ++ rustup toolchain link solana platform-tools/rust ++ exit 0 +``` + +值得注意的是,无论是安装 stable 版本还是 beta 版本都会导致编译失败,stable 版本运行 `cargo build-sbf` 会去 github release 页面下载针对 `x86_64` 架构的 platform-tools,但是官方没有发布提供针对这个版本的 platform-tools。以下是出错信息: + +```bash +(base) dylan@smalltown ~/Code/solana/projects/hello_world (master) [1]> cargo build-sbf --version +solana-cargo-build-sbf 2.0.19 +platform-tools v1.42 + +(base) dylan@smalltown ~/Code/solana/projects/hello_world (master) [1]> cargo build-sbf +[2024-12-05T06:17:30.547088000Z ERROR cargo_build_sbf] Failed to install platform-tools: HTTP status client error (404 Not Found) for url (https://github.com/anza-xyz/platform-tools/releases/download/v1.42/platform-tools-osx-x86_64.tar.bz2) +``` + +发现如果指定 `--tools-version` 为 `v1.43` 也不能成功编译。 + +```bash +(base) dylan@smalltown ~/Code/solana/projects/hello_world (master) [1]> cargo build-sbf --tools-version v1.43 + Blocking waiting for file lock on package cache + Blocking waiting for file lock on package cache + Compiling blake3 v1.5.5 + Compiling solana-program v2.0.3 + Compiling bs58 v0.5.1 + Compiling solana-sdk-macro v2.0.3 + Compiling hello_world v0.1.0 (/Users/dylan/Code/solana/projects/hello_world) + Finished `release` profile [optimized] target(s) in 1m 16s ++ curl -L https://github.com/anza-xyz/platform-tools/releases/download/v1.42/platform-tools-osx-x86_64.tar.bz2 -o platform-tools-osx-x86_64.tar.bz2 + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 9 100 9 0 0 16 0 --:--:-- --:--:-- --:--:-- 16 ++ tar --strip-components 1 -jxf platform-tools-osx-x86_64.tar.bz2 +tar: Error opening archive: Unrecognized archive format ++ return 1 ++ popd ++ return 1 +/Users/dylan/.local/share/solana/install/releases/stable-fbead118867c08e6c3baaf8d196897c2536f067a/solana-release/bin/sdk/sbf/scripts/strip.sh: line 23: /Users/dylan/.local/share/solana/install/releases/stable-fbead118867c08e6c3baaf8d196897c2536f067a/solana-release/bin/sdk/sbf/dependencies/platform-tools/llvm/bin/llvm-objcopy: No such file or directory +``` + +所以还是老老实实安装指定版本的 solana cli 吧。 + +# 如何查看部署的 program + +我们可以通过访问以下地址来查看部署的 program。 + +https://explorer.solana.com/?cluster=custom + +它会自动用本地的 localhost:8899 作为 rpc endpoint,在搜索栏搜索 program id,即可看到 transaction 详情。 + +TODO: image + +# 客户端调用 + +## 客户调用程序 (Rust) (invoke solana program) + +首先创建 `examples` 目录,并在 `examples` 目录下创建 `client.rs` 文件。 + +```bash +mkdir -p examples +touch examples/client.rs +``` + +在 `Cargo.toml` 增加以下内容: + +```toml +[[example]] +name = "client" +path = "examples/client.rs" +``` + +添加 `solana-client` 依赖: + +```bash +cargo add solana-client@1.18.26 --dev +``` + +添加以下代码到 `examples/client.rs`,注意替换你自己部署的 program ID: + +```rust +use solana_client::rpc_client::RpcClient; +use solana_sdk::{ + commitment_config::CommitmentConfig, + instruction::Instruction, + pubkey::Pubkey, + signature::{Keypair, Signer}, + transaction::Transaction, +}; +use std::str::FromStr; + +#[tokio::main] +async fn main() { + // Program ID (replace with your actual program ID) + let program_id = Pubkey::from_str("85K3baeo8tvZBmuty2UP8mMVd1vZtxLkmeUkj1s6tnT6").unwrap(); + + // Connect to the Solana devnet + let rpc_url = String::from("http://127.0.0.1:8899"); + let client = RpcClient::new_with_commitment(rpc_url, 
CommitmentConfig::confirmed()); + + // Generate a new keypair for the payer + let payer = Keypair::new(); + + // Request airdrop + let airdrop_amount = 1_000_000_000; // 1 SOL + let signature = client + .request_airdrop(&payer.pubkey(), airdrop_amount) + .expect("Failed to request airdrop"); + + // Wait for airdrop confirmation + loop { + let confirmed = client.confirm_transaction(&signature).unwrap(); + if confirmed { + break; + } + } + + // Create the instruction + let instruction = Instruction::new_with_borsh( + program_id, + &(), // Empty instruction data + vec![], // No accounts needed + ); + + // Add the instruction to new transaction + let mut transaction = Transaction::new_with_payer(&[instruction], Some(&payer.pubkey())); + transaction.sign(&[&payer], client.get_latest_blockhash().unwrap()); + + // Send and confirm the transaction + match client.send_and_confirm_transaction(&transaction) { + Ok(signature) => println!("Transaction Signature: {}", signature), + Err(err) => eprintln!("Error sending transaction: {}", err), + } +} +``` + +这个简单的脚本能够调用已部署的 solana program,它主要做了以下几件事: + +- 连接本地 RPC +- 创建新账户 +- 空投 1 SOL 给新开的账户 +- 创建 hello_world program 所需的指令(Instruction) +- 发送交易 (通过 `send_and_confirm_transaction`) + +关于 program ID,我们可以通过 `solana address -k .json` 命令来获取 program ID: + +```bash +solana address -k ./target/deploy/hello_world-keypair.json +``` + +`-k` 参数接收 keypair 的文件,可以获得 PublicKey。 + +运行 client: + +```bash +cargo run --example client +``` + +运行 client 代码的输出: + +```bash +(base) dylan@smalltown ~/Code/solana/projects/hello_world (master)> cargo run --example client + Blocking waiting for file lock on package cache + Blocking waiting for file lock on package cache + Blocking waiting for file lock on package cache + Compiling hello_world v0.1.0 (/Users/dylan/Code/solana/projects/hello_world) + Finished `dev` profile [unoptimized + debuginfo] target(s) in 5.13s + Running `target/debug/examples/client` +Transaction Signature: iPcYzbBCM6kkXvdx5GQLS9WYunT6yWFAp8NeRyNH5ZHbjXNpGuT1pqLAmQZSa2g7mubuFmaCTxqPVS54J4Zz22h +``` + +## 客户端调用(TypeScript) + +我们可以通过建立 nodejs 工程来发送交易: + +```bash +mkdir -p helloworld +npm init -y +npm install --save-dev typescript +npm install @solana/web3.js@1 @solana-developers/helpers@2 +``` + +建立 `tsconfig.json` 配置文件: + +```json +{ + "compilerOptions": { + "target": "es2016", + "module": "commonjs", + "types": ["node"], + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "strict": true, + "skipLibCheck": true + } +} +``` + +创建 `hello-world-client.ts` 文件,注意修改 `PublicKey` 的参数为你部署时生成的 programID: + +```typescript +import { + Connection, + PublicKey, + Transaction, + TransactionInstruction, +} from "@solana/web3.js"; +import { getKeypairFromFile } from "@solana-developers/helpers"; + +async function main() { + const programId = new PublicKey( + "DhQr1KGGQcf8BeU5uQvR35p2kgKqEinD45PRTDDRqx7z" + ); + + // Connect to a solana cluster. 
Either to your local test validator or to devnet + const connection = new Connection("http://localhost:8899", "confirmed"); + //const connection = new Connection("https://api.devnet.solana.com", "confirmed"); + + // We load the keypair that we created in a previous step + const keyPair = await getKeypairFromFile("~/.config/solana/id.json"); + + // Every transaction requires a blockhash + const blockhashInfo = await connection.getLatestBlockhash(); + + // Create a new transaction + const tx = new Transaction({ + ...blockhashInfo, + }); + + // Add our Hello World instruction + tx.add( + new TransactionInstruction({ + programId: programId, + keys: [], + data: Buffer.from([]), + }) + ); + + // Sign the transaction with your previously created keypair + tx.sign(keyPair); + + // Send the transaction to the Solana network + const txHash = await connection.sendRawTransaction(tx.serialize()); + + console.log("Transaction sent with hash:", txHash); + + await connection.confirmTransaction({ + blockhash: blockhashInfo.blockhash, + lastValidBlockHeight: blockhashInfo.lastValidBlockHeight, + signature: txHash, + }); + + console.log( + `Congratulations! Look at your ‘Hello World' transaction in the Solana Explorer: + https://explorer.solana.com/tx/${txHash}?cluster=custom` + ); +} + +main(); +``` + +运行: + +```bash +npx ts-node hello-world-client.ts +``` + +输出: + +```bash +(base) dylan@smalltown ~/Code/solana/projects/solana-web3-example (master)> npx ts-node hello-world-client.ts +(node:4408) ExperimentalWarning: CommonJS module /usr/local/lib/node_modules/npm/node_modules/debug/src/node.js is loading ES Module /usr/local/lib/node_modules/npm/node_modules/supports-color/index.js using require(). +Support for loading ES Module in require() is an experimental feature and might change at any time +(Use `node --trace-warnings ...` to show where the warning was created) +(node:4467) [DEP0040] DeprecationWarning: The `punycode` module is deprecated. Please use a userland alternative instead. +(Use `node --trace-deprecation ...` to show where the warning was created) +Transaction sent with hash: 29aFYDNv1cyrByA8FTBxrhohJx3H1FVLSUordaA1RVcXSNSy7zN5mGW5rwj6pDuopMvvoBaKNHeKmQ8c17uVnqoN +Congratulations! 
Look at your ‘Hello World' transaction in the Solana Explorer: + https://explorer.solana.com/tx/29aFYDNv1cyrByA8FTBxrhohJx3H1FVLSUordaA1RVcXSNSy7zN5mGW5rwj6pDuopMvvoBaKNHeKmQ8c17uVnqoN?cluster=custom +``` + +TODO: image + +# 一些实验 + +## 哪些版本能成功编译和测试 + +首先看一下我们安装的 `build-sbf` 和 `test-sbf` 的版本: + +```bash +# build-sbf 版本 +> cargo build-sbf --version +solana-cargo-build-sbf 2.1.4 +platform-tools v1.43 +rustc 1.79.0 + +# test-sbf 版本 +> cargo test-sbf --version +solana-cargo-test-sbf 2.1.4 +``` + +我们通过这个命令来测试哪些版本能够正确编译和测试: `rm -rf target Cargo.lock && cargo build-sbf && cargo test-sbf` + +| version | DevDependencies & Dependencies | NOTE | +| --------- | ---------------------------------------------------------------------------------------------------------- | -------------- | +| ✅2.1.4 | `cargo add solana-sdk@2.1.4 solana-program-test@2.1.4 tokio --dev && cargo add solana-program@2.1.4` | latest version | +| ✅2.0.18 | `cargo add solana-sdk@2.0.18 solana-program-test@2.0.18 tokio --dev && cargo add solana-program@2.0.18` | latest version | +| ✅2.0.3 | `cargo add solana-sdk@2.0.3 solana-program-test@2.0.3 tokio --dev && cargo add solana-program@2.0.3` | | +| ✅1.18.26 | `cargo add solana-sdk@1.18.26 solana-program-test@1.18.26 tokio --dev && cargo add solana-program@1.18.26` | | + +这里是 `Cargo.toml` 的例子(对应版本是 `2.0.3`): + +```toml +[package] +name = "hello_world" +version = "0.1.0" +edition = "2021" + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +solana-program = "2.0.3" + +[dev-dependencies] +solana-program-test = "2.0.3" +solana-sdk = "2.0.3" +tokio = "1.42.0" +``` + +# 测试 + +关于 solana 程序的测试,我们一般采用 + +bankrun 是一个用于在 Node.js 中测试 Solana 程序的轻量级框架。与传统的 solana-test-validator 相比,bankrun 提供了更高的速度和便利性。它能够实现一些 solana-test-validator 无法做到的功能,例如时间回溯和动态设置账户数据。 + +它会启动一个轻量级的 BanksServer,这个服务类似于一个 RPC 节点,但速度更快,并且创建一个 BanksClient 来与服务器进行通信 + +主要特点: + +- 高效性:比 solana-test-validator 快得多。 +- 灵活性:支持时间回溯和动态账户数据设置。 +- solana-bankrun 底层基于 solana-program-test,使用轻量级的 BanksServer 和 BanksClient。 + +接下来,我们来看看如何用 Rust(`solana-program-test`) 和 NodeJS(`solana-bankrun`) 编写测试用例。 + +## 测试(Rust) + +首先,我们来用 Rust 代码进行测试。 + +首先安装测试所需要的依赖: + +```bash +cargo add solana-sdk@1.18.26 solana-program-test@1.18.26 tokio --dev +# NOTE: There's no error like `Exceeding maximum ...` when building with solana-program = 2.1.4 +# We use solana cli with version `2.1.4` +# To install solana-cli with version 2.1.4, run this command: +# +# sh -c "$(curl -sSfL https://release.anza.xyz/v2.1.4/install)" +# +# cargo add solana-sdk@=2.1.4 solana-program-test@=2.1.4 tokio --dev +# cargo add solana-program@=2.1.4 +``` + +因为我们已经测试过,对于版本 `2.1.4`, `2.0.18`, `2.0.3`, `1.18.26` 都能成功编译和测试,所以我们只选择了其中一个版本 `1.18.26` 来做演示。 + +测试结果输出: + +```bash +(base) dylan@smalltown ~/Code/solana/projects/hello_world (master)> cargo test-sbf + Compiling hello_world v0.1.0 (/Users/dylan/Code/solana/projects/hello_world) + Finished `release` profile [optimized] target(s) in 2.46s + Blocking waiting for file lock on build directory + Compiling hello_world v0.1.0 (/Users/dylan/Code/solana/projects/hello_world) + Finished `test` profile [unoptimized + debuginfo] target(s) in 14.29s + Running unittests src/lib.rs (target/debug/deps/hello_world-823cf88515d0fd05) + +running 1 test +[2024-12-06T02:00:47.545448000Z INFO solana_program_test] "hello_world" SBF program from /Users/dylan/Code/solana/projects/hello_world/target/deploy/hello_world.so, modified 16 seconds, 964 ms, 380 µs and 220 ns ago +[2024-12-06T02:00:47.750627000Z DEBUG solana_runtime::message_processor::stable_log] 
Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM invoke [1] +[2024-12-06T02:00:47.750876000Z DEBUG solana_runtime::message_processor::stable_log] Program log: Hello, world! +[2024-12-06T02:00:47.750906000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM consumed 137 of 200000 compute units +[2024-12-06T02:00:47.750953000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM success +test test::test_hello_world ... ok + +test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.21s + + Doc-tests hello_world + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s +``` + +## 测试(NodeJS) + +接下来,我们来用 NodeJS 编写测试用例。 + +首先使用 pnpm 新建工程。 + +```bash +mkdir hello_world_frontend +cd hello_world_frontend + +# 初始化 pnpm 项目 +pnpm init +``` + +接下来安装依赖: + +```bash +# 安装必要的依赖 +pnpm add -D typescript ts-node @types/node chai ts-mocha solana-bankrun +pnpm add @solana/web3.js solana-bankrun +``` + +然后,编写测试程序: + +```typescript +import { + PublicKey, + Transaction, + TransactionInstruction, +} from "@solana/web3.js"; +import { start } from "solana-bankrun"; +import { describe, test } from "node:test"; +import { assert } from "chai"; + +describe("hello-solana", async () => { + // load program in solana-bankrun + const PROGRAM_ID = PublicKey.unique(); + const context = await start( + [{ name: "hello_world", programId: PROGRAM_ID }], + [] + ); + const client = context.banksClient; + const payer = context.payer; + + test("Say hello!", async () => { + const blockhash = context.lastBlockhash; + // We set up our instruction first. + let ix = new TransactionInstruction({ + // using payer keypair from context to sign the txn + keys: [{ pubkey: payer.publicKey, isSigner: true, isWritable: true }], + programId: PROGRAM_ID, + data: Buffer.alloc(0), // No data + }); + + const tx = new Transaction(); + tx.recentBlockhash = blockhash; + // using payer keypair from context to sign the txn + tx.add(ix).sign(payer); + + // Now we process the transaction + let transaction = await client.processTransaction(tx); + + assert(transaction.logMessages[0].startsWith("Program " + PROGRAM_ID)); + const message = "Program log: " + "Hello, world! GM!GN!"; + console.log("🌈🌈🌈 "); + console.log(transaction.logMessages[1]); + // NOTE: transaction.logMesages is an array: + // + // [ + // 'Program 11111111111111111111111111111112 invoke [1]', + // 'Program log: Hello, world! 
GM!GN!',
+    //   'Program 11111111111111111111111111111112 consumed 340 of 200000 compute units',
+    //   'Program 11111111111111111111111111111112 success'
+    // ]
+    assert(transaction.logMessages[1] === message);
+    assert(
+      transaction.logMessages[2] ===
+        "Program log: Our program's Program ID: " + PROGRAM_ID
+    );
+    assert(
+      transaction.logMessages[3].startsWith(
+        "Program " + PROGRAM_ID + " consumed"
+      )
+    );
+    assert(transaction.logMessages[4] === "Program " + PROGRAM_ID + " success");
+    assert(transaction.logMessages.length == 5);
+  });
+});
+```
+
+首先,我们通过 `start` 函数生成一个 `context`,这个 `context` 里会有和 `bankServer` 交互的 `bankClient` 以及 `payer` 账户。
+
+接下来,通过 `TransactionInstruction` 来准备交易的 `Instruction`,发送交易需要对消息进行签名,这里使用 `payer` 来对交易进行签名,将它放在 `keys` 数组里。
+
+```javascript
+let ix = new TransactionInstruction({
+  keys: [{ pubkey: payer.publicKey, isSigner: true, isWritable: true }],
+  programId: PROGRAM_ID,
+  data: Buffer.alloc(0), // No data
+});
+```
+
+创建一个新的交易指令 (`TransactionInstruction`),`TransactionInstruction` 的定义及参数类型 `TransactionInstructionCtorFields` 如下:
+
+```typescript
+/**
+ * Transaction Instruction class
+ */
+declare class TransactionInstruction {
+  /**
+   * Public keys to include in this transaction
+   * Boolean represents whether this pubkey needs to sign the transaction
+   */
+  keys: Array<AccountMeta>;
+  /**
+   * Program Id to execute
+   */
+  programId: PublicKey;
+  /**
+   * Program input
+   */
+  data: Buffer;
+  constructor(opts: TransactionInstructionCtorFields);
+}
+
+/**
+ * List of TransactionInstruction object fields that may be initialized at construction
+ */
+type TransactionInstructionCtorFields = {
+  keys: Array<AccountMeta>;
+  programId: PublicKey;
+  data?: Buffer;
+};
+```
+
+关于 `TransactionInstructionCtorFields` 的说明:
+
+- `keys`: 需要签名的公钥(支付者的公钥)。
+- `programId`: 程序的 ID。
+- `data`: 这里没有附加数据。
+
+然后我们准备 `Transaction` 的数据。
+
+首先 `Transaction` 需要最近的区块哈希,这个可以从 `context` 的 `lastBlockhash` 获取。
+
+```javascript
+const blockhash = context.lastBlockhash;
+```
+
+下面是创建交易的过程。
+
+```javascript
+const tx = new Transaction();
+tx.recentBlockhash = blockhash;
+tx.add(ix).sign(payer);
+```
+
+创建一个新的交易 (`Transaction`) 需要如下步骤:
+
+- 设置最近的区块哈希。
+- 添加之前定义的指令(`tx.add`),并使用支付者的密钥对交易进行签名(`.sign`)。
+
+`add` 函数通过 Javascript 的 [Rest parameters](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Functions/rest_parameters) 特性将参数收集成数组,数组中每个元素的类型是 `Transaction | TransactionInstruction | TransactionInstructionCtorFields` 这个联合类型(`Union Type`)。
+
+```typescript
+declare class Transaction {
+  /**
+   * Signatures for the transaction. Typically created by invoking the
+   * `sign()` method
+   */
+  signatures: Array<SignaturePubkeyPair>;
+  /**
+   * The first (payer) Transaction signature
+   *
+   * @returns {Buffer | null} Buffer of payer's signature
+   */
+  get signature(): Buffer | null;
+  /**
+   * The transaction fee payer
+   */
+  feePayer?: PublicKey;
+  /**
+   * The instructions to atomically execute
+   */
+  instructions: Array<TransactionInstruction>;
+  /**
+   * Add one or more instructions to this Transaction
+   *
+   * @param {Array< Transaction | TransactionInstruction | TransactionInstructionCtorFields >} items - Instructions to add to the Transaction
+   */
+  add(
+    ...items: Array<
+      Transaction | TransactionInstruction | TransactionInstructionCtorFields
+    >
+  ): Transaction;
+}
+```
+
+创建完交易之后,通过 `client.processTransaction` 发送交易并等待结果。
+
+```javascript
+let transaction = await client.processTransaction(tx);
+```
+
+这里是 `processTransaction` 的定义:
+
+```typescript
+/**
+ * A client for the ledger state, from the perspective of an arbitrary validator.
+ *
+ * The client is used to send transactions and query account data, among other things.
+ * Use `start()` to initialize a BanksClient.
+ */
+export declare class BanksClient {
+  constructor(inner: BanksClientInner);
+  private inner;
+  /**
+   * Send a transaction and return immediately.
+   * @param tx - The transaction to send.
+   */
+  sendTransaction(tx: Transaction | VersionedTransaction): Promise<void>;
+  /**
+   * Process a transaction and return the result with metadata.
+   * @param tx - The transaction to send.
+   * @returns The transaction result and metadata.
+   */
+  processTransaction(
+    tx: Transaction | VersionedTransaction
+  ): Promise<BanksTransactionMeta>;
+}
+```
+
+其 `inner` 是个 `BanksClient`,除了处理交易外,它还能干很多事情,以下是它的定义。
+
+```typescript
+export class BanksClient {
+  getAccount(address: Uint8Array, commitment?: CommitmentLevel | undefined | null): Promise<Account | null>
+  sendLegacyTransaction(txBytes: Uint8Array): Promise<string>
+  sendVersionedTransaction(txBytes: Uint8Array): Promise<string>
+  processLegacyTransaction(txBytes: Uint8Array): Promise<BanksTransactionMeta>
+  processVersionedTransaction(txBytes: Uint8Array): Promise<BanksTransactionMeta>
+  tryProcessLegacyTransaction(txBytes: Uint8Array): Promise<BanksTransactionResultWithMeta>
+  tryProcessVersionedTransaction(txBytes: Uint8Array): Promise<BanksTransactionResultWithMeta>
+  simulateLegacyTransaction(txBytes: Uint8Array, commitment?: CommitmentLevel | undefined | null): Promise<BanksTransactionResultWithMeta>
+  simulateVersionedTransaction(txBytes: Uint8Array, commitment?: CommitmentLevel | undefined | null): Promise<BanksTransactionResultWithMeta>
+  getTransactionStatus(signature: Uint8Array): Promise<TransactionStatus | null>
+  getTransactionStatuses(signatures: Array<Uint8Array>): Promise<Array<TransactionStatus | undefined | null>>
+  getSlot(commitment?: CommitmentLevel | undefined | null): Promise<bigint>
+  getBlockHeight(commitment?: CommitmentLevel | undefined | null): Promise<bigint>
+  getRent(): Promise<Rent>
+  getClock(): Promise<Clock>
+  getBalance(address: Uint8Array, commitment?: CommitmentLevel | undefined | null): Promise<bigint>
+  getLatestBlockhash(commitment?: CommitmentLevel | undefined | null): Promise<BlockhashRes | null>
+  getFeeForMessage(messageBytes: Uint8Array, commitment?: CommitmentLevel | undefined | null): Promise<bigint | null>
+}
+
+/**
+ * Process a transaction and return the result with metadata.
+ * @param tx - The transaction to send.
+ * @returns The transaction result and metadata.
+ */
+  async processTransaction(
+    tx: Transaction | VersionedTransaction,
+  ): Promise<BanksTransactionMeta> {
+    const serialized = tx.serialize();
+    const internal = this.inner;
+    const inner =
+      tx instanceof Transaction
+        ? await internal.processLegacyTransaction(serialized)
+        : await internal.processVersionedTransaction(serialized);
+    return new BanksTransactionMeta(inner);
+  }
+```
+
+`processTransaction` 会先通过 `serialize` 对 transaction 进行序列化,判断属于 `LegacyTransaction` 还是 `VersionedTransaction`,分别调用 `processLegacyTransaction` 或 `processVersionedTransaction` 异步方法,并将结果通过 `BanksTransactionMeta` 返回。
+
+而 `BanksTransactionMeta` 包含了 `logMessages`、`returnData` 和 `computeUnitsConsumed` 属性。
+
+```typescript
+export class TransactionReturnData {
+  get programId(): Uint8Array;
+  get data(): Uint8Array;
+}
+export class BanksTransactionMeta {
+  get logMessages(): Array<string>;
+  get returnData(): TransactionReturnData | null;
+  get computeUnitsConsumed(): bigint;
+}
+```
+
+其中 `logMessages` 是一个字符串数组,用于存储与交易相关的日志消息。我们可以通过这些日志信息,对测试结果进行验证。
+
+比如,可以通过对 `logMessages[0]` 验证 solana program 被调用时,会输出以 `Program ` + `PROGRAM_ID` 开头的内容:
+
+```javascript
+assert(transaction.logMessages[0].startsWith("Program " + PROGRAM_ID));
+```
+
+一个简单的 `logMessages` 数组的例子:
+
+```json
+[
+  "Program 11111111111111111111111111111112 invoke [1]",
+  "Program log: Hello, world! 
GM!GN!", + "Program log: Our program's Program ID: {program_id}", + "Program 11111111111111111111111111111112 consumed 443 of 200000 compute units", + "Program 11111111111111111111111111111112 success" +] +``` + +值得注意的是,在我们的 solana program 里,第一个 `msg!` 输出的日志是 `Hello, world! GM!GN!`,但是发送交易返回的 `logMessages` 数组里它在数组的第二个元素,这是什么原因呢? + +```rust +pub fn process_instruction( + program_id: &Pubkey, + _accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + msg!("Hello, world! GM!GN!"); + // NOTE: You must not use interpolating string like this, as it will not + // output the string value correctly. + // + // You must use placeholder instead. + // + // Below is the transaction.logMessages array when using interpolating string + // + // [ + // 'Program 11111111111111111111111111111112 invoke [1]', + // 'Program log: Hello, world! GM!GN!', + // "Program log: Our program's Program ID: {program_id}", + // 'Program 11111111111111111111111111111112 consumed 443 of 200000 compute units', + // 'Program 11111111111111111111111111111112 success' + // ] + // msg!("Our program's Program ID: {program_id}"); + msg!("Our program's Program ID: {}", program_id); + Ok(()) +} +``` + +其原因是 solana program 执行时 `program runtime` 会通过 `program_invoke` 函数打印被调用的日志,也就是这里的: `Program 11111111111111111111111111111112 invoke [1]`。关于 `program_invoke` 函数的代码可以在 [anza-xyz/agave](https://github.com/anza-xyz/agave/blob/6c6c26eec4317e06e334609ea686b0192a210092/program-runtime/src/stable_log.rs#L20) 这里找到。 + +````rust +/// Log a program invoke. +/// +/// The general form is: +/// +/// ```notrust +/// "Program
invoke []" +/// ``` +pub fn program_invoke( + log_collector: &Option>>, + program_id: &Pubkey, + invoke_depth: usize, +) { + ic_logger_msg!( + log_collector, + "Program {} invoke [{}]", + program_id, + invoke_depth + ); +} +```` + +接下来的检查可以根据具体的业务场景按部就班的进行。 + +比如,下面检查 solana program 里第一个 `msg!` 打印的内容: + +```javascript +const message = "Program log: " + "Hello, world! GM!GN!"; +assert(transaction.logMessages[1] === message); +``` + +接下来,检查 solana program 里第二个 `msg!` 打印的内容: + +```javascript +assert(transaction.logMessages[1] === message); +assert( + transaction.logMessages[2] === + "Program log: Our program's Program ID: " + PROGRAM_ID +); +``` + +再下来,检查其他日志消息的内容和格式,包括程序的成功消息和消耗的计算单位,并确保日志消息的总数为 `5`。 + +```javascript +assert( + transaction.logMessages[3].startsWith("Program " + PROGRAM_ID + " consumed") +); +assert(transaction.logMessages[4] === "Program " + PROGRAM_ID + " success"); +assert(transaction.logMessages.length == 5); +``` + +至此,一个简单的通过 `NodeJS` 编写的测试就写好了。 + +#### All in one test setup script + +如果你比较懒,可以直接运行以下脚本到 `setup.sh`,并运行 `bash setup.sh`。 + +```bash +# 创建测试目录 +mkdir hello_world_frontend +cd hello_world_frontend + +# 初始化 pnpm 项目 +pnpm init + +# 安装必要的依赖 +pnpm add -D typescript ts-node @types/node chai ts-mocha solana-bankrun +pnpm add @solana/web3.js solana-bankrun + +# 创建 TypeScript 配置文件 +cat > tsconfig.json << EOF +{ + "compilerOptions": { + "target": "es2020", + "module": "commonjs", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "outDir": "./dist", + "rootDir": "./src" + }, + "include": ["src/**/*"], + "exclude": ["node_modules"] +} +EOF + +# 创建源代码目录和测试文件 +mkdir -p tests +cat > tests/hello_world.test.ts << EOF +import { + PublicKey, + Transaction, + TransactionInstruction, + } from "@solana/web3.js"; + import { start } from "solana-bankrun"; + import { describe, test } from "node:test"; + import { assert } from "chai"; + + describe("hello-solana", async () => { + // load program in solana-bankrun + const PROGRAM_ID = PublicKey.unique(); + const context = await start( + [{ name: "hello_world", programId: PROGRAM_ID }], + [], + ); + const client = context.banksClient; + const payer = context.payer; + + test("Say hello!", async () => { + const blockhash = context.lastBlockhash; + // We set up our instruction first. + let ix = new TransactionInstruction({ + // using payer keypair from context to sign the txn + keys: [{ pubkey: payer.publicKey, isSigner: true, isWritable: true }], + programId: PROGRAM_ID, + data: Buffer.alloc(0), // No data + }); + + const tx = new Transaction(); + tx.recentBlockhash = blockhash; + // using payer keypair from context to sign the txn + tx.add(ix).sign(payer); + + // Now we process the transaction + let transaction = await client.processTransaction(tx); + + assert(transaction.logMessages[0].startsWith("Program " + PROGRAM_ID)); + const message = "Program log: " + "Hello, world! 
GM!GN!"; + console.log("🌈🌈🌈 "); + console.log(transaction.logMessages); + assert(transaction.logMessages[1] === message); + assert( + transaction.logMessages[2] === + "Program log: Our program's Program ID: " + PROGRAM_ID, + ); + assert( + transaction.logMessages[3].startsWith( + "Program " + PROGRAM_ID + " consumed", + ), + ); + assert(transaction.logMessages[4] === "Program " + PROGRAM_ID + " success"); + assert(transaction.logMessages.length == 5); + }); +}); +EOF + +# 更新 package.json 添加测试脚本 +cat > package.json << EOF +{ + "name": "hello_world_frontend", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "test": "pnpm ts-mocha -p ./tsconfig.json -t 1000000 ./tests/hello_world.test.ts" + }, + "keywords": [], + "author": "", + "license": "ISC", + "devDependencies": { + "@types/jest": "^29.5.11", + "@types/node": "^20.10.5", + "chai": "^5.1.2", + "jest": "^29.7.0", + "solana-bankrun": "^0.4.0", + "ts-jest": "^29.1.1", + "ts-mocha": "^10.0.0", + "ts-node": "^10.9.2", + "typescript": "^5.3.3" + }, + "dependencies": { + "@solana/web3.js": "^1.87.6" + } +} + +# 运行测试 +pnpm test +EOF +``` + +# TODO + +这个命令会在 `target/deploy` 目录下生成两个重要文件: + +- `hello_world.so`:编译后的程序文件,这是一个 BPF (Berkeley Packet Filter) 格式的可执行文件 +- `hello_world-keypair.json`:程序的密钥对文件,用于程序的部署和升级 + +构建过程可能需要一些时间,因为它需要: + +1. 下载并编译必要的依赖 +2. 将 Rust 代码编译成 BPF 字节码 +3. 生成程序需要的密钥对 + +如果你看到类似下面的输出,说明构建成功: + +```bash +BPF SDK: /Users/username/.local/share/solana/install/releases/1.14.x/solana-release/bin/sdk/bpf +cargo-build-sbf child: rustup toolchain list -v +cargo-build-sbf child: cargo +bpf build --target bpfel-unknown-unknown --release + Finished release [optimized] target(s) in 0.20s +cargo-build-sbf child: /Users/username/.local/share/solana/install/releases/1.14.x/solana-release/bin/sdk/bpf/scripts/strip.sh /Users/username/projects/hello_world/target/bpfel-unknown-unknown/release/hello_world.so /Users/username/projects/hello_world/target/deploy/hello_world.so +``` + +# 部署程序 + +现在我们可以将编译好的程序部署到 Solana 网络上。在开发阶段,我们通常使用本地测试网(localhost)或开发网(devnet)进行测试。 + +首先确保你的 Solana CLI 配置指向了正确的集群: + +```bash +# 切换到开发网 +solana config set --url devnet + +# 查看当前配置 +solana config get +``` + +然后使用以下命令部署程序: + +```bash +solana program deploy target/deploy/hello_world.so +``` + +部署成功后,你会看到程序的 ID(公钥地址)。请保存这个地址,因为在后续与程序交互时会需要它。 + +# 下一步 + +至此,我们已经完成了一个最基础的 Solana 程序的开发和部署。虽然这个程序只是简单地打印 "Hello, world!",但它包含了 Solana 程序开发的基本要素: + +- 程序入口点的定义 +- 基本的参数结构 +- 构建和部署流程 + +在接下来的章节中,我们将学习: + +- 如何处理账户数据 +- 如何实现更复杂的指令逻辑 +- 如何进行程序测试 +- 如何确保程序安全性 + +# Refs + +关于 cargo-build-sbf 解释 +https://github.com/solana-labs/solana/issues/34987#issuecomment-1913538260 + +https://solana.stackexchange.com/questions/16443/error-function-stack-offset-of-7256-exceeded-max-offset-of-4096-by-3160-bytes + +安装 solana cli tool suites(注意不要安装 edge 版本,会发现部署不成功问题) +https://solana.com/docs/intro/installation + +https://github.com/solana-labs/solana/issues/34987#issuecomment-1914665002 +https://github.com/anza-xyz/agave/issues/1572 + +在 solana 编写一个 helloworld +https://solana.com/developers/guides/getstarted/local-rust-hello-world#create-a-new-rust-library-with-cargo diff --git "a/src/solana/quickstart/\344\275\277\347\224\250TypeScript\345\210\233\345\273\272\350\264\246\346\210\267.md" "b/src/solana/quickstart/\344\275\277\347\224\250TypeScript\345\210\233\345\273\272\350\264\246\346\210\267.md" new file mode 100644 index 0000000..2198cb6 --- /dev/null +++ "b/src/solana/quickstart/\344\275\277\347\224\250TypeScript\345\210\233\345\273\272\350\264\246\346\210\267.md" @@ 
-0,0 +1 @@ +# 使用 TypeScript 创建账户 diff --git a/src/terraform/gitlab/start-gitlab-using-terraform.md b/src/terraform/gitlab/start-gitlab-using-terraform.md new file mode 100644 index 0000000..8eb8913 --- /dev/null +++ b/src/terraform/gitlab/start-gitlab-using-terraform.md @@ -0,0 +1,890 @@ +# Start gitlab using terraform + +- [aws](#aws) + - [aws provider](#aws-provider) + - [vpc](#vpc) +- [ec2 instance](#ec2-instance) +- [security group](#security-group) +- [ebs volume](#ebs-volume) +- [eip](#eip) +- [gitlab setup](#gitlab-setup) +- [GitLab clone](#gitlab-clone) +- [GitLab administration](#gitlab-administration) + +如何在 `AWS` 中快速部署一台 `GitLab` 服务?利用 `terraform`,我们可以自由调配云服务的资源,并快速将 `GitLab` 部署到 `AWS` 中。 + +# aws + +## aws provider + +在我们使用 `terraform` 创建云服务器时,我们需要指定 provider。 + +首先,我们定义 aws provider。 + +aws provider 是 `terraform` 的一个特殊的资源类型,它提供了一个简单的 API,用于访问 `AWS` 中的服务。 + +EN: Before we can use terraform to deploy our application, we need to install the aws provider. + +以下配置是 aws provider 的配置: + +```hcl +provider "aws" { + region = "cn-northwest-1" +} +``` + +我们指定了 `AWS` 的 `region` 为 `cn-northwest-1`,这个 `region` 就是我们的云服务器所在的地区。 + +## vpc + +运行 `GitLab` 的 EC2 放在哪里呢?一般来讲,服务器需要一个 `VPC`,这个 `VPC` 就是我们的云服务器所在的网络,而我们的 EC2 instance 就是在这个 `VPC` 中的一个虚拟机。 + +一般我们会利用现有的 vpc 而不是新建 vpc,从 AWS console 中查询以下 vpc 的 id,我们把它放到名为 vpc 的 variable 中。 + +EN: As we have already have a vpc, we can use it to deploy our application. Here we use variable to define the vpc id. + +```hcl +variable "vpc" { + type = string + default = "vpc-0f0f0f0f0f0f0f0f" + description = "The VPC ID of " +} +``` + +# ec2 instance + +在确定了 `VPC` 的 id 之后,我们就可以创建 EC2 instance 了。 + +我们使用 `aws_instance` 资源来创建 EC2 instance,并且指定了一个名为 `gitlab` 的实例。 + +此外,我们还指定了一个实例的类型,这个类型是 `m5a.large`,这个类型是 `AWS` 中的一个预定义的类型,我们可以在 AWS console 中查询到。 + +`ami` 为版本为 Ubuntu 20.04 的镜像,`key_name` 为登录 EC2 的 key,`root_block_device` 为系统盘,我们此时分配了 `40G` 的磁盘空间,`subnet_id` 为 vpc 下的一个子网,它也是预先就创建好的。 + +`vpc_security_group_ids` 为我们创建 EC2 所需的安全组,下面我们会讲它开启了哪些规则。 + +EN: We can use the ec2 resource to deploy our application. We use `aws_instance` resource and specify the vpc id, subnet id, instance type(`m5a.large` ), key_name(`gitlab`), Ubuntu 20.04 server(`ami-ffff111db56e65f8d`), 40GiB root block size volume. + +```hcl +resource "aws_instance" "gitlab" { + # Ubuntu Server 20.04 LTS (HVM), SSD Volume Type - ami-ffff111db56e65f8d (64-bit x86) / ami-0429c857c8db3027a (64-bit Arm) + ami = "ami-ffff111db56e65f8d" + instance_type = "m5a.large" + key_name = "gitlab" + + root_block_device { + volume_size = "40" + volume_type = "gp3" + } + + # (subnet-public1-cn-north-1a) + subnet_id = "subnet-2222333344445555" + vpc_security_group_ids = ["${aws_security_group.gitlab.id}"] + # associate_public_ip_address = true + tags = { + Name = "gitlab" + } +} +``` + +在 EC2 创建好之后,我们可以这样登录: + +EN: After provisioning, you can access the GitLab instance. 
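+
+登录之前需要先知道实例的公网 IP。下面是一个查询方式的示意(假设本机已配置好 aws cli,且实例带有 `Name = "gitlab"` 这个 tag,过滤条件请按实际情况调整):
+
+```shell
+# 按 Name tag 查询 gitlab 实例的公网 IP(仅作示意)
+aws ec2 describe-instances \
+  --filters "Name=tag:Name,Values=gitlab" \
+  --query "Reservations[].Instances[].PublicIpAddress" \
+  --output text
+```
+
+拿到 IP 之后,在下面的 ssh 命令末尾补上登录目标,例如 `ubuntu@<公网IP>`(Ubuntu AMI 的默认用户是 ubuntu):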
+ +``` +ssh -i ~/.ssh/gitlab.pem +``` + +# security group + +EC2 实例需要一个安全组,用来控制它的 `ingress` 规则和 `egress` 规则。可以把 security group 看成是防火墙。 + +通过配置文件可知,我们允许 ssh 登录 EC2,允许访问 `GitLab` http 的端口 80 和 https 的端口 443。 + +对于出口的规则,我们不做任何限制,可以访问任何地方。 + +```hcl +resource "aws_security_group" "gitlab" { + description = "Security group for gitlab" + vpc_id = var.vpc + + egress { + cidr_blocks = ["0.0.0.0/0"] + description = "Allow all outbound traffic" + from_port = 0 + protocol = "-1" + self = false + to_port = 0 + } + + ingress { + cidr_blocks = ["0.0.0.0/0"] + description = "ssh" + from_port = 22 + protocol = "tcp" + self = false + to_port = 22 + } + + ingress { + cidr_blocks = ["0.0.0.0/0"] + description = "allow public access http" + from_port = 80 + protocol = "tcp" + self = false + to_port = 80 + } + + ingress { + cidr_blocks = ["0.0.0.0/0"] + description = "allow public access https" + from_port = 443 + protocol = "tcp" + self = false + to_port = 443 + } + + name = "gitlab" + revoke_rules_on_delete = false + tags = { + "Name" = "gitlab" + } + tags_all = { + "Name" = "gitlab" + } + + timeouts {} +} +``` + +# ebs volume + +为了存储 git repository,我们可以利用 `aws_ebs_volume` 给 EC2 实例分配一个 EBS 磁盘,并且指定它的大小为 `40GiB`,再通过 `aws_volume_attachment` 将 EBS 磁盘与 EC2 实例进行绑定。 + +```hcl +resource "aws_ebs_volume" "gitlab" { + availability_zone = "cn-north-1a" + size = 40 + type = "gp3" + + tags = { + Name = "gitlab" + } +} + +resource "aws_volume_attachment" "ebs_attachment_gitlab" { + device_name = "/dev/sdh" + volume_id = aws_ebs_volume.gitlab.id + instance_id = aws_instance.gitlab.id +} +``` + +好了,就这样就差不都了,我们来执行一下 `terraform plan` 命令,看看有没有错误。 + +下面是添加了 `aws_ebs_volume` 和 `aws_volume_attachment` 之后执行 `terraform plan` 的结果: + +``` +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # aws_ebs_volume.gitlab will be created + + resource "aws_ebs_volume" "gitlab" { + + arn = (known after apply) + + availability_zone = "cn-north-1a" + + encrypted = (known after apply) + + id = (known after apply) + + iops = (known after apply) + + kms_key_id = (known after apply) + + size = 40 + + snapshot_id = (known after apply) + + tags = { + + "Name" = "gitlab" + } + + tags_all = { + + "Name" = "gitlab" + } + + throughput = (known after apply) + + type = "gp3" + } + + # aws_instance.gitlab will be created + + resource "aws_instance" "gitlab" { + + ami = "ami-ffff111db56e65f8d" + + arn = (known after apply) + + associate_public_ip_address = (known after apply) + + availability_zone = (known after apply) + + cpu_core_count = (known after apply) + + cpu_threads_per_core = (known after apply) + + disable_api_termination = (known after apply) + + ebs_optimized = (known after apply) + + get_password_data = false + + host_id = (known after apply) + + id = (known after apply) + + instance_initiated_shutdown_behavior = (known after apply) + + instance_state = (known after apply) + + instance_type = "m5a.large" + + ipv6_address_count = (known after apply) + + ipv6_addresses = (known after apply) + + key_name = "gitlab" + + monitoring = (known after apply) + + outpost_arn = (known after apply) + + password_data = (known after apply) + + placement_group = (known after apply) + + placement_partition_number = (known after apply) + + primary_network_interface_id = (known after apply) + + private_dns = (known after apply) + + private_ip = (known after apply) + + public_dns = (known after apply) + + public_ip = (known after apply) + + secondary_private_ips = (known after apply) + + security_groups = (known after apply) + + source_dest_check = true + + subnet_id = "subnet-88ff88ff88ff" + + tags = { + + "Name" = "gitlab" + } + + tags_all = { + + "Name" = "gitlab" + } + + tenancy = (known after apply) + + user_data = (known after apply) + + user_data_base64 = (known after apply) + + user_data_replace_on_change = false + + vpc_security_group_ids = (known after apply) + + + capacity_reservation_specification { + + capacity_reservation_preference = (known after apply) + + + capacity_reservation_target { + + capacity_reservation_id = (known after apply) + } + } + + + ebs_block_device { + + delete_on_termination = (known after apply) + + device_name = (known after apply) + + encrypted = (known after apply) + + iops = (known after apply) + + kms_key_id = (known after apply) + + snapshot_id = (known after apply) + + tags = (known after apply) + + throughput = (known after apply) + + volume_id = (known after apply) + + volume_size = (known after apply) + + volume_type = (known after apply) + } + + + enclave_options { + + enabled = (known after apply) + } + + + ephemeral_block_device { + + device_name = (known after apply) + + no_device = (known after apply) + + virtual_name = (known after apply) + } + + + metadata_options { + + http_endpoint = (known after apply) + + http_put_response_hop_limit = (known after apply) + + http_tokens = (known after apply) + + instance_metadata_tags = (known after apply) + } + + + network_interface { + + delete_on_termination = (known after apply) + + device_index = (known after apply) + + network_interface_id = (known after apply) + } + + + root_block_device { + + delete_on_termination = true + + device_name = (known after apply) + + encrypted = (known after 
apply) + + iops = (known after apply) + + kms_key_id = (known after apply) + + throughput = (known after apply) + + volume_id = (known after apply) + + volume_size = 60 + + volume_type = "standard" + } + } + + # aws_security_group.gitlab will be created + + resource "aws_security_group" "gitlab" { + + arn = (known after apply) + + description = "Security group for gitlab" + + egress = [ + + { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "Allow all outbound traffic" + + from_port = 0 + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "-1" + + security_groups = [] + + self = false + + to_port = 0 + }, + ] + + id = (known after apply) + + ingress = [ + + { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "allow public access http" + + from_port = 80 + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "tcp" + + security_groups = [] + + self = false + + to_port = 80 + }, + + { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "allow public access https" + + from_port = 443 + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "tcp" + + security_groups = [] + + self = false + + to_port = 443 + }, + + { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "test" + + from_port = 22 + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "tcp" + + security_groups = [] + + self = false + + to_port = 22 + }, + + { + + cidr_blocks = [ + + "10.0.0.0/16", + ] + + description = "allow ssh" + + from_port = 22 + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "tcp" + + security_groups = [] + + self = false + + to_port = 22 + }, + ] + + name = "gitlab" + + name_prefix = (known after apply) + + owner_id = (known after apply) + + revoke_rules_on_delete = false + + tags = { + + "Name" = "gitlab" + } + + tags_all = { + + "Name" = "gitlab" + } + + vpc_id = "vpc-0afafafafaf" + + + timeouts {} + } + + # aws_volume_attachment.ebs_attachment_gitlab will be created + + resource "aws_volume_attachment" "ebs_attachment_gitlab" { + + device_name = "/dev/sdh" + + id = (known after apply) + + instance_id = (known after apply) + + volume_id = (known after apply) + } + +Plan: 4 to add, 0 to change, 0 to destroy. + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. +``` + +以上显示,terraform 会创建以下 4 种资源: + +- `aws_instance.gitlab` +- `aws_volume.gitlab` +- `aws_volume_attachment.ebs_attachment_gitlab` +- `aws_security_group.gitlab` + +下面执行 `terraform apply` 命令,4 个资源都会被创建,并且资源的属性都会被设置为预期的值。 + +``` + Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + + Terraform will perform the following actions: + + # aws_ebs_volume.gitlab will be created + + resource "aws_ebs_volume" "gitlab" { + + arn = (known after apply) + + availability_zone = "cn-north-1a" + + encrypted = (known after apply) + + id = (known after apply) + + iops = (known after apply) + + kms_key_id = (known after apply) + + size = 40 + + snapshot_id = (known after apply) + + tags = { + + "Name" = "gitlab" + } + + tags_all = { + + "Name" = "gitlab" + } + + throughput = (known after apply) + + type = "gp3" + } + + # aws_instance.gitlab will be created + + resource "aws_instance" "gitlab" { + + ami = "ami-ffff111db56e65f8d" + + arn = (known after apply) + + associate_public_ip_address = (known after apply) + + availability_zone = (known after apply) + + cpu_core_count = (known after apply) + + cpu_threads_per_core = (known after apply) + + disable_api_termination = (known after apply) + + ebs_optimized = (known after apply) + + get_password_data = false + + host_id = (known after apply) + + id = (known after apply) + + instance_initiated_shutdown_behavior = (known after apply) + + instance_state = (known after apply) + + instance_type = "m5a.large" + + ipv6_address_count = (known after apply) + + ipv6_addresses = (known after apply) + + key_name = "gitlab" + + monitoring = (known after apply) + + outpost_arn = (known after apply) + + password_data = (known after apply) + + placement_group = (known after apply) + + placement_partition_number = (known after apply) + + primary_network_interface_id = (known after apply) + + private_dns = (known after apply) + + private_ip = (known after apply) + + public_dns = (known after apply) + + public_ip = (known after apply) + + secondary_private_ips = (known after apply) + + security_groups = (known after apply) + + source_dest_check = true + + subnet_id = "subnet-069a0f9b9c9f9f9f9" + + tags = { + + "Name" = "gitlab" + } + + tags_all = { + + "Name" = "gitlab" + } + + tenancy = (known after apply) + + user_data = (known after apply) + + user_data_base64 = (known after apply) + + user_data_replace_on_change = false + + vpc_security_group_ids = (known after apply) + + + capacity_reservation_specification { + + capacity_reservation_preference = (known after apply) + + + capacity_reservation_target { + + capacity_reservation_id = (known after apply) + } + } + + + ebs_block_device { + + delete_on_termination = (known after apply) + + device_name = (known after apply) + + encrypted = (known after apply) + + iops = (known after apply) + + kms_key_id = (known after apply) + + snapshot_id = (known after apply) + + tags = (known after apply) + + throughput = (known after apply) + + volume_id = (known after apply) + + volume_size = (known after apply) + + volume_type = (known after apply) + } + + + enclave_options { + + enabled = (known after apply) + } + + + ephemeral_block_device { + + device_name = (known after apply) + + no_device = (known after apply) + + virtual_name = (known after apply) + } + + + metadata_options { + + http_endpoint = (known after apply) + + http_put_response_hop_limit = (known after apply) + + http_tokens = (known after apply) + + instance_metadata_tags = (known after apply) + } + + + network_interface { + + delete_on_termination = (known after apply) + + device_index = (known after apply) + + network_interface_id = (known after apply) + } + + + root_block_device { + + delete_on_termination = true + + device_name = (known after apply) + + encrypted = (known 
after apply) + + iops = (known after apply) + + kms_key_id = (known after apply) + + throughput = (known after apply) + + volume_id = (known after apply) + + volume_size = 60 + + volume_type = "standard" + } + } + + # aws_security_group.gitlab will be created + + resource "aws_security_group" "gitlab" { + + arn = (known after apply) + + description = "Security group for gitlab" + + egress = [ + + { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "Allow all outbound traffic" + + from_port = 0 + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "-1" + + security_groups = [] + + self = false + + to_port = 0 + }, + ] + + id = (known after apply) + + ingress = [ + + { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "allow public access http" + + from_port = 80 + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "tcp" + + security_groups = [] + + self = false + + to_port = 80 + }, + + { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "allow public access https" + + from_port = 443 + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "tcp" + + security_groups = [] + + self = false + + to_port = 443 + }, + + { + + cidr_blocks = [ + + "0.0.0.0/0", + ] + + description = "test" + + from_port = 22 + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "tcp" + + security_groups = [] + + self = false + + to_port = 22 + }, + + { + + cidr_blocks = [ + + "10.0.0.0/16", + ] + + description = "allow ssh" + + from_port = 22 + + ipv6_cidr_blocks = [] + + prefix_list_ids = [] + + protocol = "tcp" + + security_groups = [] + + self = false + + to_port = 22 + }, + ] + + name = "gitlab" + + name_prefix = (known after apply) + + owner_id = (known after apply) + + revoke_rules_on_delete = false + + tags = { + + "Name" = "gitlab" + } + + tags_all = { + + "Name" = "gitlab" + } + + vpc_id = "vpc-02fefefefefe" + + + timeouts {} + } + + # aws_volume_attachment.ebs_attachment_gitlab will be created + + resource "aws_volume_attachment" "ebs_attachment_gitlab" { + + device_name = "/dev/sdh" + + id = (known after apply) + + instance_id = (known after apply) + + volume_id = (known after apply) + } + + Plan: 4 to add, 0 to change, 0 to destroy. + + Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes +``` + +输入 yes 之后,我们的 4 种资源都会被创建好了。 + +``` +aws_instance.gitlab: Creating... +aws_instance.gitlab: Still creating... [10s elapsed] +aws_instance.gitlab: Creation complete after 13s [id=i-0afefefefe] +aws_volume_attachment.ebs_attachment_gitlab: Creating... +aws_volume_attachment.ebs_attachment_gitlab: Still creating... [10s elapsed] +aws_volume_attachment.ebs_attachment_gitlab: Still creating... [20s elapsed] +aws_volume_attachment.ebs_attachment_gitlab: Creation complete after 21s [id=vai-28fefefe] + +Apply complete! Resources: 4 added, 0 changed, 0 destroyed. 
+``` + +# eip + +下面我们给 gitlab 机器分配一个 `EIP`,它是一个 public ip 地址,我们可以通过这个 `EIP` 来访问 gitlab 机器。 + +EN: Add public ip for ec2 instance + +```hcl +resource "aws_eip" "gitlab" { + vpc = true + tags = { + Name = "gitlab" + } +} + +resource "aws_eip_association" "eip_association_gitlab" { + instance_id = aws_instance.gitlab.id + allocation_id = aws_eip.gitlab.id +} +``` + +执行 `terraform plan` 后,我们会看到下面的输出: + +``` +# aws_eip.gitlab will be created + + resource "aws_eip" "gitlab" { + + allocation_id = (known after apply) + + association_id = (known after apply) + + carrier_ip = (known after apply) + + customer_owned_ip = (known after apply) + + domain = (known after apply) + + id = (known after apply) + + instance = (known after apply) + + network_border_group = (known after apply) + + network_interface = (known after apply) + + private_dns = (known after apply) + + private_ip = (known after apply) + + public_dns = (known after apply) + + public_ip = (known after apply) + + public_ipv4_pool = (known after apply) + + tags = { + + "Name" = "gitlab" + } + + tags_all = { + + "Name" = "gitlab" + } + + vpc = true + } + + # aws_eip_association.eip_association_gitlab will be created + + resource "aws_eip_association" "eip_association_gitlab" { + + allocation_id = (known after apply) + + id = (known after apply) + + instance_id = (known after apply) + + network_interface_id = (known after apply) + + private_ip_address = (known after apply) + + public_ip = (known after apply) + } +``` + +它会创建两个资源:一个 `EIP` 和一个 `EIP` 关联,这样,我们的 gitlab 机器就可以通过这个 `EIP` 访问了。 + +``` +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # aws_eip.gitlab will be created + + resource "aws_eip" "gitlab" { + + allocation_id = (known after apply) + + association_id = (known after apply) + + carrier_ip = (known after apply) + + customer_owned_ip = (known after apply) + + domain = (known after apply) + + id = (known after apply) + + instance = (known after apply) + + network_border_group = (known after apply) + + network_interface = (known after apply) + + private_dns = (known after apply) + + private_ip = (known after apply) + + public_dns = (known after apply) + + public_ip = (known after apply) + + public_ipv4_pool = (known after apply) + + tags = { + + "Name" = "gitlab" + } + + tags_all = { + + "Name" = "gitlab" + } + + vpc = true + } + + # aws_eip_association.eip_association_gitlab will be created + + resource "aws_eip_association" "eip_association_gitlab" { + + allocation_id = (known after apply) + + id = (known after apply) + + instance_id = "i-0afefefefe" + + network_interface_id = (known after apply) + + private_ip_address = (known after apply) + + public_ip = (known after apply) + } + +Plan: 2 to add, 0 to change, 0 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +aws_eip.gitlab: Creating... +aws_eip.gitlab: Creation complete after 0s [id=eipalloc-ppp-kkk-0xff0xff] +aws_eip_association.eip_association_gitlab: Creating... +aws_eip_association.eip_association_gitlab: Creation complete after 1s [id=eipassoc-kukukulala] + +Apply complete! Resources: 2 added, 0 changed, 0 destroyed. 
+``` + +# gitlab setup + +在结束了基础设施的创建之后,我们需要安装及设置 gitlab 软件本身。 + +我们使用 https://github.com/sameersbn/docker-gitlab 这个作为 `GitLab` 的镜像来安装 gitlab。 + +准备 `docker-compose.yml` 文件,运行 `docker-compose up -d` 命令,这样我们就可以看到 gitlab 在运行了。 + +```yaml +version: "2.3" + +services: + redis: + restart: always + image: redis:6.2.6 + command: + - --loglevel warning + volumes: + - ./redis-data:/data:Z + + postgresql: + restart: always + image: sameersbn/postgresql:12-20200524 + volumes: + - ./postgresql-data:/var/lib/postgresql:Z + environment: + - DB_USER=gitlab + - DB_PASS=password + - DB_NAME=gitlabhq_production + - DB_EXTENSION=pg_trgm,btree_gist + + gitlab: + restart: always + image: sameersbn/gitlab:14.9.2 + depends_on: + - redis + - postgresql + ports: + - "10080:80" + # - "443:443" + - "10022:22" + volumes: + - ./gitlab-data:/home/git/data:Z + healthcheck: + test: ["CMD", "/usr/local/sbin/healthcheck"] + interval: 5m + timeout: 10s + retries: 3 + start_period: 5m + environment: + - DEBUG=true + + - DB_ADAPTER=postgresql + - DB_HOST=postgresql + - DB_PORT=5432 + - DB_USER=gitlab + - DB_PASS=password + - DB_NAME=gitlabhq_production + + - REDIS_HOST=redis + - REDIS_PORT=6379 + + - GITLAB_HTTPS=false + #- SSL_SELF_SIGNED=true + + - GITLAB_HOST= + #- GITLAB_PORT=443 + - GITLAB_SSH_PORT=10022 + - GITLAB_RELATIVE_URL_ROOT= + - GITLAB_SECRETS_DB_KEY_BASE=FF11111 + - GITLAB_SECRETS_SECRET_KEY_BASE=FF22222 + - GITLAB_SECRETS_OTP_KEY_BASE=FF33333 + +volumes: + redis-data: + postgresql-data: + gitlab-data: +``` + +# GitLab clone + +因为我们在 `docker-compose.yml` 中指定了 `10022:22` 这样的端口映射,在克隆代码时,所以我们需要指定端口信息: + +```shell +git clone ssh://git@:10022//.git +``` + +# GitLab administration + +`gitlab` 运行之后,我们可以对 `gitlab` 进行管理以增加其安全性,比如: + +- 开启两步认证 `MFA` +- 禁用注册功能,只允许用户通过邮箱登录,取消勾选 `Menu > Admin > Settings > General > Sign-up restrictions > Sign-up enabled` +- 进入 `Menu > Admin > Settings > General > Visibility and access controls section`,限制 `Restricted visibility levels` 为 `public`,这样可以限制只有登录用户才能查看 user profile +- 取消用户注册,设置 `Sign-up restrictions` 为 `disabled` +- 限制两步认证才能登录,勾选 `Sign-in restrictions > Two-factor authentication > Enforce two-factor authentication` + +好了,说到这里 `GitLab` 已经初步搭建完成,下面就可以自由的写 bug 啦。 diff --git a/src/terraform/import/review-terraform-import.md b/src/terraform/import/review-terraform-import.md new file mode 100644 index 0000000..f772d9b --- /dev/null +++ b/src/terraform/import/review-terraform-import.md @@ -0,0 +1,267 @@ +# Review terraform import + +首先看一下 `terraform import` 的参数: + + The import command expects two arguments. + Usage: terraform [global options] import [options] ADDR ID + + Import existing infrastructure into your Terraform state. + + This will find and import the specified resource into your Terraform + state, allowing existing infrastructure to come under Terraform + management without having to be initially created by Terraform. + + The ADDR specified is the address to import the resource to. Please + see the documentation online for resource addresses. The ID is a + resource-specific ID to identify that resource being imported. Please + reference the documentation for the resource type you're importing to + determine the ID syntax to use. It typically matches directly to the ID + that the provider uses. + + The current implementation of Terraform import can only import resources + into the state. It does not generate configuration. A future version of + Terraform will also generate configuration. 
+ + Because of this, prior to running terraform import it is necessary to write + a resource configuration block for the resource manually, to which the + imported object will be attached. + + This command will not modify your infrastructure, but it will make + network requests to inspect parts of your infrastructure relevant to + the resource being imported. + + Options: + + -config=path Path to a directory of Terraform configuration files + to use to configure the provider. Defaults to pwd. + If no config files are present, they must be provided + via the input prompts or env vars. + + -allow-missing-config Allow import when no resource configuration block exists. + + -input=false Disable interactive input prompts. + + -lock=false Don't hold a state lock during the operation. This is + dangerous if others might concurrently run commands + against the same workspace. + + -lock-timeout=0s Duration to retry a state lock. + + -no-color If specified, output won't contain any color. + + -var 'foo=bar' Set a variable in the Terraform configuration. This + flag can be set multiple times. This is only useful + with the "-config" flag. + + -var-file=foo Set variables in the Terraform configuration from + a file. If "terraform.tfvars" or any ".auto.tfvars" + files are present, they will be automatically loaded. + + -ignore-remote-version A rare option used for the remote backend only. See + the remote backend documentation for more information. + + -state, state-out, and -backup are legacy options supported for the local + backend only. For more information, see the local backend's documentation. + +运行 `terraform import`,导入安全组(security group): + + terraform import aws_security_group.gitlab sg-f2f2f2f2f2f2f2f2 + + Error: resource address "aws_security_group.gitlab" does not exist in the configuration. + + Before importing this resource, please create its configuration in the root module. For example: + + resource "aws_security_group" "gitlab" { + # (resource arguments) + } + +在导入资源的时候,提示错误,我们必须首先创建资源的配置,然后再导入资源。 + +在 `main.tf` 文件里添加以上资源 `resource "aws_security_group" "gitlab" {}`,再次运行 `terraform import` 命令: + + terraform import aws_security_group.gitlab sg-f2f2f2f2f2f2f2f2 + +输出: + + aws_security_group.gitlab: Importing from ID "sg-f2f2f2f2f2f2f2f2"... + aws_security_group.gitlab: Import prepared! + Prepared aws_security_group for import + aws_security_group.gitlab: Refreshing state... [id=sg-f2f2f2f2f2f2f2f2] + + + Import successful! + + The resources that were imported are shown above. These resources are now in + your Terraform state and will henceforth be managed by Terraform. + +import 之后,运行 `terraform plan` 来检查状态: + + aws_security_group.gitlab: Refreshing state... [id=sg-f2f2f2f2f2f2f2f2] + + Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following + symbols: + -/+ destroy and then create replacement + + Terraform will perform the following actions: + + # aws_security_group.gitlab must be replaced + -/+ resource "aws_security_group" "gitlab" { + ~ arn = "arn:aws:ec2:us-east-1:81818181888:security-group/sg-f2f2f2f2f2f2f2f2" -> (known after app + ly) + ~ description = "Security group for gitlab" -> "Managed by Terraform" # forces replacement + ~ egress = [ + - { + - cidr_blocks = [ + - "0.0.0.0/0", + ] + - description = "" + - from_port = 0 + - ipv6_cidr_blocks = [] + - prefix_list_ids = [] + - protocol = "-1" + - security_groups = [] + - self = false + - to_port = 0 + }, + ] -> (known after apply) + ~ id = "sg-f2f2f2f2f2f2f2f2" -> (known after apply) + ~ ingress = [ + - { + - cidr_blocks = [ + - "55.188.0.0/16", + ] + - description = "" + - from_port = 22 + - ipv6_cidr_blocks = [] + - prefix_list_ids = [] + - protocol = "tcp" + - security_groups = [] + - self = false + - to_port = 22 + }, + - { + - cidr_blocks = [] + - description = "r2r2-v2" + - from_port = 10080 + - ipv6_cidr_blocks = [] + - prefix_list_ids = [] + - protocol = "tcp" + - security_groups = [ + - "sg-0x1231238888", + ] + - self = false + - to_port = 10080 + }, + ] -> (known after apply) + ~ name = "gitlab" -> (known after apply) + + name_prefix = (known after apply) + ~ owner_id = "81818181888" -> (known after apply) + + revoke_rules_on_delete = false + - tags = { + - "Name" = "gitlab" + } -> null + ~ tags_all = { + - "Name" = "gitlab" + } -> (known after apply) + ~ vpc_id = "vpc-0102030401020304" -> (known after apply) + + - timeouts {} + } + + Plan: 1 to add, 0 to change, 1 to destroy. + + Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run + "terraform apply" now. + +可以看到,我们导入的资源没有在 `main.tf` 配置文件中,如果现在我们运行 `terraform apply` 则会删除这些资源。 + +为了让 `terraform.tfstate` 和 cloud provider 的配置一致,需要手动修改 tf 文件,添加对 import 资源的配置,添加的方法,基本上将 `terraform plan` 的输出(注意去掉 `-` `~` 这些)拷贝到对应的 resource 中,并做一些调整。对于 security group,添加资源配置的方法如下: + + resource "aws_security_group" "gitlab" { + # arn = "arn:aws:ec2:us-east-1:81818181888:security-group/sg-f2f2f2f2f2f2f2f2" -> (known after apply) + description = "Security group for gitlab" + egress = [ + { + cidr_blocks = [ + "0.0.0.0/0", + ] + description = "" + from_port = 0 + ipv6_cidr_blocks = [] + prefix_list_ids = [] + protocol = "-1" + security_groups = [] + self = false + to_port = 0 + }, + ] + # id = "sg-f2f2f2f2f2f2f2f2" -> (known after apply) + ingress = [ + { + cidr_blocks = [ + "55.188.0.0/16", + ] + description = "" + from_port = 22 + ipv6_cidr_blocks = [] + prefix_list_ids = [] + protocol = "tcp" + security_groups = [] + self = false + to_port = 22 + }, + { + cidr_blocks = [] + description = "r2r2-v2" + from_port = 10080 + ipv6_cidr_blocks = [] + prefix_list_ids = [] + protocol = "tcp" + security_groups = [ + "sg-0x1231238888", + ] + self = false + to_port = 10080 + }, + ] + name = "gitlab" + # owner_id = "81818181888" -> (known after apply) + revoke_rules_on_delete = false + tags = { + "Name" = "gitlab" + } + tags_all = { + "Name" = "gitlab" + } + vpc_id = "vpc-0102030401020304" + + timeouts {} + } + +首先 `terraform init`: + + Initializing the backend... + + Initializing provider plugins... + - Finding latest version of hashicorp/aws... + - Installing hashicorp/aws v4.1.0... 
+ - Installed hashicorp/aws v4.1.0 (signed by HashiCorp) + + Terraform has created a lock file .terraform.lock.hcl to record the provider + selections it made above. Include this file in your version control repository + so that Terraform can guarantee to make the same selections by default when + you run "terraform init" in the future. + + Terraform has been successfully initialized! + + You may now begin working with Terraform. Try running "terraform plan" to see + any changes that are required for your infrastructure. All Terraform commands + should now work. + + If you ever set or change modules or backend configuration for Terraform, + rerun this command to reinitialize your working directory. If you forget, other + commands will detect it and remind you to do so if necessary. + +# refs + +https://www.terraform.io/cli/import/usage diff --git a/src/terraform/import/terraform-import-dms-replication-instance-blog.md b/src/terraform/import/terraform-import-dms-replication-instance-blog.md new file mode 100644 index 0000000..3e39f8c --- /dev/null +++ b/src/terraform/import/terraform-import-dms-replication-instance-blog.md @@ -0,0 +1,618 @@ +# Manage AWS DMS resource using terraform + +我们如何管理云服务中现有的资源呢?此时需要 `terraform import` 来帮忙。 + +我们拿 AWS DMS(Data Migration Service)来举例。 + +一般来讲,DMS 会有复制实例(Replication Instance)用来执行数据的迁移,下面我们来通过 terraform 来管理一个现有的复制实例。 + +从 AWS console 中查询需要管理的复制实例的 ARN:`arn:aws-cn:dms:cn-northwest-1:501502503504:rep:DOYOUTHINKTERRAFORMISAGOODTOOL`,因为 `import` 需要用到这个资源 ID。 + +下面开始 import。 + +首先,给 `terraform` 来个 `alias`: + +```bash +alias t=terraform +``` + +其次,运行 terraform import: + +```bash +t import aws_dms_replication_instance.feature arn:aws-cn:dms:cn-northwest-1:501502503504:rep:DOYOUTHINKTERRAFORMISAGOODTOOL +``` + +输出: + +``` +aws_dms_replication_instance.feature: Importing from ID "arn:aws-cn:dms:cn-northwest-1:501502503504:rep:DOYOUTHINKTERRAFORMISAGOODTOOL"... +aws_dms_replication_instance.feature: Import prepared! +Prepared aws_dms_replication_instance for import +aws_dms_replication_instance.feature: Refreshing state... [id=arn:aws-cn:dms:cn-northwest-1:501502503504:rep:DOYOUTHINKTERRAFORMISAGOODTOOL] +╷ +│ Error: error describing DMS Replication Instance (arn:aws-cn:dms:cn-northwest-1:501502503504:rep:DOYOUTHINKTERRAFORMISAGOODTOOL): InvalidParameterValueException: The parameter value arn:aws-cn:dms:cn-northwest-1:501502503504:rep:DOYOUTHINKTERRAFORMISAGOODTOOL is not valid for argument Filter: replication-instance-id due to its length 86 exceeds 63. +│ status code: 400, request id: a6aaaaa3-1aaf-4aa6-aaa3-1aaaaaaaaaa5 +│ +│ + +Not set region correctly. +``` + +什么?竟然出错了: `replication-instance-id due to its length 86 exceeds 63`: + +``` +│ Error: error describing DMS Replication Instance (arn:aws-cn:dms:cn-northwest-1:501502503504:rep:DOYOUTHINKTERRAFORMISAGOODTOOL): InvalidParameterValueException: The parameter value arn:aws-cn:dms:cn-northwest-1:501502503504:rep:DOYOUTHINKTERRAFORMISAGOODTOOL is not valid for argument Filter: replication-instance-id due to its length 86 exceeds 63. +``` + +这个长度超过 63 的错是什么意思? + +经过 google 半小时的查询,终于恍然大悟,原来 import 的参数不是 ARN,而是一个 ID。那这个 ID 在 AWS 里就是资源的 identifier。 + +于是修改 import 参数,传递 DMS 复制实例的 identifier,再次运行 `terraform import`。 + +``` +t import aws_dms_replication_instance.feature featrue-dms-test +``` + +发现还是出错了: + +``` +aws_dms_replication_instance.feature: Importing from ID "featrue-dms-test"... +aws_dms_replication_instance.feature: Import prepared! 
+ Prepared aws_dms_replication_instance for import +aws_dms_replication_instance.feature: Refreshing state... [id=featrue-dms-test] +╷ +│ Error: Cannot import non-existent remote object +│ +│ While attempting to import an existing object to "aws_dms_replication_instance.feature", the provider detected that no object +│ exists with the given id. Only pre-existing objects can be imported; check that the id is correct and that it is associated with +│ the provider's configured region or endpoint, or use "terraform apply" to create a new remote object for this resource. + ╵ +``` + +这次不是报长度的错了,而是 `the provider detected that no object exists with the given id`。 + +这个错误又是什么意思呢? + +再经历了又半小时的 google 之后,发现可能是没有设置正确的 region 导致的,于是讲 AWS_DEFAULT_REGION 设置成正确的值之后,运行 import: + +``` +set_up_aws_profile_sandbox_and_region +t import aws_dms_replication_instance.feature featrue-dms-test +``` + +输出: + +``` +aws_dms_replication_instance.feature: Importing from ID "featrue-dms-test"... +aws_dms_replication_instance.feature: Import prepared! + Prepared aws_dms_replication_instance for import +aws_dms_replication_instance.feature: Refreshing state... [id=featrue-dms-test] + +Import successful! + +The resources that were imported are shown above. These resources are now in +your Terraform state and will henceforth be managed by Terraform. +``` + +Congratulations! 终于成功了! + +下面看看我们有什么: + + ls -lh + .rw-r--r-- 57 William Shakespear 27 Jun 13:47 main.tf + .rw-r--r-- 2.1k William Shakespear 30 Jun 10:27 terraform.tfstate + +我们有一个 `main.tf` 的配置文件和 `terraform.tfstate` 的状态文件。于是我们可以运行 `terraform plan` 和 `terraform apply` 来创建或修改基础设施的变更了。值得注意的是,在执行 `terraform apply` 之前,会创建一个 `plan` ,这个 `plan` 是本地状态和云服务之间的差异,而 `apply` 则会将本地配置同步到云服务(此处为 AWS)。 + +下面我们运行 `terraform plan`。 + +就像 `terraform plan --help` 命令所说的: + +> Generates a speculative execution plan, showing what actions Terraform +> would take to apply the current configuration. This command will not +> actually perform the planned actions. + +> You can optionally save the plan to a file, which you can then pass to +> the "apply" command to perform exactly the actions described in the plan. + +我们运行 `terraform plan`: + +``` +╷ +│ Error: Missing required argument +│ +│ on main.tf line 2, in resource "aws_dms_replication_instance" "feature": +│ 2: resource "aws_dms_replication_instance" "feature" { +│ +│ The argument "replication_instance_id" is required, but no definition was found. +╵ +╷ +│ Error: Missing required argument +│ +│ on main.tf line 2, in resource "aws_dms_replication_instance" "feature": +│ 2: resource "aws_dms_replication_instance" "feature" { +│ +│ The argument "replication_instance_class" is required, but no definition was found. +``` + +出错啦!执行 `terraform plan` 提示了两个错: `replication_instance_class` 和 `replication_instance_id` 是必须的,我们没有提供。当然啦,我们的 `main.tf` 还是空空的状态。 + +> There's error when executing `terraform plan`, it shows `"replication_instance_id" is required, but no definition was found`. What is `replication_instance_id` anyway? + +```hcl +resource "aws_dms_replication_instance" "feature" { +} +``` + +在调查了 terraform 文档之后,我们补齐这两个必须参数。 + +```hcl +resource "aws_dms_replication_instance" "feature" { + replication_instance_id = "feature-dms-test" + replication_instance_class = "dms.r5.xlarge" +} +``` + +执行 `terraform plan`: + +``` +aws_dms_replication_instance.feature: Refreshing state... [id=featrue-dms-test] + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following +symbols: +-/+ destroy and then create replacement + +Terraform will perform the following actions: + + # aws_dms_replication_instance.feature must be replaced +-/+ resource "aws_dms_replication_instance" "feature" { + ~ allocated_storage = 500 -> (known after apply) + ~ auto_minor_version_upgrade = true -> (known after apply) + ~ availability_zone = "cn-northwest-1c" -> (known after apply) + ~ engine_version = "3.4.6" -> (known after apply) + ~ id = "featrue-dms-test" -> (known after apply) + ~ kms_key_arn = "arn:aws-cn:kms:cn-northwest-1:501502503504:key/39999999-9999-9999-9999-999999999999" -> (known after apply) + ~ multi_az = false -> (known after apply) + ~ preferred_maintenance_window = "mon:11:50-mon:12:20" -> (known after apply) + ~ publicly_accessible = false -> (known after apply) + ~ replication_instance_arn = "arn:aws-cn:dms:cn-northwest-1:501502503504:rep:ILOVEREADALLPOEMSOFWILLIAMSHAKESPEAR" -> (known after apply) + ~ replication_instance_id = "featrue-dms-test" -> "feature-dms-test" # forces replacement + ~ replication_instance_private_ips = [ + - "100.200.100.1", + ] -> (known after apply) + ~ replication_instance_public_ips = [ + - "", + ] -> (known after apply) + ~ replication_subnet_group_id = "default-vpc-010203040506070809" -> (known after apply) + - tags = { + - "description" = "featrue-dms-test" + } -> null + ~ tags_all = { + - "description" = "featrue-dms-test" + } -> (known after apply) + ~ vpc_security_group_ids = [ + - "sg-a0b0c0d0a0b0c0d0", + ] -> (known after apply) + # (1 unchanged attribute hidden) + + - timeouts {} + } + +Plan: 1 to add, 0 to change, 1 to destroy. + +──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run +"terraform apply" now. + +``` + +太好了!终于像模像样的输出了! 
+ +可是,等等!为什么有 `1 to add` 和 `1 to destroy` 呢?难不成要摧毁我云服务的资源嘛?太可怕了。我先冷静冷静,下一步到底改干啥,可不要发生 `rm -rf /` 的惨剧。 + +在喝完咖啡,思索了一阵之后,得知一个重要结论。 + +我本地的配置 `main.tf` 和云服务的配置不匹配,但是本地只配置了 `replication_instance_id` 和 `replication_instance_class`,如果我执行 `terraform apply`,我就告诉 `terraform`:请给我创建一个 dms replication instance 资源,它的 `replication_instance_id` 和 `replication_instance_class` 分别如配置所说,其他参数看着给。于是 `terraform` 比较来比较去,发现只能先云服务现有的给删了(`1 to destroy`)再给我们创建一个(`1 to add`)。 + +这当然不是我们需要的。 + +那么我们应该怎么做呢?当然是拷贝 `terraform plan` 的输出(主要是波浪线的部分,这部分是 change)到 `main.tf`,完成手动云服务到本地配置的反向同步。 + +```hcl +resource "aws_dms_replication_instance" "feature" { + replication_instance_id = "feature-dms-test" + replication_instance_class = "dms.r5.xlarge" + + ~ allocated_storage = 500 -> (known after apply) + ~ auto_minor_version_upgrade = true -> (known after apply) + ~ availability_zone = "cn-northwest-1c" -> (known after apply) + ~ engine_version = "3.4.6" -> (known after apply) + ~ id = "featrue-dms-test" -> (known after apply) + ~ kms_key_arn = "arn:aws-cn:kms:cn-northwest-1:501502503504:key/39999999-9999-9999-9999-999999999999" -> (known after apply) + ~ multi_az = false -> (known after apply) + ~ preferred_maintenance_window = "mon:11:50-mon:12:20" -> (known after apply) + ~ publicly_accessible = false -> (known after apply) + ~ replication_instance_arn = "arn:aws-cn:dms:cn-northwest-1:501502503504:rep:ILOVEREADALLPOEMSOFWILLIAMSHAKESPEAR" -> (known after apply) + ~ replication_instance_id = "featrue-dms-test" -> "feature-dms-test" # forces replacement + ~ replication_instance_private_ips = [ + - "100.200.100.1", + ] -> (known after apply) + ~ replication_instance_public_ips = [ + - "", + ] -> (known after apply) + ~ replication_subnet_group_id = "default-vpc-010203040506070809" -> (known after apply) + - tags = { + - "description" = "featrue-dms-test" + } -> null + ~ tags_all = { + - "description" = "featrue-dms-test" + } -> (known after apply) + ~ vpc_security_group_ids = [ + - "sg-a0b0c0d0a0b0c0d0", + ] -> (known after apply) + # (1 unchanged attribute hidden) + + - timeouts {} +} +``` + +于是,我们根据 `terraform plan` 的输出,填充到 `main.tf` 文件里: + +``` +resource "aws_dms_replication_instance" "feature" { + replication_instance_class = "dms.r5.xlarge" + + allocated_storage = 500 + auto_minor_version_upgrade = true + availability_zone = "cn-northwest-1c" + engine_version = "3.4.6" + id = "featrue-dms-test" + kms_key_arn = "arn:aws-cn:kms:cn-northwest-1:501502503504:key/39999999-9999-9999-9999-999999999999" + multi_az = false + preferred_maintenance_window = "mon:11:50-mon:12:20" + publicly_accessible = false + replication_instance_arn = "arn:aws-cn:dms:cn-northwest-1:501502503504:rep:ILOVEREADALLPOEMSOFWILLIAMSHAKESPEAR" + replication_instance_id = "feature-dms-test" # forces replacement + replication_instance_private_ips = [ + "100.200.100.1", + ] + replication_instance_public_ips = [ + "", + ] + replication_subnet_group_id = "default-vpc-010203040506070809" + tags = null + tags_all = { + "description" = "featrue-dms-test" + } + vpc_security_group_ids = [ + "sg-a0b0c0d0a0b0c0d0", + ] + + timeouts {} +} + +``` + +此时再次运行 `terraform plan` + +``` +╷ +│ Error: Invalid or unknown key +│ +│ with aws_dms_replication_instance.feature, +│ on main.tf line 9, in resource "aws_dms_replication_instance" "feature": +│ 9: id = "featrue-dms-test" +│ +╵ +╷ +│ Error: Value for unconfigurable attribute +│ +│ with aws_dms_replication_instance.feature, +│ on main.tf line 14, in resource "aws_dms_replication_instance" "feature": +│ 14: 
replication_instance_arn = "arn:aws-cn:dms:cn-northwest-1:501502503504:rep:ILOVEREADALLPOEMSOFWILLIAMSHAKESPEAR" +│ +│ Can't configure a value for "replication_instance_arn": its value will be decided automatically based on the result of applying +│ this configuration. +╵ +╷ +│ Error: Value for unconfigurable attribute +│ +│ with aws_dms_replication_instance.feature, +│ on main.tf line 16, in resource "aws_dms_replication_instance" "feature": +│ 16: replication_instance_private_ips = [ +│ 17: "100.200.100.1", +│ 18: ] +│ +│ Can't configure a value for "replication_instance_private_ips": its value will be decided automatically based on the result of +│ applying this configuration. +╵ +╷ +│ Error: Value for unconfigurable attribute +│ +│ with aws_dms_replication_instance.feature, +│ on main.tf line 19, in resource "aws_dms_replication_instance" "feature": +│ 19: replication_instance_public_ips = [ +│ 20: "", +│ 21: ] +│ +│ Can't configure a value for "replication_instance_public_ips": its value will be decided automatically based on the result of +│ applying this configuration. +╵ +``` + +它会提示一些错,意思是有些参数会在创建时决定,所以我们无须提供,注释掉对应的行如 `replication_instance_private_ips` 和 `replication_instance_public_ips`,再次运行 `terraform plan`。 + +> Fix error by comment error line. As `replication_instance_private_ips` and `replication_instance_public_ips` will be decided automatically based on the result of applying this configuration. We comment it out and run plan again. + +``` +resource "aws_dms_replication_instance" "feature" { + publicly_accessible = false + # replication_instance_arn = "arn:aws-cn:dms:cn-northwest-1:501502503504:rep:ILOVEREADALLPOEMSOFWILLIAMSHAKESPEAR" + replication_instance_id = "feature-dms-test" # forces replacement + # replication_instance_private_ips = [ + # "100.200.100.1", + # ] + # replication_instance_public_ips = [ + # "", + # ] +} +``` + +但是我们还是会发现出错了,这是为什么呢? + +> But we also encounter an error, saying we will destroy the resource and recreate it. + +``` +aws_dms_replication_instance.feature: Refreshing state... [id=featrue-dms-test] + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following +symbols: +-/+ destroy and then create replacement + +Terraform will perform the following actions: + + # aws_dms_replication_instance.feature must be replaced +-/+ resource "aws_dms_replication_instance" "feature" { + ~ id = "featrue-dms-test" -> (known after apply) + ~ replication_instance_arn = "arn:aws-cn:dms:cn-northwest-1:501502503504:rep:ILOVEREADALLPOEMSOFWILLIAMSHAKESPEAR" -> (known after apply) + ~ replication_instance_id = "featrue-dms-test" -> "feature-dms-test" # forces replacement + ~ replication_instance_private_ips = [ + - "100.200.100.1", + ] -> (known after apply) + ~ replication_instance_public_ips = [ + - "", + ] -> (known after apply) + - tags = { + - "description" = "featrue-dms-test" + } -> null + ~ tags_all = { + - "description" = "featrue-dms-test" + } -> (known after apply) + # (11 unchanged attributes hidden) + + # (1 unchanged block hidden) + } + +Plan: 1 to add, 0 to change, 1 to destroy. + +``` + +我们尝试这删除本地 `terraform.tfstate` 文件,运行 `terraform plan`,发现错误依旧。 + +> Delete terraform.state file and run `t import aws_dms_replication_instance.feature feature-dms-test` with correct name. + +``` +aws_dms_replication_instance.feature: Importing from ID "feature-dms-test"... +aws_dms_replication_instance.feature: Import prepared! 
+ Prepared aws_dms_replication_instance for import +aws_dms_replication_instance.feature: Refreshing state... [id=feature-dms-test] +╷ +│ Error: Cannot import non-existent remote object +│ +│ While attempting to import an existing object to "aws_dms_replication_instance.feature", the provider detected that no object +│ exists with the given id. Only pre-existing objects can be imported; check that the id is correct and that it is associated with +│ the provider's configured region or endpoint, or use "terraform apply" to create a new remote object for this resource. + +``` + +啊!细心的我终于发现是因为云服务的资源名字被叫错了!云服务的 identifier 叫做 `featrue-dms-test`,而我们需要 `import` 的名字叫做 `feature-dms-test`!我的心里唱起了深深太平洋底深深伤心,于是紧急联系相关人员这个名字是否可以更改,在得知这只是个测试任务可以改名字时,我果断(含泪)敲下了如下的命令: + +> Change dms replication instance identifier. + +```bash +aws dms modify-replication-instance --replication-instance-arn arn:aws-cn:dms:cn-northwest-1:501502503504:rep:ILOVEREADALLPOEMSOFWILLIAMSHAKESPEAR --replication-instance-identifier feature-dms-test --apply-immediately +``` + +这是它的输出: + +``` +{ + "ReplicationInstance": { + "ReplicationInstanceIdentifier": "feature-dms-test", + "ReplicationInstanceClass": "dms.r5.xlarge", + "ReplicationInstanceStatus": "available", + "AllocatedStorage": 500, + "InstanceCreateTime": "2022-05-23T12:59:24.006000+08:00", + "VpcSecurityGroups": [ + { + "VpcSecurityGroupId": "sg-a0b0c0d0a0b0c0d0", + "Status": "active" + } + ], + "AvailabilityZone": "cn-northwest-1c", + "ReplicationSubnetGroup": { + "ReplicationSubnetGroupIdentifier": "default-vpc-010203040506070809", + "ReplicationSubnetGroupDescription": "default group created by console for vpc id vpc-010203040506070809", + "VpcId": "vpc-010203040506070809", + "SubnetGroupStatus": "Complete", + "Subnets": [ + { + "SubnetIdentifier": "subnet-0102030405060708", + "SubnetAvailabilityZone": { + "Name": "cn-northwest-1b" + }, + "SubnetStatus": "Active" + }, + { + "SubnetIdentifier": "subnet-0807060504030201", + "SubnetAvailabilityZone": { + "Name": "cn-northwest-1c" + }, + "SubnetStatus": "Active" + }, + { + "SubnetIdentifier": "subnet-1213141516171819", + "SubnetAvailabilityZone": { + "Name": "cn-northwest-1a" + }, + "SubnetStatus": "Active" + } + ] + }, + "PreferredMaintenanceWindow": "mon:11:50-mon:12:20", + "PendingModifiedValues": {}, + "MultiAZ": false, + "EngineVersion": "3.4.6", + "AutoMinorVersionUpgrade": true, + "KmsKeyId": "arn:aws-cn:kms:cn-northwest-1:501502503504:key/99999999999c6-4999999999999999999999", + "ReplicationInstanceArn": "arn:aws-cn:dms:cn-northwest-1:501502503504:rep:ILOVEREADALLPOEMSOFWILLIAMSHAKESPEAR", + "ReplicationInstancePrivateIpAddress": "100.200.100.1", + "ReplicationInstancePublicIpAddresses": [ + null + ], + "ReplicationInstancePrivateIpAddresses": [ + "100.200.100.1" + ], + "PubliclyAccessible": false + } +} +``` + +等待了片刻之后,identifier 被修改成功了。赶紧试试 `terraform import` 吧~ + +``` +t import aws_dms_replication_instance.feature feature-dms-test +``` + +终于 `import` 成功了: + +``` +aws_dms_replication_instance.feature: Importing from ID "feature-dms-test"... +aws_dms_replication_instance.feature: Import prepared! + Prepared aws_dms_replication_instance for import +aws_dms_replication_instance.feature: Refreshing state... [id=feature-dms-test] + +Import successful! + +The resources that were imported are shown above. These resources are now in +your Terraform state and will henceforth be managed by Terraform. 
+ + +``` + +此时的 `main.tf` 依旧空空如也,我们仿照先前的做法,将 `terraform plan` 的输出写到 `main.tf` 里。 + +``` +t plan +``` + +输出: + +``` +aws_dms_replication_instance.feature: Refreshing state... [id=feature-dms-test] + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following +symbols: + ~ update in-place + +Terraform will perform the following actions: + + # aws_dms_replication_instance.feature will be updated in-place + ~ resource "aws_dms_replication_instance" "feature" { + id = "feature-dms-test" + ~ tags = { + - "description" = "featrue-dms-test" -> null + } + ~ tags_all = { + - "description" = "featrue-dms-test" + } -> (known after apply) + # (15 unchanged attributes hidden) + + # (1 unchanged block hidden) + } + +Plan: 0 to add, 1 to change, 0 to destroy. +``` + +我们看到,`add` 和 `destroy` 都变成 0 了,一个很大的进步!这就是说,我们即使执行 `terraform apply` 也不会误操作发生删除资源的事故。 + +下面要做的就是修复那些 `modify` 的部分(波浪线),如果无关紧要的话(比如 tag 之类的)可以放着不管,如果有洁癖的话(遵循 best practice),可以在 `main.tf` 里做些调整。 + +> We can see both add and destroy number is 0, we don't need to worry about any resource will be deleted, which may cause desaster, and it's best practice to add description to aws resource. + +> Now we know there's no add or destroy, we can apply the changes by runing `terraform apply`. Notice, you need to enter `yes` to perform the actions. + +此时运行 `t apply`: + +输出: + +``` +aws_dms_replication_instance.feature: Refreshing state... [id=feature-dms-test] + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following +symbols: + ~ update in-place + +Terraform will perform the following actions: + + # aws_dms_replication_instance.feature will be updated in-place + ~ resource "aws_dms_replication_instance" "feature" { + id = "feature-dms-test" + ~ tags = { + - "description" = "featrue-dms-test" -> null + } + ~ tags_all = { + - "description" = "featrue-dms-test" + } -> (known after apply) + # (15 unchanged attributes hidden) + + # (1 unchanged block hidden) + } + +Plan: 0 to add, 1 to change, 0 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +aws_dms_replication_instance.feature: Modifying... [id=feature-dms-test] +aws_dms_replication_instance.feature: Modifications complete after 0s [id=feature-dms-test] + +Apply complete! Resources: 0 added, 1 changed, 0 destroyed. +``` + +此时 apply 成功,我们把一个 AWS DMS 的复制示例通过 terraform 来管理了,是不是很简单(真的有吗)? + +> After applying the changes, we can check if there's any difference between our local infrastructure and the infrastructure in the cloud by running `terraform plan` again. + +在执行完 `terraform apply` 之后,我们再次执行 `terraform plan`,它会因为检测不到变更(本地配置已经和云服务同步)而不做其他操作。 + +``` +aws_dms_replication_instance.feature: Refreshing state... [id=feature-dms-test] + +No changes. Your infrastructure matches the configuration. + +Terraform has compared your real infrastructure against your configuration and found no differences, so no changes are needed. +``` + +这样,我们就可以如法炮制 `import` 其他的资源了。 + +> Life is hard? Life is hard. 
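+
+回顾一下,用 `terraform import` 接管已有资源的套路大致如下(示意脚本,资源地址与 identifier 沿用本文示例,按需替换):
+
+```bash
+# 0. 先在 main.tf 里写一个空的资源块,否则 import 会直接报错
+
+# 1. 用资源的 identifier(注意不是 ARN)执行 import
+terraform import aws_dms_replication_instance.feature feature-dms-test
+
+# 2. 运行 plan,把输出中的属性回填到 main.tf,
+#    反复执行直到 plan 不再出现 add/destroy
+terraform plan
+
+# 3. 确认只剩无关紧要的 in-place change(或 No changes)后再 apply
+terraform apply
+```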
+ +terraform doc: + +https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dms_replication_instance diff --git a/src/terraform/management/a-good-developer-knows-management.md b/src/terraform/management/a-good-developer-knows-management.md new file mode 100644 index 0000000..723b782 --- /dev/null +++ b/src/terraform/management/a-good-developer-knows-management.md @@ -0,0 +1,194 @@ +# A Good Developer knows management + +没有不好的管理者,只有懒的管理者。 + +先看下我们在哪里: + + $pwd + /home/fantasticmachine/terraform + +像我们写 Hello world 的时候,我们需要一个 main 文件,另外的变量文件 variables.tf 体现了编程语言里的模块化思想: + + $tree + . + ├── main.tf + └── variables.tf + +`variables.tf` 定义了我们需要的变量,如 `access_key` `secret_key` 和 `region` + + $cat variables.tf + variable "access_key" {} + variable "secret_key" {} + + variable "region" { + default = "cn-north-1" + } + +为了让 `terraform` 访问 `aws`,需要在 `main` 文件里加入 `iam` 配置,即加入 `access_key` 和 `secret_key`,配置信息需要放在 `terraform` 的 `aws` provider 里: + + $cat main.tf + provider "aws" { + access_key = "${var.access_key}" + secret_key = "${var.secret_key}" + region = "${var.region}" + } + +为了让 `terraform` 顺利连接 `aws`,我们需要在 `bash` 的环境变量里指定 `access_key` 和 `secret_key`: + + export TF_VAR_access_key="AKIAPLONGTIMEAGO" + export TF_VAR_secret_key="Lukeskywalkershowmehowtousesword" + export AWS_ACCESS_KEY=$TF_VAR_access_key + export AWS_SECRET_KEY=$TF_VAR_secret_key + export EC2_REGION=cn-north-1 + +好了,先拿 iam 开刀。 + +在控制台已经建了几个 group,如 Developer 等,我们现在让 `terraform` 管理。 + +导入到 main.tf 配置中: + + $terraform import aws_iam_group.Developers developers + Error: resource address "aws_iam_group.Developers" does not exist in the configuration. + + Before importing this resource, please create its configuration in the root module. For example: + + resource "aws_iam_group" "Developers" { + # (resource arguments) + } + +发现出错了! 
错误信息说 `aws_iam_group.Developers` 这个资源不在配置文件中,需要我们手动创建。
+
+    $cat >> main.tf <
+
+> Principal 是权限的委托人,此时的例子用来给 IAM User 访问 s3 bucket resource 的权限。除此之外,委托人还包括很多类型如特定 AWS 账户、单个或多个 iam user、iam role 或 aws 的服务,具体可参考 Principal 的官方文档。
+
+利用 `aws_s3_bucket_policy` 定义资源,这将给我们的目标 bucket 附加 policy。
+
+    resource "aws_s3_bucket_policy" "k8s-cluster-bucket-test" {
+      bucket = "k8s-cluster-bucket-test"
+      policy = "${data.aws_iam_policy_document.s3-bucket-policy-document.json}"
+    }
+
+## 第四步
+
+回到 users module。
+
+我们利用 `aws_iam_policy` 来创建针对指定 bucket 的写权限 `s3:PutObject`。
+
+    resource "aws_iam_policy" "s3-put-object-policy" {
+      name        = "s3-put-object-policy"
+      description = "A s3 pub object policy"
+      policy = <
+
+```rust
+pub enum Result<T, E> {
+    /// Contains the success value
+    Ok(T),
+
+    /// Contains the error value
+    Err(E),
+}
+```
+
+它在 pattern match 的时候很有用,出错处理几乎离不开它:
+
+```rust
+fn halves_if_even(i: i32) -> Result<i32, Error> {
+    if i % 2 == 0 { Ok(i/2) } else { Err(/* something */) }
+}
+
+fn do_the_thing(i: i32) -> Result<i32, Error> {
+    let i = match halves_if_even(i) {
+        Ok(i) => i,
+        e => return e,
+    };
+
+    // use `i`
+}
+```
+
+我们可以效仿 Rust 实现 TypeScript 的 match on Either
+
+我们使用 `union type` 来定义 Either
+
+```typescript
+type Left<T> = { type: "left"; value: T };
+type Right<T> = { type: "right"; value: T };
+type Either<L, R> = Left<L> | Right<R>;
+```
+
+Either 定义了一个容器,实际编码中,我们需要从 Either 容器里提取结果,为了调用者的方便,我们允许传入 callback 来处理不同的情况
+
+```typescript
+function match<L, R, T>(
+  input: Either<L, R>,
+  left: (left: L) => T,
+  right: (right: R) => T
+) {
+  switch (input.type) {
+    case "left":
+      return left(input.value);
+    case "right":
+      return right(input.value);
+  }
+}
+```
+
+调用者此时可以定义自己的函数,返回类型是个 Either,失败返回 Error,成功得到运动的方向
+
+```typescript
+function validateCrabMoveDirection(
+  crab: Crab
+): Either<Error, { direction: string }> {
+  if (crab.name === "strange crab") {
+    // return Left type
+    return { type: "left", value: Error("x") };
+  }
+  // return Right type
+  return { type: "right", value: { direction: crab.smartmove("right") } };
+}
+```
+
+于是可以用 match 来获取上述函数的运行结果:
+
+```typescript
+{
+  const direction = match(
+    validateCrabMoveDirection(crab),
+    (_) => null,
+    (right) => right.direction
+  );
+  // output: right
+  console.log(direction);
+}
+
+{
+  const crab = new Crab("strange crab");
+  const direction = match(
+    validateCrabMoveDirection(crab),
+    (_) => null,
+    (right) => right.direction
+  );
+  // output: null
+  console.log(direction);
+}
+```
+
+## Type-safe action creator in Redux
+
+讲到这里不得不讲下 `TypeScript` 在 `Redux` 中的应用,当我们给 redux 的 action type 定义很多类型时,一个显著的问题是,不同 action creator 的函数类型不能动态获取,此时我们可以利用 `TypeScript` 的 `ReturnType` 来解决
+
+以一个 Notes 应用为例
+
+首先定义 Notes 的 `interface`
+
+```typescript
+interface Note {
+  id: number;
+  title: string;
+  content: string;
+  creationDate: string;
+}
+```
+
+然后定义 action type,可以用 const
+
+```typescript
+const FETCH_REQUEST = "FETCH_REQUEST";
+const FETCH_SUCCESS = "FETCH_SUCCESS";
+const FETCH_ERROR = "FETCH_ERROR";
+```
+
+或者使用 enum
+
+```typescript
+const enum NotesActionTypes {
+  FETCH_REQUEST = "@@notes/FETCH_REQUEST",
+  FETCH_SUCCESS = "@@notes/FETCH_SUCCESS",
+  FETCH_ERROR = "@@notes/FETCH_ERROR",
+}
+```
+
+然后定义我们的 action creator,此时用到 `typesafe-actions` 这个库
+
+```typescript
+const fetchRequest = createAction(NotesActionTypes.FETCH_REQUEST);
+const fetchSuccess = createAction(NotesActionTypes.FETCH_SUCCESS, (action) => {
+  return (data: Note[]) => action(data);
+});
+const fetchError = createAction(NotesActionTypes.FETCH_ERROR, (action) => {
+  return (message: string) => action(message);
+});
+```
+
+每个 action creator 的返回类型不同,此时 `ReturnType` 登场
+
+```typescript
+// 
利用 ReturnType 定义 action 减少代码冗余 + +const actions = { fetchRequest, fetchSuccess, fetchError }; +type Action = ReturnType<(typeof actions)[keyof typeof actions]>; +``` + +定义了上述 Action,我们就可以给我们的 reducer 中的 action 做类型检查了 + +```typescript +// 定义 redux state +type State = { notes: Note[]; state: string; errorMessage?: string }; + +// 定义 redux reducer +const reducer: Reducer = (state: State, action: Action) => { + switch (action.type) { + case getType(fetchRequest): { + return { ...state, state: "LOADING" }; + } + case getType(fetchSuccess): { + return { ...state, state: "LOADED", notes: action.payload }; + } + case getType(fetchError): { + return { + ...state, + state: "ERROR", + notes: [], + errorMessage: action.payload, + }; + } + default: { + return state; + } + } +}; +``` + +简单测试 + +```typescript +let state = { notes: [], state: "INIT" }; +state = reducer(state, fetchRequest()); +// { notes: [], state: 'LOADING' } +console.log(state); +``` + +## 后记 + +关于为何螃蟹要横向走?来自维基百科 + +> 因为腿关节构造的缘故,螃蟹横著走会比较迅速,因此它们一般都是横著行进的,另外,蛙蟹科的一些生物也会直着或倒退着行进。 + +> 螃蟹富含优质蛋白质,蟹肉较细腻,肌肉纤维中含有 10 余种游离氨基酸,其中谷氨酸、脯氨酸、精氨酸含量较多,对术后、病后、慢性消耗性疾病等需要补充营养的人大有益处。螃蟹脂肪含量很低,但维生素 A、E 和 B 族较高,特别是蟹黄中富含维生素 A,有益于视力及皮肤健康。蟹富含矿物元素钙、镁以及锌、硒、铜等人体必需的微量元素。但由于螃蟹高胆固醇、高嘌呤,痛风患者食用时应自我节制,患有感冒、肝炎、心血管疾病的人不宜食蟹。死蟹不能吃,会带有大量细菌和毒素。 + +题图 https://pixabay.com/users/skylark-201564/ diff --git a/theme/book.js b/theme/book.js new file mode 100644 index 0000000..178f1e9 --- /dev/null +++ b/theme/book.js @@ -0,0 +1,690 @@ +"use strict"; + +// Fix back button cache problem +window.onunload = function () { }; + +// Global variable, shared between modules +function playground_text(playground, hidden = true) { + let code_block = playground.querySelector("code"); + + if (window.ace && code_block.classList.contains("editable")) { + let editor = window.ace.edit(code_block); + return editor.getValue(); + } else if (hidden) { + return code_block.textContent; + } else { + return code_block.innerText; + } +} + +(function codeSnippets() { + function fetch_with_timeout(url, options, timeout = 6000) { + return Promise.race([ + fetch(url, options), + new Promise((_, reject) => setTimeout(() => reject(new Error('timeout')), timeout)) + ]); + } + + var playgrounds = Array.from(document.querySelectorAll(".playground")); + if (playgrounds.length > 0) { + fetch_with_timeout("https://play.rust-lang.org/meta/crates", { + headers: { + 'Content-Type': "application/json", + }, + method: 'POST', + mode: 'cors', + }) + .then(response => response.json()) + .then(response => { + // get list of crates available in the rust playground + let playground_crates = response.crates.map(item => item["id"]); + playgrounds.forEach(block => handle_crate_list_update(block, playground_crates)); + }); + } + + function handle_crate_list_update(playground_block, playground_crates) { + // update the play buttons after receiving the response + update_play_button(playground_block, playground_crates); + + // and install on change listener to dynamically update ACE editors + if (window.ace) { + let code_block = playground_block.querySelector("code"); + if (code_block.classList.contains("editable")) { + let editor = window.ace.edit(code_block); + editor.addEventListener("change", function (e) { + update_play_button(playground_block, playground_crates); + }); + // add Ctrl-Enter command to execute rust code + editor.commands.addCommand({ + name: "run", + bindKey: { + win: "Ctrl-Enter", + mac: "Ctrl-Enter" + }, + exec: _editor => run_rust_code(playground_block) + }); + } + } + } + + // updates the visibility of play button 
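for one code block,
    // 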
based on `no_run` class and + // used crates vs ones available on https://play.rust-lang.org + function update_play_button(pre_block, playground_crates) { + var play_button = pre_block.querySelector(".play-button"); + + // skip if code is `no_run` + if (pre_block.querySelector('code').classList.contains("no_run")) { + play_button.classList.add("hidden"); + return; + } + + // get list of `extern crate`'s from snippet + var txt = playground_text(pre_block); + var re = /extern\s+crate\s+([a-zA-Z_0-9]+)\s*;/g; + var snippet_crates = []; + var item; + while (item = re.exec(txt)) { + snippet_crates.push(item[1]); + } + + // check if all used crates are available on play.rust-lang.org + var all_available = snippet_crates.every(function (elem) { + return playground_crates.indexOf(elem) > -1; + }); + + if (all_available) { + play_button.classList.remove("hidden"); + } else { + play_button.classList.add("hidden"); + } + } + + function run_rust_code(code_block) { + var result_block = code_block.querySelector(".result"); + if (!result_block) { + result_block = document.createElement('code'); + result_block.className = 'result hljs language-bash'; + + code_block.append(result_block); + } + + let text = playground_text(code_block); + let classes = code_block.querySelector('code').classList; + let edition = "2015"; + if(classes.contains("edition2018")) { + edition = "2018"; + } else if(classes.contains("edition2021")) { + edition = "2021"; + } + var params = { + version: "stable", + optimize: "0", + code: text, + edition: edition + }; + + if (text.indexOf("#![feature") !== -1) { + params.version = "nightly"; + } + + result_block.innerText = "Running..."; + + fetch_with_timeout("https://play.rust-lang.org/evaluate.json", { + headers: { + 'Content-Type': "application/json", + }, + method: 'POST', + mode: 'cors', + body: JSON.stringify(params) + }) + .then(response => response.json()) + .then(response => { + if (response.result.trim() === '') { + result_block.innerText = "No output"; + result_block.classList.add("result-no-output"); + } else { + result_block.innerText = response.result; + result_block.classList.remove("result-no-output"); + } + }) + .catch(error => result_block.innerText = "Playground Communication: " + error.message); + } + + // Syntax highlighting Configuration + hljs.configure({ + tabReplace: ' ', // 4 spaces + languages: [], // Languages used for auto-detection + }); + + let code_nodes = Array + .from(document.querySelectorAll('code')) + // Don't highlight `inline code` blocks in headers. 
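(header anchor links carry the `header` class, which the filter below checks)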
+ .filter(function (node) {return !node.parentElement.classList.contains("header"); }); + + if (window.ace) { + // language-rust class needs to be removed for editable + // blocks or highlightjs will capture events + code_nodes + .filter(function (node) {return node.classList.contains("editable"); }) + .forEach(function (block) { block.classList.remove('language-rust'); }); + + code_nodes + .filter(function (node) {return !node.classList.contains("editable"); }) + .forEach(function (block) { hljs.highlightBlock(block); }); + } else { + code_nodes.forEach(function (block) { hljs.highlightBlock(block); }); + } + + // Adding the hljs class gives code blocks the color css + // even if highlighting doesn't apply + code_nodes.forEach(function (block) { block.classList.add('hljs'); }); + + Array.from(document.querySelectorAll("code.hljs")).forEach(function (block) { + + var lines = Array.from(block.querySelectorAll('.boring')); + // If no lines were hidden, return + if (!lines.length) { return; } + block.classList.add("hide-boring"); + + var buttons = document.createElement('div'); + buttons.className = 'buttons'; + buttons.innerHTML = ""; + + // add expand button + var pre_block = block.parentNode; + pre_block.insertBefore(buttons, pre_block.firstChild); + + pre_block.querySelector('.buttons').addEventListener('click', function (e) { + if (e.target.classList.contains('fa-eye')) { + e.target.classList.remove('fa-eye'); + e.target.classList.add('fa-eye-slash'); + e.target.title = 'Hide lines'; + e.target.setAttribute('aria-label', e.target.title); + + block.classList.remove('hide-boring'); + } else if (e.target.classList.contains('fa-eye-slash')) { + e.target.classList.remove('fa-eye-slash'); + e.target.classList.add('fa-eye'); + e.target.title = 'Show hidden lines'; + e.target.setAttribute('aria-label', e.target.title); + + block.classList.add('hide-boring'); + } + }); + }); + + if (window.playground_copyable) { + Array.from(document.querySelectorAll('pre code')).forEach(function (block) { + var pre_block = block.parentNode; + if (!pre_block.classList.contains('playground')) { + var buttons = pre_block.querySelector(".buttons"); + if (!buttons) { + buttons = document.createElement('div'); + buttons.className = 'buttons'; + pre_block.insertBefore(buttons, pre_block.firstChild); + } + + var clipButton = document.createElement('button'); + clipButton.className = 'clip-button'; + clipButton.title = 'Copy to clipboard'; + clipButton.setAttribute('aria-label', clipButton.title); + clipButton.innerHTML = ''; + + buttons.insertBefore(clipButton, buttons.firstChild); + } + }); + } + + // Process playground code blocks + Array.from(document.querySelectorAll(".playground")).forEach(function (pre_block) { + // Add play button + var buttons = pre_block.querySelector(".buttons"); + if (!buttons) { + buttons = document.createElement('div'); + buttons.className = 'buttons'; + pre_block.insertBefore(buttons, pre_block.firstChild); + } + + var runCodeButton = document.createElement('button'); + runCodeButton.className = 'fa fa-play play-button'; + runCodeButton.hidden = true; + runCodeButton.title = 'Run this code'; + runCodeButton.setAttribute('aria-label', runCodeButton.title); + + buttons.insertBefore(runCodeButton, buttons.firstChild); + runCodeButton.addEventListener('click', function (e) { + run_rust_code(pre_block); + }); + + if (window.playground_copyable) { + var copyCodeClipboardButton = document.createElement('button'); + copyCodeClipboardButton.className = 'clip-button'; + 
copyCodeClipboardButton.innerHTML = ''; + copyCodeClipboardButton.title = 'Copy to clipboard'; + copyCodeClipboardButton.setAttribute('aria-label', copyCodeClipboardButton.title); + + buttons.insertBefore(copyCodeClipboardButton, buttons.firstChild); + } + + let code_block = pre_block.querySelector("code"); + if (window.ace && code_block.classList.contains("editable")) { + var undoChangesButton = document.createElement('button'); + undoChangesButton.className = 'fa fa-history reset-button'; + undoChangesButton.title = 'Undo changes'; + undoChangesButton.setAttribute('aria-label', undoChangesButton.title); + + buttons.insertBefore(undoChangesButton, buttons.firstChild); + + undoChangesButton.addEventListener('click', function () { + let editor = window.ace.edit(code_block); + editor.setValue(editor.originalCode); + editor.clearSelection(); + }); + } + }); +})(); + +(function themes() { + var html = document.querySelector('html'); + var themeToggleButton = document.getElementById('theme-toggle'); + var themePopup = document.getElementById('theme-list'); + var themeColorMetaTag = document.querySelector('meta[name="theme-color"]'); + var themeIds = []; + themePopup.querySelectorAll('button.theme').forEach(function (el) { + themeIds.push(el.id); + }); + var stylesheets = { + ayuHighlight: document.querySelector("[href$='ayu-highlight.css']"), + tomorrowNight: document.querySelector("[href$='tomorrow-night.css']"), + highlight: document.querySelector("[href$='highlight.css']"), + }; + + function showThemes() { + themePopup.style.display = 'block'; + themeToggleButton.setAttribute('aria-expanded', true); + themePopup.querySelector("button#" + get_theme()).focus(); + } + + function updateThemeSelected() { + themePopup.querySelectorAll('.theme-selected').forEach(function (el) { + el.classList.remove('theme-selected'); + }); + themePopup.querySelector("button#" + get_theme()).classList.add('theme-selected'); + } + + function hideThemes() { + themePopup.style.display = 'none'; + themeToggleButton.setAttribute('aria-expanded', false); + themeToggleButton.focus(); + } + + function get_theme() { + var theme; + try { theme = localStorage.getItem('mdbook-theme'); } catch (e) { } + if (theme === null || theme === undefined || !themeIds.includes(theme)) { + return default_theme; + } else { + return theme; + } + } + + function set_theme(theme, store = true) { + let ace_theme; + + if (theme == 'coal' || theme == 'navy') { + stylesheets.ayuHighlight.disabled = true; + stylesheets.tomorrowNight.disabled = false; + stylesheets.highlight.disabled = true; + + ace_theme = "ace/theme/tomorrow_night"; + } else if (theme == 'ayu') { + stylesheets.ayuHighlight.disabled = false; + stylesheets.tomorrowNight.disabled = true; + stylesheets.highlight.disabled = true; + ace_theme = "ace/theme/tomorrow_night"; + } else { + stylesheets.ayuHighlight.disabled = true; + stylesheets.tomorrowNight.disabled = true; + stylesheets.highlight.disabled = false; + ace_theme = "ace/theme/dawn"; + } + + setTimeout(function () { + themeColorMetaTag.content = getComputedStyle(document.documentElement).backgroundColor; + }, 1); + + if (window.ace && window.editors) { + window.editors.forEach(function (editor) { + editor.setTheme(ace_theme); + }); + } + + var previousTheme = get_theme(); + + if (store) { + try { localStorage.setItem('mdbook-theme', theme); } catch (e) { } + } + + html.classList.remove(previousTheme); + html.classList.add(theme); + updateThemeSelected(); + } + + // Set theme + var theme = get_theme(); + + set_theme(theme, 
false); + + themeToggleButton.addEventListener('click', function () { + if (themePopup.style.display === 'block') { + hideThemes(); + } else { + showThemes(); + } + }); + + themePopup.addEventListener('click', function (e) { + var theme; + if (e.target.className === "theme") { + theme = e.target.id; + } else if (e.target.parentElement.className === "theme") { + theme = e.target.parentElement.id; + } else { + return; + } + set_theme(theme); + }); + + themePopup.addEventListener('focusout', function(e) { + // e.relatedTarget is null in Safari and Firefox on macOS (see workaround below) + if (!!e.relatedTarget && !themeToggleButton.contains(e.relatedTarget) && !themePopup.contains(e.relatedTarget)) { + hideThemes(); + } + }); + + // Should not be needed, but it works around an issue on macOS & iOS: https://github.com/rust-lang/mdBook/issues/628 + document.addEventListener('click', function(e) { + if (themePopup.style.display === 'block' && !themeToggleButton.contains(e.target) && !themePopup.contains(e.target)) { + hideThemes(); + } + }); + + document.addEventListener('keydown', function (e) { + if (e.altKey || e.ctrlKey || e.metaKey || e.shiftKey) { return; } + if (!themePopup.contains(e.target)) { return; } + + switch (e.key) { + case 'Escape': + e.preventDefault(); + hideThemes(); + break; + case 'ArrowUp': + e.preventDefault(); + var li = document.activeElement.parentElement; + if (li && li.previousElementSibling) { + li.previousElementSibling.querySelector('button').focus(); + } + break; + case 'ArrowDown': + e.preventDefault(); + var li = document.activeElement.parentElement; + if (li && li.nextElementSibling) { + li.nextElementSibling.querySelector('button').focus(); + } + break; + case 'Home': + e.preventDefault(); + themePopup.querySelector('li:first-child button').focus(); + break; + case 'End': + e.preventDefault(); + themePopup.querySelector('li:last-child button').focus(); + break; + } + }); +})(); + +(function sidebar() { + var body = document.querySelector("body"); + var sidebar = document.getElementById("sidebar"); + var sidebarLinks = document.querySelectorAll('#sidebar a'); + var sidebarToggleButton = document.getElementById("sidebar-toggle"); + var sidebarResizeHandle = document.getElementById("sidebar-resize-handle"); + var firstContact = null; + + function showSidebar() { + body.classList.remove('sidebar-hidden') + body.classList.add('sidebar-visible'); + Array.from(sidebarLinks).forEach(function (link) { + link.setAttribute('tabIndex', 0); + }); + sidebarToggleButton.setAttribute('aria-expanded', true); + sidebar.setAttribute('aria-hidden', false); + try { localStorage.setItem('mdbook-sidebar', 'visible'); } catch (e) { } + } + + function hideSidebar() { + body.classList.remove('sidebar-visible') + body.classList.add('sidebar-hidden'); + Array.from(sidebarLinks).forEach(function (link) { + link.setAttribute('tabIndex', -1); + }); + sidebarToggleButton.setAttribute('aria-expanded', false); + sidebar.setAttribute('aria-hidden', true); + try { localStorage.setItem('mdbook-sidebar', 'hidden'); } catch (e) { } + } + + // Toggle sidebar + sidebarToggleButton.addEventListener('click', function sidebarToggle() { + if (body.classList.contains("sidebar-hidden")) { + var current_width = parseInt( + document.documentElement.style.getPropertyValue('--sidebar-width'), 10); + if (current_width < 150) { + document.documentElement.style.setProperty('--sidebar-width', '150px'); + } + showSidebar(); + } else if (body.classList.contains("sidebar-visible")) { + hideSidebar(); + } else { + if 
(getComputedStyle(sidebar)['transform'] === 'none') { + hideSidebar(); + } else { + showSidebar(); + } + } + }); + + sidebarResizeHandle.addEventListener('mousedown', initResize, false); + + function initResize(e) { + window.addEventListener('mousemove', resize, false); + window.addEventListener('mouseup', stopResize, false); + body.classList.add('sidebar-resizing'); + } + function resize(e) { + var pos = (e.clientX - sidebar.offsetLeft); + if (pos < 20) { + hideSidebar(); + } else { + if (body.classList.contains("sidebar-hidden")) { + showSidebar(); + } + pos = Math.min(pos, window.innerWidth - 100); + document.documentElement.style.setProperty('--sidebar-width', pos + 'px'); + } + } + //on mouseup remove windows functions mousemove & mouseup + function stopResize(e) { + body.classList.remove('sidebar-resizing'); + window.removeEventListener('mousemove', resize, false); + window.removeEventListener('mouseup', stopResize, false); + } + + document.addEventListener('touchstart', function (e) { + firstContact = { + x: e.touches[0].clientX, + time: Date.now() + }; + }, { passive: true }); + + document.addEventListener('touchmove', function (e) { + if (!firstContact) + return; + + var curX = e.touches[0].clientX; + var xDiff = curX - firstContact.x, + tDiff = Date.now() - firstContact.time; + + if (tDiff < 250 && Math.abs(xDiff) >= 150) { + if (xDiff >= 0 && firstContact.x < Math.min(document.body.clientWidth * 0.25, 300)) + showSidebar(); + else if (xDiff < 0 && curX < 300) + hideSidebar(); + + firstContact = null; + } + }, { passive: true }); +})(); + +(function chapterNavigation() { + document.addEventListener('keydown', function (e) { + if (e.altKey || e.ctrlKey || e.metaKey || e.shiftKey) { return; } + if (window.search && window.search.hasFocus()) { return; } + var html = document.querySelector('html'); + + function next() { + var nextButton = document.querySelector('.nav-chapters.next'); + if (nextButton) { + window.location.href = nextButton.href; + } + } + function prev() { + var previousButton = document.querySelector('.nav-chapters.previous'); + if (previousButton) { + window.location.href = previousButton.href; + } + } + switch (e.key) { + case 'ArrowRight': + e.preventDefault(); + if (html.dir == 'rtl') { + prev(); + } else { + next(); + } + break; + case 'ArrowLeft': + e.preventDefault(); + if (html.dir == 'rtl') { + next(); + } else { + prev(); + } + break; + } + }); +})(); + +(function clipboard() { + var clipButtons = document.querySelectorAll('.clip-button'); + + function hideTooltip(elem) { + elem.firstChild.innerText = ""; + elem.className = 'clip-button'; + } + + function showTooltip(elem, msg) { + elem.firstChild.innerText = msg; + elem.className = 'clip-button tooltipped'; + } + + var clipboardSnippets = new ClipboardJS('.clip-button', { + text: function (trigger) { + hideTooltip(trigger); + let playground = trigger.closest("pre"); + return playground_text(playground, false); + } + }); + + Array.from(clipButtons).forEach(function (clipButton) { + clipButton.addEventListener('mouseout', function (e) { + hideTooltip(e.currentTarget); + }); + }); + + clipboardSnippets.on('success', function (e) { + e.clearSelection(); + showTooltip(e.trigger, "Copied!"); + }); + + clipboardSnippets.on('error', function (e) { + showTooltip(e.trigger, "Clipboard error!"); + }); +})(); + +(function scrollToTop () { + var menuTitle = document.querySelector('.menu-title'); + + menuTitle.addEventListener('click', function () { + document.scrollingElement.scrollTo({ top: 0, behavior: 'smooth' }); + 
}); +})(); + +(function controllMenu() { + var menu = document.getElementById('menu-bar'); + + (function controllPosition() { + var scrollTop = document.scrollingElement.scrollTop; + var prevScrollTop = scrollTop; + var minMenuY = -menu.clientHeight - 50; + // When the script loads, the page can be at any scroll (e.g. if you reforesh it). + menu.style.top = scrollTop + 'px'; + // Same as parseInt(menu.style.top.slice(0, -2), but faster + var topCache = menu.style.top.slice(0, -2); + menu.classList.remove('sticky'); + var stickyCache = false; // Same as menu.classList.contains('sticky'), but faster + document.addEventListener('scroll', function () { + scrollTop = Math.max(document.scrollingElement.scrollTop, 0); + // `null` means that it doesn't need to be updated + var nextSticky = null; + var nextTop = null; + var scrollDown = scrollTop > prevScrollTop; + var menuPosAbsoluteY = topCache - scrollTop; + if (scrollDown) { + nextSticky = false; + if (menuPosAbsoluteY > 0) { + nextTop = prevScrollTop; + } + } else { + if (menuPosAbsoluteY > 0) { + nextSticky = true; + } else if (menuPosAbsoluteY < minMenuY) { + nextTop = prevScrollTop + minMenuY; + } + } + if (nextSticky === true && stickyCache === false) { + menu.classList.add('sticky'); + stickyCache = true; + } else if (nextSticky === false && stickyCache === true) { + menu.classList.remove('sticky'); + stickyCache = false; + } + if (nextTop !== null) { + menu.style.top = nextTop + 'px'; + topCache = nextTop; + } + prevScrollTop = scrollTop; + }, { passive: true }); + })(); + (function controllBorder() { + function updateBorder() { + if (menu.offsetTop === 0) { + menu.classList.remove('bordered'); + } else { + menu.classList.add('bordered'); + } + } + updateBorder(); + document.addEventListener('scroll', updateBorder, { passive: true }); + })(); +})(); diff --git a/theme/css/chrome.css b/theme/css/chrome.css new file mode 100644 index 0000000..4cd7308 --- /dev/null +++ b/theme/css/chrome.css @@ -0,0 +1,640 @@ +/* CSS for UI elements (a.k.a. chrome) */ + +html { + scrollbar-color: var(--scrollbar) var(--bg); +} +#searchresults a, +.content a:link, +a:visited, +a > .hljs { + color: var(--links); +} + +/* + body-container is necessary because mobile browsers don't seem to like + overflow-x on the body tag when there is a tag. +*/ +#body-container { + /* + This is used when the sidebar pushes the body content off the side of + the screen on small screens. Without it, dragging on mobile Safari + will want to reposition the viewport in a weird way. 
+ */ + overflow-x: clip; +} + +/* Menu Bar */ + +#menu-bar, +#menu-bar-hover-placeholder { + z-index: 101; + margin: auto calc(0px - var(--page-padding)); +} +#menu-bar { + position: relative; + display: flex; + flex-wrap: wrap; + background-color: var(--bg); + border-block-end-color: var(--bg); + border-block-end-width: 1px; + border-block-end-style: solid; +} +#menu-bar.sticky, +#menu-bar-hover-placeholder:hover + #menu-bar, +#menu-bar:hover, +html.sidebar-visible #menu-bar { + position: -webkit-sticky; + position: sticky; + top: 0 !important; +} +#menu-bar-hover-placeholder { + position: sticky; + position: -webkit-sticky; + top: 0; + height: var(--menu-bar-height); +} +#menu-bar.bordered { + border-block-end-color: var(--table-border-color); +} +#menu-bar i, #menu-bar .icon-button { + position: relative; + padding: 0 8px; + z-index: 10; + line-height: var(--menu-bar-height); + cursor: pointer; + transition: color 0.5s; +} +@media only screen and (max-width: 420px) { + #menu-bar i, #menu-bar .icon-button { + padding: 0 5px; + } +} + +.icon-button { + border: none; + background: none; + padding: 0; + color: inherit; +} +.icon-button i { + margin: 0; +} + +.right-buttons { + margin: 0 15px; +} +.right-buttons a { + text-decoration: none; +} + +.left-buttons { + display: flex; + margin: 0 5px; +} +html:not(.js) .left-buttons button { + display: none; +} + +.menu-title { + display: inline-block; + font-weight: 200; + font-size: 2.4rem; + line-height: var(--menu-bar-height); + text-align: center; + margin: 0; + flex: 1; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} +.menu-title { + cursor: pointer; +} + +.menu-bar, +.menu-bar:visited, +.nav-chapters, +.nav-chapters:visited, +.mobile-nav-chapters, +.mobile-nav-chapters:visited, +.menu-bar .icon-button, +.menu-bar a i { + color: var(--icons); +} + +.menu-bar i:hover, +.menu-bar .icon-button:hover, +.nav-chapters:hover, +.mobile-nav-chapters i:hover { + color: var(--icons-hover); +} + +/* Nav Icons */ + +.nav-chapters { + font-size: 2.5em; + text-align: center; + text-decoration: none; + + position: fixed; + top: 0; + bottom: 0; + margin: 0; + max-width: 150px; + min-width: 90px; + + display: flex; + justify-content: center; + align-content: center; + flex-direction: column; + + transition: color 0.5s, background-color 0.5s; +} + +.nav-chapters:hover { + text-decoration: none; + background-color: var(--theme-hover); + transition: background-color 0.15s, color 0.15s; +} + +.nav-wrapper { + margin-block-start: 50px; + display: none; +} + +.mobile-nav-chapters { + font-size: 2.5em; + text-align: center; + text-decoration: none; + width: 90px; + border-radius: 5px; + background-color: var(--sidebar-bg); +} + +/* Only Firefox supports flow-relative values */ +.previous { float: left; } +[dir=rtl] .previous { float: right; } + +/* Only Firefox supports flow-relative values */ +.next { + float: right; + right: var(--page-padding); +} +[dir=rtl] .next { + float: left; + right: unset; + left: var(--page-padding); +} + +/* Use the correct buttons for RTL layouts*/ +[dir=rtl] .previous i.fa-angle-left:before {content:"\f105";} +[dir=rtl] .next i.fa-angle-right:before { content:"\f104"; } + +@media only screen and (max-width: 1080px) { + .nav-wide-wrapper { display: none; } + .nav-wrapper { display: block; } +} + +/* sidebar-visible */ +@media only screen and (max-width: 1380px) { + #sidebar-toggle-anchor:checked ~ .page-wrapper .nav-wide-wrapper { display: none; } + #sidebar-toggle-anchor:checked ~ .page-wrapper .nav-wrapper { 
display: block; } +} + +/* Inline code */ + +:not(pre) > .hljs { + display: inline; + padding: 0.1em 0.3em; + border-radius: 3px; +} + +:not(pre):not(a) > .hljs { + color: var(--inline-code-color); + overflow-x: initial; +} + +a:hover > .hljs { + text-decoration: underline; +} + +pre { + position: relative; +} +pre > .buttons { + position: absolute; + z-index: 100; + right: 0px; + top: 2px; + margin: 0px; + padding: 2px 0px; + + color: var(--sidebar-fg); + cursor: pointer; + visibility: hidden; + opacity: 0; + transition: visibility 0.1s linear, opacity 0.1s linear; +} +pre:hover > .buttons { + visibility: visible; + opacity: 1 +} +pre > .buttons :hover { + color: var(--sidebar-active); + border-color: var(--icons-hover); + background-color: var(--theme-hover); +} +pre > .buttons i { + margin-inline-start: 8px; +} +pre > .buttons button { + cursor: inherit; + margin: 0px 5px; + padding: 4px 4px 3px 5px; + font-size: 23px; + + border-style: solid; + border-width: 1px; + border-radius: 4px; + border-color: var(--icons); + background-color: var(--theme-popup-bg); + transition: 100ms; + transition-property: color,border-color,background-color; + color: var(--icons); +} + +pre > .buttons button.clip-button { + padding: 2px 4px 0px 6px; +} +pre > .buttons button.clip-button::before { + /* clipboard image from octicons (https://github.com/primer/octicons/tree/v2.0.0) MIT license + */ + content: url('data:image/svg+xml,\ +\ +\ +'); + filter: var(--copy-button-filter); +} +pre > .buttons button.clip-button:hover::before { + filter: var(--copy-button-filter-hover); +} + +@media (pointer: coarse) { + pre > .buttons button { + /* On mobile, make it easier to tap buttons. */ + padding: 0.3rem 1rem; + } + + .sidebar-resize-indicator { + /* Hide resize indicator on devices with limited accuracy */ + display: none; + } +} +pre > code { + display: block; + padding: 1rem; +} + +/* FIXME: ACE editors overlap their buttons because ACE does absolute + positioning within the code block which breaks padding. The only solution I + can think of is to move the padding to the outer pre tag (or insert a div + wrapper), but that would require fixing a whole bunch of CSS rules. 
+*/ +.hljs.ace_editor { + padding: 0rem 0rem; +} + +pre > .result { + margin-block-start: 10px; +} + +/* Search */ + +#searchresults a { + text-decoration: none; +} + +mark { + border-radius: 2px; + padding-block-start: 0; + padding-block-end: 1px; + padding-inline-start: 3px; + padding-inline-end: 3px; + margin-block-start: 0; + margin-block-end: -1px; + margin-inline-start: -3px; + margin-inline-end: -3px; + background-color: var(--search-mark-bg); + transition: background-color 300ms linear; + cursor: pointer; +} + +mark.fade-out { + background-color: rgba(0,0,0,0) !important; + cursor: auto; +} + +.searchbar-outer { + margin-inline-start: auto; + margin-inline-end: auto; + max-width: var(--content-max-width); +} + +#searchbar { + width: 100%; + margin-block-start: 5px; + margin-block-end: 0; + margin-inline-start: auto; + margin-inline-end: auto; + padding: 10px 16px; + transition: box-shadow 300ms ease-in-out; + border: 1px solid var(--searchbar-border-color); + border-radius: 3px; + background-color: var(--searchbar-bg); + color: var(--searchbar-fg); +} +#searchbar:focus, +#searchbar.active { + box-shadow: 0 0 3px var(--searchbar-shadow-color); +} + +.searchresults-header { + font-weight: bold; + font-size: 1em; + padding-block-start: 18px; + padding-block-end: 0; + padding-inline-start: 5px; + padding-inline-end: 0; + color: var(--searchresults-header-fg); +} + +.searchresults-outer { + margin-inline-start: auto; + margin-inline-end: auto; + max-width: var(--content-max-width); + border-block-end: 1px dashed var(--searchresults-border-color); +} + +ul#searchresults { + list-style: none; + padding-inline-start: 20px; +} +ul#searchresults li { + margin: 10px 0px; + padding: 2px; + border-radius: 2px; +} +ul#searchresults li.focus { + background-color: var(--searchresults-li-bg); +} +ul#searchresults span.teaser { + display: block; + clear: both; + margin-block-start: 5px; + margin-block-end: 0; + margin-inline-start: 20px; + margin-inline-end: 0; + font-size: 0.8em; +} +ul#searchresults span.teaser em { + font-weight: bold; + font-style: normal; +} + +/* Sidebar */ + +.sidebar { + position: fixed; + left: 0; + top: 0; + bottom: 0; + width: var(--sidebar-width); + font-size: 0.875em; + box-sizing: border-box; + -webkit-overflow-scrolling: touch; + overscroll-behavior-y: contain; + background-color: var(--sidebar-bg); + color: var(--sidebar-fg); +} +.sidebar-iframe-inner { + background-color: var(--sidebar-bg); + color: var(--sidebar-fg); + padding: 10px 10px; + margin: 0; + font-size: 1.4rem; +} +.sidebar-iframe-outer { + border: none; + height: 100%; + position: absolute; + top: 0; + bottom: 0; + left: 0; + right: 0; +} +[dir=rtl] .sidebar { left: unset; right: 0; } +.sidebar-resizing { + -moz-user-select: none; + -webkit-user-select: none; + -ms-user-select: none; + user-select: none; +} +html:not(.sidebar-resizing) .sidebar { + transition: transform 0.3s; /* Animation: slide away */ +} +.sidebar code { + line-height: 2em; +} +.sidebar .sidebar-scrollbox { + overflow-y: auto; + position: absolute; + top: 0; + bottom: 0; + left: 0; + right: 0; + padding: 10px 10px; +} +.sidebar .sidebar-resize-handle { + position: absolute; + cursor: col-resize; + width: 0; + right: calc(var(--sidebar-resize-indicator-width) * -1); + top: 0; + bottom: 0; + display: flex; + align-items: center; +} + +.sidebar-resize-handle .sidebar-resize-indicator { + width: 100%; + height: 12px; + background-color: var(--icons); + margin-inline-start: var(--sidebar-resize-indicator-space); +} + +[dir=rtl] .sidebar 
.sidebar-resize-handle { + left: calc(var(--sidebar-resize-indicator-width) * -1); + right: unset; +} +.js .sidebar .sidebar-resize-handle { + cursor: col-resize; + width: calc(var(--sidebar-resize-indicator-width) - var(--sidebar-resize-indicator-space)); +} +/* sidebar-hidden */ +#sidebar-toggle-anchor:not(:checked) ~ .sidebar { + transform: translateX(calc(0px - var(--sidebar-width) - var(--sidebar-resize-indicator-width))); + z-index: -1; +} +[dir=rtl] #sidebar-toggle-anchor:not(:checked) ~ .sidebar { + transform: translateX(calc(var(--sidebar-width) + var(--sidebar-resize-indicator-width))); +} +.sidebar::-webkit-scrollbar { + background: var(--sidebar-bg); +} +.sidebar::-webkit-scrollbar-thumb { + background: var(--scrollbar); +} + +/* sidebar-visible */ +#sidebar-toggle-anchor:checked ~ .page-wrapper { + transform: translateX(calc(var(--sidebar-width) + var(--sidebar-resize-indicator-width))); +} +[dir=rtl] #sidebar-toggle-anchor:checked ~ .page-wrapper { + transform: translateX(calc(0px - var(--sidebar-width) - var(--sidebar-resize-indicator-width))); +} +@media only screen and (min-width: 620px) { + #sidebar-toggle-anchor:checked ~ .page-wrapper { + transform: none; + margin-inline-start: calc(var(--sidebar-width) + var(--sidebar-resize-indicator-width)); + } + [dir=rtl] #sidebar-toggle-anchor:checked ~ .page-wrapper { + transform: none; + } +} + +.chapter { + list-style: none outside none; + padding-inline-start: 0; + line-height: 2.2em; +} + +.chapter ol { + width: 100%; +} + +.chapter li { + display: flex; + color: var(--sidebar-non-existant); +} +.chapter li a { + display: block; + padding: 0; + text-decoration: none; + color: var(--sidebar-fg); +} + +.chapter li a:hover { + color: var(--sidebar-active); +} + +.chapter li a.active { + color: var(--sidebar-active); +} + +.chapter li > a.toggle { + cursor: pointer; + display: block; + margin-inline-start: auto; + padding: 0 10px; + user-select: none; + opacity: 0.68; +} + +.chapter li > a.toggle div { + transition: transform 0.5s; +} + +/* collapse the section */ +.chapter li:not(.expanded) + li > ol { + display: none; +} + +.chapter li.chapter-item { + line-height: 1.5em; + margin-block-start: 0.6em; +} + +.chapter li.expanded > a.toggle div { + transform: rotate(90deg); +} + +.spacer { + width: 100%; + height: 3px; + margin: 5px 0px; +} +.chapter .spacer { + background-color: var(--sidebar-spacer); +} + +@media (-moz-touch-enabled: 1), (pointer: coarse) { + .chapter li a { padding: 5px 0; } + .spacer { margin: 10px 0; } +} + +.section { + list-style: none outside none; + padding-inline-start: 20px; + line-height: 1.9em; +} + +/* Theme Menu Popup */ + +.theme-popup { + position: absolute; + left: 10px; + top: var(--menu-bar-height); + z-index: 1000; + border-radius: 4px; + font-size: 0.7em; + color: var(--fg); + background: var(--theme-popup-bg); + border: 1px solid var(--theme-popup-border); + margin: 0; + padding: 0; + list-style: none; + display: none; + /* Don't let the children's background extend past the rounded corners. 
*/ + overflow: hidden; +} +[dir=rtl] .theme-popup { left: unset; right: 10px; } +.theme-popup .default { + color: var(--icons); +} +.theme-popup .theme { + width: 100%; + border: 0; + margin: 0; + padding: 2px 20px; + line-height: 25px; + white-space: nowrap; + text-align: start; + cursor: pointer; + color: inherit; + background: inherit; + font-size: inherit; +} +.theme-popup .theme:hover { + background-color: var(--theme-hover); +} + +.theme-selected::before { + display: inline-block; + content: "✓"; + margin-inline-start: -14px; + width: 14px; +} diff --git a/theme/css/general.css b/theme/css/general.css new file mode 100644 index 0000000..0862b51 --- /dev/null +++ b/theme/css/general.css @@ -0,0 +1,242 @@ +/* Base styles and content styles */ + +:root { + /* Browser default font-size is 16px, this way 1 rem = 10px */ + font-size: 62.5%; + color-scheme: var(--color-scheme); +} + +html { + font-family: "Open Sans", sans-serif; + color: var(--fg); + background-color: var(--bg); + text-size-adjust: none; + -webkit-text-size-adjust: none; +} + +body { + margin: 0; + font-size: 1.6rem; + overflow-x: hidden; +} + +code { + font-family: var(--mono-font) !important; + font-size: var(--code-font-size); + direction: ltr !important; +} + +/* make long words/inline code not x overflow */ +main { + overflow-wrap: break-word; +} + +/* make wide tables scroll if they overflow */ +.table-wrapper { + overflow-x: auto; +} + +/* Don't change font size in headers. */ +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + font-size: unset; +} + +.left { float: left; } +.right { float: right; } +.boring { opacity: 0.6; } +.hide-boring .boring { display: none; } +.hidden { display: none !important; } + +h2, h3 { margin-block-start: 2.5em; } +h4, h5 { margin-block-start: 2em; } + +.header + .header h3, +.header + .header h4, +.header + .header h5 { + margin-block-start: 1em; +} + +h1:target::before, +h2:target::before, +h3:target::before, +h4:target::before, +h5:target::before, +h6:target::before { + display: inline-block; + content: "»"; + margin-inline-start: -30px; + width: 30px; +} + +/* This is broken on Safari as of version 14, but is fixed + in Safari Technology Preview 117 which I think will be Safari 14.2. 
+ https://bugs.webkit.org/show_bug.cgi?id=218076 +*/ +:target { + /* Safari does not support logical properties */ + scroll-margin-top: calc(var(--menu-bar-height) + 0.5em); +} + +.page { + outline: 0; + padding: 0 var(--page-padding); + margin-block-start: calc(0px - var(--menu-bar-height)); /* Compensate for the #menu-bar-hover-placeholder */ +} +.page-wrapper { + box-sizing: border-box; + background-color: var(--bg); +} +.no-js .page-wrapper, +.js:not(.sidebar-resizing) .page-wrapper { + transition: margin-left 0.3s ease, transform 0.3s ease; /* Animation: slide away */ +} +[dir=rtl] .js:not(.sidebar-resizing) .page-wrapper { + transition: margin-right 0.3s ease, transform 0.3s ease; /* Animation: slide away */ +} + +.content { + overflow-y: auto; + padding: 0 5px 50px 5px; +} +.content main { + margin-inline-start: auto; + margin-inline-end: auto; + max-width: var(--content-max-width); +} +.content p { line-height: 1.45em; } +.content ol { line-height: 1.45em; } +.content ul { line-height: 1.45em; } +.content a { text-decoration: none; } +.content a:hover { text-decoration: underline; } +.content img, .content video { max-width: 100%; } +.content .header:link, +.content .header:visited { + color: var(--fg); +} +.content .header:link, +.content .header:visited:hover { + text-decoration: none; +} + +table { + margin: 0 auto; + border-collapse: collapse; +} +table td { + padding: 3px 20px; + border: 1px var(--table-border-color) solid; +} +table thead { + background: var(--table-header-bg); +} +table thead td { + font-weight: 700; + border: none; +} +table thead th { + padding: 3px 20px; +} +table thead tr { + border: 1px var(--table-header-bg) solid; +} +/* Alternate background colors for rows */ +table tbody tr:nth-child(2n) { + background: var(--table-alternate-bg); +} + + +blockquote { + margin: 20px 0; + padding: 0 20px; + color: var(--fg); + background-color: var(--quote-bg); + border-block-start: .1em solid var(--quote-border); + border-block-end: .1em solid var(--quote-border); +} + +.warning { + margin: 20px; + padding: 0 20px; + border-inline-start: 2px solid var(--warning-border); +} + +.warning:before { + position: absolute; + width: 3rem; + height: 3rem; + margin-inline-start: calc(-1.5rem - 21px); + content: "ⓘ"; + text-align: center; + background-color: var(--bg); + color: var(--warning-border); + font-weight: bold; + font-size: 2rem; +} + +blockquote .warning:before { + background-color: var(--quote-bg); +} + +kbd { + background-color: var(--table-border-color); + border-radius: 4px; + border: solid 1px var(--theme-popup-border); + box-shadow: inset 0 -1px 0 var(--theme-hover); + display: inline-block; + font-size: var(--code-font-size); + font-family: var(--mono-font); + line-height: 10px; + padding: 4px 5px; + vertical-align: middle; +} + +sup { + /* Set the line-height for superscript and footnote references so that there + isn't an awkward space appearing above lines that contain the footnote. + + See https://github.com/rust-lang/mdBook/pull/2443#discussion_r1813773583 + for an explanation. 
+ */ + line-height: 0; +} + +:not(.footnote-definition) + .footnote-definition, +.footnote-definition + :not(.footnote-definition) { + margin-block-start: 2em; +} +.footnote-definition { + font-size: 0.9em; + margin: 0.5em 0; +} +.footnote-definition p { + display: inline; +} + +.tooltiptext { + position: absolute; + visibility: hidden; + color: #fff; + background-color: #333; + transform: translateX(-50%); /* Center by moving tooltip 50% of its width left */ + left: -8px; /* Half of the width of the icon */ + top: -35px; + font-size: 0.8em; + text-align: center; + border-radius: 6px; + padding: 5px 8px; + margin: 5px; + z-index: 1000; +} +.tooltipped .tooltiptext { + visibility: visible; +} + +.chapter li.part-title { + color: var(--sidebar-fg); + margin: 5px 0px; + font-weight: bold; +} + +.result-no-output { + font-style: italic; +} diff --git a/theme/css/print.css b/theme/css/print.css new file mode 100644 index 0000000..80ec3a5 --- /dev/null +++ b/theme/css/print.css @@ -0,0 +1,50 @@ + +#sidebar, +#menu-bar, +.nav-chapters, +.mobile-nav-chapters { + display: none; +} + +#page-wrapper.page-wrapper { + transform: none !important; + margin-inline-start: 0px; + overflow-y: initial; +} + +#content { + max-width: none; + margin: 0; + padding: 0; +} + +.page { + overflow-y: initial; +} + +code { + direction: ltr !important; +} + +pre > .buttons { + z-index: 2; +} + +a, a:visited, a:active, a:hover { + color: #4183c4; + text-decoration: none; +} + +h1, h2, h3, h4, h5, h6 { + page-break-inside: avoid; + page-break-after: avoid; +} + +pre, code { + page-break-inside: avoid; + white-space: pre-wrap; +} + +.fa { + display: none !important; +} diff --git a/theme/css/variables.css b/theme/css/variables.css new file mode 100644 index 0000000..12d1db7 --- /dev/null +++ b/theme/css/variables.css @@ -0,0 +1,309 @@ + +/* Globals */ + +:root { + --sidebar-width: 300px; + --sidebar-resize-indicator-width: 8px; + --sidebar-resize-indicator-space: 2px; + --page-padding: 15px; + --content-max-width: 750px; + --menu-bar-height: 50px; + --mono-font: "Source Code Pro", Consolas, "Ubuntu Mono", Menlo, "DejaVu Sans Mono", monospace, monospace; + --code-font-size: 0.875em /* please adjust the ace font size accordingly in editor.js */ +} + +/* Themes */ + +.ayu { + --bg: hsl(210, 25%, 8%); + --fg: #c5c5c5; + + --sidebar-bg: #14191f; + --sidebar-fg: #c8c9db; + --sidebar-non-existant: #5c6773; + --sidebar-active: #ffb454; + --sidebar-spacer: #2d334f; + + --scrollbar: var(--sidebar-fg); + + --icons: #737480; + --icons-hover: #b7b9cc; + + --links: #0096cf; + + --inline-code-color: #ffb454; + + --theme-popup-bg: #14191f; + --theme-popup-border: #5c6773; + --theme-hover: #191f26; + + --quote-bg: hsl(226, 15%, 17%); + --quote-border: hsl(226, 15%, 22%); + + --warning-border: #ff8e00; + + --table-border-color: hsl(210, 25%, 13%); + --table-header-bg: hsl(210, 25%, 28%); + --table-alternate-bg: hsl(210, 25%, 11%); + + --searchbar-border-color: #848484; + --searchbar-bg: #424242; + --searchbar-fg: #fff; + --searchbar-shadow-color: #d4c89f; + --searchresults-header-fg: #666; + --searchresults-border-color: #888; + --searchresults-li-bg: #252932; + --search-mark-bg: #e3b171; + + --color-scheme: dark; + + /* Same as `--icons` */ + --copy-button-filter: invert(45%) sepia(6%) saturate(621%) hue-rotate(198deg) brightness(99%) contrast(85%); + /* Same as `--sidebar-active` */ + --copy-button-filter-hover: invert(68%) sepia(55%) saturate(531%) hue-rotate(341deg) brightness(104%) contrast(101%); +} + +.coal { + --bg: hsl(200, 7%, 
8%); + --fg: #98a3ad; + + --sidebar-bg: #292c2f; + --sidebar-fg: #a1adb8; + --sidebar-non-existant: #505254; + --sidebar-active: #3473ad; + --sidebar-spacer: #393939; + + --scrollbar: var(--sidebar-fg); + + --icons: #43484d; + --icons-hover: #b3c0cc; + + --links: #2b79a2; + + --inline-code-color: #c5c8c6; + + --theme-popup-bg: #141617; + --theme-popup-border: #43484d; + --theme-hover: #1f2124; + + --quote-bg: hsl(234, 21%, 18%); + --quote-border: hsl(234, 21%, 23%); + + --warning-border: #ff8e00; + + --table-border-color: hsl(200, 7%, 13%); + --table-header-bg: hsl(200, 7%, 28%); + --table-alternate-bg: hsl(200, 7%, 11%); + + --searchbar-border-color: #aaa; + --searchbar-bg: #b7b7b7; + --searchbar-fg: #000; + --searchbar-shadow-color: #aaa; + --searchresults-header-fg: #666; + --searchresults-border-color: #98a3ad; + --searchresults-li-bg: #2b2b2f; + --search-mark-bg: #355c7d; + + --color-scheme: dark; + + /* Same as `--icons` */ + --copy-button-filter: invert(26%) sepia(8%) saturate(575%) hue-rotate(169deg) brightness(87%) contrast(82%); + /* Same as `--sidebar-active` */ + --copy-button-filter-hover: invert(36%) sepia(70%) saturate(503%) hue-rotate(167deg) brightness(98%) contrast(89%); +} + +.light, html:not(.js) { + --bg: hsl(0, 0%, 100%); + --fg: hsl(0, 0%, 0%); + + --sidebar-bg: #fafafa; + --sidebar-fg: hsl(0, 0%, 0%); + --sidebar-non-existant: #aaaaaa; + --sidebar-active: #1f1fff; + --sidebar-spacer: #f4f4f4; + + --scrollbar: #8F8F8F; + + --icons: #747474; + --icons-hover: #000000; + + --links: #20609f; + + --inline-code-color: #301900; + + --theme-popup-bg: #fafafa; + --theme-popup-border: #cccccc; + --theme-hover: #e6e6e6; + + --quote-bg: hsl(197, 37%, 96%); + --quote-border: hsl(197, 37%, 91%); + + --warning-border: #ff8e00; + + --table-border-color: hsl(0, 0%, 95%); + --table-header-bg: hsl(0, 0%, 80%); + --table-alternate-bg: hsl(0, 0%, 97%); + + --searchbar-border-color: #aaa; + --searchbar-bg: #fafafa; + --searchbar-fg: #000; + --searchbar-shadow-color: #aaa; + --searchresults-header-fg: #666; + --searchresults-border-color: #888; + --searchresults-li-bg: #e4f2fe; + --search-mark-bg: #a2cff5; + + --color-scheme: light; + + /* Same as `--icons` */ + --copy-button-filter: invert(45.49%); + /* Same as `--sidebar-active` */ + --copy-button-filter-hover: invert(14%) sepia(93%) saturate(4250%) hue-rotate(243deg) brightness(99%) contrast(130%); +} + +.navy { + --bg: hsl(226, 23%, 11%); + --fg: #bcbdd0; + + --sidebar-bg: #282d3f; + --sidebar-fg: #c8c9db; + --sidebar-non-existant: #505274; + --sidebar-active: #2b79a2; + --sidebar-spacer: #2d334f; + + --scrollbar: var(--sidebar-fg); + + --icons: #737480; + --icons-hover: #b7b9cc; + + --links: #2b79a2; + + --inline-code-color: #c5c8c6; + + --theme-popup-bg: #161923; + --theme-popup-border: #737480; + --theme-hover: #282e40; + + --quote-bg: hsl(226, 15%, 17%); + --quote-border: hsl(226, 15%, 22%); + + --warning-border: #ff8e00; + + --table-border-color: hsl(226, 23%, 16%); + --table-header-bg: hsl(226, 23%, 31%); + --table-alternate-bg: hsl(226, 23%, 14%); + + --searchbar-border-color: #aaa; + --searchbar-bg: #aeaec6; + --searchbar-fg: #000; + --searchbar-shadow-color: #aaa; + --searchresults-header-fg: #5f5f71; + --searchresults-border-color: #5c5c68; + --searchresults-li-bg: #242430; + --search-mark-bg: #a2cff5; + + --color-scheme: dark; + + /* Same as `--icons` */ + --copy-button-filter: invert(51%) sepia(10%) saturate(393%) hue-rotate(198deg) brightness(86%) contrast(87%); + /* Same as `--sidebar-active` */ + 
--copy-button-filter-hover: invert(46%) sepia(20%) saturate(1537%) hue-rotate(156deg) brightness(85%) contrast(90%); +} + +.rust { + --bg: hsl(60, 9%, 87%); + --fg: #262625; + + --sidebar-bg: #3b2e2a; + --sidebar-fg: #c8c9db; + --sidebar-non-existant: #505254; + --sidebar-active: #e69f67; + --sidebar-spacer: #45373a; + + --scrollbar: var(--sidebar-fg); + + --icons: #737480; + --icons-hover: #262625; + + --links: #2b79a2; + + --inline-code-color: #6e6b5e; + + --theme-popup-bg: #e1e1db; + --theme-popup-border: #b38f6b; + --theme-hover: #99908a; + + --quote-bg: hsl(60, 5%, 75%); + --quote-border: hsl(60, 5%, 70%); + + --warning-border: #ff8e00; + + --table-border-color: hsl(60, 9%, 82%); + --table-header-bg: #b3a497; + --table-alternate-bg: hsl(60, 9%, 84%); + + --searchbar-border-color: #aaa; + --searchbar-bg: #fafafa; + --searchbar-fg: #000; + --searchbar-shadow-color: #aaa; + --searchresults-header-fg: #666; + --searchresults-border-color: #888; + --searchresults-li-bg: #dec2a2; + --search-mark-bg: #e69f67; + + /* Same as `--icons` */ + --copy-button-filter: invert(51%) sepia(10%) saturate(393%) hue-rotate(198deg) brightness(86%) contrast(87%); + /* Same as `--sidebar-active` */ + --copy-button-filter-hover: invert(77%) sepia(16%) saturate(1798%) hue-rotate(328deg) brightness(98%) contrast(83%); +} + +@media (prefers-color-scheme: dark) { + html:not(.js) { + --bg: hsl(200, 7%, 8%); + --fg: #98a3ad; + + --sidebar-bg: #292c2f; + --sidebar-fg: #a1adb8; + --sidebar-non-existant: #505254; + --sidebar-active: #3473ad; + --sidebar-spacer: #393939; + + --scrollbar: var(--sidebar-fg); + + --icons: #43484d; + --icons-hover: #b3c0cc; + + --links: #2b79a2; + + --inline-code-color: #c5c8c6; + + --theme-popup-bg: #141617; + --theme-popup-border: #43484d; + --theme-hover: #1f2124; + + --quote-bg: hsl(234, 21%, 18%); + --quote-border: hsl(234, 21%, 23%); + + --warning-border: #ff8e00; + + --table-border-color: hsl(200, 7%, 13%); + --table-header-bg: hsl(200, 7%, 28%); + --table-alternate-bg: hsl(200, 7%, 11%); + + --searchbar-border-color: #aaa; + --searchbar-bg: #b7b7b7; + --searchbar-fg: #000; + --searchbar-shadow-color: #aaa; + --searchresults-header-fg: #666; + --searchresults-border-color: #98a3ad; + --searchresults-li-bg: #2b2b2f; + --search-mark-bg: #355c7d; + + --color-scheme: dark; + + /* Same as `--icons` */ + --copy-button-filter: invert(26%) sepia(8%) saturate(575%) hue-rotate(169deg) brightness(87%) contrast(82%); + /* Same as `--sidebar-active` */ + --copy-button-filter-hover: invert(36%) sepia(70%) saturate(503%) hue-rotate(167deg) brightness(98%) contrast(89%); + } +} diff --git a/theme/favicon.png b/theme/favicon.png new file mode 100644 index 0000000..a5b1aa1 Binary files /dev/null and b/theme/favicon.png differ diff --git a/theme/favicon.svg b/theme/favicon.svg new file mode 100644 index 0000000..90e0ea5 --- /dev/null +++ b/theme/favicon.svg @@ -0,0 +1,22 @@ + + + + + diff --git a/theme/fonts/OPEN-SANS-LICENSE.txt b/theme/fonts/OPEN-SANS-LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/theme/fonts/OPEN-SANS-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
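   As an illustration of the Appendix instructions above, here is a sketch (not part of the license text) of how that boilerplate notice might be wrapped in block-comment syntax for a CSS asset such as this theme's fonts.css; the bracketed fields are the license's own placeholders and are left unfilled:

   /*
    * Copyright [yyyy] [name of copyright owner]
    *
    * Licensed under the Apache License, Version 2.0 (the "License");
    * you may not use this file except in compliance with the License.
    * You may obtain a copy of the License at
    *
    *     http://www.apache.org/licenses/LICENSE-2.0
    *
    * Unless required by applicable law or agreed to in writing, software
    * distributed under the License is distributed on an "AS IS" BASIS,
    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    * See the License for the specific language governing permissions and
    * limitations under the License.
    */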
diff --git a/theme/fonts/SOURCE-CODE-PRO-LICENSE.txt b/theme/fonts/SOURCE-CODE-PRO-LICENSE.txt new file mode 100644 index 0000000..366206f --- /dev/null +++ b/theme/fonts/SOURCE-CODE-PRO-LICENSE.txt @@ -0,0 +1,93 @@ +Copyright 2010, 2012 Adobe Systems Incorporated (http://www.adobe.com/), with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark of Adobe Systems Incorporated in the United States and/or other countries. + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. 
+ +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/theme/fonts/fonts.css b/theme/fonts/fonts.css new file mode 100644 index 0000000..858efa5 --- /dev/null +++ b/theme/fonts/fonts.css @@ -0,0 +1,100 @@ +/* Open Sans is licensed under the Apache License, Version 2.0. See http://www.apache.org/licenses/LICENSE-2.0 */ +/* Source Code Pro is under the Open Font License. See https://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=OFL */ + +/* open-sans-300 - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: normal; + font-weight: 300; + src: local('Open Sans Light'), local('OpenSans-Light'), + url('open-sans-v17-all-charsets-300.woff2') format('woff2'); +} + +/* open-sans-300italic - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: italic; + font-weight: 300; + src: local('Open Sans Light Italic'), local('OpenSans-LightItalic'), + url('open-sans-v17-all-charsets-300italic.woff2') format('woff2'); +} + +/* open-sans-regular - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: normal; + font-weight: 400; + src: local('Open Sans Regular'), local('OpenSans-Regular'), + url('open-sans-v17-all-charsets-regular.woff2') format('woff2'); +} + +/* open-sans-italic - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: italic; + font-weight: 400; + src: local('Open Sans Italic'), local('OpenSans-Italic'), + url('open-sans-v17-all-charsets-italic.woff2') format('woff2'); +} + +/* open-sans-600 - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: normal; + font-weight: 600; + src: local('Open Sans SemiBold'), local('OpenSans-SemiBold'), + url('open-sans-v17-all-charsets-600.woff2') format('woff2'); +} + +/* open-sans-600italic - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: italic; + font-weight: 600; + src: local('Open Sans SemiBold Italic'), local('OpenSans-SemiBoldItalic'), + url('open-sans-v17-all-charsets-600italic.woff2') 
format('woff2'); +} + +/* open-sans-700 - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: normal; + font-weight: 700; + src: local('Open Sans Bold'), local('OpenSans-Bold'), + url('open-sans-v17-all-charsets-700.woff2') format('woff2'); +} + +/* open-sans-700italic - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: italic; + font-weight: 700; + src: local('Open Sans Bold Italic'), local('OpenSans-BoldItalic'), + url('open-sans-v17-all-charsets-700italic.woff2') format('woff2'); +} + +/* open-sans-800 - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: normal; + font-weight: 800; + src: local('Open Sans ExtraBold'), local('OpenSans-ExtraBold'), + url('open-sans-v17-all-charsets-800.woff2') format('woff2'); +} + +/* open-sans-800italic - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: italic; + font-weight: 800; + src: local('Open Sans ExtraBold Italic'), local('OpenSans-ExtraBoldItalic'), + url('open-sans-v17-all-charsets-800italic.woff2') format('woff2'); +} + +/* source-code-pro-500 - latin_vietnamese_latin-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Source Code Pro'; + font-style: normal; + font-weight: 500; + src: url('source-code-pro-v11-all-charsets-500.woff2') format('woff2'); +} diff --git a/theme/fonts/open-sans-v17-all-charsets-300.woff2 b/theme/fonts/open-sans-v17-all-charsets-300.woff2 new file mode 100644 index 0000000..9f51be3 Binary files /dev/null and b/theme/fonts/open-sans-v17-all-charsets-300.woff2 differ diff --git a/theme/fonts/open-sans-v17-all-charsets-300italic.woff2 b/theme/fonts/open-sans-v17-all-charsets-300italic.woff2 new file mode 100644 index 0000000..2f54544 Binary files /dev/null and b/theme/fonts/open-sans-v17-all-charsets-300italic.woff2 differ diff --git a/theme/fonts/open-sans-v17-all-charsets-600.woff2 b/theme/fonts/open-sans-v17-all-charsets-600.woff2 new file mode 100644 index 0000000..f503d55 Binary files /dev/null and b/theme/fonts/open-sans-v17-all-charsets-600.woff2 differ diff --git a/theme/fonts/open-sans-v17-all-charsets-600italic.woff2 b/theme/fonts/open-sans-v17-all-charsets-600italic.woff2 new file mode 100644 index 0000000..c99aabe Binary files /dev/null and b/theme/fonts/open-sans-v17-all-charsets-600italic.woff2 differ diff --git a/theme/fonts/open-sans-v17-all-charsets-700.woff2 b/theme/fonts/open-sans-v17-all-charsets-700.woff2 new file mode 100644 index 0000000..421a1ab Binary files /dev/null and b/theme/fonts/open-sans-v17-all-charsets-700.woff2 differ diff --git a/theme/fonts/open-sans-v17-all-charsets-700italic.woff2 b/theme/fonts/open-sans-v17-all-charsets-700italic.woff2 new file mode 100644 index 0000000..12ce3d2 Binary files /dev/null and b/theme/fonts/open-sans-v17-all-charsets-700italic.woff2 differ diff --git a/theme/fonts/open-sans-v17-all-charsets-800.woff2 b/theme/fonts/open-sans-v17-all-charsets-800.woff2 new file mode 100644 index 0000000..c94a223 Binary files /dev/null and b/theme/fonts/open-sans-v17-all-charsets-800.woff2 differ diff --git a/theme/fonts/open-sans-v17-all-charsets-800italic.woff2 b/theme/fonts/open-sans-v17-all-charsets-800italic.woff2 new file mode 100644 index 0000000..eed7d3c Binary files /dev/null and b/theme/fonts/open-sans-v17-all-charsets-800italic.woff2 differ diff --git 
a/theme/fonts/open-sans-v17-all-charsets-italic.woff2 b/theme/fonts/open-sans-v17-all-charsets-italic.woff2 new file mode 100644 index 0000000..398b68a Binary files /dev/null and b/theme/fonts/open-sans-v17-all-charsets-italic.woff2 differ diff --git a/theme/fonts/open-sans-v17-all-charsets-regular.woff2 b/theme/fonts/open-sans-v17-all-charsets-regular.woff2 new file mode 100644 index 0000000..8383e94 Binary files /dev/null and b/theme/fonts/open-sans-v17-all-charsets-regular.woff2 differ diff --git a/theme/fonts/source-code-pro-v11-all-charsets-500.woff2 b/theme/fonts/source-code-pro-v11-all-charsets-500.woff2 new file mode 100644 index 0000000..7222456 Binary files /dev/null and b/theme/fonts/source-code-pro-v11-all-charsets-500.woff2 differ diff --git a/theme/highlight.css b/theme/highlight.css new file mode 100644 index 0000000..352c79b --- /dev/null +++ b/theme/highlight.css @@ -0,0 +1,83 @@ +/*
+ * An increased contrast highlighting scheme loosely based on the
+ * "Base16 Atelier Dune Light" theme by Bram de Haan
+ * (http://atelierbram.github.io/syntax-highlighting/atelier-schemes/dune)
+ * Original Base16 color scheme by Chris Kempson
+ * (https://github.com/chriskempson/base16)
+ */
+
+/* Comment */
+.hljs-comment,
+.hljs-quote {
+  color: #575757;
+}
+
+/* Red */
+.hljs-variable,
+.hljs-template-variable,
+.hljs-attribute,
+.hljs-attr,
+.hljs-tag,
+.hljs-name,
+.hljs-regexp,
+.hljs-link,
+.hljs-selector-id,
+.hljs-selector-class {
+  color: #d70025;
+}
+
+/* Orange */
+.hljs-number,
+.hljs-meta,
+.hljs-built_in,
+.hljs-builtin-name,
+.hljs-literal,
+.hljs-type,
+.hljs-params {
+  color: #b21e00;
+}
+
+/* Green */
+.hljs-string,
+.hljs-symbol,
+.hljs-bullet {
+  color: #008200;
+}
+
+/* Blue */
+.hljs-title,
+.hljs-section {
+  color: #0030f2;
+}
+
+/* Purple */
+.hljs-keyword,
+.hljs-selector-tag {
+  color: #9d00ec;
+}
+
+.hljs {
+  display: block;
+  overflow-x: auto;
+  background: #f6f7f6;
+  color: #000;
+}
+
+.hljs-emphasis {
+  font-style: italic;
+}
+
+.hljs-strong {
+  font-weight: bold;
+}
+
+.hljs-addition {
+  color: #22863a;
+  background-color: #f0fff4;
+}
+
+.hljs-deletion {
+  color: #b31d28;
+  background-color: #ffeef0;
+} diff --git a/theme/highlight.js b/theme/highlight.js new file mode 100644 index 0000000..18d2434 --- /dev/null +++ b/theme/highlight.js @@ -0,0 +1,54 @@ +/*
+  Highlight.js 10.1.1 (93fd0d73)
+  License: BSD-3-Clause
+  Copyright (c) 2006-2020, Ivan Sagalaev
+*/
+[minified Highlight.js bundle omitted: the captured text was corrupted during extraction (HTML-escaped strings and markup were stripped) and cannot be reconstructed here; the bundle defines the hljs core and registers grammars for apache, bash, c-like, c, coffeescript, cpp, csharp, css, diff, go, ini/toml, java, javascript, json, kotlin, less, lua, makefile, xml, markdown, nginx, …]
/dev/poll"},relevance:0,illegal:"=>",contains:[e.HASH_COMMENT_MODE,{className:"string",contains:[e.BACKSLASH_ESCAPE,n],variants:[{begin:/"/,end:/"/},{begin:/'/,end:/'/}]},{begin:"([a-z]+):/",end:"\\s",endsWithParent:!0,excludeEnd:!0,contains:[n]},{className:"regexp",contains:[e.BACKSLASH_ESCAPE,n],variants:[{begin:"\\s\\^",end:"\\s|{|;",returnEnd:!0},{begin:"~\\*?\\s+",end:"\\s|{|;",returnEnd:!0},{begin:"\\*(\\.[a-z\\-]+)+"},{begin:"([a-z\\-]+\\.)+\\*"}]},{className:"number",begin:"\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d{1,5})?\\b"},{className:"number",begin:"\\b\\d+[kKmMgGdshdwy]*\\b",relevance:0},n]};return{name:"Nginx config",aliases:["nginxconf"],contains:[e.HASH_COMMENT_MODE,{begin:e.UNDERSCORE_IDENT_RE+"\\s+{",returnBegin:!0,end:"{",contains:[{className:"section",begin:e.UNDERSCORE_IDENT_RE}],relevance:0},{begin:e.UNDERSCORE_IDENT_RE+"\\s",end:";|{",returnBegin:!0,contains:[{className:"attribute",begin:e.UNDERSCORE_IDENT_RE,starts:a}],relevance:0}],illegal:"[^\\s\\}]"}}}()); +hljs.registerLanguage("objectivec",function(){"use strict";return function(e){var n=/[a-zA-Z@][a-zA-Z0-9_]*/,_={$pattern:n,keyword:"@interface @class @protocol @implementation"};return{name:"Objective-C",aliases:["mm","objc","obj-c"],keywords:{$pattern:n,keyword:"int float while char export sizeof typedef const struct for union unsigned long volatile static bool mutable if do return goto void enum else break extern asm case short default double register explicit signed typename this switch continue wchar_t inline readonly assign readwrite self @synchronized id typeof nonatomic super unichar IBOutlet IBAction strong weak copy in out inout bycopy byref oneway __strong __weak __block __autoreleasing @private @protected @public @try @property @end @throw @catch @finally @autoreleasepool @synthesize @dynamic @selector @optional @required @encode @package @import @defs @compatibility_alias __bridge __bridge_transfer __bridge_retained __bridge_retain __covariant __contravariant __kindof _Nonnull _Nullable _Null_unspecified __FUNCTION__ __PRETTY_FUNCTION__ __attribute__ getter setter retain unsafe_unretained nonnull nullable null_unspecified null_resettable class instancetype NS_DESIGNATED_INITIALIZER NS_UNAVAILABLE NS_REQUIRES_SUPER NS_RETURNS_INNER_POINTER NS_INLINE NS_AVAILABLE NS_DEPRECATED NS_ENUM NS_OPTIONS NS_SWIFT_UNAVAILABLE NS_ASSUME_NONNULL_BEGIN NS_ASSUME_NONNULL_END NS_REFINED_FOR_SWIFT NS_SWIFT_NAME NS_SWIFT_NOTHROW NS_DURING NS_HANDLER NS_ENDHANDLER NS_VALUERETURN NS_VOIDRETURN",literal:"false true FALSE TRUE nil YES NO NULL",built_in:"BOOL dispatch_once_t dispatch_queue_t dispatch_sync dispatch_async dispatch_once"},illegal:"/,end:/$/,illegal:"\\n"},e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE]},{className:"class",begin:"("+_.keyword.split(" ").join("|")+")\\b",end:"({|$)",excludeEnd:!0,keywords:_,contains:[e.UNDERSCORE_TITLE_MODE]},{begin:"\\."+e.UNDERSCORE_IDENT_RE,relevance:0}]}}}()); +hljs.registerLanguage("perl",function(){"use strict";return function(e){var n={$pattern:/[\w.]+/,keyword:"getpwent getservent quotemeta msgrcv scalar kill dbmclose undef lc ma syswrite tr send umask sysopen shmwrite vec qx utime local oct semctl localtime readpipe do return format read sprintf dbmopen pop getpgrp not getpwnam rewinddir qq fileno qw endprotoent wait sethostent bless s|0 opendir continue each sleep endgrent shutdown dump chomp connect getsockname die socketpair close flock exists index shmget sub for endpwent redo lstat msgctl setpgrp abs exit select print ref gethostbyaddr unshift fcntl syscall 
goto getnetbyaddr join gmtime symlink semget splice x|0 getpeername recv log setsockopt cos last reverse gethostbyname getgrnam study formline endhostent times chop length gethostent getnetent pack getprotoent getservbyname rand mkdir pos chmod y|0 substr endnetent printf next open msgsnd readdir use unlink getsockopt getpriority rindex wantarray hex system getservbyport endservent int chr untie rmdir prototype tell listen fork shmread ucfirst setprotoent else sysseek link getgrgid shmctl waitpid unpack getnetbyname reset chdir grep split require caller lcfirst until warn while values shift telldir getpwuid my getprotobynumber delete and sort uc defined srand accept package seekdir getprotobyname semop our rename seek if q|0 chroot sysread setpwent no crypt getc chown sqrt write setnetent setpriority foreach tie sin msgget map stat getlogin unless elsif truncate exec keys glob tied closedir ioctl socket readlink eval xor readline binmode setservent eof ord bind alarm pipe atan2 getgrent exp time push setgrent gt lt or ne m|0 break given say state when"},t={className:"subst",begin:"[$@]\\{",end:"\\}",keywords:n},s={begin:"->{",end:"}"},r={variants:[{begin:/\$\d/},{begin:/[\$%@](\^\w\b|#\w+(::\w+)*|{\w+}|\w+(::\w*)*)/},{begin:/[\$%@][^\s\w{]/,relevance:0}]},i=[e.BACKSLASH_ESCAPE,t,r],a=[r,e.HASH_COMMENT_MODE,e.COMMENT("^\\=\\w","\\=cut",{endsWithParent:!0}),s,{className:"string",contains:i,variants:[{begin:"q[qwxr]?\\s*\\(",end:"\\)",relevance:5},{begin:"q[qwxr]?\\s*\\[",end:"\\]",relevance:5},{begin:"q[qwxr]?\\s*\\{",end:"\\}",relevance:5},{begin:"q[qwxr]?\\s*\\|",end:"\\|",relevance:5},{begin:"q[qwxr]?\\s*\\<",end:"\\>",relevance:5},{begin:"qw\\s+q",end:"q",relevance:5},{begin:"'",end:"'",contains:[e.BACKSLASH_ESCAPE]},{begin:'"',end:'"'},{begin:"`",end:"`",contains:[e.BACKSLASH_ESCAPE]},{begin:"{\\w+}",contains:[],relevance:0},{begin:"-?\\w+\\s*\\=\\>",contains:[],relevance:0}]},{className:"number",begin:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",relevance:0},{begin:"(\\/\\/|"+e.RE_STARTERS_RE+"|\\b(split|return|print|reverse|grep)\\b)\\s*",keywords:"split return print reverse grep",relevance:0,contains:[e.HASH_COMMENT_MODE,{className:"regexp",begin:"(s|tr|y)/(\\\\.|[^/])*/(\\\\.|[^/])*/[a-z]*",relevance:10},{className:"regexp",begin:"(m|qr)?/",end:"/[a-z]*",contains:[e.BACKSLASH_ESCAPE],relevance:0}]},{className:"function",beginKeywords:"sub",end:"(\\s*\\(.*?\\))?[;{]",excludeEnd:!0,relevance:5,contains:[e.TITLE_MODE]},{begin:"-\\w\\b",relevance:0},{begin:"^__DATA__$",end:"^__END__$",subLanguage:"mojolicious",contains:[{begin:"^@@.*",end:"$",className:"comment"}]}];return t.contains=a,s.contains=a,{name:"Perl",aliases:["pl","pm"],keywords:n,contains:a}}}()); +hljs.registerLanguage("php",function(){"use strict";return function(e){var r={begin:"\\$+[a-zA-Z_-ÿ][a-zA-Z0-9_-ÿ]*"},t={className:"meta",variants:[{begin:/<\?php/,relevance:10},{begin:/<\?[=]?/},{begin:/\?>/}]},a={className:"string",contains:[e.BACKSLASH_ESCAPE,t],variants:[{begin:'b"',end:'"'},{begin:"b'",end:"'"},e.inherit(e.APOS_STRING_MODE,{illegal:null}),e.inherit(e.QUOTE_STRING_MODE,{illegal:null})]},n={variants:[e.BINARY_NUMBER_MODE,e.C_NUMBER_MODE]},i={keyword:"__CLASS__ __DIR__ __FILE__ __FUNCTION__ __LINE__ __METHOD__ __NAMESPACE__ __TRAIT__ die echo exit include include_once print require require_once array abstract and as binary bool boolean break callable case catch class clone const continue declare default do double else elseif empty enddeclare endfor endforeach endif endswitch 
endwhile eval extends final finally float for foreach from global goto if implements instanceof insteadof int integer interface isset iterable list new object or private protected public real return string switch throw trait try unset use var void while xor yield",literal:"false null true",built_in:"Error|0 AppendIterator ArgumentCountError ArithmeticError ArrayIterator ArrayObject AssertionError BadFunctionCallException BadMethodCallException CachingIterator CallbackFilterIterator CompileError Countable DirectoryIterator DivisionByZeroError DomainException EmptyIterator ErrorException Exception FilesystemIterator FilterIterator GlobIterator InfiniteIterator InvalidArgumentException IteratorIterator LengthException LimitIterator LogicException MultipleIterator NoRewindIterator OutOfBoundsException OutOfRangeException OuterIterator OverflowException ParentIterator ParseError RangeException RecursiveArrayIterator RecursiveCachingIterator RecursiveCallbackFilterIterator RecursiveDirectoryIterator RecursiveFilterIterator RecursiveIterator RecursiveIteratorIterator RecursiveRegexIterator RecursiveTreeIterator RegexIterator RuntimeException SeekableIterator SplDoublyLinkedList SplFileInfo SplFileObject SplFixedArray SplHeap SplMaxHeap SplMinHeap SplObjectStorage SplObserver SplObserver SplPriorityQueue SplQueue SplStack SplSubject SplSubject SplTempFileObject TypeError UnderflowException UnexpectedValueException ArrayAccess Closure Generator Iterator IteratorAggregate Serializable Throwable Traversable WeakReference Directory __PHP_Incomplete_Class parent php_user_filter self static stdClass"};return{aliases:["php","php3","php4","php5","php6","php7"],case_insensitive:!0,keywords:i,contains:[e.HASH_COMMENT_MODE,e.COMMENT("//","$",{contains:[t]}),e.COMMENT("/\\*","\\*/",{contains:[{className:"doctag",begin:"@[A-Za-z]+"}]}),e.COMMENT("__halt_compiler.+?;",!1,{endsWithParent:!0,keywords:"__halt_compiler"}),{className:"string",begin:/<<<['"]?\w+['"]?$/,end:/^\w+;?$/,contains:[e.BACKSLASH_ESCAPE,{className:"subst",variants:[{begin:/\$\w+/},{begin:/\{\$/,end:/\}/}]}]},t,{className:"keyword",begin:/\$this\b/},r,{begin:/(::|->)+[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*/},{className:"function",beginKeywords:"fn function",end:/[;{]/,excludeEnd:!0,illegal:"[$%\\[]",contains:[e.UNDERSCORE_TITLE_MODE,{className:"params",begin:"\\(",end:"\\)",excludeBegin:!0,excludeEnd:!0,keywords:i,contains:["self",r,e.C_BLOCK_COMMENT_MODE,a,n]}]},{className:"class",beginKeywords:"class interface",end:"{",excludeEnd:!0,illegal:/[:\(\$"]/,contains:[{beginKeywords:"extends implements"},e.UNDERSCORE_TITLE_MODE]},{beginKeywords:"namespace",end:";",illegal:/[\.']/,contains:[e.UNDERSCORE_TITLE_MODE]},{beginKeywords:"use",end:";",contains:[e.UNDERSCORE_TITLE_MODE]},{begin:"=>"},a,n]}}}()); +hljs.registerLanguage("php-template",function(){"use strict";return function(n){return{name:"PHP template",subLanguage:"xml",contains:[{begin:/<\?(php|=)?/,end:/\?>/,subLanguage:"php",contains:[{begin:"/\\*",end:"\\*/",skip:!0},{begin:'b"',end:'"',skip:!0},{begin:"b'",end:"'",skip:!0},n.inherit(n.APOS_STRING_MODE,{illegal:null,className:null,contains:null,skip:!0}),n.inherit(n.QUOTE_STRING_MODE,{illegal:null,className:null,contains:null,skip:!0})]}]}}}()); +hljs.registerLanguage("plaintext",function(){"use strict";return function(t){return{name:"Plain text",aliases:["text","txt"],disableAutodetect:!0}}}()); +hljs.registerLanguage("properties",function(){"use strict";return function(e){var n="[ \\t\\f]*",t="("+n+"[:=]"+n+"|[ \\t\\f]+)",a="([^\\\\:= 
\\t\\f\\n]|\\\\.)+",s={end:t,relevance:0,starts:{className:"string",end:/$/,relevance:0,contains:[{begin:"\\\\\\n"}]}};return{name:".properties",case_insensitive:!0,illegal:/\S/,contains:[e.COMMENT("^\\s*[!#]","$"),{begin:"([^\\\\\\W:= \\t\\f\\n]|\\\\.)+"+t,returnBegin:!0,contains:[{className:"attr",begin:"([^\\\\\\W:= \\t\\f\\n]|\\\\.)+",endsParent:!0,relevance:0}],starts:s},{begin:a+t,returnBegin:!0,relevance:0,contains:[{className:"meta",begin:a,endsParent:!0,relevance:0}],starts:s},{className:"attr",relevance:0,begin:a+n+"$"}]}}}()); +hljs.registerLanguage("python",function(){"use strict";return function(e){var n={keyword:"and elif is global as in if from raise for except finally print import pass return exec else break not with class assert yield try while continue del or def lambda async await nonlocal|10",built_in:"Ellipsis NotImplemented",literal:"False None True"},a={className:"meta",begin:/^(>>>|\.\.\.) /},i={className:"subst",begin:/\{/,end:/\}/,keywords:n,illegal:/#/},s={begin:/\{\{/,relevance:0},r={className:"string",contains:[e.BACKSLASH_ESCAPE],variants:[{begin:/(u|b)?r?'''/,end:/'''/,contains:[e.BACKSLASH_ESCAPE,a],relevance:10},{begin:/(u|b)?r?"""/,end:/"""/,contains:[e.BACKSLASH_ESCAPE,a],relevance:10},{begin:/(fr|rf|f)'''/,end:/'''/,contains:[e.BACKSLASH_ESCAPE,a,s,i]},{begin:/(fr|rf|f)"""/,end:/"""/,contains:[e.BACKSLASH_ESCAPE,a,s,i]},{begin:/(u|r|ur)'/,end:/'/,relevance:10},{begin:/(u|r|ur)"/,end:/"/,relevance:10},{begin:/(b|br)'/,end:/'/},{begin:/(b|br)"/,end:/"/},{begin:/(fr|rf|f)'/,end:/'/,contains:[e.BACKSLASH_ESCAPE,s,i]},{begin:/(fr|rf|f)"/,end:/"/,contains:[e.BACKSLASH_ESCAPE,s,i]},e.APOS_STRING_MODE,e.QUOTE_STRING_MODE]},l={className:"number",relevance:0,variants:[{begin:e.BINARY_NUMBER_RE+"[lLjJ]?"},{begin:"\\b(0o[0-7]+)[lLjJ]?"},{begin:e.C_NUMBER_RE+"[lLjJ]?"}]},t={className:"params",variants:[{begin:/\(\s*\)/,skip:!0,className:null},{begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,contains:["self",a,l,r,e.HASH_COMMENT_MODE]}]};return i.contains=[r,l,a],{name:"Python",aliases:["py","gyp","ipython"],keywords:n,illegal:/(<\/|->|\?)|=>/,contains:[a,l,{beginKeywords:"if",relevance:0},r,e.HASH_COMMENT_MODE,{variants:[{className:"function",beginKeywords:"def"},{className:"class",beginKeywords:"class"}],end:/:/,illegal:/[${=;\n,]/,contains:[e.UNDERSCORE_TITLE_MODE,t,{begin:/->/,endsWithParent:!0,keywords:"None"}]},{className:"meta",begin:/^[\t ]*@/,end:/$/},{begin:/\b(print|exec)\(/}]}}}()); +hljs.registerLanguage("python-repl",function(){"use strict";return function(n){return{aliases:["pycon"],contains:[{className:"meta",starts:{end:/ |$/,starts:{end:"$",subLanguage:"python"}},variants:[{begin:/^>>>(?=[ ]|$)/},{begin:/^\.\.\.(?=[ ]|$)/}]}]}}}()); +hljs.registerLanguage("ruby",function(){"use strict";return function(e){var n="[a-zA-Z_]\\w*[!?=]?|[-+~]\\@|<<|>>|=~|===?|<=>|[<>]=?|\\*\\*|[-/+%^&*~`|]|\\[\\]=?",a={keyword:"and then defined module in return redo if BEGIN retry end for self when next until do begin unless END rescue else break undef not super class case require yield alias while ensure elsif or include attr_reader attr_writer attr_accessor",literal:"true false 
nil"},s={className:"doctag",begin:"@[A-Za-z]+"},i={begin:"#<",end:">"},r=[e.COMMENT("#","$",{contains:[s]}),e.COMMENT("^\\=begin","^\\=end",{contains:[s],relevance:10}),e.COMMENT("^__END__","\\n$")],c={className:"subst",begin:"#\\{",end:"}",keywords:a},t={className:"string",contains:[e.BACKSLASH_ESCAPE,c],variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/`/,end:/`/},{begin:"%[qQwWx]?\\(",end:"\\)"},{begin:"%[qQwWx]?\\[",end:"\\]"},{begin:"%[qQwWx]?{",end:"}"},{begin:"%[qQwWx]?<",end:">"},{begin:"%[qQwWx]?/",end:"/"},{begin:"%[qQwWx]?%",end:"%"},{begin:"%[qQwWx]?-",end:"-"},{begin:"%[qQwWx]?\\|",end:"\\|"},{begin:/\B\?(\\\d{1,3}|\\x[A-Fa-f0-9]{1,2}|\\u[A-Fa-f0-9]{4}|\\?\S)\b/},{begin:/<<[-~]?'?(\w+)(?:.|\n)*?\n\s*\1\b/,returnBegin:!0,contains:[{begin:/<<[-~]?'?/},e.END_SAME_AS_BEGIN({begin:/(\w+)/,end:/(\w+)/,contains:[e.BACKSLASH_ESCAPE,c]})]}]},b={className:"params",begin:"\\(",end:"\\)",endsParent:!0,keywords:a},d=[t,i,{className:"class",beginKeywords:"class module",end:"$|;",illegal:/=/,contains:[e.inherit(e.TITLE_MODE,{begin:"[A-Za-z_]\\w*(::\\w+)*(\\?|\\!)?"}),{begin:"<\\s*",contains:[{begin:"("+e.IDENT_RE+"::)?"+e.IDENT_RE}]}].concat(r)},{className:"function",beginKeywords:"def",end:"$|;",contains:[e.inherit(e.TITLE_MODE,{begin:n}),b].concat(r)},{begin:e.IDENT_RE+"::"},{className:"symbol",begin:e.UNDERSCORE_IDENT_RE+"(\\!|\\?)?:",relevance:0},{className:"symbol",begin:":(?!\\s)",contains:[t,{begin:n}],relevance:0},{className:"number",begin:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",relevance:0},{begin:"(\\$\\W)|((\\$|\\@\\@?)(\\w+))"},{className:"params",begin:/\|/,end:/\|/,keywords:a},{begin:"("+e.RE_STARTERS_RE+"|unless)\\s*",keywords:"unless",contains:[i,{className:"regexp",contains:[e.BACKSLASH_ESCAPE,c],illegal:/\n/,variants:[{begin:"/",end:"/[a-z]*"},{begin:"%r{",end:"}[a-z]*"},{begin:"%r\\(",end:"\\)[a-z]*"},{begin:"%r!",end:"![a-z]*"},{begin:"%r\\[",end:"\\][a-z]*"}]}].concat(r),relevance:0}].concat(r);c.contains=d,b.contains=d;var g=[{begin:/^\s*=>/,starts:{end:"$",contains:d}},{className:"meta",begin:"^([>?]>|[\\w#]+\\(\\w+\\):\\d+:\\d+>|(\\w+-)?\\d+\\.\\d+\\.\\d(p\\d+)?[^>]+>)",starts:{end:"$",contains:d}}];return{name:"Ruby",aliases:["rb","gemspec","podspec","thor","irb"],keywords:a,illegal:/\/\*/,contains:r.concat(g).concat(d)}}}()); +hljs.registerLanguage("rust",function(){"use strict";return function(e){var n="([ui](8|16|32|64|128|size)|f(32|64))?",t="drop i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize f32 f64 str char bool Box Option Result String Vec Copy Send Sized Sync Drop Fn FnMut FnOnce ToOwned Clone Debug PartialEq PartialOrd Eq Ord AsRef AsMut Into From Default Iterator Extend IntoIterator DoubleEndedIterator ExactSizeIterator SliceConcatExt ToString assert! assert_eq! bitflags! bytes! cfg! col! concat! concat_idents! debug_assert! debug_assert_eq! env! panic! file! format! format_args! include_bin! include_str! line! local_data_key! module_path! option_env! print! println! select! stringify! try! unimplemented! unreachable! vec! write! writeln! macro_rules! assert_ne! 
debug_assert_ne!";return{name:"Rust",aliases:["rs"],keywords:{$pattern:e.IDENT_RE+"!?",keyword:"abstract as async await become box break const continue crate do dyn else enum extern false final fn for if impl in let loop macro match mod move mut override priv pub ref return self Self static struct super trait true try type typeof unsafe unsized use virtual where while yield",literal:"true false Some None Ok Err",built_in:t},illegal:""}]}}}()); +hljs.registerLanguage("scss",function(){"use strict";return function(e){var t={className:"variable",begin:"(\\$[a-zA-Z-][a-zA-Z0-9_-]*)\\b"},i={className:"number",begin:"#[0-9A-Fa-f]+"};return e.CSS_NUMBER_MODE,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,e.C_BLOCK_COMMENT_MODE,{name:"SCSS",case_insensitive:!0,illegal:"[=/|']",contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,{className:"selector-id",begin:"\\#[A-Za-z0-9_-]+",relevance:0},{className:"selector-class",begin:"\\.[A-Za-z0-9_-]+",relevance:0},{className:"selector-attr",begin:"\\[",end:"\\]",illegal:"$"},{className:"selector-tag",begin:"\\b(a|abbr|acronym|address|area|article|aside|audio|b|base|big|blockquote|body|br|button|canvas|caption|cite|code|col|colgroup|command|datalist|dd|del|details|dfn|div|dl|dt|em|embed|fieldset|figcaption|figure|footer|form|frame|frameset|(h[1-6])|head|header|hgroup|hr|html|i|iframe|img|input|ins|kbd|keygen|label|legend|li|link|map|mark|meta|meter|nav|noframes|noscript|object|ol|optgroup|option|output|p|param|pre|progress|q|rp|rt|ruby|samp|script|section|select|small|span|strike|strong|style|sub|sup|table|tbody|td|textarea|tfoot|th|thead|time|title|tr|tt|ul|var|video)\\b",relevance:0},{className:"selector-pseudo",begin:":(visited|valid|root|right|required|read-write|read-only|out-range|optional|only-of-type|only-child|nth-of-type|nth-last-of-type|nth-last-child|nth-child|not|link|left|last-of-type|last-child|lang|invalid|indeterminate|in-range|hover|focus|first-of-type|first-line|first-letter|first-child|first|enabled|empty|disabled|default|checked|before|after|active)"},{className:"selector-pseudo",begin:"::(after|before|choices|first-letter|first-line|repeat-index|repeat-item|selection|value)"},t,{className:"attribute",begin:"\\b(src|z-index|word-wrap|word-spacing|word-break|width|widows|white-space|visibility|vertical-align|unicode-bidi|transition-timing-function|transition-property|transition-duration|transition-delay|transition|transform-style|transform-origin|transform|top|text-underline-position|text-transform|text-shadow|text-rendering|text-overflow|text-indent|text-decoration-style|text-decoration-line|text-decoration-color|text-decoration|text-align-last|text-align|tab-size|table-layout|right|resize|quotes|position|pointer-events|perspective-origin|perspective|page-break-inside|page-break-before|page-break-after|padding-top|padding-right|padding-left|padding-bottom|padding|overflow-y|overflow-x|overflow-wrap|overflow|outline-width|outline-style|outline-offset|outline-color|outline|orphans|order|opacity|object-position|object-fit|normal|none|nav-up|nav-right|nav-left|nav-index|nav-down|min-width|min-height|max-width|max-height|mask|marks|margin-top|margin-right|margin-left|margin-bottom|margin|list-style-type|list-style-position|list-style-image|list-style|line-height|letter-spacing|left|justify-content|initial|inherit|ime-mode|image-orientation|image-resolution|image-rendering|icon|hyphens|height|font-weight|font-variant-ligatures|font-variant|font-style|font-stretch|font-size-adjust|font-size|font-language-override|font-kerning|font-feature-settings|
font-family|font|float|flex-wrap|flex-shrink|flex-grow|flex-flow|flex-direction|flex-basis|flex|filter|empty-cells|display|direction|cursor|counter-reset|counter-increment|content|column-width|column-span|column-rule-width|column-rule-style|column-rule-color|column-rule|column-gap|column-fill|column-count|columns|color|clip-path|clip|clear|caption-side|break-inside|break-before|break-after|box-sizing|box-shadow|box-decoration-break|bottom|border-width|border-top-width|border-top-style|border-top-right-radius|border-top-left-radius|border-top-color|border-top|border-style|border-spacing|border-right-width|border-right-style|border-right-color|border-right|border-radius|border-left-width|border-left-style|border-left-color|border-left|border-image-width|border-image-source|border-image-slice|border-image-repeat|border-image-outset|border-image|border-color|border-collapse|border-bottom-width|border-bottom-style|border-bottom-right-radius|border-bottom-left-radius|border-bottom-color|border-bottom|border|background-size|background-repeat|background-position|background-origin|background-image|background-color|background-clip|background-attachment|background-blend-mode|background|backface-visibility|auto|animation-timing-function|animation-play-state|animation-name|animation-iteration-count|animation-fill-mode|animation-duration|animation-direction|animation-delay|animation|align-self|align-items|align-content)\\b",illegal:"[^\\s]"},{begin:"\\b(whitespace|wait|w-resize|visible|vertical-text|vertical-ideographic|uppercase|upper-roman|upper-alpha|underline|transparent|top|thin|thick|text|text-top|text-bottom|tb-rl|table-header-group|table-footer-group|sw-resize|super|strict|static|square|solid|small-caps|separate|se-resize|scroll|s-resize|rtl|row-resize|ridge|right|repeat|repeat-y|repeat-x|relative|progress|pointer|overline|outside|outset|oblique|nowrap|not-allowed|normal|none|nw-resize|no-repeat|no-drop|newspaper|ne-resize|n-resize|move|middle|medium|ltr|lr-tb|lowercase|lower-roman|lower-alpha|loose|list-item|line|line-through|line-edge|lighter|left|keep-all|justify|italic|inter-word|inter-ideograph|inside|inset|inline|inline-block|inherit|inactive|ideograph-space|ideograph-parenthesis|ideograph-numeric|ideograph-alpha|horizontal|hidden|help|hand|groove|fixed|ellipsis|e-resize|double|dotted|distribute|distribute-space|distribute-letter|distribute-all-lines|disc|disabled|default|decimal|dashed|crosshair|collapse|col-resize|circle|char|center|capitalize|break-word|break-all|bottom|both|bolder|bold|block|bidi-override|below|baseline|auto|always|all-scroll|absolute|table|table-cell)\\b"},{begin:":",end:";",contains:[t,i,e.CSS_NUMBER_MODE,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,{className:"meta",begin:"!important"}]},{begin:"@(page|font-face)",lexemes:"@[a-z-]+",keywords:"@page @font-face"},{begin:"@",end:"[{;]",returnBegin:!0,keywords:"and or not only",contains:[{begin:"@[a-z-]+",className:"keyword"},t,e.QUOTE_STRING_MODE,e.APOS_STRING_MODE,i,e.CSS_NUMBER_MODE]}]}}}()); +hljs.registerLanguage("shell",function(){"use strict";return function(s){return{name:"Shell Session",aliases:["console"],contains:[{className:"meta",begin:"^\\s{0,3}[/\\w\\d\\[\\]()@-]*[>%$#]",starts:{end:"$",subLanguage:"bash"}}]}}}()); +hljs.registerLanguage("sql",function(){"use strict";return function(e){var t=e.COMMENT("--","$");return{name:"SQL",case_insensitive:!0,illegal:/[<>{}*]/,contains:[{beginKeywords:"begin end start commit rollback savepoint lock alter create drop rename call delete do handler insert load replace select 
truncate update set show pragma grant merge describe use explain help declare prepare execute deallocate release unlock purge reset change stop analyze cache flush optimize repair kill install uninstall checksum restore check backup revoke comment values with",end:/;/,endsWithParent:!0,keywords:{$pattern:/[\w\.]+/,keyword:"as abort abs absolute acc acce accep accept access accessed accessible account acos action activate add addtime admin administer advanced advise aes_decrypt aes_encrypt after agent aggregate ali alia alias all allocate allow alter always analyze ancillary and anti any anydata anydataset anyschema anytype apply archive archived archivelog are as asc ascii asin assembly assertion associate asynchronous at atan atn2 attr attri attrib attribu attribut attribute attributes audit authenticated authentication authid authors auto autoallocate autodblink autoextend automatic availability avg backup badfile basicfile before begin beginning benchmark between bfile bfile_base big bigfile bin binary_double binary_float binlog bit_and bit_count bit_length bit_or bit_xor bitmap blob_base block blocksize body both bound bucket buffer_cache buffer_pool build bulk by byte byteordermark bytes cache caching call calling cancel capacity cascade cascaded case cast catalog category ceil ceiling chain change changed char_base char_length character_length characters characterset charindex charset charsetform charsetid check checksum checksum_agg child choose chr chunk class cleanup clear client clob clob_base clone close cluster_id cluster_probability cluster_set clustering coalesce coercibility col collate collation collect colu colum column column_value columns columns_updated comment commit compact compatibility compiled complete composite_limit compound compress compute concat concat_ws concurrent confirm conn connec connect connect_by_iscycle connect_by_isleaf connect_by_root connect_time connection consider consistent constant constraint constraints constructor container content contents context contributors controlfile conv convert convert_tz corr corr_k corr_s corresponding corruption cos cost count count_big counted covar_pop covar_samp cpu_per_call cpu_per_session crc32 create creation critical cross cube cume_dist curdate current current_date current_time current_timestamp current_user cursor curtime customdatum cycle data database databases datafile datafiles datalength date_add date_cache date_format date_sub dateadd datediff datefromparts datename datepart datetime2fromparts day day_to_second dayname dayofmonth dayofweek dayofyear days db_role_change dbtimezone ddl deallocate declare decode decompose decrement decrypt deduplicate def defa defau defaul default defaults deferred defi defin define degrees delayed delegate delete delete_all delimited demand dense_rank depth dequeue des_decrypt des_encrypt des_key_file desc descr descri describ describe descriptor deterministic diagnostics difference dimension direct_load directory disable disable_all disallow disassociate discardfile disconnect diskgroup distinct distinctrow distribute distributed div do document domain dotnet double downgrade drop dumpfile duplicate duration each edition editionable editions element ellipsis else elsif elt empty enable enable_all enclosed encode encoding encrypt end end-exec endian enforced engine engines enqueue enterprise entityescaping eomonth error errors escaped evalname evaluate event eventdata events except exception exceptions exchange exclude excluding execu execut execute exempt exists exit 
exp expire explain explode export export_set extended extent external external_1 external_2 externally extract failed failed_login_attempts failover failure far fast feature_set feature_value fetch field fields file file_name_convert filesystem_like_logging final finish first first_value fixed flash_cache flashback floor flush following follows for forall force foreign form forma format found found_rows freelist freelists freepools fresh from from_base64 from_days ftp full function general generated get get_format get_lock getdate getutcdate global global_name globally go goto grant grants greatest group group_concat group_id grouping grouping_id groups gtid_subtract guarantee guard handler hash hashkeys having hea head headi headin heading heap help hex hierarchy high high_priority hosts hour hours http id ident_current ident_incr ident_seed identified identity idle_time if ifnull ignore iif ilike ilm immediate import in include including increment index indexes indexing indextype indicator indices inet6_aton inet6_ntoa inet_aton inet_ntoa infile initial initialized initially initrans inmemory inner innodb input insert install instance instantiable instr interface interleaved intersect into invalidate invisible is is_free_lock is_ipv4 is_ipv4_compat is_not is_not_null is_used_lock isdate isnull isolation iterate java join json json_exists keep keep_duplicates key keys kill language large last last_day last_insert_id last_value lateral lax lcase lead leading least leaves left len lenght length less level levels library like like2 like4 likec limit lines link list listagg little ln load load_file lob lobs local localtime localtimestamp locate locator lock locked log log10 log2 logfile logfiles logging logical logical_reads_per_call logoff logon logs long loop low low_priority lower lpad lrtrim ltrim main make_set makedate maketime managed management manual map mapping mask master master_pos_wait match matched materialized max maxextents maximize maxinstances maxlen maxlogfiles maxloghistory maxlogmembers maxsize maxtrans md5 measures median medium member memcompress memory merge microsecond mid migration min minextents minimum mining minus minute minutes minvalue missing mod mode model modification modify module monitoring month months mount move movement multiset mutex name name_const names nan national native natural nav nchar nclob nested never new newline next nextval no no_write_to_binlog noarchivelog noaudit nobadfile nocheck nocompress nocopy nocycle nodelay nodiscardfile noentityescaping noguarantee nokeep nologfile nomapping nomaxvalue nominimize nominvalue nomonitoring none noneditionable nonschema noorder nopr nopro noprom nopromp noprompt norely noresetlogs noreverse normal norowdependencies noschemacheck noswitch not nothing notice notnull notrim novalidate now nowait nth_value nullif nulls num numb numbe nvarchar nvarchar2 object ocicoll ocidate ocidatetime ociduration ociinterval ociloblocator ocinumber ociref ocirefcursor ocirowid ocistring ocitype oct octet_length of off offline offset oid oidindex old on online only opaque open operations operator optimal optimize option optionally or oracle oracle_date oradata ord ordaudio orddicom orddoc order ordimage ordinality ordvideo organization orlany orlvary out outer outfile outline output over overflow overriding package pad parallel parallel_enable parameters parent parse partial partition partitions pascal passing password password_grace_time password_lock_time password_reuse_max password_reuse_time password_verify_function 
patch path patindex pctincrease pctthreshold pctused pctversion percent percent_rank percentile_cont percentile_disc performance period period_add period_diff permanent physical pi pipe pipelined pivot pluggable plugin policy position post_transaction pow power pragma prebuilt precedes preceding precision prediction prediction_cost prediction_details prediction_probability prediction_set prepare present preserve prior priority private private_sga privileges procedural procedure procedure_analyze processlist profiles project prompt protection public publishingservername purge quarter query quick quiesce quota quotename radians raise rand range rank raw read reads readsize rebuild record records recover recovery recursive recycle redo reduced ref reference referenced references referencing refresh regexp_like register regr_avgx regr_avgy regr_count regr_intercept regr_r2 regr_slope regr_sxx regr_sxy reject rekey relational relative relaylog release release_lock relies_on relocate rely rem remainder rename repair repeat replace replicate replication required reset resetlogs resize resource respect restore restricted result result_cache resumable resume retention return returning returns reuse reverse revoke right rlike role roles rollback rolling rollup round row row_count rowdependencies rowid rownum rows rtrim rules safe salt sample save savepoint sb1 sb2 sb4 scan schema schemacheck scn scope scroll sdo_georaster sdo_topo_geometry search sec_to_time second seconds section securefile security seed segment select self semi sequence sequential serializable server servererror session session_user sessions_per_user set sets settings sha sha1 sha2 share shared shared_pool short show shrink shutdown si_averagecolor si_colorhistogram si_featurelist si_positionalcolor si_stillimage si_texture siblings sid sign sin size size_t sizes skip slave sleep smalldatetimefromparts smallfile snapshot some soname sort soundex source space sparse spfile split sql sql_big_result sql_buffer_result sql_cache sql_calc_found_rows sql_small_result sql_variant_property sqlcode sqldata sqlerror sqlname sqlstate sqrt square standalone standby start starting startup statement static statistics stats_binomial_test stats_crosstab stats_ks_test stats_mode stats_mw_test stats_one_way_anova stats_t_test_ stats_t_test_indep stats_t_test_one stats_t_test_paired stats_wsr_test status std stddev stddev_pop stddev_samp stdev stop storage store stored str str_to_date straight_join strcmp strict string struct stuff style subdate subpartition subpartitions substitutable substr substring subtime subtring_index subtype success sum suspend switch switchoffset switchover sync synchronous synonym sys sys_xmlagg sysasm sysaux sysdate sysdatetimeoffset sysdba sysoper system system_user sysutcdatetime table tables tablespace tablesample tan tdo template temporary terminated tertiary_weights test than then thread through tier ties time time_format time_zone timediff timefromparts timeout timestamp timestampadd timestampdiff timezone_abbr timezone_minute timezone_region to to_base64 to_date to_days to_seconds todatetimeoffset trace tracking transaction transactional translate translation treat trigger trigger_nestlevel triggers trim truncate try_cast try_convert try_parse type ub1 ub2 ub4 ucase unarchived unbounded uncompress under undo unhex unicode uniform uninstall union unique unix_timestamp unknown unlimited unlock unnest unpivot unrecoverable unsafe unsigned until untrusted unusable unused update updated upgrade upped upper upsert url 
urowid usable usage use use_stored_outlines user user_data user_resources users using utc_date utc_timestamp uuid uuid_short validate validate_password_strength validation valist value values var var_samp varcharc vari varia variab variabl variable variables variance varp varraw varrawc varray verify version versions view virtual visible void wait wallet warning warnings week weekday weekofyear wellformed when whene whenev wheneve whenever where while whitespace window with within without work wrapped xdb xml xmlagg xmlattributes xmlcast xmlcolattval xmlelement xmlexists xmlforest xmlindex xmlnamespaces xmlpi xmlquery xmlroot xmlschema xmlserialize xmltable xmltype xor year year_to_month years yearweek",literal:"true false null unknown",built_in:"array bigint binary bit blob bool boolean char character date dec decimal float int int8 integer interval number numeric real record serial serial8 smallint text time timestamp tinyint varchar varchar2 varying void"},contains:[{className:"string",begin:"'",end:"'",contains:[{begin:"''"}]},{className:"string",begin:'"',end:'"',contains:[{begin:'""'}]},{className:"string",begin:"`",end:"`"},e.C_NUMBER_MODE,e.C_BLOCK_COMMENT_MODE,t,e.HASH_COMMENT_MODE]},e.C_BLOCK_COMMENT_MODE,t,e.HASH_COMMENT_MODE]}}}()); +hljs.registerLanguage("swift",function(){"use strict";return function(e){var i={keyword:"#available #colorLiteral #column #else #elseif #endif #file #fileLiteral #function #if #imageLiteral #line #selector #sourceLocation _ __COLUMN__ __FILE__ __FUNCTION__ __LINE__ Any as as! as? associatedtype associativity break case catch class continue convenience default defer deinit didSet do dynamic dynamicType else enum extension fallthrough false fileprivate final for func get guard if import in indirect infix init inout internal is lazy left let mutating nil none nonmutating open operator optional override postfix precedence prefix private protocol Protocol public repeat required rethrows return right self Self set static struct subscript super switch throw throws true try try! try? 
Type typealias unowned var weak where while willSet",literal:"true false nil",built_in:"abs advance alignof alignofValue anyGenerator assert assertionFailure bridgeFromObjectiveC bridgeFromObjectiveCUnconditional bridgeToObjectiveC bridgeToObjectiveCUnconditional c compactMap contains count countElements countLeadingZeros debugPrint debugPrintln distance dropFirst dropLast dump encodeBitsAsWords enumerate equal fatalError filter find getBridgedObjectiveCType getVaList indices insertionSort isBridgedToObjectiveC isBridgedVerbatimToObjectiveC isUniquelyReferenced isUniquelyReferencedNonObjC join lazy lexicographicalCompare map max maxElement min minElement numericCast overlaps partition posix precondition preconditionFailure print println quickSort readLine reduce reflect reinterpretCast reverse roundUpToAlignment sizeof sizeofValue sort split startsWith stride strideof strideofValue swap toString transcode underestimateCount unsafeAddressOf unsafeBitCast unsafeDowncast unsafeUnwrap unsafeReflect withExtendedLifetime withObjectAtPlusZero withUnsafePointer withUnsafePointerToObject withUnsafeMutablePointer withUnsafeMutablePointers withUnsafePointer withUnsafePointers withVaList zip"},n=e.COMMENT("/\\*","\\*/",{contains:["self"]}),t={className:"subst",begin:/\\\(/,end:"\\)",keywords:i,contains:[]},a={className:"string",contains:[e.BACKSLASH_ESCAPE,t],variants:[{begin:/"""/,end:/"""/},{begin:/"/,end:/"/}]},r={className:"number",begin:"\\b([\\d_]+(\\.[\\deE_]+)?|0x[a-fA-F0-9_]+(\\.[a-fA-F0-9p_]+)?|0b[01_]+|0o[0-7_]+)\\b",relevance:0};return t.contains=[r],{name:"Swift",keywords:i,contains:[a,e.C_LINE_COMMENT_MODE,n,{className:"type",begin:"\\b[A-Z][\\wÀ-ʸ']*[!?]"},{className:"type",begin:"\\b[A-Z][\\wÀ-ʸ']*",relevance:0},r,{className:"function",beginKeywords:"func",end:"{",excludeEnd:!0,contains:[e.inherit(e.TITLE_MODE,{begin:/[A-Za-z$_][0-9A-Za-z$_]*/}),{begin://},{className:"params",begin:/\(/,end:/\)/,endsParent:!0,keywords:i,contains:["self",r,a,e.C_BLOCK_COMMENT_MODE,{begin:":"}],illegal:/["']/}],illegal:/\[|%/},{className:"class",beginKeywords:"struct protocol class extension enum",keywords:i,end:"\\{",excludeEnd:!0,contains:[e.inherit(e.TITLE_MODE,{begin:/[A-Za-z$_][\u00C0-\u02B80-9A-Za-z$_]*/})]},{className:"meta",begin:"(@discardableResult|@warn_unused_result|@exported|@lazy|@noescape|@NSCopying|@NSManaged|@objc|@objcMembers|@convention|@required|@noreturn|@IBAction|@IBDesignable|@IBInspectable|@IBOutlet|@infix|@prefix|@postfix|@autoclosure|@testable|@available|@nonobjc|@NSApplicationMain|@UIApplicationMain|@dynamicMemberLookup|@propertyWrapper)\\b"},{beginKeywords:"import",end:/$/,contains:[e.C_LINE_COMMENT_MODE,n]}]}}}()); +hljs.registerLanguage("typescript",function(){"use strict";const 
e=["as","in","of","if","for","while","finally","var","new","function","do","return","void","else","break","catch","instanceof","with","throw","case","default","try","switch","continue","typeof","delete","let","yield","const","class","debugger","async","await","static","import","from","export","extends"],n=["true","false","null","undefined","NaN","Infinity"],a=[].concat(["setInterval","setTimeout","clearInterval","clearTimeout","require","exports","eval","isFinite","isNaN","parseFloat","parseInt","decodeURI","decodeURIComponent","encodeURI","encodeURIComponent","escape","unescape"],["arguments","this","super","console","window","document","localStorage","module","global"],["Intl","DataView","Number","Math","Date","String","RegExp","Object","Function","Boolean","Error","Symbol","Set","Map","WeakSet","WeakMap","Proxy","Reflect","JSON","Promise","Float64Array","Int16Array","Int32Array","Int8Array","Uint16Array","Uint32Array","Float32Array","Array","Uint8Array","Uint8ClampedArray","ArrayBuffer"],["EvalError","InternalError","RangeError","ReferenceError","SyntaxError","TypeError","URIError"]);return function(r){var t={$pattern:"[A-Za-z$_][0-9A-Za-z$_]*",keyword:e.concat(["type","namespace","typedef","interface","public","private","protected","implements","declare","abstract","readonly"]).join(" "),literal:n.join(" "),built_in:a.concat(["any","void","number","boolean","string","object","never","enum"]).join(" ")},s={className:"meta",begin:"@[A-Za-z$_][0-9A-Za-z$_]*"},i={className:"number",variants:[{begin:"\\b(0[bB][01]+)n?"},{begin:"\\b(0[oO][0-7]+)n?"},{begin:r.C_NUMBER_RE+"n?"}],relevance:0},o={className:"subst",begin:"\\$\\{",end:"\\}",keywords:t,contains:[]},c={begin:"html`",end:"",starts:{end:"`",returnEnd:!1,contains:[r.BACKSLASH_ESCAPE,o],subLanguage:"xml"}},l={begin:"css`",end:"",starts:{end:"`",returnEnd:!1,contains:[r.BACKSLASH_ESCAPE,o],subLanguage:"css"}},E={className:"string",begin:"`",end:"`",contains:[r.BACKSLASH_ESCAPE,o]};o.contains=[r.APOS_STRING_MODE,r.QUOTE_STRING_MODE,c,l,E,i,r.REGEXP_MODE];var d={begin:"\\(",end:/\)/,keywords:t,contains:["self",r.QUOTE_STRING_MODE,r.APOS_STRING_MODE,r.NUMBER_MODE]},u={className:"params",begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:t,contains:[r.C_LINE_COMMENT_MODE,r.C_BLOCK_COMMENT_MODE,s,d]};return{name:"TypeScript",aliases:["ts"],keywords:t,contains:[r.SHEBANG(),{className:"meta",begin:/^\s*['"]use strict['"]/},r.APOS_STRING_MODE,r.QUOTE_STRING_MODE,c,l,E,r.C_LINE_COMMENT_MODE,r.C_BLOCK_COMMENT_MODE,i,{begin:"("+r.RE_STARTERS_RE+"|\\b(case|return|throw)\\b)\\s*",keywords:"return throw case",contains:[r.C_LINE_COMMENT_MODE,r.C_BLOCK_COMMENT_MODE,r.REGEXP_MODE,{className:"function",begin:"(\\([^(]*(\\([^(]*(\\([^(]*\\))?\\))?\\)|"+r.UNDERSCORE_IDENT_RE+")\\s*=>",returnBegin:!0,end:"\\s*=>",contains:[{className:"params",variants:[{begin:r.UNDERSCORE_IDENT_RE},{className:null,begin:/\(\s*\)/,skip:!0},{begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,keywords:t,contains:d.contains}]}]}],relevance:0},{className:"function",beginKeywords:"function",end:/[\{;]/,excludeEnd:!0,keywords:t,contains:["self",r.inherit(r.TITLE_MODE,{begin:"[A-Za-z$_][0-9A-Za-z$_]*"}),u],illegal:/%/,relevance:0},{beginKeywords:"constructor",end:/[\{;]/,excludeEnd:!0,contains:["self",u]},{begin:/module\./,keywords:{built_in:"module"},relevance:0},{beginKeywords:"module",end:/\{/,excludeEnd:!0},{beginKeywords:"interface",end:/\{/,excludeEnd:!0,keywords:"interface extends"},{begin:/\$[(.]/},{begin:"\\."+r.IDENT_RE,relevance:0},s,d]}}}()); 
+hljs.registerLanguage("yaml",function(){"use strict";return function(e){var n="true false yes no null",a="[\\w#;/?:@&=+$,.~*\\'()[\\]]+",s={className:"string",relevance:0,variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/\S+/}],contains:[e.BACKSLASH_ESCAPE,{className:"template-variable",variants:[{begin:"{{",end:"}}"},{begin:"%{",end:"}"}]}]},i=e.inherit(s,{variants:[{begin:/'/,end:/'/},{begin:/"/,end:/"/},{begin:/[^\s,{}[\]]+/}]}),l={end:",",endsWithParent:!0,excludeEnd:!0,contains:[],keywords:n,relevance:0},t={begin:"{",end:"}",contains:[l],illegal:"\\n",relevance:0},g={begin:"\\[",end:"\\]",contains:[l],illegal:"\\n",relevance:0},b=[{className:"attr",variants:[{begin:"\\w[\\w :\\/.-]*:(?=[ \t]|$)"},{begin:'"\\w[\\w :\\/.-]*":(?=[ \t]|$)'},{begin:"'\\w[\\w :\\/.-]*':(?=[ \t]|$)"}]},{className:"meta",begin:"^---s*$",relevance:10},{className:"string",begin:"[\\|>]([0-9]?[+-])?[ ]*\\n( *)[\\S ]+\\n(\\2[\\S ]+\\n?)*"},{begin:"<%[%=-]?",end:"[%-]?%>",subLanguage:"ruby",excludeBegin:!0,excludeEnd:!0,relevance:0},{className:"type",begin:"!\\w+!"+a},{className:"type",begin:"!<"+a+">"},{className:"type",begin:"!"+a},{className:"type",begin:"!!"+a},{className:"meta",begin:"&"+e.UNDERSCORE_IDENT_RE+"$"},{className:"meta",begin:"\\*"+e.UNDERSCORE_IDENT_RE+"$"},{className:"bullet",begin:"\\-(?=[ ]|$)",relevance:0},e.HASH_COMMENT_MODE,{beginKeywords:n,keywords:{literal:n}},{className:"number",begin:"\\b[0-9]{4}(-[0-9][0-9]){0,2}([Tt \\t][0-9][0-9]?(:[0-9][0-9]){2})?(\\.[0-9]*)?([ \\t])*(Z|[-+][0-9][0-9]?(:[0-9][0-9])?)?\\b"},{className:"number",begin:e.C_NUMBER_RE+"\\b"},t,g,s],c=[...b];return c.pop(),c.push(i),l.contains=c,{name:"YAML",case_insensitive:!0,aliases:["yml","YAML"],contains:b}}}()); +hljs.registerLanguage("armasm",function(){"use strict";return function(s){const e={variants:[s.COMMENT("^[ \\t]*(?=#)","$",{relevance:0,excludeBegin:!0}),s.COMMENT("[;@]","$",{relevance:0}),s.C_LINE_COMMENT_MODE,s.C_BLOCK_COMMENT_MODE]};return{name:"ARM Assembly",case_insensitive:!0,aliases:["arm"],keywords:{$pattern:"\\.?"+s.IDENT_RE,meta:".2byte .4byte .align .ascii .asciz .balign .byte .code .data .else .end .endif .endm .endr .equ .err .exitm .extern .global .hword .if .ifdef .ifndef .include .irp .long .macro .rept .req .section .set .skip .space .text .word .arm .thumb .code16 .code32 .force_thumb .thumb_func .ltorg ALIAS ALIGN ARM AREA ASSERT ATTR CN CODE CODE16 CODE32 COMMON CP DATA DCB DCD DCDU DCDO DCFD DCFDU DCI DCQ DCQU DCW DCWU DN ELIF ELSE END ENDFUNC ENDIF ENDP ENTRY EQU EXPORT EXPORTAS EXTERN FIELD FILL FUNCTION GBLA GBLL GBLS GET GLOBAL IF IMPORT INCBIN INCLUDE INFO KEEP LCLA LCLL LCLS LTORG MACRO MAP MEND MEXIT NOFP OPT PRESERVE8 PROC QN READONLY RELOC REQUIRE REQUIRE8 RLIST FN ROUT SETA SETL SETS SN SPACE SUBT THUMB THUMBX TTL WHILE WEND ",built_in:"r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 pc lr sp ip sl sb fp a1 a2 a3 a4 v1 v2 v3 v4 v5 v6 v7 v8 f0 f1 f2 f3 f4 f5 f6 f7 p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15 c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 q0 q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 cpsr_c cpsr_x cpsr_s cpsr_f cpsr_cx cpsr_cxs cpsr_xs cpsr_xsf cpsr_sf cpsr_cxsf spsr_c spsr_x spsr_s spsr_f spsr_cx spsr_cxs spsr_xs spsr_xsf spsr_sf spsr_cxsf s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 s16 s17 s18 s19 s20 s21 s22 s23 s24 s25 s26 s27 s28 s29 s30 s31 d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 {PC} {VAR} {TRUE} {FALSE} {OPT} {CONFIG} {ENDIAN} 
{CODESIZE} {CPU} {FPU} {ARCHITECTURE} {PCSTOREOFFSET} {ARMASM_VERSION} {INTER} {ROPI} {RWPI} {SWST} {NOSWST} . @"},contains:[{className:"keyword",begin:"\\b(adc|(qd?|sh?|u[qh]?)?add(8|16)?|usada?8|(q|sh?|u[qh]?)?(as|sa)x|and|adrl?|sbc|rs[bc]|asr|b[lx]?|blx|bxj|cbn?z|tb[bh]|bic|bfc|bfi|[su]bfx|bkpt|cdp2?|clz|clrex|cmp|cmn|cpsi[ed]|cps|setend|dbg|dmb|dsb|eor|isb|it[te]{0,3}|lsl|lsr|ror|rrx|ldm(([id][ab])|f[ds])?|ldr((s|ex)?[bhd])?|movt?|mvn|mra|mar|mul|[us]mull|smul[bwt][bt]|smu[as]d|smmul|smmla|mla|umlaal|smlal?([wbt][bt]|d)|mls|smlsl?[ds]|smc|svc|sev|mia([bt]{2}|ph)?|mrr?c2?|mcrr2?|mrs|msr|orr|orn|pkh(tb|bt)|rbit|rev(16|sh)?|sel|[su]sat(16)?|nop|pop|push|rfe([id][ab])?|stm([id][ab])?|str(ex)?[bhd]?|(qd?)?sub|(sh?|q|u[qh]?)?sub(8|16)|[su]xt(a?h|a?b(16)?)|srs([id][ab])?|swpb?|swi|smi|tst|teq|wfe|wfi|yield)(eq|ne|cs|cc|mi|pl|vs|vc|hi|ls|ge|lt|gt|le|al|hs|lo)?[sptrx]?(?=\\s)"},e,s.QUOTE_STRING_MODE,{className:"string",begin:"'",end:"[^\\\\]'",relevance:0},{className:"title",begin:"\\|",end:"\\|",illegal:"\\n",relevance:0},{className:"number",variants:[{begin:"[#$=]?0x[0-9a-f]+"},{begin:"[#$=]?0b[01]+"},{begin:"[#$=]\\d+"},{begin:"\\b\\d+"}],relevance:0},{className:"symbol",variants:[{begin:"^[ \\t]*[a-z_\\.\\$][a-z0-9_\\.\\$]+:"},{begin:"^[a-z_\\.\\$][a-z0-9_\\.\\$]+"},{begin:"[=#]\\w+"}],relevance:0}]}}}()); +hljs.registerLanguage("d",function(){"use strict";return function(e){var a={$pattern:e.UNDERSCORE_IDENT_RE,keyword:"abstract alias align asm assert auto body break byte case cast catch class const continue debug default delete deprecated do else enum export extern final finally for foreach foreach_reverse|10 goto if immutable import in inout int interface invariant is lazy macro mixin module new nothrow out override package pragma private protected public pure ref return scope shared static struct super switch synchronized template this throw try typedef typeid typeof union unittest version void volatile while with __FILE__ __LINE__ __gshared|10 __thread __traits __DATE__ __EOF__ __TIME__ __TIMESTAMP__ __VENDOR__ __VERSION__",built_in:"bool cdouble cent cfloat char creal dchar delegate double dstring float function idouble ifloat ireal long real short string ubyte ucent uint ulong ushort wchar wstring",literal:"false null 
true"},d="((0|[1-9][\\d_]*)|0[bB][01_]+|0[xX]([\\da-fA-F][\\da-fA-F_]*|_[\\da-fA-F][\\da-fA-F_]*))",n="\\\\(['\"\\?\\\\abfnrtv]|u[\\dA-Fa-f]{4}|[0-7]{1,3}|x[\\dA-Fa-f]{2}|U[\\dA-Fa-f]{8})|&[a-zA-Z\\d]{2,};",t={className:"number",begin:"\\b"+d+"(L|u|U|Lu|LU|uL|UL)?",relevance:0},_={className:"number",begin:"\\b(((0[xX](([\\da-fA-F][\\da-fA-F_]*|_[\\da-fA-F][\\da-fA-F_]*)\\.([\\da-fA-F][\\da-fA-F_]*|_[\\da-fA-F][\\da-fA-F_]*)|\\.?([\\da-fA-F][\\da-fA-F_]*|_[\\da-fA-F][\\da-fA-F_]*))[pP][+-]?(0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d))|((0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d)(\\.\\d*|([eE][+-]?(0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d)))|\\d+\\.(0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d)(0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d)|\\.(0|[1-9][\\d_]*)([eE][+-]?(0|[1-9][\\d_]*|\\d[\\d_]*|[\\d_]+?\\d))?))([fF]|L|i|[fF]i|Li)?|"+d+"(i|[fF]i|Li))",relevance:0},r={className:"string",begin:"'("+n+"|.)",end:"'",illegal:"."},i={className:"string",begin:'"',contains:[{begin:n,relevance:0}],end:'"[cwd]?'},s=e.COMMENT("\\/\\+","\\+\\/",{contains:["self"],relevance:10});return{name:"D",keywords:a,contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,s,{className:"string",begin:'x"[\\da-fA-F\\s\\n\\r]*"[cwd]?',relevance:10},i,{className:"string",begin:'[rq]"',end:'"[cwd]?',relevance:5},{className:"string",begin:"`",end:"`[cwd]?"},{className:"string",begin:'q"\\{',end:'\\}"'},_,t,r,{className:"meta",begin:"^#!",end:"$",relevance:5},{className:"meta",begin:"#(line)",end:"$",relevance:5},{className:"keyword",begin:"@[a-zA-Z_][a-zA-Z_\\d]*"}]}}}()); +hljs.registerLanguage("handlebars",function(){"use strict";function e(...e){return e.map(e=>(function(e){return e?"string"==typeof e?e:e.source:null})(e)).join("")}return function(n){const a={"builtin-name":"action bindattr collection component concat debugger each each-in get hash if in input link-to loc log lookup mut outlet partial query-params render template textarea unbound unless view with yield"},t=/\[.*?\]/,s=/[^\s!"#%&'()*+,.\/;<=>@\[\\\]^`{|}~]+/,i=e("(",/'.*?'/,"|",/".*?"/,"|",t,"|",s,"|",/\.|\//,")+"),r=e("(",t,"|",s,")(?==)"),l={begin:i,lexemes:/[\w.\/]+/},c=n.inherit(l,{keywords:{literal:"true false undefined null"}}),o={begin:/\(/,end:/\)/},m={className:"attr",begin:r,relevance:0,starts:{begin:/=/,end:/=/,starts:{contains:[n.NUMBER_MODE,n.QUOTE_STRING_MODE,n.APOS_STRING_MODE,c,o]}}},d={contains:[n.NUMBER_MODE,n.QUOTE_STRING_MODE,n.APOS_STRING_MODE,{begin:/as\s+\|/,keywords:{keyword:"as"},end:/\|/,contains:[{begin:/\w+/}]},m,c,o],returnEnd:!0},g=n.inherit(l,{className:"name",keywords:a,starts:n.inherit(d,{end:/\)/})});o.contains=[g];const 
u=n.inherit(l,{keywords:a,className:"name",starts:n.inherit(d,{end:/}}/})}),b=n.inherit(l,{keywords:a,className:"name"}),h=n.inherit(l,{className:"name",keywords:a,starts:n.inherit(d,{end:/}}/})});return{name:"Handlebars",aliases:["hbs","html.hbs","html.handlebars","htmlbars"],case_insensitive:!0,subLanguage:"xml",contains:[{begin:/\\\{\{/,skip:!0},{begin:/\\\\(?=\{\{)/,skip:!0},n.COMMENT(/\{\{!--/,/--\}\}/),n.COMMENT(/\{\{!/,/\}\}/),{className:"template-tag",begin:/\{\{\{\{(?!\/)/,end:/\}\}\}\}/,contains:[u],starts:{end:/\{\{\{\{\//,returnEnd:!0,subLanguage:"xml"}},{className:"template-tag",begin:/\{\{\{\{\//,end:/\}\}\}\}/,contains:[b]},{className:"template-tag",begin:/\{\{#/,end:/\}\}/,contains:[u]},{className:"template-tag",begin:/\{\{(?=else\}\})/,end:/\}\}/,keywords:"else"},{className:"template-tag",begin:/\{\{\//,end:/\}\}/,contains:[b]},{className:"template-variable",begin:/\{\{\{/,end:/\}\}\}/,contains:[h]},{className:"template-variable",begin:/\{\{/,end:/\}\}/,contains:[h]}]}}}()); +hljs.registerLanguage("haskell",function(){"use strict";return function(e){var n={variants:[e.COMMENT("--","$"),e.COMMENT("{-","-}",{contains:["self"]})]},i={className:"meta",begin:"{-#",end:"#-}"},a={className:"meta",begin:"^#",end:"$"},s={className:"type",begin:"\\b[A-Z][\\w']*",relevance:0},l={begin:"\\(",end:"\\)",illegal:'"',contains:[i,a,{className:"type",begin:"\\b[A-Z][\\w]*(\\((\\.\\.|,|\\w+)\\))?"},e.inherit(e.TITLE_MODE,{begin:"[_a-z][\\w']*"}),n]};return{name:"Haskell",aliases:["hs"],keywords:"let in if then else case of where do module import hiding qualified type data newtype deriving class instance as default infix infixl infixr foreign export ccall stdcall cplusplus jvm dotnet safe unsafe family forall mdo proc rec",contains:[{beginKeywords:"module",end:"where",keywords:"module where",contains:[l,n],illegal:"\\W\\.|;"},{begin:"\\bimport\\b",end:"$",keywords:"import qualified as hiding",contains:[l,n],illegal:"\\W\\.|;"},{className:"class",begin:"^(\\s*)?(class|instance)\\b",end:"where",keywords:"class family instance where",contains:[s,l,n]},{className:"class",begin:"\\b(data|(new)?type)\\b",end:"$",keywords:"data family type newtype deriving",contains:[i,s,l,{begin:"{",end:"}",contains:l.contains},n]},{beginKeywords:"default",end:"$",contains:[s,l,n]},{beginKeywords:"infix infixl infixr",end:"$",contains:[e.C_NUMBER_MODE,n]},{begin:"\\bforeign\\b",end:"$",keywords:"foreign import export ccall stdcall cplusplus jvm dotnet safe unsafe",contains:[s,e.QUOTE_STRING_MODE,n]},{className:"meta",begin:"#!\\/usr\\/bin\\/env runhaskell",end:"$"},i,a,e.QUOTE_STRING_MODE,e.C_NUMBER_MODE,s,e.inherit(e.TITLE_MODE,{begin:"^[_a-z][\\w']*"}),n,{begin:"->|<-"}]}}}()); +hljs.registerLanguage("julia",function(){"use strict";return function(e){var r="[A-Za-z_\\u00A1-\\uFFFF][A-Za-z_0-9\\u00A1-\\uFFFF]*",t={$pattern:r,keyword:"in isa where baremodule begin break catch ccall const continue do else elseif end export false finally for function global if import importall let local macro module quote return true try using while type immutable abstract bitstype typealias ",literal:"true false ARGS C_NULL DevNull ENDIAN_BOM ENV I Inf Inf16 Inf32 Inf64 InsertionSort JULIA_HOME LOAD_PATH MergeSort NaN NaN16 NaN32 NaN64 PROGRAM_FILE QuickSort RoundDown RoundFromZero RoundNearest RoundNearestTiesAway RoundNearestTiesUp RoundToZero RoundUp STDERR STDIN STDOUT VERSION catalan e|0 eu|0 eulergamma golden im nothing pi γ π φ ",built_in:"ANY AbstractArray AbstractChannel AbstractFloat AbstractMatrix AbstractRNG 
AbstractSerializer AbstractSet AbstractSparseArray AbstractSparseMatrix AbstractSparseVector AbstractString AbstractUnitRange AbstractVecOrMat AbstractVector Any ArgumentError Array AssertionError Associative Base64DecodePipe Base64EncodePipe Bidiagonal BigFloat BigInt BitArray BitMatrix BitVector Bool BoundsError BufferStream CachingPool CapturedException CartesianIndex CartesianRange Cchar Cdouble Cfloat Channel Char Cint Cintmax_t Clong Clonglong ClusterManager Cmd CodeInfo Colon Complex Complex128 Complex32 Complex64 CompositeException Condition ConjArray ConjMatrix ConjVector Cptrdiff_t Cshort Csize_t Cssize_t Cstring Cuchar Cuint Cuintmax_t Culong Culonglong Cushort Cwchar_t Cwstring DataType Date DateFormat DateTime DenseArray DenseMatrix DenseVecOrMat DenseVector Diagonal Dict DimensionMismatch Dims DirectIndexString Display DivideError DomainError EOFError EachLine Enum Enumerate ErrorException Exception ExponentialBackOff Expr Factorization FileMonitor Float16 Float32 Float64 Function Future GlobalRef GotoNode HTML Hermitian IO IOBuffer IOContext IOStream IPAddr IPv4 IPv6 IndexCartesian IndexLinear IndexStyle InexactError InitError Int Int128 Int16 Int32 Int64 Int8 IntSet Integer InterruptException InvalidStateException Irrational KeyError LabelNode LinSpace LineNumberNode LoadError LowerTriangular MIME Matrix MersenneTwister Method MethodError MethodTable Module NTuple NewvarNode NullException Nullable Number ObjectIdDict OrdinalRange OutOfMemoryError OverflowError Pair ParseError PartialQuickSort PermutedDimsArray Pipe PollingFileWatcher ProcessExitedException Ptr QuoteNode RandomDevice Range RangeIndex Rational RawFD ReadOnlyMemoryError Real ReentrantLock Ref Regex RegexMatch RemoteChannel RemoteException RevString RoundingMode RowVector SSAValue SegmentationFault SerializationState Set SharedArray SharedMatrix SharedVector Signed SimpleVector Slot SlotNumber SparseMatrixCSC SparseVector StackFrame StackOverflowError StackTrace StepRange StepRangeLen StridedArray StridedMatrix StridedVecOrMat StridedVector String SubArray SubString SymTridiagonal Symbol Symmetric SystemError TCPSocket Task Text TextDisplay Timer Tridiagonal Tuple Type TypeError TypeMapEntry TypeMapLevel TypeName TypeVar TypedSlot UDPSocket UInt UInt128 UInt16 UInt32 UInt64 UInt8 UndefRefError UndefVarError UnicodeError UniformScaling Union UnionAll UnitRange Unsigned UpperTriangular Val Vararg VecElement VecOrMat Vector VersionNumber Void WeakKeyDict WeakRef WorkerConfig WorkerPool "},a={keywords:t,illegal:/<\//},n={className:"subst",begin:/\$\(/,end:/\)/,keywords:t},o={className:"variable",begin:"\\$"+r},i={className:"string",contains:[e.BACKSLASH_ESCAPE,n,o],variants:[{begin:/\w*"""/,end:/"""\w*/,relevance:10},{begin:/\w*"/,end:/"\w*/}]},l={className:"string",contains:[e.BACKSLASH_ESCAPE,n,o],begin:"`",end:"`"},s={className:"meta",begin:"@"+r};return a.name="Julia",a.contains=[{className:"number",begin:/(\b0x[\d_]*(\.[\d_]*)?|0x\.\d[\d_]*)p[-+]?\d+|\b0[box][a-fA-F0-9][a-fA-F0-9_]*|(\b\d[\d_]*(\.[\d_]*)?|\.\d[\d_]*)([eEfF][-+]?\d+)?/,relevance:0},{className:"string",begin:/'(.|\\[xXuU][a-zA-Z0-9]+)'/},i,l,s,{className:"comment",variants:[{begin:"#=",end:"=#",relevance:10},{begin:"#",end:"$"}]},e.HASH_COMMENT_MODE,{className:"keyword",begin:"\\b(((abstract|primitive)\\s+)type|(mutable\\s+)?struct)\\b"},{begin:/<:/}],n.contains=a.contains,a}}()); +hljs.registerLanguage("nim",function(){"use strict";return function(e){return{name:"Nim",aliases:["nim"],keywords:{keyword:"addr and as asm bind block break case cast 
const continue converter discard distinct div do elif else end enum except export finally for from func generic if import in include interface is isnot iterator let macro method mixin mod nil not notin object of or out proc ptr raise ref return shl shr static template try tuple type using var when while with without xor yield",literal:"shared guarded stdin stdout stderr result true false",built_in:"int int8 int16 int32 int64 uint uint8 uint16 uint32 uint64 float float32 float64 bool char string cstring pointer expr stmt void auto any range array openarray varargs seq set clong culong cchar cschar cshort cint csize clonglong cfloat cdouble clongdouble cuchar cushort cuint culonglong cstringarray semistatic"},contains:[{className:"meta",begin:/{\./,end:/\.}/,relevance:10},{className:"string",begin:/[a-zA-Z]\w*"/,end:/"/,contains:[{begin:/""/}]},{className:"string",begin:/([a-zA-Z]\w*)?"""/,end:/"""/},e.QUOTE_STRING_MODE,{className:"type",begin:/\b[A-Z]\w+\b/,relevance:0},{className:"number",relevance:0,variants:[{begin:/\b(0[xX][0-9a-fA-F][_0-9a-fA-F]*)('?[iIuU](8|16|32|64))?/},{begin:/\b(0o[0-7][_0-7]*)('?[iIuUfF](8|16|32|64))?/},{begin:/\b(0(b|B)[01][_01]*)('?[iIuUfF](8|16|32|64))?/},{begin:/\b(\d[_\d]*)('?[iIuUfF](8|16|32|64))?/}]},e.HASH_COMMENT_MODE]}}}()); +hljs.registerLanguage("nix",function(){"use strict";return function(e){var n={keyword:"rec with let in inherit assert if else then",literal:"true false or and null",built_in:"import abort baseNameOf dirOf isNull builtins map removeAttrs throw toString derivation"},i={className:"subst",begin:/\$\{/,end:/}/,keywords:n},t={className:"string",contains:[i],variants:[{begin:"''",end:"''"},{begin:'"',end:'"'}]},s=[e.NUMBER_MODE,e.HASH_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,t,{begin:/[a-zA-Z0-9-_]+(\s*=)/,returnBegin:!0,relevance:0,contains:[{className:"attr",begin:/\S+/}]}];return i.contains=s,{name:"Nix",aliases:["nixos"],keywords:n,contains:s}}}()); +hljs.registerLanguage("r",function(){"use strict";return function(e){var n="([a-zA-Z]|\\.[a-zA-Z.])[a-zA-Z0-9._]*";return{name:"R",contains:[e.HASH_COMMENT_MODE,{begin:n,keywords:{$pattern:n,keyword:"function if in break next repeat else for return switch while try tryCatch stop warning require library attach detach source setMethod setGeneric setGroupGeneric setClass ...",literal:"NULL NA TRUE FALSE T F Inf NaN NA_integer_|10 NA_real_|10 NA_character_|10 NA_complex_|10"},relevance:0},{className:"number",begin:"0[xX][0-9a-fA-F]+[Li]?\\b",relevance:0},{className:"number",begin:"\\d+(?:[eE][+\\-]?\\d*)?L\\b",relevance:0},{className:"number",begin:"\\d+\\.(?!\\d)(?:i\\b)?",relevance:0},{className:"number",begin:"\\d+(?:\\.\\d*)?(?:[eE][+\\-]?\\d*)?i?\\b",relevance:0},{className:"number",begin:"\\.\\d+(?:[eE][+\\-]?\\d*)?i?\\b",relevance:0},{begin:"`",end:"`",relevance:0},{className:"string",contains:[e.BACKSLASH_ESCAPE],variants:[{begin:'"',end:'"'},{begin:"'",end:"'"}]}]}}}()); +hljs.registerLanguage("scala",function(){"use strict";return function(e){var n={className:"subst",variants:[{begin:"\\$[A-Za-z0-9_]+"},{begin:"\\${",end:"}"}]},a={className:"string",variants:[{begin:'"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE]},{begin:'"""',end:'"""',relevance:10},{begin:'[a-z]+"',end:'"',illegal:"\\n",contains:[e.BACKSLASH_ESCAPE,n]},{className:"string",begin:'[a-z]+"""',end:'"""',contains:[n],relevance:10}]},s={className:"type",begin:"\\b[A-Z][A-Za-z0-9_]*",relevance:0},t={className:"title",begin:/[^0-9\n\t "'(),.`{}\[\]:;][^\n\t "'(),.`{}\[\]:;]+|[^0-9\n\t 
"'(),.`{}\[\]:;=]/,relevance:0},i={className:"class",beginKeywords:"class object trait type",end:/[:={\[\n;]/,excludeEnd:!0,contains:[{beginKeywords:"extends with",relevance:10},{begin:/\[/,end:/\]/,excludeBegin:!0,excludeEnd:!0,relevance:0,contains:[s]},{className:"params",begin:/\(/,end:/\)/,excludeBegin:!0,excludeEnd:!0,relevance:0,contains:[s]},t]},l={className:"function",beginKeywords:"def",end:/[:={\[(\n;]/,excludeEnd:!0,contains:[t]};return{name:"Scala",keywords:{literal:"true false null",keyword:"type yield lazy override def with val var sealed abstract private trait object if forSome for while throw finally protected extends import final return else break new catch super class case package default try this match continue throws implicit"},contains:[e.C_LINE_COMMENT_MODE,e.C_BLOCK_COMMENT_MODE,a,{className:"symbol",begin:"'\\w[\\w\\d_]*(?!')"},s,l,i,e.C_NUMBER_MODE,{className:"meta",begin:"@[A-Za-z]+"}]}}}()); +hljs.registerLanguage("x86asm",function(){"use strict";return function(s){return{name:"Intel x86 Assembly",case_insensitive:!0,keywords:{$pattern:"[.%]?"+s.IDENT_RE,keyword:"lock rep repe repz repne repnz xaquire xrelease bnd nobnd aaa aad aam aas adc add and arpl bb0_reset bb1_reset bound bsf bsr bswap bt btc btr bts call cbw cdq cdqe clc cld cli clts cmc cmp cmpsb cmpsd cmpsq cmpsw cmpxchg cmpxchg486 cmpxchg8b cmpxchg16b cpuid cpu_read cpu_write cqo cwd cwde daa das dec div dmint emms enter equ f2xm1 fabs fadd faddp fbld fbstp fchs fclex fcmovb fcmovbe fcmove fcmovnb fcmovnbe fcmovne fcmovnu fcmovu fcom fcomi fcomip fcomp fcompp fcos fdecstp fdisi fdiv fdivp fdivr fdivrp femms feni ffree ffreep fiadd ficom ficomp fidiv fidivr fild fimul fincstp finit fist fistp fisttp fisub fisubr fld fld1 fldcw fldenv fldl2e fldl2t fldlg2 fldln2 fldpi fldz fmul fmulp fnclex fndisi fneni fninit fnop fnsave fnstcw fnstenv fnstsw fpatan fprem fprem1 fptan frndint frstor fsave fscale fsetpm fsin fsincos fsqrt fst fstcw fstenv fstp fstsw fsub fsubp fsubr fsubrp ftst fucom fucomi fucomip fucomp fucompp fxam fxch fxtract fyl2x fyl2xp1 hlt ibts icebp idiv imul in inc incbin insb insd insw int int01 int1 int03 int3 into invd invpcid invlpg invlpga iret iretd iretq iretw jcxz jecxz jrcxz jmp jmpe lahf lar lds lea leave les lfence lfs lgdt lgs lidt lldt lmsw loadall loadall286 lodsb lodsd lodsq lodsw loop loope loopne loopnz loopz lsl lss ltr mfence monitor mov movd movq movsb movsd movsq movsw movsx movsxd movzx mul mwait neg nop not or out outsb outsd outsw packssdw packsswb packuswb paddb paddd paddsb paddsiw paddsw paddusb paddusw paddw pand pandn pause paveb pavgusb pcmpeqb pcmpeqd pcmpeqw pcmpgtb pcmpgtd pcmpgtw pdistib pf2id pfacc pfadd pfcmpeq pfcmpge pfcmpgt pfmax pfmin pfmul pfrcp pfrcpit1 pfrcpit2 pfrsqit1 pfrsqrt pfsub pfsubr pi2fd pmachriw pmaddwd pmagw pmulhriw pmulhrwa pmulhrwc pmulhw pmullw pmvgezb pmvlzb pmvnzb pmvzb pop popa popad popaw popf popfd popfq popfw por prefetch prefetchw pslld psllq psllw psrad psraw psrld psrlq psrlw psubb psubd psubsb psubsiw psubsw psubusb psubusw psubw punpckhbw punpckhdq punpckhwd punpcklbw punpckldq punpcklwd push pusha pushad pushaw pushf pushfd pushfq pushfw pxor rcl rcr rdshr rdmsr rdpmc rdtsc rdtscp ret retf retn rol ror rdm rsdc rsldt rsm rsts sahf sal salc sar sbb scasb scasd scasq scasw sfence sgdt shl shld shr shrd sidt sldt skinit smi smint smintold smsw stc std sti stosb stosd stosq stosw str sub svdc svldt svts swapgs syscall sysenter sysexit sysret test ud0 ud1 ud2b ud2 ud2a umov verr verw fwait wbinvd wrshr wrmsr xadd xbts xchg xlatb 
xlat xor cmove cmovz cmovne cmovnz cmova cmovnbe cmovae cmovnb cmovb cmovnae cmovbe cmovna cmovg cmovnle cmovge cmovnl cmovl cmovnge cmovle cmovng cmovc cmovnc cmovo cmovno cmovs cmovns cmovp cmovpe cmovnp cmovpo je jz jne jnz ja jnbe jae jnb jb jnae jbe jna jg jnle jge jnl jl jnge jle jng jc jnc jo jno js jns jpo jnp jpe jp sete setz setne setnz seta setnbe setae setnb setnc setb setnae setcset setbe setna setg setnle setge setnl setl setnge setle setng sets setns seto setno setpe setp setpo setnp addps addss andnps andps cmpeqps cmpeqss cmpleps cmpless cmpltps cmpltss cmpneqps cmpneqss cmpnleps cmpnless cmpnltps cmpnltss cmpordps cmpordss cmpunordps cmpunordss cmpps cmpss comiss cvtpi2ps cvtps2pi cvtsi2ss cvtss2si cvttps2pi cvttss2si divps divss ldmxcsr maxps maxss minps minss movaps movhps movlhps movlps movhlps movmskps movntps movss movups mulps mulss orps rcpps rcpss rsqrtps rsqrtss shufps sqrtps sqrtss stmxcsr subps subss ucomiss unpckhps unpcklps xorps fxrstor fxrstor64 fxsave fxsave64 xgetbv xsetbv xsave xsave64 xsaveopt xsaveopt64 xrstor xrstor64 prefetchnta prefetcht0 prefetcht1 prefetcht2 maskmovq movntq pavgb pavgw pextrw pinsrw pmaxsw pmaxub pminsw pminub pmovmskb pmulhuw psadbw pshufw pf2iw pfnacc pfpnacc pi2fw pswapd maskmovdqu clflush movntdq movnti movntpd movdqa movdqu movdq2q movq2dq paddq pmuludq pshufd pshufhw pshuflw pslldq psrldq psubq punpckhqdq punpcklqdq addpd addsd andnpd andpd cmpeqpd cmpeqsd cmplepd cmplesd cmpltpd cmpltsd cmpneqpd cmpneqsd cmpnlepd cmpnlesd cmpnltpd cmpnltsd cmpordpd cmpordsd cmpunordpd cmpunordsd cmppd comisd cvtdq2pd cvtdq2ps cvtpd2dq cvtpd2pi cvtpd2ps cvtpi2pd cvtps2dq cvtps2pd cvtsd2si cvtsd2ss cvtsi2sd cvtss2sd cvttpd2pi cvttpd2dq cvttps2dq cvttsd2si divpd divsd maxpd maxsd minpd minsd movapd movhpd movlpd movmskpd movupd mulpd mulsd orpd shufpd sqrtpd sqrtsd subpd subsd ucomisd unpckhpd unpcklpd xorpd addsubpd addsubps haddpd haddps hsubpd hsubps lddqu movddup movshdup movsldup clgi stgi vmcall vmclear vmfunc vmlaunch vmload vmmcall vmptrld vmptrst vmread vmresume vmrun vmsave vmwrite vmxoff vmxon invept invvpid pabsb pabsw pabsd palignr phaddw phaddd phaddsw phsubw phsubd phsubsw pmaddubsw pmulhrsw pshufb psignb psignw psignd extrq insertq movntsd movntss lzcnt blendpd blendps blendvpd blendvps dppd dpps extractps insertps movntdqa mpsadbw packusdw pblendvb pblendw pcmpeqq pextrb pextrd pextrq phminposuw pinsrb pinsrd pinsrq pmaxsb pmaxsd pmaxud pmaxuw pminsb pminsd pminud pminuw pmovsxbw pmovsxbd pmovsxbq pmovsxwd pmovsxwq pmovsxdq pmovzxbw pmovzxbd pmovzxbq pmovzxwd pmovzxwq pmovzxdq pmuldq pmulld ptest roundpd roundps roundsd roundss crc32 pcmpestri pcmpestrm pcmpistri pcmpistrm pcmpgtq popcnt getsec pfrcpv pfrsqrtv movbe aesenc aesenclast aesdec aesdeclast aesimc aeskeygenassist vaesenc vaesenclast vaesdec vaesdeclast vaesimc vaeskeygenassist vaddpd vaddps vaddsd vaddss vaddsubpd vaddsubps vandpd vandps vandnpd vandnps vblendpd vblendps vblendvpd vblendvps vbroadcastss vbroadcastsd vbroadcastf128 vcmpeq_ospd vcmpeqpd vcmplt_ospd vcmpltpd vcmple_ospd vcmplepd vcmpunord_qpd vcmpunordpd vcmpneq_uqpd vcmpneqpd vcmpnlt_uspd vcmpnltpd vcmpnle_uspd vcmpnlepd vcmpord_qpd vcmpordpd vcmpeq_uqpd vcmpnge_uspd vcmpngepd vcmpngt_uspd vcmpngtpd vcmpfalse_oqpd vcmpfalsepd vcmpneq_oqpd vcmpge_ospd vcmpgepd vcmpgt_ospd vcmpgtpd vcmptrue_uqpd vcmptruepd vcmplt_oqpd vcmple_oqpd vcmpunord_spd vcmpneq_uspd vcmpnlt_uqpd vcmpnle_uqpd vcmpord_spd vcmpeq_uspd vcmpnge_uqpd vcmpngt_uqpd vcmpfalse_ospd vcmpneq_ospd vcmpge_oqpd vcmpgt_oqpd vcmptrue_uspd vcmppd 
vcmpeq_osps vcmpeqps vcmplt_osps vcmpltps vcmple_osps vcmpleps vcmpunord_qps vcmpunordps vcmpneq_uqps vcmpneqps vcmpnlt_usps vcmpnltps vcmpnle_usps vcmpnleps vcmpord_qps vcmpordps vcmpeq_uqps vcmpnge_usps vcmpngeps vcmpngt_usps vcmpngtps vcmpfalse_oqps vcmpfalseps vcmpneq_oqps vcmpge_osps vcmpgeps vcmpgt_osps vcmpgtps vcmptrue_uqps vcmptrueps vcmplt_oqps vcmple_oqps vcmpunord_sps vcmpneq_usps vcmpnlt_uqps vcmpnle_uqps vcmpord_sps vcmpeq_usps vcmpnge_uqps vcmpngt_uqps vcmpfalse_osps vcmpneq_osps vcmpge_oqps vcmpgt_oqps vcmptrue_usps vcmpps vcmpeq_ossd vcmpeqsd vcmplt_ossd vcmpltsd vcmple_ossd vcmplesd vcmpunord_qsd vcmpunordsd vcmpneq_uqsd vcmpneqsd vcmpnlt_ussd vcmpnltsd vcmpnle_ussd vcmpnlesd vcmpord_qsd vcmpordsd vcmpeq_uqsd vcmpnge_ussd vcmpngesd vcmpngt_ussd vcmpngtsd vcmpfalse_oqsd vcmpfalsesd vcmpneq_oqsd vcmpge_ossd vcmpgesd vcmpgt_ossd vcmpgtsd vcmptrue_uqsd vcmptruesd vcmplt_oqsd vcmple_oqsd vcmpunord_ssd vcmpneq_ussd vcmpnlt_uqsd vcmpnle_uqsd vcmpord_ssd vcmpeq_ussd vcmpnge_uqsd vcmpngt_uqsd vcmpfalse_ossd vcmpneq_ossd vcmpge_oqsd vcmpgt_oqsd vcmptrue_ussd vcmpsd vcmpeq_osss vcmpeqss vcmplt_osss vcmpltss vcmple_osss vcmpless vcmpunord_qss vcmpunordss vcmpneq_uqss vcmpneqss vcmpnlt_usss vcmpnltss vcmpnle_usss vcmpnless vcmpord_qss vcmpordss vcmpeq_uqss vcmpnge_usss vcmpngess vcmpngt_usss vcmpngtss vcmpfalse_oqss vcmpfalsess vcmpneq_oqss vcmpge_osss vcmpgess vcmpgt_osss vcmpgtss vcmptrue_uqss vcmptruess vcmplt_oqss vcmple_oqss vcmpunord_sss vcmpneq_usss vcmpnlt_uqss vcmpnle_uqss vcmpord_sss vcmpeq_usss vcmpnge_uqss vcmpngt_uqss vcmpfalse_osss vcmpneq_osss vcmpge_oqss vcmpgt_oqss vcmptrue_usss vcmpss vcomisd vcomiss vcvtdq2pd vcvtdq2ps vcvtpd2dq vcvtpd2ps vcvtps2dq vcvtps2pd vcvtsd2si vcvtsd2ss vcvtsi2sd vcvtsi2ss vcvtss2sd vcvtss2si vcvttpd2dq vcvttps2dq vcvttsd2si vcvttss2si vdivpd vdivps vdivsd vdivss vdppd vdpps vextractf128 vextractps vhaddpd vhaddps vhsubpd vhsubps vinsertf128 vinsertps vlddqu vldqqu vldmxcsr vmaskmovdqu vmaskmovps vmaskmovpd vmaxpd vmaxps vmaxsd vmaxss vminpd vminps vminsd vminss vmovapd vmovaps vmovd vmovq vmovddup vmovdqa vmovqqa vmovdqu vmovqqu vmovhlps vmovhpd vmovhps vmovlhps vmovlpd vmovlps vmovmskpd vmovmskps vmovntdq vmovntqq vmovntdqa vmovntpd vmovntps vmovsd vmovshdup vmovsldup vmovss vmovupd vmovups vmpsadbw vmulpd vmulps vmulsd vmulss vorpd vorps vpabsb vpabsw vpabsd vpacksswb vpackssdw vpackuswb vpackusdw vpaddb vpaddw vpaddd vpaddq vpaddsb vpaddsw vpaddusb vpaddusw vpalignr vpand vpandn vpavgb vpavgw vpblendvb vpblendw vpcmpestri vpcmpestrm vpcmpistri vpcmpistrm vpcmpeqb vpcmpeqw vpcmpeqd vpcmpeqq vpcmpgtb vpcmpgtw vpcmpgtd vpcmpgtq vpermilpd vpermilps vperm2f128 vpextrb vpextrw vpextrd vpextrq vphaddw vphaddd vphaddsw vphminposuw vphsubw vphsubd vphsubsw vpinsrb vpinsrw vpinsrd vpinsrq vpmaddwd vpmaddubsw vpmaxsb vpmaxsw vpmaxsd vpmaxub vpmaxuw vpmaxud vpminsb vpminsw vpminsd vpminub vpminuw vpminud vpmovmskb vpmovsxbw vpmovsxbd vpmovsxbq vpmovsxwd vpmovsxwq vpmovsxdq vpmovzxbw vpmovzxbd vpmovzxbq vpmovzxwd vpmovzxwq vpmovzxdq vpmulhuw vpmulhrsw vpmulhw vpmullw vpmulld vpmuludq vpmuldq vpor vpsadbw vpshufb vpshufd vpshufhw vpshuflw vpsignb vpsignw vpsignd vpslldq vpsrldq vpsllw vpslld vpsllq vpsraw vpsrad vpsrlw vpsrld vpsrlq vptest vpsubb vpsubw vpsubd vpsubq vpsubsb vpsubsw vpsubusb vpsubusw vpunpckhbw vpunpckhwd vpunpckhdq vpunpckhqdq vpunpcklbw vpunpcklwd vpunpckldq vpunpcklqdq vpxor vrcpps vrcpss vrsqrtps vrsqrtss vroundpd vroundps vroundsd vroundss vshufpd vshufps vsqrtpd vsqrtps vsqrtsd vsqrtss vstmxcsr vsubpd vsubps vsubsd vsubss 
vtestps vtestpd vucomisd vucomiss vunpckhpd vunpckhps vunpcklpd vunpcklps vxorpd vxorps vzeroall vzeroupper pclmullqlqdq pclmulhqlqdq pclmullqhqdq pclmulhqhqdq pclmulqdq vpclmullqlqdq vpclmulhqlqdq vpclmullqhqdq vpclmulhqhqdq vpclmulqdq vfmadd132ps vfmadd132pd vfmadd312ps vfmadd312pd vfmadd213ps vfmadd213pd vfmadd123ps vfmadd123pd vfmadd231ps vfmadd231pd vfmadd321ps vfmadd321pd vfmaddsub132ps vfmaddsub132pd vfmaddsub312ps vfmaddsub312pd vfmaddsub213ps vfmaddsub213pd vfmaddsub123ps vfmaddsub123pd vfmaddsub231ps vfmaddsub231pd vfmaddsub321ps vfmaddsub321pd vfmsub132ps vfmsub132pd vfmsub312ps vfmsub312pd vfmsub213ps vfmsub213pd vfmsub123ps vfmsub123pd vfmsub231ps vfmsub231pd vfmsub321ps vfmsub321pd vfmsubadd132ps vfmsubadd132pd vfmsubadd312ps vfmsubadd312pd vfmsubadd213ps vfmsubadd213pd vfmsubadd123ps vfmsubadd123pd vfmsubadd231ps vfmsubadd231pd vfmsubadd321ps vfmsubadd321pd vfnmadd132ps vfnmadd132pd vfnmadd312ps vfnmadd312pd vfnmadd213ps vfnmadd213pd vfnmadd123ps vfnmadd123pd vfnmadd231ps vfnmadd231pd vfnmadd321ps vfnmadd321pd vfnmsub132ps vfnmsub132pd vfnmsub312ps vfnmsub312pd vfnmsub213ps vfnmsub213pd vfnmsub123ps vfnmsub123pd vfnmsub231ps vfnmsub231pd vfnmsub321ps vfnmsub321pd vfmadd132ss vfmadd132sd vfmadd312ss vfmadd312sd vfmadd213ss vfmadd213sd vfmadd123ss vfmadd123sd vfmadd231ss vfmadd231sd vfmadd321ss vfmadd321sd vfmsub132ss vfmsub132sd vfmsub312ss vfmsub312sd vfmsub213ss vfmsub213sd vfmsub123ss vfmsub123sd vfmsub231ss vfmsub231sd vfmsub321ss vfmsub321sd vfnmadd132ss vfnmadd132sd vfnmadd312ss vfnmadd312sd vfnmadd213ss vfnmadd213sd vfnmadd123ss vfnmadd123sd vfnmadd231ss vfnmadd231sd vfnmadd321ss vfnmadd321sd vfnmsub132ss vfnmsub132sd vfnmsub312ss vfnmsub312sd vfnmsub213ss vfnmsub213sd vfnmsub123ss vfnmsub123sd vfnmsub231ss vfnmsub231sd vfnmsub321ss vfnmsub321sd rdfsbase rdgsbase rdrand wrfsbase wrgsbase vcvtph2ps vcvtps2ph adcx adox rdseed clac stac xstore xcryptecb xcryptcbc xcryptctr xcryptcfb xcryptofb montmul xsha1 xsha256 llwpcb slwpcb lwpval lwpins vfmaddpd vfmaddps vfmaddsd vfmaddss vfmaddsubpd vfmaddsubps vfmsubaddpd vfmsubaddps vfmsubpd vfmsubps vfmsubsd vfmsubss vfnmaddpd vfnmaddps vfnmaddsd vfnmaddss vfnmsubpd vfnmsubps vfnmsubsd vfnmsubss vfrczpd vfrczps vfrczsd vfrczss vpcmov vpcomb vpcomd vpcomq vpcomub vpcomud vpcomuq vpcomuw vpcomw vphaddbd vphaddbq vphaddbw vphadddq vphaddubd vphaddubq vphaddubw vphaddudq vphadduwd vphadduwq vphaddwd vphaddwq vphsubbw vphsubdq vphsubwd vpmacsdd vpmacsdqh vpmacsdql vpmacssdd vpmacssdqh vpmacssdql vpmacsswd vpmacssww vpmacswd vpmacsww vpmadcsswd vpmadcswd vpperm vprotb vprotd vprotq vprotw vpshab vpshad vpshaq vpshaw vpshlb vpshld vpshlq vpshlw vbroadcasti128 vpblendd vpbroadcastb vpbroadcastw vpbroadcastd vpbroadcastq vpermd vpermpd vpermps vpermq vperm2i128 vextracti128 vinserti128 vpmaskmovd vpmaskmovq vpsllvd vpsllvq vpsravd vpsrlvd vpsrlvq vgatherdpd vgatherqpd vgatherdps vgatherqps vpgatherdd vpgatherqd vpgatherdq vpgatherqq xabort xbegin xend xtest andn bextr blci blcic blsi blsic blcfill blsfill blcmsk blsmsk blsr blcs bzhi mulx pdep pext rorx sarx shlx shrx tzcnt tzmsk t1mskc valignd valignq vblendmpd vblendmps vbroadcastf32x4 vbroadcastf64x4 vbroadcasti32x4 vbroadcasti64x4 vcompresspd vcompressps vcvtpd2udq vcvtps2udq vcvtsd2usi vcvtss2usi vcvttpd2udq vcvttps2udq vcvttsd2usi vcvttss2usi vcvtudq2pd vcvtudq2ps vcvtusi2sd vcvtusi2ss vexpandpd vexpandps vextractf32x4 vextractf64x4 vextracti32x4 vextracti64x4 vfixupimmpd vfixupimmps vfixupimmsd vfixupimmss vgetexppd vgetexpps vgetexpsd vgetexpss vgetmantpd vgetmantps vgetmantsd 
vgetmantss vinsertf32x4 vinsertf64x4 vinserti32x4 vinserti64x4 vmovdqa32 vmovdqa64 vmovdqu32 vmovdqu64 vpabsq vpandd vpandnd vpandnq vpandq vpblendmd vpblendmq vpcmpltd vpcmpled vpcmpneqd vpcmpnltd vpcmpnled vpcmpd vpcmpltq vpcmpleq vpcmpneqq vpcmpnltq vpcmpnleq vpcmpq vpcmpequd vpcmpltud vpcmpleud vpcmpnequd vpcmpnltud vpcmpnleud vpcmpud vpcmpequq vpcmpltuq vpcmpleuq vpcmpnequq vpcmpnltuq vpcmpnleuq vpcmpuq vpcompressd vpcompressq vpermi2d vpermi2pd vpermi2ps vpermi2q vpermt2d vpermt2pd vpermt2ps vpermt2q vpexpandd vpexpandq vpmaxsq vpmaxuq vpminsq vpminuq vpmovdb vpmovdw vpmovqb vpmovqd vpmovqw vpmovsdb vpmovsdw vpmovsqb vpmovsqd vpmovsqw vpmovusdb vpmovusdw vpmovusqb vpmovusqd vpmovusqw vpord vporq vprold vprolq vprolvd vprolvq vprord vprorq vprorvd vprorvq vpscatterdd vpscatterdq vpscatterqd vpscatterqq vpsraq vpsravq vpternlogd vpternlogq vptestmd vptestmq vptestnmd vptestnmq vpxord vpxorq vrcp14pd vrcp14ps vrcp14sd vrcp14ss vrndscalepd vrndscaleps vrndscalesd vrndscaless vrsqrt14pd vrsqrt14ps vrsqrt14sd vrsqrt14ss vscalefpd vscalefps vscalefsd vscalefss vscatterdpd vscatterdps vscatterqpd vscatterqps vshuff32x4 vshuff64x2 vshufi32x4 vshufi64x2 kandnw kandw kmovw knotw kortestw korw kshiftlw kshiftrw kunpckbw kxnorw kxorw vpbroadcastmb2q vpbroadcastmw2d vpconflictd vpconflictq vplzcntd vplzcntq vexp2pd vexp2ps vrcp28pd vrcp28ps vrcp28sd vrcp28ss vrsqrt28pd vrsqrt28ps vrsqrt28sd vrsqrt28ss vgatherpf0dpd vgatherpf0dps vgatherpf0qpd vgatherpf0qps vgatherpf1dpd vgatherpf1dps vgatherpf1qpd vgatherpf1qps vscatterpf0dpd vscatterpf0dps vscatterpf0qpd vscatterpf0qps vscatterpf1dpd vscatterpf1dps vscatterpf1qpd vscatterpf1qps prefetchwt1 bndmk bndcl bndcu bndcn bndmov bndldx bndstx sha1rnds4 sha1nexte sha1msg1 sha1msg2 sha256rnds2 sha256msg1 sha256msg2 hint_nop0 hint_nop1 hint_nop2 hint_nop3 hint_nop4 hint_nop5 hint_nop6 hint_nop7 hint_nop8 hint_nop9 hint_nop10 hint_nop11 hint_nop12 hint_nop13 hint_nop14 hint_nop15 hint_nop16 hint_nop17 hint_nop18 hint_nop19 hint_nop20 hint_nop21 hint_nop22 hint_nop23 hint_nop24 hint_nop25 hint_nop26 hint_nop27 hint_nop28 hint_nop29 hint_nop30 hint_nop31 hint_nop32 hint_nop33 hint_nop34 hint_nop35 hint_nop36 hint_nop37 hint_nop38 hint_nop39 hint_nop40 hint_nop41 hint_nop42 hint_nop43 hint_nop44 hint_nop45 hint_nop46 hint_nop47 hint_nop48 hint_nop49 hint_nop50 hint_nop51 hint_nop52 hint_nop53 hint_nop54 hint_nop55 hint_nop56 hint_nop57 hint_nop58 hint_nop59 hint_nop60 hint_nop61 hint_nop62 hint_nop63",built_in:"ip eip rip al ah bl bh cl ch dl dh sil dil bpl spl r8b r9b r10b r11b r12b r13b r14b r15b ax bx cx dx si di bp sp r8w r9w r10w r11w r12w r13w r14w r15w eax ebx ecx edx esi edi ebp esp eip r8d r9d r10d r11d r12d r13d r14d r15d rax rbx rcx rdx rsi rdi rbp rsp r8 r9 r10 r11 r12 r13 r14 r15 cs ds es fs gs ss st st0 st1 st2 st3 st4 st5 st6 st7 mm0 mm1 mm2 mm3 mm4 mm5 mm6 mm7 xmm0 xmm1 xmm2 xmm3 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 xmm12 xmm13 xmm14 xmm15 xmm16 xmm17 xmm18 xmm19 xmm20 xmm21 xmm22 xmm23 xmm24 xmm25 xmm26 xmm27 xmm28 xmm29 xmm30 xmm31 ymm0 ymm1 ymm2 ymm3 ymm4 ymm5 ymm6 ymm7 ymm8 ymm9 ymm10 ymm11 ymm12 ymm13 ymm14 ymm15 ymm16 ymm17 ymm18 ymm19 ymm20 ymm21 ymm22 ymm23 ymm24 ymm25 ymm26 ymm27 ymm28 ymm29 ymm30 ymm31 zmm0 zmm1 zmm2 zmm3 zmm4 zmm5 zmm6 zmm7 zmm8 zmm9 zmm10 zmm11 zmm12 zmm13 zmm14 zmm15 zmm16 zmm17 zmm18 zmm19 zmm20 zmm21 zmm22 zmm23 zmm24 zmm25 zmm26 zmm27 zmm28 zmm29 zmm30 zmm31 k0 k1 k2 k3 k4 k5 k6 k7 bnd0 bnd1 bnd2 bnd3 cr0 cr1 cr2 cr3 cr4 cr8 dr0 dr1 dr2 dr3 dr8 tr3 tr4 tr5 tr6 tr7 r0 r1 r2 r3 r4 r5 r6 r7 r0b r1b r2b r3b r4b 
r5b r6b r7b r0w r1w r2w r3w r4w r5w r6w r7w r0d r1d r2d r3d r4d r5d r6d r7d r0h r1h r2h r3h r0l r1l r2l r3l r4l r5l r6l r7l r8l r9l r10l r11l r12l r13l r14l r15l db dw dd dq dt ddq do dy dz resb resw resd resq rest resdq reso resy resz incbin equ times byte word dword qword nosplit rel abs seg wrt strict near far a32 ptr",meta:"%define %xdefine %+ %undef %defstr %deftok %assign %strcat %strlen %substr %rotate %elif %else %endif %if %ifmacro %ifctx %ifidn %ifidni %ifid %ifnum %ifstr %iftoken %ifempty %ifenv %error %warning %fatal %rep %endrep %include %push %pop %repl %pathsearch %depend %use %arg %stacksize %local %line %comment %endcomment .nolist __FILE__ __LINE__ __SECT__ __BITS__ __OUTPUT_FORMAT__ __DATE__ __TIME__ __DATE_NUM__ __TIME_NUM__ __UTC_DATE__ __UTC_TIME__ __UTC_DATE_NUM__ __UTC_TIME_NUM__ __PASS__ struc endstruc istruc at iend align alignb sectalign daz nodaz up down zero default option assume public bits use16 use32 use64 default section segment absolute extern global common cpu float __utf16__ __utf16le__ __utf16be__ __utf32__ __utf32le__ __utf32be__ __float8__ __float16__ __float32__ __float64__ __float80m__ __float80e__ __float128l__ __float128h__ __Infinity__ __QNaN__ __SNaN__ Inf NaN QNaN SNaN float8 float16 float32 float64 float80m float80e float128l float128h __FLOAT_DAZ__ __FLOAT_ROUND__ __FLOAT__"},contains:[s.COMMENT(";","$",{relevance:0}),{className:"number",variants:[{begin:"\\b(?:([0-9][0-9_]*)?\\.[0-9_]*(?:[eE][+-]?[0-9_]+)?|(0[Xx])?[0-9][0-9_]*\\.?[0-9_]*(?:[pP](?:[+-]?[0-9_]+)?)?)\\b",relevance:0},{begin:"\\$[0-9][0-9A-Fa-f]*",relevance:0},{begin:"\\b(?:[0-9A-Fa-f][0-9A-Fa-f_]*[Hh]|[0-9][0-9_]*[DdTt]?|[0-7][0-7_]*[QqOo]|[0-1][0-1_]*[BbYy])\\b"},{begin:"\\b(?:0[Xx][0-9A-Fa-f_]+|0[DdTt][0-9_]+|0[QqOo][0-7_]+|0[BbYy][0-1_]+)\\b"}]},s.QUOTE_STRING_MODE,{className:"string",variants:[{begin:"'",end:"[^\\\\]'"},{begin:"`",end:"[^\\\\]`"}],relevance:0},{className:"symbol",variants:[{begin:"^\\s*[A-Za-z._?][A-Za-z0-9_$#@~.?]*(:|\\s+label)"},{begin:"^\\s*%%[A-Za-z0-9_$#@~.?]*:"}],relevance:0},{className:"subst",begin:"%[0-9]+",relevance:0},{className:"subst",begin:"%!S+",relevance:0},{className:"meta",begin:/^\s*\.[\w_-]+/}]}}}());
\ No newline at end of file
diff --git a/theme/index.hbs b/theme/index.hbs
new file mode 100644
index 0000000..5fd9afb
--- /dev/null
+++ b/theme/index.hbs
@@ -0,0 +1,335 @@
[The 335 added lines are the default mdBook theme/index.hbs page template; its HTML markup did not survive extraction. The Handlebars hooks that remain outline the page: {{ title }} and {{> head}} in the document head; conditionals for is_print, base_url, favicon_svg, favicon_png, print_enable, copy_fonts, additional_css, and mathjax_support; then {{> header}}, the sidebar and menu bar, a search box behind search_enabled, the main {{{ content }}} region, and page-navigation controls; and finally script blocks gated on live_reload_endpoint, google_analytics, playground_line_numbers, playground_copyable, playground_js, search_js, additional_js, and is_print (with a MathJax branch).]