diff --git a/.github/workflows/build_awshelper.yaml b/.github/workflows/build_awshelper.yaml
new file mode 100644
index 000000000..36b5745db
--- /dev/null
+++ b/.github/workflows/build_awshelper.yaml
@@ -0,0 +1,19 @@
+name: Build awshelper image
+
+# Always build this image, because it packages all of the cloud-automation files.
+# Some jobs depend on arbitrary files in this repo, so they need to be tested against an updated awshelper image.
+on: push
+
+jobs:
+ awshelper:
+ name: awshelper
+ uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+ with:
+ DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile"
+ OVERRIDE_REPO_NAME: "awshelper"
+ secrets:
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+ ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+ QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+ QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+
diff --git a/.github/workflows/build_python3.10.yaml b/.github/workflows/build_python3.10.yaml
new file mode 100644
index 000000000..80d2d7623
--- /dev/null
+++ b/.github/workflows/build_python3.10.yaml
@@ -0,0 +1,23 @@
+name: Build Python 3.10 image
+
+on:
+ push:
+ paths:
+ - .github/workflows/build_python3.10.yaml
+ - Docker/python-nginx/python3.10-buster/**
+
+jobs:
+ python_3-10:
+ name: Python 3.10
+ uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+ with:
+ DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.10-buster/Dockerfile"
+ DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.10-buster"
+ OVERRIDE_REPO_NAME: "python"
+ OVERRIDE_TAG_NAME: "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
+ secrets:
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+ ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+ QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+ QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+
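Note on the OVERRIDE_TAG_NAME value used above and in the Python 3.9 workflow below: it embeds a small shell expression that is presumably expanded by a shell step inside the reusable image_build_push workflow (an assumption, since `with:` inputs are not shell-expanded by the caller). Under standard GitHub Actions semantics for GITHUB_REF, it strips the `refs/heads/` (or `refs/tags/`) prefix and replaces slashes with underscores so the branch name is safe to use as an image tag. A minimal sketch of what the expression evaluates to, using a hypothetical branch name:

    GITHUB_REF="refs/heads/feat/python-3.10"   # example ref for a branch push
    echo "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
    # -> python3.10-buster-feat_python-3.10
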
diff --git a/.github/workflows/build_python3.9.yaml b/.github/workflows/build_python3.9.yaml
new file mode 100644
index 000000000..540e0d4ec
--- /dev/null
+++ b/.github/workflows/build_python3.9.yaml
@@ -0,0 +1,23 @@
+name: Build Python 3.9 image
+
+on:
+ push:
+ paths:
+ - .github/workflows/build_python3.9.yaml
+ - Docker/python-nginx/python3.9-buster/**
+
+jobs:
+ python_3-9:
+ name: Python 3.9
+ uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+ with:
+ DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.9-buster/Dockerfile"
+ DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.9-buster"
+ OVERRIDE_REPO_NAME: "python"
+ OVERRIDE_TAG_NAME: "python3.9-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
+ secrets:
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+ ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+ QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+ QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+
diff --git a/.github/workflows/image_build_push.yaml b/.github/workflows/image_build_push.yaml
deleted file mode 100644
index 51543f0fe..000000000
--- a/.github/workflows/image_build_push.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: Build Python Base Images and Push to Quay and ECR
-
-on: push
-
-jobs:
- python_3-9:
- name: Python 3.9 Build and Push
- uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
- with:
- DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.9-buster/Dockerfile"
- DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.9-buster"
- OVERRIDE_REPO_NAME: "python"
- OVERRIDE_TAG_NAME: "python3.9-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
- secrets:
- ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
- ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
- QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
- QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
- python_3-10:
- name: Python 3.10 Build and Push
- uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
- with:
- DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.10-buster/Dockerfile"
- DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.10-buster"
- OVERRIDE_REPO_NAME: "python"
- OVERRIDE_TAG_NAME: "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)"
- secrets:
- ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
- ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
- QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
- QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
- awshelper:
- name: AwsHelper Build and Push
- uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
- with:
- DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile"
- OVERRIDE_REPO_NAME: "awshelper"
- secrets:
- ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
- ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
- QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
- QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
diff --git a/.github/workflows/image_build_push_jenkins.yaml b/.github/workflows/image_build_push_jenkins.yaml
new file mode 100644
index 000000000..094417fe5
--- /dev/null
+++ b/.github/workflows/image_build_push_jenkins.yaml
@@ -0,0 +1,65 @@
+name: Build Jenkins images
+
+on:
+ push:
+ paths:
+ - .github/workflows/image_build_push_jenkins.yaml
+ - Docker/jenkins/**
+
+jobs:
+ jenkins:
+ name: Jenkins
+ uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+ with:
+ DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins/Dockerfile"
+ DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins"
+ OVERRIDE_REPO_NAME: "jenkins"
+ USE_QUAY_ONLY: true
+ BUILD_PLATFORMS: "linux/amd64"
+ secrets:
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+ ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+ QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+ QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+ jenkins2:
+ name: Jenkins2
+ uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+ with:
+ DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins2/Dockerfile"
+ DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins2"
+ OVERRIDE_REPO_NAME: "jenkins2"
+ USE_QUAY_ONLY: true
+ BUILD_PLATFORMS: "linux/amd64"
+ secrets:
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+ ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+ QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+ QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+ jenkins-ci-worker:
+ name: Jenkins-CI-Worker
+ uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+ with:
+ DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-CI-Worker/Dockerfile"
+ DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-CI-Worker"
+ OVERRIDE_REPO_NAME: "gen3-ci-worker"
+ USE_QUAY_ONLY: true
+ BUILD_PLATFORMS: "linux/amd64"
+ secrets:
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+ ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+ QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+ QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
+ jenkins-qa-worker:
+ name: Jenkins-QA-Worker
+ uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+ with:
+ DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-Worker/Dockerfile"
+ DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-Worker"
+ OVERRIDE_REPO_NAME: "gen3-qa-worker"
+ USE_QUAY_ONLY: true
+ BUILD_PLATFORMS: "linux/amd64"
+ secrets:
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+ ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+ QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+ QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
diff --git a/.github/workflows/image_build_push_squid.yaml b/.github/workflows/image_build_push_squid.yaml
new file mode 100644
index 000000000..ce1761d3c
--- /dev/null
+++ b/.github/workflows/image_build_push_squid.yaml
@@ -0,0 +1,22 @@
+name: Build Squid images
+
+on:
+ push:
+ paths:
+ - .github/workflows/image_build_push_squid.yaml
+ - Docker/squid/**
+
+jobs:
+ squid:
+ name: Squid image
+ uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master
+ with:
+ DOCKERFILE_LOCATION: "./Docker/squid/Dockerfile"
+ DOCKERFILE_BUILD_CONTEXT: "./Docker/squid"
+ OVERRIDE_REPO_NAME: "squid"
+ USE_QUAY_ONLY: true
+ secrets:
+ ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }}
+ ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }}
+ QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }}
+ QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }}
diff --git a/.gitignore b/.gitignore
index dbce5bd82..299bdc807 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,6 +14,7 @@ terraform
*~
*.swp
.DS_Store
+.dccache
kube/services/fluentd/varlogs/
kube/services/fluentd/dockerlogs/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2e3ce795b..82034495d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
repos:
- repo: git@github.com:Yelp/detect-secrets
- rev: v0.13.1
+ rev: v1.4.0
hooks:
- id: detect-secrets
args: ['--baseline', '.secrets.baseline']
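The hook bump from detect-secrets v0.13.1 to v1.4.0 changes the baseline schema (the new "version", "filters_used", and per-result "filename" fields visible in the hunk below), so .secrets.baseline has to be regenerated with the newer tool. A sketch of how that regeneration is typically done with the stock detect-secrets CLI (commands are an assumption, not taken from this repo's tooling):

    pip install detect-secrets==1.4.0
    # rewrite the existing baseline in the 1.4.0 format, preserving prior results
    detect-secrets scan --baseline .secrets.baseline
    # optionally review the reported findings interactively
    detect-secrets audit .secrets.baseline
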
diff --git a/.secrets.baseline b/.secrets.baseline
index 5bb288384..0c4eba0a8 100644
--- a/.secrets.baseline
+++ b/.secrets.baseline
@@ -1,19 +1,18 @@
{
- "exclude": {
- "files": "^.secrets.baseline$",
- "lines": null
- },
- "generated_at": "2022-04-07T20:39:12Z",
+ "version": "1.4.0",
"plugins_used": [
+ {
+ "name": "ArtifactoryDetector"
+ },
{
"name": "AWSKeyDetector"
},
{
- "name": "ArtifactoryDetector"
+ "name": "AzureStorageKeyDetector"
},
{
- "base64_limit": 4.5,
- "name": "Base64HighEntropyString"
+ "name": "Base64HighEntropyString",
+ "limit": 4.5
},
{
"name": "BasicAuthDetector"
@@ -22,8 +21,14 @@
"name": "CloudantDetector"
},
{
- "hex_limit": 3,
- "name": "HexHighEntropyString"
+ "name": "DiscordBotTokenDetector"
+ },
+ {
+ "name": "GitHubTokenDetector"
+ },
+ {
+ "name": "HexHighEntropyString",
+ "limit": 3.0
},
{
"name": "IbmCloudIamDetector"
@@ -35,21 +40,30 @@
"name": "JwtTokenDetector"
},
{
- "keyword_exclude": null,
- "name": "KeywordDetector"
+ "name": "KeywordDetector",
+ "keyword_exclude": ""
},
{
"name": "MailchimpDetector"
},
+ {
+ "name": "NpmDetector"
+ },
{
"name": "PrivateKeyDetector"
},
+ {
+ "name": "SendGridDetector"
+ },
{
"name": "SlackDetector"
},
{
"name": "SoftlayerDetector"
},
+ {
+ "name": "SquareOAuthDetector"
+ },
{
"name": "StripeDetector"
},
@@ -57,2041 +71,3671 @@
"name": "TwilioKeyDetector"
}
],
+ "filters_used": [
+ {
+ "path": "detect_secrets.filters.allowlist.is_line_allowlisted"
+ },
+ {
+ "path": "detect_secrets.filters.common.is_baseline_file",
+ "filename": ".secrets.baseline"
+ },
+ {
+ "path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies",
+ "min_level": 2
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_indirect_reference"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_likely_id_string"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_lock_file"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_not_alphanumeric_string"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_potential_uuid"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_sequential_string"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_swagger_file"
+ },
+ {
+ "path": "detect_secrets.filters.heuristic.is_templated_secret"
+ }
+ ],
"results": {
"Chef/repo/data_bags/README.md": [
{
- "hashed_secret": "8a9250639e092d90f164792e35073a9395bff366",
- "is_verified": false,
- "line_number": 45,
- "type": "Secret Keyword"
- },
- {
+ "type": "Secret Keyword",
+ "filename": "Chef/repo/data_bags/README.md",
"hashed_secret": "6367c48dd193d56ea7b0baad25b19455e529f5ee",
"is_verified": false,
- "line_number": 51,
- "type": "Secret Keyword"
+ "line_number": 38
}
],
- "Docker/Jenkins-CI-Worker/Dockerfile": [
+ "Docker/sidecar/service.key": [
{
- "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603",
+ "type": "Private Key",
+ "filename": "Docker/sidecar/service.key",
+ "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9",
"is_verified": false,
- "line_number": 124,
- "type": "Secret Keyword"
+ "line_number": 1
}
],
- "Docker/Jenkins-Worker/Dockerfile": [
+ "Jenkins/Stacks/Jenkins/jenkins.env.sample": [
{
- "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603",
+ "type": "Secret Keyword",
+ "filename": "Jenkins/Stacks/Jenkins/jenkins.env.sample",
+ "hashed_secret": "f41a52528dd2d592d2c05de5f388101c2948aa98",
"is_verified": false,
- "line_number": 132,
- "type": "Secret Keyword"
+ "line_number": 5
}
],
- "Docker/Jenkins/Dockerfile": [
+ "Jenkinsfile": [
{
- "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603",
+ "type": "Secret Keyword",
+ "filename": "Jenkinsfile",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
"is_verified": false,
- "line_number": 113,
- "type": "Secret Keyword"
- }
- ],
- "Docker/sidecar/service.key": [
+ "line_number": 144
+ },
{
- "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9",
+ "type": "Secret Keyword",
+ "filename": "Jenkinsfile",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 1,
- "type": "Private Key"
+ "line_number": 147
}
],
- "Jenkins/Stacks/Jenkins/jenkins.env.sample": [
+ "ansible/roles/slurm/README.md": [
{
- "hashed_secret": "eecee33686ac5861c2a7edc8b46bd0e5432bfddd",
+ "type": "Base64 High Entropy String",
+ "filename": "ansible/roles/slurm/README.md",
+ "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42",
"is_verified": false,
- "line_number": 5,
- "type": "Secret Keyword"
+ "line_number": 86
}
],
- "ansible/roles/awslogs/defaults/main.yaml": [
+ "apis_configs/fence_settings.py": [
{
- "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684",
+ "type": "Basic Auth Credentials",
+ "filename": "apis_configs/fence_settings.py",
+ "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3",
"is_verified": false,
- "line_number": 30,
- "type": "Basic Auth Credentials"
+ "line_number": 80
}
],
- "ansible/roles/slurm/README.md": [
+ "apis_configs/peregrine_settings.py": [
{
- "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42",
+ "type": "Basic Auth Credentials",
+ "filename": "apis_configs/peregrine_settings.py",
+ "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3",
"is_verified": false,
- "line_number": 86,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 46
+ }
+ ],
+ "apis_configs/sheepdog_settings.py": [
{
- "hashed_secret": "579649582303921502d9e6d3f8755f13fdd2b476",
+ "type": "Basic Auth Credentials",
+ "filename": "apis_configs/sheepdog_settings.py",
+ "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3",
"is_verified": false,
- "line_number": 86,
- "type": "Secret Keyword"
+ "line_number": 46
}
],
- "apis_configs/config_helper.py": [
+ "aws-inspec/kubernetes/chef_inspec-cron.yaml": [
{
- "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f",
+ "type": "Secret Keyword",
+ "filename": "aws-inspec/kubernetes/chef_inspec-cron.yaml",
+ "hashed_secret": "a3ba27250861948a554629a0e21168821ddfa9f1",
"is_verified": false,
- "line_number": 66,
- "type": "Basic Auth Credentials"
+ "line_number": 35
}
],
- "apis_configs/fence_credentials.json": [
+ "doc/api.md": [
{
- "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
+ "type": "Hex High Entropy String",
+ "filename": "doc/api.md",
+ "hashed_secret": "625de83a7517422051911680cc803921ff99db90",
"is_verified": false,
- "line_number": 23,
- "type": "Secret Keyword"
+ "line_number": 47
}
],
- "apis_configs/fence_settings.py": [
+ "doc/gen3OnK8s.md": [
{
- "hashed_secret": "3ef0fb8a603abdc0b6caac44a23fdc6792f77ddf",
+ "type": "Secret Keyword",
+ "filename": "doc/gen3OnK8s.md",
+ "hashed_secret": "55c100ba37d2df35ec1e5f5d6302f060387df6cc",
"is_verified": false,
- "line_number": 6,
- "type": "Basic Auth Credentials"
+ "line_number": 113
},
{
- "hashed_secret": "b60d121b438a380c343d5ec3c2037564b82ffef3",
+ "type": "Secret Keyword",
+ "filename": "doc/gen3OnK8s.md",
+ "hashed_secret": "262d8e9b8ac5f06e7612dfb608f7267f88679801",
"is_verified": false,
- "line_number": 58,
- "type": "Secret Keyword"
+ "line_number": 120
},
{
- "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3",
+ "type": "Secret Keyword",
+ "filename": "doc/gen3OnK8s.md",
+ "hashed_secret": "1c17e556736c4d23933f99d199e7c2c572895fd2",
+ "is_verified": false,
+ "line_number": 143
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "doc/gen3OnK8s.md",
+ "hashed_secret": "76a4acaf31b815aa2c41cc2a2176b11fa9edf00a",
+ "is_verified": false,
+ "line_number": 145
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "doc/gen3OnK8s.md",
+ "hashed_secret": "9d678cbce5a343920f754d5836f03346ee01cde5",
"is_verified": false,
- "line_number": 80,
- "type": "Basic Auth Credentials"
+ "line_number": 154
}
],
- "apis_configs/indexd_settings.py": [
+ "files/scripts/psql-fips-fix.sh": [
{
- "hashed_secret": "0a0d18c85e096611b5685b62bc60ec534d19bacc",
+ "type": "Secret Keyword",
+ "filename": "files/scripts/psql-fips-fix.sh",
+ "hashed_secret": "2f1aa1e2a58704b452a5dd60ab1bd2b761bf296a",
"is_verified": false,
- "line_number": 59,
- "type": "Basic Auth Credentials"
+ "line_number": 9
}
],
- "apis_configs/peregrine_settings.py": [
+ "gen3/bin/bucket-manifest.sh": [
{
- "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3",
+ "type": "Secret Keyword",
+ "filename": "gen3/bin/bucket-manifest.sh",
+ "hashed_secret": "2be88ca4242c76e8253ac62474851065032d6833",
"is_verified": false,
- "line_number": 46,
- "type": "Basic Auth Credentials"
+ "line_number": 58
}
],
- "apis_configs/sheepdog_settings.py": [
+ "gen3/bin/bucket-replicate.sh": [
{
- "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3",
+ "type": "Secret Keyword",
+ "filename": "gen3/bin/bucket-replicate.sh",
+ "hashed_secret": "2be88ca4242c76e8253ac62474851065032d6833",
"is_verified": false,
- "line_number": 46,
- "type": "Basic Auth Credentials"
+ "line_number": 39
}
],
- "doc/Gen3-data-upload.md": [
+ "gen3/bin/secrets.sh": [
{
- "hashed_secret": "b8bd20d4a2701dc3aba0efbbf325f1359392d93e",
+ "type": "Secret Keyword",
+ "filename": "gen3/bin/secrets.sh",
+ "hashed_secret": "fb6220478aaba649aac37271a1d7c6317abc03a6",
"is_verified": false,
- "line_number": 26,
- "type": "Secret Keyword"
+ "line_number": 135
}
],
- "doc/api.md": [
+ "gen3/lib/aws.sh": [
{
- "hashed_secret": "625de83a7517422051911680cc803921ff99db90",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/aws.sh",
+ "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce",
"is_verified": false,
- "line_number": 47,
- "type": "Hex High Entropy String"
+ "line_number": 640
}
],
- "doc/gen3OnK8s.md": [
+ "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [
{
- "hashed_secret": "2db6d21d365f544f7ca3bcfb443ac96898a7a069",
+ "type": "Basic Auth Credentials",
+ "filename": "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml",
+ "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3",
"is_verified": false,
- "line_number": 113,
- "type": "Secret Keyword"
- },
+ "line_number": 33
+ }
+ ],
+ "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json": [
{
- "hashed_secret": "ff9ee043d85595eb255c05dfe32ece02a53efbb2",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json",
+ "hashed_secret": "0447a636536df0264b2000403fbefd69f603ceb1",
"is_verified": false,
- "line_number": 143,
- "type": "Secret Keyword"
+ "line_number": 54
},
{
- "hashed_secret": "70374248fd7129088fef42b8f568443f6dce3a48",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json",
+ "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966",
"is_verified": false,
- "line_number": 170,
- "type": "Secret Keyword"
+ "line_number": 60
},
{
- "hashed_secret": "bcf22dfc6fb76b7366b1f1675baf2332a0e6a7ce",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 189,
- "type": "Secret Keyword"
+ "line_number": 108
}
],
- "doc/kube-setup-data-ingestion-job.md": [
+ "gen3/lib/onprem.sh": [
{
- "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/onprem.sh",
+ "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3",
"is_verified": false,
- "line_number": 30,
- "type": "Secret Keyword"
- }
- ],
- "doc/logs.md": [
+ "line_number": 68
+ },
{
- "hashed_secret": "9addbf544119efa4a64223b649750a510f0d463f",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/onprem.sh",
+ "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70",
"is_verified": false,
- "line_number": 6,
- "type": "Secret Keyword"
+ "line_number": 84
}
],
- "doc/slurm_cluster.md": [
+ "gen3/lib/testData/default/expectedFenceResult.yaml": [
{
- "hashed_secret": "2ace62c1befa19e3ea37dd52be9f6d508c5163e6",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 184,
- "type": "Secret Keyword"
- }
- ],
- "files/dashboard/usage-reports/package-lock.json": [
+ "line_number": 68
+ },
{
- "hashed_secret": "65ecd0650541b6caecdb6986f1871c2e6a95bdfe",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 10,
- "type": "Base64 High Entropy String"
+ "line_number": 71
},
{
- "hashed_secret": "e35a49e53bb97044b35cc0e4d963b4ac49e9ac7e",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 15,
- "type": "Base64 High Entropy String"
- }
- ],
- "gen3/bin/api.sh": [
+ "line_number": 74
+ },
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 407,
- "type": "Secret Keyword"
+ "line_number": 84
},
{
- "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 477,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-dev-namespace.sh": [
+ "line_number": 87
+ },
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 135,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-argo.sh": [
+ "line_number": 90
+ },
{
- "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 182,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-certs.sh": [
+ "line_number": 93
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
+ "is_verified": false,
+ "line_number": 96
+ },
{
- "hashed_secret": "2e9ee120fd25e31048598693aca91d5473898a99",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedFenceResult.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 50,
- "type": "Secret Keyword"
+ "line_number": 99
}
],
- "gen3/bin/kube-setup-dashboard.sh": [
+ "gen3/lib/testData/default/expectedSheepdogResult.yaml": [
{
- "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml",
+ "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d",
"is_verified": false,
- "line_number": 40,
- "type": "Secret Keyword"
+ "line_number": 60
},
{
- "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 41,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-data-ingestion-job.sh": [
+ "line_number": 63
+ },
{
- "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml",
+ "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2",
"is_verified": false,
- "line_number": 37,
- "type": "Secret Keyword"
+ "line_number": 69
},
{
- "hashed_secret": "8695a632956b1b0ea7b66993dcc98732da39148c",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 102,
- "type": "Secret Keyword"
+ "line_number": 72
}
],
- "gen3/bin/kube-setup-dicom-server.sh": [
+ "gen3/lib/testData/etlconvert/expected2.yaml": [
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474",
"is_verified": false,
- "line_number": 43,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-jenkins.sh": [
+ "line_number": 10
+ },
{
- "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55",
"is_verified": false,
- "line_number": 18,
- "type": "Secret Keyword"
+ "line_number": 13
},
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26",
"is_verified": false,
- "line_number": 22,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-metadata.sh": [
+ "line_number": 16
+ },
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12",
"is_verified": false,
- "line_number": 35,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-revproxy.sh": [
+ "line_number": 18
+ },
{
- "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424",
"is_verified": false,
- "line_number": 32,
- "type": "Secret Keyword"
+ "line_number": 33
},
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba",
"is_verified": false,
- "line_number": 49,
- "type": "Secret Keyword"
+ "line_number": 35
},
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/expected2.yaml",
+ "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22",
"is_verified": false,
- "line_number": 51,
- "type": "Secret Keyword"
+ "line_number": 36
}
],
- "gen3/bin/kube-setup-secrets.sh": [
+ "gen3/lib/testData/etlconvert/users2.yaml": [
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55",
"is_verified": false,
- "line_number": 79,
- "type": "Secret Keyword"
+ "line_number": 543
},
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26",
"is_verified": false,
- "line_number": 82,
- "type": "Secret Keyword"
+ "line_number": 553
},
{
- "hashed_secret": "6f7531b95bbc99ac25a5cc82edb825f319c5dee8",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474",
"is_verified": false,
- "line_number": 95,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-sftp.sh": [
+ "line_number": 558
+ },
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12",
"is_verified": false,
- "line_number": 36,
- "type": "Secret Keyword"
+ "line_number": 568
},
{
- "hashed_secret": "83d11e3aec005a3b9a2077c6800683e202a95af4",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424",
"is_verified": false,
- "line_number": 51,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-sheepdog.sh": [
+ "line_number": 643
+ },
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba",
+ "is_verified": false,
+ "line_number": 653
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/lib/testData/etlconvert/users2.yaml",
+ "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22",
"is_verified": false,
- "line_number": 33,
- "type": "Secret Keyword"
+ "line_number": 658
}
],
- "gen3/bin/kube-setup-sower-jobs.sh": [
+ "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml": [
{
- "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 25,
- "type": "Secret Keyword"
+ "line_number": 71
},
{
- "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 26,
- "type": "Secret Keyword"
+ "line_number": 74
},
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 120,
- "type": "Secret Keyword"
+ "line_number": 77
},
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 122,
- "type": "Secret Keyword"
- }
- ],
- "gen3/bin/kube-setup-ssjdispatcher.sh": [
+ "line_number": 87
+ },
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 117,
- "type": "Secret Keyword"
+ "line_number": 90
},
{
- "hashed_secret": "7992309146efaa8da936e34b0bd33242cd0e9f93",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 184,
- "type": "Secret Keyword"
+ "line_number": 93
},
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 197,
- "type": "Secret Keyword"
- }
- ],
- "gen3/lib/aws.sh": [
+ "line_number": 96
+ },
{
- "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
"is_verified": false,
- "line_number": 550,
- "type": "Secret Keyword"
+ "line_number": 99
},
{
- "hashed_secret": "5b4b6c62d3d99d202f095c38c664eded8f640ce8",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 570,
- "type": "Secret Keyword"
+ "line_number": 102
}
],
- "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [
+ "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml": [
{
- "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml",
+ "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d",
+ "is_verified": false,
+ "line_number": 63
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
+ "is_verified": false,
+ "line_number": 66
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml",
+ "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2",
"is_verified": false,
- "line_number": 33,
- "type": "Basic Auth Credentials"
+ "line_number": 72
},
{
- "hashed_secret": "5d07e1b80e448a213b392049888111e1779a52db",
+ "type": "Secret Keyword",
+ "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 75
+ }
+ ],
+ "gen3/test/secretsTest.sh": [
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/secretsTest.sh",
+ "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938",
"is_verified": false,
- "line_number": 286,
- "type": "Secret Keyword"
+ "line_number": 25
}
],
- "gen3/lib/bootstrap/templates/Gen3Secrets/creds.json": [
+ "gen3/test/terraformTest.sh": [
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce",
+ "is_verified": false,
+ "line_number": 156
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "d869db7fe62fb07c25a0403ecaea55031744b5fb",
+ "is_verified": false,
+ "line_number": 163
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009",
+ "is_verified": false,
+ "line_number": 172
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009",
+ "is_verified": false,
+ "line_number": 172
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef",
+ "is_verified": false,
+ "line_number": 175
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef",
+ "is_verified": false,
+ "line_number": 175
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "212e1d3823c8c9af9e4c0c172164ee292b9a6768",
+ "is_verified": false,
+ "line_number": 311
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "cb80dbb67a1a5bdf4957eea1473789f1c65357c6",
+ "is_verified": false,
+ "line_number": 312
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "5f35c25f4bf588b5fad46e249fcd9221f5257ce4",
+ "is_verified": false,
+ "line_number": 313
+ },
{
- "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
+ "type": "Secret Keyword",
+ "filename": "gen3/test/terraformTest.sh",
+ "hashed_secret": "5308421b43dde5775f1993bd25a8163070d65598",
"is_verified": false,
- "line_number": 26,
- "type": "Secret Keyword"
+ "line_number": 314
}
],
- "gen3/lib/bootstrap/templates/Gen3Secrets/g3auto/dbfarm/servers.json": [
+ "kube/services/access-backend/access-backend-deploy.yaml": [
{
- "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
+ "type": "Secret Keyword",
+ "filename": "kube/services/access-backend/access-backend-deploy.yaml",
+ "hashed_secret": "dbf88a0c3d905c669c0fd13bf8172bb34d4b1168",
"is_verified": false,
- "line_number": 5,
- "type": "Secret Keyword"
+ "line_number": 60
}
],
- "gen3/lib/logs/utils.sh": [
+ "kube/services/acronymbot/acronymbot-deploy.yaml": [
{
- "hashed_secret": "76143b4ffc8aa2a53f9700ce229f904e69f1e8b5",
+ "type": "Secret Keyword",
+ "filename": "kube/services/acronymbot/acronymbot-deploy.yaml",
+ "hashed_secret": "600833390a6b9891d0d8a5f6e3326abb237ac8ca",
"is_verified": false,
- "line_number": 3,
- "type": "Secret Keyword"
+ "line_number": 49
}
],
- "gen3/lib/manifestDefaults/hatchery/hatchery.json": [
+ "kube/services/arborist/arborist-deploy-2.yaml": [
{
- "hashed_secret": "0da0e0005ca04acb407af2681d0bede6d9406039",
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy-2.yaml",
+ "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab",
"is_verified": false,
- "line_number": 78,
- "type": "Secret Keyword"
+ "line_number": 59
}
],
- "gen3/lib/onprem.sh": [
+ "kube/services/arborist/arborist-deploy.yaml": [
{
- "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3",
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 68,
- "type": "Secret Keyword"
+ "line_number": 64
},
{
- "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70",
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 84,
- "type": "Secret Keyword"
- }
- ],
- "gen3/lib/secrets/rotate-postgres.sh": [
+ "line_number": 67
+ },
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 162,
- "type": "Secret Keyword"
+ "line_number": 70
},
{
- "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f",
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 250,
- "type": "Secret Keyword"
+ "line_number": 77
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 80
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 83
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/arborist/arborist-deploy.yaml",
+ "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73",
+ "is_verified": false,
+ "line_number": 86
}
],
- "gen3/lib/testData/etlconvert/expected2.yaml": [
+ "kube/services/argo/workflows/fence-usersync-wf.yaml": [
{
- "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474",
+ "type": "Secret Keyword",
+ "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 10,
- "type": "Base64 High Entropy String"
+ "line_number": 108
},
{
- "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55",
+ "type": "Secret Keyword",
+ "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 13,
- "type": "Base64 High Entropy String"
+ "line_number": 111
},
{
- "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26",
+ "type": "Secret Keyword",
+ "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 16,
- "type": "Base64 High Entropy String"
+ "line_number": 114
},
{
- "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12",
+ "type": "Secret Keyword",
+ "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 18,
- "type": "Base64 High Entropy String"
+ "line_number": 117
},
{
- "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424",
+ "type": "Secret Keyword",
+ "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml",
+ "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73",
"is_verified": false,
- "line_number": 33,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 120
+ }
+ ],
+ "kube/services/argocd/values.yaml": [
{
- "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba",
+ "type": "Secret Keyword",
+ "filename": "kube/services/argocd/values.yaml",
+ "hashed_secret": "bfc1b86ce643b65bd540989213254b01fd6ad418",
+ "is_verified": false,
+ "line_number": 1489
+ }
+ ],
+ "kube/services/arranger/arranger-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/arranger/arranger-deploy.yaml",
+ "hashed_secret": "0db22b31c9add2d3c76743c0ac6fbc99bb8b4761",
"is_verified": false,
- "line_number": 35,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22",
+ "type": "Secret Keyword",
+ "filename": "kube/services/arranger/arranger-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 36,
- "type": "Base64 High Entropy String"
+ "line_number": 64
}
],
- "gen3/test/secretsTest.sh": [
+ "kube/services/audit-service/audit-service-deploy.yaml": [
{
- "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938",
+ "type": "Secret Keyword",
+ "filename": "kube/services/audit-service/audit-service-deploy.yaml",
+ "hashed_secret": "42cde1c58c36d8bb5804a076e55ac6ec07ef99fc",
"is_verified": false,
- "line_number": 25,
- "type": "Secret Keyword"
+ "line_number": 64
}
],
- "gen3/test/terraformTest.sh": [
+ "kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml",
+ "hashed_secret": "7f834ccb442433fc12ec9532f75c3a4b6a748d4c",
+ "is_verified": false,
+ "line_number": 46
+ }
+ ],
+ "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml": [
{
- "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d",
+ "type": "Secret Keyword",
+ "filename": "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 156,
- "type": "Secret Keyword"
+ "line_number": 56
},
{
- "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009",
+ "type": "Secret Keyword",
+ "filename": "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml",
+ "hashed_secret": "5949b79e0c7082dc78d543cde662871a4f8b8913",
+ "is_verified": false,
+ "line_number": 59
+ }
+ ],
+ "kube/services/cogwheel/cogwheel-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/cogwheel/cogwheel-deploy.yaml",
+ "hashed_secret": "09b772df628fd10bca646b6a877eb661122210ab",
+ "is_verified": false,
+ "line_number": 35
+ }
+ ],
+ "kube/services/cohort-middleware/cohort-middleware-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/cohort-middleware/cohort-middleware-deploy.yaml",
+ "hashed_secret": "bf22f6c4bd03572f1ef593efc3eb1a7e0b6dcab4",
+ "is_verified": false,
+ "line_number": 62
+ }
+ ],
+ "kube/services/dashboard/dashboard-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/dashboard/dashboard-deploy.yaml",
+ "hashed_secret": "9e722d12ce045c8718ab803ed465b2fbe199f3d3",
+ "is_verified": false,
+ "line_number": 61
+ }
+ ],
+ "kube/services/datadog/values.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/datadog/values.yaml",
+ "hashed_secret": "4a8ce7ae6a8a7f2624e232b61b18c2ac9789c44b",
+ "is_verified": false,
+ "line_number": 23
+ }
+ ],
+ "kube/services/datasim/datasim-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/datasim/datasim-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 63
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/datasim/datasim-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 66
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/datasim/datasim-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 72
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/datasim/datasim-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 76
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/datasim/datasim-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
+ "is_verified": false,
+ "line_number": 79
+ }
+ ],
+ "kube/services/dicom-server/dicom-server-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/dicom-server/dicom-server-deploy.yaml",
+ "hashed_secret": "706168ac2565a93cceffe2202ac45d3d31c075fb",
+ "is_verified": false,
+ "line_number": 40
+ }
+ ],
+ "kube/services/fence/fence-canary-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 68
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 71
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 74
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 84
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 87
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 90
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
+ "is_verified": false,
+ "line_number": 93
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
+ "is_verified": false,
+ "line_number": 96
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-canary-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 99
+ }
+ ],
+ "kube/services/fence/fence-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 71
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 74
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 77
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 87
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 90
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 93
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
+ "is_verified": false,
+ "line_number": 96
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
+ "is_verified": false,
+ "line_number": 99
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fence/fence-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 102
+ }
+ ],
+ "kube/services/fenceshib/fenceshib-canary-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 62
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 65
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 68
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 78
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 81
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 84
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
+ "is_verified": false,
+ "line_number": 87
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
+ "is_verified": false,
+ "line_number": 90
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 93
+ }
+ ],
+ "kube/services/fenceshib/fenceshib-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 69
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 72
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 75
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 85
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 88
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 91
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
+ "is_verified": false,
+ "line_number": 94
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
+ "is_verified": false,
+ "line_number": 97
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 100
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/fenceshib/fenceshib-deploy.yaml",
+ "hashed_secret": "6c4789c3be186fd5dcbf06723462ccdd2c86dc37",
+ "is_verified": false,
+ "line_number": 103
+ }
+ ],
+ "kube/services/frontend-framework/frontend-framework-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 54
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml",
+ "hashed_secret": "6607b403f74e62246fc6a3c938feffc5a34a7e49",
+ "is_verified": false,
+ "line_number": 57
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml",
+ "hashed_secret": "4b0bb3e58651fe56ee23e59aa6a3cb96dc61ddd2",
+ "is_verified": false,
+ "line_number": 60
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml",
+ "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9",
+ "is_verified": false,
+ "line_number": 66
+ }
+ ],
+ "kube/services/frontend-framework/frontend-framework-root-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 54
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml",
+ "hashed_secret": "6607b403f74e62246fc6a3c938feffc5a34a7e49",
+ "is_verified": false,
+ "line_number": 57
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml",
+ "hashed_secret": "4b0bb3e58651fe56ee23e59aa6a3cb96dc61ddd2",
+ "is_verified": false,
+ "line_number": 60
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml",
+ "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9",
+ "is_verified": false,
+ "line_number": 66
+ }
+ ],
+ "kube/services/gdcapi/gdcapi-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/gdcapi/gdcapi-deploy.yaml",
+ "hashed_secret": "e8c2f0bacaffbf2f9897217c6770413879945296",
+ "is_verified": false,
+ "line_number": 38
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/gdcapi/gdcapi-deploy.yaml",
+ "hashed_secret": "517cded9f3e3ab79237fde330b97a93f5a943316",
+ "is_verified": false,
+ "line_number": 41
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/gdcapi/gdcapi-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 44
+ }
+ ],
+ "kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml",
+ "hashed_secret": "38ded89f83435a558169dedb91a38f72d6cebf41",
+ "is_verified": false,
+ "line_number": 27
+ }
+ ],
+ "kube/services/google-sa-validation/google-sa-validation-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 54
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 57
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 63
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 67
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 70
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 73
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
+ "is_verified": false,
+ "line_number": 76
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
+ "is_verified": false,
+ "line_number": 79
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 82
+ }
+ ],
+ "kube/services/guppy/guppy-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/guppy/guppy-deploy.yaml",
+ "hashed_secret": "0db22b31c9add2d3c76743c0ac6fbc99bb8b4761",
+ "is_verified": false,
+ "line_number": 65
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/guppy/guppy-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 68
+ }
+ ],
+ "kube/services/indexd/indexd-canary-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-canary-deploy.yaml",
+ "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3",
+ "is_verified": false,
+ "line_number": 59
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-canary-deploy.yaml",
+ "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039",
+ "is_verified": false,
+ "line_number": 62
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-canary-deploy.yaml",
+ "hashed_secret": "bdecca54d39013d43d3b7f05f2927eaa7df375dc",
+ "is_verified": false,
+ "line_number": 68
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-canary-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 71
+ }
+ ],
+ "kube/services/indexd/indexd-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-deploy.yaml",
+ "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3",
+ "is_verified": false,
+ "line_number": 63
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-deploy.yaml",
+ "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039",
+ "is_verified": false,
+ "line_number": 66
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-deploy.yaml",
+ "hashed_secret": "bdecca54d39013d43d3b7f05f2927eaa7df375dc",
+ "is_verified": false,
+ "line_number": 72
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/indexd/indexd-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 75
+ }
+ ],
+ "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
+ "is_verified": false,
+ "line_number": 143
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 146
+ }
+ ],
+ "kube/services/jenkins-worker/jenkins-worker-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins-worker/jenkins-worker-deploy.yaml",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
+ "is_verified": false,
+ "line_number": 150
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins-worker/jenkins-worker-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 153
+ }
+ ],
+ "kube/services/jenkins/jenkins-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins/jenkins-deploy.yaml",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
+ "is_verified": false,
+ "line_number": 157
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins/jenkins-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 160
+ }
+ ],
+ "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
+ "is_verified": false,
+ "line_number": 143
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 146
+ }
+ ],
+ "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
+ "is_verified": false,
+ "line_number": 146
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 149
+ }
+ ],
+ "kube/services/jenkins2/jenkins2-deploy.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins2/jenkins2-deploy.yaml",
+ "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf",
+ "is_verified": false,
+ "line_number": 153
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jenkins2/jenkins2-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
+ "is_verified": false,
+ "line_number": 156
+ }
+ ],
+ "kube/services/jobs/arborist-rm-expired-access-cronjob.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/arborist-rm-expired-access-cronjob.yaml",
+ "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab",
+ "is_verified": false,
+ "line_number": 37
+ }
+ ],
+ "kube/services/jobs/arborist-rm-expired-access-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/arborist-rm-expired-access-job.yaml",
+ "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab",
+ "is_verified": false,
+ "line_number": 37
+ }
+ ],
+ "kube/services/jobs/arboristdb-create-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/arboristdb-create-job.yaml",
+ "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab",
+ "is_verified": false,
+ "line_number": 33
+ }
+ ],
+ "kube/services/jobs/aws-bucket-replicate-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml",
+ "hashed_secret": "deb02468778f4041fb189654698ac948e436732d",
+ "is_verified": false,
+ "line_number": 33
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml",
+ "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063",
+ "is_verified": false,
+ "line_number": 36
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml",
+ "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb",
+ "is_verified": false,
+ "line_number": 42
+ }
+ ],
+ "kube/services/jobs/bucket-manifest-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/bucket-manifest-job.yaml",
+ "hashed_secret": "6c36710fe8825b381388d7005f2c9b5c70175fba",
+ "is_verified": false,
+ "line_number": 33
+ }
+ ],
+ "kube/services/jobs/bucket-replicate-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/bucket-replicate-job.yaml",
+ "hashed_secret": "84954f7729144580d612cbb0517aeca8880e3483",
+ "is_verified": false,
+ "line_number": 46
+ }
+ ],
+ "kube/services/jobs/bucket-replication-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/bucket-replication-job.yaml",
+ "hashed_secret": "84954f7729144580d612cbb0517aeca8880e3483",
+ "is_verified": false,
+ "line_number": 32
+ }
+ ],
+ "kube/services/jobs/bucket-size-report-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/bucket-size-report-job.yaml",
+ "hashed_secret": "7cccf62cb63863d9d3baabed4f576eb0f7039735",
+ "is_verified": false,
+ "line_number": 34
+ }
+ ],
+ "kube/services/jobs/cedar-ingestion-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/cedar-ingestion-job.yaml",
+ "hashed_secret": "e1c426d126dcc618dcd0686fc718d509ca6ee3b8",
+ "is_verified": false,
+ "line_number": 54
+ }
+ ],
+ "kube/services/jobs/client-modify-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/client-modify-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 41
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/client-modify-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 44
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/client-modify-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 50
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/client-modify-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 54
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/client-modify-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 57
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/client-modify-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 60
+ }
+ ],
+ "kube/services/jobs/cogwheel-register-client-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/cogwheel-register-client-job.yaml",
+ "hashed_secret": "09b772df628fd10bca646b6a877eb661122210ab",
+ "is_verified": false,
+ "line_number": 40
+ }
+ ],
+ "kube/services/jobs/config-fence-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/config-fence-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 44
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/config-fence-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 54
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/config-fence-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 57
+ }
+ ],
+ "kube/services/jobs/covid19-etl-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/covid19-etl-job.yaml",
+ "hashed_secret": "a7a2b42615b2b256a7c601c77c426e5d6cafb212",
+ "is_verified": false,
+ "line_number": 34
+ }
+ ],
+ "kube/services/jobs/covid19-notebook-etl-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/covid19-notebook-etl-job.yaml",
+ "hashed_secret": "a7a2b42615b2b256a7c601c77c426e5d6cafb212",
+ "is_verified": false,
+ "line_number": 33
+ }
+ ],
+ "kube/services/jobs/data-ingestion-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "81e4388059839f71aed21999aa51095c7e545094",
+ "is_verified": false,
+ "line_number": 34
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
+ "is_verified": false,
+ "line_number": 48
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
+ "is_verified": false,
+ "line_number": 51
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
+ "is_verified": false,
+ "line_number": 54
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 60
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 63
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/data-ingestion-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 66
+ }
+ ],
+ "kube/services/jobs/etl-cronjob.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/etl-cronjob.yaml",
+ "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966",
+ "is_verified": false,
+ "line_number": 38
+ }
+ ],
+ "kube/services/jobs/etl-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/etl-job.yaml",
+ "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966",
+ "is_verified": false,
+ "line_number": 35
+ }
+ ],
+ "kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 43
+ }
+ ],
+ "kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 36
+ }
+ ],
+ "kube/services/jobs/fence-db-migrate-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-db-migrate-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 36
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-db-migrate-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 39
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-db-migrate-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 42
+ }
+ ],
+ "kube/services/jobs/fence-delete-expired-clients-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-delete-expired-clients-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 38
+ }
+ ],
+ "kube/services/jobs/fence-visa-update-cronjob.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 42
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
+ "is_verified": false,
+ "line_number": 45
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
+ "is_verified": false,
+ "line_number": 48
+ }
+ ],
+ "kube/services/jobs/fence-visa-update-job.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-visa-update-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
+ "is_verified": false,
+ "line_number": 36
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-visa-update-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 172,
- "type": "Base64 High Entropy String"
+ "line_number": 39
},
{
- "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fence-visa-update-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 175,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 42
+ }
+ ],
+ "kube/services/jobs/fencedb-create-job.yaml": [
{
- "hashed_secret": "329b7cd8191942bedd337107934d365c43a86e6c",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/fencedb-create-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 175,
- "type": "Secret Keyword"
+ "line_number": 33
}
],
- "kube/services/datadog/values.yaml": [
+ "kube/services/jobs/gdcdb-create-job.yaml": [
{
- "hashed_secret": "52330dffa4d0795b4199a66428e54eca228e1661",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gdcdb-create-job.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 7,
- "type": "Secret Keyword"
+ "line_number": 33
}
],
- "kube/services/fenceshib/fenceshib-configmap.yaml": [
+ "kube/services/jobs/gen3qa-check-bucket-access-job.yaml": [
{
- "hashed_secret": "a985e14b9d6744a2d04f29347693b55c116e478c",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 375,
- "type": "Base64 High Entropy String"
+ "line_number": 177
},
{
- "hashed_secret": "adc747bc5eb82ef4b017f5c3759dcee5aa28c36f",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 376,
- "type": "Base64 High Entropy String"
+ "line_number": 180
},
{
- "hashed_secret": "59b1702ff0eaf92c9271cbd12f587de97df7e13b",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 377,
- "type": "Base64 High Entropy String"
+ "line_number": 186
},
{
- "hashed_secret": "b4a748bbfbbca8925d932a47ab3dcb970d34caf5",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 378,
- "type": "Base64 High Entropy String"
+ "line_number": 190
},
{
- "hashed_secret": "af646701a84f7dd9f0e87753f54def881326e78a",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 379,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 193
+ }
+ ],
+ "kube/services/jobs/gentestdata-job.yaml": [
{
- "hashed_secret": "20c15ad9742124dc06e1612282c49bb443ebcbd9",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gentestdata-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 380,
- "type": "Base64 High Entropy String"
+ "line_number": 67
},
{
- "hashed_secret": "9caded71b967a11b7a6cd0f20db91f06f3517d12",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gentestdata-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 381,
- "type": "Base64 High Entropy String"
+ "line_number": 70
},
{
- "hashed_secret": "8f19501bc9241b71f7b6db929fb35ab12635dcd7",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gentestdata-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 382,
- "type": "Base64 High Entropy String"
+ "line_number": 76
},
{
- "hashed_secret": "d6220f6a55df1ed11c4250f42ab07bb9da20541a",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gentestdata-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 383,
- "type": "Base64 High Entropy String"
+ "line_number": 80
},
{
- "hashed_secret": "dadd9b96636f9529f2547d05d754dc310ceba0c3",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/gentestdata-job.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 384,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 83
+ }
+ ],
+ "kube/services/jobs/google-bucket-manifest-job.yaml": [
{
- "hashed_secret": "3074bc66584550e20c3697a28f67a0762394943c",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-bucket-manifest-job.yaml",
+ "hashed_secret": "5ca8fff7767e5dd6ebed80e2c8eab66d6f3bf5eb",
"is_verified": false,
- "line_number": 385,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 31
+ }
+ ],
+ "kube/services/jobs/google-bucket-replicate-job.yaml": [
{
- "hashed_secret": "823131319b4c4b4688f44d3e832bfa9696f16b52",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-bucket-replicate-job.yaml",
+ "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6",
"is_verified": false,
- "line_number": 386,
- "type": "Base64 High Entropy String"
+ "line_number": 35
},
{
- "hashed_secret": "015b780cbfb76988caf52de8ac974a6781e53110",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-bucket-replicate-job.yaml",
+ "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063",
"is_verified": false,
- "line_number": 387,
- "type": "Base64 High Entropy String"
+ "line_number": 38
},
{
- "hashed_secret": "5c8fac33207d74d667680ade09447ea8f43b76d7",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-bucket-replicate-job.yaml",
+ "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb",
"is_verified": false,
- "line_number": 388,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 41
+ }
+ ],
+ "kube/services/jobs/google-create-bucket-job.yaml": [
{
- "hashed_secret": "c0c4bb09d8394e8f001e337bd27ccac355433d9e",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-create-bucket-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 389,
- "type": "Base64 High Entropy String"
+ "line_number": 78
},
{
- "hashed_secret": "f95631bcbbbc56e18487dcb242cfb1b3e74b16a1",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-create-bucket-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 390,
- "type": "Base64 High Entropy String"
+ "line_number": 81
},
{
- "hashed_secret": "01a692ab6232e0882a313d148981bab58ab98f53",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-create-bucket-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 391,
- "type": "Base64 High Entropy String"
+ "line_number": 84
},
{
- "hashed_secret": "658060a680d415ce6690ad2c3b622ddb33ddd50a",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-create-bucket-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 392,
- "type": "Base64 High Entropy String"
+ "line_number": 91
},
{
- "hashed_secret": "80915b0bd9daa5e1f95cad573892980b1b5a2294",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-create-bucket-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 393,
- "type": "Base64 High Entropy String"
+ "line_number": 94
},
{
- "hashed_secret": "cc55977b293d8cdca8a2c19dfea6874e70057c41",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-create-bucket-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 394,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 97
+ }
+ ],
+ "kube/services/jobs/google-delete-expired-access-cronjob.yaml": [
{
- "hashed_secret": "e400ed02add75dd5f3a8c212857acf12027437d1",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 395,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "2e819c8baa3b0508a32b77de258655b3f3a6f7cb",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 396,
- "type": "Base64 High Entropy String"
+ "line_number": 46
},
{
- "hashed_secret": "546ed926d58ea5492ab6adb8be94a67aa44ac433",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 397,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 49
+ }
+ ],
+ "kube/services/jobs/google-delete-expired-access-job.yaml": [
{
- "hashed_secret": "f056f2deceed268e7af6dbdaf2577079c76e006a",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-access-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 398,
- "type": "Base64 High Entropy String"
+ "line_number": 36
},
{
- "hashed_secret": "d75efee28f4798c3a9c6f44b78a8500513ef28b2",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-access-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 399,
- "type": "Base64 High Entropy String"
+ "line_number": 39
},
{
- "hashed_secret": "7803ae08cdc22a5e0b025eff3c9ef0628eedc165",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-access-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 419,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 42
+ }
+ ],
+ "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml": [
{
- "hashed_secret": "b8b61e87f5b58b0eeb597b2122ea0cea2ccab3d9",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 420,
- "type": "Base64 High Entropy String"
+ "line_number": 48
},
{
- "hashed_secret": "787745fc904c3bd7eddc3d1aab683a376c13890f",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 423,
- "type": "Base64 High Entropy String"
+ "line_number": 51
},
{
- "hashed_secret": "81361d672f238f505a6246ef9b655ee2f48d67e7",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 424,
- "type": "Base64 High Entropy String"
+ "line_number": 57
},
{
- "hashed_secret": "7c98bff76ac3f273d15ed9bc3dd5294d323ab577",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 425,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "46038fc88daceed8dd46817ca45c72ae0270fdd4",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 426,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "acad0c57b4f5cbed1b4863ed06d02784180a9f92",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 427,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 67
+ }
+ ],
+ "kube/services/jobs/google-delete-expired-service-account-job.yaml": [
{
- "hashed_secret": "1b57f49a6ee337c16ecd6aabfc0dff3b3821cd09",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 428,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "5b688158be36e8b3f265a462ed599dcf69290084",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 429,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "965996e12c8b50b3c325d96003e8984a4ece658a",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 430,
- "type": "Base64 High Entropy String"
+ "line_number": 49
},
{
- "hashed_secret": "584f0c58e764e948af1a35c9e60447aa0f84c6f5",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 431,
- "type": "Base64 High Entropy String"
+ "line_number": 53
},
{
- "hashed_secret": "bcaf897786d060a675ee9d654a84ae8baf96e9d0",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 432,
- "type": "Base64 High Entropy String"
+ "line_number": 56
},
{
- "hashed_secret": "0c09277fa183e06d32065f9386a3b4190b445df3",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 433,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 59
+ }
+ ],
+ "kube/services/jobs/google-init-proxy-groups-cronjob.yaml": [
{
- "hashed_secret": "5a51be06b305d6664e4afd25f21869b0f8b5039b",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 434,
- "type": "Base64 High Entropy String"
+ "line_number": 48
},
{
- "hashed_secret": "b38404f8853d734e3d03577b2c1084b4540c8708",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 435,
- "type": "Base64 High Entropy String"
+ "line_number": 51
},
{
- "hashed_secret": "126ccc602cffcb8292beb57137f7f6719e317b72",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 436,
- "type": "Base64 High Entropy String"
+ "line_number": 54
},
{
- "hashed_secret": "6681c1d7e1d327642a32cb8864ad51e4b8f981e5",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 437,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "7f7b1f316ece195e5f584fe2faf6f9edc6942c6f",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 439,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "bb908c7bc655057f2edc42815c5dff82e9dea529",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 440,
- "type": "Base64 High Entropy String"
+ "line_number": 67
},
{
- "hashed_secret": "bc2a0d18e3dd142df7b34e95342d47bf8aadabcb",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 441,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 70
+ }
+ ],
+ "kube/services/jobs/google-init-proxy-groups-job.yaml": [
{
- "hashed_secret": "d60f0bcea109bb6edb6e45fd387f5f2c86e49e1a",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 442,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "e549dd40a741557cc1c4e377df0a141354e22688",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 443,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "2dd2486dae84cad50387c20bf687b6fbc6162b58",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 444,
- "type": "Base64 High Entropy String"
+ "line_number": 46
},
{
- "hashed_secret": "71622010fc7eb09d9273f59c548bde6a5da5dc0e",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 445,
- "type": "Base64 High Entropy String"
+ "line_number": 53
},
{
- "hashed_secret": "6f0115cf53bd49ec990c562ac6cbfc452c83cd46",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 446,
- "type": "Base64 High Entropy String"
+ "line_number": 56
},
{
- "hashed_secret": "70dddd534b2f9bb70871fefe0845b79c3b69363f",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 448,
- "type": "Base64 High Entropy String"
+ "line_number": 59
},
{
- "hashed_secret": "acf3536b0416aa99608b0be17e87655370ece829",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
+ "is_verified": false,
+ "line_number": 62
+ }
+ ],
+ "kube/services/jobs/google-manage-account-access-cronjob.yaml": [
+ {
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 449,
- "type": "Base64 High Entropy String"
+ "line_number": 48
},
{
- "hashed_secret": "1d13ee35c7279c1fae1c6474ed47611994273e41",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 450,
- "type": "Base64 High Entropy String"
+ "line_number": 51
},
{
- "hashed_secret": "d38cf89b25bd7378cdb4e00b4b59293001dd500b",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 451,
- "type": "Base64 High Entropy String"
+ "line_number": 54
},
{
- "hashed_secret": "1648f34ce2f1b563a8ed1c6d5d55b5e76a395903",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 452,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "9bf63f6f49fb01ff80959bc5a60c8688df92cc02",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 453,
- "type": "Base64 High Entropy String"
- }
- ],
- "kube/services/jobs/indexd-authz-job.yaml": [
+ "line_number": 64
+ },
{
- "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 70,
- "type": "Basic Auth Credentials"
+ "line_number": 67
}
],
- "kube/services/monitoring/grafana-values.yaml": [
+ "kube/services/jobs/google-manage-account-access-job.yaml": [
{
- "hashed_secret": "2ae868079d293e0a185c671c7bcdac51df36e385",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 162,
- "type": "Secret Keyword"
+ "line_number": 40
},
{
- "hashed_secret": "7a64ff8446b06d38dc271019994f13823a2cbcf4",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 166,
- "type": "Secret Keyword"
- }
- ],
- "kube/services/ohdsi-atlas/README.md": [
+ "line_number": 43
+ },
{
- "hashed_secret": "6e71f9f2b1e96de5a712f899ed26477ebc260a73",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 105,
- "type": "Secret Keyword"
+ "line_number": 46
},
{
- "hashed_secret": "317b889ca9fa8789dc1b85714568b1bdf2c7baf3",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 108,
- "type": "Secret Keyword"
- }
- ],
- "kube/services/revproxy/helpers.js": [
+ "line_number": 53
+ },
{
- "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 10,
- "type": "Base64 High Entropy String"
- }
- ],
- "kube/services/revproxy/helpersTest.js": [
+ "line_number": 56
+ },
{
- "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-account-access-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 22,
- "type": "JSON Web Token"
+ "line_number": 59
}
],
- "package-lock.json": [
- {
- "hashed_secret": "c95b6bc99445e7ed9177040f5ef94d0cdb38fb21",
- "is_verified": false,
- "line_number": 10,
- "type": "Base64 High Entropy String"
- },
+ "kube/services/jobs/google-manage-keys-cronjob.yaml": [
{
- "hashed_secret": "a896da46c897d3a0d007843006621f78dbcabf51",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 19,
- "type": "Base64 High Entropy String"
+ "line_number": 48
},
{
- "hashed_secret": "84b662fc9a2a275f90d0afafe6ce08a4d0928ac8",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 28,
- "type": "Base64 High Entropy String"
+ "line_number": 51
},
{
- "hashed_secret": "6ebe9724873357aaea25e329efb726fa61b843e7",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 39,
- "type": "Base64 High Entropy String"
+ "line_number": 54
},
{
- "hashed_secret": "f1dbba169db046906924ccd784068a2306096634",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 44,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "2c7bd6cdc39b5b8a0f32aa11988a0ec769526cdb",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 52,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "1addd61f68d977408128e530959437821a6d8b66",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 57,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 67
+ }
+ ],
+ "kube/services/jobs/google-manage-keys-job.yaml": [
{
- "hashed_secret": "9787d966f19a0d8d0021b31d34cfdfcebdb9c28a",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 65,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "76693e518c3c8123e9a197821b506292322a0a95",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 70,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "fa83dcbf0f435ee38066d19a2a43815510f96bc4",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 86,
- "type": "Base64 High Entropy String"
+ "line_number": 46
},
{
- "hashed_secret": "017a7eab3d63331ecfe768927c8907a5a31888e5",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 91,
- "type": "Base64 High Entropy String"
+ "line_number": 53
},
{
- "hashed_secret": "92b56edda4f2906f548fe77c015490e6ba2ee4c3",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 96,
- "type": "Base64 High Entropy String"
+ "line_number": 56
},
{
- "hashed_secret": "936b0959aa13f1decc76be1d80acaac0860847b7",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-manage-keys-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 101,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 59
+ }
+ ],
+ "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml": [
{
- "hashed_secret": "4bad86c43b7cd06efc130272d8e4de2b32636371",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 109,
- "type": "Base64 High Entropy String"
+ "line_number": 48
},
{
- "hashed_secret": "d11716ecfa623706b733654d78f4e7af3c117efa",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 143,
- "type": "Base64 High Entropy String"
+ "line_number": 51
},
{
- "hashed_secret": "0cc93dfdf4ae08bc374b99af985b25d2427f71d8",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 148,
- "type": "Base64 High Entropy String"
+ "line_number": 54
},
{
- "hashed_secret": "80f8d53f3fedde239f695d6a4c44c78b4aff0a44",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 153,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "83307cb75a4a44ba528f4a0aefcec2a8018dc6d8",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 158,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "c96d81662cc7919208154e7152fa0033391b7bcd",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 166,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 67
+ }
+ ],
+ "kube/services/jobs/google-verify-bucket-access-group-job.yaml": [
{
- "hashed_secret": "7156492f40fb2479a45780b3d2959c29b27b6374",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 181,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "885304335818f51938422166d361cddacfd626d0",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 186,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "915ca894a8ec19ffcd55555e6c8daac1fe882751",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 191,
- "type": "Base64 High Entropy String"
+ "line_number": 46
},
{
- "hashed_secret": "7ea379a1bf787a21401c8c39f285e4e84b478d72",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 201,
- "type": "Base64 High Entropy String"
+ "line_number": 53
},
{
- "hashed_secret": "8e948a3b773d1a2e4b6f4220216efa734315246d",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 209,
- "type": "Base64 High Entropy String"
+ "line_number": 56
},
{
- "hashed_secret": "1a321d0b0d9b6d75888ce7ae121ac222cec1eddd",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 217,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 59
+ }
+ ],
+ "kube/services/jobs/graph-create-job.yaml": [
{
- "hashed_secret": "1a6bfe25744ad6c6ce27c3a52dbd98c15be12a5c",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/graph-create-job.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 222,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 33
+ }
+ ],
+ "kube/services/jobs/indexd-authz-job.yaml": [
{
- "hashed_secret": "04450eaacfa844f84926d04d6a07534cde99b28e",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/indexd-authz-job.yaml",
+ "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3",
"is_verified": false,
- "line_number": 227,
- "type": "Base64 High Entropy String"
+ "line_number": 32
},
{
- "hashed_secret": "b4c295435d09bbdfb91ced9040379166d67ccbd2",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/indexd-authz-job.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 232,
- "type": "Base64 High Entropy String"
+ "line_number": 35
},
{
- "hashed_secret": "bb2bf296d6e086b471d45a26af9fd57f55289a75",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/indexd-authz-job.yaml",
+ "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039",
"is_verified": false,
- "line_number": 237,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 38
+ }
+ ],
+ "kube/services/jobs/indexd-userdb-job.yaml": [
{
- "hashed_secret": "9579b6a23d94d56f2f163233b716d8752e6b3bde",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/indexd-userdb-job.yaml",
+ "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3",
"is_verified": false,
- "line_number": 256,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "796925256bc0f4dc43cdfab7fbff852eace18f42",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/indexd-userdb-job.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 287,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "7e280af4ec2d573144d98e89ed2e1dfd817ca48f",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/indexd-userdb-job.yaml",
+ "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039",
"is_verified": false,
- "line_number": 295,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 46
+ }
+ ],
+ "kube/services/jobs/metadata-aggregate-sync-job.yaml": [
{
- "hashed_secret": "941b3e7836a6f26d32311893ac5d9ad0a52c45ca",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/metadata-aggregate-sync-job.yaml",
+ "hashed_secret": "e14f65c8ca7f3b27a0f0f5463569954841e162c9",
"is_verified": false,
- "line_number": 300,
- "type": "Base64 High Entropy String"
+ "line_number": 31
},
{
- "hashed_secret": "34743e1f7d9541c4a726b998f20baf828c694213",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/metadata-aggregate-sync-job.yaml",
+ "hashed_secret": "c27babf45eb0ed87329e69c7d47dba611e859c5d",
"is_verified": false,
- "line_number": 305,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 34
+ }
+ ],
+ "kube/services/jobs/metadata-delete-expired-objects-job.yaml": [
{
- "hashed_secret": "c4fea87bd49c4427d7215d57ada9ff3177e0c471",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/metadata-delete-expired-objects-job.yaml",
+ "hashed_secret": "0cc8bac3fabe63722716d1e6fe04a8dded1e3ad0",
"is_verified": false,
- "line_number": 310,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 24
+ }
+ ],
+ "kube/services/jobs/remove-objects-from-clouds-job.yaml": [
{
- "hashed_secret": "85324324e21d0dfbfb5248ac92fa0f289d2e25f8",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml",
+ "hashed_secret": "deb02468778f4041fb189654698ac948e436732d",
"is_verified": false,
- "line_number": 315,
- "type": "Base64 High Entropy String"
+ "line_number": 34
},
{
- "hashed_secret": "19eea0e64f6a3311b04e472035df10c23f23dd0a",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml",
+ "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6",
"is_verified": false,
- "line_number": 352,
- "type": "Base64 High Entropy String"
+ "line_number": 37
},
{
- "hashed_secret": "acce4ef8d841ffa646256da3af7b79ad5cb78158",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml",
+ "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb",
"is_verified": false,
- "line_number": 364,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 43
+ }
+ ],
+ "kube/services/jobs/replicate-validation-job.yaml": [
{
- "hashed_secret": "22e7ae9b65ade417baac61e6f0d84a54783ba759",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/replicate-validation-job.yaml",
+ "hashed_secret": "deb02468778f4041fb189654698ac948e436732d",
"is_verified": false,
- "line_number": 369,
- "type": "Base64 High Entropy String"
+ "line_number": 34
},
{
- "hashed_secret": "8e71b7828c7c554f05dbbabddd63301b5fc56771",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/replicate-validation-job.yaml",
+ "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6",
"is_verified": false,
- "line_number": 374,
- "type": "Base64 High Entropy String"
+ "line_number": 37
},
{
- "hashed_secret": "fea0d9c5b0c53c41e6a0a961a49cccc170847120",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/replicate-validation-job.yaml",
+ "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063",
"is_verified": false,
- "line_number": 379,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "ebe2160ede628e0faeac9fe70c215cd38d28d8f6",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/replicate-validation-job.yaml",
+ "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb",
"is_verified": false,
- "line_number": 384,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 43
+ }
+ ],
+ "kube/services/jobs/s3sync-cronjob.yaml": [
{
- "hashed_secret": "9cb2b0347722893cde39bbe83f9df7c3c6e1b7c3",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/s3sync-cronjob.yaml",
+ "hashed_secret": "27f6dfe15698a3bfaa183c84701cfb2bf4115415",
"is_verified": false,
- "line_number": 398,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 44
+ }
+ ],
+ "kube/services/jobs/usersync-job.yaml": [
{
- "hashed_secret": "344e37e02a35dd31cc7dc945b7fe7b2da88344c0",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 403,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "31a41817127c8d2b7b304c326b05d7319934e7a6",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 413,
- "type": "Base64 High Entropy String"
+ "line_number": 67
},
{
- "hashed_secret": "150852e9f1e877547306d59618a136fb535b40e3",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 418,
- "type": "Base64 High Entropy String"
+ "line_number": 70
},
{
- "hashed_secret": "277e32c5ba00ef90c6f76c7004fde2ecac6d2e18",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 423,
- "type": "Base64 High Entropy String"
+ "line_number": 77
},
{
- "hashed_secret": "b95e69c7f4328ea641952f875c3b079a1585c9d1",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 431,
- "type": "Base64 High Entropy String"
+ "line_number": 80
},
{
- "hashed_secret": "6b30fe731c8444c0263b57aacbdaedb771ec01a5",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 436,
- "type": "Base64 High Entropy String"
+ "line_number": 83
},
{
- "hashed_secret": "98eafa06e0c7e089c19e79dedf5989c3eb2f0568",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/usersync-job.yaml",
+ "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73",
"is_verified": false,
- "line_number": 445,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 86
+ }
+ ],
+ "kube/services/jobs/useryaml-job.yaml": [
{
- "hashed_secret": "bf47364c2d4ad0308ef016fe4a89f6c7dc21ef86",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 464,
- "type": "Base64 High Entropy String"
+ "line_number": 40
},
{
- "hashed_secret": "3e6c18abd5b90c63da0bd8b4c0d3a142e3d5a83d",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 474,
- "type": "Base64 High Entropy String"
+ "line_number": 43
},
{
- "hashed_secret": "209bf9cfe9000c6851cd4f94165d30ee1cd3dca1",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 482,
- "type": "Base64 High Entropy String"
+ "line_number": 46
},
{
- "hashed_secret": "cf09cb791688fe019284bfdc362abc41918645a5",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 487,
- "type": "Base64 High Entropy String"
+ "line_number": 53
},
{
- "hashed_secret": "6c1392daf02b9ba2a21c49c82508048525d5bc4b",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 492,
- "type": "Base64 High Entropy String"
+ "line_number": 56
},
{
- "hashed_secret": "b4e2bf4f3a071b223da2f270d5a2348d65105d3e",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 497,
- "type": "Base64 High Entropy String"
+ "line_number": 59
},
{
- "hashed_secret": "98d583792218c3c06ecbcac66e5bedcdaabd63e7",
+ "type": "Secret Keyword",
+ "filename": "kube/services/jobs/useryaml-job.yaml",
+ "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73",
"is_verified": false,
- "line_number": 507,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 65
+ }
+ ],
+ "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml": [
{
- "hashed_secret": "575c9b4e0765ae6ab9a4f38eb1186ea361691f73",
+ "type": "Secret Keyword",
+ "filename": "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 514,
- "type": "Base64 High Entropy String"
+ "line_number": 56
},
{
- "hashed_secret": "16225dde2ec301d038a0bdbda68de4a174fbfdd0",
+ "type": "Secret Keyword",
+ "filename": "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml",
+ "hashed_secret": "fb7ea689a364feb7aafbf8d553eb77073fa7ba11",
"is_verified": false,
- "line_number": 519,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 59
+ }
+ ],
+ "kube/services/kubecost-standalone/thanos-deploy.yaml": [
{
- "hashed_secret": "80d73b6f7e87f07e3ae70ef1e692aa9569574551",
+ "type": "Secret Keyword",
+ "filename": "kube/services/kubecost-standalone/thanos-deploy.yaml",
+ "hashed_secret": "064376809efc3acda5bd341aca977e149b989696",
"is_verified": false,
- "line_number": 524,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 127
+ }
+ ],
+ "kube/services/kubecost-standalone/values.yaml": [
{
- "hashed_secret": "38952752ebde485c02a80bff1d81ebe95664bcca",
+ "type": "Secret Keyword",
+ "filename": "kube/services/kubecost-standalone/values.yaml",
+ "hashed_secret": "ec9786daee68e3541963a51299160859fe4db663",
"is_verified": false,
- "line_number": 529,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 30
+ }
+ ],
+ "kube/services/manifestservice/manifestservice-deploy.yaml": [
{
- "hashed_secret": "150b60d278251f2470dd690016afe038bc1bb7f1",
+ "type": "Secret Keyword",
+ "filename": "kube/services/manifestservice/manifestservice-deploy.yaml",
+ "hashed_secret": "3da2c49c267b6c58401bbf05e379b38d20434f78",
"is_verified": false,
- "line_number": 534,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "535582d92da3a4158e592ec29868bfd8467b8bce",
+ "type": "Secret Keyword",
+ "filename": "kube/services/manifestservice/manifestservice-deploy.yaml",
+ "hashed_secret": "469e0c2b1a67aa94955bae023ddc727be31581a7",
"is_verified": false,
- "line_number": 539,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "23b096d9b48ed5d9a778d3db5807c5c7a2357c93",
+ "type": "Secret Keyword",
+ "filename": "kube/services/manifestservice/manifestservice-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 544,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 67
+ }
+ ],
+ "kube/services/metadata/metadata-deploy.yaml": [
{
- "hashed_secret": "127f92724797904fb4e6de2dfff2c71c07739612",
+ "type": "Secret Keyword",
+ "filename": "kube/services/metadata/metadata-deploy.yaml",
+ "hashed_secret": "e14f65c8ca7f3b27a0f0f5463569954841e162c9",
"is_verified": false,
- "line_number": 549,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "f74b21c2fc87ad48118b3723372ecfe25aaae730",
+ "type": "Secret Keyword",
+ "filename": "kube/services/metadata/metadata-deploy.yaml",
+ "hashed_secret": "c27babf45eb0ed87329e69c7d47dba611e859c5d",
"is_verified": false,
- "line_number": 559,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 66
+ }
+ ],
+ "kube/services/monitoring/grafana-values.yaml": [
{
- "hashed_secret": "bc788b9febb8e95114c2e78a9d5297f80bbedb2c",
+ "type": "Secret Keyword",
+ "filename": "kube/services/monitoring/grafana-values.yaml",
+ "hashed_secret": "2ae868079d293e0a185c671c7bcdac51df36e385",
"is_verified": false,
- "line_number": 564,
- "type": "Base64 High Entropy String"
+ "line_number": 162
},
{
- "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c",
+ "type": "Secret Keyword",
+ "filename": "kube/services/monitoring/grafana-values.yaml",
+ "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8",
"is_verified": false,
- "line_number": 575,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 331
+ }
+ ],
+ "kube/services/monitoring/thanos-deploy.yaml": [
{
- "hashed_secret": "36a64bd1be32f031420a87c448636720426e0072",
+ "type": "Secret Keyword",
+ "filename": "kube/services/monitoring/thanos-deploy.yaml",
+ "hashed_secret": "064376809efc3acda5bd341aca977e149b989696",
"is_verified": false,
- "line_number": 580,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 130
+ }
+ ],
+ "kube/services/ohif-viewer/ohif-viewer-deploy.yaml": [
{
- "hashed_secret": "06a3dc8802aa9b4f2f48ad081cbe64482ce9f491",
+ "type": "Secret Keyword",
+ "filename": "kube/services/ohif-viewer/ohif-viewer-deploy.yaml",
+ "hashed_secret": "3f87db80519a9ae7d8112f4e0d4cc81441181818",
"is_verified": false,
- "line_number": 585,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 40
+ }
+ ],
+ "kube/services/orthanc/orthanc-deploy.yaml": [
{
- "hashed_secret": "6c8453f18e4aa0280d847454c9a803c12e2d14d7",
+ "type": "Secret Keyword",
+ "filename": "kube/services/orthanc/orthanc-deploy.yaml",
+ "hashed_secret": "3f87db80519a9ae7d8112f4e0d4cc81441181818",
"is_verified": false,
- "line_number": 590,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 41
+ }
+ ],
+ "kube/services/peregrine/peregrine-canary-deploy.yaml": [
{
- "hashed_secret": "3df46004e168f8d8e3422adfbf0b7c237a41f437",
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml",
+ "hashed_secret": "6131c35d7eebdbc17a314bef8aac75b87323cff3",
"is_verified": false,
- "line_number": 595,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "5c270f653b2fcd5b7c700b53f8543df4147a4aba",
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml",
+ "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966",
"is_verified": false,
- "line_number": 600,
- "type": "Base64 High Entropy String"
+ "line_number": 64
},
{
- "hashed_secret": "98a159a135963e5e65a546879c332b2c3942aec3",
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml",
+ "hashed_secret": "990a3202b5c94aa5e5997e7dc1a218e457f8b8ec",
"is_verified": false,
- "line_number": 605,
- "type": "Base64 High Entropy String"
+ "line_number": 70
},
{
- "hashed_secret": "58d846ede841bbec0d67a42d03426806635fee2f",
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 610,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 73
+ }
+ ],
+ "kube/services/peregrine/peregrine-deploy.yaml": [
{
- "hashed_secret": "23e42656fba130d56c20abddb94b6b7bfcad69a8",
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-deploy.yaml",
+ "hashed_secret": "6131c35d7eebdbc17a314bef8aac75b87323cff3",
"is_verified": false,
- "line_number": 618,
- "type": "Base64 High Entropy String"
+ "line_number": 67
},
{
- "hashed_secret": "f883f0bd87d8455814f491e2067bd3f62454c7c2",
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-deploy.yaml",
+ "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966",
"is_verified": false,
- "line_number": 623,
- "type": "Base64 High Entropy String"
+ "line_number": 70
},
{
- "hashed_secret": "8ece0f01da9189bae69a60da116040400bbc10e5",
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-deploy.yaml",
+ "hashed_secret": "990a3202b5c94aa5e5997e7dc1a218e457f8b8ec",
"is_verified": false,
- "line_number": 628,
- "type": "Base64 High Entropy String"
+ "line_number": 76
},
{
- "hashed_secret": "75a3c0b9934bd460ff7af9763edb25d749ab7b4e",
+ "type": "Secret Keyword",
+ "filename": "kube/services/peregrine/peregrine-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 633,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 79
+ }
+ ],
+ "kube/services/pidgin/pidgin-deploy.yaml": [
{
- "hashed_secret": "baac57cb314beab87420d1da6906a1d2377c7d73",
+ "type": "Secret Keyword",
+ "filename": "kube/services/pidgin/pidgin-deploy.yaml",
+ "hashed_secret": "49af232c7adfcd54a40202e06261396a757e4ddd",
"is_verified": false,
- "line_number": 638,
- "type": "Base64 High Entropy String"
+ "line_number": 59
},
{
- "hashed_secret": "d0a953de593a0a7b26b925a6476d8382cd31cb0e",
+ "type": "Secret Keyword",
+ "filename": "kube/services/pidgin/pidgin-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 654,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 62
+ }
+ ],
+ "kube/services/portal/portal-deploy.yaml": [
{
- "hashed_secret": "8b15238d25347ab18f4cbbe191de9aed597c8ea4",
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 659,
- "type": "Base64 High Entropy String"
+ "line_number": 55
},
{
- "hashed_secret": "1e2ab7a2fd9b6afcbe08afcb9dc652b76cf367d8",
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-deploy.yaml",
+ "hashed_secret": "5c5a8e158ad2d8544f73cd5422072d414f497faa",
"is_verified": false,
- "line_number": 668,
- "type": "Base64 High Entropy String"
+ "line_number": 58
},
{
- "hashed_secret": "ae745d719f97b3ddb9791348b1f29ff8208c0c5c",
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-deploy.yaml",
+ "hashed_secret": "619551216e129bbc5322678abf9c9210c0327cfb",
"is_verified": false,
- "line_number": 676,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "b72a53c8bebd6540eeffeba5b0c28965bbb2a664",
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-deploy.yaml",
+ "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9",
"is_verified": false,
- "line_number": 681,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 67
+ }
+ ],
+ "kube/services/portal/portal-root-deploy.yaml": [
{
- "hashed_secret": "97cbb7fbdfe498c80489e26bcdc78fce5db9b258",
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-root-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 686,
- "type": "Base64 High Entropy String"
+ "line_number": 55
},
{
- "hashed_secret": "bc98c415b1c6ee93adf8e97a4a536b6342337c19",
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-root-deploy.yaml",
+ "hashed_secret": "5c5a8e158ad2d8544f73cd5422072d414f497faa",
"is_verified": false,
- "line_number": 691,
- "type": "Base64 High Entropy String"
+ "line_number": 58
},
{
- "hashed_secret": "5a6baaacb03a030567b857cb8cfe440407e6385e",
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-root-deploy.yaml",
+ "hashed_secret": "619551216e129bbc5322678abf9c9210c0327cfb",
"is_verified": false,
- "line_number": 696,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "e55a8322e5c7485be2f721155d9ed15afc586a4c",
+ "type": "Secret Keyword",
+ "filename": "kube/services/portal/portal-root-deploy.yaml",
+ "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9",
"is_verified": false,
- "line_number": 705,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 67
+ }
+ ],
+ "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml": [
{
- "hashed_secret": "47709a15a1b02a87f65dfcd5f3e78e0d2206c95f",
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55",
"is_verified": false,
- "line_number": 710,
- "type": "Base64 High Entropy String"
+ "line_number": 74
},
{
- "hashed_secret": "5782d0f39536b22f2c6aa29d3b815a57f43e4800",
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb",
"is_verified": false,
- "line_number": 719,
- "type": "Base64 High Entropy String"
+ "line_number": 77
},
{
- "hashed_secret": "401f90e6afa890c5ee44071351e4a149e7c1f5e0",
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634",
"is_verified": false,
- "line_number": 724,
- "type": "Base64 High Entropy String"
+ "line_number": 80
},
{
- "hashed_secret": "51f38b23af543da8b637a3bd62f5fb2c460e3b3d",
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd",
"is_verified": false,
- "line_number": 729,
- "type": "Base64 High Entropy String"
+ "line_number": 90
},
{
- "hashed_secret": "8287678ab8009ae16b02930c9e260d1f28578fbe",
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d",
"is_verified": false,
- "line_number": 734,
- "type": "Base64 High Entropy String"
+ "line_number": 93
},
{
- "hashed_secret": "d4c050e6914eb68a5c657fb8bb09f6ac5eae1e86",
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb",
"is_verified": false,
- "line_number": 739,
- "type": "Base64 High Entropy String"
+ "line_number": 96
},
{
- "hashed_secret": "922ac7db4914c20910496a41c474631928d6c2f2",
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457",
"is_verified": false,
- "line_number": 750,
- "type": "Base64 High Entropy String"
+ "line_number": 99
},
{
- "hashed_secret": "f7f85d9f7c87f1e576dcaf4cf50f35728f9a3265",
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295",
"is_verified": false,
- "line_number": 771,
- "type": "Base64 High Entropy String"
+ "line_number": 102
},
{
- "hashed_secret": "d7966031d8525b080d7234049cbb040ac9a3f908",
+ "type": "Secret Keyword",
+ "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 798,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 105
+ }
+ ],
+ "kube/services/qa-dashboard/qa-dashboard-deployment.yaml": [
{
- "hashed_secret": "ff3d359d573f78d89424e03ec8688eee19305f9f",
+ "type": "Secret Keyword",
+ "filename": "kube/services/qa-dashboard/qa-dashboard-deployment.yaml",
+ "hashed_secret": "253939a955a575ac69f409e5914dd0191b704760",
"is_verified": false,
- "line_number": 808,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 63
+ }
+ ],
+ "kube/services/qabot/qabot-deploy.yaml": [
{
- "hashed_secret": "949b4ff40f26797f9290fe46eaa8691caef5c5ab",
+ "type": "Secret Keyword",
+ "filename": "kube/services/qabot/qabot-deploy.yaml",
+ "hashed_secret": "a9fa7aa8c08b647c3fb696e6598642d4a63e25be",
"is_verified": false,
- "line_number": 817,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 86
+ }
+ ],
+ "kube/services/requestor/requestor-deploy.yaml": [
{
- "hashed_secret": "ce4ea19f66e9140bdb497b19c6ae94c32ee565f0",
+ "type": "Secret Keyword",
+ "filename": "kube/services/requestor/requestor-deploy.yaml",
+ "hashed_secret": "15debe4170aa5b89858d939f4c0644307ae7789b",
"is_verified": false,
- "line_number": 825,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 61
+ }
+ ],
+ "kube/services/revproxy/gen3.nginx.conf/indexd-service.conf": [
{
- "hashed_secret": "f6368525e9e22577efc8d8b737794e845958ba92",
+ "type": "Secret Keyword",
+ "filename": "kube/services/revproxy/gen3.nginx.conf/indexd-service.conf",
+ "hashed_secret": "f89523833036f85fed37ce3ebf25492189bc9397",
"is_verified": false,
- "line_number": 834,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 41
+ }
+ ],
+ "kube/services/revproxy/gen3.nginx.conf/metadata-service.conf": [
{
- "hashed_secret": "1508bbaf29927b5348d4df62823dab122a0d3b48",
+ "type": "Secret Keyword",
+ "filename": "kube/services/revproxy/gen3.nginx.conf/metadata-service.conf",
+ "hashed_secret": "18c0871af26eb9875c0f840b13211f097c133fd2",
"is_verified": false,
- "line_number": 839,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 24
+ }
+ ],
+ "kube/services/revproxy/helpers.js": [
{
- "hashed_secret": "12917e7235ce486ca51a296b896afa5e3b4fda54",
+ "type": "Base64 High Entropy String",
+ "filename": "kube/services/revproxy/helpers.js",
+ "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af",
"is_verified": false,
- "line_number": 844,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 10
+ }
+ ],
+ "kube/services/revproxy/helpersTest.js": [
{
- "hashed_secret": "49e05eb75fd04d8f44cf235d4e8eddc30a2b93e5",
+ "type": "Base64 High Entropy String",
+ "filename": "kube/services/revproxy/helpersTest.js",
+ "hashed_secret": "389c3ec21b7325359051e97ff569b078843d2d37",
"is_verified": false,
- "line_number": 849,
- "type": "Base64 High Entropy String"
+ "line_number": 19
},
{
- "hashed_secret": "aa8ea120ddc5aaa27cb02e0b04ac1c53b249a724",
+ "type": "JSON Web Token",
+ "filename": "kube/services/revproxy/helpersTest.js",
+ "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb",
"is_verified": false,
- "line_number": 869,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 22
+ }
+ ],
+ "kube/services/revproxy/revproxy-deploy.yaml": [
{
- "hashed_secret": "b3e00452fd69737cc747d0661fa3b3949a4a0805",
+ "type": "Secret Keyword",
+ "filename": "kube/services/revproxy/revproxy-deploy.yaml",
+ "hashed_secret": "c7a87a61893a647e29289845cb51e61afb06800b",
"is_verified": false,
- "line_number": 876,
- "type": "Base64 High Entropy String"
+ "line_number": 74
},
{
- "hashed_secret": "af2ceb518ddc689b0e2a03ffebb64d4499817c17",
+ "type": "Secret Keyword",
+ "filename": "kube/services/revproxy/revproxy-deploy.yaml",
+ "hashed_secret": "b3a4e2dea4c1fae8c58a07a84065b73b3a2d831c",
"is_verified": false,
- "line_number": 887,
- "type": "Base64 High Entropy String"
+ "line_number": 77
},
{
- "hashed_secret": "7da94b235f996b5c65b66c3e70b5eeaf97bab5d4",
+ "type": "Secret Keyword",
+ "filename": "kube/services/revproxy/revproxy-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 892,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 80
+ }
+ ],
+ "kube/services/sftp/sftp-deploy.yaml": [
{
- "hashed_secret": "f8363d7113ba35fd06b33afe20c8ad21a3202197",
+ "type": "Secret Keyword",
+ "filename": "kube/services/sftp/sftp-deploy.yaml",
+ "hashed_secret": "9fdebf62e477d59d25730744c8b3089c67c3db85",
"is_verified": false,
- "line_number": 900,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 39
+ }
+ ],
+ "kube/services/sheepdog/sheepdog-canary-deploy.yaml": [
{
- "hashed_secret": "6902b24068ea12c3a3e31596614aa6fa0fba3c39",
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml",
+ "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d",
"is_verified": false,
- "line_number": 908,
- "type": "Base64 High Entropy String"
+ "line_number": 58
},
{
- "hashed_secret": "2c732c0a0dccfc1588888172188ce9a1abb7166e",
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 916,
- "type": "Base64 High Entropy String"
+ "line_number": 61
},
{
- "hashed_secret": "c59aac9ab2704f627d29c762e716ba84b15be3f1",
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml",
+ "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2",
"is_verified": false,
- "line_number": 921,
- "type": "Base64 High Entropy String"
+ "line_number": 67
},
{
- "hashed_secret": "20249a3c96028e5ad19143d86ec5d2ee233935ed",
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 937,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 70
+ }
+ ],
+ "kube/services/sheepdog/sheepdog-deploy.yaml": [
{
- "hashed_secret": "2a57a9814486d6f83257ec94e65d1024819611b8",
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-deploy.yaml",
+ "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d",
"is_verified": false,
- "line_number": 942,
- "type": "Base64 High Entropy String"
+ "line_number": 63
},
{
- "hashed_secret": "d5e822897b1f37e6ce1a864e2ba9af8f9bfc5539",
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-deploy.yaml",
+ "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc",
"is_verified": false,
- "line_number": 950,
- "type": "Base64 High Entropy String"
+ "line_number": 66
},
{
- "hashed_secret": "dbee1beb29275ad50ef0a68067ca144985beca2c",
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-deploy.yaml",
+ "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2",
"is_verified": false,
- "line_number": 957,
- "type": "Base64 High Entropy String"
+ "line_number": 72
},
{
- "hashed_secret": "b0cb4b5554183f2c7bc1ca25d902db5769798a7a",
+ "type": "Secret Keyword",
+ "filename": "kube/services/sheepdog/sheepdog-deploy.yaml",
+ "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d",
"is_verified": false,
- "line_number": 962,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 75
+ }
+ ],
+ "kube/services/shiny/shiny-deploy.yaml": [
{
- "hashed_secret": "29f79b77802802c5ae2d3c2acb9179280de37914",
+ "type": "Secret Keyword",
+ "filename": "kube/services/shiny/shiny-deploy.yaml",
+ "hashed_secret": "327a1bbc6dc0ce857472ee9162a3415133862d50",
"is_verified": false,
- "line_number": 967,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 43
+ }
+ ],
+ "kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml": [
{
- "hashed_secret": "18469023a89dd192b5275d8b955c9fd2202e0c03",
+ "type": "Secret Keyword",
+ "filename": "kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml",
+ "hashed_secret": "7f932449df74fc78573fea502df8a484aef3f69d",
"is_verified": false,
- "line_number": 983,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 61
+ }
+ ],
+ "kube/services/superset/superset-deploy.yaml": [
{
- "hashed_secret": "0d3ce7468071b4e48ba9cd014ade7037dc57ef41",
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/superset-deploy.yaml",
+ "hashed_secret": "3e9d1737117ff62b23e37aedc72b522b0134997a",
"is_verified": false,
- "line_number": 991,
- "type": "Base64 High Entropy String"
+ "line_number": 235
},
{
- "hashed_secret": "955d2d24c472b4eb0b4488f935a0f65e38001df8",
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/superset-deploy.yaml",
+ "hashed_secret": "6ac08eaa58d425783ff8b5a38fe16ee66c0bce15",
"is_verified": false,
- "line_number": 996,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 311
+ }
+ ],
+ "kube/services/superset/superset-redis.yaml": [
{
- "hashed_secret": "42e05c82cd06a9ed1d15e0f472c2efc4b3254cae",
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/superset-redis.yaml",
+ "hashed_secret": "9fe1c31809da38c55b2b64bfab47b92bc5f6b7b9",
"is_verified": false,
- "line_number": 1010,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 265
+ }
+ ],
+ "kube/services/superset/values.yaml": [
{
- "hashed_secret": "7a87fb248397359e9c6ca6e46f39805789059102",
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/values.yaml",
+ "hashed_secret": "9a09d4081ddc128a80384712ce6df3578e6bc58e",
"is_verified": false,
- "line_number": 1018,
- "type": "Base64 High Entropy String"
+ "line_number": 173
},
{
- "hashed_secret": "7fbf450bf4ee54f013454f70af3a9743c0909f54",
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/values.yaml",
+ "hashed_secret": "118c413f3fc929a1624f4c3e1da1e3d24377a693",
"is_verified": false,
- "line_number": 1034,
- "type": "Base64 High Entropy String"
+ "line_number": 299
},
{
- "hashed_secret": "df8e0babfad52a541f6e470cf3a143402c2c2a1e",
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/values.yaml",
+ "hashed_secret": "d2a8d1ddfa75398366cff06545380c73481ec17d",
"is_verified": false,
- "line_number": 1039,
- "type": "Base64 High Entropy String"
+ "line_number": 445
},
{
- "hashed_secret": "6f9bfb49cb818d2fe07592515e4c3f7a0bbd7e0e",
+ "type": "Secret Keyword",
+ "filename": "kube/services/superset/values.yaml",
+ "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc",
"is_verified": false,
- "line_number": 1044,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 459
+ }
+ ],
+ "kube/services/thor/thor-deploy.yaml": [
{
- "hashed_secret": "9e897caf5658aea914e1034f46663cadb5a76348",
+ "type": "Secret Keyword",
+ "filename": "kube/services/thor/thor-deploy.yaml",
+ "hashed_secret": "1f3f96a3887209d0dda357e5516231ee9c5cd9a7",
"is_verified": false,
- "line_number": 1054,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 100
+ }
+ ],
+ "kube/services/tube/tube-deploy.yaml": [
{
- "hashed_secret": "3aec99f39b829f94874ccd0a0d90315c6690cb94",
+ "type": "Secret Keyword",
+ "filename": "kube/services/tube/tube-deploy.yaml",
+ "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966",
"is_verified": false,
- "line_number": 1064,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 58
+ }
+ ],
+ "kube/services/ws-storage/ws-storage-deploy.yaml": [
{
- "hashed_secret": "eca5fc6e4f5f895143d3fcedefc42dfe6e79f918",
+ "type": "Secret Keyword",
+ "filename": "kube/services/ws-storage/ws-storage-deploy.yaml",
+ "hashed_secret": "ec2d9395e11f353370a4abac21a1565641b35ce9",
"is_verified": false,
- "line_number": 1069,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 66
+ }
+ ],
+ "kube/services/wts/wts-deploy.yaml": [
{
- "hashed_secret": "307a947aa422c67fdefb07178198a004fb2c0d94",
+ "type": "Secret Keyword",
+ "filename": "kube/services/wts/wts-deploy.yaml",
+ "hashed_secret": "5de687ae886f19c3cb68d4980e3f2e77cca3db9e",
"is_verified": false,
- "line_number": 1074,
- "type": "Base64 High Entropy String"
- },
+ "line_number": 65
+ }
+ ],
+ "packer/buildAll.sh": [
{
- "hashed_secret": "0ba2fc9a137313ae1fdda2b5476dedf0595bda3a",
+ "type": "Secret Keyword",
+ "filename": "packer/buildAll.sh",
+ "hashed_secret": "6e1d66a1596528c308e601c10aa0b92d53606ab9",
"is_verified": false,
- "line_number": 1083,
- "type": "Base64 High Entropy String"
+ "line_number": 15
}
],
- "tf_files/aws/cognito/README.md": [
+ "packer/variables.example.json": [
{
- "hashed_secret": "f6920f370a30262b7dd70e97293c73ec89739b70",
+ "type": "Secret Keyword",
+ "filename": "packer/variables.example.json",
+ "hashed_secret": "a3a0648a036bebf78ba1a1eb498a66081059da10",
"is_verified": false,
- "line_number": 106,
- "type": "Secret Keyword"
+ "line_number": 5
}
],
"tf_files/aws/commons/README.md": [
{
- "hashed_secret": "d02e53411e8cb4cd709778f173f7bc9a3455f8ed",
+ "type": "Secret Keyword",
+ "filename": "tf_files/aws/commons/README.md",
+ "hashed_secret": "5f02a3fb14ab1ce5c18c362b04b8ffc603ea5951",
+ "is_verified": false,
+ "line_number": 60
+ },
+ {
+ "type": "Secret Keyword",
+ "filename": "tf_files/aws/commons/README.md",
+ "hashed_secret": "49cfceed8aa8df159e53aa5c5951cad48a3f1216",
"is_verified": false,
- "line_number": 60,
- "type": "Secret Keyword"
+ "line_number": 67
},
{
- "hashed_secret": "9dc0da3613af850c5a018b0a88a5626fb8888e4e",
+ "type": "Secret Keyword",
+ "filename": "tf_files/aws/commons/README.md",
+ "hashed_secret": "18ad13589ca5fb3c432d7d9f0fe49f8ed6e2c478",
+ "is_verified": false,
+ "line_number": 70
+ }
+ ],
+ "tf_files/aws/eks/sample.tfvars": [
+ {
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/eks/sample.tfvars",
+ "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884",
"is_verified": false,
- "line_number": 78,
- "type": "Secret Keyword"
+ "line_number": 107
}
],
"tf_files/aws/eks/variables.tf": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/eks/variables.tf",
"hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884",
"is_verified": false,
- "line_number": 135,
- "type": "Hex High Entropy String"
+ "line_number": 133
}
],
"tf_files/aws/modules/common-logging/README.md": [
{
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/README.md",
"hashed_secret": "83442aa5a16cb1992731c32367ef464564388017",
"is_verified": false,
- "line_number": 57,
- "type": "Base64 High Entropy String"
- },
- {
- "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2",
- "is_verified": false,
- "line_number": 59,
- "type": "Hex High Entropy String"
+ "line_number": 57
}
],
"tf_files/aws/modules/common-logging/lambda_function.py": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/lambda_function.py",
"hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de",
"is_verified": false,
- "line_number": 18,
- "type": "Hex High Entropy String"
+ "line_number": 18
},
{
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/lambda_function.py",
"hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef",
"is_verified": false,
- "line_number": 18,
- "type": "Base64 High Entropy String"
+ "line_number": 18
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/lambda_function.py",
+ "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94",
+ "is_verified": false,
+ "line_number": 18
},
{
- "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38",
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/lambda_function.py",
+ "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a",
"is_verified": false,
- "line_number": 18,
- "type": "Hex High Entropy String"
+ "line_number": 18
},
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/lambda_function.py",
"hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff",
"is_verified": false,
- "line_number": 30,
- "type": "Hex High Entropy String"
+ "line_number": 30
}
],
"tf_files/aws/modules/common-logging/testLambda.py": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/testLambda.py",
"hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de",
"is_verified": false,
- "line_number": 5,
- "type": "Hex High Entropy String"
+ "line_number": 5
},
{
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/testLambda.py",
"hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef",
"is_verified": false,
- "line_number": 5,
- "type": "Base64 High Entropy String"
+ "line_number": 5
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/testLambda.py",
+ "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94",
+ "is_verified": false,
+ "line_number": 5
},
{
- "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38",
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/testLambda.py",
+ "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a",
"is_verified": false,
- "line_number": 5,
- "type": "Hex High Entropy String"
+ "line_number": 5
+ },
+ {
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/common-logging/testLambda.py",
+ "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff",
+ "is_verified": false,
+ "line_number": 10
}
],
"tf_files/aws/modules/eks/variables.tf": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/eks/variables.tf",
"hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884",
"is_verified": false,
- "line_number": 113,
- "type": "Hex High Entropy String"
+ "line_number": 113
}
],
"tf_files/aws/modules/management-logs/README.md": [
{
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/README.md",
"hashed_secret": "83442aa5a16cb1992731c32367ef464564388017",
"is_verified": false,
- "line_number": 54,
- "type": "Base64 High Entropy String"
- },
- {
- "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2",
- "is_verified": false,
- "line_number": 56,
- "type": "Hex High Entropy String"
+ "line_number": 54
}
],
"tf_files/aws/modules/management-logs/lambda_function.py": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/lambda_function.py",
"hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de",
"is_verified": false,
- "line_number": 18,
- "type": "Hex High Entropy String"
+ "line_number": 18
},
{
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/lambda_function.py",
"hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef",
"is_verified": false,
- "line_number": 18,
- "type": "Base64 High Entropy String"
+ "line_number": 18
+ },
+ {
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/lambda_function.py",
+ "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94",
+ "is_verified": false,
+ "line_number": 18
},
{
- "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38",
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/lambda_function.py",
+ "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a",
"is_verified": false,
- "line_number": 18,
- "type": "Hex High Entropy String"
+ "line_number": 18
},
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/lambda_function.py",
"hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff",
"is_verified": false,
- "line_number": 30,
- "type": "Hex High Entropy String"
+ "line_number": 30
}
],
"tf_files/aws/modules/management-logs/testLambda.py": [
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
"hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de",
"is_verified": false,
- "line_number": 5,
- "type": "Hex High Entropy String"
+ "line_number": 5
},
{
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
"hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef",
"is_verified": false,
- "line_number": 5,
- "type": "Base64 High Entropy String"
+ "line_number": 5
},
{
- "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38",
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
+ "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94",
"is_verified": false,
- "line_number": 5,
- "type": "Hex High Entropy String"
+ "line_number": 5
},
{
- "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a",
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
+ "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a",
"is_verified": false,
- "line_number": 6,
- "type": "Base64 High Entropy String"
+ "line_number": 5
},
{
- "hashed_secret": "51118900cd675df1b44f254057398f3e52902a5d",
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
+ "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a",
"is_verified": false,
- "line_number": 6,
- "type": "Hex High Entropy String"
+ "line_number": 6
},
{
+ "type": "Hex High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
"hashed_secret": "60a6dfc8d43cd2f5c6292899fc2f94f2d4fc32c4",
"is_verified": false,
- "line_number": 6,
- "type": "Hex High Entropy String"
- }
- ],
- "tf_files/aws/rds/sample.tfvars": [
+ "line_number": 6
+ },
{
- "hashed_secret": "76c3c4836dee37d8d0642949f84092a9a24bbf46",
+ "type": "Base64 High Entropy String",
+ "filename": "tf_files/aws/modules/management-logs/testLambda.py",
+ "hashed_secret": "d484ccb4ced21e0149078377f14b913bf5c613d0",
"is_verified": false,
- "line_number": 7,
- "type": "Secret Keyword"
+ "line_number": 6
}
],
"tf_files/aws/slurm/README.md": [
{
- "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d",
+ "type": "Secret Keyword",
+ "filename": "tf_files/aws/slurm/README.md",
+ "hashed_secret": "c16686250cd583de64e02a47a8b194cd5578b2a1",
"is_verified": false,
- "line_number": 83,
- "type": "Secret Keyword"
+ "line_number": 83
}
],
"tf_files/azure/cloud.tf": [
{
- "hashed_secret": "7c1a4b52b64e4106041971c345a1f3eab58fb2a4",
+ "type": "Secret Keyword",
+ "filename": "tf_files/azure/cloud.tf",
+ "hashed_secret": "38d930120a56321ceaa147b2bc1f19db53a0b993",
"is_verified": false,
- "line_number": 424,
- "type": "Secret Keyword"
+ "line_number": 361
}
],
"tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars": [
{
+ "type": "Secret Keyword",
+ "filename": "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars",
"hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227",
"is_verified": false,
- "line_number": 231,
- "type": "Secret Keyword"
+ "line_number": 231
}
],
"tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars": [
{
+ "type": "Secret Keyword",
+ "filename": "tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars",
"hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227",
"is_verified": false,
- "line_number": 231,
- "type": "Secret Keyword"
+ "line_number": 231
}
],
"tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP": [
{
+ "type": "Secret Keyword",
+ "filename": "tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP",
"hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227",
"is_verified": false,
- "line_number": 262,
- "type": "Secret Keyword"
+ "line_number": 262
}
],
- "tf_files/gcp/commons/sample.tfvars": [
+ "tf_files/gcp/commons/root.tf": [
{
- "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd",
+ "type": "Secret Keyword",
+ "filename": "tf_files/gcp/commons/root.tf",
+ "hashed_secret": "013b6be0bd7ef38a9ee3472cec65c208a19421e6",
"is_verified": false,
- "line_number": 11,
- "type": "Secret Keyword"
- },
+ "line_number": 65
+ }
+ ],
+ "tf_files/gcp/commons/sample.tfvars": [
{
- "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d",
+ "type": "Secret Keyword",
+ "filename": "tf_files/gcp/commons/sample.tfvars",
+ "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce",
"is_verified": false,
- "line_number": 26,
- "type": "Secret Keyword"
+ "line_number": 26
},
{
- "hashed_secret": "253c7b5e7c83a86346fc4501495b130813f08105",
- "is_verified": false,
- "line_number": 37,
- "type": "Secret Keyword"
- }
- ],
- "tf_files/shared/modules/k8s_configs/creds.tpl": [
- {
- "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c",
+ "type": "Secret Keyword",
+ "filename": "tf_files/gcp/commons/sample.tfvars",
+ "hashed_secret": "791191ef9eafc75f5dd28e37df837b4991556876",
"is_verified": false,
- "line_number": 8,
- "type": "Secret Keyword"
+ "line_number": 31
}
]
},
- "version": "0.13.1",
- "word_list": {
- "file": null,
- "hash": null
- }
+ "generated_at": "2024-03-07T21:26:14Z"
}
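Note: the baseline above follows the newer detect-secrets output schema (per-finding "type" and "filename" fields plus a top-level "generated_at" timestamp, replacing the old "version"/"word_list" fields). As a rough sketch, assuming detect-secrets 1.x is installed locally, a baseline in this format is typically refreshed and reviewed with:

    # Rescan the repo and update the existing baseline in place
    detect-secrets scan --baseline .secrets.baseline
    # Interactively mark each finding as a real secret or a false positive
    detect-secrets audit .secrets.baseline
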
diff --git a/Docker/Jenkins-Worker/install-python3.8.sh b/Docker/Jenkins-Worker/install-python3.8.sh
deleted file mode 100755
index a01d59420..000000000
--- a/Docker/Jenkins-Worker/install-python3.8.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-wget https://www.python.org/ftp/python/3.8.0/Python-3.8.0.tar.xz
-tar xf Python-3.8.0.tar.xz
-rm Python-3.8.0.tar.xz
-cd Python-3.8.0
-./configure
-make
-make altinstall
diff --git a/Docker/Jenkins2/jenkins-master-deployment.yaml b/Docker/Jenkins2/jenkins-master-deployment.yaml
deleted file mode 100755
index 922711ad1..000000000
--- a/Docker/Jenkins2/jenkins-master-deployment.yaml
+++ /dev/null
@@ -1,355 +0,0 @@
-{{- if .Capabilities.APIVersions.Has "apps/v1" }}
-apiVersion: apps/v1
-{{- else }}
-apiVersion: apps/v1
-{{- end }}
-kind: Deployment
-metadata:
- name: {{ template "jenkins.fullname" . }}
- namespace: {{ template "jenkins.namespace" . }}
- labels:
- "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
- "helm.sh/chart": "{{ .Chart.Name }}-{{ .Chart.Version }}"
- "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
- "app.kubernetes.io/instance": "{{ .Release.Name }}"
- "app.kubernetes.io/component": "{{ .Values.master.componentName }}"
- {{- range $key, $val := .Values.master.deploymentLabels }}
- {{ $key }}: {{ $val | quote }}
- {{- end}}
-spec:
- replicas: 1
- strategy:
- type: {{ if .Values.persistence.enabled }}Recreate{{ else }}RollingUpdate
- rollingUpdate:
-{{ toYaml .Values.master.rollingUpdate | indent 6 }}
- {{- end }}
- selector:
- matchLabels:
- "app.kubernetes.io/component": "{{ .Values.master.componentName }}"
- "app.kubernetes.io/instance": "{{ .Release.Name }}"
- template:
- metadata:
- labels:
- "app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
- "helm.sh/chart": "{{ .Chart.Name }}-{{ .Chart.Version }}"
- "app.kubernetes.io/managed-by": "{{ .Release.Service }}"
- "app.kubernetes.io/instance": "{{ .Release.Name }}"
- "app.kubernetes.io/component": "{{ .Values.master.componentName }}"
- {{- range $key, $val := .Values.master.podLabels }}
- {{ $key }}: {{ $val | quote }}
- {{- end}}
- annotations:
- {{- if .Values.master.podAnnotations }}
-{{ toYaml .Values.master.podAnnotations | indent 8 }}
- {{- end }}
- spec:
- {{- if .Values.master.nodeSelector }}
- nodeSelector:
-{{ toYaml .Values.master.nodeSelector | indent 8 }}
- {{- end }}
- {{- if .Values.master.tolerations }}
- tolerations:
-{{ toYaml .Values.master.tolerations | indent 8 }}
- {{- end }}
- {{- if .Values.master.affinity }}
- affinity:
-{{ toYaml .Values.master.affinity | indent 8 }}
- {{- end }}
- {{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.master.priorityClassName) }}
- priorityClassName: {{ .Values.master.priorityClassName }}
- {{- end }}
-{{- if .Values.master.usePodSecurityContext }}
- securityContext:
- runAsUser: {{ default 0 .Values.master.runAsUser }}
-{{- if and (.Values.master.runAsUser) (.Values.master.fsGroup) }}
-{{- if not (eq .Values.master.runAsUser 0.0) }}
- fsGroup: {{ .Values.master.fsGroup }}
-{{- end }}
-{{- end }}
-{{- end }}
- serviceAccountName: "{{ template "jenkins.serviceAccountName" . }}"
-{{- if .Values.master.hostNetworking }}
- hostNetwork: true
- dnsPolicy: ClusterFirstWithHostNet
-{{- end }}
- {{- if .Values.master.hostAliases }}
- hostAliases:
- {{- toYaml .Values.master.hostAliases | nindent 8 }}
- {{- end }}
- initContainers:
-{{- if .Values.master.customInitContainers }}
-{{ tpl (toYaml .Values.master.customInitContainers) . | indent 8 }}
-{{- end }}
- - name: "copy-default-config"
-{{- if .Values.master.imageTag }}
- image: "{{ .Values.master.image }}:{{ .Values.master.imageTag }}"
-{{- else }}
- image: "{{ .Values.master.image }}:{{ .Values.master.tag }}"
-{{- end }}
- imagePullPolicy: "{{ .Values.master.imagePullPolicy }}"
- command: ["sh", "/var/jenkins_config/apply_config.sh"]
- env:
- {{- if .Values.master.useSecurity }}
- - name: ADMIN_PASSWORD
- valueFrom:
- secretKeyRef:
- name: {{ template "jenkins.fullname" . }}
- key: jenkins-admin-password
- - name: ADMIN_USER
- valueFrom:
- secretKeyRef:
- name: {{ template "jenkins.fullname" . }}
- key: jenkins-admin-user
- {{- end }}
- {{- if .Values.master.initContainerEnv }}
-{{ toYaml .Values.master.initContainerEnv | indent 12 }}
- {{- end }}
- resources:
-{{ toYaml .Values.master.resources | indent 12 }}
- volumeMounts:
- - mountPath: /tmp
- name: tmp
- - mountPath: /var/jenkins_home
- name: jenkins-home
- {{- if .Values.persistence.subPath }}
- subPath: {{ .Values.persistence.subPath }}
- {{- end }}
- - mountPath: /var/jenkins_config
- name: jenkins-config
- {{- if .Values.master.enableXmlConfig }}
- {{- if .Values.master.credentialsXmlSecret }}
- - mountPath: /var/jenkins_credentials
- name: jenkins-credentials
- readOnly: true
- {{- end }}
- {{- if .Values.master.jobs }}
- - mountPath: /var/jenkins_jobs
- name: jenkins-jobs
- readOnly: true
- {{- end }}
- - mountPath: /usr/share/jenkins/ref/secrets/
- name: secrets-dir
- {{- end }}
- {{- if .Values.master.secretsFilesSecret }}
- - mountPath: /var/jenkins_secrets
- name: jenkins-secrets
- readOnly: true
- {{- end }}
- - mountPath: /usr/share/jenkins/ref/plugins
- name: plugins
- - mountPath: /var/jenkins_plugins
- name: plugin-dir
- containers:
- - name: jenkins
-{{- if .Values.master.imageTag }}
- image: "{{ .Values.master.image }}:{{ .Values.master.imageTag }}"
-{{- else }}
- image: "{{ .Values.master.image }}:{{ .Values.master.tag }}"
-{{- end }}
- imagePullPolicy: "{{ .Values.master.imagePullPolicy }}"
- {{- if .Values.master.useSecurity }}
- command:
-{{ toYaml .Values.master.command | indent 10 }}
- args:
-{{ toYaml .Values.master.args | indent 10 }}
- {{- end }}
- {{- if .Values.master.lifecycle }}
- lifecycle:
-{{ toYaml .Values.master.lifecycle | indent 12 }}
- {{- end }}
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
-{{ toYaml .Values.master.additionalEnv | indent 12 }}
- - name: JAVA_OPTS
- value: >
- {{ default "" .Values.master.javaOpts }}
- {{- if .Values.master.sidecars.configAutoReload.enabled }} -Dcasc.reload.token=$(POD_NAME) {{end}}
- - name: JENKINS_OPTS
- value: "{{ if .Values.master.jenkinsUriPrefix }}--prefix={{ .Values.master.jenkinsUriPrefix }} {{ end }}{{ default "" .Values.master.jenkinsOpts}}"
- - name: JENKINS_SLAVE_AGENT_PORT
- value: "{{ .Values.master.slaveListenerPort }}"
- {{- if .Values.master.useSecurity }}
- - name: ADMIN_PASSWORD
- valueFrom:
- secretKeyRef:
- name: {{ template "jenkins.fullname" . }}
- key: jenkins-admin-password
- - name: ADMIN_USER
- valueFrom:
- secretKeyRef:
- name: {{ template "jenkins.fullname" . }}
- key: jenkins-admin-user
- {{- end }}
- {{- if .Values.master.containerEnv }}
-{{ toYaml .Values.master.containerEnv | indent 12 }}
- {{- end }}
- {{- if .Values.master.JCasC.enabled }}
- - name: CASC_JENKINS_CONFIG
- value: {{ .Values.master.sidecars.configAutoReload.folder | default "/var/jenkins_home/casc_configs" | quote }}
- {{- end }}
- ports:
- - containerPort: 8080
- name: http
- - containerPort: {{ .Values.master.slaveListenerPort }}
- name: slavelistener
- {{- if .Values.master.slaveHostPort }}
- hostPort: {{ .Values.master.slaveHostPort }}
- {{- end }}
- {{- if .Values.master.jmxPort }}
- - containerPort: {{ .Values.master.jmxPort }}
- name: jmx
- {{- end }}
-{{- range $index, $port := .Values.master.extraPorts }}
- - containerPort: {{ $port.port }}
- name: {{ $port.name }}
-{{- end }}
-{{- if .Values.master.healthProbes }}
- livenessProbe:
- httpGet:
- path: "{{ default "" .Values.master.jenkinsUriPrefix }}/login"
- port: http
- initialDelaySeconds: {{ .Values.master.healthProbeLivenessInitialDelay }}
- periodSeconds: {{ .Values.master.healthProbeLivenessPeriodSeconds }}
- timeoutSeconds: {{ .Values.master.healthProbesLivenessTimeout }}
- failureThreshold: {{ .Values.master.healthProbeLivenessFailureThreshold }}
- readinessProbe:
- httpGet:
- path: "{{ default "" .Values.master.jenkinsUriPrefix }}/login"
- port: http
- initialDelaySeconds: {{ .Values.master.healthProbeReadinessInitialDelay }}
- periodSeconds: {{ .Values.master.healthProbeReadinessPeriodSeconds }}
- timeoutSeconds: {{ .Values.master.healthProbesReadinessTimeout }}
- failureThreshold: {{ .Values.master.healthProbeReadinessFailureThreshold }}
-{{- end }}
-
- resources:
-{{ toYaml .Values.master.resources | indent 12 }}
- volumeMounts:
-{{- if .Values.persistence.mounts }}
-{{ toYaml .Values.persistence.mounts | indent 12 }}
-{{- end }}
- - mountPath: /tmp
- name: tmp
- - mountPath: /var/jenkins_home
- name: jenkins-home
- readOnly: false
- {{- if .Values.persistence.subPath }}
- subPath: {{ .Values.persistence.subPath }}
- {{- end }}
- - mountPath: /var/jenkins_config
- name: jenkins-config
- readOnly: true
- {{- if .Values.master.enableXmlConfig }}
- {{- if .Values.master.credentialsXmlSecret }}
- - mountPath: /var/jenkins_credentials
- name: jenkins-credentials
- readOnly: true
- {{- end }}
- {{- if .Values.master.jobs }}
- - mountPath: /var/jenkins_jobs
- name: jenkins-jobs
- readOnly: true
- {{- end }}
- - mountPath: /usr/share/jenkins/ref/secrets/
- name: secrets-dir
- readOnly: false
- {{- end }}
- {{- if or .Values.master.secretsFilesSecret }}
- - mountPath: /var/jenkins_secrets
- name: jenkins-secrets
- readOnly: true
- {{- end }}
- - mountPath: /usr/share/jenkins/ref/plugins/
- name: plugin-dir
- readOnly: false
- {{- if and (.Values.master.JCasC.enabled) (.Values.master.sidecars.configAutoReload.enabled) }}
- - name: sc-config-volume
- mountPath: {{ .Values.master.sidecars.configAutoReload.folder | default "/var/jenkins_home/casc_configs" | quote }}
- {{- end }}
-
-{{- if and (.Values.master.JCasC.enabled) (.Values.master.sidecars.configAutoReload.enabled) }}
- - name: jenkins-sc-config
- image: "{{ .Values.master.sidecars.configAutoReload.image }}"
- imagePullPolicy: {{ .Values.master.sidecars.configAutoReload.imagePullPolicy }}
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: LABEL
- value: "{{ template "jenkins.fullname" . }}-jenkins-config"
- - name: FOLDER
- value: "{{ .Values.master.sidecars.configAutoReload.folder }}"
- - name: NAMESPACE
- value: "{{ .Values.master.sidecars.configAutoReload.searchNamespace | default .Release.Namespace }}"
- - name: REQ_URL
- value: "http://localhost:8080/reload-configuration-as-code/?casc-reload-token=$(POD_NAME)"
- - name: REQ_METHOD
- value: "POST"
- resources:
-{{ toYaml .Values.master.sidecars.configAutoReload.resources | indent 12 }}
- volumeMounts:
- - name: sc-config-volume
- mountPath: {{ .Values.master.sidecars.configAutoReload.folder | quote }}
- - name: jenkins-home
- mountPath: /var/jenkins_home
- {{- if .Values.persistence.subPath }}
- subPath: {{ .Values.persistence.subPath }}
- {{- end }}
-{{- end}}
-
-
-{{- if .Values.master.sidecars.other}}
-{{ tpl (toYaml .Values.master.sidecars.other | indent 8) .}}
-{{- end }}
-
- volumes:
-{{- if .Values.persistence.volumes }}
-{{ tpl (toYaml .Values.persistence.volumes | indent 6) . }}
-{{- end }}
- - name: plugins
- emptyDir: {}
- - name: tmp
- emptyDir: {}
- - name: jenkins-config
- configMap:
- name: {{ template "jenkins.fullname" . }}
- {{- if .Values.master.enableXmlConfig }}
- {{- if .Values.master.credentialsXmlSecret }}
- - name: jenkins-credentials
- secret:
- secretName: {{ .Values.master.credentialsXmlSecret }}
- {{- end }}
- {{- if .Values.master.jobs }}
- - name: jenkins-jobs
- configMap:
- name: {{ template "jenkins.fullname" . }}-jobs
- {{- end }}
- - name: secrets-dir
- emptyDir: {}
- {{- end }}
- {{- if .Values.master.secretsFilesSecret }}
- - name: jenkins-secrets
- secret:
- secretName: {{ .Values.master.secretsFilesSecret }}
- {{- end }}
- - name: plugin-dir
- emptyDir: {}
- - name: jenkins-home
- {{- if .Values.persistence.enabled }}
- persistentVolumeClaim:
- claimName: {{ .Values.persistence.existingClaim | default (include "jenkins.fullname" .) }}
- {{- else }}
- emptyDir: {}
- {{- end -}}
- {{- if .Values.master.JCasC.enabled }}
- - name: sc-config-volume
- emptyDir: {}
- {{- end }}
-{{- if .Values.master.imagePullSecretName }}
- imagePullSecrets:
- - name: {{ .Values.master.imagePullSecretName }}
-{{- end -}}
diff --git a/Docker/Jenkins2/jenkins.values b/Docker/Jenkins2/jenkins.values
deleted file mode 100644
index 404b59b49..000000000
--- a/Docker/Jenkins2/jenkins.values
+++ /dev/null
@@ -1,39 +0,0 @@
-master:
- # Used for label app.kubernetes.io/component
- componentName: "k8s-jenkins-master-deployment"
- serviceType: NodePort
- NodePort: 32323
- adminUser: "admin"
- # adminPassword: ""
-
- image: "quay.io/cdis/k8s-jenkins-master"
- tag: "latest"
-
- installPlugins: false
-
- podLabels:
- app: jenkins
-
- additionalEnv:
- - name: AWS_ACCESS_KEY_ID
- valueFrom:
- secretKeyRef:
- name: jenkins-secret
- key: aws_access_key_id
- - name: AWS_SECRET_ACCESS_KEY
- valueFrom:
- secretKeyRef:
- name: jenkins-secret
- key: aws_secret_access_key
-
- command:
- - /sbin/tini
- args:
- - --
- - /opt/cdis/bin/jenkins2.sh
-
-rbac:
- create: true
-persistence:
- size: "200Gi"
-
diff --git a/Docker/Jenkins2/k8sjenkins-agent-master-policy.yaml b/Docker/Jenkins2/k8sjenkins-agent-master-policy.yaml
deleted file mode 100644
index 81ddc8a3d..000000000
--- a/Docker/Jenkins2/k8sjenkins-agent-master-policy.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: NetworkPolicy
-metadata:
- name: k8sjenkins-agent-master-policy
- namespace: default
-spec:
- egress:
- - {}
- ingress:
- - ports:
- - port: 8080
- protocol: TCP
- podSelector:
- matchLabels:
- jenkins: slave
- policyTypes:
- - Ingress
- - Egress
diff --git a/Docker/Jenkins2/k8sjenkins-agent-policy.yaml b/Docker/Jenkins2/k8sjenkins-agent-policy.yaml
deleted file mode 100644
index 92bdc47d4..000000000
--- a/Docker/Jenkins2/k8sjenkins-agent-policy.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: NetworkPolicy
-metadata:
- name: k8sjenkins-agent-policy
- namespace: default
-spec:
- egress:
- - {}
- ingress:
- - ports:
- - port: 50000
- protocol: TCP
- podSelector:
- matchLabels:
- app.kubernetes.io/component: k8s-jenkins-master-deployment
- app.kubernetes.io/instance: k8sjenkins
- policyTypes:
- - Ingress
- - Egress
diff --git a/Docker/awshelper/Dockerfile b/Docker/awshelper/Dockerfile
index 8660fa23c..f3dd7b60e 100644
--- a/Docker/awshelper/Dockerfile
+++ b/Docker/awshelper/Dockerfile
@@ -1,7 +1,7 @@
# Build from root of cloud-automation/ repo:
# docker build -f Docker/awshelper/Dockerfile
#
-FROM quay.io/cdis/ubuntu:18.04
+FROM quay.io/cdis/ubuntu:22.04
ENV DEBIAN_FRONTEND=noninteractive
@@ -22,10 +22,10 @@ RUN apt-get update && apt-get upgrade -y \
libpq-dev \
lsb-release \
netcat-openbsd \
+ networkd-dispatcher \
net-tools \
openssh-client \
openssh-server \
- postgresql-client \
python3 \
python3-dev \
python3-pip \
@@ -38,6 +38,9 @@ RUN apt-get update && apt-get upgrade -y \
wget \
gettext-base
+# Can be removed once https://github.com/yaml/pyyaml/issues/724 is resolved
+RUN pip install pyyaml==5.3.1
+
RUN python3 -m pip install --upgrade pip \
&& python3 -m pip install --upgrade setuptools \
&& python3 -m pip install -U crcmod \
@@ -51,24 +54,31 @@ RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2
&& /bin/rm -rf awscliv2.zip ./aws
# From https://hub.docker.com/r/google/cloud-sdk/~/dockerfile/
-RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \
+RUN export CLOUD_SDK_REPO="cloud-sdk" && \
echo "deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" > /etc/apt/sources.list.d/google-cloud-sdk.list && \
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \
curl -sL https://deb.nodesource.com/setup_14.x | bash - && \
apt-get update && \
- apt-get install -y google-cloud-sdk \
- google-cloud-sdk-cbt \
- kubectl && \
+ apt-get install -y kubectl && \
apt-get install -y --no-install-recommends nodejs && \
- rm -rf /var/lib/apt/lists/* \
- gcloud config set core/disable_usage_reporting true && \
- gcloud config set component_manager/disable_update_check true && \
- gcloud config set metrics/environment github_docker_image && \
- gcloud --version && \
+ rm -rf /var/lib/apt/lists/* && \
kubectl version --client && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /var/log/*
+# Install postgres 13 client
+RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc| gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg && \
+ echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list && \
+ apt-get update && \
+ apt-get install -y postgresql-client-13
+
+# install terraform
+RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip \
+ && unzip /tmp/terraform.zip -d /usr/local/bin && /bin/rm /tmp/terraform.zip
+
+RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip \
+ && unzip /tmp/terraform.zip -d /tmp && mv /tmp/terraform /usr/local/bin/terraform12 && /bin/rm /tmp/terraform.zip
+
RUN useradd -m -s /bin/bash ubuntu && \
( echo "ubuntu:gen3" | chpasswd )
@@ -107,7 +117,7 @@ RUN cd ./cloud-automation \
&& npm ci \
&& cat ./Docker/awshelper/bashrc_suffix.sh >> ~/.bashrc
-RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3 -
+RUN export DEB_PYTHON_INSTALL_LAYOUT=deb && export POETRY_VERSION=1.1.15 && curl -sSL https://install.python-poetry.org | python3 -
RUN git config --global user.email gen3 \
&& git config --global user.name gen3
diff --git a/Docker/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile
similarity index 61%
rename from Docker/Jenkins-CI-Worker/Dockerfile
rename to Docker/jenkins/Jenkins-CI-Worker/Dockerfile
index 3ed282c80..6eeb8f4fd 100644
--- a/Docker/Jenkins-CI-Worker/Dockerfile
+++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile
@@ -1,11 +1,10 @@
-FROM jenkins/jnlp-slave:4.9-1
+FROM jenkins/inbound-agent:jdk21
USER root
ENV DEBIAN_FRONTEND=noninteractive
-# install python
-RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base
+RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base
RUN set -xe && apt-get update \
&& apt-get install -y lsb-release \
@@ -16,7 +15,6 @@ RUN set -xe && apt-get update \
libffi-dev \
libssl-dev \
libghc-regex-pcre-dev \
- linux-headers-amd64 \
libcurl4-openssl-dev \
libncurses5-dev \
libncursesw5-dev \
@@ -27,20 +25,19 @@ RUN set -xe && apt-get update \
libbz2-dev \
libexpat1-dev \
liblzma-dev \
- python-virtualenv \
lua5.3 \
r-base \
software-properties-common \
sudo \
tk-dev \
+ wget \
zlib1g-dev \
zsh \
ca-certificates-java \
- openjdk-11-jre-headless \
&& ln -s /usr/bin/lua5.3 /usr/local/bin/lua
# Use jdk11
-ENV JAVA_HOME="/usr/lib/jvm/java-11-openjdk-amd64"
+ENV JAVA_HOME="/opt/java/openjdk"
ENV PATH="$JAVA_HOME/bin:$PATH"
COPY ./certfix.sh /certfix.sh
@@ -56,32 +53,34 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \
google-cloud-sdk-cbt \
kubectl
+# install go - https://go.dev/doc/install
+RUN wget https://go.dev/dl/go1.21.0.linux-amd64.tar.gz \
+ && rm -rf /usr/local/go \
+ && tar -C /usr/local -xzf go1.21.0.linux-amd64.tar.gz
+ENV PATH="$PATH:/usr/local/go/bin"
+RUN go version
+
#
# install docker tools:
-# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1
-# * https://docs.docker.com/compose/install/#install-compose
#
-RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \
- && add-apt-repository \
- "deb [arch=amd64] https://download.docker.com/linux/debian \
- $(lsb_release -cs) \
- stable" \
- && apt-get update \
- && apt-get install -y docker-ce \
- && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \
- && chmod a+rx /usr/local/bin/docker-compose
+RUN sudo install -m 0755 -d /etc/apt/keyrings \
+ && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
+ && sudo chmod a+r /etc/apt/keyrings/docker.gpg \
+ && echo \
+ "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
+ "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \
+ && apt-get update && apt-get install -y docker-ce
# install nodejs
-RUN curl -sL https://deb.nodesource.com/setup_12.x | bash -
-RUN apt-get update && apt-get install -y nodejs
+RUN curl -sL https://deb.nodesource.com/setup_14.x | bash -
+RUN apt-get update && apt-get install -y nodejs npm
-# add psql: https://www.postgresql.org/download/linux/debian/
-RUN DISTRO="$(lsb_release -c -s)" \
- && echo "deb http://apt.postgresql.org/pub/repos/apt/ ${DISTRO}-pgdg main" > /etc/apt/sources.list.d/pgdg.list \
- && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
- && apt-get update \
- && apt-get install -y postgresql-client-13 libpq-dev \
- && rm -rf /var/lib/apt/lists/*
+# Install postgres 13 client
+RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc| gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg && \
+ echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list && \
+ apt-get update && \
+ apt-get install -y postgresql-client-13
# Copy sh script responsible for installing Python
COPY install-python3.8.sh /root/tmp/install-python3.8.sh
@@ -98,9 +97,7 @@ RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \
sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository
# install aws cli, poetry, pytest, etc.
-RUN set -xe && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade
-
-RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3.8 -
+RUN set -xe && python3.8 -m pip install --upgrade pip setuptools && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade
# install terraform
RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip \
@@ -119,6 +116,9 @@ RUN curl -sS -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-ke
&& apt-get -y update \
&& apt-get -y install google-chrome-stable
+# data-simulator needs "/usr/share/dict/words" to generate data that isn't random strings
+RUN apt-get install --reinstall wamerican
+
# update /etc/sudoers
RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \
&& /bin/echo -e "\n%sudo ALL=(ALL:ALL) NOPASSWD:ALL\n" >> /etc/sudoers.bak \
diff --git a/Docker/Jenkins-CI-Worker/README.md b/Docker/jenkins/Jenkins-CI-Worker/README.md
similarity index 100%
rename from Docker/Jenkins-CI-Worker/README.md
rename to Docker/jenkins/Jenkins-CI-Worker/README.md
diff --git a/Docker/Jenkins-CI-Worker/certfix.sh b/Docker/jenkins/Jenkins-CI-Worker/certfix.sh
similarity index 100%
rename from Docker/Jenkins-CI-Worker/certfix.sh
rename to Docker/jenkins/Jenkins-CI-Worker/certfix.sh
diff --git a/Docker/Jenkins-CI-Worker/install-python3.8.sh b/Docker/jenkins/Jenkins-CI-Worker/install-python3.8.sh
similarity index 100%
rename from Docker/Jenkins-CI-Worker/install-python3.8.sh
rename to Docker/jenkins/Jenkins-CI-Worker/install-python3.8.sh
diff --git a/Docker/Jenkins-Worker/Dockerfile b/Docker/jenkins/Jenkins-Worker/Dockerfile
similarity index 63%
rename from Docker/Jenkins-Worker/Dockerfile
rename to Docker/jenkins/Jenkins-Worker/Dockerfile
index 4b6939707..fec6b3203 100644
--- a/Docker/Jenkins-Worker/Dockerfile
+++ b/Docker/jenkins/Jenkins-Worker/Dockerfile
@@ -1,17 +1,14 @@
-FROM jenkins/jnlp-slave:4.3-1
+FROM jenkins/inbound-agent:jdk21
USER root
ENV DEBIAN_FRONTEND=noninteractive
-# install python and pip and aws cli
-RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip build-essential libgit2-dev zip unzip less vim gettext-base
-RUN set -xe && python -m pip install awscli --upgrade && python -m pip install pytest --upgrade && python -m pip install PyYAML --upgrade && python -m pip install lxml --upgrade
-RUN set -xe && python3 -m pip install pytest --upgrade && python3 -m pip install PyYAML --upgrade
-RUN set -xe && python -m pip install yq --upgrade && python3 -m pip install yq --upgrade && python3 -m pip install pandas --upgrade
+RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base
RUN apt-get update \
&& apt-get install -y lsb-release \
+ git \
apt-transport-https \
r-base \
libffi-dev \
@@ -34,21 +31,34 @@ RUN apt-get update \
lua5.3 \
software-properties-common \
sudo \
+ wget \
&& ln -s /usr/bin/lua5.3 /usr/local/bin/lua
# install Ruby.
RUN apt-get install -y ruby-full
-# install GIT from buster-backports
-RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/buster-backports.list \
- && apt-get update \
- && apt-get -t=buster-backports -y install git=1:2.30.*
+#
+# install docker tools:
+#
+RUN sudo install -m 0755 -d /etc/apt/keyrings \
+ && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
+ && sudo chmod a+r /etc/apt/keyrings/docker.gpg \
+ && echo \
+ "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
+ "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \
+ && apt-get update && apt-get install -y docker-ce
# install k6 to run load tests
RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69 \
&& echo "deb https://dl.k6.io/deb stable main" | tee /etc/apt/sources.list.d/k6.list \
&& apt-get update \
&& apt-get install k6
+
+# install xk6-browser
+RUN cd /opt && wget --quiet https://github.com/grafana/xk6-browser/releases/download/v0.3.0/xk6-browser-v0.3.0-linux-amd64.tar.gz \
+ && tar -xvzf /opt/xk6-browser-v0.3.0-linux-amd64.tar.gz
+ENV PATH="/opt/xk6-browser-v0.3.0-linux-amd64:${PATH}"
# install google tools
RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \
@@ -65,20 +75,18 @@ RUN wget https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && c
#
# install docker tools:
-# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1
-# * https://docs.docker.com/compose/install/#install-compose
#
RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \
&& /usr/bin/add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian \
$(lsb_release -c -s) \
stable" \
&& apt-get update \
- && apt-get install -y docker-ce \
+ && apt-get install -y docker-ce-cli \
&& curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \
&& chmod a+rx /usr/local/bin/docker-compose
# install nodejs
-RUN curl -sL https://deb.nodesource.com/setup_14.x | bash -
+RUN curl -sL https://deb.nodesource.com/setup_18.x | bash -
RUN apt-get update && apt-get install -y nodejs
# install chrome (supports headless mode)
@@ -99,33 +107,32 @@ RUN unzip /tmp/packer.zip -d /usr/local/bin; /bin/rm /tmp/packer.zip
# add psql: https://www.postgresql.org/download/linux/debian/
RUN DISTRO="$(lsb_release -c -s)" \
&& echo "deb http://apt.postgresql.org/pub/repos/apt/ ${DISTRO}-pgdg main" > /etc/apt/sources.list.d/pgdg.list \
- && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
+ && wget --quiet --no-check-certificate -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
&& apt-get update \
- && apt-get install -y postgresql-client-13 \
+ && apt-get install -y postgresql-client-13 libpq-dev \
&& rm -rf /var/lib/apt/lists/*
# Copy sh script responsible for installing Python
-COPY install-python3.8.sh /root/tmp/install-python3.8.sh
+COPY install-python3.9.sh /root/tmp/install-python3.9.sh
-# Run the script responsible for installing Python 3.8.0 and link it to /usr/bin/python
-RUN chmod +x /root/tmp/install-python3.8.sh; sync && \
- bash /root/tmp/install-python3.8.sh && \
- rm -rf /root/tmp/install-python3.8.sh && \
+# Run the script responsible for installing Python 3.9.0 and link it to /usr/bin/python
+RUN chmod +x /root/tmp/install-python3.9.sh; sync && \
+ bash /root/tmp/install-python3.9.sh && \
+ rm -rf /root/tmp/install-python3.9.sh && \
unlink /usr/bin/python3 && \
- ln -s /usr/local/bin/python3.8 /usr/bin/python3
+ ln -s /usr/local/bin/python3.9 /usr/bin/python && \
+ ln -s /usr/local/bin/python3.9 /usr/bin/python3
RUN env
RUN which python
-RUN which python3.8
+RUN which python3.9
# Fix shebang for lsb_release
-RUN sed -i 's/python3/python3.7/' /usr/bin/lsb_release && \
- sed -i 's/python3/python3.7/' /usr/bin/add-apt-repository
+RUN sed -i 's/python3/python3.9/' /usr/bin/lsb_release && \
+ sed -i 's/python3/python3.9/' /usr/bin/add-apt-repository
# install aws cli, poetry, pytest, etc.
-RUN set -xe && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade
-
-RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3.8 -
+RUN set -xe && python3.9 -m pip install --upgrade pip && python3.9 -m pip install awscli --upgrade && python3.9 -m pip install pytest --upgrade && python3.9 -m pip install poetry && python3.9 -m pip install PyYAML --upgrade && python3.9 -m pip install lxml --upgrade && python3.9 -m pip install yq --upgrade
# update /etc/sudoers
RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \
diff --git a/Docker/Jenkins-Worker/README.md b/Docker/jenkins/Jenkins-Worker/README.md
similarity index 100%
rename from Docker/Jenkins-Worker/README.md
rename to Docker/jenkins/Jenkins-Worker/README.md
diff --git a/Docker/jenkins/Jenkins-Worker/install-python3.9.sh b/Docker/jenkins/Jenkins-Worker/install-python3.9.sh
new file mode 100755
index 000000000..30ee05993
--- /dev/null
+++ b/Docker/jenkins/Jenkins-Worker/install-python3.9.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+wget https://www.python.org/ftp/python/3.9.0/Python-3.9.0.tar.xz
+tar xf Python-3.9.0.tar.xz
+rm Python-3.9.0.tar.xz
+cd Python-3.9.0
+./configure
+make
+make altinstall
diff --git a/Docker/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile
similarity index 71%
rename from Docker/Jenkins/Dockerfile
rename to Docker/jenkins/Jenkins/Dockerfile
index e06eb7b71..04ebe5864 100644
--- a/Docker/Jenkins/Dockerfile
+++ b/Docker/jenkins/Jenkins/Dockerfile
@@ -1,11 +1,10 @@
-FROM jenkins/jenkins:2.298
+FROM jenkins/jenkins:2.426.3-lts-jdk21
USER root
ENV DEBIAN_FRONTEND=noninteractive
-# install python
-RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base
+RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base
RUN set -xe && apt-get update \
&& apt-get install -y lsb-release \
@@ -25,12 +24,12 @@ RUN set -xe && apt-get update \
libbz2-dev \
libexpat1-dev \
liblzma-dev \
- python-virtualenv \
lua5.3 \
r-base \
software-properties-common \
sudo \
tk-dev \
+ wget \
zlib1g-dev \
zsh \
&& ln -s /usr/bin/lua5.3 /usr/local/bin/lua
@@ -46,21 +45,18 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \
#
# install docker tools:
-# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1
-# * https://docs.docker.com/compose/install/#install-compose
#
-RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \
- && add-apt-repository \
- "deb [arch=amd64] https://download.docker.com/linux/debian \
- $(lsb_release -cs) \
- stable" \
- && apt-get update \
- && apt-get install -y docker-ce \
- && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \
- && chmod a+rx /usr/local/bin/docker-compose
+RUN sudo install -m 0755 -d /etc/apt/keyrings \
+ && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
+ && sudo chmod a+r /etc/apt/keyrings/docker.gpg \
+ && echo \
+ "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
+ "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \
+ && apt-get update && apt-get install -y docker-ce
# install nodejs
-RUN curl -sL https://deb.nodesource.com/setup_12.x | bash -
+RUN curl -sL https://deb.nodesource.com/setup_18.x | bash -
RUN apt-get update && apt-get install -y nodejs
# add psql: https://www.postgresql.org/download/linux/debian/
@@ -82,13 +78,11 @@ RUN chmod +x /root/tmp/install-python3.8.sh; sync && \
ln -s /Python-3.8.0/python /usr/bin/python3
# Fix shebang for lsb_release
-RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \
- sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository
+RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \
+ sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository
# install aws cli, poetry, pytest, etc.
-RUN set -xe && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade
-
-RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3 -
+RUN set -xe && python3 -m pip install --upgrade pip && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade
# install chrome (supports headless mode)
RUN set -xe \
@@ -117,7 +111,7 @@ RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \
# add our custom start script
COPY jenkins.sh /opt/cdis/bin/jenkins.sh
RUN chmod -R a+rx /opt/cdis
-ENTRYPOINT ["/sbin/tini", "--", "/opt/cdis/bin/jenkins.sh"]
+ENTRYPOINT ["/usr/bin/tini", "--", "/opt/cdis/bin/jenkins.sh"]
USER jenkins
diff --git a/Docker/Jenkins/README.md b/Docker/jenkins/Jenkins/README.md
similarity index 100%
rename from Docker/Jenkins/README.md
rename to Docker/jenkins/Jenkins/README.md
diff --git a/Docker/Jenkins/install-python3.8.sh b/Docker/jenkins/Jenkins/install-python3.8.sh
similarity index 100%
rename from Docker/Jenkins/install-python3.8.sh
rename to Docker/jenkins/Jenkins/install-python3.8.sh
diff --git a/Docker/Jenkins/jenkins.sh b/Docker/jenkins/Jenkins/jenkins.sh
similarity index 100%
rename from Docker/Jenkins/jenkins.sh
rename to Docker/jenkins/Jenkins/jenkins.sh
diff --git a/Docker/Jenkins2/Dockerfile b/Docker/jenkins/Jenkins2/Dockerfile
similarity index 73%
rename from Docker/Jenkins2/Dockerfile
rename to Docker/jenkins/Jenkins2/Dockerfile
index 26f81c143..e6b73bc76 100644
--- a/Docker/Jenkins2/Dockerfile
+++ b/Docker/jenkins/Jenkins2/Dockerfile
@@ -1,11 +1,10 @@
-FROM jenkins/jenkins:2.298
+FROM jenkins/jenkins:2.426.3-lts-jdk21
USER root
ENV DEBIAN_FRONTEND=noninteractive
-# install python
-RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base
+RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base
RUN set -xe && apt-get update \
&& apt-get install -y lsb-release \
@@ -25,12 +24,12 @@ RUN set -xe && apt-get update \
libbz2-dev \
libexpat1-dev \
liblzma-dev \
- python-virtualenv \
lua5.3 \
r-base \
software-properties-common \
sudo \
tk-dev \
+ wget \
zlib1g-dev \
zsh \
&& ln -s /usr/bin/lua5.3 /usr/local/bin/lua
@@ -46,21 +45,19 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \
#
# install docker tools:
-# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1
-# * https://docs.docker.com/compose/install/#install-compose
#
-RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \
- && add-apt-repository \
- "deb [arch=amd64] https://download.docker.com/linux/debian \
- $(lsb_release -cs) \
- stable" \
- && apt-get update \
- && apt-get install -y docker-ce \
- && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \
- && chmod a+rx /usr/local/bin/docker-compose
+RUN sudo install -m 0755 -d /etc/apt/keyrings \
+ && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
+ && sudo chmod a+r /etc/apt/keyrings/docker.gpg \
+ && echo \
+ "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
+ "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \
+ && apt-get update && apt-get install -y docker-ce
+
# install nodejs
-RUN curl -sL https://deb.nodesource.com/setup_12.x | bash -
+RUN curl -sL https://deb.nodesource.com/setup_18.x | bash -
RUN apt-get update && apt-get install -y nodejs
# add psql: https://www.postgresql.org/download/linux/debian/
@@ -86,9 +83,7 @@ RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \
sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository
# install aws cli, poetry, pytest, etc.
-RUN set -xe && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade
-
-RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3 -
+RUN set -xe && python3 -m pip install --upgrade pip && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade
# install chrome (supports headless mode)
RUN set -xe \
@@ -117,7 +112,7 @@ RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \
# add our custom start script
COPY jenkins2.sh /opt/cdis/bin/jenkins2.sh
RUN chmod -R a+rx /opt/cdis
-ENTRYPOINT ["/sbin/tini", "--", "/opt/cdis/bin/jenkins2.sh"]
+ENTRYPOINT ["/usr/bin/tini", "--", "/opt/cdis/bin/jenkins2.sh"]
USER jenkins
diff --git a/Docker/Jenkins2/README.md b/Docker/jenkins/Jenkins2/README.md
similarity index 100%
rename from Docker/Jenkins2/README.md
rename to Docker/jenkins/Jenkins2/README.md
diff --git a/Docker/Jenkins2/install-python3.8.sh b/Docker/jenkins/Jenkins2/install-python3.8.sh
similarity index 100%
rename from Docker/Jenkins2/install-python3.8.sh
rename to Docker/jenkins/Jenkins2/install-python3.8.sh
diff --git a/Docker/Jenkins2/jenkins2.sh b/Docker/jenkins/Jenkins2/jenkins2.sh
similarity index 82%
rename from Docker/Jenkins2/jenkins2.sh
rename to Docker/jenkins/Jenkins2/jenkins2.sh
index c0fb0e4ea..fe4c53329 100644
--- a/Docker/Jenkins2/jenkins2.sh
+++ b/Docker/jenkins/Jenkins2/jenkins2.sh
@@ -16,14 +16,25 @@ if [ -z "$JENKINS_S3_PATH" ]; then
JENKINS_S3_PATH="s3://cdis-terraform-state/Jenkins2Backup"
fi
+#
# Setup ~/.aws to support cloud-automation/gen3
+# terraform stuff wants a profile to query
+#
mkdir -p ~/.aws
 cat - > ~/.aws/config <<EOF
 EOF
 cat - > ~/.aws/credentials <<EOF
 EOF
diff --git a/Docker/python-nginx/python3.6-alpine3.7/Dockerfile b/Docker/python-nginx/python3.6-alpine3.7/Dockerfile
index 06eface8f..ad371dcbd 100755
--- a/Docker/python-nginx/python3.6-alpine3.7/Dockerfile
+++ b/Docker/python-nginx/python3.6-alpine3.7/Dockerfile
@@ -145,6 +145,9 @@ RUN GPG_KEYS=B0F4253373F8F6F510D42178520A9993A1C052F8 \
COPY nginx.conf /etc/nginx/nginx.conf
COPY uwsgi.conf /etc/nginx/sites-available/
+# Install ddtrace
+RUN pip install ddtrace
+
# Standard set up Nginx finished
EXPOSE 80
diff --git a/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh b/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh
index e73de6697..4f4f6a6f6 100644
--- a/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh
+++ b/Docker/python-nginx/python3.6-alpine3.7/dockerrun.sh
@@ -91,7 +91,6 @@ if [[ -z $DD_ENABLED ]]; then
run uwsgi --ini /etc/uwsgi/uwsgi.ini
) &
else
-pip install ddtrace
echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini
(
ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini
diff --git a/Docker/python-nginx/python3.6-buster/Dockerfile b/Docker/python-nginx/python3.6-buster/Dockerfile
index e2d658038..55862b1d7 100644
--- a/Docker/python-nginx/python3.6-buster/Dockerfile
+++ b/Docker/python-nginx/python3.6-buster/Dockerfile
@@ -108,7 +108,7 @@ EXPOSE 443
# install uwsgi
# https://uwsgi-docs.readthedocs.io/en/latest/Install.html
RUN python -m pip install --upgrade pip
-RUN pip install uwsgi
+RUN pip install uwsgi ddtrace
# Remove default configuration from Nginx
RUN rm /etc/nginx/conf.d/default.conf
diff --git a/Docker/python-nginx/python3.6-buster/dockerrun.sh b/Docker/python-nginx/python3.6-buster/dockerrun.sh
index ba0e39b3d..583590e36 100644
--- a/Docker/python-nginx/python3.6-buster/dockerrun.sh
+++ b/Docker/python-nginx/python3.6-buster/dockerrun.sh
@@ -91,7 +91,6 @@ if [ -z $DD_ENABLED ]; then
run uwsgi --ini /etc/uwsgi/uwsgi.ini
) &
else
-pip install ddtrace
echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini
(
ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini
diff --git a/Docker/python-nginx/python3.9-buster/Dockerfile b/Docker/python-nginx/python3.9-buster/Dockerfile
index 9f81c1aac..f847b3864 100644
--- a/Docker/python-nginx/python3.9-buster/Dockerfile
+++ b/Docker/python-nginx/python3.9-buster/Dockerfile
@@ -108,7 +108,7 @@ EXPOSE 443
# install uwsgi
# https://uwsgi-docs.readthedocs.io/en/latest/Install.html
RUN python -m pip install --upgrade pip
-RUN pip install uwsgi
+RUN pip install uwsgi ddtrace
# Remove default configuration from Nginx
RUN rm /etc/nginx/conf.d/default.conf
diff --git a/Docker/python-nginx/python3.9-buster/dockerrun.sh b/Docker/python-nginx/python3.9-buster/dockerrun.sh
index ba0e39b3d..583590e36 100644
--- a/Docker/python-nginx/python3.9-buster/dockerrun.sh
+++ b/Docker/python-nginx/python3.9-buster/dockerrun.sh
@@ -91,7 +91,6 @@ if [ -z $DD_ENABLED ]; then
run uwsgi --ini /etc/uwsgi/uwsgi.ini
) &
else
-pip install ddtrace
echo "import=ddtrace.bootstrap.sitecustomize" >> /etc/uwsgi/uwsgi.ini
(
ddtrace-run uwsgi --enable-threads --ini /etc/uwsgi/uwsgi.ini
diff --git a/Docker/python-nginx/python3.9-buster/uwsgi.conf b/Docker/python-nginx/python3.9-buster/uwsgi.conf
index 97c53335d..7bafdb48d 100644
--- a/Docker/python-nginx/python3.9-buster/uwsgi.conf
+++ b/Docker/python-nginx/python3.9-buster/uwsgi.conf
@@ -15,7 +15,19 @@ server {
server {
listen 80;
- large_client_header_buffers 4 64k;
+ proxy_buffer_size 16k;
+ proxy_buffers 4 16k;
+ proxy_busy_buffers_size 32k;
+
+ uwsgi_buffer_size 16k;
+ uwsgi_buffers 4 16k;
+ uwsgi_busy_buffers_size 32k;
+
+ client_header_buffer_size 32k;
+ large_client_header_buffers 4 16k;
+
+ proxy_buffering off;
+ uwsgi_buffering off;
location / {
uwsgi_param REMOTE_ADDR $http_x_forwarded_for if_not_empty;
diff --git a/Docker/sidecar/Dockerfile b/Docker/sidecar/Dockerfile
index ad784ba55..5e07ceaf4 100644
--- a/Docker/sidecar/Dockerfile
+++ b/Docker/sidecar/Dockerfile
@@ -1,4 +1,4 @@
-FROM nginx:1.15.6-alpine
+FROM nginx:1-alpine
COPY nginx.conf /etc/nginx/nginx.conf
COPY uwsgi.conf.template /etc/nginx/gen3.conf.d/uwsgi.conf.template
diff --git a/Jenkinsfile b/Jenkinsfile
index 54b8c6d9d..908c2d01a 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -43,9 +43,52 @@ metadata:
app: ephemeral-ci-run
netnolimit: "yes"
annotations:
+ karpenter.sh/do-not-evict: true
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ initContainers:
+ - name: wait-for-jenkins-connection
+ image: quay.io/cdis/gen3-ci-worker:master
+ command: ["/bin/sh","-c"]
+ args: ["while [ $(curl -sw '%{http_code}' http://jenkins-master-service:8080/tcpSlaveAgentListener/ -o /dev/null) -ne 200 ]; do sleep 5; echo 'Waiting for jenkins connection ...'; done"]
containers:
+ - name: jnlp
+ command: ["/bin/sh","-c"]
+ args: ["sleep 30; /usr/local/bin/jenkins-agent"]
+ resources:
+ requests:
+ cpu: 500m
+ memory: 500Mi
+ ephemeral-storage: 500Mi
+ - name: selenium
+ image: selenium/standalone-chrome:112.0
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 4444
+ readinessProbe:
+ httpGet:
+ path: /status
+ port: 4444
+ timeoutSeconds: 60
+ resources:
+ requests:
+ cpu: 500m
+ memory: 500Mi
+ ephemeral-storage: 500Mi
- name: shell
image: quay.io/cdis/gen3-ci-worker:master
imagePullPolicy: Always
@@ -53,6 +96,11 @@ spec:
- sleep
args:
- infinity
+ resources:
+ requests:
+ cpu: 0.2
+ memory: 400Mi
+ ephemeral-storage: 1Gi
env:
- name: AWS_DEFAULT_REGION
value: us-east-1
@@ -86,8 +134,8 @@ spec:
readOnly: true
mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt"
subPath: "ca.pem"
- - name: dockersock
- mountPath: "/var/run/docker.sock"
+ - name: containerdsock
+ mountPath: "/var/run/containerd/containerd.sock"
serviceAccount: jenkins-service
serviceAccountName: jenkins-service
volumes:
@@ -97,9 +145,9 @@ spec:
- name: ca-volume
secret:
secretName: "service-ca"
- - name: dockersock
+ - name: containerdsock
hostPath:
- path: /var/run/docker.sock
+ path: /var/run/containerd/containerd.sock
'''
defaultContainer 'shell'
}
@@ -245,8 +293,8 @@ spec:
script {
try {
if(!skipUnitTests) {
- sh '/usr/bin/pip3 install boto3 --upgrade --user'
- sh '/usr/bin/pip3 install kubernetes --upgrade --user'
+ sh '/usr/local/bin/pip3 install boto3 --upgrade --user'
+ sh '/usr/local/bin/pip3 install kubernetes --upgrade --user'
sh 'python3 -m pytest cloud-automation/apis_configs/'
sh 'python3 -m pytest cloud-automation/gen3/lib/dcf/'
sh 'cd cloud-automation/tf_files/aws/modules/common-logging && python3 -m pytest testLambda.py'
@@ -386,6 +434,9 @@ spec:
}
stage('WaitForQuayBuild') {
+ options {
+ timeout(time: 30, unit: 'MINUTES') // timeout on this stage
+ }
steps {
script {
try {
@@ -449,6 +500,9 @@ spec:
}
stage('K8sReset') {
+ options {
+ timeout(time: 1, unit: 'HOURS') // timeout on this stage
+ }
steps {
script {
try {
@@ -531,6 +585,9 @@ spec:
}
stage('RunTests') {
+ options {
+ timeout(time: 3, unit: 'HOURS') // timeout on this stage
+ }
steps {
script {
try {
diff --git a/ansible/hosts.yaml b/ansible/hosts.yaml
index ea0378bed..ea23c72c9 100644
--- a/ansible/hosts.yaml
+++ b/ansible/hosts.yaml
@@ -279,4 +279,4 @@ all:
ansible_user: ubuntu
emalinowskiv1:
ansible_host: cdistest.csoc
- ansible_user: emalinowskiv1
+ ansible_user: emalinowskiv1
\ No newline at end of file
diff --git a/ansible/oldPeKeys/testremove b/ansible/oldPeKeys/testremove
new file mode 100644
index 000000000..a8f9bdca6
--- /dev/null
+++ b/ansible/oldPeKeys/testremove
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAC7GaYGfV3VaHX+RlzSvSHc6f+Nmu6Ikoe+fgc5m8avrWIJEDfYd+z1bGCcPiVpEiSrzHYjuvxAkHMxPlteiGxWqWnUBhf9vCNKCxj1m7BW1+wQ333iaio8JzA20M363EbSxGPe0LJplN6/aReLC5OUj4if/dnOE0Usrc4n5WTaSR8Ip6jwitDoFNLH5tZZCYMWi08flvKO7y8zvXJ7D3MrWUGroKsBRrkrFp3dDkPKCtrU6tGaRO5GkWbw408oWsFIt6fr7WBzx1HvB2u4z4Y+wZxRIl45wU8xPZR+u8e/VsL/KzKQLAnqcBqToRN83ugxyJfnbuFazjKZKEk9iSJfshpz00qFnXomBXpv5fLxTByo8EMnhNM23jyE3Fw3co8B3MJK/CF71ztosQGPxZrYZYLPY5fYXAmjeLPVahr/jKwyYJukV3LzHF2pmMrfymefmaX7s0NdY/4Md99DIRXcehQaLCa6KHA8KqzbB6KjCvWGykUHwJoCIrK/hqIJ62heBneIP3wXBHche3EA32P1QnnI3QEptOvPDe7gFqRYrfant1NRNrOxU9TtIlujgME80Bx9EVvhjf3Yim0zNyk4I4yTar7CqWxyIP/REsze24q0yyW3e2llPKrX8gqWwnl/ANYPeUgz8Y9CHAQkZm+SWotyqVeLNTUSmW90RUXwJ ubuntu@csoc_admin
\ No newline at end of file
diff --git a/ansible/peKeys/aaugustine b/ansible/peKeys/aaugustine
new file mode 100644
index 000000000..3b286b641
--- /dev/null
+++ b/ansible/peKeys/aaugustine
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu
\ No newline at end of file
diff --git a/ansible/peKeys/ahilt b/ansible/peKeys/ahilt
new file mode 100644
index 000000000..d415bce5a
--- /dev/null
+++ b/ansible/peKeys/ahilt
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan
\ No newline at end of file
diff --git a/ansible/peKeys/ecastle b/ansible/peKeys/ecastle
new file mode 100644
index 000000000..7fc0b666c
--- /dev/null
+++ b/ansible/peKeys/ecastle
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCkf6aIs6bmOAZS+Q7yFaRzPnZPa3eExrDDKqGuikGoNDMP1VcPoyb0cYTZTG5X6YzFt5Blv95WWuw6WEBdUxIax/Z9V4H83A+KRvuwiRI9zU3FaKEeYb18hcHSclAWyjl+N7b9V2KzxVBJCkmdC3XBLp/geDRIbGusg40lySYzYhs73hTYs0CQWHcLIj1jX00hbIdbKyc/fq8ODIEOo/XojvjBQyPlT/BJ5fK08LO7kIBoeQ62iT8yG+J/2vch+WsMBeOt+agYKRSn9pv10+5SdP/emX4r5PkyTS8H3ysuequMUMv5w0rXAL53uTYpJELisNTl8pv2Y4VQKCh2Aj5989NFjcqBcv7KKTfvI3WVG5SNsOtu1tAmC05Xf3fdsb3BRVu7I0pCna26NOKRSh8eLy/uUfA4fUKOQyXr5yG3a+Vse57WZiPizOamhkjYTdvyBB8ad7vZST1ir1viSZl6ps+f3bhfx//DPKYpYyZIc6uDdGQMwFoMEhpTdKYopqGmny5LoR9J9LLeGDJd3M0bj/yyd+2/6cU+1KwjLO7fgyjSCjVUKEdG0HufwS/NZc1q3QT6OrXAd8lw5A4BoHDt+Mp8uRVz5508h7XIOC718nLuiJqwqh3dS6hkybGoBCIvh1BDWsEWOUi0Ygt+Ast3Qw4/eMqvmTCN32OIVtOBpQ== elisecastle@Elises-MBP
\ No newline at end of file
diff --git a/ansible/keys/emalinowski b/ansible/peKeys/emalinowski
similarity index 100%
rename from ansible/keys/emalinowski
rename to ansible/peKeys/emalinowski
diff --git a/ansible/keys/qureshi b/ansible/peKeys/qureshi
similarity index 100%
rename from ansible/keys/qureshi
rename to ansible/peKeys/qureshi
diff --git a/ansible/playbooks/peAddKeys.yaml b/ansible/playbooks/peAddKeys.yaml
new file mode 100644
index 000000000..edf7e4920
--- /dev/null
+++ b/ansible/playbooks/peAddKeys.yaml
@@ -0,0 +1,18 @@
+#
+# Playbook to handle keys in a particular host
+#
+# @variables:
+# _hosts => hosts in which you want the playbook to be applied
+# it must exist in hosts.yaml
+
+# This playbook loops over each public key file in the peKeys/ directory and adds them to the specified VMs
+
+
+---
+- hosts: "{{ _hosts }}"
+ tasks:
+ - authorized_key:
+ user: "{{ ansible_user_id }}"
+ state: present
+ key: "{{ lookup('file', item) }}"
+ with_fileglob: '../peKeys/*'
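+
+# Example usage (a sketch; assumes the "adminvms" group defined in hosts.yaml and
+# that the command is run from the ansible/ directory):
+#   ansible-playbook -i hosts.yaml playbooks/peAddKeys.yaml -e "_hosts=adminvms"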
diff --git a/ansible/playbooks/peRemoveKeys.yaml b/ansible/playbooks/peRemoveKeys.yaml
new file mode 100644
index 000000000..8f6df6706
--- /dev/null
+++ b/ansible/playbooks/peRemoveKeys.yaml
@@ -0,0 +1,18 @@
+#
+# Playbook to handle keys in a particular host
+#
+# @variables:
+# _hosts => hosts in which you want the playbook to be applied
+# it must exist in hosts.yaml
+
+# This playbook loops over each public key file in the oldPeKeys/ directory and removes them from the specified VMs
+
+
+---
+- hosts: "{{ _hosts }}"
+ tasks:
+ - authorized_key:
+ user: "{{ ansible_user_id }}"
+ state: absent
+ key: "{{ lookup('file', item) }}"
+ with_fileglob: '../oldPeKeys/*'
\ No newline at end of file
diff --git a/ansible/updated-hosts.yaml b/ansible/updated-hosts.yaml
new file mode 100644
index 000000000..1fa913b0b
--- /dev/null
+++ b/ansible/updated-hosts.yaml
@@ -0,0 +1,282 @@
+---
+all:
+ children:
+ adminvms:
+ hosts:
+ account_admin:
+ ansible_host: account.csoc
+ anvil_admin:
+ ansible_host: anvil.csoc
+ vadc_admin:
+ ansible_host: vadc.csoc
+ dcfqa_admin:
+ ansible_host: dcfqa.csoc
+ dcfprod_admin:
+ ansible_host: dcfprod.csoc
+ genomel_admin:
+ ansible_host: genomel.csoc
+ ibdgc_admin:
+ ansible_host: ibdgc.csoc
+ occ_admin:
+ ansible_host: occ.csoc
+ occ-edc_admin:
+ ansible_host: occ-edc.csoc
+ niaiddh_admin:
+ ansible_host: niaiddh.csoc
+ gtex_admin:
+ ansible_host: gtex.csoc
+ kf_admin:
+ ansible_host: kf.csoc
+ gmkfqa_admin:
+ ansible_host: gmkfqa.csoc
+ ncicrdc_admin:
+ ansible_host: ncicrdc.csoc
+ cdistest_admin:
+ ansible_host: cdistest.csoc
+ jcoin_admin:
+ ansible_host: jcoin.csoc
+ oadc_admin:
+ ansible_host: oadc.csoc
+ vhdc_admin:
+ ansible_host: vhdc.csoc
+ covid19_admin:
+ ansible_host: covid19.csoc
+ midrc_admin:
+ ansible_host: midrc.csoc
+ heal_admin:
+ ansible_host: heal.csoc
+ brh_admin:
+ ansible_host: brh.csoc
+ vars:
+ ansible_user: ubuntu
+ ansible_python_interpreter: /usr/bin/python3
+
+ other_admins:
+ hosts:
+ canine_admin:
+ ansible_host: canine.csoc
+ # unreachable
+ # ncigdc_admin:
+ # ansible_host: 10.128.2.112
+ dcfbuckets_admin:
+ ansible_host: 10.128.2.181
+ # unreachable
+ # pdcgen3_admin:
+ # ansible_host: 10.128.2.241
+ vars:
+ ansible_user: ubuntu
+
+ commons:
+ hosts:
+ accountprod_commons:
+ ansible_user: accountprod
+ ansible_host: account.csoc
+ anvilprod_commons:
+ ansible_user: anvilprod
+ ansible_host: anvil.csoc
+ vadcprod_commons:
+ ansible_user: vadcprod
+ ansible_host: vadc.csoc
+ dcfprod_commons:
+ ansible_user: dcfprod
+ ansible_host: dcfprod.csoc
+ qa-biologin_commons:
+ ansible_user: qa-biologin
+ ansible_host: genomel.csoc
+ genomelprod_commons:
+ ansible_user: genomelprod
+ ansible_host: genomel.csoc
+ ibdgc_commons:
+ ansible_user: ibdgc
+ ansible_host: ibdgc.csoc
+ bloodv2_commons:
+ ansible_user: bloodv2
+ ansible_host: occ.csoc
+ edcprodv2_commons:
+ ansible_user: edcprodv2
+ ansible_host: occ-edc.csoc
+ niaidprod_commons:
+ ansible_user: niaidprod
+ ansible_host: niaiddh.csoc
+ dataguis_commons:
+ ansible_user: dataguids
+ ansible_host: gtex.csoc
+ prodv1_commons:
+ ansible_user: prodv1
+ ansible_host: kf.csoc
+ loginbionimbus_commons:
+ ansible_user: loginbionimbus
+ ansible_host: genomel.csoc
+ canineprod_commons:
+ ansible_user: canineprod
+ ansible_host: canine.csoc
+ icgc_commons:
+ ansible_user: icgc
+ ansible_host: genomel.csoc
+ niaiddata_commons:
+ ansible_user: niaiddata
+ ansible_host: niaiddh.csoc
+ jcoinprod_commons:
+ ansible_user: jcoinprod
+ ansible_host: jcoin.csoc
+ fitbirprod_commons:
+ ansible_user: fitbirprod
+ ansible_host: oadc.csoc
+ oadc_commons:
+ ansible_user: oadc
+ ansible_host: oadc.csoc
+ neuro_commons:
+ ansible_user: neuro
+ ansible_host: oadc.csoc
+ vhdcprod_commons:
+ ansible_user: vhdcprod
+ ansible_host: vhdc.csoc
+ covid19prod_commons:
+ ansible_user: covid19prod
+ ansible_host: covid19.csoc
+ bdcatprod_commons:
+ ansible_user: bdcatprod
+ ansible_host: gtex.csoc
+ midrc_commons:
+ ansible_user: midrcprod
+ ansible_host: midrc.csoc
+ heal_commons:
+ ansible_user: healprod
+ ansible_host: heal.csoc
+ brh_commons:
+ ansible_user: brhprod
+ ansible_host: brh.csoc
+ vars:
+ ansible_python_interpreter: /usr/bin/python3
+
+ staging:
+ hosts:
+ stagingdatastage_commons:
+ ansible_user: stagingdatastage
+ ansible_host: gtex.csoc
+ dcfstaging_commons:
+ ansible_user: staging
+ ansible_host: dcfprod.csoc
+ anvilstaging_commons:
+ ansible_user: anvilstaging
+ ansible_host: anvil.csoc
+ midrcstaging_commons:
+ ansible_user: staging-validate
+ ansible_host: midrc.csoc
+ brhstaging_commons:
+ ansible_user: brhstaging
+ ansible_host: brh.csoc
+ vars:
+ ansible_python_interpreter: /usr/bin/python3
+ namespaces:
+ hosts:
+ charlie_commons:
+ ansible_user: charlie
+ ansible_host: niaiddh.csoc
+ tb_commons:
+ ansible_user: tb
+ ansible_host: niaiddh.csoc
+ microbiome_commons:
+ ansible_user: microbiome
+ ansible_host: niaiddh.csoc
+ flu_commons:
+ ansible_user: flu
+ ansible_host: niaiddh.csoc
+ clinicaltrial_commons:
+ ansible_user: clinicaltrial
+ ansible_host: niaiddh.csoc
+ preprod_commons:
+ ansible_user: bdcat-internalstaging
+ ansible_host: gtex.csoc
+ va-testing_commons:
+ ansible_user: va-testing
+ ansible_host: vhdc.csoc
+ validate_commons:
+ ansible_user: validate
+ ansible_host: midrc.csoc
+ healpreprod_commons:
+ ansible_user: healpreprod
+ ansible_host: heal.csoc
+ healworkspaces_commons:
+ ansible_user: healworkspaces
+ ansible_host: heal.csoc
+ vars:
+ ansible_python_interpreter: /usr/bin/python3
+
+ dev:
+ hosts:
+ cdistest_dev:
+ ansible_user: devplanetv1
+ ansible_host: cdistest.csoc
+ cdistest_qav1:
+ ansible_user: qaplanetv1
+ ansible_host: cdistest.csoc
+ cdistest_qav2:
+ ansible_user: qaplanetv2
+ ansible_host: cdistest.csoc
+ cdistest_emalinowskiv1:
+ ansible_user: emalinowskiv1
+ ansible_host: cdistest.csoc
+ vars:
+ ansible_python_interpreter: /usr/bin/python3
+
+ qa:
+ hosts:
+ qa_biologin:
+ ansible_user: qa-biologin
+ ansible_host: genomel.csoc
+ kfqa_qa:
+ ansible_user: kfqa
+ ansible_host: gmkfqa.csoc
+ gmkfqa_qa:
+ ansible_user: skfqa
+ ansible_host: gmkfqa.csoc
+ kfqa2_qa:
+ ansible_user: kfqa2
+ ansible_host: gmkfqa.csoc
+ vars:
+ ansible_python_interpreter: /usr/bin/python3
+
+ demo:
+ hosts:
+ ncicrdc_demo:
+ ansible_user: ncicrdcdemo
+ ansible_host: ncicrdc.csoc
+ brh_demo:
+ ansible_user: brhdemo
+ ansible_host: brh.csoc
+
+ vpn:
+ hosts:
+ vpn_prod:
+ ansible_host: csoc-prod-vpn.planx-pla.net
+ ansible_user: ubuntu
+ vpn_dev:
+ ansible_host: csoc-dev-vpn.planx-pla.net
+ ansible_user: ubuntu
+ revproxy:
+ hosts:
+ es_a:
+ ansible_host: 10.128.7.8
+ es_b:
+ ansible_host: 10.128.7.23
+ vars:
+ ansible_user: ubuntu
+
+ cdistest:
+ hosts:
+ cdistest_fauzi:
+ ansible_host: cdistest.csoc
+ ansible_user: fauziv1
+
+ csoc_squids:
+ hosts:
+ csocsquidnlbcentral1:
+ ansible_host: 10.128.4.101
+ #unreachable
+ # csocsquidnlbcentral2:
+ # ansible_host: 10.128.4.30
+ csocsquidnlbcentral3:
+ ansible_host: 10.128.4.169
+ vars:
+ ansible_user: ubuntu
diff --git a/apis_configs/logo.svg b/apis_configs/logo.svg
index 7f056e548..da71f111e 100644
--- a/apis_configs/logo.svg
+++ b/apis_configs/logo.svg
@@ -1 +1,98 @@
-
\ No newline at end of file
+
+
diff --git a/doc/karpenter.md b/doc/karpenter.md
new file mode 100644
index 000000000..29aa35de1
--- /dev/null
+++ b/doc/karpenter.md
@@ -0,0 +1,59 @@
+# Introduction
+
+Karpenter is a modern cloud-native tool for Kubernetes cluster management and resource allocation. With its efficient and customizable scaling and orchestration capabilities, Karpenter is becoming an increasingly popular alternative to Cluster Autoscaler. In this document, we will discuss the benefits of using Karpenter over Cluster Autoscaler and why it is worth considering a switch.
+
+# Table of contents
+
+- [1. Benefits of Karpenter](#benefits-of-karpenter)
+- [2. Requirements](#requirements)
+- [3. How it Works](#how-it-works)
+- [4. Installation Steps](#installation-steps)
+- [5. Modifying the Provisioners and Awsnodetemplates](#modifying-the-provisioners-and-awsnodetemplates)
+- [6. Potential Issues](#potential-issues)
+
+## Benefits of Karpenter
+
+- Advanced Resource Allocation: Karpenter provides fine-tuned control over resource allocation, allowing for greater optimization of resource utilization. With its advanced features, Karpenter can ensure that nodes are appropriately sized and allocated, reducing the chance of overprovisioning or underutilization.
+- Scalability: Karpenter offers powerful scaling capabilities, allowing administrators to quickly and efficiently adjust the size of their cluster as needed. With its sophisticated scaling algorithms, Karpenter ensures that resources are optimized and that clusters are able to grow and shrink as needed.
+- Customizable: Karpenter allows administrators to customize and configure their cluster as needed. With its flexible and intuitive interface, administrators can easily adjust the size and composition of their cluster to meet the specific needs of their organization.
+- Efficient Management: Karpenter provides efficient and streamlined cluster management, allowing administrators to manage their resources more effectively. With its intuitive and powerful interface, administrators can easily allocate resources and monitor cluster performance, ensuring that their cluster is running smoothly and efficiently.
+
+## Requirements
+
+Karpenter requires access to AWS to be able to provision EC2 instances. It uses an EKS IAM service account with access to most EC2 resources. Once Karpenter is deployed it also requires configuration that decides which node types to spin up, described in the next section. Our base configuration relies on config provisioned by our terraform, so installing without it may require manual effort. Lastly, since Karpenter becomes the new cluster management system, the cluster autoscaler needs to be uninstalled.
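+
+A minimal sketch of that last step, assuming the cluster autoscaler runs as a deployment named `cluster-autoscaler` in `kube-system` (adjust the name and namespace to your setup):
+
+```bash
+# confirm where the cluster autoscaler currently runs
+kubectl get deployments -A | grep -i cluster-autoscaler
+
+# remove it so it does not compete with Karpenter over scaling decisions
+kubectl -n kube-system delete deployment cluster-autoscaler
+```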
+
+## How it Works
+
+Karpenter works on the EKS level instead of the cloud level, which means the configuration that decides which nodes to spin up moves from AWS into EKS. Karpenter uses provisioners to replace autoscaling groups and awsnodetemplates to replace launch configs/templates. Once Karpenter is deployed you will need to create at least one provisioner and one awsnodetemplate so that it knows which nodes it may spin up. When pending pods require new nodes, Karpenter figures out the most efficient instance type to use based on the pod resources and the instance types allowed by your provisioners/templates.
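+
+Once it is running you can inspect the objects Karpenter works with directly; a quick sketch (the node label is an assumption based on how provisioner-based Karpenter labels the nodes it launches):
+
+```bash
+# list the provisioners and node templates Karpenter is using
+kubectl get provisioners.karpenter.sh
+kubectl get awsnodetemplates.karpenter.k8s.aws
+
+# list the nodes Karpenter has provisioned
+kubectl get nodes -l karpenter.sh/provisioner-name
+```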
+
+## Installation Steps
+
+To install Karpenter using gen3, simply run the kube-setup-karpenter script. The script does the following to install Karpenter:
+
+1. Creates a new karpenter namespace for the karpenter deployment to run in.
+2. Creates an EKS IAM service account with access to EC2 resources within AWS for the Karpenter deployment to use.
+3. Tags the relevant subnets and security groups for the karpenter deployment to autodiscover.
+4. Installs the karpenter helm deployment.
+5. Installs the necessary provisioners and aws node templates.
+
+Karpenter can also be installed through the manifest by adding a .global.karpenter block to your manifest. If this block equals "arm" then the arm provisioner is installed as well, which provisions arm-based nodes for the default worker nodes.
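+
+A hedged sketch of both paths; the manifest key comes from the paragraph above, while the exact arguments accepted by the setup script may differ in your version of cloud-automation:
+
+```bash
+# direct install from the admin VM
+gen3 kube-setup-karpenter
+
+# or enable it via the manifest (the "arm" value also installs the arm provisioner)
+jq '.global.karpenter = "arm"' manifest.json > manifest.json.tmp && mv manifest.json.tmp manifest.json
+```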
+
+## Modifying the Provisioners and Awsnodetemplates
+
+If you ever need to change the behavior of the provisioners on the fly, you can run the following command:
+
+```bash
+kubectl edit provisioners.karpenter.sh
+```
+
+If you ever need to edit the awsnodetemplates, you can do so with:
+
+```bash
+kubectl edit awsnodetemplates.karpenter.k8s.aws
+```
+
+Base configuration lives in the [karpenter configuration section](https://github.com/uc-cdis/cloud-automation/tree/master/kube/services/karpenter) of cloud-automation, so you can edit this configuration for longer-term or more widespread changes.
+
+## Potential Issues
+
+Karpenter is a powerful, flexible tool, but with that flexibility come some challenges. First, Karpenter needs to be able to find the subnets/security groups for your specific VPC. If there are multiple VPCs and multiple Karpenter deployments in an AWS account, we need to stray from the official Karpenter documentation when tagging subnets/security groups: Karpenter discovers subnets/security groups by tag, so instead of setting the discovery tag value to true we set it to the VPC name, and use the same VPC name in the Karpenter configuration. Also, Karpenter requires at least 2 nodes, outside of any nodes it manages, for its own deployment to run on. This is so that Karpenter is always available and can schedule nodes without taking itself out. Because of this, we recommend running a regular EKS worker ASG with 2 min/max/desired for Karpenter to run on. If these nodes ever need to be updated, make sure Karpenter comes back up afterwards so your cluster keeps scaling as intended.
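+
+For the discovery issue, a sketch of the tagging approach described above, assuming the standard `karpenter.sh/discovery` tag key and placeholder subnet ID and VPC name:
+
+```bash
+# tag each subnet (and security group) with the VPC name instead of "true"
+aws ec2 create-tags \
+  --resources subnet-0123456789abcdef0 \
+  --tags Key=karpenter.sh/discovery,Value=my-vpc-name
+```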
diff --git a/doc/kube-setup-cedar-wrapper.md b/doc/kube-setup-cedar-wrapper.md
new file mode 100644
index 000000000..65ffbfd32
--- /dev/null
+++ b/doc/kube-setup-cedar-wrapper.md
@@ -0,0 +1,5 @@
+# TL;DR
+
+Setup CEDAR wrapper service and deployment
+
+Need to supply a `cedar_api_key.txt` file under `$(gen3_secrets_folder)/g3auto/cedar/`
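+
+A minimal sketch of wiring that up from the admin VM; the file name and path come from above, and the setup command is an assumption based on this script's name:
+
+```bash
+mkdir -p "$(gen3_secrets_folder)/g3auto/cedar"
+cp cedar_api_key.txt "$(gen3_secrets_folder)/g3auto/cedar/cedar_api_key.txt"
+gen3 kube-setup-cedar-wrapper
+```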
diff --git a/doc/kube-setup-ingress.md b/doc/kube-setup-ingress.md
new file mode 100644
index 000000000..bd4dff29c
--- /dev/null
+++ b/doc/kube-setup-ingress.md
@@ -0,0 +1,37 @@
+# TL;DR
+
+Setup the aws-load-balancer-controller and an ALB.
+
+This is a replacement for the revproxy-service-elb and WAF
+
+PLEASE NOTE: This script will now also deploy AWS WAF, which will be associated with the ALB. This can be deployed by setting/adding the "waf_enabled" flag to true in the manifest-global configmap (set via the global section of the manifest.json).
+
+## Overview
+
+The script deploys the `aws-load-balancer-controller` when run in the `default` namespace.
+
+## Use
+
+### deploy
+
+Deploy the aws-load-balancer-controller from https://docs.aws.amazon.com/eks/latest/userguide/aws-load-balancer-controller.html.
+Only works in the `default` namespace.
+
+If run from a non-default namespace, it will only deploy the k8s ingress resource.
+
+```
+gen3 kube-setup-ingress
+```
+
+### check
+
+Check if the ingress has been deployed by running
+
+```
+helm status aws-load-balancer-controller -n kube-system
+```
+
+Update your DNS records to point to the ADDRESS field from the output of
+```
+kubectl get ingress revproxy-ingress
+```
diff --git a/doc/kube-setup-kayako-wrapper.md b/doc/kube-setup-kayako-wrapper.md
new file mode 100644
index 000000000..8f0302d44
--- /dev/null
+++ b/doc/kube-setup-kayako-wrapper.md
@@ -0,0 +1,5 @@
+# TL;DR
+
+Setup Kayako wrapper service and deployment
+
+Need to supply a `kayako_api_key.txt` and a `kayako_secret_key.txt` file under `$(gen3_secrets_folder)/g3auto/kayako/`
diff --git a/doc/kube-setup-revproxy.md b/doc/kube-setup-revproxy.md
index 365d0b129..fdf0b0db5 100644
--- a/doc/kube-setup-revproxy.md
+++ b/doc/kube-setup-revproxy.md
@@ -6,5 +6,7 @@ Configure and launch the reverse proxy.
* the reverse proxy [readme](../kube/services/revproxy/README.md) has more details.
* WAF - the reverse proxy deploys the [modsecurity web application firewall](./waf.md).
+* IMPORTANT: The modsecurity WAF and Revproxy ELB service are only deployed if the "deploy_elb" flag is set to true in the manifest-global configmap. The Revproxy ELB is now deprecated; we suggest deploying an AWS ALB instead (please see the kube-setup-ingress script).
+* Please see https://github.com/uc-cdis/cloud-automation/blob/master/doc/kube-setup-ingress.md, as AWS WAF and ALB are recommended.
* [maintenance mode](./maintenance.md)
* the [ip blacklist](../gen3/lib/manifestDefaults/revproxy/) may be configured with a custom `manifests/revproxy/blacklist.conf`
diff --git a/doc/kubecost.md b/doc/kubecost.md
new file mode 100644
index 000000000..a230e6378
--- /dev/null
+++ b/doc/kubecost.md
@@ -0,0 +1,82 @@
+# TL;DR
+
+Setup a kubecost cluster
+
+
+## Use
+
+### `gen3 kube-setup-kubecost master create`
+
+Creates a master kubecost cluster
+
+Requires the following `key value` arguments
+
+* `--slave-account-id` - the account id of the slave kubecost cluster
+* `--kubecost-token` - The token for the kubecost cluster
+
+Optional `key value` arguments
+
+* `--force` - defaults to false, set --force true to force helm upgrade
+* `--disable-prometheus` - defaults to false, set --disable-prometheus true to disable Prometheus and use the current setup
+* `--prometheus-namespace` - The namespace of the current prometheus, required if kubecost prometheus is disabled
+* `--prometheus-service` - The service name of the current prometheus, required if kubecost prometheus is disabled
+
+Ex:
+
+``` bash
+gen3 kube-setup-kubecost master create --slave-account-id 1234567890 --kubecost-token abcdefghijklmnop12345 --force true
+```
+
+### `gen3 kube-setup-kubecost slave create`
+
+Creates a slave kubecost cluster
+
+
+Requires the following `key value` arguments
+
+* `--s3-bucket` - the centralized s3 bucket of the master kubecost cluster
+* `--kubecost-token` - The token for the kubecost cluster
+
+Optional `key value` arguments
+
+* `--force` - defaults to false, set `--force true` to force a helm upgrade
+* `--disable-prometheus` - defaults to false, set `--disable-prometheus true` to disable the kubecost prometheus and use the current setup
+* `--prometheus-namespace` - The namespace of the current prometheus, required if kubecost prometheus is disabled
+* `--prometheus-service` - The service name of the current prometheus, required if kubecost prometheus is disabled
+
+Ex:
+
+``` bash
+gen3 kube-setup-kubecost slave create --s3-bucket test-kubecost-bucket --kubecost-token abcdefghijklmnop12345 --force true
+```
+
+### `gen3 kube-setup-kubecost standalone create`
+
+Creates a standalone kubecost cluster
+
+Requires the following `key value` arguments
+
+* `--kubecost-token` - The token for the kubecost cluster
+
+Optional `key value` arguments
+
+* `--force` - defaults to false, set `--force true` to force a helm upgrade
+* `--disable-prometheus` - defaults to false, set `--disable-prometheus true` to disable the kubecost prometheus and use the current setup
+* `--prometheus-namespace` - The namespace of the current prometheus, required if kubecost prometheus is disabled
+* `--prometheus-service` - The service name of the current prometheus, required if kubecost prometheus is disabled
+
+Ex:
+
+``` bash
+gen3 kube-setup-kubecost standalone create --kubecost-token abcdefghijklmnop12345 --force true
+```
+
+### `gen3 kube-setup-kubecost delete`
+
+Deletes a running kubecost deployment and destroys the associated infra
+
+Ex:
+
+``` bash
+gen3 kube-setup-kubecost delete
+```
diff --git a/doc/s3-to-google-replication.md b/doc/s3-to-google-replication.md
new file mode 100644
index 000000000..82d0374c7
--- /dev/null
+++ b/doc/s3-to-google-replication.md
@@ -0,0 +1,68 @@
+# S3 to Google Cloud Storage Replication Pipeline
+
+This document will guide you through setting up a replication pipeline from AWS S3 to Google Cloud Storage (GCS) using VPC Service Controls and Storage Transfer Service. This solution is compliant with security best practices, ensuring that data transfer between AWS S3 and GCS is secure and efficient.
+
+## Table of Contents
+
+- [Prerequisites](#prerequisites)
+- [Step-by-step Guide](#step-by-step-guide)
+ - [Setup VPC Service Controls](#setup-vpc-service-controls)
+ - [Initiate Storage Transfer Service](#initiate-storage-transfer-service)
+- [Compliance Benefits](#compliance-benefits)
+- [Cost Benefit Analysis](#cost-benefit-analysis)
+
+## Prerequisites
+
+1. **AWS account** with access to the S3 bucket.
+2. **Google Cloud account** with permissions to create buckets in GCS and set up VPC Service Controls and Storage Transfer Service.
+3. Familiarity with AWS IAM for S3 bucket access and Google Cloud IAM for GCS access.
+
+## Step-by-step Guide
+
+### Setup VPC Service Controls
+
+1. **Access the VPC Service Controls** in the Google Cloud Console.
+2. **Create a new VPC Service Control perimeter**.
+ - Name the perimeter and choose the desired region.
+ - Add the necessary GCP services. Ensure to include `storagetransfer.googleapis.com` for Storage Transfer Service.
+3. **Setup VPC Service Control Policy** to allow connections from AWS.
+ - Use the [documentation](https://cloud.google.com/vpc-service-controls/docs/set-up) to help set up.
+
+### Initiate Storage Transfer Service
+
+1. Navigate to **Storage Transfer Service** in the Google Cloud Console.
+2. Click **Create Transfer Job**.
+3. **Select Source**: Choose Amazon S3 bucket and provide the necessary details.
+ - Ensure to have necessary permissions for the S3 bucket in AWS IAM.
+4. **Select Destination**: Choose your GCS bucket.
+5. **Schedule & Advanced Settings**: Set the frequency and conditions for the transfer. Consider setting up notifications for job completion or errors.
+6. **Review & Create**: Confirm the details and initiate the transfer job. (An equivalent CLI sketch follows this list.)
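+
+For teams that prefer the CLI, a minimal sketch using `gcloud transfer jobs create` (the bucket names are placeholders; AWS credential and scheduling flags are omitted here and should be taken from the gcloud documentation):
+
+```
+# Sketch only: create a Storage Transfer Service job that copies an S3 bucket to GCS.
+# Assumes gcloud is authenticated and AWS credentials are configured as described
+# in the Storage Transfer Service documentation.
+gcloud transfer jobs create s3://my-aws-source-bucket gs://my-gcs-destination-bucket
+```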
+
+## Compliance Benefits
+
+Setting up a secure replication pipeline from AWS S3 to GCS using VPC Service Controls and Storage Transfer Service offers the following compliance benefits:
+
+1. **Data Security**: The VPC Service Controls provide an additional layer of security by ensuring that the transferred data remains within a defined security perimeter, reducing potential data leak risks.
+2. **Auditability**: Both AWS and GCS offer logging and monitoring tools that can provide audit trails for data transfer. This can help in meeting regulatory compliance requirements.
+3. **Consistent Data Replication**: The Storage Transfer Service ensures that data in GCS is up to date with the source S3 bucket, which is essential for consistent backup and disaster recovery strategies.
+
+## Cost Benefit Analysis
+
+**Benefits**:
+
+1. **Data Redundancy**: Having data stored in multiple cloud providers can be a part of a robust disaster recovery strategy.
+2. **Flexibility**: Replicating data to GCS provides flexibility in multi-cloud strategies, enabling seamless migrations or usage of GCP tools and services.
+3. **Security**: Utilizing VPC Service Controls strengthens the security posture.
+
+**Costs**:
+
+1. **Data Transfer Costs**: Both AWS and Google Cloud might charge for data transfer. It's crucial to analyze the cost, especially for large data transfers.
+2. **Storage Costs**: Storing data redundantly incurs additional storage costs in GCS.
+
+**Analysis**:
+
+To stay in compliance, we require multiple copies of our data in separate datacenters or clouds. Our security audit highlighted the importance of not keeping data in a single cloud. It may be expensive to transfer data from AWS to GCP and to store it in two clouds simultaneously, but when we need to, this solution is a straightforward way to achieve compliance.
+
+---
+
+Please note that while this guide is based on the provided Google Cloud documentation, it's crucial to refer to the original [documentation](https://cloud.google.com/architecture/transferring-data-from-amazon-s3-to-cloud-storage-using-vpc-service-controls-and-storage-transfer-service) for the most accurate and up-to-date information.
diff --git a/doc/update-kubeconfig.md b/doc/update-kubeconfig.md
new file mode 100644
index 000000000..0aec8f7ac
--- /dev/null
+++ b/doc/update-kubeconfig.md
@@ -0,0 +1,24 @@
+# TL;DR
+
+kubectl 1.24.0 introduces a breaking change, so the older kubeconfig doesn't work anymore.
+
+https://github.com/aws/aws-cli/issues/6920
+
+Updates the kubeconfig API version, args, and command to get rid of the following error:
+`error: exec plugin: invalid apiVersion "client.authentication.k8s.io/v1alpha1"`
+
+This error occurs when the client kubectl version is updated and the kubeconfig remains the same.
+
+This requires AWS CLI v2.7.0 or higher.
+
+## Use
+
+### Run
+```
+gen3 update-kubeconfig
+```
+
+
+This command backs up the existing kubeconfig file and regenerates a valid one using the AWS CLI. It also persists the current namespace to the context.
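+
+Under the hood this is roughly equivalent to regenerating the kubeconfig with the AWS CLI (the cluster name and region below are assumptions for illustration):
+
+```
+cp ~/.kube/config ~/.kube/config.backup
+aws eks update-kubeconfig --name my-eks-cluster --region us-east-1
+```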
+
+
diff --git a/files/authorized_keys/ops_team b/files/authorized_keys/ops_team
index bdf99df54..1c41caaa0 100644
--- a/files/authorized_keys/ops_team
+++ b/files/authorized_keys/ops_team
@@ -1,4 +1,6 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiVYoa9i91YL17xWF5kXpYh+PPTriZMAwiJWKkEtMJvyFWGv620FmGM+PczcQN47xJJQrvXOGtt/n+tW1DP87w2rTPuvsROc4pgB7ztj1EkFC9VkeaJbW/FmWxrw2z9CTHGBoxpBgfDDLsFzi91U2dfWxRCBt639sLBfJxHFo717Xg7L7PdFmFiowgGnqfwUOJf3Rk8OixnhEA5nhdihg5gJwCVOKty8Qx73fuSOAJwKntcsqtFCaIvoj2nOjqUOrs++HG6+Fe8tGLdS67/tvvgW445Ik5JZGMpa9y0hJxmZj1ypsZv/6cZi2ohLEBCngJO6d/zfDzP48Beddv6HtL rarya_id_rsa
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg87w300H8uTsupBPggxoPMPnpCKpG4aYqgKC5aHzv2TwiHyMnDN7CEtBBBDglWJpBFCheU73dDl66z/vny5tRHWs9utQNzEBPLxSqsGgZmmN8TtIxrMKZ9eX4/1d7o+8msikCYrKr170x0zXtSx5UcWj4yK1al5ZcZieZ4KVWk9/nPkD/k7Sa6JM1QxAVZObK/Y9oA6fjEFuRGdyUMxYx3hyR8ErNCM7kMf8Yn78ycNoKB5CDlLsVpPLcQlqALnBAg1XAowLduCCuOo8HlenM7TQqohB0DO9MCDyZPoiy0kieMBLBcaC7xikBXPDoV9lxgvJf1zbEdQVfWllsb1dNsuYNyMfwYRK+PttC/W37oJT64HJVWJ1O3cl63W69V1gDGUnjfayLjvbyo9llkqJetprfLhu2PfSDJ5jBlnKYnEj2+fZQb8pUrgyVOrhZJ3aKJAC3c665avfEFRDO3EV/cStzoAnHVYVpbR/EXyufYTh7Uvkej8l7g/CeQzxTq+0UovNjRA8UEXGaMWaLq1zZycc6Dx/m7HcZuNFdamM3eGWV+ZFPVBZhXHwZ1Ysq2mpBEYoMcKdoHe3EvFu3eKyrIzaqCLT5LQPfaPJaOistXBJNxDqL6vUhAtETmM5UjKGKZaQ== emalinowski@uchicago.edu
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu
\ No newline at end of file
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCkf6aIs6bmOAZS+Q7yFaRzPnZPa3eExrDDKqGuikGoNDMP1VcPoyb0cYTZTG5X6YzFt5Blv95WWuw6WEBdUxIax/Z9V4H83A+KRvuwiRI9zU3FaKEeYb18hcHSclAWyjl+N7b9V2KzxVBJCkmdC3XBLp/geDRIbGusg40lySYzYhs73hTYs0CQWHcLIj1jX00hbIdbKyc/fq8ODIEOo/XojvjBQyPlT/BJ5fK08LO7kIBoeQ62iT8yG+J/2vch+WsMBeOt+agYKRSn9pv10+5SdP/emX4r5PkyTS8H3ysuequMUMv5w0rXAL53uTYpJELisNTl8pv2Y4VQKCh2Aj5989NFjcqBcv7KKTfvI3WVG5SNsOtu1tAmC05Xf3fdsb3BRVu7I0pCna26NOKRSh8eLy/uUfA4fUKOQyXr5yG3a+Vse57WZiPizOamhkjYTdvyBB8ad7vZST1ir1viSZl6ps+f3bhfx//DPKYpYyZIc6uDdGQMwFoMEhpTdKYopqGmny5LoR9J9LLeGDJd3M0bj/yyd+2/6cU+1KwjLO7fgyjSCjVUKEdG0HufwS/NZc1q3QT6OrXAd8lw5A4BoHDt+Mp8uRVz5508h7XIOC718nLuiJqwqh3dS6hkybGoBCIvh1BDWsEWOUi0Ygt+Ast3Qw4/eMqvmTCN32OIVtOBpQ== elisecastle@Elises-MBP
\ No newline at end of file
diff --git a/files/authorized_keys/squid_authorized_keys_admin b/files/authorized_keys/squid_authorized_keys_admin
index 3208bd50e..20c4d966d 100644
--- a/files/authorized_keys/squid_authorized_keys_admin
+++ b/files/authorized_keys/squid_authorized_keys_admin
@@ -4,3 +4,5 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg8
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdIXKLMs14c8U9exX/sOoIcvOCZ4v2pKsjdM1VBA56GyI98E1R+hxTBecHeWri9MeQcZkrlmjqT3ZzCb87+n0W2LEWquLNfeheAEq61ogi0taxWEpnb4rIAr1U9aS3d0mk5NIIivrwaUHTIvUhH8mn4Pek0GgybZAsjN/MpZ9PZwUtXNmjZoY5gWR0QO4ZWu7ARknFoNcTXwpWyl/Khhal0KKhdB38y3MpJc03IIqhem15e78jRlko04CAZX3zlFAQwbxnrpgrJUMYeY8fZqpV6FiWC40yu+n9KwAZkmtrc45mkxahj8c3QtJ/Z3t33yXEN9PEHV6z104STYi2cPVD rpollard@news-MacBook-Pro.local
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCkf6aIs6bmOAZS+Q7yFaRzPnZPa3eExrDDKqGuikGoNDMP1VcPoyb0cYTZTG5X6YzFt5Blv95WWuw6WEBdUxIax/Z9V4H83A+KRvuwiRI9zU3FaKEeYb18hcHSclAWyjl+N7b9V2KzxVBJCkmdC3XBLp/geDRIbGusg40lySYzYhs73hTYs0CQWHcLIj1jX00hbIdbKyc/fq8ODIEOo/XojvjBQyPlT/BJ5fK08LO7kIBoeQ62iT8yG+J/2vch+WsMBeOt+agYKRSn9pv10+5SdP/emX4r5PkyTS8H3ysuequMUMv5w0rXAL53uTYpJELisNTl8pv2Y4VQKCh2Aj5989NFjcqBcv7KKTfvI3WVG5SNsOtu1tAmC05Xf3fdsb3BRVu7I0pCna26NOKRSh8eLy/uUfA4fUKOQyXr5yG3a+Vse57WZiPizOamhkjYTdvyBB8ad7vZST1ir1viSZl6ps+f3bhfx//DPKYpYyZIc6uDdGQMwFoMEhpTdKYopqGmny5LoR9J9LLeGDJd3M0bj/yyd+2/6cU+1KwjLO7fgyjSCjVUKEdG0HufwS/NZc1q3QT6OrXAd8lw5A4BoHDt+Mp8uRVz5508h7XIOC718nLuiJqwqh3dS6hkybGoBCIvh1BDWsEWOUi0Ygt+Ast3Qw4/eMqvmTCN32OIVtOBpQ== elisecastle@Elises-MBP
\ No newline at end of file
diff --git a/files/authorized_keys/squid_authorized_keys_user b/files/authorized_keys/squid_authorized_keys_user
index 46b43a030..4b35fecd9 100644
--- a/files/authorized_keys/squid_authorized_keys_user
+++ b/files/authorized_keys/squid_authorized_keys_user
@@ -18,4 +18,5 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhY
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC3vyd6a7tsANi149ylPQYS8Gsp/SxJyhdK/j6arv77KbM0EIzzUiclFLnMKcqUQ263FrPyx3a3UP80R77ayCnwcEHrxlJrYfyFUva8vtmI9mu8VE7oXvuR/jcOyXM9NosxyYacL/p6W5X4r8tqo/gJFjmls1YRfu3JPlTgTT0VzGJu+B6rLEsw53c37VVzSaCtu/jBOjyxI1/UaNg1cd+hcfoQxJ9zSDqqE7ZUNOc3zHP+1AGYCQ/CJsNrDl2OkppIdC9He5jgjLhyD7yvyarI+oF05oHknol/K1hXK+yxIkF2Ou5krfjw7TMBvD+JbQVb35vL9acXFF20+lHLRLbobPU/6ZZTup3q7IRm5OWaL2CJtYZbJvicKW0Ep+vTzaiQjK71L6UxcIvnzvbP9Dnatv1GBMMDaQxAa4Lood8NG2ty1yfLN972akGqBlwJASXMRd/ogzxv2KSH9w6HHYoc2WpDhUtNHmjwX1FSLYPW3qx5ICMW6j9gR2u1tG4Ohzp1CmYVElnRHbnBrTkLde65Vqedk2tQy8fcopH59ZASIuR4GbhCb2SiNkr1PHEvfhLMzg/UCSnnhX9vUNhkPjJRy/bdL3pOt/77lpIQUqQBArOiZmfG8OD0q4+3Nr+c9v5bSSvynjirlKk+wb8sKyOoSAXdFeovL/A0BUKUjCtsXQ== dev@test.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQChK/8JjpUeWcF/1Ea2M4mSbLz1tOfpq74xD2USxE54kx7VoN1G7ylV76yqSIeRq1e7PPBEg5ZD1aXUJnlI32RwLJ5kaHnoB82Ta+Fv1B/vVoHCObcALfiHPpwPf1kM2liWEB0EhYcz1OUv3YQriPqjiRoWfnbw60GIyzhpWZhKRq0zlISOaTYdV9kafX+N7M6/gSU0632TgUwwsStYrffEleyrC/Lh+4UaESozWoPFiZLl2eMCKfZNFBB99HTFifImW2yC6Ag1QhCd1i3NpfiYuaSDH7WR3slPRSd8DiUAwGC2DkIuWPp3bhaAv2V4mtLIBAaTZsINIACB2+w7yf9yvCGtdobCmp4AA7ik9rEkRLk/Jff0YBHd6Z4qyIuRht3ZeWXIYSK1zOlPfs4lPUgvbjlPgMVFV2CrvOTnS+YZdW+8AklwRC3HDPD8wv3H/eGxl3K0vHWTBbTb774nVNfRDw81wcezCXFNUn4p2he7fgKcxs/rnMsYUcY8JJNR7Iz+NNIGUCom6HFwCMQdangFMHUW5TxxrlJcwVRaAns1M6g3ilYO+uvN/XsgCpZWYWnv5rBk8qz6dBM7gpc8tSr6Hvr7/vlghF3jpL+mQiW+7vUL+UZrUFNyoacUcQ+NuxKacHtHQKuRDyWofp+CB2b2a744F3mpkxx74HIkiZ72mQ== dev@test.com
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDTX+pQvGrQVXmHGDhBP+632tgbb1j+BQWkrsUkDJGzwFiGs4dgqDs2eC+aDVq2LFz4xj0SgussFAKciB45OgmSZKX5yUE3Oo/lqov0Bb5f85iBHGv/X/JiuIYaq8GJklVyyo1sfKLUK1SOal6bE1WofezyTyDsdrHjIU50quzW7nB1CmL6rekIv/+df/seut4b3De1d2uX5WGGtcvQ5yTSgBW5aabMAJ2V9WlP/6Dw040Kq0MyKV01cIJ1HAjFhP58gbf3Eytz3AqqJVT6u0QroxhesCgKTyGcAyYy3airI/N0FHdC5oABVEJ6dKyy1rYvOchuxYeVMVVWn0vS7mZ+vP7dqaDmgEUU2qmTPBQZV2xBWCdpfyUYYARW2JzlEaySbmA+yoxFBsquunVbIgUGNEUbxefsFdM3k5pS6I1uuEM0ATYH5iNz84nKKCcksGlib0i/pEtra6N/mFF7yjHYBRb/E/VCZig0gKezDJWu/DO0emJA+kdQpqp48U+qFrSWkuiO0dCQYl3VCVo8vedgMGPjr8MbUjU7o8W1+DYyjFM8HYMknRNdVAqAoK+cedw9mAWVGpKFrl61caGTFck0634nAVFUmfGTh9XRaZeFdDnivxnqP837gcsdKnEGYnkrxWap97XeXzK0P0Svy1zBfUQyzU5vrHfHt2H7ILDMw== prodv1-usersync-sftp
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDaO/doqHANcTZFEqZOoy9kKgbxu0d/cS1nEINlFcoQ/jnCG7huznWnWiYgnlkS6/Op9VrDp6qG/UBDye2mTvAh2FHPsOzSGvgml3dPYB5fy6G/xoXd7NJnIxttwFUvk4GuLZ40s24WCcXoFGJ2vaSAVYr0q6lmqOqk6jp1/lNj4+QFD4mcH2//jTscSFNseRII2NECu+PnnWAuYFOIHH1IODOvInEivUvN6VBX410D7iD7cEdhgiYitFZH6Cp6ubWG7OUKdZYv0067eO6HDDzl7y+BBUf3DF6Lr8gqtGXVqmAB9UqeBJ8pP3pNWKbgAa8sHvS8JxElCIc+4EM5dTI2OrDYKiuCTPZEC14WEFZLKqH7tjQFuZe0jfVRtoFNmKWClCgkJDWpyIkdR+qHcnOwlYkUVN3B02WVu4kTfox2ZUz65tLspJNAxAjYVrI7+c6LTQHSJwMcAMYcehR3vuqAfKE7xM6ReNxRQXsWaasdJgT2IJKj7vHu/G9GVycjiheg3zakJ9rr+63I68XlHNnTtfjIl/jgIHgcU18ggbwkwjL3xk39YttutlAaNAGUYCsopn/HdK8A86KvTCwHGEKtubgEHmv1oRAOooVaNes1oko2y9Saaqee52bsvwfeTLgxXB43d9GOWLoyBlgprDiufssFHoiJKQlgrqEwtg+vYQ== giangbui0816@gmail.com
\ No newline at end of file
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDaO/doqHANcTZFEqZOoy9kKgbxu0d/cS1nEINlFcoQ/jnCG7huznWnWiYgnlkS6/Op9VrDp6qG/UBDye2mTvAh2FHPsOzSGvgml3dPYB5fy6G/xoXd7NJnIxttwFUvk4GuLZ40s24WCcXoFGJ2vaSAVYr0q6lmqOqk6jp1/lNj4+QFD4mcH2//jTscSFNseRII2NECu+PnnWAuYFOIHH1IODOvInEivUvN6VBX410D7iD7cEdhgiYitFZH6Cp6ubWG7OUKdZYv0067eO6HDDzl7y+BBUf3DF6Lr8gqtGXVqmAB9UqeBJ8pP3pNWKbgAa8sHvS8JxElCIc+4EM5dTI2OrDYKiuCTPZEC14WEFZLKqH7tjQFuZe0jfVRtoFNmKWClCgkJDWpyIkdR+qHcnOwlYkUVN3B02WVu4kTfox2ZUz65tLspJNAxAjYVrI7+c6LTQHSJwMcAMYcehR3vuqAfKE7xM6ReNxRQXsWaasdJgT2IJKj7vHu/G9GVycjiheg3zakJ9rr+63I68XlHNnTtfjIl/jgIHgcU18ggbwkwjL3xk39YttutlAaNAGUYCsopn/HdK8A86KvTCwHGEKtubgEHmv1oRAOooVaNes1oko2y9Saaqee52bsvwfeTLgxXB43d9GOWLoyBlgprDiufssFHoiJKQlgrqEwtg+vYQ== giangbui0816@gmail.com
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDTpJ2l8nfOvhJ4Y3pjadFU69nfJBRuE0BaHE22LK9qflFWdhGW+T/x8Qy9406DFXCh6KED+q9lC+N4nR92AfgFNaBmkXZkzWLoXcqO1IWRexHwTqAUcrtLjpx5wNdCr3+vv9hWhXtvYg8ewnrZc+WxYde4EUmXbhzPXbg0SkBXTr6bpYhs6inyttfBeJNxbeydrW7cmhFiAdOkm03o3AXdH86PNlWVfVHy8OHHzf4fbvlJlOx7OeB+wOyQUr3DW+IWBLQFJk4uyagn/ECV9OIQpxoJFTQjcSrJ6v/GqlY5PImM6YxL8NlZu46CDIxagaZkum+iJ8dtPYr6tJuLiP5Ny0Gsl1X5DoKlstgyqqPNYTnZVS4GSS5Hyxm6HmodZ78OR5+vAoyWKZ3unXU5Dbkz0Qxq9VtrGo2xd0M+dDi/7YazRpLL0tc39w48Wl7KD3jFzoesZp1JHeEGLdGXlGCw8AM1FT0WDf28ShTRds6uWPGvMtM3XkVDPMLFwroKv1RCErmqLYod4HOMuwlmdRvtDGYb3NYsliOnHPiT9nhu2J6KmT1jj8uFOLyTaJCArtBqIsXscP3R4o0wBlQl3FniMdiK7ESkv8DUaOr1Co+/3wX9n/p/BW5bxuq1R9HpNyKsrALyNJUkquVT+5aPcNKXvmAeHAw/D0TYzy6ZKBpnDw== kyuleburton@Kyules-MacBook-Pro.local
diff --git a/files/authorized_keys/vpn_authorized_keys_admin b/files/authorized_keys/vpn_authorized_keys_admin
index 8583ff198..1c41caaa0 100644
--- a/files/authorized_keys/vpn_authorized_keys_admin
+++ b/files/authorized_keys/vpn_authorized_keys_admin
@@ -2,3 +2,5 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiVYoa9i91YL17xWF5kXpYh+PPTriZMAwiJWKkEtMJ
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDBFbx4eZLZEOTUc4d9kP8B2fg3HPA8phqJ7FKpykg87w300H8uTsupBPggxoPMPnpCKpG4aYqgKC5aHzv2TwiHyMnDN7CEtBBBDglWJpBFCheU73dDl66z/vny5tRHWs9utQNzEBPLxSqsGgZmmN8TtIxrMKZ9eX4/1d7o+8msikCYrKr170x0zXtSx5UcWj4yK1al5ZcZieZ4KVWk9/nPkD/k7Sa6JM1QxAVZObK/Y9oA6fjEFuRGdyUMxYx3hyR8ErNCM7kMf8Yn78ycNoKB5CDlLsVpPLcQlqALnBAg1XAowLduCCuOo8HlenM7TQqohB0DO9MCDyZPoiy0kieMBLBcaC7xikBXPDoV9lxgvJf1zbEdQVfWllsb1dNsuYNyMfwYRK+PttC/W37oJT64HJVWJ1O3cl63W69V1gDGUnjfayLjvbyo9llkqJetprfLhu2PfSDJ5jBlnKYnEj2+fZQb8pUrgyVOrhZJ3aKJAC3c665avfEFRDO3EV/cStzoAnHVYVpbR/EXyufYTh7Uvkej8l7g/CeQzxTq+0UovNjRA8UEXGaMWaLq1zZycc6Dx/m7HcZuNFdamM3eGWV+ZFPVBZhXHwZ1Ysq2mpBEYoMcKdoHe3EvFu3eKyrIzaqCLT5LQPfaPJaOistXBJNxDqL6vUhAtETmM5UjKGKZaQ== emalinowski@uchicago.edu
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhYZSd4Aq+7gZ0tY0dFUKtXLpJsQVDTflINc7sLDDXNp3icuSMmxOeNgvBfi8WnzBxcATh3uqidPqE0hcnhVQbpsza1zk8jkOB2o8FfBdDTOSbgPESv/1dnGApfkZj96axERUCMzyyUSEmif2moWJaVv2Iv7O+xjQqIZcMXiAo5BCnTCFFKGVOphy65cOsbcE02tEloiZ3lMAPMamZGV7SMQiD3BusncnVctn/E1vDqeozItgDrTdajKqtW0Mt6JFONVFobzxS8AsqFwaHiikOZhKq2LoqgvbXZvNWH2zRELezP jawadq@Jawads-MacBook-Air.local
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC+iK0ZvY25lgwh4nNUTkD0bq2NES3cPEK+f52HEC2GSVI845ZOqX32kfNpDFT9zvspadOA6KwAgKsRphP/iV8k8WLjAYSYQ3sAE/enuW1+Cr0hhmtahA+uxOavUwsvJ93vIOlIlkD26gIUZTZeYUhi6Aa2FjWFTJ0CtxtUYEdBh+sqW3VoyVvOOA+2DnNYt7/pTrh0DwNxHX7+9TfkmRaVLD4xcdwNLx5N3Yyjgci+oGmw8HATYfSBTaGEXSKJflrN6TDqN87D2pJpMkEvYeZIktoU0kX4HwodrNfwhlruJ2PsePzZ28xlaaZz2fI/LGiqnwf1fRY10R5C/9RpcAcpcYaz305uBCUCI7GGbL9u7WC0W0NZsyaaybaKXyt97p/05os2oe/N5un0whv+NL8z5SLZnaelvttrmVKApvsCD/IqZv5b2PlDilY3L638eKmVOcHaLX/N67MeL9FKnipv2QPzaUKhMoEAtSPqdOWnlndt9dmMBlqT0BKmB85mm0k= ajoa@uchicago.edu
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXzpRFVdZMdW8G4hP1or6O26zHvAj+OLxP0G8kGzCiIdmCwuycurU1MhxatPLfEmWuc31FGdhD5xDb2taHEwdTcMTHaqGa/K03NDFm2Ary7FeVEuBuur1jSWHIq+o4wp9vtsAfBLmV89yxZU40BHauac5uhXcHXfQ7OeZBVZhkCdUcN2l21H44b6V3RAqqxaf4dOiapTd8YbMHMJmyeu5rMFbbW9zS8lXule4pNTREXfh3Zt9MYPZnZ2aV/hQV28KRRjWJnMXuPQxSqEKVDsVbKT9Hu0Re9I8cQLEakNQV5G5c0YDuQjzXL8rEiYKm2AEHlpri/IkOiKu0gKeyZDVTJjW1/n0fCYlcjOJ9AB5wlM6CtsdwBC4spN85E2oJrfvmKIMnRdqSQnLe+w/DyyaZJsMgvXjItB5tysOZI2BkM2Z2cQ3XFK91gwxEUVQHlbvWBI7Nl2VEykQ5O8HdcPnKPcspnOByJMFfdvbh6HXlrstPOuNm2dyw+CUIMlQpa0nEEs/fyB+PoeRYMPs6VNA1syOpH70EXslyfDiN+5eH3asUohvbe4fOjF1jyviQEYXZ2mSbL+D/5sw4x9uWpg/oa+DzWX7ACBUt+ZEwF7xMWIO2O48HWokUrshNB1ksfK+tBXf6mL9SDlxzPYfcR2INRQgllidhPZIqVHoD57HUSw== ahilt@aidans-mbp.lan
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCkf6aIs6bmOAZS+Q7yFaRzPnZPa3eExrDDKqGuikGoNDMP1VcPoyb0cYTZTG5X6YzFt5Blv95WWuw6WEBdUxIax/Z9V4H83A+KRvuwiRI9zU3FaKEeYb18hcHSclAWyjl+N7b9V2KzxVBJCkmdC3XBLp/geDRIbGusg40lySYzYhs73hTYs0CQWHcLIj1jX00hbIdbKyc/fq8ODIEOo/XojvjBQyPlT/BJ5fK08LO7kIBoeQ62iT8yG+J/2vch+WsMBeOt+agYKRSn9pv10+5SdP/emX4r5PkyTS8H3ysuequMUMv5w0rXAL53uTYpJELisNTl8pv2Y4VQKCh2Aj5989NFjcqBcv7KKTfvI3WVG5SNsOtu1tAmC05Xf3fdsb3BRVu7I0pCna26NOKRSh8eLy/uUfA4fUKOQyXr5yG3a+Vse57WZiPizOamhkjYTdvyBB8ad7vZST1ir1viSZl6ps+f3bhfx//DPKYpYyZIc6uDdGQMwFoMEhpTdKYopqGmny5LoR9J9LLeGDJd3M0bj/yyd+2/6cU+1KwjLO7fgyjSCjVUKEdG0HufwS/NZc1q3QT6OrXAd8lw5A4BoHDt+Mp8uRVz5508h7XIOC718nLuiJqwqh3dS6hkybGoBCIvh1BDWsEWOUi0Ygt+Ast3Qw4/eMqvmTCN32OIVtOBpQ== elisecastle@Elises-MBP
\ No newline at end of file
diff --git a/files/dashboard/maintenance-page/index.html b/files/dashboard/maintenance-page/index.html
index a3e34479b..fac49e64e 100644
--- a/files/dashboard/maintenance-page/index.html
+++ b/files/dashboard/maintenance-page/index.html
@@ -16,7 +16,7 @@
@@ -27,12 +27,12 @@ This site is under maintenance...
Please check back later.
-
+
diff --git a/files/dashboard/usage-reports/package-lock.json b/files/dashboard/usage-reports/package-lock.json
index 4841621b6..24e3de518 100644
--- a/files/dashboard/usage-reports/package-lock.json
+++ b/files/dashboard/usage-reports/package-lock.json
@@ -5,14 +5,14 @@
"requires": true,
"dependencies": {
"jasmine-core": {
- "version": "3.6.0",
- "resolved": "https://registry.npmjs.org/jasmine-core/-/jasmine-core-3.6.0.tgz",
- "integrity": "sha512-8uQYa7zJN8hq9z+g8z1bqCfdC8eoDAeVnM5sfqs7KHv9/ifoJ500m018fpFc7RDaO6SWCLCXwo/wPSNcdYTgcw=="
+ "version": "3.99.1",
+ "resolved": "https://registry.npmjs.org/jasmine-core/-/jasmine-core-3.99.1.tgz",
+ "integrity": "sha512-Hu1dmuoGcZ7AfyynN3LsfruwMbxMALMka+YtZeGoLuDEySVmVAPaonkNoBRIw/ectu8b9tVQCJNgp4a4knp+tg=="
},
"lit-html": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/lit-html/-/lit-html-1.3.0.tgz",
- "integrity": "sha512-0Q1bwmaFH9O14vycPHw8C/IeHMk/uSDldVLIefu/kfbTBGIc44KGH6A8p1bDfxUfHdc8q6Ct7kQklWoHgr4t1Q=="
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/lit-html/-/lit-html-1.4.1.tgz",
+ "integrity": "sha512-B9btcSgPYb1q4oSOb/PrOT6Z/H+r6xuNzfH4lFli/AWhYwdtrgQkQWBbIc6mdnf6E2IL3gDXdkkqNktpU0OZQA=="
}
}
}
diff --git a/files/dashboard/usage-reports/package.json b/files/dashboard/usage-reports/package.json
index 6552248ae..7f66661ce 100644
--- a/files/dashboard/usage-reports/package.json
+++ b/files/dashboard/usage-reports/package.json
@@ -10,7 +10,7 @@
"author": "",
"license": "ISC",
"dependencies": {
- "jasmine-core": "^3.6.0",
- "lit-html": "^1.3.0"
+ "jasmine-core": "^3.99.1",
+ "lit-html": "^1.4.1"
}
}
diff --git a/files/openvpn_management_scripts/create_ovpn.sh b/files/openvpn_management_scripts/create_ovpn.sh
index 4e6ba7bf5..4d351464b 100755
--- a/files/openvpn_management_scripts/create_ovpn.sh
+++ b/files/openvpn_management_scripts/create_ovpn.sh
@@ -29,8 +29,8 @@ set -e
set -u
-USER_CERT_PATH="$KEY_PATH/$1.crt"
-USER_KEY_PATH="$KEY_PATH/$1.key"
+USER_CERT_PATH="$KEY_PATH/issued/$1.crt"
+USER_KEY_PATH="$KEY_PATH/private/$1.key"
#HEADER
diff --git a/files/openvpn_management_scripts/create_seperated_vpn_zip.sh b/files/openvpn_management_scripts/create_seperated_vpn_zip.sh
index 1794a3b69..c7ac6ce3a 100755
--- a/files/openvpn_management_scripts/create_seperated_vpn_zip.sh
+++ b/files/openvpn_management_scripts/create_seperated_vpn_zip.sh
@@ -30,8 +30,8 @@ username=${username// /_}
# now, clean out anything that's not alphanumeric or an underscore
username=${username//[^a-zA-Z0-9_-.]/}
-USER_CERT_PATH="$KEY_PATH/$1.crt"
-USER_KEY_PATH="$KEY_PATH/$1.key"
+USER_CERT_PATH="$KEY_PATH/issued/$1.crt"
+USER_KEY_PATH="$KEY_PATH/private/$1.key"
#make a temp dir
TEMP_NAME="$username-$CLOUD_NAME-seperated"
@@ -47,6 +47,7 @@ cp $USER_KEY_PATH $TEMP_DIR/client.key
#This is because EXTHOST is a defined variable in the template
while read r; do eval echo $r; done < $TEMPLATE_DIR/client_ovpn_seperate.settings >> $TEMP_DIR/${username}-${CLOUD_NAME}.ovpn
+mkdir -p $KEY_DIR/ovpn_files_seperated
tar -C $TEMP_DIR/../ -zcvf $KEY_DIR/ovpn_files_seperated/${username}-${CLOUD_NAME}-seperated.tgz $TEMP_NAME
echo -e "Exiting ${BOLD}$_${CLEAR}"
diff --git a/files/openvpn_management_scripts/create_vpn_user.sh b/files/openvpn_management_scripts/create_vpn_user.sh
index 2f3ef406b..39be17fcb 100755
--- a/files/openvpn_management_scripts/create_vpn_user.sh
+++ b/files/openvpn_management_scripts/create_vpn_user.sh
@@ -49,13 +49,16 @@ export KEY_EMAIL=$email
export KEY_ALTNAMES="DNS:${KEY_CN}"
#This create the key's for the road warrior
-echo -e "running ${YELLOW} build-batch-key"
-build-key-batch $username &>/dev/null && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR
+echo -e "running ${YELLOW} easyrsa build-client-full"
+(
+ cd $EASYRSA_PATH
+ easyrsa build-client-full $username nopass &>/dev/null && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR
+)
#&& echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR
-echo "Backup certs so we can revoke them if ever needed"
-[ -d $KEY_DIR/user_certs/ ] || mkdir $KEY_DIR/user_certs/
-cp $KEY_DIR/$username.crt $KEY_DIR/user_certs/$username.crt-$(date +%F-%T) && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR
+# echo "Backup certs so we can revoke them if ever needed"
+# [ -d $KEY_DIR/user_certs/ ] || mkdir $KEY_DIR/user_certs/
+# cp $KEY_DIR/$username.crt $KEY_DIR/user_certs/$username.crt-$(date +%F-%T) && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR
echo "Create the OVPN file for $username"
$VPN_BIN_ROOT/create_ovpn.sh $KEY_CN $KEY_EMAIL > $KEY_DIR/ovpn_files/${username}-${CLOUD_NAME}.ovpn 2> /dev/null && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR
diff --git a/files/openvpn_management_scripts/install_ovpn.sh b/files/openvpn_management_scripts/install_ovpn.sh
index 795ac17f2..4250d2ca2 100644
--- a/files/openvpn_management_scripts/install_ovpn.sh
+++ b/files/openvpn_management_scripts/install_ovpn.sh
@@ -12,13 +12,13 @@ VARS_PATH="$EASYRSA_PATH/vars"
#EASY-RSA Vars
- KEY_SIZE=4096
- COUNTRY="US"
- STATE="IL"
- CITY="Chicago"
- ORG="CDIS"
- EMAIL='support\@datacommons.io'
- KEY_EXPIRE=365
+KEY_SIZE=4096
+COUNTRY="US"
+STATE="IL"
+CITY="Chicago"
+ORG="CDIS"
+EMAIL='support\@datacommons.io'
+KEY_EXPIRE=365
#OpenVPN
diff --git a/files/openvpn_management_scripts/reset_totp_token.sh b/files/openvpn_management_scripts/reset_totp_token.sh
index b844af8f2..e937876a2 100755
--- a/files/openvpn_management_scripts/reset_totp_token.sh
+++ b/files/openvpn_management_scripts/reset_totp_token.sh
@@ -40,11 +40,15 @@ update_password_file() {
}
generate_qr_code() {
- uuid=$(uuidgen)
- qrcode_out=/var/www/qrcode/${uuid}.svg
+ mkdir -p /etc/openvpn/pki/qrcodes
+ qrcode_out=/etc/openvpn/pki/qrcodes/${vpn_username}.png
string=$( python -c "import pyotp; print( pyotp.totp.TOTP('$totp_secret').provisioning_uri('$vpn_username', issuer_name='$CLOUD_NAME') )" )
- $( python -c "import pyqrcode; pyqrcode.create('$string').svg('${qrcode_out}', scale=8)" )
- vpn_creds_url="https://${FQDN}/$uuid.svg"
+ $( python -c "import qrcode; qrcode.make('$string').save('${qrcode_out}')" )
+ # vpn_creds_url="https://${FQDN}/$uuid.svg"
+ s3Path="s3://${S3BUCKET}/qrcodes/${vpn_username}.png"
+ aws s3 cp ${qrcode_out} ${s3Path}
+ signedUrl="$(aws s3 presign "$s3Path" --expires-in "$((60*60*48))")"
+ vpn_creds_url=${signedUrl}
}
print_info() {
diff --git a/files/openvpn_management_scripts/revoke_user.sh b/files/openvpn_management_scripts/revoke_user.sh
index 0ffe5c364..89d102f38 100755
--- a/files/openvpn_management_scripts/revoke_user.sh
+++ b/files/openvpn_management_scripts/revoke_user.sh
@@ -25,18 +25,15 @@ set -e
username=${1}
-#Source the settings for EASY RSA
-source $EASYRSA_PATH/vars
#Override exports
export KEY_CN=$username
-set +e
-#revoke-full $username || echo -e "${RED}${BOLD}${BLINK}FAILED TO REVOKE ${username}${CLEAR}"
-revoke-full $username
-#Apparently it doesn't exist like I expected, and says failed even when it succeeded.
-
-set -e
+(
+ cd $EASYRSA_PATH
+ ./easyrsa revoke $username
+ ./easyrsa gen-crl
+)
sed -i "/${username},/d" $USER_PW_FILE || echo -e "${RED}${BOLD}${BLINK}Failed to remove $username from file ${USER_PW_FILE}${CLEAR}"
/etc/openvpn/bin/push_to_s3.sh
diff --git a/files/openvpn_management_scripts/send_email.sh b/files/openvpn_management_scripts/send_email.sh
index 38ec6651a..0686af206 100755
--- a/files/openvpn_management_scripts/send_email.sh
+++ b/files/openvpn_management_scripts/send_email.sh
@@ -14,7 +14,7 @@ RED="\033[31m"
echo -e "Entering ${BOLD}$_${CLEAR}"
-S3BUCKET=WHICHVPN
+export S3BUCKET=WHICHVPN
if [ "${1}" == "" ]
then
diff --git a/files/openvpn_management_scripts/templates/network_tweaks.sh.template b/files/openvpn_management_scripts/templates/network_tweaks.sh.template
index a137a8c6f..1caa8c36a 100644
--- a/files/openvpn_management_scripts/templates/network_tweaks.sh.template
+++ b/files/openvpn_management_scripts/templates/network_tweaks.sh.template
@@ -14,3 +14,5 @@ iptables -I FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
# Masquerade
iptables -t nat -A POSTROUTING -s #VPN_SUBNET# -d #VM_SUBNET# -o $vpnserver_int -j MASQUERADE
echo 1 > /proc/sys/net/ipv4/ip_forward
+
+service iptables save
diff --git a/files/openvpn_management_scripts/templates/openvpn.conf.template b/files/openvpn_management_scripts/templates/openvpn.conf.template
index d539015fe..7e692113e 100644
--- a/files/openvpn_management_scripts/templates/openvpn.conf.template
+++ b/files/openvpn_management_scripts/templates/openvpn.conf.template
@@ -10,16 +10,16 @@ persist-key
persist-tun
#certificates
-ca easy-rsa/keys/ca.crt
-cert easy-rsa/keys/#FQDN#.crt
-key easy-rsa/keys/#FQDN#.key # This file should be kept secret
-dh easy-rsa/keys/dh4096.pem
-tls-auth easy-rsa/keys/ta.key 0 # This file is secret
-crl-verify easy-rsa/keys/crl.pem # Revocation files
+ca /etc/openvpn/easy-rsa/pki/ca.crt
+cert /etc/openvpn/easy-rsa/pki/issued/#FQDN#.crt
+key /etc/openvpn/easy-rsa/pki/private/#FQDN#.key # This file should be kept secret
+dh /etc/openvpn/easy-rsa/pki/dh.pem
+tls-auth /etc/openvpn/easy-rsa/pki/ta.key 0 # This file is secret
+crl-verify /etc/openvpn/easy-rsa/pki/crl.pem # Revocation files
#Password script
-auth-user-pass-verify bin/auth-user-pass-verify.sh via-env
-script-security 3 execve
+auth-user-pass-verify /etc/openvpn/bin/auth-user-pass-verify.sh via-env
+script-security 3 # execve
#Cipher suite
cipher AES-256-CBC
diff --git a/files/openvpn_management_scripts/templates/settings.sh.template b/files/openvpn_management_scripts/templates/settings.sh.template
index 2d5f46ef6..c58e8b98c 100644
--- a/files/openvpn_management_scripts/templates/settings.sh.template
+++ b/files/openvpn_management_scripts/templates/settings.sh.template
@@ -1,6 +1,7 @@
export VPN_SETTINGS_LOADED="1"
export CLOUD_NAME='#CLOUD_NAME#'
export FQDN="#FQDN#"
+export EXTHOST='#CLOUD_NAME#.planx-pla.net'
## EXTHOST is set in the easy-rsa/vars env settings. I think these values have to match, so removing it from here
#sendemail vars
@@ -28,7 +29,7 @@ export OPENVPN_MY_BIN="/etc/openvpn/bin"
#CDIS OpenVPN scripts contants
export TEMPLATE_DIR="/etc/openvpn/bin/templates"
-export KEY_PATH="/etc/openvpn/easy-rsa/keys/"
+export KEY_PATH="/etc/openvpn/easy-rsa/pki/"
export CA_PATH="$KEY_PATH/ca.crt"
export TA_KEY_PATH="$KEY_PATH/ta.key"
export ARCHIVE_CERT_DIR="$KEY_DIR/user_certs/"
@@ -37,6 +38,6 @@ export USER_PW_FILE="/etc/openvpn/user_passwd.csv"
export VPN_BIN_ROOT="/etc/openvpn/bin"
export VPN_USER_CSV="/etc/openvpn/user_passwd.csv"
export VPN_FILE_ATTACHMENTS="-a$VPN_BIN_ROOT/OpenVPN_for_PLANX_Installation_Guide.pdf"
-
+export KEY_DIR="$EASYRSA_PATH/pki"
export PATH=$PATH:$EASYRSA_PATH:$OPENVPN_MY_BIN
source /etc/openvpn/bin/.venv/bin/activate
diff --git a/files/openvpn_management_scripts/templates/vars.template b/files/openvpn_management_scripts/templates/vars.template
index 0afa0c554..311f05605 100644
--- a/files/openvpn_management_scripts/templates/vars.template
+++ b/files/openvpn_management_scripts/templates/vars.template
@@ -1,81 +1,25 @@
-# easy-rsa parameter settings
-export EXTHOST="#EXTHOST#"
+# EasyRSA 3 vars file
-# NOTE: If you installed from an RPM,
-# don't edit this file in place in
-# /usr/share/openvpn/easy-rsa --
-# instead, you should copy the whole
-# easy-rsa directory to another location
-# (such as /etc/openvpn) so that your
-# edits will not be wiped out by a future
-# OpenVPN package upgrade.
+# This is a user-customized vars file for EasyRSA 3.
+# Adjust these values to suit your needs.
-# This variable should point to
-# the top level of the easy-rsa
-# tree.
-export EASY_RSA="#EASY_RSA_DIR#"
+# Key Size - larger keys are more secure but slow down TLS negotiation.
+set_var EASYRSA_KEY_SIZE #KEY_SIZE#
-#
-# This variable should point to
-# the requested executables
-#
-export OPENSSL="openssl"
-export PKCS11TOOL="pkcs11-tool"
-export GREP="grep"
+# CA and Certificate Expiry - Set these to your desired expiry in days
+set_var EASYRSA_CA_EXPIRE 3650
+set_var EASYRSA_CERT_EXPIRE #KEY_EXPIRE#
+# Fields for the request Distinguished Name (DN)
+# Adjust these to match your organization's information
+set_var EASYRSA_REQ_COUNTRY "#COUNTRY#"
+set_var EASYRSA_REQ_PROVINCE "#STATE#"
+set_var EASYRSA_REQ_CITY "#CITY#"
+set_var EASYRSA_REQ_ORG "#ORG#"
+set_var EASYRSA_REQ_EMAIL "#EMAIL#"
+set_var EASYRSA_REQ_OU "#OU#"
-# This variable should point to
-# the openssl.cnf file included
-# with easy-rsa.
-export KEY_CONFIG=`$EASY_RSA/whichopensslcnf $EASY_RSA`
-# Edit this variable to point to
-# your soon-to-be-created key
-# directory.
-#
-# WARNING: clean-all will do
-# a rm -rf on this directory
-# so make sure you define
-# it correctly!
-export KEY_DIR="$EASY_RSA/keys"
+set_var EASYRSA_BATCH "1"
-# Issue rm -rf warning
-echo NOTE: If you run ./clean-all, I will be doing a rm -rf on $KEY_DIR
-
-# PKCS11 fixes
-export PKCS11_MODULE_PATH="dummy"
-export PKCS11_PIN="dummy"
-
-# Increase this to 2048 if you
-# are paranoid. This will slow
-# down TLS negotiation performance
-# as well as the one-time DH parms
-# generation process.
-export KEY_SIZE=#KEY_SIZE#
-
-# In how many days should the root CA key expire?
-export CA_EXPIRE=3650
-
-# In how many days should certificates expire?
-export KEY_EXPIRE=#KEY_EXPIRE#
-
-# These are the default values for fields
-# which will be placed in the certificate.
-# Don't leave any of these fields blank.
-export KEY_COUNTRY="#COUNTRY#"
-export KEY_PROVINCE="#STATE#"
-export KEY_CITY="#CITY#"
-export KEY_ORG="#ORG#"
-export KEY_EMAIL="#EMAIL#"
-export KEY_OU="#OU#"
-
-# X509 Subject Field
-export KEY_NAME="#KEY_NAME#"
-
-# PKCS11 Smart Card
-# export PKCS11_MODULE_PATH="/usr/lib/changeme.so"
-# export PKCS11_PIN=1234
-
-# If you'd like to sign all keys with the same Common Name, uncomment the KEY_CN export below
-# You will also need to make sure your OpenVPN server config has the duplicate-cn option set
-# export KEY_CN="CommonName"
+# Note: Do not leave any of the fields blank as it may cause the script to fail.
diff --git a/files/scripts/ci-env-pool-reset.sh b/files/scripts/ci-env-pool-reset.sh
index 3f1d951d2..362cfbfd5 100644
--- a/files/scripts/ci-env-pool-reset.sh
+++ b/files/scripts/ci-env-pool-reset.sh
@@ -27,21 +27,19 @@ fi
source "${GEN3_HOME}/gen3/gen3setup.sh"
cat - > jenkins-envs-services.txt <<EOF
...
EOF
cat - > jenkins-envs-releases.txt <<EOF
...
EOF
diff --git a/files/scripts/ecr-access-job.md b/files/scripts/ecr-access-job.md
new file mode 100644
--- /dev/null
+++ b/files/scripts/ecr-access-job.md
+
+and then allows each AWS account to access the appropriate ECR repositories. The users' ECR repositories are based on their username as stored in the table. For example, `user1@username.com`'s ECR repository is assumed to be `nextflow-approved/user1-40username-2ecom`.
+
+### Access needed
+
+- "EcrRepoPolicyUpdateRole" role in the account (Acct1) that contains the ECR repositories:
+
+**Note:** `kube-setup-ecr-access-cronjob.sh` assumes this role already exists.
+
+Permissions:
+```
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "UpdateEcrRepoPolicy",
+ "Effect": "Allow",
+ "Action": "ecr:SetRepositoryPolicy",
+ "Resource": "arn:aws:ecr:us-east-1::repository/nextflow-approved/*"
+ }
+ ]
+}
+```
+
+Trust policy (allows Acct2):
+```
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "AllowAssumingRole",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam:::root"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
+```
+
+- Policy in the account (Acct2) that contains the DynamoDB table (created automatically by `kube-setup-ecr-access-job.sh`):
+```
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "ReadDynamoDB",
+ "Effect": "Allow",
+ "Action": [
+ "dynamodb:Scan"
+ ],
+ "Resource": "arn:aws:dynamodb:::table/"
+ },
+ {
+ "Sid": "AssumeEcrRole",
+ "Effect": "Allow",
+ "Action": [
+ "sts:AssumeRole"
+ ],
+ "Resource": "arn:aws:iam:::role/"
+ }
+ ]
+}
+```
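+
+For local testing, a sketch of running the script directly (the environment variable names come from `ecr-access-job.py`; the values are placeholders, and `boto3` must be installed):
+
+```
+export PAY_MODELS_DYNAMODB_TABLE="my-pay-models-table"
+export ECR_ACCESS_JOB_ARN="arn:aws:iam::<Acct1 ID>:role/EcrRepoPolicyUpdateRole"
+# optional: local AWS key; in production the job runs with a service account instead
+export KEY_ID="<aws access key id>"
+export KEY_SECRET="<aws secret access key>"
+python3 ecr-access-job.py
+```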
diff --git a/files/scripts/ecr-access-job.py b/files/scripts/ecr-access-job.py
new file mode 100644
index 000000000..828d94c96
--- /dev/null
+++ b/files/scripts/ecr-access-job.py
@@ -0,0 +1,177 @@
+"""
+See documentation at https://github.com/uc-cdis/cloud-automation/blob/master/files/scripts/ecr-access-job.md
+"""
+
+from decimal import Decimal
+import json
+import os
+from typing import List
+import uuid
+
+import boto3
+from boto3.dynamodb.conditions import Attr
+
+
+REGION = "us-east-1"
+
+# for local testing. in production, use a service account instead of a key.
+MAIN_ACCOUNT_CREDS = {"key_id": os.environ.get("KEY_ID"), "key_secret": os.environ.get("KEY_SECRET")}
+
+
+def escapism(string: str) -> str:
+ """
+ This is a direct translation of Hatchery's `escapism` golang function to python.
+ We need to escape the username in the same way it's escaped by Hatchery's `escapism` function because
+ special chars cannot be used in an ECR repo name, and so that the ECR repo generated here matches the
+ name expected by Hatchery.
+ """
+ safeBytes = "abcdefghijklmnopqrstuvwxyz0123456789"
+ escaped = ""
+ for v in string:
+ if v not in safeBytes:
+ hexCode = "{0:02x}".format(ord(v))
+ escaped += "-" + hexCode
+ else:
+ escaped += v
+ return escaped
+
+
+def get_configs() -> (str, str):
+ table_name = os.environ.get("PAY_MODELS_DYNAMODB_TABLE")
+ if not table_name:
+ raise Exception("Missing 'PAY_MODELS_DYNAMODB_TABLE' environment variable")
+
+ ecr_role_arn = os.environ.get("ECR_ACCESS_JOB_ARN")
+ if not ecr_role_arn:
+ raise Exception("Missing 'ECR_ACCESS_JOB_ARN' environment variable")
+
+ return table_name, ecr_role_arn
+
+
+def query_usernames_and_account_ids(table_name: str) -> List[dict]:
+ """
+ Returns:
+ List[dict]: [ { "user_id": "user1@username.com", "account_id": "123456" } ]
+ """
+ if MAIN_ACCOUNT_CREDS["key_id"]:
+ session = boto3.Session(
+ aws_access_key_id=MAIN_ACCOUNT_CREDS["key_id"],
+ aws_secret_access_key=MAIN_ACCOUNT_CREDS["key_secret"],
+ )
+ else:
+ session = boto3.Session()
+ dynamodb = session.resource("dynamodb", region_name=REGION)
+ table = dynamodb.Table(table_name)
+
+ # get usernames and AWS account IDs from DynamoDB
+ queried_keys = ["user_id", "account_id"]
+ filter_expr = Attr("workspace_type").eq("Direct Pay")
+ proj = ", ".join("#" + key for key in queried_keys)
+ expr = {"#" + key: key for key in queried_keys}
+ response = table.scan(
+ FilterExpression=filter_expr,
+ ProjectionExpression=proj,
+ ExpressionAttributeNames=expr,
+ )
+ assert response.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200, response
+ items = response["Items"]
+ # if the response is paginated, get the rest of the items
+ while response["Count"] > 0:
+ if "LastEvaluatedKey" not in response:
+ break
+ response = table.scan(
+ FilterExpression=filter_expr,
+ ProjectionExpression=proj,
+ ExpressionAttributeNames=expr,
+ ExclusiveStartKey=response["LastEvaluatedKey"],
+ )
+ assert (
+ response.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200
+ ), response
+ items.extend(response["Items"])
+
+ return items
+
+
+def update_access_in_ecr(repo_to_account_ids: dict, ecr_role_arn: str) -> None:
+ # get access to ECR in the account that contains the ECR repos
+ if MAIN_ACCOUNT_CREDS["key_id"]:
+ sts = boto3.client(
+ "sts",
+ aws_access_key_id=MAIN_ACCOUNT_CREDS["key_id"],
+ aws_secret_access_key=MAIN_ACCOUNT_CREDS["key_secret"],
+ )
+ else:
+ sts = boto3.client("sts")
+ assumed_role = sts.assume_role(
+ RoleArn=ecr_role_arn,
+ DurationSeconds=900, # minimum time for aws assume role as per boto docs
+ RoleSessionName=f"ecr-access-assume-role-{str(uuid.uuid4())[:8]}",
+ )
+ assert "Credentials" in assumed_role, "Unable to assume role"
+ ecr = boto3.client(
+ "ecr",
+ aws_access_key_id=assumed_role["Credentials"]["AccessKeyId"],
+ aws_secret_access_key=assumed_role["Credentials"]["SecretAccessKey"],
+ aws_session_token=assumed_role["Credentials"]["SessionToken"],
+ )
+
+ # for each ECR repo, whitelist the account IDs so users can access the repo
+ for repo, account_ids in repo_to_account_ids.items():
+ print(f"Allowing AWS accounts {account_ids} to use ECR repository '{repo}'")
+ policy = {
+ "Version": "2008-10-17",
+ "Statement": [
+ {
+ "Sid": "AllowCrossAccountPull",
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": [
+ f"arn:aws:iam::{account_id}:root"
+ for account_id in account_ids
+ ]
+ },
+ "Action": [
+ "ecr:BatchCheckLayerAvailability",
+ "ecr:BatchGetImage",
+ "ecr:GetAuthorizationToken",
+ "ecr:GetDownloadUrlForLayer",
+ ],
+ }
+ ],
+ }
+ # Note that this is overwriting the repo policy, not appending to it. This means we can't have 2 dynamodb
+ # tables pointing at the same set of ECR repos: the repos would only allow the accounts in the table for
+ # which the script was run most recently. eg QA and Staging can't use the same ECR repos.
+ # Appending is not possible since this code will eventually rely on Arborist for authorization information
+ # and we'll need to overwrite in order to remove expired access.
+ try:
+ ecr.set_repository_policy(
+ repositoryName=repo,
+ policyText=json.dumps(policy),
+ )
+ except Exception as e:
+ print(f" Unable to update '{repo}'; skipping it: {e}")
+
+
+def main() -> None:
+ table_name, ecr_role_arn = get_configs()
+ items = query_usernames_and_account_ids(table_name)
+
+ # construct mapping: { ECR repo url: [ AWS account IDs with access ] }
+ ecr_repo_prefix = "nextflow-approved"
+ repo_to_account_ids = {
+ f"{ecr_repo_prefix}/{escapism(e['user_id'])}": [e["account_id"]]
+ for e in items
+ if "account_id" in e
+ }
+ print(
+ "Mapping of ECR repository to allowed AWS accounts:\n",
+ json.dumps(repo_to_account_ids, indent=2),
+ )
+
+ update_access_in_ecr(repo_to_account_ids, ecr_role_arn)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py
new file mode 100644
index 000000000..45098400f
--- /dev/null
+++ b/files/scripts/healdata/heal-cedar-data-ingest.py
@@ -0,0 +1,401 @@
+import argparse
+import copy
+import json
+import sys
+import requests
+import pydash
+from uuid import UUID
+
+# Defines how a field in metadata is going to be mapped into a key in filters
+FILTER_FIELD_MAPPINGS = {
+ "study_metadata.study_type.study_stage": "Study Type",
+ "study_metadata.data.data_type": "Data Type",
+ "study_metadata.study_type.study_subject_type": "Subject Type",
+ "study_metadata.human_subject_applicability.gender_applicability": "Gender",
+ "study_metadata.human_subject_applicability.age_applicability": "Age",
+ "research_program": "Research Program",
+}
+
+# Defines how to handle special cases for values in filters
+SPECIAL_VALUE_MAPPINGS = {
+ "Interview/Focus Group - structured": "Interview/Focus Group",
+ "Interview/Focus Group - semi-structured": "Interview/Focus Group",
+ "Interview/Focus Group - unstructured": "Interview/Focus Group",
+ "Questionnaire/Survey/Assessment - validated instrument": "Questionnaire/Survey/Assessment",
+ "Questionnaire/Survey/Assessment - unvalidated instrument": "Questionnaire/Survey/Assessment",
+ "Cis Male": "Male",
+ "Cis Female": "Female",
+ "Trans Male": "Transgender man/trans man/female-to-male (FTM)",
+ "Female-to-male transsexual": "Transgender man/trans man/female-to-male (FTM)",
+ "Trans Female": "Transgender woman/trans woman/male-to-female (MTF)",
+ "Male-to-female transsexual": "Transgender woman/trans woman/male-to-female (MTF)",
+ "Agender, Non-binary, gender non-conforming": "Genderqueer/gender nonconforming/neither exclusively male nor female",
+ "Gender Queer": "Genderqueer/gender nonconforming/neither exclusively male nor female",
+ "Intersex": "Genderqueer/gender nonconforming/neither exclusively male nor female",
+ "Intersexed": "Genderqueer/gender nonconforming/neither exclusively male nor female",
+ "Buisness Development": "Business Development",
+}
+
+# Defines field that we don't want to include in the filters
+OMITTED_VALUES_MAPPING = {
+ "study_metadata.human_subject_applicability.gender_applicability": "Not applicable"
+}
+
+# repository links
+REPOSITORY_STUDY_ID_LINK_TEMPLATE = {
+ "NIDDK Central": "https://repository.niddk.nih.gov/studies//",
+ "NIDA Data Share": "https://datashare.nida.nih.gov/study/",
+ "NICHD DASH": "https://dash.nichd.nih.gov/study/",
+ "ICPSR": "https://www.icpsr.umich.edu/web/ICPSR/studies/",
+ "BioSystics-AP": "https://biosystics-ap.com/assays/assaystudy//",
+}
+
+
+def is_valid_uuid(uuid_to_test, version=4):
+ """
+ Check if uuid_to_test is a valid UUID.
+
+ Parameters
+ ----------
+ uuid_to_test : str
+ version : {1, 2, 3, 4}
+
+ Returns
+ -------
+ `True` if uuid_to_test is a valid UUID, otherwise `False`.
+
+ """
+
+ try:
+ uuid_obj = UUID(uuid_to_test, version=version)
+ except ValueError:
+ return False
+ return str(uuid_obj) == uuid_to_test
+
+
+def update_filter_metadata(metadata_to_update):
+ # Retain these from existing filters
+ save_filters = ["Common Data Elements"]
+ filter_metadata = [filter for filter in metadata_to_update["advSearchFilters"] if filter["key"] in save_filters]
+ for metadata_field_key, filter_field_key in FILTER_FIELD_MAPPINGS.items():
+ filter_field_values = pydash.get(metadata_to_update, metadata_field_key)
+ if filter_field_values:
+ if isinstance(filter_field_values, str):
+ filter_field_values = [filter_field_values]
+ if not isinstance(filter_field_values, list):
+ print(filter_field_values)
+ raise TypeError("Neither a string nor a list")
+ for filter_field_value in filter_field_values:
+ if (
+ metadata_field_key,
+ filter_field_value,
+ ) in OMITTED_VALUES_MAPPING.items():
+ continue
+ if filter_field_value in SPECIAL_VALUE_MAPPINGS:
+ filter_field_value = SPECIAL_VALUE_MAPPINGS[filter_field_value]
+ filter_metadata.append(
+ {"key": filter_field_key, "value": filter_field_value}
+ )
+ filter_metadata = pydash.uniq(filter_metadata)
+ metadata_to_update["advSearchFilters"] = filter_metadata
+ # Retain these from existing tags
+ save_tags = ["Data Repository", "Common Data Elements"]
+ tags = [tag for tag in metadata_to_update["tags"] if tag["category"] in save_tags]
+ # Add any new tags from advSearchFilters
+ for f in metadata_to_update["advSearchFilters"]:
+ if f["key"] == "Gender":
+ continue
+ tag = {"name": f["value"], "category": f["key"]}
+ if tag not in tags:
+ tags.append(tag)
+ metadata_to_update["tags"] = tags
+ return metadata_to_update
+
+
+def get_client_token(client_id: str, client_secret: str):
+ try:
+ token_url = f"http://revproxy-service/user/oauth2/token"
+ headers = {"Content-Type": "application/x-www-form-urlencoded"}
+ params = {"grant_type": "client_credentials"}
+ data = "scope=openid user data"
+
+ token_result = requests.post(
+ token_url,
+ params=params,
+ headers=headers,
+ data=data,
+ auth=(client_id, client_secret),
+ )
+ token = token_result.json()["access_token"]
+ except:
+ raise Exception("Could not get token")
+ return token
+
+
+def get_related_studies(serial_num, guid, hostname):
+ related_study_result = []
+
+ if serial_num:
+ mds = requests.get(
+ f"http://revproxy-service/mds/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000"
+ )
+ if mds.status_code == 200:
+ related_study_metadata = mds.json()
+
+ for (
+ related_study_metadata_key,
+ related_study_metadata_value,
+ ) in related_study_metadata.items():
+ if related_study_metadata_key == guid or (
+ related_study_metadata_value["_guid_type"] != "discovery_metadata"
+ and related_study_metadata_value["_guid_type"]
+ != "unregistered_discovery_metadata"
+ ):
+ # do nothing for self, or for archived studies
+ continue
+ title = (
+ related_study_metadata_value.get("gen3_discovery", {})
+ .get("study_metadata", {})
+ .get("minimal_info", {})
+ .get("study_name", "")
+ )
+ link = (
+ f"https://{hostname}/portal/discovery/{related_study_metadata_key}/"
+ )
+ related_study_result.append({"title": title, "link": link})
+ return related_study_result
+
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument("--directory", help="CEDAR Directory ID for registering ")
+parser.add_argument("--cedar_client_id", help="The CEDAR client id")
+parser.add_argument("--cedar_client_secret", help="The CEDAR client secret")
+parser.add_argument("--hostname", help="Hostname")
+
+
+args = parser.parse_args()
+
+if not args.directory:
+ print("Directory ID is required!")
+ sys.exit(1)
+if not args.cedar_client_id:
+ print("CEDAR client id is required!")
+ sys.exit(1)
+if not args.cedar_client_secret:
+ print("CEDAR client secret is required!")
+ sys.exit(1)
+if not args.hostname:
+ print("Hostname is required!")
+ sys.exit(1)
+
+dir_id = args.directory
+client_id = args.cedar_client_id
+client_secret = args.cedar_client_secret
+hostname = args.hostname
+
+print("Getting CEDAR client access token")
+access_token = get_client_token(client_id, client_secret)
+token_header = {"Authorization": "bearer " + access_token}
+
+limit = 10
+offset = 0
+
+# initialize this to be bigger than our initial call so we can go through while loop
+total = 100
+
+if not is_valid_uuid(dir_id):
+ print("Directory ID is not in UUID format!")
+ sys.exit(1)
+
+while limit + offset <= total:
+ # Get the metadata from cedar to register
+ print("Querying CEDAR...")
+ cedar = requests.get(
+ f"http://revproxy-service/cedar/get-instance-by-directory/{dir_id}?limit={limit}&offset={offset}",
+ headers=token_header,
+ )
+
+ # If we get metadata back now register with MDS
+ if cedar.status_code == 200:
+ metadata_return = cedar.json()
+ if "metadata" not in metadata_return:
+ print(
+ "Got 200 from CEDAR wrapper but no metadata in body, something is not right!"
+ )
+ sys.exit(1)
+
+ total = metadata_return["metadata"]["totalCount"]
+ returned_records = len(metadata_return["metadata"]["records"])
+ print(f"Successfully got {returned_records} record(s) from CEDAR directory")
+ for cedar_record in metadata_return["metadata"]["records"]:
+ # get the CEDAR instance id from cedar for querying in our MDS
+ cedar_instance_id = pydash.get(
+ cedar_record, "metadata_location.cedar_study_level_metadata_template_instance_ID"
+ )
+ if cedar_instance_id is None:
+ print("This record doesn't have CEDAR instance id, skipping...")
+ continue
+
+ # Get the metadata record for the CEDAR instance id
+ mds = requests.get(
+ f"http://revproxy-service/mds/metadata?gen3_discovery.study_metadata.metadata_location.cedar_study_level_metadata_template_instance_ID={cedar_instance_id}&data=true"
+ )
+ if mds.status_code == 200:
+ mds_res = mds.json()
+
+ # the query result key is the record of the metadata. If it doesn't return anything then our query failed.
+ if len(list(mds_res.keys())) == 0 or len(list(mds_res.keys())) > 1:
+ print(f"Query returned nothing for template_instance_ID={cedar_instance_id}&data=true")
+ continue
+
+ # get the key for our mds record
+ mds_record_guid = list(mds_res.keys())[0]
+
+ mds_res = mds_res[mds_record_guid]
+ mds_cedar_register_data_body = {**mds_res}
+ mds_discovery_data_body = {}
+ mds_clinical_trials = {}
+ if mds_res["_guid_type"] == "discovery_metadata":
+ print("Metadata is already registered. Updating MDS record")
+ elif mds_res["_guid_type"] == "unregistered_discovery_metadata":
+ print(
+ "Metadata has not been registered. Registering it in MDS record"
+ )
+ else:
+ print(
+ f"This metadata data record has a special GUID type \"{mds_res['_guid_type']}\" and will be skipped"
+ )
+ continue
+
+ if "clinicaltrials_gov" in cedar_record:
+ mds_clinical_trials = cedar_record["clinicaltrials_gov"]
+ del cedar_record["clinicaltrials_gov"]
+
+ # some special handling for this field, because its parent will be deleted before we merge the CEDAR and MDS SLMD, to avoid duplicated values
+ cedar_record_other_study_websites = cedar_record.get(
+ "metadata_location", {}
+ ).get("other_study_websites", [])
+ del cedar_record["metadata_location"]
+
+ mds_res["gen3_discovery"]["study_metadata"].update(cedar_record)
+ mds_res["gen3_discovery"]["study_metadata"]["metadata_location"][
+ "other_study_websites"
+ ] = cedar_record_other_study_websites
+
+ # setup citations
+ doi_citation = mds_res["gen3_discovery"]["study_metadata"].get(
+ "doi_citation", ""
+ )
+ mds_res["gen3_discovery"]["study_metadata"]["citation"][
+ "heal_platform_citation"
+ ] = doi_citation
+
+ # setup repository_study_link
+ data_repositories = (
+ mds_res.get("gen3_discovery", {})
+ .get("study_metadata", {})
+ .get("metadata_location", {})
+ .get("data_repositories", [])
+ )
+ repository_citation = "Users must also include a citation to the data as specified by the local repository."
+ repository_citation_additional_text = ' The link to the study page at the local repository can be found in the "Data" tab.'
+ for repository in data_repositories:
+ if (
+ repository["repository_name"]
+ and repository["repository_name"]
+ in REPOSITORY_STUDY_ID_LINK_TEMPLATE
+ and repository["repository_study_ID"]
+ ):
+ repository_study_link = REPOSITORY_STUDY_ID_LINK_TEMPLATE[
+ repository["repository_name"]
+ ].replace("", repository["repository_study_ID"])
+ repository.update(
+ {"repository_study_link": repository_study_link}
+ )
+ if (
+ repository_citation_additional_text
+ not in repository_citation
+ ):
+ repository_citation += repository_citation_additional_text
+ if len(data_repositories):
+ data_repositories[0] = {
+ **data_repositories[0],
+ "repository_citation": repository_citation,
+ }
+
+ mds_res["gen3_discovery"]["study_metadata"]["metadata_location"][
+ "data_repositories"
+ ] = copy.deepcopy(data_repositories)
+
+ # set up related studies
+ serial_num = None
+ try:
+ serial_num = (
+ mds_res.get("nih_reporter", {})
+ .get("project_num_split", {})
+ .get("serial_num", None)
+ )
+ except Exception:
+                        print("Exception while looking up serial number for study")
+
+ if serial_num is None:
+ print("Unable to get serial number for study")
+
+ related_study_result = get_related_studies(
+ serial_num, mds_record_guid, hostname
+ )
+ mds_res["gen3_discovery"]["related_studies"] = copy.deepcopy(related_study_result)
+
+                    # move CEDAR fields that are not study-level metadata (non-dict values) up one level into gen3_discovery
+ deleted_keys = []
+ for key, value in mds_res["gen3_discovery"]["study_metadata"].items():
+ if not isinstance(value, dict):
+ mds_res["gen3_discovery"][key] = value
+ deleted_keys.append(key)
+ for key in deleted_keys:
+ del mds_res["gen3_discovery"]["study_metadata"][key]
+
+ mds_discovery_data_body = update_filter_metadata(
+ mds_res["gen3_discovery"]
+ )
+
+ mds_cedar_register_data_body["gen3_discovery"] = mds_discovery_data_body
+ if mds_clinical_trials:
+ mds_cedar_register_data_body["clinicaltrials_gov"] = {
+ **mds_cedar_register_data_body.get("clinicaltrials_gov", {}),
+ **mds_clinical_trials,
+ }
+
+ mds_cedar_register_data_body["_guid_type"] = "discovery_metadata"
+
+ print(f"Metadata {mds_record_guid} is now being registered.")
+ mds_put = requests.put(
+ f"http://revproxy-service/mds/metadata/{mds_record_guid}",
+ headers=token_header,
+ json=mds_cedar_register_data_body,
+ )
+ if mds_put.status_code == 200:
+ print(f"Successfully registered: {mds_record_guid}")
+ else:
+ print(
+ f"Failed to register: {mds_record_guid}. Might not be MDS admin"
+ )
+ print(f"Status from MDS: {mds_put.status_code}")
+ else:
+ print(f"Failed to get information from MDS: {mds.status_code}")
+
+ else:
+ print(
+ f"Failed to get information from CEDAR wrapper service: {cedar.status_code}"
+ )
+
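+        # pagination: walk the CEDAR directory in pages of `limit`, stopping once
+        # offset+limit reaches `total` and shrinking the final page so we never overshoot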
+ if offset + limit == total:
+ break
+
+ offset = offset + limit
+ if (offset + limit) > total:
+ limit = total - offset
+
+ if limit < 0:
+ break
diff --git a/files/scripts/psql-fips-fix.sh b/files/scripts/psql-fips-fix.sh
index 01db2742a..fcbb6e20c 100644
--- a/files/scripts/psql-fips-fix.sh
+++ b/files/scripts/psql-fips-fix.sh
@@ -16,7 +16,7 @@ for name in indexd fence sheepdog peregrine; do
update_pass $name $username $password
done
-for name in wts metadata gearbox audit arborist access-backend argo_db atlas argo; do
+for name in wts metadata gearbox audit arborist access-backend argo_db atlas argo thor; do
if [[ ! -z $(gen3 secrets decode $name-g3auto dbcreds.json) ]]; then
username=$(gen3 secrets decode $name-g3auto dbcreds.json | jq -r .db_username)
password=$(gen3 secrets decode $name-g3auto dbcreds.json | jq -r .db_password)
diff --git a/files/scripts/tf-cleanup.sh b/files/scripts/tf-cleanup.sh
new file mode 100644
index 000000000..182d35c14
--- /dev/null
+++ b/files/scripts/tf-cleanup.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
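+# Best-effort cleanup: remove cached terraform plugin directories under each user's
+# ~/.local/share/gen3 workspaces to free disk space, logging what was removed.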
+for users in $(cut -d: -f1 /etc/passwd); do
+  for directory in $(find /home/$users/.local/share/gen3 -name .terraform 2>/dev/null); do
+    echo "Removing $directory/plugins" >> /terraformScriptLogs-$(date -u +%Y%m%d)
+ rm -rf $directory/plugins
+ done
+done
diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist
index d5a843c31..58799d6bb 100644
--- a/files/squid_whitelist/web_whitelist
+++ b/files/squid_whitelist/web_whitelist
@@ -5,20 +5,24 @@
accounts.google.com
achecker.ca
apache.github.io
+api.epigraphdb.org
api.monqcle.com
+awslabs.github.io
biodata-integration-tests.net
-biorender.com
+marketing.biorender.com
clinicaltrials.gov
+charts.bitnami.com
ctds-planx.atlassian.net
+data.cityofchicago.org
dataguids.org
api.login.yahoo.com
-api.snapcraft.io
apt.kubernetes.io
argoproj.github.io
archive.cloudera.com
archive.linux.duke.edu
aws.github.io
bay.uchicago.edu
+bioconductor.org
bionimbus.tabix.oicrsofteng.org
bits.netbeans.org
centos.chicago.waneq.com
@@ -31,6 +35,7 @@ cernvm.cern.ch
charts.bitnami.com
charts.helm.sh
cloud.r-project.org
+coredns.github.io
coreos.com
covidstoplight.org
cpan.mirrors.tds.net
@@ -66,19 +71,24 @@ ftp.usf.edu
ftp.ussg.iu.edu
fmwww.bc.edu
gcr.io
-gen3.org
get.helm.sh
+ghcr.io
git.io
go.googlesource.com
golang.org
gopkg.in
grafana.com
+grafana.github.io
+helm.elastic.co
http.us.debian.org
ifconfig.io
+ingress.coralogix.us
internet2.edu
k8s.gcr.io
ks.osdc.io
+kubecost.github.io
kubernetes.github.io
+kubernetes-sigs.github.io
lib.stat.cmu.edu
login.mathworks.com
login.microsoftonline.com
@@ -108,14 +118,19 @@ mirrors.gigenet.com
mirrors.lga7.us.voxel.net
mirrors.nics.utk.edu
mirrors.syringanetworks.net
+mps.csb.pitt.edu
mran.microsoft.com
neuro.debian.net
neurodeb.pirsquared.org
nginx.org
+nvidia.github.io
opportunityinsights.org
orcid.org
pgp.mit.edu
ppa.launchpad.net
+prometheus-community.github.io
+proxy.golang.org
+public.ecr.aws
pubmirrors.dal.corespace.com
reflector.westga.edu
registry.npmjs.org
@@ -130,7 +145,10 @@ repo.dimenoc.com
repos.mia.quadranet.com
repos.redrockhost.com
repos.sensuapp.org
+repo.vmware.com
repository.cloudera.com
+resource.metadatacenter.org
+rmq.n3c.ncats.io
rules.emergingthreats.net
rweb.quant.ku.edu
sa-update.dnswl.org
@@ -139,6 +157,7 @@ sa-update.space-pro.be
security.debian.org
services.mathworks.com
streaming.stat.iastate.edu
+us-east4-docker.pkg.dev
us-central1-docker.pkg.dev
www.google.com
www.icpsr.umich.edu
@@ -148,3 +167,5 @@ www.rabbitmq.com
www.uniprot.org
vpodc.org
yahoo.com
+idp.stage.qdr.org
+stage.qdr.org
\ No newline at end of file
diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist
index 3b04333b1..1717b4443 100644
--- a/files/squid_whitelist/web_wildcard_whitelist
+++ b/files/squid_whitelist/web_wildcard_whitelist
@@ -1,4 +1,6 @@
.adfs.federation.va.gov
+.agdr.org.nz
+.agha.umccr.org
.alpinelinux.org
.amazonaws.com
.amazoncognito.com
@@ -6,10 +8,10 @@
.anaconda.org
.apache.org
.azureedge.net
-.qg3.apps.qualys.com
.bioconductor.org
.bionimbus.org
.bitbucket.org
+.blob.core.windows.net
.bloodpac.org
.braincommons.org
.bsc.es
@@ -20,6 +22,7 @@
.centos.org
.ceph.com
.chef.io
+.chordshealth.org
.clamav.net
.cloud.google.com
.cloudfront.net
@@ -30,44 +33,55 @@
.data-commons.org
.datadoghq.com
.datastage.io
+.ddog-gov.com
.diseasedatahub.org
.docker.com
.docker.io
.dockerproject.org
.dph.illinois.gov
.elasticsearch.org
+.eramba.org
.erlang-solutions.com
+.external-secrets.io
.extjs.com
.fedoraproject.org
+.gen3.org
+.genome.jp
.github.com
.githubusercontent.com
.gitlab.com
.googleapis.com
.googleusercontent.com
+.gstatic.com
.hashicorp.com
.healdata.org
.idph.illinois.gov
.immport.org
-.jenkins-ci.org
.jenkins.io
+.jenkins-ci.org
.k8s.io
+.kegg.jp
.kidsfirstdrc.org
.letsencrypt.org
.maven.org
.metacpan.org
.midrc.org
+.nesi.org.nz
.newrelic.com
.niaiddata.org
.nih.gov
.nodesource.com
.novocraft.com
.occ-data.org
+.occ-pla.net
.oicr.on.ca
.okta.com
.opensciencedatacloud.org
.osuosl.org
.paloaltonetworks.com
+.pandemicresponsecommons.org
.perl.org
+.pedscommons.org
.planx-ci.io
.planx-pla.net
.postgresql.org
@@ -75,30 +89,28 @@
.pypi.python.org
.pythonhosted.org
.pyx4me.com
+.qg3.apps.qualys.com
.quay.io
.rcsb.org
+.rstudio.com
.rubygems.org
.sa-update.pccc.com
.sencha.com
.sks-keyservers.net
.slack.com
.slack-msgs.com
+.snapcraft.io
+.snapcraftcontent.com
.sourceforge.net
.southsideweekly.com
.theanvil.io
+.tigera.io
.twistlock.com
.ubuntu.com
.ucsc.edu
.veracode.com
.virtualbox.org
+.visitdata.org
.xmission.com
.yahooapis.com
-.pandemicresponsecommons.org
-.occ-pla.net
-.rstudio.com
-.gstatic.com
-.visitdata.org
.yarnpkg.com
-.nesi.org.nz
-.agdr.org.nz
-.agha.umccr.org
diff --git a/flavors/eks/bootstrap-explicit-proxy-docker.sh b/flavors/eks/bootstrap-explicit-proxy-docker.sh
index 13d181d03..091be1b18 100644
--- a/flavors/eks/bootstrap-explicit-proxy-docker.sh
+++ b/flavors/eks/bootstrap-explicit-proxy-docker.sh
@@ -1,3 +1,9 @@
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/x-shellscript; charset="us-ascii"
+
#!/bin/bash -xe
# User data for our EKS worker nodes basic arguments to call the bootstrap script for EKS images
@@ -52,3 +58,21 @@ if [[ ! -z "${activation_id}" ]] || [[ ! -z "${customer_id}" ]]; then
rm qualys-cloud-agent.x86_64.rpm
sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${activation_id} CustomerId=${customer_id}
fi
+
+sudo yum update -y
+sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+sudo dracut -f
+# configure grub
+sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
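+# note: the fips=1 kernel argument only takes effect after the reboot triggered by the cloud-config part below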
+
+--BOUNDARY
+Content-Type: text/cloud-config; charset="us-ascii"
+
+power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+--BOUNDARY--
\ No newline at end of file
diff --git a/flavors/eks/bootstrap-with-security-updates.sh b/flavors/eks/bootstrap-with-security-updates.sh
index 1e6a0b7eb..06d962f55 100644
--- a/flavors/eks/bootstrap-with-security-updates.sh
+++ b/flavors/eks/bootstrap-with-security-updates.sh
@@ -1,3 +1,9 @@
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/x-shellscript; charset="us-ascii"
+
#!/bin/bash -xe
# User data for our EKS worker nodes basic arguments to call the bootstrap script for EKS images
@@ -76,3 +82,21 @@ if [[ ! -z "${activation_id}" ]] || [[ ! -z "${customer_id}" ]]; then
rm qualys-cloud-agent.x86_64.rpm
sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${activation_id} CustomerId=${customer_id}
fi
+
+sudo yum update -y
+sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+sudo dracut -f
+# configure grub
+sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+--BOUNDARY
+Content-Type: text/cloud-config; charset="us-ascii"
+
+power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+--BOUNDARY--
\ No newline at end of file
diff --git a/flavors/eks/bootstrap.sh b/flavors/eks/bootstrap.sh
index f5dbcf55e..7dda384d7 100644
--- a/flavors/eks/bootstrap.sh
+++ b/flavors/eks/bootstrap.sh
@@ -1,3 +1,9 @@
+MIME-Version: 1.0
+Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+--BOUNDARY
+Content-Type: text/x-shellscript; charset="us-ascii"
+
#!/bin/bash -xe
# User data for our EKS worker nodes basic arguments to call the bootstrap script for EKS images
@@ -25,3 +31,21 @@ if [[ ! -z "${activation_id}" ]] || [[ ! -z "${customer_id}" ]]; then
rm qualys-cloud-agent.x86_64.rpm
sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${activation_id} CustomerId=${customer_id}
fi
+
+sudo yum update -y
+sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+sudo dracut -f
+# configure grub
+sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+--BOUNDARY
+Content-Type: text/cloud-config; charset="us-ascii"
+
+power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+--BOUNDARY--
\ No newline at end of file
diff --git a/flavors/squid_auto/squid_running_on_docker.sh b/flavors/squid_auto/squid_running_on_docker.sh
index fda884743..812a9f738 100644
--- a/flavors/squid_auto/squid_running_on_docker.sh
+++ b/flavors/squid_auto/squid_running_on_docker.sh
@@ -8,6 +8,9 @@ DISTRO=$(awk -F '[="]*' '/^NAME/ { print $2 }' < /etc/os-release)
WORK_USER="ubuntu"
if [[ $DISTRO == "Amazon Linux" ]]; then
WORK_USER="ec2-user"
+ if [[ $(awk -F '[="]*' '/^VERSION_ID/ { print $2 }' < /etc/os-release) == "2023" ]]; then
+ DISTRO="al2023"
+ fi
fi
HOME_FOLDER="/home/${WORK_USER}"
SUB_FOLDER="${HOME_FOLDER}/cloud-automation"
@@ -60,6 +63,8 @@ fi
function install_basics(){
if [[ $DISTRO == "Ubuntu" ]]; then
apt -y install atop
+ elif [[ $DISTRO == "al2023" ]]; then
+ sudo dnf install cronie nc -y
fi
}
@@ -69,10 +74,18 @@ function install_docker(){
# Docker
###############################################################
# Install docker from sources
- curl -fsSL ${DOCKER_DOWNLOAD_URL}/gpg | sudo apt-key add -
- add-apt-repository "deb [arch=amd64] ${DOCKER_DOWNLOAD_URL} $(lsb_release -cs) stable"
- apt update
- apt install -y docker-ce
+ if [[ $DISTRO == "Ubuntu" ]]; then
+ curl -fsSL ${DOCKER_DOWNLOAD_URL}/gpg | sudo apt-key add -
+ add-apt-repository "deb [arch=amd64] ${DOCKER_DOWNLOAD_URL} $(lsb_release -cs) stable"
+ apt update
+ apt install -y docker-ce
+ else
+ sudo yum update -y
+ sudo yum install -y docker
+ # Start and enable Docker service
+ sudo systemctl start docker
+ sudo systemctl enable docker
+ fi
mkdir -p /etc/docker
cp ${SUB_FOLDER}/flavors/squid_auto/startup_configs/docker-daemon.json /etc/docker/daemon.json
chmod -R 0644 /etc/docker
@@ -176,7 +189,7 @@ EOF
# Copy the updatewhitelist.sh script to the home directory
cp ${SUB_FOLDER}/flavors/squid_auto/updatewhitelist-docker.sh ${HOME_FOLDER}/updatewhitelist.sh
chmod +x ${HOME_FOLDER}/updatewhitelist.sh
- cp ${SUB_FOLDER}/flavors/squid_auto/healthcheck.sh ${HOME_FOLDER}/healtcheck.sh
+ cp ${SUB_FOLDER}/flavors/squid_auto/healthcheck.sh ${HOME_FOLDER}/healthcheck.sh
chmod +x ${HOME_FOLDER}/healthcheck.sh
crontab -l > crontab_file; echo "*/15 * * * * ${HOME_FOLDER}/updatewhitelist.sh >/dev/null 2>&1" >> crontab_file
@@ -201,8 +214,10 @@ function install_awslogs {
if [[ $DISTRO == "Ubuntu" ]]; then
wget ${AWSLOGS_DOWNLOAD_URL} -O amazon-cloudwatch-agent.deb
dpkg -i -E ./amazon-cloudwatch-agent.deb
- else
+ elif [[ $DISTRO == "Amazon Linux" ]]; then
sudo yum install amazon-cloudwatch-agent nc -y
+ elif [[ $DISTRO == "al2023" ]]; then
+ sudo dnf install amazon-cloudwatch-agent -y
fi
# Configure the AWS logs
@@ -292,6 +307,19 @@ function main(){
--volume ${SQUID_CACHE_DIR}:${SQUID_CACHE_DIR} \
--volume ${SQUID_CONFIG_DIR}:${SQUID_CONFIG_DIR}:ro \
quay.io/cdis/squid:${SQUID_IMAGE_TAG}
+
+ max_attempts=10
+ attempt_counter=0
+ while [ $attempt_counter -lt $max_attempts ]; do
+ #((attempt_counter++))
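+    # note: with the increment above commented out, this loop never terminates;
+    # it keeps checking (and restarting) squid indefinitely, so max_attempts is
+    # effectively unused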
+ sleep 10
+ if [[ -z "$(sudo lsof -i:3128)" ]]; then
+ echo "Squid not healthy, restarting."
+ docker restart squid
+ else
+ echo "Squid healthy"
+ fi
+ done
}
main
diff --git a/flavors/squid_auto/startup_configs/squid.conf b/flavors/squid_auto/startup_configs/squid.conf
index 653026200..b1e44810a 100644
--- a/flavors/squid_auto/startup_configs/squid.conf
+++ b/flavors/squid_auto/startup_configs/squid.conf
@@ -56,7 +56,6 @@ http_access deny all
persistent_request_timeout 5 seconds
-cache_dir ufs /var/cache/squid 100 16 256
pid_filename /var/run/squid/squid.pid
# vi:syntax=squid.conf
diff --git a/flavors/vpn_nlb_central/vpnvm_new.sh b/flavors/vpn_nlb_central/vpnvm_new.sh
new file mode 100644
index 000000000..00f8306fc
--- /dev/null
+++ b/flavors/vpn_nlb_central/vpnvm_new.sh
@@ -0,0 +1,533 @@
+#!/bin/bash
+
+###############################################################
+# variables
+###############################################################
+
+MAGIC_URL="http://169.254.169.254/latest/meta-data/"
+AVAILABILITY_ZONE=$(curl -s ${MAGIC_URL}placement/availability-zone)
+PRIVATE_IPV4=$(curl -s ${MAGIC_URL}local-ipv4)
+PUBLIC_IPV4=$(curl -s ${MAGIC_URL}public-ipv4)
+REGION=$(echo ${AVAILABILITY_ZONE::-1})
+#DOCKER_DOWNLOAD_URL="https://download.docker.com/linux/ubuntu"
+AWSLOGS_DOWNLOAD_URL="https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb"
+#TERRAFORM_DOWNLOAD_URL="https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip"
+DISTRO=$(awk -F '[="]*' '/^NAME/ { print $2 }' < /etc/os-release)
+if [[ $DISTRO == "Ubuntu" ]]; then
+ WORK_USER="ubuntu"
+else
+ WORK_USER="ec2-user"
+fi
+HOME_FOLDER="/home/${WORK_USER}"
+SUB_FOLDER="${HOME_FOLDER}/cloud-automation"
+
+OPENVPN_PATH='/etc/openvpn'
+BIN_PATH="${OPENVPN_PATH}/bin"
+EASYRSA_PATH="${OPENVPN_PATH}/easy-rsa"
+VARS_PATH="${EASYRSA_PATH}/vars"
+
+#EASY-RSA Vars
+KEY_SIZE=4096
+COUNTRY="US"
+STATE="IL"
+CITY="Chicago"
+ORG="CTDS"
+EMAIL='support\@datacommons.io'
+KEY_EXPIRE=365
+
+#OpenVPN
+PROTO=tcp
+
+
+###############################################################
+# get any variables we want coming from terraform variables
+###############################################################
+if [ $# -eq 0 ];
+then
+ echo "No arguments supplied, something is wrong"
+ exit 1
+else
+ #OIFS=$IFS
+ echo $1
+ IFS=';' read -ra ADDR <<< "$1"
+ echo ${ADDR[@]}
+ for i in "${ADDR[@]}"; do
+ echo $i
+ if [[ $i = *"cwl_group"* ]];
+ then
+ CWL_GROUP="${CWL_GROUP:-$(echo ${i} | cut -d= -f2)}"
+ elif [[ ${i} = *"vpn_nlb_name"* ]];
+ then
+ VPN_NLB_NAME="$(echo ${i} | cut -d= -f2)"
+ elif [[ ${i} = *"cloud_name"* ]];
+ then
+ CLOUD_NAME="$(echo ${i} | cut -d= -f2)"
+ elif [[ ${i} = *"csoc_vpn_subnet"* ]];
+ then
+ CSOC_VPN_SUBNET="$(echo ${i} | cut -d= -f2)"
+ elif [[ ${i} = *"csoc_vm_subnet"* ]];
+ then
+ CSOC_VM_SUBNET="$(echo ${i} | cut -d= -f2)"
+ elif [[ $i = *"account_id"* ]];
+ then
+ ACCOUNT_ID="$(echo ${i} | cut -d= -f2)"
+ elif [[ $i = *"alternate_cwlg"* ]];
+ then
+ CWL_GROUP="$(echo ${i} | cut -d= -f2)"
+ fi
+ done
+ echo $1
+fi
+
+S3_BUCKET="vpn-certs-and-files-${VPN_NLB_NAME}"
+
+function logs_helper(){
+ echo -e "****************** ${1} ******************"
+}
+
+function install_basics() {
+
+ logs_helper "Installing Basics"
+ if [[ $DISTRO == "Ubuntu" ]]; then
+ apt -y install python3-pip build-essential sipcalc wget curl jq apt-transport-https ca-certificates software-properties-common fail2ban libyaml-dev
+ apt -y install postfix mailutils python-virtualenv uuid-runtime lighttpd net-tools
+ apt -y install openvpn bridge-utils libssl-dev openssl zlib1g-dev easy-rsa haveged zip mutt sipcalc python-dev python3-venv
+ # For openVPN
+ debconf-set-selections <<< "postfix postfix/mailname string planx-pla.net"
+ debconf-set-selections <<< "postfix postfix/main_mailer_type string 'Internet Site'"
+ else
+ amazon-linux-extras install epel
+ yum -y -q install epel-release iptables-services
+ yum -y -q install python3-pip python3-devel gcc sipcalc wget curl jq ca-certificates software-properties-common fail2ban libyaml-dev
+ yum -y -q install postfix mailutils python-virtualenv uuid-runtime lighttpd net-tools
+ yum -y -q install openvpn bridge-utils openssl zlib1g-dev easy-rsa haveged zip mutt sipcalc python-dev python3-venv
+ fi
+ pip3 install awscli
+ useradd --shell /bin/nologin --system openvpn
+
+ logs_helper "Basics installed"
+}
+
+
+function configure_basics() {
+
+ logs_helper "Configuring Basics"
+
+ local dest_path="/root/openvpn_management_scripts"
+ local src_path="${SUB_FOLDER}/files/openvpn_management_scripts"
+ cp -r ${src_path} /root
+
+ # Different buckets for different CSOC vpn environments
+ sed -i "s/WHICHVPN/${S3_BUCKET}\/${VPN_NLB_NAME}/" ${dest_path}/push_to_s3.sh
+ sed -i "s/WHICHVPN/${S3_BUCKET}\/${VPN_NLB_NAME}/" ${dest_path}/recover_from_s3.sh
+ sed -i "s/WHICHVPN/${S3_BUCKET}\/${VPN_NLB_NAME}/" ${dest_path}/send_email.sh
+
+ # Replace the User variable for hostname, VPN subnet and VM subnet
+ #sed -i "s/SERVERNAME/${VPN_NLB_NAME}/" ${dest_path}/csoc_vpn_user_variable
+ #sed -i "s/CLOUDNAME/${CLOUD_NAME}/" ${dest_path}/csoc_vpn_user_variable
+
+ #VPN_SUBNET=${CSOC_VPN_SUBNET}
+ #VPN_SUBNET_BASE=$( sipcalc $VPN_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"')
+ #VPN_SUBNET_MASK_BITS=$( sipcalc $VPN_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' )
+ #sed -i "s/VPN_SUBNET/$VPN_SUBNET_BASE\/$VPN_SUBNET_MASK_BITS/" ${dest_path}/csoc_vpn_user_variable
+
+ #VM_SUBNET=${CSOC_VM_SUBNET}
+ #VM_SUBNET_BASE=$( sipcalc $VM_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"')
+ #VM_SUBNET_MASK_BITS=$( sipcalc $VM_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' )
+ #sed -i "s/VM_SUBNET/$VM_SUBNET_BASE\/$VM_SUBNET_MASK_BITS/" ${dest_path}/csoc_vpn_user_variable
+
+ echo "aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ && ${dest_path}/recover_from_s3.sh"
+ aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ && ${dest_path}/recover_from_s3.sh
+
+ logs_helper "Copying modified scripts to /etc/openvpn"
+ cp -vr /root/openvpn_management_scripts /etc/openvpn/
+
+ logs_helper "Basics configured"
+
+}
+
+
+function configure_awscli() {
+
+ logs_helper "Configuring AWS"
+ mkdir -p ${HOME_FOLDER}/.aws
+  cat > ${HOME_FOLDER}/.aws/config <<EOT
+[default]
+output = json
+region = us-east-1
+
+[profile csoc]
+output = json
+region = us-east-1
+EOT
+
+ mkdir -p /root/.aws
+ cat > /root/.aws/config <> ${config_json} < /root/server.pem
+ fi
+
+ export FQDN=${CLOUD_NAME}
+ export cloud=${VPN_NLB_NAME}
+ export SERVER_PEM="/root/server.pem"
+ export VM_SUBNET=${CSOC_VM_SUBNET}
+ export VM_SUBNET_BASE=$( sipcalc $VM_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"')
+ export VM_SUBNET_MASK=$( sipcalc $VM_SUBNET | perl -ne 'm|Network mask\s+-\s+(\S+)| && print "$1"' )
+ export VM_SUBNET_MASK_BITS=$( sipcalc $VM_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' )
+ export VPN_SUBNET=${CSOC_VPN_SUBNET}
+ export VPN_SUBNET_BASE=$( sipcalc $VPN_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"')
+ export VPN_SUBNET_MASK=$( sipcalc $VPN_SUBNET | perl -ne 'm|Network mask\s+-\s+(\S+)| && print "$1"' )
+ export VPN_SUBNET_MASK_BITS=$( sipcalc $VPN_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' )
+ export server_pem="/root/server.pem"
+ echo "*******"
+ echo "${FQDN} -- ${cloud} -- ${SERVER_PEM} -- ${VPN_SUBNET} -- ${VPN_SUBNET_BASE} -- ${VPN_SUBNET_MASK_BITS} --/ ${VM_SUBNET} -- ${VM_SUBNET_BASE} -- ${VM_SUBNET_MASK_BITS}"
+ echo "*******"
+ #export FQDN="$SERVERNAME.planx-pla.net"; export cloud="$CLOUDNAME"; export SERVER_PEM="/root/server.pem";
+
+ #cp /etc/openvpn/bin/templates/lighttpd.conf.template /etc/lighttpd/lighttpd.conf
+ #mkdir -p --mode=750 /var/www/qrcode
+ #chown openvpn:www-data /var/www/qrcode
+ #mkdir -p /etc/lighttpd/certs
+ #cp /root/server.pem /etc/lighttpd/certs/server.pem
+ #service lighttpd restart
+
+ #systemctl restart openvpn
+
+ logs_helper "openVPN init complete"
+
+}
+
+function install_easyrsa() {
+
+ logs_helper "Installing easyRSA"
+ if [[ -f $EASYRSA_PATH/easyrsa ]];
+ then
+ logs_helper "easyRSA already installed"
+ return
+ fi
+ easyRsaVer="3.1.7"
+  wget https://github.com/OpenVPN/easy-rsa/releases/download/v${easyRsaVer}/EasyRSA-${easyRsaVer}.tgz
+ # extract to a folder called easyrsa
+ tar xvf EasyRSA-${easyRsaVer}.tgz
+ mv EasyRSA-${easyRsaVer}/ $EASYRSA_PATH
+ rm EasyRSA-${easyRsaVer}.tgz
+ cp "$OPENVPN_PATH/bin/templates/vars.template" $VARS_PATH
+
+# local easy_rsa_dir="$EASYRSA_PATH"
+# local exthost="$FQDN"
+# local ou="$cloud"
+# local key_name="$ou-OpenVPN"
+
+ perl -p -i -e "s|#EASY_RSA_DIR#|${EASYRSA_PATH}|" $VARS_PATH
+ perl -p -i -e "s|#EXTHOST#|${FQDN}|" $VARS_PATH
+ perl -p -i -e "s|#KEY_SIZE#|${KEY_SIZE}|" $VARS_PATH
+ perl -p -i -e "s|#COUNTRY#|${COUNTRY}|" $VARS_PATH
+ perl -p -i -e "s|#STATE#|${STATE}|" $VARS_PATH
+ perl -p -i -e "s|#CITY#|${CITY}|" $VARS_PATH
+ perl -p -i -e "s|#ORG#|${ORG}|" $VARS_PATH
+ perl -p -i -e "s|#EMAIL#|${EMAIL}|" $VARS_PATH
+ perl -p -i -e "s|#OU#|${cloud}|" $VARS_PATH
+ perl -p -i -e "s|#KEY_NAME#|${cloud}-OpenVPN|" $VARS_PATH
+ perl -p -i -e "s|#KEY_EXPIRE#|${KEY_EXPIRE}|" $VARS_PATH
+
+ sed -i 's/^subjectAltName/#subjectAltName/' $EASYRSA_PATH/openssl-*.cnf
+ logs_helper "easyRSA complete"
+}
+
+function install_custom_scripts() {
+
+ logs_helper "installing custom scripts"
+ cd $OPENVPN_PATH
+
+ #pull our openvpn scripts
+ #cp -r /root/openvpn_management_scripts /etc/openvpn/
+ ln -sfn openvpn_management_scripts bin
+ cd $BIN_PATH
+ python3 -m venv .venv
+ #virtualenv .venv
+ #This is needed or else you get : .venv/bin/activate: line 57: PS1: unbound variable
+ set +u
+ # ( source .venv/bin/activate; pip install pyotp pyqrcode bcrypt )
+ ( source .venv/bin/activate; pip3 install pyotp qrcode bcrypt )
+ set -u
+
+ logs_helper "custom scripts done"
+}
+
+install_settings() {
+
+ logs_helper "installing settings"
+ SETTINGS_PATH="$BIN_PATH/settings.sh"
+ cp "$OPENVPN_PATH/bin/templates/settings.sh.template" "$SETTINGS_PATH"
+ perl -p -i -e "s|#FQDN#|$FQDN|" $SETTINGS_PATH
+ perl -p -i -e "s|#EMAIL#|$EMAIL|" $SETTINGS_PATH
+ perl -p -i -e "s|#CLOUD_NAME#|${cloud}|" $SETTINGS_PATH
+
+ logs_helper "settings installed"
+}
+
+build_PKI() {
+
+ logs_helper "building pki"
+ cd $EASYRSA_PATH
+ # ln -s openssl-1.0.0.cnf openssl.cnf
+ echo "This is long"
+ # ./easyrsa clean-all nopass
+ ./easyrsa init-pki
+ ./easyrsa build-ca nopass
+ ./easyrsa gen-dh
+ ./easyrsa gen-crl
+ ./easyrsa build-server-full $CLOUD_NAME nopass
+ # ./easyrsa gen-req $VPN_NLB_NAME.planx-pla.net nopass
+ openvpn --genkey --secret ta.key
+ mv ta.key $EASYRSA_PATH/pki/ta.key
+
+ #This will error but thats fine, the crl.pem was created (without it openvpn server crashes)
+ set +e
+ ./revoke-full client &>/dev/null || true
+ set -e
+ logs_helper "pki done"
+
+}
+
+configure_ovpn() {
+
+ logs_helper "configuring openvpn"
+ if [[ $DISTRO == "Ubuntu" ]]; then
+ OVPNCONF_PATH="/etc/openvpn/openvpn.conf"
+ else
+ OVPNCONF_PATH="/etc/openvpn/server/server.conf"
+ fi
+ cp "$OPENVPN_PATH/bin/templates/openvpn.conf.template" "$OVPNCONF_PATH"
+
+ perl -p -i -e "s|#FQDN#|$FQDN|" $OVPNCONF_PATH
+
+ perl -p -i -e "s|#VPN_SUBNET_BASE#|$VPN_SUBNET_BASE|" $OVPNCONF_PATH
+ perl -p -i -e "s|#VPN_SUBNET_MASK#|$VPN_SUBNET_MASK|" $OVPNCONF_PATH
+
+ perl -p -i -e "s|#VM_SUBNET_BASE#|$VM_SUBNET_BASE|" $OVPNCONF_PATH
+ perl -p -i -e "s|#VM_SUBNET_MASK#|$VM_SUBNET_MASK|" $OVPNCONF_PATH
+
+ perl -p -i -e "s|#PROTO#|$PROTO|" $OVPNCONF_PATH
+
+ if [[ $DISTRO == "Ubuntu" ]]; then
+ systemctl restart openvpn
+ else
+ systemctl enable openvpn-server@server
+ systemctl start openvpn-server@server
+ fi
+
+ logs_helper "openvpn configured"
+}
+
+tweak_network() {
+
+ logs_helper "tweaking network"
+ local nettweaks_path="$OPENVPN_PATH/bin/network_tweaks.sh"
+ cp "$OPENVPN_PATH/bin/templates/network_tweaks.sh.template" "${nettweaks_path}"
+ perl -p -i -e "s|#VPN_SUBNET#|$VPN_SUBNET|" ${nettweaks_path}
+ perl -p -i -e "s|#VM_SUBNET#|$VM_SUBNET|" ${nettweaks_path}
+ perl -p -i -e "s|#PROTO#|$PROTO|" ${nettweaks_path}
+
+ chmod +x ${nettweaks_path}
+ ${nettweaks_path}
+
+ # Disable firewall in amazonlinux
+ systemctl stop firewalld
+ systemctl disable firewalld
+
+ #cp /etc/rc.local /etc/rc.local.bak
+ #sed -i 's/^exit/#exit/' /etc/rc.local
+ #echo /etc/openvpn/bin/network_tweaks.sh >> /etc/rc.local
+ #echo exit 0 >> /etc/rc.local
+
+
+ logs_helper "network tweaked"
+
+}
+
+install_webserver() {
+
+
+ logs_helper "installing webserver"
+ #Webserver used for QRCodes
+ if [[ $DISTRO == "Ubuntu" ]]; then
+ apt -y install lighttpd
+ else
+ yum -y install lighttpd
+ fi
+ cp "$OPENVPN_PATH/bin/templates/lighttpd.conf.template" /etc/lighttpd/lighttpd.conf
+
+ mkdir -p --mode=750 /var/www/qrcode
+ chown openvpn:www-data /var/www/qrcode
+
+ if [ -f $SERVER_PEM ]
+ then
+ mkdir --mode=700 /etc/lighttpd/certs
+ cp $SERVER_PEM /etc/lighttpd/certs/server.pem
+ service lighttpd restart
+ fi
+
+ logs_helper "webserver installed"
+}
+
+
+install_cron() {
+ cp "$OPENVPN_PATH/bin/templates/cron.template" /etc/cron.d/openvpn
+}
+
+misc() {
+
+ logs_helper "installing misc"
+ cd $OPENVPN_PATH
+ mkdir -p easy-rsa/pki/ovpn_files
+ ln -sfn easy-rsa/pki/ovpn_files
+
+ #If openvpn fails to start its cause perms. Init needs root rw to start, but service needs openvpn rw to work
+ mkdir --mode 775 -p clients.d/
+ mkdir --mode 775 -p clients.d/tmp/
+ chown root:openvpn clients.d/tmp/
+
+ mkdir -p easy-rsa/pki/ovpn_files_seperated/
+ mkdir -p easy-rsa/pki/ovpn_files_systemd/
+ mkdir -p easy-rsa/pki/ovpn_files_resolvconf/
+
+ touch user_passwd.csv
+
+ mkdir -p environments
+ mkdir -p client-restrictions
+
+ chown -R openvpn:openvpn easy-rsa/ user_passwd.csv clients.d/tmp/
+ #ahhem.
+ chown :root /etc/openvpn/clients.d/tmp
+ chmod g+rwx /etc/openvpn/clients.d/tmp
+ # systemctl restart openvpn
+
+ logs_helper "misc done"
+}
+
+function main() {
+ install_basics
+ configure_awscli
+ configure_basics
+
+ if [[ $DISTRO == "Ubuntu" ]]; then
+ install_awslogs
+ fi
+ install_openvpn
+
+ set -e
+ set -u
+ install_custom_scripts
+ # if [! -d "/etc/openvpn/easy-rsa"]; then
+ aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ || install_easyrsa
+
+ install_settings
+
+ # if [! -d "/etc/openvpn/easy-rsa"]; then
+ aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ || build_PKI
+ #fi
+ misc
+ configure_ovpn
+ tweak_network
+
+ install_cron
+
+
+ mkdir -p --mode=750 /var/www/qrcode
+
+ logs_helper "openvpn setup complete"
+
+}
+
+main
diff --git a/gen3/bin/awsrole.sh b/gen3/bin/awsrole.sh
index b0b4f0cac..dd19ea7a4 100644
--- a/gen3/bin/awsrole.sh
+++ b/gen3/bin/awsrole.sh
@@ -20,13 +20,22 @@ gen3_awsrole_help() {
# NOTE: service-account to role is 1 to 1
#
# @param serviceAccount to link to the role
+# @param flag (optional) - specify a flag to use a different trust policy
#
function gen3_awsrole_ar_policy() {
local serviceAccount="$1"
shift || return 1
+ if [[ -z $1 ]] || [[ $1 == -* ]]; then
+ namespace=$(gen3 db namespace)
+ else
+ namespace=$1
+ shift
+ fi
local issuer_url
local account_id
local vpc_name
+ local flag=$flag
+
vpc_name="$(gen3 api environment)" || return 1
issuer_url="$(aws eks describe-cluster \
--name ${vpc_name} \
@@ -37,7 +46,42 @@ function gen3_awsrole_ar_policy() {
local provider_arn="arn:aws:iam::${account_id}:oidc-provider/${issuer_url}"
- cat - < /dev/null; then
- g3kubectl create sa "$saName" || return 1
+ if ! g3kubectl get sa "$saName" --namespace=$namespace > /dev/null; then
+ g3kubectl create sa "$saName" --namespace=$namespace || return 1
fi
- g3kubectl annotate --overwrite sa "$saName" "eks.amazonaws.com/role-arn=$roleArn"
+ g3kubectl annotate --overwrite sa "$saName" "eks.amazonaws.com/role-arn=$roleArn" --namespace=$namespace
}
#
@@ -110,14 +161,19 @@ _get_entity_type() {
#
# @param rolename
# @param saName for assume-role policy document
+# @param flag (optional) - specify a flag to use a different trust policy
#
_tfplan_role() {
local rolename="$1"
shift || return 1
local saName="$1"
shift || return 1
+ local namespace="$1"
local arDoc
- arDoc="$(gen3_awsrole_ar_policy "$saName")" || return 1
+ local flag=$flag
+
+ arDoc="$(gen3_awsrole_ar_policy "$saName" "$namespace" "$flag")" || return 1
+
gen3 workon default "${rolename}_role"
gen3 cd
cat << EOF > config.tfvars
@@ -132,7 +188,7 @@ EOF
}
#
-# Util for applying tfplan
+# Util for applying tfplan
#
_tfapply_role() {
local rolename=$1
@@ -170,6 +226,15 @@ gen3_awsrole_create() {
gen3_log_err "use: gen3 awsrole create roleName saName"
return 1
fi
+ if [[ -z $1 ]] || [[ $1 == -* ]]; then
+ namespace=$(gen3 db namespace)
+ else
+ namespace=$1
+ shift
+ fi
+ if [[ ! -z $1 ]]; then
+ flag=$1
+ fi
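+  # extended usage (sketch): gen3 awsrole create <rolename> <saName> [namespace] [flag]
+  # namespace falls back to "gen3 db namespace" when omitted or when the next arg is a flag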
# do simple validation of name
local regexp="^[a-z][a-z0-9\-]*$"
if [[ ! $rolename =~ $regexp ]];then
@@ -183,6 +248,7 @@ EOF
return 1
fi
+
# check if the name is already used by another entity
local entity_type
entity_type=$(_get_entity_type $rolename)
@@ -190,7 +256,7 @@ EOF
# That name is already used.
if [[ "$entity_type" =~ role ]]; then
gen3_log_info "A role with that name already exists"
- gen3_awsrole_sa_annotate "$saName" "$rolename"
+ gen3_awsrole_sa_annotate "$saName" "$rolename" "$namespace"
return $?
else
gen3_log_err "A $entity_type with that name already exists"
@@ -199,14 +265,16 @@ EOF
fi
TF_IN_AUTOMATION="true"
- if ! _tfplan_role $rolename $saName; then
+
+ if ! _tfplan_role $rolename $saName $namespace $flag; then
return 1
fi
+
if ! _tfapply_role $rolename; then
return 1
fi
- gen3_awsrole_sa_annotate "$saName" "$rolename"
+ gen3_awsrole_sa_annotate "$saName" "$rolename" "$namespace"
}
#
@@ -350,4 +418,4 @@ gen3_awsrole() {
# Let testsuite source file
if [[ -z "$GEN3_SOURCE_ONLY" ]]; then
gen3_awsrole "$@"
-fi
+fi
\ No newline at end of file
diff --git a/gen3/bin/create-es7-cluster.sh b/gen3/bin/create-es7-cluster.sh
new file mode 100644
index 000000000..553dc2652
--- /dev/null
+++ b/gen3/bin/create-es7-cluster.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
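+# Create a second Elasticsearch 7.10 domain ("<environment>-gen3-metadata-2") mirroring the
+# instance, EBS, VPC, access-policy and encryption settings of the existing
+# "<environment>-gen3-metadata" domain, then wait for it to become available.
+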
+# Save the new and old cluster names to vars
+environment=`gen3 api environment`
+existing_cluster_name="$environment-gen3-metadata"
+new_cluster_name="$environment-gen3-metadata-2"
+
+# Gather existing cluster information
+cluster_info=$(aws es describe-elasticsearch-domain --domain-name "$existing_cluster_name")
+
+# Extract relevant information from the existing cluster
+instance_type=`echo "$cluster_info" | jq -r '.DomainStatus.ElasticsearchClusterConfig.InstanceType'`
+instance_count=`echo "$cluster_info" | jq -r '.DomainStatus.ElasticsearchClusterConfig.InstanceCount'`
+volume_type=`echo "$cluster_info" | jq -r '.DomainStatus.EBSOptions.VolumeType'`
+volume_size=`echo "$cluster_info" | jq -r '.DomainStatus.EBSOptions.VolumeSize'`
+vpc_name=`echo "$cluster_info" | jq -r '.DomainStatus.VPCOptions.VPCId'`
+subnet_ids=`echo "$cluster_info" | jq -r '.DomainStatus.VPCOptions.SubnetIds[]'`
+security_groups=`echo "$cluster_info" | jq -r '.DomainStatus.VPCOptions.SecurityGroupIds[]'`
+access_policies=`echo "$cluster_info" | jq -r '.DomainStatus.AccessPolicies'`
+kms_key_id=`echo "$cluster_info" | jq -r '.DomainStatus.EncryptionAtRestOptions.KmsKeyId'`
+
+# Check if the new Elasticsearch cluster name already exists
+new_cluster=`aws es describe-elasticsearch-domain --domain-name "$new_cluster_name"`
+
+if [ -n "$new_cluster" ]; then
+ echo "Cluster $new_cluster_name already exists"
+else
+ echo "Cluster does not exist- creating..."
+ # Create the new Elasticsearch cluster
+ aws es create-elasticsearch-domain \
+ --domain-name "$new_cluster_name" \
+ --elasticsearch-version "7.10" \
+ --elasticsearch-cluster-config \
+ "InstanceType=$instance_type,InstanceCount=$instance_count" \
+ --ebs-options \
+ "EBSEnabled=true,VolumeType=$volume_type,VolumeSize=$volume_size" \
+ --vpc-options "SubnetIds=${subnet_ids[*]},SecurityGroupIds=${security_groups[*]}" \
+ --access-policies "$access_policies" \
+ --encryption-at-rest-options "Enabled=true,KmsKeyId=$kms_key_id"\
+    --node-to-node-encryption-options "Enabled=true" \
+ > /dev/null 2>&1
+
+ # Wait for the new cluster to be available
+ sleep_duration=60
+ max_retries=10
+ retry_count=0
+
+ while [ $retry_count -lt $max_retries ]; do
+ cluster_status=$(aws es describe-elasticsearch-domain --domain-name "$new_cluster_name" | jq -r '.DomainStatus.Processing')
+ if [ "$cluster_status" != "true" ]; then
+ echo "New cluster is available."
+ break
+ fi
+ sleep $sleep_duration
+ ((retry_count++))
+ done
+
+ if [ $retry_count -eq $max_retries ]; then
+ echo "New cluster creation may still be in progress. Please check the AWS Management Console for the status."
+ fi
+fi
diff --git a/gen3/bin/db.sh b/gen3/bin/db.sh
index 5e1c2c30a..63995fc0e 100644
--- a/gen3/bin/db.sh
+++ b/gen3/bin/db.sh
@@ -33,6 +33,7 @@ gen3_db_farm_json() {
#
gen3_db_reset() {
local serviceName
+ local force
if [[ $# -lt 1 || -z "$1" ]]; then
gen3_log_err "gen3_db_reset" "must specify serviceName"
return 1
@@ -43,6 +44,8 @@ gen3_db_reset() {
gen3_log_err "gen3_db_reset" "may not reset peregrine - only sheepdog"
return 1
fi
+ shift
+ force=$1
# connect as the admin user for the db server associated with the service
local credsTemp="$(mktemp "$XDG_RUNTIME_DIR/credsTemp.json_XXXXXX")"
@@ -81,6 +84,11 @@ gen3_db_reset() {
fi
local result
+ if [[ $force == "--force" ]]; then
+ gen3_log_warn "--force flag applied - Dropping all connections to the db before dropping"
+ echo "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname='${dbname}' AND pid <> pg_backend_pid();" | gen3 psql "$serverName"
+ result=$?
+ fi
echo "DROP DATABASE \"${dbname}\"; CREATE DATABASE \"${dbname}\"; GRANT ALL ON DATABASE \"$dbname\" TO \"$username\" WITH GRANT OPTION;" | gen3 psql "$serverName"
result=$?
if [[ "$serviceName" == "sheepdog" ]]; then
diff --git a/gen3/bin/dbbackup.sh b/gen3/bin/dbbackup.sh
new file mode 100644
index 000000000..eb9611a90
--- /dev/null
+++ b/gen3/bin/dbbackup.sh
@@ -0,0 +1,212 @@
+#!/bin/bash
+
+####################################################################################################
+# Script: dbbackup.sh
+#
+# Description:
+# This script facilitates the management of database backups within the gen3 environment. It is
+# equipped to establish policies, service accounts, roles, and S3 buckets. Depending on the
+# command provided, it will either initiate a database dump or perform a restore.
+#
+# Usage:
+#    gen3 dbbackup [dump|restore|va-dump]
+#
+# dump - Initiates a database dump, creating the essential AWS resources if they are absent.
+# The dump operation is intended to be executed from the namespace/commons that requires
+# the backup.
+# restore - Initiates a database restore, creating the essential AWS resources if they are absent.
+# The restore operation is meant to be executed in the target namespace, where the backup
+# needs to be restored.
+#
+# Notes:
+# This script extensively utilizes the AWS CLI and the gen3 CLI. Proper functioning demands a
+# configured gen3 environment and the availability of the necessary CLI tools.
+#
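+# Example (typical invocations):
+#   gen3 dbbackup dump       # run from the namespace that needs the backup
+#   gen3 dbbackup restore    # run in the target namespace where the backup is restored
+#   gen3 dbbackup va-dump    # variant dump used for va-testing
+#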
+####################################################################################################
+
+# Exit on error
+#set -e
+
+# Print commands before executing
+#set -x
+
+#trap 'echo "Error at Line $LINENO"' ERR
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/lib/kube-setup-init"
+
+policy_name="bucket_reader_writer_gen3_db_backup"
+account_id=$(aws sts get-caller-identity --query "Account" --output text)
+vpc_name="$(gen3 api environment)"
+namespace="$(gen3 db namespace)"
+sa_name="dbbackup-sa"
+bucket_name="gen3-db-backups-${account_id}"
+
+gen3_log_info "policy_name: $policy_name"
+gen3_log_info "account_id: $account_id"
+gen3_log_info "vpc_name: $vpc_name"
+gen3_log_info "namespace: $namespace"
+gen3_log_info "sa_name: $sa_name"
+gen3_log_info "bucket_name: $bucket_name"
+
+
+# Create an S3 access policy if it doesn't exist
+create_policy() {
+ # Check if policy exists
+ if ! aws iam list-policies --query "Policies[?PolicyName == '$policy_name'] | [0].Arn" --output text | grep -q "arn:aws:iam"; then
+ # Create the S3 access policy - policy document
+ access_policy=$(cat <<-EOM
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "s3:CreateBucket",
+ "s3:PutObject",
+ "s3:GetObject",
+ "s3:ListBucket",
+ "s3:DeleteObject"
+ ],
+ "Resource": [
+ "arn:aws:s3:::gen3-db-backups-*"
+ ]
+ }
+ ]
+}
+EOM
+ )
+
+ # Create the S3 access policy from the policy document
+ policy_arn=$(aws iam create-policy --policy-name "$policy_name" --policy-document "$access_policy" --query "Policy.Arn" --output text)
+ gen3_log_info "policy_arn: $policy_arn"
+ else
+ gen3_log_info "Policy $policy_name already exists, skipping policy creation."
+ policy_arn=$(aws iam list-policies --query "Policies[?PolicyName == '$policy_name'] | [0].Arn" --output text | grep "arn:aws:iam" | head -n 1)
+ gen3_log_info "policy_arn: $policy_arn"
+ fi
+}
+
+
+# Create or update the Service Account and its corresponding IAM Role
+create_service_account_and_role() {
+ cluster_arn=$(kubectl config current-context)
+ eks_cluster=$(echo "$cluster_arn" | awk -F'/' '{print $2}')
+ oidc_url=$(aws eks describe-cluster --name $eks_cluster --query 'cluster.identity.oidc.issuer' --output text | sed -e 's/^https:\/\///')
+ role_name="${vpc_name}-${namespace}-${sa_name}-role"
+ role_arn="arn:aws:iam::${account_id}:role/${role_name}"
+ local trust_policy=$(mktemp -p "$XDG_RUNTIME_DIR" "tmp_policy.XXXXXX")
+ gen3_log_info "trust_policy: $trust_policy"
+ gen3_log_info "eks_cluster: $eks_cluster"
+ gen3_log_info "oidc_url: $oidc_url"
+ gen3_log_info "role_name: $role_name"
+
+
+ cat > ${trust_policy} <&1; then
+ gen3_log_info "Updating existing role: $role_name"
+ aws iam update-assume-role-policy --role-name $role_name --policy-document "file://$trust_policy"
+ else
+ gen3_log_info "Creating new role: $role_name"
+ aws iam create-role --role-name $role_name --assume-role-policy-document "file://$trust_policy"
+ fi
+
+ # Attach the policy to the IAM role
+ aws iam attach-role-policy --role-name $role_name --policy-arn $policy_arn
+
+ # Create the Kubernetes service account if it doesn't exist
+ if ! kubectl get serviceaccount -n $namespace $sa_name 2>&1; then
+ kubectl create serviceaccount -n $namespace $sa_name
+ fi
+ # Annotate the KSA with the IAM role ARN
+ gen3_log_info "Annotating Service Account with IAM role ARN"
+ kubectl annotate serviceaccount -n ${namespace} ${sa_name} eks.amazonaws.com/role-arn=${role_arn} --overwrite
+
+}
+
+# Create an S3 bucket if it doesn't exist
+create_s3_bucket() {
+ # Check if bucket already exists
+ if aws s3 ls "s3://$bucket_name" 2>&1 | grep -q 'NoSuchBucket'; then
+ gen3_log_info "Bucket does not exist, creating..."
+ aws s3 mb "s3://$bucket_name"
+ else
+ gen3_log_info "Bucket $bucket_name already exists, skipping bucket creation."
+ fi
+}
+
+
+# Function to trigger the database backup job
+db_dump() {
+ gen3 job run psql-db-prep-dump
+}
+
+
+# Function to trigger the database backup restore job
+db_restore() {
+ gen3 job run psql-db-prep-restore
+}
+
+va_testing_db_dump() {
+ gen3 job run psql-db-dump-va-testing
+}
+
+
+# main function to determine whether dump or restore
+main() {
+ case "$1" in
+ dump)
+ gen3_log_info "Triggering database dump..."
+ create_policy
+ create_service_account_and_role
+ create_s3_bucket
+ db_dump
+ ;;
+ restore)
+ gen3_log_info "Triggering database restore..."
+ create_policy
+ create_service_account_and_role
+ create_s3_bucket
+ db_restore
+ ;;
+ va-dump)
+ gen3_log_info "Running a va-testing DB dump..."
+ create_policy
+ create_service_account_and_role
+ create_s3_bucket
+ va_testing_db_dump
+ ;;
+ *)
+ echo "Invalid command. Usage: gen3 dbbackup [dump|restore|va-dump]"
+ return 1
+ ;;
+ esac
+}
+
+main "$1"
diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh
index 6ff1c31a1..36af791ef 100644
--- a/gen3/bin/ecr.sh
+++ b/gen3/bin/ecr.sh
@@ -30,6 +30,10 @@ accountList=(
895962626746
980870151884
205252583234
+885078588865
+922467707295
+533267425233
+048463324059
)
principalStr=""
@@ -69,18 +73,34 @@ ecrReg="707767160287.dkr.ecr.us-east-1.amazonaws.com"
# lib -------------------------------
gen3_ecr_login() {
- if gen3_time_since ecr-login is 36000; then
+ if [[ -S /var/run/docker.sock ]]; then
+ if gen3_time_since ecr-login is 36000; then
# re-authenticate every 10 hours
- aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin "707767160287.dkr.ecr.us-east-1.amazonaws.com" 1>&2 || exit 1
+ aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin "707767160287.dkr.ecr.us-east-1.amazonaws.com" 1>&2 || exit 1
+ fi
+ elif [[ -S /var/run/containerd/containerd.sock ]]; then
+ gen3_log_info "Containerd found, logging in during each ctr command"
+ loginCommand="-u AWS:$(aws ecr get-login-password --region us-east-1)"
+ else
+ gen3_log_err "No container runtime found. Exiting"
+ exit 1
fi
}
gen3_quay_login() {
if [[ -f ~/Gen3Secrets/quay/login ]]; then
- if gen3_time_since quay-login is 36000; then
- cat ~/Gen3Secrets/quay/login | docker login --username cdis+gen3 --password-stdin quay.io
+ if [[ -S /var/run/docker.sock ]]; then
+ if gen3_time_since quay-login is 36000; then
+ cat ~/Gen3Secrets/quay/login | docker login --username cdis+gen3 --password-stdin quay.io
+ fi
+ elif [[ -S /var/run/containerd/containerd.sock ]]; then
+ gen3_log_info "Containerd found, logging in during each ctr command"
+ loginCommand="-u \"cdis+gen3\":\"$(cat ~/Gen3Secrets/quay/login)\""
+ else
+ gen3_log_err "No container runtime found. Exiting"
+ exit 1
fi
- else
+ else
gen3_log_err "Place credentials for the quay robot account (cdis+gen3) in this file ~/Gen3Secrets/quay/login"
exit 1
fi
@@ -95,7 +115,8 @@ gen3_quay_login() {
gen3_ecr_copy_image() {
local srcTag="$1"
local destTag="$2"
- if [[ "$destTag" == *"quay.io"* ]]; then
+ loginCommand=""
+ if [[ "$destTag" == *"quay.io"* ]]; then
gen3_quay_login || return 1
else
gen3_ecr_login || return 1
@@ -106,12 +127,23 @@ gen3_ecr_copy_image() {
fi
shift
shift
- (docker pull "$srcTag" && \
- docker tag "$srcTag" "$destTag" && \
- docker push "$destTag"
- ) || return 1
+ if [[ -S /var/run/docker.sock ]]; then
+ (docker pull "$srcTag" && \
+ docker tag "$srcTag" "$destTag" && \
+ docker push "$destTag"
+ ) || return 1
+ docker image rm "$srcTag" "$destTag"
+ elif [[ -S /var/run/containerd/containerd.sock ]]; then
+ (ctr image pull "$srcTag" --all-platforms $loginCommand && \
+ ctr image tag "$srcTag" "$destTag" && \
+ ctr image push "$destTag" $loginCommand
+ ) || return 1
+ ctr image rm "$srcTag" "$destTag"
+ else
+ gen3_log_err "No container runtime found. Exiting"
+ exit 1
+ fi
# save disk space
- docker image rm "$srcTag" "$destTag"
return 0
}
@@ -176,7 +208,7 @@ gen3_ecr_update_all() {
echo $repoList
for repo in $repoList; do
gen3_ecr_update_policy $repo
- done
+ done
}
# Check if the Quay image exists in ECR repository
@@ -201,7 +233,7 @@ gen3_ecr_describe_image() {
# @param repoName
gen3_ecr_create_repo() {
local repoName="gen3/$1"
- aws ecr create-repository --repository-name ${repoName} --image-scanning-configuration scanOnPush=true
+ aws ecr create-repository --repository-name ${repoName} --image-scanning-configuration scanOnPush=true
}
diff --git a/gen3/bin/gitops.sh b/gen3/bin/gitops.sh
index a3b7824dc..bc0358499 100644
--- a/gen3/bin/gitops.sh
+++ b/gen3/bin/gitops.sh
@@ -291,9 +291,15 @@ gen3_gitops_sync() {
if g3kubectl get configmap manifest-versions; then
oldJson=$(g3kubectl get configmap manifest-versions -o=json | jq ".data")
fi
- newJson=$(g3k_config_lookup ".versions")
echo "old JSON is: $oldJson"
- echo "new JSON is: $newJson"
+ newJson=$(g3k_config_lookup ".versions")
+ # Make sure the script exits if newJSON contains invalid JSON
+ if [ $? -ne 0 ]; then
+    echo "Error: g3k_config_lookup command failed - invalid JSON"
+ exit 1
+ else
+ echo "new JSON is: $newJson"
+ fi
if [[ -z $newJson ]]; then
echo "Manifest does not have versions section. Unable to get new versions, skipping version update."
elif [[ -z $oldJson ]]; then
@@ -439,8 +445,13 @@ gen3_gitops_sync() {
echo "DRYRUN flag detected, not rolling"
gen3_log_info "dict_roll: $dict_roll; versions_roll: $versions_roll; portal_roll: $portal_roll; etl_roll: $etl_roll; fence_roll: $fence_roll"
else
- if [[ ( "$dict_roll" = true ) || ( "$versions_roll" = true ) || ( "$portal_roll" = true )|| ( "$etl_roll" = true ) || ( "$covid_cronjob_roll" = true ) || ("fence_roll" = true) ]]; then
+ if [[ ( "$dict_roll" = true ) || ( "$versions_roll" = true ) || ( "$portal_roll" = true )|| ( "$etl_roll" = true ) || ( "$covid_cronjob_roll" = true ) || ("$fence_roll" = true) ]]; then
echo "changes detected, rolling"
+ tmpHostname=$(gen3 api hostname)
+ if [[ $slack = true ]]; then
+ curl -X POST --data-urlencode "payload={\"text\": \"Gitops-sync Cron: Changes detected on ${tmpHostname} - rolling...\"}" "${slackWebHook}"
+ fi
+
# run etl job before roll all so guppy can pick up changes
if [[ "$etl_roll" = true ]]; then
gen3 update_config etl-mapping "$(gen3 gitops folder)/etlMapping.yaml"
@@ -466,7 +477,6 @@ gen3_gitops_sync() {
rollRes=$?
# send result to slack
if [[ $slack = true ]]; then
- tmpHostname=$(gen3 api hostname)
resStr="SUCCESS"
color="#1FFF00"
if [[ $rollRes != 0 ]]; then
@@ -1020,6 +1030,12 @@ gen3_roll_path() {
local templatePath
cleanName="${depName%[-_]deploy*}"
serviceName="${cleanName/-canary/}"
+ # roll the correct root frontend service
+ frontend_root="$(g3k_config_lookup ".global.frontend_root" "$manifestPath")"
+ if [[ ($serviceName == "frontend-framework" && $frontend_root == "gen3ff") || ($serviceName == "portal" && $frontend_root != "gen3ff") ]]; then
+ cleanName="$cleanName-root"
+ fi
+
templatePath="${GEN3_HOME}/kube/services/${serviceName}/${cleanName}-deploy.yaml"
if [[ -n "$deployVersion" && "$deployVersion" != null ]]; then
templatePath="${GEN3_HOME}/kube/services/${serviceName}/${cleanName}-deploy-${deployVersion}.yaml"
diff --git a/gen3/bin/healthcheck.sh b/gen3/bin/healthcheck.sh
index b2973aa04..b658ff033 100644
--- a/gen3/bin/healthcheck.sh
+++ b/gen3/bin/healthcheck.sh
@@ -47,7 +47,7 @@ gen3_healthcheck() {
# refer to k8s api docs for pod status info
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#podstatus-v1-core
gen3_log_info "Getting all pods..."
-
+
local allPods=$(g3kubectl get pods --all-namespaces -o json | \
jq -r '[
.items[] | {
@@ -117,7 +117,7 @@ gen3_healthcheck() {
if [[ "$statusCode" -lt 200 || "$statusCode" -ge 400 ]]; then
internetAccess=false
fi
-
+
# check internet access with explicit proxy
gen3_log_info "Checking explicit proxy internet access..."
local http_proxy="http://cloud-proxy.internal.io:3128"
@@ -137,6 +137,10 @@ gen3_healthcheck() {
internetAccessExplicitProxy=false
fi
+ gen3_log_info "Clearing Evicted pods"
+ sleep 5
+ clear_evicted_pods
+
local healthJson=$(cat - < /dev/null; then
gen3_log_err "failed to assemble valid json data: $healthJson"
return 1
@@ -205,4 +209,8 @@ EOM
fi
}
+clear_evicted_pods() {
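+  # list pods whose status.reason is "Evicted", build a
+  # "kubectl delete pods <name> -n <namespace>" command for each, and run them
+  # one at a time; any failures are ignored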
+ g3kubectl get pods -A -o json | jq '.items[] | select(.status.reason!=null) | select(.status.reason | contains("Evicted")) | "kubectl delete pods \(.metadata.name) -n \(.metadata.namespace)"' | xargs -n 1 bash -c 2> /dev/null || true
+}
+
gen3_healthcheck "$@"
diff --git a/gen3/bin/iam-serviceaccount.sh b/gen3/bin/iam-serviceaccount.sh
index 0c5a8bba3..1ea055f66 100644
--- a/gen3/bin/iam-serviceaccount.sh
+++ b/gen3/bin/iam-serviceaccount.sh
@@ -115,7 +115,7 @@ EOF
# @return the resulting json from awscli
##
function create_role(){
- local role_name="${vpc_name}-${SERVICE_ACCOUNT_NAME}-role"
+ local role_name="${1}"
if [[ ${#role_name} -gt 63 ]]; then
role_name=$(echo "$role_name" | head -c63)
gen3_log_warning "Role name has been truncated, due to amazon role name 64 character limit. New role name is $role_name"
@@ -123,8 +123,8 @@ function create_role(){
local assume_role_policy_path="$(create_assume_role_policy)"
gen3_log_info "Entering create_role"
- gen3_log_info " ${role_name}"
- gen3_log_info " ${assume_role_policy_path}"
+ gen3_log_info " Role: ${role_name}"
+ gen3_log_info " Policy path: ${assume_role_policy_path}"
local role_json
role_json=$(aws iam create-role \
@@ -156,8 +156,8 @@ function add_policy_to_role(){
local role_name="${2}"
gen3_log_info "Entering add_policy_to_role"
- gen3_log_info " ${policy}"
- gen3_log_info " ${role_name}"
+ gen3_log_info " Policy: ${policy}"
+ gen3_log_info " Role: ${role_name}"
local result
if [[ ${policy} =~ arn:aws:iam::aws:policy/[a-zA-Z0-9]+ ]]
@@ -198,8 +198,8 @@ function create_role_with_policy() {
local role_name="${2}"
gen3_log_info "Entering create_role_with_policy"
- gen3_log_info " ${policy}"
- gen3_log_info " ${role_name}"
+ gen3_log_info " Policy: ${policy}"
+ gen3_log_info " Role: ${role_name}"
local created_role_json
created_role_json="$(create_role ${role_name})" || return $?
@@ -357,7 +357,10 @@ function main() {
local policy_validation
local policy_source
- local role_name="${vpc_name}-${SERVICE_ACCOUNT_NAME}-role"
+ local role_name=$ROLE_NAME
+ if [ -z "${role_name}" ]; then
+ role_name="${vpc_name}-${SERVICE_ACCOUNT_NAME}-role"
+ fi
if [ -z ${NAMESPACE_SCRIPT} ];
then
@@ -481,6 +484,12 @@ while getopts "$OPTSPEC" optchar; do
ACTION="c"
SERVICE_ACCOUNT_NAME=${OPTARG#*=}
;;
+ role-name)
+ ROLE_NAME="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 ))
+ ;;
+ role-name=*)
+ ROLE_NAME=${OPTARG#*=}
+ ;;
list)
ACTION="l"
SERVICE_ACCOUNT_NAME="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 ))
diff --git a/gen3/bin/job.sh b/gen3/bin/job.sh
index 4a1c03542..09d305957 100644
--- a/gen3/bin/job.sh
+++ b/gen3/bin/job.sh
@@ -60,7 +60,7 @@ g3k_job2cronjson(){
local cronScript="$(cat - < /dev/null; then
echo "$name"
if [[ "$command" == "kill" ]]; then
- gen3_log_info "try to kill pod $name in $jnamespace"
- g3kubectl delete pod --namespace "$jnamespace" "$name" 1>&2
+ pod_creation=$(date -d $(g3kubectl get pod "$name" -n "$jnamespace" -o jsonpath='{.metadata.creationTimestamp}') +%s)
+ current_time=$(date +%s)
+ age=$((current_time - pod_creation))
+
+ # potential workspaces to be reaped for inactivity must be at least 60 minutes old
+ if ((age >= 3600)); then
+ gen3_log_info "try to kill pod $name in $jnamespace"
+ g3kubectl delete pod --namespace "$jnamespace" "$name" 1>&2
+ fi
fi
else
gen3_log_info "$clusterName not in $(cat $tempClusterFile)"
diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh
index ab22ce07b..744e8e288 100644
--- a/gen3/bin/kube-roll-all.sh
+++ b/gen3/bin/kube-roll-all.sh
@@ -51,20 +51,20 @@ fi
gen3 kube-setup-networkpolicy disable
#
-# Hopefull core secrets/config in place - start bringing up services
+# Hopefully core secrets/config in place - start bringing up services
#
-if g3k_manifest_lookup .versions.indexd 2> /dev/null; then
- gen3 kube-setup-indexd &
-else
- gen3_log_info "no manifest entry for indexd"
-fi
-
if g3k_manifest_lookup .versions.arborist 2> /dev/null; then
gen3 kube-setup-arborist || gen3_log_err "arborist setup failed?"
else
gen3_log_info "no manifest entry for arborist"
fi
+if g3k_manifest_lookup .versions.indexd 2> /dev/null; then
+ gen3 kube-setup-indexd &
+else
+ gen3_log_info "no manifest entry for indexd"
+fi
+
if g3k_manifest_lookup '.versions["audit-service"]' 2> /dev/null; then
gen3 kube-setup-audit-service
else
@@ -213,7 +213,7 @@ else
gen3_log_info "not deploying requestor - no manifest entry for .versions.requestor"
fi
-gen3 kube-setup-metadata &
+gen3 kube-setup-metadata
if g3k_manifest_lookup .versions.ssjdispatcher 2>&1 /dev/null; then
gen3 kube-setup-ssjdispatcher &
@@ -243,18 +243,50 @@ else
gen3_log_info "not deploying dicom-viewer - no manifest entry for '.versions[\"dicom-viewer\"]'"
fi
-gen3 kube-setup-revproxy &
+if g3k_manifest_lookup '.versions["gen3-discovery-ai"]' 2> /dev/null; then
+ gen3 kube-setup-gen3-discovery-ai &
+else
+ gen3_log_info "not deploying gen3-discovery-ai - no manifest entry for '.versions[\"gen3-discovery-ai\"]'"
+fi
+
+if g3k_manifest_lookup '.versions["ohdsi-atlas"]' && g3k_manifest_lookup '.versions["ohdsi-webapi"]' 2> /dev/null; then
+ gen3 kube-setup-ohdsi &
+else
+ gen3_log_info "not deploying OHDSI tools - no manifest entry for '.versions[\"ohdsi-atlas\"]' and '.versions[\"ohdsi-webapi\"]'"
+fi
+
+if g3k_manifest_lookup '.versions["cohort-middleware"]' 2> /dev/null; then
+ gen3 kube-setup-cohort-middleware
+else
+ gen3_log_info "not deploying cohort-middleware - no manifest entry for .versions[\"cohort-middleware\"]"
+fi
+
+gen3 kube-setup-revproxy
if [[ "$GEN3_ROLL_FAST" != "true" ]]; then
+ if g3k_manifest_lookup .global.argocd 2> /dev/null; then
+ gen3 kube-setup-prometheus
+ fi
# Internal k8s systems
gen3 kube-setup-fluentd &
- gen3 kube-setup-autoscaler &
- gen3 kube-setup-kube-dns-autoscaler &
+ # If there is an entry for karpenter in the manifest setup karpenter
+ if g3k_manifest_lookup .global.karpenter 2> /dev/null; then
+ if [[ "$(g3k_manifest_lookup .global.karpenter)" != "arm" ]]; then
+ gen3 kube-setup-karpenter deploy &
+ else
+ gen3 kube-setup-karpenter deploy --arm &
+ fi
+ # Otherwise, setup the cluster autoscaler
+ else
+ gen3 kube-setup-autoscaler &
+ fi
+ #gen3 kube-setup-kube-dns-autoscaler &
gen3 kube-setup-metrics deploy || true
gen3 kube-setup-tiller || true
#
gen3 kube-setup-networkpolicy disable &
gen3 kube-setup-networkpolicy &
+ gen3 kube-setup-pdb
else
gen3_log_info "roll fast mode - skipping k8s base services and netpolicy setup"
fi
@@ -302,6 +334,24 @@ else
gen3_log_info "not deploying frontend-framework - no manifest entry for '.versions[\"frontend-framework\"]'"
fi
+if g3k_manifest_lookup '.versions["cedar-wrapper"]' 2> /dev/null; then
+ gen3 kube-setup-cedar-wrapper &
+else
+ gen3_log_info "not deploying cedar-wrapper - no manifest entry for '.versions[\"cedar-wrapper\"]'"
+fi
+
+if g3k_manifest_lookup '.versions["kayako-wrapper"]' 2> /dev/null; then
+ gen3 kube-setup-kayako-wrapper &
+else
+ gen3_log_info "not deploying kayako-wrapper - no manifest entry for '.versions[\"kayako-wrapper\"]'"
+fi
+
+if g3k_manifest_lookup '.versions["argo-wrapper"]' 2> /dev/null; then
+ gen3 kube-setup-argo-wrapper &
+else
+ gen3_log_info "not deploying argo-wrapper - no manifest entry for '.versions[\"argo-wrapper\"]'"
+fi
+
gen3_log_info "enable network policy"
gen3 kube-setup-networkpolicy "enable" || true &
diff --git a/gen3/bin/kube-setup-access-backend.sh b/gen3/bin/kube-setup-access-backend.sh
index bbb3ae663..60d4758c5 100644
--- a/gen3/bin/kube-setup-access-backend.sh
+++ b/gen3/bin/kube-setup-access-backend.sh
@@ -210,8 +210,10 @@ authz:
- /programs/tutorial
- /programs/open_access
role_ids:
- - reader
- - storage_reader
+ - guppy_reader
+ - fence_reader
+ - peregrine_reader
+ - sheepdog_reader
- description: full access to indexd API
id: indexd_admin
resource_paths:
@@ -226,18 +228,22 @@ authz:
- /programs/open_access
role_ids:
- creator
- - reader
+ - guppy_reader
+ - fence_reader
+ - peregrine_reader
+ - sheepdog_reader
- updater
- deleter
- storage_writer
- - storage_reader
- description: ''
id: all_programs_reader
resource_paths:
- /programs
role_ids:
- - reader
- - storage_reader
+ - guppy_reader
+ - fence_reader
+ - peregrine_reader
+ - sheepdog_reader
- id: 'all_programs_writer'
description: ''
role_ids:
@@ -328,12 +334,37 @@ authz:
service: '*'
id: creator
- description: ''
- id: reader
+ id: guppy_reader
permissions:
- action:
method: read
- service: '*'
- id: reader
+ service: 'guppy'
+ id: guppy_reader
+ - description: ''
+ id: fence_reader
+ permissions:
+ - action:
+ method: read
+ service: 'fence'
+ id: fence_reader
+ - action:
+ method: read-storage
+ service: 'fence'
+ id: fence_storage_reader
+ - description: ''
+ id: peregrine_reader
+ permissions:
+ - action:
+ method: read
+ service: 'peregrine'
+ id: peregrine_reader
+ - description: ''
+ id: sheepdog_reader
+ permissions:
+ - action:
+ method: read
+ service: 'sheepdog'
+ id: sheepdog_reader
- description: ''
id: updater
permissions:
@@ -355,13 +386,6 @@ authz:
method: write-storage
service: '*'
id: storage_creator
- - description: ''
- id: storage_reader
- permissions:
- - action:
- method: read-storage
- service: '*'
- id: storage_reader
- id: mds_user
permissions:
- action:
diff --git a/gen3/bin/kube-setup-ambassador.sh b/gen3/bin/kube-setup-ambassador.sh
index 0f4e0be28..5f92af5cc 100644
--- a/gen3/bin/kube-setup-ambassador.sh
+++ b/gen3/bin/kube-setup-ambassador.sh
@@ -25,7 +25,6 @@ deploy_api_gateway() {
return 0
fi
gen3 roll ambassador-gen3
- g3k_kv_filter "${GEN3_HOME}/kube/services/ambassador-gen3/ambassador-gen3-service-elb.yaml" GEN3_ARN "$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}')" | g3kubectl apply -f -
local luaYamlTemp="$(mktemp "$XDG_RUNTIME_DIR/lua.yaml.XXXXXX")"
cat - > "$luaYamlTemp" < /dev/null 2>&1
+ secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client $fence_client --urls https://${hostname}/guac/guacamole/#/ --username guacamole --auto-approve --public --external --allowed-scopes openid profile email user | tail -1)
+ if [[ ! $secrets =~ (\'(.*)\', None) ]]; then
+ gen3_log_err "kube-setup-apache-guacamole" "Failed generating oidc client for guacamole: $secrets"
+ return 1
+ fi
+ fi
+ local FENCE_CLIENT_ID="${BASH_REMATCH[2]}"
+ local FENCE_CLIENT_SECRET="${BASH_REMATCH[3]}"
+ gen3_log_info "create guacamole-secret"
+ mkdir -m 0700 -p "$(gen3_secrets_folder)/g3auto/guacamole"
+
+ cat - < /dev/null 2>&1; then
+ local credsPath="$(gen3_secrets_folder)/g3auto/guacamole/appcreds.json"
+ if [ -f "$credsPath" ]; then
+ gen3 secrets sync
+ return 0
+ fi
+ mkdir -p "$(dirname "$credsPath")"
+ if ! new_client > "$credsPath"; then
+ gen3_log_err "Failed to setup guacamole fence client"
+ rm "$credsPath" || true
+ return 1
+ fi
+ gen3 secrets sync
+ fi
+
+ if ! g3kubectl describe secret guacamole-g3auto | grep dbcreds.json > /dev/null 2>&1; then
+ gen3_log_info "create database"
+ if ! gen3 db setup guacamole; then
+ gen3_log_err "Failed setting up database for guacamole service"
+ return 1
+ fi
+ gen3 secrets sync
+ fi
+}
+
+setup_secrets() {
+ # guacamole-secrets.yaml populate and apply.
+ gen3_log_info "Deploying secrets for guacamole"
+ # subshell
+
+ (
+ if ! dbcreds="$(gen3 db creds guacamole)"; then
+ gen3_log_err "unable to find db creds for guacamole service"
+ return 1
+ fi
+
+ if ! appcreds="$(gen3 secrets decode guacamole-g3auto appcreds.json)"; then
+ gen3_log_err "unable to find app creds for guacamole service"
+ return 1
+ fi
+
+ local hostname=$(gen3 api hostname)
+ export DB_NAME=$(jq -r ".db_database" <<< "$dbcreds")
+ export DB_USER=$(jq -r ".db_username" <<< "$dbcreds")
+ export DB_PASS=$(jq -r ".db_password" <<< "$dbcreds")
+ export DB_HOST=$(jq -r ".db_host" <<< "$dbcreds")
+
+ export FENCE_URL="https://${hostname}/user/user"
+ export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration"
+ export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds")
+ export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds")
+
+ export OPENID_AUTHORIZATION_ENDPOINT="https://${hostname}/user/oauth2/authorize"
+ export OPENID_JWKS_ENDPOINT="https://${hostname}/user/.well-known/jwks"
+ export OPENID_REDIRECT_URI="https://${hostname}/guac/guacamole/#/"
+ export OPENID_ISSUER="https://${hostname}/user"
+ export OPENID_USERNAME_CLAIM_TYPE="sub"
+ export OPENID_SCOPE="openid profile email"
+
+ envsubst <"${GEN3_HOME}/kube/services/apache-guacamole/apache-guacamole-configmap.yaml" | g3kubectl apply -f -
+ envsubst <"${GEN3_HOME}/kube/services/apache-guacamole/apache-guacamole-secret.yaml" | g3kubectl apply -f -
+ )
+}
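(Editorial note) setup_secrets relies on envsubst to substitute the exported variables into the configmap and secret templates. A minimal sketch of that mechanism, with a made-up value:

    export OPENID_ISSUER="https://example-commons.org/user"
    echo 'issuer: "${OPENID_ISSUER}"' | envsubst
    # prints: issuer: "https://example-commons.org/user"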
+
+# main --------------------------------------
+if [[ $# -gt 0 && "$1" == "new-client" ]]; then
+ new_client
+ exit $?
+fi
+
+setup_creds
+
+setup_secrets
+
+gen3 roll apache-guacamole
+g3kubectl apply -f "${GEN3_HOME}/kube/services/apache-guacamole/apache-guacamole-service.yaml"
+
+cat < /dev/null 2>&1; then
+ gen3_log_info "Creating argo-events namespace, as it was not found"
+ kubectl create namespace argo-events
+fi
+
+# Check if target configmap exists
+if ! kubectl get configmap environment -n argo-events > /dev/null 2>&1; then
+
+ # Get value from source configmap
+ VALUE=$(kubectl get configmap global -n default -o jsonpath="{.data.environment}")
+
+ # Create target configmap
+ kubectl create configmap environment -n argo-events --from-literal=environment=$VALUE
+
+fi
+
+if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_namespace" == true ]]; then
+ if (! helm status argo -n argo-events > /dev/null 2>&1 ) || [[ "$force" == true ]]; then
+ helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2)
+ helm repo update 2> >(grep -v 'This is insecure' >&2)
+ helm upgrade --install argo-events argo/argo-events -n argo-events --version "2.1.3"
+ else
+ gen3_log_info "argo-events Helm chart already installed. To force reinstall, run with --force"
+ fi
+
+ if kubectl get statefulset eventbus-default-stan -n argo-events >/dev/null 2>&1; then
+ gen3_log_info "Detected eventbus installation. To reinstall, please delete the eventbus first. You will need to delete any EventSource and Sensors currently in use"
+ else
+ kubectl apply -f ${GEN3_HOME}/kube/services/argo-events/eventbus.yaml
+ fi
+else
+  gen3_log_info "Not running in default namespace, will not install argo-events helm chart. This behavior can be overridden with the --override-namespace flag"
+fi
+
+if [[ "$create_workflow_resources" == true ]]; then
+ for file in ${GEN3_HOME}/kube/services/argo-events/workflows/*.yaml; do
+ kubectl apply -f "$file"
+ done
+
+ #Creating rolebindings to allow Argo Events to create jobs, and allow those jobs to manage Karpenter resources
+ kubectl create rolebinding argo-events-job-admin-binding --role=job-admin --serviceaccount=argo-events:default --namespace=argo-events
+ kubectl create clusterrolebinding karpenter-admin-binding --clusterrole=karpenter-admin --serviceaccount=argo-events:default
+ kubectl create clusterrolebinding argo-workflows-view-binding --clusterrole=argo-argo-workflows-view --serviceaccount=argo-events:default
+fi
\ No newline at end of file
diff --git a/gen3/bin/kube-setup-argo-wrapper.sh b/gen3/bin/kube-setup-argo-wrapper.sh
index 5727a703e..9f7cc52ce 100644
--- a/gen3/bin/kube-setup-argo-wrapper.sh
+++ b/gen3/bin/kube-setup-argo-wrapper.sh
@@ -18,6 +18,26 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then
gen3 roll argo-wrapper
g3kubectl apply -f "${GEN3_HOME}/kube/services/argo-wrapper/argo-wrapper-service.yaml"
+
+
+ if g3k_manifest_lookup .argo.argo_server_service_url 2> /dev/null; then
+ export ARGO_HOST=$(g3k_manifest_lookup .argo.argo_server_service_url)
+ else
+ export ARGO_HOST="http://argo-argo-workflows-server.argo.svc.cluster.local:2746"
+ fi
+
+ if g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json 2> /dev/null; then
+ export ARGO_NAMESPACE=$(g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json)
+ else
+ export ARGO_NAMESPACE="argo"
+ fi
+
+ envsubst <"${GEN3_HOME}/kube/services/argo-wrapper/config.ini" > /tmp/config.ini
+
+ g3kubectl delete configmap argo-wrapper-namespace-config
+ g3kubectl create configmap argo-wrapper-namespace-config --from-file /tmp/config.ini
+
+ rm /tmp/config.ini
gen3_log_info "the argo-wrapper service has been deployed onto the kubernetes cluster"
-fi
\ No newline at end of file
+fi
diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh
index e95f216fe..4c6c55eee 100644
--- a/gen3/bin/kube-setup-argo.sh
+++ b/gen3/bin/kube-setup-argo.sh
@@ -5,36 +5,55 @@ source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
gen3_load "gen3/lib/kube-setup-init"
+override_namespace=false
+force=false
+
+for arg in "${@}"; do
+ if [ "$arg" == "--override-namespace" ]; then
+ override_namespace=true
+ elif [ "$arg" == "--force" ]; then
+ force=true
+ else
+ #Print usage info and exit
+ gen3_log_info "Usage: gen3 kube-setup-argo [--override-namespace] [--force]"
+ exit 1
+ fi
+done
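(Editorial note) Per the usage string above, the script now accepts two optional flags; assuming the usual gen3 dispatch, invocations would look like:

    gen3 kube-setup-argo                        # default behavior, only acts from the default namespace
    gen3 kube-setup-argo --override-namespace   # allow setup from a non-default namespace
    gen3 kube-setup-argo --force                # redeploy the argo helm release even if it already exists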
ctx="$(g3kubectl config current-context)"
ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")"
+argo_namespace=$(g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json)
function setup_argo_buckets {
local accountNumber
local environment
local policyFile="$XDG_RUNTIME_DIR/policy_$$.json"
+ local bucketLifecyclePolicyFile="$XDG_RUNTIME_DIR/bucket_lifecycle_policy_$$.json"
if ! accountNumber="$(aws sts get-caller-identity --output text --query 'Account')"; then
gen3_log_err "could not determine account numer"
return 1
fi
- if ! environment="$(g3kubectl get configmap manifest-global -o json | jq -r .data.environment)"; then
+ if ! environment="$(g3k_environment)"; then
gen3_log_err "could not determine environment from manifest-global - bailing out of argo setup"
return 1
fi
# try to come up with a unique but composable bucket name
bucketName="gen3-argo-${accountNumber}-${environment//_/-}"
- userName="gen3-argo-${environment//_/-}-user"
- if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."s3-bucket"') ]]; then
- if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then
+ nameSpace="$(gen3 db namespace)"
+ roleName="gen3-argo-${environment//_/-}-role"
+ bucketPolicy="argo-bucket-policy-${nameSpace}"
+ internalBucketPolicy="argo-internal-bucket-policy-${nameSpace}"
+ if [[ ! -z $(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."downloadable-s3-bucket"') ]]; then
+ if [[ ! -z $(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then
gen3_log_info "Using S3 bucket found in manifest: ${bucketName}"
- bucketName=$(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json)
+ bucketName=$(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json)
else
gen3_log_info "Using S3 bucket found in manifest: ${bucketName}"
- bucketName=$(g3k_config_lookup '.argo."s3-bucket"')
+ bucketName=$(g3k_config_lookup '.argo."downloadable-s3-bucket"')
fi
fi
if [[ ! -z $(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."internal-s3-bucket"') ]]; then
@@ -98,65 +117,70 @@ EOF
]
}
EOF
- if ! secret="$(g3kubectl get secret argo-s3-creds -n argo 2> /dev/null)"; then
- gen3_log_info "setting up bucket $bucketName"
-
- if aws s3 ls --page-size 1 "s3://${bucketName}" > /dev/null 2>&1; then
- gen3_log_info "${bucketName} s3 bucket already exists"
- # continue on ...
- elif ! aws s3 mb "s3://${bucketName}"; then
- gen3_log_err "failed to create bucket ${bucketName}"
- fi
-
-
- gen3_log_info "Creating IAM user ${userName}"
- if ! aws iam get-user --user-name ${userName} > /dev/null 2>&1; then
- aws iam create-user --user-name ${userName}
- else
- gen3_log_info "IAM user ${userName} already exits.."
- fi
-
- secret=$(aws iam create-access-key --user-name ${userName})
- if ! g3kubectl get namespace argo > /dev/null 2>&1; then
- gen3_log_info "Creating argo namespace"
- g3kubectl create namespace argo
- g3kubectl label namespace argo app=argo
- g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n argo
- fi
- else
- # Else we want to recreate the argo-s3-creds secret so make a temp file with the current creds and delete argo-s3-creds secret
- gen3_log_info "Argo S3 setup already completed"
- local secretFile="$XDG_RUNTIME_DIR/temp_key_file_$$.json"
- cat > "$secretFile" < "$bucketLifecyclePolicyFile" < /dev/null 2>&1; then
+ gen3_log_info "${bucketName} s3 bucket already exists"
+ # continue on ...
+ elif ! aws s3 mb "s3://${bucketName}"; then
+ gen3_log_err "failed to create bucket ${bucketName}"
fi
-
-
- gen3_log_info "Creating s3 creds secret in argo namespace"
- if [[ -z $internalBucketName ]]; then
- g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName}
+ if ! g3kubectl get namespace argo > /dev/null 2>&1; then
+ gen3_log_info "Creating argo namespace"
+ g3kubectl create namespace argo || true
+ g3kubectl label namespace argo app=argo || true
+ # Grant admin access within the argo namespace to the default SA in the argo namespace
+ g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n $argo_namespace || true
+ fi
+ gen3_log_info "Creating IAM role ${roleName}"
+ if aws iam get-role --role-name "${roleName}" > /dev/null 2>&1; then
+ gen3_log_info "IAM role ${roleName} already exists.."
+ roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text)
+ gen3_log_info "Role annotate"
+ g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $argo_namespace
+ g3kubectl annotate serviceaccount argo eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $nameSpace
else
- g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} --from-literal=internalbucketname=${internalBucketName}
+ gen3 awsrole create $roleName argo $nameSpace -all_namespaces
+ roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text)
+ g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} -n $argo_namespace
fi
+ # Grant admin access within the current namespace to the argo SA in the current namespace
+ g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=$nameSpace:argo -n $nameSpace || true
+ aws iam put-role-policy --role-name ${roleName} --policy-name ${bucketPolicy} --policy-document file://$policyFile || true
+ if [[ -z $internalBucketName ]]; then
+ aws iam put-role-policy --role-name ${roleName} --policy-name ${internalBucketPolicy} --policy-document file://$internalBucketPolicyFile || true
+ fi
## if new bucket then do the following
# Get the aws keys from secret
+ # Create and attach lifecycle policy
# Set bucket policies
# Update secret to have new bucket
+ gen3_log_info "Creating bucket lifecycle policy"
+ aws s3api put-bucket-lifecycle --bucket ${bucketName} --lifecycle-configuration file://$bucketLifecyclePolicyFile
+
# Always update the policy, in case manifest buckets change
- aws iam put-user-policy --user-name ${userName} --policy-name argo-bucket-policy --policy-document file://$policyFile
+ aws iam put-role-policy --role-name ${roleName} --policy-name ${bucketPolicy} --policy-document file://$policyFile
if [[ ! -z $internalBucketPolicyFile ]]; then
- aws iam put-user-policy --user-name ${userName} --policy-name argo-internal-bucket-policy --policy-document file://$internalBucketPolicyFile
+ aws iam put-role-policy --role-name ${roleName} --policy-name ${internalBucketPolicy} --policy-document file://$internalBucketPolicyFile
fi
if [[ ! -z $(g3k_config_lookup '.indexd_admin_user' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo.indexd_admin_user') ]]; then
if [[ ! -z $(g3k_config_lookup '.indexd_admin_user' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then
@@ -168,39 +192,53 @@ EOF
for serviceName in indexd; do
secretName="${serviceName}-creds"
# Only delete if secret is found to prevent early exits
- if [[ ! -z $(g3kubectl get secrets -n argo | grep $secretName) ]]; then
- g3kubectl delete secret "$secretName" -n argo > /dev/null 2>&1
+ if [[ ! -z $(g3kubectl get secrets -n $argo_namespace | grep $secretName) ]]; then
+ g3kubectl delete secret "$secretName" -n $argo_namespace > /dev/null 2>&1
fi
done
sleep 1 # I think delete is async - give backend a second to finish
indexdFencePassword=$(cat $(gen3_secrets_folder)/creds.json | jq -r .indexd.user_db.$indexd_admin_user)
- g3kubectl create secret generic "indexd-creds" --from-literal=user=$indexd_admin_user --from-literal=password=$indexdFencePassword -n argo
+ g3kubectl create secret generic "indexd-creds" --from-literal=user=$indexd_admin_user --from-literal=password=$indexdFencePassword -n $argo_namespace
fi
}
function setup_argo_db() {
- if ! secret="$(g3kubectl get secret argo-db-creds -n argo 2> /dev/null)"; then
+ if ! secret="$(g3kubectl get secret argo-db-creds -n $argo_namespace 2> /dev/null)"; then
gen3_log_info "Setting up argo db persistence"
gen3 db setup argo || true
dbCreds=$(gen3 secrets decode argo-g3auto dbcreds.json)
- g3kubectl create secret -n argo generic argo-db-creds --from-literal=db_host=$(echo $dbCreds | jq -r .db_host) --from-literal=db_username=$(echo $dbCreds | jq -r .db_username) --from-literal=db_password=$(echo $dbCreds | jq -r .db_password) --from-literal=db_database=$(echo $dbCreds | jq -r .db_database)
+ g3kubectl create secret -n $argo_namespace generic argo-db-creds --from-literal=db_host=$(echo $dbCreds | jq -r .db_host) --from-literal=db_username=$(echo $dbCreds | jq -r .db_username) --from-literal=db_password=$(echo $dbCreds | jq -r .db_password) --from-literal=db_database=$(echo $dbCreds | jq -r .db_database)
else
gen3_log_info "Argo DB setup already completed"
fi
}
+function setup_argo_template_secret() {
+ gen3_log_info "Started the template secret process"
+ downloadable_bucket_name=$(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json)
+ # Check if the secret already exists
+ if [[ ! -z $(g3kubectl get secret argo-template-values-secret -n $argo_namespace) ]]; then
+ gen3_log_info "Argo template values secret already exists, assuming it's stale and deleting"
+ g3kubectl delete secret argo-template-values-secret -n $argo_namespace
+ fi
+ gen3_log_info "Creating argo template values secret"
+ g3kubectl create secret generic argo-template-values-secret --from-literal=DOWNLOADABLE_BUCKET=$downloadable_bucket_name -n $argo_namespace
+}
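(Editorial note) To sanity-check that the template-values secret carries the expected bucket, the value can be read back with standard kubectl; this is not part of the change, and the namespace here assumes the default "argo":

    g3kubectl get secret argo-template-values-secret -n argo \
      -o jsonpath='{.data.DOWNLOADABLE_BUCKET}' | base64 -d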
+
+setup_argo_buckets
# only do this if we are running in the default namespace
-if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
- setup_argo_buckets
+if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_namespace" == true ]]; then
setup_argo_db
- if (! helm status argo -n argo > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then
- DBHOST=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_host | base64 -d)
- DBNAME=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_database | base64 -d)
- if [[ -z $(kubectl get secrets -n argo argo-s3-creds -o json | jq -r .data.internalbucketname | base64 -d) ]]; then
- BUCKET=$(kubectl get secrets -n argo argo-s3-creds -o json | jq -r .data.bucketname | base64 -d)
+ setup_argo_template_secret
+ if (! helm status argo -n $argo_namespace > /dev/null 2>&1 ) || [[ "$force" == true ]]; then
+ DBHOST=$(kubectl get secrets -n $argo_namespace argo-db-creds -o json | jq -r .data.db_host | base64 -d)
+ DBNAME=$(kubectl get secrets -n $argo_namespace argo-db-creds -o json | jq -r .data.db_database | base64 -d)
+ if [[ -z $internalBucketName ]]; then
+ BUCKET=$bucketName
else
- BUCKET=$(kubectl get secrets -n argo argo-s3-creds -o json | jq -r .data.internalbucketname | base64 -d)
+ BUCKET=$internalBucketName
fi
+
valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml"
valuesTemplate="${GEN3_HOME}/kube/services/argo/values.yaml"
@@ -208,10 +246,10 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2)
helm repo update 2> >(grep -v 'This is insecure' >&2)
- helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile}
+ helm upgrade --install argo argo/argo-workflows -n $argo_namespace -f ${valuesFile} --version 0.29.1
else
gen3_log_info "kube-setup-argo exiting - argo already deployed, use --force to redeploy"
fi
else
gen3_log_info "kube-setup-argo exiting - only deploys from default namespace"
-fi
+fi
\ No newline at end of file
diff --git a/gen3/bin/kube-setup-argocd.sh b/gen3/bin/kube-setup-argocd.sh
new file mode 100644
index 000000000..4a9ac0f74
--- /dev/null
+++ b/gen3/bin/kube-setup-argocd.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Deploy the argocd
+#
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+if g3kubectl get namespace argocd > /dev/null 2>&1;
+then
+ gen3_log_info "ArgoCD is already deployed. Skipping..."
+else
+ kubectl create namespace argocd
+ kubectl label namespace argocd app="argocd"
+ kubectl annotate namespace argocd app="argocd"
+ helm repo add argo https://argoproj.github.io/argo-helm
+ helm upgrade --install argocd -f "$GEN3_HOME/kube/services/argocd/values.yaml" argo/argo-cd -n argocd
+ gen3 kube-setup-revproxy
+ export argocdsecret=`kubectl get secret argocd-initial-admin-secret -n argocd -o json | jq .data.password -r | base64 -d` # pragma: allowlist secret
+ gen3_log_info "You can now access the ArgoCD endpoint with the following credentials: Username= admin and Password= $argocdsecret"
+fi
\ No newline at end of file
diff --git a/gen3/bin/kube-setup-audit-service.sh b/gen3/bin/kube-setup-audit-service.sh
index aa3fa5f9e..92c70f352 100644
--- a/gen3/bin/kube-setup-audit-service.sh
+++ b/gen3/bin/kube-setup-audit-service.sh
@@ -20,8 +20,8 @@ setup_database_and_config() {
fi
# Setup config file that audit-service consumes
- if [[ ! -f "$secretsFolder/audit-service-config.yaml" || ! -f "$secretsFolder/base64Authz.txt" ]]; then
- local secretsFolder="$(gen3_secrets_folder)/g3auto/audit"
+ local secretsFolder="$(gen3_secrets_folder)/g3auto/audit"
+ if [[ ! -f "$secretsFolder/audit-service-config.yaml" ]]; then
if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
if ! gen3 db setup audit; then
gen3_log_err "Failed setting up database for audit-service"
@@ -60,14 +60,12 @@ DB_USER: $(jq -r .db_username < "$secretsFolder/dbcreds.json")
DB_PASSWORD: $(jq -r .db_password < "$secretsFolder/dbcreds.json")
DB_DATABASE: $(jq -r .db_database < "$secretsFolder/dbcreds.json")
EOM
- # make it easy for nginx to get the Authorization header ...
- # echo -n "gateway:$password" | base64 > "$secretsFolder/base64Authz.txt"
fi
gen3 secrets sync 'setup audit-g3auto secrets'
}
setup_audit_sqs() {
- local sqsName="$(gen3 api safe-name audit-sqs)"
+ local sqsName="audit-sqs"
sqsInfo="$(gen3 sqs create-queue-if-not-exist $sqsName)" || exit 1
sqsUrl="$(jq -e -r '.["url"]' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-url' from output: $sqsInfo"; exit 1; }
sqsArn="$(jq -e -r '.["arn"]' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-arn' from output: $sqsInfo"; exit 1; }
diff --git a/gen3/bin/kube-setup-aurora-monitoring.sh b/gen3/bin/kube-setup-aurora-monitoring.sh
new file mode 100644
index 000000000..5029a87ca
--- /dev/null
+++ b/gen3/bin/kube-setup-aurora-monitoring.sh
@@ -0,0 +1,167 @@
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+databaseArray=()
+databaseFarmArray=()
+
+# This function retrieves all the top-level entries from creds.json that have the db items we want.
+# This way, we can use this information while we're creating schemas and the like
+get_all_dbs() {
+ databases=$(jq 'to_entries[] | select (.value.db_password) | .key' $(gen3_secrets_folder)/creds.json)
+
+ OLD_IFS=$IFS
+ IFS=$'\n' databaseArray=($databases)
+ IFS=$OLD_IFS
+}
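(Editorial note) As an illustration of what that jq filter keeps, the creds.json shape below is assumed; only the presence of db_password matters:

    # creds.json (abridged, assumed): { "fence": { "db_host": "db.internal", "db_password": "xyz" }, "global": { "environment": "dev" } }
    jq 'to_entries[] | select(.value.db_password) | .key' creds.json
    # -> "fence"    ("global" is skipped because it has no db_password)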
+
+get_all_dbs_db_farm() {
+ databases=$(jq 'to_entries[] | .key' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json)
+
+ OLD_IFS=$IFS
+ IFS=$'\n' databaseFarmArray=($databases)
+ IFS=$OLD_IFS
+}
+
+create_new_datadog_user() {
+ # Generate a new password for the datadog user in psql
+ datadogPsqlPassword=$(random_alphanumeric)
+
+ # update creds.json
+ if [ ! -d "$(gen3_secrets_folder)/datadog" ]
+ then
+ mkdir "$(gen3_secrets_folder)/datadog"
+ fi
+
+ if [ ! -s "$(gen3_secrets_folder)/datadog/datadog_db_users" ]
+ then
+ echo "{}" > "$(gen3_secrets_folder)/datadog/datadog_db_users.json"
+ fi
+
+ output=$(jq --arg host "$1" --arg password "$datadogPsqlPassword" '.[$host].datadog_db_password=$password' "$(gen3_secrets_folder)/datadog/datadog_db_users.json")
+ echo "$output" > "$(gen3_secrets_folder)/datadog/datadog_db_users.json"
+
+ username=$(jq --arg host "$1" 'map(select(.db_host==$host))[0] | .db_username' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"')
+ password=$(jq --arg host "$1" 'map(select(.db_host==$host))[0] | .db_password' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"')
+
+ # Create the Datadog user in the database
+ if PGPASSWORD=$password psql -h "$1" -U "$username" -c "SELECT 1 FROM pg_roles WHERE rolname='datadog'" | grep -q 1;
+ then
+ PGPASSWORD=$password psql -h "$1" -U "$username" -c "ALTER USER datadog WITH password '$datadogPsqlPassword';"
+ else
+ PGPASSWORD=$password psql -h "$1" -U "$username" -c "CREATE USER datadog WITH password '$datadogPsqlPassword';"
+ fi
+
+ echo $datadogPsqlPassword
+}
+
+get_datadog_db_password() {
+  # Look up the Datadog db password for this host, creating the datadog user if it doesn't exist yet
+  datadogPsqlPassword="$(jq -r --arg host "$1" '.[$host].datadog_db_password // empty' < $(gen3_secrets_folder)/datadog/datadog_db_users.json)"
+ if [[ -z "$datadogPsqlPassword" ]]
+ then
+ datadogPsqlPassword=$(create_new_datadog_user $1)
+ fi
+
+ echo $datadogPsqlPassword
+}
+
+create_schema_and_function() {
+ svc=$(echo $1 | tr -d '"')
+ host=$(jq --arg service "$svc" '.[$service].db_host' $(gen3_secrets_folder)/creds.json | tr -d '"')
+ database=$(jq --arg service "$svc" '.[$service].db_database' $(gen3_secrets_folder)/creds.json | tr -d '"')
+
+ username=$(jq --arg host "$host" 'map(select(.db_host==$host))[0] | .db_username' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"')
+ password=$(jq --arg host "$host" 'map(select(.db_host==$host))[0] | .db_password' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"')
+
+ ddPass=$(get_datadog_db_password $host)
+
+ PGPASSWORD=$password psql -h $host -U $username -d $database -t < /dev/null
+then
+ gen3_log_info "We detected an ArgoCD application named 'datadog-application,' so we're modifying that"
+
+ patch=$(yq -n --yaml-output --arg confd "$confd" '.spec.source.helm.values = $confd')
+
+ echo "$patch" > /tmp/confd.yaml
+
+ kubectl patch applications.argoproj.io datadog-application --type merge -n argocd --patch-file /tmp/confd.yaml
+
+else
+ gen3_log_info "We didn't detect an ArgoCD application named 'datadog-application,' so we're going to reinstall the DD Helm chart"
+
+ (cat kube/services/datadog/values.yaml | yq --arg endpoints "$postgresString" --yaml-output '.clusterAgent.confd."postgres.yaml" = $endpoints | .clusterChecksRunner.enabled = true') > $(gen3_secrets_folder)/datadog/datadog_values.yaml
+ helm repo add datadog https://helm.datadoghq.com --force-update 2> >(grep -v 'This is insecure' >&2)
+ helm repo update 2> >(grep -v 'This is insecure' >&2)
+ helm upgrade --install datadog -f "$(gen3_secrets_folder)/datadog/datadog_values.yaml" datadog/datadog -n datadog --version 3.6.4 2> >(grep -v 'This is insecure' >&2)
+fi
\ No newline at end of file
diff --git a/gen3/bin/kube-setup-autoscaler-for-large-workflows.sh b/gen3/bin/kube-setup-autoscaler-for-large-workflows.sh
new file mode 100644
index 000000000..5bf4df8b7
--- /dev/null
+++ b/gen3/bin/kube-setup-autoscaler-for-large-workflows.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# Set the resources block for the deployment
+kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/resources", "value": {"limits":{"cpu":"6","memory":"30Gi"},"requests":{"cpu":"1","memory":"4Gi"}}}]'
+
+# Add options to the command for the container, if they are not already present
+if ! kubectl get deployment cluster-autoscaler -n kube-system -o jsonpath='{.spec.template.spec.containers[0].command}' | yq eval '.[]' | grep -q -- '--scale-down-delay-after-delete=2m'; then
+ kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--scale-down-delay-after-delete=2m"}]'
+else
+ echo "Flag --scale-down-delay-after-delete=2m already present"
+fi
+
+if ! kubectl get deployment cluster-autoscaler -n kube-system -o jsonpath='{.spec.template.spec.containers[0].command}' | yq eval '.[]' | grep -q -- '--scale-down-unneeded-time=2m'; then
+ kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--scale-down-unneeded-time=2m"}]'
+else
+ echo "Flag --scale-down-unneeded-time=2m already present"
+fi
+
+if ! kubectl get deployment cluster-autoscaler -n kube-system -o jsonpath='{.spec.template.spec.containers[0].command}' | yq eval '.[]' | grep -q -- '--scan-interval=60s'; then
+ kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--scan-interval=60s"}]'
+else
+ echo "Flag --scan-interval=60s already present"
+fi
+
+# Add PriorityClass to the pod
+kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/priorityClassName", "value": "system-node-critical"}]'
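(Editorial note) The same jsonpath used by the guards above can be reused to eyeball the result after patching:

    kubectl get deployment cluster-autoscaler -n kube-system \
      -o jsonpath='{.spec.template.spec.containers[0].command}'
    kubectl get deployment cluster-autoscaler -n kube-system \
      -o jsonpath='{.spec.template.spec.priorityClassName}'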
diff --git a/gen3/bin/kube-setup-autoscaler.sh b/gen3/bin/kube-setup-autoscaler.sh
index 01a6cdd95..8aeff8b5b 100644
--- a/gen3/bin/kube-setup-autoscaler.sh
+++ b/gen3/bin/kube-setup-autoscaler.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# cluster-autoscaler allow a kubernetes cluste scale out or in depending on the
+# cluster-autoscaler allows a kubernetes cluster to scale out or in depending on the
# specification set in deployment. It'll talk to the ASG where the worker nodes are
# and send a signal to add or remove instances based upon requirements.
#
@@ -11,6 +11,9 @@
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/lib/kube-setup-init"
+ctx="$(g3kubectl config current-context)"
+ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")"
+
if [[ -n "$JENKINS_HOME" ]]; then
echo "Jenkins skipping fluentd setup: $JENKINS_HOME"
exit 0
@@ -30,6 +33,9 @@ function get_autoscaler_version(){
local casv
case ${k8s_version} in
+ "1.22+")
+ casv="v1.22.2"
+ ;;
"1.21+")
casv="v1.21.2"
;;
@@ -66,34 +72,52 @@ function get_autoscaler_version(){
function deploy() {
+  if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
+    if (! g3kubectl --namespace=kube-system get deployment cluster-autoscaler > /dev/null 2>&1) || [[ "${FORCE}" == true ]]; then
+ if ! [ -z ${CAS_VERSION} ];
+ then
+ casv=${CAS_VERSION}
+ else
+ casv="$(get_autoscaler_version)" # cas stands for ClusterAutoScaler
+ fi
+ echo "Deploying cluster autoscaler ${casv} in ${vpc_name}"
+ g3k_kv_filter "${GEN3_HOME}/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml" VPC_NAME "${vpc_name}" CAS_VERSION ${casv} | g3kubectl "--namespace=kube-system" apply -f -
+ else
+ echo "kube-setup-autoscaler exiting - cluster-autoscaler already deployed, use --force to redeploy"
+ fi
+ fi
+}
+
+function remove() {
- if (! g3kubectl --namespace=kube-system get deployment cluster-autoscaler > /dev/null 2>&1) || [[ "$FORCE" == true ]]; then
+ if ( g3kubectl --namespace=kube-system get deployment cluster-autoscaler > /dev/null 2>&1); then
if ! [ -z ${CAS_VERSION} ];
then
casv=${CAS_VERSION}
else
casv="$(get_autoscaler_version)" # cas stands for ClusterAutoScaler
fi
- echo "Deploying cluster autoscaler ${casv} in ${vpc_name}"
- g3k_kv_filter "${GEN3_HOME}/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml" VPC_NAME "${vpc_name}" CAS_VERSION ${casv} | g3kubectl "--namespace=kube-system" apply -f -
+ echo "Removing cluster autoscaler ${casv} in ${vpc_name}"
+ g3k_kv_filter "${GEN3_HOME}/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml" VPC_NAME "${vpc_name}" CAS_VERSION ${casv} | g3kubectl "--namespace=kube-system" delete -f -
else
- echo "kube-setup-autoscaler exiting - cluster-autoscaler already deployed, use --force to redeploy"
+ echo "kube-setup-autoscaler exiting - cluster-autoscaler not deployed"
fi
}
function HELP(){
- echo "Usage: $SCRIPT [-v] [-f] "
+ echo "Usage: $SCRIPT [-v] [-f] [-r]"
echo "Options:"
echo "No option is mandatory, however you can provide the following:"
echo " -v num --version num --create=num Cluster autoscaler version number"
echo " -f --force Force and update if it is already installed"
+ echo " -r --remove remove deployment if already installed"
}
#echo $(get_autoscaler_version)
-OPTSPEC="hfv:-:"
+OPTSPEC="hfrv:-:"
while getopts "$OPTSPEC" optchar; do
case "${optchar}" in
-)
@@ -107,6 +131,10 @@ while getopts "$OPTSPEC" optchar; do
version=*)
CAS_VERSION=${OPTARG#*=}
;;
+ remove)
+ remove
+ exit 0
+ ;;
*)
if [ "$OPTERR" = 1 ] && [ "${OPTSPEC:0:1}" != ":" ]; then
echo "Unknown option --${OPTARG}" >&2
@@ -121,6 +149,10 @@ while getopts "$OPTSPEC" optchar; do
v)
CAS_VERSION=${OPTARG}
;;
+ r)
+ remove
+ exit 0
+ ;;
*)
if [ "$OPTERR" != 1 ] || [ "${OPTSPEC:0:1}" = ":" ]; then
echo "Non-option argument: '-${OPTARG}'" >&2
@@ -131,4 +163,4 @@ while getopts "$OPTSPEC" optchar; do
esac
done
-deploy
+deploy
\ No newline at end of file
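(Editorial note) With the new option the script can tear the deployment down as well as install it; for example (the pinned version is illustrative):

    gen3 kube-setup-autoscaler                # deploy, auto-detecting the version for the cluster
    gen3 kube-setup-autoscaler -v v1.22.2 -f  # force a redeploy at a pinned version
    gen3 kube-setup-autoscaler -r             # remove an existing cluster-autoscaler deployment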
diff --git a/gen3/bin/kube-setup-aws-es-proxy.sh b/gen3/bin/kube-setup-aws-es-proxy.sh
index d3aafcedc..986c5bf05 100644
--- a/gen3/bin/kube-setup-aws-es-proxy.sh
+++ b/gen3/bin/kube-setup-aws-es-proxy.sh
@@ -8,23 +8,46 @@
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/lib/kube-setup-init"
+# Deploy Datadog with argocd if flag is set in the manifest path
+manifestPath=$(g3k_manifest_path)
+es7="$(jq -r ".[\"global\"][\"es7\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')"
+
[[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets
if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then
envname="$(gen3 api environment)"
- if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names ${envname}-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" \
- && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then
- gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}"
- g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml"
- gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster."
+
+ if [ "$es7" = true ]; then
+ if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names ${envname}-gen3-metadata-2 --query "DomainStatusList[*].Endpoints" --output text)" \
+ && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then
+ gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}"
+ g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml"
+ g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml"
+ gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster."
+ else
+ #
+ # probably running in jenkins or job environment
+ # try to make sure network policy labels are up to date
+ #
+ gen3_log_info "kube-setup-aws-es-proxy" "Not deploying aws-es-proxy, no endpoint to hook it up."
+ gen3 kube-setup-networkpolicy service aws-es-proxy
+ g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true
+ fi
else
- #
- # probably running in jenkins or job environment
- # try to make sure network policy labels are up to date
- #
- gen3_log_info "kube-setup-aws-es-proxy" "Not deploying aws-es-proxy, no endpoint to hook it up."
- gen3 kube-setup-networkpolicy service aws-es-proxy
- g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true
+ if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names ${envname}-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" \
+ && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then
+ gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}"
+ g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml"
+ gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster."
+ else
+ #
+ # probably running in jenkins or job environment
+ # try to make sure network policy labels are up to date
+ #
+ gen3_log_info "kube-setup-aws-es-proxy" "Not deploying aws-es-proxy, no endpoint to hook it up."
+ gen3 kube-setup-networkpolicy service aws-es-proxy
+ g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true
+ fi
fi
gen3 job cron es-garbage '@daily'
else
diff --git a/gen3/bin/kube-setup-cedar-wrapper.sh b/gen3/bin/kube-setup-cedar-wrapper.sh
new file mode 100644
index 000000000..a56bebc40
--- /dev/null
+++ b/gen3/bin/kube-setup-cedar-wrapper.sh
@@ -0,0 +1,78 @@
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/lib/kube-setup-init"
+
+create_client_and_secret() {
+ local hostname=$(gen3 api hostname)
+ local client_name="cedar_ingest_client"
+ gen3_log_info "kube-setup-cedar-wrapper" "creating fence ${client_name} for $hostname"
+ # delete any existing fence cedar clients
+ g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client ${client_name} > /dev/null 2>&1
+ local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client ${client_name} --grant-types client_credentials | tail -1)
+ # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET')
+ if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then
+ gen3_log_err "kube-setup-cedar-wrapper" "Failed generating ${client_name}"
+ return 1
+ else
+ local client_id="${BASH_REMATCH[2]}"
+ local client_secret="${BASH_REMATCH[3]}"
+ gen3_log_info "Create cedar-client secrets file"
+ cat - < /dev/null 2>&1; then
+ local have_cedar_client_secret="1"
+ else
+ gen3_log_info "No g3auto cedar-client key present in secret"
+ fi
+
+ local client_name="cedar_ingest_client"
+ local client_list=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-list)
+  local client_count=$(echo "$client_list" | grep -cE "'name':.*'${client_name}'")
+ gen3_log_info "CEDAR client count = ${client_count}"
+
+ if [[ -z $have_cedar_client_secret ]] || [[ ${client_count} -lt 1 ]]; then
+ gen3_log_info "Creating new cedar-ingest client and secret"
+ local credsPath="$(gen3_secrets_folder)/g3auto/cedar/${cedar_creds_file}"
+ if ! create_client_and_secret > $credsPath; then
+ gen3_log_err "Failed to setup cedar-ingest secret"
+ return 1
+ else
+ gen3 secrets sync
+ gen3 job run usersync
+ fi
+ fi
+}
+
+[[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets
+
+if ! g3kubectl get secrets/cedar-g3auto > /dev/null 2>&1; then
+ gen3_log_err "No cedar-g3auto secret, not rolling CEDAR wrapper"
+ return 1
+fi
+
+if [[ -n "$JENKINS_HOME" ]]; then
+ gen3_log_info "Skipping cedar-client creds setup in non-adminvm environment"
+else
+ gen3_log_info "Checking cedar-client creds"
+ setup_creds
+fi
+
+if ! gen3 secrets decode cedar-g3auto cedar_api_key.txt > /dev/null 2>&1; then
+ gen3_log_err "No CEDAR api key present in cedar-g3auto secret, not rolling CEDAR wrapper"
+ return 1
+fi
+
+g3kubectl apply -f "${GEN3_HOME}/kube/services/cedar-wrapper/cedar-wrapper-service.yaml"
+gen3 roll cedar-wrapper
+
+gen3_log_info "The CEDAR wrapper service has been deployed onto the kubernetes cluster"
diff --git a/gen3/bin/kube-setup-cohort-middleware.sh b/gen3/bin/kube-setup-cohort-middleware.sh
new file mode 100644
index 000000000..a6a024578
--- /dev/null
+++ b/gen3/bin/kube-setup-cohort-middleware.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+# Deploy cohort-middleware into existing commons
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/lib/kube-setup-init"
+
+setup_secrets() {
+ gen3_log_info "Deploying secrets for cohort-middleware"
+ # subshell
+ if [[ -n "$JENKINS_HOME" ]]; then
+ gen3_log_err "skipping secrets setup in non-adminvm environment"
+ return 0
+ fi
+
+ (
+ if ! dbcreds="$(gen3 db creds ohdsi)"; then
+ gen3_log_err "unable to find db creds for ohdsi service (was Atlas deployed?)"
+ return 1
+ fi
+
+ mkdir -p $(gen3_secrets_folder)/g3auto/cohort-middleware
+ credsFile="$(gen3_secrets_folder)/g3auto/cohort-middleware/development.yaml"
+
+ if [[ (! -f "$credsFile") ]]; then
+ DB_NAME=$(jq -r ".db_database" <<< "$dbcreds")
+ export DB_NAME
+ DB_USER=$(jq -r ".db_username" <<< "$dbcreds")
+ export DB_USER
+ DB_PASS=$(jq -r ".db_password" <<< "$dbcreds")
+ export DB_PASS
+ DB_HOST=$(jq -r ".db_host" <<< "$dbcreds")
+ export DB_HOST
+
+ cat - > "$credsFile" < /dev/null 2>&1); then
gen3_log_info "Creating namespace datadog"
g3kubectl create namespace datadog
@@ -44,7 +48,45 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
fi
helm repo add datadog https://helm.datadoghq.com --force-update 2> >(grep -v 'This is insecure' >&2)
helm repo update 2> >(grep -v 'This is insecure' >&2)
- helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 2.28.13 2> >(grep -v 'This is insecure' >&2)
+ if [ "$argocd" = true ]; then
+ g3kubectl apply -f "$GEN3_HOME/kube/services/datadog/datadog-application.yaml" --namespace=argocd
+ else
+ helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 3.6.4 2> >(grep -v 'This is insecure' >&2)
+ fi
+
+ # Check the manifest to see if we want to set up database monitoring
+ # Get the name of the cluster
+ # Run the command
+
+ if g3k_manifest_lookup .datadog.db_monitoring_enabled &> /dev/null; then
+ gen3_log_info "Detected that this commons is using database monitoring. Setting that up now."
+ clusters=$(aws rds describe-db-clusters --query "DBClusters[].DBClusterIdentifier" --output text)
+ clusterArray=($clusters)
+
+ for i in "${!clusterArray[@]}"; do
+ echo "$((i+1)). ${clusterArray[i]}"
+ done
+
+ selected="false"
+ selection=""
+
+ until [ $selected == "true" ]
+ do
+ read -p "Enter the number of the cluster you want to monitor (1-${#clusterArray[@]}): " num
+ if [[ "$num" =~ ^[0-9]+$ ]] && ((num >= 1 && num <= ${#clusterArray[@]})); then
+ echo "You entered: $num"
+ selected="true"
+ selection=${clusterArray[$num - 1]}
+ else
+ echo "Invalid input: $num"
+ fi
+ done
+
+ gen3 kube-setup-aurora-monitoring "$selection"
+ else
+ gen3_log_info "No database monitoring detected. We're done here."
+ fi
+
)
else
gen3_log_info "kube-setup-datadog exiting - datadog already deployed, use --force to redeploy"
diff --git a/gen3/bin/kube-setup-dicom-server.sh b/gen3/bin/kube-setup-dicom-server.sh
index d0f498ff1..59bcb8f39 100644
--- a/gen3/bin/kube-setup-dicom-server.sh
+++ b/gen3/bin/kube-setup-dicom-server.sh
@@ -4,8 +4,8 @@ gen3_load "gen3/gen3setup"
setup_database_and_config() {
gen3_log_info "setting up dicom-server DB and config"
- if g3kubectl describe secret dicom-server-g3auto > /dev/null 2>&1; then
- gen3_log_info "dicom-server-g3auto secret already configured"
+ if g3kubectl describe secret orthanc-g3auto > /dev/null 2>&1; then
+ gen3_log_info "orthanc-g3auto secret already configured"
return 0
fi
if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then
@@ -13,8 +13,8 @@ setup_database_and_config() {
return 0
fi
- # Setup config file that dicom-server consumes
- local secretsFolder="$(gen3_secrets_folder)/g3auto/dicom-server"
+ # Setup config files that dicom-server consumes
+ local secretsFolder="$(gen3_secrets_folder)/g3auto/orthanc"
if [[ ! -f "$secretsFolder/orthanc_config_overwrites.json" ]]; then
if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
if ! gen3 db setup orthanc; then
@@ -32,7 +32,7 @@ setup_database_and_config() {
# "SslCertificate": ""
cat - > "$secretsFolder/orthanc_config_overwrites.json" < /dev/null 2>&1; then
+ gen3_log_info "orthanc-s3-g3auto secret already configured"
+ return 0
+ fi
+ if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then
+ gen3_log_err "skipping db setup in non-adminvm environment"
+ return 0
+ fi
+
+ # Setup config files that dicom-server consumes
+ local secretsFolder
+ secretsFolder="$(gen3_secrets_folder)/g3auto/orthanc-s3"
+ if [[ ! -f "$secretsFolder/orthanc_config_overwrites.json" ]]; then
+ if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
+ if ! gen3 db setup orthanc-s3; then
+ gen3_log_err "Failed setting up orthanc database for dicom-server"
+ return 1
+ fi
+ fi
+
+ ref_hostname="${hostname//\./-}"
+ bucketname="${ref_hostname}-orthanc-storage"
+ awsuser="${ref_hostname}-orthanc"
+
+ if [[ ! -f "$secretsFolder/s3creds.json" ]]; then
+ gen3 s3 create "${bucketname}"
+ gen3 awsuser create "${awsuser}"
+ gen3 s3 attach-bucket-policy "${bucketname}" --read-write --user-name "${awsuser}"
+
+ user=$(gen3 secrets decode "${awsuser}"-g3auto awsusercreds.json)
+ key_id=$(jq -r .id <<< "$user")
+ access_key=$(jq -r .secret <<< "$user")
+
+ cat - > "$secretsFolder/s3creds.json" < "$secretsFolder/orthanc_config_overwrites.json" < /dev/null 2>&1; then
+ export DICOM_SERVER_URL="/dicom-server"
+ gen3_log_info "attaching ohif viewer to old dicom-server (orthanc w/ aurora)"
+ fi
+
+ if g3k_manifest_lookup .versions.orthanc > /dev/null 2>&1; then
+ export DICOM_SERVER_URL="/orthanc"
+ gen3_log_info "attaching ohif viewer to new dicom-server (orthanc w/ s3)"
+ fi
+
+ envsubst <"${GEN3_HOME}/kube/services/ohif-viewer/app-config.js" > "$secretsFolder/app-config.js"
+
+ gen3 secrets sync 'setup orthanc-s3-g3auto secrets'
+}
+
+if ! setup_database_and_config; then
+ gen3_log_err "kube-setup-dicom bailing out - database/config failed setup"
+ exit 1
+fi
+
+gen3 roll orthanc
+g3kubectl apply -f "${GEN3_HOME}/kube/services/orthanc/orthanc-service.yaml"
+
+cat < /dev/null; then
+ ecrRoleArn=$(g3kubectl get configmap manifest-global -o jsonpath={.data.ecr-access-job-role-arn})
+ fi
+ if [ -z "$ecrRoleArn" ]; then
+ gen3_log_err "Missing 'global.ecr-access-job-role-arn' configuration in manifest.json"
+ return 1
+ fi
+
+ local saName="ecr-access-job-sa"
+ if ! g3kubectl get sa "$saName" > /dev/null 2>&1; then
+ tempFile="ecr-access-job-policy.json"
+ cat - > $tempFile </dev/null 2>&1; then
-# echo "fence-cleanup-expired-ga4gh-info being added as a cronjob b/c fence >= 6.0.0 or 2022.04"
-# gen3 job cron fence-cleanup-expired-ga4gh-info "*/5 * * * *"
-# fi
-#
-# # Setup visa update cronjob
-# if g3kubectl get cronjob fence-visa-update >/dev/null 2>&1; then
-# echo "fence-visa-update being added as a cronjob b/c fence >= 6.0.0 or 2022.04"
-# gen3 job cron fence-visa-update "30 * * * *"
-# fi
-# fi
+if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.07"; then
+ # Setup db cleanup cronjob
+ if ! g3kubectl get cronjob fence-cleanup-expired-ga4gh-info >/dev/null 2>&1; then
+ echo "fence-cleanup-expired-ga4gh-info being added as a cronjob b/c fence >= 6.0.0 or 2022.07"
+ gen3 job cron fence-cleanup-expired-ga4gh-info "*/5 * * * *"
+ fi
+
+ # Extract the value of ENABLE_VISA_UPDATE_CRON from the configmap manifest-fence (fence-config-public.yaml)
+ ENABLE_VISA_UPDATE_CRON=$(kubectl get cm manifest-fence -o=jsonpath='{.data.fence-config-public\.yaml}' | yq -r .ENABLE_VISA_UPDATE_CRON)
+
+ # Delete the fence-visa-update cronjob if ENABLE_VISA_UPDATE_CRON is set to false or not set or null in the configmap manifest-fence
+ if [[ "$ENABLE_VISA_UPDATE_CRON" == "false" ]] || [[ "$ENABLE_VISA_UPDATE_CRON" == "null" ]] || [[ -z "$ENABLE_VISA_UPDATE_CRON" ]]; then
+ echo "Deleting fence-visa-update cronjob"
+ kubectl delete cronjob fence-visa-update
+ elif [[ "$ENABLE_VISA_UPDATE_CRON" == "true" ]]; then
+ if ! g3kubectl get cronjob fence-visa-update >/dev/null 2>&1; then
+ echo "fence-visa-update being added as a cronjob b/c fence >= 6.0.0 or 2022.07"
+ gen3 job cron fence-visa-update "30 * * * *"
+ fi
+ else
+ echo "ENABLE_VISA_UPDATE_CRON has an unexpected value in the configmap manifest-fence. Skipping fence-visa-update cronjob setup."
+ fi
+fi
+
+# add cronjob for removing expired OIDC clients for required fence versions
+if isServiceVersionGreaterOrEqual "fence" "6.2.0" "2023.01"; then
+ if ! g3kubectl get cronjob fence-delete-expired-clients >/dev/null 2>&1; then
+ echo "fence-delete-expired-clients being added as a cronjob b/c fence >= 6.2.0 or 2023.01"
+ gen3 job cron fence-delete-expired-clients "0 7 * * *"
+ fi
+fi
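(Editorial note) A quick way to confirm which fence cronjobs ended up scheduled after this logic runs:

    g3kubectl get cronjobs | grep -E 'fence-(cleanup-expired-ga4gh-info|visa-update|delete-expired-clients)'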
diff --git a/gen3/bin/kube-setup-fluentd.sh b/gen3/bin/kube-setup-fluentd.sh
index 81fb0d2f6..02214be9e 100644
--- a/gen3/bin/kube-setup-fluentd.sh
+++ b/gen3/bin/kube-setup-fluentd.sh
@@ -25,11 +25,11 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
export KUBECTL_NAMESPACE=logging
  # let's check the version of fluentd, and use the right configuration
- # as of 2020-05-06 the latest version is v1.10.2
- if [ ${fluentdVersion} == "v1.10.2-debian-cloudwatch-1.0" ];
+ # if we are using newer versions of fluentd, assume we are using containerd which needs the newer config
+ if [ ${fluentdVersion} == "v1.15.3-debian-cloudwatch-1.0" ];
then
fluentdConfigmap="${XDG_RUNTIME_DIR}/gen3.conf"
- cat ${GEN3_HOME}/kube/services/fluentd/gen3-1.10.2.conf | tee ${fluentdConfigmap} > /dev/null
+ cat ${GEN3_HOME}/kube/services/fluentd/gen3-1.15.3.conf | tee ${fluentdConfigmap} > /dev/null
gen3 update_config fluentd-gen3 "${fluentdConfigmap}"
rm ${fluentdConfigmap}
else
@@ -45,10 +45,16 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
if g3kubectl --namespace=logging get daemonset fluentd > /dev/null 2>&1; then
g3kubectl "--namespace=logging" delete daemonset fluentd
fi
- (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f -
+ export clusterversion=`kubectl version --short -o json | jq -r .serverVersion.minor`
+ if [ "${clusterversion}" = "24+" ]; then
+ (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd-eks-1.24.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f -
+ else
+ (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f -
+ (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd-karpenter.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f -
+ fi
# We need this serviceaccount to be in the default namespace for the job and cronjob to properly work
g3kubectl apply -f "${GEN3_HOME}/kube/services/fluentd/fluent-jobs-serviceaccount.yaml" -n default
- if [ ${fluentdVersion} == "v1.10.2-debian-cloudwatch-1.0" ];
+ if [ ${fluentdVersion} == "v1.15.3-debian-cloudwatch-1.0" ];
then
(
unset KUBECTL_NAMESPACE
diff --git a/gen3/bin/kube-setup-gen3-discovery-ai.sh b/gen3/bin/kube-setup-gen3-discovery-ai.sh
new file mode 100644
index 000000000..44a472a74
--- /dev/null
+++ b/gen3/bin/kube-setup-gen3-discovery-ai.sh
@@ -0,0 +1,154 @@
+#!/bin/bash
+#
+# Deploy the gen3-discovery-ai service
+#
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+# NOTE: no db for this service yet, but we'll likely need it in the future
+setup_database() {
+ gen3_log_info "setting up gen3-discovery-ai service ..."
+
+ if g3kubectl describe secret gen3-discovery-ai-g3auto > /dev/null 2>&1; then
+ gen3_log_info "gen3-discovery-ai-g3auto secret already configured"
+ return 0
+ fi
+ if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then
+ gen3_log_err "skipping db setup in non-adminvm environment"
+ return 0
+ fi
+ # Setup .env file that gen3-discovery-ai service consumes
+ if [[ ! -f "$secretsFolder/gen3-discovery-ai.env" || ! -f "$secretsFolder/base64Authz.txt" ]]; then
+ local secretsFolder="$(gen3_secrets_folder)/g3auto/gen3-discovery-ai"
+
+ if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
+ if ! gen3 db setup gen3-discovery-ai; then
+ gen3_log_err "Failed setting up database for gen3-discovery-ai service"
+ return 1
+ fi
+ fi
+ if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
+ gen3_log_err "dbcreds not present in Gen3Secrets/"
+ return 1
+ fi
+
+ # go ahead and rotate the password whenever we regen this file
+ local password="$(gen3 random)"
+ cat - > "$secretsFolder/gen3-discovery-ai.env" < "$secretsFolder/base64Authz.txt"
+ fi
+ gen3 secrets sync 'setup gen3-discovery-ai-g3auto secrets'
+}
+
+if ! g3k_manifest_lookup '.versions."gen3-discovery-ai"' 2> /dev/null; then
+ gen3_log_info "kube-setup-gen3-discovery-ai exiting - gen3-discovery-ai service not in manifest"
+ exit 0
+fi
+
+# There's no db for this service *yet*
+#
+# if ! setup_database; then
+# gen3_log_err "kube-setup-gen3-discovery-ai bailing out - database failed setup"
+# exit 1
+# fi
+
+setup_storage() {
+ local saName="gen3-discovery-ai-sa"
+ g3kubectl create sa "$saName" > /dev/null 2>&1 || true
+
+ local secret
+ local secretsFolder="$(gen3_secrets_folder)/g3auto/gen3-discovery-ai"
+
+ secret="$(g3kubectl get secret gen3-discovery-ai-g3auto -o json 2> /dev/null)"
+ local hasStorageCfg
+ hasStorageCfg=$(jq -r '.data | has("storage_config.json")' <<< "$secret")
+
+ if [ "$hasStorageCfg" = "false" ]; then
+ gen3_log_info "setting up storage for gen3-discovery-ai service"
+ #
+ # gen3-discovery-ai-g3auto secret still does not exist
+ # we need to setup an S3 bucket and IAM creds
+ # let's avoid creating multiple buckets for different
+ # deployments to the same k8s cluster (dev, etc)
+ #
+ local bucketName
+ local accountNumber
+ local environment
+
+ if ! accountNumber="$(aws sts get-caller-identity --output text --query 'Account')"; then
+      gen3_log_err "could not determine account number"
+ return 1
+ fi
+
+ gen3_log_info "accountNumber: ${accountNumber}"
+
+ if ! environment="$(g3kubectl get configmap manifest-global -o json | jq -r .data.environment)"; then
+ gen3_log_err "could not determine environment from manifest-global - bailing out of gen3-discovery-ai setup"
+ return 1
+ fi
+
+ gen3_log_info "environment: ${environment}"
+
+ # try to come up with a unique but composable bucket name
+ bucketName="gen3-discovery-ai-${accountNumber}-${environment//_/-}"
+
+ gen3_log_info "bucketName: ${bucketName}"
+
+ if aws s3 ls --page-size 1 "s3://${bucketName}" > /dev/null 2>&1; then
+ gen3_log_info "${bucketName} s3 bucket already exists - probably in use by another namespace - copy the creds from there to $(gen3_secrets_folder)/g3auto/gen3-discovery-ai"
+ # continue on ...
+ elif ! gen3 s3 create "${bucketName}"; then
+ gen3_log_err "maybe failed to create bucket ${bucketName}, but maybe not, because the terraform script is flaky"
+ fi
+
+ local hostname
+ hostname="$(gen3 api hostname)"
+ jq -r -n --arg bucket "${bucketName}" --arg hostname "${hostname}" '.bucket=$bucket | .prefix=$hostname' > "${secretsFolder}/storage_config.json"
+ gen3 secrets sync 'setup gen3-discovery-ai credentials'
+
+ local roleName
+ roleName="$(gen3 api safe-name gen3-discovery-ai)" || return 1
+
+ if ! gen3 awsrole info "$roleName" > /dev/null; then # setup role
+ bucketName="$( (gen3 secrets decode 'gen3-discovery-ai-g3auto' 'storage_config.json' || echo ERROR) | jq -r .bucket)" || return 1
+ gen3 awsrole create "$roleName" "$saName" || return 1
+ gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name "${roleName}"
+ # try to give the gitops role read/write permissions on the bucket
+ local gitopsRoleName
+ gitopsRoleName="$(gen3 api safe-name gitops)"
+ gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name "${gitopsRoleName}"
+ fi
+ fi
+
+ return 0
+}
+
+if ! setup_storage; then
+ gen3_log_err "kube-setup-gen3-discovery-ai bailing out - storage failed setup"
+ exit 1
+fi
+
+gen3_log_info "Setup complete, syncing configuration to bucket"
+
+bucketName="$( (gen3 secrets decode 'gen3-discovery-ai-g3auto' 'storage_config.json' || echo ERROR) | jq -r .bucket)" || exit 1
+aws s3 sync "$(dirname $(g3k_manifest_path))/gen3-discovery-ai/knowledge" "s3://$bucketName" --delete
+
+gen3 roll gen3-discovery-ai
+g3kubectl apply -f "${GEN3_HOME}/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml"
+
+if [[ -z "$GEN3_ROLL_ALL" ]]; then
+ gen3 kube-setup-networkpolicy
+ gen3 kube-setup-revproxy
+fi
+
+gen3_log_info "The gen3-discovery-ai service has been deployed onto the kubernetes cluster"
+gen3_log_info "test with: curl https://commons-host/ai"
diff --git a/gen3/bin/kube-setup-google.sh b/gen3/bin/kube-setup-google.sh
index 31d487b85..d8bd54166 100644
--- a/gen3/bin/kube-setup-google.sh
+++ b/gen3/bin/kube-setup-google.sh
@@ -19,15 +19,14 @@ goog_launch() {
local path
# add cronjob for removing cached google access for fence versions
- # supporting Passports to DRS(
- # TODO: WILL UNCOMMENT THIS ONCE FEATURE IN FENCE IS RELEASED
-# if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.02"; then
-# filePath="${GEN3_HOME}/kube/services/jobs/google-delete-expired-access-cronjob.yaml"
-# if [[ -f "$filePath" ]]; then
-# echo "$filePath being added as a cronjob b/c fence >= 6.0.0 or 2022.02"
-# cronList+=("--from-file" "$filePath")
-# fi
-# fi
+ # supporting Passports to DRS
+ if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.07"; then
+ filePath="${GEN3_HOME}/kube/services/jobs/google-delete-expired-access-cronjob.yaml"
+ if [[ -f "$filePath" ]]; then
+ echo "$filePath being added as a cronjob b/c fence >= 6.0.0 or 2022.07"
+ cronList+=("--from-file" "$filePath")
+ fi
+ fi
for path in "${cronList[@]}"; do
gen3 job run "$path"
@@ -42,14 +41,13 @@ goog_stop() {
# add cronjob for removing cached google access for fence versions
# supporting Passports -> DRS
- # TODO: WILL UNCOMMENT THIS ONCE FEATURE IN FENCE IS RELEASED
-# if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.02"; then
-# filePath="${GEN3_HOME}/kube/services/jobs/google-delete-expired-access-cronjob.yaml"
-# if [[ -f "$filePath" ]]; then
-# echo "$filePath being added as a cronjob b/c fence >= 6.0.0 or 2022.02"
-# cronList+=("--from-file" "$filePath")
-# fi
-# fi
+ if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.07"; then
+ filePath="${GEN3_HOME}/kube/services/jobs/google-delete-expired-access-cronjob.yaml"
+ if [[ -f "$filePath" ]]; then
+ echo "$filePath being added as a cronjob b/c fence >= 6.0.0 or 2022.07"
+ cronList+=("--from-file" "$filePath")
+ fi
+ fi
for path in "${cronList[@]}"; do
if jobName="$(gen3 gitops filter "$path" | yq -r .metadata.name)" && [[ -n "$jobName" ]]; then
diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh
index e4d537f2f..dadbbd930 100644
--- a/gen3/bin/kube-setup-hatchery.sh
+++ b/gen3/bin/kube-setup-hatchery.sh
@@ -5,6 +5,44 @@
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
+function CostUsagePolicy() {
+ roleName="$(gen3 api safe-name hatchery-sa)"
+ # Cost Usage Report policy
+ curPolicy="costUsageReportPolicy"
+
+ # Use the AWS CLI to list the inline policies attached to the role and grep for the policy name
+ policyArn=$(aws iam list-role-policies --role-name "$roleName" | grep "$curPolicy")
+
+ # Check whether the policy is already attached (list-role-policies returns policy names, not ARNs)
+ if [ -n "$policyArn" ]; then
+ echo "Policy $curPolicy is attached to the role $roleName."
+ else
+ echo "Policy $curPolicy is NOT attached to the role $roleName."
+ echo "Attaching policy"
+ # Define the policy document
+ policyDocument='{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "VisualEditor0",
+ "Effect": "Allow",
+ "Action": "ce:GetCostAndUsage",
+ "Resource": "*"
+ }
+ ]
+ }'
+
+ # Create an inline policy for the role
+ aws iam put-role-policy --role-name "$roleName" --policy-name "$curPolicy" --policy-document "$policyDocument"
+ if [ $? -eq 0 ]; then
+ echo "Inline policy $curPolicy has been successfully created and attached to the role $roleName."
+ else
+ echo "There was an error creating the inline policy $curPolicy."
+ fi
+
+ fi
+}
+
# Jenkins friendly
export WORKSPACE="${WORKSPACE:-$HOME}"
@@ -20,11 +58,81 @@ gen3 jupyter j-namespace setup
#
(g3k_kv_filter ${GEN3_HOME}/kube/services/hatchery/serviceaccount.yaml BINDING_ONE "name: hatchery-binding1-$namespace" BINDING_TWO "name: hatchery-binding2-$namespace" CURRENT_NAMESPACE "namespace: $namespace" | g3kubectl apply -f -) || true
+function exists_or_create_gen3_license_table() {
+ # Create dynamodb table for gen3-license if it does not exist.
+ TARGET_TABLE="$1"
+ echo "Checking for dynamoDB table: ${TARGET_TABLE}"
-# cron job to distribute licenses if using Stata workspaces
-if [ "$(g3kubectl get configmaps/manifest-hatchery -o yaml | grep "\"image\": .*stata.*")" ];
-then
- gen3 job cron distribute-licenses '* * * * *'
+ FOUND_TABLE=`aws dynamodb list-tables | jq -r .TableNames | jq -c -r '.[]' | grep $TARGET_TABLE`
+ if [ -n "$FOUND_TABLE" ]; then
+ echo "Target table already exists in dynamoDB: $FOUND_TABLE"
+ else
+ echo "Creating table ${TARGET_TABLE}"
+ GSI=`g3kubectl get configmaps/manifest-hatchery -o json | jq -r '.data."license-user-maps-global-secondary-index"'`
+ if [[ -z "$GSI" || "$GSI" == "null" ]]; then
+ echo "Error: No global-secondary-index in configuration"
+ return 0
+ fi
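+ # itemId is the hash key and environment the range key; the GSI keys on (environment, isActive) so active license mappings can be queried per environment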
+ aws dynamodb create-table \
+ --no-cli-pager \
+ --table-name "$TARGET_TABLE" \
+ --attribute-definitions AttributeName=itemId,AttributeType=S \
+ AttributeName=environment,AttributeType=S \
+ AttributeName=isActive,AttributeType=S \
+ --key-schema AttributeName=itemId,KeyType=HASH \
+ AttributeName=environment,KeyType=RANGE \
+ --provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5 \
+ --global-secondary-indexes \
+ "[
+ {
+ \"IndexName\": \"$GSI\",
+ \"KeySchema\": [{\"AttributeName\":\"environment\",\"KeyType\":\"HASH\"},
+ {\"AttributeName\":\"isActive\",\"KeyType\":\"RANGE\"}],
+ \"Projection\":{
+ \"ProjectionType\":\"INCLUDE\",
+ \"NonKeyAttributes\":[\"itemId\",\"userId\",\"licenseId\",\"licenseType\"]
+ },
+ \"ProvisionedThroughput\": {
+ \"ReadCapacityUnits\": 5,
+ \"WriteCapacityUnits\": 3
+ }
+ }
+ ]"
+ fi
+}
+
+TARGET_TABLE=`g3kubectl get configmaps/manifest-hatchery -o json | jq -r '.data."license-user-maps-dynamodb-table"'`
+if [[ -z "$TARGET_TABLE" || "$TARGET_TABLE" == "null" ]]; then
+ echo "No gen3-license table in configuration"
+ # cron job to distribute licenses if using Stata workspaces but not using dynamoDB
+ if [ "$(g3kubectl get configmaps/manifest-hatchery -o yaml | grep "\"image\": .*stata.*")" ];
+ then
+ gen3 job cron distribute-licenses '* * * * *'
+ fi
+else
+ echo "Found gen3-license table in configuration: $TARGET_TABLE"
+ exists_or_create_gen3_license_table "$TARGET_TABLE"
+fi
+
+# if `nextflow-global.imagebuilder-reader-role-arn` is set in hatchery config, allow hatchery
+# to assume the configured role
+imagebuilderRoleArn=$(g3kubectl get configmap manifest-hatchery -o jsonpath={.data.nextflow-global} | jq -r '."imagebuilder-reader-role-arn"')
+assumeImageBuilderRolePolicyBlock=""
+if [ -z "$imagebuilderRoleArn" ]; then
+ gen3_log_info "No 'nexftlow-global.imagebuilder-reader-role-arn' in Hatchery configuration, not granting AssumeRole"
+else
+ gen3_log_info "Found 'nexftlow-global.imagebuilder-reader-role-arn' in Hatchery configuration, granting AssumeRole"
+ assumeImageBuilderRolePolicyBlock=$( cat < /dev/null 2>&1; then
- role_name="${vpc_name}-${saName}-role"
- gen3 awsrole create $role_name $saName
- policyName="hatchery-role-sts"
- policyInfo=$(gen3_aws_run aws iam create-policy --policy-name "$policyName" --policy-document "$policy" --description "Allow hathcery to assume csoc_adminvm role in other accounts, for multi-account workspaces")
+ roleName="$(gen3 api safe-name hatchery-sa)"
+ gen3 awsrole create $roleName $saName
+ policyName="$(gen3 api safe-name hatchery-policy)"
+ policyInfo=$(gen3_aws_run aws iam create-policy --policy-name "$policyName" --policy-document "$policy" --description "Allow hatchery to assume csoc_adminvm role in other accounts and manage dynamodb for multi-account workspaces, and to create resources for nextflow workspaces")
if [ -n "$policyInfo" ]; then
- policyArn="$(jq -e -r '.["Policy"].Arn' <<< "$policyInfo")" || { echo "Cannot get 'Policy.Arn' from output: $policyInfo"; return 1; }
+ policyArn="$(jq -e -r '.["Policy"].Arn' <<< "$policyInfo")" || { echo "Cannot get 'Policy.Arn' from output: $policyInfo"; return 1; }
else
- echo "Unable to create policy $policyName. Assuming it already exists and continuing"
+ echo "Unable to create policy '$policyName'. Assume it already exists and create a new version to update the permissions..."
policyArn=$(gen3_aws_run aws iam list-policies --query "Policies[?PolicyName=='$policyName'].Arn" --output text)
- fi
- gen3_log_info "Attaching policy '${policyName}' to role '${role_name}'"
- gen3 awsrole attach-policy ${policyArn} --role-name ${role_name} --force-aws-cli || exit 1
- gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${role_name} --force-aws-cli || exit 1
+ # there can only be up to 5 versions, so delete old versions (except the current default one)
+ versions="$(gen3_aws_run aws iam list-policy-versions --policy-arn $policyArn | jq -r '.Versions[] | select(.IsDefaultVersion != true) | .VersionId')"
+ versions=(${versions}) # string to array
+ for v in "${versions[@]}"; do
+ echo "Deleting old version '$v'"
+ gen3_aws_run aws iam delete-policy-version --policy-arn $policyArn --version-id $v
+ done
+
+ # create the new version
+ gen3_aws_run aws iam create-policy-version --policy-arn "$policyArn" --policy-document "$policy" --set-as-default
+ fi
+ gen3_log_info "Attaching policy '${policyName}' to role '${roleName}'"
+ gen3 awsrole attach-policy ${policyArn} --role-name ${roleName} --force-aws-cli || exit 1
+ gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${roleName} --force-aws-cli || exit 1
fi
+# function to setup IAM policies for CostUsageReport
+CostUsagePolicy
+
+if [[ -f "$(gen3_secrets_folder)/prisma/apikey.json" ]]; then
+ ACCESSKEYID=$(jq -r .AccessKeyID "$(gen3_secrets_folder)/prisma/apikey.json")
+ SECRETKEY=$(jq -r .SecretKey "$(gen3_secrets_folder)/prisma/apikey.json")
+ if [[ ! -z "$ACCESSKEYID" && ! -z "$SECRETKEY" ]]; then
+ gen3_log_info "Found prisma apikey, creating kubernetes secret so hatchery can do prismacloud stuff.."
+ g3kubectl delete secret prisma-secret --ignore-not-found
+ g3kubectl create secret generic prisma-secret --from-literal=AccessKeyId=$ACCESSKEYID --from-literal=SecretKey=$SECRETKEY
+ fi
+fi
g3kubectl apply -f "${GEN3_HOME}/kube/services/hatchery/hatchery-service.yaml"
gen3 roll hatchery
-gen3 job cron hatchery-reaper '@daily'
\ No newline at end of file
+gen3 job cron hatchery-reaper "*/5 * * * *"
diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh
new file mode 100644
index 000000000..b75470f73
--- /dev/null
+++ b/gen3/bin/kube-setup-ingress.sh
@@ -0,0 +1,354 @@
+#!/bin/bash
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+gen3_load "gen3/lib/kube-setup-init"
+gen3_load "gen3/lib/g3k_manifest"
+
+# Deploy WAF if flag set in manifest
+manifestPath=$(g3k_manifest_path)
+deployWaf="$(jq -r ".[\"global\"][\"waf_enabled\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')"
+
+ctx="$(g3kubectl config current-context)"
+ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")"
+
+scriptDir="${GEN3_HOME}/kube/services/ingress"
+
+gen3_ingress_setup_waf() {
+ gen3_log_info "Starting GPE-312 waf setup"
+ #variable to see if WAF already exists
+ export waf=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains(env.vpc_name)).Name'`
+if [[ -z $waf ]]; then
+ gen3_log_info "Creating Web ACL. This may take a few minutes."
+ aws wafv2 create-web-acl\
+ --name $vpc_name-waf \
+ --scope REGIONAL \
+ --default-action Allow={} \
+ --visibility-config SampledRequestsEnabled=true,CloudWatchMetricsEnabled=true,MetricName=GPE-312WebAclMetrics \
+ --rules file://${GEN3_HOME}/gen3/bin/waf-rules-GPE-312.json \
+ --region us-east-1
+ #Need to sleep to avoid "WAFUnavailableEntityException" error since the waf takes a bit to spin up
+ sleep 300
+else
+ gen3_log_info "WAF already exists. Skipping..."
+fi
+ gen3_log_info "Attaching ACL to ALB."
+ export acl_arn=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains(env.vpc_name)).ARN'`
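+ # derive the ALB name from the ingress ADDRESS hostname (first three dash-separated segments), then look up its ARN and any existing WAF association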
+ export alb_name=`kubectl get ingress gen3-ingress | awk '{print $4}' | tail +2 | sed 's/^\([A-Za-z0-9]*-[A-Za-z0-9]*-[A-Za-z0-9]*\).*/\1/;q'`
+ export alb_arn=`aws elbv2 describe-load-balancers --name $alb_name | yq -r .LoadBalancers[0].LoadBalancerArn`
+ export association=`aws wafv2 list-resources-for-web-acl --web-acl-arn $acl_arn | grep $alb_arn| sed -e 's/^[ \t]*//' | sed -e 's/^"//' -e 's/"$//'`
+ #variable to see if the association already exists
+ echo "acl_arn: $acl_arn"
+ echo "alb_arn: $alb_arn"
+if [[ $association != $alb_arn ]]; then
+ aws wafv2 associate-web-acl\
+ --web-acl-arn $acl_arn \
+ --resource-arn $alb_arn \
+ --region us-east-1
+
+ gen3_log_info "Add ACL arn annotation to ALB ingress"
+ kubectl annotate ingress gen3-ingress "alb.ingress.kubernetes.io/wafv2-acl-arn=$acl_arn"
+else
+ gen3_log_info "ALB is already associated with ACL. Skipping..."
+fi
+}
+
+
+gen3_ingress_setup_role() {
+# https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/deploy/installation/
+# https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.4.1/docs/install/iam_policy.json
+# only do this if we are running in the default namespace
+ local saName="aws-load-balancer-controller"
+ local roleName=$(gen3 api safe-name ingress)
+ local policyName=$(gen3 api safe-name ingress-policy)
+ local ingressPolicy="$(mktemp "$XDG_RUNTIME_DIR/ingressPolicy.json_XXXXXX")"
+ local arPolicyFile="$(mktemp "$XDG_RUNTIME_DIR/arPolicy.json_XXXXXX")"
+
+ # Create an inline policy for the ingress-controller
+ cat - > "$ingressPolicy" < /dev/null; then # setup role
+ gen3_log_info "creating IAM role for ingress: $roleName, linking to sa $saName"
+ gen3 awsrole create "$roleName" "$saName" "kube-system" || return 1
+ aws iam put-role-policy --role-name "$roleName" --policy-document file://${ingressPolicy} --policy-name "$policyName" 1>&2
+ gen3 awsrole sa-annotate $saName $roleName kube-system
+ else
+ # update the annotation - just to be thorough
+ gen3 awsrole sa-annotate "$saName" "$roleName" kube-system
+ fi
+}
+
+gen3_ingress_deploy_helm_chart() {
+ kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller/crds?ref=master"
+ if (! helm status aws-load-balancer-controller -n kube-system > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then
+ helm repo add eks https://aws.github.io/eks-charts 2> >(grep -v 'This is insecure' >&2)
+ helm repo update 2> >(grep -v 'This is insecure' >&2)
+
+ # # TODO: Move to values.yaml file
+ helm upgrade --install aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=$(gen3 api environment) --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller 2> >(grep -v 'This is insecure' >&2)
+ else
+ gen3_log_info "kube-setup-ingress exiting - ingress already deployed, use --force to redeploy"
+ fi
+}
+
+if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then
+ # Create role/SA for the alb's
+ gen3_ingress_setup_role
+ # Deploy the aws-load-balancer-controller helm chart and upgrade if --force flag applied
+ gen3_ingress_deploy_helm_chart $1
+else
+ if [[ -z $(kubectl get sa -n kube-system | grep aws-load-balancer-controller) ]]; then
+ gen3_log_err "Please run this in the default namespace first to setup the necessary roles"
+ exit 1
+ fi
+fi
+
+
+gen3_log_info "Applying ingress resource"
+export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}')
+g3kubectl apply -f "${GEN3_HOME}/kube/services/revproxy/revproxy-service.yaml"
+envsubst <$scriptDir/ingress.yaml | g3kubectl apply -f -
+if [ "$deployWaf" = true ]; then
+ gen3_ingress_setup_waf
+fi
diff --git a/gen3/bin/kube-setup-jenkins2.sh b/gen3/bin/kube-setup-jenkins2.sh
new file mode 100644
index 000000000..f5233f978
--- /dev/null
+++ b/gen3/bin/kube-setup-jenkins2.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+#
+# Just a little helper for deploying jenkins onto k8s the first time
+#
+
+set -e
+
+export WORKSPACE="${WORKSPACE:-$HOME}"
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+gen3 kube-setup-secrets
+
+#
+# Assume Jenkins should use 'jenkins' profile credentials in "${WORKSPACE}"/.aws/credentials
+#
+aws_access_key_id="$(aws configure get jenkins.aws_access_key_id)"
+aws_secret_access_key="$(aws configure get jenkins.aws_secret_access_key)"
+google_acct1_email="$(jq -r '.jenkins.google_acct1.email' < $(gen3_secrets_folder)/creds.json)"
+google_acct1_password="$(jq -r '.jenkins.google_acct1.password' < $(gen3_secrets_folder)/creds.json)"
+google_acct2_email="$(jq -r '.jenkins.google_acct2.email' < $(gen3_secrets_folder)/creds.json)"
+google_acct2_password="$(jq -r '.jenkins.google_acct2.password' < $(gen3_secrets_folder)/creds.json)"
+
+if [ -z "$aws_access_key_id" -o -z "$aws_secret_access_key" ]; then
+ gen3_log_err 'not configuring jenkins - could not extract secrets from aws configure'
+ exit 1
+fi
+if [[ -z "$google_acct1_email" || -z "$google_acct1_password" || -z "$google_acct2_email" || -z "$google_acct2_password" ]]; then
+ gen3_log_err "missing google credentials in '.jenkins' of creds.json"
+ exit 1
+fi
+
+if ! g3kubectl get secrets jenkins-secret > /dev/null 2>&1; then
+ # make it easy to rerun kube-setup-jenkins.sh
+ g3kubectl create secret generic jenkins-secret "--from-literal=aws_access_key_id=$aws_access_key_id" "--from-literal=aws_secret_access_key=$aws_secret_access_key"
+fi
+if ! g3kubectl get secrets google-acct1 > /dev/null 2>&1; then
+ g3kubectl create secret generic google-acct1 "--from-literal=email=${google_acct1_email}" "--from-literal=password=${google_acct1_password}"
+fi
+if ! g3kubectl get secrets google-acct2 > /dev/null 2>&1; then
+ g3kubectl create secret generic google-acct2 "--from-literal=email=${google_acct2_email}" "--from-literal=password=${google_acct2_password}"
+fi
+
+if ! g3kubectl get storageclass gp2 > /dev/null 2>&1; then
+ g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/10storageclass.yaml"
+fi
+if ! g3kubectl get persistentvolumeclaim datadir-jenkins > /dev/null 2>&1; then
+ g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/00pvc.yaml"
+fi
+
+# Note: jenkins service account is configured by `kube-setup-roles`
+gen3 kube-setup-roles
+# Note: only the 'default' namespace jenkins-service account gets a cluster rolebinding
+g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/clusterrolebinding-devops.yaml"
+
+# Note: requires Jenkins entry in cdis-manifest
+gen3 roll jenkins2
+gen3 roll jenkins2-worker
+gen3 roll jenkins2-ci-worker
+
+#
+# Get the ARN of the SSL certificate for the commons -
+# We'll optimistically assume it's a wildcard cert that
+# is appropriate to also attach to the jenkins ELB
+#
+export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}')
+if [[ ! -z $ARN ]]; then
+ envsubst <"${GEN3_HOME}/kube/services/jenkins/jenkins-service.yaml" | g3kubectl apply -f -
+else
+ gen3_log_info "Global configmap not configured - not launching service (require SSL cert ARN)"
+fi
diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh
new file mode 100644
index 000000000..0a743f7ed
--- /dev/null
+++ b/gen3/bin/kube-setup-karpenter.sh
@@ -0,0 +1,270 @@
+#!/bin/bash
+
+#set -e
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+ctx="$(g3kubectl config current-context)"
+ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")"
+
+gen3_deploy_karpenter() {
+ # Only do cluster level changes in the default namespace to prevent conflicts
+ if [[ ("$ctxNamespace" == "default" || "$ctxNamespace" == "null") ]]; then
+ gen3_log_info "Deploying karpenter"
+ # If the karpenter namespace doesn't exist or the force flag isn't in place then deploy
+ if [[ ( -z $(g3kubectl get namespaces | grep karpenter) || $FORCE == "true" ) ]]; then
+ gen3_log_info "Ensuring that the spot instance service linked role is setup"
+ # Ensure the spot instance service linked role is setup
+ # It is required for running spot instances
+ #### Uncomment this when we fix the sqs helper to allow for usage by more than one service
+ #gen3_create_karpenter_sqs_eventbridge
+ aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true
+ if g3k_config_lookup .global.karpenter_version; then
+ karpenter=$(g3k_config_lookup .global.karpenter_version)
+ fi
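+ # pin a default karpenter chart version compatible with the detected EKS minor version, unless global.karpenter_version set it above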
+ export clusterversion=`kubectl version -o json | jq -r .serverVersion.minor`
+ if [ "${clusterversion}" = "28+" ]; then
+ karpenter=${karpenter:-v0.32.9}
+ elif [ "${clusterversion}" = "25+" ]; then
+ karpenter=${karpenter:-v0.27.0}
+ elif [ "${clusterversion}" = "24+" ]; then
+ karpenter=${karpenter:-v0.24.0}
+ else
+ karpenter=${karpenter:-v0.32.9}
+ fi
+ local queue_name="$(gen3 api safe-name karpenter-sqs)"
+ echo '{
+ "Statement": [
+ {
+ "Action": [
+ "ssm:GetParameter",
+ "iam:PassRole",
+ "iam:*InstanceProfile",
+ "ec2:DescribeImages",
+ "ec2:RunInstances",
+ "ec2:DescribeSubnets",
+ "ec2:DescribeSecurityGroups",
+ "ec2:DescribeLaunchTemplates",
+ "ec2:DescribeInstances",
+ "ec2:DescribeInstanceTypes",
+ "ec2:DescribeInstanceTypeOfferings",
+ "ec2:DescribeAvailabilityZones",
+ "ec2:DeleteLaunchTemplate",
+ "ec2:CreateTags",
+ "ec2:CreateLaunchTemplate",
+ "ec2:CreateFleet",
+ "ec2:DescribeSpotPriceHistory",
+ "pricing:GetProducts"
+ ],
+ "Effect": "Allow",
+ "Resource": "*",
+ "Sid": "Karpenter"
+ },
+ {
+ "Action": [
+ "sqs:DeleteMessage",
+ "sqs:GetQueueAttributes",
+ "sqs:GetQueueUrl",
+ "sqs:ReceiveMessage"
+ ],
+ "Effect": "Allow",
+ "Resource": "arn:aws:sqs:*:'$(aws sts get-caller-identity --output text --query "Account")':karpenter-sqs-'$(echo vpc_name)'",
+ "Sid": "Karpenter2"
+ },
+ {
+ "Action": "ec2:TerminateInstances",
+ "Condition": {
+ "StringLike": {
+ "ec2:ResourceTag/Name": "*karpenter*"
+ }
+ },
+ "Effect": "Allow",
+ "Resource": "*",
+ "Sid": "ConditionalEC2Termination"
+ },
+ {
+ "Sid": "VisualEditor0",
+ "Effect": "Allow",
+ "Action": [
+ "kms:*"
+ ],
+ "Resource": "*"
+ }
+ ],
+ "Version": "2012-10-17"
+ }' > $XDG_RUNTIME_DIR/controller-policy.json
+
+ gen3_log_info "Creating karpenter namespace"
+ g3kubectl create namespace karpenter 2> /dev/null || true
+
+ gen3_log_info "Creating karpenter AWS role and k8s service accounts"
+ gen3 awsrole create "karpenter-controller-role-$vpc_name" karpenter "karpenter" || true
+ gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter || true
+ # Have to delete SA because helm chart will create the SA and there will be a conflict
+
+ gen3_log_info "Have to delete SA because helm chart will create the SA and there will be a conflict"
+ #g3kubectl delete sa karpenter -n karpenter
+
+ gen3_log_info "aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://$XDG_RUNTIME_DIR/controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true"
+ aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://$XDG_RUNTIME_DIR/controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true
+ gen3_log_info "Need to tag the subnets/sg's so that karpenter can discover them automatically"
+ # Need to tag the subnets/sg's so that karpenter can discover them automatically
+ subnets=$(aws ec2 describe-subnets --filter 'Name=tag:Environment,Values='$vpc_name'' 'Name=tag:Name,Values=eks_private_*' --query 'Subnets[].SubnetId' --output text)
+ # Append secondary CIDR block subnets to be tagged as well; if none are found, nothing is appended to the list
+ subnets+=" $(aws ec2 describe-subnets --filter 'Name=tag:Environment,Values='$vpc_name'' 'Name=tag:Name,Values=eks_secondary_cidr_subnet_*' --query 'Subnets[].SubnetId' --output text)"
+ security_groups=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg,ssh_eks_'$vpc_name'' --query 'SecurityGroups[].GroupId' --output text) || true
+ security_groups_jupyter=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-jupyter,ssh_eks_'$vpc_name'-nodepool-jupyter' --query 'SecurityGroups[].GroupId' --output text) || true
+ security_groups_workflow=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-workflow,ssh_eks_'$vpc_name'-nodepool-workflow' --query 'SecurityGroups[].GroupId' --output text) || true
+ cluster_endpoint="$(aws eks describe-cluster --name ${vpc_name} --query "cluster.endpoint" --output text)"
+
+ aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${security_groups} || true
+ aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${subnets} || true
+ aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-jupyter" --resources ${security_groups_jupyter} || true
+ aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-workflow" --resources ${security_groups_workflow} || true
+ echo '{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Condition": {
+ "ArnLike": {
+ "aws:SourceArn": "arn:aws:eks:us-east-1:'$(aws sts get-caller-identity --output text --query "Account")':fargateprofile/'$(echo $vpc_name)'/*"
+ }
+ },
+ "Principal": {
+ "Service": "eks-fargate-pods.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+ }' > $XDG_RUNTIME_DIR/fargate-policy.json
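+ # run karpenter's pods on Fargate (the profile selects the karpenter namespace) so the controller does not depend on the nodes it manages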
+ aws iam create-role --role-name AmazonEKSFargatePodExecutionRole-${vpc_name} --assume-role-policy-document file://"$XDG_RUNTIME_DIR/fargate-policy.json" || true
+ aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy --role-name AmazonEKSFargatePodExecutionRole-${vpc_name} || true
+ # Wait for IAM changes to take effect
+ sleep 15
+ aws eks create-fargate-profile --fargate-profile-name karpenter-profile --cluster-name $vpc_name --pod-execution-role-arn arn:aws:iam::$(aws sts get-caller-identity --output text --query "Account"):role/AmazonEKSFargatePodExecutionRole-${vpc_name} --subnets $subnets --selectors '{"namespace": "karpenter"}' || true
+ gen3_log_info "Installing karpenter using helm"
+ helm template karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version ${karpenter} --namespace "karpenter" | g3kubectl apply -f -
+ helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${karpenter} --namespace karpenter --wait \
+ --set settings.aws.defaultInstanceProfile=${vpc_name}_EKS_workers \
+ --set settings.aws.clusterEndpoint="${cluster_endpoint}" \
+ --set settings.aws.clusterName=${vpc_name} \
+ --set settings.aws.interruptionQueueName="${queue_name}" \
+ --set serviceAccount.name=karpenter \
+ --set serviceAccount.create=false \
+ --set controller.env[0].name=AWS_REGION \
+ --set controller.env[0].value=us-east-1 \
+ --set controller.resources.requests.memory="2Gi" \
+ --set controller.resources.requests.cpu="2" \
+ --set controller.resources.limits.memory="2Gi" \
+ --set controller.resources.limits.cpu="2"
+ fi
+ gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter
+ gen3_log_info "Remove cluster-autoscaler"
+ gen3 kube-setup-autoscaler --remove
+ # Ensure that fluentd is updated if karpenter is deployed to prevent containerd logging issues
+ gen3 kube-setup-fluentd --force
+ gen3_update_karpenter_configs
+ fi
+}
+
+gen3_update_karpenter_configs() {
+ # deploy the default node templates and provisioners unless the manifest supplies its own karpenter manifests
+ if [[ -d $(g3k_manifest_init)/$(g3k_hostname)/manifests/karpenter ]]; then
+ gen3_log_info "karpenter manifests found in the manifest folder - applying those instead of the default node templates and provisioners"
+ # apply each manifest in the karpenter folder
+ for manifest in $(g3k_manifest_init)/$(g3k_hostname)/manifests/karpenter/*.yaml; do
+ g3k_kv_filter $manifest VPC_NAME ${vpc_name} | g3kubectl apply -f -
+ done
+ else
+ gen3_log_info "Adding node templates for karpenter"
+ g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateDefault.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f -
+ g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateJupyter.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f -
+ g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateWorkflow.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f -
+ if [[ $ARM ]]; then
+ gen3_log_info "Deploy binfmt daemonset so the emulation tools run on arm nodes"
+ # Deploy binfmt daemonset so the emulation tools run on arm nodes
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/binfmt.yaml
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerArm.yaml
+ else
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerDefault.yaml
+ fi
+ if [[ $GPU ]]; then
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerGPU.yaml
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerGPUShared.yaml
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/nodeTemplateGPU.yaml
+ helm repo add nvdp https://nvidia.github.io/k8s-device-plugin
+ helm repo update
+ helm upgrade -i nvdp nvdp/nvidia-device-plugin \
+ --namespace nvidia-device-plugin \
+ --create-namespace -f ${GEN3_HOME}/kube/services/karpenter/nvdp.yaml
+ fi
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerJupyter.yaml
+ g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerWorkflow.yaml
+ fi
+}
+
+gen3_create_karpenter_sqs_eventbridge() {
+ local queue_name="$(gen3 api safe-name karpenter-sqs)"
+ local eventbridge_rule_name="karpenter-eventbridge-${vpc_name}"
+ gen3 sqs create-queue-if-not-exist karpenter-sqs >> "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json"
+ local queue_url=$(cat "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" | jq -r '.url')
+ local queue_arn=$(cat "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" | jq -r '.arn')
+ # Create eventbridge rules
+ aws events put-rule --name "Karpenter-${vpc_name}-SpotInterruptionRule" --event-pattern '{"source": ["aws.ec2"], "detail-type": ["EC2 Spot Instance Interruption Warning"]}' 2> /dev/null
+ aws events put-rule --name "Karpenter-${vpc_name}-RebalanceRule" --event-pattern '{"source": ["aws.ec2"], "detail-type": ["EC2 Instance Rebalance Recommendation"]}' 2> /dev/null
+ aws events put-rule --name "Karpenter-${vpc_name}-ScheduledChangeRule" --event-pattern '{"source": ["aws.health"], "detail-type": ["AWS Health Event"]}' 2> /dev/null
+ aws events put-rule --name "Karpenter-${vpc_name}-InstanceStateChangeRule" --event-pattern '{"source": ["aws.ec2"], "detail-type": ["EC2 Instance State-change Notification"]}' 2> /dev/null
+ # Add SQS as a target for the eventbridge rules
+ aws events put-targets --rule "Karpenter-${vpc_name}-SpotInterruptionRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true
+ aws events put-targets --rule "Karpenter-${vpc_name}-RebalanceRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true
+ aws events put-targets --rule "Karpenter-${vpc_name}-ScheduledChangeRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true
+ aws events put-targets --rule "Karpenter-${vpc_name}-InstanceStateChangeRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true
+ aws sqs set-queue-attributes --queue-url "${queue_url}" --attributes "Policy"="$(aws sqs get-queue-attributes --queue-url "${queue_url}" --attribute-names "Policy" --query "Attributes.Policy" --output text | jq -r '.Statement += [{"Sid": "AllowKarpenter", "Effect": "Allow", "Principal": {"Service": ["sqs.amazonaws.com","events.amazonaws.com"]}, "Action": "sqs:SendMessage", "Resource": "'${queue_arn}'"}]')" 2> /dev/null || true
+ #g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/karpenter-global-settings.yaml SQS_NAME ${queue_name} | g3kubectl apply -f -
+}
+
+gen3_remove_karpenter() {
+ aws iam delete-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-name "karpenter-controller-policy" 1>&2 || true
+ aws iam delete-role --role-name "karpenter-controller-role-$vpc_name"
+ helm uninstall karpenter -n karpenter
+ g3kubectl delete namespace karpenter
+ gen3 kube-setup-autoscaler
+}
+
+#---------- main
+
+if [[ -z "$GEN3_SOURCE_ONLY" ]]; then
+ # Support sourcing this file for test suite
+ command="$1"
+ shift
+ case "$command" in
+ "deploy")
+ for flag in $@; do
+ if [[ $# -gt 0 ]]; then
+ flag="$1"
+ shift
+ fi
+ case "$flag" in
+ "--force")
+ FORCE=true
+ ;;
+ "--arm")
+ ARM=true
+ ;;
+ esac
+ done
+ gen3_deploy_karpenter
+ ;;
+ "remove")
+ gen3_remove_karpenter
+ ;;
+ "update")
+ gen3_update_karpenter_configs
+ ;;
+ *)
+ gen3_deploy_karpenter
+ ;;
+ esac
+fi
diff --git a/gen3/bin/kube-setup-kayako-wrapper.sh b/gen3/bin/kube-setup-kayako-wrapper.sh
new file mode 100644
index 000000000..59abc829c
--- /dev/null
+++ b/gen3/bin/kube-setup-kayako-wrapper.sh
@@ -0,0 +1,25 @@
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/lib/kube-setup-init"
+
+[[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets
+
+if ! g3kubectl get secrets/kayako-g3auto > /dev/null 2>&1; then
+ gen3_log_err "No kayako-g3auto secret, not rolling Kayako wrapper"
+ return 1
+fi
+
+if ! gen3 secrets decode kayako-g3auto kayako_api_key.txt> /dev/null 2>&1; then
+ gen3_log_err "No Kayako api key present in kayako-g3auto secret, not rolling Kayako wrapper"
+ return 1
+fi
+
+if ! gen3 secrets decode kayako-g3auto kayako_secret_key.txt> /dev/null 2>&1; then
+ gen3_log_err "No Kayako secret key present in kayako-g3auto secret, not rolling Kayako wrapper"
+ return 1
+fi
+
+
+g3kubectl apply -f "${GEN3_HOME}/kube/services/kayako-wrapper/kayako-wrapper-service.yaml"
+gen3 roll kayako-wrapper
+
+gen3_log_info "The kayako wrapper service has been deployed onto the kubernetes cluster"
diff --git a/gen3/bin/kube-setup-kubecost.sh b/gen3/bin/kube-setup-kubecost.sh
new file mode 100644
index 000000000..2166f051c
--- /dev/null
+++ b/gen3/bin/kube-setup-kubecost.sh
@@ -0,0 +1,169 @@
+#!/bin/bash
+#
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+gen3_load "gen3/lib/kube-setup-init"
+
+accountID=$(aws sts get-caller-identity --output text --query 'Account')
+awsRegion=$(aws configure get region)
+
+gen3_setup_kubecost_infrastructure() {
+ gen3 workon default "${vpc_name}__kubecost"
+ gen3 cd
+ echo "vpc_name=\"$vpc_name\"" > config.tfvars
+ if [[ ! -z "$curBucketCreated" ]]; then
+ echo "cur_s3_bucket=\"$curBucket\"" >> config.tfvars
+ fi
+ if [[ ! -z "$reportBucketCreated" ]]; then
+ echo "reports_s3_bucket=\"$reportBucket\"" >> config.tfvars
+ fi
+ gen3 tfplan 2>&1
+ gen3 tfapply 2>&1
+}
+
+gen3_destroy_kubecost_infrastructure() {
+ gen3 workon default "${vpc_name}__kubecost"
+ gen3 tfplan --destroy 2>&1
+ gen3 tfapply 2>&1
+ gen3 cd
+ cd ..
+ rm -rf "${vpc_name}__kubecost"
+}
+
+gen3_setup_kubecost_service_account() {
+ # Kubecost SA
+ roleName="$vpc_name-kubecost-user"
+ saName="kubecost-cost-analyzer"
+ gen3 awsrole create "$roleName" "$saName" "kubecost" || return 1
+ aws iam attach-role-policy --role-name "$roleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-CUR-policy" 1>&2
+ #gen3 awsrole sa-annotate "$saName" "$roleName" "kubecost"
+ kubectl delete sa -n kubecost $saName
+ # SA for reports
+ reportsRoleName="$vpc_name-opencost-report-role"
+ reportsSaName="reports-service-account"
+ gen3 awsrole create "$reportsRoleName" "$reportsSaName" "kubecost" || return 1
+ aws iam attach-role-policy --role-name "$reportsRoleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-report-policy" 1>&2
+ gen3 awsrole sa-annotate "$reportsSaName" "$reportsRoleName" "kubecost"
+}
+
+gen3_delete_kubecost_service_account() {
+ aws iam detach-role-policy --role-name "${vpc_name}-kubecost-user" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-CUR-policy" 1>&2
+ gen3 workon default "${vpc_name}-kubecost-user_role"
+ gen3 tfplan --destroy 2>&1
+ gen3 tfapply 2>&1
+}
+
+gen3_delete_kubecost() {
+ gen3_delete_kubecost_service_account
+ gen3_destroy_kubecost_infrastructure
+ helm delete kubecost -n kubecost
+}
+
+gen3_kubecost_create_alb() {
+ kubectl apply -f "${GEN3_HOME}/kube/services/kubecost/kubecost-alb.yaml" -n kubecost
+}
+
+gen3_setup_kubecost() {
+ kubectl create namespace kubecost || true
+ # If s3 bucket not supplied, create a new one
+ if [[ -z $curBucket ]]; then
+ curBucket="$vpc_name-kubecost-bucket"
+ fi
+ # If report bucket not supplied, use the same as cur bucket
+ if [[ -z $reportBucket ]]; then
+ reportBucket=$curBucket
+ fi
+ gen3_setup_kubecost_infrastructure
+ aws ec2 create-spot-datafeed-subscription --bucket $curBucket --prefix spot-feed || true
+ # Change the SA permissions based on slave/master/standalone
+ if [[ -z $(kubectl get sa -n kubecost | grep $vpc_name-kubecost-user) ]]; then
+ gen3_setup_kubecost_service_account
+ fi
+ if (! helm status kubecost -n kubecost > /dev/null 2>&1 ) || [[ ! -z "$FORCE" ]]; then
+ valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml"
+ valuesTemplate="${GEN3_HOME}/kube/services/kubecost/values.yaml"
+ g3k_kv_filter $valuesTemplate KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" ATHENA_BUCKET "$curBucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" ATHENA_TABLE "${vpc_name}_cur" AWS_ACCOUNT_ID "$accountID" AWS_REGION "$awsRegion" > $valuesFile
+ helm repo add kubecost https://kubecost.github.io/cost-analyzer/ --force-update 2> >(grep -v 'This is insecure' >&2)
+ helm repo update 2> >(grep -v 'This is insecure' >&2)
+ helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile}
+ else
+ gen3_log_info "kube-setup-kubecost exiting - kubecost already deployed, use --force true to redeploy"
+ fi
+ gen3_kubecost_create_alb
+}
+
+gen3_setup_reports_cronjob() {
+ gen3 job cron opencost-report-argo '0 0 * * 0' BUCKET_NAME $reportBucket
+}
+
+if [[ -z "$GEN3_SOURCE_ONLY" ]]; then
+ if [[ -z "$1" || "$1" =~ ^-*help$ ]]; then
+ gen3_logs_help
+ exit 0
+ fi
+ command="$1"
+ shift
+ case "$command" in
+ "create")
+ for flag in $@; do
+ if [[ $# -gt 0 ]]; then
+ flag="$1"
+ shift
+ fi
+ case "$flag" in
+ "--force")
+ if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then
+ FORCE=true
+ fi
+ ;;
+ "--cur-bucket")
+ curBucket="$1"
+ curBucketCreated=true
+ ;;
+ "--report-bucket")
+ reportBucket="$1"
+ reportBucketCreated=true
+ ;;
+ esac
+ done
+ gen3_setup_kubecost "$@"
+ ;;
+ "cronjob")
+ subcommand=""
+ if [[ $# -gt 0 ]]; then
+ subcommand="$1"
+ shift
+ fi
+ case "$subcommand" in
+ "create")
+ for flag in $@; do
+ if [[ $# -gt 0 ]]; then
+ flag="$1"
+ shift
+ fi
+ case "$flag" in
+ "--report-bucket")
+ reportBucket="$1"
+ ;;
+ esac
+ done
+ if [[ -z $reportBucket ]]; then
+ gen3_log_err "Please ensure you set the reportBucket for setting up cronjob without full opencost deployment."
+ exit 1
+ fi
+ gen3_setup_reports_cronjob
+ ;;
+ *)
+ gen3_log_err "gen3_logs" "invalid history subcommand $subcommand - try: gen3 help kube-setup-kubecost"
+ ;;
+ esac
+ ;;
+ "delete")
+ gen3_delete_kubecost
+ ;;
+ *)
+ gen3_setup_kubecost "$@"
+ ;;
+ esac
+fi
diff --git a/gen3/bin/kube-setup-manifestservice.sh b/gen3/bin/kube-setup-manifestservice.sh
index a3df01d42..ccbde4691 100644
--- a/gen3/bin/kube-setup-manifestservice.sh
+++ b/gen3/bin/kube-setup-manifestservice.sh
@@ -12,28 +12,23 @@ gen3_load "gen3/gen3setup"
hostname="$(gen3 api hostname)"
bucketname="manifest-${hostname//./-}"
-username="manifest-bot-${hostname//./-}"
+username="manifestbot-${hostname//./-}"
mkdir -p $(gen3_secrets_folder)/g3auto/manifestservice
credsFile="$(gen3_secrets_folder)/g3auto/manifestservice/config.json"
+gen3_log_info "kube-setup-manifestservice" "setting up manifest-service resources"
+gen3 s3 create "$bucketname" || true
+gen3 awsrole create ${username} manifestservice-sa || true
+gen3 s3 attach-bucket-policy "$bucketname" --read-write --role-name ${username} || true
if (! (g3kubectl describe secret manifestservice-g3auto 2> /dev/null | grep config.js > /dev/null 2>&1)) \
- && [[ (! -f "$credsFile") && -z "$JENKINS_HOME" ]];
+ && [[ (! -f "$credsFile") && -z "$JENKINS_HOME" ]];
then
- gen3_log_info "kube-setup-manifestservice" "setting up manifest-service resources"
- gen3 s3 create "$bucketname"
- gen3 awsuser create ${username}
- gen3 s3 attach-bucket-policy "$bucketname" --read-write --user-name ${username}
gen3_log_info "initializing manifestservice config.json"
- user=$(gen3 secrets decode ${username}-g3auto awsusercreds.json)
- key_id=$(jq -r .id <<< $user)
- access_key=$(jq -r .secret <<< $user)
cat - > "$credsFile" < /dev/null 2>&1
+ secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client metadata-delete-expired-objects-job --grant-types client_credentials | tail -1)
+ if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then
+ gen3_log_err "kube-setup-metadata-delete-expired-objects-job" "Failed generating oidc client: $secrets"
+ return 1
+ fi
+ fi
+ local client_id="${BASH_REMATCH[2]}"
+ local client_secret="${BASH_REMATCH[3]}"
+
+ gen3_log_info "create metadata-delete-expired-objects secret"
+ mkdir -m 0700 -p "$(gen3_secrets_folder)/g3auto/metadata-delete-expired-objects"
+
+ cat - > "$secretsFolder/config.json" < /dev/null 2>&1; then
diff --git a/gen3/bin/kube-setup-metrics.sh b/gen3/bin/kube-setup-metrics.sh
index ca287197a..139c9679c 100644
--- a/gen3/bin/kube-setup-metrics.sh
+++ b/gen3/bin/kube-setup-metrics.sh
@@ -17,7 +17,7 @@
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
-DESIRED_VERSION=0.3.7
+DESIRED_VERSION=0.6.2
CURRENT_VERSION=$(kubectl get deployment -n kube-system metrics-server -o json | jq -r .spec.template.spec.containers[0].image | awk -F :v '{print $2}')
gen3_metrics_deploy() {
@@ -47,4 +47,4 @@ case "$command" in
gen3_log_err "unknown option: $command"
gen3 help kube-setup-metrics
;;
-esac
\ No newline at end of file
+esac
diff --git a/gen3/bin/kube-setup-networkpolicy.sh b/gen3/bin/kube-setup-networkpolicy.sh
index 44df40d44..176383a61 100644
--- a/gen3/bin/kube-setup-networkpolicy.sh
+++ b/gen3/bin/kube-setup-networkpolicy.sh
@@ -6,7 +6,7 @@
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
-serverVersion="$(g3kubectl version server -o json | jq -r '.serverVersion.major + "." + .serverVersion.minor' | head -c4).0"
+serverVersion="$(g3kubectl version -o json | jq -r '.serverVersion.major + "." + .serverVersion.minor' | head -c4).0"
if ! semver_ge "$serverVersion" "1.8.0"; then
gen3_log_info "kube-setup-netpolciy" "K8s server version $serverVersion does not yet support network policy"
exit 0
diff --git a/gen3/bin/kube-setup-ohdsi-tools.sh b/gen3/bin/kube-setup-ohdsi-tools.sh
deleted file mode 100644
index 891cc48ee..000000000
--- a/gen3/bin/kube-setup-ohdsi-tools.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-#
-# Deploy Atlas/WebAPI into existing commons
-#
-
-source "${GEN3_HOME}/gen3/lib/utils.sh"
-gen3_load "gen3/lib/kube-setup-init"
-
-gen3 roll ohdsi-webapi
-g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml"
-gen3 roll ohdsi-atlas
-g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-atlas/ohdsi-atlas-service.yaml"
-g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-atlas/ohdsi-atlas-service-elb.yaml"
-
-cat < /dev/null 2>&1; then
+ local credsPath="$(gen3_secrets_folder)/g3auto/ohdsi/appcreds.json"
+ if [ -f "$credsPath" ]; then
+ gen3 secrets sync
+ return 0
+ fi
+ mkdir -p "$(dirname "$credsPath")"
+ if ! new_client > "$credsPath"; then
+ gen3_log_err "Failed to setup ohdsi fence client"
+ rm "$credsPath" || true
+ return 1
+ fi
+ gen3 secrets sync
+ fi
+
+ if ! g3kubectl describe secret ohdsi-g3auto | grep dbcreds.json > /dev/null 2>&1; then
+ gen3_log_info "create database"
+ if ! gen3 db setup ohdsi; then
+ gen3_log_err "Failed setting up database for ohdsi service"
+ return 1
+ fi
+ gen3 secrets sync
+ fi
+}
+
+setup_secrets() {
+ # ohdsi-secrets.yaml populate and apply.
+ gen3_log_info "Deploying secrets for ohdsi"
+ # subshell
+
+ (
+ if ! dbcreds="$(gen3 db creds ohdsi)"; then
+ gen3_log_err "unable to find db creds for ohdsi service"
+ return 1
+ fi
+
+ if ! appcreds="$(gen3 secrets decode ohdsi-g3auto appcreds.json)"; then
+ gen3_log_err "unable to find app creds for ohdsi service"
+ return 1
+ fi
+
+ local hostname=$(gen3 api hostname)
+ export DB_NAME=$(jq -r ".db_database" <<< "$dbcreds")
+ export DB_USER=$(jq -r ".db_username" <<< "$dbcreds")
+ export DB_PASS=$(jq -r ".db_password" <<< "$dbcreds")
+ export DB_HOST=$(jq -r ".db_host" <<< "$dbcreds")
+
+ export FENCE_URL="https://${hostname}/user/user"
+ # get arborist_url from manifest.json:
+ export ARBORIST_URL=$(g3k_manifest_lookup .global.arborist_url)
+ export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration"
+ export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds")
+ export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds")
+ envsubst <"${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml" | g3kubectl apply -f -
+
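+ # only substitute $hostname here so nginx's own $variables in the reverse-proxy config template are preserved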
+ envsubst '$hostname' <"${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-reverse-proxy-config.yaml" | g3kubectl apply -f -
+ )
+}
+
+setup_ingress() {
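+ # look up the wildcard ACM certificate for this commons and template the atlas ingress with its ARN and atlas.<hostname>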
+ certs=$(aws acm list-certificates --certificate-statuses ISSUED | jq --arg hostname $hostname -c '.CertificateSummaryList[] | select(.DomainName | contains("*."+$hostname))')
+ if [ "$certs" = "" ]; then
+ gen3_log_info "no certs found for *.${hostname}. exiting"
+ exit 22
+ fi
+ gen3_log_info "Found ACM certificate for *.$hostname"
+ export ARN=$(jq -r .CertificateArn <<< $certs)
+ export ohdsi_hostname="atlas.${hostname}"
+ envsubst <${GEN3_HOME}/kube/services/ohdsi/ohdsi-ingress.yaml | g3kubectl apply -f -
+}
+
+# main --------------------------------------
+# deploy ohdsi: atlas + webapi
+if [[ $# -gt 0 && "$1" == "new-client" ]]; then
+ new_client
+ exit $?
+elif [[ $# -gt 0 && "$1" == "ingress" ]]; then
+ setup_ingress
+ exit $?
+fi
+
+setup_creds
+
+setup_secrets
+setup_ingress
+
+envsubst <${GEN3_HOME}/kube/services/ohdsi-atlas/ohdsi-atlas-config-local.yaml | g3kubectl apply -f -
+
+gen3 roll ohdsi-webapi
+g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml"
+
+gen3 roll ohdsi-atlas
+g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-atlas/ohdsi-atlas-service.yaml"
+
+cat < /dev/n
user=$(gen3 secrets decode $awsuser-g3auto awsusercreds.json)
key_id=$(jq -r .id <<< $user)
access_key=$(jq -r .secret <<< $user)
+
+ # setup fence OIDC client with client_credentials grant for access to MDS API
+ hostname=$(gen3 api hostname)
+ gen3_log_info "kube-setup-sower-jobs" "creating fence oidc client for $hostname"
+ secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client pelican-export-job --grant-types client_credentials | tail -1)
+ # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET')
+ if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then
+ # try delete client
+ g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client pelican-export-job > /dev/null 2>&1
+ secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client pelican-export-job --grant-types client_credentials | tail -1)
+ if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then
+ gen3_log_err "kube-setup-sower-jobs" "Failed generating oidc client: $secrets"
+ return 1
+ fi
+ fi
+ pelican_export_client_id="${BASH_REMATCH[2]}"
+ pelican_export_client_secret="${BASH_REMATCH[3]}"
+
cat - > "$credsFile" < /dev/null 2>&1; then
# helm3 has no default repo, need to add it manually
- helm repo add stable https://charts.helm.sh/stable --force-update
+ #helm repo add stable https://charts.helm.sh/stable --force-update
+ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
fi
}
@@ -27,25 +32,25 @@ function delete_prometheus()
gen3 arun helm delete prometheus --namespace prometheus
}
-function delete_grafana()
-{
- gen3 arun helm delete grafana --namespace grafana
-}
+# function delete_grafana()
+# {
+# gen3 arun helm delete grafana --namespace grafana
+# }
-function create_grafana_secrets()
-{
- if ! g3kubectl get secrets/grafana-admin > /dev/null 2>&1; then
- credsFile=$(mktemp -p "$XDG_RUNTIME_DIR" "creds.json_XXXXXX")
- creds="$(base64 /dev/urandom | head -c 12)"
- if [[ "$creds" != null ]]; then
- echo ${creds} >> "$credsFile"
- g3kubectl create secret generic grafana-admin "--from-file=credentials=${credsFile}"
- rm -f ${credsFile}
- else
- echo "WARNING: there was an error creating the secrets for grafana"
- fi
- fi
-}
+# function create_grafana_secrets()
+# {
+# if ! g3kubectl get secrets/grafana-admin > /dev/null 2>&1; then
+# credsFile=$(mktemp -p "$XDG_RUNTIME_DIR" "creds.json_XXXXXX")
+# creds="$(base64 /dev/urandom | head -c 12)"
+# if [[ "$creds" != null ]]; then
+# echo ${creds} >> "$credsFile"
+# g3kubectl create secret generic grafana-admin "--from-file=credentials=${credsFile}"
+# rm -f ${credsFile}
+# else
+# echo "WARNING: there was an error creating the secrets for grafana"
+# fi
+# fi
+# }
function deploy_prometheus()
{
@@ -55,56 +60,80 @@ function deploy_prometheus()
# but we only have one prometheus.
#
helm_repository
- if (! g3kubectl --namespace=prometheus get deployment prometheus-server > /dev/null 2>&1) || [[ "$1" == "--force" ]]; then
- if (! g3kubectl get namespace prometheus > /dev/null 2>&1);
+ if (! g3kubectl --namespace=monitoring get deployment prometheus-server > /dev/null 2>&1) || [[ "$1" == "--force" ]]; then
+ if (! g3kubectl get namespace monitoring > /dev/null 2>&1);
then
- g3kubectl create namespace prometheus
- g3kubectl label namespace prometheus app=prometheus
+ g3kubectl create namespace monitoring
+ g3kubectl label namespace monitoring app=prometheus
fi
- if (g3kubectl --namespace=prometheus get deployment prometheus-server > /dev/null 2>&1);
+ if (g3kubectl --namespace=monitoring get deployment prometheus-server > /dev/null 2>&1);
then
- delete_prometheus
+ #delete_prometheus
+ echo "skipping delete"
fi
if ! g3kubectl get storageclass prometheus > /dev/null 2>&1; then
g3kubectl apply -f "${GEN3_HOME}/kube/services/monitoring/prometheus-storageclass.yaml"
fi
- gen3 arun helm upgrade --install prometheus stable/prometheus --namespace prometheus -f "${GEN3_HOME}/kube/services/monitoring/prometheus-values.yaml"
+ if [ "$argocd" = true ]; then
+ g3kubectl apply -f "$GEN3_HOME/kube/services/monitoring/prometheus-application.yaml" --namespace=argocd
+ else
+ gen3 arun helm upgrade --install prometheus prometheus-community/kube-prometheus-stack --namespace monitoring -f "${GEN3_HOME}/kube/services/monitoring/values.yaml"
+ fi
+ deploy_thanos
else
gen3_log_info "Prometheus is already installed, use --force to try redeploying"
fi
}
-function deploy_grafana()
-{
- helm_repository
- if (! g3kubectl get namespace grafana > /dev/null 2>&1);
- then
- g3kubectl create namespace grafana
- g3kubectl label namespace grafana app=grafana
- fi
+# function deploy_grafana()
+# {
+# helm_repository
+# if (! g3kubectl get namespace grafana > /dev/null 2>&1);
+# then
+# g3kubectl create namespace grafana
+# g3kubectl label namespace grafana app=grafana
+# fi
- #create_grafana_secrets
- TMPGRAFANAVALUES=$(mktemp -p "$XDG_RUNTIME_DIR" "grafana.json_XXXXXX")
- ADMINPASS=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d)
- yq '.adminPassword = "'${ADMINPASS}'"' "${GEN3_HOME}/kube/services/monitoring/grafana-values.yaml" --yaml-output > ${TMPGRAFANAVALUES}
- # curl -o grafana-values.yaml https://raw.githubusercontent.com/helm/charts/master/stable/grafana/values.yaml
+# #create_grafana_secrets
+# TMPGRAFANAVALUES=$(mktemp -p "$XDG_RUNTIME_DIR" "grafana.json_XXXXXX")
+# ADMINPASS=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d)
+# yq '.adminPassword = "'${ADMINPASS}'"' "${GEN3_HOME}/kube/services/monitoring/grafana-values.yaml" --yaml-output > ${TMPGRAFANAVALUES}
+# # curl -o grafana-values.yaml https://raw.githubusercontent.com/helm/charts/master/stable/grafana/values.yaml
- if (! g3kubectl --namespace=grafana get deployment grafana > /dev/null 2>&1) || [[ "$1" == "--force" ]]; then
- if ( g3kubectl --namespace=grafana get deployment grafana > /dev/null 2>&1);
- then
- delete_grafana
- fi
+# if (! g3kubectl --namespace=grafana get deployment grafana > /dev/null 2>&1) || [[ "$1" == "--force" ]]; then
+# if ( g3kubectl --namespace=grafana get deployment grafana > /dev/null 2>&1);
+# then
+# delete_grafana
+# fi
- local HOSTNAME
- HOSTNAME=$(gen3 api hostname)
+# local HOSTNAME
+# HOSTNAME=$(gen3 api hostname)
- g3k_kv_filter "${TMPGRAFANAVALUES}" DOMAIN ${HOSTNAME} | gen3 arun helm upgrade --install grafana stable/grafana --namespace grafana -f -
- gen3 kube-setup-revproxy
- else
- echo "Grafana is already installed, use --force to try redeploying"
+# g3k_kv_filter "${TMPGRAFANAVALUES}" DOMAIN ${HOSTNAME} | gen3 arun helm upgrade --install grafana stable/grafana --namespace grafana -f -
+# gen3 kube-setup-revproxy
+# else
+# echo "Grafana is already installed, use --force to try redeploying"
+# fi
+# }
+
+function deploy_thanos() {
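+ # Thanos needs object storage for long-term metrics: create an S3 bucket and an IRSA role for the thanos service account, write the objstore config secret, then deploy the thanos components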
+ if [[ -z $vpc_name ]]; then
+ local vpc_name="$(gen3 api environment)"
fi
+ roleName="$vpc_name-thanos-role"
+ saName="thanos"
+ bucketName="$vpc_name-thanos-bucket"
+ gen3 s3 create "$bucketName"
+ gen3 awsrole create "$roleName" "$saName" "monitoring" || return 1
+ gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name ${roleName} || true
+ thanosValuesFile="$XDG_RUNTIME_DIR/thanos.yaml"
+ thanosValuesTemplate="${GEN3_HOME}/kube/services/monitoring/thanos.yaml"
+ g3k_kv_filter $thanosValuesTemplate S3_BUCKET $bucketName > $thanosValuesFile
+ g3kubectl delete secret -n monitoring thanos-objstore-config || true
+ g3kubectl create secret generic -n monitoring thanos-objstore-config --from-file="$thanosValuesFile"
+ g3kubectl apply -f "${GEN3_HOME}/kube/services/monitoring/thanos-deploy.yaml"
}
command=""
@@ -116,11 +145,11 @@ case "$command" in
prometheus)
deploy_prometheus "$@"
;;
- grafana)
- deploy_grafana "$@"
- ;;
+ # grafana)
+ # deploy_grafana "$@"
+ # ;;
*)
deploy_prometheus "$@"
- deploy_grafana "$@"
+ # deploy_grafana "$@"
;;
esac
diff --git a/gen3/bin/kube-setup-requestor.sh b/gen3/bin/kube-setup-requestor.sh
index 8cd38df7d..b4b8ae0e2 100644
--- a/gen3/bin/kube-setup-requestor.sh
+++ b/gen3/bin/kube-setup-requestor.sh
@@ -19,8 +19,8 @@ setup_database() {
return 0
fi
# Setup config file that requestor consumes
- if [[ ! -f "$secretsFolder/requestor-config.yaml" || ! -f "$secretsFolder/base64Authz.txt" ]]; then
- local secretsFolder="$(gen3_secrets_folder)/g3auto/requestor"
+ local secretsFolder="$(gen3_secrets_folder)/g3auto/requestor"
+ if [[ ! -f "$secretsFolder/requestor-config.yaml" ]]; then
if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
if ! gen3 db setup requestor; then
gen3_log_err "Failed setting up database for requestor service"
@@ -44,8 +44,6 @@ DB_USER: $(jq -r .db_username < "$secretsFolder/dbcreds.json")
DB_PASSWORD: $(jq -r .db_password < "$secretsFolder/dbcreds.json")
DB_DATABASE: $(jq -r .db_database < "$secretsFolder/dbcreds.json")
EOM
- # make it easy for nginx to get the Authorization header ...
- # echo -n "gateway:$password" | base64 > "$secretsFolder/base64Authz.txt"
fi
gen3 secrets sync 'setup requestor-g3auto secrets'
}
diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh
index 6b1bfa2fd..fd30b478b 100644
--- a/gen3/bin/kube-setup-revproxy.sh
+++ b/gen3/bin/kube-setup-revproxy.sh
@@ -13,6 +13,12 @@ set -e
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
+gen3_load "gen3/lib/g3k_manifest"
+
+# Deploy ELB Service if flag set in manifest
+manifestPath=$(g3k_manifest_path)
+deployELB="$(jq -r ".[\"global\"][\"deploy_elb\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')"
+
#
# Setup indexd basic-auth gateway user creds enforced
@@ -85,6 +91,18 @@ fi
for name in $(g3kubectl get services -o json | jq -r '.items[] | .metadata.name'); do
filePath="$scriptDir/gen3.nginx.conf/${name}.conf"
+
+ if [[ $name == "portal-service" || $name == "frontend-framework-service" ]]; then
+ FRONTEND_ROOT=$(g3kubectl get configmap manifest-global --output=jsonpath='{.data.frontend_root}')
+ if [[ $FRONTEND_ROOT == "gen3ff" ]]; then
+ #echo "setup gen3ff as root frontend service"
+ filePath="$scriptDir/gen3.nginx.conf/gen3ff-as-root/${name}.conf"
+ else
+ #echo "setup windmill as root frontend service"
+ filePath="$scriptDir/gen3.nginx.conf/portal-as-root/${name}.conf"
+ fi
+ fi
+
#echo "$filePath"
if [[ -f "$filePath" ]]; then
#echo "$filePath exists in $BASHPID!"
@@ -93,53 +111,66 @@ for name in $(g3kubectl get services -o json | jq -r '.items[] | .metadata.name'
fi
done
-if [[ $current_namespace == "default" ]];
+
+if g3k_manifest_lookup .argo.argo_server_service_url 2> /dev/null; then
+ argo_server_service_url=$(g3k_manifest_lookup .argo.argo_server_service_url)
+ g3k_kv_filter "${scriptDir}/gen3.nginx.conf/argo-server.conf" SERVICE_URL "${argo_server_service_url}" > /tmp/argo-server-with-url$(gen3 db namespace).conf
+ filePath="/tmp/argo-server-with-url$(gen3 db namespace).conf"
+ if [[ -f "$filePath" ]]; then
+ confFileList+=("--from-file" "$filePath")
+ fi
+fi
+
+if g3kubectl get namespace argocd > /dev/null 2>&1;
then
- if g3kubectl get namespace argo > /dev/null 2>&1;
- then
- for argo in $(g3kubectl get services -n argo -o jsonpath='{.items[*].metadata.name}');
- do
- filePath="$scriptDir/gen3.nginx.conf/${argo}.conf"
- if [[ -f "$filePath" ]]; then
- confFileList+=("--from-file" "$filePath")
- fi
- done
- fi
+ filePath="$scriptDir/gen3.nginx.conf/argocd-server.conf"
+ if [[ -f "$filePath" ]]; then
+ confFileList+=("--from-file" "$filePath")
+ fi
fi
-if [[ $current_namespace == "default" ]];
+if g3kubectl get namespace monitoring > /dev/null 2>&1;
then
- if g3kubectl get namespace prometheus > /dev/null 2>&1;
- then
- for prometheus in $(g3kubectl get services -n prometheus -o jsonpath='{.items[*].metadata.name}');
- do
- filePath="$scriptDir/gen3.nginx.conf/${prometheus}.conf"
- if [[ -f "$filePath" ]]; then
- confFileList+=("--from-file" "$filePath")
- fi
- done
- fi
+ filePath="$scriptDir/gen3.nginx.conf/prometheus-server.conf"
+ if [[ -f "$filePath" ]]; then
+ confFileList+=("--from-file" "$filePath")
+ fi
fi
-#echo "${confFileList[@]}" $BASHPID
-if [[ $current_namespace == "default" ]]; then
- if g3kubectl get namespace grafana > /dev/null 2>&1; then
- for grafana in $(g3kubectl get services -n grafana -o jsonpath='{.items[*].metadata.name}');
- do
- filePath="$scriptDir/gen3.nginx.conf/${grafana}.conf"
- touch "${XDG_RUNTIME_DIR}/${grafana}.conf"
- tmpCredsFile="${XDG_RUNTIME_DIR}/${grafana}.conf"
- adminPass=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d)
- adminCred=$(echo -n "admin:${adminPass}" | base64 --wrap=0)
- sed "s/CREDS/${adminCred}/" ${filePath} > ${tmpCredsFile}
- if [[ -f "${tmpCredsFile}" ]]; then
- confFileList+=("--from-file" "${tmpCredsFile}")
- fi
- #rm -f ${tmpCredsFile}
- done
- fi
+if g3kubectl get namespace kubecost > /dev/null 2>&1;
+then
+ filePath="$scriptDir/gen3.nginx.conf/kubecost-service.conf"
+ if [[ -f "$filePath" ]]; then
+ confFileList+=("--from-file" "$filePath")
+ fi
fi
+# #echo "${confFileList[@]}" $BASHPID
+# if [[ $current_namespace == "default" ]]; then
+# if g3kubectl get namespace grafana > /dev/null 2>&1; then
+# for grafana in $(g3kubectl get services -n grafana -o jsonpath='{.items[*].metadata.name}');
+# do
+# filePath="$scriptDir/gen3.nginx.conf/${grafana}.conf"
+# touch "${XDG_RUNTIME_DIR}/${grafana}.conf"
+# tmpCredsFile="${XDG_RUNTIME_DIR}/${grafana}.conf"
+# adminPass=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d)
+# adminCred=$(echo -n "admin:${adminPass}" | base64 --wrap=0)
+# sed "s/CREDS/${adminCred}/" ${filePath} > ${tmpCredsFile}
+# if [[ -f "${tmpCredsFile}" ]]; then
+# confFileList+=("--from-file" "${tmpCredsFile}")
+# fi
+# #rm -f ${tmpCredsFile}
+# done
+# fi
+# fi
+
+if g3k_manifest_lookup .global.document_url > /dev/null 2>&1; then
+ documentUrl="$(g3k_manifest_lookup .global.document_url)"
+ if [[ "$documentUrl" != null ]]; then
+ filePath="$scriptDir/gen3.nginx.conf/documentation-site/documentation-site.conf"
+ confFileList+=("--from-file" "$filePath")
+ fi
+fi
#
# Funny hook to load the portal-workspace-parent nginx config
#
@@ -239,6 +270,9 @@ export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_a
# revproxy deployment using http proxy protocol.
#
# port 81 == proxy-protocol listener - main service entry
+
+gen3_deploy_revproxy_elb() {
+gen3_log_info "Deploying revproxy-service-elb..."
export TARGET_PORT_HTTPS=81
# port 82 == proxy-protocol listener - redirects to https
export TARGET_PORT_HTTP=82
@@ -264,6 +298,10 @@ else
envsubst <$scriptDir/revproxy-service-elb.yaml
gen3_log_info "DRY RUN"
fi
-
+}
# Don't automatically apply this right now
#kubectl apply -f $scriptDir/revproxy-service.yaml
+
+if [ "$deployELB" = true ]; then
+ gen3_deploy_revproxy_elb
+fi
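For context, a minimal sketch (assuming jq and the manifest layout used above) of the flag that gates gen3_deploy_revproxy_elb:

    # sketch: the ELB service only deploys when the manifest sets global.deploy_elb to true
    manifestPath="$(g3k_manifest_path)"
    jq -r '.global.deploy_elb // false' < "$manifestPath"   # prints "true" when the commons opts in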
diff --git a/gen3/bin/kube-setup-roles.sh b/gen3/bin/kube-setup-roles.sh
index 040aaca05..aba7bf402 100644
--- a/gen3/bin/kube-setup-roles.sh
+++ b/gen3/bin/kube-setup-roles.sh
@@ -12,6 +12,8 @@ gen3_load "gen3/gen3setup"
g3kubectl patch serviceaccount default -p 'automountServiceAccountToken: false'
g3kubectl patch serviceaccount --namespace "$(gen3 jupyter j-namespace)" default -p 'automountServiceAccountToken: false' > /dev/null || true
+namespace="$(gen3 api namespace)"
+
# Don't do this in a Jenkins job
if [[ -z "$JENKINS_HOME" ]]; then
if ! g3kubectl get serviceaccounts/useryaml-job > /dev/null 2>&1; then
@@ -29,10 +31,10 @@ if [[ -z "$JENKINS_HOME" ]]; then
roleName="$(gen3 api safe-name gitops)"
gen3 awsrole create "$roleName" gitops-sa
# do this here, since we added the new role to this binding
- g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml"
+ g3k_kv_filter ${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml CURRENT_NAMESPACE "$namespace"|g3kubectl apply -f -
fi
if ! g3kubectl get rolebindings/devops-binding > /dev/null 2>&1; then
- g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml"
+ g3k_kv_filter ${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml CURRENT_NAMESPACE "$namespace"|g3kubectl apply -f -
fi
ctx="$(g3kubectl config current-context)"
diff --git a/gen3/bin/kube-setup-sheepdog.sh b/gen3/bin/kube-setup-sheepdog.sh
index b72d36690..7eec86def 100644
--- a/gen3/bin/kube-setup-sheepdog.sh
+++ b/gen3/bin/kube-setup-sheepdog.sh
@@ -42,8 +42,8 @@ if [[ -z "$JENKINS_HOME" && -f "$(gen3_secrets_folder)/creds.json" ]]; then
if gen3_time_since postgres_checkup is 120; then
# Grant permissions to peregrine
sqlList=(
- "GRANT SELECT ON ALL TABLES IN SCHEMA public TO $peregrine_db_user;"
- "ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO $peregrine_db_user;"
+ "GRANT SELECT ON ALL TABLES IN SCHEMA public TO \"$peregrine_db_user\";"
+ "ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO \"$peregrine_db_user\";"
);
for sql in "${sqlList[@]}"; do
gen3_log_info "Running: $sql"
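A small illustration of why the role identifier is now double-quoted in the generated SQL; the user name below is hypothetical and the psql invocation assumes local peer auth:

    # sketch: unquoted identifiers break on '-' or mixed case; quoting keeps the GRANT valid
    peregrine_db_user='peregrine_user-prod'
    psql -c "GRANT SELECT ON ALL TABLES IN SCHEMA public TO \"$peregrine_db_user\";"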
diff --git a/gen3/bin/kube-setup-superset.sh b/gen3/bin/kube-setup-superset.sh
new file mode 100644
index 000000000..0f1219695
--- /dev/null
+++ b/gen3/bin/kube-setup-superset.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/lib/kube-setup-init"
+
+# lib ---------------------
+
+new_client() {
+ local hostname=$(gen3 api hostname)
+ superset_hostname="superset.${hostname}"
+ gen3_log_info "kube-setup-superset" "creating fence oidc client for $superset_hostname"
+ local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client superset --urls https://${superset_hostname}/oauth-authorized/fence --username superset | tail -1)
+ # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET')
+ if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then
+ # try delete client
+ g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client superset > /dev/null 2>&1
+ secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client superset --urls https://${superset_hostname}/oauth-authorized/fence --username superset | tail -1)
+ if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then
+ gen3_log_err "kube-setup-superset" "Failed generating oidc client for superset: $secrets"
+ return 1
+ fi
+ fi
+ local FENCE_CLIENT_ID="${BASH_REMATCH[2]}"
+ local FENCE_CLIENT_SECRET="${BASH_REMATCH[3]}"
+ gen3_log_info "create superset-secret"
+ mkdir -m 0700 -p "$(gen3_secrets_folder)/g3auto/superset"
+
+ cat - <<EOM
+{
+ "FENCE_CLIENT_ID": "$FENCE_CLIENT_ID",
+ "FENCE_CLIENT_SECRET": "$FENCE_CLIENT_SECRET"
+}
+EOM
+}
+
+setup_creds() {
+ if ! g3kubectl describe secret superset-g3auto | grep appcreds.json > /dev/null 2>&1; then
+ local credsPath="$(gen3_secrets_folder)/g3auto/superset/appcreds.json"
+ if [ -f "$credsPath" ]; then
+ gen3 secrets sync
+ return 0
+ fi
+ mkdir -p "$(dirname "$credsPath")"
+ if ! new_client > "$credsPath"; then
+ gen3_log_err "Failed to setup superset fence client"
+ rm "$credsPath" || true
+ return 1
+ fi
+ gen3 secrets sync
+ fi
+
+ if ! g3kubectl describe secret superset-g3auto | grep dbcreds.json > /dev/null 2>&1; then
+ gen3_log_info "create database"
+ if ! gen3 db setup superset; then
+ gen3_log_err "Failed setting up database for superset service"
+ return 1
+ fi
+ gen3 secrets sync
+ fi
+}
+
+
+setup_secrets() {
+ # superset_secret.yaml populate and apply.
+ gen3_log_info "Deploying secrets for superset"
+ # subshell
+
+ (
+ if ! dbcreds="$(gen3 db creds superset)"; then
+ gen3_log_err "unable to find db creds for superset service"
+ return 1
+ fi
+
+ if ! appcreds="$(gen3 secrets decode superset-g3auto appcreds.json)"; then
+ gen3_log_err "unable to find app creds for superset service"
+ return 1
+ fi
+
+ local hostname=$(gen3 api hostname)
+ export DB_NAME=$(jq -r ".db_database" <<< "$dbcreds")
+ export DB_USER=$(jq -r ".db_username" <<< "$dbcreds")
+ export DB_PASS=$(jq -r ".db_password" <<< "$dbcreds")
+ export DB_HOST=$(jq -r ".db_host" <<< "$dbcreds")
+
+ export FENCE_URL="https://${hostname}/user/user"
+ export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration"
+ export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds" )
+ export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds" )
+ if secret_key="$(gen3 secrets decode superset-env SECRET_KEY)"; then
+ export SECRET_KEY="$secret_key"
+ else
+ export SECRET_KEY=$(random_alphanumeric 32)
+ fi
+ envsubst <"${GEN3_HOME}/kube/services/superset/superset-secrets-template.yaml" | g3kubectl apply -f -
+ )
+}
+
+setup_ingress() {
+ local hostname=$(gen3 api hostname)
+ certs=$(aws acm list-certificates --certificate-statuses ISSUED | jq --arg hostname $hostname -c '.CertificateSummaryList[] | select(.DomainName | contains("*."+$hostname))')
+ if [ "$certs" = "" ]; then
+ gen3_log_info "no certs found for *.${hostname}. exiting"
+ exit 22
+ fi
+ gen3_log_info "Found ACM certificate for *.$hostname"
+ export ARN=$(jq -r .CertificateArn <<< $certs)
+ export superset_hostname="superset.${hostname}"
+ envsubst <${GEN3_HOME}/kube/services/superset/superset-ingress.yaml | g3kubectl apply -f -
+}
+
+setup_redis() {
+ g3kubectl apply -f "${GEN3_HOME}/kube/services/superset/superset-redis.yaml"
+}
+
+# main --------------------------------------
+# deploy superset
+if [[ $# -gt 0 && "$1" == "new-client" ]]; then
+ new_client
+ exit $?
+elif [[ $# -gt 0 && "$1" == "ingress" ]]; then
+ setup_ingress
+ exit $?
+fi
+
+setup_redis
+setup_creds
+
+setup_secrets
+setup_ingress
+
+g3kubectl apply -f "${GEN3_HOME}/kube/services/superset/superset-deploy.yaml"
+
+gen3_log_info "The superset service has been deployed onto the k8s cluster."
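A usage sketch of the argument handling in the main block above (invocation through the gen3 CLI assumed):

    # sketch: standalone entry points vs. the full deploy
    gen3 kube-setup-superset new-client   # only (re)create the fence OIDC client creds
    gen3 kube-setup-superset ingress      # only (re)apply the superset.<hostname> ingress
    gen3 kube-setup-superset              # redis + creds + secrets + ingress + deployment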
diff --git a/gen3/bin/kube-setup-system-services.sh b/gen3/bin/kube-setup-system-services.sh
index f0f6cc21d..c26a04cb5 100644
--- a/gen3/bin/kube-setup-system-services.sh
+++ b/gen3/bin/kube-setup-system-services.sh
@@ -16,10 +16,11 @@
source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
-kubeproxy=${kubeproxy:-1.16.13}
-coredns=${coredns:-1.6.6}
-cni=${cni:-1.7.5}
-calico=${calico:-1.7.5}
+kubeproxy=${kubeproxy:-1.24.7}
+coredns=${coredns:-1.8.7}
+kubednsautoscaler=${kubednsautoscaler:-1.8.6}
+cni=${cni:-1.14.1}
+calico=${calico:-1.7.8}
while [ $# -gt 0 ]; do
@@ -30,13 +31,15 @@ while [ $# -gt 0 ]; do
shift
done
-kube_proxy_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/kube-proxy:v${kubeproxy}-eksbuild.1"
+kube_proxy_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/kube-proxy:v${kubeproxy}-eksbuild.2"
coredns_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/coredns:v${coredns}"
-cni_image="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${cni}/config/v1.7/aws-k8s-cni.yaml"
-calico_yaml="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${calico}/config/v$(echo ${calico} | sed -e 's/\.[0-9]\+$//')/calico.yaml"
+kubednsautoscaler_image="k8s.gcr.io/cpa/cluster-proportional-autoscaler:${kubednsautoscaler}"
+cni_image="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${cni}/config/master/aws-k8s-cni.yaml"
+calico_yaml="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${calico}/config/master/calico.yaml"
g3kubectl set image daemonset.apps/kube-proxy -n kube-system kube-proxy=${kube_proxy_image}
g3kubectl set image --namespace kube-system deployment.apps/coredns coredns=${coredns_image}
+#g3k_kv_filter "${GEN3_HOME}/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml" SERVICE "coredns" IMAGE "$kubednsautoscaler_image" | g3kubectl apply -f -
g3kubectl apply -f ${cni_image}
g3kubectl apply -f ${calico_yaml}
@@ -68,3 +71,4 @@ g3kubectl apply -f ${calico_yaml}
envsubst < $GEN3_HOME/kube/services/kube-proxy/kube-proxy-daemonset.yaml > $tempFile
g3kubectl apply -f $tempFile
)
+
diff --git a/gen3/bin/kube-setup-thor.sh b/gen3/bin/kube-setup-thor.sh
new file mode 100644
index 000000000..50de4d5bb
--- /dev/null
+++ b/gen3/bin/kube-setup-thor.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# Deploy the thor service.
+#
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+setup_database() {
+ gen3_log_info "setting up database for thor service ..."
+
+ if g3kubectl describe secret thor-g3auto > /dev/null 2>&1; then
+ gen3_log_info "thor-g3auto secret already configured"
+ return 0
+ fi
+ if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then
+ gen3_log_err "skipping db setup in non-adminvm environment"
+ return 0
+ fi
+ # Setup .env file that thor consumes
+ local secretsFolder="$(gen3_secrets_folder)/g3auto/thor"
+ if [[ ! -f "$secretsFolder/thor.env" || ! -f "$secretsFolder/base64Authz.txt" ]]; then
+ if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
+ if ! gen3 db setup thor; then
+ gen3_log_err "Failed setting up database for thor service"
+ return 1
+ fi
+ fi
+ if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then
+ gen3_log_err "dbcreds not present in Gen3Secrets/"
+ return 1
+ fi
+
+ # go ahead and rotate the password whenever we regen this file
+ local password="$(gen3 random)" # pragma: allowlist secret
+ cat - > "$secretsFolder/thor.env" < "$secretsFolder/base64Authz.txt"
+ fi
+ gen3 secrets sync 'setup thor-g3auto secrets'
+}
+
+github_token="$(cat $(gen3_secrets_folder)/g3auto/thor/github_token.json)" # pragma: allowlist secret
+jira_api_token="$(cat $(gen3_secrets_folder)/g3auto/thor/jira_api_token.json)" # pragma: allowlist secret
+
+if [[ -z "$github_token" ]]; then
+ gen3_log_err "missing github credential for thor"
+ exit 1
+fi
+if [[ -z "$jira_api_token" ]]; then
+ gen3_log_err "missing jira credential for thor"
+ exit 1
+fi
+
+if ! setup_database; then
+ gen3_log_err "kube-setup-thor bailing out - database failed setup"
+ exit 1
+fi
+
+gen3 roll thor
+g3kubectl apply -f "${GEN3_HOME}/kube/services/thor/thor-service.yaml"
+
+gen3_log_info "The thor service has been deployed onto the kubernetes cluster"
\ No newline at end of file
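A setup sketch of the credential files the script reads before rolling thor; the token values are placeholders:

    # sketch: kube-setup-thor expects these files in the g3auto folder and exits without them
    secretsFolder="$(gen3_secrets_folder)/g3auto/thor"
    mkdir -p "$secretsFolder"
    echo '<github-token>' > "$secretsFolder/github_token.json"
    echo '<jira-api-token>' > "$secretsFolder/jira_api_token.json"
    gen3 kube-setup-thor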
diff --git a/gen3/bin/kube-setup-workvm.sh b/gen3/bin/kube-setup-workvm.sh
index dfc884a7f..53424f89d 100644
--- a/gen3/bin/kube-setup-workvm.sh
+++ b/gen3/bin/kube-setup-workvm.sh
@@ -6,6 +6,7 @@
#
s3_bucket="${s3_bucket:-${2:-unknown}}"
+export DEBIAN_FRONTEND=noninteractive
# Make it easy to run this directly ...
_setup_workvm_dir="$(dirname -- "${BASH_SOURCE:-$0}")"
@@ -29,38 +30,41 @@ fi
if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
# -E passes through *_proxy environment
- sudo -E apt-get update
- sudo -E apt-get install -y git jq pwgen python-dev python-pip unzip python3-dev python3-pip python3-venv
+ gen3_log_info "Install git jq pwgen unzip python3-dev python3-pip python3-venv libpq-dev apt-transport-https ca-certificates gnupg apt-utils"
+ sudo -E apt-get update -qq
+ sudo -E apt-get install -qq -y git jq pwgen unzip python3-dev python3-pip python3-venv libpq-dev apt-transport-https ca-certificates gnupg apt-utils > /dev/null
( # subshell
# install aws cli v2 - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html
# increase min version periodically - see https://github.com/aws/aws-cli/blob/v2/CHANGELOG.rst
update_awscli() {
local version="0.0.0"
- if aws --version; then
+ if aws --version > /dev/null 2>&1; then
version="$(aws --version | awk '{ print $1 }' | awk -F / '{ print $2 }')"
fi
- if semver_ge "$version" "2.1.15"; then
+ if semver_ge "$version" "2.7.0"; then
gen3_log_info "awscli up to date"
return 0
fi
# update to latest version
( # subshell
+ gen3_log_info "Installing aws cli"
export DEBIAN_FRONTEND=noninteractive
- if [[ -f /usr/local/bin/aws ]] && ! semver_ge "$version" "2.0.0"; then
+ if [[ -f /usr/local/bin/aws ]] && ! semver_ge "$version" "2.7.0"; then
sudo rm /usr/local/bin/aws
fi
cd $HOME
temp_dir="aws_install-$(date +%m%d%Y)"
mkdir $temp_dir
cd $temp_dir
- curl -o awscli.zip https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip
- unzip awscli.zip
- if semver_ge "$version" "2.0.0"; then
+ curl -s -o awscli.zip https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m 2>/dev/null || echo "x86_64").zip
+ unzip -qq awscli.zip
+ if semver_ge "$version" "2.7.0"; then
yes | sudo ./aws/install --update
else
yes | sudo ./aws/install
fi
+ aws --version
# cleanup
cd $HOME
rm -rf $temp_dir
@@ -70,18 +74,23 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
update_awscli
)
- sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install --upgrade pip
+ gen3_log_info "Upgrading pip.."
+ sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install -q --upgrade pip
+
+ gen3_log_info "Installing jinja2 via pip"
+
# jinja2 needed by render_creds.py
- sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install jinja2
- # yq === jq for yaml
- sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install yq
+ sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install -q jinja2 yq --ignore-installed
+
# install nodejs
- if ! which node > /dev/null 2>&1; then
- curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -
- sudo -E apt-get update
- sudo -E apt-get install -y nodejs
- fi
+ gen3_log_info "Install node js 16"
+ curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash - > /dev/null
+ sudo apt install -qq -y nodejs > /dev/null
+
+ gen3_log_info "Node: Version $(node --version)"
+
+
if [[ ! -f /etc/apt/sources.list.d/google-cloud-sdk.list ]]; then
# might need to uninstall gcloud installed from ubuntu repo
if which gcloud > /dev/null 2>&1; then
@@ -89,7 +98,8 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
fi
fi
if ! which psql > /dev/null 2>&1; then
- (
+ (
+ gen3_log_info "Install postgres-client"
# use the postgres dpkg server
# https://www.postgresql.org/download/linux/ubuntu/
DISTRO="$(lsb_release -c -s)" # ex - xenial
@@ -97,36 +107,31 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
echo "deb http://apt.postgresql.org/pub/repos/apt/ ${DISTRO}-pgdg main" | sudo tee /etc/apt/sources.list.d/pgdg.list
fi
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
- sudo -E apt-get update
- sudo -E apt-get install -y postgresql-client-13
+ sudo -E apt-get -qq update
+ sudo -E apt-get install -qq -y postgresql-client-13 > /dev/null
)
fi
- # gen3sdk currently requires this
- sudo -E apt-get install -y libpq-dev apt-transport-https ca-certificates curl
- sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
- ##kubernetes-xenial packages are supported in Bionic and Focal.
- echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/google-cloud-sdk.list
- sudo -E apt-get update
- #client_version=$(kubectl version --short --client=true | awk -F[v.] '{print $2"."$3}')
- server_version=$(kubectl version --short | awk -F[v.] '/Server/ {print $3"."$4}')
-
- if [[ ! -z "${server_version// }" ]]; then
- (
- install_version=$(apt-cache madison kubectl | awk '$3 ~ /'$server_version'/ {print $3}'| head -n 1)
- sudo -E apt-get install -y kubectl=$install_version --allow-downgrades
- )
- else
- sudo -E apt-get install -y kubectl
- fi
- if [[ -f /usr/local/bin/kubectl && -f /usr/bin/kubectl ]]; then # pref dpkg managed kubectl
- sudo -E /bin/rm /usr/local/bin/kubectl
- fi
if ! which gcloud > /dev/null 2>&1; then
(
- sudo -E apt-get install -y google-cloud-sdk \
- google-cloud-sdk-cbt
+ gen3_log_info "Install google cloud cli"
+ sudo -E bash -c "echo 'deb https://packages.cloud.google.com/apt cloud-sdk main' | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list"
+ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo -E apt-key add -
+ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key --keyring /usr/share/keyrings/cloud.google.gpg add -
+ sudo -E apt-get update -qq
+ sudo -E apt-get install -qq -y google-cloud-sdk \
+ google-cloud-sdk-cbt > /dev/null
+
)
+
+ fi
+
+ if ! which kubectl > /dev/null 2>&1; then
+ gen3_log_info "Installing kubectl"
+ sudo -E apt-get install -qq -y kubectl > /dev/null
+ else
+ gen3_log_info "Upgrading kubectl"
+ sudo -E apt-get upgrade -qq -y kubectl > /dev/null
fi
mkdir -p ~/.config
@@ -134,21 +139,33 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
( # in a subshell - install terraform
install_terraform() {
- curl -o "${XDG_RUNTIME_DIR}/terraform.zip" https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip
+ gen3_log_info "Installing terraform 0.11"
+ curl -s -o "${XDG_RUNTIME_DIR}/terraform.zip" https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip
sudo /bin/rm -rf /usr/local/bin/terraform > /dev/null 2>&1 || true
- sudo unzip "${XDG_RUNTIME_DIR}/terraform.zip" -d /usr/local/bin;
+ sudo unzip -qq "${XDG_RUNTIME_DIR}/terraform.zip" -d /usr/local/bin;
/bin/rm "${XDG_RUNTIME_DIR}/terraform.zip"
}
install_terraform12() {
+ gen3_log_info "Installing terraform 0.12"
mkdir "${XDG_RUNTIME_DIR}/t12"
- curl -o "${XDG_RUNTIME_DIR}/t12/terraform12.zip" https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip
+ curl -s -o "${XDG_RUNTIME_DIR}/t12/terraform12.zip" https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip
sudo /bin/rm -rf /usr/local/bin/terraform12 > /dev/null 2>&1 || true
- unzip "${XDG_RUNTIME_DIR}/t12/terraform12.zip" -d "${XDG_RUNTIME_DIR}/t12";
+ unzip -qq "${XDG_RUNTIME_DIR}/t12/terraform12.zip" -d "${XDG_RUNTIME_DIR}/t12";
sudo cp "${XDG_RUNTIME_DIR}/t12/terraform" "/usr/local/bin/terraform12"
/bin/rm -rf "${XDG_RUNTIME_DIR}/t12"
}
+ install_terraform1.2() {
+ gen3_log_info "Installing terraform 1.2"
+ mkdir "${XDG_RUNTIME_DIR}/t1.2"
+ curl -s -o "${XDG_RUNTIME_DIR}/t1.2/terraform1.2.zip" https://releases.hashicorp.com/terraform/1.2.3/terraform_1.2.3_linux_amd64.zip
+ sudo /bin/rm -rf /usr/local/bin/terraform1.2 > /dev/null 2>&1 || true
+ unzip -qq "${XDG_RUNTIME_DIR}/t1.2/terraform1.2.zip" -d "${XDG_RUNTIME_DIR}/t1.2";
+ sudo cp "${XDG_RUNTIME_DIR}/t1.2/terraform" "/usr/local/bin/terraform1.2"
+ /bin/rm -rf "${XDG_RUNTIME_DIR}/t1.2"
+ }
+
if ! which terraform > /dev/null 2>&1; then
install_terraform
else
@@ -165,6 +182,14 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then
install_terraform12
fi
fi
+ if ! which terraform1.2 > /dev/null 2>&1; then
+ install_terraform1.2
+ else
+ T12_VERSION=$(terraform1.2 --version | head -1 | awk '{ print $2 }' | sed 's/^[^0-9]*//')
+ if ! semver_ge "$T12_VERSION" "1.2.3"; then
+ install_terraform1.2
+ fi
+ fi
)
if [[ -f /etc/systemd/timesyncd.conf ]] \
@@ -200,8 +225,9 @@ EOM
)
fi
if ! which packer > /dev/null 2>&1; then
- curl -o "${XDG_RUNTIME_DIR}/packer.zip" https://releases.hashicorp.com/packer/1.5.1/packer_1.5.1_linux_amd64.zip
- sudo unzip "${XDG_RUNTIME_DIR}/packer.zip" -d /usr/local/bin
+ gen3_log_info "Installing packer"
+ curl -s -o "${XDG_RUNTIME_DIR}/packer.zip" https://releases.hashicorp.com/packer/1.5.1/packer_1.5.1_linux_amd64.zip
+ sudo unzip -qq "${XDG_RUNTIME_DIR}/packer.zip" -d /usr/local/bin
/bin/rm "${XDG_RUNTIME_DIR}/packer.zip"
fi
# https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html
@@ -209,23 +235,16 @@ EOM
(
gen3_log_info "installing aws-iam-authenticator"
cd /usr/local/bin
- sudo curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator
+ sudo curl -s -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator
sudo chmod a+rx ./aws-iam-authenticator
- sudo rm /usr/local/bin/heptio-authenticator-aws || true
- # link heptio-authenticator-aws for backward compatability with old scripts
- sudo ln -s /usr/local/bin/aws-iam-authenticator heptio-authenticator-aws
)
fi
( # in a subshell install helm
install_helm() {
- helm_release_URL="https://get.helm.sh/helm-v3.4.0-linux-amd64.tar.gz"
- curl -o "${XDG_RUNTIME_DIR}/helm.tar.gz" ${helm_release_URL}
+ helm_release_URL="https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz"
+ curl -s -o "${XDG_RUNTIME_DIR}/helm.tar.gz" ${helm_release_URL}
tar xf "${XDG_RUNTIME_DIR}/helm.tar.gz" -C ${XDG_RUNTIME_DIR}
sudo mv -f "${XDG_RUNTIME_DIR}/linux-amd64/helm" /usr/local/bin
-
- # helm3 has no default repo, need to add it manually
- helm repo add stable https://charts.helm.sh/stable --force-update
- helm repo update
}
migrate_helm() {
@@ -341,3 +360,5 @@ fi
npm install || true
fi
)
+
+source ${WORKSPACE}/.${RC_FILE}
\ No newline at end of file
diff --git a/gen3/bin/kube-setup-wts.sh b/gen3/bin/kube-setup-wts.sh
index b807da2d5..ad8211d03 100644
--- a/gen3/bin/kube-setup-wts.sh
+++ b/gen3/bin/kube-setup-wts.sh
@@ -42,6 +42,8 @@ new_client() {
"oidc_client_id": "$client_id",
"oidc_client_secret": "$client_secret",
+ "aggregate_endpoint_allowlist": ["/authz/mapping"],
+
"external_oidc": []
}
EOM
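A verification sketch that the new allowlist landed in the generated config; the wts-g3auto secret and appcreds.json file name are assumed to follow the same g3auto convention used elsewhere in this repo:

    # sketch: the aggregate endpoint allowlist should show up in the decoded app creds
    gen3 secrets decode wts-g3auto appcreds.json | jq '.aggregate_endpoint_allowlist'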
diff --git a/gen3/bin/kube-wait4-pods.sh b/gen3/bin/kube-wait4-pods.sh
index 2da695e4c..03068b50d 100644
--- a/gen3/bin/kube-wait4-pods.sh
+++ b/gen3/bin/kube-wait4-pods.sh
@@ -11,20 +11,20 @@ help() {
in the 'waiting' state.
Use to wait till all launched services
are up and healthy before performing some action.
- Waits for up to 15 minutes. Non-zero exit code
- if 15 minutes expires, and pods are still not ready.
+ Waits for up to 60 minutes. Non-zero exit code
+ if 60 minutes expires, and pods are still not ready.
EOM
return 0
}
-MAX_RETRIES=${1:-180}
+MAX_RETRIES=${1:-360}
IS_K8S_RESET="${2:-false}"
if [[ ! "$MAX_RETRIES" =~ ^[0-9]+$ ]];
then
gen3_log_err "ignoring invalid retry count: $1"
- MAX_RETRIES=180
+ MAX_RETRIES=360
fi
if [[ ! "$IS_K8S_RESET" =~ ^(true$|false$) ]];
diff --git a/gen3/bin/migrate-to-vpc-cni.sh b/gen3/bin/migrate-to-vpc-cni.sh
new file mode 100644
index 000000000..510d9ebef
--- /dev/null
+++ b/gen3/bin/migrate-to-vpc-cni.sh
@@ -0,0 +1,138 @@
+#!/bin/bash
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+#Get the K8s NS
+ctx="$(g3kubectl config current-context)"
+ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")"
+
+# Set the cluster name variable
+CLUSTER_NAME=`gen3 api environment`
+
+# Check if in default ns
+if [[ ("$ctxNamespace" != "default" && "$ctxNamespace" != "null") ]]; then
+ gen3_log_err "Namespace must be default"
+ exit 1
+fi
+
+# Cd into Cloud-automation repo and pull the latest from master
+gen3_log_info "Pulling the latest from Cloud-Auto"
+cd /home/$CLUSTER_NAME/cloud-automation || { gen3_log_err "Cloud-automation repo not found"; exit 1; }
+#### Change to master
+git checkout master || { gen3_log_err "Failed to checkout master branch"; exit 1; }
+git pull || { gen3_log_err "Failed to pull from the repository"; exit 1; }
+
+# Update the Karpenter Node Template
+gen3_log_info "Apply new Karpenter Node Template"
+if [[ -d $(g3k_manifest_init)/$(g3k_hostname)/manifests/karpenter ]]; then
+ gen3_log_info "Karpenter setup in manifest. Open a cdismanifest PR and add this line to aws node templates: https://github.com/uc-cdis/cloud-automation/blob/master/kube/services/karpenter/nodeTemplateDefault.yaml#L40"
+ while true; do
+ read -p "Have you updated your manifest? (yes/no): " yn
+ case $yn in
+ [Yy]* )
+ gen3_log_info "Proceeding with Karpenter deployment..."
+ gen3 kube-setup-karpenter deploy --force || { gen3_log_err "kube-setup-karpenter failed"; exit 1; }
+ break
+ ;;
+ [Nn]* )
+ gen3_log_info "Please update the cdismanifest before proceeding."
+ exit 1
+ ;;
+ * )
+ gen3_log_info "Please answer yes or no."
+ ;;
+ esac
+ done
+else
+ gen3 kube-setup-karpenter deploy --force || { gen3_log_err "kube-setup-karpenter failed"; exit 1; }
+fi
+
+# Cordon all the nodes before running gen3 roll all"
+gen3_log_info "Cordoning all nodes"
+kubectl get nodes --no-headers -o custom-columns=":metadata.name" | grep -v '^fargate' | xargs -I{} kubectl cordon {}
+
+# Run a "gen3 roll all" so all nodes use the new mounted BPF File System
+gen3_log_info "Cycling all the nodes by running gen3 roll all"
+gen3 roll all --fast || exit 1
+
+# Confirm that all nodes have been rotated
+while true; do
+ read -p "Roll all complete. Have all cordoned nodes been rotated? (yes/no): " yn
+ case $yn in
+ [Yy]* )
+ gen3_log_info "Continuing with script..."
+ break
+ ;;
+ [Nn]* )
+ gen3_log_info "Please drain any remaining nodes with 'kubectl drain --ignore-daemonsets --delete-emptydir-data'"
+ ;;
+ * )
+ gen3_log_info "Please answer yes or no."
+ ;;
+ esac
+done
+
+
+# Delete all existing network policies
+gen3_log_info "Deleting networkpolicies"
+kubectl delete networkpolicies --all
+
+# Delete all Calico related resources from the “kube-system” namespace
+gen3_log_info "Deleting all Calico related resources"
+kubectl get deployments -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete deployment -n kube-system
+kubectl get daemonsets -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete daemonset -n kube-system
+kubectl get services -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete service -n kube-system
+kubectl get replicasets -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete replicaset -n kube-system
+
+# Backup the current VPC CNI configuration in case of rollback
+gen3_log_info "Backing up current VPC CNI Configuration..."
+kubectl get daemonset aws-node -n kube-system -o yaml > aws-k8s-cni-old.yaml || { gen3_log_err "Error backing up VPC CNI configuration"; exit 1; }
+
+# Check to ensure we are not using an AWS plugin to manage the VPC CNI Plugin
+if aws eks describe-addon --cluster-name "$CLUSTER_NAME" --addon-name vpc-cni --query addon.addonVersion --output text 2>/dev/null; then
+ gen3_log_err "Error: VPC CNI Plugin is managed by AWS. Please log into the AWS UI and delete the VPC CNI Plugin in Amazon EKS, then re-run this script."
+ exit 1
+else
+ gen3_log_info "No managed VPC CNI Plugin found, proceeding with the script."
+fi
+
+# Apply the new VPC CNI Version
+gen3_log_info "Applying new version of VPC CNI"
+g3kubectl apply -f https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v1.14.1/config/master/aws-k8s-cni.yaml || { gen3_log_err "Failed to apply new VPC CNI version"; exit 1; }
+
+# Check the version to make sure it updated
+NEW_VERSION=$(kubectl describe daemonset aws-node --namespace kube-system | grep amazon-k8s-cni: | cut -d : -f 3)
+gen3_log_info "Current version of aws-k8s-cni is: $NEW_VERSION"
+if [ "$NEW_VERSION" != "v1.14.1" ]; then
+ gen3_log_info "The version of aws-k8s-cni has not been updated correctly."
+ exit 1
+fi
+
+# Edit the amazon-vpc-cni configmap to enable network policy controller
+gen3_log_info "Enabling NetworkPolicies in VPC CNI Configmap"
+kubectl patch configmap -n kube-system amazon-vpc-cni --type merge -p '{"data":{"enable-network-policy-controller":"true"}}' || { gen3_log_err "Configmap patch failed"; exit 1; }
+
+# Edit the aws-node daemonset
+gen3_log_info "Enabling NetworkPolicies in aws-node Daemonset"
+kubectl patch daemonset aws-node -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/1/args", "value": ["--enable-network-policy=true", "--enable-ipv6=false", "--enable-cloudwatch-logs=false", "--metrics-bind-addr=:8162", "--health-probe-bind-addr=:8163"]}]' || { gen3_log_err "Daemonset edit failed"; exit 1; }
+
+# Ensure all the aws-nodes are running
+kubectl get pods -n kube-system | grep aws
+while true; do
+ read -p "Do all the aws-node pods in the kube-system ns have 2/2 containers running? (yes/no): " yn
+ case $yn in
+ [Yy]* )
+ gen3_log_info "Running kube-setup-networkpolicy..."
+ gen3 kube-setup-networkpolicy || exit 1
+ break
+ ;;
+ [Nn]* )
+ gen3_log_err "Look at aws-node logs to figure out what went wrong. View this document for more details: https://docs.google.com/document/d/1fcBTciQSSwjvHktEnO_7EObY-xR_EvJ2NtgUa70wvL8"
+ gen3_log_info "Rollback instructions are also available in the above document"
+ ;;
+ * )
+ gen3_log_info "Please answer yes or no."
+ ;;
+ esac
+done
\ No newline at end of file
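A follow-up sketch to spot-check the patched daemonset; the container index 1 matches the patch above:

    # sketch: confirm the network-policy agent args were added to aws-node
    kubectl get daemonset aws-node -n kube-system \
      -o jsonpath='{.spec.template.spec.containers[1].args}' | tr ',' '\n' | grep enable-network-policy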
diff --git a/gen3/bin/mutate-guppy-config-for-guppy-test.sh b/gen3/bin/mutate-guppy-config-for-guppy-test.sh
index de7da10d5..151bb7169 100644
--- a/gen3/bin/mutate-guppy-config-for-guppy-test.sh
+++ b/gen3/bin/mutate-guppy-config-for-guppy-test.sh
@@ -16,7 +16,7 @@ sed -i 's/\(.*\)"index": "\(.*\)_etl",$/\1"index": "jenkins_subject_alias",/' or
# for bloodpac-like envs
sed -i 's/\(.*\)"index": "\(.*\)_case",$/\1"index": "jenkins_subject_alias",/' original_guppy_config.yaml
# the pre-defined Canine index works with subject ONLY (never case)
-sed -i 's/\(.*\)"type": "case"$/\1"type": "subject"/' original_guppy_config.yaml
+# sed -i 's/\(.*\)"type": "case"$/\1"type": "subject"/' original_guppy_config.yaml
sed -i 's/\(.*\)"index": "\(.*\)_file",$/\1"index": "jenkins_file_alias",/' original_guppy_config.yaml
sed -i 's/\(.*\)"config_index": "\(.*\)_array-config",$/\1"config_index": "jenkins_configs_alias",/' original_guppy_config.yaml
diff --git a/gen3/bin/netpolicy.sh b/gen3/bin/netpolicy.sh
index a7245a434..eb01eb737 100644
--- a/gen3/bin/netpolicy.sh
+++ b/gen3/bin/netpolicy.sh
@@ -192,7 +192,7 @@ gen3_net_db_access() {
local ip
serviceName="$1"
hostname="$(gen3 db creds "$serviceName" | jq -r .db_host)"
- ip="$(dig +short "$hostname")"
+ ip="$(dig +short "$hostname" | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}')"
if ! gen3_net_isIp "$ip"; then
gen3_log_err "gen3_net_db_access" "unable to determine address of $serviceName database"
return 1
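An illustration of why the grep filter is needed: dig +short against an RDS endpoint can return a CNAME line before the A record, and only the IPv4 address should feed the netpolicy. The hostname below is hypothetical:

    # sketch: keep only dotted-quad lines from the dig output
    dig +short mydb.cluster-abc123.us-east-1.rds.amazonaws.com \
      | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}'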
diff --git a/gen3/bin/prometheus.sh b/gen3/bin/prometheus.sh
index 878971925..1d71c6a7a 100644
--- a/gen3/bin/prometheus.sh
+++ b/gen3/bin/prometheus.sh
@@ -5,7 +5,8 @@ source "${GEN3_HOME}/gen3/lib/utils.sh"
gen3_load "gen3/gen3setup"
-export GEN3_PROMHOST="${GEN3_PROMHOST:-"http://prometheus-server.prometheus.svc.cluster.local"}"
+#export GEN3_PROMHOST="${GEN3_PROMHOST:-"http://prometheus-server.prometheus.svc.cluster.local"}"
+export GEN3_PROMHOST="${GEN3_PROMHOST:-"http://prometheus-operated.monitoring.svc.cluster.local:9090"}"
gen3_prom_help() {
gen3 help prometheus
@@ -15,7 +16,7 @@ function gen3_prom_curl() {
local urlBase="$1"
shift || return 1
local hostOrKey="${1:-${GEN3_PROMHOST}}"
- local urlPath="prometheus/api/v1/$urlBase"
+ local urlPath="api/v1/$urlBase"
if [[ "$hostOrKey" =~ ^http ]]; then
gen3_log_info "fetching $hostOrKey/$urlPath"
diff --git a/gen3/bin/reset.sh b/gen3/bin/reset.sh
index 085440eec..045da4319 100644
--- a/gen3/bin/reset.sh
+++ b/gen3/bin/reset.sh
@@ -20,7 +20,7 @@ wait_for_pods_down() {
podsDownFlag=1
while [[ podsDownFlag -ne 0 ]]; do
g3kubectl get pods
- if [[ 0 == "$(g3kubectl get pods -o json | jq -r '[.items[] | { name: .metadata.labels.app } ] | map(select(.name=="fence" or .name=="sheepdog" or .name=="peregrine" or .name=="indexd")) | length')" ]]; then
+ if [[ 0 == "$(g3kubectl get pods -o json | jq -r '[.items[] | { name: .metadata.labels.app } ] | length')" ]]; then
gen3_log_info "pods are down, ready to drop databases"
podsDownFlag=0
else
@@ -130,13 +130,19 @@ gen3 shutdown namespace
# also clean out network policies
g3kubectl delete networkpolicies --all
wait_for_pods_down
-
+# Give it 30 seconds to ensure connections get drained
+sleep 30
#
# Reset our databases
#
for serviceName in $(gen3 db services); do
if [[ "$serviceName" != "peregrine" ]]; then # sheepdog and peregrine share the same db
- gen3 db reset "$serviceName"
+ if [[ "$serviceName" != "argo"]]; then
+ # --force will also drop connections to the database to ensure database gets dropped
+ gen3 db reset "$serviceName" --force
+ else
+ echo "Skipping the Argo DB reset, as that will delete archived workflows."
+ fi
fi
done
diff --git a/gen3/bin/roll.sh b/gen3/bin/roll.sh
index 1859504d5..baed75aa4 100644
--- a/gen3/bin/roll.sh
+++ b/gen3/bin/roll.sh
@@ -69,7 +69,7 @@ gen3_roll() {
# Get the service name, so we can verify it's in the manifest
local serviceName
- serviceName="$(basename "$templatePath" | sed 's/-deploy.*yaml$//')"
+ serviceName="$(basename "$templatePath" | sed 's/\(-root\)*-deploy.*yaml$//')"
if g3k_config_lookup ".versions[\"$serviceName\"]" < "$manifestPath" > /dev/null 2>&1; then
if ! (g3k_manifest_filter "$templatePath" "" "$@" | g3kubectl apply -f -); then
diff --git a/gen3/bin/sqs.sh b/gen3/bin/sqs.sh
index dccb1ff7b..7448437a0 100644
--- a/gen3/bin/sqs.sh
+++ b/gen3/bin/sqs.sh
@@ -50,15 +50,15 @@ EOM
# @sqsName
#
gen3_sqs_create_queue() {
- local sqsName=$1
- if ! shift || [[ -z "$sqsName" ]]; then
- gen3_log_err "Must provide 'sqsName' to 'gen3_sqs_create_queue'"
+ local serviceName=$1
+ if ! shift || [[ -z "$serviceName" ]]; then
+ gen3_log_err "Must provide 'serviceName' to 'gen3_sqs_create_queue'"
return 1
fi
+ local sqsName="$(gen3 api safe-name $serviceName)"
gen3_log_info "Creating SQS '$sqsName'"
- local prefix="$(gen3 api safe-name sqs-create)"
( # subshell - do not pollute parent environment
- gen3 workon default ${prefix}__sqs 1>&2
+ gen3 workon default ${sqsName}__sqs 1>&2
gen3 cd 1>&2
cat << EOF > config.tfvars
sqs_name="$sqsName"
@@ -76,7 +76,8 @@ EOF
# @sqsName
#
gen3_sqs_create_queue_if_not_exist() {
- local sqsName=$1
+ local serviceName=$1
+ local sqsName="$(gen3 api safe-name $serviceName)"
if ! shift || [[ -z "$sqsName" ]]; then
gen3_log_err "Must provide 'sqsName' to 'gen3_sqs_create_queue'"
return 1
@@ -90,7 +91,7 @@ gen3_sqs_create_queue_if_not_exist() {
gen3_log_info "The '$sqsName' SQS already exists"
else
# create the queue
- sqsInfo="$(gen3_sqs_create_queue $sqsName)" || exit 1
+ sqsInfo="$(gen3_sqs_create_queue $serviceName)" || exit 1
sqsUrl="$(jq -e -r '.["sqs-url"].value' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-url' from output: $sqsInfo"; exit 1; }
sqsArn="$(jq -e -r '.["sqs-arn"].value' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-arn' from output: $sqsInfo"; exit 1; }
fi
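A usage sketch of the new calling convention, where callers pass a service name and the queue name is derived via gen3 api safe-name; the service name below is illustrative:

    # sketch: the helper now derives the queue name instead of taking it verbatim
    gen3_sqs_create_queue_if_not_exist audit-service
    # operates on a queue named "$(gen3 api safe-name audit-service)"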
diff --git a/gen3/bin/update-kubeconfig.sh b/gen3/bin/update-kubeconfig.sh
new file mode 100644
index 000000000..0da60bcb7
--- /dev/null
+++ b/gen3/bin/update-kubeconfig.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+source "${GEN3_HOME}/gen3/lib/utils.sh"
+gen3_load "gen3/gen3setup"
+
+cd "${HOME}/Gen3Secrets/"
+
+aws_version="0.0.0"
+if aws --version > /dev/null 2>&1; then
+ aws_version="$(aws --version | awk '{ print $1 }' | awk -F / '{ print $2 }')"
+fi
+if ! semver_ge "$aws_version" "2.7.0"; then
+ gen3_log_err "awscli is on version $aws_version. Please update to latest version before running this command again. \nHint: 'gen3 kube-setup-workvm' can take care of that for you."
+ exit 0
+fi
+
+namespace=$(gen3 api namespace)
+
+if [ ! -z "$KUBECONFIG" ]; then
+ if [ -f "$FILE" ]; then
+ gen3_log_info "Backing up existing kubeconfig located at $KUBECONFIG"
+ mv "$KUBECONFIG" "$KUBECONFIG.backup"
+ fi
+else
+ gen3_log_warn "KUBECONFIG env var is not set. Cannot take backup of existing kubeconfig."
+fi
+
+gen3_log_info "Updating kubeconfig by running 'aws eks update-kubeconfig --name $vpc_name'"
+aws eks update-kubeconfig --name $vpc_name
+
+gen3_log_info "Setting namespace to $namespace. ('kubectl config set-context --current --namespace=$namespace')"
+kubectl config set-context --current --namespace=$namespace
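A usage sketch noting that the script relies on vpc_name (and, for the backup step, KUBECONFIG) being exported in the admin VM shell; the values below are illustrative:

    # sketch: typical invocation from the admin VM
    export vpc_name="myenv"
    export KUBECONFIG="$HOME/Gen3Secrets/kubeconfig"
    gen3 update-kubeconfig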
diff --git a/gen3/bin/waf-rules-GPE-312.json b/gen3/bin/waf-rules-GPE-312.json
new file mode 100644
index 000000000..b8cdccabe
--- /dev/null
+++ b/gen3/bin/waf-rules-GPE-312.json
@@ -0,0 +1,153 @@
+[
+ {
+ "Name": "AWS-AWSManagedRulesAdminProtectionRuleSet",
+ "Priority": 0,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesAdminProtectionRuleSet",
+ "RuleActionOverrides": [
+ {
+ "Name": "AdminProtection_URIPATH",
+ "ActionToUse": {
+ "Challenge": {}
+ }
+ }
+ ]
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesAdminProtectionRuleSet"
+ }
+ },
+ {
+ "Name": "AWS-AWSManagedRulesAmazonIpReputationList",
+ "Priority": 1,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesAmazonIpReputationList",
+ "RuleActionOverrides": [
+ {
+ "Name": "AWSManagedReconnaissanceList",
+ "ActionToUse": {
+ "Count": {}
+ }
+ }
+ ]
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesAmazonIpReputationList"
+ }
+ },
+ {
+ "Name": "AWS-AWSManagedRulesCommonRuleSet",
+ "Priority": 2,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesCommonRuleSet",
+ "Version": "Version_1.4",
+ "RuleActionOverrides": [
+ {
+ "Name": "EC2MetaDataSSRF_BODY",
+ "ActionToUse": {
+ "Count": {}
+ }
+ },
+ {
+ "Name": "GenericLFI_BODY",
+ "ActionToUse": {
+ "Allow": {}
+ }
+ },
+ {
+ "Name": "SizeRestrictions_QUERYSTRING",
+ "ActionToUse": {
+ "Count": {}
+ }
+ },
+ {
+ "Name": "SizeRestrictions_BODY",
+ "ActionToUse": {
+ "Allow": {}
+ }
+ },
+ {
+ "Name": "CrossSiteScripting_BODY",
+ "ActionToUse": {
+ "Count": {}
+ }
+ },
+ {
+ "Name": "SizeRestrictions_URIPATH",
+ "ActionToUse": {
+ "Allow": {}
+ }
+ },
+ {
+ "Name": "SizeRestrictions_Cookie_HEADER",
+ "ActionToUse": {
+ "Allow": {}
+ }
+ }
+ ]
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesCommonRuleSet"
+ }
+ },
+ {
+ "Name": "AWS-AWSManagedRulesKnownBadInputsRuleSet",
+ "Priority": 3,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesKnownBadInputsRuleSet"
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesKnownBadInputsRuleSet"
+ }
+ },
+ {
+ "Name": "AWS-AWSManagedRulesLinuxRuleSet",
+ "Priority": 4,
+ "Statement": {
+ "ManagedRuleGroupStatement": {
+ "VendorName": "AWS",
+ "Name": "AWSManagedRulesLinuxRuleSet"
+ }
+ },
+ "OverrideAction": {
+ "None": {}
+ },
+ "VisibilityConfig": {
+ "SampledRequestsEnabled": true,
+ "CloudWatchMetricsEnabled": true,
+ "MetricName": "AWS-AWSManagedRulesLinuxRuleSet"
+ }
+ }
+]
\ No newline at end of file
diff --git a/gen3/bin/workon.sh b/gen3/bin/workon.sh
index b6ba562a2..f614cf662 100644
--- a/gen3/bin/workon.sh
+++ b/gen3/bin/workon.sh
@@ -113,7 +113,7 @@ if [[ ! -f "$bucketCheckFlag" && "$GEN3_FLAVOR" == "AWS" ]]; then
}
EOM
)
- gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET"
+ gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET" $([[ $(aws configure get $GEN3_PROFILE.region) = "us-east-1" ]] && echo "" || echo --create-bucket-configuration LocationConstraint="$(aws configure get $GEN3_PROFILE.region)")
sleep 5 # Avoid race conditions
if gen3_aws_run aws s3api put-bucket-encryption --bucket "$GEN3_S3_BUCKET" --server-side-encryption-configuration "$S3_POLICY"; then
touch "$bucketCheckFlag"
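The dense one-liner above expands to the sketch below (same variables): us-east-1 rejects an explicit LocationConstraint, while every other region requires one:

    # sketch: region-aware bucket creation behind the gen3_aws_run wrapper
    region="$(aws configure get "$GEN3_PROFILE.region")"
    if [[ "$region" == "us-east-1" ]]; then
      gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET"
    else
      gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET" \
        --create-bucket-configuration LocationConstraint="$region"
    fi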
diff --git a/gen3/lib/aws.sh b/gen3/lib/aws.sh
index 1fa972997..096b95753 100644
--- a/gen3/lib/aws.sh
+++ b/gen3/lib/aws.sh
@@ -454,10 +454,100 @@ EOM
if [[ "$GEN3_WORKSPACE" =~ _eks$ ]]; then
commonsName=${GEN3_WORKSPACE//_eks/}
cat - < ".
g3kubectl get pods -o json | \
- jq -r '.items | map(select(.status.phase != "Pending" and .status.phase != "Unknown")) | map( {pod: .metadata.name, containers: .spec.containers | map(.name) } ) | map( .pod as $pod | .containers | map( { pod: $pod, cont: .})[]) | map(select(.cont != "pause" and .cont != "jupyterhub"))[] | .pod + " " + .cont' | \
+ jq -r '.items | map(select(.status.phase != "Pending" and .status.phase != "Unknown")) | .[] | .metadata.name as $pod | (.spec.containers + .spec.initContainers) | map(select(.name != "pause" and .name != "jupyterhub")) | .[] | {pod: $pod, cont: .name} | "\(.pod) \(.cont)"' | \
while read -r line; do
gen3_logs_snapshot_container $line
done
}
-
diff --git a/gen3/lib/manifestDefaults/modsec/modsecurity.conf b/gen3/lib/manifestDefaults/modsec/modsecurity.conf
index 117d92e00..508834620 100644
--- a/gen3/lib/manifestDefaults/modsec/modsecurity.conf
+++ b/gen3/lib/manifestDefaults/modsec/modsecurity.conf
@@ -39,15 +39,15 @@ SecRule REQUEST_HEADERS:Content-Type "application/json" \
# to the size of data, with files excluded. You want to keep that value as
# low as practical.
#
-SecRequestBodyLimit 13107200
-SecRequestBodyNoFilesLimit 131072
+SecRequestBodyLimit 524288000
+SecRequestBodyNoFilesLimit 1048576
# What do do if the request body size is above our configured limit.
# Keep in mind that this setting will automatically be set to ProcessPartial
# when SecRuleEngine is set to DetectionOnly mode in order to minimize
# disruptions when initially deploying ModSecurity.
#
-SecRequestBodyLimitAction Reject
+SecRequestBodyLimitAction ProcessPartial
# Verify that we've correctly processed the request body.
# As a rule of thumb, when failing to process a request body
diff --git a/gen3/lib/testData/default/expectedFenceResult.yaml b/gen3/lib/testData/default/expectedFenceResult.yaml
index 7bc373ad0..98c360531 100644
--- a/gen3/lib/testData/default/expectedFenceResult.yaml
+++ b/gen3/lib/testData/default/expectedFenceResult.yaml
@@ -32,7 +32,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -41,6 +41,22 @@ spec:
values:
- fence
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
# -----------------------------------------------------------------------------
@@ -127,6 +143,7 @@ spec:
ports:
- containerPort: 80
- containerPort: 443
+ - containerPort: 6567
volumeMounts:
# -----------------------------------------------------------------------------
# DEPRECATED! Remove when all commons are no longer using local_settings.py
@@ -191,12 +208,12 @@ spec:
mountPath: "/fence/jwt-keys.tar"
subPath: "jwt-keys.tar"
resources:
- requests:
- cpu: 0.4
- memory: 1200Mi
- limits:
- cpu: 1.0
- memory: 2400Mi
+ requests:
+ cpu: 0.4
+ memory: 1200Mi
+ limits:
+ cpu: 1.0
+ memory: 2400Mi
command: ["/bin/bash"]
args:
- "-c"
diff --git a/gen3/lib/testData/default/expectedSheepdogResult.yaml b/gen3/lib/testData/default/expectedSheepdogResult.yaml
index ea8f81dbd..a2bd3efcc 100644
--- a/gen3/lib/testData/default/expectedSheepdogResult.yaml
+++ b/gen3/lib/testData/default/expectedSheepdogResult.yaml
@@ -17,6 +17,7 @@ spec:
template:
metadata:
labels:
+ netnolimit: "yes"
app: sheepdog
release: production
public: "yes"
@@ -27,7 +28,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -36,6 +37,22 @@ spec:
values:
- sheepdog
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -148,8 +165,7 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: 0.8
- memory: 1024Mi
+ cpu: 100m
+ memory: 200Mi
limits:
- cpu: 2
- memory: 2048Mi
\ No newline at end of file
+ memory: 800Mi
\ No newline at end of file
diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml
index 976b3aea6..80538842e 100644
--- a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml
+++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml
@@ -35,7 +35,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -44,6 +44,22 @@ spec:
values:
- fence
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
# -----------------------------------------------------------------------------
@@ -275,6 +291,7 @@ spec:
if fence-create migrate --help > /dev/null 2>&1; then
if ! grep -E 'ENABLE_DB_MIGRATION"?: *false' /var/www/fence/fence-config.yaml; then
echo "Running db migration: fence-create migrate"
+ cd /fence
fence-create migrate
else
echo "Db migration disabled in fence-config"
diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml
index 5d0025950..08407ae52 100644
--- a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml
+++ b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml
@@ -31,7 +31,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -40,6 +40,22 @@ spec:
values:
- sheepdog
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -182,8 +198,7 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: 0.8
- memory: 1024Mi
+ cpu: 100m
+ memory: 200Mi
limits:
- cpu: 2
- memory: 2048Mi
+ memory: 800Mi
diff --git a/gen3/test/ec2Test.sh b/gen3/test/ec2Test.sh
index 21310a24c..4981c925c 100644
--- a/gen3/test/ec2Test.sh
+++ b/gen3/test/ec2Test.sh
@@ -1,6 +1,6 @@
-if ! EC2_TEST_IP="$(g3kubectl get nodes -o json | jq -r -e '.items[0].status.addresses[] | select(.type == "InternalIP") | .address')" || [[ -z "$EC2_TEST_IP" ]]; then
+if ! EC2_TEST_IP="$(g3kubectl get nodes -o json | jq -r -e '.items[3].status.addresses[] | select(.type == "InternalIP") | .address')" || [[ -z "$EC2_TEST_IP" ]]; then
gen3_log_err "ec2Test failed to acquire IP address of a k8s node to test against"
fi
diff --git a/gen3/test/ecrTest.sh b/gen3/test/ecrTest.sh
index 91edf798b..57847abe5 100644
--- a/gen3/test/ecrTest.sh
+++ b/gen3/test/ecrTest.sh
@@ -10,8 +10,8 @@ test_ecr_login() {
test_ecr_setup() {
if [[ -n "$JENKINS_HOME" ]]; then
- # give ourselves read/write permissions on /var/run/docker.sock
- sudo chmod a+rw /var/run/docker.sock; because $? "ecr_setup modified docker.sock"
+ # give ourselves permissions on /run/containerd/containerd.sock
+ sudo chown root:sudo /run/containerd/containerd.sock; because $? "ecr_setup modified containerd.sock"
fi
}
diff --git a/gen3/test/jobTest.sh b/gen3/test/jobTest.sh
index 84a4d046b..bb37b4f72 100644
--- a/gen3/test/jobTest.sh
+++ b/gen3/test/jobTest.sh
@@ -6,7 +6,7 @@
excludeJob() {
local jobKey="$1"
local excludeList=(
- /aws-bucket- /bucket- /covid19- /data-ingestion- /google- /nb-etl- /remove-objects-from- /replicate- /s3sync- /fence-cleanup
+ /aws-bucket- /bucket- /covid19- /data-ingestion- /google- /nb-etl- /remove-objects-from- /replicate- /s3sync- /fence-cleanup /etl- /indexd- /metadata-
)
for exclude in "${excludeList[@]}"; do
if [[ "$it" =~ $exclude ]]; then return 0; fi
diff --git a/gen3/test/jupyterTest.sh b/gen3/test/jupyterTest.sh
index f0e327d71..db6a62618 100644
--- a/gen3/test/jupyterTest.sh
+++ b/gen3/test/jupyterTest.sh
@@ -30,7 +30,7 @@ test_jupyter_metrics() {
}
shunit_runtest "test_jupyter_idle" "jupyter"
-shunit_runtest "test_jupyter_metrics" "jupyter"
+# shunit_runtest "test_jupyter_metrics" "jupyter"
shunit_runtest "test_jupyter_prepuller" "local,jupyter"
shunit_runtest "test_jupyter_namespace" "local,jupyter"
shunit_runtest "test_jupyter_setup" "jupyter"
diff --git a/gen3/test/terraformTest.sh b/gen3/test/terraformTest.sh
deleted file mode 100644
index 17bcc03c2..000000000
--- a/gen3/test/terraformTest.sh
+++ /dev/null
@@ -1,461 +0,0 @@
-GEN3_TEST_PROFILE="${GEN3_TEST_PROFILE:-cdistest}"
-GEN3_TEST_WORKSPACE="gen3test"
-GEN3_TEST_ACCOUNT=707767160287
-
-#
-# TODO - generalize these tests to setup their own test VPC,
-# rather than relying on qaplanetv1 or devplanetv1 being there
-#
-
-#
-# Little macos/linux stat wrapper
-#
-file_mode() {
- if [[ $(uname -s) == 'Linux' ]]; then
- stat -c %a "$1"
- else
- stat -f %p "$1"
- fi
-}
-
-test_workspace() {
- gen3 workon $GEN3_TEST_PROFILE $GEN3_TEST_WORKSPACE; because $? "Calling gen3 workon multiple times should be harmless"
- [[ $GEN3_PROFILE = $GEN3_TEST_PROFILE ]]; because $? "gen3 workon sets the GEN3_PROFILE env variable: $GEN3_PROFILE"
- [[ $GEN3_WORKSPACE = $GEN3_TEST_WORKSPACE ]]; because $? "gen3 workon sets the GEN3_WORKSPACE env variable: $GEN3_WORKSPACE"
- [[ $GEN3_FLAVOR = "AWS" || \
- ($GEN3_FLAVOR == "GCP" && $GEN3_PROFILE =~ ^gcp-) || \
- ($GEN3_FLAVOR == "ONPREM" && $GEN3_PROFILE =~ ^onprem-) ]]; because $? "GEN3_FLAVOR is gcp for gcp-* profiles, else AWS"
- [[ $GEN3_FLAVOR != "AWS" || $GEN3_S3_BUCKET = "cdis-state-ac${GEN3_TEST_ACCOUNT}-gen3" || $GEN3_S3_BUCKET = "cdis-terraform-state.account-${GEN3_TEST_ACCOUNT}.gen3" ]]; because $? "gen3 workon sets the GEN3_S3_BUCKET env variable: $GEN3_S3_BUCKET"
- [[ (! -z $GEN3_WORKDIR) && -d $GEN3_WORKDIR ]]; because $? "gen3 workon sets the GEN3_WORKDIR env variable, and initializes the folder: $GEN3_WORKDIR"
- [[ $(file_mode $GEN3_WORKDIR) =~ 700$ ]]; because $? "gen3 workon sets the GEN3_WORKDIR to mode 0700, because secrets are in there"
- gen3 cd && [[ $(pwd) = "$GEN3_WORKDIR" ]]; because $? "gen3 cd should take us to the workspace by default: $(pwd) =? $GEN3_WORKDIR"
- for fileName in README.md config.tfvars backend.tfvars; do
- [[ -f $fileName ]]; because $? "gen3 workon ensures we have a $fileName - local copy || s3 copy || generated from template"
- done
- [[ ! -z "$MD5" ]]; because $? "commons.sh sets MD5 to $MD5"
-
- if [[ $GEN3_TEST_WORKSPACE =~ __custom$ ]]; then
- [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_WORKDIR" ]]; because $? "a __custom workspace loads from the workspace folder"
- elif [[ "$GEN3_TEST_PROFILE" =~ ^gcp- ]]; then
- [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/gcp/commons" ]]; because $? "a gcp- profile currently only support a commons workspace"
- elif [[ "$GEN3_TEST_PROFILE" =~ ^onprem- ]]; then
- for fileName in README.md creds.json 00configmap.yaml kube-setup.sh; do
- filePath="onprem_scripts/$fileName"
- [[ -f $filePath ]]; because $? "gen3 workon ensures we have a $filePath generated from template"
- done
- else # aws profile
- [[ "$GEN3_TFSCRIPT_FOLDER" =~ ^"$GEN3_HOME/tf_files/aws/" ]]; because $? "an aws workspace references the aws/ folder: $GEN3_TFSCRIPT_FOLDER"
- fi
-}
-
-workspace_cleanup() {
- # try to avoid accidentally erasing the user's data ...
- cd /tmp && [[ -n "$GEN3_WORKDIR" && "$GEN3_WORKDIR" =~ /gen3/ && -f "$GEN3_WORKDIR/config.tfvars" ]] && /bin/rm -rf "$GEN3_WORKDIR";
- because $? "was able to cleanup $GEN3_WORKDIR"
-}
-
-test_uservpc_workspace() {
- GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_user"
- test_workspace
- [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/user_vpc" ]]; because $? "a _user workspace should use the ./aws/user_vpc resources: $GEN3_TFSCRIPT_FOLDER"
- workspace_cleanup
-}
-
-test_usergeneric_workspace() {
- GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_usergeneric"
- test_workspace
- [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/user_generic" ]]; because $? "a _usergeneric workspace should use the ./aws/user_generic resources: $GEN3_TFSCRIPT_FOLDER"
- cat << EOF > config.tfvars
-username="frickjack"
-EOF
- gen3 tfplan; because $? "_usergeneric tfplan should work";
- workspace_cleanup
-}
-
-test_snapshot_workspace() {
- GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_snapshot"
- test_workspace
- [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/rds_snapshot" ]]; because $? "a _snapshot workspace should use the ./aws/rds_snapshot resources: $GEN3_TFSCRIPT_FOLDER"
- workspace_cleanup
-}
-
-test_databucket_workspace() {
- GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_databucket"
- test_workspace
- [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/data_bucket" ]]; because $? "a _databucket workspace should use the ./aws/data_bucket resources: $GEN3_TFSCRIPT_FOLDER"
-  cat - > config.tfvars <<EOM
-# do not use < or @ in the password
-db_password_fence="whatever"
-
-db_password_gdcapi="whatever"
-db_password_sheepdog="whatever"
-db_password_peregrine="whatever"
-
-db_password_indexd="g6pmYkcoR7qECjGoErzVb5gkX3kum0yo"
-
-# password for write access to indexd
-gdcapi_indexd_password="oYva39mIPV5uXskv7jWnKuVZBUFBQcxd"
-
-fence_snapshot=""
-gdcapi_snapshot=""
-indexd_snapshot=""
-# mailgun for sending alert e-mails
-mailgun_api_key=""
-mailgun_api_url=""
-mailgun_smtp_host=""
-
-kube_ssh_key=""
-EOM
- [[ "$(pwd)" =~ "/$GEN3_WORKSPACE"$ ]]; because $? "commons workspace should have base $GEN3_WORKSPACE - $(pwd)"
- gen3 tfplan; because $? "tfplan should run even with some invalid config variables"
- [[ -f "$GEN3_WORKDIR/plan.terraform" ]]; because $? "'gen3 tfplan' generates a plan.terraform file used by 'gen3 tfapply'"
- workspace_cleanup
-}
-
-test_custom_workspace() {
- GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}__custom"
- test_workspace
-
- local sourceFolder="../../../../../cloud-automation/tf_files/aws/modules/s3-bucket"
- if [[ ! -d "$sourceFolder" ]]; then
- # Jenkins has a different relative path setup
- sourceFolder="../../../../cloud-automation/tf_files/aws/modules/s3-bucket"
- fi
-  cat - > bucket.tf <<EOM
+    >> /home/ec2-user/.ssh/authorized_keys
+
+ echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
+
+ sysctl -w fs.inotify.max_user_watches=12000
+
+ sudo yum update -y
+ sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+ sudo dracut -f
+ # configure grub
+ sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+ --BOUNDARY
+ Content-Type: text/cloud-config; charset="us-ascii"
+
+ power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+ --BOUNDARY--
+ blockDeviceMappings:
+ - deviceName: /dev/xvda
+ ebs:
+ volumeSize: 100Gi
+ volumeType: gp2
+ encrypted: true
+ deleteOnTermination: true
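Once cloud-init reboots the node, FIPS mode can be confirmed on the instance itself; a minimal sketch (not part of the diff, standard Linux paths assumed):

    cat /proc/sys/crypto/fips_enabled      # prints 1 once fips=1 is active
    sudo grubby --info=ALL | grep fips     # shows the kernel args set above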
diff --git a/kube/services/argo-events/workflows/eventsource-completed.yaml b/kube/services/argo-events/workflows/eventsource-completed.yaml
new file mode 100644
index 000000000..b3c7488fa
--- /dev/null
+++ b/kube/services/argo-events/workflows/eventsource-completed.yaml
@@ -0,0 +1,20 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+ name: argo-workflow-ended-source
+ namespace: argo-events
+spec:
+ template:
+ serviceAccountName: default
+ resource:
+ workflow-ended:
+ namespace: argo
+ group: argoproj.io
+ version: v1alpha1
+ resource: workflows
+ eventTypes:
+ - UPDATE
+ filter:
+ labels:
+ - key: workflows.argoproj.io/completed
+ value: "true"
diff --git a/kube/services/argo-events/workflows/eventsource-created.yaml b/kube/services/argo-events/workflows/eventsource-created.yaml
new file mode 100644
index 000000000..11d7084ca
--- /dev/null
+++ b/kube/services/argo-events/workflows/eventsource-created.yaml
@@ -0,0 +1,18 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+ name: argo-workflow-created-source
+ namespace: argo-events
+spec:
+ template:
+ serviceAccountName: default
+ resource:
+ workflow-created:
+ namespace: argo
+ group: argoproj.io
+ version: v1alpha1
+ resource: workflows
+ eventTypes:
+ - ADD
+ filter:
+ afterStart: true
diff --git a/kube/services/argo-events/workflows/eventsource-deleted.yaml b/kube/services/argo-events/workflows/eventsource-deleted.yaml
new file mode 100644
index 000000000..54a00464e
--- /dev/null
+++ b/kube/services/argo-events/workflows/eventsource-deleted.yaml
@@ -0,0 +1,16 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+ name: argo-workflow-deleted-source
+ namespace: argo-events
+spec:
+ template:
+ serviceAccountName: default
+ resource:
+ workflow-deleted:
+ namespace: argo
+ group: argoproj.io
+ version: v1alpha1
+ resource: workflows
+ eventTypes:
+ - DELETE
diff --git a/kube/services/argo-events/workflows/job-admin-role.yaml b/kube/services/argo-events/workflows/job-admin-role.yaml
new file mode 100644
index 000000000..462652c97
--- /dev/null
+++ b/kube/services/argo-events/workflows/job-admin-role.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: job-admin
+ namespace: argo-events
+rules:
+ - apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - '*'
diff --git a/kube/services/argo-events/workflows/sensor-completed.yaml b/kube/services/argo-events/workflows/sensor-completed.yaml
new file mode 100644
index 000000000..293c0e119
--- /dev/null
+++ b/kube/services/argo-events/workflows/sensor-completed.yaml
@@ -0,0 +1,64 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+ name: argo-workflow-ended-sensor
+ namespace: argo-events
+spec:
+ template:
+ serviceAccountName: default
+ container:
+ env:
+ - name: DEBUG_LOG
+ value: "true"
+ dependencies:
+ - name: argo-workflow-ended
+ eventSourceName: argo-workflow-ended-source
+ eventName: workflow-ended
+ triggers:
+ - template:
+ name: log-event
+ log:
+ intervalSeconds: 10
+ - template:
+ name: argo-workflow
+ k8s:
+ operation: create
+ parameters:
+ - src:
+ dependencyName: argo-workflow-ended
+ dataKey: body.metadata.name
+ dest: spec.template.spec.containers.0.env.0.value
+ source:
+ resource:
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ generateName: delete-karpenter-resources-
+ namespace: argo-events
+ labels:
+ workflow: ""
+ spec:
+ ttlSecondsAfterFinished: 900
+ completions: 1
+ parallelism: 1
+ template:
+ spec:
+ restartPolicy: OnFailure
+ containers:
+ - name: karpenter-resource-creator
+ image: quay.io/cdis/awshelper
+ command: ["/bin/sh"]
+ args:
+ - "-c"
+ - |
+ if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+ kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME
+ fi
+
+ if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+ kubectl delete provisioners workflow-$WORKFLOW_NAME
+ fi
+ env:
+ - name: WORKFLOW_NAME
+ value: ""
+ backoffLimit: 20
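For debugging, the cleanup this Job performs can be reproduced by hand; a minimal sketch that uses --ignore-not-found instead of the explicit existence checks above (the workflow name is hypothetical):

    WORKFLOW_NAME=wf-example
    kubectl delete awsnodetemplate "workflow-$WORKFLOW_NAME" --ignore-not-found
    kubectl delete provisioner "workflow-$WORKFLOW_NAME" --ignore-not-found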
diff --git a/kube/services/argo-events/workflows/sensor-created.yaml b/kube/services/argo-events/workflows/sensor-created.yaml
new file mode 100644
index 000000000..9f6de2c83
--- /dev/null
+++ b/kube/services/argo-events/workflows/sensor-created.yaml
@@ -0,0 +1,100 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+ name: argo-workflow-created-sensor
+ namespace: argo-events
+spec:
+ template:
+ serviceAccountName: default
+ container:
+ env:
+ - name: DEBUG_LOG
+ value: "true"
+ dependencies:
+ - name: workflow-created-event
+ eventSourceName: argo-workflow-created-source
+ eventName: workflow-created
+ triggers:
+ - template:
+ name: log-event
+ log:
+ intervalSeconds: 10
+ - template:
+ name: argo-workflow
+ k8s:
+ operation: create
+ parameters:
+ - src:
+ dependencyName: workflow-created-event
+ dataKey: body.metadata.name
+ dest: spec.template.spec.containers.0.env.0.value
+ - src:
+ dependencyName: workflow-created-event
+ dataKey: body.metadata.name
+ dest: metadata.labels.workflow
+ - src:
+ dependencyName: workflow-created-event
+ dataKey: body.metadata.labels.gen3username
+ dest: spec.template.spec.containers.0.env.1.value
+ source:
+ resource:
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ generateName: create-karpenter-resources-
+ namespace: argo-events
+ labels:
+ workflow: ""
+ spec:
+ completions: 1
+ ttlSecondsAfterFinished: 900
+ parallelism: 1
+ template:
+ spec:
+ restartPolicy: OnFailure
+ containers:
+ - name: karpenter-resource-creator
+ image: quay.io/cdis/awshelper
+ command: ["/bin/sh"]
+ args:
+ - "-c"
+ - |
+ #!/bin/bash
+ if [ -z "$PROVISIONER_TEMPLATE" ]; then
+ PROVISIONER_TEMPLATE="provisioner.yaml"
+ fi
+
+ if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then
+ AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml"
+ fi
+
+
+ if ! kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+ sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f -
+ fi
+
+ if ! kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+ sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f -
+ fi
+ env:
+ - name: WORKFLOW_NAME
+ value: ""
+ - name: GEN3_USERNAME
+ value: ""
+ - name: ENVIRONMENT
+ valueFrom:
+ configMapKeyRef:
+ name: environment
+ key: environment
+ - name: PROVISIONER_TEMPLATE
+ value: /manifests/provisioner.yaml
+ - name: AWSNODETEMPLATE_TEMPLATE
+ value: /manifests/nodetemplate.yaml
+ volumeMounts:
+ - name: karpenter-templates-volume
+ mountPath: /manifests
+ volumes:
+ - name: karpenter-templates-volume
+ configMap:
+ name: karpenter-templates
+ backoffLimit: 20
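The sed calls above do plain token substitution on the templates mounted from the karpenter-templates ConfigMap; a minimal sketch of how one placeholder line renders (the template line and the values are illustrative):

    WORKFLOW_NAME=wf-abc123 GEN3_USERNAME=alice ENVIRONMENT=qa
    echo "  name: workflow-WORKFLOW_NAME  # GEN3_USERNAME / ENVIRONMENT" |
      sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" \
          -e "s/GEN3_USERNAME/$GEN3_USERNAME/" \
          -e "s/ENVIRONMENT/$ENVIRONMENT/"
    # -> "  name: workflow-wf-abc123  # alice / qa"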
diff --git a/kube/services/argo-events/workflows/sensor-deleted.yaml b/kube/services/argo-events/workflows/sensor-deleted.yaml
new file mode 100644
index 000000000..c235a820a
--- /dev/null
+++ b/kube/services/argo-events/workflows/sensor-deleted.yaml
@@ -0,0 +1,60 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+ name: argo-workflow-deleted-sensor
+ namespace: argo-events
+spec:
+ template:
+ serviceAccountName: default
+ dependencies:
+ - name: argo-workflow-deleted
+ eventSourceName: argo-workflow-deleted-source
+ eventName: workflow-deleted
+ triggers:
+ - template:
+ name: log-event
+ log:
+ intervalSeconds: 10
+ - template:
+ name: argo-workflow
+ k8s:
+ operation: create
+ parameters:
+ - src:
+ dependencyName: argo-workflow-deleted
+ dataKey: body.metadata.name
+ dest: spec.template.spec.containers.0.env.0.value
+ source:
+ resource:
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ generateName: delete-karpenter-resources-
+ namespace: argo-events
+ labels:
+ workflow: ""
+ spec:
+ ttlSecondsAfterFinished: 900
+ completions: 1
+ parallelism: 1
+ template:
+ spec:
+ restartPolicy: OnFailure
+ containers:
+ - name: karpenter-resource-creator
+ image: quay.io/cdis/awshelper
+ command: ["/bin/sh"]
+ args:
+ - "-c"
+ - |
+ if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+ kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME
+ fi
+
+ if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then
+ kubectl delete provisioners workflow-$WORKFLOW_NAME
+ fi
+ env:
+ - name: WORKFLOW_NAME
+ value: ""
+ backoffLimit: 20
diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml
index f00bd2cc2..89ec29ecc 100644
--- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml
+++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml
@@ -21,12 +21,14 @@ spec:
tags.datadoghq.com/service: "argo-wrapper"
netnolimit: "yes"
public: "yes"
+ GEN3_ENV_LABEL
GEN3_ARGO-WRAPPER_VERSION
+ GEN3_DATE_LABEL
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -35,11 +37,31 @@ spec:
values:
- argo-wrapper
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: argo-config
configMap:
name: manifest-argo
-
+ optional: true
+ - name: argo-wrapper-namespace-config
+ configMap:
+ name: argo-wrapper-namespace-config
+
containers:
- name: argo-wrapper
GEN3_ARGO-WRAPPER_IMAGE
@@ -51,3 +73,7 @@ spec:
readOnly: true
mountPath: /argo.json
subPath: argo.json
+ - name: argo-wrapper-namespace-config
+ readOnly: true
+ mountPath: /argowrapper/config.ini
+ subPath: config.ini
diff --git a/kube/services/argo-wrapper/config.ini b/kube/services/argo-wrapper/config.ini
new file mode 100644
index 000000000..0693ee2e2
--- /dev/null
+++ b/kube/services/argo-wrapper/config.ini
@@ -0,0 +1,6 @@
+[DEFAULT]
+ARGO_ACCESS_METHOD = access
+ARGO_HOST = $ARGO_HOST
+ARGO_NAMESPACE = $ARGO_NAMESPACE
+COHORT_DEFINITION_BY_SOURCE_AND_TEAM_PROJECT_URL = http://cohort-middleware-service/cohortdefinition-stats/by-source-id/{}/by-team-project?team-project={}
+COHORT_MIDDLEWARE_URL = http://cohort-middleware-service
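The deployment change above mounts this file from an argo-wrapper-namespace-config ConfigMap under the key config.ini. A minimal sketch of rendering the $ARGO_HOST / $ARGO_NAMESPACE placeholders and publishing that ConfigMap (envsubst and the example values are assumptions; the real kube-setup script may do this differently):

    export ARGO_HOST="https://argo.example.org" ARGO_NAMESPACE="argo"
    envsubst '$ARGO_HOST $ARGO_NAMESPACE' \
      < kube/services/argo-wrapper/config.ini > /tmp/config.ini
    kubectl create configmap argo-wrapper-namespace-config \
      --from-file=config.ini=/tmp/config.ini --dry-run=client -o yaml | kubectl apply -f -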
diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml
index bf407d651..23dda4a5a 100644
--- a/kube/services/argo/values.yaml
+++ b/kube/services/argo/values.yaml
@@ -1,11 +1,46 @@
controller:
parallelism: 10
+ namespaceParallelism: 5
metricsConfig:
# -- Enables prometheus metrics server
- enabled: false
+ enabled: true
+ servicePort: 9090
+
+ resources:
+ requests:
+ memory: 8Gi
+ limits:
+ memory: 8Gi
+
+ podAnnotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/path: /metrics
+ prometheus.io/port: "9090"
+
+ ad.datadoghq.com/controller.checks: |
+ {
+ "openmetrics": {
+ "init_config": {},
+ "instances": [
+ {
+ "openmetrics_endpoint": "http://%%host%%:%%port%%/metrics ",
+ "namespace": "argo",
+ "metrics": ["*"]
+ }
+ ]
+ }
+ }
+
+ resourceRateLimit:
+ limit: 40
+ burst: 4
# -- enable persistence using postgres
persistence:
+ connectionPool:
+ maxIdleConns: 100
+ maxOpenConns: 0
+ connMaxLifetime: 300s
archive: true
archiveLabelSelector:
matchLabels:
@@ -15,15 +50,16 @@ controller:
port: 5432
database: GEN3_ARGO_DB_NAME
tableName: argo_workflows
- # # the database secrets must be in the same namespace of the controller
+ # # the database secrets must be in the same namespace of the controller
userNameSecret:
name: argo-db-creds
key: db_username
passwordSecret:
name: argo-db-creds
key: db_password
+ nodeStatusOffLoad: true
- workflowDefaults:
+ workflowDefaults:
spec:
archiveLogs: true
@@ -42,11 +78,16 @@ server:
baseHref: "/argo/"
# -- Extra arguments to provide to the Argo server binary, such as for disabling authentication.
extraArgs:
- - --auth-mode=server
- - --auth-mode=client
+ - --auth-mode=server
+ - --auth-mode=client
extraEnv:
- - name: ARGO_HTTP1
- value: "true"
+ - name: ARGO_HTTP1
+ value: "true"
+ resources:
+ requests:
+ memory: 8Gi
+ limits:
+ memory: 8Gi
# -- Influences the creation of the ConfigMap for the workflow-controller itself.
useDefaultArtifactRepo: true
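These values feed the upstream argo-workflows chart; a minimal sketch of applying them by hand (chart repo and namespace are assumptions, and placeholders such as GEN3_ARGO_DB_NAME are normally substituted by the gen3 setup tooling before install):

    helm repo add argo https://argoproj.github.io/argo-helm
    helm upgrade --install argo argo/argo-workflows \
      -n argo -f kube/services/argo/values.yaml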
diff --git a/kube/services/argo/workflows/fence-usersync-cron.yaml b/kube/services/argo/workflows/fence-usersync-cron.yaml
new file mode 100644
index 000000000..4723ce10f
--- /dev/null
+++ b/kube/services/argo/workflows/fence-usersync-cron.yaml
@@ -0,0 +1,10 @@
+apiVersion: argoproj.io/v1alpha1
+kind: CronWorkflow
+metadata:
+ name: fence-usersync-cron
+spec:
+ serviceAccountName: argo
+ schedule: "*/30 * * * *"
+ workflowSpec:
+ workflowTemplateRef:
+ name: fence-usersync-workflow
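Besides the 30-minute schedule, a one-off usersync can be started from the same WorkflowTemplate; a minimal sketch assuming the argo CLI and the argo namespace:

    argo submit -n argo --from workflowtemplate/fence-usersync-workflow \
      -p ADD_DBGAP=false -p ONLY_DBGAP=false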
diff --git a/kube/services/argo/workflows/fence-usersync-wf.yaml b/kube/services/argo/workflows/fence-usersync-wf.yaml
new file mode 100644
index 000000000..d7f56a2ce
--- /dev/null
+++ b/kube/services/argo/workflows/fence-usersync-wf.yaml
@@ -0,0 +1,257 @@
+apiVersion: argoproj.io/v1alpha1
+kind: WorkflowTemplate
+metadata:
+ name: fence-usersync-workflow
+spec:
+ volumeClaimTemplates:
+ - metadata:
+ name: shared-data
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 1Gi
+ serviceAccountName: argo
+ entrypoint: fence-usersync
+ arguments:
+ parameters:
+ - name: ADD_DBGAP
+ value: "false"
+ - name: ONLY_DBGAP
+ value: "false"
+ templates:
+ - name: fence-usersync
+ steps:
+ - - name: wait-for-fence
+ template: wait-for-fence
+ - - name: awshelper
+ template: awshelper
+ - - name: usersyncer
+ template: usersyncer
+
+ - name: wait-for-fence
+ container:
+ image: curlimages/curl:latest
+ command: ["/bin/sh","-c"]
+ args: ["while [ $(curl -sw '%{http_code}' http://fence-service -o /dev/null) -ne 200 ]; do sleep 5; echo 'Waiting for fence...'; done"]
+
+ - name: awshelper
+ container:
+ image: quay.io/cdis/awshelper:master
+ imagePullPolicy: Always
+ securityContext:
+ runAsUser: 0
+ env:
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: hostname
+ - name: userYamlS3Path
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: useryaml_s3path
+ - name: slackWebHook
+ value: None
+ volumeMounts:
+ - name: shared-data
+ mountPath: /mnt/shared
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ GEN3_HOME=/home/ubuntu/cloud-automation
+ source "${GEN3_HOME}/gen3/lib/utils.sh"
+ gen3_load "gen3/gen3setup"
+
+ if [ "${userYamlS3Path}" = 'none' ]; then
+ # echo "using local user.yaml"
+ # cp /var/www/fence/user.yaml /mnt/shared/user.yaml
+ echo "s3 yaml not provided - bailing out"
+ exit 1
+ else
+ # -----------------
+ echo "awshelper downloading ${userYamlS3Path} to /mnt/shared/user.yaml"
+ n=0
+ until [ $n -ge 5 ]; do
+ echo "Download attempt $n"
+ aws s3 cp "${userYamlS3Path}" /mnt/shared/user.yaml && break
+ n=$[$n+1]
+ sleep 2
+ done
+ fi
+ if [[ ! -f /mnt/shared/user.yaml ]]; then
+ echo "awshelper failed to retrieve /mnt/shared/user.yaml"
+ exit 1
+ fi
+ #-----------
+ echo "awshelper updating etl configmap"
+ if ! gen3 gitops etl-convert < /mnt/shared/user.yaml > /tmp/user.yaml; then
+ echo "ERROR: failed to generate ETL config"
+ exit 1
+ fi
+ # kubectl delete configmap fence > /dev/null 2>&1
+ # kubectl create configmap fence --from-file=/tmp/user.yaml
+ if [ "${slackWebHook}" != 'None' ]; then
+ curl -X POST --data-urlencode "payload={\"text\": \"AWSHelper: Syncing users on ${gen3Env}\"}" "${slackWebHook}"
+ fi
+ echo "Helper exit ok"
+
+ - name: usersyncer
+ volumes:
+ - name: yaml-merge
+ configMap:
+ name: "fence-yaml-merge"
+ - name: config-volume
+ secret:
+ secretName: "fence-config"
+ - name: creds-volume
+ secret:
+ secretName: "fence-creds"
+ - name: fence-google-app-creds-secret-volume
+ secret:
+ secretName: "fence-google-app-creds-secret"
+ - name: fence-google-storage-creds-secret-volume
+ secret:
+ secretName: "fence-google-storage-creds-secret"
+ - name: fence-ssh-keys
+ secret:
+ secretName: "fence-ssh-keys"
+ defaultMode: 0400
+ - name: fence-sshconfig
+ configMap:
+ name: "fence-sshconfig"
+ - name: projects
+ configMap:
+ name: "projects"
+ container:
+ image: quay.io/cdis/fence:master
+ imagePullPolicy: Always
+ env:
+ - name: PYTHONPATH
+ value: /var/www/fence
+ - name: SYNC_FROM_DBGAP
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: sync_from_dbgap
+ - name: ADD_DBGAP
+ value: "{{workflow.parameters.ADD_DBGAP}}"
+ - name: ONLY_DBGAP
+ value: "{{workflow.parameters.ONLY_DBGAP}}"
+ - name: SLACK_SEND_DBGAP
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: slack_send_dbgap
+ optional: true
+ - name: slackWebHook
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ optional: true
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: hostname
+ - name: FENCE_PUBLIC_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-fence
+ key: fence-config-public.yaml
+ optional: true
+ volumeMounts:
+ - name: shared-data
+ mountPath: /mnt/shared
+ - name: "config-volume"
+ readOnly: true
+ mountPath: "/var/www/fence/fence-config.yaml"
+ subPath: fence-config.yaml
+ - name: "creds-volume"
+ readOnly: true
+ mountPath: "/var/www/fence/creds.json"
+ - name: "yaml-merge"
+ readOnly: true
+ mountPath: "/var/www/fence/yaml_merge.py"
+ - name: "fence-google-app-creds-secret-volume"
+ readOnly: true
+ mountPath: "/var/www/fence/fence_google_app_creds_secret.json"
+ subPath: fence_google_app_creds_secret.json
+ - name: "fence-google-storage-creds-secret-volume"
+ readOnly: true
+ mountPath: "/var/www/fence/fence_google_storage_creds_secret.json"
+ subPath: fence_google_storage_creds_secret.json
+ - name: "fence-ssh-keys"
+ mountPath: "/root/.ssh/id_rsa"
+ subPath: "id_rsa"
+ - name: "fence-ssh-keys"
+ mountPath: "/root/.ssh/id_rsa.pub"
+ subPath: "id_rsa.pub"
+ - name: "fence-sshconfig"
+ mountPath: "/root/.ssh/config"
+ subPath: "config"
+ - name: "projects"
+ mountPath: "/var/www/fence/projects.yaml"
+ subPath: "projects.yaml"
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+        # Run usersync; the container exits with the fence-create exit code captured below
+ - |
+ echo "${ADD_DBGAP}"
+ echo "${ONLY_DBGAP}"
+ echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml"
+ python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml
+ echo 'options use-vc' >> /etc/resolv.conf
+ let count=0
+ while [[ ! -f /mnt/shared/user.yaml && $count -lt 50 ]]; do
+ echo "fence container waiting for /mnt/shared/user.yaml";
+ sleep 2
+ let count=$count+1
+ done
+ if [[ "$SYNC_FROM_DBGAP" != True && "$ADD_DBGAP" != "true" ]]; then
+ if [[ -f /mnt/shared/user.yaml ]]; then
+ echo "running fence-create"
+ time fence-create sync --arborist http://arborist-service --yaml /mnt/shared/user.yaml
+ else
+ echo "/mnt/shared/user.yaml did not appear within timeout :-("
+ false # non-zero exit code
+ fi
+ exitcode=$?
+ else
+ output=$(mktemp "/tmp/fence-create-output_XXXXXX")
+ if [[ -f /mnt/shared/user.yaml && "$ONLY_DBGAP" != "true" ]]; then
+ echo "Running fence-create dbgap-sync with user.yaml - see $output"
+ time fence-create sync --arborist http://arborist-service --sync_from_dbgap "True" --projects /var/www/fence/projects.yaml --yaml /mnt/shared/user.yaml 2>&1 | tee "$output"
+ else
+ echo "Running fence-create dbgap-sync without user.yaml - see $output"
+ time fence-create sync --arborist http://arborist-service --sync_from_dbgap "True" --projects /var/www/fence/projects.yaml 2>&1 | tee "$output"
+ fi
+ exitcode="${PIPESTATUS[0]}"
+ echo "$output"
+ # Echo what files we are seeing on dbgap ftp to Slack
+ # We only do this step every 12 hours and not on weekends to reduce noise
+ if [[ -n "$SLACK_SEND_DBGAP" && "$SLACK_SEND_DBGAP" = True ]]; then
+ files=$(grep "Reading file" "$output")
+ let hour=$(date -u +10#%H)
+ let dow=$(date -u +10#%u)
+ if ! (( hour % 12 )) && (( dow < 6 )); then
+ if [ "${slackWebHook}" != 'None' ]; then
+ curl -X POST --data-urlencode "payload={\"text\": \"FenceHelper: \n\`\`\`\n${files}\n\`\`\`\"}" "${slackWebHook}"
+ fi
+ fi
+ fi
+ fi
+ if [[ $exitcode -ne 0 && "${slackWebHook}" != 'None' ]]; then
+ emptyfile=$(grep "EnvironmentError:" "$output")
+ if [ ! -z "$emptyfile" ]; then
+ curl -X POST --data-urlencode "payload={\"text\": \"JOBSKIPPED: User sync skipped on ${gen3Env} ${emptyfile}\"}" "${slackWebHook}";
+ else
+ curl -X POST --data-urlencode "payload={\"text\": \"JOBFAIL: User sync failed on ${gen3Env}\"}" "${slackWebHook}"
+ fi
+ fi
+ echo "Exit code: $exitcode"
+ exit "$exitcode"
\ No newline at end of file
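The usersync step reads PIPESTATUS[0] because piping fence-create through tee makes $? report tee's exit status rather than the sync's; a minimal sketch of the difference:

    false | tee /dev/null
    echo "\$?=$?  PIPESTATUS[0]=${PIPESTATUS[0]}"   # prints: $?=0  PIPESTATUS[0]=1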
diff --git a/kube/services/argocd/values.yaml b/kube/services/argocd/values.yaml
new file mode 100644
index 000000000..4d799c055
--- /dev/null
+++ b/kube/services/argocd/values.yaml
@@ -0,0 +1,2894 @@
+## Argo CD configuration
+## Ref: https://github.com/argoproj/argo-cd
+##
+
+# -- Provide a name in place of `argocd`
+nameOverride: argocd
+# -- String to fully override `"argo-cd.fullname"`
+fullnameOverride: ""
+# -- Override the Kubernetes version, which is used to evaluate certain manifests
+kubeVersionOverride: ""
+# Override APIVersions
+# If you want to template helm charts but cannot access k8s API server
+# you can set api versions here
+apiVersionOverrides:
+ # -- String to override apiVersion of cert-manager resources rendered by this helm chart
+ certmanager: "" # cert-manager.io/v1
+ # -- String to override apiVersion of GKE resources rendered by this helm chart
+ cloudgoogle: "" # cloud.google.com/v1
+ # -- String to override apiVersion of autoscaling rendered by this helm chart
+ autoscaling: "" # autoscaling/v2
+
+# -- Create clusterroles that extend existing clusterroles to interact with argo-cd crds
+## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
+createAggregateRoles: false
+
+openshift:
+ # -- enables using arbitrary uid for argo repo server
+ enabled: false
+
+## Custom resource configuration
+crds:
+ # -- Install and upgrade CRDs
+ install: true
+ # -- Keep CRDs on chart uninstall
+ keep: true
+ # -- Annotations to be added to all CRDs
+ annotations: {}
+
+## Globally shared configuration
+global:
+  # -- Common labels for all resources
+ additionalLabels: {}
+ # app: argo-cd
+
+ # -- Number of old deployment ReplicaSets to retain. The rest will be garbage collected.
+ revisionHistoryLimit: 3
+
+ # Default image used by all components
+ image:
+ # -- If defined, a repository applied to all Argo CD deployments
+ repository: quay.io/argoproj/argocd
+ # -- Overrides the global Argo CD image tag whose default is the chart appVersion
+ tag: ""
+ # -- If defined, a imagePullPolicy applied to all Argo CD deployments
+ imagePullPolicy: IfNotPresent
+
+ # -- Secrets with credentials to pull images from a private registry
+ imagePullSecrets: []
+
+ # Default logging options used by all components
+ logging:
+ # -- Set the global logging format. Either: `text` or `json`
+ format: text
+ # -- Set the global logging level. One of: `debug`, `info`, `warn` or `error`
+ level: info
+
+  # -- Annotations for all deployed Statefulsets
+  statefulsetAnnotations: {}
+
+  # -- Annotations for all deployed Deployments
+  deploymentAnnotations: {}
+
+  # -- Annotations for all deployed pods
+  podAnnotations: {}
+
+  # -- Labels for all deployed pods
+ podLabels: {}
+
+ # -- Toggle and define pod-level security context.
+ # @default -- `{}` (See [values.yaml])
+ securityContext: {}
+ # runAsUser: 999
+ # runAsGroup: 999
+ # fsGroup: 999
+
+ # -- Mapping between IP and hostnames that will be injected as entries in the pod's hosts files
+ hostAliases: []
+ # - ip: 10.20.30.40
+ # hostnames:
+ # - git.myhostname
+
+ networkPolicy:
+ # -- Create NetworkPolicy objects for all components
+ create: false
+ # -- Default deny all ingress traffic
+ defaultDenyIngress: false
+
+## Argo Configs
+configs:
+ # General Argo CD configuration
+ ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml
+ cm:
+ # -- Create the argocd-cm configmap for [declarative setup]
+ create: true
+
+ # -- Annotations to be added to argocd-cm configmap
+ annotations: {}
+
+ # -- Argo CD's externally facing base URL (optional). Required when configuring SSO
+ url: ""
+
+    # -- The name of the tracking label used by Argo CD for resource pruning
+ # @default -- Defaults to app.kubernetes.io/instance
+ application.instanceLabelKey: argocd.argoproj.io/instance
+
+ # -- Enable logs RBAC enforcement
+ ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/2.3-2.4/#enable-logs-rbac-enforcement
+ server.rbac.log.enforce.enable: false
+
+ # -- Enable exec feature in Argo UI
+ ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/rbac/#exec-resource
+ exec.enabled: false
+
+ # -- Enable local admin user
+ ## Ref: https://argo-cd.readthedocs.io/en/latest/faq/#how-to-disable-admin-user
+ admin.enabled: true
+
+ # -- Timeout to discover if a new manifests version got published to the repository
+ timeout.reconciliation: 180s
+
+ # -- Timeout to refresh application data as well as target manifests cache
+ timeout.hard.reconciliation: 0s
+
+ # Dex configuration
+ # dex.config: |
+ # connectors:
+ # # GitHub example
+ # - type: github
+ # id: github
+ # name: GitHub
+ # config:
+ # clientID: aabbccddeeff00112233
+ # clientSecret: $dex.github.clientSecret # Alternatively $:dex.github.clientSecret
+ # orgs:
+ # - name: your-github-org
+
+ # OIDC configuration as an alternative to dex (optional).
+ # oidc.config: |
+ # name: AzureAD
+ # issuer: https://login.microsoftonline.com/TENANT_ID/v2.0
+ # clientID: CLIENT_ID
+ # clientSecret: $oidc.azuread.clientSecret
+ # rootCA: |
+ # -----BEGIN CERTIFICATE-----
+ # ... encoded certificate data here ...
+ # -----END CERTIFICATE-----
+ # requestedIDTokenClaims:
+ # groups:
+ # essential: true
+ # requestedScopes:
+ # - openid
+ # - profile
+ # - email
+
+ # Argo CD configuration parameters
+ ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cmd-params-cm.yaml
+ params:
+ # -- Annotations to be added to the argocd-cmd-params-cm ConfigMap
+ annotations: {}
+
+ ## Generic parameters
+ # -- Open-Telemetry collector address: (e.g. "otel-collector:4317")
+ otlp.address: ''
+
+ ## Controller Properties
+ # -- Number of application status processors
+ controller.status.processors: 20
+ # -- Number of application operation processors
+ controller.operation.processors: 10
+ # -- Specifies timeout between application self heal attempts
+ controller.self.heal.timeout.seconds: 5
+ # -- Repo server RPC call timeout seconds.
+ controller.repo.server.timeout.seconds: 60
+
+ ## Server properties
+ # -- Run server without TLS
+ server.insecure: false
+ # -- Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from /
+ server.basehref: "/argocd/"
+ # -- Used if Argo CD is running behind reverse proxy under subpath different from /
+ server.rootpath: ""
+ # -- Directory path that contains additional static assets
+ server.staticassets: /shared/app
+ # -- Disable Argo CD RBAC for user authentication
+ server.disable.auth: false
+ # -- Enable GZIP compression
+ server.enable.gzip: false
+ # -- Set X-Frame-Options header in HTTP responses to value. To disable, set to "".
+ server.x.frame.options: sameorigin
+
+ ## Repo-server properties
+    # -- Limit on the number of concurrent manifest generation requests. Any value less than 1 means no limit.
+ reposerver.parallelism.limit: 0
+
+ # Argo CD RBAC policy configuration
+ ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/rbac.md
+ rbac:
+ # -- Create the argocd-rbac-cm configmap with ([Argo CD RBAC policy]) definitions.
+ # If false, it is expected the configmap will be created by something else.
+ # Argo CD will not work if there is no configmap created with the name above.
+ create: true
+
+ # -- Annotations to be added to argocd-rbac-cm configmap
+ annotations: {}
+
+    # -- The name of the default role which Argo CD will fall back to when authorizing API requests (optional).
+    # If omitted or empty, users may still be able to log in, but will see no apps, projects, etc...
+ policy.default: ''
+
+ # -- File containing user-defined policies and role definitions.
+ # @default -- `''` (See [values.yaml])
+ policy.csv: ''
+ # Policy rules are in the form:
+ # p, subject, resource, action, object, effect
+ # Role definitions and bindings are in the form:
+ # g, subject, inherited-subject
+ # policy.csv |
+ # p, role:org-admin, applications, *, */*, allow
+ # p, role:org-admin, clusters, get, *, allow
+ # p, role:org-admin, repositories, *, *, allow
+ # p, role:org-admin, logs, get, *, allow
+ # p, role:org-admin, exec, create, */*, allow
+ # g, your-github-org:your-team, role:org-admin
+
+ # -- OIDC scopes to examine during rbac enforcement (in addition to `sub` scope).
+ # The scope value can be a string, or a list of strings.
+ scopes: "[groups]"
+
+ # GnuPG public keys for commit verification
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/user-guide/gpg-verification/
+ gpg:
+ # -- Annotations to be added to argocd-gpg-keys-cm configmap
+ annotations: {}
+
+ # -- [GnuPG] public keys to add to the keyring
+ # @default -- `{}` (See [values.yaml])
+ ## Note: Public keys should be exported with `gpg --export --armor `
+ keys: {}
+ # 4AEE18F83AFDEB23: |
+ # -----BEGIN PGP PUBLIC KEY BLOCK-----
+ # ...
+ # -----END PGP PUBLIC KEY BLOCK-----
+
+
+ # -- Provide one or multiple [external cluster credentials]
+ # @default -- `[]` (See [values.yaml])
+ ## Ref:
+ ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#clusters
+ ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/security/#external-cluster-credentials
+ clusterCredentials: []
+ # - name: mycluster
+ # server: https://mycluster.com
+ # labels: {}
+ # annotations: {}
+ # config:
+ # bearerToken: ""
+ # tlsClientConfig:
+ # insecure: false
+ # caData: ""
+ # - name: mycluster2
+ # server: https://mycluster2.com
+ # labels: {}
+ # annotations: {}
+ # namespaces: namespace1,namespace2
+ # clusterResources: true
+ # config:
+ # bearerToken: ""
+ # tlsClientConfig:
+ # insecure: false
+ # caData: ""
+
+ # -- Known Hosts configmap annotations
+ knownHostsAnnotations: {}
+ knownHosts:
+ data:
+ # -- Known Hosts
+ # @default -- See [values.yaml]
+ ssh_known_hosts: |
+ bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
+ github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
+ github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
+ github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
+ gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
+ gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
+ gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
+ ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
+ vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H
+ # -- TLS certificate configmap annotations
+ tlsCertsAnnotations: {}
+ # -- TLS certificate
+ # @default -- See [values.yaml]
+ tlsCerts:
+ {}
+ # data:
+ # argocd.example.com: |
+ # -----BEGIN CERTIFICATE-----
+ # MIIF1zCCA7+gAwIBAgIUQdTcSHY2Sxd3Tq/v1eIEZPCNbOowDQYJKoZIhvcNAQEL
+ # BQAwezELMAkGA1UEBhMCREUxFTATBgNVBAgMDExvd2VyIFNheG9ueTEQMA4GA1UE
+ # BwwHSGFub3ZlcjEVMBMGA1UECgwMVGVzdGluZyBDb3JwMRIwEAYDVQQLDAlUZXN0
+ # c3VpdGUxGDAWBgNVBAMMD2Jhci5leGFtcGxlLmNvbTAeFw0xOTA3MDgxMzU2MTda
+ # Fw0yMDA3MDcxMzU2MTdaMHsxCzAJBgNVBAYTAkRFMRUwEwYDVQQIDAxMb3dlciBT
+ # YXhvbnkxEDAOBgNVBAcMB0hhbm92ZXIxFTATBgNVBAoMDFRlc3RpbmcgQ29ycDES
+ # MBAGA1UECwwJVGVzdHN1aXRlMRgwFgYDVQQDDA9iYXIuZXhhbXBsZS5jb20wggIi
+ # MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCv4mHMdVUcafmaSHVpUM0zZWp5
+ # NFXfboxA4inuOkE8kZlbGSe7wiG9WqLirdr39Ts+WSAFA6oANvbzlu3JrEQ2CHPc
+ # CNQm6diPREFwcDPFCe/eMawbwkQAPVSHPts0UoRxnpZox5pn69ghncBR+jtvx+/u
+ # P6HdwW0qqTvfJnfAF1hBJ4oIk2AXiip5kkIznsAh9W6WRy6nTVCeetmIepDOGe0G
+ # ZJIRn/OfSz7NzKylfDCat2z3EAutyeT/5oXZoWOmGg/8T7pn/pR588GoYYKRQnp+
+ # YilqCPFX+az09EqqK/iHXnkdZ/Z2fCuU+9M/Zhrnlwlygl3RuVBI6xhm/ZsXtL2E
+ # Gxa61lNy6pyx5+hSxHEFEJshXLtioRd702VdLKxEOuYSXKeJDs1x9o6cJ75S6hko
+ # Ml1L4zCU+xEsMcvb1iQ2n7PZdacqhkFRUVVVmJ56th8aYyX7KNX6M9CD+kMpNm6J
+ # kKC1li/Iy+RI138bAvaFplajMF551kt44dSvIoJIbTr1LigudzWPqk31QaZXV/4u
+ # kD1n4p/XMc9HYU/was/CmQBFqmIZedTLTtK7clkuFN6wbwzdo1wmUNgnySQuMacO
+ # gxhHxxzRWxd24uLyk9Px+9U3BfVPaRLiOPaPoC58lyVOykjSgfpgbus7JS69fCq7
+ # bEH4Jatp/10zkco+UQIDAQABo1MwUTAdBgNVHQ4EFgQUjXH6PHi92y4C4hQpey86
+ # r6+x1ewwHwYDVR0jBBgwFoAUjXH6PHi92y4C4hQpey86r6+x1ewwDwYDVR0TAQH/
+ # BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAFE4SdKsX9UsLy+Z0xuHSxhTd0jfn
+ # Iih5mtzb8CDNO5oTw4z0aMeAvpsUvjJ/XjgxnkiRACXh7K9hsG2r+ageRWGevyvx
+ # CaRXFbherV1kTnZw4Y9/pgZTYVWs9jlqFOppz5sStkfjsDQ5lmPJGDii/StENAz2
+ # XmtiPOgfG9Upb0GAJBCuKnrU9bIcT4L20gd2F4Y14ccyjlf8UiUi192IX6yM9OjT
+ # +TuXwZgqnTOq6piVgr+FTSa24qSvaXb5z/mJDLlk23npecTouLg83TNSn3R6fYQr
+ # d/Y9eXuUJ8U7/qTh2Ulz071AO9KzPOmleYPTx4Xty4xAtWi1QE5NHW9/Ajlv5OtO
+ # OnMNWIs7ssDJBsB7VFC8hcwf79jz7kC0xmQqDfw51Xhhk04kla+v+HZcFW2AO9so
+ # 6ZdVHHQnIbJa7yQJKZ+hK49IOoBR6JgdB5kymoplLLiuqZSYTcwSBZ72FYTm3iAr
+ # jzvt1hxpxVDmXvRnkhRrIRhK4QgJL0jRmirBjDY+PYYd7bdRIjN7WNZLFsgplnS8
+ # 9w6CwG32pRlm0c8kkiQ7FXA6BYCqOsDI8f1VGQv331OpR2Ck+FTv+L7DAmg6l37W
+ # +LB9LGh4OAp68ImTjqf6ioGKG0RBSznwME+r4nXtT1S/qLR6ASWUS4ViWRhbRlNK
+ # XWyb96wrUlv+E8I=
+ # -----END CERTIFICATE-----
+
+ # -- Repository credentials to be used as Templates for other repos
+ ## Creates a secret for each key/value specified below to create repository credentials
+ credentialTemplates: {}
+ # github-enterprise-creds-1:
+ # url: https://github.com/argoproj
+ # githubAppID: 1
+ # githubAppInstallationID: 2
+ # githubAppEnterpriseBaseUrl: https://ghe.example.com/api/v3
+ # githubAppPrivateKey: |
+ # -----BEGIN OPENSSH PRIVATE KEY-----
+ # ...
+ # -----END OPENSSH PRIVATE KEY-----
+ # https-creds:
+ # url: https://github.com/argoproj
+ # password: my-password
+ # username: my-username
+ # ssh-creds:
+ # url: git@github.com:argoproj-labs
+ # sshPrivateKey: |
+ # -----BEGIN OPENSSH PRIVATE KEY-----
+ # ...
+ # -----END OPENSSH PRIVATE KEY-----
+
+ # -- Annotations to be added to `configs.credentialTemplates` Secret
+ credentialTemplatesAnnotations: {}
+
+ # -- Repositories list to be used by applications
+ ## Creates a secret for each key/value specified below to create repositories
+ ## Note: the last example in the list would use a repository credential template, configured under "configs.repositoryCredentials".
+ repositories: {}
+ # istio-helm-repo:
+ # url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts
+ # name: istio.io
+ # type: helm
+ # private-helm-repo:
+ # url: https://my-private-chart-repo.internal
+ # name: private-repo
+ # type: helm
+ # password: my-password
+ # username: my-username
+ # private-repo:
+ # url: https://github.com/argoproj/private-repo
+
+ # -- Annotations to be added to `configs.repositories` Secret
+ repositoriesAnnotations: {}
+
+ # Argo CD sensitive data
+ # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sensitive-data-and-sso-client-secrets
+ secret:
+ # -- Create the argocd-secret
+ createSecret: true
+ # -- Annotations to be added to argocd-secret
+ annotations: {}
+
+ # -- Shared secret for authenticating GitHub webhook events
+ githubSecret: ""
+ # -- Shared secret for authenticating GitLab webhook events
+ gitlabSecret: ""
+ # -- Shared secret for authenticating BitbucketServer webhook events
+ bitbucketServerSecret: ""
+ # -- UUID for authenticating Bitbucket webhook events
+ bitbucketUUID: ""
+ # -- Shared secret for authenticating Gogs webhook events
+ gogsSecret: ""
+
+ # -- add additional secrets to be added to argocd-secret
+ ## Custom secrets. Useful for injecting SSO secrets into environment variables.
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sensitive-data-and-sso-client-secrets
+ ## Note that all values must be non-empty.
+ extra:
+ {}
+ # LDAP_PASSWORD: "mypassword"
+
+ # -- Argo TLS Data
+ # DEPRECATED - Use server.certificate or server.certificateSecret
+ # argocdServerTlsConfig:
+ # key: ''
+ # crt: ''
+
+ # -- Bcrypt hashed admin password
+ ## Argo expects the password in the secret to be bcrypt hashed. You can create this hash with
+ ## `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'`
+ argocdServerAdminPassword: ""
+ # -- Admin password modification time. Eg. `"2006-01-02T15:04:05Z"`
+ # @default -- `""` (defaults to current time)
+ argocdServerAdminPasswordMtime: ""
+
+ # -- Define custom [CSS styles] for your argo instance.
+ # This setting will automatically mount the provided CSS and reference it in the argo configuration.
+ # @default -- `""` (See [values.yaml])
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/custom-styles/
+ styles: ""
+ # styles: |
+ # .nav-bar {
+ # background: linear-gradient(to bottom, #999, #777, #333, #222, #111);
+ # }
+
+# -- Array of extra K8s manifests to deploy
+extraObjects: []
+ # - apiVersion: secrets-store.csi.x-k8s.io/v1
+ # kind: SecretProviderClass
+ # metadata:
+ # name: argocd-secrets-store
+ # spec:
+ # provider: aws
+ # parameters:
+ # objects: |
+ # - objectName: "argocd"
+ # objectType: "secretsmanager"
+ # jmesPath:
+ # - path: "client_id"
+ # objectAlias: "client_id"
+ # - path: "client_secret"
+ # objectAlias: "client_secret"
+ # secretObjects:
+ # - data:
+ # - key: client_id
+ # objectName: client_id
+ # - key: client_secret
+ # objectName: client_secret
+ # secretName: argocd-secrets-store
+ # type: Opaque
+ # labels:
+ # app.kubernetes.io/part-of: argocd
+
+## Application controller
+controller:
+ # -- Application controller name string
+ name: application-controller
+
+ # -- The number of application controller pods to run.
+ # Additional replicas will cause sharding of managed clusters across number of replicas.
+ replicas: 1
+
+ ## Application controller Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the application controller
+ enabled: false
+ # -- Labels to be added to application controller pdb
+ labels: {}
+ # -- Annotations to be added to application controller pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+    # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+    ## Takes precedence over `controller.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Application controller image
+ image:
+ # -- Repository to use for the application controller
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Tag to use for the application controller
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Image pull policy for the application controller
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # -- DEPRECATED - Application controller commandline flags
+ args: {}
+ # DEPRECATED - Use configs.params to override
+ # # -- define the application controller `--status-processors`
+ # statusProcessors: "20"
+ # # -- define the application controller `--operation-processors`
+ # operationProcessors: "10"
+ # # -- define the application controller `--app-hard-resync`
+ # appHardResyncPeriod: "0"
+ # # -- define the application controller `--app-resync`
+ # appResyncPeriod: "180"
+ # # -- define the application controller `--self-heal-timeout-seconds`
+ # selfHealTimeout: "5"
+ # # -- define the application controller `--repo-server-timeout-seconds`
+ # repoServerTimeoutSeconds: "60"
+
+ # -- Additional command line arguments to pass to application controller
+ extraArgs: []
+
+ # -- Environment variables to pass to application controller
+ env: []
+
+ # -- envFrom to pass to application controller
+ # @default -- `[]` (See [values.yaml])
+ envFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- Additional containers to be added to the application controller pod
+ extraContainers: []
+
+ # -- Init containers to add to the application controller pod
+ ## If your target Kubernetes cluster(s) require a custom credential (exec) plugin
+ ## you could use this (and the same in the server pod) to provide such executable
+ ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins
+ initContainers: []
+ # - name: download-tools
+ # image: alpine:3
+ # command: [sh, -c]
+ # args:
+ # - wget -qO kubelogin.zip https://github.com/Azure/kubelogin/releases/download/v0.0.25/kubelogin-linux-amd64.zip &&
+ # unzip kubelogin.zip && mv bin/linux_amd64/kubelogin /custom-tools/
+ # volumeMounts:
+ # - mountPath: /custom-tools
+ # name: custom-tools
+
+ # -- Additional volumeMounts to the application controller main container
+ volumeMounts: []
+ # - mountPath: /usr/local/bin/kubelogin
+ # name: custom-tools
+ # subPath: kubelogin
+
+ # -- Additional volumes to the application controller pod
+ volumes: []
+ # - name: custom-tools
+ # emptyDir: {}
+
+ # -- Annotations for the application controller StatefulSet
+ statefulsetAnnotations: {}
+
+ # -- Annotations to be added to application controller pods
+ podAnnotations: {}
+
+ # -- Labels to be added to application controller pods
+ podLabels: {}
+
+ # -- Resource limits and requests for the application controller pods
+ resources: {}
+ # limits:
+ # cpu: 500m
+ # memory: 512Mi
+ # requests:
+ # cpu: 250m
+ # memory: 256Mi
+
+ # -- Application controller container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # -- Application controller listening port
+ containerPort: 8082
+
+  # Readiness probe for application controller
+ ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+ readinessProbe:
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+
+ # -- [Node selector]
+ nodeSelector: {}
+
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+
+ # -- Assign custom [affinity] rules to the deployment
+ affinity: {}
+
+ # -- Assign custom [TopologySpreadConstraints] rules to the application controller
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Priority class for the application controller pods
+ priorityClassName: ""
+
+ serviceAccount:
+ # -- Create a service account for the application controller
+ create: true
+ # -- Service account name
+ name: argocd-application-controller
+ # -- Annotations applied to created service account
+ annotations: {}
+ # -- Labels applied to created service account
+ labels: {}
+ # -- Automount API credentials for the Service Account
+ automountServiceAccountToken: true
+
+ ## Application controller metrics configuration
+ metrics:
+ # -- Deploy metrics service
+ enabled: false
+ applicationLabels:
+ # -- Enables additional labels in argocd_app_labels metric
+ enabled: false
+ # -- Additional labels
+ labels: []
+ service:
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port
+ servicePort: 8082
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Prometheus ServiceMonitor interval
+ interval: 30s
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus ServiceMonitor namespace
+ namespace: "" # "monitoring"
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+ rules:
+ # -- Deploy a PrometheusRule for the application controller
+ enabled: false
+ # -- PrometheusRule.Spec for the application controller
+ spec: []
+ # - alert: ArgoAppMissing
+ # expr: |
+ # absent(argocd_app_info) == 1
+ # for: 15m
+ # labels:
+ # severity: critical
+ # annotations:
+ # summary: "[Argo CD] No reported applications"
+ # description: >
+ # Argo CD has not reported any applications data for the past 15 minutes which
+ # means that it must be down or not functioning properly. This needs to be
+ # resolved for this cloud to continue to maintain state.
+ # - alert: ArgoAppNotSynced
+ # expr: |
+ # argocd_app_info{sync_status!="Synced"} == 1
+ # for: 12h
+ # labels:
+ # severity: warning
+ # annotations:
+ # summary: "[{{`{{$labels.name}}`}}] Application not synchronized"
+ # description: >
+ # The application [{{`{{$labels.name}}`}} has not been synchronized for over
+ # 12 hours which means that the state of this cloud has drifted away from the
+ # state inside Git.
+ # selector:
+ # prometheus: kube-prometheus
+ # namespace: monitoring
+ # additionalLabels: {}
+ # annotations: {}
+
+ ## Enable if you would like to grant rights to Argo CD to deploy to the local Kubernetes cluster.
+ clusterAdminAccess:
+ # -- Enable RBAC for local cluster deployments
+ enabled: true
+
+ ## Enable this and set the rules: to whatever custom rules you want for the Cluster Role resource.
+ ## Defaults to off
+ clusterRoleRules:
+ # -- Enable custom rules for the application controller's ClusterRole resource
+ enabled: false
+ # -- List of custom rules for the application controller's ClusterRole resource
+ rules: []
+
+## Dex
+dex:
+ # -- Enable dex
+ enabled: false
+ # -- Dex name
+ name: dex-server
+
+ # -- Additional command line arguments to pass to the Dex server
+ extraArgs: []
+
+ metrics:
+ # -- Deploy metrics service
+ enabled: false
+ service:
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Prometheus ServiceMonitor interval
+ interval: 30s
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus ServiceMonitor namespace
+ namespace: "" # "monitoring"
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+
+ ## Dex Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the Dex server
+ enabled: false
+ # -- Labels to be added to Dex server pdb
+ labels: {}
+ # -- Annotations to be added to Dex server pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+    # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+    ## Takes precedence over `dex.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Dex image
+ image:
+ # -- Dex image repository
+ repository: ghcr.io/dexidp/dex
+ # -- Dex image tag
+ tag: v2.35.3
+ # -- Dex imagePullPolicy
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # Argo CD init image that creates Dex config
+ initImage:
+ # -- Argo CD init image repository
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Argo CD init image tag
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Argo CD init image imagePullPolicy
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Environment variables to pass to the Dex server
+ env: []
+
+ # -- envFrom to pass to the Dex server
+ # @default -- `[]` (See [values.yaml])
+ envFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- Additional containers to be added to the dex pod
+ extraContainers: []
+
+ # -- Init containers to add to the dex pod
+ initContainers: []
+
+ # -- Additional volumeMounts to the dex main container
+ volumeMounts: []
+
+ # -- Additional volumes to the dex pod
+ volumes: []
+
+ # TLS certificate configuration via Secret
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-dex-server
+  ## Note: Issuing certificates via cert-manager is not supported right now because it's not possible to restart Dex automatically without extra controllers.
+ certificateSecret:
+ # -- Create argocd-dex-server-tls secret
+ enabled: false
+ # -- Labels to be added to argocd-dex-server-tls secret
+ labels: {}
+ # -- Annotations to be added to argocd-dex-server-tls secret
+ annotations: {}
+ # -- Certificate authority. Required for self-signed certificates.
+ ca: ''
+ # -- Certificate private key
+ key: ''
+ # -- Certificate data. Must contain SANs of Dex service (ie: argocd-dex-server, argocd-dex-server.argo-cd.svc)
+ crt: ''
+
+ # -- Annotations to be added to the Dex server Deployment
+ deploymentAnnotations: {}
+
+ # -- Annotations to be added to the Dex server pods
+ podAnnotations: {}
+
+ # -- Labels to be added to the Dex server pods
+ podLabels: {}
+
+ # -- Resource limits and requests for dex
+ resources: {}
+ # limits:
+ # cpu: 50m
+ # memory: 64Mi
+ # requests:
+ # cpu: 10m
+ # memory: 32Mi
+
+ # -- Dex container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ ## Probes for Dex server
+ ## Supported from Dex >= 2.28.0
+ livenessProbe:
+ # -- Enable Kubernetes liveness probe for Dex >= 2.28.0
+ enabled: false
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+ readinessProbe:
+ # -- Enable Kubernetes readiness probe for Dex >= 2.28.0
+ enabled: false
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+
+ serviceAccount:
+ # -- Create dex service account
+ create: true
+ # -- Dex service account name
+ name: argocd-dex-server
+ # -- Annotations applied to created service account
+ annotations: {}
+ # -- Automount API credentials for the Service Account
+ automountServiceAccountToken: true
+
+ # -- Container port for HTTP access
+ containerPortHttp: 5556
+ # -- Service port for HTTP access
+ servicePortHttp: 5556
+ # -- Service port name for HTTP access
+ servicePortHttpName: http
+ # -- Container port for gRPC access
+ containerPortGrpc: 5557
+ # -- Service port for gRPC access
+ servicePortGrpc: 5557
+ # -- Service port name for gRPC access
+ servicePortGrpcName: grpc
+ # -- Container port for metrics access
+ containerPortMetrics: 5558
+ # -- Service port for metrics access
+ servicePortMetrics: 5558
+
+ # -- [Node selector]
+ nodeSelector: {}
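+ # For example, to schedule Dex onto Linux nodes via the well-known node label:
+ # kubernetes.io/os: linux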
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+ # -- Assign custom [affinity] rules to the deployment
+ affinity: {}
+
+ # -- Assign custom [TopologySpreadConstraints] rules to dex
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Priority class for dex
+ priorityClassName: ""
+
+## Redis
+redis:
+ # -- Enable redis
+ enabled: true
+ # -- Redis name
+ name: redis
+
+ ## Redis Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the Redis
+ enabled: false
+ # -- Labels to be added to Redis pdb
+ labels: {}
+ # -- Annotations to be added to Redis pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `redis.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Redis image
+ image:
+ # -- Redis repository
+ repository: public.ecr.aws/docker/library/redis
+ # -- Redis tag
+ tag: 7.0.5-alpine
+ # -- Redis imagePullPolicy
+ imagePullPolicy: IfNotPresent
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # -- Additional command line arguments to pass to redis-server
+ extraArgs: []
+ # - --bind
+ # - "0.0.0.0"
+
+ # -- Environment variables to pass to the Redis server
+ env: []
+
+ # -- envFrom to pass to the Redis server
+ # @default -- `[]` (See [values.yaml])
+ envFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- Additional containers to be added to the redis pod
+ extraContainers: []
+
+ # -- Init containers to add to the redis pod
+ initContainers: []
+
+ # -- Additional volumeMounts to the redis container
+ volumeMounts: []
+
+ # -- Additional volumes to the redis pod
+ volumes: []
+
+ # -- Annotations to be added to the Redis server Deployment
+ deploymentAnnotations: {}
+
+ # -- Annotations to be added to the Redis server pods
+ podAnnotations: {}
+
+ # -- Labels to be added to the Redis server pods
+ podLabels: {}
+
+ # -- Resource limits and requests for redis
+ resources: {}
+ # limits:
+ # cpu: 200m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 64Mi
+
+ # -- Redis pod-level security context
+ # @default -- See [values.yaml]
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 999
+ seccompProfile:
+ type: RuntimeDefault
+
+ # -- Redis container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+
+ # -- Redis container port
+ containerPort: 6379
+ # -- Redis service port
+ servicePort: 6379
+
+ # -- [Node selector]
+ nodeSelector: {}
+
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+
+ # -- Assign custom [affinity] rules to the deployment
+ affinity: {}
+
+ # -- Assign custom [TopologySpreadConstraints] rules to redis
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Priority class for redis
+ priorityClassName: ""
+
+ serviceAccount:
+ # -- Create a service account for the redis pod
+ create: false
+ # -- Service account name for redis pod
+ name: ""
+ # -- Annotations applied to created service account
+ annotations: {}
+ # -- Automount API credentials for the Service Account
+ automountServiceAccountToken: false
+
+ service:
+ # -- Redis service annotations
+ annotations: {}
+ # -- Additional redis service labels
+ labels: {}
+
+ metrics:
+ # -- Deploy metrics service and redis-exporter sidecar
+ enabled: false
+ image:
+ # -- redis-exporter image repository
+ repository: public.ecr.aws/bitnami/redis-exporter
+ # -- redis-exporter image tag
+ tag: 1.26.0-debian-10-r2
+ # -- redis-exporter image PullPolicy
+ imagePullPolicy: IfNotPresent
+ # -- Port to use for redis-exporter sidecar
+ containerPort: 9121
+
+ # -- Redis exporter security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # -- Resource limits and requests for redis-exporter sidecar
+ resources: {}
+ # limits:
+ # cpu: 50m
+ # memory: 64Mi
+ # requests:
+ # cpu: 10m
+ # memory: 32Mi
+ service:
+ # -- Metrics service type
+ type: ClusterIP
+ # -- Metrics service clusterIP. `None` makes a "headless service" (no virtual IP)
+ clusterIP: None
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port
+ servicePort: 9121
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Interval at which metrics should be scraped
+ interval: 30s
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus ServiceMonitor namespace
+ namespace: "" # "monitoring"
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+
+# This key configures the Redis-HA subchart; when enabled (redis-ha.enabled=true),
+# the custom Redis deployment is omitted.
+# Check the redis-ha chart for more properties
+redis-ha:
+ # -- Enables the Redis HA subchart and disables the custom Redis single node deployment
+ enabled: false
+ exporter:
+ # -- If `true`, the prometheus exporter sidecar is enabled
+ enabled: true
+ persistentVolume:
+ # -- Configures persistence on Redis nodes
+ enabled: false
+ redis:
+ # -- Redis convention for naming the cluster group: must match `^[\\w-\\.]+$` and can be templated
+ masterGroupName: argocd
+ # -- Any valid redis config options in this section will be applied to each server (see `redis-ha` chart)
+ # @default -- See [values.yaml]
+ config:
+ # -- Will save the DB if both the given number of seconds and the given number of write operations against the DB occurred. `""` is disabled
+ # @default -- `'""'`
+ save: '""'
+ haproxy:
+ # -- Enable HAProxy load balancing/proxy
+ enabled: true
+ metrics:
+ # -- Enable Prometheus metrics scraping for HAProxy
+ enabled: true
+ image:
+ # -- Redis tag
+ tag: 7.0.5-alpine
+
+ ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ topologySpreadConstraints:
+ # -- Enable Redis HA topology spread constraints
+ enabled: false
+ # -- Max skew of pods tolerated
+ # @default -- `""` (defaults to `1`)
+ maxSkew: ""
+ # -- Topology key for spread
+ # @default -- `""` (defaults to `topology.kubernetes.io/zone`)
+ topologyKey: ""
+ # -- Enforcement policy, hard or soft
+ # @default -- `""` (defaults to `ScheduleAnyway`)
+ whenUnsatisfiable: ""
+
+# External Redis parameters
+externalRedis:
+ # -- External Redis server host
+ host: ""
+ # -- External Redis username
+ username: ""
+ # -- External Redis password
+ password: ""
+ # -- External Redis server port
+ port: 6379
+ # -- The name of an existing secret with Redis credentials (must contain key `redis-password`).
+ # When it's set, the `externalRedis.password` parameter is ignored
+ existingSecret: ""
+ # -- External Redis Secret annotations
+ secretAnnotations: {}
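+ # For example (placeholder host and secret name) when using a managed Redis instance:
+ # host: my-redis.example.com
+ # existingSecret: argocd-redis-credentials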
+
+## Server
+server:
+ # -- Argo CD server name
+ name: server
+
+ # -- The number of server pods to run
+ replicas: 1
+
+ ## Argo CD server Horizontal Pod Autoscaler
+ autoscaling:
+ # -- Enable Horizontal Pod Autoscaler ([HPA]) for the Argo CD server
+ enabled: false
+ # -- Minimum number of replicas for the Argo CD server [HPA]
+ minReplicas: 1
+ # -- Maximum number of replicas for the Argo CD server [HPA]
+ maxReplicas: 5
+ # -- Average CPU utilization percentage for the Argo CD server [HPA]
+ targetCPUUtilizationPercentage: 50
+ # -- Average memory utilization percentage for the Argo CD server [HPA]
+ targetMemoryUtilizationPercentage: 50
+ # -- Configures the scaling behavior of the target in both Up and Down directions.
+ # This is only available on HPA apiVersion `autoscaling/v2beta2` and newer
+ behavior: {}
+ # scaleDown:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 1
+ # periodSeconds: 180
+ # scaleUp:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 2
+ # periodSeconds: 60
+
+ ## Argo CD server Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the Argo CD server
+ enabled: false
+ # -- Labels to be added to Argo CD server pdb
+ labels: {}
+ # -- Annotations to be added to Argo CD server pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `server.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Argo CD server image
+ image:
+ # -- Repository to use for the Argo CD server
+ # @default -- `""` (defaults to global.image.repository)
+ repository: "" # defaults to global.image.repository
+ # -- Tag to use for the Argo CD server
+ # @default -- `""` (defaults to global.image.tag)
+ tag: "" # defaults to global.image.tag
+ # -- Image pull policy for the Argo CD server
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: "" # IfNotPresent
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # -- Additional command line arguments to pass to Argo CD server
+ extraArgs: [--insecure]
+
+ # -- Environment variables to pass to Argo CD server
+ env: []
+
+ # -- envFrom to pass to Argo CD server
+ # @default -- `[]` (See [values.yaml])
+ envFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- Specify postStart and preStop lifecycle hooks for your argo-cd-server container
+ lifecycle: {}
+
+ ## Argo UI extensions
+ ## This feature is in tech preview stage; expect instability or breaking changes in newer versions.
+ ## Ref: https://github.com/argoproj-labs/argocd-extensions
+ extensions:
+ # -- Enable support for Argo UI extensions
+ enabled: false
+
+ ## Argo UI extensions image
+ image:
+ # -- Repository to use for extensions image
+ repository: "ghcr.io/argoproj-labs/argocd-extensions"
+ # -- Tag to use for extensions image
+ tag: "v0.1.0"
+ # -- Image pull policy for extensions
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Server UI extensions container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # -- Resource limits and requests for the argocd-extensions container
+ resources: {}
+ # limits:
+ # cpu: 50m
+ # memory: 128Mi
+ # requests:
+ # cpu: 10m
+ # memory: 64Mi
+
+ # -- Additional containers to be added to the server pod
+ ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
+ extraContainers: []
+ # - name: my-sidecar
+ # image: nginx:latest
+ # - name: lemonldap-ng-controller
+ # image: lemonldapng/lemonldap-ng-controller:0.2.0
+ # args:
+ # - /lemonldap-ng-controller
+ # - --alsologtostderr
+ # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
+ # env:
+ # - name: POD_NAME
+ # valueFrom:
+ # fieldRef:
+ # fieldPath: metadata.name
+ # - name: POD_NAMESPACE
+ # valueFrom:
+ # fieldRef:
+ # fieldPath: metadata.namespace
+ # volumeMounts:
+ # - name: copy-portal-skins
+ # mountPath: /srv/var/lib/lemonldap-ng/portal/skins
+
+ # -- Init containers to add to the server pod
+ ## If your target Kubernetes cluster(s) require a custom credential (exec) plugin
+ ## you could use this (and the same in the application controller pod) to provide such executable
+ ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins
+ initContainers: []
+ # - name: download-tools
+ # image: alpine:3
+ # command: [sh, -c]
+ # args:
+ # - wget -qO kubelogin.zip https://github.com/Azure/kubelogin/releases/download/v0.0.25/kubelogin-linux-amd64.zip &&
+ # unzip kubelogin.zip && mv bin/linux_amd64/kubelogin /custom-tools/
+ # volumeMounts:
+ # - mountPath: /custom-tools
+ # name: custom-tools
+
+ # -- Additional volumeMounts to the server main container
+ volumeMounts: []
+ # - mountPath: /usr/local/bin/kubelogin
+ # name: custom-tools
+ # subPath: kubelogin
+
+ # -- Additional volumes to the server pod
+ volumes: []
+ # - name: custom-tools
+ # emptyDir: {}
+
+ # -- Annotations to be added to server Deployment
+ deploymentAnnotations: {}
+
+ # -- Annotations to be added to server pods
+ podAnnotations: {}
+
+ # -- Labels to be added to server pods
+ podLabels: {}
+
+ # -- Resource limits and requests for the Argo CD server
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 50m
+ # memory: 64Mi
+
+ # -- Configures the server port
+ containerPort: 8080
+
+ ## Readiness and liveness probes for default backend
+ ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+ readinessProbe:
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+ livenessProbe:
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+
+ # -- [Node selector]
+ nodeSelector: {}
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+ # -- Assign custom [affinity] rules to the deployment
+ affinity: {}
+
+ # -- Assign custom [TopologySpreadConstraints] rules to the Argo CD server
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Priority class for the Argo CD server
+ priorityClassName: ""
+
+ # -- Server container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # TLS certificate configuration via cert-manager
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-certificates-used-by-argocd-server
+ certificate:
+ # -- Deploy a Certificate resource (requires cert-manager)
+ enabled: false
+ # -- The name of the Secret that will be automatically created and managed by this Certificate resource
+ secretName: argocd-server-tls
+ # -- Certificate primary domain (commonName)
+ domain: argocd.example.com
+ # -- Certificate Subject Alternative Names (SANs)
+ additionalHosts: []
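+ # For example (placeholder), an extra SAN for a dedicated gRPC hostname:
+ # - argocd-grpc.example.com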
+ # -- The requested 'duration' (i.e. lifetime) of the certificate.
+ # @default -- `""` (defaults to 2160h = 90d if not specified)
+ ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+ duration: ""
+ # -- How long before the expiry a certificate should be renewed.
+ # @default -- `""` (defaults to 360h = 15d if not specified)
+ ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal
+ renewBefore: ""
+ # Certificate issuer
+ ## Ref: https://cert-manager.io/docs/concepts/issuer
+ issuer:
+ # -- Certificate issuer group. Set if using an external issuer. Eg. `cert-manager.io`
+ group: ""
+ # -- Certificate issuer kind. Either `Issuer` or `ClusterIssuer`
+ kind: ""
+ # -- Certificate issuer name. Eg. `letsencrypt`
+ name: ""
+ # Private key of the certificate
+ privateKey:
+ # -- Rotation policy of private key when certificate is re-issued. Either: `Never` or `Always`
+ rotationPolicy: Never
+ # -- The Public Key Cryptography Standards (PKCS) encoding for the private key. Either: `PKCS1` or `PKCS8`
+ encoding: PKCS1
+ # -- Algorithm used to generate certificate private key. One of: `RSA`, `Ed25519` or `ECDSA`
+ algorithm: RSA
+ # -- Key bit size of the private key. If algorithm is set to `Ed25519`, size is ignored.
+ size: 2048
+
+ # TLS certificate configuration via Secret
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-certificates-used-by-argocd-server
+ certificateSecret:
+ # -- Create argocd-server-tls secret
+ enabled: false
+ # -- Annotations to be added to argocd-server-tls secret
+ annotations: {}
+ # -- Labels to be added to argocd-server-tls secret
+ labels: {}
+ # -- Private Key of the certificate
+ key: ''
+ # -- Certificate data
+ crt: ''
+
+ ## Server service configuration
+ service:
+ # -- Server service annotations
+ annotations: {}
+ # -- Server service labels
+ labels: {}
+ # -- Server service type
+ type: ClusterIP
+ # -- Server service http port for NodePort service type (only if `server.service.type` is set to "NodePort")
+ nodePortHttp: 30080
+ # -- Server service https port for NodePort service type (only if `server.service.type` is set to "NodePort")
+ nodePortHttps: 30443
+ # -- Server service http port
+ servicePortHttp: 80
+ # -- Server service https port
+ servicePortHttps: 443
+ # -- Server service http port name, can be used to route traffic via istio
+ servicePortHttpName: http
+ # -- Server service https port name, can be used to route traffic via istio
+ servicePortHttpsName: https
+ # -- Use named target port for argocd
+ ## Named target ports are not supported by GCE health checks, so when deploying argocd on GKE
+ ## and exposing it via GCE ingress, the health checks fail and the load balancer returns a 502.
+ namedTargetPort: true
+ # -- LoadBalancer will get created with the IP specified in this field
+ loadBalancerIP: ""
+ # -- Source IP ranges to allow access to service from
+ loadBalancerSourceRanges: []
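+ # For example, restricting access to an internal network (illustrative CIDR):
+ # - 10.0.0.0/8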
+ # -- Server service external IPs
+ externalIPs: []
+ # -- Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+ externalTrafficPolicy: ""
+ # -- Used to maintain session affinity. Supports `ClientIP` and `None`
+ sessionAffinity: ""
+
+ ## Server metrics service configuration
+ metrics:
+ # -- Deploy metrics service
+ enabled: false
+ service:
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port
+ servicePort: 8083
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Prometheus ServiceMonitor interval
+ interval: 30s
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus ServiceMonitor namespace
+ namespace: "" # monitoring
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+
+ serviceAccount:
+ # -- Create server service account
+ create: true
+ # -- Server service account name
+ name: argocd-server
+ # -- Annotations applied to created service account
+ annotations: {}
+ # -- Labels applied to created service account
+ labels: {}
+ # -- Automount API credentials for the Service Account
+ automountServiceAccountToken: true
+
+ ingress:
+ # -- Enable an ingress resource for the Argo CD server
+ enabled: false
+ # -- Additional ingress annotations
+ annotations: {}
+ # -- Additional ingress labels
+ labels: {}
+ # -- Defines which ingress controller will implement the resource
+ ingressClassName: ""
+
+ # -- List of ingress hosts
+ ## Argo Ingress.
+ ## Hostnames must be provided if Ingress is enabled.
+ ## Secrets must be manually created in the namespace
+ hosts: []
+ # - argocd.example.com
+
+ # -- List of ingress paths
+ paths:
+ - /
+ # -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific`
+ pathType: Prefix
+ # -- Additional ingress paths
+ extraPaths: []
+ # - path: /*
+ # pathType: Prefix
+ # backend:
+ # service:
+ # name: ssl-redirect
+ # port:
+ # name: use-annotation
+
+ # -- Ingress TLS configuration
+ tls: []
+ # - secretName: your-certificate-name
+ # hosts:
+ # - argocd.example.com
+
+ # -- Uses `server.service.servicePortHttps` instead of `server.service.servicePortHttp`
+ https: false
+
+ # dedicated ingress for gRPC as documented at
+ # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/
+ ingressGrpc:
+ # -- Enable an ingress resource for the Argo CD server for dedicated [gRPC-ingress]
+ enabled: false
+ # -- Set up gRPC ingress to work with an AWS ALB
+ isAWSALB: false
+ # -- Additional ingress annotations for dedicated [gRPC-ingress]
+ annotations: {}
+ # -- Additional ingress labels for dedicated [gRPC-ingress]
+ labels: {}
+ # -- Defines which ingress controller will implement the resource [gRPC-ingress]
+ ingressClassName: ""
+
+ awsALB:
+ # -- Service type for the AWS ALB gRPC service
+ ## Service Type if isAWSALB is set to true
+ ## Can be of type NodePort or ClusterIP depending on which mode you
+ ## are running. Instance mode needs type NodePort, IP mode needs type
+ ## ClusterIP
+ ## Ref: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/how-it-works/#ingress-traffic
+ serviceType: NodePort
+ # -- Backend protocol version for the AWS ALB gRPC service
+ ## This tells AWS to send traffic from the ALB using HTTP2. Can use gRPC as well if you want to leverage gRPC specific features
+ backendProtocolVersion: HTTP2
+
+ # -- List of ingress hosts for dedicated [gRPC-ingress]
+ ## Argo Ingress.
+ ## Hostnames must be provided if Ingress is enabled.
+ ## Secrets must be manually created in the namespace
+ ##
+ hosts: []
+ # - argocd.example.com
+
+ # -- List of ingress paths for dedicated [gRPC-ingress]
+ paths:
+ - /
+ # -- Ingress path type for dedicated [gRPC-ingress]. One of `Exact`, `Prefix` or `ImplementationSpecific`
+ pathType: Prefix
+ # -- Additional ingress paths for dedicated [gRPC-ingress]
+ extraPaths: []
+ # - path: /*
+ # pathType: Prefix
+ # backend:
+ # service:
+ # name: ssl-redirect
+ # port:
+ # name: use-annotation
+
+ # -- Ingress TLS configuration for dedicated [gRPC-ingress]
+ tls: []
+ # - secretName: your-certificate-name
+ # hosts:
+ # - argocd.example.com
+
+ # -- Uses `server.service.servicePortHttps` instead of `server.service.servicePortHttp`
+ https: false
+
+ # Create an OpenShift Route with SSL passthrough for UI and CLI
+ # Consider setting 'hostname' e.g. https://argocd.apps-crc.testing/ using your Default Ingress Controller Domain
+ # Find your domain with: kubectl describe --namespace=openshift-ingress-operator ingresscontroller/default | grep Domain:
+ # If 'hostname' is an empty string "" OpenShift will create a hostname for you.
+ route:
+ # -- Enable an OpenShift Route for the Argo CD server
+ enabled: false
+ # -- OpenShift Route annotations
+ annotations: {}
+ # -- Hostname of OpenShift Route
+ hostname: ""
+ # -- Termination type of OpenShift Route
+ termination_type: passthrough
+ # -- Termination policy of OpenShift Route
+ termination_policy: None
+
+ ## Enable Admin ClusterRole resources.
+ ## Enable if you would like to grant rights to Argo CD to deploy to the local Kubernetes cluster.
+ clusterAdminAccess:
+ # -- Enable RBAC for local cluster deployments
+ enabled: true
+
+ GKEbackendConfig:
+ # -- Enable BackendConfig custom resource for Google Kubernetes Engine
+ enabled: false
+ # -- [BackendConfigSpec]
+ spec: {}
+ # spec:
+ # iap:
+ # enabled: true
+ # oauthclientCredentials:
+ # secretName: argocd-secret
+
+ ## Create a Google Managed Certificate for use with the GKE Ingress Controller
+ ## https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs
+ GKEmanagedCertificate:
+ # -- Enable ManagedCertificate custom resource for Google Kubernetes Engine.
+ enabled: false
+ # -- Domains for the Google Managed Certificate
+ domains:
+ - argocd.example.com
+
+ ## Create a Google FrontendConfig Custom Resource, for use with the GKE Ingress Controller
+ ## https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-features#configuring_ingress_features_through_frontendconfig_parameters
+ GKEfrontendConfig:
+ # -- Enable FrontendConfig custom resource for Google Kubernetes Engine
+ enabled: false
+ # -- [FrontendConfigSpec]
+ spec: {}
+ # spec:
+ # redirectToHttps:
+ # enabled: true
+ # responseCodeName: RESPONSE_CODE
+
+## Repo Server
+repoServer:
+ # -- Repo server name
+ name: repo-server
+
+ # -- The number of repo server pods to run
+ replicas: 1
+
+ ## Repo server Horizontal Pod Autoscaler
+ autoscaling:
+ # -- Enable Horizontal Pod Autoscaler ([HPA]) for the repo server
+ enabled: false
+ # -- Minimum number of replicas for the repo server [HPA]
+ minReplicas: 1
+ # -- Maximum number of replicas for the repo server [HPA]
+ maxReplicas: 5
+ # -- Average CPU utilization percentage for the repo server [HPA]
+ targetCPUUtilizationPercentage: 50
+ # -- Average memory utilization percentage for the repo server [HPA]
+ targetMemoryUtilizationPercentage: 50
+ # -- Configures the scaling behavior of the target in both Up and Down directions.
+ # This is only available on HPA apiVersion `autoscaling/v2beta2` and newer
+ behavior: {}
+ # scaleDown:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 1
+ # periodSeconds: 180
+ # scaleUp:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Pods
+ # value: 2
+ # periodSeconds: 60
+
+ ## Repo server Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the repo server
+ enabled: false
+ # -- Labels to be added to repo server pdb
+ labels: {}
+ # -- Annotations to be added to repo server pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `repoServer.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Repo server image
+ image:
+ # -- Repository to use for the repo server
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Tag to use for the repo server
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Image pull policy for the repo server
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # -- Additional command line arguments to pass to repo server
+ extraArgs: []
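+ # For example, capping concurrent manifest generation (verify the flag against your Argo CD version):
+ # - --parallelismlimit=3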
+
+ # -- Environment variables to pass to repo server
+ env: []
+
+ # -- envFrom to pass to repo server
+ # @default -- `[]` (See [values.yaml])
+ envFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- Additional containers to be added to the repo server pod
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/
+ extraContainers: []
+ # - name: cmp
+ # # Entrypoint should be Argo CD lightweight CMP server i.e. argocd-cmp-server
+ # command: [/var/run/argocd/argocd-cmp-server]
+ # image: busybox # This can be off-the-shelf or custom-built image
+ # securityContext:
+ # runAsNonRoot: true
+ # runAsUser: 999
+ # volumeMounts:
+ # - mountPath: /var/run/argocd
+ # name: var-files
+ # - mountPath: /home/argocd/cmp-server/plugins
+ # name: plugins
+ # # Remove this volumeMount if you've chosen to bake the config file into the sidecar image.
+ # - mountPath: /home/argocd/cmp-server/config/plugin.yaml
+ # subPath: plugin.yaml
+ # name: cmp-plugin
+ # # Starting with v2.4, do NOT mount the same tmp volume as the repo-server container. The filesystem separation helps
+ # # mitigate path traversal attacks.
+ # - mountPath: /tmp
+ # name: cmp-tmp
+
+ # -- Init containers to add to the repo server pods
+ initContainers: []
+
+ # -- Additional volumeMounts to the repo server main container
+ volumeMounts: []
+
+ # -- Additional volumes to the repo server pod
+ volumes: []
+ # - name: cmp-plugin
+ # configMap:
+ # name: cmp-plugin
+ # - name: cmp-tmp
+ # emptyDir: {}
+
+ # -- Annotations to be added to repo server Deployment
+ deploymentAnnotations: {}
+
+ # -- Annotations to be added to repo server pods
+ podAnnotations: {}
+
+ # -- Labels to be added to repo server pods
+ podLabels: {}
+
+ # -- Resource limits and requests for the repo server pods
+ resources:
+ limits:
+ cpu: 100m
+ memory: 256Mi
+ ephemeral-storage: 2Gi
+ requests:
+ cpu: 100m
+ memory: 256Mi
+ ephemeral-storage: 2Gi
+
+ # -- Configures the repo server port
+ containerPort: 8081
+
+ ## Readiness and liveness probes for default backend
+ ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+ readinessProbe:
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+ livenessProbe:
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+
+ # -- [Node selector]
+ nodeSelector: {}
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+ # -- Assign custom [affinity] rules to the deployment
+ affinity: {}
+
+ # -- Assign custom [TopologySpreadConstraints] rules to the repo server
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+
+ # -- Priority class for the repo server
+ priorityClassName: ""
+
+ # -- Repo server container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # TLS certificate configuration via Secret
+ ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-repo-server
+ ## Note: Issuing certificates via cert-manager is not supported right now because it's not possible to restart the repo server automatically without extra controllers.
+ certificateSecret:
+ # -- Create argocd-repo-server-tls secret
+ enabled: false
+ # -- Annotations to be added to argocd-repo-server-tls secret
+ annotations: {}
+ # -- Labels to be added to argocd-repo-server-tls secret
+ labels: {}
+ # -- Certificate authority. Required for self-signed certificates.
+ ca: ''
+ # -- Certificate private key
+ key: ''
+ # -- Certificate data. Must contain SANs of Repo service (ie: argocd-repo-server, argocd-repo-server.argo-cd.svc)
+ crt: ''
+
+ ## Repo server service configuration
+ service:
+ # -- Repo server service annotations
+ annotations: {}
+ # -- Repo server service labels
+ labels: {}
+ # -- Repo server service port
+ port: 8081
+ # -- Repo server service port name
+ portName: https-repo-server
+
+ ## Repo server metrics service configuration
+ metrics:
+ # -- Deploy metrics service
+ enabled: false
+ service:
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port
+ servicePort: 8084
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Prometheus ServiceMonitor interval
+ interval: 30s
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus ServiceMonitor namespace
+ namespace: "" # "monitoring"
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+
+ ## Enable Admin ClusterRole resources.
+ ## Enable if you would like to grant cluster rights to Argo CD repo server.
+ clusterAdminAccess:
+ # -- Enable RBAC for local cluster deployments
+ enabled: false
+ ## Enable Custom Rules for the Repo server's Cluster Role resource
+ ## Enable this and set `rules` to the custom rules you want for the Cluster Role resource.
+ ## Defaults to off
+ clusterRoleRules:
+ # -- Enable custom rules for the Repo server's Cluster Role resource
+ enabled: false
+ # -- List of custom rules for the Repo server's Cluster Role resource
+ rules: []
+
+ ## Repo server service account
+ ## If create is set to true, make sure to uncomment the name and update the rbac section below
+ serviceAccount:
+ # -- Create repo server service account
+ create: true
+ # -- Repo server service account name
+ name: "" # "argocd-repo-server"
+ # -- Annotations applied to created service account
+ annotations: {}
+ # -- Labels applied to created service account
+ labels: {}
+ # -- Automount API credentials for the Service Account
+ automountServiceAccountToken: true
+
+ # -- Repo server rbac rules
+ rbac: []
+ # - apiGroups:
+ # - argoproj.io
+ # resources:
+ # - applications
+ # verbs:
+ # - get
+ # - list
+ # - watch
+
+## ApplicationSet controller
+applicationSet:
+ # -- Enable ApplicationSet controller
+ enabled: true
+
+ # -- ApplicationSet controller name string
+ name: applicationset-controller
+
+ # -- The number of ApplicationSet controller pods to run
+ replicaCount: 1
+
+ ## ApplicationSet controller Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the ApplicationSet controller
+ enabled: false
+ # -- Labels to be added to ApplicationSet controller pdb
+ labels: {}
+ # -- Annotations to be added to ApplicationSet controller pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `applicationSet.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## ApplicationSet controller image
+ image:
+ # -- Repository to use for the ApplicationSet controller
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Tag to use for the ApplicationSet controller
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Image pull policy for the ApplicationSet controller
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- If defined, uses a Secret to pull an image from a private Docker registry or repository.
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # -- ApplicationSet controller log format. Either `text` or `json`
+ # @default -- `""` (defaults to global.logging.format)
+ logFormat: ""
+ # -- ApplicationSet controller log level. One of: `debug`, `info`, `warn`, `error`
+ # @default -- `""` (defaults to global.logging.level)
+ logLevel: ""
+
+ args:
+ # -- The default metric address
+ metricsAddr: :8080
+ # -- The default health check port
+ probeBindAddr: :8081
+ # -- How applications are synced between the generator and the cluster
+ policy: sync
+ # -- Enable dry run mode
+ dryRun: false
+
+ # -- List of extra cli args to add
+ extraArgs: []
+
+ # -- Environment variables to pass to the ApplicationSet controller
+ extraEnv: []
+ # - name: "MY_VAR"
+ # value: "value"
+
+ # -- envFrom to pass to the ApplicationSet controller
+ # @default -- `[]` (See [values.yaml])
+ extraEnvFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- Additional containers to be added to the ApplicationSet controller pod
+ extraContainers: []
+
+ # -- List of extra mounts to add (normally used with extraVolumes)
+ extraVolumeMounts: []
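+ # For example (the name must match a volume declared under `applicationSet.extraVolumes`):
+ # - name: custom-ca
+ # mountPath: /etc/ssl/custom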
+
+ # -- List of extra volumes to add
+ extraVolumes: []
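+ # For example (illustrative ConfigMap name) to provide additional CA certificates:
+ # - name: custom-ca
+ # configMap:
+ # name: my-custom-ca-certs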
+
+ ## Metrics service configuration
+ metrics:
+ # -- Deploy metrics service
+ enabled: false
+ service:
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port
+ servicePort: 8085
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Prometheus ServiceMonitor interval
+ interval: 30s
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus ServiceMonitor namespace
+ namespace: "" # monitoring
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+
+ ## ApplicationSet service configuration
+ service:
+ # -- ApplicationSet service annotations
+ annotations: {}
+ # -- ApplicationSet service labels
+ labels: {}
+ # -- ApplicationSet service port
+ port: 7000
+ # -- ApplicationSet service port name
+ portName: webhook
+
+ serviceAccount:
+ # -- Specifies whether a service account should be created
+ create: true
+ # -- Annotations to add to the service account
+ annotations: {}
+ # -- Labels applied to created service account
+ labels: {}
+ # -- The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+ # -- Annotations to be added to ApplicationSet controller Deployment
+ deploymentAnnotations: {}
+
+ # -- Annotations for the ApplicationSet controller pods
+ podAnnotations: {}
+
+ # -- Labels for the ApplicationSet controller pods
+ podLabels: {}
+
+ # -- Resource limits and requests for the ApplicationSet controller pods.
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # -- ApplicationSet controller container-level security context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ ## Probes for ApplicationSet controller (optional)
+ ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+ readinessProbe:
+ # -- Enable Kubernetes readiness probe for ApplicationSet controller
+ enabled: false
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+
+ livenessProbe:
+ # -- Enable Kubernetes liveness probe for ApplicationSet controller
+ enabled: false
+ # -- Number of seconds after the container has started before [probe] is initiated
+ initialDelaySeconds: 10
+ # -- How often (in seconds) to perform the [probe]
+ periodSeconds: 10
+ # -- Number of seconds after which the [probe] times out
+ timeoutSeconds: 1
+ # -- Minimum consecutive successes for the [probe] to be considered successful after having failed
+ successThreshold: 1
+ # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded
+ failureThreshold: 3
+
+ # -- [Node selector]
+ nodeSelector: {}
+
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+
+ # -- Assign custom [affinity] rules
+ affinity: {}
+
+ # -- If specified, indicates the pod's priority. If not specified, the pod priority will be default or zero if there is no default.
+ priorityClassName: ""
+
+ ## Webhook for the Git Generator
+ ## Ref: https://argocd-applicationset.readthedocs.io/en/master/Generators-Git/#webhook-configuration
+ webhook:
+ ingress:
+ # -- Enable an ingress resource for Webhooks
+ enabled: false
+ # -- Additional ingress annotations
+ annotations: {}
+ # -- Additional ingress labels
+ labels: {}
+ # -- Defines which ingress controller will implement the resource
+ ingressClassName: ""
+
+ # -- List of ingress hosts
+ ## Hostnames must be provided if Ingress is enabled.
+ ## Secrets must be manually created in the namespace
+ hosts: []
+ # - argocd-applicationset.example.com
+
+ # -- List of ingress paths
+ paths:
+ - /api/webhook
+ # -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific`
+ pathType: Prefix
+ # -- Additional ingress paths
+ extraPaths: []
+ # - path: /*
+ # backend:
+ # serviceName: ssl-redirect
+ # servicePort: use-annotation
+ ## for Kubernetes >=1.19 (when "networking.k8s.io/v1" is used)
+ # - path: /*
+ # pathType: Prefix
+ # backend:
+ # service:
+ # name: ssl-redirect
+ # port:
+ # name: use-annotation
+
+ # -- Ingress TLS configuration
+ tls: []
+ # - secretName: argocd-applicationset-tls
+ # hosts:
+ # - argocd-applicationset.example.com
+
+## Notifications controller
+notifications:
+ # -- Enable notifications controller
+ enabled: true
+
+ # -- Notifications controller name string
+ name: notifications-controller
+
+ # -- Argo CD dashboard url; used in place of {{.context.argocdUrl}} in templates
+ argocdUrl:
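+ # For example (placeholder URL):
+ # argocdUrl: https://argocd.example.com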
+
+ ## Notifications controller Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the notifications controller
+ enabled: false
+ # -- Labels to be added to notifications controller pdb
+ labels: {}
+ # -- Annotations to be added to notifications controller pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+ # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `notifications.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Notifications controller image
+ image:
+ # -- Repository to use for the notifications controller
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Tag to use for the notifications controller
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Image pull policy for the notifications controller
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ # -- Notifications controller log format. Either `text` or `json`
+ # @default -- `""` (defaults to global.logging.format)
+ logFormat: ""
+ # -- Notifications controller log level. One of: `debug`, `info`, `warn`, `error`
+ # @default -- `""` (defaults to global.logging.level)
+ logLevel: ""
+
+ # -- Extra arguments to provide to the notifications controller
+ extraArgs: []
+
+ # -- Additional container environment variables
+ extraEnv: []
+
+ # -- envFrom to pass to the notifications controller
+ # @default -- `[]` (See [values.yaml])
+ extraEnvFrom: []
+ # - configMapRef:
+ # name: config-map-name
+ # - secretRef:
+ # name: secret-name
+
+ # -- List of extra mounts to add (normally used with extraVolumes)
+ extraVolumeMounts: []
+
+ # -- List of extra volumes to add
+ extraVolumes: []
+
+ # -- Define user-defined context
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/templates/#defining-user-defined-context
+ context: {}
+ # region: east
+ # environmentName: staging
+
+ secret:
+ # -- Whether helm chart creates notifications controller secret
+ create: true
+
+ # -- key:value pairs of annotations to be added to the secret
+ annotations: {}
+
+ # -- Generic key:value pairs to be inserted into the secret
+ ## Can be used for templates, notification services etc. Some examples given below.
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/services/overview/
+ items: {}
+ # slack-token:
+ # # For more information: https://argocd-notifications.readthedocs.io/en/stable/services/slack/
+
+ # grafana-apiKey:
+ # # For more information: https://argocd-notifications.readthedocs.io/en/stable/services/grafana/
+
+ # webhooks-github-token:
+
+ # email-username:
+ # email-password:
+ # For more information: https://argocd-notifications.readthedocs.io/en/stable/services/email/
+
+ metrics:
+ # -- Enables prometheus metrics server
+ enabled: false
+ # -- Metrics port
+ port: 9001
+ service:
+ # -- Metrics service annotations
+ annotations: {}
+ # -- Metrics service labels
+ labels: {}
+ # -- Metrics service port name
+ portName: http-metrics
+ serviceMonitor:
+ # -- Enable a prometheus ServiceMonitor
+ enabled: false
+ # -- Prometheus ServiceMonitor selector
+ selector: {}
+ # prometheus: kube-prometheus
+ # -- Prometheus ServiceMonitor labels
+ additionalLabels: {}
+ # -- Prometheus ServiceMonitor annotations
+ annotations: {}
+ # namespace: monitoring
+ # interval: 30s
+ # scrapeTimeout: 10s
+ # -- Prometheus ServiceMonitor scheme
+ scheme: ""
+ # -- Prometheus ServiceMonitor tlsConfig
+ tlsConfig: {}
+ # -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+
+ # -- Configures notification services such as slack, email or custom webhook
+ # @default -- See [values.yaml]
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/services/overview/
+ notifiers: {}
+ # service.slack: |
+ # token: $slack-token
+
+ # -- Annotations to be applied to the notifications controller Deployment
+ deploymentAnnotations: {}
+
+ # -- Annotations to be applied to the notifications controller Pods
+ podAnnotations: {}
+
+ # -- Labels to be applied to the notifications controller Pods
+ podLabels: {}
+
+ # -- Resource limits and requests for the notifications controller
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # -- Notification controller container-level security Context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # -- [Node selector]
+ nodeSelector: {}
+
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+
+ # -- Assign custom [affinity] rules
+ affinity: {}
+
+ # -- Priority class for the notifications controller pods
+ priorityClassName: ""
+
+ serviceAccount:
+ # -- Specifies whether a service account should be created
+ create: true
+
+ # -- The name of the service account to use.
+ ## If not set and create is true, a name is generated using the fullname template
+ name: argocd-notifications-controller
+
+ # -- Annotations applied to created service account
+ annotations: {}
+
+ # -- Labels applied to created service account
+ labels: {}
+ cm:
+ # -- Whether helm chart creates notifications controller config map
+ create: true
+
+ # -- Contains centrally managed global application subscriptions
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/subscriptions/
+ subscriptions: []
+ # # subscription for on-sync-status-unknown trigger notifications
+ # - recipients:
+ # - slack:test2
+ # - email:test@gmail.com
+ # triggers:
+ # - on-sync-status-unknown
+ # # subscription restricted to applications with matching labels only
+ # - recipients:
+ # - slack:test3
+ # selector: test=true
+ # triggers:
+ # - on-sync-status-unknown
+
+ # -- The notification template is used to generate the notification content
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/templates/
+ templates: {}
+ # template.app-deployed: |
+ # email:
+ # subject: New version of an application {{.app.metadata.name}} is up and running.
+ # message: |
+ # {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} is now running new version of deployments manifests.
+ # slack:
+ # attachments: |
+ # [{
+ # "title": "{{ .app.metadata.name}}",
+ # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+ # "color": "#18be52",
+ # "fields": [
+ # {
+ # "title": "Sync Status",
+ # "value": "{{.app.status.sync.status}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Repository",
+ # "value": "{{.app.spec.source.repoURL}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Revision",
+ # "value": "{{.app.status.sync.revision}}",
+ # "short": true
+ # }
+ # {{range $index, $c := .app.status.conditions}}
+ # {{if not $index}},{{end}}
+ # {{if $index}},{{end}}
+ # {
+ # "title": "{{$c.type}}",
+ # "value": "{{$c.message}}",
+ # "short": true
+ # }
+ # {{end}}
+ # ]
+ # }]
+ # template.app-health-degraded: |
+ # email:
+ # subject: Application {{.app.metadata.name}} has degraded.
+ # message: |
+ # {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} has degraded.
+ # Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}.
+ # slack:
+ # attachments: |-
+ # [{
+ # "title": "{{ .app.metadata.name}}",
+ # "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+ # "color": "#f4c030",
+ # "fields": [
+ # {
+ # "title": "Sync Status",
+ # "value": "{{.app.status.sync.status}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Repository",
+ # "value": "{{.app.spec.source.repoURL}}",
+ # "short": true
+ # }
+ # {{range $index, $c := .app.status.conditions}}
+ # {{if not $index}},{{end}}
+ # {{if $index}},{{end}}
+ # {
+ # "title": "{{$c.type}}",
+ # "value": "{{$c.message}}",
+ # "short": true
+ # }
+ # {{end}}
+ # ]
+ # }]
+ # template.app-sync-failed: |
+ # email:
+ # subject: Failed to sync application {{.app.metadata.name}}.
+ # message: |
+ # {{if eq .serviceType "slack"}}:exclamation:{{end}} The sync operation of application {{.app.metadata.name}} has failed at {{.app.status.operationState.finishedAt}} with the following error: {{.app.status.operationState.message}}
+ # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true .
+ # slack:
+ # attachments: |-
+ # [{
+ # "title": "{{ .app.metadata.name}}",
+ # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+ # "color": "#E96D76",
+ # "fields": [
+ # {
+ # "title": "Sync Status",
+ # "value": "{{.app.status.sync.status}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Repository",
+ # "value": "{{.app.spec.source.repoURL}}",
+ # "short": true
+ # }
+ # {{range $index, $c := .app.status.conditions}}
+ # {{if not $index}},{{end}}
+ # {{if $index}},{{end}}
+ # {
+ # "title": "{{$c.type}}",
+ # "value": "{{$c.message}}",
+ # "short": true
+ # }
+ # {{end}}
+ # ]
+ # }]
+ # template.app-sync-running: |
+ # email:
+ # subject: Start syncing application {{.app.metadata.name}}.
+ # message: |
+ # The sync operation of application {{.app.metadata.name}} has started at {{.app.status.operationState.startedAt}}.
+ # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true .
+ # slack:
+ # attachments: |-
+ # [{
+ # "title": "{{ .app.metadata.name}}",
+ # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+ # "color": "#0DADEA",
+ # "fields": [
+ # {
+ # "title": "Sync Status",
+ # "value": "{{.app.status.sync.status}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Repository",
+ # "value": "{{.app.spec.source.repoURL}}",
+ # "short": true
+ # }
+ # {{range $index, $c := .app.status.conditions}}
+ # {{if not $index}},{{end}}
+ # {{if $index}},{{end}}
+ # {
+ # "title": "{{$c.type}}",
+ # "value": "{{$c.message}}",
+ # "short": true
+ # }
+ # {{end}}
+ # ]
+ # }]
+ # template.app-sync-status-unknown: |
+ # email:
+ # subject: Application {{.app.metadata.name}} sync status is 'Unknown'
+ # message: |
+ # {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} sync is 'Unknown'.
+ # Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}.
+ # {{if ne .serviceType "slack"}}
+ # {{range $c := .app.status.conditions}}
+ # * {{$c.message}}
+ # {{end}}
+ # {{end}}
+ # slack:
+ # attachments: |-
+ # [{
+ # "title": "{{ .app.metadata.name}}",
+ # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+ # "color": "#E96D76",
+ # "fields": [
+ # {
+ # "title": "Sync Status",
+ # "value": "{{.app.status.sync.status}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Repository",
+ # "value": "{{.app.spec.source.repoURL}}",
+ # "short": true
+ # }
+ # {{range $index, $c := .app.status.conditions}}
+ # {{if not $index}},{{end}}
+ # {{if $index}},{{end}}
+ # {
+ # "title": "{{$c.type}}",
+ # "value": "{{$c.message}}",
+ # "short": true
+ # }
+ # {{end}}
+ # ]
+ # }]
+ # template.app-sync-succeeded: |
+ # email:
+ # subject: Application {{.app.metadata.name}} has been successfully synced.
+ # message: |
+ # {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}.
+ # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true .
+ # slack:
+ # attachments: |-
+ # [{
+ # "title": "{{ .app.metadata.name}}",
+ # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}",
+ # "color": "#18be52",
+ # "fields": [
+ # {
+ # "title": "Sync Status",
+ # "value": "{{.app.status.sync.status}}",
+ # "short": true
+ # },
+ # {
+ # "title": "Repository",
+ # "value": "{{.app.spec.source.repoURL}}",
+ # "short": true
+ # }
+ # {{range $index, $c := .app.status.conditions}}
+ # {{if not $index}},{{end}}
+ # {{if $index}},{{end}}
+ # {
+ # "title": "{{$c.type}}",
+ # "value": "{{$c.message}}",
+ # "short": true
+ # }
+ # {{end}}
+ # ]
+ # }]
+
+ # -- The trigger defines the condition when the notification should be sent
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/triggers/
+ triggers: {}
+ # trigger.on-deployed: |
+ # - description: Application is synced and healthy. Triggered once per commit.
+ # oncePer: app.status.sync.revision
+ # send:
+ # - app-deployed
+ # when: app.status.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy'
+ # trigger.on-health-degraded: |
+ # - description: Application has degraded
+ # send:
+ # - app-health-degraded
+ # when: app.status.health.status == 'Degraded'
+ # trigger.on-sync-failed: |
+ # - description: Application syncing has failed
+ # send:
+ # - app-sync-failed
+ # when: app.status.operationState.phase in ['Error', 'Failed']
+ # trigger.on-sync-running: |
+ # - description: Application is being synced
+ # send:
+ # - app-sync-running
+ # when: app.status.operationState.phase in ['Running']
+ # trigger.on-sync-status-unknown: |
+ # - description: Application status is 'Unknown'
+ # send:
+ # - app-sync-status-unknown
+ # when: app.status.sync.status == 'Unknown'
+ # trigger.on-sync-succeeded: |
+ # - description: Application syncing has succeeded
+ # send:
+ # - app-sync-succeeded
+ # when: app.status.operationState.phase in ['Succeeded']
+ #
+ # For more information: https://argocd-notifications.readthedocs.io/en/stable/triggers/#default-triggers
+ # defaultTriggers: |
+ # - on-sync-status-unknown
+
+ ## The optional bot component simplifies managing subscriptions
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/bots/overview/
+ bots:
+ slack:
+ # -- Enable slack bot
+ ## You have to set secret.notifiers.slack.signingSecret
+ enabled: false
+
+ ## Slack bot Pod Disruption Budget
+ ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+ pdb:
+ # -- Deploy a [PodDisruptionBudget] for the Slack bot
+ enabled: false
+ # -- Labels to be added to Slack bot pdb
+ labels: {}
+ # -- Annotations to be added to Slack bot pdb
+ annotations: {}
+ # -- Number of pods that are available after eviction as number or percentage (eg.: 50%)
+ # @default -- `""` (defaults to 0 if not specified)
+ minAvailable: ""
+      # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%).
+ ## Has higher precedence over `notifications.bots.slack.pdb.minAvailable`
+ maxUnavailable: ""
+
+ ## Slack bot image
+ image:
+ # -- Repository to use for the Slack bot
+ # @default -- `""` (defaults to global.image.repository)
+ repository: ""
+ # -- Tag to use for the Slack bot
+ # @default -- `""` (defaults to global.image.tag)
+ tag: ""
+ # -- Image pull policy for the Slack bot
+ # @default -- `""` (defaults to global.image.imagePullPolicy)
+ imagePullPolicy: ""
+
+ # -- Secrets with credentials to pull images from a private registry
+ # @default -- `[]` (defaults to global.imagePullSecrets)
+ imagePullSecrets: []
+
+ service:
+ # -- Service annotations for Slack bot
+ annotations: {}
+ # -- Service port for Slack bot
+ port: 80
+ # -- Service type for Slack bot
+ type: LoadBalancer
+
+ serviceAccount:
+ # -- Specifies whether a service account should be created
+ create: true
+
+ # -- The name of the service account to use.
+ ## If not set and create is true, a name is generated using the fullname template
+ name: argocd-notifications-bot
+
+ # -- Annotations applied to created service account
+ annotations: {}
+
+ # -- Slack bot container-level security Context
+ # @default -- See [values.yaml]
+ containerSecurityContext:
+ runAsNonRoot: true
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ seccompProfile:
+ type: RuntimeDefault
+ capabilities:
+ drop:
+ - ALL
+
+ # -- Resource limits and requests for the Slack bot
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # -- Assign custom [affinity] rules
+ affinity: {}
+
+ # -- [Tolerations] for use with node taints
+ tolerations: []
+
+ # -- [Node selector]
+ nodeSelector: {}
\ No newline at end of file
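Everything under `templates` and `triggers` above ships commented out. As a minimal sketch of what an uncommented configuration could look like once wired together (the Slack channel name is purely illustrative and not part of this change), the relevant keys under the chart's `notifications` section would be roughly:

```yaml
notifications:
  cm:
    create: true
  subscriptions:
    # hypothetical global subscription: notify one Slack channel on failed syncs
    - recipients:
        - slack:my-ops-channel
      triggers:
        - on-sync-failed
  templates:
    template.app-sync-failed: |
      message: |
        Sync of {{.app.metadata.name}} failed: {{.app.status.operationState.message}}
  triggers:
    trigger.on-sync-failed: |
      - description: Application syncing has failed
        send: [app-sync-failed]
        when: app.status.operationState.phase in ['Error', 'Failed']
```

Note that a trigger's `send` entry refers to a template by its short name (`app-sync-failed`), while the config map key for the template carries the `template.` prefix, matching the commented examples above.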
diff --git a/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml b/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml
index ae9a26a43..8707a79d5 100644
--- a/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml
+++ b/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml
@@ -22,7 +22,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -31,6 +31,22 @@ spec:
values:
- arranger
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
containers:
- name: arranger-dashboard
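The node affinity added here (and repeated for most services below) prefers spot capacity using two label keys: `karpenter.sh/capacity-type` is set on nodes provisioned by Karpenter, while `eks.amazonaws.com/capacityType` is set on EKS managed node group nodes, so the preference applies regardless of which provisioner created the node. Because both rules are only `preferred`, pods still schedule onto other capacity when no spot node is available. For comparison, a sketch of a strict variant that would require spot capacity (an illustration of the mechanism, not what this change does) looks like the following; `nodeSelectorTerms` entries are ORed:

```yaml
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        # node must be spot capacity, whether provisioned by Karpenter...
        - matchExpressions:
            - key: karpenter.sh/capacity-type
              operator: In
              values: [spot]
        # ...or by an EKS managed node group
        - matchExpressions:
            - key: eks.amazonaws.com/capacityType
              operator: In
              values: [SPOT]
```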
diff --git a/kube/services/arranger/arranger-deploy.yaml b/kube/services/arranger/arranger-deploy.yaml
index 57e19ae29..31d715d7c 100644
--- a/kube/services/arranger/arranger-deploy.yaml
+++ b/kube/services/arranger/arranger-deploy.yaml
@@ -23,7 +23,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -32,6 +32,22 @@ spec:
values:
- arranger
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: arranger-config
diff --git a/kube/services/audit-service/audit-service-deploy.yaml b/kube/services/audit-service/audit-service-deploy.yaml
index 78e7d6df1..935cab408 100644
--- a/kube/services/audit-service/audit-service-deploy.yaml
+++ b/kube/services/audit-service/audit-service-deploy.yaml
@@ -32,7 +32,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -41,6 +41,22 @@ spec:
values:
- audit-service
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -79,11 +95,10 @@ spec:
subPath: "audit-service-config.yaml"
resources:
requests:
- cpu: 0.4
- memory: 512Mi
+ cpu: 100m
+ memory: 100Mi
limits:
- cpu: 0.8
- memory: 1024Mi
+ memory: 512Mi
initContainers:
- name: audit-db-migrate
GEN3_AUDIT-SERVICE_IMAGE
diff --git a/kube/services/auspice/auspice-deploy.yaml b/kube/services/auspice/auspice-deploy.yaml
index 88324fec4..ce228be9f 100644
--- a/kube/services/auspice/auspice-deploy.yaml
+++ b/kube/services/auspice/auspice-deploy.yaml
@@ -23,7 +23,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -32,6 +32,22 @@ spec:
values:
- auspice
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
containers:
- name: auspice
@@ -64,8 +80,7 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: 0.5
- memory: 1024Mi
+ cpu: 100m
+ memory: 128Mi
limits:
- cpu: 1
- memory: 2400Mi
+ memory: 1024Mi
diff --git a/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml b/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml
index bd42e1645..e99e3fd15 100644
--- a/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml
+++ b/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml
@@ -35,6 +35,7 @@ rules:
- apiGroups: [""]
resources:
- "pods"
+ - "namespaces"
- "services"
- "replicationcontrollers"
- "persistentvolumeclaims"
@@ -62,6 +63,16 @@ rules:
resourceNames: ["cluster-autoscaler"]
resources: ["leases"]
verbs: ["get", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources:
+ - storageclasses
+ - csinodes
+ - csidrivers
+ - csistoragecapacities
+ verbs:
+ - watch
+ - list
+ - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
@@ -142,8 +153,7 @@ spec:
name: cluster-autoscaler
resources:
limits:
- cpu: 100m
- memory: 600Mi
+ memory: 1600Mi
requests:
cpu: 100m
memory: 300Mi
diff --git a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml
index a2a3170d5..34f18d973 100644
--- a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml
+++ b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml
@@ -22,11 +22,29 @@ spec:
netvpc: "yes"
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: credentials
secret:
secretName: "aws-es-proxy"
+ priorityClassName: aws-es-proxy-high-priority
containers:
- name: esproxy
GEN3_AWS-ES-PROXY_IMAGE|-image: quay.io/cdis/aws-es-proxy:0.8-|
@@ -67,5 +85,5 @@ spec:
cpu: 250m
memory: 256Mi
limits:
- cpu: 1
+ cpu: 1000m
memory: 2Gi
diff --git a/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml b/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml
new file mode 100644
index 000000000..6bd619a22
--- /dev/null
+++ b/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml
@@ -0,0 +1,7 @@
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: aws-es-proxy-high-priority
+value: 1000000
+globalDefault: false
+description: "Priority class for aws-es-proxy service"
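The aws-es-proxy deployment change above references this class through `priorityClassName: aws-es-proxy-high-priority`, so the PriorityClass has to exist in the cluster before (or be applied together with) the deployment; a pod that names a missing PriorityClass is rejected at admission. The reference in the pod spec is simply:

```yaml
spec:
  template:
    spec:
      # must match metadata.name of the PriorityClass defined above
      priorityClassName: aws-es-proxy-high-priority
```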
diff --git a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml
new file mode 100644
index 000000000..fa6b741a2
--- /dev/null
+++ b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml
@@ -0,0 +1,105 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: cedar-wrapper-deployment
+spec:
+ selector:
+ matchLabels:
+ app: cedar-wrapper
+ revisionHistoryLimit: 2
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 2
+ maxUnavailable: 25%
+ template:
+ metadata:
+ labels:
+ app: cedar-wrapper
+ public: "yes"
+ netnolimit: "yes"
+ userhelper: "yes"
+ GEN3_DATE_LABEL
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 25
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - cedar-wrapper
+ topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
+ automountServiceAccountToken: false
+ volumes:
+ - name: ca-volume
+ secret:
+ secretName: "service-ca"
+ - name: config-volume
+ secret:
+ secretName: "cedar-wrapper-config"
+ - name: privacy-policy
+ configMap:
+ name: "privacy-policy"
+ containers:
+ - name: cedar-wrapper
+ GEN3_CEDAR-WRAPPER_IMAGE
+ readinessProbe:
+ httpGet:
+ path: /_status/
+ port: 8000
+ initialDelaySeconds: 30
+ periodSeconds: 60
+ timeoutSeconds: 30
+ livenessProbe:
+ httpGet:
+ path: /_status/
+ port: 8000
+ initialDelaySeconds: 60
+ periodSeconds: 60
+ timeoutSeconds: 30
+ failureThreshold: 6
+ resources:
+ requests:
+ cpu: 100m
+ memory: 64Mi
+ limits:
+ memory: 4096Mi
+ ports:
+ - containerPort: 8000
+ command:
+ - /bin/bash
+ - /src/start.sh
+ env:
+ - name: HOSTNAME
+ value: revproxy-service
+ - name: API_KEY
+ valueFrom:
+ secretKeyRef:
+ name: cedar-g3auto
+ key: "cedar_api_key.txt"
+ volumeMounts:
+ - name: "ca-volume"
+ readOnly: true
+ mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt"
+ subPath: "ca.pem"
+ imagePullPolicy: Always
diff --git a/kube/services/cedar-wrapper/cedar-wrapper-service.yaml b/kube/services/cedar-wrapper/cedar-wrapper-service.yaml
new file mode 100644
index 000000000..c22585213
--- /dev/null
+++ b/kube/services/cedar-wrapper/cedar-wrapper-service.yaml
@@ -0,0 +1,19 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: cedar-wrapper-service
+spec:
+ selector:
+ app: cedar-wrapper
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 8000
+ name: http
+ nodePort: null
+ - protocol: TCP
+ port: 443
+ targetPort: 8000
+ name: https
+ nodePort: null
+ type: ClusterIP
diff --git a/kube/services/cogwheel/cogwheel-deploy.yaml b/kube/services/cogwheel/cogwheel-deploy.yaml
index ef274220a..c66f4d3b3 100644
--- a/kube/services/cogwheel/cogwheel-deploy.yaml
+++ b/kube/services/cogwheel/cogwheel-deploy.yaml
@@ -12,6 +12,23 @@ spec:
app: cogwheel
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cogwheel-g3auto
secret:
diff --git a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml
index a21d97900..c7c411f4c 100644
--- a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml
+++ b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml
@@ -19,7 +19,7 @@ spec:
metadata:
labels:
app: cohort-middleware
- dbatlas: "yes"
+ dbohdsi: "yes"
dbomop-data: "yes"
public: "yes"
tags.datadoghq.com/service: "cohort-middleware"
@@ -30,7 +30,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -39,11 +39,28 @@ spec:
values:
- cohort-middleware
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- - name: cohort-middleware-config
+ - name: cohort-middleware-g3auto
secret:
- secretName: cohort-middleware-config
+ secretName: cohort-middleware-g3auto
+ optional: true
containers:
- name: cohort-middleware
GEN3_COHORT-MIDDLEWARE_IMAGE|-image: quay.io/cdis/cohort-middleware:latest-|
@@ -94,15 +111,14 @@ spec:
ports:
- containerPort: 8080
volumeMounts:
- - name: cohort-middleware-config
+ - name: cohort-middleware-g3auto
readOnly: true
mountPath: /config/development.yaml
subPath: development.yaml
imagePullPolicy: Always
resources:
requests:
- cpu: 500m
- memory: 4Gi
+ cpu: 100m
+ memory: 128Mi
limits:
- cpu: 500m
memory: 4Gi
diff --git a/kube/services/dashboard/dashboard-deploy.yaml b/kube/services/dashboard/dashboard-deploy.yaml
index 14a3379cc..451d99552 100644
--- a/kube/services/dashboard/dashboard-deploy.yaml
+++ b/kube/services/dashboard/dashboard-deploy.yaml
@@ -29,7 +29,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -38,6 +38,22 @@ spec:
values:
- dashboard
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -62,10 +78,9 @@ spec:
mountPath: "/etc/gen3"
resources:
requests:
- cpu: 0.3
- memory: 200Mi
+ cpu: 100m
+ memory: 20Mi
limits:
- cpu: 0.5
memory: 500Mi
imagePullPolicy: Always
livenessProbe:
diff --git a/kube/services/datadog/datadog-application.yaml b/kube/services/datadog/datadog-application.yaml
new file mode 100644
index 000000000..19e0e1d86
--- /dev/null
+++ b/kube/services/datadog/datadog-application.yaml
@@ -0,0 +1,27 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: datadog-application
+ namespace: argocd
+spec:
+ project: default
+ sources:
+ - chart: datadog
+ repoURL: 'https://helm.datadoghq.com'
+ targetRevision: 3.6.4
+ helm:
+ valueFiles:
+ - $values/kube/services/datadog/values.yaml
+ releaseName: datadog
+ - repoURL: 'https://github.com/uc-cdis/cloud-automation.git'
+ targetRevision: master
+ ref: values
+ destination:
+ server: 'https://kubernetes.default.svc'
+ namespace: datadog
+ syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+ syncOptions:
+ - CreateNamespace=true
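This Application uses Argo CD's multiple-sources feature: the first source pulls the upstream `datadog` chart at version 3.6.4, and the second source checks out this repository under the alias `values` so the chart can read `kube/services/datadog/values.yaml` through the `$values/` prefix. The same `sources` block, annotated for reference:

```yaml
sources:
  - chart: datadog                                    # Helm chart from the Datadog repository
    repoURL: 'https://helm.datadoghq.com'
    targetRevision: 3.6.4
    helm:
      valueFiles:
        - $values/kube/services/datadog/values.yaml   # resolved against the "values" source below
      releaseName: datadog
  - repoURL: 'https://github.com/uc-cdis/cloud-automation.git'
    targetRevision: master
    ref: values                                       # exposes this repo to other sources as $values/...
```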
diff --git a/kube/services/datadog/datadog_db_user.json b/kube/services/datadog/datadog_db_user.json
new file mode 100644
index 000000000..0eca1be9f
--- /dev/null
+++ b/kube/services/datadog/datadog_db_user.json
@@ -0,0 +1,4 @@
+{
+ "datadog_db_user": "datadog",
+ "datadog_db_password": null
+}
\ No newline at end of file
diff --git a/kube/services/datadog/postgres.yaml b/kube/services/datadog/postgres.yaml
new file mode 100644
index 000000000..f85dc0970
--- /dev/null
+++ b/kube/services/datadog/postgres.yaml
@@ -0,0 +1,8 @@
+cluster_check: true
+init_config:
+instances:
+ - dbm: true
+ host:
+ port: 5432
+ username: datadog
+ password:
\ No newline at end of file
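`postgres.yaml` is the confd check definition that the comment added to `values.yaml` below alludes to: because the databases are Aurora instances rather than pods, the Postgres check is configured as a cluster check instead of via pod annotations, and the empty `host`/`password` fields are filled in at deploy time (the companion `datadog_db_user.json` appears to hold the credentials). A rendered instance would look roughly like the following, with the endpoint and password shown only as placeholders:

```yaml
cluster_check: true
init_config:
instances:
  - dbm: true
    host: my-cluster.cluster-xxxxxxxxxxxx.us-east-1.rds.amazonaws.com  # hypothetical Aurora endpoint
    port: 5432
    username: datadog
    password: "<datadog_db_password from datadog_db_user.json>"
```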
diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml
index 5ad681b5d..fc0bbab8b 100644
--- a/kube/services/datadog/values.yaml
+++ b/kube/services/datadog/values.yaml
@@ -10,9 +10,28 @@ datadog:
useHostPort: true
nonLocalTraffic: true
+  # This is used to configure a lot of the checks that Datadog runs. Normally we would annotate a service, but since
+  # we use Aurora, we have to configure this via confd instead.
+
+ #Enables Optional Universal Service Monitoring
+ ## ref: https://docs.datadoghq.com/tracing/universal_service_monitoring/?tab=helm
+ serviceMonitoring:
+ enabled: false
+
# datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret.
## If set, this parameter takes precedence over "apiKey".
- apiKeyExistingSecret: "datadog-agent"
+ apiKeyExistingSecret: "ddgov-apikey"
+
+ # datadog.site -- The site of the Datadog intake to send Agent data to.
+ # (documentation: https://docs.datadoghq.com/getting_started/site/)
+
+ ## Set to 'datadoghq.com' to send data to the US1 site (default).
+ ## Set to 'datadoghq.eu' to send data to the EU site.
+ ## Set to 'us3.datadoghq.com' to send data to the US3 site.
+ ## Set to 'us5.datadoghq.com' to send data to the US5 site.
+ ## Set to 'ddog-gov.com' to send data to the US1-FED site.
+ ## Set to 'ap1.datadoghq.com' to send data to the AP1 site.
+ site: ddog-gov.com
# datadog.kubeStateMetricsEnabled -- If true, deploys the kube-state-metrics deployment
## ref: https://github.com/kubernetes/kube-state-metrics/tree/kube-state-metrics-helm-chart-2.13.2/charts/kube-state-metrics
@@ -51,11 +70,13 @@ datadog:
apm:
# datadog.apm.socketEnabled -- Enable APM over Socket (Unix Socket or windows named pipe)
## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
- socketEnabled: true
+ socketEnabled: false
# datadog.apm.portEnabled -- Enable APM over TCP communication (port 8126 by default)
## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
- portEnabled: true
+ portEnabled: false
+
+ enabled: false
# datadog.apm.port -- Override the trace Agent port
## Note: Make sure your client is sending to the same UDP port.
@@ -84,6 +105,13 @@ datadog:
## Enable systemProbe agent and provide custom configs
systemProbe:
+ resources:
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ limits:
+ cpu: 100m
+ memory: 200Mi
# datadog.systemProbe.debugPort -- Specify the port to expose pprof and expvar for system-probe agent
debugPort: 0
@@ -156,7 +184,7 @@ datadog:
networkMonitoring:
# datadog.networkMonitoring.enabled -- Enable network performance monitoring
- enabled: true
+ enabled: false
## Enable security agent and provide custom configs
@@ -206,7 +234,8 @@ datadog:
# - send_distribution_buckets: true
# timeout: 5
-
+ containerExcludeLogs: "kube_namespace:logging kube_namespace:argo name:pelican-export* name:job-task"
+ containerExclude: "kube_namespace:logging kube_namespace:kube-system kube_namespace:kubecost kube_namespace:argo kube_namespace:cortex-xdr kube_namespace:monitoring kube_namespace:datadog"
## This is the Datadog Cluster Agent implementation that handles cluster-wide
## metrics more cleanly, separates concerns for better rbac, and implements
## the external metrics API so you can autoscale HPAs based on datadog metrics
@@ -221,7 +250,7 @@ clusterAgent:
name: cluster-agent
# clusterAgent.image.tag -- Cluster Agent image tag to use
- tag: 1.16.0
+ # tag: 1.16.0
# clusterAgent.image.repository -- Override default registry + image.name for Cluster Agent
repository:
@@ -261,10 +290,6 @@ agents:
# agents.tolerations -- Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6)
tolerations:
- - effect: NoSchedule
- key: role
- operator: Equal
- value: workflow
- effect: NoSchedule
key: role
operator: Equal
@@ -278,7 +303,7 @@ agents:
name: agent
# agents.image.tag -- Define the Agent version to use
- tag: 7.32.4
+ # tag: 7.32.4
# agents.image.tagSuffix -- Suffix to append to Agent tag
## Ex:
@@ -315,4 +340,3 @@ agents:
# agents.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if agents.rbac.create is true
serviceAccountAnnotations: {}
-
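The `containerExclude` and `containerExcludeLogs` values added in this hunk are space-separated lists of `kube_namespace:`, `name:`, or `image:` filters that the chart hands to the agent's container autodiscovery, so entire namespaces can be dropped from monitoring and, with the logs variant, from log collection, while globs like `name:pelican-export*` exclude individual containers. A minimal sketch of the same mechanism (the namespaces here are examples only):

```yaml
datadog:
  # exclude these namespaces from autodiscovery entirely
  containerExclude: "kube_namespace:kube-system kube_namespace:monitoring"
  # additionally skip log collection for these containers/namespaces
  containerExcludeLogs: "kube_namespace:logging name:job-task"
```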
diff --git a/kube/services/datasim/datasim-deploy.yaml b/kube/services/datasim/datasim-deploy.yaml
index c48075b89..0f6f21d68 100644
--- a/kube/services/datasim/datasim-deploy.yaml
+++ b/kube/services/datasim/datasim-deploy.yaml
@@ -22,7 +22,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -31,6 +31,22 @@ spec:
values:
- datasim
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: yaml-merge
diff --git a/kube/services/dicom-server/dicom-server-deploy.yaml b/kube/services/dicom-server/dicom-server-deploy.yaml
index 7925c2974..43bd90e5d 100644
--- a/kube/services/dicom-server/dicom-server-deploy.yaml
+++ b/kube/services/dicom-server/dicom-server-deploy.yaml
@@ -17,14 +17,70 @@ spec:
public: "yes"
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
volumes:
- name: config-volume-g3auto
secret:
- secretName: dicom-server-g3auto
+ secretName: orthanc-g3auto
containers:
- name: dicom-server
GEN3_DICOM-SERVER_IMAGE
+ env:
+ - name: DD_ENABLED
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: dd_enabled
+ optional: true
+ - name: DD_ENV
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['tags.datadoghq.com/env']
+ - name: DD_SERVICE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['tags.datadoghq.com/service']
+ - name: DD_VERSION
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['tags.datadoghq.com/version']
+ - name: DD_LOGS_INJECTION
+ value: "true"
+ - name: DD_AGENT_HOST
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
imagePullPolicy: Always
+ readinessProbe:
+ httpGet:
+ path: /system
+ port: 8042
+ initialDelaySeconds: 5
+ periodSeconds: 20
+ timeoutSeconds: 30
+ livenessProbe:
+ httpGet:
+ path: /system
+ port: 8042
+ initialDelaySeconds: 5
+ periodSeconds: 60
+ timeoutSeconds: 30
ports:
- containerPort: 8042
volumeMounts:
diff --git a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml
index d37addca2..9df6fbc93 100644
--- a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml
+++ b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml
@@ -17,10 +17,66 @@ spec:
public: "yes"
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
containers:
- name: dicom-viewer
GEN3_DICOM-VIEWER_IMAGE
+ env:
+ - name: DD_ENABLED
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: dd_enabled
+ optional: true
+ - name: DD_ENV
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['tags.datadoghq.com/env']
+ - name: DD_SERVICE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['tags.datadoghq.com/service']
+ - name: DD_VERSION
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.labels['tags.datadoghq.com/version']
+ - name: DD_LOGS_INJECTION
+ value: "true"
+ - name: DD_AGENT_HOST
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
imagePullPolicy: Always
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 5
+ periodSeconds: 20
+ timeoutSeconds: 30
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 5
+ periodSeconds: 60
+ timeoutSeconds: 30
ports:
- containerPort: 80
diff --git a/kube/services/dicom-viewer/dicom-viewer-service.yaml b/kube/services/dicom-viewer/dicom-viewer-service.yaml
index ea2576584..26f3a21b0 100644
--- a/kube/services/dicom-viewer/dicom-viewer-service.yaml
+++ b/kube/services/dicom-viewer/dicom-viewer-service.yaml
@@ -12,4 +12,4 @@ spec:
nodePort: null
name: http
type: ClusterIP
-
\ No newline at end of file
+
diff --git a/kube/services/fence/fence-canary-deploy.yaml b/kube/services/fence/fence-canary-deploy.yaml
index 12e5a8ee8..513a1a998 100644
--- a/kube/services/fence/fence-canary-deploy.yaml
+++ b/kube/services/fence/fence-canary-deploy.yaml
@@ -29,7 +29,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -38,6 +38,22 @@ spec:
values:
- fence
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: yaml-merge
diff --git a/kube/services/fence/fence-deploy.yaml b/kube/services/fence/fence-deploy.yaml
index e3543b502..9524315d9 100644
--- a/kube/services/fence/fence-deploy.yaml
+++ b/kube/services/fence/fence-deploy.yaml
@@ -35,7 +35,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -44,6 +44,22 @@ spec:
values:
- fence
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
# -----------------------------------------------------------------------------
@@ -275,6 +291,7 @@ spec:
if fence-create migrate --help > /dev/null 2>&1; then
if ! grep -E 'ENABLE_DB_MIGRATION"?: *false' /var/www/fence/fence-config.yaml; then
echo "Running db migration: fence-create migrate"
+ cd /fence
fence-create migrate
else
echo "Db migration disabled in fence-config"
diff --git a/kube/services/fenceshib/fenceshib-canary-deploy.yaml b/kube/services/fenceshib/fenceshib-canary-deploy.yaml
index 152edefec..74085009f 100644
--- a/kube/services/fenceshib/fenceshib-canary-deploy.yaml
+++ b/kube/services/fenceshib/fenceshib-canary-deploy.yaml
@@ -30,7 +30,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -39,6 +39,15 @@ spec:
values:
- fence
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: yaml-merge
diff --git a/kube/services/fenceshib/fenceshib-configmap.yaml b/kube/services/fenceshib/fenceshib-configmap.yaml
index 2412518c0..b8e55243d 100644
--- a/kube/services/fenceshib/fenceshib-configmap.yaml
+++ b/kube/services/fenceshib/fenceshib-configmap.yaml
@@ -231,48 +231,48 @@ data:
few exceptions for newer attributes where the name is the same for both versions. You will
usually want to uncomment or map the names for both SAML versions as a unit.
-->
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
@@ -286,7 +286,7 @@ data:
-
+
@@ -416,47 +416,51 @@ data:
- MIIGeDCCBWCgAwIBAgITKwAE3xjJ0BmsXYl8hwAAAATfGDANBgkqhkiG9w0BAQsF
- ADBOMRUwEwYKCZImiZPyLGQBGRYFTE9DQUwxHDAaBgoJkiaJk/IsZAEZFgxESEhT
- U0VDVVJJVFkxFzAVBgNVBAMTDk5JSC1EUEtJLUNBLTFBMB4XDTIxMDMyMzEwMjMz
- MloXDTIzMDMyMzEwMjMzMlowcDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAk1EMREw
- DwYDVQQHEwhCZXRoZXNkYTEMMAoGA1UEChMDSEhTMQwwCgYDVQQLEwNOSUgxJTAj
- BgNVBAMTHHdhbXNpZ25pbmdmZWRlcmF0aW9uLm5paC5nb3YwggEiMA0GCSqGSIb3
- DQEBAQUAA4IBDwAwggEKAoIBAQDrng8ItLe/PdN7+GT50g0xd4Kc5zVLk5JhHV/M
- C0ICo3ulYpNnK8f0vGYvKXhG9B4gyYjjAVgY8dHL1Yi9Vw4OCMHiAhT80qidFhah
- xdcz8EaKWueqlMV+SZ8/6luahSmYYjKHAxICMg253gHsG6A64pWBsf58fzOYeEV/
- HIItkthIJ7Rh71gXeZwmcir3fAve1sQXrgXsRb265yFQaxLrRI+QA7k+Tiemlt4+
- 7wBOXdROm0kxGJT6u6+IG8g2Qdbc1JWaAmwROGCByREQzfMNUVpXCXJHhKSrHype
- z8Z0o4p2sLXyOysbBAmNoShMhvaaPlsrJt7PyDN5uj6KaXNNAgMBAAGjggMrMIID
- JzAdBgNVHQ4EFgQUb/4wTaSXJ6P1tAmI8mWJhMv1VHowHwYDVR0jBBgwFoAUeWw4
- jBnSyRkHcaYQ+YnwrdCDBZMwggESBgNVHR8EggEJMIIBBTCCAQGggf6ggfuGgcFs
- ZGFwOi8vL0NOPU5JSC1EUEtJLUNBLTFBLENOPU5JSERQS0lDQVNWQyxDTj1DRFAs
- Q049UHVibGljJTIwS2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmln
- dXJhdGlvbixEQz1ESEhTU0VDVVJJVFksREM9TE9DQUw/Y2VydGlmaWNhdGVSZXZv
- Y2F0aW9uTGlzdD9iYXNlP29iamVjdENsYXNzPWNSTERpc3RyaWJ1dGlvblBvaW50
- hjVodHRwOi8vTklIRFBLSUNSTC5OSUguR09WL0NlcnREYXRhL05JSC1EUEtJLUNB
- LTFBLmNybDCCATkGCCsGAQUFBwEBBIIBKzCCAScwgbQGCCsGAQUFBzAChoGnbGRh
- cDovLy9DTj1OSUgtRFBLSS1DQS0xQSxDTj1BSUEsQ049UHVibGljJTIwS2V5JTIw
- U2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmlndXJhdGlvbixEQz1ESEhTU0VD
- VVJJVFksREM9TE9DQUw/Y0FDZXJ0aWZpY2F0ZT9iYXNlP29iamVjdENsYXNzPWNl
- cnRpZmljYXRpb25BdXRob3JpdHkwQQYIKwYBBQUHMAKGNWh0dHA6Ly9OSUhEUEtJ
- Q1JMLk5JSC5HT1YvQ2VydERhdGEvTklILURQS0ktQ0EtMUEuY3J0MCsGCCsGAQUF
- BzABhh9odHRwOi8vTklIRFBLSU9DU1AuTklILkdPVi9vY3NwMAsGA1UdDwQEAwIF
- oDA9BgkrBgEEAYI3FQcEMDAuBiYrBgEEAYI3FQiHscIohpH8F4b5jwiG7rxzgbud
- JR2F39lChY/gIQIBZQIBJDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEw
- JwYJKwYBBAGCNxUKBBowGDAKBggrBgEFBQcDAjAKBggrBgEFBQcDATANBgkqhkiG
- 9w0BAQsFAAOCAQEAkgyJY5Pdyz7hF83hu9BsijKHOdMWe8fDyN7GsDR1O0URBuJW
- oK7FsemmITwMCiDhH+NDkrRWM27EQhuv4w4yIUIFVqPeJS+Ff3gKyqB/VNcrDbfc
- 1RU7Q0qyxwpItm/cEUTTTnfNppf/O6wn/FUbpvPbHMNukqhjtbiYJrmKcO1U0lEu
- i7FlnPW6rRmEbhp/bChVJMkxw8sBH4K3Vrx9c15nPuBgv4E1cFLe1rwrt3wEeRlU
- OaWMTbLwYBaBo2BC3iDHzWioSl4OtzItEkT5XxNOhViuoty09Tu5zd7byqiV7To3
- YVc+Yi/VBubgB+osvPXPAv0AQCLo88dO7MBWQg==
+ MIIGrDCCBZSgAwIBAgITKwAL5UokKuFiZ7VPlQAAAAvlSjANBgkqhkiG9w0B
+ AQsFADBOMRUwEwYKCZImiZPyLGQBGRYFTE9DQUwxHDAaBgoJkiaJk/IsZAEZ
+ FgxESEhTU0VDVVJJVFkxFzAVBgNVBAMTDk5JSC1EUEtJLUNBLTFBMB4XDTIy
+ MTIwNjE2NTUzNloXDTI0MTIwNTE2NTUzNlowgaMxCzAJBgNVBAYTAlVTMREw
+ DwYDVQQIEwhNYXJ5bGFuZDERMA8GA1UEBxMIQmV0aGVzZGExDDAKBgNVBAoT
+ A05JSDEMMAoGA1UECxMDQ0lUMSUwIwYDVQQDExx3YW1zaWduaW5nZmVkZXJh
+ dGlvbi5uaWguZ292MSswKQYJKoZIhvcNAQkBFhxuaWhsb2dpbnN1cHBvcnRA
+ bWFpbC5uaWguZ292MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+ o3aHcoq0SAof+GXCl6aZOw9w8CrWTSxz3hxEvG2RaJ4Bm0+UQEcQHArCiQ+Y
+ Wjmx8eORRwOblQKmcozpQAOxNRu7fbJn8msdryKdju+nBJg/gn0Ygn44EJEq
+ pZmBn+FBRgH/lADRdpLM8uO654i1x5Pr8TQtNMevGNot8oiacOZkB1A5N6+l
+ 4guxToA2ZuNhHRhwrpd1wIyq6sgY3J8XpWlx54HjDc8bZvia0bEhJns/qZpM
+ mAh5wvIP1I2JngqJ55mpl/btbIXX+uTn3tIomWre3KKjDKh9ZjUQom8VqTzp
+ oGYHSjTExuopsHnnVpC1HTW0QJoxFa5yR1f2fiUTZwIDAQABo4IDKzCCAycw
+ HQYDVR0OBBYEFMqGnTB0W0rFy8tD2y6JnApAzRCyMB8GA1UdIwQYMBaAFHls
+ OIwZ0skZB3GmEPmJ8K3QgwWTMIIBEgYDVR0fBIIBCTCCAQUwggEBoIH+oIH7
+ hoHBbGRhcDovLy9DTj1OSUgtRFBLSS1DQS0xQSxDTj1OSUhEUEtJQ0FTVkMs
+ Q049Q0RQLENOPVB1YmxpYyUyMEtleSUyMFNlcnZpY2VzLENOPVNlcnZpY2Vz
+ LENOPUNvbmZpZ3VyYXRpb24sREM9REhIU1NFQ1VSSVRZLERDPUxPQ0FMP2Nl
+ cnRpZmljYXRlUmV2b2NhdGlvbkxpc3Q/YmFzZT9vYmplY3RDbGFzcz1jUkxE
+ aXN0cmlidXRpb25Qb2ludIY1aHR0cDovL05JSERQS0lDUkwuTklILkdPVi9D
+ ZXJ0RGF0YS9OSUgtRFBLSS1DQS0xQS5jcmwwggE5BggrBgEFBQcBAQSCASsw
+ ggEnMIG0BggrBgEFBQcwAoaBp2xkYXA6Ly8vQ049TklILURQS0ktQ0EtMUEs
+ Q049QUlBLENOPVB1YmxpYyUyMEtleSUyMFNlcnZpY2VzLENOPVNlcnZpY2Vz
+ LENOPUNvbmZpZ3VyYXRpb24sREM9REhIU1NFQ1VSSVRZLERDPUxPQ0FMP2NB
+ Q2VydGlmaWNhdGU/YmFzZT9vYmplY3RDbGFzcz1jZXJ0aWZpY2F0aW9uQXV0
+ aG9yaXR5MEEGCCsGAQUFBzAChjVodHRwOi8vTklIRFBLSUNSTC5OSUguR09W
+ L0NlcnREYXRhL05JSC1EUEtJLUNBLTFBLmNydDArBggrBgEFBQcwAYYfaHR0
+ cDovL05JSERQS0lPQ1NQLk5JSC5HT1Yvb2NzcDALBgNVHQ8EBAMCBaAwPQYJ
+ KwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIh7HCKIaR/BeG+Y8Ihu68c4G7nSUd
+ gZOnCYKOiSECAWQCAUwwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC
+ MCcGCSsGAQQBgjcVCgQaMBgwCgYIKwYBBQUHAwEwCgYIKwYBBQUHAwIwDQYJ
+ KoZIhvcNAQELBQADggEBAGxvrAxX3RUmFXeUa1UewCWfzWCnI3wTMKkqvmI2
+ CySFEOniXNXC/hhu0i000QD9mS527u+lGqgN6eaUaEaSDXMszYR753whJ1Wf
+ xJ50zji2mvUWDyzdRbcvxbVfYe6h6+TzQl0gd8z1DjAxkUWydv9aAFYHNiIY
+ BbhPqvrlOT+oV8CYI8ghEg7qyxo1mso99aVGCbnBA+6IC+jt8lvwQYFISW8J
+ lxJbz5P9fyAbQFuMvcvSkx1WWCCK+d3WsLzU2JETjmYNoID5skFaIfrq+rV1
+ nBqQfCSKApojRaUMwn83IRcosSu0Y3dhpmxz2oDkOURbwOkuPJRgYnZRLBDn
+ e50=
-
+
urn:oasis:names:tc:SAML:2.0:nameid-format:persistent
-
+
diff --git a/kube/services/fenceshib/fenceshib-deploy.yaml b/kube/services/fenceshib/fenceshib-deploy.yaml
index 528726262..ed5d67535 100644
--- a/kube/services/fenceshib/fenceshib-deploy.yaml
+++ b/kube/services/fenceshib/fenceshib-deploy.yaml
@@ -30,7 +30,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -39,6 +39,22 @@ spec:
values:
- fenceshib
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: yaml-merge
@@ -210,11 +226,10 @@ spec:
subPath: "incommon-login.bionimbus.org.crt"
resources:
requests:
- cpu: 0.8
- memory: 2400Mi
+ cpu: 100m
+ memory: 500Mi
limits:
- cpu: 2.0
- memory: 6400Mi
+ memory: 2400Mi
command: ["/bin/bash"]
args:
- "-c"
diff --git a/kube/services/fluentd/fluentd-eks-1.24.yaml b/kube/services/fluentd/fluentd-eks-1.24.yaml
new file mode 100644
index 000000000..1fb748840
--- /dev/null
+++ b/kube/services/fluentd/fluentd-eks-1.24.yaml
@@ -0,0 +1,86 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: fluentd
+ namespace: logging
+ labels:
+ k8s-app: fluentd-eks-1.24-logging
+ version: v1
+ GEN3_DATE_LABEL
+ kubernetes.io/cluster-service: "true"
+spec:
+ selector:
+ matchLabels:
+ k8s-app: fluentd-eks-1.24-logging
+ version: v1
+ template:
+ metadata:
+ labels:
+ k8s-app: fluentd-eks-1.24-logging
+ version: v1
+ kubernetes.io/cluster-service: "true"
+ spec:
+ priorityClassName: system-cluster-critical
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: "role"
+ operator: "Equal"
+ value: "jupyter"
+ effect: "NoSchedule"
+ - key: "role"
+ operator: "Equal"
+ value: "workflow"
+ effect: "NoSchedule"
+ containers:
+ - name: fluentd
+ # Hardcode fluentd version to ensure we don't run into containerd logging issues
+ image: fluent/fluentd-kubernetes-daemonset:v1.15.3-debian-cloudwatch-1.0
+ env:
+ # See https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Deploy with kube-setup-fluentd.sh ...
+ - name: LOG_GROUP_NAME
+ GEN3_LOG_GROUP_NAME
+ - name: AWS_REGION
+ value: "us-east-1"
+ - name: FLUENTD_CONF
+ value: "gen3.conf"
+ - name: FLUENT_CONTAINER_TAIL_PARSER_TYPE
+ value: "cri"
+ resources:
+ limits:
+ memory: 1Gi
+ requests:
+ cpu: 100m
+ memory: 1Gi
+ volumeMounts:
+ - name: fluentd-gen3
+ mountPath: /fluentd/etc/gen3.conf
+ subPath: gen3.conf
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ command: ["/bin/bash" ]
+ args:
+ - "-c"
+        # Run the image's stock fluentd entrypoint
+ - |
+ /fluentd/entrypoint.sh
+ terminationGracePeriodSeconds: 30
+ serviceAccountName: fluentd
+ volumes:
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ - name: fluentd-gen3
+ configMap:
+ name: fluentd-gen3
diff --git a/kube/services/fluentd/fluentd-karpenter.yaml b/kube/services/fluentd/fluentd-karpenter.yaml
new file mode 100644
index 000000000..807ef1198
--- /dev/null
+++ b/kube/services/fluentd/fluentd-karpenter.yaml
@@ -0,0 +1,95 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: fluentd-karpenter
+ namespace: logging
+ labels:
+ k8s-app: fluentd-karpenter-logging
+ version: v1
+ GEN3_DATE_LABEL
+ kubernetes.io/cluster-service: "true"
+spec:
+ selector:
+ matchLabels:
+ k8s-app: fluentd-karpenter-logging
+ version: v1
+ template:
+ metadata:
+ labels:
+ k8s-app: fluentd-karpenter-logging
+ version: v1
+ kubernetes.io/cluster-service: "true"
+ spec:
+ priorityClassName: system-cluster-critical
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: karpenter.sh/initialized
+ operator: In
+ values:
+ - "true"
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ - key: "role"
+ operator: "Equal"
+ value: "jupyter"
+ effect: "NoSchedule"
+ - key: "role"
+ operator: "Equal"
+ value: "workflow"
+ effect: "NoSchedule"
+ containers:
+ - name: fluentd
+ # Hardcode fluentd version to ensure we don't run into containerd logging issues
+ image: fluent/fluentd-kubernetes-daemonset:v1.15.3-debian-cloudwatch-1.0
+ env:
+ # See https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes
+ - name: K8S_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Deploy with kube-setup-fluentd.sh ...
+ - name: LOG_GROUP_NAME
+ GEN3_LOG_GROUP_NAME
+ - name: AWS_REGION
+ value: "us-east-1"
+ - name: FLUENTD_CONF
+ value: "gen3.conf"
+ - name: FLUENT_CONTAINER_TAIL_PARSER_TYPE
+ value: "cri"
+ resources:
+ limits:
+ memory: 1Gi
+ requests:
+ cpu: 100m
+ memory: 1Gi
+ volumeMounts:
+ - name: fluentd-gen3
+ mountPath: /fluentd/etc/gen3.conf
+ subPath: gen3.conf
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ command: ["/bin/bash" ]
+ args:
+ - "-c"
+        # Run the image's stock fluentd entrypoint
+ - |
+ /fluentd/entrypoint.sh
+ terminationGracePeriodSeconds: 30
+ serviceAccountName: fluentd
+ volumes:
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ - name: fluentd-gen3
+ configMap:
+ name: fluentd-gen3
diff --git a/kube/services/fluentd/fluentd.yaml b/kube/services/fluentd/fluentd.yaml
index f6526ea56..112a0cab2 100644
--- a/kube/services/fluentd/fluentd.yaml
+++ b/kube/services/fluentd/fluentd.yaml
@@ -20,6 +20,16 @@ spec:
version: v1
kubernetes.io/cluster-service: "true"
spec:
+ priorityClassName: system-cluster-critical
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: karpenter.sh/initialized
+ operator: NotIn
+ values:
+ - "true"
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
@@ -27,9 +37,14 @@ spec:
operator: "Equal"
value: "jupyter"
effect: "NoSchedule"
+ - key: "role"
+ operator: "Equal"
+ value: "workflow"
+ effect: "NoSchedule"
containers:
- name: fluentd
- GEN3_FLUENTD_IMAGE
+ # Hardcode fluentd version to match karpenter daemonset
+ image: fluent/fluentd-kubernetes-daemonset:v1.15.3-debian-cloudwatch-1.0
env:
# See https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes
- name: K8S_NODE_NAME
diff --git a/kube/services/fluentd/gen3-1.15.3.conf b/kube/services/fluentd/gen3-1.15.3.conf
new file mode 100644
index 000000000..d9b6bed5d
--- /dev/null
+++ b/kube/services/fluentd/gen3-1.15.3.conf
@@ -0,0 +1,231 @@
+#
+# Gen3 customization of fluent config.
+# - tries to extract structure from gen3 service logs
+# - includes the default conf at the bottom - just adds prefix rules
+#
+# Deploy by:
+# - mount this file into the container at /fluentd/etc/gen3.conf
+# - set environment variable FLUENTD_CONF=gen3.conf
+#
+# https://www.fluentd.org/guides/recipes/docker-logging
+# https://docs.fluentd.org/v0.12/articles/config-file#introduction:-the-life-of-a-fluentd-event
+# https://docs.fluentd.org/v1.0/articles/out_rewrite_tag_filter
+
+
+
+
+
+ @type tail
+ @id in_tail_container_logs
+ path /var/log/containers/*.log
+ pos_file /var/log/fluentd-containers.log.pos
+ tag "#{ENV['FLUENT_CONTAINER_TAIL_TAG'] || 'kubernetes.*'}"
+ exclude_path "#{ENV['FLUENT_CONTAINER_TAIL_EXCLUDE_PATH'] || use_default}"
+ read_from_head true
+
+ @type "#{ENV['FLUENT_CONTAINER_TAIL_PARSER_TYPE'] || 'json'}"
+ time_format %Y-%m-%dT%H:%M:%S.%NZ
+
+
+
+
+ @type tail
+ path /var/log/messages
+ pos_file /var/log/host-messages.log.pos
+
+ @type syslog
+
+ tag host.messages
+
+
+
+
+ @type tail
+ path /var/log/secure
+ pos_file /var/log/host-secure.log.pos
+
+ @type syslog
+
+ tag host.secure
+
+
+
+ @type tail
+ @id in_tail_docker
+ path /var/log/docker.log
+ pos_file /var/log/fluentd-docker.log.pos
+ tag docker
+
+ @type regexp
+ expression /^time="(?
+
+
+
+
+ @type tail
+ @id in_tail_kubelet
+ multiline_flush_interval 5s
+ path /var/log/kubelet.log
+ pos_file /var/log/fluentd-kubelet.log.pos
+ tag kubelet
+
+ @type kubernetes
+
+
+
+
+
+
+ @type kubernetes_metadata
+ @id filter_kube_metadata
+ kubernetes_url "#{ENV['FLUENT_FILTER_KUBERNETES_URL'] || 'https://' + ENV.fetch('KUBERNETES_SERVICE_HOST') + ':' + ENV.fetch('KUBERNETES_SERVICE_PORT') + '/api'}"
+ verify_ssl "#{ENV['KUBERNETES_VERIFY_SSL'] || true}"
+ ca_file "#{ENV['KUBERNETES_CA_FILE']}"
+ skip_labels "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_LABELS'] || 'false'}"
+ skip_container_metadata "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_CONTAINER_METADATA'] || 'false'}"
+ skip_master_url "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_MASTER_URL'] || 'false'}"
+ skip_namespace_metadata "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_NAMESPACE_METADATA'] || 'false'}"
+
+
+
+ @type null
+
+
+
+ @type null
+
+
+
+ @type rewrite_tag_filter
+
+ key $._HOSTNAME
+ pattern ^(.+)$
+ tag $1.docker
+
+
+
+
+ @type rewrite_tag_filter
+
+ key $._HOSTNAME
+ pattern ^(.+)$
+ tag $1.kubelet
+
+
+
+
+ @type rewrite_tag_filter
+
+ key $.host
+ pattern ^(.+)$
+ tag $1.messages
+
+
+
+
+ @type rewrite_tag_filter
+
+ key $.host
+ pattern ^(.+)$
+ tag $1.secure
+
+
+
+
+ @type rewrite_tag_filter
+
+ # json structured log - consider adoption a standard json schema:
+ # https://github.com/timberio/log-event-json-schema
+ key message
+ pattern /^\{\s*"gen3log":/
+ tag kubernetes.gen3.json.${tag}
+
+
+ # combined log format - default Apache and nginx structure
+ # https://httpd.apache.org/docs/1.3/logs.html#combined
+ key message
+ pattern /^(((\d+\.\d+\.\d+\.\d+)|-)\s+){2}\S+\s+\[\d\d?\//
+ tag kubernetes.gen3.combined.${tag}
+
+
+ # unstructured log line
+ key message
+ pattern /\S/
+ tag kubernetes.gen3.raw.${tag}
+
+
+
+
+
+ @type record_transformer
+
+ log_type json
+    # This doesn't work for some reason: ${record["kubernetes"]} adds the whole blob, but subobjects can't be accessed
+ #container_name ${record["kubernetes"]["container_name"]}
+
+
+
+
+ @type record_transformer
+
+ log_type combined
+
+
+
+
+ @type record_transformer
+
+ log_type raw
+
+
+
+
+ @type rewrite_tag_filter
+
+ key $.kubernetes.pod_name
+ pattern ^(.+)$
+ tag "#{Time.now.strftime('%Y-%m-%d')}.$1"
+
+#
+# key $.kubernetes
+# pattern ^(.+)$
+# tag $1.container_name
+#
+
+
+#
+# @type rewrite_tag_filter
+#
+# key $.kubernetes.container_name
+# pattern ^(.+)$
+ #tag $1.${tag}
+# tag ${tag}.$1
+#
+#
+
+# TODO:
+# * python stack traces: "Traceback (most recent call last):""
+# https://docs.fluentd.org/v0.12/articles/parser_multiline#formatn
+#
+# Idea: add `visitor` cookie to revproxy ...
+
+
+
+ @type cloudwatch_logs
+ @id out_cloudwatch_logs
+ log_group_name "#{ENV['LOG_GROUP_NAME']}"
+ auto_create_stream true
+ use_tag_as_stream true
+ retention_in_days "#{ENV['RETENTION_IN_DAYS'] || 'nil'}"
+ json_handler yajl # To avoid UndefinedConversionError
+ log_rejected_request "#{ENV['LOG_REJECTED_REQUEST']}" # Log rejected request for missing parts
+
+
+
+#@include fluent.conf
+#@include conf.d/*.conf
diff --git a/kube/services/frontend-framework/frontend-framework-deploy.yaml b/kube/services/frontend-framework/frontend-framework-deploy.yaml
index 843002844..f0da277dc 100644
--- a/kube/services/frontend-framework/frontend-framework-deploy.yaml
+++ b/kube/services/frontend-framework/frontend-framework-deploy.yaml
@@ -22,7 +22,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -31,6 +31,22 @@ spec:
values:
- frontend-framework
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: ca-volume
@@ -68,10 +84,9 @@ spec:
failureThreshold: 6
resources:
requests:
- cpu: 0.6
+ cpu: 100m
memory: 512Mi
limits:
- cpu: 2
memory: 4096Mi
ports:
- containerPort: 3000
diff --git a/kube/services/frontend-framework/frontend-framework-root-deploy.yaml b/kube/services/frontend-framework/frontend-framework-root-deploy.yaml
new file mode 100644
index 000000000..8cad981c8
--- /dev/null
+++ b/kube/services/frontend-framework/frontend-framework-root-deploy.yaml
@@ -0,0 +1,115 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: frontend-framework-deployment
+spec:
+ selector:
+ matchLabels:
+ app: frontend-framework
+ revisionHistoryLimit: 2
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 2
+ maxUnavailable: 25%
+ template:
+ metadata:
+ labels:
+ app: frontend-framework
+ public: "yes"
+ GEN3_DATE_LABEL
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 25
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - frontend-framework
+ topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ automountServiceAccountToken: false
+ volumes:
+ - name: ca-volume
+ secret:
+ secretName: "service-ca"
+ - name: config-volume
+ secret:
+ secretName: "frontend-framework-config"
+ - name: sponsor-img-volume
+ secret:
+ secretName: "frontend-framework-sponsor-config"
+ - name: privacy-policy
+ configMap:
+ name: "privacy-policy"
+ - name: cert-volume
+ secret:
+ secretName: "cert-portal-service"
+ containers:
+ - name: frontend-framework
+ GEN3_FRONTEND-FRAMEWORK_IMAGE
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 3000
+ initialDelaySeconds: 30
+ periodSeconds: 60
+ timeoutSeconds: 30
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 3000
+ initialDelaySeconds: 60
+ periodSeconds: 60
+ timeoutSeconds: 30
+ failureThreshold: 6
+ resources:
+ requests:
+ cpu: 0.6
+ memory: 512Mi
+ limits:
+ cpu: 2
+ memory: 4096Mi
+ ports:
+ - containerPort: 3000
+ command:
+ - /bin/bash
+ - ./start.sh
+ env:
+ - name: HOSTNAME
+ value: revproxy-service
+ - name: NEXT_PUBLIC_PORTAL_BASENAME
+ value: /portal
+ volumeMounts:
+ - name: "cert-volume"
+ readOnly: true
+ mountPath: "/mnt/ssl/service.crt"
+ subPath: "service.crt"
+ - name: "cert-volume"
+ readOnly: true
+ mountPath: "/mnt/ssl/service.key"
+ subPath: "service.key"
+ - name: "ca-volume"
+ readOnly: true
+ mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt"
+ subPath: "ca.pem"
+ imagePullPolicy: Always
diff --git a/kube/services/gdcapi/gdcapi-deploy.yaml b/kube/services/gdcapi/gdcapi-deploy.yaml
index cd397cab8..5967663f0 100644
--- a/kube/services/gdcapi/gdcapi-deploy.yaml
+++ b/kube/services/gdcapi/gdcapi-deploy.yaml
@@ -14,6 +14,23 @@ spec:
labels:
app: gdcapi
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
diff --git a/kube/services/gen3-discovery-ai/README.md b/kube/services/gen3-discovery-ai/README.md
new file mode 100644
index 000000000..4c20678e0
--- /dev/null
+++ b/kube/services/gen3-discovery-ai/README.md
@@ -0,0 +1,42 @@
+# Gen3 Discovery AI Configuration
+
+Expects data in a `gen3-discovery-ai` folder relative to
+where the `manifest.json` is.
+
+Basic setup:
+
+`{{dir where manifest.json is}}/gen3-discovery-ai/knowledge/`
+
+- `tsvs` folder
+ - tsvs with topic_name at beginning of file
+- `markdown` folder
+ - {{topic_name_1}}
+ - markdown file(s)
+ - {{topic_name_2}}
+ - markdown file(s)
+
+The `kube-setup-gen3-discovery-ai` script syncs the above `/knowledge` folder to
+an S3 bucket. The service configuration then pulls from the S3 bucket and runs load commands
+to get the data into chromadb.
+
+> Note: See the `gen3-discovery-ai` service repo docs and README for more details on data load capabilities.
+
+Check the `gen3-discovery-ai-deploy.yaml` for what commands are being run in the automation.
+
+Expects secrets set up in the `g3auto/gen3-discovery-ai` folder:
+ - `credentials.json`: Google service account key if using a topic with Google Vertex AI
+ - `env`: .env file contents for service configuration (see service repo for a default one)
+
+## Populating Disk for In-Memory Vectordb Chromadb
+
+In order to set up pre-configured topics, we need to load data
+into Chromadb (an in-memory vector database with an option to persist to disk).
+
+To load topics consistently, we set up an S3 bucket to house the persisted
+data for the vector database.
+
+### Getting data from S3 into memory
+
+We specify a path for Chromadb to use for persisted data; when it sees
+data there, it loads it in. So the deployment automation 1. syncs the bucket with `aws s3 sync`
+and then 2. calls a script to load the files from there into the in-memory vector store (sketched below).
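+
+For reference, a minimal sketch of those two steps as they appear in `gen3-discovery-ai-deploy.yaml` (the bucket and topic names below are illustrative):
+
+```bash
+# 1. sync the persisted knowledge data from the configured S3 bucket
+aws s3 sync "s3://<discovery-ai-knowledge-bucket>" /gen3discoveryai/knowledge/tmp
+
+# 2. load TSV topics, then each markdown topic directory, into the vector store
+poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py tsvs /gen3discoveryai/knowledge/tmp/tsvs
+poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py markdown --topic mytopic /gen3discoveryai/knowledge/tmp/markdown/mytopic
+```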
diff --git a/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml b/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml
new file mode 100644
index 000000000..dcfe03248
--- /dev/null
+++ b/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml
@@ -0,0 +1,181 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: gen3-discovery-ai-deployment
+spec:
+ selector:
+    # Select pods based on the 'app' and 'release' labels
+ matchLabels:
+ app: gen3-discovery-ai
+ release: production
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 0
+ template:
+ metadata:
+ labels:
+ app: gen3-discovery-ai
+ release: production
+ GEN3_DATE_LABEL
+ spec:
+ serviceAccountName: gen3-discovery-ai-sa
+ volumes:
+ - name: gen3-discovery-ai-g3auto-volume
+ secret:
+ secretName: gen3-discovery-ai-g3auto
+ - name: gen3-discovery-ai-knowledge-library-volume
+ emptyDir: {}
+ initContainers:
+ # chromadb's persisted disk support requires the ability to write. We don't technically need this ability
+ # since we're populating the entirety of the database from configured files (no live updates).
+ #
+ # Solution: utilize emptyDir as a writable space.
+ #
+      # Procedure: in the init containers, copy files from S3 to a writable
+      # temporary space in the emptyDir, use the files from that writable space
+      # to load the knowledge library, then move the final knowledge library
+      # files into the top-level emptyDir so they are available to the final container
+ - name: gen3-discovery-ai-aws-init
+ GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8080
+ env:
+ - name: GEN3_DEBUG
+ GEN3_DEBUG_FLAG|-value: "False"-|
+ volumeMounts:
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/.env
+ subPath: env
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/credentials.json
+ subPath: credentials.json
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/storage_config.json
+ subPath: storage_config.json
+ - name: gen3-discovery-ai-knowledge-library-volume
+ mountPath: /gen3discoveryai/knowledge
+ resources:
+ requests:
+ cpu: 1
+ limits:
+ cpu: 2
+ memory: 512Mi
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ bucketName=$(grep -o "\"bucket\": *\"[^\"]*\"" /gen3discoveryai/storage_config.json | awk -F'"' '{print $4}')
+ echo BUCKET: "$bucketName"
+ echo
+ echo BEFORE /gen3discoveryai/knowledge
+ ls -Ra /gen3discoveryai/knowledge
+ echo
+ echo syncing from s3
+ aws s3 sync "s3://${bucketName}" "/gen3discoveryai/knowledge/tmp"
+ echo
+ echo AFTER /gen3discoveryai/knowledge
+ ls -Ra /gen3discoveryai/knowledge
+ - name: gen3-discovery-ai-knowledge-init
+ GEN3_GEN3-DISCOVERY-AI_IMAGE
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8080
+ env:
+ - name: GEN3_DEBUG
+ GEN3_DEBUG_FLAG|-value: "False"-|
+ - name: ANONYMIZED_TELEMETRY
+ value: "False"
+ - name: GOOGLE_APPLICATION_CREDENTIALS
+ value: /gen3discoveryai/credentials.json
+ volumeMounts:
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/.env
+ subPath: env
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/credentials.json
+ subPath: credentials.json
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/storage_config.json
+ subPath: storage_config.json
+ - name: gen3-discovery-ai-knowledge-library-volume
+ mountPath: /gen3discoveryai/knowledge
+ resources:
+ requests:
+ cpu: 1
+ limits:
+ cpu: 2
+ memory: 512Mi
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ echo
+ echo BEFORE /gen3discoveryai/knowledge
+ ls -Ra /gen3discoveryai/knowledge
+ echo running load_into_knowledge_store.py
+ poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py tsvs /gen3discoveryai/knowledge/tmp/tsvs
+
+ if [ -d "/gen3discoveryai/knowledge/tmp/markdown" ]; then
+ for dir in "/gen3discoveryai/knowledge/tmp/markdown"/*; do
+ if [ -d "$dir" ]; then
+ dir_name=$(basename "$dir")
+
+ echo "Processing directory: $dir_name. Full path: $dir"
+ poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py markdown --topic $dir_name $dir
+ fi
+ done
+ else
+ echo "Not syncing markdown, directory not found: /gen3discoveryai/knowledge/tmp/markdown"
+ fi
+
+ rm -r /gen3discoveryai/knowledge/tmp/
+ echo
+ echo AFTER /gen3discoveryai/knowledge
+ ls -Ra /gen3discoveryai/knowledge
+ containers:
+ - name: gen3-discovery-ai
+ GEN3_GEN3-DISCOVERY-AI_IMAGE
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 8080
+ env:
+ - name: GEN3_DEBUG
+ GEN3_DEBUG_FLAG|-value: "False"-|
+ - name: ANONYMIZED_TELEMETRY
+ value: "False"
+ - name: GOOGLE_APPLICATION_CREDENTIALS
+ value: /gen3discoveryai/credentials.json
+ volumeMounts:
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/.env
+ subPath: env
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/credentials.json
+ subPath: credentials.json
+ - name: gen3-discovery-ai-g3auto-volume
+ readOnly: true
+ mountPath: /gen3discoveryai/storage_config.json
+ subPath: storage_config.json
+ - name: gen3-discovery-ai-knowledge-library-volume
+ mountPath: /gen3discoveryai/knowledge
+ resources:
+ requests:
+ cpu: 1
+ limits:
+ cpu: 2
+ # NOTE: If the configured data for the knowledge library (vector database) is large, you may need to bump this
+ memory: 512Mi
diff --git a/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml b/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml
new file mode 100644
index 000000000..b4734c3b8
--- /dev/null
+++ b/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml
@@ -0,0 +1,21 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: gen3-discovery-ai-service
+spec:
+ selector:
+ app: gen3-discovery-ai
+ release: production
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 8089
+ name: http
+ nodePort: null
+ - protocol: TCP
+ port: 443
+ targetPort: 443
+ name: https
+ nodePort: null
+ type: ClusterIP
+
diff --git a/kube/services/google-sa-validation/google-sa-validation-deploy.yaml b/kube/services/google-sa-validation/google-sa-validation-deploy.yaml
index 880ce5fb3..b35fda845 100644
--- a/kube/services/google-sa-validation/google-sa-validation-deploy.yaml
+++ b/kube/services/google-sa-validation/google-sa-validation-deploy.yaml
@@ -20,6 +20,23 @@ spec:
dbfence: "yes"
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: yaml-merge
diff --git a/kube/services/guppy/guppy-deploy.yaml b/kube/services/guppy/guppy-deploy.yaml
index 16486d3a9..c3e8d121c 100644
--- a/kube/services/guppy/guppy-deploy.yaml
+++ b/kube/services/guppy/guppy-deploy.yaml
@@ -27,7 +27,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -36,6 +36,22 @@ spec:
values:
- guppy
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: guppy-config
@@ -138,8 +154,7 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: 0.5
- memory: 1024Mi
+ cpu: 100m
+ memory: 256Mi
limits:
- cpu: 1
- memory: 2400Mi
+ memory: 2000Mi
diff --git a/kube/services/hatchery/hatchery-deploy.yaml b/kube/services/hatchery/hatchery-deploy.yaml
index 5ac1bb805..f7de81d79 100644
--- a/kube/services/hatchery/hatchery-deploy.yaml
+++ b/kube/services/hatchery/hatchery-deploy.yaml
@@ -28,7 +28,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -37,6 +37,22 @@ spec:
values:
- hatchery
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: hatchery-service-account
securityContext:
fsGroup: 1001
@@ -104,6 +120,18 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.hostIP
+ - name: PRISMA_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: prisma-secret
+ key: AccessKeyId
+ optional: true
+ - name: PRISMA_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: prisma-secret
+ key: SecretKey
+ optional: true
volumeMounts:
- name: hatchery-config
readOnly: true
diff --git a/kube/services/indexd/indexd-canary-deploy.yaml b/kube/services/indexd/indexd-canary-deploy.yaml
index 92c329f26..7e17ba9af 100644
--- a/kube/services/indexd/indexd-canary-deploy.yaml
+++ b/kube/services/indexd/indexd-canary-deploy.yaml
@@ -27,7 +27,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -36,6 +36,22 @@ spec:
values:
- indexd
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
diff --git a/kube/services/indexd/indexd-deploy.yaml b/kube/services/indexd/indexd-deploy.yaml
index c9961fba7..239079058 100644
--- a/kube/services/indexd/indexd-deploy.yaml
+++ b/kube/services/indexd/indexd-deploy.yaml
@@ -31,7 +31,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -40,6 +40,22 @@ spec:
values:
- indexd
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -152,8 +168,7 @@ spec:
subPath: "ca.pem"
resources:
requests:
- cpu: 0.5
- memory: 1024Mi
+ cpu: 100m
+ memory: 512Mi
limits:
- cpu: 1.0
- memory: 2048Mi
+ memory: 1024Mi
diff --git a/kube/services/influxdb/influxdb-deployment.yaml b/kube/services/influxdb/influxdb-deployment.yaml
index 72d4b57d7..3279e3c55 100644
--- a/kube/services/influxdb/influxdb-deployment.yaml
+++ b/kube/services/influxdb/influxdb-deployment.yaml
@@ -15,6 +15,23 @@ spec:
labels:
app: influxdb
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
containers:
- image: docker.io/influxdb:1.8.0
imagePullPolicy: IfNotPresent
diff --git a/kube/services/ingress/ingress.yaml b/kube/services/ingress/ingress.yaml
new file mode 100644
index 000000000..1db08e8ef
--- /dev/null
+++ b/kube/services/ingress/ingress.yaml
@@ -0,0 +1,27 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: gen3-ingress
+ annotations:
+ # TODO: Make this configurable
+ alb.ingress.kubernetes.io/scheme: internet-facing
+ alb.ingress.kubernetes.io/tags: Environment=$vpc_name
+ alb.ingress.kubernetes.io/certificate-arn: $ARN
+ alb.ingress.kubernetes.io/group.name: "$vpc_name"
+ alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
+ alb.ingress.kubernetes.io/load-balancer-attributes: idle_timeout.timeout_seconds=600
+ alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}'
+ alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS13-1-2-FIPS-2023-04
+spec:
+ ingressClassName: alb
+ rules:
+ - host: $GEN3_CACHE_HOSTNAME
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: revproxy-service
+ port:
+ number: 80
diff --git a/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml
index f7b874111..466e4a7df 100644
--- a/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml
+++ b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml
@@ -16,6 +16,20 @@ spec:
annotations:
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
serviceAccountName: jenkins-service
securityContext:
runAsUser: 1000
@@ -105,6 +119,7 @@ spec:
limits:
cpu: 0.9
memory: 4096Mi
+ ephemeral-storage: 500Mi
imagePullPolicy: Always
volumeMounts:
- name: "cert-volume"
diff --git a/kube/services/jenkins-worker/jenkins-worker-deploy.yaml b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml
index 4e13eea69..aea836a4f 100644
--- a/kube/services/jenkins-worker/jenkins-worker-deploy.yaml
+++ b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml
@@ -16,6 +16,20 @@ spec:
annotations:
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
serviceAccountName: jenkins-service
securityContext:
runAsUser: 1000
@@ -24,7 +38,8 @@ spec:
- args:
- -c
- |
- # fix permissions for /var/run/docker.sock
+ # fix permissions for docker and containerd
+ chmod 666 /var/run/containerd/containerd.sock
chmod 666 /var/run/docker.sock
echo "done"
command:
@@ -39,6 +54,8 @@ spec:
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
+ - mountPath: /var/run/containerd/containerd.sock
+ name: containerdsock
- mountPath: /var/run/docker.sock
name: dockersock
containers:
@@ -107,7 +124,7 @@ spec:
fieldPath: status.hostIP
resources:
limits:
- cpu: 0.6
+ cpu: "0.6"
memory: 2048Mi
imagePullPolicy: Always
volumeMounts:
@@ -125,7 +142,8 @@ spec:
subPath: "ca.pem"
- name: dockersock
mountPath: "/var/run/docker.sock"
- imagePullPolicy: Always
+ - name: containerdsock
+ mountPath: "/var/run/containerd/containerd.sock"
volumes:
- name: cert-volume
secret:
@@ -136,3 +154,6 @@ spec:
- name: dockersock
hostPath:
path: /var/run/docker.sock
+ - name: containerdsock
+ hostPath:
+ path: "/var/run/containerd/containerd.sock"
diff --git a/kube/services/jenkins/jenkins-deploy.yaml b/kube/services/jenkins/jenkins-deploy.yaml
index 2c6afb76d..954e996f2 100644
--- a/kube/services/jenkins/jenkins-deploy.yaml
+++ b/kube/services/jenkins/jenkins-deploy.yaml
@@ -24,6 +24,24 @@ spec:
annotations:
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - key: topology.kubernetes.io/zone
+ operator: In
+ values:
+ - us-east-1a
serviceAccountName: jenkins-service
securityContext:
runAsUser: 1000
@@ -97,8 +115,8 @@ spec:
port: 8080
resources:
limits:
- cpu: 0.9
- memory: 8192Mi
+ cpu: 2
+ memory: 6Gi
imagePullPolicy: Always
volumeMounts:
- name: datadir
diff --git a/kube/services/jenkins/rolebinding-devops.yaml b/kube/services/jenkins/rolebinding-devops.yaml
index 47c98e47b..dd99bdd86 100644
--- a/kube/services/jenkins/rolebinding-devops.yaml
+++ b/kube/services/jenkins/rolebinding-devops.yaml
@@ -11,3 +11,41 @@ roleRef:
kind: ClusterRole
name: admin
apiGroup: ""
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: argo-role-binding-CURRENT_NAMESPACE
+ namespace: argo
+subjects:
+- kind: ServiceAccount
+ name: gitops-sa
+ namespace: CURRENT_NAMESPACE
+ apiGroup: ""
+roleRef:
+ kind: ClusterRole
+ name: admin
+ apiGroup: ""
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: gitops-cluster-role-CURRENT_NAMESPACE
+rules:
+- apiGroups: [""]
+ resources: ["namespaces","services"]
+ verbs: ["get", "list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: gitops-cluster-binding-CURRENT_NAMESPACE
+subjects:
+- kind: ServiceAccount
+ name: gitops-sa
+ namespace: CURRENT_NAMESPACE
+ apiGroup: ""
+roleRef:
+ kind: ClusterRole
+ name: gitops-cluster-role-CURRENT_NAMESPACE
+ apiGroup: rbac.authorization.k8s.io
diff --git a/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml b/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml
new file mode 100644
index 000000000..7f4e58109
--- /dev/null
+++ b/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: jenkins-agent-service
+ name: jenkins-agent
+ namespace: default
+spec:
+ ports:
+ - name: slavelistener
+ port: 50000
+ protocol: TCP
+ targetPort: 50000
+ selector:
+ app: jenkins
+ sessionAffinity: None
+ type: ClusterIP
diff --git a/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml
new file mode 100644
index 000000000..3dea38a5c
--- /dev/null
+++ b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml
@@ -0,0 +1,149 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: jenkins-ci-worker-deployment
+spec:
+ selector:
+ # Only select pods based on the 'app' label
+ matchLabels:
+ app: jenkins-ci-worker
+ template:
+ metadata:
+ labels:
+ app: jenkins-ci-worker
+ # for network policy
+ netnolimit: "yes"
+ annotations:
+ "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ serviceAccountName: jenkins-service
+ securityContext:
+ runAsUser: 1000
+ fsGroup: 1000
+ initContainers:
+ - args:
+ - -c
+ - |
+ # fix permissions for /var/run/docker.sock
+ chmod 666 /var/run/docker.sock
+ echo "done"
+ command:
+ - /bin/bash
+ image: quay.io/cdis/awshelper:master
+ imagePullPolicy: Always
+ name: awshelper
+ resources: {}
+ securityContext:
+ allowPrivilegeEscalation: false
+ runAsUser: 0
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /var/run/docker.sock
+ name: dockersock
+ containers:
+ #
+ # See for details on running docker in a pod:
+ # https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b
+ #
+ - name: jenkins-worker
+ image: "quay.io/cdis/gen3-ci-worker:master"
+ ports:
+ - containerPort: 8080
+ env:
+ - name: JENKINS_URL
+ value: "https://jenkins2.planx-pla.net"
+ - name: JENKINS_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: jenkins-ci-worker-g3auto
+ key: jenkins-jnlp-agent-secret
+ - name: JENKINS_AGENT_NAME
+ value: "gen3-ci-worker"
+ - name: JENKINS_TUNNEL
+ value: "jenkins-agent:50000"
+ - name: AWS_DEFAULT_REGION
+ value: us-east-1
+ - name: JAVA_OPTS
+ value: "-Xmx3072m"
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: jenkins-secret
+ key: aws_access_key_id
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: jenkins-secret
+ key: aws_secret_access_key
+ - name: GOOGLE_EMAIL_AUX1
+ valueFrom:
+ secretKeyRef:
+ name: google-acct1
+ key: email
+ - name: GOOGLE_PASSWORD_AUX1
+ valueFrom:
+ secretKeyRef:
+ name: google-acct1
+ key: password
+ - name: GOOGLE_EMAIL_AUX2
+ valueFrom:
+ secretKeyRef:
+ name: google-acct2
+ key: email
+ - name: GOOGLE_PASSWORD_AUX2
+ valueFrom:
+ secretKeyRef:
+ name: google-acct2
+ key: password
+ - name: GOOGLE_APP_CREDS_JSON
+ valueFrom:
+ secretKeyRef:
+ name: jenkins-g3auto
+ key: google_app_creds.json
+ resources:
+ limits:
+ cpu: 0.9
+ memory: 4096Mi
+ ephemeral-storage: 500Mi
+ imagePullPolicy: Always
+ volumeMounts:
+ - name: "cert-volume"
+ readOnly: true
+ mountPath: "/mnt/ssl/service.crt"
+ subPath: "service.crt"
+ - name: "cert-volume"
+ readOnly: true
+ mountPath: "/mnt/ssl/service.key"
+ subPath: "service.key"
+ - name: "ca-volume"
+ readOnly: true
+ mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt"
+ subPath: "ca.pem"
+ - name: dockersock
+ mountPath: "/var/run/docker.sock"
+ volumes:
+ - name: cert-volume
+ secret:
+ secretName: "cert-jenkins-service"
+ - name: ca-volume
+ secret:
+ secretName: "service-ca"
+ - name: dockersock
+ hostPath:
+ path: /var/run/docker.sock
diff --git a/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml
new file mode 100644
index 000000000..047e4e966
--- /dev/null
+++ b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml
@@ -0,0 +1,12 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: datadir-jenkins-ci
+ annotations:
+ volume.beta.kubernetes.io/storage-class: gp2
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 200Gi
diff --git a/kube/services/jenkins2-worker/jenkins2-agent-service.yaml b/kube/services/jenkins2-worker/jenkins2-agent-service.yaml
new file mode 100644
index 000000000..7f4e58109
--- /dev/null
+++ b/kube/services/jenkins2-worker/jenkins2-agent-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: jenkins-agent-service
+ name: jenkins-agent
+ namespace: default
+spec:
+ ports:
+ - name: slavelistener
+ port: 50000
+ protocol: TCP
+ targetPort: 50000
+ selector:
+ app: jenkins
+ sessionAffinity: None
+ type: ClusterIP
diff --git a/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml b/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml
new file mode 100644
index 000000000..5646e8bc2
--- /dev/null
+++ b/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml
@@ -0,0 +1,152 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: jenkins-worker-deployment
+spec:
+ selector:
+ # Only select pods based on the 'app' label
+ matchLabels:
+ app: jenkins-worker
+ template:
+ metadata:
+ labels:
+ app: jenkins-worker
+ # for network policy
+ netnolimit: "yes"
+ annotations:
+ "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ serviceAccountName: jenkins-service
+ securityContext:
+ runAsUser: 1000
+ fsGroup: 1000
+ initContainers:
+ - args:
+ - -c
+ - |
+ # fix permissions for /var/run/docker.sock
+ chmod 666 /var/run/docker.sock
+ echo "done"
+ command:
+ - /bin/bash
+ image: quay.io/cdis/awshelper:master
+ imagePullPolicy: Always
+ name: awshelper
+ resources: {}
+ securityContext:
+ allowPrivilegeEscalation: false
+ runAsUser: 0
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /var/run/docker.sock
+ name: dockersock
+ containers:
+ #
+ # See for details on running docker in a pod:
+ # https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b
+ #
+ - name: jenkins-worker
+ image: "quay.io/cdis/gen3-qa-worker:master"
+ ports:
+ - containerPort: 8080
+ env:
+ - name: JENKINS_URL
+ value: "https://jenkins2.planx-pla.net"
+ - name: JENKINS_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: jenkins-worker-g3auto
+ key: jenkins-jnlp-agent-secret
+ - name: JENKINS_AGENT_NAME
+ value: "gen3-qa-worker"
+ - name: JENKINS_TUNNEL
+ value: "jenkins-agent:50000"
+ - name: AWS_DEFAULT_REGION
+ value: us-east-1
+ - name: JAVA_OPTS
+ value: "-Xmx3072m"
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ name: jenkins-secret
+ key: aws_access_key_id
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ name: jenkins-secret
+ key: aws_secret_access_key
+ - name: GOOGLE_EMAIL_AUX1
+ valueFrom:
+ secretKeyRef:
+ name: google-acct1
+ key: email
+ - name: GOOGLE_PASSWORD_AUX1
+ valueFrom:
+ secretKeyRef:
+ name: google-acct1
+ key: password
+ - name: GOOGLE_EMAIL_AUX2
+ valueFrom:
+ secretKeyRef:
+ name: google-acct2
+ key: email
+ - name: GOOGLE_PASSWORD_AUX2
+ valueFrom:
+ secretKeyRef:
+ name: google-acct2
+ key: password
+ - name: GOOGLE_APP_CREDS_JSON
+ valueFrom:
+ secretKeyRef:
+ name: jenkins-g3auto
+ key: google_app_creds.json
+ - name: DD_AGENT_HOST
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ resources:
+ limits:
+ cpu: 0.6
+ memory: 2048Mi
+ imagePullPolicy: Always
+ volumeMounts:
+ - name: "cert-volume"
+ readOnly: true
+ mountPath: "/mnt/ssl/service.crt"
+ subPath: "service.crt"
+ - name: "cert-volume"
+ readOnly: true
+ mountPath: "/mnt/ssl/service.key"
+ subPath: "service.key"
+ - name: "ca-volume"
+ readOnly: true
+ mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt"
+ subPath: "ca.pem"
+ - name: dockersock
+ mountPath: "/var/run/docker.sock"
+ volumes:
+ - name: cert-volume
+ secret:
+ secretName: "cert-jenkins-service"
+ - name: ca-volume
+ secret:
+ secretName: "service-ca"
+ - name: dockersock
+ hostPath:
+ path: /var/run/docker.sock
diff --git a/kube/services/jenkins2/jenkins2-deploy.yaml b/kube/services/jenkins2/jenkins2-deploy.yaml
index 673686d17..08365f811 100644
--- a/kube/services/jenkins2/jenkins2-deploy.yaml
+++ b/kube/services/jenkins2/jenkins2-deploy.yaml
@@ -24,6 +24,20 @@ spec:
annotations:
"cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ - matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
serviceAccountName: jenkins-service
securityContext:
runAsUser: 1000
@@ -34,7 +48,7 @@ spec:
# https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b
#
- name: jenkins
- GEN3_JENKINS_IMAGE
+ GEN3_JENKINS2_IMAGE
ports:
- containerPort: 8080
name: http
diff --git a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml
index 328894689..a72623736 100644
--- a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml
+++ b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml
@@ -1,4 +1,4 @@
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: arborist-rm-expired-access
@@ -14,6 +14,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: arborist-secret
secret:
diff --git a/kube/services/jobs/arborist-rm-expired-access-job.yaml b/kube/services/jobs/arborist-rm-expired-access-job.yaml
index 34833dded..6985906d0 100644
--- a/kube/services/jobs/arborist-rm-expired-access-job.yaml
+++ b/kube/services/jobs/arborist-rm-expired-access-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
dnsConfig:
options:
diff --git a/kube/services/jobs/arboristdb-create-job.yaml b/kube/services/jobs/arboristdb-create-job.yaml
index 74d7bebe4..7898a0c91 100644
--- a/kube/services/jobs/arboristdb-create-job.yaml
+++ b/kube/services/jobs/arboristdb-create-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: arborist-secret
diff --git a/kube/services/jobs/aws-bucket-replicate-job.yaml b/kube/services/jobs/aws-bucket-replicate-job.yaml
index d9f0f08ad..d3893d2bb 100644
--- a/kube/services/jobs/aws-bucket-replicate-job.yaml
+++ b/kube/services/jobs/aws-bucket-replicate-job.yaml
@@ -10,6 +10,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cred-volume
secret:
diff --git a/kube/services/jobs/bucket-manifest-job.yaml b/kube/services/jobs/bucket-manifest-job.yaml
index 98506331e..9cfbe054b 100644
--- a/kube/services/jobs/bucket-manifest-job.yaml
+++ b/kube/services/jobs/bucket-manifest-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: sa-#SA_NAME_PLACEHOLDER#
volumes:
- name: cred-volume
diff --git a/kube/services/jobs/bucket-replicate-job.yaml b/kube/services/jobs/bucket-replicate-job.yaml
index fbaf15816..0f7ae9260 100644
--- a/kube/services/jobs/bucket-replicate-job.yaml
+++ b/kube/services/jobs/bucket-replicate-job.yaml
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: batch-operations-account
securityContext:
fsGroup: 1000
diff --git a/kube/services/jobs/bucket-replication-job.yaml b/kube/services/jobs/bucket-replication-job.yaml
index 4ef56367e..c8e541d9e 100644
--- a/kube/services/jobs/bucket-replication-job.yaml
+++ b/kube/services/jobs/bucket-replication-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: sa-#SA_NAME_PLACEHOLDER#
volumes:
- name: cred-volume
diff --git a/kube/services/jobs/bucket-size-report-job.yaml b/kube/services/jobs/bucket-size-report-job.yaml
index 253d010e4..89d927f15 100644
--- a/kube/services/jobs/bucket-size-report-job.yaml
+++ b/kube/services/jobs/bucket-size-report-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
securityContext:
fsGroup: 1000
diff --git a/kube/services/jobs/cedar-ingestion-job.yaml b/kube/services/jobs/cedar-ingestion-job.yaml
new file mode 100644
index 000000000..f6be4dd23
--- /dev/null
+++ b/kube/services/jobs/cedar-ingestion-job.yaml
@@ -0,0 +1,157 @@
+#
+# run with:
+# gen3 job run cedar-ingestion [CEDAR_DIRECTORY_ID $cedar_directory_id]
+#
+# CEDAR_DIRECTORY_ID
+# The directory id will be read from 'directory_id.txt' in the
+# 'cedar-g3auto' secret.
+# You can override the secret value with an optional command line argument.
+#
+# The deployed CEDAR wrapper services must be able to read from this directory.
+#
+# ACCESS TOKENS
+# Access tokens will be generated for an existing fence-client, cedar_ingest_client.
+# The client_id and client_secret will be read from
+# 'cedar_client_credentials.json' in the 'cedar-g3auto' secret.
+#
+# The fence-client must have the MDS admin and CEDAR policies granted.
+#
+
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: cedar-ingestion
+spec:
+ backoffLimit: 0
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ serviceAccountName: useryaml-job
+ volumes:
+ - name: shared-data
+ emptyDir: {}
+ - name: cedar-client-volume-g3auto
+ secret:
+ secretName: cedar-g3auto # the secret name in kube
+ initContainers:
+ - name: cedar
+ image: quay.io/cdis/awshelper:master
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 80
+ env:
+ - name: HOSTNAME
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: hostname
+ - name: CEDAR_DIRECTORY_ID
+ GEN3_CEDAR_DIRECTORY_ID|-value: ""-|
+ - name: CEDAR_DIRECTORY_ID_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: cedar-g3auto
+ key: "directory_id.txt"
+ - name: CEDAR_CLIENT_CREDENTIALS
+ valueFrom:
+ secretKeyRef:
+ name: cedar-g3auto
+ key: "cedar_client_credentials.json"
+ volumeMounts:
+ - name: shared-data
+ mountPath: /mnt/shared
+ resources:
+ limits:
+ cpu: 1
+ memory: 5Gi
+
+ command: ["/bin/bash" ]
+ args:
+ - "-c"
+ - |
+ if [[ -z "$CEDAR_DIRECTORY_ID" ]]; then
+ if [[ ! -z "$CEDAR_DIRECTORY_ID_SECRET" ]]; then
+ echo "CEDAR_DIRECTORY_ID is from g3auto secret"
+ export CEDAR_DIRECTORY_ID=$CEDAR_DIRECTORY_ID_SECRET
+ else
+ echo -e "ERROR: CEDAR_DIRECTORY_ID must be in secret or on command line" 1>&2
+ exit 0
+ fi
+ else
+ echo "CEDAR_DIRECTORY_ID is from command line parameter"
+ fi
+
+ if [[ ! -z "$CEDAR_CLIENT_CREDENTIALS" ]]; then
+ export CEDAR_CLIENT_ID=$(echo $CEDAR_CLIENT_CREDENTIALS | jq -r .client_id)
+ export CEDAR_CLIENT_SECRET=$(echo $CEDAR_CLIENT_CREDENTIALS | jq -r .client_secret)
+ else
+ echo -e "Could not read cedar-client credentials" 1>&2
+ exit 0
+ fi
+
+ pip install pydash
+ export GEN3_HOME="$HOME/cloud-automation"
+ python ${GEN3_HOME}/files/scripts/healdata/heal-cedar-data-ingest.py --directory $CEDAR_DIRECTORY_ID --cedar_client_id $CEDAR_CLIENT_ID --cedar_client_secret $CEDAR_CLIENT_SECRET --hostname $HOSTNAME
+ status=$?
+ if [[ $status -ne 0 ]]; then
+ echo "WARNING: non zero exit code: $status"
+ else
+ echo "All done - exit code: $status"
+ touch /mnt/shared/success
+ fi
+ containers:
+ - name: awshelper
+ env:
+ - name: slackWebHook
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: hostname
+ GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
+ volumeMounts:
+ - name: shared-data
+ mountPath: /mnt/shared
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ if [[ ! "$slackWebHook" =~ ^http ]]; then
+ echo "Slack webhook not set"
+ exit 0
+ fi
+ if ! [ -f /mnt/shared/success ]; then
+ success="FAILED"
+ color="ff0000"
+ else
+ success="SUCCESS"
+ color="2EB67D"
+ fi
+ echo "Sending ${success} message to slack..."
+ payload="{\"attachments\": [{\"fallback\": \"JOB ${success}: cedar-ingest cronjob on ${gen3Env}\",\"color\": \"#${color}\",\"title\": \"JOB ${success}: cedar-ingest cronjob on ${gen3Env}\",\"text\": \"Pod name: ${HOSTNAME}\",\"ts\": \"$(date +%s)\"}]}"
+ echo "Payload=${payload}"
+ curl -X POST --data-urlencode "payload=${payload}" "${slackWebHook}"
+ restartPolicy: Never
diff --git a/kube/services/jobs/client-modify-job.yaml b/kube/services/jobs/client-modify-job.yaml
index 995fdd483..5726092be 100644
--- a/kube/services/jobs/client-modify-job.yaml
+++ b/kube/services/jobs/client-modify-job.yaml
@@ -11,6 +11,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: yaml-merge
configMap:
diff --git a/kube/services/jobs/cogwheel-register-client-job.yaml b/kube/services/jobs/cogwheel-register-client-job.yaml
index 3458ef163..1bdbf906d 100644
--- a/kube/services/jobs/cogwheel-register-client-job.yaml
+++ b/kube/services/jobs/cogwheel-register-client-job.yaml
@@ -17,13 +17,30 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cogwheel-g3auto
secret:
secretName: cogwheel-g3auto
containers:
- name: cogwheel
- GEN3_COGWHEEL_IMAGE
+ GEN3_COGWHEEL_IMAGE|-image: quay.io/cdis/cogwheel:master-|
imagePullPolicy: Always
env:
- name: HOSTNAME
diff --git a/kube/services/jobs/config-fence-job.yaml b/kube/services/jobs/config-fence-job.yaml
index 7fd655937..38be19d61 100644
--- a/kube/services/jobs/config-fence-job.yaml
+++ b/kube/services/jobs/config-fence-job.yaml
@@ -18,6 +18,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: shared-data
diff --git a/kube/services/jobs/covid19-bayes-cronjob.yaml b/kube/services/jobs/covid19-bayes-cronjob.yaml
index 951668b0c..01e71bade 100644
--- a/kube/services/jobs/covid19-bayes-cronjob.yaml
+++ b/kube/services/jobs/covid19-bayes-cronjob.yaml
@@ -1,5 +1,5 @@
# gen3 job run covid19-bayes-cronjob S3_BUCKET
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: covid19-bayes
@@ -16,6 +16,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: s3-access-opencdn-databucket-gen3
restartPolicy: Never
nodeSelector:
diff --git a/kube/services/jobs/covid19-bayes-job.yaml b/kube/services/jobs/covid19-bayes-job.yaml
index a47ed9fc5..0afc186b9 100644
--- a/kube/services/jobs/covid19-bayes-job.yaml
+++ b/kube/services/jobs/covid19-bayes-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: s3-access-opencdn-databucket-gen3
restartPolicy: Never
containers:
diff --git a/kube/services/jobs/covid19-etl-job.yaml b/kube/services/jobs/covid19-etl-job.yaml
index d94c24808..dd2f6571f 100644
--- a/kube/services/jobs/covid19-etl-job.yaml
+++ b/kube/services/jobs/covid19-etl-job.yaml
@@ -10,6 +10,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: s3-access-opencdn-databucket-gen3
volumes:
- name: cred-volume
diff --git a/kube/services/jobs/covid19-notebook-etl-job.yaml b/kube/services/jobs/covid19-notebook-etl-job.yaml
index 3d22b0240..e482c0505 100644
--- a/kube/services/jobs/covid19-notebook-etl-job.yaml
+++ b/kube/services/jobs/covid19-notebook-etl-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: s3-access-opencdn-databucket-gen3
volumes:
- name: cred-volume
diff --git a/kube/services/jobs/data-ingestion-job.yaml b/kube/services/jobs/data-ingestion-job.yaml
index 9530d0c8c..797b18912 100644
--- a/kube/services/jobs/data-ingestion-job.yaml
+++ b/kube/services/jobs/data-ingestion-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
volumes:
- name: shared-data
diff --git a/kube/services/jobs/distribute-licenses-job.yaml b/kube/services/jobs/distribute-licenses-job.yaml
index 8418f08e7..1c2ad4284 100644
--- a/kube/services/jobs/distribute-licenses-job.yaml
+++ b/kube/services/jobs/distribute-licenses-job.yaml
@@ -19,6 +19,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: hatchery-service-account
containers:
@@ -31,6 +48,11 @@ spec:
configMapKeyRef:
name: manifest-hatchery
key: "user-namespace"
+ - name: GEN3_STATA_LICENSE
+ valueFrom:
+ secretKeyRef:
+ name: stata-workspace-gen3-license-g3auto
+ key: "stata_license.txt"
command: ["python"]
args:
- "-c"
@@ -55,7 +77,7 @@ spec:
for container in pod.get('spec', {}).get('containers', []):
- if "stata-heal" in container['image']:
+ if "jupyter-pystata-gen3-licensed" in container['image']:
existing_license_id = pod.get("metadata", {}).get("annotations", {}).get("stata-license")
@@ -79,15 +101,14 @@ spec:
used_licenses.sort()
print(f"Licenses currently in use: {used_licenses}")
- # This is a free trial license for demo purposes only
- # Todo: store, mount licenses secret
- license_file = """
- 501709301583!$n1d p$53 zvqe 2sfz jzft 7aei e8yL 8ue$ j38b!snic!first line!second line!2100!
- 501709301583!$n1d p$53 zvqe 2sfz jzft 7aei e8yL 8ue$ j38b!snic!first line!second line!2100!
- 501709301583!$n1d p$53 zvqe 2sfz jzft 7aei e8yL 8ue$ j38b!snic!first line!second line!2100!
- """.strip()
+ # The Gen3 Stata license strings should be stored in a kubernetes secret using g3auto.
+ # The format of the secret is one license string per line.
+ # The license strings are generated with 'stinit' using the information in a license PDF.
+ license_secrets = os.environ['GEN3_STATA_LICENSE']
+ license_secrets = license_secrets.strip()
- licenses = license_file.split("\n")
+ licenses = license_secrets.split("\n")
+ print(f"Number of licenses = {len(licenses)}")
available_license_ids = [
license_id for license_id, license in enumerate(licenses)
if license_id not in used_licenses
diff --git a/kube/services/jobs/ecr-access-job.yaml b/kube/services/jobs/ecr-access-job.yaml
new file mode 100644
index 000000000..89bb49d6d
--- /dev/null
+++ b/kube/services/jobs/ecr-access-job.yaml
@@ -0,0 +1,83 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: ecr-access
+spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ restartPolicy: Never
+ serviceAccountName: ecr-access-job-sa
+ securityContext:
+ fsGroup: 1000
+ containers:
+ - name: awshelper
+ GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
+ imagePullPolicy: Always
+ resources:
+ limits:
+ cpu: 0.5
+ memory: 1Gi
+ env:
+ - name: SLACK_WEBHOOK
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ optional: true
+ - name: HOSTNAME
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: hostname
+ - name: PAY_MODELS_DYNAMODB_TABLE
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-hatchery
+ key: pay-models-dynamodb-table
+ optional: true
+ - name: ECR_ACCESS_JOB_ARN
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: ecr-access-job-role-arn
+ optional: true
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ cd cloud-automation/files/scripts/
+ echo Installing requirements...
+ pip3 install -r ecr-access-job-requirements.txt
+ python3 ecr-access-job.py
+ exitcode=$?
+
+ if [[ "${SLACK_WEBHOOK}" != 'None' ]]; then
+ if [[ $exitcode == 1 ]]; then
+ curl -X POST --data-urlencode "payload={\"text\": \"JOBFAIL: ECR access job on ${HOSTNAME}\"}" "${SLACK_WEBHOOK}"
+ else
+ curl -X POST --data-urlencode "payload={\"text\": \"SUCCESS: ECR access job on ${HOSTNAME}\"}" "${SLACK_WEBHOOK}"
+ fi
+ fi
+
+ echo "Exit code: $exitcode"
+ exit "$exitcode"
diff --git a/kube/services/jobs/envtest-job.yaml b/kube/services/jobs/envtest-job.yaml
index 6f2c72383..382b725ff 100644
--- a/kube/services/jobs/envtest-job.yaml
+++ b/kube/services/jobs/envtest-job.yaml
@@ -10,6 +10,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
restartPolicy: Never
automountServiceAccountToken: false
containers:
diff --git a/kube/services/jobs/es-garbage-job.yaml b/kube/services/jobs/es-garbage-job.yaml
index 13385f446..9d5dcf33f 100644
--- a/kube/services/jobs/es-garbage-job.yaml
+++ b/kube/services/jobs/es-garbage-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: gitops-sa
securityContext:
diff --git a/kube/services/jobs/etl-cronjob.yaml b/kube/services/jobs/etl-cronjob.yaml
index f7ca5fd5b..3c3828dac 100644
--- a/kube/services/jobs/etl-cronjob.yaml
+++ b/kube/services/jobs/etl-cronjob.yaml
@@ -1,4 +1,4 @@
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: etl
@@ -15,6 +15,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: creds-volume
secret:
@@ -62,7 +79,7 @@ spec:
valueFrom:
configMapKeyRef:
name: global
- key: environment
+ key: hostname
volumeMounts:
- name: "creds-volume"
readOnly: true
@@ -78,8 +95,10 @@ spec:
subPath: user.yaml
resources:
limits:
- cpu: 1
+ cpu: 2
memory: 10Gi
+ requests:
+ cpu: 2
command: ["/bin/bash"]
args:
- "-c"
diff --git a/kube/services/jobs/etl-job.yaml b/kube/services/jobs/etl-job.yaml
index d9af1df0d..266b0410c 100644
--- a/kube/services/jobs/etl-job.yaml
+++ b/kube/services/jobs/etl-job.yaml
@@ -2,6 +2,8 @@
apiVersion: batch/v1
kind: Job
metadata:
+ annotations:
+ karpenter.sh/do-not-evict: "true"
name: etl
spec:
backoffLimit: 0
@@ -10,6 +12,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: creds-volume
secret:
@@ -46,6 +65,11 @@ spec:
value: 6g
- name: ETL_FORCED
GEN3_ETL_FORCED|-value: "TRUE"-|
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: hostname
- name: slackWebHook
valueFrom:
configMapKeyRef:
@@ -67,8 +91,10 @@ spec:
subPath: user.yaml
resources:
limits:
- cpu: 1
+ cpu: 2
memory: 10Gi
+ requests:
+ cpu: 2
command: ["/bin/bash" ]
args:
- "-c"
diff --git a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml
index bce341aac..93eaf7652 100644
--- a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml
+++ b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: fence-cleanup-expired-ga4gh-info
@@ -16,6 +16,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml
index bed88c308..afeaebf72 100644
--- a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml
+++ b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/fence-db-migrate-job.yaml b/kube/services/jobs/fence-db-migrate-job.yaml
index 22b9dcbad..53dda3e21 100644
--- a/kube/services/jobs/fence-db-migrate-job.yaml
+++ b/kube/services/jobs/fence-db-migrate-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
@@ -30,11 +47,6 @@ spec:
GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
imagePullPolicy: Always
env:
- - name: gen3Env
- valueFrom:
- configMapKeyRef:
- name: global
- key: environment
- name: JENKINS_HOME
value: ""
- name: GEN3_NOPROXY
@@ -89,6 +101,7 @@ spec:
- |
echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml"
python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml
+ cd /fence
fence-create migrate
if [[ $? != 0 ]]; then
echo "WARNING: non zero exit code: $?"
@@ -98,11 +111,6 @@ spec:
GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
imagePullPolicy: Always
env:
- - name: gen3Env
- valueFrom:
- configMapKeyRef:
- name: global
- key: environment
- name: JENKINS_HOME
value: ""
- name: GEN3_NOPROXY
diff --git a/kube/services/jobs/fence-delete-expired-clients-job.yaml b/kube/services/jobs/fence-delete-expired-clients-job.yaml
new file mode 100644
index 000000000..9252f6828
--- /dev/null
+++ b/kube/services/jobs/fence-delete-expired-clients-job.yaml
@@ -0,0 +1,78 @@
+# Delete all expired Fence OIDC clients and optionally post about expired clients on Slack.
+# To set up as a daily cronjob: `gen3 job cron fence-delete-expired-clients "0 7 * * *"`
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: fence-delete-expired-clients
+spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ serviceAccountName: useryaml-job
+ volumes:
+ - name: yaml-merge
+ configMap:
+ name: "fence-yaml-merge"
+ - name: config-volume
+ secret:
+ secretName: "fence-config"
+ containers:
+ - name: fence
+ GEN3_FENCE_IMAGE
+ imagePullPolicy: Always
+ env:
+ - name: PYTHONPATH
+ value: /var/www/fence
+ - name: FENCE_PUBLIC_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-fence
+ key: fence-config-public.yaml
+ optional: true
+ - name: slackWebHook
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ volumeMounts:
+ - name: "config-volume"
+ readOnly: true
+ mountPath: "/var/www/fence/fence-config-secret.yaml"
+ subPath: fence-config.yaml
+ - name: "yaml-merge"
+ readOnly: true
+ mountPath: "/var/www/fence/yaml_merge.py"
+ subPath: yaml_merge.py
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml"
+ python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml
+ if [[ "$slackWebHook" =~ ^http ]]; then
+ fence-create client-delete-expired --slack-webhook $slackWebHook --warning-days 7
+ else
+ fence-create client-delete-expired
+ fi
+ exit $?
+ restartPolicy: Never
diff --git a/kube/services/jobs/fence-visa-update-cronjob.yaml b/kube/services/jobs/fence-visa-update-cronjob.yaml
index 5409da672..eba842ddf 100644
--- a/kube/services/jobs/fence-visa-update-cronjob.yaml
+++ b/kube/services/jobs/fence-visa-update-cronjob.yaml
@@ -1,4 +1,4 @@
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: fence-visa-update
@@ -15,6 +15,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
@@ -56,7 +73,7 @@ spec:
valueFrom:
configMapKeyRef:
name: global
- key: environment
+ key: hostname
- name: FENCE_PUBLIC_CONFIG
valueFrom:
configMapKeyRef:
diff --git a/kube/services/jobs/fence-visa-update-job.yaml b/kube/services/jobs/fence-visa-update-job.yaml
index a34c9cff7..973ba2e3d 100644
--- a/kube/services/jobs/fence-visa-update-job.yaml
+++ b/kube/services/jobs/fence-visa-update-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
@@ -50,7 +67,7 @@ spec:
valueFrom:
configMapKeyRef:
name: global
- key: environment
+ key: hostname
- name: FENCE_PUBLIC_CONFIG
valueFrom:
configMapKeyRef:
diff --git a/kube/services/jobs/fencedb-create-job.yaml b/kube/services/jobs/fencedb-create-job.yaml
index 7b3417c7e..a99c7aca3 100644
--- a/kube/services/jobs/fencedb-create-job.yaml
+++ b/kube/services/jobs/fencedb-create-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: creds-volume
diff --git a/kube/services/jobs/fluentd-restart-job.yaml b/kube/services/jobs/fluentd-restart-job.yaml
index 5c984b7ae..e843d9c68 100644
--- a/kube/services/jobs/fluentd-restart-job.yaml
+++ b/kube/services/jobs/fluentd-restart-job.yaml
@@ -10,6 +10,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: fluentd-restart
containers:
diff --git a/kube/services/jobs/gdcdb-create-job.yaml b/kube/services/jobs/gdcdb-create-job.yaml
index 2ceb333b0..1668429ad 100644
--- a/kube/services/jobs/gdcdb-create-job.yaml
+++ b/kube/services/jobs/gdcdb-create-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: creds-volume
diff --git a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml
index 45ccab34d..87ebc56be 100644
--- a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml
+++ b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml
@@ -8,9 +8,26 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
containers:
- name: gen3qa-check-bucket-access
- GEN3_GEN3_QA_CONTROLLER_IMAGE|-image: quay.io/cdis/gen3-qa-controller:0.5-|
+ GEN3_GEN3_QA_CONTROLLER_IMAGE|-image: quay.io/cdis/gen3-qa-controller:fix_gen3qa_get_check-|
workingDir: /var/sdet_home
imagePullPolicy: Always
env:
@@ -116,14 +133,14 @@ spec:
fi
fi
echo "generate access token"
- echo "fence-create --path fence token-create --type access_token --username $TEST_OPERATOR --scopes openid,user,test-client --exp $TOKEN_EXPIRATION"
+ echo "fence-create --path fence token-create --type access_token --username $TEST_OPERATOR --scopes openid,user,test-client,data --exp $TOKEN_EXPIRATION"
tempFile="$(mktemp -p /tmp token.txt_XXXXXX)"
success=false
count=0
sleepTime=10
# retry loop
while [[ $count -lt 3 && $success == false ]]; do
- if fence-create --path fence token-create --type access_token --username $TEST_OPERATOR --scopes openid,user,test-client --exp $TOKEN_EXPIRATION > "$tempFile"; then
+ if fence-create --path fence token-create --type access_token --username $TEST_OPERATOR --scopes openid,user,test-client,data --exp $TOKEN_EXPIRATION > "$tempFile"; then
echo "fence-create success!"
tail -1 "$tempFile" > /mnt/shared/access_token.txt
# base64 --decode complains about invalid characters - don't know why
diff --git a/kube/services/jobs/gentestdata-job.yaml b/kube/services/jobs/gentestdata-job.yaml
index b0c856e91..db2fcd82d 100644
--- a/kube/services/jobs/gentestdata-job.yaml
+++ b/kube/services/jobs/gentestdata-job.yaml
@@ -34,6 +34,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/gitops-sync-job.yaml b/kube/services/jobs/gitops-sync-job.yaml
index 6044aff01..664bdf4c1 100644
--- a/kube/services/jobs/gitops-sync-job.yaml
+++ b/kube/services/jobs/gitops-sync-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: gitops-sa
securityContext:
diff --git a/kube/services/jobs/google-bucket-manifest-job.yaml b/kube/services/jobs/google-bucket-manifest-job.yaml
index dcd6cd35e..619c1c03e 100644
--- a/kube/services/jobs/google-bucket-manifest-job.yaml
+++ b/kube/services/jobs/google-bucket-manifest-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cred-volume
secret:
diff --git a/kube/services/jobs/google-bucket-replicate-job.yaml b/kube/services/jobs/google-bucket-replicate-job.yaml
index f61a47868..7e9b2e0a7 100644
--- a/kube/services/jobs/google-bucket-replicate-job.yaml
+++ b/kube/services/jobs/google-bucket-replicate-job.yaml
@@ -12,6 +12,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cred-volume
secret:
diff --git a/kube/services/jobs/google-create-bucket-job.yaml b/kube/services/jobs/google-create-bucket-job.yaml
index eed19dfbb..6e3f248a7 100644
--- a/kube/services/jobs/google-create-bucket-job.yaml
+++ b/kube/services/jobs/google-create-bucket-job.yaml
@@ -47,6 +47,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-delete-expired-access-cronjob.yaml b/kube/services/jobs/google-delete-expired-access-cronjob.yaml
index a491865c3..2b9e4e49a 100644
--- a/kube/services/jobs/google-delete-expired-access-cronjob.yaml
+++ b/kube/services/jobs/google-delete-expired-access-cronjob.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: google-delete-expired-access
@@ -16,6 +16,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-delete-expired-access-job.yaml b/kube/services/jobs/google-delete-expired-access-job.yaml
index 24e00742c..c50272254 100644
--- a/kube/services/jobs/google-delete-expired-access-job.yaml
+++ b/kube/services/jobs/google-delete-expired-access-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml
index cbe8c049c..b40e22624 100644
--- a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml
+++ b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml
@@ -1,6 +1,6 @@
---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: batch/v1 CronJob requires Kubernetes 1.21+
+apiVersion: batch/v1
kind: CronJob
metadata:
name: google-delete-expired-service-account
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-delete-expired-service-account-job.yaml b/kube/services/jobs/google-delete-expired-service-account-job.yaml
index 99a7f8749..04c19f9e7 100644
--- a/kube/services/jobs/google-delete-expired-service-account-job.yaml
+++ b/kube/services/jobs/google-delete-expired-service-account-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml
index 2453f5009..6b4fc10aa 100644
--- a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml
+++ b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml
@@ -1,6 +1,6 @@
---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: batch/v1 CronJob requires Kubernetes 1.21+
+apiVersion: batch/v1
kind: CronJob
metadata:
name: google-init-proxy-groups
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-init-proxy-groups-job.yaml b/kube/services/jobs/google-init-proxy-groups-job.yaml
index b342c7db5..3fa0eb63d 100644
--- a/kube/services/jobs/google-init-proxy-groups-job.yaml
+++ b/kube/services/jobs/google-init-proxy-groups-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-manage-account-access-cronjob.yaml b/kube/services/jobs/google-manage-account-access-cronjob.yaml
index 856c3b056..fd8bba606 100644
--- a/kube/services/jobs/google-manage-account-access-cronjob.yaml
+++ b/kube/services/jobs/google-manage-account-access-cronjob.yaml
@@ -1,6 +1,6 @@
---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: batch/v1 CronJob requires Kubernetes 1.21+
+apiVersion: batch/v1
kind: CronJob
metadata:
name: google-manage-account-access
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-manage-account-access-job.yaml b/kube/services/jobs/google-manage-account-access-job.yaml
index 09259088c..d7f6204a0 100644
--- a/kube/services/jobs/google-manage-account-access-job.yaml
+++ b/kube/services/jobs/google-manage-account-access-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-manage-keys-cronjob.yaml b/kube/services/jobs/google-manage-keys-cronjob.yaml
index ee92611ba..eff76d30a 100644
--- a/kube/services/jobs/google-manage-keys-cronjob.yaml
+++ b/kube/services/jobs/google-manage-keys-cronjob.yaml
@@ -1,6 +1,6 @@
---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: batch/v1 CronJob requires Kubernetes 1.21+
+apiVersion: batch/v1
kind: CronJob
metadata:
name: google-manage-keys
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-manage-keys-job.yaml b/kube/services/jobs/google-manage-keys-job.yaml
index 64773af34..84c855fb6 100644
--- a/kube/services/jobs/google-manage-keys-job.yaml
+++ b/kube/services/jobs/google-manage-keys-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml
index b8bc21f88..49e83374f 100644
--- a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml
+++ b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml
@@ -1,6 +1,6 @@
---
-# Note: change to batch/v1beta1 once we bump to k8s 1.8
-apiVersion: batch/v1beta1
+# Note: batch/v1 CronJob requires Kubernetes 1.21+
+apiVersion: batch/v1
kind: CronJob
metadata:
name: google-verify-bucket-access-group
@@ -17,6 +17,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/google-verify-bucket-access-group-job.yaml b/kube/services/jobs/google-verify-bucket-access-group-job.yaml
index 3f756eaa5..93eae91dc 100644
--- a/kube/services/jobs/google-verify-bucket-access-group-job.yaml
+++ b/kube/services/jobs/google-verify-bucket-access-group-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
diff --git a/kube/services/jobs/graph-create-job.yaml b/kube/services/jobs/graph-create-job.yaml
index 6fd859cc2..f6595cdd2 100644
--- a/kube/services/jobs/graph-create-job.yaml
+++ b/kube/services/jobs/graph-create-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: creds-volume
diff --git a/kube/services/jobs/hatchery-metrics-job.yaml b/kube/services/jobs/hatchery-metrics-job.yaml
index 3a4e571f6..26f5ad973 100644
--- a/kube/services/jobs/hatchery-metrics-job.yaml
+++ b/kube/services/jobs/hatchery-metrics-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: hatchery-service-account
securityContext:
diff --git a/kube/services/jobs/hatchery-reaper-job.yaml b/kube/services/jobs/hatchery-reaper-job.yaml
index 9278fb727..77d249e37 100644
--- a/kube/services/jobs/hatchery-reaper-job.yaml
+++ b/kube/services/jobs/hatchery-reaper-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: hatchery-service-account
securityContext:
@@ -41,7 +58,60 @@ spec:
- |
export GEN3_HOME="$HOME/cloud-automation"
source "$GEN3_HOME/gen3/gen3setup.sh"
+ # 60 minute idle timeout max
+ limit=3600
+ namespace=$(gen3 jupyter j-namespace)
+ remote_users=$(kubectl get svc -n $namespace -o json | jq -r . | jq -r '.items[].metadata.annotations."getambassador.io/config"' | yq -r .headers.remote_user)
+
+ # helper function to construct service name
+ function escape() {
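+ # (keeps [a-z0-9] as-is and hex-encodes every other character as "-<hex>", so the derived name is safe to use in a service name)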
+ string="$1"
+ shift
+ safeBytes="abcdefghijklmnopqrstuvwxyz0123456789"
+ retString=""
+ while read -n 1 char ; do
+ if [[ $safeBytes == *"$char"* ]]; then
+ retString+=$char
+ else
+ hex=$(printf "%02x" "'${char}'")
+ retString+="-"$hex
+ fi
+ done <<< "$string"
+ echo $retString
+ }
+
+ for user in $remote_users; do
+ gen3_log_info "Checking possible workspaces to reap for $user"
+ status=$(curl -s -H "REMOTE_USER: $user" hatchery-service/status | jq -r .status)
+ if [ "$status" == "Running" ] || [ "$status" == "Launching" ]; then
+ gen3_log_info "$user has workspace that is $status"
+ serviceName=h-$(escape $user)-s
+ service="ambassador-service"
+ status_code=$(curl -s -w '%{http_code}' -o status.json -H "REMOTE_USER: $user" $service/api/status)
+ if [ "$status_code" == "200" ]; then
+ last_activity=$(curl -s -H "REMOTE_USER: $user" $service/api/status | jq -r .last_activity )
+ now=$(date +%s)
+ delta=$(expr $now - $(date -d "$last_activity" +%s))
+ gen3_log_info "Workspace for $user has been idle for $delta seconds"
+ if [ "$delta" -gt "$limit" ]; then
+ gen3_log_info "Workspace for $user has been idle for $delta seconds, which is above the ${limit}-second limit... Terminating"
+ curl -XPOST -s -H "REMOTE_USER: $user" hatchery-service/terminate
+ fi
+ else
+ gen3_log_err "Error: Got HTTP status $status_code trying to get last_activity for $user. Not able to reap workspace"
+ fi
+ gen3_log_info "Checking if paymodel for $user is above limit"
+ pm_status=$(curl -s -H "REMOTE_USER: $user" hatchery-service/paymodels | jq -r .request_status)
+ if [ "$pm_status" == "above limit" ]; then
+ gen3_log_warn "Paymodel status is above limit for user: $user. Reaping the workspace"
+ curl -XPOST -s -H "REMOTE_USER: $user" hatchery-service/terminate
+ fi
+ fi
+ done
+
+ # legacy reaper code
+ gen3_log_info "Running legacy reaper job (based on local cluster/ prometheus)"
if appList="$(gen3 jupyter idle none "$(gen3 db namespace)" kill)" && [[ -n "$appList" && -n "$slackWebHook" && "$slackWebHook" != "None" ]]; then
curl -X POST --data-urlencode "payload={\"text\": \"hatchery-reaper in $gen3Hostname: \n\`\`\`\n${appList}\n\`\`\`\"}" "${slackWebHook}"
fi
- echo "All Done!"
+ gen3_log_info "All Done!"
\ No newline at end of file
diff --git a/kube/services/jobs/healthcheck-cronjob.yaml b/kube/services/jobs/healthcheck-cronjob.yaml
index 25888f32c..1ca71fc8d 100644
--- a/kube/services/jobs/healthcheck-cronjob.yaml
+++ b/kube/services/jobs/healthcheck-cronjob.yaml
@@ -1,4 +1,4 @@
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: healthcheck
@@ -15,6 +15,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
restartPolicy: Never
serviceAccountName: jenkins-service
containers:
diff --git a/kube/services/jobs/indexd-authz-job.yaml b/kube/services/jobs/indexd-authz-job.yaml
index a3fbb8658..8b041740e 100644
--- a/kube/services/jobs/indexd-authz-job.yaml
+++ b/kube/services/jobs/indexd-authz-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: config-volume
diff --git a/kube/services/jobs/indexd-userdb-job.yaml b/kube/services/jobs/indexd-userdb-job.yaml
index e018f7a34..676307481 100644
--- a/kube/services/jobs/indexd-userdb-job.yaml
+++ b/kube/services/jobs/indexd-userdb-job.yaml
@@ -16,6 +16,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: config-volume
diff --git a/kube/services/jobs/metadata-aggregate-sync-job.yaml b/kube/services/jobs/metadata-aggregate-sync-job.yaml
index 58ce0b332..7f4043753 100644
--- a/kube/services/jobs/metadata-aggregate-sync-job.yaml
+++ b/kube/services/jobs/metadata-aggregate-sync-job.yaml
@@ -8,6 +8,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: config-volume-g3auto
secret:
@@ -20,7 +37,9 @@ spec:
configMap:
name: manifest-metadata
optional: true
- containers:
+ - name: shared-data
+ emptyDir: {}
+ initContainers:
- name: metadata
GEN3_METADATA_IMAGE
volumeMounts:
@@ -36,6 +55,8 @@ spec:
readOnly: true
mountPath: /metadata.json
subPath: json
+ - name: shared-data
+ mountPath: /mnt/shared
env:
- name: GEN3_DEBUG
GEN3_DEBUG_FLAG|-value: "False"-|
@@ -53,10 +74,57 @@ spec:
name: manifest-metadata
key: AGG_MDS_NAMESPACE
optional: true
+ - name: AGG_MDS_DEFAULT_DATA_DICT_FIELD
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-metadata
+ key: AGG_MDS_DEFAULT_DATA_DICT_FIELD
+ optional: true
imagePullPolicy: Always
- command: ["/bin/ash"]
+ command: ["/bin/sh"]
args:
- "-c"
- |
/env/bin/python /src/src/mds/populate.py --config /aggregate_config.json
+ if [ $? -ne 0 ]; then
+ echo "WARNING: populate.py exited with a non-zero code"
+ else
+ touch /mnt/shared/success
+ fi
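+ # The awshelper container below looks for /mnt/shared/success (touched above on a clean run) and,
+ # if a Slack webhook is configured, posts a SUCCESS or FAILED message for the sync.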
+ containers:
+ - name: awshelper
+ env:
+ - name: slackWebHook
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-global
+ key: hostname
+ GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
+ volumeMounts:
+ - name: shared-data
+ mountPath: /mnt/shared
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ if [[ ! "$slackWebHook" =~ ^http ]]; then
+ echo "Slack webhook not set"
+ exit 0
+ fi
+ if ! [ -f /mnt/shared/success ]; then
+ success="FAILED"
+ color="ff0000"
+ else
+ success="SUCCESS"
+ color="2EB67D"
+ fi
+ echo "Sending ${success} message to slack..."
+ payload="{\"attachments\": [{\"fallback\": \"JOB ${success}: metadata-aggregate-sync cronjob on ${gen3Env}\",\"color\": \"#${color}\",\"title\": \"JOB ${success}: metadata-aggregate-sync cronjob on ${gen3Env}\",\"text\": \"Pod name: ${HOSTNAME}\",\"ts\": \"$(date +%s)\"}]}"
+ echo "Payload=${payload}"
+ curl -X POST --data-urlencode "payload=${payload}" "${slackWebHook}"
restartPolicy: Never
diff --git a/kube/services/jobs/metadata-delete-expired-objects-job.yaml b/kube/services/jobs/metadata-delete-expired-objects-job.yaml
new file mode 100644
index 000000000..221b964a0
--- /dev/null
+++ b/kube/services/jobs/metadata-delete-expired-objects-job.yaml
@@ -0,0 +1,33 @@
+# Delete all expired MDS objects.
+#
+# Run `gen3 kube-setup-metadata-delete-expired-objects-job` to configure this job
+# and set it up as a cronjob.
+#
+# Add the job image to the manifest:
+# `"metadata-delete-expired-objects": "quay.io/cdis/metadata-delete-expired-objects:master"`
+#
+# Once set up, the job can be run with `gen3 job run metadata-delete-expired-objects-job`.
+
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: metadata-delete-expired-objects
+spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ volumes:
+ - name: config-volume
+ secret:
+ secretName: "metadata-delete-expired-objects-g3auto"
+ containers:
+ - name: metadata-delete-expired-objects
+ GEN3_METADATA-DELETE-EXPIRED-OBJECTS_IMAGE
+ imagePullPolicy: Always
+ volumeMounts:
+ - name: config-volume
+ readOnly: true
+ mountPath: /mnt
+ restartPolicy: Never
diff --git a/kube/services/jobs/opencost-report-argo-job.yaml b/kube/services/jobs/opencost-report-argo-job.yaml
new file mode 100644
index 000000000..788bd1dec
--- /dev/null
+++ b/kube/services/jobs/opencost-report-argo-job.yaml
@@ -0,0 +1,91 @@
+#
+# run with:
+# gen3 job run opencost-report-argo \
+# BUCKET_NAME $GEN3_BUCKET_NAME \
+# OPENCOST_URL $OPENCOST_URL \
+#
+# BUCKET_NAME (required)
+# Name of the bucket to upload the generated reports to.
+# Make sure that there is a service account called "reports-service-account" with access to this bucket.
+#
+# OPENCOST_URL (optional)
+# URL used to query the OpenCost API. Default is https://kubecost-cost-analyzer.kubecost
+#
+# CHANNEL (optional)
+# The Slack channel ID that the alert will be sent to. The easiest way to find it is to open Slack in a browser,
+# navigate to the channel, and copy the ID (it starts with "C") from the end of the URL.
+#
+# Example
+# gen3 job run opencost-report-argo BUCKET_NAME opencost-report-bucket
+#
+# Cronjob Example
+# gen3 job cron opencost-report-argo @daily BUCKET_NAME opencost-report-bucket
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: opencost-report-argo
+spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ serviceAccountName: reports-service-account
+ containers:
+ - name: send-report
+ GEN3_OPENCOST-REPORTER_IMAGE|-image: quay.io/cdis/proto-opencost-reporter:master-|
+ imagePullPolicy: Always
+ env:
+ - name: OPENCOST_URL
+ GEN3_OPENCOST_URL|-value: https://kubecost-cost-analyzer.kubecost-|
+ - name: ENV
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: environment
+ - name: BUCKET_NAME
+ GEN3_BUCKET_NAME|-value: ""-|
+ - name: slackWebHook
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: slack_webhook
+ optional: true
+ - name: channel
+ GEN3_CHANNEL|-value: ""-|
+ command: [ "/bin/bash" ]
+ args:
+ - "-c"
+ - |
+ proto-opencost-reporter GetAllocationReport \
+ --from_days_before 2 \
+ --to_days_before 1 \
+ --aggregate_by label:gen3username label:workflows.argoproj.io/workflow \
+ --filter_namespaces argo \
+ --share_idle_by_node
+ rc=$?
+ if [[ "${slackWebHook}" != 'None' ]]; then
+ if [ $rc != 0 ]; then
+ curl -X POST --data-urlencode "payload={\"text\": \"OPENCOST-REPORT-JOB-FAILED: Opencost report job failed to create a report\", \"channel\": \"${channel}\", \"username\": \"opencost-report-job\"}" "${slackWebHook}";
+ else
+ curl -X POST --data-urlencode "payload={\"text\": \"OPENCOST-REPORT-JOB-SUCCEEDED: Opencost report job created report\", \"channel\": \"${channel}\", \"username\": \"opencost-report-job\"}" "${slackWebHook}"
+ fi
+ fi
+ restartPolicy: Never
diff --git a/kube/services/jobs/psql-db-dump-va-testing-job.yaml b/kube/services/jobs/psql-db-dump-va-testing-job.yaml
new file mode 100644
index 000000000..8a8037e16
--- /dev/null
+++ b/kube/services/jobs/psql-db-dump-va-testing-job.yaml
@@ -0,0 +1,80 @@
+---
+# NOTE: This job was created specifically to dump all the databases in va-testing, in preparation for a move to a second cluster.
+# If you aren't doing that, this probably is not the job you're looking for.
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: psql-db-dump-va-testing
+spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ serviceAccountName: dbbackup-sa
+ containers:
+ - name: pgdump
+ image: quay.io/cdis/awshelper:master
+ imagePullPolicy: Always
+ env:
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: environment
+ - name: JENKINS_HOME
+ value: "devterm"
+ - name: GEN3_HOME
+ value: /home/ubuntu/cloud-automation
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ source "${GEN3_HOME}/gen3/lib/utils.sh"
+ gen3_load "gen3/gen3setup"
+ account_id=$(aws sts get-caller-identity --query "Account" --output text)
+ default_bucket_name="gen3-db-backups-${account_id}"
+ default_databases=("fence" "indexd" "sheepdog" "peregrine" "arborist" "argo" "atlas" "metadata" "ohdsi" "omop-data" "wts")
+ s3_dir="va-testing-$(date +"%Y-%m-%d-%H-%M-%S")"
+ databases=("${default_databases[@]}")
+ bucket_name=$default_bucket_name
+
+ for database in "${databases[@]}"; do
+ gen3_log_info "Starting database backup for ${database}"
+ gen3 db backup "${database}" > "${database}.sql"
+
+ if [ $? -eq 0 ] && [ -f "${database}.sql" ]; then
+ gen3_log_info "Uploading backup file ${database}.sql to s3://${bucket_name}/${s3_dir}/${database}.sql"
+ aws s3 cp "${database}.sql" "s3://${bucket_name}/${s3_dir}/${database}.sql"
+
+ if [ $? -eq 0 ]; then
+ gen3_log_info "Successfully uploaded ${database}.sql to S3"
+ else
+ gen3_log_err "Failed to upload ${database}.sql to S3"
+ fi
+ gen3_log_info "Deleting temporary backup file ${database}.sql"
+ rm -f "${database}.sql"
+ else
+ gen3_log_err "Backup operation failed for ${database}"
+ rm -f "${database}.sql"
+ fi
+ done
+ sleep 600
+ restartPolicy: Never
diff --git a/kube/services/jobs/psql-db-prep-dump-job.yaml b/kube/services/jobs/psql-db-prep-dump-job.yaml
new file mode 100644
index 000000000..86c513b78
--- /dev/null
+++ b/kube/services/jobs/psql-db-prep-dump-job.yaml
@@ -0,0 +1,79 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: psql-db-prep-dump
+spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ serviceAccountName: dbbackup-sa
+ containers:
+ - name: pgdump
+ image: quay.io/cdis/awshelper:master
+ imagePullPolicy: Always
+ env:
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: environment
+ - name: JENKINS_HOME
+ value: "devterm"
+ - name: GEN3_HOME
+ value: /home/ubuntu/cloud-automation
+ command: [ "/bin/bash" ]
+ args:
+ - "-c"
+ - |
+ source "${GEN3_HOME}/gen3/lib/utils.sh"
+ gen3_load "gen3/gen3setup"
+ account_id=$(aws sts get-caller-identity --query "Account" --output text)
+ default_bucket_name="gen3-db-backups-${account_id}"
+ default_databases=("indexd" "sheepdog" "metadata")
+ s3_dir="$(date +"%Y-%m-%d-%H-%M-%S")"
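+ # timestamped prefix; the restore job picks the newest backup by sorting these prefixes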
+ databases=("${default_databases[@]}")
+ bucket_name=$default_bucket_name
+
+ for database in "${databases[@]}"; do
+ gen3_log_info "Starting database backup for ${database}"
+ gen3 db backup "${database}" > "${database}.sql"
+
+ if [ $? -eq 0 ] && [ -f "${database}.sql" ]; then
+ gen3_log_info "Uploading backup file ${database}.sql to s3://${bucket_name}/${s3_dir}/${database}.sql"
+ aws s3 cp "${database}.sql" "s3://${bucket_name}/${s3_dir}/${database}.sql"
+
+ if [ $? -eq 0 ]; then
+ gen3_log_info "Successfully uploaded ${database}.sql to S3"
+ else
+ gen3_log_err "Failed to upload ${database}.sql to S3"
+ fi
+ gen3_log_info "Deleting temporary backup file ${database}.sql"
+ rm -f "${database}.sql"
+ else
+ gen3_log_err "Backup operation failed for ${database}"
+ rm -f "${database}.sql"
+ fi
+ done
+ sleep 600
+ restartPolicy: Never
+
diff --git a/kube/services/jobs/psql-db-prep-restore-job.yaml b/kube/services/jobs/psql-db-prep-restore-job.yaml
new file mode 100644
index 000000000..710e6f4f1
--- /dev/null
+++ b/kube/services/jobs/psql-db-prep-restore-job.yaml
@@ -0,0 +1,90 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: psql-db-prep-restore
+spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ serviceAccountName: dbbackup-sa
+ containers:
+ - name: pgrestore
+ image: quay.io/cdis/awshelper:master
+ imagePullPolicy: Always
+ env:
+ - name: gen3Env
+ valueFrom:
+ configMapKeyRef:
+ name: global
+ key: environment
+ - name: JENKINS_HOME
+ value: "devterm"
+ - name: GEN3_HOME
+ value: /home/ubuntu/cloud-automation
+ command: [ "/bin/bash" ]
+ args:
+ - "-c"
+ - |
+ source "${GEN3_HOME}/gen3/lib/utils.sh"
+ gen3_load "gen3/gen3setup"
+ account_id=$(aws sts get-caller-identity --query "Account" --output text)
+ default_bucket_name="gen3-db-backups-${account_id}"
+ default_databases=("indexd" "sheepdog" "metadata")
+ backup_directories=$(aws s3 ls "s3://${default_bucket_name}/")
+ newest_directory=$(echo "$backup_directories" | awk '/PRE/ {if ($2 > max) max = $2} END {print max}')
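+ # prefixes are timestamped (YYYY-MM-DD-HH-MM-SS), so the lexicographically greatest "PRE" entry is the most recent backup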
+ databases=("${default_databases[@]}")
+ bucket_name=$default_bucket_name
+ namespace=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
+ date_str=$(date -u +%y%m%d_%H%M%S)
+ gen3_log_info "Database backup location in S3: ${bucket_name}/${newest_directory}"
+ gen3_log_info "namespace: $namespace \n\n"
+
+ for database in "${databases[@]}"; do
+ gen3_log_info "Downloading database backup file s3://${default_bucket_name}/${newest_directory}${database}.sql"
+ aws s3 cp "s3://${default_bucket_name}/${newest_directory}${database}.sql" "${database}.sql"
+ server=$(gen3 db creds "$database" | jq -r '.g3FarmServer')
+ username=$(gen3 db creds "$database" | jq -r '.db_username')
+ db_name="${namespace}_${database}_${date_str}"
+ if [[ -z "$server" || -z "$username" ]]; then
+ gen3_log_err "Unable to extract server name or username for ${database}; skipping."
+ continue
+ fi
+ gen3 psql $database -c "create database $db_name;" 2>&1 | grep -q "permission denied"
+ if [ $? -eq 0 ]; then
+ gen3_log_info "User does not have permission to create database. Granting required permission..."
+ gen3 psql $server -c "alter user $username createdb;"
+ gen3 psql $database -c "create database $db_name;"
+ if [ $? -eq 0 ]; then
+ gen3_log_info "Database $db_name created successfully!"
+ else
+ gen3_log_info "Error creating database $db_name after granting permission."
+ fi
+ else
+ gen3_log_info "Database $db_name created successfully!"
+ fi
+ gen3_log_info "Starting database restore for ${database} to database $db_name"
+ gen3 psql "$database" -d "$db_name" -f "${database}.sql" 1>&2
+ gen3_log_info "cleanup temporary backup file ${database}.sql \n\n\n"
+ done
+ sleep 600
+ restartPolicy: Never
diff --git a/kube/services/jobs/psql-fix-job.yaml b/kube/services/jobs/psql-fix-job.yaml
index 20f453c2a..40fa74b96 100644
--- a/kube/services/jobs/psql-fix-job.yaml
+++ b/kube/services/jobs/psql-fix-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: jenkins-service
containers:
- name: fix
diff --git a/kube/services/jobs/remove-objects-from-clouds-job.yaml b/kube/services/jobs/remove-objects-from-clouds-job.yaml
index 46aa3d43f..b839b24e7 100644
--- a/kube/services/jobs/remove-objects-from-clouds-job.yaml
+++ b/kube/services/jobs/remove-objects-from-clouds-job.yaml
@@ -11,6 +11,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cred-volume
secret:
diff --git a/kube/services/jobs/replicate-validation-job.yaml b/kube/services/jobs/replicate-validation-job.yaml
index 13f767d69..d64cfcc13 100644
--- a/kube/services/jobs/replicate-validation-job.yaml
+++ b/kube/services/jobs/replicate-validation-job.yaml
@@ -11,6 +11,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: aws-cred-volume
secret:
diff --git a/kube/services/jobs/s3sync-cronjob.yaml b/kube/services/jobs/s3sync-cronjob.yaml
index 14053492f..69d66ec3f 100644
--- a/kube/services/jobs/s3sync-cronjob.yaml
+++ b/kube/services/jobs/s3sync-cronjob.yaml
@@ -5,7 +5,7 @@
#####REQUIRED VARIABLE########
#SOURCE_BUCKET
#TARGET_BUCKET
-apiVersion: batch/v1beta1
+apiVersion: batch/v1
kind: CronJob
metadata:
name: s3sync
@@ -21,6 +21,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
volumes:
- name: cred-volume
secret:
@@ -40,7 +57,7 @@ spec:
valueFrom:
configMapKeyRef:
name: global
- key: environment
+ key: hostname
- name: SOURCE_BUCKET
GEN3_SOURCE_BUCKET
- name: TARGET_BUCKET
diff --git a/kube/services/jobs/usersync-job.yaml b/kube/services/jobs/usersync-job.yaml
index 915f1a588..8a5471a20 100644
--- a/kube/services/jobs/usersync-job.yaml
+++ b/kube/services/jobs/usersync-job.yaml
@@ -31,6 +31,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: useryaml-job
volumes:
- name: yaml-merge
@@ -75,7 +92,7 @@ spec:
configMap:
name: "projects"
containers:
- - name: fence
+ - name: usersync
GEN3_FENCE_IMAGE
imagePullPolicy: Always
env:
@@ -108,7 +125,7 @@ spec:
valueFrom:
configMapKeyRef:
name: global
- key: environment
+ key: hostname
- name: FENCE_PUBLIC_CONFIG
valueFrom:
configMapKeyRef:
@@ -243,7 +260,7 @@ spec:
exit 1
fi
#-----------------
- echo "awshelper downloading ${userYamlS3Path} to /mnt/shared/useryaml";
+ echo "awshelper downloading ${userYamlS3Path} to /mnt/shared/user.yaml";
n=0
until [ $n -ge 5 ]; do
echo "Download attempt $n"
@@ -277,7 +294,7 @@ spec:
valueFrom:
configMapKeyRef:
name: global
- key: environment
+ key: hostname
- name: slackWebHook
valueFrom:
configMapKeyRef:
diff --git a/kube/services/jobs/useryaml-job.yaml b/kube/services/jobs/useryaml-job.yaml
index bf3812951..5853a05c4 100644
--- a/kube/services/jobs/useryaml-job.yaml
+++ b/kube/services/jobs/useryaml-job.yaml
@@ -9,6 +9,23 @@ spec:
labels:
app: gen3job
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: false
volumes:
- name: yaml-merge
diff --git a/kube/services/jupyterhub/jupyterhub-deploy.yaml b/kube/services/jupyterhub/jupyterhub-deploy.yaml
index b2b96ff75..38b2cd41d 100644
--- a/kube/services/jupyterhub/jupyterhub-deploy.yaml
+++ b/kube/services/jupyterhub/jupyterhub-deploy.yaml
@@ -18,6 +18,23 @@ spec:
userhelper: "yes"
GEN3_DATE_LABEL
spec:
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
serviceAccountName: jupyter-service
volumes:
- name: config-volume
diff --git a/kube/services/karpenter-reconciler/application.yaml b/kube/services/karpenter-reconciler/application.yaml
new file mode 100644
index 000000000..fb0fab871
--- /dev/null
+++ b/kube/services/karpenter-reconciler/application.yaml
@@ -0,0 +1,22 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: karpenter-reconciler-application
+ namespace: argocd
+spec:
+ destination:
+ namespace: kube-system
+ server: https://kubernetes.default.svc
+ project: default
+ source:
+ repoURL: https://github.com/uc-cdis/cloud-automation.git
+ targetRevision: master
+ path: kube/services/karpenter-reconciler
+ directory:
+ exclude: "application.yaml"
+ syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+ syncOptions:
+ - CreateNamespace=true
diff --git a/kube/services/karpenter-reconciler/auth.yaml b/kube/services/karpenter-reconciler/auth.yaml
new file mode 100644
index 000000000..c159028ab
--- /dev/null
+++ b/kube/services/karpenter-reconciler/auth.yaml
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: karpenter-reconciler
+ namespace: argo-events
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: karpenter-admin-binding-reconciler
+subjects:
+ - kind: ServiceAccount
+ name: karpenter-reconciler
+ namespace: argo-events
+roleRef:
+ kind: ClusterRole
+ name: karpenter-admin
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: workflow-viewer-reconciler
+subjects:
+ - kind: ServiceAccount
+ name: karpenter-reconciler
+ namespace: argo-events
+roleRef:
+ kind: ClusterRole
+ name: argo-argo-workflows-view
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: viewer-reconciler
+subjects:
+ - kind: ServiceAccount
+ name: karpenter-reconciler
+ namespace: argo-events
+roleRef:
+ kind: ClusterRole
+ name: system:aggregate-to-view
+ apiGroup: rbac.authorization.k8s.io
diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml
new file mode 100644
index 000000000..aaba57b07
--- /dev/null
+++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml
@@ -0,0 +1,71 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: karpenter-reconciler-cronjob-va-testing
+ namespace: argo-events
+spec:
+ schedule: "*/5 * * * *"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ serviceAccount: karpenter-reconciler
+ volumes:
+ - name: karpenter-templates-volume
+ configMap:
+ name: karpenter-templates
+ containers:
+ - name: karpenter-reconciler
+ image: quay.io/cdis/awshelper
+ volumeMounts:
+ - name: karpenter-templates-volume
+ mountPath: /manifests
+ env:
+ - name: PROVISIONER_TEMPLATE
+ value: /manifests/provisioner.yaml
+ - name: AWSNODETEMPLATE_TEMPLATE
+ value: /manifests/nodetemplate.yaml
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ #!/bin/bash
+ if [ -z "$PROVISIONER_TEMPLATE" ]; then
+ PROVISIONER_TEMPLATE="provisioner.yaml"
+ fi
+
+ if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then
+ AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml"
+ fi
+
+ ENVIRONMENT=$(kubectl -n va-testing get configmap global -o jsonpath="{.data.environment}")
+
+ WORKFLOWS=$(kubectl get workflows -n argo -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{"\n"}')
+
+ WORKFLOW_ARRAY=()
+
+ while IFS= read -r line; do
+ WORKFLOW_ARRAY+=("$line")
+ done <<< "$WORKFLOWS"
+
+ for workflow in "${WORKFLOW_ARRAY[@]}"
+ do
+ echo "Running loop for workflow: $workflow"
+ workflow_name=$(echo "$workflow" | awk '{print $1}')
+ workflow_user=$(echo "$workflow" | awk '{print $2}')
+
+ if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then
+ echo "No awsnodetemplate found for ${workflow_name}, creating one"
+ sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f -
+ fi
+
+ if ! kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then
+ echo "No provisioner found for ${workflow_name}, creating one"
+ sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f -
+
+ fi
+ done
+ restartPolicy: OnFailure
diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml
new file mode 100644
index 000000000..aef5d6c49
--- /dev/null
+++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml
@@ -0,0 +1,74 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: karpenter-reconciler-cronjob
+ namespace: argo-events
+spec:
+ schedule: "*/5 * * * *"
+ jobTemplate:
+ spec:
+ template:
+ metadata:
+ labels:
+ app: gen3job
+ spec:
+ serviceAccount: karpenter-reconciler
+ volumes:
+ - name: karpenter-templates-volume
+ configMap:
+ name: karpenter-templates
+ containers:
+ - name: karpenter-reconciler
+ image: quay.io/cdis/awshelper
+ volumeMounts:
+ - name: karpenter-templates-volume
+ mountPath: /manifests
+ env:
+ - name: PROVISIONER_TEMPLATE
+ value: /manifests/provisioner.yaml
+ - name: AWSNODETEMPLATE_TEMPLATE
+ value: /manifests/nodetemplate.yaml
+ command: ["/bin/bash"]
+ args:
+ - "-c"
+ - |
+ #!/bin/bash
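+ # For every Argo workflow, ensure a matching workflow-<name> AWSNodeTemplate and Provisioner exist,
+ # rendered from the mounted templates with the workflow name, gen3 username and environment filled in.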
+ if [ -z "$PROVISIONER_TEMPLATE" ]; then
+ PROVISIONER_TEMPLATE="provisioner.yaml"
+ fi
+
+ if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then
+ AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml"
+ fi
+
+ ENVIRONMENT=$(kubectl -n default get configmap global -o jsonpath="{.data.environment}")
+
+ WORKFLOWS=$(kubectl get workflows -n argo -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{"\n"}')
+
+ WORKFLOW_ARRAY=()
+
+ while IFS= read -r line; do
+ WORKFLOW_ARRAY+=("$line")
+ done <<< "$WORKFLOWS"
+
+ echo $WORKFLOWS
+
+ for workflow in "${WORKFLOW_ARRAY[@]}"
+ do
+ workflow_name=$(echo "$workflow" | awk '{print $1}')
+ workflow_user=$(echo "$workflow" | awk '{print $2}')
+
+ if [ ! -z "$workflow_name" ]; then
+ if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then
+ echo "No awsnodetemplate found for ${workflow_name}, creating one"
+ sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f -
+ fi
+
+ if ! kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then
+ echo "No provisioner found for ${workflow_name}, creating one"
+ sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f -
+
+ fi
+ fi
+ done
+ restartPolicy: OnFailure
diff --git a/kube/services/karpenter/binfmt.yaml b/kube/services/karpenter/binfmt.yaml
new file mode 100644
index 000000000..35cf5b559
--- /dev/null
+++ b/kube/services/karpenter/binfmt.yaml
@@ -0,0 +1,42 @@
+# Run binfmt setup on any new node
+# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset
+# https://github.com/docker/buildx/issues/342#issuecomment-680715762
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: binfmt
+ # namespace: kube-system
+ labels:
+ app: binfmt-setup
+spec:
+ selector:
+ matchLabels:
+ name: binfmt
+ # https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates
+ template:
+ metadata:
+ labels:
+ name: binfmt
+ spec:
+ nodeSelector:
+ kubernetes.io/arch: "arm64"
+ initContainers:
+ - name: binfmt
+ image: tonistiigi/binfmt
+ # command: []
+ args: ["--install", "all"]
+ # Run the container with the privileged flag
+ # https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#securitycontext-v1-core
+ securityContext:
+ privileged: true
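+ # the pause container below only keeps the DaemonSet pod alive after the one-shot binfmt registration above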
+ containers:
+ - name: pause
+ image: gcr.io/google_containers/pause:3.2
+ resources:
+ limits:
+ cpu: 50m
+ memory: 50Mi
+ requests:
+ cpu: 50m
+ memory: 50Mi
diff --git a/kube/services/karpenter/karpenter-global-settings.yaml b/kube/services/karpenter/karpenter-global-settings.yaml
new file mode 100644
index 000000000..4c09a465d
--- /dev/null
+++ b/kube/services/karpenter/karpenter-global-settings.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: karpenter-global-settings
+ namespace: karpenter
+data:
+ aws.interruptionQueueName: SQS_NAME
\ No newline at end of file
diff --git a/kube/services/karpenter/nodeTemplateDefault.yaml b/kube/services/karpenter/nodeTemplateDefault.yaml
new file mode 100644
index 000000000..6ba8b3a0f
--- /dev/null
+++ b/kube/services/karpenter/nodeTemplateDefault.yaml
@@ -0,0 +1,66 @@
+apiVersion: karpenter.k8s.aws/v1alpha1
+kind: AWSNodeTemplate
+metadata:
+ name: default
+spec:
+ subnetSelector:
+ karpenter.sh/discovery: VPC_NAME
+ securityGroupSelector:
+ karpenter.sh/discovery: VPC_NAME
+ tags:
+ karpenter.sh/discovery: VPC_NAME
+ Environment: VPC_NAME
+ Name: eks-VPC_NAME-karpenter
+ purpose: default
+ metadataOptions:
+ httpEndpoint: enabled
+ httpProtocolIPv6: disabled
+ httpPutResponseHopLimit: 2
+ httpTokens: optional
+ userData: |
+ MIME-Version: 1.0
+ Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+ --BOUNDARY
+ Content-Type: text/x-shellscript; charset="us-ascii"
+
+ #!/bin/bash -x
+ instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
+ curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
+
+ echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
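+ # registryPullQPS=0 removes the kubelet's rate limit on image pulls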
+
+ sysctl -w fs.inotify.max_user_watches=12000
+
+ sudo yum update -y
+ sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+ sudo dracut -f
+ # configure grub
+ sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
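+ # the cloud-config part further down reboots the node so the FIPS kernel argument takes effect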
+
+ # --BOUNDARY
+ # Content-Type: text/cloud-config; charset="us-ascii"
+
+ # mounts:
+ # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime']
+
+ --BOUNDARY
+ Content-Type: text/cloud-config; charset="us-ascii"
+
+
+ power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+
+ --BOUNDARY--
+ blockDeviceMappings:
+ - deviceName: /dev/xvda
+ ebs:
+ volumeSize: 50Gi
+ volumeType: gp2
+ encrypted: true
+ deleteOnTermination: true
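As with the global settings above, the VPC_NAME placeholders in this node template are meant to be filled in before applying. A minimal sketch, assuming the environment/VPC name is held in a hypothetical $vpc_name variable:

vpc_name="example-commons"   # hypothetical value
sed "s/VPC_NAME/${vpc_name}/g" kube/services/karpenter/nodeTemplateDefault.yaml | kubectl apply -f -
kubectl get awsnodetemplates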
diff --git a/kube/services/karpenter/nodeTemplateGPU.yaml b/kube/services/karpenter/nodeTemplateGPU.yaml
new file mode 100644
index 000000000..925e7a9a0
--- /dev/null
+++ b/kube/services/karpenter/nodeTemplateGPU.yaml
@@ -0,0 +1,64 @@
+apiVersion: karpenter.k8s.aws/v1alpha1
+kind: AWSNodeTemplate
+metadata:
+ name: gpu
+spec:
+ subnetSelector:
+ karpenter.sh/discovery: VPC_NAME
+ securityGroupSelector:
+ karpenter.sh/discovery: VPC_NAME-gpu
+ tags:
+ Environment: VPC_NAME
+ Name: eks-VPC_NAME-gpu-karpenter
+ karpenter.sh/discovery: VPC_NAME
+ purpose: gpu
+ metadataOptions:
+ httpEndpoint: enabled
+ httpProtocolIPv6: disabled
+ httpPutResponseHopLimit: 2
+ httpTokens: optional
+ userData: |
+ MIME-Version: 1.0
+ Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+ --BOUNDARY
+ Content-Type: text/x-shellscript; charset="us-ascii"
+
+ #!/bin/bash -x
+ instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
+ curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
+
+ echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
+
+ sysctl -w fs.inotify.max_user_watches=12000
+
+ sudo yum update -y
+ sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+ sudo dracut -f
+ # configure grub
+ sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+ # --BOUNDARY
+ # Content-Type: text/cloud-config; charset="us-ascii"
+
+ # mounts:
+ # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime']
+
+ --BOUNDARY
+ Content-Type: text/cloud-config; charset="us-ascii"
+
+ power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+ --BOUNDARY--
+ blockDeviceMappings:
+ - deviceName: /dev/xvda
+ ebs:
+ volumeSize: 200Gi
+ volumeType: gp2
+ encrypted: true
+ deleteOnTermination: true
diff --git a/kube/services/karpenter/nodeTemplateJupyter.yaml b/kube/services/karpenter/nodeTemplateJupyter.yaml
new file mode 100644
index 000000000..1c8970ad6
--- /dev/null
+++ b/kube/services/karpenter/nodeTemplateJupyter.yaml
@@ -0,0 +1,64 @@
+apiVersion: karpenter.k8s.aws/v1alpha1
+kind: AWSNodeTemplate
+metadata:
+ name: jupyter
+spec:
+ subnetSelector:
+ karpenter.sh/discovery: VPC_NAME
+ securityGroupSelector:
+ karpenter.sh/discovery: VPC_NAME-jupyter
+ tags:
+ Environment: VPC_NAME
+ Name: eks-VPC_NAME-jupyter-karpenter
+ karpenter.sh/discovery: VPC_NAME
+ purpose: jupyter
+ metadataOptions:
+ httpEndpoint: enabled
+ httpProtocolIPv6: disabled
+ httpPutResponseHopLimit: 2
+ httpTokens: optional
+ userData: |
+ MIME-Version: 1.0
+ Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+ --BOUNDARY
+ Content-Type: text/x-shellscript; charset="us-ascii"
+
+ #!/bin/bash -x
+ instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
+ curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
+
+ echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
+
+ sysctl -w fs.inotify.max_user_watches=12000
+
+ sudo yum update -y
+ sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+ sudo dracut -f
+ # configure grub
+ sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+ # --BOUNDARY
+ # Content-Type: text/cloud-config; charset="us-ascii"
+
+ # mounts:
+ # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime']
+
+ --BOUNDARY
+ Content-Type: text/cloud-config; charset="us-ascii"
+
+ power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+ --BOUNDARY--
+ blockDeviceMappings:
+ - deviceName: /dev/xvda
+ ebs:
+ volumeSize: 50Gi
+ volumeType: gp2
+ encrypted: true
+ deleteOnTermination: true
diff --git a/kube/services/karpenter/nodeTemplateWorkflow.yaml b/kube/services/karpenter/nodeTemplateWorkflow.yaml
new file mode 100644
index 000000000..6e47b22f9
--- /dev/null
+++ b/kube/services/karpenter/nodeTemplateWorkflow.yaml
@@ -0,0 +1,64 @@
+apiVersion: karpenter.k8s.aws/v1alpha1
+kind: AWSNodeTemplate
+metadata:
+ name: workflow
+spec:
+ subnetSelector:
+ karpenter.sh/discovery: VPC_NAME
+ securityGroupSelector:
+ karpenter.sh/discovery: VPC_NAME-workflow
+ tags:
+ Environment: VPC_NAME
+ Name: eks-VPC_NAME-workflow-karpenter
+ karpenter.sh/discovery: VPC_NAME
+ purpose: workflow
+ metadataOptions:
+ httpEndpoint: enabled
+ httpProtocolIPv6: disabled
+ httpPutResponseHopLimit: 2
+ httpTokens: optional
+ userData: |
+ MIME-Version: 1.0
+ Content-Type: multipart/mixed; boundary="BOUNDARY"
+
+ --BOUNDARY
+ Content-Type: text/x-shellscript; charset="us-ascii"
+
+ #!/bin/bash -x
+ instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId)
+ curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys
+
+ echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json
+
+ sysctl -w fs.inotify.max_user_watches=12000
+
+ sudo yum update -y
+ sudo yum install -y dracut-fips openssl >> /opt/fips-install.log
+ sudo dracut -f
+ # configure grub
+ sudo /sbin/grubby --update-kernel=ALL --args="fips=1"
+
+ # --BOUNDARY
+ # Content-Type: text/cloud-config; charset="us-ascii"
+
+ # mounts:
+ # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime']
+
+ --BOUNDARY
+ Content-Type: text/cloud-config; charset="us-ascii"
+
+ power_state:
+ delay: now
+ mode: reboot
+ message: Powering off
+ timeout: 2
+ condition: true
+
+ --BOUNDARY--
+ blockDeviceMappings:
+ - deviceName: /dev/xvda
+ ebs:
+ volumeSize: 50Gi
+ volumeType: gp2
+ encrypted: true
+ deleteOnTermination: true
diff --git a/kube/services/karpenter/nvdp.yaml b/kube/services/karpenter/nvdp.yaml
new file mode 100644
index 000000000..4c37a9c27
--- /dev/null
+++ b/kube/services/karpenter/nvdp.yaml
@@ -0,0 +1,33 @@
+config:
+ # ConfigMap name if pulling from an external ConfigMap
+ name: ""
+ # Set of named configs to build an integrated ConfigMap from
+ map:
+ default: |-
+ version: v1
+ flags:
+ migStrategy: "none"
+ failOnInitError: true
+ nvidiaDriverRoot: "/"
+ plugin:
+ passDeviceSpecs: false
+ deviceListStrategy: envvar
+ deviceIDStrategy: uuid
+ shared_gpu: |-
+ version: v1
+ flags:
+ migStrategy: "none"
+ failOnInitError: true
+ nvidiaDriverRoot: "/"
+ plugin:
+ passDeviceSpecs: false
+ deviceListStrategy: envvar
+ deviceIDStrategy: uuid
+ sharing:
+ timeSlicing:
+ renameByDefault: false
+ resources:
+ - name: nvidia.com/gpu
+ replicas: 10
+nodeSelector:
+ jina.ai/gpu-type: nvidia
\ No newline at end of file
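These are Helm values for the NVIDIA device plugin: two named configs (default and shared_gpu, the latter time-slicing each physical GPU into 10 nvidia.com/gpu replicas), with the gpu-shared provisioner further down selecting shared_gpu through the nvidia.com/device-plugin.config node label. A minimal install sketch, assuming the upstream chart is nvdp/nvidia-device-plugin from https://nvidia.github.io/k8s-device-plugin and a nvidia-device-plugin namespace:

helm repo add nvdp https://nvidia.github.io/k8s-device-plugin
helm repo update
helm upgrade --install nvdp nvdp/nvidia-device-plugin \
  --namespace nvidia-device-plugin --create-namespace \
  -f kube/services/karpenter/nvdp.yaml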
diff --git a/kube/services/karpenter/provisionerArm.yaml b/kube/services/karpenter/provisionerArm.yaml
new file mode 100644
index 000000000..2f53581a2
--- /dev/null
+++ b/kube/services/karpenter/provisionerArm.yaml
@@ -0,0 +1,35 @@
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+ name: default
+spec:
+ # Allow for spot and on demand instances
+ requirements:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values: ["on-demand", "spot"]
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - arm64
+ - amd64
+ - key: karpenter.k8s.aws/instance-category
+ operator: In
+ values:
+ - c
+ - m
+ - r
+ - t
+ # Set a limit of 1000 vcpus
+ limits:
+ resources:
+ cpu: 1000
+ # Use the default node template
+ providerRef:
+ name: default
+ # Allow pods to be rearranged
+ consolidation:
+ enabled: true
+ # Kill nodes after 30 days to ensure they stay up to date
+ ttlSecondsUntilExpired: 2592000
+
diff --git a/kube/services/karpenter/provisionerDefault.yaml b/kube/services/karpenter/provisionerDefault.yaml
new file mode 100644
index 000000000..ac08284ce
--- /dev/null
+++ b/kube/services/karpenter/provisionerDefault.yaml
@@ -0,0 +1,35 @@
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+ name: default
+spec:
+ # Allow for spot and on demand instances
+ requirements:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values: ["on-demand", "spot"]
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ - key: karpenter.k8s.aws/instance-category
+ operator: In
+ values:
+ - c
+ - m
+ - r
+ - t
+ # Set a limit of 1000 vcpus
+ limits:
+ resources:
+ cpu: 1000
+ # Use the default node template
+ providerRef:
+ name: default
+ # Allow pods to be rearranged
+ consolidation:
+ enabled: true
+ # Kill nodes after 30 days to ensure they stay up to date
+ ttlSecondsUntilExpired: 2592000
+
+
diff --git a/kube/services/karpenter/provisionerGPU.yaml b/kube/services/karpenter/provisionerGPU.yaml
new file mode 100644
index 000000000..77a6b3876
--- /dev/null
+++ b/kube/services/karpenter/provisionerGPU.yaml
@@ -0,0 +1,29 @@
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+ name: gpu
+spec:
+ ttlSecondsAfterEmpty: 300
+ labels:
+ jina.ai/node-type: gpu
+ jina.ai/gpu-type: nvidia
+ requirements:
+ - key: node.kubernetes.io/instance-type
+ operator: In
+ values: ["g4dn.xlarge", "g4dn.2xlarge", "g4dn.4xlarge", "g4dn.12xlarge"]
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values: ["spot", "on-demand"]
+ - key: kubernetes.io/arch
+ operator: In
+ values: ["amd64"]
+ taints:
+ - key: nvidia.com/gpu
+ effect: "NoSchedule"
+ limits:
+ resources:
+ cpu: 1000
+  # Use the gpu node template
+ providerRef:
+ name: gpu
diff --git a/kube/services/karpenter/provisionerGPUShared.yaml b/kube/services/karpenter/provisionerGPUShared.yaml
new file mode 100644
index 000000000..fa108c512
--- /dev/null
+++ b/kube/services/karpenter/provisionerGPUShared.yaml
@@ -0,0 +1,30 @@
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+ name: gpu-shared
+spec:
+ ttlSecondsAfterEmpty: 300
+ labels:
+ jina.ai/node-type: gpu-shared
+ jina.ai/gpu-type: nvidia
+ nvidia.com/device-plugin.config: shared_gpu
+ requirements:
+ - key: karpenter.k8s.aws/instance-family
+ operator: In
+ values: ["g4dn", "g5","p4","p3"]
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values: ["spot", "on-demand"]
+ - key: kubernetes.io/arch
+ operator: In
+ values: ["amd64"]
+ taints:
+ - key: nvidia.com/gpu-shared
+ effect: "NoSchedule"
+ limits:
+ resources:
+ cpu: 1000
+  # Use the gpu node template
+ providerRef:
+ name: gpu
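Pods that should land on these gpu-shared nodes have to tolerate the nvidia.com/gpu-shared taint and can then request a time-sliced GPU. A minimal smoke-test sketch (the pod name and CUDA image tag are arbitrary examples, not part of this change):

cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: gpu-shared-smoke-test   # hypothetical name
spec:
  restartPolicy: Never
  nodeSelector:
    jina.ai/node-type: gpu-shared
  tolerations:
    - key: nvidia.com/gpu-shared
      operator: Exists
      effect: NoSchedule
  containers:
    - name: cuda
      image: nvidia/cuda:11.8.0-base-ubuntu22.04   # example image
      command: ["nvidia-smi"]
      resources:
        limits:
          nvidia.com/gpu: 1   # one time-sliced replica, not a whole physical GPU
EOF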
diff --git a/kube/services/karpenter/provisionerJupyter.yaml b/kube/services/karpenter/provisionerJupyter.yaml
new file mode 100644
index 000000000..0d4b1c85e
--- /dev/null
+++ b/kube/services/karpenter/provisionerJupyter.yaml
@@ -0,0 +1,40 @@
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+ name: jupyter
+spec:
+ # Only allow on demand instance
+ requirements:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values: ["on-demand"]
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ - key: karpenter.k8s.aws/instance-category
+ operator: In
+ values:
+ - c
+ - m
+ - r
+ - t
+ # Set a taint for jupyter pods
+ taints:
+ - key: role
+ value: jupyter
+ effect: NoSchedule
+ labels:
+ role: jupyter
+ # Set a limit of 1000 vcpus
+ limits:
+ resources:
+ cpu: 1000
+ # Use the jupyter node template
+ providerRef:
+ name: jupyter
+ # Allow pods to be rearranged
+ consolidation:
+ enabled: true
+ # Kill nodes after 30 days to ensure they stay up to date
+ ttlSecondsUntilExpired: 2592000
diff --git a/kube/services/karpenter/provisionerWorkflow.yaml b/kube/services/karpenter/provisionerWorkflow.yaml
new file mode 100644
index 000000000..f43dbf648
--- /dev/null
+++ b/kube/services/karpenter/provisionerWorkflow.yaml
@@ -0,0 +1,37 @@
+apiVersion: karpenter.sh/v1alpha5
+kind: Provisioner
+metadata:
+ name: workflow
+spec:
+ requirements:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values: ["on-demand"]
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ - key: karpenter.k8s.aws/instance-category
+ operator: In
+ values:
+ - c
+ - m
+ - r
+ - t
+ taints:
+ - key: role
+ value: workflow
+ effect: NoSchedule
+ labels:
+ role: workflow
+ limits:
+ resources:
+ cpu: 1000
+ providerRef:
+ name: workflow
+ # Allow pods to be rearranged
+ consolidation:
+ enabled: true
+ # Kill nodes after 30 days to ensure they stay up to date
+ ttlSecondsUntilExpired: 2592000
+
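Workflow pods only reach these nodes if they set nodeSelector role: workflow and tolerate the role=workflow:NoSchedule taint defined above. A quick sketch for watching Karpenter act on a pending workflow pod:

kubectl get provisioners
# a new node labeled role=workflow should appear once a matching pod is pending
kubectl get nodes -l role=workflow -w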
diff --git a/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml
new file mode 100644
index 000000000..9805a8e38
--- /dev/null
+++ b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml
@@ -0,0 +1,111 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: kayako-wrapper-deployment
+spec:
+ selector:
+ matchLabels:
+ app: kayako-wrapper
+ revisionHistoryLimit: 2
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 2
+ maxUnavailable: 25%
+ template:
+ metadata:
+ labels:
+ app: kayako-wrapper
+ public: "yes"
+ netnolimit: "yes"
+ userhelper: "yes"
+ GEN3_DATE_LABEL
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 25
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - kayako-wrapper
+ topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
+ automountServiceAccountToken: false
+ volumes:
+ - name: ca-volume
+ secret:
+ secretName: "service-ca"
+ - name: config-volume
+ secret:
+ secretName: "kayako-wrapper-config"
+ - name: privacy-policy
+ configMap:
+ name: "privacy-policy"
+ containers:
+ - name: kayako-wrapper
+ GEN3_KAYAKO-WRAPPER_IMAGE
+ readinessProbe:
+ httpGet:
+ path: /_status/
+ port: 8000
+ initialDelaySeconds: 30
+ periodSeconds: 60
+ timeoutSeconds: 30
+ livenessProbe:
+ httpGet:
+ path: /_status/
+ port: 8000
+ initialDelaySeconds: 60
+ periodSeconds: 60
+ timeoutSeconds: 30
+ failureThreshold: 6
+ resources:
+ requests:
+ cpu: 0.6
+ memory: 512Mi
+ limits:
+ cpu: 2
+ memory: 4096Mi
+ ports:
+ - containerPort: 8000
+ command:
+ - /bin/bash
+ - /src/start.sh
+ env:
+ - name: HOSTNAME
+ value: revproxy-service
+ - name: API_KEY
+ valueFrom:
+ secretKeyRef:
+ name: kayako-g3auto
+ key: "kayako_api_key.txt"
+ - name: SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: kayako-g3auto
+ key: "kayako_secret_key.txt"
+ volumeMounts:
+ - name: "ca-volume"
+ readOnly: true
+ mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt"
+ subPath: "ca.pem"
+ imagePullPolicy: Always
diff --git a/kube/services/kayako-wrapper/kayako-wrapper-service.yaml b/kube/services/kayako-wrapper/kayako-wrapper-service.yaml
new file mode 100644
index 000000000..23045722d
--- /dev/null
+++ b/kube/services/kayako-wrapper/kayako-wrapper-service.yaml
@@ -0,0 +1,19 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: kayako-wrapper-service
+spec:
+ selector:
+ app: kayako-wrapper
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 8000
+ name: http
+ nodePort: null
+ - protocol: TCP
+ port: 443
+ targetPort: 8000
+ name: https
+ nodePort: null
+ type: ClusterIP
diff --git a/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml b/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml
index 1235c1d2b..c0d4ba8ff 100644
--- a/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml
+++ b/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml
@@ -33,7 +33,7 @@ rules:
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- - apiGroups: ["extensions"]
+ - apiGroups: ["extensions","apps"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
# Remove the configmaps rule once below issue is fixed:
@@ -82,7 +82,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: autoscaler
- image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.2.0
+ image: IMAGE|-k8s.gcr.io/cpa/cluster-proportional-autoscaler:1.8.6-|
resources:
requests:
cpu: "20m"
diff --git a/kube/services/kube-proxy/kube-proxy-daemonset.yaml b/kube/services/kube-proxy/kube-proxy-daemonset.yaml
index 3e32f0bc5..13672e72b 100644
--- a/kube/services/kube-proxy/kube-proxy-daemonset.yaml
+++ b/kube/services/kube-proxy/kube-proxy-daemonset.yaml
@@ -150,6 +150,11 @@ spec:
containers:
- name: kube-proxy
image: ${kube_proxy_image}
+ ports:
+ - containerPort: 10249
+ hostPort: 10249
+ name: metrics
+ protocol: TCP
resources:
requests:
cpu: 100m
diff --git a/kube/services/kubecost-master/kubecost-alb.yaml b/kube/services/kubecost-master/kubecost-alb.yaml
new file mode 100644
index 000000000..24fbe7edc
--- /dev/null
+++ b/kube/services/kubecost-master/kubecost-alb.yaml
@@ -0,0 +1,19 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: kubecost-alb-ingress
+ annotations:
+ kubernetes.io/ingress.class: alb
+ alb.ingress.kubernetes.io/target-type: ip
+ alb.ingress.kubernetes.io/scheme: internal
+spec:
+ rules:
+ - http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: kubecost-cost-analyzer
+ port:
+ number: 443
\ No newline at end of file
diff --git a/kube/services/kubecost-master/object-store.yaml b/kube/services/kubecost-master/object-store.yaml
new file mode 100644
index 000000000..bcfadc752
--- /dev/null
+++ b/kube/services/kubecost-master/object-store.yaml
@@ -0,0 +1,16 @@
+type: S3
+config:
+ bucket: KUBECOST_S3_BUCKET
+ endpoint: "s3.amazonaws.com"
+ region: AWS_REGION
+ insecure: false
+ signature_version2: false
+ put_user_metadata:
+ "X-Amz-Acl": "bucket-owner-full-control"
+ http_config:
+ idle_conn_timeout: 90s
+ response_header_timeout: 2m
+ insecure_skip_verify: false
+ trace:
+ enable: true
+ part_size: 134217728
\ No newline at end of file
diff --git a/kube/services/kubecost-master/values.yaml b/kube/services/kubecost-master/values.yaml
new file mode 100644
index 000000000..0b4269778
--- /dev/null
+++ b/kube/services/kubecost-master/values.yaml
@@ -0,0 +1,40 @@
+## Full values listed here, https://github.com/kubecost/cost-analyzer-helm-chart/blob/master/cost-analyzer/values.yaml
+
+kubecostToken: KUBECOST_TOKEN
+
+serviceAccount:
+ create: true # Set this to false if you're bringing your own service account.
+ annotations:
+ KUBECOST_SA
+
+kubecostProductConfigs:
+ athenaBucketName: ATHENA_BUCKET
+ athenaRegion: AWS_REGION
+ athenaDatabase: ATHENA_DATABASE
+ athenaTable: ATHENA_TABLE
+ athenaProjectID: AWS_ACCOUNT_ID
+ clusterName: master-cluster
+ #serviceKeySecretName: aws-service-key , might work with SA attached instead
+ projectID: AWS_ACCOUNT_ID
+ # awsSpotDataRegion: AWS_kubecostProductConfigs_awsSpotDataRegion
+ # awsSpotDataBucket: AWS_kubecostProductConfigs_awsSpotDataBucket
+
+prometheus:
+ serviceAccounts:
+ server:
+ create: false
+ name: "THANOS_SA"
+ server:
+ global:
+ external_labels:
+        # Master cluster name
+ cluster_id: "master-cluster"
+
+networkCosts:
+ enabled: true
+
+thanos:
+ store:
+ serviceAccount: "THANOS_SA"
+ compact:
+ serviceAccount: "THANOS_SA"
\ No newline at end of file
diff --git a/kube/services/kubecost-slave/values.yaml b/kube/services/kubecost-slave/values.yaml
new file mode 100644
index 000000000..8b3786054
--- /dev/null
+++ b/kube/services/kubecost-slave/values.yaml
@@ -0,0 +1,50 @@
+## Full values listed here, https://github.com/kubecost/cost-analyzer-helm-chart/blob/master/cost-analyzer/values.yaml
+
+kubecostToken: KUBECOST_TOKEN
+
+serviceAccount:
+ create: true # Set this to false if you're bringing your own service account.
+ annotations:
+ KUBECOST_SA
+
+kubecostProductConfigs:
+ clusterName: slave-cluster
+ athenaBucketName: ATHENA_BUCKET
+ athenaRegion: AWS_REGION
+ athenaDatabase: ATHENA_DATABASE
+ athenaTable: ATHENA_TABLE
+ athenaProjectID: AWS_ACCOUNT_ID
+ #serviceKeySecretName: aws-service-key , might work with SA attached instead
+ projectID: AWS_ACCOUNT_ID
+
+kubecostModel:
+ warmCache: false
+ warmSavingsCache: false
+ etl: false
+
+global:
+ grafana:
+ enabled: false
+ proxy: false
+ alertmanager:
+ enabled: false
+
+prometheus:
+ serviceAccounts:
+ server:
+ create: false
+ name: THANOS_SA
+ server:
+ global:
+ external_labels:
+ # Slave cluster name
+ cluster_id: "slave-cluster"
+
+networkCosts:
+ enabled: true
+
+thanos:
+ store:
+ serviceAccount: THANOS_SA
+ compact:
+ serviceAccount: THANOS_SA
\ No newline at end of file
diff --git a/kube/services/kubecost-standalone/kubecost-cost-analyzer-service.yaml b/kube/services/kubecost-standalone/kubecost-cost-analyzer-service.yaml
new file mode 100644
index 000000000..3f4db5944
--- /dev/null
+++ b/kube/services/kubecost-standalone/kubecost-cost-analyzer-service.yaml
@@ -0,0 +1,20 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: kubecost-cost-analyzer-service
+spec:
+ selector:
+    app: cost-analyzer
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 80
+ name: http
+ nodePort: null
+ - protocol: TCP
+ port: 443
+ targetPort: 443
+ name: https
+ nodePort: null
+ type: ClusterIP
\ No newline at end of file
diff --git a/kube/services/kubecost-standalone/thanos-deploy.yaml b/kube/services/kubecost-standalone/thanos-deploy.yaml
new file mode 100644
index 000000000..8c9493d60
--- /dev/null
+++ b/kube/services/kubecost-standalone/thanos-deploy.yaml
@@ -0,0 +1,221 @@
+---
+# querier-deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: thanos-query
+ namespace: monitoring
+ labels:
+ app: thanos-query
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: thanos-query
+ template:
+ metadata:
+ labels:
+ app: thanos-query
+ spec:
+ containers:
+ - name: thanos-query
+ image: quay.io/thanos/thanos:v0.23.0
+ args:
+ - 'query'
+ - '--log.level=debug'
+ - '--query.replica-label=prometheus_replica'
+ - '--store=prometheus-kube-prometheus-thanos-discovery.monitoring.svc:10901'
+ resources:
+ requests:
+ cpu: '100m'
+ memory: '64Mi'
+ limits:
+ cpu: '250m'
+ memory: '256Mi'
+ ports:
+ - name: http
+ containerPort: 10902
+ - name: grpc
+ containerPort: 10901
+ - name: cluster
+ containerPort: 10900
+
+---
+# querier-service-servicemonitor.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: thanos-query
+ labels:
+ app: thanos-query
+ release: prometheus-operator
+ jobLabel: thanos
+ namespace: monitoring
+spec:
+ selector:
+ app: thanos-query
+ ports:
+ - port: 9090
+ protocol: TCP
+ targetPort: http
+ name: http-query
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: prom-thanos-query
+ namespace: monitoring
+spec:
+ jobLabel: thanos
+ selector:
+ matchLabels:
+ app: thanos-query
+ namespaceSelector:
+ matchNames:
+ - 'monitoring'
+ endpoints:
+ - port: http-query
+ path: /metrics
+ interval: 5s
+
+---
+# store-statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: thanos-store
+ namespace: monitoring
+ labels:
+ app: thanos-store
+spec:
+ serviceName: 'thanos-store'
+ replicas: 1
+ selector:
+ matchLabels:
+ app: thanos-store
+ template:
+ metadata:
+ labels:
+ app: thanos-store
+ spec:
+ containers:
+ - name: thanos-store
+ image: quay.io/thanos/thanos:v0.23.0
+ args:
+ - 'store'
+ - '--log.level=debug'
+ - '--data-dir=/var/thanos/store'
+ - '--objstore.config-file=/config/thanos.yaml'
+ ports:
+ - name: http
+ containerPort: 10902
+ - name: grpc
+ containerPort: 10901
+ - name: cluster
+ containerPort: 10900
+ volumeMounts:
+ - name: config
+ mountPath: /config/
+ readOnly: true
+ - name: data
+ mountPath: /var/thanos/store
+ volumes:
+ - name: data
+ emptyDir: {}
+ - name: config
+ secret:
+ secretName: thanos-objstore-config
+---
+# store-servicemonitor.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: thanos-store
+ namespace: monitoring
+ labels:
+ release: prom-op
+spec:
+ jobLabel: thanos
+ endpoints:
+ - port: http
+ path: /metrics
+ interval: 30s
+ selector:
+ matchLabels:
+ app: thanos-store
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: thanos-compactor
+ namespace: monitoring
+ labels:
+ app: thanos-compactor
+spec:
+ serviceName: 'thanos-compactor'
+ replicas: 1
+ selector:
+ matchLabels:
+ app: thanos-compactor
+ template:
+ metadata:
+ labels:
+ app: thanos-compactor
+ spec:
+ containers:
+ - name: thanos-compactor
+ image: quay.io/thanos/thanos:v0.23.0
+ args:
+ - 'compact'
+ - '--log.level=debug'
+ - '--data-dir=/var/thanos/store'
+ - '--objstore.config-file=/config/thanos.yaml'
+ - '--wait'
+ ports:
+ - name: http
+ containerPort: 10902
+ volumeMounts:
+ - name: config
+ mountPath: /config/
+ readOnly: true
+ - name: data
+ mountPath: /var/thanos/store
+ volumes:
+ - name: data
+ emptyDir: {}
+ - name: config
+ secret:
+ secretName: thanos-objstore-config
+---
+# compactor-service-servicemonitor.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: thanos-compactor
+ labels:
+ app: thanos-compactor
+ namespace: monitoring
+spec:
+ selector:
+ app: thanos-compactor
+ ports:
+ - port: 10902
+ name: http
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: thanos-compactor
+ namespace: monitoring
+ labels:
+ release: prom-op
+spec:
+ jobLabel: thanos
+ endpoints:
+ - port: http
+ path: /metrics
+ interval: 30s
+ selector:
+ matchLabels:
+ app: thanos-compactor
+
diff --git a/kube/services/kubecost-standalone/values.yaml b/kube/services/kubecost-standalone/values.yaml
new file mode 100644
index 000000000..6235dee4b
--- /dev/null
+++ b/kube/services/kubecost-standalone/values.yaml
@@ -0,0 +1,35 @@
+## Full values listed here, https://github.com/kubecost/cost-analyzer-helm-chart/blob/master/cost-analyzer/values.yaml
+
+kubecostToken: KUBECOST_TOKEN
+
+global:
+ prometheus:
+ enabled: false
+ fqdn: http://prometheus-operated.monitoring.svc:9090
+
+serviceAccount:
+ create: true # Set this to false if you're bringing your own service account.
+ annotations:
+ KUBECOST_SA
+
+kubecostProductConfigs:
+ athenaBucketName: ATHENA_BUCKET
+ athenaRegion: AWS_REGION
+ athenaDatabase: ATHENA_DATABASE
+ athenaTable: ATHENA_TABLE
+ athenaProjectID: AWS_ACCOUNT_ID
+ clusterName: master-cluster
+ #serviceKeySecretName: aws-service-key , might work with SA attached instead
+ projectID: AWS_ACCOUNT_ID
+ # awsSpotDataRegion: AWS_kubecostProductConfigs_awsSpotDataRegion
+ # awsSpotDataBucket: AWS_kubecostProductConfigs_awsSpotDataBucket
+
+kubecostFrontend:
+ tls:
+ enabled: true
+ secretName: "cert-kubecost-cost-analyzer"
+
+
+networkCosts:
+ enabled: true
+
diff --git a/kube/services/kubecost/kubecost-alb.yaml b/kube/services/kubecost/kubecost-alb.yaml
new file mode 100644
index 000000000..24fbe7edc
--- /dev/null
+++ b/kube/services/kubecost/kubecost-alb.yaml
@@ -0,0 +1,19 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: kubecost-alb-ingress
+ annotations:
+ kubernetes.io/ingress.class: alb
+ alb.ingress.kubernetes.io/target-type: ip
+ alb.ingress.kubernetes.io/scheme: internal
+spec:
+ rules:
+ - http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: kubecost-cost-analyzer
+ port:
+ number: 443
\ No newline at end of file
diff --git a/kube/services/kubecost/object-store.yaml b/kube/services/kubecost/object-store.yaml
new file mode 100644
index 000000000..bd38ac3e9
--- /dev/null
+++ b/kube/services/kubecost/object-store.yaml
@@ -0,0 +1,19 @@
+type: S3
+config:
+ bucket: KUBECOST_S3_BUCKET
+ endpoint: "s3.amazonaws.com"
+ region: AWS_REGION
+ insecure: false
+ aws_sdk_auth: true
+ signature_version2: false
+ put_user_metadata:
+ "X-Amz-Acl": "bucket-owner-full-control"
+ http_config:
+ idle_conn_timeout: 90s
+ response_header_timeout: 2m
+ insecure_skip_verify: false
+ trace:
+ enable: true
+ part_size: 134217728
+ sse_config:
+ type: "SSE-S3"
\ No newline at end of file
diff --git a/kube/services/kubecost/values.yaml b/kube/services/kubecost/values.yaml
new file mode 100644
index 000000000..d1ac47246
--- /dev/null
+++ b/kube/services/kubecost/values.yaml
@@ -0,0 +1,183 @@
+#kubecostToken: KUBECOST_TOKEN
+
+global:
+ grafana:
+ enabled: false
+ proxy: false
+pricingCsv:
+ enabled: false
+ location:
+ provider: "AWS"
+ region: "us-east-1"
+ URI: s3://kc-csv-test/pricing_schema.csv # a valid file URI
+ csvAccessCredentials: pricing-schema-access-secret
+
+tolerations:
+- key: "role"
+ operator: "Equal"
+ value: "prometheus"
+ effect: "NoSchedule"
+
+nodeSelector: {}
+
+affinity: {}
+
+# If true, creates a PriorityClass to be used by the cost-analyzer pod
+priority:
+ enabled: false
+
+# If true, enable creation of NetworkPolicy resources.
+networkPolicy:
+ enabled: false
+
+podSecurityPolicy:
+ enabled: false
+
+# Enable this flag if you need to install with specific image tags
+# imageVersion: prod-1.97.0
+
+kubecostFrontend:
+ image: public.ecr.aws/kubecost/frontend
+ imagePullPolicy: Always
+ resources:
+ requests:
+ cpu: "10m"
+ memory: "55Mi"
+ #limits:
+ # cpu: "100m"
+ # memory: "256Mi"
+
+kubecostModel:
+ image: public.ecr.aws/kubecost/cost-model
+ imagePullPolicy: Always
+ warmCache: true
+ warmSavingsCache: true
+ etl: true
+ # The total number of days the ETL storage will build
+ etlStoreDurationDays: 120
+ maxQueryConcurrency: 5
+ # utcOffset represents a timezone in hours and minutes east (+) or west (-)
+ # of UTC, itself, which is defined as +00:00.
+ # See the tz database of timezones to look up your local UTC offset:
+ # https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+ utcOffset: "+00:00"
+ resources:
+ requests:
+ cpu: "500m"
+ memory: "256Mi"
+ #limits:
+ # cpu: "800m"
+ # memory: "256Mi"
+
+# Define persistence volume for cost-analyzer
+persistentVolume:
+ size: 32Gi
+ dbSize: 32.0Gi
+ enabled: true # Note that setting this to false means configurations will be wiped out on pod restart.
+ # storageClass: "-" #
+ # existingClaim: kubecost-cost-analyzer # a claim in the same namespace as kubecost
+
+service:
+ type: ClusterIP
+ port: 9090
+ targetPort: 9090
+ # nodePort:
+ labels: {}
+ annotations: {}
+
+prometheus:
+ server:
+ # If clusterIDConfigmap is defined, instead use user-generated configmap with key CLUSTER_ID
+ # to use as unique cluster ID in kubecost cost-analyzer deployment.
+ # This overrides the cluster_id set in prometheus.server.global.external_labels.
+ # NOTE: This does not affect the external_labels set in prometheus config.
+ # clusterIDConfigmap: cluster-id-configmap
+ image:
+ repository: public.ecr.aws/kubecost/prometheus
+ tag: v2.35.0
+ resources:
+ requests:
+ memory: 3Gi
+ # requests:
+ # cpu: 500m
+ # memory: 30Gi
+ global:
+ scrape_interval: 1m
+ scrape_timeout: 10s
+ evaluation_interval: 1m
+ external_labels:
+ cluster_id: kubecost
+ persistentVolume:
+ size: 32Gi
+ enabled: true
+ extraArgs:
+ query.max-concurrency: 1
+ query.max-samples: 100000000
+ tolerations:
+ - key: "role"
+ operator: "Equal"
+ value: "prometheus"
+ effect: "NoSchedule"
+
+ configmapReload:
+ prometheus:
+ ## If false, the configmap-reload container will not be deployed
+ ##
+ enabled: false
+
+ ## configmap-reload container name
+ ##
+ name: configmap-reload
+ ## configmap-reload container image
+ ##
+ image:
+ repository: public.ecr.aws/bitnami/configmap-reload
+ tag: 0.7.1
+ pullPolicy: IfNotPresent
+ ## Additional configmap-reload container arguments
+ ##
+ extraArgs: {}
+ ## Additional configmap-reload volume directories
+ ##
+ extraVolumeDirs: []
+ ## Additional configmap-reload mounts
+ ##
+ extraConfigmapMounts: []
+ # - name: prometheus-alerts
+ # mountPath: /etc/alerts.d
+ # subPath: ""
+ # configMap: prometheus-alerts
+ # readOnly: true
+ ## configmap-reload resource requests and limits
+ ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ resources: {}
+
+ kube-state-metrics:
+ disabled: false
+ nodeExporter:
+ enabled: false
+
+reporting:
+ productAnalytics: false
+
+serviceAccount:
+ create: true # Set this to false if you're bringing your own service account.
+ annotations:
+ KUBECOST_SA
+
+kubecostProductConfigs:
+ athenaBucketName: s3://ATHENA_BUCKET
+ athenaRegion: AWS_REGION
+ athenaDatabase: ATHENA_DATABASE
+ athenaTable: ATHENA_TABLE
+ athenaProjectID: AWS_ACCOUNT_ID
+ clusterName: kubecost
+ #serviceKeySecretName: aws-service-key , might work with SA attached instead
+ projectID: AWS_ACCOUNT_ID
+ awsSpotDataRegion: AWS_REGION
+ awsSpotDataBucket: ATHENA_BUCKET
+ awsSpotDataPrefix: "spot-feed"
+
+networkCosts:
+ enabled: true
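Like the other templated files in this change, the ALL-CAPS placeholders here (KUBECOST_TOKEN, ATHENA_*, AWS_*, KUBECOST_SA) are substituted before the chart is installed. A minimal install sketch, assuming the upstream chart is kubecost/cost-analyzer from https://kubecost.github.io/cost-analyzer/ and hypothetical shell variables for the substituted values:

sed -e "s/KUBECOST_TOKEN/${kubecost_token}/" \
    -e "s/AWS_REGION/${aws_region}/g" \
    kube/services/kubecost/values.yaml > /tmp/kubecost-values.yaml   # repeat -e for the remaining placeholders
helm repo add kubecost https://kubecost.github.io/cost-analyzer/
helm repo update
helm upgrade --install kubecost kubecost/cost-analyzer \
  --namespace kubecost --create-namespace \
  -f /tmp/kubecost-values.yaml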
diff --git a/kube/services/manifestservice/manifestservice-deploy.yaml b/kube/services/manifestservice/manifestservice-deploy.yaml
index 52460cfbf..0966f2480 100644
--- a/kube/services/manifestservice/manifestservice-deploy.yaml
+++ b/kube/services/manifestservice/manifestservice-deploy.yaml
@@ -22,12 +22,14 @@ spec:
s3: "yes"
public: "yes"
userhelper: "yes"
+ netvpc: "yes"
GEN3_DATE_LABEL
spec:
+ serviceAccountName: manifestservice-sa
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -36,6 +38,22 @@ spec:
values:
- manifestservice
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume
@@ -83,10 +101,9 @@ spec:
imagePullPolicy: Always
resources:
requests:
- cpu: 0.5
- memory: 512Mi
+ cpu: 100m
+ memory: 300Mi
limits:
- cpu: 1
memory: 1024Mi
livenessProbe:
httpGet:
diff --git a/kube/services/mariner/mariner-deploy.yaml b/kube/services/mariner/mariner-deploy.yaml
index 0912ea705..ec4b8a0d4 100644
--- a/kube/services/mariner/mariner-deploy.yaml
+++ b/kube/services/mariner/mariner-deploy.yaml
@@ -37,7 +37,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -46,6 +46,22 @@ spec:
values:
- mariner
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - on-demand
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - ONDEMAND
automountServiceAccountToken: true
containers:
- name: mariner
diff --git a/kube/services/metadata/metadata-deploy.yaml b/kube/services/metadata/metadata-deploy.yaml
index c4842dadc..72986e795 100644
--- a/kube/services/metadata/metadata-deploy.yaml
+++ b/kube/services/metadata/metadata-deploy.yaml
@@ -29,7 +29,7 @@ spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
+ - weight: 25
podAffinityTerm:
labelSelector:
matchExpressions:
@@ -38,6 +38,22 @@ spec:
values:
- metadata
topologyKey: "kubernetes.io/hostname"
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ preference:
+ matchExpressions:
+ - key: karpenter.sh/capacity-type
+ operator: In
+ values:
+ - spot
+ - weight: 99
+ preference:
+ matchExpressions:
+ - key: eks.amazonaws.com/capacityType
+ operator: In
+ values:
+ - SPOT
automountServiceAccountToken: false
volumes:
- name: config-volume-g3auto
@@ -75,6 +91,12 @@ spec:
name: manifest-metadata
key: AGG_MDS_NAMESPACE
optional: true
+ - name: AGG_MDS_DEFAULT_DATA_DICT_FIELD
+ valueFrom:
+ configMapKeyRef:
+ name: manifest-metadata
+ key: AGG_MDS_DEFAULT_DATA_DICT_FIELD
+ optional: true
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -104,11 +126,10 @@ spec:
subPath: json
resources:
requests:
- cpu: 0.4
- memory: 512Mi
+ cpu: 100m
+ memory: 128Mi
limits:
- cpu: 1
- memory: 2048Mi
+ memory: 512Mi
initContainers:
- name: metadata-db-migrate
GEN3_METADATA_IMAGE
diff --git a/kube/services/metrics-server/components.yaml b/kube/services/metrics-server/components.yaml
index 743d61965..a683ca0d6 100644
--- a/kube/services/metrics-server/components.yaml
+++ b/kube/services/metrics-server/components.yaml
@@ -1,22 +1,81 @@
-# Copied contents from here: https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.3.7/components.yaml
+# Copied contents from here: https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.2/components.yaml
# https://github.com/kubernetes-sigs/metrics-server/releases for more information on installation of a different version.
---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: system:aggregated-metrics-reader
labels:
- rbac.authorization.k8s.io/aggregate-to-view: "true"
- rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
+ rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
+ name: system:aggregated-metrics-reader
rules:
-- apiGroups: ["metrics.k8s.io"]
- resources: ["pods", "nodes"]
- verbs: ["get", "list", "watch"]
+- apiGroups:
+ - metrics.k8s.io
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes/metrics
+ - nodes/stats
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - nodes
+ - namespaces
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server-auth-reader
+ namespace: kube-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
+ labels:
+ k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
@@ -28,126 +87,116 @@ subjects:
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
+kind: ClusterRoleBinding
metadata:
- name: metrics-server-auth-reader
- namespace: kube-system
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: extension-apiserver-authentication-reader
+ kind: ClusterRole
+ name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
-apiVersion: apiregistration.k8s.io/v1beta1
-kind: APIService
-metadata:
- name: v1beta1.metrics.k8s.io
-spec:
- service:
- name: metrics-server
- namespace: kube-system
- group: metrics.k8s.io
- version: v1beta1
- insecureSkipTLSVerify: true
- groupPriorityMinimum: 100
- versionPriority: 100
----
apiVersion: v1
-kind: ServiceAccount
+kind: Service
metadata:
+ labels:
+ k8s-app: metrics-server
name: metrics-server
namespace: kube-system
+spec:
+ ports:
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: https
+ selector:
+ k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
- name: metrics-server
- namespace: kube-system
labels:
k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 0
template:
metadata:
- name: metrics-server
labels:
k8s-app: metrics-server
spec:
- serviceAccountName: metrics-server
- volumes:
- # mount in tmp so we can safely use from-scratch images and/or read-only containers
- - name: tmp-dir
- emptyDir: {}
containers:
- - name: metrics-server
- image: k8s.gcr.io/metrics-server/metrics-server:v0.3.7
+ - args:
+ - --cert-dir=/tmp
+ - --secure-port=4443
+ - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+ - --kubelet-use-node-status-port
+ - --metric-resolution=15s
+ image: k8s.gcr.io/metrics-server/metrics-server:v0.6.2
imagePullPolicy: IfNotPresent
- args:
- - --cert-dir=/tmp
- - --secure-port=4443
- - --v=2
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /livez
+ port: https
+ scheme: HTTPS
+ periodSeconds: 10
+ name: metrics-server
ports:
- - name: main-port
- containerPort: 4443
+ - containerPort: 4443
+ name: https
protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /readyz
+ port: https
+ scheme: HTTPS
+ initialDelaySeconds: 20
+ periodSeconds: 10
+ resources:
+ requests:
+ cpu: 100m
+ memory: 200Mi
securityContext:
+ allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- - name: tmp-dir
- mountPath: /tmp
+ - mountPath: /tmp
+ name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ serviceAccountName: metrics-server
+ volumes:
+ - emptyDir: {}
+ name: tmp-dir
---
-apiVersion: v1
-kind: Service
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
metadata:
- name: metrics-server
- namespace: kube-system
labels:
- kubernetes.io/name: "Metrics-server"
- kubernetes.io/cluster-service: "true"
-spec:
- selector:
k8s-app: metrics-server
- ports:
- - port: 443
- protocol: TCP
- targetPort: main-port
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: system:metrics-server
-rules:
-- apiGroups:
- - ""
- resources:
- - pods
- - nodes
- - nodes/stats
- - namespaces
- - configmaps
- verbs:
- - get
- - list
- - watch
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: system:metrics-server
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: system:metrics-server
-subjects:
-- kind: ServiceAccount
- name: metrics-server
- namespace: kube-system
\ No newline at end of file
+ name: v1beta1.metrics.k8s.io
+spec:
+ group: metrics.k8s.io
+ groupPriorityMinimum: 100
+ insecureSkipTLSVerify: true
+ service:
+ name: metrics-server
+ namespace: kube-system
+ version: v1beta1
+ versionPriority: 100
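After applying the updated v0.6.2 manifest, the metrics API should register and begin serving within a minute or two; a quick verification sketch:

kubectl apply -f kube/services/metrics-server/components.yaml
kubectl get apiservice v1beta1.metrics.k8s.io
# should return per-node CPU/memory once the deployment is ready
kubectl top nodes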
diff --git a/kube/services/monitoring/prometheus-application.yaml b/kube/services/monitoring/prometheus-application.yaml
new file mode 100644
index 000000000..75b085719
--- /dev/null
+++ b/kube/services/monitoring/prometheus-application.yaml
@@ -0,0 +1,24 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: prometheus-application
+ namespace: argocd
+spec:
+ project: default
+ source:
+ chart: kube-prometheus-stack
+ repoURL: https://prometheus-community.github.io/helm-charts
+ targetRevision: 43.1.3
+ helm:
+ valueFiles:
+ - https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/kube/services/monitoring/values.yaml
+ releaseName: prometheus
+ destination:
+ server: 'https://kubernetes.default.svc'
+ namespace: monitoring
+ syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+ syncOptions:
+ - CreateNamespace=true
\ No newline at end of file
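This Argo CD Application installs the kube-prometheus-stack chart with the values file added below in kube/services/monitoring/values.yaml. A minimal sketch for applying it and checking sync status, assuming Argo CD is already running in the argocd namespace (as the manifest's metadata.namespace implies):

kubectl apply -f kube/services/monitoring/prometheus-application.yaml
kubectl -n argocd get application prometheus-application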
diff --git a/kube/services/monitoring/prometheus-values.yaml b/kube/services/monitoring/prometheus-values.yaml
index 9ae425abd..e49bfba09 100644
--- a/kube/services/monitoring/prometheus-values.yaml
+++ b/kube/services/monitoring/prometheus-values.yaml
@@ -1261,4 +1261,4 @@ extraScrapeConfigs:
networkPolicy:
## Enable creation of NetworkPolicy resources.
##
- enabled: false
+ enabled: false
\ No newline at end of file
diff --git a/kube/services/monitoring/thanos-deploy.yaml b/kube/services/monitoring/thanos-deploy.yaml
new file mode 100644
index 000000000..f5c07a656
--- /dev/null
+++ b/kube/services/monitoring/thanos-deploy.yaml
@@ -0,0 +1,225 @@
+---
+# querier-deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: thanos-query
+ namespace: monitoring
+ labels:
+ app: thanos-query
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: thanos-query
+ template:
+ metadata:
+ labels:
+ app: thanos-query
+ spec:
+ containers:
+ - name: thanos-query
+ image: quay.io/thanos/thanos:v0.25.2
+ args:
+ - 'query'
+ - '--log.level=debug'
+ - '--query.replica-label=prometheus_replica'
+ - '--store=prometheus-kube-prometheus-thanos-discovery.monitoring.svc:10901'
+ - '--web.external-prefix=/thanos-query/'
+ - '--web.route-prefix=/thanos-query/'
+ resources:
+ requests:
+ cpu: '100m'
+ memory: '64Mi'
+ limits:
+ cpu: '250m'
+ memory: '256Mi'
+ ports:
+ - name: http
+ containerPort: 10902
+ - name: grpc
+ containerPort: 10901
+ - name: cluster
+ containerPort: 10900
+
+---
+# querier-service-servicemonitor.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: thanos-query
+ labels:
+ app: thanos-query
+ release: prometheus-operator
+ jobLabel: thanos
+ namespace: monitoring
+spec:
+ selector:
+ app: thanos-query
+ ports:
+ - port: 9090
+ protocol: TCP
+ targetPort: http
+ name: http-query
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: prom-thanos-query
+ namespace: monitoring
+spec:
+ jobLabel: thanos
+ selector:
+ matchLabels:
+ app: thanos-query
+ namespaceSelector:
+ matchNames:
+ - 'monitoring'
+ endpoints:
+ - port: http-query
+ path: /metrics
+ interval: 5s
+
+---
+# store-statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: thanos-store
+ namespace: monitoring
+ labels:
+ app: thanos-store
+spec:
+ serviceName: 'thanos-store'
+ replicas: 1
+ selector:
+ matchLabels:
+ app: thanos-store
+ template:
+ metadata:
+ labels:
+ app: thanos-store
+ spec:
+ serviceAccount: thanos
+ containers:
+ - name: thanos-store
+ image: quay.io/thanos/thanos:v0.25.2
+ args:
+ - 'store'
+ - '--log.level=debug'
+ - '--data-dir=/var/thanos/store'
+ - '--objstore.config-file=/config/thanos.yaml'
+ ports:
+ - name: http
+ containerPort: 10902
+ - name: grpc
+ containerPort: 10901
+ - name: cluster
+ containerPort: 10900
+ volumeMounts:
+ - name: config
+ mountPath: /config/
+ readOnly: true
+ - name: data
+ mountPath: /var/thanos/store
+ volumes:
+ - name: data
+ emptyDir: {}
+ - name: config
+ secret:
+ secretName: thanos-objstore-config
+---
+# store-servicemonitor.yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: thanos-store
+ namespace: monitoring
+ labels:
+ release: prom-op
+spec:
+ jobLabel: thanos
+ endpoints:
+ - port: http
+ path: /metrics
+ interval: 30s
+ selector:
+ matchLabels:
+ app: thanos-store
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: thanos-compactor
+ namespace: monitoring
+ labels:
+ app: thanos-compactor
+spec:
+ serviceName: 'thanos-compactor'
+ replicas: 1
+ selector:
+ matchLabels:
+ app: thanos-compactor
+ template:
+ metadata:
+ labels:
+ app: thanos-compactor
+ spec:
+ serviceAccount: thanos
+ containers:
+ - name: thanos-compactor
+ image: quay.io/thanos/thanos:v0.25.2
+ args:
+ - 'compact'
+ - '--log.level=debug'
+ - '--data-dir=/var/thanos/store'
+ - '--objstore.config-file=/config/thanos.yaml'
+ - '--wait'
+ - '--web.external-prefix=/thanos-compactor/'
+ ports:
+ - name: http
+ containerPort: 10902
+ volumeMounts:
+ - name: config
+ mountPath: /config/
+ readOnly: true
+ - name: data
+ mountPath: /var/thanos/store
+ volumes:
+ - name: data
+ emptyDir: {}
+ - name: config
+ secret:
+ secretName: thanos-objstore-config
+---
+# compactor-service-servicemonitor.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: thanos-compactor
+ labels:
+ app: thanos-compactor
+ namespace: monitoring
+spec:
+ selector:
+ app: thanos-compactor
+ ports:
+ - port: 10902
+ name: http
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: thanos-compactor
+ namespace: monitoring
+ labels:
+ release: prom-op
+spec:
+ jobLabel: thanos
+ endpoints:
+ - port: http
+ path: /metrics
+ interval: 30s
+ selector:
+ matchLabels:
+ app: thanos-compactor
\ No newline at end of file
diff --git a/kube/services/monitoring/thanos.yaml b/kube/services/monitoring/thanos.yaml
new file mode 100644
index 000000000..e5106f22f
--- /dev/null
+++ b/kube/services/monitoring/thanos.yaml
@@ -0,0 +1,19 @@
+type: S3
+config:
+ bucket: S3_BUCKET
+ endpoint: "s3.amazonaws.com"
+ region: us-east-1
+ insecure: false
+ aws_sdk_auth: true
+ signature_version2: false
+ put_user_metadata:
+ "X-Amz-Acl": "bucket-owner-full-control"
+ http_config:
+ idle_conn_timeout: 90s
+ response_header_timeout: 2m
+ insecure_skip_verify: false
+ trace:
+ enable: true
+ part_size: 134217728
+ sse_config:
+ type: "SSE-S3"
\ No newline at end of file
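This object-store config is what the Thanos store and compactor above read from the thanos-objstore-config secret mounted at /config/thanos.yaml. A minimal sketch for creating that secret after substituting the S3_BUCKET placeholder (the bucket variable is hypothetical):

thanos_bucket="example-thanos-bucket"   # hypothetical value
sed "s/S3_BUCKET/${thanos_bucket}/" kube/services/monitoring/thanos.yaml > /tmp/thanos.yaml
kubectl -n monitoring create secret generic thanos-objstore-config --from-file=thanos.yaml=/tmp/thanos.yaml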
diff --git a/kube/services/monitoring/values.yaml b/kube/services/monitoring/values.yaml
new file mode 100644
index 000000000..d93e5098a
--- /dev/null
+++ b/kube/services/monitoring/values.yaml
@@ -0,0 +1,3505 @@
+# Default values for kube-prometheus-stack.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+## Provide a name in place of kube-prometheus-stack for `app:` labels
+##
+nameOverride: ""
+
+## Override the deployment namespace
+##
+namespaceOverride: ""
+
+## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6
+##
+kubeTargetVersionOverride: ""
+
+## Allow kubeVersion to be overridden while creating the ingress
+##
+kubeVersionOverride: ""
+
+## Provide a name to substitute for the full names of resources
+##
+fullnameOverride: ""
+
+## Labels to apply to all resources
+##
+commonLabels: {}
+# scmhash: abc123
+# myLabel: aakkmd
+
+## Create default rules for monitoring the cluster
+##
+defaultRules:
+ create: true
+ rules:
+ alertmanager: true
+ etcd: true
+ configReloaders: true
+ general: true
+ k8s: true
+ kubeApiserverAvailability: true
+ kubeApiserverBurnrate: true
+ kubeApiserverHistogram: true
+ kubeApiserverSlos: true
+ kubelet: true
+ kubeProxy: true
+ kubePrometheusGeneral: true
+ kubePrometheusNodeRecording: true
+ kubernetesApps: true
+ kubernetesResources: true
+ kubernetesStorage: true
+ kubernetesSystem: true
+ kubeScheduler: true
+ kubeStateMetrics: true
+ network: true
+ node: true
+ nodeExporterAlerting: true
+ nodeExporterRecording: true
+ prometheus: true
+ prometheusOperator: true
+
+ ## Reduce app namespace alert scope
+ appNamespacesTarget: ".*"
+
+ ## Labels for default rules
+ labels: {}
+ ## Annotations for default rules
+ annotations: {}
+
+ ## Additional labels for PrometheusRule alerts
+ additionalRuleLabels: {}
+
+ ## Additional annotations for PrometheusRule alerts
+ additionalRuleAnnotations: {}
+
+ ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules.
+ runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks"
+
+ ## Disabled PrometheusRule alerts
+ disabled: {}
+ # KubeAPIDown: true
+ # NodeRAIDDegraded: true
+
+## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster.
+##
+# additionalPrometheusRules: []
+# - name: my-rule-file
+# groups:
+# - name: my_group
+# rules:
+# - record: my_record
+# expr: 100 * my_record
+
+## Provide custom recording or alerting rules to be deployed into the cluster.
+##
+additionalPrometheusRulesMap: {}
+# rule-name:
+# groups:
+# - name: my_group
+# rules:
+# - record: my_record
+# expr: 100 * my_record
+
+##
+global:
+ rbac:
+ create: true
+
+ ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs
+ ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
+ createAggregateClusterRoles: false
+ pspEnabled: false
+ pspAnnotations: {}
+ ## Specify pod annotations
+ ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
+ ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
+ ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
+ ##
+ # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+ # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
+ # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+
+ ## Reference to one or more secrets to be used when pulling images
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ imagePullSecrets: []
+ # - name: "image-pull-secret"
+ # or
+ # - "image-pull-secret"
+
+## Configuration for alertmanager
+## ref: https://prometheus.io/docs/alerting/alertmanager/
+##
+alertmanager:
+
+ ## Deploy alertmanager
+ ##
+ enabled: true
+
+ ## Annotations for Alertmanager
+ ##
+ annotations: {}
+
+ ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2
+ ##
+ apiVersion: v2
+
+ ## Service account for Alertmanager to use.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ ##
+ serviceAccount:
+ create: true
+ name: ""
+ annotations: {}
+
+ ## Configure pod disruption budgets for Alertmanager
+ ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
+ ## This configuration is immutable once created and will require the PDB to be deleted to be changed
+ ## https://github.com/kubernetes/kubernetes/issues/45398
+ ##
+ podDisruptionBudget:
+ enabled: false
+ minAvailable: 1
+ maxUnavailable: ""
+
+ ## Alertmanager configuration directives
+ ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
+ ## https://prometheus.io/webtools/alerting/routing-tree-editor/
+ ##
+ config:
+ global:
+ resolve_timeout: 5m
+ inhibit_rules:
+ - source_matchers:
+ - 'severity = critical'
+ target_matchers:
+ - 'severity =~ warning|info'
+ equal:
+ - 'namespace'
+ - 'alertname'
+ - source_matchers:
+ - 'severity = warning'
+ target_matchers:
+ - 'severity = info'
+ equal:
+ - 'namespace'
+ - 'alertname'
+ - source_matchers:
+ - 'alertname = InfoInhibitor'
+ target_matchers:
+ - 'severity = info'
+ equal:
+ - 'namespace'
+ route:
+ group_by: ['namespace']
+ group_wait: 30s
+ group_interval: 5m
+ repeat_interval: 12h
+ receiver: 'null'
+ routes:
+ - receiver: 'null'
+ matchers:
+ - alertname =~ "InfoInhibitor|Watchdog"
+ receivers:
+ - name: 'null'
+ templates:
+ - '/etc/alertmanager/config/*.tmpl'
+
+ ## Pass the Alertmanager configuration directives through Helm's templating
+ ## engine. If the Alertmanager configuration contains Alertmanager templates,
+ ## they'll need to be properly escaped so that they are not interpreted by
+ ## Helm
+ ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function
+ ## https://prometheus.io/docs/alerting/configuration/#tmpl_string
+ ## https://prometheus.io/docs/alerting/notifications/
+ ## https://prometheus.io/docs/alerting/notification_examples/
+ tplConfig: false
+
+ ## Alertmanager template files to format alerts
+ ## By default, templateFiles are placed in /etc/alertmanager/config/ and if
+ ## they have a .tmpl file suffix will be loaded. See config.templates above
+ ## to change, add other suffixes. If adding other suffixes, be sure to update
+ ## config.templates above to include those suffixes.
+ ## ref: https://prometheus.io/docs/alerting/notifications/
+ ## https://prometheus.io/docs/alerting/notification_examples/
+ ##
+ templateFiles: {}
+ #
+ ## An example template:
+ # template_1.tmpl: |-
+ # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }}
+ #
+ # {{ define "slack.myorg.text" }}
+ # {{- $root := . -}}
+ # {{ range .Alerts }}
+ # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}`
+ # *Cluster:* {{ template "cluster" $root }}
+ # *Description:* {{ .Annotations.description }}
+ # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:>
+ # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
+ # *Details:*
+ # {{ range .Labels.SortedPairs }} - *{{ .Name }}:* `{{ .Value }}`
+ # {{ end }}
+ # {{ end }}
+ # {{ end }}
+
+ ingress:
+ enabled: true
+
+ # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
+ # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
+ #ingressClassName: nginx
+
+ labels: {}
+
+ ## Redirect ingress to an additional defined port on the service
+ # servicePort: 8081
+
+ ## Hosts must be provided if Ingress is enabled.
+ ##
+ hosts: []
+ #- prometheus.emalinowskiv1.planx-pla.net
+ # - alertmanager.domain.com
+
+ ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
+ ##
+ paths: []
+ # - /
+
+ ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
+ ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
+ # pathType: ImplementationSpecific
+
+ ## TLS configuration for Alertmanager Ingress
+ ## Secret must be manually created in the namespace
+ ##
+ tls: []
+ # - secretName: alertmanager-general-tls
+ # hosts:
+ # - alertmanager.example.com
+
+ ## Configuration for Alertmanager secret
+ ##
+ secret:
+ annotations: {}
+
+ ## Configuration for creating an Ingress that will map to each Alertmanager replica service
+ ## alertmanager.servicePerReplica must be enabled
+ ##
+ ingressPerReplica:
+ enabled: false
+
+ # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
+ # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
+ # ingressClassName: nginx
+
+ annotations: {}
+ labels: {}
+
+ ## Final form of the hostname for each per replica ingress is
+ ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
+ ##
+ ## Prefix for the per replica ingress that will have `-$replicaNumber`
+ ## appended to the end
+ hostPrefix: ""
+ ## Domain that will be used for the per replica ingress
+ hostDomain: ""
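+ ## For example (hypothetical values), the settings below would produce
+ ## alertmanager-0.example.com, alertmanager-1.example.com, and so on:
+ # hostPrefix: "alertmanager"
+ # hostDomain: "example.com"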
+
+ ## Paths to use for ingress rules
+ ##
+ paths: []
+ # - /
+
+ ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
+ ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
+ # pathType: ImplementationSpecific
+
+ ## Secret name containing the TLS certificate for alertmanager per replica ingress
+ ## Secret must be manually created in the namespace
+ tlsSecretName: ""
+
+ ## Separated secret for each per replica Ingress. Can be used together with cert-manager
+ ##
+ tlsSecretPerReplica:
+ enabled: false
+ ## Final form of the secret for each per replica ingress is
+ ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
+ ##
+ prefix: "alertmanager"
+
+ ## Configuration for Alertmanager service
+ ##
+ service:
+ annotations: {}
+ labels: {}
+ clusterIP: ""
+
+ ## Port for Alertmanager Service to listen on
+ ##
+ port: 9093
+ ## To be used with a proxy extraContainer port
+ ##
+ targetPort: 9093
+ ## Port to expose on each node
+ ## Only used if service.type is 'NodePort'
+ ##
+ nodePort: 30903
+ ## List of IP addresses at which the Alertmanager service is available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+
+ ## Additional ports to open for Alertmanager service
+ additionalPorts: []
+ # additionalPorts:
+ # - name: authenticated
+ # port: 8081
+ # targetPort: 8081
+
+ externalIPs: []
+ loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+
+ ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+ ##
+ externalTrafficPolicy: Cluster
+
+ ## Service type
+ ##
+ type: ClusterIP
+
+ ## Configuration for creating a separate Service for each statefulset Alertmanager replica
+ ##
+ servicePerReplica:
+ enabled: false
+ annotations: {}
+
+ ## Port for Alertmanager Service per replica to listen on
+ ##
+ port: 9093
+
+ ## To be used with a proxy extraContainer port
+ targetPort: 9093
+
+ ## Port to expose on each node
+ ## Only used if servicePerReplica.type is 'NodePort'
+ ##
+ nodePort: 30904
+
+ ## Loadbalancer source IP ranges
+ ## Only used if servicePerReplica.type is "LoadBalancer"
+ loadBalancerSourceRanges: []
+
+ ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+ ##
+ externalTrafficPolicy: Cluster
+
+ ## Service type
+ ##
+ type: ClusterIP
+
+ ## If true, create a serviceMonitor for alertmanager
+ ##
+ serviceMonitor:
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+ selfMonitor: true
+
+ ## proxyUrl: URL of a proxy that should be used for scraping.
+ ##
+ proxyUrl: ""
+
+ ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
+ scheme: ""
+
+ ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
+ ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
+ tlsConfig: {}
+
+ bearerTokenFile:
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## Settings affecting alertmanagerSpec
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerspec
+ ##
+ alertmanagerSpec:
+ ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+ ## Metadata Labels and Annotations gets propagated to the Alertmanager pods.
+ ##
+ podMetadata: {}
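+ ## For example (hypothetical labels and annotations):
+ # podMetadata:
+ #   labels:
+ #     team: platform
+ #   annotations:
+ #     example.com/owner: "monitoring"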
+
+ ## Image of Alertmanager
+ ##
+ image:
+ repository: prometheus/alertmanager
+ tag: v0.24.0
+ sha: ""
+
+ ## If true, the user is responsible for providing a Secret with the Alertmanager configuration.
+ ## In that case the config section above (including templateFiles) is ignored and the configuration from the Secret is used instead.
+ ##
+ useExistingSecret: false
+
+ ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
+ ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
+ ##
+ secrets: []
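+ ## For example (hypothetical secret names), each mounted under /etc/alertmanager/secrets/:
+ # secrets:
+ #   - alertmanager-tls
+ #   - slack-webhook-url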
+
+ ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
+ ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
+ ##
+ configMaps: []
+
+ ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for
+ ## this Alertmanager instance. Defaults to 'alertmanager-<alertmanager-name>'. The secret is mounted into /etc/alertmanager/config.
+ ##
+ # configSecret:
+
+ ## WebTLSConfig defines the TLS parameters for HTTPS
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerwebspec
+ web: {}
+
+ ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with.
+ ##
+ alertmanagerConfigSelector: {}
+ ## Example which selects all alertmanagerConfig resources
+ ## with label "alertconfig" with values any of "example-config" or "example-config-2"
+ # alertmanagerConfigSelector:
+ # matchExpressions:
+ # - key: alertconfig
+ # operator: In
+ # values:
+ # - example-config
+ # - example-config-2
+ #
+ ## Example which selects all alertmanagerConfig resources with label "role" set to "example-config"
+ # alertmanagerConfigSelector:
+ # matchLabels:
+ # role: example-config
+
+ ## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace.
+ ##
+ alertmanagerConfigNamespaceSelector: {}
+ ## Example which selects all namespaces
+ ## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2"
+ # alertmanagerConfigNamespaceSelector:
+ # matchExpressions:
+ # - key: alertmanagerconfig
+ # operator: In
+ # values:
+ # - example-namespace
+ # - example-namespace-2
+
+ ## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled"
+ # alertmanagerConfigNamespaceSelector:
+ # matchLabels:
+ # alertmanagerconfig: enabled
+
+ ## AlertmanagerConfig to be used as the top-level configuration
+ ##
+ alertmanagerConfiguration: {}
+ ## Example which selects a global AlertmanagerConfig
+ # alertmanagerConfiguration:
+ # name: global-alertmanager-Configuration
+
+ ## Define Log Format
+ # Use logfmt (default) or json logging
+ logFormat: logfmt
+
+ ## Log level for Alertmanager to be configured with.
+ ##
+ logLevel: info
+
+ ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
+ ## running cluster equal to the expected size.
+ replicas: 1
+
+ ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
+ ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
+ ##
+ #retention: 2160h
+ retention: 120h
+
+ ## Storage is the definition of how storage will be used by the Alertmanager instances.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
+ ##
+ storage: {}
+ # volumeClaimTemplate:
+ # spec:
+ # storageClassName: gluster
+ # accessModes: ["ReadWriteOnce"]
+ # resources:
+ # requests:
+ # storage: 50Gi
+ # selector: {}
+
+
+ ## The external URL the Alertmanager instances will be available under. This is needed to generate correct URLs and is required if Alertmanager is not served from the root of a DNS name.
+ ##
+ externalUrl:
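+ ## For example (hypothetical hostname), when Alertmanager is served under the
+ ## /alertmanager/ route prefix configured below:
+ # externalUrl: "https://monitoring.example.com/alertmanager/"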
+
+ ## The route prefix Alertmanager registers HTTP handlers for. This is useful when ExternalURL is set and a proxy
+ ## rewrites the HTTP routes of a request, so the server serves requests under a different route prefix than the
+ ## one in ExternalURL (for example, when accessed via kubectl proxy).
+ ##
+ routePrefix: /alertmanager/
+
+ ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
+ ##
+ paused: false
+
+ ## Define which Nodes the Pods are scheduled on.
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+
+ ## Define resources requests and limits for single Pods.
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+ ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
+ ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
+ ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
+ ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
+ ##
+ podAntiAffinity: ""
+
+ ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
+ ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
+ ##
+ podAntiAffinityTopologyKey: kubernetes.io/hostname
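+ ## For example (illustrative), to require replicas to be spread across availability zones:
+ # podAntiAffinity: "hard"
+ # podAntiAffinityTopologyKey: topology.kubernetes.io/zone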
+
+ ## Assign custom affinity rules to the alertmanager instance
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ ##
+ affinity: {}
+ # nodeAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # nodeSelectorTerms:
+ # - matchExpressions:
+ # - key: kubernetes.io/e2e-az-name
+ # operator: In
+ # values:
+ # - e2e-az1
+ # - e2e-az2
+
+ ## If specified, the pod's tolerations.
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+
+ ## If specified, the pod's topology spread constraints.
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ##
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+ # labelSelector:
+ # matchLabels:
+ # app: alertmanager
+
+ ## SecurityContext holds pod-level security attributes and common container settings.
+ ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ##
+ securityContext:
+ runAsGroup: 2000
+ runAsNonRoot: true
+ runAsUser: 1000
+ fsGroup: 2000
+
+ ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP.
+ ## Note this is only for the Alertmanager UI, not the gossip communication.
+ ##
+ listenLocal: false
+
+ ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod.
+ ##
+ containers: []
+ # containers:
+ # - name: oauth-proxy
+ # image: quay.io/oauth2-proxy/oauth2-proxy:v7.1.2
+ # args:
+ # - --upstream=http://127.0.0.1:9093
+ # - --http-address=0.0.0.0:8081
+ # - ...
+ # ports:
+ # - containerPort: 8081
+ # name: oauth-proxy
+ # protocol: TCP
+ # resources: {}
+
+ # Additional volumes on the output StatefulSet definition.
+ volumes: []
+
+ # Additional VolumeMounts on the output StatefulSet definition.
+ volumeMounts: []
+
+ ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
+ ## (permissions, dir tree) on mounted volumes before starting prometheus
+ initContainers: []
+
+ ## Priority class assigned to the Pods
+ ##
+ priorityClassName: ""
+
+ ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
+ ##
+ additionalPeers: []
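+ ## For example (hypothetical peer address), to join an Alertmanager running outside this cluster:
+ # additionalPeers:
+ #   - "alertmanager.other-cluster.example.com:9094"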
+
+ ## PortName to use for Alertmanager.
+ ##
+ portName: "http-web"
+
+ ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918
+ ##
+ clusterAdvertiseAddress: false
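+ ## For example (hypothetical public address from the documentation range):
+ # clusterAdvertiseAddress: "203.0.113.10:9094"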
+
+ ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica.
+ ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
+ forceEnableClusterMode: false
+
+ ## ExtraSecret can be used to store various data in an extra secret
+ ## (use it for example to store hashed basic auth credentials)
+ extraSecret:
+ ## if not set, name will be auto generated
+ # name: ""
+ annotations: {}
+ data: {}
+ # auth: |
+ # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
+ # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
+
+## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
+##
+grafana:
+ enabled: true
+ namespaceOverride: ""
+
+ ## ForceDeployDatasources: create the datasource configmap even if the grafana deployment has been disabled
+ ##
+ forceDeployDatasources: false
+
+ ## ForceDeployDashboards: create the dashboards configmap even if the grafana deployment has been disabled
+ ##
+ forceDeployDashboards: false
+
+ ## Deploy default dashboards
+ ##
+ defaultDashboardsEnabled: true
+
+ ## Timezone for the default dashboards
+ ## Other options are: browser or a specific timezone, e.g. Europe/Luxembourg
+ ##
+ defaultDashboardsTimezone: utc
+
+ adminPassword: prom-operator #pragma: allowlist secret
+
+ rbac:
+ ## If true, Grafana PSPs will be created
+ ##
+ pspEnabled: false
+
+ ingress:
+ ## If true, Grafana Ingress will be created
+ ##
+ enabled: true
+
+ ## IngressClassName for Grafana Ingress.
+ ## Should be provided if Ingress is enabled.
+ ##
+ ingressClassName: nginx
+
+ ## Annotations for Grafana Ingress
+ ##
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/use-regex: "true"
+
+ ## Labels to be added to the Ingress
+ ##
+ labels: {}
+
+ ## Hostnames.
+ ## Must be provided if Ingress is enabled.
+ ##
+ # hosts:
+ # - grafana.domain.com
+ hosts: []
+ # - data.bloodpac.org
+
+ ## Path for grafana ingress
+ path: /grafana/?(.*)
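+ ## With the rewrite-target and use-regex annotations above, a request to
+ ## /grafana/<subpath> is forwarded to Grafana as /<subpath>.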
+
+ grafana.ini:
+ server:
+ root_url: http://localhost:3000/grafana # this host can be localhost
+
+ ## TLS configuration for grafana Ingress
+ ## Secret must be manually created in the namespace
+ ##
+ tls: []
+ # - secretName: grafana-general-tls
+ # hosts:
+ # - grafana.example.com
+
+ sidecar:
+ dashboards:
+ enabled: true
+ label: grafana_dashboard
+ labelValue: "1"
+
+ ## Annotations for Grafana dashboard configmaps
+ ##
+ annotations: {}
+ multicluster:
+ global:
+ enabled: false
+ etcd:
+ enabled: false
+ provider:
+ allowUiUpdates: false
+ datasources:
+ enabled: true
+ defaultDatasourceEnabled: true
+
+ uid: prometheus
+
+ ## URL of prometheus datasource
+ ##
+ # url: http://prometheus-stack-prometheus:9090/
+
+ # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default
+ # defaultDatasourceScrapeInterval: 15s
+
+ ## Annotations for Grafana datasource configmaps
+ ##
+ annotations: {}
+
+ ## Create datasource for each Pod of Prometheus StatefulSet;
+ ## this uses headless service `prometheus-operated` which is
+ ## created by Prometheus Operator
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286
+ createPrometheusReplicasDatasources: false
+ label: grafana_datasource
+ labelValue: "1"
+
+ ## Field with internal link pointing to existing data source in Grafana.
+ ## Can be provisioned via additionalDataSources
+ exemplarTraceIdDestinations: {}
+ # datasourceUid: Jaeger
+ # traceIdLabelName: trace_id
+
+ extraConfigmapMounts: []
+ # - name: certs-configmap
+ # mountPath: /etc/grafana/ssl/
+ # configMap: certs-configmap
+ # readOnly: true
+
+ deleteDatasources: []
+ # - name: example-datasource
+ # orgId: 1
+
+ ## Configure additional grafana datasources (passed through tpl)
+ ## ref: http://docs.grafana.org/administration/provisioning/#datasources
+ additionalDataSources: []
+ # - name: prometheus-sample
+ # access: proxy
+ # basicAuth: true
+ # basicAuthPassword: pass
+ # basicAuthUser: daco
+ # editable: false
+ # jsonData:
+ # tlsSkipVerify: true
+ # orgId: 1
+ # type: prometheus
+ # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090
+ # version: 1
+
+ ## Passed to grafana subchart and used by servicemonitor below
+ ##
+ service:
+ portName: http-web
+
+ serviceMonitor:
+ # If true, a ServiceMonitor resource is created for the Prometheus Operator
+ # https://github.com/coreos/prometheus-operator
+ #
+ enabled: true
+
+ # Path to use for scraping metrics. Might be different if server.root_url is set
+ # in grafana.ini
+ path: "/metrics"
+
+ # namespace: monitoring (defaults to use the namespace this chart is deployed to)
+
+ # labels for the ServiceMonitor
+ labels: {}
+
+ # Scrape interval. If not set, the Prometheus default scrape interval is used.
+ #
+ interval: ""
+ scheme: http
+ tlsConfig: {}
+ scrapeTimeout: 30s
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+## Component scraping the kube api server
+##
+kubeApiServer:
+ enabled: true
+ tlsConfig:
+ serverName: kubernetes
+ insecureSkipVerify: false
+ serviceMonitor:
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+ ## proxyUrl: URL of a proxy that should be used for scraping.
+ ##
+ proxyUrl: ""
+
+ jobLabel: component
+ selector:
+ matchLabels:
+ component: apiserver
+ provider: kubernetes
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ metricRelabelings:
+ # Drop excessively noisy apiserver buckets.
+ - action: drop
+ regex: apiserver_request_duration_seconds_bucket;(0.15|0.2|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2|3|3.5|4|4.5|6|7|8|9|15|25|40|50)
+ sourceLabels:
+ - __name__
+ - le
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ relabelings: []
+ # - sourceLabels:
+ # - __meta_kubernetes_namespace
+ # - __meta_kubernetes_service_name
+ # - __meta_kubernetes_endpoint_port_name
+ # action: keep
+ # regex: default;kubernetes;https
+ # - targetLabel: __address__
+ # replacement: kubernetes.default.svc:443
+
+ ## Additional labels
+ ##
+ additionalLabels: {}
+ # foo: bar
+
+## Component scraping the kubelet and kubelet-hosted cAdvisor
+##
+kubelet:
+ enabled: true
+ namespace: kube-system
+
+ serviceMonitor:
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+
+ ## proxyUrl: URL of a proxy that should be used for scraping.
+ ##
+ proxyUrl: ""
+
+ ## Enable scraping the kubelet over https. For requirements to enable this see
+ ## https://github.com/prometheus-operator/prometheus-operator/issues/926
+ ##
+ https: true
+
+ ## Enable scraping /metrics/cadvisor from kubelet's service
+ ##
+ cAdvisor: true
+
+ ## Enable scraping /metrics/probes from kubelet's service
+ ##
+ probes: true
+
+ ## Enable scraping /metrics/resource from kubelet's service
+ ## This is disabled by default because container metrics are already exposed by cAdvisor
+ ##
+ resource: false
+ # From Kubernetes 1.18, /metrics/resource/v1alpha1 was renamed to /metrics/resource
+ resourcePath: "/metrics/resource/v1alpha1"
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ cAdvisorMetricRelabelings:
+ # Drop less useful container CPU metrics.
+ - sourceLabels: [__name__]
+ action: drop
+ regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)'
+ # Drop less useful container / always zero filesystem metrics.
+ - sourceLabels: [__name__]
+ action: drop
+ regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)'
+ # Drop less useful / always zero container memory metrics.
+ - sourceLabels: [__name__]
+ action: drop
+ regex: 'container_memory_(mapped_file|swap)'
+ # Drop less useful container process metrics.
+ - sourceLabels: [__name__]
+ action: drop
+ regex: 'container_(file_descriptors|tasks_state|threads_max)'
+ # Drop container spec metrics that overlap with kube-state-metrics.
+ - sourceLabels: [__name__]
+ action: drop
+ regex: 'container_spec.*'
+ # Drop cgroup metrics with no pod.
+ - sourceLabels: [id, pod]
+ action: drop
+ regex: '.+;'
+ # - sourceLabels: [__name__, image]
+ # separator: ;
+ # regex: container_([a-z_]+);
+ # replacement: $1
+ # action: drop
+ # - sourceLabels: [__name__]
+ # separator: ;
+ # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
+ # replacement: $1
+ # action: drop
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ probesMetricRelabelings: []
+ # - sourceLabels: [__name__, image]
+ # separator: ;
+ # regex: container_([a-z_]+);
+ # replacement: $1
+ # action: drop
+ # - sourceLabels: [__name__]
+ # separator: ;
+ # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
+ # replacement: $1
+ # action: drop
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ ## metrics_path is required to match upstream rules and charts
+ cAdvisorRelabelings:
+ - sourceLabels: [__metrics_path__]
+ targetLabel: metrics_path
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ probesRelabelings:
+ - sourceLabels: [__metrics_path__]
+ targetLabel: metrics_path
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ resourceRelabelings:
+ - sourceLabels: [__metrics_path__]
+ targetLabel: metrics_path
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ metricRelabelings: []
+ # - sourceLabels: [__name__, image]
+ # separator: ;
+ # regex: container_([a-z_]+);
+ # replacement: $1
+ # action: drop
+ # - sourceLabels: [__name__]
+ # separator: ;
+ # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
+ # replacement: $1
+ # action: drop
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ ## metrics_path is required to match upstream rules and charts
+ relabelings:
+ - sourceLabels: [__metrics_path__]
+ targetLabel: metrics_path
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## Additional labels
+ ##
+ additionalLabels: {}
+ # foo: bar
+
+## Component scraping the kube controller manager
+##
+kubeControllerManager:
+ enabled: true
+
+ ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
+ ##
+ endpoints: []
+ # - 10.141.4.22
+ # - 10.141.4.23
+ # - 10.141.4.24
+
+ ## If using kubeControllerManager.endpoints only the port and targetPort are used
+ ##
+ service:
+ enabled: true
+ ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
+ ## of default port in Kubernetes 1.22.
+ ##
+ port: null
+ targetPort: null
+ # selector:
+ # component: kube-controller-manager
+
+ serviceMonitor:
+ enabled: true
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+
+ ## proxyUrl: URL of a proxy that should be used for scraping.
+ ##
+ proxyUrl: ""
+
+ ## Enable scraping kube-controller-manager over https.
+ ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
+ ## If null or unset, the value is determined dynamically based on target Kubernetes version.
+ ##
+ https: null
+
+ # Skip TLS certificate validation when scraping
+ insecureSkipVerify: null
+
+ # Name of the server to use when validating TLS certificate
+ serverName: null
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## Additional labels
+ ##
+ additionalLabels: {}
+ # foo: bar
+
+## Component scraping coreDns. Use either this or kubeDns
+##
+coreDns:
+ enabled: true
+ service:
+ port: 9153
+ targetPort: 9153
+ # selector:
+ # k8s-app: kube-dns
+ serviceMonitor:
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+
+ ## proxyUrl: URL of a proxy that should be used for scraping.
+ ##
+ proxyUrl: ""
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## Additional labels
+ ##
+ additionalLabels: {}
+ # foo: bar
+
+## Component scraping kubeDns. Use either this or coreDns
+##
+kubeDns:
+ enabled: false
+ service:
+ dnsmasq:
+ port: 10054
+ targetPort: 10054
+ skydns:
+ port: 10055
+ targetPort: 10055
+ # selector:
+ # k8s-app: kube-dns
+ serviceMonitor:
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+
+ ## proxyUrl: URL of a proxy that should be used for scraping.
+ ##
+ proxyUrl: ""
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ dnsmasqMetricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ dnsmasqRelabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## Additional labels
+ ##
+ additionalLabels: {}
+ # foo: bar
+
+## Component scraping etcd
+##
+kubeEtcd:
+ enabled: true
+
+ ## If your etcd is not deployed as a pod, specify IPs it can be found on
+ ##
+ endpoints: []
+ # - 10.141.4.22
+ # - 10.141.4.23
+ # - 10.141.4.24
+
+ ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used
+ ##
+ service:
+ enabled: true
+ port: 2379
+ targetPort: 2379
+ # selector:
+ # component: etcd
+
+ ## Configure secure access to the etcd cluster by loading a secret into prometheus and
+ ## specifying security configuration below. For example, with a secret named etcd-client-cert
+ ##
+ ## serviceMonitor:
+ ## scheme: https
+ ## insecureSkipVerify: false
+ ## serverName: localhost
+ ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
+ ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client
+ ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
+ ##
+ serviceMonitor:
+ enabled: true
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+ ## proxyUrl: URL of a proxy that should be used for scraping.
+ ##
+ proxyUrl: ""
+ scheme: http
+ insecureSkipVerify: false
+ serverName: ""
+ caFile: ""
+ certFile: ""
+ keyFile: ""
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## Additional labels
+ ##
+ additionalLabels: {}
+ # foo: bar
+
+## Component scraping kube scheduler
+##
+kubeScheduler:
+ enabled: true
+
+ ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
+ ##
+ endpoints: []
+ # - 10.141.4.22
+ # - 10.141.4.23
+ # - 10.141.4.24
+
+ ## If using kubeScheduler.endpoints only the port and targetPort are used
+ ##
+ service:
+ enabled: true
+ ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
+ ## of default port in Kubernetes 1.23.
+ ##
+ port: null
+ targetPort: null
+ # selector:
+ # component: kube-scheduler
+
+ serviceMonitor:
+ enabled: true
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+ ## proxyUrl: URL of a proxy that should be used for scraping.
+ ##
+ proxyUrl: ""
+ ## Enable scraping kube-scheduler over https.
+ ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
+ ## If null or unset, the value is determined dynamically based on target Kubernetes version.
+ ##
+ https: null
+
+ ## Skip TLS certificate validation when scraping
+ insecureSkipVerify: null
+
+ ## Name of the server to use when validating TLS certificate
+ serverName: null
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## Additional labels
+ ##
+ additionalLabels: {}
+ # foo: bar
+
+## Component scraping kube proxy
+##
+kubeProxy:
+ enabled: true
+
+ ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
+ ##
+ endpoints: []
+ # - 10.141.4.22
+ # - 10.141.4.23
+ # - 10.141.4.24
+
+ service:
+ enabled: true
+ port: 10249
+ targetPort: 10249
+ selector:
+ k8s-app: kube-proxy
+
+ serviceMonitor:
+ enabled: true
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+
+ ## proxyUrl: URL of a proxy that should be used for scraping.
+ ##
+ proxyUrl: ""
+
+ ## Enable scraping kube-proxy over https.
+ ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
+ ##
+ https: false
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ relabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## Additional labels
+ ##
+ additionalLabels: {}
+ # foo: bar
+
+## Component scraping kube state metrics
+##
+kubeStateMetrics:
+ enabled: true
+
+## Configuration for kube-state-metrics subchart
+##
+kube-state-metrics:
+ namespaceOverride: ""
+ rbac:
+ create: true
+ releaseLabel: true
+ prometheus:
+ monitor:
+ enabled: true
+
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+
+ ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used.
+ ##
+ scrapeTimeout: ""
+
+ ## proxyUrl: URL of a proxy that should be used for scraping.
+ ##
+ proxyUrl: ""
+
+ # Keep labels from scraped data, overriding server-side labels
+ ##
+ honorLabels: true
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ selfMonitor:
+ enabled: false
+
+## Deploy node exporter as a daemonset to all nodes
+##
+nodeExporter:
+ enabled: true
+
+## Configuration for prometheus-node-exporter subchart
+##
+prometheus-node-exporter:
+ namespaceOverride: ""
+ podLabels:
+ ## Add the 'node-exporter' label to be used by the ServiceMonitor, matching common usage in rules and Grafana dashboards
+ ##
+ jobLabel: node-exporter
+ extraArgs:
+ - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
+ - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
+ service:
+ portName: http-metrics
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: "eks.amazonaws.com/compute-type"
+ operator: NotIn
+ values:
+ - fargate
+ prometheus:
+ monitor:
+ enabled: true
+
+ jobLabel: jobLabel
+
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+
+ ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used.
+ ##
+ scrapeTimeout: ""
+
+ ## proxyUrl: URL of a proxy that should be used for scraping.
+ ##
+ proxyUrl: ""
+
+ ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ metricRelabelings: []
+ # - sourceLabels: [__name__]
+ # separator: ;
+ # regex: ^node_mountstats_nfs_(event|operations|transport)_.+
+ # replacement: $1
+ # action: drop
+
+ ## RelabelConfigs to apply to samples before scraping
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
+ ##
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+ rbac:
+ ## If true, create PSPs for node-exporter
+ ##
+ pspEnabled: false
+
+## Manages Prometheus and Alertmanager components
+##
+prometheusOperator:
+ enabled: true
+
+ ## Prometheus-Operator v0.39.0 and later support TLS natively.
+ ##
+ tls:
+ enabled: true
+ # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
+ tlsMinVersion: VersionTLS13
+ # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
+ internalPort: 10250
+
+ ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted
+ ## rules from making their way into prometheus and potentially preventing the container from starting
+ admissionWebhooks:
+ failurePolicy: Fail
+ enabled: true
+ ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate.
+ ## If unspecified, system trust roots on the apiserver are used.
+ caBundle: ""
+ ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data.
+ ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
+ ## certs ahead of time if you wish.
+ ##
+ patch:
+ enabled: true
+ image:
+ repository: ingress-nginx/kube-webhook-certgen
+ tag: v1.1.1
+ sha: ""
+ pullPolicy: IfNotPresent
+ resources: {}
+ ## Provide a priority class name to the webhook patching job
+ ##
+ priorityClassName: ""
+ podAnnotations: {}
+ nodeSelector: {}
+ affinity: {}
+ tolerations: []
+
+ ## SecurityContext holds pod-level security attributes and common container settings.
+ ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ##
+ securityContext:
+ runAsGroup: 2000
+ runAsNonRoot: true
+ runAsUser: 2000
+
+ # Use certmanager to generate webhook certs
+ certManager:
+ enabled: false
+ # self-signed root certificate
+ rootCert:
+ duration: "" # default to be 5y
+ admissionCert:
+ duration: "" # default to be 1y
+ # issuerRef:
+ # name: "issuer"
+ # kind: "ClusterIssuer"
+
+ ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list).
+ ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration
+ ##
+ namespaces: {}
+ # releaseNamespace: true
+ # additional:
+ # - kube-system
+
+ ## Namespaces not to scope the interaction of the Prometheus Operator (deny list).
+ ##
+ denyNamespaces: []
+
+ ## Filter namespaces to look for prometheus-operator custom resources
+ ##
+ alertmanagerInstanceNamespaces: []
+ prometheusInstanceNamespaces: []
+ thanosRulerInstanceNamespaces: []
+
+ ## The clusterDomain value will be added to the cluster.peer option of the alertmanager.
+ ## Without this option, cluster.peer defaults to alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094.
+ ## With this option set, cluster.peer becomes alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094.
+ ##
+ # clusterDomain: "cluster.local"
+
+ ## Service account for the Prometheus Operator to use.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ ##
+ serviceAccount:
+ create: true
+ name: ""
+
+ ## Configuration for Prometheus operator service
+ ##
+ service:
+ annotations: {}
+ labels: {}
+ clusterIP: ""
+
+ ## Port to expose on each node
+ ## Only used if service.type is 'NodePort'
+ ##
+ nodePort: 30080
+
+ nodePortTls: 30443
+
+ ## Additional ports to open for Prometheus service
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
+ ##
+ additionalPorts: []
+
+ ## Loadbalancer IP
+ ## Only use if service.type is "LoadBalancer"
+ ##
+ loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+
+ ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+ ##
+ externalTrafficPolicy: Cluster
+
+ ## Service type
+ ## NodePort, ClusterIP, LoadBalancer
+ ##
+ type: ClusterIP
+
+ ## List of IP addresses at which the Prometheus server service is available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+ externalIPs: []
+
+ ## Annotations to add to the operator deployment
+ ##
+ annotations: {}
+
+ ## Labels to add to the operator pod
+ ##
+ podLabels: {}
+
+ ## Annotations to add to the operator pod
+ ##
+ podAnnotations: {}
+
+ ## Assign a PriorityClassName to pods if set
+ # priorityClassName: ""
+
+ ## Define Log Format
+ # Use logfmt (default) or json logging
+ # logFormat: logfmt
+
+ ## Decrease log verbosity to errors only
+ # logLevel: error
+
+ ## If true, the operator will create and maintain a service for scraping kubelets
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md
+ ##
+ kubeletService:
+ enabled: true
+ namespace: kube-system
+ ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default
+ name: ""
+
+ ## Create a servicemonitor for the operator
+ ##
+ serviceMonitor:
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+ ## Scrape timeout. If not set, the Prometheus default scrape timeout is used.
+ scrapeTimeout: ""
+ selfMonitor: true
+
+ ## Metric relabel configs to apply to samples before ingestion.
+ ##
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ # relabel configs to apply to samples before ingestion.
+ ##
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## Resource limits & requests
+ ##
+ resources: {}
+ # limits:
+ # cpu: 200m
+ # memory: 200Mi
+ # requests:
+ # cpu: 100m
+ # memory: 100Mi
+
+ # Required for use in managed Kubernetes clusters (such as AWS EKS) with a custom CNI (such as Calico),
+ # because the AWS-managed control plane cannot reach the pods' IP CIDR, so admission webhooks would not work otherwise.
+ ##
+ hostNetwork: false
+
+ ## Define which Nodes the Pods are scheduled on.
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+
+ ## Tolerations for use with node taints
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+
+ ## Assign custom affinity rules to the prometheus operator
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ ##
+ affinity: {}
+ # nodeAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # nodeSelectorTerms:
+ # - matchExpressions:
+ # - key: kubernetes.io/e2e-az-name
+ # operator: In
+ # values:
+ # - e2e-az1
+ # - e2e-az2
+ dnsConfig: {}
+ # nameservers:
+ # - 1.2.3.4
+ # searches:
+ # - ns1.svc.cluster-domain.example
+ # - my.dns.search.suffix
+ # options:
+ # - name: ndots
+ # value: "2"
+ # - name: edns0
+ securityContext:
+ fsGroup: 65534
+ runAsGroup: 65534
+ runAsNonRoot: true
+ runAsUser: 65534
+
+ ## Container-specific security context configuration
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ ##
+ containerSecurityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+
+ ## Prometheus-operator image
+ ##
+ image:
+ repository: prometheus-operator/prometheus-operator
+ tag: v0.57.0
+ sha: ""
+ pullPolicy: IfNotPresent
+
+ ## Prometheus image to use for prometheuses managed by the operator
+ ##
+ # prometheusDefaultBaseImage: quay.io/prometheus/prometheus
+
+ ## Alertmanager image to use for alertmanagers managed by the operator
+ ##
+ # alertmanagerDefaultBaseImage: quay.io/prometheus/alertmanager
+
+ ## Prometheus-config-reloader
+ ##
+ prometheusConfigReloader:
+ # image to use for config and rule reloading
+ image:
+ repository: prometheus-operator/prometheus-config-reloader
+ tag: v0.57.0
+ sha: ""
+
+ # resource config for prometheusConfigReloader
+ resources:
+ requests:
+ cpu: 200m
+ memory: 50Mi
+ limits:
+ cpu: 200m
+ memory: 50Mi
+
+ ## Thanos side-car image when configured
+ ##
+ thanosImage:
+ repository: thanos/thanos
+ tag: v0.25.2
+ sha: ""
+
+ ## Set a Field Selector to filter watched secrets
+ ##
+ secretFieldSelector: ""
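+ ## For example (illustrative), to ignore docker config secrets when watching:
+ # secretFieldSelector: "type!=kubernetes.io/dockercfg"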
+
+## Deploy a Prometheus instance
+##
+prometheus:
+
+ enabled: true
+
+ ## Annotations for Prometheus
+ ##
+ annotations: {}
+
+ ## Service account for Prometheuses to use.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ ##
+ serviceAccount:
+ create: false
+ name: "thanos"
+ annotations: {}
+
+ # Service for Thanos service discovery on the sidecar.
+ # Enabling this lets Thanos Query use
+ # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local`
+ # to discover the Thanos sidecars on the Prometheus pods.
+ # (Please remember to substitute ${kube-prometheus-stack.fullname} and ${namespace}; do not just copy and paste!)
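+ # For example, with a hypothetical release fullname "kube-prom-stack" deployed in the
+ # "monitoring" namespace, the flag would be:
+ # --store=dnssrv+_grpc._tcp.kube-prom-stack-thanos-discovery.monitoring.svc.cluster.local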
+ thanosService:
+ enabled: true
+ annotations: {}
+ labels: {}
+
+ ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+ ##
+ externalTrafficPolicy: Cluster
+
+ ## Service type
+ ##
+ type: ClusterIP
+
+ ## gRPC port config
+ portName: grpc
+ port: 10901
+ targetPort: "grpc"
+
+ ## HTTP port config (for metrics)
+ httpPortName: http
+ httpPort: 10902
+ targetHttpPort: "http"
+
+ ## ClusterIP to assign
+ # Default is to make this a headless service ("None")
+ clusterIP: "None"
+
+ ## Port to expose on each node, if service type is NodePort
+ ##
+ nodePort: 30901
+ httpNodePort: 30902
+
+ # ServiceMonitor to scrape Sidecar metrics
+ # Needs thanosService to be enabled as well
+ thanosServiceMonitor:
+ enabled: true
+ interval: ""
+
+ ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
+ scheme: ""
+
+ ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
+ ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
+ tlsConfig: {}
+
+ bearerTokenFile:
+
+ ## Metric relabel configs to apply to samples before ingestion.
+ metricRelabelings: []
+
+ ## relabel configs to apply to samples before ingestion.
+ relabelings: []
+
+ # Service for external access to sidecar
+ # Enabling this creates a service to expose thanos-sidecar outside the cluster.
+ thanosServiceExternal:
+ enabled: false
+ annotations: {}
+ labels: {}
+ loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+
+ ## gRPC port config
+ portName: grpc
+ port: 10901
+ targetPort: "grpc"
+
+ ## HTTP port config (for metrics)
+ httpPortName: http
+ httpPort: 10902
+ targetHttpPort: "http"
+
+ ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+ ##
+ externalTrafficPolicy: Cluster
+
+ ## Service type
+ ##
+ type: LoadBalancer
+
+ ## Port to expose on each node
+ ##
+ nodePort: 30901
+ httpNodePort: 30902
+
+ ## Configuration for Prometheus service
+ ##
+ service:
+ annotations: {}
+ labels: {}
+ clusterIP: ""
+
+ ## Port for Prometheus Service to listen on
+ ##
+ port: 9090
+
+ ## To be used with a proxy extraContainer port
+ targetPort: 9090
+
+ ## List of IP addresses at which the Prometheus server service is available
+ ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+ ##
+ externalIPs: []
+
+ ## Port to expose on each node
+ ## Only used if service.type is 'NodePort'
+ ##
+ nodePort: 30090
+
+ ## Loadbalancer IP
+ ## Only use if service.type is "LoadBalancer"
+ loadBalancerIP: ""
+ loadBalancerSourceRanges: []
+
+ ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+ ##
+ externalTrafficPolicy: Cluster
+
+ ## Service type
+ ##
+ type: ClusterIP
+
+ ## Additional port to define in the Service
+ additionalPorts: []
+ # additionalPorts:
+ # - name: authenticated
+ # port: 8081
+ # targetPort: 8081
+
+ ## If true, the Service considers all endpoints "ready" even if the Pods themselves are not
+ ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
+ publishNotReadyAddresses: false
+
+ sessionAffinity: ""
+
+ ## Configuration for creating a separate Service for each statefulset Prometheus replica
+ ##
+ servicePerReplica:
+ enabled: false
+ annotations: {}
+
+ ## Port for Prometheus Service per replica to listen on
+ ##
+ port: 9090
+
+ ## To be used with a proxy extraContainer port
+ targetPort: 9090
+
+ ## Port to expose on each node
+ ## Only used if servicePerReplica.type is 'NodePort'
+ ##
+ nodePort: 30091
+
+ ## Loadbalancer source IP ranges
+ ## Only used if servicePerReplica.type is "LoadBalancer"
+ loadBalancerSourceRanges: []
+
+ ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+ ##
+ externalTrafficPolicy: Cluster
+
+ ## Service type
+ ##
+ type: ClusterIP
+
+ ## Configure pod disruption budgets for Prometheus
+ ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
+ ## This configuration is immutable once created and will require the PDB to be deleted to be changed
+ ## https://github.com/kubernetes/kubernetes/issues/45398
+ ##
+ podDisruptionBudget:
+ enabled: false
+ minAvailable: 1
+ maxUnavailable: ""
+
+ # Ingress exposes thanos sidecar outside the cluster
+ thanosIngress:
+ enabled: false
+
+ # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
+ # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
+ # ingressClassName: nginx
+
+ annotations: {}
+ labels: {}
+ servicePort: 10901
+
+ ## Port to expose on each node
+ ## Only used if service.type is 'NodePort'
+ ##
+ nodePort: 30901
+
+ ## Hosts must be provided if Ingress is enabled.
+ ##
+ hosts: []
+ # - thanos-gateway.domain.com
+
+ ## Paths to use for ingress rules
+ ##
+ paths: []
+ # - /
+
+ ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
+ ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
+ # pathType: ImplementationSpecific
+
+ ## TLS configuration for Thanos Ingress
+ ## Secret must be manually created in the namespace
+ ##
+ tls: []
+ # - secretName: thanos-gateway-tls
+ # hosts:
+ # - thanos-gateway.domain.com
+ #
+
+ ## ExtraSecret can be used to store various data in an extra secret
+ ## (use it for example to store hashed basic auth credentials)
+ extraSecret:
+ ## if not set, name will be auto generated
+ # name: ""
+ annotations: {}
+ data: {}
+ # auth: |
+ # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
+ # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
+
+ ingress:
+ enabled: false
+
+ # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
+ # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
+ # ingressClassName: nginx
+
+ annotations: {}
+ labels: {}
+
+ ## Redirect ingress to an additional defined port on the service
+ # servicePort: 8081
+
+ ## Hostnames.
+ ## Must be provided if Ingress is enabled.
+ ##
+ # hosts:
+ # - prometheus.domain.com
+ hosts: []
+
+ ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
+ ##
+ paths: []
+ # - /
+
+ ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
+ ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
+ # pathType: ImplementationSpecific
+
+ ## TLS configuration for Prometheus Ingress
+ ## Secret must be manually created in the namespace
+ ##
+ tls: []
+ # - secretName: prometheus-general-tls
+ # hosts:
+ # - prometheus.example.com
+
+ ## Configuration for creating an Ingress that will map to each Prometheus replica service
+ ## prometheus.servicePerReplica must be enabled
+ ##
+ ingressPerReplica:
+ enabled: false
+
+ # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
+ # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
+ # ingressClassName: nginx
+
+ annotations: {}
+ labels: {}
+
+ ## Final form of the hostname for each per replica ingress is
+ ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
+ ##
+ ## Prefix for the per replica ingress that will have `-$replicaNumber`
+ ## appended to the end
+ hostPrefix: ""
+ ## Domain that will be used for the per replica ingress
+ hostDomain: ""
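+ ## For illustration only (placeholder values): with the settings below, replica 0 would be
+ ## served at prometheus-0.example.com.
+ # hostPrefix: "prometheus"
+ # hostDomain: "example.com"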
+
+ ## Paths to use for ingress rules
+ ##
+ paths: []
+ # - /
+
+ ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
+ ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
+ # pathType: ImplementationSpecific
+
+ ## Secret name containing the TLS certificate for Prometheus per replica ingress
+ ## Secret must be manually created in the namespace
+ tlsSecretName: ""
+
+ ## Separated secret for each per replica Ingress. Can be used together with cert-manager
+ ##
+ tlsSecretPerReplica:
+ enabled: false
+ ## Final form of the secret for each per replica ingress is
+ ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
+ ##
+ prefix: "prometheus"
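+ ## For illustration: with the default prefix "prometheus", replica 0 would look for a TLS
+ ## secret named prometheus-0 in this namespace.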
+
+ ## Configure additional options for default pod security policy for Prometheus
+ ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+ podSecurityPolicy:
+ allowedCapabilities: []
+ allowedHostPaths: []
+ volumes: []
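+ ## A minimal sketch with placeholder values (note that the PodSecurityPolicy API is removed in Kubernetes 1.25+):
+ # allowedHostPaths:
+ #   - pathPrefix: /var/log
+ #     readOnly: true
+ # volumes:
+ #   - configMap
+ #   - emptyDir
+ #   - secret
+ #   - persistentVolumeClaim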
+
+ serviceMonitor:
+ ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+ ##
+ interval: ""
+ selfMonitor: true
+
+ ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
+ scheme: ""
+
+ ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
+ ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
+ tlsConfig: {}
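+ ## A minimal sketch for istio mTLS; the certificate paths below assume istio certs are mounted
+ ## at /etc/istio-certs and should be adjusted to your mesh setup.
+ # tlsConfig:
+ #   caFile: /etc/istio-certs/root-cert.pem
+ #   certFile: /etc/istio-certs/cert-chain.pem
+ #   keyFile: /etc/istio-certs/key.pem
+ #   insecureSkipVerify: true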
+
+ bearerTokenFile:
+
+ ## Metric relabel configs to apply to samples before ingestion.
+ ##
+ metricRelabelings: []
+ # - action: keep
+ # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+ # sourceLabels: [__name__]
+
+ ## Relabel configs to apply to samples before ingestion.
+ ##
+ relabelings: []
+ # - sourceLabels: [__meta_kubernetes_pod_node_name]
+ # separator: ;
+ # regex: ^(.*)$
+ # targetLabel: nodename
+ # replacement: $1
+ # action: replace
+
+ ## Settings affecting prometheusSpec
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheusspec
+ ##
+ prometheusSpec:
+ ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
+ ##
+ disableCompaction: false
+ ## APIServerConfig
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#apiserverconfig
+ ##
+ apiserverConfig: {}
+
+ ## Interval between consecutive scrapes.
+ ## Defaults to 30s.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
+ ##
+ scrapeInterval: ""
+
+ ## Number of seconds to wait for target to respond before erroring
+ ##
+ scrapeTimeout: ""
+
+ ## Interval between consecutive evaluations.
+ ##
+ evaluationInterval: ""
+
+ ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
+ ##
+ listenLocal: false
+
+ ## EnableAdminAPI enables the Prometheus administrative HTTP API, which includes functionality such as deleting time series.
+ ## This is disabled by default.
+ ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
+ ##
+ enableAdminAPI: false
+
+ ## WebTLSConfig defines the TLS parameters for HTTPS
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#webtlsconfig
+ web: {}
+
+ # EnableFeatures enables access to Prometheus features that are disabled by default.
+ # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/
+ enableFeatures: []
+ # - exemplar-storage
+
+ ## Image of Prometheus.
+ ##
+ image:
+ repository: prometheus/prometheus
+ tag: v2.36.1
+ sha: ""
+
+ ## Tolerations for use with node taints
+ ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+ ##
+ tolerations: []
+ # - key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+
+ ## If specified, the pod's topology spread constraints.
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+ ##
+ topologySpreadConstraints: []
+ # - maxSkew: 1
+ # topologyKey: topology.kubernetes.io/zone
+ # whenUnsatisfiable: DoNotSchedule
+ # labelSelector:
+ # matchLabels:
+ # app: prometheus
+
+ ## Alertmanagers to which alerts will be sent
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerendpoints
+ ##
+ ## Default configuration will connect to the alertmanager deployed as part of this release
+ ##
+ alertingEndpoints: []
+ # - name: ""
+ # namespace: ""
+ # port: http
+ # scheme: http
+ # pathPrefix: ""
+ # tlsConfig: {}
+ # bearerTokenFile: ""
+ # apiVersion: v2
+
+ ## External labels to add to any time series or alerts when communicating with external systems
+ ##
+ externalLabels: {}
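+ ## For example (placeholder values), to tag every series and alert this Prometheus ships externally:
+ # externalLabels:
+ #   cluster: my-gen3-cluster
+ #   environment: production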
+
+ ## enable --web.enable-remote-write-receiver flag on prometheus-server
+ ##
+ enableRemoteWriteReceiver: false
+
+ ## Name of the external label used to denote replica name
+ ##
+ replicaExternalLabelName: ""
+
+ ## If true, the Operator won't add the external label used to denote replica name
+ ##
+ replicaExternalLabelNameClear: false
+
+ ## Name of the external label used to denote Prometheus instance name
+ ##
+ prometheusExternalLabelName: ""
+
+ ## If true, the Operator won't add the external label used to denote Prometheus instance name
+ ##
+ prometheusExternalLabelNameClear: false
+
+ ## External URL at which Prometheus will be reachable.
+ ##
+ externalUrl: "/prometheus/"
+
+ ## Define which Nodes the Pods are scheduled on.
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+
+ ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
+ ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
+ ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
+ ## with the new list of secrets.
+ ##
+ secrets: []
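+ ## For example (hypothetical secret name), listing "etcd-client-cert" here would mount it at
+ ## /etc/prometheus/secrets/etcd-client-cert/, as used by the commented kube-etcd scrape example further below.
+ # secrets:
+ #   - etcd-client-cert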
+
+ ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
+ ## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
+ ##
+ configMaps: []
+
+ ## QuerySpec defines the query command line flags when starting Prometheus.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#queryspec
+ ##
+ query: {}
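+ ## A minimal sketch with placeholder values; see the QuerySpec reference above for all fields.
+ # query:
+ #   maxConcurrency: 20
+ #   timeout: 2m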
+
+ ## Namespaces to be selected for PrometheusRules discovery.
+ ## If nil, select own namespace.
+ ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage
+ ##
+ ruleNamespaceSelector: {}
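+ ## Example which selects PrometheusRules in namespaces with label "prometheus" set to "somelabel"
+ # ruleNamespaceSelector:
+ #   matchLabels:
+ #     prometheus: somelabel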
+
+ ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
+ ## prometheus resource to be created with selectors based on values in the helm deployment,
+ ## which will also match the PrometheusRule resources created
+ ##
+ ruleSelectorNilUsesHelmValues: true
+
+ ## PrometheusRules to be selected for target discovery.
+ ## If {}, select all PrometheusRules
+ ##
+ ruleSelector: {}
+ ## Example which select all PrometheusRules resources
+ ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
+ # ruleSelector:
+ # matchExpressions:
+ # - key: prometheus
+ # operator: In
+ # values:
+ # - example-rules
+ # - example-rules-2
+ #
+ ## Example which select all PrometheusRules resources with label "role" set to "example-rules"
+ # ruleSelector:
+ # matchLabels:
+ # role: example-rules
+
+ ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
+ ## prometheus resource to be created with selectors based on values in the helm deployment,
+ ## which will also match the servicemonitors created
+ ##
+ serviceMonitorSelectorNilUsesHelmValues: true
+
+ ## ServiceMonitors to be selected for target discovery.
+ ## If {}, select all ServiceMonitors
+ ##
+ serviceMonitorSelector: {}
+ ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
+ # serviceMonitorSelector:
+ # matchLabels:
+ # prometheus: somelabel
+
+ ## Namespaces to be selected for ServiceMonitor discovery.
+ ##
+ serviceMonitorNamespaceSelector: {}
+ ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel"
+ # serviceMonitorNamespaceSelector:
+ # matchLabels:
+ # prometheus: somelabel
+
+ ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
+ ## prometheus resource to be created with selectors based on values in the helm deployment,
+ ## which will also match the podmonitors created
+ ##
+ podMonitorSelectorNilUsesHelmValues: true
+
+ ## PodMonitors to be selected for target discovery.
+ ## If {}, select all PodMonitors
+ ##
+ podMonitorSelector: {}
+ ## Example which selects PodMonitors with label "prometheus" set to "somelabel"
+ # podMonitorSelector:
+ # matchLabels:
+ # prometheus: somelabel
+
+ ## Namespaces to be selected for PodMonitor discovery.
+ ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage
+ ##
+ podMonitorNamespaceSelector: {}
+
+ ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the
+ ## prometheus resource to be created with selectors based on values in the helm deployment,
+ ## which will also match the probes created
+ ##
+ probeSelectorNilUsesHelmValues: true
+
+ ## Probes to be selected for target discovery.
+ ## If {}, select all Probes
+ ##
+ probeSelector: {}
+ ## Example which selects Probes with label "prometheus" set to "somelabel"
+ # probeSelector:
+ # matchLabels:
+ # prometheus: somelabel
+
+ ## Namespaces to be selected for Probe discovery.
+ ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage
+ ##
+ probeNamespaceSelector: {}
+
+ ## How long to retain metrics
+ ##
+ retention: 90d
+
+ ## Maximum size of metrics
+ ##
+ retentionSize: ""
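+ ## For example (placeholder value), to keep the on-disk TSDB comfortably below the 80Gi volume requested below:
+ # retentionSize: "70GB"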
+
+ ## Enable compression of the write-ahead log using Snappy.
+ ##
+ walCompression: false
+
+ ## If true, the Operator won't process any Prometheus configuration changes
+ ##
+ paused: false
+
+ ## Number of replicas of each shard to deploy for a Prometheus deployment.
+ ## Number of replicas multiplied by shards is the total number of Pods created.
+ ##
+ replicas: 1
+
+ ## EXPERIMENTAL: Number of shards to distribute targets onto.
+ ## Number of replicas multiplied by shards is the total number of Pods created.
+ ## Note that scaling down shards will not reshard data onto the remaining instances; it must be moved manually.
+ ## Increasing shards will not reshard data either, but it will continue to be available from the same instances.
+ ## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location.
+ ## Sharding is done on the content of the `__address__` target meta-label.
+ ##
+ shards: 1
+
+ ## Log level for Prometheus to be configured with
+ ##
+ logLevel: info
+
+ ## Log format for Prometheus to be configured with
+ ##
+ logFormat: logfmt
+
+ ## Prefix used to register routes, overriding externalUrl route.
+ ## Useful for proxies that rewrite URLs.
+ ##
+ routePrefix: /
+
+ ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+ ## Metadata Labels and Annotations gets propagated to the prometheus pods.
+ ##
+ podMetadata: {}
+ # labels:
+ # app: prometheus
+ # k8s-app: prometheus
+
+ ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
+ ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
+ ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
+ ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
+ podAntiAffinity: ""
+
+ ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
+ ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
+ ##
+ podAntiAffinityTopologyKey: kubernetes.io/hostname
+
+ ## Assign custom affinity rules to the prometheus instance
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ ##
+ affinity: {}
+ # nodeAffinity:
+ # requiredDuringSchedulingIgnoredDuringExecution:
+ # nodeSelectorTerms:
+ # - matchExpressions:
+ # - key: kubernetes.io/e2e-az-name
+ # operator: In
+ # values:
+ # - e2e-az1
+ # - e2e-az2
+
+ ## The remote_read spec configuration for Prometheus.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotereadspec
+ remoteRead: []
+ # - url: http://remote1/read
+ ## additionalRemoteRead is appended to remoteRead
+ additionalRemoteRead: []
+
+ ## The remote_write spec configuration for Prometheus.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotewritespec
+ remoteWrite: []
+ # - url: http://remote1/push
+ ## additionalRemoteWrite is appended to remoteWrite
+ additionalRemoteWrite: []
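+ ## A minimal remote_write sketch; the endpoint URL and secret name below are placeholders.
+ # remoteWrite:
+ #   - url: https://metrics.example.com/api/v1/write
+ #     basicAuth:
+ #       username:
+ #         name: remote-write-creds
+ #         key: username
+ #       password:
+ #         name: remote-write-creds
+ #         key: password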
+
+ ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature
+ remoteWriteDashboards: false
+
+ ## Resource limits & requests
+ ##
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+ ## Prometheus StorageSpec for persistent data
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
+ ##
+ storageSpec:
+ ## Using PersistentVolumeClaim
+ ##
+ volumeClaimTemplate:
+ spec:
+ storageClassName: standard
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 80Gi
+ #selector: {}
+
+ ## Using tmpfs volume
+ ##
+ # emptyDir:
+ # medium: Memory
+
+ # Additional volumes on the output StatefulSet definition.
+ volumes: []
+
+ # Additional VolumeMounts on the output StatefulSet definition.
+ volumeMounts: []
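+ ## For example (hypothetical names), to mount an extra ConfigMap into the Prometheus container:
+ # volumes:
+ #   - name: extra-config
+ #     configMap:
+ #       name: prometheus-extra-config
+ # volumeMounts:
+ #   - name: extra-config
+ #     mountPath: /etc/prometheus/extra
+ #     readOnly: true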
+
+ ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
+ ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
+ ## as specified in the official Prometheus documentation:
+ ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
+ ## appended, the user is responsible for making sure they are valid. Note that using this feature may break
+ ## Prometheus upgrades. It is advised to review the Prometheus release notes to ensure that no incompatible
+ ## scrape configs will break Prometheus after the upgrade.
+ ## AdditionalScrapeConfigs can be defined as a list or as a templated string.
+ ##
+ ## The commented-out kube-etcd example further below will find master nodes, provided they have the name .*mst.*, relabel the
+ ## port to 2379, and allow etcd scraping provided it is running on all Kubernetes master nodes.
+ ##
+ additionalScrapeConfigs:
+ - job_name: statsd_ambassador_gen3
+ metrics_path: '/metrics'
+ static_configs:
+ - targets:
+ - statsd-exporter.default.svc.cluster.local:9102
+ - job_name: prometheus
+ static_configs:
+ - targets:
+ - localhost:9090
+ - job_name: kubecost
+ honor_labels: true
+ scrape_interval: 1m
+ scrape_timeout: 10s
+ metrics_path: /metrics
+ scheme: http
+ static_configs:
+ - targets:
+ - kubecost-cost-analyzer.kubecost.svc.cluster.local:9003
+ - job_name: 'kubernetes-service-endpoints'
+ kubernetes_sd_configs:
+ - role: endpoints
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+ action: keep
+ regex: true
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+ action: replace
+ target_label: __scheme__
+ regex: (https?)
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+ action: replace
+ target_label: __metrics_path__
+ regex: (.+)
+ - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+ action: replace
+ target_label: __address__
+ regex: ([^:]+)(?::\d+)?;(\d+)
+ replacement: $1:$2
+ - action: labelmap
+ regex: __meta_kubernetes_service_label_(.+)
+ - source_labels: [__meta_kubernetes_namespace]
+ action: replace
+ target_label: kubernetes_namespace
+ - source_labels: [__meta_kubernetes_service_name]
+ action: replace
+ target_label: kubernetes_name
+ - source_labels: [__meta_kubernetes_pod_node_name]
+ action: replace
+ target_label: kubernetes_node
+
+ - job_name: 'prometheus-pushgateway'
+ honor_labels: true
+ kubernetes_sd_configs:
+ - role: service
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
+ action: keep
+ regex: pushgateway
+
+ - job_name: 'kubernetes-apiservers'
+ kubernetes_sd_configs:
+ - role: endpoints
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ insecure_skip_verify: true
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+ action: keep
+ regex: default;kubernetes;https
+ - job_name: 'kubernetes-nodes'
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ insecure_skip_verify: true
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+ kubernetes_sd_configs:
+ - role: node
+ relabel_configs:
+ - action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+ - target_label: __address__
+ replacement: kubernetes.default.svc:443
+ - source_labels: [__meta_kubernetes_node_name]
+ regex: (.+)
+ target_label: __metrics_path__
+ replacement: /api/v1/nodes/$1/proxy/metrics
+ - job_name: 'kubernetes-nodes-cadvisor'
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ insecure_skip_verify: true
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+ kubernetes_sd_configs:
+ - role: node
+ relabel_configs:
+ - action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+ - target_label: __address__
+ replacement: kubernetes.default.svc:443
+ - source_labels: [__meta_kubernetes_node_name]
+ regex: (.+)
+ target_label: __metrics_path__
+ replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
+ - job_name: 'kubernetes-services'
+ metrics_path: /probe
+ params:
+ module: [http_2xx]
+ kubernetes_sd_configs:
+ - role: service
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
+ action: keep
+ regex: true
+ - source_labels: [__address__]
+ target_label: __param_target
+ - target_label: __address__
+ replacement: blackbox
+ - source_labels: [__param_target]
+ target_label: instance
+ - action: labelmap
+ regex: __meta_kubernetes_service_label_(.+)
+ - source_labels: [__meta_kubernetes_namespace]
+ target_label: kubernetes_namespace
+ - source_labels: [__meta_kubernetes_service_name]
+ target_label: kubernetes_name
+ - job_name: 'kubernetes-pods'
+ kubernetes_sd_configs:
+ - role: pod
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+ action: keep
+ regex: true
+ - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+ action: replace
+ target_label: __metrics_path__
+ regex: (.+)
+ - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+ action: replace
+ regex: ([^:]+)(?::\d+)?;(\d+)
+ replacement: $1:$2
+ target_label: __address__
+ - action: labelmap
+ regex: __meta_kubernetes_pod_label_(.+)
+ - source_labels: [__meta_kubernetes_namespace]
+ action: replace
+ target_label: kubernetes_namespace
+ - source_labels: [__meta_kubernetes_pod_name]
+ action: replace
+ target_label: kubernetes_pod_name
+
+ #additionalScrapeConfigs: []
+ # - job_name: kube-etcd
+ # kubernetes_sd_configs:
+ # - role: node
+ # scheme: https
+ # tls_config:
+ # ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
+ # cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
+ # key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
+ # relabel_configs:
+ # - action: labelmap
+ # regex: __meta_kubernetes_node_label_(.+)
+ # - source_labels: [__address__]
+ # action: replace
+ # target_label: __address__
+ # regex: ([^:;]+):(\d+)
+ # replacement: ${1}:2379
+ # - source_labels: [__meta_kubernetes_node_name]
+ # action: keep
+ # regex: .*mst.*
+ # - source_labels: [__meta_kubernetes_node_name]
+ # action: replace
+ # target_label: node
+ # regex: (.*)
+ # replacement: ${1}
+ # metric_relabel_configs:
+ # - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
+ # action: labeldrop
+ #
+ ## If scrape config contains a repetitive section, you may want to use a template.
+ ## In the following example, you can see how to define `gce_sd_configs` for multiple zones
+ # additionalScrapeConfigs: |
+ # - job_name: "node-exporter"
+ # gce_sd_configs:
+ # {{range $zone := .Values.gcp_zones}}
+ # - project: "project1"
+ # zone: "{{$zone}}"
+ # port: 9100
+ # {{end}}
+ # relabel_configs:
+ # ...
+
+
+ ## If additional scrape configurations are already deployed in a single secret file you can use this section.
+ ## Expected values are the secret name and key
+ ## Cannot be used with additionalScrapeConfigs
+ additionalScrapeConfigsSecret: {}
+ # enabled: false
+ # name:
+ # key:
+
+ ## additionalPrometheusSecretsAnnotations allows adding annotations to the Kubernetes secret. This can be useful
+ ## when deploying via Spinnaker to disable versioning on the secret, e.g. strategy.spinnaker.io/versioned: 'false'
+ additionalPrometheusSecretsAnnotations: {}
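+ ## For example, the Spinnaker annotation mentioned above:
+ # additionalPrometheusSecretsAnnotations:
+ #   strategy.spinnaker.io/versioned: 'false'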
+
+ ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
+ ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#.
+ ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
+ ## As AlertManager configs are appended, the user is responsible for making sure they are valid. Note that using this
+ ## feature may break Prometheus upgrades. It is advised to review the Prometheus release
+ ## notes to ensure that no incompatible AlertManager configs will break Prometheus after the upgrade.
+ ##
+ additionalAlertManagerConfigs: []
+ # - consul_sd_configs:
+ # - server: consul.dev.test:8500
+ # scheme: http
+ # datacenter: dev
+ # tag_separator: ','
+ # services:
+ # - metrics-prometheus-alertmanager
+
+ ## If additional alertmanager configurations are already deployed in a single secret, or you want to manage
+ ## them separately from the helm deployment, you can use this section.
+ ## Expected values are the secret name and key
+ ## Cannot be used with additionalAlertManagerConfigs
+ additionalAlertManagerConfigsSecret: {}
+ # name:
+ # key:
+
+ ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
+ ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
+ ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
+ ## As alert relabel configs are appended, the user is responsible for making sure they are valid. Note that using this feature may
+ ## break Prometheus upgrades. It is advised to review the Prometheus release notes to ensure that no incompatible alert relabel
+ ## configs will break Prometheus after the upgrade.
+ ##
+ additionalAlertRelabelConfigs: []
+ # - separator: ;
+ # regex: prometheus_replica
+ # replacement: $1
+ # action: labeldrop
+
+ ## If additional alert relabel configurations are already deployed in a single secret, or you want to manage
+ ## them separately from the helm deployment, you can use this section.
+ ## Expected values are the secret name and key
+ ## Cannot be used with additionalAlertRelabelConfigs
+ additionalAlertRelabelConfigsSecret: {}
+ # name:
+ # key:
+
+ ## SecurityContext holds pod-level security attributes and common container settings.
+ ## This defaults to a non-root user with uid 1000 and gid 2000.
+ ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md
+ ##
+ securityContext:
+ runAsGroup: 2000
+ runAsNonRoot: true
+ runAsUser: 1000
+ fsGroup: 2000
+
+ ## Priority class assigned to the Pods
+ ##
+ priorityClassName: ""
+
+ ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
+ ## This section is experimental; it may change significantly without deprecation notice or backward compatibility in any release.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosspec
+ ##
+ thanos:
+ objectStorageConfig:
+ key: thanos.yaml
+ name: thanos-objstore-config
+ image: quay.io/thanos/thanos:v0.25.2
+ #image: thanosio/thanos:v0.24.0
+ version: v0.25.2
+ tag: v0.25.2
+ # secretProviderClass:
+ # provider: s3
+ # parameters:
+ # secrets: |
+ # - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest"
+ # fileName: "objstore.yaml"
+ # objectStorageConfigFile: /var/secrets/object-store.yaml
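+ ## The referenced secret is expected to hold a Thanos objstore config under the key thanos.yaml.
+ ## A minimal S3 sketch (bucket and endpoint are placeholders):
+ # thanos.yaml: |-
+ #   type: S3
+ #   config:
+ #     bucket: "my-thanos-bucket"
+ #     endpoint: "s3.us-east-1.amazonaws.com"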
+
+ ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
+ ## If using a proxy extraContainer, update targetPort with the proxy container port.
+ containers: []
+ # containers:
+ # - name: oauth-proxy
+ # image: quay.io/oauth2-proxy/oauth2-proxy:v7.1.2
+ # args:
+ # - --upstream=http://127.0.0.1:9093
+ # - --http-address=0.0.0.0:8081
+ # - ...
+ # ports:
+ # - containerPort: 8081
+ # name: oauth-proxy
+ # protocol: TCP
+ # resources: {}
+
+ ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
+ ## (permissions, dir tree) on mounted volumes before starting prometheus
+ initContainers: []
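+ ## A minimal sketch for fixing data-directory permissions. The data volume name is generated by the
+ ## operator (prometheus-<prometheus-name>-db), so both the volume name and the image below are placeholders.
+ # initContainers:
+ #   - name: init-chown-data
+ #     image: busybox:1.35
+ #     command: ["chown", "-R", "1000:2000", "/prometheus"]
+ #     volumeMounts:
+ #       - name: prometheus-<prometheus-name>-db
+ #         mountPath: /prometheus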
+
+ ## PortName to use for Prometheus.
+ ##
+ portName: "http-web"
+
+ ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
+ ## on the file system of the Prometheus container e.g. bearer token files.
+ arbitraryFSAccessThroughSMs: false
+
+ ## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor
+ ## or PodMonitor to true, this overrides honor_labels to false.
+ overrideHonorLabels: false
+
+ ## OverrideHonorTimestamps allows globally enforcing the honoring of timestamps in all scrape configs.
+ overrideHonorTimestamps: false
+
+ ## IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector settings from the podmonitor and servicemonitor
+ ## configs, and they will only discover endpoints within their current namespace. Defaults to false.
+ ignoreNamespaceSelectors: false
+
+ ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created.
+ ## The label value will always be the namespace of the object that is being created.
+ ## Disabled by default
+ enforcedNamespaceLabel: ""
+
+ ## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels.
+ ## Works only if enforcedNamespaceLabel is set to true. Make sure both ruleNamespace and ruleName are set for each pair.
+ ## Deprecated, use `excludedFromEnforcement` instead
+ prometheusRulesExcludedFromEnforce: []
+
+ ## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects
+ ## to be excluded from enforcing a namespace label of origin.
+ ## Works only if enforcedNamespaceLabel is set to true.
+ ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#objectreference
+ excludedFromEnforcement: []
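+ ## For example (hypothetical object), to exempt one ServiceMonitor from the enforced namespace label:
+ # excludedFromEnforcement:
+ #   - group: monitoring.coreos.com
+ #     resource: servicemonitors
+ #     namespace: default
+ #     name: my-servicemonitor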
+
+ ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
+ ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
+ ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
+ ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
+ queryLogFile: false
+
+ ## EnforcedSampleLimit defines global limit on number of scraped samples that will be accepted. This overrides any SampleLimit
+ ## set per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep the overall
+ ## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead.
+ enforcedSampleLimit: false
+
+ ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
+ ## per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
+ ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
+ ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
+ enforcedTargetLimit: false
+
+
+ ## Per-scrape limit on number of labels that will be accepted for a sample. If more than this number of labels are present
+ ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
+ ## 2.27.0 and newer.
+ enforcedLabelLimit: false
+
+ ## Per-scrape limit on the length of label names that will be accepted for a sample. If a label name is longer than this number
+ ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
+ ## 2.27.0 and newer.
+ enforcedLabelNameLengthLimit: false
+
+ ## Per-scrape limit on the length of label values that will be accepted for a sample. If a label value is longer than this
+ ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
+ ## versions 2.27.0 and newer.
+ enforcedLabelValueLengthLimit: false
+
+ ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
+ ## in Prometheus so it may change in any upcoming release.
+ allowOverlappingBlocks: false
+
+ additionalRulesForClusterRole: []
+ # - apiGroups: [ "" ]
+ # resources:
+ # - nodes/proxy
+ # verbs: [ "get", "list", "watch" ]
+
+ additionalServiceMonitors: []
+ ## Name of the ServiceMonitor to create
+ ##
+ # - name: ""
+
+ ## Additional labels to set, used for the ServiceMonitorSelector together with the standard labels from
+ ## the chart.
+ ##
+ # additionalLabels: {}
+
+ ## Service label for use in assembling a job name of the form