From 835a1cac2e8e477c41b51e5d7e5f721f01b0a0cf Mon Sep 17 00:00:00 2001 From: BinamB Date: Mon, 14 Oct 2024 11:23:13 -0500 Subject: [PATCH 01/22] (Chore): Improve Single Table Migration Job Script --- .../jobs/indexd-single-table-migration-job.yaml | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/kube/services/jobs/indexd-single-table-migration-job.yaml b/kube/services/jobs/indexd-single-table-migration-job.yaml index e1e6a81a3..ff99e3028 100644 --- a/kube/services/jobs/indexd-single-table-migration-job.yaml +++ b/kube/services/jobs/indexd-single-table-migration-job.yaml @@ -1,8 +1,9 @@ # Setup for running this migration https://github.com/uc-cdis/indexd/blob/master/docs/migration_to_single_table_indexd.md apiVersion: batch/v1 -kind: Job +kind: PriorityClass metadata: name: indexd-single-table-migration +preemtionPolicy: Never spec: template: metadata: @@ -46,6 +47,10 @@ spec: env: - name: START_DID GEN3_START_DID|-value: ""-| + - name: BATCH_SIZE + GEN3_BATCH_SIZE |-value: ""-| + - name: OFFSET_VALUE + GEN3_OFFSET_VALUE |-value: ""-| volumeMounts: - name: "config-volume" readOnly: true @@ -72,10 +77,16 @@ spec: args: - "-c" - | - flags="--creds-path /var/www/indexd/creds_new.json" + flags="--creds-file /var/www/indexd/creds_new.json" if [[ -n "$START_DID" ]]; then flags="$flags --start-did $START_DID" fi + if [[ -n "$BATCH_SIZE" ]]; then + flags="$flags --batch-size $BATCH_SIZE" + fi + if [[ -n "$OFFSET_VALUE" ]]; then + flags="$flags --start-offset $OFFSET_VALUE" + fi time python /indexd/bin/migrate_to_single_table.py $flags echo "Exit code: $?" 
restartPolicy: Never From 96711710b7072524c75a9a90fe11c0702fb5546c Mon Sep 17 00:00:00 2001 From: BinamB Date: Mon, 14 Oct 2024 13:32:56 -0500 Subject: [PATCH 02/22] add param for creds --- .../jobs/indexd-single-table-migration-job.yaml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/kube/services/jobs/indexd-single-table-migration-job.yaml b/kube/services/jobs/indexd-single-table-migration-job.yaml index ff99e3028..ee2836f00 100644 --- a/kube/services/jobs/indexd-single-table-migration-job.yaml +++ b/kube/services/jobs/indexd-single-table-migration-job.yaml @@ -46,11 +46,13 @@ spec: GEN3_INDEXD_IMAGE env: - name: START_DID - GEN3_START_DID|-value: ""-| + GEN3_START_DID |-value: ""-| - name: BATCH_SIZE GEN3_BATCH_SIZE |-value: ""-| - name: OFFSET_VALUE GEN3_OFFSET_VALUE |-value: ""-| + - name: USE_DEFAULT_CREDS + GEN3_USE_DEFAULT_CREDS |-value: "True"-| volumeMounts: - name: "config-volume" readOnly: true @@ -77,7 +79,11 @@ spec: args: - "-c" - | - flags="--creds-file /var/www/indexd/creds_new.json" + if [[ -n "$USE_DEFAULT_CREDS" == "false" ]]; then + flags="--creds-file /var/www/indexd/creds_new.json" + else + flags="--creds-file /var/www/indexd/creds_new.json" + fi if [[ -n "$START_DID" ]]; then flags="$flags --start-did $START_DID" fi From 92f3dd151b06f531d7136e4b6dc3a0e2d5a7973b Mon Sep 17 00:00:00 2001 From: BinamB Date: Tue, 15 Oct 2024 10:49:48 -0500 Subject: [PATCH 03/22] Add priority class --- .../jobs/indexd-single-table-migration-job.yaml | 14 +++++++------- ...dexd-single-table-migration-priority-class.yaml | 7 +++++++ 2 files changed, 14 insertions(+), 7 deletions(-) create mode 100644 kube/services/jobs/indexd-single-table-migration-priority-class.yaml diff --git a/kube/services/jobs/indexd-single-table-migration-job.yaml b/kube/services/jobs/indexd-single-table-migration-job.yaml index ee2836f00..619463f85 100644 --- a/kube/services/jobs/indexd-single-table-migration-job.yaml +++ 
b/kube/services/jobs/indexd-single-table-migration-job.yaml @@ -1,15 +1,15 @@ # Setup for running this migration https://github.com/uc-cdis/indexd/blob/master/docs/migration_to_single_table_indexd.md apiVersion: batch/v1 -kind: PriorityClass +kind: Job metadata: name: indexd-single-table-migration -preemtionPolicy: Never spec: template: metadata: labels: app: gen3job spec: + priorityClassName: indexd-single-table-migration-priority # Assigning the priority class affinity: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: @@ -46,13 +46,13 @@ spec: GEN3_INDEXD_IMAGE env: - name: START_DID - GEN3_START_DID |-value: ""-| + GEN3_START_DID|-value: ""-| - name: BATCH_SIZE - GEN3_BATCH_SIZE |-value: ""-| + GEN3_BATCH_SIZE|-value: ""-| - name: OFFSET_VALUE - GEN3_OFFSET_VALUE |-value: ""-| + GEN3_OFFSET_VALUE|-value: ""-| - name: USE_DEFAULT_CREDS - GEN3_USE_DEFAULT_CREDS |-value: "True"-| + GEN3_USE_DEFAULT_CREDS|-value: "false"-| volumeMounts: - name: "config-volume" readOnly: true @@ -82,7 +82,7 @@ spec: if [[ -n "$USE_DEFAULT_CREDS" == "false" ]]; then flags="--creds-file /var/www/indexd/creds_new.json" else - flags="--creds-file /var/www/indexd/creds_new.json" + flags="--creds-file /var/www/indexd/creds.json" fi if [[ -n "$START_DID" ]]; then flags="$flags --start-did $START_DID" diff --git a/kube/services/jobs/indexd-single-table-migration-priority-class.yaml b/kube/services/jobs/indexd-single-table-migration-priority-class.yaml new file mode 100644 index 000000000..3a35319f7 --- /dev/null +++ b/kube/services/jobs/indexd-single-table-migration-priority-class.yaml @@ -0,0 +1,7 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: indexd-single-table-migration-priority +value: 100000 # High value for high priority +globalDefault: false +description: "Priority class for Indexd migration job" \ No newline at end of file From eda0c17cc550346aff396efabc2427250de03e66 Mon Sep 17 00:00:00 2001 From: BinamB Date: Tue, 22 Oct 2024 10:02:55 
-0500 Subject: [PATCH 04/22] Add comments --- .../jobs/indexd-single-table-migration-job.yaml | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/kube/services/jobs/indexd-single-table-migration-job.yaml b/kube/services/jobs/indexd-single-table-migration-job.yaml index 619463f85..f56c32010 100644 --- a/kube/services/jobs/indexd-single-table-migration-job.yaml +++ b/kube/services/jobs/indexd-single-table-migration-job.yaml @@ -1,4 +1,5 @@ # Setup for running this migration https://github.com/uc-cdis/indexd/blob/master/docs/migration_to_single_table_indexd.md +# Need to apply PriorityClass before running this job. Run kubectl apply -f indexd-single-table-migration-priority-class.yaml apiVersion: batch/v1 kind: Job metadata: @@ -79,20 +80,28 @@ spec: args: - "-c" - | - if [[ -n "$USE_DEFAULT_CREDS" == "false" ]]; then - flags="--creds-file /var/www/indexd/creds_new.json" + $flags="" + + if [[ "$USE_DEFAULT_CREDS" == "false" ]]; then + echo "Using new creds" + flags="$flags --creds-file /var/www/indexd/creds_new.json" else - flags="--creds-file /var/www/indexd/creds.json" + echo "using default creds" + flags="$flags --creds-file /var/www/indexd/creds.json" fi + if [[ -n "$START_DID" ]]; then flags="$flags --start-did $START_DID" fi + if [[ -n "$BATCH_SIZE" ]]; then flags="$flags --batch-size $BATCH_SIZE" fi + if [[ -n "$OFFSET_VALUE" ]]; then flags="$flags --start-offset $OFFSET_VALUE" fi + time python /indexd/bin/migrate_to_single_table.py $flags echo "Exit code: $?" 
restartPolicy: Never From edf59256d663abd205f46f093a58c97aebee6b9b Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 23 Oct 2024 11:12:56 -0600 Subject: [PATCH 05/22] Making ES7 domain default (#2655) * Making ES7 domain default * updating es proxy script --- gen3/bin/kube-setup-aws-es-proxy.sh | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/gen3/bin/kube-setup-aws-es-proxy.sh b/gen3/bin/kube-setup-aws-es-proxy.sh index 1ce80fd8e..5a1f5ac0e 100644 --- a/gen3/bin/kube-setup-aws-es-proxy.sh +++ b/gen3/bin/kube-setup-aws-es-proxy.sh @@ -21,7 +21,7 @@ if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${esDomain}" --query "DomainStatusList[*].Endpoints" --output text)" \ && [[ -n "${ES_ENDPOINT}" && -n "${esDomain}" ]]; then gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" - g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml" + g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml" g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml" gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster." 
else @@ -33,8 +33,8 @@ if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then gen3 kube-setup-networkpolicy service aws-es-proxy g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true fi - elif [ "$es7" = true ]; then - if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata-2 --query "DomainStatusList[*].Endpoints" --output text)" \ + elif [ "$es7" = false ]; then + if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" \ && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml" @@ -50,9 +50,10 @@ if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true fi else - if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" \ + if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names "${envname}"-gen3-metadata-2 --query "DomainStatusList[*].Endpoints" --output text)" \ && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" + g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml" g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml" gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster." 
else From a4e9a639cac9434fc26d2ed49219d99f5d97f058 Mon Sep 17 00:00:00 2001 From: smvgarcia <111767892+smvgarcia@users.noreply.github.com> Date: Tue, 5 Nov 2024 13:29:48 -0600 Subject: [PATCH 06/22] Update README.md (#2662) update broken link --- doc/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/README.md b/doc/README.md index c3c6602b3..4fc893935 100644 --- a/doc/README.md +++ b/doc/README.md @@ -85,7 +85,7 @@ For example - `gen3 help aws` opens `aws.md` * [utility vm](../tf_files/aws/modules/utility-vm/README.md) * [explorer infrastructure](https://github.com/uc-cdis/cdis-wiki/blob/master/dev/gen3/data_explorer/README.md) * [automation for gcp](../tf_files/gcp/commons/README.md) -* [gcp bucket access flows for DCF](https://github.com/uc-cdis/fence/blob/master/docs/google_architecture.md) +* [gcp bucket access flows for DCF](https://github.com/uc-cdis/fence/blob/master/docs/additional_documentation/google_architecture.md) * [authn and authz with fence](https://github.com/uc-cdis/fence/blob/master/README.md) * [jenkins](../kube/services/jenkins/README.md) * [jupyterhub configuration](../kube/services/jupyterhub/README.md) From b3b2f0b3f63542c796413f2f67ee574df37a03d5 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 7 Nov 2024 08:42:11 -0700 Subject: [PATCH 07/22] adding changes for new al image changes (#2661) * adding changes for new al image changes --- .../audit-service/audit-service-deploy.yaml | 13 ++++++++++++- kube/services/metadata/metadata-deploy.yaml | 14 +++++++++++++- kube/services/requestor/requestor-deploy.yaml | 13 ++++++++++++- kube/services/wts/wts-deploy.yaml | 3 ++- 4 files changed, 39 insertions(+), 4 deletions(-) diff --git a/kube/services/audit-service/audit-service-deploy.yaml b/kube/services/audit-service/audit-service-deploy.yaml index b7081a7f5..b0cf5c661 100644 --- a/kube/services/audit-service/audit-service-deploy.yaml +++ 
b/kube/services/audit-service/audit-service-deploy.yaml @@ -94,6 +94,11 @@ spec: readOnly: true mountPath: "/src/audit-service-config.yaml" subPath: "audit-service-config.yaml" + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. + - name: "config-volume" + readOnly: true + mountPath: "/audit/audit-service-config.yaml" + subPath: "audit-service-config.yaml" resources: requests: cpu: 100m @@ -109,6 +114,11 @@ spec: readOnly: true mountPath: "/src/audit-service-config.yaml" subPath: "audit-service-config.yaml" + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. + - name: "config-volume" + readOnly: true + mountPath: "/audit/audit-service-config.yaml" + subPath: "audit-service-config.yaml" resources: limits: cpu: 0.8 @@ -117,4 +127,5 @@ spec: args: - "-c" - | - /env/bin/alembic upgrade head + # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility + poetry run alembic upgrade head || /env/bin/alembic upgrade head diff --git a/kube/services/metadata/metadata-deploy.yaml b/kube/services/metadata/metadata-deploy.yaml index 68a83078e..71ab7b484 100644 --- a/kube/services/metadata/metadata-deploy.yaml +++ b/kube/services/metadata/metadata-deploy.yaml @@ -117,6 +117,11 @@ spec: readOnly: true mountPath: /src/.env subPath: metadata.env + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. 
+ - name: config-volume-g3auto + readOnly: true + mountPath: /mds/.env + subPath: metadata.env - name: config-volume readOnly: true mountPath: /aggregate_config.json @@ -140,6 +145,11 @@ spec: readOnly: true mountPath: /src/.env subPath: metadata.env + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibility. + - name: config-volume-g3auto + readOnly: true + mountPath: /mds/.env + subPath: metadata.env resources: limits: cpu: 0.8 @@ -148,4 +158,6 @@ spec: args: - "-c" - | - /env/bin/alembic upgrade head + # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility + poetry run alembic upgrade head || /env/bin/alembic upgrade head + diff --git a/kube/services/requestor/requestor-deploy.yaml b/kube/services/requestor/requestor-deploy.yaml index 2ed886638..954cb847c 100644 --- a/kube/services/requestor/requestor-deploy.yaml +++ b/kube/services/requestor/requestor-deploy.yaml @@ -91,6 +91,11 @@ spec: readOnly: true mountPath: "/src/requestor-config.yaml" subPath: "requestor-config.yaml" + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibilit + - name: "config-volume" + readOnly: true + mountPath: "/requestor/requestor-config.yaml" + subPath: "requestor-config.yaml" resources: requests: cpu: 100m @@ -106,6 +111,11 @@ spec: readOnly: true mountPath: "/src/requestor-config.yaml" subPath: "requestor-config.yaml" + # Added an additional volume mount for new images using the / directory, while retaining the 'src' mount for backward compatibilit + - name: "config-volume" + readOnly: true + mountPath: "/requestor/requestor-config.yaml" + subPath: "requestor-config.yaml" resources: limits: cpu: 0.8 @@ -114,4 +124,5 @@ spec: args: - "-c" - | - /env/bin/alembic upgrade head + # Managing virtual environments via poetry instead of python since the AL base image 
update, but retaining backwards compatibility + poetry run alembic upgrade head || /env/bin/alembic upgrade head diff --git a/kube/services/wts/wts-deploy.yaml b/kube/services/wts/wts-deploy.yaml index 06f43fe01..a3e63f19b 100644 --- a/kube/services/wts/wts-deploy.yaml +++ b/kube/services/wts/wts-deploy.yaml @@ -166,7 +166,8 @@ spec: if hash alembic 2>/dev/null; then echo "Running DB migration" cd /wts - alembic upgrade head + # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility + poetry run alembic upgrade head || alembic upgrade head else # WTS < 0.3.0 does not have the DB migration setup echo "Alembic not installed - not running DB migration" From 66adf837636cde9aa8b6c1f30d3e47fc00100d47 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 7 Nov 2024 15:02:37 -0700 Subject: [PATCH 08/22] updating alembic logic to work with the new al images that manage alembic through poetry (#2663) --- kube/services/wts/wts-deploy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/wts/wts-deploy.yaml b/kube/services/wts/wts-deploy.yaml index a3e63f19b..c6c4ffe74 100644 --- a/kube/services/wts/wts-deploy.yaml +++ b/kube/services/wts/wts-deploy.yaml @@ -163,7 +163,7 @@ spec: args: - "-c" - | - if hash alembic 2>/dev/null; then + if hash alembic 2>/dev/null || poetry run alembic --version >/dev/null 2>&1; then echo "Running DB migration" cd /wts # Managing virtual environments via poetry instead of python since the AL base image update, but retaining backwards compatibility From e217e67fc3b12024c6537900230156704be6d66b Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Thu, 7 Nov 2024 16:43:24 -0600 Subject: [PATCH 09/22] fix to include special characters in db name (#2664) --- kube/services/jobs/psql-db-copy-aurora-job.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/kube/services/jobs/psql-db-copy-aurora-job.yaml b/kube/services/jobs/psql-db-copy-aurora-job.yaml index 8fd6e899a..a29274146 100644 --- a/kube/services/jobs/psql-db-copy-aurora-job.yaml +++ b/kube/services/jobs/psql-db-copy-aurora-job.yaml @@ -168,11 +168,11 @@ spec: gen3_log_info "Source DB: $source_db_database, Username: $db_username, Current DB: $db_database, Target DB: $target_db" # DB commands - gen3 psql aurora -c "GRANT $db_username TO $aurora_master_username" + gen3 psql aurora -c "GRANT \"$db_username\" TO \"$aurora_master_username\"" gen3 psql aurora -c "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$source_db_database' AND pid <> pg_backend_pid()" - gen3 psql aurora -c "CREATE DATABASE $target_db WITH TEMPLATE $source_db_database OWNER $db_username" - pg_command="DO \$\$ DECLARE tbl record; BEGIN FOR tbl IN (SELECT table_schema || '.' || table_name AS full_table_name FROM information_schema.tables WHERE table_schema = 'public') LOOP EXECUTE 'ALTER TABLE ' || tbl.full_table_name || ' OWNER TO $db_username;'; END LOOP; END \$\$;" - PGPASSWORD=${aurora_master_password} psql -h $aurora_host_name -U $aurora_master_username -d "$target_db" -c "$pg_command" + gen3 psql aurora -c "CREATE DATABASE \"$target_db\" WITH TEMPLATE \"$source_db_database\" OWNER \"$db_username\"" + pg_command="DO \$\$ DECLARE tbl record; BEGIN FOR tbl IN (SELECT table_schema || '.' || table_name AS full_table_name FROM information_schema.tables WHERE table_schema = 'public') LOOP EXECUTE 'ALTER TABLE ' || tbl.full_table_name || ' OWNER TO \"$db_username\";'; END LOOP; END \$\$;" + PGPASSWORD=${aurora_master_password} psql -h $aurora_host_name -U "$aurora_master_username" -d "$target_db" -c "$pg_command" if [ $? 
-eq 0 ]; then gen3_log_info "Successfully processed $database" new_databases+=("$target_db") From b099d24dfab3f519ed9545104b9d9f2e17ec937b Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Fri, 8 Nov 2024 10:45:34 -0700 Subject: [PATCH 10/22] updating userdb job to work with new al images (#2665) --- kube/services/jobs/indexd-userdb-job.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/jobs/indexd-userdb-job.yaml b/kube/services/jobs/indexd-userdb-job.yaml index 676307481..228a1989d 100644 --- a/kube/services/jobs/indexd-userdb-job.yaml +++ b/kube/services/jobs/indexd-userdb-job.yaml @@ -74,7 +74,7 @@ spec: # Script always succeeds if it runs (echo exits with 0) # indexd image does not include jq, so use python - | - eval $(python 2> /dev/null < /dev/null || poetry run python 2> /dev/null) < /dev/null || poetry run python /indexd/bin/index_admin.py create --username "$user" --password "${user_db[$user]}") done echo "Exit code: $?" 
restartPolicy: Never From 308f04be52e5276eefa4935ab67a3a9e22452e54 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 11 Nov 2024 06:00:42 -0600 Subject: [PATCH 11/22] Update web_whitelist --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 6484f3940..42095986a 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -33,6 +33,7 @@ centos.mirrors.hoobly.com centos.mirrors.tds.net centos.mirrors.wvstateu.edu cernvm.cern.ch +charts.authelia.com charts.helm.sh cloud.r-project.org coredns.github.io From 7fe48a58445260faa3d5022c27ce39d45f2d5e38 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 11 Nov 2024 11:54:04 -0500 Subject: [PATCH 12/22] Feat/team project cost tracking (#2658) * Setting up migration to new karpenter CRDs * Adding some references missed earlier * Really can't stop screwing up * Not even sure why it's working this way * Let's try this, this should help * Let's make sure this part works * Let's try this * Let's try this too * Let's check the decoding * Fixing a silly parentheses issue * Fixing some other naming issues * I think we can't do decoding this early in the process * Trying wrapping in quotes * Let's try just cutting some stuff * Need to update the deleted sensor * Think underscores are also a no-go * No decoding * Could the decoding be the issue? 
* Adding the selector for 1.31 AMIs * Changing the tag name * Fixing the cleanup jobs * I think we may just need to change the deletion order --- .../argo-events/workflows/configmap.yaml | 237 +++++++++--------- .../workflows/sensor-completed.yaml | 8 +- .../argo-events/workflows/sensor-created.yaml | 32 ++- .../argo-events/workflows/sensor-deleted.yaml | 8 +- ...rpenter-reconciler-cronjob-va-testing.yaml | 71 ------ .../karpenter-reconciler-cronjob.yaml | 27 +- 6 files changed, 166 insertions(+), 217 deletions(-) delete mode 100644 kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index f57ae07d0..c084533fe 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -4,119 +4,44 @@ metadata: name: karpenter-templates namespace: argo-events data: - provisioner.yaml: | - apiVersion: karpenter.sh/v1alpha5 - kind: Provisioner + nodeclass.yaml: | + apiVersion: karpenter.k8s.aws/v1beta1 + kind: EC2NodeClass metadata: name: workflow-WORKFLOW_NAME spec: - requirements: - - key: karpenter.sh/capacity-type - operator: In - values: ["on-demand"] - - key: kubernetes.io/arch - operator: In - values: - - amd64 - - key: node.kubernetes.io/instance-type - operator: In - values: - - c6a.large - - c6a.xlarge - - c6a.2xlarge - - c6a.4xlarge - - c6a.8xlarge - - c6a.12xlarge - - c7a.large - - c7a.xlarge - - c7a.2xlarge - - c7a.4xlarge - - c7a.8xlarge - - c7a.12xlarge - - c6i.large - - c6i.xlarge - - c6i.2xlarge - - c6i.4xlarge - - c6i.8xlarge - - c6i.12xlarge - - c7i.large - - c7i.xlarge - - c7i.2xlarge - - c7i.4xlarge - - c7i.8xlarge - - c7i.12xlarge - - m6a.2xlarge - - m6a.4xlarge - - m6a.8xlarge - - m6a.12xlarge - - m6a.16xlarge - - m6a.24xlarge - - m7a.2xlarge - - m7a.4xlarge - - m7a.8xlarge - - m7a.12xlarge - - m7a.16xlarge - - m7a.24xlarge - - m6i.2xlarge - - m6i.4xlarge 
- - m6i.8xlarge - - m6i.12xlarge - - m6i.16xlarge - - m6i.24xlarge - - m7i.2xlarge - - m7i.4xlarge - - m7i.8xlarge - - m7i.12xlarge - - m7i.16xlarge - - m7i.24xlarge - - r7iz.2xlarge - - r7iz.4xlarge - - r7iz.8xlarge - - r7iz.12xlarge - - r7iz.16xlarge - - r7iz.24xlarge - taints: - - key: role - value: WORKFLOW_NAME - effect: NoSchedule - labels: - role: WORKFLOW_NAME - purpose: workflow - limits: - resources: - cpu: 4000 - providerRef: - name: workflow-WORKFLOW_NAME - # Kill nodes after 2 days to ensure they stay up to date - ttlSecondsUntilExpired: 172800 - ttlSecondsAfterEmpty: 10 - - nodetemplate.yaml: | - apiVersion: karpenter.k8s.aws/v1alpha1 - kind: AWSNodeTemplate - metadata: - name: workflow-WORKFLOW_NAME - spec: - amiSelector: - aws::name: EKS-FIPS* - aws::owners: "143731057154" - subnetSelector: - karpenter.sh/discovery: ENVIRONMENT - securityGroupSelector: - karpenter.sh/discovery: ENVIRONMENT-workflow - tags: - Environment: ENVIRONMENT - Name: eks-ENVIRONMENT-workflow-karpenter - karpenter.sh/discovery: ENVIRONMENT - workflowname: WORKFLOW_NAME - gen3username: GEN3_USERNAME - gen3service: argo-workflows - purpose: workflow + amiFamily: AL2 + amiSelectorTerms: + - name: 1-31-EKS-FIPS* + owner: "143731057154" + blockDeviceMappings: + - deviceName: /dev/xvda + ebs: + deleteOnTermination: true + encrypted: true + volumeSize: 100Gi + volumeType: gp2 metadataOptions: httpEndpoint: enabled httpProtocolIPv6: disabled httpPutResponseHopLimit: 2 httpTokens: optional + role: eks_ENVIRONMENT_workers_role + securityGroupSelectorTerms: + - tags: + karpenter.sh/discovery: ENVIRONMENT-workflow + subnetSelectorTerms: + - tags: + karpenter.sh/discovery: ENVIRONMENT + tags: + Environment: ENVIRONMENT + Name: eks-ENVIRONMENT-workflow-karpenter + gen3service: argo-workflows + gen3username: GEN3_USERNAME + gen3teamproject: "GEN3_TEAMNAME" + karpenter.sh/discovery: ENVIRONMENT + purpose: workflow + workflowname: WORKFLOW_NAME userData: | MIME-Version: 1.0 Content-Type: 
multipart/mixed; boundary="BOUNDARY" @@ -133,10 +58,98 @@ data: sysctl -w fs.inotify.max_user_watches=12000 --BOUNDARY-- - blockDeviceMappings: - - deviceName: /dev/xvda - ebs: - volumeSize: 100Gi - volumeType: gp2 - encrypted: true - deleteOnTermination: true + + nodepool.yaml: | + apiVersion: karpenter.sh/v1beta1 + kind: NodePool + metadata: + name: workflow-WORKFLOW_NAME + spec: + disruption: + consolidateAfter: 10s + consolidationPolicy: WhenEmpty + expireAfter: 48h0m0s + limits: + cpu: 4k + template: + metadata: + labels: + purpose: workflow + role: WORKFLOW_NAME + spec: + nodeClassRef: + name: workflow-WORKFLOW_NAME + requirements: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - key: node.kubernetes.io/instance-type + operator: In + values: + - c6a.large + - c6a.xlarge + - c6a.2xlarge + - c6a.4xlarge + - c6a.8xlarge + - c6a.12xlarge + - c7a.large + - c7a.xlarge + - c7a.2xlarge + - c7a.4xlarge + - c7a.8xlarge + - c7a.12xlarge + - c6i.large + - c6i.xlarge + - c6i.2xlarge + - c6i.4xlarge + - c6i.8xlarge + - c6i.12xlarge + - c7i.large + - c7i.xlarge + - c7i.2xlarge + - c7i.4xlarge + - c7i.8xlarge + - c7i.12xlarge + - m6a.2xlarge + - m6a.4xlarge + - m6a.8xlarge + - m6a.12xlarge + - m6a.16xlarge + - m6a.24xlarge + - m7a.2xlarge + - m7a.4xlarge + - m7a.8xlarge + - m7a.12xlarge + - m7a.16xlarge + - m7a.24xlarge + - m6i.2xlarge + - m6i.4xlarge + - m6i.8xlarge + - m6i.12xlarge + - m6i.16xlarge + - m6i.24xlarge + - m7i.2xlarge + - m7i.4xlarge + - m7i.8xlarge + - m7i.12xlarge + - m7i.16xlarge + - m7i.24xlarge + - r7iz.2xlarge + - r7iz.4xlarge + - r7iz.8xlarge + - r7iz.12xlarge + - r7iz.16xlarge + - r7iz.24xlarge + - key: kubernetes.io/os + operator: In + values: + - linux + taints: + - effect: NoSchedule + key: role + value: WORKFLOW_NAME diff --git a/kube/services/argo-events/workflows/sensor-completed.yaml b/kube/services/argo-events/workflows/sensor-completed.yaml index 
293c0e119..e483d3297 100644 --- a/kube/services/argo-events/workflows/sensor-completed.yaml +++ b/kube/services/argo-events/workflows/sensor-completed.yaml @@ -51,12 +51,12 @@ spec: args: - "-c" - | - if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME + if kubectl get nodepool workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete nodepool workflow-$WORKFLOW_NAME fi - if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - kubectl delete provisioners workflow-$WORKFLOW_NAME + if kubectl get ec2nodeclass workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete ec2nodeclass workflow-$WORKFLOW_NAME fi env: - name: WORKFLOW_NAME diff --git a/kube/services/argo-events/workflows/sensor-created.yaml b/kube/services/argo-events/workflows/sensor-created.yaml index 9f6de2c83..05da3bc38 100644 --- a/kube/services/argo-events/workflows/sensor-created.yaml +++ b/kube/services/argo-events/workflows/sensor-created.yaml @@ -36,6 +36,10 @@ spec: dependencyName: workflow-created-event dataKey: body.metadata.labels.gen3username dest: spec.template.spec.containers.0.env.1.value + - src: + dependencyName: workflow-created-event + dataKey: body.metadata.labels.gen3teamproject + dest: spec.template.spec.containers.0.env.2.value source: resource: apiVersion: batch/v1 @@ -60,36 +64,38 @@ spec: - "-c" - | #!/bin/bash - if [ -z "$PROVISIONER_TEMPLATE" ]; then - PROVISIONER_TEMPLATE="provisioner.yaml" - fi - if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then - AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml" + if [ -z "$NODEPOOL_TEMPLATE" ]; then + NODEPOOL_TEMPLATE="/manifests/nodepool.yaml" fi + if [ -z "$NODECLASS_TEMPLATE" ]; then + NODECLASS_TEMPLATE="/manifests/nodeclass.yaml" + fi - if ! 
kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f - + if ! kubectl get ec2nodeclass workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/GEN3_TEAMNAME/$GEN3_TEAMNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$NODECLASS_TEMPLATE" | kubectl apply -f - fi - if ! kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - + if ! kubectl get nodepool workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/GEN3_TEAMNAME/$GEN3_TEAMNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$NODEPOOL_TEMPLATE" | kubectl apply -f - fi env: - name: WORKFLOW_NAME value: "" - name: GEN3_USERNAME value: "" + - name: GEN3_TEAMNAME + value: "" - name: ENVIRONMENT valueFrom: configMapKeyRef: name: environment key: environment - - name: PROVISIONER_TEMPLATE - value: /manifests/provisioner.yaml - - name: AWSNODETEMPLATE_TEMPLATE - value: /manifests/nodetemplate.yaml + - name: NODEPOOL_TEMPLATE + value: /manifests/nodepool.yaml + - name: NODECLASS_TEMPLATE + value: /manifests/nodeclass.yaml volumeMounts: - name: karpenter-templates-volume mountPath: /manifests diff --git a/kube/services/argo-events/workflows/sensor-deleted.yaml b/kube/services/argo-events/workflows/sensor-deleted.yaml index c235a820a..0b12cb118 100644 --- a/kube/services/argo-events/workflows/sensor-deleted.yaml +++ b/kube/services/argo-events/workflows/sensor-deleted.yaml @@ -47,12 +47,12 @@ spec: args: - "-c" - | - if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - kubectl delete awsnodetemplate 
workflow-$WORKFLOW_NAME + if kubectl get nodepool workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete nodepool workflow-$WORKFLOW_NAME fi - if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - kubectl delete provisioners workflow-$WORKFLOW_NAME + if kubectl get ec2nodeclass workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete ec2nodeclass workflow-$WORKFLOW_NAME fi env: - name: WORKFLOW_NAME diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml deleted file mode 100644 index aaba57b07..000000000 --- a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - name: karpenter-reconciler-cronjob-va-testing - namespace: argo-events -spec: - schedule: "*/5 * * * *" - jobTemplate: - spec: - template: - metadata: - labels: - app: gen3job - spec: - serviceAccount: karpenter-reconciler - volumes: - - name: karpenter-templates-volume - configMap: - name: karpenter-templates - containers: - - name: karpenter-reconciler - image: quay.io/cdis/awshelper - volumeMounts: - - name: karpenter-templates-volume - mountPath: /manifests - env: - - name: PROVISIONER_TEMPLATE - value: /manifests/provisioner.yaml - - name: AWSNODETEMPLATE_TEMPLATE - value: /manifests/nodetemplate.yaml - command: ["/bin/bash"] - args: - - "-c" - - | - #!/bin/bash - if [ -z "$PROVISIONER_TEMPLATE" ]; then - PROVISIONER_TEMPLATE="provisioner.yaml" - fi - - if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then - AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml" - fi - - ENVIRONMENT=$(kubectl -n va-testing get configmap global -o jsonpath="{.data.environment}") - - WORKFLOWS=$(kubectl get workflows -n argo -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{"\n"}') - - WORKFLOW_ARRAY=() - - while IFS= read -r line; do - 
WORKFLOW_ARRAY+=("$line") - done <<< "$WORKFLOWS" - - for workflow in "${WORKFLOW_ARRAY[@]}" - do - echo "Running loop for workflow: $workflow" - workflow_name=$(echo "$workflow" | awk '{print $1}') - workflow_user=$(echo "$workflow" | awk '{print $2}') - - if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then - echo "No awsnodetemplate found for ${workflow_name}, creating one" - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f - - fi - - if ! kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then - echo "No provisioner found for ${workflow_name}, creating one" - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - - - fi - done - restartPolicy: OnFailure diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml index aef5d6c49..3c6d58768 100644 --- a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml +++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml @@ -24,26 +24,26 @@ spec: - name: karpenter-templates-volume mountPath: /manifests env: - - name: PROVISIONER_TEMPLATE - value: /manifests/provisioner.yaml - - name: AWSNODETEMPLATE_TEMPLATE - value: /manifests/nodetemplate.yaml + - name: NODEPOOL_TEMPLATE + value: /manifests/nodepool.yaml + - name: NODECLASS_TEMPLATE + value: /manifests/nodeclass.yaml command: ["/bin/bash"] args: - "-c" - | #!/bin/bash - if [ -z "$NODEPOOL_TEMPLATE" ]; then - PROVISIONER_TEMPLATE="provisioner.yaml" + if [ -z "$NODEPOOL_TEMPLATE" ]; then + NODEPOOL_TEMPLATE="/manifests/nodepool.yaml" fi - if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then - AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml" + if [ -z "$NODECLASS_TEMPLATE" ]; then +
NODECLASS_TEMPLATE="/manifests/nodeclass.yaml" fi ENVIRONMENT=$(kubectl -n default get configmap global -o jsonpath="{.data.environment}") - WORKFLOWS=$(kubectl get workflows -n argo -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{"\n"}') + WORKFLOWS=$(kubectl get workflows -n argo -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{" "}{.metadata.labels.gen3teamproject}{"\n"}') WORKFLOW_ARRAY=() @@ -57,16 +57,17 @@ spec: do workflow_name=$(echo "$workflow" | awk '{print $1}') workflow_user=$(echo "$workflow" | awk '{print $2}') + workflow_team=$(echo "$workflow" | awk '{print $3}') if [ ! -z "$workflow_name" ]; then - if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then + if ! kubectl get ec2nodeclass workflow-$workflow_name >/dev/null 2>&1; then echo "No awsnodetemplate found for ${workflow_name}, creating one" - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f - + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_TEAMNAME/$workflow_team/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$NODECLASS_TEMPLATE" | kubectl apply -f - fi - if ! kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then + if ! 
kubectl get nodepool workflow-$workflow_name >/dev/null 2>&1; then echo "No provisioner found for ${workflow_name}, creating one" - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_TEAMNAME/$workflow_team/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$NODEPOOL_TEMPLATE" | kubectl apply -f - fi fi From de9cb3ec1d016987caaa9d0d70b566d1558ff98c Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 12 Nov 2024 09:13:49 -0700 Subject: [PATCH 13/22] modifying gdcdb create job (#2668) * modifying gdcdb create job so it is backwards compatible and works with the new image that utilizes poetry for its virtual environments * Update gdcdb-create-job.yaml --- kube/services/jobs/gdcdb-create-job.yaml | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/kube/services/jobs/gdcdb-create-job.yaml b/kube/services/jobs/gdcdb-create-job.yaml index 1668429ad..9bdc8bd09 100644 --- a/kube/services/jobs/gdcdb-create-job.yaml +++ b/kube/services/jobs/gdcdb-create-job.yaml @@ -51,7 +51,7 @@ spec: - "-c" # Script always succeeds if it runs (echo exits with 0) - | - eval $(python 2> /dev/null < /dev/null && python || poetry run python) < /dev/null; then + echo datamodel_postgres_admin create-all -U "${db_creds[db_username]}" -P XXXXXXX -H "${db_creds[db_host]}" -D "${db_creds[db_database]}" + datamodel_postgres_admin create-all -U "${db_creds[db_username]}" -P "${db_creds[db_password]}" -H "${db_creds[db_host]}" -D "${db_creds[db_database]}" + echo python /sheepdog/bin/setup_transactionlogs.py --user "${db_creds[db_username]}" --password XXXXX --host "${db_creds[db_host]}" --database "${db_creds[db_database]}" + python /sheepdog/bin/setup_transactionlogs.py --user "${db_creds[db_username]}" --password 
"${db_creds[db_password]}" --host "${db_creds[db_host]}" --database "${db_creds[db_database]}" + + else + echo poetry run datamodel_postgres_admin create-all -U "${db_creds[db_username]}" -P XXXXXXX -H "${db_creds[db_host]}" -D "${db_creds[db_database]}" + poetry run datamodel_postgres_admin create-all -U "${db_creds[db_username]}" -P "${db_creds[db_password]}" -H "${db_creds[db_host]}" -D "${db_creds[db_database]}" + echo poetry run python /sheepdog/bin/setup_transactionlogs.py --user "${db_creds[db_username]}" --password XXXXX --host "${db_creds[db_host]}" --database "${db_creds[db_database]}" + poetry run python /sheepdog/bin/setup_transactionlogs.py --user "${db_creds[db_username]}" --password "${db_creds[db_password]}" --host "${db_creds[db_host]}" --database "${db_creds[db_database]}" + fi echo "Exit code: $?" restartPolicy: Never From 3130a30dd9edc52d8df9e9108b25338757045d58 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 13 Nov 2024 09:48:43 -0700 Subject: [PATCH 14/22] Update squid.conf (#2641) Update squid.conf to work with new AL based squid image. 
--- flavors/squid_auto/squid_running_on_docker.sh | 4 ++-- flavors/squid_auto/startup_configs/squid.conf | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/flavors/squid_auto/squid_running_on_docker.sh b/flavors/squid_auto/squid_running_on_docker.sh index 812a9f738..836ceb01f 100644 --- a/flavors/squid_auto/squid_running_on_docker.sh +++ b/flavors/squid_auto/squid_running_on_docker.sh @@ -166,7 +166,7 @@ $(command -v docker) run --name squid --network=host -d \ --volume ${SQUID_PID_DIR}:${SQUID_PID_DIR} \ --volume ${SQUID_CACHE_DIR}:${SQUID_CACHE_DIR} \ --volume ${SQUID_CONFIG_DIR}:${SQUID_CONFIG_DIR}:ro \ - quay.io/cdis/squid:${SQUID_IMAGE_TAG} + quay.io/cdis/squid-al:${SQUID_IMAGE_TAG} exit 0 EOF @@ -306,7 +306,7 @@ function main(){ --volume ${SQUID_PID_DIR}:${SQUID_PID_DIR} \ --volume ${SQUID_CACHE_DIR}:${SQUID_CACHE_DIR} \ --volume ${SQUID_CONFIG_DIR}:${SQUID_CONFIG_DIR}:ro \ - quay.io/cdis/squid:${SQUID_IMAGE_TAG} + quay.io/cdis/squid-al:${SQUID_IMAGE_TAG} max_attempts=10 attempt_counter=0 diff --git a/flavors/squid_auto/startup_configs/squid.conf b/flavors/squid_auto/startup_configs/squid.conf index b1e44810a..bb8675a41 100644 --- a/flavors/squid_auto/startup_configs/squid.conf +++ b/flavors/squid_auto/startup_configs/squid.conf @@ -3,6 +3,10 @@ # Instead of obeying dns_v4_first settings, IP family usage order is now primarily controlled by DNS response time: If a DNS AAAA response comes first while Squid is waiting for an IP address, then Squid will use the received IPv6 address(es) first. For previously cached IPs, Squid tries IPv6 addresses first. To control IP address families used by Squid, admins are expected to use firewalls, DNS recursive-resolver configuration, and/or --disable-ipv6. When planning you configuration changes, please keep in mind that the upcoming Happy Eyeballs improvements will favor faster TCP connection establishment, decreasing the impact of DNS resolution timing. 
#dns_v4_first on +#To initialize the SSL certificate database directory for SSL bump commands +sslcrtd_program /usr/lib64/squid/security_file_certgen -s /var/cache/squid/ssl_db -M 4MB +sslcrtd_children 4 startup=1 idle=1 + #Because we just use one ACL for the whitelists, there can be #NO repetitions in the matches. If there is a wildcard that #matches an explicit domain, it WILL error and squid will not @@ -56,6 +60,8 @@ http_access deny all persistent_request_timeout 5 seconds +cache_effective_user squid +cache_effective_group squid pid_filename /var/run/squid/squid.pid # vi:syntax=squid.conf From 972a69d848452fba85e17b93e038bf1e6eb28924 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 18 Nov 2024 10:26:54 -0700 Subject: [PATCH 15/22] Adding "poetry run" (#2673) * adding updates so these jobs will work with our new AL based environments that use poetry to manage their virtual environments --- kube/services/jobs/graph-create-job.yaml | 16 +++++++++++----- kube/services/jobs/indexd-authz-job.yaml | 4 ++-- .../jobs/metadata-aggregate-sync-job.yaml | 2 +- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/kube/services/jobs/graph-create-job.yaml b/kube/services/jobs/graph-create-job.yaml index f6595cdd2..339e1ce46 100644 --- a/kube/services/jobs/graph-create-job.yaml +++ b/kube/services/jobs/graph-create-job.yaml @@ -51,7 +51,7 @@ spec: - "-c" # Script always succeeds if it runs (echo exits with 0) - | - eval $(python 2> /dev/null < /dev/null || poetry run python 2> /dev/null) < /dev/null; then + echo datamodel_postgres_admin graph-create -U "${db_creds[db_username]}" -P XXXXXX -H "${db_creds[db_host]}" -D "${db_creds[db_database]}" + datamodel_postgres_admin graph-create -U "${db_creds[db_username]}" -P "${db_creds[db_password]}" -H "${db_creds[db_host]}" -D "${db_creds[db_database]}" + echo "Exit code: $?" 
+ else + echo poetry run datamodel_postgres_admin graph-create -U "${db_creds[db_username]}" -P XXXXXX -H "${db_creds[db_host]}" -D "${db_creds[db_database]}" + poetry run datamodel_postgres_admin graph-create -U "${db_creds[db_username]}" -P "${db_creds[db_password]}" -H "${db_creds[db_host]}" -D "${db_creds[db_database]}" + echo "Exit code: $?" + fi + restartPolicy: Never \ No newline at end of file diff --git a/kube/services/jobs/indexd-authz-job.yaml b/kube/services/jobs/indexd-authz-job.yaml index 473159f35..5cc167895 100644 --- a/kube/services/jobs/indexd-authz-job.yaml +++ b/kube/services/jobs/indexd-authz-job.yaml @@ -83,7 +83,7 @@ spec: - | flags="--path /var/www/indexd/ --arborist-url http://arborist-service --user-yaml-path /var/www/indexd/user.yaml" if [[ "$USE_SHEEPDOG" == "true" ]]; then - db_uri=$(python - <<- EOF + db_uri=$(poetry run python - <<- EOF from base64 import b64decode import json @@ -98,6 +98,6 @@ spec: if [[ -n "$START_DID" ]]; then flags="$flags --start-did $START_DID" fi - python /indexd/bin/migrate_acl_authz.py $flags + poetry run python /indexd/bin/migrate_acl_authz.py $flags echo "Exit code: $?" restartPolicy: Never diff --git a/kube/services/jobs/metadata-aggregate-sync-job.yaml b/kube/services/jobs/metadata-aggregate-sync-job.yaml index 7f4043753..e66fb2254 100644 --- a/kube/services/jobs/metadata-aggregate-sync-job.yaml +++ b/kube/services/jobs/metadata-aggregate-sync-job.yaml @@ -85,7 +85,7 @@ spec: args: - "-c" - | - /env/bin/python /src/src/mds/populate.py --config /aggregate_config.json + /env/bin/python /src/src/mds/populate.py --config /aggregate_config.json || poetry run python /mds/src/mds/populate.py --config /aggregate_config.json if [ $? -ne 0 ]; then echo "WARNING: non zero exit code: $?" 
else From 134803ed0746ede1ad5ba7ab251a56b99d43ca11 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 20 Nov 2024 10:54:05 -0700 Subject: [PATCH 16/22] removing old squid file from cloud auto as it now lives in "base_images" repo" (#2675) --- .github/workflows/image_build_push_squid.yaml | 23 ------- Docker/squid/Dockerfile | 62 ------------------- Docker/squid/ERR_ACCESS_DENIED | 36 ----------- Docker/squid/certfix.sh | 6 -- Docker/squid/entrypoint.sh | 45 -------------- flavors/squid_auto/squid_running_on_docker.sh | 4 +- 6 files changed, 2 insertions(+), 174 deletions(-) delete mode 100644 .github/workflows/image_build_push_squid.yaml delete mode 100644 Docker/squid/Dockerfile delete mode 100644 Docker/squid/ERR_ACCESS_DENIED delete mode 100644 Docker/squid/certfix.sh delete mode 100644 Docker/squid/entrypoint.sh diff --git a/.github/workflows/image_build_push_squid.yaml b/.github/workflows/image_build_push_squid.yaml deleted file mode 100644 index 0645fb8b2..000000000 --- a/.github/workflows/image_build_push_squid.yaml +++ /dev/null @@ -1,23 +0,0 @@ -name: Build Squid images - -on: - workflow_dispatch: - push: - paths: - - .github/workflows/image_build_push_squid.yaml - - Docker/squid/** - -jobs: - squid: - name: Squid image - uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master - with: - DOCKERFILE_LOCATION: "./Docker/squid/Dockerfile" - DOCKERFILE_BUILD_CONTEXT: "./Docker/squid" - OVERRIDE_REPO_NAME: "squid" - USE_QUAY_ONLY: true - secrets: - ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} - ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} - QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} - QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} diff --git a/Docker/squid/Dockerfile b/Docker/squid/Dockerfile deleted file mode 100644 index 90b738902..000000000 --- a/Docker/squid/Dockerfile +++ /dev/null @@ -1,62 +0,0 @@ -FROM quay.io/cdis/ubuntu:18.04 - -ENV 
SQUID_VERSION="squid-5.1" \ - SQUID_DOWNLOAD_URL="http://www.squid-cache.org/Versions/v5/" \ - SQUID_USER="proxy" \ - SQUID_CACHE_DIR="/var/cache/squid" \ - SQUID_LOG_DIR="/var/log/squid" \ - SQUID_SYSCONFIG_DIR="/etc/squid" \ - SQUID_PID_DIR="/var/run/squid" \ - CFLAGS="-Os" \ - CXXFLAGS="-Os" - - -RUN apt update \ - && apt install -y build-essential wget libssl1.0-dev - -COPY ./entrypoint.sh /usr/sbin/entrypoint.sh -COPY ./certfix.sh /certfix.sh - -RUN chmod +x /usr/sbin/entrypoint.sh -RUN chmod +x /certfix.sh -RUN bash /certfix.sh - -RUN (cd /tmp \ - && wget ${SQUID_DOWNLOAD_URL}${SQUID_VERSION}.tar.xz \ - && tar -xJf ${SQUID_VERSION}.tar.xz \ - && sed -i 's/if (rawPid <= 1)/if (rawPid < 1)/' ${SQUID_VERSION}/src/Instance.cc \ - && mkdir squid-build \ - && cd squid-build \ - && ../${SQUID_VERSION}/configure \ - --prefix=/usr \ - --exec-prefix=/usr \ - --sysconfdir=${SQUID_SYSCONFIG_DIR} \ - --sharedstatedir=/var/lib \ - --localstatedir=/var \ - --datadir=/usr/share/squid \ - --with-logdir=${SQUID_LOG_DIR} \ - --with-pidfile=${SQUID_PID_DIR}/squid.pid \ - --with-default-user=${SQUID_USER} \ - --enable-linux-netfilter \ - --with-openssl \ - --without-nettle \ - --disable-arch-native \ - && make \ - && make install) - -RUN (cd /tmp \ - && rm ${SQUID_VERSION}.tar.xz \ - && rm -rf ${SQUID_VERSION} squid-build) - -COPY ./ERR_ACCESS_DENIED /usr/share/squid/errors/templates/ERR_ACCESS_DENIED - -RUN mkdir -p ${SQUID_LOG_DIR} ${SQUID_CACHE_DIR} \ - && chown -R ${SQUID_USER}. ${SQUID_LOG_DIR} ${SQUID_CACHE_DIR} - -EXPOSE 3128/tcp -EXPOSE 3129/tcp -EXPOSE 3130/tcp - -VOLUME ${SQUID_LOG_DIR} ${SQUID_CACHE_DIR} ${SQUID_PID_DIR} ${SQUID_SYSCONFIG_DIR} - -ENTRYPOINT ["/usr/sbin/entrypoint.sh"] diff --git a/Docker/squid/ERR_ACCESS_DENIED b/Docker/squid/ERR_ACCESS_DENIED deleted file mode 100644 index 4189c3d63..000000000 --- a/Docker/squid/ERR_ACCESS_DENIED +++ /dev/null @@ -1,36 +0,0 @@ - - - - -ERROR: The requested URL could not be retrieved - - -
-

ERROR

-

The requested URL could not be retrieved

-
-
- -
-

The following error was encountered while trying to retrieve the URL: %U

- -
-

Access Denied.

-
- -

Access control configuration prevents your request from being allowed at this time. Please contact your service provider if you feel this is incorrect.

- -
-
- -
- - - diff --git a/Docker/squid/certfix.sh b/Docker/squid/certfix.sh deleted file mode 100644 index 87dfef66c..000000000 --- a/Docker/squid/certfix.sh +++ /dev/null @@ -1,6 +0,0 @@ -#/bin/bash - -if [[ -z $(cat /etc/ca-certificates.conf | grep '!mozilla/DST_Root_CA_X3.crt') ]] && [[ ! -z $(cat /etc/ca-certificates.conf | grep 'mozilla/DST_Root_CA_X3.crt') ]]; then - echo /etc/ca-certificates.conf | xargs sed -i 's/mozilla\/DST_Root_CA_X3.crt/!mozilla\/DST_Root_CA_X3.crt/g' - update-ca-certificates -fi diff --git a/Docker/squid/entrypoint.sh b/Docker/squid/entrypoint.sh deleted file mode 100644 index 3a9c89b6b..000000000 --- a/Docker/squid/entrypoint.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -set -e - -create_log_dir() { - mkdir -p ${SQUID_LOG_DIR} - chmod -R 755 ${SQUID_LOG_DIR} - chown -R ${SQUID_USER}:${SQUID_USER} ${SQUID_LOG_DIR} -} - -create_cache_dir() { - mkdir -p ${SQUID_CACHE_DIR} - chown -R ${SQUID_USER}:${SQUID_USER} ${SQUID_CACHE_DIR} -} - -apply_backward_compatibility_fixes() { - if [[ -f ${SQUID_SYSCONFIG_DIR}/squid.user.conf ]]; then - rm -rf ${SQUID_SYSCONFIG_DIR}/squid.conf - ln -sf ${SQUID_SYSCONFIG_DIR}/squid.user.conf ${SQUID_SYSCONFIG_DIR}/squid.conf - fi -} - -create_log_dir -create_cache_dir -apply_backward_compatibility_fixes - -# allow arguments to be passed to squid -if [[ ${1:0:1} = '-' ]]; then - EXTRA_ARGS="$@" - set -- -elif [[ ${1} == squid || ${1} == $(which squid) ]]; then - EXTRA_ARGS="${@:2}" - set -- -fi - -# default behaviour is to launch squid -if [[ -z ${1} ]]; then - if [[ ! -d ${SQUID_CACHE_DIR}/00 ]]; then - echo "Initializing cache..." - $(which squid) -N -f ${SQUID_SYSCONFIG_DIR}/squid.conf -z - fi - echo "Starting squid..." 
- exec $(which squid) -f ${SQUID_SYSCONFIG_DIR}/squid.conf -NYCd 1 ${EXTRA_ARGS} -else - exec "$@" -fi diff --git a/flavors/squid_auto/squid_running_on_docker.sh b/flavors/squid_auto/squid_running_on_docker.sh index 836ceb01f..812a9f738 100644 --- a/flavors/squid_auto/squid_running_on_docker.sh +++ b/flavors/squid_auto/squid_running_on_docker.sh @@ -166,7 +166,7 @@ $(command -v docker) run --name squid --network=host -d \ --volume ${SQUID_PID_DIR}:${SQUID_PID_DIR} \ --volume ${SQUID_CACHE_DIR}:${SQUID_CACHE_DIR} \ --volume ${SQUID_CONFIG_DIR}:${SQUID_CONFIG_DIR}:ro \ - quay.io/cdis/squid-al:${SQUID_IMAGE_TAG} + quay.io/cdis/squid:${SQUID_IMAGE_TAG} exit 0 EOF @@ -306,7 +306,7 @@ function main(){ --volume ${SQUID_PID_DIR}:${SQUID_PID_DIR} \ --volume ${SQUID_CACHE_DIR}:${SQUID_CACHE_DIR} \ --volume ${SQUID_CONFIG_DIR}:${SQUID_CONFIG_DIR}:ro \ - quay.io/cdis/squid-al:${SQUID_IMAGE_TAG} + quay.io/cdis/squid:${SQUID_IMAGE_TAG} max_attempts=10 attempt_counter=0 From 960444f28fef4783369099b4c9ea67426595e34d Mon Sep 17 00:00:00 2001 From: Krishna Agarwal <159047652+krishnaa05@users.noreply.github.com> Date: Thu, 21 Nov 2024 11:07:32 -0600 Subject: [PATCH 17/22] Add GH action workflow for integration tests (#2670) * Add GH action workflow for integration tests * Update integration_tests.yaml * Reverting Jenkinsfile and removing integrationtests entry * Update Jenkinsfile --------- Co-authored-by: Hara Prasad --- .github/workflows/integration_tests.yaml | 27 ++++ Jenkinsfile | 198 ----------------------- 2 files changed, 27 insertions(+), 198 deletions(-) create mode 100644 .github/workflows/integration_tests.yaml diff --git a/.github/workflows/integration_tests.yaml b/.github/workflows/integration_tests.yaml new file mode 100644 index 000000000..80ce427fa --- /dev/null +++ b/.github/workflows/integration_tests.yaml @@ -0,0 +1,27 @@ +name: Integration Tests + +on: pull_request + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref 
}} + cancel-in-progress: true + +jobs: + integration_tests: + name: Integration tests + uses: uc-cdis/.github/.github/workflows/integration_tests.yaml@master + with: + QUAY_REPO: "awshelper" + secrets: + CI_AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }} + CI_AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_SECRET_ACCESS_KEY }} + JENKINS_API_TOKEN: ${{ secrets.JENKINS_API_TOKEN }} + QA_DASHBOARD_S3_PATH: ${{ secrets.QA_DASHBOARD_S3_PATH }} + CI_TEST_ORCID_USERID: ${{ secrets.CI_TEST_ORCID_USERID }} + CI_TEST_ORCID_PASSWORD: ${{ secrets.CI_TEST_ORCID_PASSWORD }} + CI_TEST_RAS_USERID: ${{ secrets.CI_TEST_RAS_USERID }} + CI_TEST_RAS_PASSWORD: ${{ secrets.CI_TEST_RAS_PASSWORD }} + CI_TEST_RAS_2_USERID: ${{ secrets.CI_TEST_RAS_2_USERID }} + CI_TEST_RAS_2_PASSWORD: ${{ secrets.CI_TEST_RAS_2_PASSWORD }} + CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }} + CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }} diff --git a/Jenkinsfile b/Jenkinsfile index 908c2d01a..fbd7367c2 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -433,203 +433,6 @@ spec: } } - stage('WaitForQuayBuild') { - options { - timeout(time: 30, unit: 'MINUTES') // timeout on this stage - } - steps { - script { - try { - if(!skipQuayImgBuildWait) { - quayHelper.waitForBuild( - "awshelper", - pipeConfig['currentBranchFormatted'] - ) - } else { - Utils.markStageSkippedForConditional(STAGE_NAME) - } - } catch (ex) { - metricsHelper.writeMetricWithResult(STAGE_NAME, false) - pipelineHelper.handleError(ex) - } - metricsHelper.writeMetricWithResult(STAGE_NAME, true) - } - } - } - - stage('SelectNamespace') { - steps { - script { - try { - if(!doNotRunTests) { - (kubectlNamespace, lock) = kubeHelper.selectAndLockNamespace(pipeConfig['UID'], namespaces) - kubeLocks << lock - } else { - Utils.markStageSkippedForConditional(STAGE_NAME) - } - } catch (ex) { - metricsHelper.writeMetricWithResult(STAGE_NAME, false) - pipelineHelper.handleError(ex) - } - currentBuild.displayName = "#${BUILD_NUMBER} - 
${kubectlNamespace}" - metricsHelper.writeMetricWithResult(STAGE_NAME, true) - } - } - } - - stage('ModifyManifest') { - steps { - script { - try { - if(!doNotRunTests) { - manifestHelper.editService( - kubeHelper.getHostname(kubectlNamespace), - "awshelper", - pipeConfig.serviceTesting.branch - ) - } else { - Utils.markStageSkippedForConditional(STAGE_NAME) - } - } catch (ex) { - metricsHelper.writeMetricWithResult(STAGE_NAME, false) - pipelineHelper.handleError(ex) - } - metricsHelper.writeMetricWithResult(STAGE_NAME, true) - } - } - } - - stage('K8sReset') { - options { - timeout(time: 1, unit: 'HOURS') // timeout on this stage - } - steps { - script { - try { - if(!doNotRunTests) { - // adding the reset-lock lock in case reset fails before unlocking - kubeLocks << kubeHelper.newKubeLock(kubectlNamespace, "gen3-reset", "reset-lock") - kubeHelper.reset(kubectlNamespace) - } else { - Utils.markStageSkippedForConditional(STAGE_NAME) - } - } catch (ex) { - // ignore aborted pipelines (not a failure, just some subsequent commit that initiated a new build) - if (ex.getClass().getCanonicalName() != "hudson.AbortException" && - ex.getClass().getCanonicalName() != "org.jenkinsci.plugins.workflow.steps.FlowInterruptedException") { - metricsHelper.writeMetricWithResult(STAGE_NAME, false) - kubeHelper.sendSlackNotification(kubectlNamespace, "false") - kubeHelper.saveLogs(kubectlNamespace) - } - pipelineHelper.handleError(ex) - } - metricsHelper.writeMetricWithResult(STAGE_NAME, true) - } - } - } - - stage('VerifyClusterHealth') { - steps { - script { - try { - if(!doNotRunTests) { - kubeHelper.waitForPods(kubectlNamespace) - testHelper.checkPodHealth(kubectlNamespace, "") - } else { - Utils.markStageSkippedForConditional(STAGE_NAME) - } - } catch (ex) { - metricsHelper.writeMetricWithResult(STAGE_NAME, false) - pipelineHelper.handleError(ex) - } - metricsHelper.writeMetricWithResult(STAGE_NAME, true) - } - } - } - - - stage('GenerateData') { - steps { - script { - try { - 
if(!doNotRunTests) { - testHelper.simulateData(kubectlNamespace) - } else { - Utils.markStageSkippedForConditional(STAGE_NAME) - } - } catch (ex) { - metricsHelper.writeMetricWithResult(STAGE_NAME, false) - pipelineHelper.handleError(ex) - } - metricsHelper.writeMetricWithResult(STAGE_NAME, true) - } - } - } - - stage('FetchDataClient') { - steps { - script { - try { - if(!doNotRunTests) { - testHelper.fetchDataClient("master") - } else { - Utils.markStageSkippedForConditional(STAGE_NAME) - } - } catch (ex) { - metricsHelper.writeMetricWithResult(STAGE_NAME, false) - pipelineHelper.handleError(ex) - } - metricsHelper.writeMetricWithResult(STAGE_NAME, true) - } - } - } - - stage('RunTests') { - options { - timeout(time: 3, unit: 'HOURS') // timeout on this stage - } - steps { - script { - try { - if(!doNotRunTests) { - testHelper.soonToBeLegacyRunIntegrationTests( - kubectlNamespace, - pipeConfig.serviceTesting.name, - testedEnv, - isGen3Release, - isNightlyBuild, - listOfSelectedTests - ) - } else { - Utils.markStageSkippedForConditional(STAGE_NAME) - } - } catch (ex) { - metricsHelper.writeMetricWithResult(STAGE_NAME, false) - pipelineHelper.handleError(ex) - } - metricsHelper.writeMetricWithResult(STAGE_NAME, true) - } - } - } - - stage('CleanS3') { - steps { - script { - try { - if(!doNotRunTests) { - testHelper.cleanS3(kubectlNamespace) - } else { - Utils.markStageSkippedForConditional(STAGE_NAME) - } - } catch (ex) { - metricsHelper.writeMetricWithResult(STAGE_NAME, false) - pipelineHelper.handleError(ex) - } - metricsHelper.writeMetricWithResult(STAGE_NAME, true) - } - } - } - stage('authzTest') { steps { script { @@ -667,4 +470,3 @@ spec: } } } - From f4b0287725be7d8b7b0b2bae6f32b92954251555 Mon Sep 17 00:00:00 2001 From: pieterlukasse Date: Mon, 2 Dec 2024 16:47:38 +0100 Subject: [PATCH 18/22] fix: use /auth/request instead of /auth/mapping Arborist endpoint (#2677) --- kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml index a5d0972eb..5122c5347 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml @@ -56,7 +56,7 @@ stringData: security_oauth_callback_urlResolver: query security_ohdsi_custom_authorization_mode: teamproject - security_ohdsi_custom_authorization_url: $ARBORIST_URL/auth/mapping + security_ohdsi_custom_authorization_url: $ARBORIST_URL/auth/request logging_level_root: info logging_level_org_ohdsi: info From a2ba4fc0260b492b92c4126969fbc57bc6401b7c Mon Sep 17 00:00:00 2001 From: Maribelle Hannah Gomez Date: Wed, 18 Dec 2024 13:06:02 -0600 Subject: [PATCH 19/22] Update Dockerfile env key=value format --- Docker/python-nginx/python3.10-buster/Dockerfile | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Docker/python-nginx/python3.10-buster/Dockerfile b/Docker/python-nginx/python3.10-buster/Dockerfile index 9da445160..8410e57ab 100644 --- a/Docker/python-nginx/python3.10-buster/Dockerfile +++ b/Docker/python-nginx/python3.10-buster/Dockerfile @@ -2,9 +2,9 @@ FROM quay.io/cdis/python:3.10-buster # https://github.com/nginxinc/docker-nginx/blob/f958fbacada447737319e979db45a1da49123142/mainline/debian/Dockerfile -ENV NGINX_VERSION 1.21.1 -ENV NJS_VERSION 0.6.1 -ENV PKG_RELEASE 1~buster +ENV NGINX_VERSION=1.21.1 +ENV NJS_VERSION=0.6.1 +ENV PKG_RELEASE=1~buster RUN set -x \ # create nginx user/group first, to be consistent throughout docker variants @@ -126,7 +126,7 @@ RUN apt-get update && apt-get install -y supervisor \ COPY supervisord.ini /etc/supervisor.d/supervisord.ini # Which uWSGI .ini file should be used, to make it customizable -ENV UWSGI_INI /app/uwsgi.ini +ENV UWSGI_INI=/app/uwsgi.ini # By default, disable uwsgi cheaper mode and run 2 processes. 
# If UWSGI_CHEAPER=N and UWSGI_PROCESSES=M, N is the min and M is the max @@ -138,16 +138,16 @@ ENV UWSGI_PROCESSES=2 # By default, allow unlimited file sizes, modify it to limit the file sizes # To have a maximum of 1 MB (Nginx's default) change the line to: # ENV NGINX_MAX_UPLOAD 1m -ENV NGINX_MAX_UPLOAD 0 +ENV NGINX_MAX_UPLOAD=0 # By default, Nginx will run a single worker process, setting it to auto # will create a worker for each CPU core -ENV NGINX_WORKER_PROCESSES 1 +ENV NGINX_WORKER_PROCESSES=1 # By default, Nginx listens on port 80. # To modify this, change LISTEN_PORT environment variable. # (in a Dockerfile or with an option for `docker run`) -ENV LISTEN_PORT 80 +ENV LISTEN_PORT=80 # Copy the entrypoint that will generate Nginx additional configs COPY entrypoint.sh /entrypoint.sh @@ -165,4 +165,4 @@ ENTRYPOINT ["sh", "/entrypoint.sh"] COPY ./app /app WORKDIR /app -CMD ["/usr/bin/supervisord"] \ No newline at end of file +CMD ["/usr/bin/supervisord"] From 4c445dd5acf943b29cbf358bcb0170a365b60f84 Mon Sep 17 00:00:00 2001 From: Maribelle Hannah Gomez Date: Wed, 18 Dec 2024 13:10:51 -0600 Subject: [PATCH 20/22] Update Dockerfile --- Docker/awshelper/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker/awshelper/Dockerfile b/Docker/awshelper/Dockerfile index f3dd7b60e..7e08d0957 100644 --- a/Docker/awshelper/Dockerfile +++ b/Docker/awshelper/Dockerfile @@ -126,4 +126,4 @@ RUN git config --global user.email gen3 \ RUN export GEN3_HOME="$HOME/cloud-automation" \ && bash -c 'source "$GEN3_HOME/gen3/gen3setup.sh" && gen3 help' -CMD /bin/bash +CMD ["/bin/bash"] From a9db2d711c16ec42aa4eccad1a06153e00779c83 Mon Sep 17 00:00:00 2001 From: BinamB Date: Tue, 7 Jan 2025 14:54:01 -0600 Subject: [PATCH 21/22] Revert "Update Dockerfile" This reverts commit 4c445dd5acf943b29cbf358bcb0170a365b60f84. 
--- Docker/awshelper/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker/awshelper/Dockerfile b/Docker/awshelper/Dockerfile index 7e08d0957..f3dd7b60e 100644 --- a/Docker/awshelper/Dockerfile +++ b/Docker/awshelper/Dockerfile @@ -126,4 +126,4 @@ RUN git config --global user.email gen3 \ RUN export GEN3_HOME="$HOME/cloud-automation" \ && bash -c 'source "$GEN3_HOME/gen3/gen3setup.sh" && gen3 help' -CMD ["/bin/bash"] +CMD /bin/bash From e1bd5f095503e97b4ff07d82ec4a9f667f21dff8 Mon Sep 17 00:00:00 2001 From: BinamB Date: Tue, 7 Jan 2025 14:54:05 -0600 Subject: [PATCH 22/22] Revert "Update Dockerfile env key=value format" This reverts commit a2ba4fc0260b492b92c4126969fbc57bc6401b7c. --- Docker/python-nginx/python3.10-buster/Dockerfile | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Docker/python-nginx/python3.10-buster/Dockerfile b/Docker/python-nginx/python3.10-buster/Dockerfile index 8410e57ab..9da445160 100644 --- a/Docker/python-nginx/python3.10-buster/Dockerfile +++ b/Docker/python-nginx/python3.10-buster/Dockerfile @@ -2,9 +2,9 @@ FROM quay.io/cdis/python:3.10-buster # https://github.com/nginxinc/docker-nginx/blob/f958fbacada447737319e979db45a1da49123142/mainline/debian/Dockerfile -ENV NGINX_VERSION=1.21.1 -ENV NJS_VERSION=0.6.1 -ENV PKG_RELEASE=1~buster +ENV NGINX_VERSION 1.21.1 +ENV NJS_VERSION 0.6.1 +ENV PKG_RELEASE 1~buster RUN set -x \ # create nginx user/group first, to be consistent throughout docker variants @@ -126,7 +126,7 @@ RUN apt-get update && apt-get install -y supervisor \ COPY supervisord.ini /etc/supervisor.d/supervisord.ini # Which uWSGI .ini file should be used, to make it customizable -ENV UWSGI_INI=/app/uwsgi.ini +ENV UWSGI_INI /app/uwsgi.ini # By default, disable uwsgi cheaper mode and run 2 processes. 
# If UWSGI_CHEAPER=N and UWSGI_PROCESSES=M, N is the min and M is the max @@ -138,16 +138,16 @@ ENV UWSGI_PROCESSES=2 # By default, allow unlimited file sizes, modify it to limit the file sizes # To have a maximum of 1 MB (Nginx's default) change the line to: # ENV NGINX_MAX_UPLOAD 1m -ENV NGINX_MAX_UPLOAD=0 +ENV NGINX_MAX_UPLOAD 0 # By default, Nginx will run a single worker process, setting it to auto # will create a worker for each CPU core -ENV NGINX_WORKER_PROCESSES=1 +ENV NGINX_WORKER_PROCESSES 1 # By default, Nginx listens on port 80. # To modify this, change LISTEN_PORT environment variable. # (in a Dockerfile or with an option for `docker run`) -ENV LISTEN_PORT=80 +ENV LISTEN_PORT 80 # Copy the entrypoint that will generate Nginx additional configs COPY entrypoint.sh /entrypoint.sh @@ -165,4 +165,4 @@ ENTRYPOINT ["sh", "/entrypoint.sh"] COPY ./app /app WORKDIR /app -CMD ["/usr/bin/supervisord"] +CMD ["/usr/bin/supervisord"] \ No newline at end of file