From 46f2302fa6d95dc2574bf8d2ad7b93868c162f32 Mon Sep 17 00:00:00 2001 From: borg-z Date: Mon, 5 Aug 2024 14:05:48 +0300 Subject: [PATCH 1/6] vcd e2e workflow fix Signed-off-by: borg-z --- .github/1 | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 .github/1 diff --git a/.github/1 b/.github/1 new file mode 100644 index 0000000000..e69de29bb2 From c0840dff92c609cc2760de0add07a185d1fd1486 Mon Sep 17 00:00:00 2001 From: borg-z Date: Mon, 5 Aug 2024 16:35:45 +0300 Subject: [PATCH 2/6] renaming Signed-off-by: borg-z --- .github/scripts/js/constants.js | 2 +- .github/workflow_templates/e2e.multi.yml | 2 +- .github/workflows/e2e-vcd.yml | 3038 ++++++++++++++++++++++ 3 files changed, 3040 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/e2e-vcd.yml diff --git a/.github/scripts/js/constants.js b/.github/scripts/js/constants.js index e6ef4ea960..ab7610e7e6 100644 --- a/.github/scripts/js/constants.js +++ b/.github/scripts/js/constants.js @@ -34,7 +34,7 @@ const labels = { 'e2e/run/gcp': { type: 'e2e-run', provider: 'gcp' }, 'e2e/run/openstack': { type: 'e2e-run', provider: 'openstack' }, 'e2e/run/vsphere': { type: 'e2e-run', provider: 'vsphere' }, - 'e2e/run/vcd': { type: 'e2e-run', provider: 'vclouddirector' }, + 'e2e/run/vcd': { type: 'e2e-run', provider: 'vcd' }, 'e2e/run/yandex-cloud': { type: 'e2e-run', provider: 'yandex-cloud' }, 'e2e/run/static': { type: 'e2e-run', provider: 'static' }, diff --git a/.github/workflow_templates/e2e.multi.yml b/.github/workflow_templates/e2e.multi.yml index 70d0a6cc9e..7b60c96f86 100644 --- a/.github/workflow_templates/e2e.multi.yml +++ b/.github/workflow_templates/e2e.multi.yml @@ -32,7 +32,7 @@ $CI_COMMIT_REF_SLUG is a tag of published deckhouse images. 
It has a form */}!} -{!{- $providerNames := slice "AWS" "Azure" "GCP" "Yandex.Cloud" "OpenStack" "vSphere" "vCloudDirector" "Static" "EKS" -}!} +{!{- $providerNames := slice "AWS" "Azure" "GCP" "Yandex.Cloud" "OpenStack" "vSphere" "VCD" "Static" "EKS" -}!} {!{- $criNames := slice "Containerd" -}!} {!{- $kubernetesVersions := slice "1.26" "1.27" "1.28" "1.29" "1.30" "Automatic" -}!} diff --git a/.github/workflows/e2e-vcd.yml b/.github/workflows/e2e-vcd.yml new file mode 100644 index 0000000000..d3dee71454 --- /dev/null +++ b/.github/workflows/e2e-vcd.yml @@ -0,0 +1,3038 @@ +# +# THIS FILE IS GENERATED, PLEASE DO NOT EDIT. +# + +# Copyright 2022 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +name: 'e2e: VCD' +on: + workflow_dispatch: + inputs: + issue_id: + description: 'ID of issue where label was set' + required: false + issue_number: + description: 'Number of issue where label was set' + required: false + comment_id: + description: 'ID of comment in issue where to put workflow run status' + required: false + ci_commit_ref_name: + description: 'Git ref name for image tags' + required: false + pull_request_ref: + description: 'Git ref for checkout PR sources' + required: false + pull_request_sha: + description: 'Git SHA for restoring artifacts from cache' + required: false + pull_request_head_label: + description: 'Head label of pull request. e.g. my_repo:my_feature_branch' + required: false + cri: + description: 'A comma-separated list of cri to test. 
Available: Containerd.' + required: false + ver: + description: 'A comma-separated list of versions to test. Available: from 1.24 to 1.28.' + required: false + initial_ref_slug: + description: 'An image tag to install first and then switch to workflow context ref' + required: false +env: + + # + WERF_CHANNEL: "ea" + WERF_ENV: "FE" + TEST_TIMEOUT: "15m" + # Use fixed string 'sys/deckhouse-oss' for repo name. ${CI_PROJECT_PATH} is not available here in GitHub. + DEV_REGISTRY_PATH: "${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/sys/deckhouse-oss" + # Registry for additional repositories used for testing Github Actions workflows. + GHA_TEST_REGISTRY_PATH: "ghcr.io/${{ github.repository }}" + # + +# Note: no concurrency section for e2e workflows. +# Usually you run e2e and wait until it ends. + +jobs: + started_at: + name: Save start timestamp + outputs: + started_at: ${{ steps.started_at.outputs.started_at }} + runs-on: "ubuntu-latest" + steps: + + # + - name: Job started timestamp + id: started_at + run: | + unixTimestamp=$(date +%s) + echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT + # + + + # + + git_info: + name: Get git info + runs-on: ubuntu-latest + outputs: + ci_commit_tag: ${{ steps.git_info.outputs.ci_commit_tag }} + ci_commit_branch: ${{ steps.git_info.outputs.ci_commit_branch }} + ci_commit_ref_name: ${{ steps.git_info.outputs.ci_commit_ref_name }} + ci_commit_ref_slug: ${{ steps.git_info.outputs.ci_commit_ref_slug }} + ref_full: ${{ steps.git_info.outputs.ref_full }} + github_sha: ${{ steps.git_info.outputs.github_sha }} + pr_number: ${{ steps.git_info.outputs.pr_number }} + # Skip the CI for automation PRs, e.g. 
changelog + if: ${{ github.event.pull_request.user.login != 'deckhouse-BOaTswain' }} + steps: + - id: git_info + name: Get tag name and SHA + uses: actions/github-script@v6.4.1 + with: + script: | + const { GITHUB_REF_TYPE, GITHUB_REF_NAME, GITHUB_REF } = process.env + + let refSlug = '' + let refName = '' + let refFull = '' + let githubBranch = '' + let githubTag = '' + let githubSHA = '' + let prNumber = '' + if (context.eventName === "workflow_dispatch" && context.payload.inputs && context.payload.inputs.pull_request_ref) { + // Trigger: workflow_dispatch with pull_request_ref. + // Extract pull request number from 'refs/pull//merge' + prNumber = context.payload.inputs.pull_request_ref.replace('refs/pull/', '').replace('/merge', '').replace('/head', '') + + refSlug = `pr${prNumber}` + refName = context.payload.inputs.ci_commit_ref_name + refFull = context.payload.inputs.pull_request_ref + githubBranch = refName + githubSHA = context.payload.inputs.pull_request_sha + core.info(`workflow_dispatch event: set git info from inputs. inputs: ${JSON.stringify(context.payload.inputs)}`) + } else if (context.eventName === "pull_request" || context.eventName === "pull_request_target" ) { + // For PRs from forks, tag images with `prXXX` to avoid clashes between branches. + const targetRepo = context.payload.repository.full_name; + const prRepo = context.payload.pull_request.head.repo.full_name + const prRef = context.payload.pull_request.head.ref + + refSlug = `pr${context.issue.number}`; + refName = (prRepo === targetRepo) ? prRef : refSlug; + refFull = `refs/pull/${context.issue.number}/head` + githubBranch = refName + githubSHA = context.payload.pull_request.head.sha + core.info(`pull request event: set git info from pull_request.head. pr:${prRepo}:${prRef} target:${targetRepo}:${context.ref}`) + prNumber = context.issue.number + } else { + // Other triggers: workflow_dispatch without pull_request_ref, schedule, push... 
+ // refName is 'main' or tag name, so slugification is not necessary. + refSlug = GITHUB_REF_NAME + refName = GITHUB_REF_NAME + refFull = GITHUB_REF + githubTag = GITHUB_REF_TYPE == "tag" ? refName : "" + githubBranch = GITHUB_REF_TYPE == "branch" ? refName : "" + githubSHA = context.sha + core.info(`${context.eventName} event: set git info from context: ${JSON.stringify({GITHUB_REF_NAME, GITHUB_REF_TYPE, sha: context.sha })}`) + } + + core.setCommandEcho(true) + core.setOutput('ci_commit_ref_slug', refSlug) + core.setOutput('ci_commit_ref_name', refName) + core.setOutput(`ci_commit_tag`, githubTag) + core.setOutput(`ci_commit_branch`, githubBranch) + core.setOutput(`ref_full`, refFull) + core.setOutput('github_sha', githubSHA) + core.setOutput('pr_number', prNumber) + core.setCommandEcho(false) + + # + + # + check_e2e_labels: + name: Check e2e labels + runs-on: ubuntu-latest + outputs: + + run_containerd_1_26: ${{ steps.check.outputs.run_containerd_1_26 }} + run_containerd_1_27: ${{ steps.check.outputs.run_containerd_1_27 }} + run_containerd_1_28: ${{ steps.check.outputs.run_containerd_1_28 }} + run_containerd_1_29: ${{ steps.check.outputs.run_containerd_1_29 }} + run_containerd_1_30: ${{ steps.check.outputs.run_containerd_1_30 }} + run_containerd_automatic: ${{ steps.check.outputs.run_containerd_automatic }} + steps: + + # + - name: Checkout sources + uses: actions/checkout@v3.5.2 + + # + - name: Check e2e labels + id: check + uses: actions/github-script@v6.4.1 + with: + script: | + const provider = 'vcd'; + const kubernetesDefaultVersion = '1.27'; + + const ci = require('./.github/scripts/js/ci'); + return await ci.checkE2ELabels({github, context, core, provider, kubernetesDefaultVersion}); + # + + + # + run_containerd_1_26: + name: "e2e: VCD, Containerd, Kubernetes 1.26" + needs: + - check_e2e_labels + - git_info + if: needs.check_e2e_labels.outputs.run_containerd_1_26 == 'true' + outputs: + ssh_master_connection_string: ${{ 
steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }} + ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }} + run_id: ${{ github.run_id }} + # need for find state in artifact + cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} + ran_for: "vcd;WithoutNAT;containerd;1.26" + failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} + issue_number: ${{ inputs.issue_number }} + install_image_path: ${{ steps.setup.outputs.install-image-path }} + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "1.26" + EVENT_LABEL: ${{ github.event.label.name }} + runs-on: [self-hosted, e2e-common] + steps: + + # + - name: Job started timestamp + id: started_at + run: | + unixTimestamp=$(date +%s) + echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT + # + + # + - name: Checkout sources + uses: actions/checkout@v3.5.2 + with: + ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }} + fetch-depth: 0 + # + # + - name: Update comment on start + if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const name = 'e2e: VCD, Containerd, Kubernetes 1.26'; + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnStart({github, context, core, name}) + + # + + + # + - name: Check dev registry credentials + id: check_dev_registry + env: + HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}} + run: | + if [[ -n $HOST ]]; then + echo "has_credentials=true" >> $GITHUB_OUTPUT + echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT + fi + - name: Login to dev registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }} + with: + registry: ${{ 
secrets.DECKHOUSE_DEV_REGISTRY_HOST }} + username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }} + password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }} + logout: false + # + + # + - name: Check rw registry credentials + id: check_rw_registry + env: + HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} + run: | + if [[ -n $HOST ]]; then + echo "has_credentials=true" >> $GITHUB_OUTPUT + echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT + fi + - name: Login to rw registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }} + with: + registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }} + username: ${{ secrets.DECKHOUSE_REGISTRY_USER }} + password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }} + logout: false + - name: Login to Github Container Registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }} + with: + registry: ghcr.io + username: ${{ secrets.GHCR_IO_REGISTRY_USER }} + password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }} + logout: false + # + + # + - name: Install werf CLI + uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e + with: + channel: ${{env.WERF_CHANNEL}} + # + + - name: Setup + id: setup + env: + DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} + CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}} + CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}} + CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}} + REF_FULL: ${{needs.git_info.outputs.ref_full}} + INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }} + MANUAL_RUN: "true" + run: | + # Calculate unique prefix for e2e test. + # GITHUB_RUN_ID is a unique number for each workflow run. + # GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository. + # Add CRI and KUBERNETES_VERSION to create unique directory for each job. 
+ # CRI and PROVIDER values are trimmed to reduce prefix length. + if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then + KUBERNETES_VERSION_SUF="auto" + else + KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION} + fi + DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}") + if [[ "${MANUAL_RUN}" == "false" ]] ; then + # for jobs which run multiple providers concurrency (daily e2e, for example) + # add provider suffix to prevent "directory already exists" error + DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)" + fi + # converts to DNS-like (all letters in lower case and replace all dots to dash) + # because it prefix will use for k8s resources names (nodes, for example) + DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]') + + # Create tmppath for test script. + TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX} + if [[ -d "${TMP_DIR_PATH}" ]] ; then + echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!" + ls -la ${TMP_DIR_PATH} + exit 1 + else + echo "Create temporary dir for job: ${TMP_DIR_PATH}." + mkdir -p "${TMP_DIR_PATH}" + fi + + ## Source: ci_templates/build.yml + + # Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'. + REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-} + if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then + # REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'. + REPO_SUFFIX= + fi + + # Use dev-registry for Git branches. + BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" + # Use rw-registry for Git tags. + SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse" + + if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then + # DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo. + # Use dev-regisry for branches and Github Container Registry for semver tags. 
+ BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" + SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}" + fi + + # Prepare initial image tag for deploy/deckhouse to test switching from previous release. + INITIAL_IMAGE_TAG= + if [[ -n ${INITIAL_REF_SLUG} ]] ; then + INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} + fi + + # Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh). + # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. + # Use it as image tag. Add suffix to not overlap with PRs in main repo. + IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} + + INSTALL_IMAGE_NAME= + if [[ -n ${CI_COMMIT_BRANCH} ]]; then + # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. + INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG} + fi + if [[ -n ${CI_COMMIT_TAG} ]] ; then + REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe + INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG} + fi + if [[ -n ${INITIAL_REF_SLUG} ]] ; then + INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG} + git fetch origin ${INITIAL_REF_SLUG} + git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts + fi + SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]') + echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}" + + # Print image name in uppercase to prevent hiding non-secret registry host stored in secret. + echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'." 
+ docker pull "${INSTALL_IMAGE_NAME}" + + IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}" + + echo '::echo::on' + echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT + echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT + echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT + echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT + echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT + echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT + echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT + + echo '::echo::off' + + - name: "Run e2e test: VCD/Containerd/1.26" + id: e2e_test_run + timeout-minutes: 80 + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "1.26" + LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} + LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} + TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} + PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} + INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} + DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} + INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} + # + COMMENT_ID: ${{ inputs.comment_id }} + GITHUB_API_SERVER: ${{ github.api_url }} + REPOSITORY: ${{ github.repository }} + DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} + GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + run: | + echo "Execute 'script.sh run-test' via 'docker run', using environment: + INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} + DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} + INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} + PREFIX=${PREFIX} + PROVIDER=${PROVIDER} + CRI=${CRI} + LAYOUT=${LAYOUT} + KUBERNETES_VERSION=${KUBERNETES_VERSION} + TMP_DIR_PATH=${TMP_DIR_PATH} + " + + ls -lh $(pwd)/testing + + dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + echo "DHCTL log file: $dhctl_log_file" + + user_runner_id=$(id -u):$(id -g) + echo "user_runner_id 
$user_runner_id" + echo "Start waiting ssh connection string script" + comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}" + echo "Full comment url for updating ${comment_url}" + + ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT + + bastion_ip_file="" + if [[ "${PROVIDER}" == "Static" ]] ; then + bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + elif [[ "${PROVIDER}" == "VCD" ]] ; then + bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + fi + + echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT + + $(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 & + + + docker run --rm \ + -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ + -e PREFIX=${PREFIX} \ + -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ + -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ + -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ + -e CRI=${CRI} \ + -e PROVIDER=${PROVIDER:-not_provided} \ + -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ + -e LAYOUT=${LAYOUT:-not_provided} \ + -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ + -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e USER_RUNNER_ID=${user_runner_id} \ + -v $(pwd)/testing:/deckhouse/testing \ + -v $(pwd)/release.yaml:/deckhouse/release.yaml \ + -v ${TMP_DIR_PATH}:/tmp \ + -w /deckhouse \ + ${INSTALL_IMAGE_NAME} \ + bash /deckhouse/testing/cloud_layouts/script.sh run-test + + # + - name: Read connection string + if: ${{ failure() || cancelled() }} + id: check_stay_failed_cluster + uses: actions/github-script@v6.4.1 + env: + SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }} + 
SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }} + with: + # it sets `should_run` output var if e2e/failed/stay label + script: | + const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup'); + await e2e_cleanup.readConnectionScript({core, context, github}); + + - name: Label pr if e2e failed + if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }} + uses: actions-ecosystem/action-add-labels@v1 + with: + github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }} + number: ${{ needs.git_info.outputs.pr_number }} + labels: "e2e/cluster/failed" + + - name: Cleanup bootstrapped cluster + if: success() + id: cleanup_cluster + timeout-minutes: 60 + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "1.26" + LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} + LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} + TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} + PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} + INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} + DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} + # + COMMENT_ID: ${{ inputs.comment_id }} + GITHUB_API_SERVER: ${{ github.api_url }} + REPOSITORY: ${{ github.repository }} + DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} + GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + run: | + echo "Execute 'script.sh cleanup' via 'docker run', using environment: + INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} + DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} + INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} + PREFIX=${PREFIX} + PROVIDER=${PROVIDER} + CRI=${CRI} + LAYOUT=${LAYOUT} + KUBERNETES_VERSION=${KUBERNETES_VERSION} + TMP_DIR_PATH=${TMP_DIR_PATH} + " + + ls -lh $(pwd)/testing + + dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + echo "DHCTL log file: $dhctl_log_file" + + user_runner_id=$(id -u):$(id -g) + echo "user_runner_id $user_runner_id" + + docker run --rm 
\ + -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ + -e PREFIX=${PREFIX} \ + -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ + -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ + -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ + -e CRI=${CRI} \ + -e PROVIDER=${PROVIDER:-not_provided} \ + -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ + -e LAYOUT=${LAYOUT:-not_provided} \ + -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ + -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e USER_RUNNER_ID=${user_runner_id} \ + -v $(pwd)/testing:/deckhouse/testing \ + -v $(pwd)/release.yaml:/deckhouse/release.yaml \ + -v ${TMP_DIR_PATH}:/tmp \ + -w /deckhouse \ + ${INSTALL_IMAGE_NAME} \ + bash /deckhouse/testing/cloud_layouts/script.sh cleanup + + # + + - name: Save dhctl state + id: save_failed_cluster_state + if: ${{ failure() }} + uses: actions/upload-artifact@v3.1.2 + with: + name: failed_cluster_state_vcd_containerd_1_26 + path: | + ${{ steps.setup.outputs.tmp-dir-path}}/dhctl + ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate + ${{ steps.setup.outputs.tmp-dir-path}}/logs + + - name: Save test results + if: ${{ steps.setup.outputs.dhctl-log-file }} + uses: actions/upload-artifact@v3.1.2 + with: + name: test_output_vcd_containerd_1_26 + path: | + ${{ steps.setup.outputs.dhctl-log-file}}* + ${{ steps.setup.outputs.tmp-dir-path}}/logs + testing/cloud_layouts/ + !testing/cloud_layouts/**/sshkey + + - name: Cleanup temp directory + if: always() + env: + TMPPATH: ${{ steps.setup.outputs.tmppath}} + run: | + echo "Remove temporary directory '${TMPPATH}' ..." + if [[ -d "${TMPPATH}" && ${#TMPPATH} > 1 ]] ; then + rm -rf "${TMPPATH}" + else + echo Not a directory. + fi + if [ -n $USER_RUNNER_ID ]; then + echo "Fix temp directories owner..." + chown -R $USER_RUNNER_ID "$(pwd)/testing" || true + chown -R $USER_RUNNER_ID "/deckhouse/testing" || true + chown -R $USER_RUNNER_ID /tmp || true + else + echo "Fix temp directories permissions..." 
+ chmod -f -R 777 "$(pwd)/testing" || true + chmod -f -R 777 "/deckhouse/testing" || true + chmod -f -R 777 /tmp || true + fi + # + - name: Update comment on finish + id: update_comment_on_finish + if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + env: + NEEDS_CONTEXT: ${{ toJSON(needs) }} + JOB_CONTEXT: ${{ toJSON(job) }} + STEPS_CONTEXT: ${{ toJSON(steps) }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const statusConfig = 'job,separate'; + const name = 'e2e: VCD, Containerd, Kubernetes 1.26'; + const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); + const jobContext = JSON.parse(process.env.JOB_CONTEXT); + const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); + let jobNames = null + if (process.env.JOB_NAMES) { + jobNames = JSON.parse(process.env.JOB_NAMES); + } + + core.info(`needsContext: ${JSON.stringify(needsContext)}`); + core.info(`jobContext: ${JSON.stringify(jobContext)}`); + core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); + core.info(`jobNames: ${JSON.stringify(jobNames)}`); + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); + # + # + + # + run_containerd_1_27: + name: "e2e: VCD, Containerd, Kubernetes 1.27" + needs: + - check_e2e_labels + - git_info + if: needs.check_e2e_labels.outputs.run_containerd_1_27 == 'true' + outputs: + ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }} + ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }} + run_id: ${{ github.run_id }} + # need for find state in artifact + cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} + ran_for: "vcd;WithoutNAT;containerd;1.27" + failed_cluster_stayed: ${{ 
steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} + issue_number: ${{ inputs.issue_number }} + install_image_path: ${{ steps.setup.outputs.install-image-path }} + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "1.27" + EVENT_LABEL: ${{ github.event.label.name }} + runs-on: [self-hosted, e2e-common] + steps: + + # + - name: Job started timestamp + id: started_at + run: | + unixTimestamp=$(date +%s) + echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT + # + + # + - name: Checkout sources + uses: actions/checkout@v3.5.2 + with: + ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }} + fetch-depth: 0 + # + # + - name: Update comment on start + if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const name = 'e2e: VCD, Containerd, Kubernetes 1.27'; + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnStart({github, context, core, name}) + + # + + + # + - name: Check dev registry credentials + id: check_dev_registry + env: + HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}} + run: | + if [[ -n $HOST ]]; then + echo "has_credentials=true" >> $GITHUB_OUTPUT + echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT + fi + - name: Login to dev registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }} + with: + registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }} + username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }} + password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }} + logout: false + # + + # + - name: Check rw registry credentials + id: check_rw_registry + env: + HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} + run: | + if [[ -n $HOST ]]; then + echo "has_credentials=true" >> $GITHUB_OUTPUT + echo 
"web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT + fi + - name: Login to rw registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }} + with: + registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }} + username: ${{ secrets.DECKHOUSE_REGISTRY_USER }} + password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }} + logout: false + - name: Login to Github Container Registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }} + with: + registry: ghcr.io + username: ${{ secrets.GHCR_IO_REGISTRY_USER }} + password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }} + logout: false + # + + # + - name: Install werf CLI + uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e + with: + channel: ${{env.WERF_CHANNEL}} + # + + - name: Setup + id: setup + env: + DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} + CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}} + CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}} + CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}} + REF_FULL: ${{needs.git_info.outputs.ref_full}} + INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }} + MANUAL_RUN: "true" + run: | + # Calculate unique prefix for e2e test. + # GITHUB_RUN_ID is a unique number for each workflow run. + # GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository. + # Add CRI and KUBERNETES_VERSION to create unique directory for each job. + # CRI and PROVIDER values are trimmed to reduce prefix length. 
+ if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then + KUBERNETES_VERSION_SUF="auto" + else + KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION} + fi + DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}") + if [[ "${MANUAL_RUN}" == "false" ]] ; then + # for jobs which run multiple providers concurrency (daily e2e, for example) + # add provider suffix to prevent "directory already exists" error + DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)" + fi + # converts to DNS-like (all letters in lower case and replace all dots to dash) + # because it prefix will use for k8s resources names (nodes, for example) + DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]') + + # Create tmppath for test script. + TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX} + if [[ -d "${TMP_DIR_PATH}" ]] ; then + echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!" + ls -la ${TMP_DIR_PATH} + exit 1 + else + echo "Create temporary dir for job: ${TMP_DIR_PATH}." + mkdir -p "${TMP_DIR_PATH}" + fi + + ## Source: ci_templates/build.yml + + # Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'. + REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-} + if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then + # REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'. + REPO_SUFFIX= + fi + + # Use dev-registry for Git branches. + BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" + # Use rw-registry for Git tags. + SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse" + + if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then + # DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo. + # Use dev-regisry for branches and Github Container Registry for semver tags. + BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" + SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}" + fi + + # Prepare initial image tag for deploy/deckhouse to test switching from previous release. 
+ INITIAL_IMAGE_TAG= + if [[ -n ${INITIAL_REF_SLUG} ]] ; then + INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} + fi + + # Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh). + # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. + # Use it as image tag. Add suffix to not overlap with PRs in main repo. + IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} + + INSTALL_IMAGE_NAME= + if [[ -n ${CI_COMMIT_BRANCH} ]]; then + # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. + INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG} + fi + if [[ -n ${CI_COMMIT_TAG} ]] ; then + REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe + INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG} + fi + if [[ -n ${INITIAL_REF_SLUG} ]] ; then + INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG} + git fetch origin ${INITIAL_REF_SLUG} + git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts + fi + SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]') + echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}" + + # Print image name in uppercase to prevent hiding non-secret registry host stored in secret. + echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'." 
+          docker pull "${INSTALL_IMAGE_NAME}"
+
+          IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}"
+
+          echo '::echo::on'
+          echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
+          echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT
+          echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT
+          echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
+          echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
+          echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT
+          echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT
+
+          echo '::echo::off'
+
+      - name: "Run e2e test: VCD/Containerd/1.27"
+        id: e2e_test_run
+        timeout-minutes: 80
+        env:
+          PROVIDER: VCD
+          CRI: Containerd
+          LAYOUT: WithoutNAT
+          KUBERNETES_VERSION: "1.27"
+          LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
+          LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
+          TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
+          PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
+          INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
+          DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
+          INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }}
+          #
+          COMMENT_ID: ${{ inputs.comment_id }}
+          GITHUB_API_SERVER: ${{ github.api_url }}
+          REPOSITORY: ${{ github.repository }}
+          DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
+          GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
+        run: |
+          echo "Execute 'script.sh run-test' via 'docker run', using environment:
+            INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
+            DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
+            INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
+            PREFIX=${PREFIX}
+            PROVIDER=${PROVIDER}
+            CRI=${CRI}
+            LAYOUT=${LAYOUT}
+            KUBERNETES_VERSION=${KUBERNETES_VERSION}
+            TMP_DIR_PATH=${TMP_DIR_PATH}
+          "
+
+          ls -lh $(pwd)/testing
+
+          dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
+          echo "DHCTL log file: $dhctl_log_file"
+
+          user_runner_id=$(id -u):$(id -g)
+          echo "user_runner_id $user_runner_id"
+          echo "Start waiting ssh connection string script"
+          comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}"
+          echo "Full comment url for updating ${comment_url}"
+
+          ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
+          echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT
+
+          bastion_ip_file=""
+          if [[ "${PROVIDER}" == "Static" ]] ; then
+            bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
+          elif [[ "${PROVIDER}" == "VCD" ]] ; then
+            bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
+          fi
+
+          echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT
+
+          $(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 &
+
+
+          docker run --rm \
+            -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
+            -e PREFIX=${PREFIX} \
+            -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
+            -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
+            -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
+            -e CRI=${CRI} \
+            -e PROVIDER=${PROVIDER:-not_provided} \
+            -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
+            -e LAYOUT=${LAYOUT:-not_provided} \
+            -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
+            -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
+            -e USER_RUNNER_ID=${user_runner_id} \
+            -v $(pwd)/testing:/deckhouse/testing \
+            -v $(pwd)/release.yaml:/deckhouse/release.yaml \
+            -v ${TMP_DIR_PATH}:/tmp \
+            -w /deckhouse \
+            ${INSTALL_IMAGE_NAME} \
+            bash /deckhouse/testing/cloud_layouts/script.sh run-test
+
+      #
+      - name: Read connection string
+        if: ${{ failure() || cancelled() }}
+        id: check_stay_failed_cluster
+        uses: actions/github-script@v6.4.1
+        env:
+          SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }}
+          SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }}
+        with:
+          # it sets `should_run` output var if e2e/failed/stay label
+          script: |
+            const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup');
+            await e2e_cleanup.readConnectionScript({core, context, github});
+
+      - name: Label pr if e2e failed
+        if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }}
+        uses: actions-ecosystem/action-add-labels@v1
+        with:
+          github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
+          number: ${{ needs.git_info.outputs.pr_number }}
+          labels: "e2e/cluster/failed"
+
+      - name: Cleanup bootstrapped cluster
+        if: success()
+        id: cleanup_cluster
+        timeout-minutes: 60
+        env:
+          PROVIDER: VCD
+          CRI: Containerd
+          LAYOUT: WithoutNAT
+          KUBERNETES_VERSION: "1.27"
+          LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
+          LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
+          TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
+          PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
+          INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
+          DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
+          #
+          COMMENT_ID: ${{ inputs.comment_id }}
+          GITHUB_API_SERVER: ${{ github.api_url }}
+          REPOSITORY: ${{ github.repository }}
+          DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
+          GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
+        run: |
+          echo "Execute 'script.sh cleanup' via 'docker run', using environment:
+            INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
+            DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
+            INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
+            PREFIX=${PREFIX}
+            PROVIDER=${PROVIDER}
+            CRI=${CRI}
+            LAYOUT=${LAYOUT}
+            KUBERNETES_VERSION=${KUBERNETES_VERSION}
+            TMP_DIR_PATH=${TMP_DIR_PATH}
+          "
+
+          ls -lh $(pwd)/testing
+
+          dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
+          echo "DHCTL log file: $dhctl_log_file"
+
+          user_runner_id=$(id -u):$(id -g)
+          echo "user_runner_id $user_runner_id"
+
+          docker run --rm \
+            -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
+            -e PREFIX=${PREFIX} \
+            -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
+            -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
+            -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
+            -e CRI=${CRI} \
+            -e PROVIDER=${PROVIDER:-not_provided} \
+            -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
+            -e LAYOUT=${LAYOUT:-not_provided} \
+            -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
+            -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
+            -e USER_RUNNER_ID=${user_runner_id} \
+            -v $(pwd)/testing:/deckhouse/testing \
+            -v $(pwd)/release.yaml:/deckhouse/release.yaml \
+            -v ${TMP_DIR_PATH}:/tmp \
+            -w /deckhouse \
+            ${INSTALL_IMAGE_NAME} \
+            bash /deckhouse/testing/cloud_layouts/script.sh cleanup
+
+      #
+
+      - name: Save dhctl state
+        id: save_failed_cluster_state
+        if: ${{ failure() }}
+        uses: actions/upload-artifact@v3.1.2
+        with:
+          name: failed_cluster_state_vcd_containerd_1_27
+          path: |
+            ${{ steps.setup.outputs.tmp-dir-path}}/dhctl
+            ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate
+            ${{ steps.setup.outputs.tmp-dir-path}}/logs
+
+      - name: Save test results
+        if: ${{ steps.setup.outputs.dhctl-log-file }}
+        uses: actions/upload-artifact@v3.1.2
+        with:
+          name: test_output_vcd_containerd_1_27
+          path: |
+            ${{ steps.setup.outputs.dhctl-log-file}}*
+            ${{ steps.setup.outputs.tmp-dir-path}}/logs
+            testing/cloud_layouts/
+            !testing/cloud_layouts/**/sshkey
+
+      - name: Cleanup temp directory
+        if: always()
+        env:
+          TMPPATH: ${{ steps.setup.outputs.tmp-dir-path}}
+        run: |
+          echo "Remove temporary directory '${TMPPATH}' ..."
+          if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then
+            rm -rf "${TMPPATH}"
+          else
+            echo Not a directory.
+          fi
+          if [ -n "$USER_RUNNER_ID" ]; then
+            echo "Fix temp directories owner..."
+            chown -R $USER_RUNNER_ID "$(pwd)/testing" || true
+            chown -R $USER_RUNNER_ID "/deckhouse/testing" || true
+            chown -R $USER_RUNNER_ID /tmp || true
+          else
+            echo "Fix temp directories permissions..."
+ chmod -f -R 777 "$(pwd)/testing" || true + chmod -f -R 777 "/deckhouse/testing" || true + chmod -f -R 777 /tmp || true + fi + # + - name: Update comment on finish + id: update_comment_on_finish + if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + env: + NEEDS_CONTEXT: ${{ toJSON(needs) }} + JOB_CONTEXT: ${{ toJSON(job) }} + STEPS_CONTEXT: ${{ toJSON(steps) }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const statusConfig = 'job,separate'; + const name = 'e2e: VCD, Containerd, Kubernetes 1.27'; + const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); + const jobContext = JSON.parse(process.env.JOB_CONTEXT); + const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); + let jobNames = null + if (process.env.JOB_NAMES) { + jobNames = JSON.parse(process.env.JOB_NAMES); + } + + core.info(`needsContext: ${JSON.stringify(needsContext)}`); + core.info(`jobContext: ${JSON.stringify(jobContext)}`); + core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); + core.info(`jobNames: ${JSON.stringify(jobNames)}`); + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); + # + # + + # + run_containerd_1_28: + name: "e2e: VCD, Containerd, Kubernetes 1.28" + needs: + - check_e2e_labels + - git_info + if: needs.check_e2e_labels.outputs.run_containerd_1_28 == 'true' + outputs: + ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }} + ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }} + run_id: ${{ github.run_id }} + # need for find state in artifact + cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} + ran_for: "vcd;WithoutNAT;containerd;1.28" + failed_cluster_stayed: ${{ 
steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} + issue_number: ${{ inputs.issue_number }} + install_image_path: ${{ steps.setup.outputs.install-image-path }} + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "1.28" + EVENT_LABEL: ${{ github.event.label.name }} + runs-on: [self-hosted, e2e-common] + steps: + + # + - name: Job started timestamp + id: started_at + run: | + unixTimestamp=$(date +%s) + echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT + # + + # + - name: Checkout sources + uses: actions/checkout@v3.5.2 + with: + ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }} + fetch-depth: 0 + # + # + - name: Update comment on start + if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const name = 'e2e: VCD, Containerd, Kubernetes 1.28'; + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnStart({github, context, core, name}) + + # + + + # + - name: Check dev registry credentials + id: check_dev_registry + env: + HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}} + run: | + if [[ -n $HOST ]]; then + echo "has_credentials=true" >> $GITHUB_OUTPUT + echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT + fi + - name: Login to dev registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }} + with: + registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }} + username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }} + password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }} + logout: false + # + + # + - name: Check rw registry credentials + id: check_rw_registry + env: + HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} + run: | + if [[ -n $HOST ]]; then + echo "has_credentials=true" >> $GITHUB_OUTPUT + echo 
"web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT + fi + - name: Login to rw registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }} + with: + registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }} + username: ${{ secrets.DECKHOUSE_REGISTRY_USER }} + password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }} + logout: false + - name: Login to Github Container Registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }} + with: + registry: ghcr.io + username: ${{ secrets.GHCR_IO_REGISTRY_USER }} + password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }} + logout: false + # + + # + - name: Install werf CLI + uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e + with: + channel: ${{env.WERF_CHANNEL}} + # + + - name: Setup + id: setup + env: + DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} + CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}} + CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}} + CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}} + REF_FULL: ${{needs.git_info.outputs.ref_full}} + INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }} + MANUAL_RUN: "true" + run: | + # Calculate unique prefix for e2e test. + # GITHUB_RUN_ID is a unique number for each workflow run. + # GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository. + # Add CRI and KUBERNETES_VERSION to create unique directory for each job. + # CRI and PROVIDER values are trimmed to reduce prefix length. 
+ if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then + KUBERNETES_VERSION_SUF="auto" + else + KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION} + fi + DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}") + if [[ "${MANUAL_RUN}" == "false" ]] ; then + # for jobs which run multiple providers concurrency (daily e2e, for example) + # add provider suffix to prevent "directory already exists" error + DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)" + fi + # converts to DNS-like (all letters in lower case and replace all dots to dash) + # because it prefix will use for k8s resources names (nodes, for example) + DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]') + + # Create tmppath for test script. + TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX} + if [[ -d "${TMP_DIR_PATH}" ]] ; then + echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!" + ls -la ${TMP_DIR_PATH} + exit 1 + else + echo "Create temporary dir for job: ${TMP_DIR_PATH}." + mkdir -p "${TMP_DIR_PATH}" + fi + + ## Source: ci_templates/build.yml + + # Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'. + REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-} + if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then + # REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'. + REPO_SUFFIX= + fi + + # Use dev-registry for Git branches. + BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" + # Use rw-registry for Git tags. + SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse" + + if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then + # DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo. + # Use dev-regisry for branches and Github Container Registry for semver tags. + BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" + SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}" + fi + + # Prepare initial image tag for deploy/deckhouse to test switching from previous release. 
+ INITIAL_IMAGE_TAG= + if [[ -n ${INITIAL_REF_SLUG} ]] ; then + INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} + fi + + # Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh). + # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. + # Use it as image tag. Add suffix to not overlap with PRs in main repo. + IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} + + INSTALL_IMAGE_NAME= + if [[ -n ${CI_COMMIT_BRANCH} ]]; then + # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. + INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG} + fi + if [[ -n ${CI_COMMIT_TAG} ]] ; then + REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe + INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG} + fi + if [[ -n ${INITIAL_REF_SLUG} ]] ; then + INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG} + git fetch origin ${INITIAL_REF_SLUG} + git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts + fi + SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]') + echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}" + + # Print image name in uppercase to prevent hiding non-secret registry host stored in secret. + echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'." 
+          docker pull "${INSTALL_IMAGE_NAME}"
+
+          IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}"
+
+          echo '::echo::on'
+          echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
+          echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT
+          echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT
+          echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
+          echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
+          echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT
+          echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT
+
+          echo '::echo::off'
+
+      - name: "Run e2e test: VCD/Containerd/1.28"
+        id: e2e_test_run
+        timeout-minutes: 80
+        env:
+          PROVIDER: VCD
+          CRI: Containerd
+          LAYOUT: WithoutNAT
+          KUBERNETES_VERSION: "1.28"
+          LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
+          LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
+          TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
+          PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
+          INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
+          DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
+          INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }}
+          #
+          COMMENT_ID: ${{ inputs.comment_id }}
+          GITHUB_API_SERVER: ${{ github.api_url }}
+          REPOSITORY: ${{ github.repository }}
+          DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
+          GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
+        run: |
+          echo "Execute 'script.sh run-test' via 'docker run', using environment:
+            INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
+            DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
+            INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
+            PREFIX=${PREFIX}
+            PROVIDER=${PROVIDER}
+            CRI=${CRI}
+            LAYOUT=${LAYOUT}
+            KUBERNETES_VERSION=${KUBERNETES_VERSION}
+            TMP_DIR_PATH=${TMP_DIR_PATH}
+          "
+
+          ls -lh $(pwd)/testing
+
+          dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
+          echo "DHCTL log file: $dhctl_log_file"
+
+          user_runner_id=$(id -u):$(id -g)
+          echo "user_runner_id $user_runner_id"
+          echo "Start waiting ssh connection string script"
+          comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}"
+          echo "Full comment url for updating ${comment_url}"
+
+          ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
+          echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT
+
+          bastion_ip_file=""
+          if [[ "${PROVIDER}" == "Static" ]] ; then
+            bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
+          elif [[ "${PROVIDER}" == "VCD" ]] ; then
+            bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
+          fi
+
+          echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT
+
+          $(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 &
+
+
+          docker run --rm \
+            -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
+            -e PREFIX=${PREFIX} \
+            -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
+            -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
+            -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
+            -e CRI=${CRI} \
+            -e PROVIDER=${PROVIDER:-not_provided} \
+            -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
+            -e LAYOUT=${LAYOUT:-not_provided} \
+            -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
+            -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
+            -e USER_RUNNER_ID=${user_runner_id} \
+            -v $(pwd)/testing:/deckhouse/testing \
+            -v $(pwd)/release.yaml:/deckhouse/release.yaml \
+            -v ${TMP_DIR_PATH}:/tmp \
+            -w /deckhouse \
+            ${INSTALL_IMAGE_NAME} \
+            bash /deckhouse/testing/cloud_layouts/script.sh run-test
+
+      #
+      - name: Read connection string
+        if: ${{ failure() || cancelled() }}
+        id: check_stay_failed_cluster
+        uses: actions/github-script@v6.4.1
+        env:
+          SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }}
+          SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }}
+        with:
+          # it sets `should_run` output var if e2e/failed/stay label
+          script: |
+            const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup');
+            await e2e_cleanup.readConnectionScript({core, context, github});
+
+      - name: Label pr if e2e failed
+        if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }}
+        uses: actions-ecosystem/action-add-labels@v1
+        with:
+          github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
+          number: ${{ needs.git_info.outputs.pr_number }}
+          labels: "e2e/cluster/failed"
+
+      - name: Cleanup bootstrapped cluster
+        if: success()
+        id: cleanup_cluster
+        timeout-minutes: 60
+        env:
+          PROVIDER: VCD
+          CRI: Containerd
+          LAYOUT: WithoutNAT
+          KUBERNETES_VERSION: "1.28"
+          LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
+          LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
+          TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
+          PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
+          INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
+          DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
+          #
+          COMMENT_ID: ${{ inputs.comment_id }}
+          GITHUB_API_SERVER: ${{ github.api_url }}
+          REPOSITORY: ${{ github.repository }}
+          DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
+          GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
+        run: |
+          echo "Execute 'script.sh cleanup' via 'docker run', using environment:
+            INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
+            DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
+            INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
+            PREFIX=${PREFIX}
+            PROVIDER=${PROVIDER}
+            CRI=${CRI}
+            LAYOUT=${LAYOUT}
+            KUBERNETES_VERSION=${KUBERNETES_VERSION}
+            TMP_DIR_PATH=${TMP_DIR_PATH}
+          "
+
+          ls -lh $(pwd)/testing
+
+          dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
+          echo "DHCTL log file: $dhctl_log_file"
+
+          user_runner_id=$(id -u):$(id -g)
+          echo "user_runner_id $user_runner_id"
+
+          docker run --rm \
+            -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
+            -e PREFIX=${PREFIX} \
+            -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
+            -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
+            -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
+            -e CRI=${CRI} \
+            -e PROVIDER=${PROVIDER:-not_provided} \
+            -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
+            -e LAYOUT=${LAYOUT:-not_provided} \
+            -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
+            -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
+            -e USER_RUNNER_ID=${user_runner_id} \
+            -v $(pwd)/testing:/deckhouse/testing \
+            -v $(pwd)/release.yaml:/deckhouse/release.yaml \
+            -v ${TMP_DIR_PATH}:/tmp \
+            -w /deckhouse \
+            ${INSTALL_IMAGE_NAME} \
+            bash /deckhouse/testing/cloud_layouts/script.sh cleanup
+
+      #
+
+      - name: Save dhctl state
+        id: save_failed_cluster_state
+        if: ${{ failure() }}
+        uses: actions/upload-artifact@v3.1.2
+        with:
+          name: failed_cluster_state_vcd_containerd_1_28
+          path: |
+            ${{ steps.setup.outputs.tmp-dir-path}}/dhctl
+            ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate
+            ${{ steps.setup.outputs.tmp-dir-path}}/logs
+
+      - name: Save test results
+        if: ${{ steps.setup.outputs.dhctl-log-file }}
+        uses: actions/upload-artifact@v3.1.2
+        with:
+          name: test_output_vcd_containerd_1_28
+          path: |
+            ${{ steps.setup.outputs.dhctl-log-file}}*
+            ${{ steps.setup.outputs.tmp-dir-path}}/logs
+            testing/cloud_layouts/
+            !testing/cloud_layouts/**/sshkey
+
+      - name: Cleanup temp directory
+        if: always()
+        env:
+          TMPPATH: ${{ steps.setup.outputs.tmp-dir-path}}
+        run: |
+          echo "Remove temporary directory '${TMPPATH}' ..."
+          if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then
+            rm -rf "${TMPPATH}"
+          else
+            echo Not a directory.
+          fi
+          if [ -n "$USER_RUNNER_ID" ]; then
+            echo "Fix temp directories owner..."
+            chown -R $USER_RUNNER_ID "$(pwd)/testing" || true
+            chown -R $USER_RUNNER_ID "/deckhouse/testing" || true
+            chown -R $USER_RUNNER_ID /tmp || true
+          else
+            echo "Fix temp directories permissions..."
+ chmod -f -R 777 "$(pwd)/testing" || true + chmod -f -R 777 "/deckhouse/testing" || true + chmod -f -R 777 /tmp || true + fi + # + - name: Update comment on finish + id: update_comment_on_finish + if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + env: + NEEDS_CONTEXT: ${{ toJSON(needs) }} + JOB_CONTEXT: ${{ toJSON(job) }} + STEPS_CONTEXT: ${{ toJSON(steps) }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const statusConfig = 'job,separate'; + const name = 'e2e: VCD, Containerd, Kubernetes 1.28'; + const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); + const jobContext = JSON.parse(process.env.JOB_CONTEXT); + const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); + let jobNames = null + if (process.env.JOB_NAMES) { + jobNames = JSON.parse(process.env.JOB_NAMES); + } + + core.info(`needsContext: ${JSON.stringify(needsContext)}`); + core.info(`jobContext: ${JSON.stringify(jobContext)}`); + core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); + core.info(`jobNames: ${JSON.stringify(jobNames)}`); + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); + # + # + + # + run_containerd_1_29: + name: "e2e: VCD, Containerd, Kubernetes 1.29" + needs: + - check_e2e_labels + - git_info + if: needs.check_e2e_labels.outputs.run_containerd_1_29 == 'true' + outputs: + ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }} + ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }} + run_id: ${{ github.run_id }} + # need for find state in artifact + cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} + ran_for: "vcd;WithoutNAT;containerd;1.29" + failed_cluster_stayed: ${{ 
steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} + issue_number: ${{ inputs.issue_number }} + install_image_path: ${{ steps.setup.outputs.install-image-path }} + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "1.29" + EVENT_LABEL: ${{ github.event.label.name }} + runs-on: [self-hosted, e2e-common] + steps: + + # + - name: Job started timestamp + id: started_at + run: | + unixTimestamp=$(date +%s) + echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT + # + + # + - name: Checkout sources + uses: actions/checkout@v3.5.2 + with: + ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }} + fetch-depth: 0 + # + # + - name: Update comment on start + if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const name = 'e2e: VCD, Containerd, Kubernetes 1.29'; + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnStart({github, context, core, name}) + + # + + + # + - name: Check dev registry credentials + id: check_dev_registry + env: + HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}} + run: | + if [[ -n $HOST ]]; then + echo "has_credentials=true" >> $GITHUB_OUTPUT + echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT + fi + - name: Login to dev registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }} + with: + registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }} + username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }} + password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }} + logout: false + # + + # + - name: Check rw registry credentials + id: check_rw_registry + env: + HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} + run: | + if [[ -n $HOST ]]; then + echo "has_credentials=true" >> $GITHUB_OUTPUT + echo 
"web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT + fi + - name: Login to rw registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }} + with: + registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }} + username: ${{ secrets.DECKHOUSE_REGISTRY_USER }} + password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }} + logout: false + - name: Login to Github Container Registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }} + with: + registry: ghcr.io + username: ${{ secrets.GHCR_IO_REGISTRY_USER }} + password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }} + logout: false + # + + # + - name: Install werf CLI + uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e + with: + channel: ${{env.WERF_CHANNEL}} + # + + - name: Setup + id: setup + env: + DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} + CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}} + CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}} + CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}} + REF_FULL: ${{needs.git_info.outputs.ref_full}} + INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }} + MANUAL_RUN: "true" + run: | + # Calculate unique prefix for e2e test. + # GITHUB_RUN_ID is a unique number for each workflow run. + # GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository. + # Add CRI and KUBERNETES_VERSION to create unique directory for each job. + # CRI and PROVIDER values are trimmed to reduce prefix length. 
+ if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then + KUBERNETES_VERSION_SUF="auto" + else + KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION} + fi + DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}") + if [[ "${MANUAL_RUN}" == "false" ]] ; then + # for jobs which run multiple providers concurrency (daily e2e, for example) + # add provider suffix to prevent "directory already exists" error + DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)" + fi + # converts to DNS-like (all letters in lower case and replace all dots to dash) + # because it prefix will use for k8s resources names (nodes, for example) + DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]') + + # Create tmppath for test script. + TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX} + if [[ -d "${TMP_DIR_PATH}" ]] ; then + echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!" + ls -la ${TMP_DIR_PATH} + exit 1 + else + echo "Create temporary dir for job: ${TMP_DIR_PATH}." + mkdir -p "${TMP_DIR_PATH}" + fi + + ## Source: ci_templates/build.yml + + # Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'. + REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-} + if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then + # REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'. + REPO_SUFFIX= + fi + + # Use dev-registry for Git branches. + BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" + # Use rw-registry for Git tags. + SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse" + + if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then + # DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo. + # Use dev-regisry for branches and Github Container Registry for semver tags. + BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" + SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}" + fi + + # Prepare initial image tag for deploy/deckhouse to test switching from previous release. 
+ INITIAL_IMAGE_TAG= + if [[ -n ${INITIAL_REF_SLUG} ]] ; then + INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} + fi + + # Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh). + # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. + # Use it as image tag. Add suffix to not overlap with PRs in main repo. + IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} + + INSTALL_IMAGE_NAME= + if [[ -n ${CI_COMMIT_BRANCH} ]]; then + # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. + INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG} + fi + if [[ -n ${CI_COMMIT_TAG} ]] ; then + REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe + INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG} + fi + if [[ -n ${INITIAL_REF_SLUG} ]] ; then + INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG} + git fetch origin ${INITIAL_REF_SLUG} + git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts + fi + SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]') + echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}" + + # Print image name in uppercase to prevent hiding non-secret registry host stored in secret. + echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'." 
+ docker pull "${INSTALL_IMAGE_NAME}" + + IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}" + + echo '::echo::on' + echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT + echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT + echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT + echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT + echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT + echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT + echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT + + echo '::echo::off' + + - name: "Run e2e test: VCD/Containerd/1.29" + id: e2e_test_run + timeout-minutes: 80 + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "1.29" + LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} + LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} + TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} + PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} + INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} + DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} + INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} + # + COMMENT_ID: ${{ inputs.comment_id }} + GITHUB_API_SERVER: ${{ github.api_url }} + REPOSITORY: ${{ github.repository }} + DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} + GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + run: | + echo "Execute 'script.sh run-test' via 'docker run', using environment: + INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} + DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} + INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} + PREFIX=${PREFIX} + PROVIDER=${PROVIDER} + CRI=${CRI} + LAYOUT=${LAYOUT} + KUBERNETES_VERSION=${KUBERNETES_VERSION} + TMP_DIR_PATH=${TMP_DIR_PATH} + " + + ls -lh $(pwd)/testing + + dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + echo "DHCTL log file: $dhctl_log_file" + + user_runner_id=$(id -u):$(id -g) + echo "user_runner_id 
$user_runner_id" + echo "Start waiting ssh connection string script" + comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}" + echo "Full comment url for updating ${comment_url}" + + ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT + + bastion_ip_file="" + # A bastion ip file is produced only for Static and VCD providers. + # NOTE: this workflow sets PROVIDER=VCD (renamed from vCloudDirector), so the + # comparison below must use "VCD" or the branch is dead code. + if [[ "${PROVIDER}" == "Static" ]] ; then + bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + elif [[ "${PROVIDER}" == "VCD" ]] ; then + bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + fi + + echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT + + $(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 & + + + docker run --rm \ + -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ + -e PREFIX=${PREFIX} \ + -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ + -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ + -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ + -e CRI=${CRI} \ + -e PROVIDER=${PROVIDER:-not_provided} \ + -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ + -e LAYOUT=${LAYOUT:-not_provided} \ + -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ + -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e USER_RUNNER_ID=${user_runner_id} \ + -v $(pwd)/testing:/deckhouse/testing \ + -v $(pwd)/release.yaml:/deckhouse/release.yaml \ + -v ${TMP_DIR_PATH}:/tmp \ + -w /deckhouse \ + ${INSTALL_IMAGE_NAME} \ + bash /deckhouse/testing/cloud_layouts/script.sh run-test + + # + - name: Read connection string + if: ${{ failure() || cancelled() }} + id: check_stay_failed_cluster + uses: actions/github-script@v6.4.1 + env: + SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }} + 
SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }} + with: + # it sets `should_run` output var if e2e/failed/stay label + script: | + const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup'); + await e2e_cleanup.readConnectionScript({core, context, github}); + + - name: Label pr if e2e failed + if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }} + uses: actions-ecosystem/action-add-labels@v1 + with: + github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }} + number: ${{ needs.git_info.outputs.pr_number }} + labels: "e2e/cluster/failed" + + - name: Cleanup bootstrapped cluster + if: success() + id: cleanup_cluster + timeout-minutes: 60 + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "1.29" + LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} + LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} + TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} + PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} + INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} + DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} + # + COMMENT_ID: ${{ inputs.comment_id }} + GITHUB_API_SERVER: ${{ github.api_url }} + REPOSITORY: ${{ github.repository }} + DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} + GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + run: | + echo "Execute 'script.sh cleanup' via 'docker run', using environment: + INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} + DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} + INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} + PREFIX=${PREFIX} + PROVIDER=${PROVIDER} + CRI=${CRI} + LAYOUT=${LAYOUT} + KUBERNETES_VERSION=${KUBERNETES_VERSION} + TMP_DIR_PATH=${TMP_DIR_PATH} + " + + ls -lh $(pwd)/testing + + dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + echo "DHCTL log file: $dhctl_log_file" + + user_runner_id=$(id -u):$(id -g) + echo "user_runner_id $user_runner_id" + + docker run --rm 
\ + -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ + -e PREFIX=${PREFIX} \ + -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ + -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ + -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ + -e CRI=${CRI} \ + -e PROVIDER=${PROVIDER:-not_provided} \ + -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ + -e LAYOUT=${LAYOUT:-not_provided} \ + -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ + -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e USER_RUNNER_ID=${user_runner_id} \ + -v $(pwd)/testing:/deckhouse/testing \ + -v $(pwd)/release.yaml:/deckhouse/release.yaml \ + -v ${TMP_DIR_PATH}:/tmp \ + -w /deckhouse \ + ${INSTALL_IMAGE_NAME} \ + bash /deckhouse/testing/cloud_layouts/script.sh cleanup + + # + + - name: Save dhctl state + id: save_failed_cluster_state + if: ${{ failure() }} + uses: actions/upload-artifact@v3.1.2 + with: + name: failed_cluster_state_vcd_containerd_1_29 + path: | + ${{ steps.setup.outputs.tmp-dir-path}}/dhctl + ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate + ${{ steps.setup.outputs.tmp-dir-path}}/logs + + - name: Save test results + if: ${{ steps.setup.outputs.dhctl-log-file }} + uses: actions/upload-artifact@v3.1.2 + with: + name: test_output_vcd_containerd_1_29 + path: | + ${{ steps.setup.outputs.dhctl-log-file}}* + ${{ steps.setup.outputs.tmp-dir-path}}/logs + testing/cloud_layouts/ + !testing/cloud_layouts/**/sshkey + + - name: Cleanup temp directory + if: always() + env: + # The setup step publishes this path as "tmp-dir-path" (there is no + # "tmppath" output), otherwise TMPPATH is empty and nothing is removed. + TMPPATH: ${{ steps.setup.outputs.tmp-dir-path}} + run: | + echo "Remove temporary directory '${TMPPATH}' ..." + if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then + rm -rf "${TMPPATH}" + else + echo Not a directory. + fi + # Quote the variable: an unquoted empty value makes `[ -n ]` always true + # and the chmod fallback branch unreachable. + if [ -n "$USER_RUNNER_ID" ]; then + echo "Fix temp directories owner..." + chown -R $USER_RUNNER_ID "$(pwd)/testing" || true + chown -R $USER_RUNNER_ID "/deckhouse/testing" || true + chown -R $USER_RUNNER_ID /tmp || true + else + echo "Fix temp directories permissions..." 
+ chmod -f -R 777 "$(pwd)/testing" || true + chmod -f -R 777 "/deckhouse/testing" || true + chmod -f -R 777 /tmp || true + fi + # + - name: Update comment on finish + id: update_comment_on_finish + if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + env: + NEEDS_CONTEXT: ${{ toJSON(needs) }} + JOB_CONTEXT: ${{ toJSON(job) }} + STEPS_CONTEXT: ${{ toJSON(steps) }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const statusConfig = 'job,separate'; + const name = 'e2e: VCD, Containerd, Kubernetes 1.29'; + const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); + const jobContext = JSON.parse(process.env.JOB_CONTEXT); + const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); + let jobNames = null + if (process.env.JOB_NAMES) { + jobNames = JSON.parse(process.env.JOB_NAMES); + } + + core.info(`needsContext: ${JSON.stringify(needsContext)}`); + core.info(`jobContext: ${JSON.stringify(jobContext)}`); + core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); + core.info(`jobNames: ${JSON.stringify(jobNames)}`); + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); + # + # + + # + run_containerd_1_30: + name: "e2e: VCD, Containerd, Kubernetes 1.30" + needs: + - check_e2e_labels + - git_info + if: needs.check_e2e_labels.outputs.run_containerd_1_30 == 'true' + outputs: + ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }} + ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }} + run_id: ${{ github.run_id }} + # need for find state in artifact + cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} + ran_for: "vcd;WithoutNAT;containerd;1.30" + failed_cluster_stayed: ${{ 
steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} + issue_number: ${{ inputs.issue_number }} + install_image_path: ${{ steps.setup.outputs.install-image-path }} + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "1.30" + EVENT_LABEL: ${{ github.event.label.name }} + runs-on: [self-hosted, e2e-common] + steps: + + # + - name: Job started timestamp + id: started_at + run: | + unixTimestamp=$(date +%s) + echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT + # + + # + - name: Checkout sources + uses: actions/checkout@v3.5.2 + with: + ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }} + fetch-depth: 0 + # + # + - name: Update comment on start + if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const name = 'e2e: VCD, Containerd, Kubernetes 1.30'; + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnStart({github, context, core, name}) + + # + + + # + - name: Check dev registry credentials + id: check_dev_registry + env: + HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}} + run: | + if [[ -n $HOST ]]; then + echo "has_credentials=true" >> $GITHUB_OUTPUT + echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT + fi + - name: Login to dev registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }} + with: + registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }} + username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }} + password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }} + logout: false + # + + # + - name: Check rw registry credentials + id: check_rw_registry + env: + HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} + run: | + if [[ -n $HOST ]]; then + echo "has_credentials=true" >> $GITHUB_OUTPUT + echo 
"web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT + fi + - name: Login to rw registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }} + with: + registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }} + username: ${{ secrets.DECKHOUSE_REGISTRY_USER }} + password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }} + logout: false + - name: Login to Github Container Registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }} + with: + registry: ghcr.io + username: ${{ secrets.GHCR_IO_REGISTRY_USER }} + password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }} + logout: false + # + + # + - name: Install werf CLI + uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e + with: + channel: ${{env.WERF_CHANNEL}} + # + + - name: Setup + id: setup + env: + DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} + CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}} + CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}} + CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}} + REF_FULL: ${{needs.git_info.outputs.ref_full}} + INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }} + MANUAL_RUN: "true" + run: | + # Calculate unique prefix for e2e test. + # GITHUB_RUN_ID is a unique number for each workflow run. + # GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository. + # Add CRI and KUBERNETES_VERSION to create unique directory for each job. + # CRI and PROVIDER values are trimmed to reduce prefix length. 
+ if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then + KUBERNETES_VERSION_SUF="auto" + else + KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION} + fi + DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}") + if [[ "${MANUAL_RUN}" == "false" ]] ; then + # for jobs which run multiple providers concurrency (daily e2e, for example) + # add provider suffix to prevent "directory already exists" error + DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)" + fi + # converts to DNS-like (all letters in lower case and replace all dots to dash) + # because it prefix will use for k8s resources names (nodes, for example) + DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]') + + # Create tmppath for test script. + TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX} + if [[ -d "${TMP_DIR_PATH}" ]] ; then + echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!" + ls -la ${TMP_DIR_PATH} + exit 1 + else + echo "Create temporary dir for job: ${TMP_DIR_PATH}." + mkdir -p "${TMP_DIR_PATH}" + fi + + ## Source: ci_templates/build.yml + + # Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'. + REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-} + if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then + # REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'. + REPO_SUFFIX= + fi + + # Use dev-registry for Git branches. + BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" + # Use rw-registry for Git tags. + SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse" + + if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then + # DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo. + # Use dev-regisry for branches and Github Container Registry for semver tags. + BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" + SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}" + fi + + # Prepare initial image tag for deploy/deckhouse to test switching from previous release. 
+ INITIAL_IMAGE_TAG= + if [[ -n ${INITIAL_REF_SLUG} ]] ; then + INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} + fi + + # Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh). + # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. + # Use it as image tag. Add suffix to not overlap with PRs in main repo. + IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} + + INSTALL_IMAGE_NAME= + if [[ -n ${CI_COMMIT_BRANCH} ]]; then + # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. + INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG} + fi + if [[ -n ${CI_COMMIT_TAG} ]] ; then + REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe + INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG} + fi + if [[ -n ${INITIAL_REF_SLUG} ]] ; then + INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG} + git fetch origin ${INITIAL_REF_SLUG} + git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts + fi + SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]') + echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}" + + # Print image name in uppercase to prevent hiding non-secret registry host stored in secret. + echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'." 
+ docker pull "${INSTALL_IMAGE_NAME}" + + IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}" + + echo '::echo::on' + echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT + echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT + echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT + echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT + echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT + echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT + echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT + + echo '::echo::off' + + - name: "Run e2e test: VCD/Containerd/1.30" + id: e2e_test_run + timeout-minutes: 80 + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "1.30" + LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} + LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} + TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} + PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} + INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} + DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} + INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} + # + COMMENT_ID: ${{ inputs.comment_id }} + GITHUB_API_SERVER: ${{ github.api_url }} + REPOSITORY: ${{ github.repository }} + DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} + GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + run: | + echo "Execute 'script.sh run-test' via 'docker run', using environment: + INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} + DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} + INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} + PREFIX=${PREFIX} + PROVIDER=${PROVIDER} + CRI=${CRI} + LAYOUT=${LAYOUT} + KUBERNETES_VERSION=${KUBERNETES_VERSION} + TMP_DIR_PATH=${TMP_DIR_PATH} + " + + ls -lh $(pwd)/testing + + dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + echo "DHCTL log file: $dhctl_log_file" + + user_runner_id=$(id -u):$(id -g) + echo "user_runner_id 
$user_runner_id" + echo "Start waiting ssh connection string script" + comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}" + echo "Full comment url for updating ${comment_url}" + + ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT + + bastion_ip_file="" + # A bastion ip file is produced only for Static and VCD providers. + # NOTE: this workflow sets PROVIDER=VCD (renamed from vCloudDirector), so the + # comparison below must use "VCD" or the branch is dead code. + if [[ "${PROVIDER}" == "Static" ]] ; then + bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + elif [[ "${PROVIDER}" == "VCD" ]] ; then + bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + fi + + echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT + + $(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 & + + + docker run --rm \ + -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ + -e PREFIX=${PREFIX} \ + -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ + -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ + -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ + -e CRI=${CRI} \ + -e PROVIDER=${PROVIDER:-not_provided} \ + -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ + -e LAYOUT=${LAYOUT:-not_provided} \ + -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ + -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e USER_RUNNER_ID=${user_runner_id} \ + -v $(pwd)/testing:/deckhouse/testing \ + -v $(pwd)/release.yaml:/deckhouse/release.yaml \ + -v ${TMP_DIR_PATH}:/tmp \ + -w /deckhouse \ + ${INSTALL_IMAGE_NAME} \ + bash /deckhouse/testing/cloud_layouts/script.sh run-test + + # + - name: Read connection string + if: ${{ failure() || cancelled() }} + id: check_stay_failed_cluster + uses: actions/github-script@v6.4.1 + env: + SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }} + 
SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }} + with: + # it sets `should_run` output var if e2e/failed/stay label + script: | + const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup'); + await e2e_cleanup.readConnectionScript({core, context, github}); + + - name: Label pr if e2e failed + if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }} + uses: actions-ecosystem/action-add-labels@v1 + with: + github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }} + number: ${{ needs.git_info.outputs.pr_number }} + labels: "e2e/cluster/failed" + + - name: Cleanup bootstrapped cluster + if: success() + id: cleanup_cluster + timeout-minutes: 60 + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "1.30" + LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} + LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} + TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} + PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} + INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} + DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} + # + COMMENT_ID: ${{ inputs.comment_id }} + GITHUB_API_SERVER: ${{ github.api_url }} + REPOSITORY: ${{ github.repository }} + DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} + GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + run: | + echo "Execute 'script.sh cleanup' via 'docker run', using environment: + INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} + DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} + INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} + PREFIX=${PREFIX} + PROVIDER=${PROVIDER} + CRI=${CRI} + LAYOUT=${LAYOUT} + KUBERNETES_VERSION=${KUBERNETES_VERSION} + TMP_DIR_PATH=${TMP_DIR_PATH} + " + + ls -lh $(pwd)/testing + + dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + echo "DHCTL log file: $dhctl_log_file" + + user_runner_id=$(id -u):$(id -g) + echo "user_runner_id $user_runner_id" + + docker run --rm 
\ + -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ + -e PREFIX=${PREFIX} \ + -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ + -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ + -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ + -e CRI=${CRI} \ + -e PROVIDER=${PROVIDER:-not_provided} \ + -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ + -e LAYOUT=${LAYOUT:-not_provided} \ + -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ + -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e USER_RUNNER_ID=${user_runner_id} \ + -v $(pwd)/testing:/deckhouse/testing \ + -v $(pwd)/release.yaml:/deckhouse/release.yaml \ + -v ${TMP_DIR_PATH}:/tmp \ + -w /deckhouse \ + ${INSTALL_IMAGE_NAME} \ + bash /deckhouse/testing/cloud_layouts/script.sh cleanup + + # + + - name: Save dhctl state + id: save_failed_cluster_state + if: ${{ failure() }} + uses: actions/upload-artifact@v3.1.2 + with: + name: failed_cluster_state_vcd_containerd_1_30 + path: | + ${{ steps.setup.outputs.tmp-dir-path}}/dhctl + ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate + ${{ steps.setup.outputs.tmp-dir-path}}/logs + + - name: Save test results + if: ${{ steps.setup.outputs.dhctl-log-file }} + uses: actions/upload-artifact@v3.1.2 + with: + name: test_output_vcd_containerd_1_30 + path: | + ${{ steps.setup.outputs.dhctl-log-file}}* + ${{ steps.setup.outputs.tmp-dir-path}}/logs + testing/cloud_layouts/ + !testing/cloud_layouts/**/sshkey + + - name: Cleanup temp directory + if: always() + env: + # The setup step publishes this path as "tmp-dir-path" (there is no + # "tmppath" output), otherwise TMPPATH is empty and nothing is removed. + TMPPATH: ${{ steps.setup.outputs.tmp-dir-path}} + run: | + echo "Remove temporary directory '${TMPPATH}' ..." + if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then + rm -rf "${TMPPATH}" + else + echo Not a directory. + fi + # Quote the variable: an unquoted empty value makes `[ -n ]` always true + # and the chmod fallback branch unreachable. + if [ -n "$USER_RUNNER_ID" ]; then + echo "Fix temp directories owner..." + chown -R $USER_RUNNER_ID "$(pwd)/testing" || true + chown -R $USER_RUNNER_ID "/deckhouse/testing" || true + chown -R $USER_RUNNER_ID /tmp || true + else + echo "Fix temp directories permissions..." 
+ chmod -f -R 777 "$(pwd)/testing" || true + chmod -f -R 777 "/deckhouse/testing" || true + chmod -f -R 777 /tmp || true + fi + # + - name: Update comment on finish + id: update_comment_on_finish + if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + env: + NEEDS_CONTEXT: ${{ toJSON(needs) }} + JOB_CONTEXT: ${{ toJSON(job) }} + STEPS_CONTEXT: ${{ toJSON(steps) }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const statusConfig = 'job,separate'; + const name = 'e2e: VCD, Containerd, Kubernetes 1.30'; + const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); + const jobContext = JSON.parse(process.env.JOB_CONTEXT); + const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); + let jobNames = null + if (process.env.JOB_NAMES) { + jobNames = JSON.parse(process.env.JOB_NAMES); + } + + core.info(`needsContext: ${JSON.stringify(needsContext)}`); + core.info(`jobContext: ${JSON.stringify(jobContext)}`); + core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); + core.info(`jobNames: ${JSON.stringify(jobNames)}`); + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); + # + # + + # + run_containerd_Automatic: + name: "e2e: VCD, Containerd, Kubernetes Automatic" + needs: + - check_e2e_labels + - git_info + if: needs.check_e2e_labels.outputs.run_containerd_Automatic == 'true' + outputs: + ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }} + ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }} + run_id: ${{ github.run_id }} + # need for find state in artifact + cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} + ran_for: "vcd;WithoutNAT;containerd;Automatic" + failed_cluster_stayed: ${{ 
steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} + issue_number: ${{ inputs.issue_number }} + install_image_path: ${{ steps.setup.outputs.install-image-path }} + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "Automatic" + EVENT_LABEL: ${{ github.event.label.name }} + runs-on: [self-hosted, e2e-common] + steps: + + # + - name: Job started timestamp + id: started_at + run: | + unixTimestamp=$(date +%s) + echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT + # + + # + - name: Checkout sources + uses: actions/checkout@v3.5.2 + with: + ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }} + fetch-depth: 0 + # + # + - name: Update comment on start + if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const name = 'e2e: VCD, Containerd, Kubernetes Automatic'; + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnStart({github, context, core, name}) + + # + + + # + - name: Check dev registry credentials + id: check_dev_registry + env: + HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}} + run: | + if [[ -n $HOST ]]; then + echo "has_credentials=true" >> $GITHUB_OUTPUT + echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT + fi + - name: Login to dev registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }} + with: + registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }} + username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }} + password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }} + logout: false + # + + # + - name: Check rw registry credentials + id: check_rw_registry + env: + HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} + run: | + if [[ -n $HOST ]]; then + echo "has_credentials=true" >> $GITHUB_OUTPUT + echo 
"web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT + fi + - name: Login to rw registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }} + with: + registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }} + username: ${{ secrets.DECKHOUSE_REGISTRY_USER }} + password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }} + logout: false + - name: Login to Github Container Registry + uses: docker/login-action@v2.1.0 + if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }} + with: + registry: ghcr.io + username: ${{ secrets.GHCR_IO_REGISTRY_USER }} + password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }} + logout: false + # + + # + - name: Install werf CLI + uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e + with: + channel: ${{env.WERF_CHANNEL}} + # + + - name: Setup + id: setup + env: + DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} + CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}} + CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}} + CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}} + REF_FULL: ${{needs.git_info.outputs.ref_full}} + INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }} + MANUAL_RUN: "true" + run: | + # Calculate unique prefix for e2e test. + # GITHUB_RUN_ID is a unique number for each workflow run. + # GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository. + # Add CRI and KUBERNETES_VERSION to create unique directory for each job. + # CRI and PROVIDER values are trimmed to reduce prefix length. 
+ if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then + KUBERNETES_VERSION_SUF="auto" + else + KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION} + fi + DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}") + if [[ "${MANUAL_RUN}" == "false" ]] ; then + # for jobs which run multiple providers concurrency (daily e2e, for example) + # add provider suffix to prevent "directory already exists" error + DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)" + fi + # converts to DNS-like (all letters in lower case and replace all dots to dash) + # because it prefix will use for k8s resources names (nodes, for example) + DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]') + + # Create tmppath for test script. + TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX} + if [[ -d "${TMP_DIR_PATH}" ]] ; then + echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!" + ls -la ${TMP_DIR_PATH} + exit 1 + else + echo "Create temporary dir for job: ${TMP_DIR_PATH}." + mkdir -p "${TMP_DIR_PATH}" + fi + + ## Source: ci_templates/build.yml + + # Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'. + REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-} + if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then + # REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'. + REPO_SUFFIX= + fi + + # Use dev-registry for Git branches. + BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" + # Use rw-registry for Git tags. + SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse" + + if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then + # DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo. + # Use dev-regisry for branches and Github Container Registry for semver tags. + BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" + SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}" + fi + + # Prepare initial image tag for deploy/deckhouse to test switching from previous release. 
+ INITIAL_IMAGE_TAG= + if [[ -n ${INITIAL_REF_SLUG} ]] ; then + INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} + fi + + # Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh). + # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. + # Use it as image tag. Add suffix to not overlap with PRs in main repo. + IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} + + INSTALL_IMAGE_NAME= + if [[ -n ${CI_COMMIT_BRANCH} ]]; then + # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. + INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG} + fi + if [[ -n ${CI_COMMIT_TAG} ]] ; then + REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe + INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG} + fi + if [[ -n ${INITIAL_REF_SLUG} ]] ; then + INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG} + git fetch origin ${INITIAL_REF_SLUG} + git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts + fi + SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]') + echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}" + + # Print image name in uppercase to prevent hiding non-secret registry host stored in secret. + echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'." 
+ docker pull "${INSTALL_IMAGE_NAME}" + + IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}" + + echo '::echo::on' + echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT + echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT + echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT + echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT + echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT + echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT + echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT + + echo '::echo::off' + + - name: "Run e2e test: VCD/Containerd/Automatic" + id: e2e_test_run + timeout-minutes: 80 + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "Automatic" + LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} + LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} + TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} + PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} + INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} + DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} + INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} + # + COMMENT_ID: ${{ inputs.comment_id }} + GITHUB_API_SERVER: ${{ github.api_url }} + REPOSITORY: ${{ github.repository }} + DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} + GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + run: | + echo "Execute 'script.sh run-test' via 'docker run', using environment: + INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} + DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} + INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} + PREFIX=${PREFIX} + PROVIDER=${PROVIDER} + CRI=${CRI} + LAYOUT=${LAYOUT} + KUBERNETES_VERSION=${KUBERNETES_VERSION} + TMP_DIR_PATH=${TMP_DIR_PATH} + " + + ls -lh $(pwd)/testing + + dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + echo "DHCTL log file: $dhctl_log_file" + + user_runner_id=$(id -u):$(id -g) + echo 
"user_runner_id $user_runner_id" + echo "Start waiting ssh connection string script" + comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}" + echo "Full comment url for updating ${comment_url}" + + ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT + + bastion_ip_file="" + if [[ "${PROVIDER}" == "Static" ]] ; then + bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + fi + + echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT + + $(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 & + + + docker run --rm \ + -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ + -e PREFIX=${PREFIX} \ + -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ + -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ + -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ + -e CRI=${CRI} \ + -e PROVIDER=${PROVIDER:-not_provided} \ + -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ + -e LAYOUT=${LAYOUT:-not_provided} \ + -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ + -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e USER_RUNNER_ID=${user_runner_id} \ + -v $(pwd)/testing:/deckhouse/testing \ + -v $(pwd)/release.yaml:/deckhouse/release.yaml \ + -v ${TMP_DIR_PATH}:/tmp \ + -w /deckhouse \ + ${INSTALL_IMAGE_NAME} \ + bash /deckhouse/testing/cloud_layouts/script.sh run-test + + # + - name: Read connection string + if: ${{ failure() || cancelled() }} + id: check_stay_failed_cluster + uses: actions/github-script@v6.4.1 + env: + SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }} 
+ SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }} + with: + # it sets `should_run` output var if e2e/failed/stay label + script: | + const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup'); + await e2e_cleanup.readConnectionScript({core, context, github}); + + - name: Label pr if e2e failed + if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }} + uses: actions-ecosystem/action-add-labels@v1 + with: + github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }} + number: ${{ needs.git_info.outputs.pr_number }} + labels: "e2e/cluster/failed" + + - name: Cleanup bootstrapped cluster + if: success() + id: cleanup_cluster + timeout-minutes: 60 + env: + PROVIDER: VCD + CRI: Containerd + LAYOUT: WithoutNAT + KUBERNETES_VERSION: "Automatic" + LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} + LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} + TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} + PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} + INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} + DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} + # + COMMENT_ID: ${{ inputs.comment_id }} + GITHUB_API_SERVER: ${{ github.api_url }} + REPOSITORY: ${{ github.repository }} + DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} + GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + run: | + echo "Execute 'script.sh cleanup' via 'docker run', using environment: + INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} + DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} + INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} + PREFIX=${PREFIX} + PROVIDER=${PROVIDER} + CRI=${CRI} + LAYOUT=${LAYOUT} + KUBERNETES_VERSION=${KUBERNETES_VERSION} + TMP_DIR_PATH=${TMP_DIR_PATH} + " + + ls -lh $(pwd)/testing + + dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" + echo "DHCTL log file: $dhctl_log_file" + + user_runner_id=$(id -u):$(id -g) + echo "user_runner_id $user_runner_id" + + docker 
run --rm \ + -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ + -e PREFIX=${PREFIX} \ + -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ + -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ + -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ + -e CRI=${CRI} \ + -e PROVIDER=${PROVIDER:-not_provided} \ + -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ + -e LAYOUT=${LAYOUT:-not_provided} \ + -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ + -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e USER_RUNNER_ID=${user_runner_id} \ + -v $(pwd)/testing:/deckhouse/testing \ + -v $(pwd)/release.yaml:/deckhouse/release.yaml \ + -v ${TMP_DIR_PATH}:/tmp \ + -w /deckhouse \ + ${INSTALL_IMAGE_NAME} \ + bash /deckhouse/testing/cloud_layouts/script.sh cleanup + + # + + - name: Save dhctl state + id: save_failed_cluster_state + if: ${{ failure() }} + uses: actions/upload-artifact@v3.1.2 + with: + name: failed_cluster_state_vcd_containerd_Automatic + path: | + ${{ steps.setup.outputs.tmp-dir-path}}/dhctl + ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate + ${{ steps.setup.outputs.tmp-dir-path}}/logs + + - name: Save test results + if: ${{ steps.setup.outputs.dhctl-log-file }} + uses: actions/upload-artifact@v3.1.2 + with: + name: test_output_vcd_containerd_Automatic + path: | + ${{ steps.setup.outputs.dhctl-log-file}}* + ${{ steps.setup.outputs.tmp-dir-path}}/logs + testing/cloud_layouts/ + !testing/cloud_layouts/**/sshkey + + - name: Cleanup temp directory + if: always() + env: + TMPPATH: ${{ steps.setup.outputs.tmppath}} + run: | + echo "Remove temporary directory '${TMPPATH}' ..." + if [[ -d "${TMPPATH}" && ${#TMPPATH} > 1 ]] ; then + rm -rf "${TMPPATH}" + else + echo Not a directory. + fi + if [ -n $USER_RUNNER_ID ]; then + echo "Fix temp directories owner..." + chown -R $USER_RUNNER_ID "$(pwd)/testing" || true + chown -R $USER_RUNNER_ID "/deckhouse/testing" || true + chown -R $USER_RUNNER_ID /tmp || true + else + echo "Fix temp directories permissions..." 
+ chmod -f -R 777 "$(pwd)/testing" || true + chmod -f -R 777 "/deckhouse/testing" || true + chmod -f -R 777 /tmp || true + fi + # + - name: Update comment on finish + id: update_comment_on_finish + if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + env: + NEEDS_CONTEXT: ${{ toJSON(needs) }} + JOB_CONTEXT: ${{ toJSON(job) }} + STEPS_CONTEXT: ${{ toJSON(steps) }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const statusConfig = 'job,separate'; + const name = 'e2e: VCD, Containerd, Kubernetes Automatic'; + const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); + const jobContext = JSON.parse(process.env.JOB_CONTEXT); + const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); + let jobNames = null + if (process.env.JOB_NAMES) { + jobNames = JSON.parse(process.env.JOB_NAMES); + } + + core.info(`needsContext: ${JSON.stringify(needsContext)}`); + core.info(`jobContext: ${JSON.stringify(jobContext)}`); + core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); + core.info(`jobNames: ${JSON.stringify(jobNames)}`); + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); + # + # + + + last_comment: + name: Update comment on finish + needs: ["started_at","git_info","run_containerd_1_26","run_containerd_1_27","run_containerd_1_28","run_containerd_1_29","run_containerd_1_30","run_containerd_Automatic"] + if: ${{ always() }} + runs-on: ubuntu-latest + env: + JOB_NAMES: | + {"run_containerd_1_26":"e2e: VCD, Containerd, Kubernetes 1.26","run_containerd_1_27":"e2e: VCD, Containerd, Kubernetes 1.27","run_containerd_1_28":"e2e: VCD, Containerd, Kubernetes 1.28","run_containerd_1_29":"e2e: VCD, Containerd, Kubernetes 1.29","run_containerd_1_30":"e2e: VCD, Containerd, Kubernetes 1.30","run_containerd_Automatic":"e2e: 
VCD, Containerd, Kubernetes Automatic"} + steps: + + # + - name: Checkout sources + uses: actions/checkout@v3.5.2 + + # + # + - name: Update comment on finish + id: update_comment_on_finish + if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} + env: + NEEDS_CONTEXT: ${{ toJSON(needs) }} + JOB_CONTEXT: ${{ toJSON(job) }} + STEPS_CONTEXT: ${{ toJSON(steps) }} + uses: actions/github-script@v6.4.1 + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + retries: 3 + script: | + const statusConfig = 'workflow,final,no-skipped,restore-separate'; + const name = 'e2e: VCD'; + const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); + const jobContext = JSON.parse(process.env.JOB_CONTEXT); + const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); + let jobNames = null + if (process.env.JOB_NAMES) { + jobNames = JSON.parse(process.env.JOB_NAMES); + } + + core.info(`needsContext: ${JSON.stringify(needsContext)}`); + core.info(`jobContext: ${JSON.stringify(jobContext)}`); + core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); + core.info(`jobNames: ${JSON.stringify(jobNames)}`); + + const ci = require('./.github/scripts/js/ci'); + return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); + # + + + # + - name: Set commit status after e2e run + id: set_e2e_requirement_status + if: ${{ always() }} + uses: actions/github-script@v6.4.1 + env: + JOB_STATUS: ${{ job.status }} + STATUS_TARGET_COMMIT: ${{needs.git_info.outputs.github_sha}} + with: + github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} + script: | + const e2eStatus = require('./.github/scripts/js/e2e-commit-status'); + + await e2eStatus.setStatusAfterE2eRun({github, context, core}); + # +# From 9fd20adf03c5e45807863212c1ad4b838e68b777 Mon Sep 17 00:00:00 2001 From: borg-z Date: Mon, 5 Aug 2024 16:37:17 +0300 Subject: [PATCH 3/6] $RANDOM Signed-off-by: borg-z --- test | 1 + 1 file 
changed, 1 insertion(+) create mode 100644 test diff --git a/test b/test new file mode 100644 index 0000000000..ca6d7e8a74 --- /dev/null +++ b/test @@ -0,0 +1 @@ +31848 From beaed0572a6e9807e374a0c8ef06e3d3d6356b4f Mon Sep 17 00:00:00 2001 From: borg-z Date: Mon, 5 Aug 2024 17:08:42 +0300 Subject: [PATCH 4/6] renaming Signed-off-by: borg-z --- .github/ci_templates/e2e_tests.yml | 8 +- .github/workflow_templates/e2e-daily.yml | 2 +- .../workflow_templates/e2e.abort.multi.yml | 2 +- ...t-vclouddirector.yml => e2e-abort-vcd.yml} | 66 +- .github/workflows/e2e-aws.yml | 12 +- .github/workflows/e2e-azure.yml | 12 +- .github/workflows/e2e-daily.yml | 22 +- .github/workflows/e2e-eks.yml | 12 +- .github/workflows/e2e-gcp.yml | 12 +- .github/workflows/e2e-openstack.yml | 12 +- .github/workflows/e2e-static.yml | 12 +- .github/workflows/e2e-vcd.yml | 180 +- .github/workflows/e2e-vclouddirector.yml | 3158 ----------------- .github/workflows/e2e-vsphere.yml | 12 +- .github/workflows/e2e-yandex-cloud.yml | 12 +- 15 files changed, 248 insertions(+), 3286 deletions(-) rename .github/workflows/{e2e-abort-vclouddirector.yml => e2e-abort-vcd.yml} (96%) delete mode 100644 .github/workflows/e2e-vclouddirector.yml diff --git a/.github/ci_templates/e2e_tests.yml b/.github/ci_templates/e2e_tests.yml index fb7b1a9369..5d30213f2d 100644 --- a/.github/ci_templates/e2e_tests.yml +++ b/.github/ci_templates/e2e_tests.yml @@ -11,7 +11,7 @@ {!{- if eq $ctx.provider "vsphere" -}!} {!{- $layout = "Standard" -}!} {!{- end -}!} -{!{- if eq $ctx.provider "vclouddirector" -}!} +{!{- if eq $ctx.provider "vcd" -}!} {!{- $layout = "Standard" -}!} {!{- end -}!} {!{- if eq $ctx.provider "static" -}!} @@ -86,7 +86,7 @@ {!{- else if eq $provider "vsphere" }!} LAYOUT_VSPHERE_PASSWORD: ${{ secrets.LAYOUT_VSPHERE_PASSWORD }} LAYOUT_VSPHERE_BASE_DOMAIN: ${{ secrets.LAYOUT_VSPHERE_BASE_DOMAIN }} -{!{- else if eq $provider "vclouddirector" }!} +{!{- else if eq $provider "vcd" }!} LAYOUT_VCD_PASSWORD: ${{ 
secrets.LAYOUT_VCD_PASSWORD }} LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} LAYOUT_STATIC_BASTION_IP: 80.249.129.56 @@ -136,7 +136,7 @@ run: | bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -244,7 +244,7 @@ run: | {!{- else if eq $provider "vsphere" }!} -e LAYOUT_VSPHERE_PASSWORD=${LAYOUT_VSPHERE_PASSWORD:-not_provided} \ -e LAYOUT_VSPHERE_BASE_DOMAIN=${LAYOUT_VSPHERE_BASE_DOMAIN:-not_provided} \ -{!{- else if eq $provider "vclouddirector" }!} +{!{- else if eq $provider "vcd" }!} -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ diff --git a/.github/workflow_templates/e2e-daily.yml b/.github/workflow_templates/e2e-daily.yml index b2eb7c6855..66df39386e 100644 --- a/.github/workflow_templates/e2e-daily.yml +++ b/.github/workflow_templates/e2e-daily.yml @@ -49,7 +49,7 @@ jobs: {!{/* Jobs for each CRI and Kubernetes version */}!} {!{- $criName := "Containerd" -}!} {!{- $kubernetesVersion := "1.27" -}!} -{!{- $providerNames := slice "AWS" "Azure" "GCP" "Yandex.Cloud" "OpenStack" "vSphere" "vCloudDirector" "Static" -}!} +{!{- $providerNames := slice "AWS" "Azure" "GCP" "Yandex.Cloud" "OpenStack" "vSphere" "VCD" "Static" -}!} {!{- if $enableWorkflowOnTestRepos -}!} {!{- $providerNames = slice "AWS" "OpenStack" "Azure" -}!} {!{- end -}!} diff --git a/.github/workflow_templates/e2e.abort.multi.yml b/.github/workflow_templates/e2e.abort.multi.yml index 5afd602cef..512c1bc676 100644 --- a/.github/workflow_templates/e2e.abort.multi.yml +++ b/.github/workflow_templates/e2e.abort.multi.yml @@ -32,7 +32,7 @@ $CI_COMMIT_REF_SLUG is a tag of 
published deckhouse images. It has a form */}!} -{!{- $providerNames := slice "AWS" "Azure" "GCP" "Yandex.Cloud" "OpenStack" "vSphere" "vCloudDirector" "Static" "EKS" -}!} +{!{- $providerNames := slice "AWS" "Azure" "GCP" "Yandex.Cloud" "OpenStack" "vSphere" "VCD" "Static" "EKS" -}!} {!{- $criNames := slice "Containerd" -}!} {!{- $kubernetesVersions := slice "1.26" "1.27" "1.28" "1.29" "1.30" "Automatic" -}!} diff --git a/.github/workflows/e2e-abort-vclouddirector.yml b/.github/workflows/e2e-abort-vcd.yml similarity index 96% rename from .github/workflows/e2e-abort-vclouddirector.yml rename to .github/workflows/e2e-abort-vcd.yml index 0aee224767..b676f769d8 100644 --- a/.github/workflows/e2e-abort-vclouddirector.yml +++ b/.github/workflows/e2e-abort-vcd.yml @@ -3,7 +3,7 @@ # # -name: 'destroy cluster: vCloudDirector' +name: 'destroy cluster: VCD' on: workflow_dispatch: inputs: @@ -74,10 +74,10 @@ jobs: # run_containerd_1_26: - name: "destroy cluster: vCloudDirector, Containerd, Kubernetes 1.26" + name: "destroy cluster: VCD, Containerd, Kubernetes 1.26" if: ${{ github.event.inputs.cri == 'containerd' && github.event.inputs.k8s_version == '1.26' && github.event.inputs.layout == 'Standard' }} env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.26" @@ -108,7 +108,7 @@ jobs: github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} retries: 3 script: | - const name = 'destroy cluster: vCloudDirector, Containerd, Kubernetes 1.26'; + const name = 'destroy cluster: VCD, Containerd, Kubernetes 1.26'; const ci = require('./.github/scripts/js/ci'); return await ci.updateCommentOnStart({github, context, core, name}) @@ -222,7 +222,7 @@ jobs: if: ${{ success() }} id: cleanup_cluster env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.26" @@ -337,7 +337,7 @@ jobs: retries: 3 script: | const statusConfig = 'job,separate'; - const name = 'destroy cluster: vCloudDirector, Containerd, 
Kubernetes 1.26'; + const name = 'destroy cluster: VCD, Containerd, Kubernetes 1.26'; const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); const jobContext = JSON.parse(process.env.JOB_CONTEXT); const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); @@ -358,10 +358,10 @@ jobs: # run_containerd_1_27: - name: "destroy cluster: vCloudDirector, Containerd, Kubernetes 1.27" + name: "destroy cluster: VCD, Containerd, Kubernetes 1.27" if: ${{ github.event.inputs.cri == 'containerd' && github.event.inputs.k8s_version == '1.27' && github.event.inputs.layout == 'Standard' }} env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.27" @@ -392,7 +392,7 @@ jobs: github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} retries: 3 script: | - const name = 'destroy cluster: vCloudDirector, Containerd, Kubernetes 1.27'; + const name = 'destroy cluster: VCD, Containerd, Kubernetes 1.27'; const ci = require('./.github/scripts/js/ci'); return await ci.updateCommentOnStart({github, context, core, name}) @@ -506,7 +506,7 @@ jobs: if: ${{ success() }} id: cleanup_cluster env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.27" @@ -621,7 +621,7 @@ jobs: retries: 3 script: | const statusConfig = 'job,separate'; - const name = 'destroy cluster: vCloudDirector, Containerd, Kubernetes 1.27'; + const name = 'destroy cluster: VCD, Containerd, Kubernetes 1.27'; const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); const jobContext = JSON.parse(process.env.JOB_CONTEXT); const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); @@ -642,10 +642,10 @@ jobs: # run_containerd_1_28: - name: "destroy cluster: vCloudDirector, Containerd, Kubernetes 1.28" + name: "destroy cluster: VCD, Containerd, Kubernetes 1.28" if: ${{ github.event.inputs.cri == 'containerd' && github.event.inputs.k8s_version == '1.28' && github.event.inputs.layout == 'Standard' }} env: - PROVIDER: vCloudDirector + PROVIDER: 
VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.28" @@ -676,7 +676,7 @@ jobs: github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} retries: 3 script: | - const name = 'destroy cluster: vCloudDirector, Containerd, Kubernetes 1.28'; + const name = 'destroy cluster: VCD, Containerd, Kubernetes 1.28'; const ci = require('./.github/scripts/js/ci'); return await ci.updateCommentOnStart({github, context, core, name}) @@ -790,7 +790,7 @@ jobs: if: ${{ success() }} id: cleanup_cluster env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.28" @@ -905,7 +905,7 @@ jobs: retries: 3 script: | const statusConfig = 'job,separate'; - const name = 'destroy cluster: vCloudDirector, Containerd, Kubernetes 1.28'; + const name = 'destroy cluster: VCD, Containerd, Kubernetes 1.28'; const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); const jobContext = JSON.parse(process.env.JOB_CONTEXT); const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); @@ -926,10 +926,10 @@ jobs: # run_containerd_1_29: - name: "destroy cluster: vCloudDirector, Containerd, Kubernetes 1.29" + name: "destroy cluster: VCD, Containerd, Kubernetes 1.29" if: ${{ github.event.inputs.cri == 'containerd' && github.event.inputs.k8s_version == '1.29' && github.event.inputs.layout == 'Standard' }} env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.29" @@ -960,7 +960,7 @@ jobs: github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} retries: 3 script: | - const name = 'destroy cluster: vCloudDirector, Containerd, Kubernetes 1.29'; + const name = 'destroy cluster: VCD, Containerd, Kubernetes 1.29'; const ci = require('./.github/scripts/js/ci'); return await ci.updateCommentOnStart({github, context, core, name}) @@ -1074,7 +1074,7 @@ jobs: if: ${{ success() }} id: cleanup_cluster env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.29" @@ -1189,7 +1189,7 @@ jobs: 
retries: 3 script: | const statusConfig = 'job,separate'; - const name = 'destroy cluster: vCloudDirector, Containerd, Kubernetes 1.29'; + const name = 'destroy cluster: VCD, Containerd, Kubernetes 1.29'; const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); const jobContext = JSON.parse(process.env.JOB_CONTEXT); const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); @@ -1210,10 +1210,10 @@ jobs: # run_containerd_1_30: - name: "destroy cluster: vCloudDirector, Containerd, Kubernetes 1.30" + name: "destroy cluster: VCD, Containerd, Kubernetes 1.30" if: ${{ github.event.inputs.cri == 'containerd' && github.event.inputs.k8s_version == '1.30' && github.event.inputs.layout == 'Standard' }} env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.30" @@ -1244,7 +1244,7 @@ jobs: github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} retries: 3 script: | - const name = 'destroy cluster: vCloudDirector, Containerd, Kubernetes 1.30'; + const name = 'destroy cluster: VCD, Containerd, Kubernetes 1.30'; const ci = require('./.github/scripts/js/ci'); return await ci.updateCommentOnStart({github, context, core, name}) @@ -1358,7 +1358,7 @@ jobs: if: ${{ success() }} id: cleanup_cluster env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.30" @@ -1473,7 +1473,7 @@ jobs: retries: 3 script: | const statusConfig = 'job,separate'; - const name = 'destroy cluster: vCloudDirector, Containerd, Kubernetes 1.30'; + const name = 'destroy cluster: VCD, Containerd, Kubernetes 1.30'; const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); const jobContext = JSON.parse(process.env.JOB_CONTEXT); const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); @@ -1494,10 +1494,10 @@ jobs: # run_containerd_automatic: - name: "destroy cluster: vCloudDirector, Containerd, Kubernetes Automatic" + name: "destroy cluster: VCD, Containerd, Kubernetes Automatic" if: ${{ github.event.inputs.cri == 
'containerd' && github.event.inputs.k8s_version == 'Automatic' && github.event.inputs.layout == 'Standard' }} env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "Automatic" @@ -1528,7 +1528,7 @@ jobs: github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} retries: 3 script: | - const name = 'destroy cluster: vCloudDirector, Containerd, Kubernetes Automatic'; + const name = 'destroy cluster: VCD, Containerd, Kubernetes Automatic'; const ci = require('./.github/scripts/js/ci'); return await ci.updateCommentOnStart({github, context, core, name}) @@ -1642,7 +1642,7 @@ jobs: if: ${{ success() }} id: cleanup_cluster env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "Automatic" @@ -1757,7 +1757,7 @@ jobs: retries: 3 script: | const statusConfig = 'job,separate'; - const name = 'destroy cluster: vCloudDirector, Containerd, Kubernetes Automatic'; + const name = 'destroy cluster: VCD, Containerd, Kubernetes Automatic'; const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); const jobContext = JSON.parse(process.env.JOB_CONTEXT); const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); @@ -1784,7 +1784,7 @@ jobs: runs-on: ubuntu-latest env: JOB_NAMES: | - {"run_containerd_1_26":"destroy cluster: vCloudDirector, Containerd, Kubernetes 1.26","run_containerd_1_27":"destroy cluster: vCloudDirector, Containerd, Kubernetes 1.27","run_containerd_1_28":"destroy cluster: vCloudDirector, Containerd, Kubernetes 1.28","run_containerd_1_29":"destroy cluster: vCloudDirector, Containerd, Kubernetes 1.29","run_containerd_1_30":"destroy cluster: vCloudDirector, Containerd, Kubernetes 1.30","run_containerd_automatic":"destroy cluster: vCloudDirector, Containerd, Kubernetes Automatic"} + {"run_containerd_1_26":"destroy cluster: VCD, Containerd, Kubernetes 1.26","run_containerd_1_27":"destroy cluster: VCD, Containerd, Kubernetes 1.27","run_containerd_1_28":"destroy cluster: VCD, Containerd, 
Kubernetes 1.28","run_containerd_1_29":"destroy cluster: VCD, Containerd, Kubernetes 1.29","run_containerd_1_30":"destroy cluster: VCD, Containerd, Kubernetes 1.30","run_containerd_automatic":"destroy cluster: VCD, Containerd, Kubernetes Automatic"} steps: # @@ -1806,7 +1806,7 @@ jobs: retries: 3 script: | const statusConfig = 'workflow,final,no-skipped,restore-separate'; - const name = 'destroy cluster: vCloudDirector'; + const name = 'destroy cluster: VCD'; const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); const jobContext = JSON.parse(process.env.JOB_CONTEXT); const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); diff --git a/.github/workflows/e2e-aws.yml b/.github/workflows/e2e-aws.yml index f556591388..f6dc5e38f4 100644 --- a/.github/workflows/e2e-aws.yml +++ b/.github/workflows/e2e-aws.yml @@ -466,7 +466,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -937,7 +937,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1408,7 +1408,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1879,7 +1879,7 @@ jobs: bastion_ip_file="" if [[ 
"${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2350,7 +2350,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2821,7 +2821,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi diff --git a/.github/workflows/e2e-azure.yml b/.github/workflows/e2e-azure.yml index a0590887d7..77078ab834 100644 --- a/.github/workflows/e2e-azure.yml +++ b/.github/workflows/e2e-azure.yml @@ -468,7 +468,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -947,7 +947,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then 
bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1426,7 +1426,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1905,7 +1905,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2384,7 +2384,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2863,7 +2863,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi diff --git a/.github/workflows/e2e-daily.yml b/.github/workflows/e2e-daily.yml index 55b6104c83..1233084e9f 100644 --- a/.github/workflows/e2e-daily.yml +++ b/.github/workflows/e2e-daily.yml @@ -2787,8 +2787,8 @@ jobs: # # - run_vclouddirector_containerd_1_27: - name: "vCloudDirector, Containerd, Kubernetes 1.27" + run_vcd_containerd_1_27: + name: "VCD, Containerd, Kubernetes 1.27" needs: - git_info 
outputs: @@ -2797,12 +2797,12 @@ jobs: run_id: ${{ github.run_id }} # need for find state in artifact cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: "vclouddirector;Standard;containerd;1.27" + ran_for: "vcd;Standard;containerd;1.27" failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} issue_number: ${{ inputs.issue_number }} install_image_path: ${{ steps.setup.outputs.install-image-path }} env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.27" @@ -2990,11 +2990,11 @@ jobs: echo '::echo::off' - - name: "Run e2e test: vCloudDirector/Containerd/1.27" + - name: "Run e2e test: VCD/Containerd/1.27" id: e2e_test_run timeout-minutes: 80 env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.27" @@ -3069,7 +3069,7 @@ jobs: id: cleanup_cluster timeout-minutes: 60 env: - PROVIDER: vCloudDirector + PROVIDER: VCD CRI: Containerd LAYOUT: Standard KUBERNETES_VERSION: "1.27" @@ -3143,7 +3143,7 @@ jobs: if: ${{ failure() }} uses: actions/upload-artifact@v3.1.2 with: - name: failed_cluster_state_vclouddirector_containerd_1_27 + name: failed_cluster_state_vcd_containerd_1_27 path: | ${{ steps.setup.outputs.tmp-dir-path}}/dhctl ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate @@ -3153,7 +3153,7 @@ jobs: if: ${{ steps.setup.outputs.dhctl-log-file }} uses: actions/upload-artifact@v3.1.2 with: - name: test_output_vclouddirector_containerd_1_27 + name: test_output_vcd_containerd_1_27 path: | ${{ steps.setup.outputs.dhctl-log-file}}* ${{ steps.setup.outputs.tmp-dir-path}}/logs @@ -3206,7 +3206,7 @@ jobs: "cri": "Containerd", "kube_version": "1.27", "layout": "Standard", - "provider": "vCloudDirector", + "provider": "VCD", "trigger": "CloudLayoutTestFailed", "severity_level": 7 @@ -3682,7 +3682,7 @@ jobs: send_alert_about_workflow_problem: name: Send alert about workflow problem runs-on: ubuntu-latest - needs: 
["skip_tests_repos","git_info","run_aws_containerd_1_27","run_azure_containerd_1_27","run_gcp_containerd_1_27","run_yandex_cloud_containerd_1_27","run_openstack_containerd_1_27","run_vsphere_containerd_1_27","run_vclouddirector_containerd_1_27","run_static_containerd_1_27"] + needs: ["skip_tests_repos","git_info","run_aws_containerd_1_27","run_azure_containerd_1_27","run_gcp_containerd_1_27","run_yandex_cloud_containerd_1_27","run_openstack_containerd_1_27","run_vsphere_containerd_1_27","run_vcd_containerd_1_27","run_static_containerd_1_27"] if: ${{ failure() }} steps: diff --git a/.github/workflows/e2e-eks.yml b/.github/workflows/e2e-eks.yml index 13b11e70a6..4bcbee710c 100644 --- a/.github/workflows/e2e-eks.yml +++ b/.github/workflows/e2e-eks.yml @@ -474,7 +474,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -988,7 +988,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1502,7 +1502,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2016,7 +2016,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then 
bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2530,7 +2530,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -3044,7 +3044,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi diff --git a/.github/workflows/e2e-gcp.yml b/.github/workflows/e2e-gcp.yml index 349e5e8ebc..0a1c995da7 100644 --- a/.github/workflows/e2e-gcp.yml +++ b/.github/workflows/e2e-gcp.yml @@ -465,7 +465,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -932,7 +932,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1399,7 +1399,7 @@ jobs: 
bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1866,7 +1866,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2333,7 +2333,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2800,7 +2800,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi diff --git a/.github/workflows/e2e-openstack.yml b/.github/workflows/e2e-openstack.yml index 34b7298f32..0ca5139e6e 100644 --- a/.github/workflows/e2e-openstack.yml +++ b/.github/workflows/e2e-openstack.yml @@ -465,7 +465,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then 
bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -932,7 +932,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1399,7 +1399,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1866,7 +1866,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2333,7 +2333,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2800,7 +2800,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi diff --git 
a/.github/workflows/e2e-static.yml b/.github/workflows/e2e-static.yml index 0a1c64a128..e8b54aa77c 100644 --- a/.github/workflows/e2e-static.yml +++ b/.github/workflows/e2e-static.yml @@ -465,7 +465,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -932,7 +932,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1399,7 +1399,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1866,7 +1866,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2333,7 +2333,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then 
bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2800,7 +2800,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi diff --git a/.github/workflows/e2e-vcd.yml b/.github/workflows/e2e-vcd.yml index d3dee71454..6cf078256e 100644 --- a/.github/workflows/e2e-vcd.yml +++ b/.github/workflows/e2e-vcd.yml @@ -206,14 +206,14 @@ jobs: run_id: ${{ github.run_id }} # need for find state in artifact cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: "vcd;WithoutNAT;containerd;1.26" + ran_for: "vcd;Standard;containerd;1.26" failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} issue_number: ${{ inputs.issue_number }} install_image_path: ${{ steps.setup.outputs.install-image-path }} env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.26" EVENT_LABEL: ${{ github.event.label.name }} runs-on: [self-hosted, e2e-common] @@ -419,7 +419,7 @@ jobs: env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.26" LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} @@ -429,6 +429,11 @@ jobs: DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} # + LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} + LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} + LAYOUT_STATIC_BASTION_IP: 80.249.129.56 + LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} + LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} COMMENT_ID: ${{ inputs.comment_id }} GITHUB_API_SERVER: ${{ github.api_url 
}} REPOSITORY: ${{ github.repository }} @@ -464,7 +469,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -485,6 +490,11 @@ jobs: -e LAYOUT=${LAYOUT:-not_provided} \ -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ + -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ + -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ + -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ + -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ -e USER_RUNNER_ID=${user_runner_id} \ -v $(pwd)/testing:/deckhouse/testing \ -v $(pwd)/release.yaml:/deckhouse/release.yaml \ @@ -522,7 +532,7 @@ jobs: env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.26" LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} @@ -531,6 +541,11 @@ jobs: INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} # + LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} + LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} + LAYOUT_STATIC_BASTION_IP: 80.249.129.56 + LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} + LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} COMMENT_ID: ${{ inputs.comment_id }} GITHUB_API_SERVER: ${{ github.api_url }} REPOSITORY: ${{ github.repository }} @@ -569,6 +584,11 @@ jobs: -e LAYOUT=${LAYOUT:-not_provided} \ -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} 
\ + -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ + -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ + -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ + -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ -e USER_RUNNER_ID=${user_runner_id} \ -v $(pwd)/testing:/deckhouse/testing \ -v $(pwd)/release.yaml:/deckhouse/release.yaml \ @@ -669,14 +689,14 @@ jobs: run_id: ${{ github.run_id }} # need for find state in artifact cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: "vcd;WithoutNAT;containerd;1.27" + ran_for: "vcd;Standard;containerd;1.27" failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} issue_number: ${{ inputs.issue_number }} install_image_path: ${{ steps.setup.outputs.install-image-path }} env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.27" EVENT_LABEL: ${{ github.event.label.name }} runs-on: [self-hosted, e2e-common] @@ -882,7 +902,7 @@ jobs: env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.27" LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} @@ -892,6 +912,11 @@ jobs: DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} # + LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} + LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} + LAYOUT_STATIC_BASTION_IP: 80.249.129.56 + LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} + LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} COMMENT_ID: ${{ inputs.comment_id }} GITHUB_API_SERVER: ${{ github.api_url }} REPOSITORY: ${{ github.repository }} @@ -927,7 +952,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ 
"${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -948,6 +973,11 @@ jobs: -e LAYOUT=${LAYOUT:-not_provided} \ -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ + -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ + -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ + -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ + -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ -e USER_RUNNER_ID=${user_runner_id} \ -v $(pwd)/testing:/deckhouse/testing \ -v $(pwd)/release.yaml:/deckhouse/release.yaml \ @@ -985,7 +1015,7 @@ jobs: env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.27" LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} @@ -994,6 +1024,11 @@ jobs: INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} # + LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} + LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} + LAYOUT_STATIC_BASTION_IP: 80.249.129.56 + LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} + LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} COMMENT_ID: ${{ inputs.comment_id }} GITHUB_API_SERVER: ${{ github.api_url }} REPOSITORY: ${{ github.repository }} @@ -1032,6 +1067,11 @@ jobs: -e LAYOUT=${LAYOUT:-not_provided} \ -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ + -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ + -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ + -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ + -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ -e USER_RUNNER_ID=${user_runner_id} \ -v 
$(pwd)/testing:/deckhouse/testing \ -v $(pwd)/release.yaml:/deckhouse/release.yaml \ @@ -1132,14 +1172,14 @@ jobs: run_id: ${{ github.run_id }} # need for find state in artifact cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: "vcd;WithoutNAT;containerd;1.28" + ran_for: "vcd;Standard;containerd;1.28" failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} issue_number: ${{ inputs.issue_number }} install_image_path: ${{ steps.setup.outputs.install-image-path }} env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.28" EVENT_LABEL: ${{ github.event.label.name }} runs-on: [self-hosted, e2e-common] @@ -1345,7 +1385,7 @@ jobs: env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.28" LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} @@ -1355,6 +1395,11 @@ jobs: DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} # + LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} + LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} + LAYOUT_STATIC_BASTION_IP: 80.249.129.56 + LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} + LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} COMMENT_ID: ${{ inputs.comment_id }} GITHUB_API_SERVER: ${{ github.api_url }} REPOSITORY: ${{ github.repository }} @@ -1390,7 +1435,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1411,6 +1456,11 @@ jobs: -e LAYOUT=${LAYOUT:-not_provided} \ -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ -e 
SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ + -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ + -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ + -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ + -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ -e USER_RUNNER_ID=${user_runner_id} \ -v $(pwd)/testing:/deckhouse/testing \ -v $(pwd)/release.yaml:/deckhouse/release.yaml \ @@ -1448,7 +1498,7 @@ jobs: env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.28" LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} @@ -1457,6 +1507,11 @@ jobs: INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} # + LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} + LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} + LAYOUT_STATIC_BASTION_IP: 80.249.129.56 + LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} + LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} COMMENT_ID: ${{ inputs.comment_id }} GITHUB_API_SERVER: ${{ github.api_url }} REPOSITORY: ${{ github.repository }} @@ -1495,6 +1550,11 @@ jobs: -e LAYOUT=${LAYOUT:-not_provided} \ -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ + -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ + -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ + -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ + -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ -e USER_RUNNER_ID=${user_runner_id} \ -v $(pwd)/testing:/deckhouse/testing \ -v $(pwd)/release.yaml:/deckhouse/release.yaml \ @@ -1595,14 +1655,14 @@ jobs: run_id: ${{ github.run_id }} # need for find state in artifact cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: 
"vcd;WithoutNAT;containerd;1.29" + ran_for: "vcd;Standard;containerd;1.29" failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} issue_number: ${{ inputs.issue_number }} install_image_path: ${{ steps.setup.outputs.install-image-path }} env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.29" EVENT_LABEL: ${{ github.event.label.name }} runs-on: [self-hosted, e2e-common] @@ -1808,7 +1868,7 @@ jobs: env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.29" LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} @@ -1818,6 +1878,11 @@ jobs: DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} # + LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} + LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} + LAYOUT_STATIC_BASTION_IP: 80.249.129.56 + LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} + LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} COMMENT_ID: ${{ inputs.comment_id }} GITHUB_API_SERVER: ${{ github.api_url }} REPOSITORY: ${{ github.repository }} @@ -1853,7 +1918,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1874,6 +1939,11 @@ jobs: -e LAYOUT=${LAYOUT:-not_provided} \ -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ + -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ + -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ + -e 
LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ + -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ -e USER_RUNNER_ID=${user_runner_id} \ -v $(pwd)/testing:/deckhouse/testing \ -v $(pwd)/release.yaml:/deckhouse/release.yaml \ @@ -1911,7 +1981,7 @@ jobs: env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.29" LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} @@ -1920,6 +1990,11 @@ jobs: INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} # + LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} + LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} + LAYOUT_STATIC_BASTION_IP: 80.249.129.56 + LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} + LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} COMMENT_ID: ${{ inputs.comment_id }} GITHUB_API_SERVER: ${{ github.api_url }} REPOSITORY: ${{ github.repository }} @@ -1958,6 +2033,11 @@ jobs: -e LAYOUT=${LAYOUT:-not_provided} \ -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ + -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ + -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ + -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ + -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ -e USER_RUNNER_ID=${user_runner_id} \ -v $(pwd)/testing:/deckhouse/testing \ -v $(pwd)/release.yaml:/deckhouse/release.yaml \ @@ -2058,14 +2138,14 @@ jobs: run_id: ${{ github.run_id }} # need for find state in artifact cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: "vcd;WithoutNAT;containerd;1.30" + ran_for: "vcd;Standard;containerd;1.30" failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} issue_number: ${{ inputs.issue_number }} install_image_path: ${{ 
steps.setup.outputs.install-image-path }} env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.30" EVENT_LABEL: ${{ github.event.label.name }} runs-on: [self-hosted, e2e-common] @@ -2271,7 +2351,7 @@ jobs: env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.30" LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} @@ -2281,6 +2361,11 @@ jobs: DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} # + LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} + LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} + LAYOUT_STATIC_BASTION_IP: 80.249.129.56 + LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} + LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} COMMENT_ID: ${{ inputs.comment_id }} GITHUB_API_SERVER: ${{ github.api_url }} REPOSITORY: ${{ github.repository }} @@ -2316,7 +2401,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2337,6 +2422,11 @@ jobs: -e LAYOUT=${LAYOUT:-not_provided} \ -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ + -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ + -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ + -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ + -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ -e USER_RUNNER_ID=${user_runner_id} \ -v $(pwd)/testing:/deckhouse/testing \ -v $(pwd)/release.yaml:/deckhouse/release.yaml \ @@ -2374,7 +2464,7 @@ jobs: env: 
PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "1.30" LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} @@ -2383,6 +2473,11 @@ jobs: INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} # + LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} + LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} + LAYOUT_STATIC_BASTION_IP: 80.249.129.56 + LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} + LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} COMMENT_ID: ${{ inputs.comment_id }} GITHUB_API_SERVER: ${{ github.api_url }} REPOSITORY: ${{ github.repository }} @@ -2421,6 +2516,11 @@ jobs: -e LAYOUT=${LAYOUT:-not_provided} \ -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ + -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ + -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ + -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ + -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ -e USER_RUNNER_ID=${user_runner_id} \ -v $(pwd)/testing:/deckhouse/testing \ -v $(pwd)/release.yaml:/deckhouse/release.yaml \ @@ -2521,14 +2621,14 @@ jobs: run_id: ${{ github.run_id }} # need for find state in artifact cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: "vcd;WithoutNAT;containerd;Automatic" + ran_for: "vcd;Standard;containerd;Automatic" failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} issue_number: ${{ inputs.issue_number }} install_image_path: ${{ steps.setup.outputs.install-image-path }} env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "Automatic" EVENT_LABEL: ${{ github.event.label.name }} runs-on: [self-hosted, e2e-common] @@ -2734,7 +2834,7 @@ jobs: env: 
PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "Automatic" LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} @@ -2744,6 +2844,11 @@ jobs: DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} # + LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} + LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} + LAYOUT_STATIC_BASTION_IP: 80.249.129.56 + LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} + LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} COMMENT_ID: ${{ inputs.comment_id }} GITHUB_API_SERVER: ${{ github.api_url }} REPOSITORY: ${{ github.repository }} @@ -2779,7 +2884,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2800,6 +2905,11 @@ jobs: -e LAYOUT=${LAYOUT:-not_provided} \ -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ + -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ + -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ + -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ + -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ -e USER_RUNNER_ID=${user_runner_id} \ -v $(pwd)/testing:/deckhouse/testing \ -v $(pwd)/release.yaml:/deckhouse/release.yaml \ @@ -2837,7 +2947,7 @@ jobs: env: PROVIDER: VCD CRI: Containerd - LAYOUT: WithoutNAT + LAYOUT: Standard KUBERNETES_VERSION: "Automatic" LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} @@ -2846,6 +2956,11 @@ jobs: 
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} # + LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} + LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} + LAYOUT_STATIC_BASTION_IP: 80.249.129.56 + LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} + LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} COMMENT_ID: ${{ inputs.comment_id }} GITHUB_API_SERVER: ${{ github.api_url }} REPOSITORY: ${{ github.repository }} @@ -2884,6 +2999,11 @@ jobs: -e LAYOUT=${LAYOUT:-not_provided} \ -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ + -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ + -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ + -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ + -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ + -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ -e USER_RUNNER_ID=${user_runner_id} \ -v $(pwd)/testing:/deckhouse/testing \ -v $(pwd)/release.yaml:/deckhouse/release.yaml \ diff --git a/.github/workflows/e2e-vclouddirector.yml b/.github/workflows/e2e-vclouddirector.yml deleted file mode 100644 index 0eb94054e7..0000000000 --- a/.github/workflows/e2e-vclouddirector.yml +++ /dev/null @@ -1,3158 +0,0 @@ -# -# THIS FILE IS GENERATED, PLEASE DO NOT EDIT. -# - -# Copyright 2022 Flant JSC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# -name: 'e2e: vCloudDirector' -on: - workflow_dispatch: - inputs: - issue_id: - description: 'ID of issue where label was set' - required: false - issue_number: - description: 'Number of issue where label was set' - required: false - comment_id: - description: 'ID of comment in issue where to put workflow run status' - required: false - ci_commit_ref_name: - description: 'Git ref name for image tags' - required: false - pull_request_ref: - description: 'Git ref for checkout PR sources' - required: false - pull_request_sha: - description: 'Git SHA for restoring artifacts from cache' - required: false - pull_request_head_label: - description: 'Head label of pull request. e.g. my_repo:my_feature_branch' - required: false - cri: - description: 'A comma-separated list of cri to test. Available: Containerd.' - required: false - ver: - description: 'A comma-separated list of versions to test. Available: from 1.24 to 1.28.' - required: false - initial_ref_slug: - description: 'An image tag to install first and then switch to workflow context ref' - required: false -env: - - # - WERF_CHANNEL: "ea" - WERF_ENV: "FE" - TEST_TIMEOUT: "15m" - # Use fixed string 'sys/deckhouse-oss' for repo name. ${CI_PROJECT_PATH} is not available here in GitHub. - DEV_REGISTRY_PATH: "${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/sys/deckhouse-oss" - # Registry for additional repositories used for testing Github Actions workflows. - GHA_TEST_REGISTRY_PATH: "ghcr.io/${{ github.repository }}" - # - -# Note: no concurrency section for e2e workflows. -# Usually you run e2e and wait until it ends. 
- -jobs: - started_at: - name: Save start timestamp - outputs: - started_at: ${{ steps.started_at.outputs.started_at }} - runs-on: "ubuntu-latest" - steps: - - # - - name: Job started timestamp - id: started_at - run: | - unixTimestamp=$(date +%s) - echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT - # - - - # - - git_info: - name: Get git info - runs-on: ubuntu-latest - outputs: - ci_commit_tag: ${{ steps.git_info.outputs.ci_commit_tag }} - ci_commit_branch: ${{ steps.git_info.outputs.ci_commit_branch }} - ci_commit_ref_name: ${{ steps.git_info.outputs.ci_commit_ref_name }} - ci_commit_ref_slug: ${{ steps.git_info.outputs.ci_commit_ref_slug }} - ref_full: ${{ steps.git_info.outputs.ref_full }} - github_sha: ${{ steps.git_info.outputs.github_sha }} - pr_number: ${{ steps.git_info.outputs.pr_number }} - # Skip the CI for automation PRs, e.g. changelog - if: ${{ github.event.pull_request.user.login != 'deckhouse-BOaTswain' }} - steps: - - id: git_info - name: Get tag name and SHA - uses: actions/github-script@v6.4.1 - with: - script: | - const { GITHUB_REF_TYPE, GITHUB_REF_NAME, GITHUB_REF } = process.env - - let refSlug = '' - let refName = '' - let refFull = '' - let githubBranch = '' - let githubTag = '' - let githubSHA = '' - let prNumber = '' - if (context.eventName === "workflow_dispatch" && context.payload.inputs && context.payload.inputs.pull_request_ref) { - // Trigger: workflow_dispatch with pull_request_ref. - // Extract pull request number from 'refs/pull//merge' - prNumber = context.payload.inputs.pull_request_ref.replace('refs/pull/', '').replace('/merge', '').replace('/head', '') - - refSlug = `pr${prNumber}` - refName = context.payload.inputs.ci_commit_ref_name - refFull = context.payload.inputs.pull_request_ref - githubBranch = refName - githubSHA = context.payload.inputs.pull_request_sha - core.info(`workflow_dispatch event: set git info from inputs. 
inputs: ${JSON.stringify(context.payload.inputs)}`) - } else if (context.eventName === "pull_request" || context.eventName === "pull_request_target" ) { - // For PRs from forks, tag images with `prXXX` to avoid clashes between branches. - const targetRepo = context.payload.repository.full_name; - const prRepo = context.payload.pull_request.head.repo.full_name - const prRef = context.payload.pull_request.head.ref - - refSlug = `pr${context.issue.number}`; - refName = (prRepo === targetRepo) ? prRef : refSlug; - refFull = `refs/pull/${context.issue.number}/head` - githubBranch = refName - githubSHA = context.payload.pull_request.head.sha - core.info(`pull request event: set git info from pull_request.head. pr:${prRepo}:${prRef} target:${targetRepo}:${context.ref}`) - prNumber = context.issue.number - } else { - // Other triggers: workflow_dispatch without pull_request_ref, schedule, push... - // refName is 'main' or tag name, so slugification is not necessary. - refSlug = GITHUB_REF_NAME - refName = GITHUB_REF_NAME - refFull = GITHUB_REF - githubTag = GITHUB_REF_TYPE == "tag" ? refName : "" - githubBranch = GITHUB_REF_TYPE == "branch" ? 
refName : "" - githubSHA = context.sha - core.info(`${context.eventName} event: set git info from context: ${JSON.stringify({GITHUB_REF_NAME, GITHUB_REF_TYPE, sha: context.sha })}`) - } - - core.setCommandEcho(true) - core.setOutput('ci_commit_ref_slug', refSlug) - core.setOutput('ci_commit_ref_name', refName) - core.setOutput(`ci_commit_tag`, githubTag) - core.setOutput(`ci_commit_branch`, githubBranch) - core.setOutput(`ref_full`, refFull) - core.setOutput('github_sha', githubSHA) - core.setOutput('pr_number', prNumber) - core.setCommandEcho(false) - - # - - # - check_e2e_labels: - name: Check e2e labels - runs-on: ubuntu-latest - outputs: - - run_containerd_1_26: ${{ steps.check.outputs.run_containerd_1_26 }} - run_containerd_1_27: ${{ steps.check.outputs.run_containerd_1_27 }} - run_containerd_1_28: ${{ steps.check.outputs.run_containerd_1_28 }} - run_containerd_1_29: ${{ steps.check.outputs.run_containerd_1_29 }} - run_containerd_1_30: ${{ steps.check.outputs.run_containerd_1_30 }} - run_containerd_automatic: ${{ steps.check.outputs.run_containerd_automatic }} - steps: - - # - - name: Checkout sources - uses: actions/checkout@v3.5.2 - - # - - name: Check e2e labels - id: check - uses: actions/github-script@v6.4.1 - with: - script: | - const provider = 'vclouddirector'; - const kubernetesDefaultVersion = '1.27'; - - const ci = require('./.github/scripts/js/ci'); - return await ci.checkE2ELabels({github, context, core, provider, kubernetesDefaultVersion}); - # - - - # - run_containerd_1_26: - name: "e2e: vCloudDirector, Containerd, Kubernetes 1.26" - needs: - - check_e2e_labels - - git_info - if: needs.check_e2e_labels.outputs.run_containerd_1_26 == 'true' - outputs: - ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }} - ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }} - run_id: ${{ github.run_id }} - # need for find state in artifact - 
cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: "vclouddirector;Standard;containerd;1.26" - failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} - issue_number: ${{ inputs.issue_number }} - install_image_path: ${{ steps.setup.outputs.install-image-path }} - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.26" - EVENT_LABEL: ${{ github.event.label.name }} - runs-on: [self-hosted, e2e-common] - steps: - - # - - name: Job started timestamp - id: started_at - run: | - unixTimestamp=$(date +%s) - echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT - # - - # - - name: Checkout sources - uses: actions/checkout@v3.5.2 - with: - ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }} - fetch-depth: 0 - # - # - - name: Update comment on start - if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const name = 'e2e: vCloudDirector, Containerd, Kubernetes 1.26'; - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnStart({github, context, core, name}) - - # - - - # - - name: Check dev registry credentials - id: check_dev_registry - env: - HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}} - run: | - if [[ -n $HOST ]]; then - echo "has_credentials=true" >> $GITHUB_OUTPUT - echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT - fi - - name: Login to dev registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }} - with: - registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }} - username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }} - password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }} - logout: false - # - - # - - name: Check rw registry credentials - id: check_rw_registry - 
env: - HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} - run: | - if [[ -n $HOST ]]; then - echo "has_credentials=true" >> $GITHUB_OUTPUT - echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT - fi - - name: Login to rw registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }} - with: - registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }} - username: ${{ secrets.DECKHOUSE_REGISTRY_USER }} - password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }} - logout: false - - name: Login to Github Container Registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }} - with: - registry: ghcr.io - username: ${{ secrets.GHCR_IO_REGISTRY_USER }} - password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }} - logout: false - # - - # - - name: Install werf CLI - uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e - with: - channel: ${{env.WERF_CHANNEL}} - # - - - name: Setup - id: setup - env: - DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} - CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}} - CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}} - CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}} - REF_FULL: ${{needs.git_info.outputs.ref_full}} - INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }} - MANUAL_RUN: "true" - run: | - # Calculate unique prefix for e2e test. - # GITHUB_RUN_ID is a unique number for each workflow run. - # GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository. - # Add CRI and KUBERNETES_VERSION to create unique directory for each job. - # CRI and PROVIDER values are trimmed to reduce prefix length. 
- if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then - KUBERNETES_VERSION_SUF="auto" - else - KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION} - fi - DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}") - if [[ "${MANUAL_RUN}" == "false" ]] ; then - # for jobs which run multiple providers concurrency (daily e2e, for example) - # add provider suffix to prevent "directory already exists" error - DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)" - fi - # converts to DNS-like (all letters in lower case and replace all dots to dash) - # because it prefix will use for k8s resources names (nodes, for example) - DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]') - - # Create tmppath for test script. - TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX} - if [[ -d "${TMP_DIR_PATH}" ]] ; then - echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!" - ls -la ${TMP_DIR_PATH} - exit 1 - else - echo "Create temporary dir for job: ${TMP_DIR_PATH}." - mkdir -p "${TMP_DIR_PATH}" - fi - - ## Source: ci_templates/build.yml - - # Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'. - REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-} - if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then - # REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'. - REPO_SUFFIX= - fi - - # Use dev-registry for Git branches. - BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" - # Use rw-registry for Git tags. - SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse" - - if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then - # DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo. - # Use dev-regisry for branches and Github Container Registry for semver tags. - BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" - SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}" - fi - - # Prepare initial image tag for deploy/deckhouse to test switching from previous release. 
- INITIAL_IMAGE_TAG= - if [[ -n ${INITIAL_REF_SLUG} ]] ; then - INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} - fi - - # Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh). - # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. - # Use it as image tag. Add suffix to not overlap with PRs in main repo. - IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} - - INSTALL_IMAGE_NAME= - if [[ -n ${CI_COMMIT_BRANCH} ]]; then - # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. - INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG} - fi - if [[ -n ${CI_COMMIT_TAG} ]] ; then - REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe - INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG} - fi - if [[ -n ${INITIAL_REF_SLUG} ]] ; then - INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG} - git fetch origin ${INITIAL_REF_SLUG} - git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts - fi - SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]') - echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}" - - # Print image name in uppercase to prevent hiding non-secret registry host stored in secret. - echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'." 
- docker pull "${INSTALL_IMAGE_NAME}" - - IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}" - - echo '::echo::on' - echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT - echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT - echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT - echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT - echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT - - echo '::echo::off' - - - name: "Run e2e test: vCloudDirector/Containerd/1.26" - id: e2e_test_run - timeout-minutes: 80 - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.26" - LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} - LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} - TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} - PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} - INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} - DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} - INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} - # - LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} - LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} - LAYOUT_STATIC_BASTION_IP: 80.249.129.56 - LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} - LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} - COMMENT_ID: ${{ inputs.comment_id }} - GITHUB_API_SERVER: ${{ github.api_url }} - REPOSITORY: ${{ github.repository }} - DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} - GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - run: | - echo "Execute 'script.sh run-test' via 'docker run', using environment: - INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} - DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} - INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} - PREFIX=${PREFIX} - PROVIDER=${PROVIDER} - CRI=${CRI} - LAYOUT=${LAYOUT} - 
KUBERNETES_VERSION=${KUBERNETES_VERSION} - TMP_DIR_PATH=${TMP_DIR_PATH} - " - - ls -lh $(pwd)/testing - - dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "DHCTL log file: $dhctl_log_file" - - user_runner_id=$(id -u):$(id -g) - echo "user_runner_id $user_runner_id" - echo "Start waiting ssh connection string script" - comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}" - echo "Full comment url for updating ${comment_url}" - - ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT - - bastion_ip_file="" - if [[ "${PROVIDER}" == "Static" ]] ; then - bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then - bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - fi - - echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT - - $(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 & - - - docker run --rm \ - -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ - -e PREFIX=${PREFIX} \ - -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ - -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ - -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ - -e CRI=${CRI} \ - -e PROVIDER=${PROVIDER:-not_provided} \ - -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ - -e LAYOUT=${LAYOUT:-not_provided} \ - -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ - -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ - -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ - -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ - -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ - -e 
LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ - -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ - -e USER_RUNNER_ID=${user_runner_id} \ - -v $(pwd)/testing:/deckhouse/testing \ - -v $(pwd)/release.yaml:/deckhouse/release.yaml \ - -v ${TMP_DIR_PATH}:/tmp \ - -w /deckhouse \ - ${INSTALL_IMAGE_NAME} \ - bash /deckhouse/testing/cloud_layouts/script.sh run-test - - # - - name: Read connection string - if: ${{ failure() || cancelled() }} - id: check_stay_failed_cluster - uses: actions/github-script@v6.4.1 - env: - SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }} - SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }} - with: - # it sets `should_run` output var if e2e/failed/stay label - script: | - const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup'); - await e2e_cleanup.readConnectionScript({core, context, github}); - - - name: Label pr if e2e failed - if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }} - uses: actions-ecosystem/action-add-labels@v1 - with: - github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }} - number: ${{ needs.git_info.outputs.pr_number }} - labels: "e2e/cluster/failed" - - - name: Cleanup bootstrapped cluster - if: success() - id: cleanup_cluster - timeout-minutes: 60 - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.26" - LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} - LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} - TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} - PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} - INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} - DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} - # - LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} - LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} - LAYOUT_STATIC_BASTION_IP: 80.249.129.56 - LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} - LAYOUT_VCD_ORG: ${{ 
secrets.LAYOUT_VCD_ORG }} - COMMENT_ID: ${{ inputs.comment_id }} - GITHUB_API_SERVER: ${{ github.api_url }} - REPOSITORY: ${{ github.repository }} - DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} - GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - run: | - echo "Execute 'script.sh cleanup' via 'docker run', using environment: - INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} - DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} - INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} - PREFIX=${PREFIX} - PROVIDER=${PROVIDER} - CRI=${CRI} - LAYOUT=${LAYOUT} - KUBERNETES_VERSION=${KUBERNETES_VERSION} - TMP_DIR_PATH=${TMP_DIR_PATH} - " - - ls -lh $(pwd)/testing - - dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "DHCTL log file: $dhctl_log_file" - - user_runner_id=$(id -u):$(id -g) - echo "user_runner_id $user_runner_id" - - docker run --rm \ - -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ - -e PREFIX=${PREFIX} \ - -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ - -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ - -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ - -e CRI=${CRI} \ - -e PROVIDER=${PROVIDER:-not_provided} \ - -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ - -e LAYOUT=${LAYOUT:-not_provided} \ - -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ - -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ - -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ - -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ - -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ - -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ - -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ - -e USER_RUNNER_ID=${user_runner_id} \ - -v $(pwd)/testing:/deckhouse/testing \ - -v $(pwd)/release.yaml:/deckhouse/release.yaml \ - -v ${TMP_DIR_PATH}:/tmp \ - -w /deckhouse \ - ${INSTALL_IMAGE_NAME} \ - bash /deckhouse/testing/cloud_layouts/script.sh cleanup - - # - - - name: Save dhctl state - id: save_failed_cluster_state - if: ${{ 
failure() }} - uses: actions/upload-artifact@v3.1.2 - with: - name: failed_cluster_state_vclouddirector_containerd_1_26 - path: | - ${{ steps.setup.outputs.tmp-dir-path}}/dhctl - ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate - ${{ steps.setup.outputs.tmp-dir-path}}/logs - - - name: Save test results - if: ${{ steps.setup.outputs.dhctl-log-file }} - uses: actions/upload-artifact@v3.1.2 - with: - name: test_output_vclouddirector_containerd_1_26 - path: | - ${{ steps.setup.outputs.dhctl-log-file}}* - ${{ steps.setup.outputs.tmp-dir-path}}/logs - testing/cloud_layouts/ - !testing/cloud_layouts/**/sshkey - - - name: Cleanup temp directory - if: always() - env: - TMPPATH: ${{ steps.setup.outputs.tmppath}} - run: | - echo "Remove temporary directory '${TMPPATH}' ..." - if [[ -d "${TMPPATH}" && ${#TMPPATH} > 1 ]] ; then - rm -rf "${TMPPATH}" - else - echo Not a directory. - fi - if [ -n $USER_RUNNER_ID ]; then - echo "Fix temp directories owner..." - chown -R $USER_RUNNER_ID "$(pwd)/testing" || true - chown -R $USER_RUNNER_ID "/deckhouse/testing" || true - chown -R $USER_RUNNER_ID /tmp || true - else - echo "Fix temp directories permissions..." 
- chmod -f -R 777 "$(pwd)/testing" || true - chmod -f -R 777 "/deckhouse/testing" || true - chmod -f -R 777 /tmp || true - fi - # - - name: Update comment on finish - id: update_comment_on_finish - if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - env: - NEEDS_CONTEXT: ${{ toJSON(needs) }} - JOB_CONTEXT: ${{ toJSON(job) }} - STEPS_CONTEXT: ${{ toJSON(steps) }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const statusConfig = 'job,separate'; - const name = 'e2e: vCloudDirector, Containerd, Kubernetes 1.26'; - const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); - const jobContext = JSON.parse(process.env.JOB_CONTEXT); - const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); - let jobNames = null - if (process.env.JOB_NAMES) { - jobNames = JSON.parse(process.env.JOB_NAMES); - } - - core.info(`needsContext: ${JSON.stringify(needsContext)}`); - core.info(`jobContext: ${JSON.stringify(jobContext)}`); - core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); - core.info(`jobNames: ${JSON.stringify(jobNames)}`); - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); - # - # - - # - run_containerd_1_27: - name: "e2e: vCloudDirector, Containerd, Kubernetes 1.27" - needs: - - check_e2e_labels - - git_info - if: needs.check_e2e_labels.outputs.run_containerd_1_27 == 'true' - outputs: - ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }} - ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }} - run_id: ${{ github.run_id }} - # need for find state in artifact - cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: "vclouddirector;Standard;containerd;1.27" - failed_cluster_stayed: ${{ 
steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} - issue_number: ${{ inputs.issue_number }} - install_image_path: ${{ steps.setup.outputs.install-image-path }} - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.27" - EVENT_LABEL: ${{ github.event.label.name }} - runs-on: [self-hosted, e2e-common] - steps: - - # - - name: Job started timestamp - id: started_at - run: | - unixTimestamp=$(date +%s) - echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT - # - - # - - name: Checkout sources - uses: actions/checkout@v3.5.2 - with: - ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }} - fetch-depth: 0 - # - # - - name: Update comment on start - if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const name = 'e2e: vCloudDirector, Containerd, Kubernetes 1.27'; - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnStart({github, context, core, name}) - - # - - - # - - name: Check dev registry credentials - id: check_dev_registry - env: - HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}} - run: | - if [[ -n $HOST ]]; then - echo "has_credentials=true" >> $GITHUB_OUTPUT - echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT - fi - - name: Login to dev registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }} - with: - registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }} - username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }} - password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }} - logout: false - # - - # - - name: Check rw registry credentials - id: check_rw_registry - env: - HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} - run: | - if [[ -n $HOST ]]; then - echo "has_credentials=true" >> $GITHUB_OUTPUT - echo 
"web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT - fi - - name: Login to rw registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }} - with: - registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }} - username: ${{ secrets.DECKHOUSE_REGISTRY_USER }} - password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }} - logout: false - - name: Login to Github Container Registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }} - with: - registry: ghcr.io - username: ${{ secrets.GHCR_IO_REGISTRY_USER }} - password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }} - logout: false - # - - # - - name: Install werf CLI - uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e - with: - channel: ${{env.WERF_CHANNEL}} - # - - - name: Setup - id: setup - env: - DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} - CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}} - CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}} - CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}} - REF_FULL: ${{needs.git_info.outputs.ref_full}} - INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }} - MANUAL_RUN: "true" - run: | - # Calculate unique prefix for e2e test. - # GITHUB_RUN_ID is a unique number for each workflow run. - # GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository. - # Add CRI and KUBERNETES_VERSION to create unique directory for each job. - # CRI and PROVIDER values are trimmed to reduce prefix length. 
- if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then - KUBERNETES_VERSION_SUF="auto" - else - KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION} - fi - DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}") - if [[ "${MANUAL_RUN}" == "false" ]] ; then - # for jobs which run multiple providers concurrency (daily e2e, for example) - # add provider suffix to prevent "directory already exists" error - DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)" - fi - # converts to DNS-like (all letters in lower case and replace all dots to dash) - # because it prefix will use for k8s resources names (nodes, for example) - DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]') - - # Create tmppath for test script. - TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX} - if [[ -d "${TMP_DIR_PATH}" ]] ; then - echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!" - ls -la ${TMP_DIR_PATH} - exit 1 - else - echo "Create temporary dir for job: ${TMP_DIR_PATH}." - mkdir -p "${TMP_DIR_PATH}" - fi - - ## Source: ci_templates/build.yml - - # Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'. - REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-} - if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then - # REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'. - REPO_SUFFIX= - fi - - # Use dev-registry for Git branches. - BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" - # Use rw-registry for Git tags. - SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse" - - if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then - # DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo. - # Use dev-regisry for branches and Github Container Registry for semver tags. - BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" - SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}" - fi - - # Prepare initial image tag for deploy/deckhouse to test switching from previous release. 
- INITIAL_IMAGE_TAG= - if [[ -n ${INITIAL_REF_SLUG} ]] ; then - INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} - fi - - # Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh). - # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. - # Use it as image tag. Add suffix to not overlap with PRs in main repo. - IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} - - INSTALL_IMAGE_NAME= - if [[ -n ${CI_COMMIT_BRANCH} ]]; then - # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. - INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG} - fi - if [[ -n ${CI_COMMIT_TAG} ]] ; then - REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe - INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG} - fi - if [[ -n ${INITIAL_REF_SLUG} ]] ; then - INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG} - git fetch origin ${INITIAL_REF_SLUG} - git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts - fi - SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]') - echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}" - - # Print image name in uppercase to prevent hiding non-secret registry host stored in secret. - echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'." 
- docker pull "${INSTALL_IMAGE_NAME}" - - IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}" - - echo '::echo::on' - echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT - echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT - echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT - echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT - echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT - - echo '::echo::off' - - - name: "Run e2e test: vCloudDirector/Containerd/1.27" - id: e2e_test_run - timeout-minutes: 80 - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.27" - LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} - LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} - TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} - PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} - INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} - DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} - INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} - # - LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} - LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} - LAYOUT_STATIC_BASTION_IP: 80.249.129.56 - LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} - LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} - COMMENT_ID: ${{ inputs.comment_id }} - GITHUB_API_SERVER: ${{ github.api_url }} - REPOSITORY: ${{ github.repository }} - DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} - GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - run: | - echo "Execute 'script.sh run-test' via 'docker run', using environment: - INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} - DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} - INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} - PREFIX=${PREFIX} - PROVIDER=${PROVIDER} - CRI=${CRI} - LAYOUT=${LAYOUT} - 
KUBERNETES_VERSION=${KUBERNETES_VERSION} - TMP_DIR_PATH=${TMP_DIR_PATH} - " - - ls -lh $(pwd)/testing - - dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "DHCTL log file: $dhctl_log_file" - - user_runner_id=$(id -u):$(id -g) - echo "user_runner_id $user_runner_id" - echo "Start waiting ssh connection string script" - comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}" - echo "Full comment url for updating ${comment_url}" - - ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT - - bastion_ip_file="" - if [[ "${PROVIDER}" == "Static" ]] ; then - bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then - bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - fi - - echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT - - $(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 & - - - docker run --rm \ - -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ - -e PREFIX=${PREFIX} \ - -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ - -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ - -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ - -e CRI=${CRI} \ - -e PROVIDER=${PROVIDER:-not_provided} \ - -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ - -e LAYOUT=${LAYOUT:-not_provided} \ - -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ - -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ - -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ - -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ - -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ - -e 
LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ - -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ - -e USER_RUNNER_ID=${user_runner_id} \ - -v $(pwd)/testing:/deckhouse/testing \ - -v $(pwd)/release.yaml:/deckhouse/release.yaml \ - -v ${TMP_DIR_PATH}:/tmp \ - -w /deckhouse \ - ${INSTALL_IMAGE_NAME} \ - bash /deckhouse/testing/cloud_layouts/script.sh run-test - - # - - name: Read connection string - if: ${{ failure() || cancelled() }} - id: check_stay_failed_cluster - uses: actions/github-script@v6.4.1 - env: - SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }} - SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }} - with: - # it sets `should_run` output var if e2e/failed/stay label - script: | - const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup'); - await e2e_cleanup.readConnectionScript({core, context, github}); - - - name: Label pr if e2e failed - if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }} - uses: actions-ecosystem/action-add-labels@v1 - with: - github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }} - number: ${{ needs.git_info.outputs.pr_number }} - labels: "e2e/cluster/failed" - - - name: Cleanup bootstrapped cluster - if: success() - id: cleanup_cluster - timeout-minutes: 60 - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.27" - LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} - LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} - TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} - PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} - INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} - DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} - # - LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} - LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} - LAYOUT_STATIC_BASTION_IP: 80.249.129.56 - LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} - LAYOUT_VCD_ORG: ${{ 
secrets.LAYOUT_VCD_ORG }} - COMMENT_ID: ${{ inputs.comment_id }} - GITHUB_API_SERVER: ${{ github.api_url }} - REPOSITORY: ${{ github.repository }} - DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} - GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - run: | - echo "Execute 'script.sh cleanup' via 'docker run', using environment: - INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} - DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} - INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} - PREFIX=${PREFIX} - PROVIDER=${PROVIDER} - CRI=${CRI} - LAYOUT=${LAYOUT} - KUBERNETES_VERSION=${KUBERNETES_VERSION} - TMP_DIR_PATH=${TMP_DIR_PATH} - " - - ls -lh $(pwd)/testing - - dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "DHCTL log file: $dhctl_log_file" - - user_runner_id=$(id -u):$(id -g) - echo "user_runner_id $user_runner_id" - - docker run --rm \ - -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ - -e PREFIX=${PREFIX} \ - -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ - -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ - -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ - -e CRI=${CRI} \ - -e PROVIDER=${PROVIDER:-not_provided} \ - -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ - -e LAYOUT=${LAYOUT:-not_provided} \ - -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ - -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ - -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ - -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ - -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ - -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ - -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ - -e USER_RUNNER_ID=${user_runner_id} \ - -v $(pwd)/testing:/deckhouse/testing \ - -v $(pwd)/release.yaml:/deckhouse/release.yaml \ - -v ${TMP_DIR_PATH}:/tmp \ - -w /deckhouse \ - ${INSTALL_IMAGE_NAME} \ - bash /deckhouse/testing/cloud_layouts/script.sh cleanup - - # - - - name: Save dhctl state - id: save_failed_cluster_state - if: ${{ 
failure() }} - uses: actions/upload-artifact@v3.1.2 - with: - name: failed_cluster_state_vclouddirector_containerd_1_27 - path: | - ${{ steps.setup.outputs.tmp-dir-path}}/dhctl - ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate - ${{ steps.setup.outputs.tmp-dir-path}}/logs - - - name: Save test results - if: ${{ steps.setup.outputs.dhctl-log-file }} - uses: actions/upload-artifact@v3.1.2 - with: - name: test_output_vclouddirector_containerd_1_27 - path: | - ${{ steps.setup.outputs.dhctl-log-file}}* - ${{ steps.setup.outputs.tmp-dir-path}}/logs - testing/cloud_layouts/ - !testing/cloud_layouts/**/sshkey - - - name: Cleanup temp directory - if: always() - env: - TMPPATH: ${{ steps.setup.outputs.tmppath}} - run: | - echo "Remove temporary directory '${TMPPATH}' ..." - if [[ -d "${TMPPATH}" && ${#TMPPATH} > 1 ]] ; then - rm -rf "${TMPPATH}" - else - echo Not a directory. - fi - if [ -n $USER_RUNNER_ID ]; then - echo "Fix temp directories owner..." - chown -R $USER_RUNNER_ID "$(pwd)/testing" || true - chown -R $USER_RUNNER_ID "/deckhouse/testing" || true - chown -R $USER_RUNNER_ID /tmp || true - else - echo "Fix temp directories permissions..." 
- chmod -f -R 777 "$(pwd)/testing" || true - chmod -f -R 777 "/deckhouse/testing" || true - chmod -f -R 777 /tmp || true - fi - # - - name: Update comment on finish - id: update_comment_on_finish - if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - env: - NEEDS_CONTEXT: ${{ toJSON(needs) }} - JOB_CONTEXT: ${{ toJSON(job) }} - STEPS_CONTEXT: ${{ toJSON(steps) }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const statusConfig = 'job,separate'; - const name = 'e2e: vCloudDirector, Containerd, Kubernetes 1.27'; - const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); - const jobContext = JSON.parse(process.env.JOB_CONTEXT); - const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); - let jobNames = null - if (process.env.JOB_NAMES) { - jobNames = JSON.parse(process.env.JOB_NAMES); - } - - core.info(`needsContext: ${JSON.stringify(needsContext)}`); - core.info(`jobContext: ${JSON.stringify(jobContext)}`); - core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); - core.info(`jobNames: ${JSON.stringify(jobNames)}`); - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); - # - # - - # - run_containerd_1_28: - name: "e2e: vCloudDirector, Containerd, Kubernetes 1.28" - needs: - - check_e2e_labels - - git_info - if: needs.check_e2e_labels.outputs.run_containerd_1_28 == 'true' - outputs: - ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }} - ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }} - run_id: ${{ github.run_id }} - # need for find state in artifact - cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: "vclouddirector;Standard;containerd;1.28" - failed_cluster_stayed: ${{ 
steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} - issue_number: ${{ inputs.issue_number }} - install_image_path: ${{ steps.setup.outputs.install-image-path }} - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.28" - EVENT_LABEL: ${{ github.event.label.name }} - runs-on: [self-hosted, e2e-common] - steps: - - # - - name: Job started timestamp - id: started_at - run: | - unixTimestamp=$(date +%s) - echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT - # - - # - - name: Checkout sources - uses: actions/checkout@v3.5.2 - with: - ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }} - fetch-depth: 0 - # - # - - name: Update comment on start - if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const name = 'e2e: vCloudDirector, Containerd, Kubernetes 1.28'; - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnStart({github, context, core, name}) - - # - - - # - - name: Check dev registry credentials - id: check_dev_registry - env: - HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}} - run: | - if [[ -n $HOST ]]; then - echo "has_credentials=true" >> $GITHUB_OUTPUT - echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT - fi - - name: Login to dev registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }} - with: - registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }} - username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }} - password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }} - logout: false - # - - # - - name: Check rw registry credentials - id: check_rw_registry - env: - HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} - run: | - if [[ -n $HOST ]]; then - echo "has_credentials=true" >> $GITHUB_OUTPUT - echo 
"web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT - fi - - name: Login to rw registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }} - with: - registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }} - username: ${{ secrets.DECKHOUSE_REGISTRY_USER }} - password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }} - logout: false - - name: Login to Github Container Registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }} - with: - registry: ghcr.io - username: ${{ secrets.GHCR_IO_REGISTRY_USER }} - password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }} - logout: false - # - - # - - name: Install werf CLI - uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e - with: - channel: ${{env.WERF_CHANNEL}} - # - - - name: Setup - id: setup - env: - DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} - CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}} - CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}} - CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}} - REF_FULL: ${{needs.git_info.outputs.ref_full}} - INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }} - MANUAL_RUN: "true" - run: | - # Calculate unique prefix for e2e test. - # GITHUB_RUN_ID is a unique number for each workflow run. - # GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository. - # Add CRI and KUBERNETES_VERSION to create unique directory for each job. - # CRI and PROVIDER values are trimmed to reduce prefix length. 
- if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then - KUBERNETES_VERSION_SUF="auto" - else - KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION} - fi - DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}") - if [[ "${MANUAL_RUN}" == "false" ]] ; then - # for jobs which run multiple providers concurrency (daily e2e, for example) - # add provider suffix to prevent "directory already exists" error - DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)" - fi - # converts to DNS-like (all letters in lower case and replace all dots to dash) - # because it prefix will use for k8s resources names (nodes, for example) - DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]') - - # Create tmppath for test script. - TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX} - if [[ -d "${TMP_DIR_PATH}" ]] ; then - echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!" - ls -la ${TMP_DIR_PATH} - exit 1 - else - echo "Create temporary dir for job: ${TMP_DIR_PATH}." - mkdir -p "${TMP_DIR_PATH}" - fi - - ## Source: ci_templates/build.yml - - # Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'. - REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-} - if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then - # REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'. - REPO_SUFFIX= - fi - - # Use dev-registry for Git branches. - BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" - # Use rw-registry for Git tags. - SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse" - - if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then - # DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo. - # Use dev-regisry for branches and Github Container Registry for semver tags. - BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" - SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}" - fi - - # Prepare initial image tag for deploy/deckhouse to test switching from previous release. 
- INITIAL_IMAGE_TAG= - if [[ -n ${INITIAL_REF_SLUG} ]] ; then - INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} - fi - - # Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh). - # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. - # Use it as image tag. Add suffix to not overlap with PRs in main repo. - IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} - - INSTALL_IMAGE_NAME= - if [[ -n ${CI_COMMIT_BRANCH} ]]; then - # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. - INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG} - fi - if [[ -n ${CI_COMMIT_TAG} ]] ; then - REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe - INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG} - fi - if [[ -n ${INITIAL_REF_SLUG} ]] ; then - INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG} - git fetch origin ${INITIAL_REF_SLUG} - git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts - fi - SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]') - echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}" - - # Print image name in uppercase to prevent hiding non-secret registry host stored in secret. - echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'." 
- docker pull "${INSTALL_IMAGE_NAME}" - - IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}" - - echo '::echo::on' - echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT - echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT - echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT - echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT - echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT - - echo '::echo::off' - - - name: "Run e2e test: vCloudDirector/Containerd/1.28" - id: e2e_test_run - timeout-minutes: 80 - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.28" - LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} - LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} - TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} - PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} - INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} - DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} - INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} - # - LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} - LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} - LAYOUT_STATIC_BASTION_IP: 80.249.129.56 - LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} - LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} - COMMENT_ID: ${{ inputs.comment_id }} - GITHUB_API_SERVER: ${{ github.api_url }} - REPOSITORY: ${{ github.repository }} - DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} - GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - run: | - echo "Execute 'script.sh run-test' via 'docker run', using environment: - INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} - DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} - INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} - PREFIX=${PREFIX} - PROVIDER=${PROVIDER} - CRI=${CRI} - LAYOUT=${LAYOUT} - 
KUBERNETES_VERSION=${KUBERNETES_VERSION} - TMP_DIR_PATH=${TMP_DIR_PATH} - " - - ls -lh $(pwd)/testing - - dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "DHCTL log file: $dhctl_log_file" - - user_runner_id=$(id -u):$(id -g) - echo "user_runner_id $user_runner_id" - echo "Start waiting ssh connection string script" - comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}" - echo "Full comment url for updating ${comment_url}" - - ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT - - bastion_ip_file="" - if [[ "${PROVIDER}" == "Static" ]] ; then - bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then - bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - fi - - echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT - - $(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 & - - - docker run --rm \ - -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ - -e PREFIX=${PREFIX} \ - -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ - -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ - -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ - -e CRI=${CRI} \ - -e PROVIDER=${PROVIDER:-not_provided} \ - -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ - -e LAYOUT=${LAYOUT:-not_provided} \ - -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ - -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ - -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ - -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ - -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ - -e 
LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ - -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ - -e USER_RUNNER_ID=${user_runner_id} \ - -v $(pwd)/testing:/deckhouse/testing \ - -v $(pwd)/release.yaml:/deckhouse/release.yaml \ - -v ${TMP_DIR_PATH}:/tmp \ - -w /deckhouse \ - ${INSTALL_IMAGE_NAME} \ - bash /deckhouse/testing/cloud_layouts/script.sh run-test - - # - - name: Read connection string - if: ${{ failure() || cancelled() }} - id: check_stay_failed_cluster - uses: actions/github-script@v6.4.1 - env: - SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }} - SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }} - with: - # it sets `should_run` output var if e2e/failed/stay label - script: | - const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup'); - await e2e_cleanup.readConnectionScript({core, context, github}); - - - name: Label pr if e2e failed - if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }} - uses: actions-ecosystem/action-add-labels@v1 - with: - github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }} - number: ${{ needs.git_info.outputs.pr_number }} - labels: "e2e/cluster/failed" - - - name: Cleanup bootstrapped cluster - if: success() - id: cleanup_cluster - timeout-minutes: 60 - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.28" - LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} - LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} - TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} - PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} - INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} - DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} - # - LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} - LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} - LAYOUT_STATIC_BASTION_IP: 80.249.129.56 - LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} - LAYOUT_VCD_ORG: ${{ 
secrets.LAYOUT_VCD_ORG }} - COMMENT_ID: ${{ inputs.comment_id }} - GITHUB_API_SERVER: ${{ github.api_url }} - REPOSITORY: ${{ github.repository }} - DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} - GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - run: | - echo "Execute 'script.sh cleanup' via 'docker run', using environment: - INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} - DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} - INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} - PREFIX=${PREFIX} - PROVIDER=${PROVIDER} - CRI=${CRI} - LAYOUT=${LAYOUT} - KUBERNETES_VERSION=${KUBERNETES_VERSION} - TMP_DIR_PATH=${TMP_DIR_PATH} - " - - ls -lh $(pwd)/testing - - dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "DHCTL log file: $dhctl_log_file" - - user_runner_id=$(id -u):$(id -g) - echo "user_runner_id $user_runner_id" - - docker run --rm \ - -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ - -e PREFIX=${PREFIX} \ - -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ - -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ - -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ - -e CRI=${CRI} \ - -e PROVIDER=${PROVIDER:-not_provided} \ - -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ - -e LAYOUT=${LAYOUT:-not_provided} \ - -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ - -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ - -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ - -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ - -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ - -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ - -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ - -e USER_RUNNER_ID=${user_runner_id} \ - -v $(pwd)/testing:/deckhouse/testing \ - -v $(pwd)/release.yaml:/deckhouse/release.yaml \ - -v ${TMP_DIR_PATH}:/tmp \ - -w /deckhouse \ - ${INSTALL_IMAGE_NAME} \ - bash /deckhouse/testing/cloud_layouts/script.sh cleanup - - # - - - name: Save dhctl state - id: save_failed_cluster_state - if: ${{ 
failure() }} - uses: actions/upload-artifact@v3.1.2 - with: - name: failed_cluster_state_vclouddirector_containerd_1_28 - path: | - ${{ steps.setup.outputs.tmp-dir-path}}/dhctl - ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate - ${{ steps.setup.outputs.tmp-dir-path}}/logs - - - name: Save test results - if: ${{ steps.setup.outputs.dhctl-log-file }} - uses: actions/upload-artifact@v3.1.2 - with: - name: test_output_vclouddirector_containerd_1_28 - path: | - ${{ steps.setup.outputs.dhctl-log-file}}* - ${{ steps.setup.outputs.tmp-dir-path}}/logs - testing/cloud_layouts/ - !testing/cloud_layouts/**/sshkey - - - name: Cleanup temp directory - if: always() - env: - TMPPATH: ${{ steps.setup.outputs.tmppath}} - run: | - echo "Remove temporary directory '${TMPPATH}' ..." - if [[ -d "${TMPPATH}" && ${#TMPPATH} > 1 ]] ; then - rm -rf "${TMPPATH}" - else - echo Not a directory. - fi - if [ -n $USER_RUNNER_ID ]; then - echo "Fix temp directories owner..." - chown -R $USER_RUNNER_ID "$(pwd)/testing" || true - chown -R $USER_RUNNER_ID "/deckhouse/testing" || true - chown -R $USER_RUNNER_ID /tmp || true - else - echo "Fix temp directories permissions..." 
- chmod -f -R 777 "$(pwd)/testing" || true - chmod -f -R 777 "/deckhouse/testing" || true - chmod -f -R 777 /tmp || true - fi - # - - name: Update comment on finish - id: update_comment_on_finish - if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - env: - NEEDS_CONTEXT: ${{ toJSON(needs) }} - JOB_CONTEXT: ${{ toJSON(job) }} - STEPS_CONTEXT: ${{ toJSON(steps) }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const statusConfig = 'job,separate'; - const name = 'e2e: vCloudDirector, Containerd, Kubernetes 1.28'; - const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); - const jobContext = JSON.parse(process.env.JOB_CONTEXT); - const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); - let jobNames = null - if (process.env.JOB_NAMES) { - jobNames = JSON.parse(process.env.JOB_NAMES); - } - - core.info(`needsContext: ${JSON.stringify(needsContext)}`); - core.info(`jobContext: ${JSON.stringify(jobContext)}`); - core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); - core.info(`jobNames: ${JSON.stringify(jobNames)}`); - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); - # - # - - # - run_containerd_1_29: - name: "e2e: vCloudDirector, Containerd, Kubernetes 1.29" - needs: - - check_e2e_labels - - git_info - if: needs.check_e2e_labels.outputs.run_containerd_1_29 == 'true' - outputs: - ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }} - ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }} - run_id: ${{ github.run_id }} - # need for find state in artifact - cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: "vclouddirector;Standard;containerd;1.29" - failed_cluster_stayed: ${{ 
steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} - issue_number: ${{ inputs.issue_number }} - install_image_path: ${{ steps.setup.outputs.install-image-path }} - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.29" - EVENT_LABEL: ${{ github.event.label.name }} - runs-on: [self-hosted, e2e-common] - steps: - - # - - name: Job started timestamp - id: started_at - run: | - unixTimestamp=$(date +%s) - echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT - # - - # - - name: Checkout sources - uses: actions/checkout@v3.5.2 - with: - ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }} - fetch-depth: 0 - # - # - - name: Update comment on start - if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const name = 'e2e: vCloudDirector, Containerd, Kubernetes 1.29'; - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnStart({github, context, core, name}) - - # - - - # - - name: Check dev registry credentials - id: check_dev_registry - env: - HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}} - run: | - if [[ -n $HOST ]]; then - echo "has_credentials=true" >> $GITHUB_OUTPUT - echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT - fi - - name: Login to dev registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }} - with: - registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }} - username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }} - password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }} - logout: false - # - - # - - name: Check rw registry credentials - id: check_rw_registry - env: - HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} - run: | - if [[ -n $HOST ]]; then - echo "has_credentials=true" >> $GITHUB_OUTPUT - echo 
"web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT - fi - - name: Login to rw registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }} - with: - registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }} - username: ${{ secrets.DECKHOUSE_REGISTRY_USER }} - password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }} - logout: false - - name: Login to Github Container Registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }} - with: - registry: ghcr.io - username: ${{ secrets.GHCR_IO_REGISTRY_USER }} - password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }} - logout: false - # - - # - - name: Install werf CLI - uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e - with: - channel: ${{env.WERF_CHANNEL}} - # - - - name: Setup - id: setup - env: - DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} - CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}} - CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}} - CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}} - REF_FULL: ${{needs.git_info.outputs.ref_full}} - INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }} - MANUAL_RUN: "true" - run: | - # Calculate unique prefix for e2e test. - # GITHUB_RUN_ID is a unique number for each workflow run. - # GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository. - # Add CRI and KUBERNETES_VERSION to create unique directory for each job. - # CRI and PROVIDER values are trimmed to reduce prefix length. 
- if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then - KUBERNETES_VERSION_SUF="auto" - else - KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION} - fi - DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}") - if [[ "${MANUAL_RUN}" == "false" ]] ; then - # for jobs which run multiple providers concurrency (daily e2e, for example) - # add provider suffix to prevent "directory already exists" error - DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)" - fi - # converts to DNS-like (all letters in lower case and replace all dots to dash) - # because it prefix will use for k8s resources names (nodes, for example) - DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]') - - # Create tmppath for test script. - TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX} - if [[ -d "${TMP_DIR_PATH}" ]] ; then - echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!" - ls -la ${TMP_DIR_PATH} - exit 1 - else - echo "Create temporary dir for job: ${TMP_DIR_PATH}." - mkdir -p "${TMP_DIR_PATH}" - fi - - ## Source: ci_templates/build.yml - - # Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'. - REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-} - if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then - # REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'. - REPO_SUFFIX= - fi - - # Use dev-registry for Git branches. - BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" - # Use rw-registry for Git tags. - SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse" - - if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then - # DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo. - # Use dev-regisry for branches and Github Container Registry for semver tags. - BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" - SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}" - fi - - # Prepare initial image tag for deploy/deckhouse to test switching from previous release. 
- INITIAL_IMAGE_TAG= - if [[ -n ${INITIAL_REF_SLUG} ]] ; then - INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} - fi - - # Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh). - # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. - # Use it as image tag. Add suffix to not overlap with PRs in main repo. - IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} - - INSTALL_IMAGE_NAME= - if [[ -n ${CI_COMMIT_BRANCH} ]]; then - # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. - INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG} - fi - if [[ -n ${CI_COMMIT_TAG} ]] ; then - REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe - INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG} - fi - if [[ -n ${INITIAL_REF_SLUG} ]] ; then - INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG} - git fetch origin ${INITIAL_REF_SLUG} - git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts - fi - SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]') - echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}" - - # Print image name in uppercase to prevent hiding non-secret registry host stored in secret. - echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'." 
- docker pull "${INSTALL_IMAGE_NAME}" - - IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}" - - echo '::echo::on' - echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT - echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT - echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT - echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT - echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT - - echo '::echo::off' - - - name: "Run e2e test: vCloudDirector/Containerd/1.29" - id: e2e_test_run - timeout-minutes: 80 - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.29" - LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} - LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} - TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} - PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} - INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} - DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} - INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} - # - LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} - LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} - LAYOUT_STATIC_BASTION_IP: 80.249.129.56 - LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} - LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} - COMMENT_ID: ${{ inputs.comment_id }} - GITHUB_API_SERVER: ${{ github.api_url }} - REPOSITORY: ${{ github.repository }} - DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} - GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - run: | - echo "Execute 'script.sh run-test' via 'docker run', using environment: - INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} - DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} - INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} - PREFIX=${PREFIX} - PROVIDER=${PROVIDER} - CRI=${CRI} - LAYOUT=${LAYOUT} - 
KUBERNETES_VERSION=${KUBERNETES_VERSION} - TMP_DIR_PATH=${TMP_DIR_PATH} - " - - ls -lh $(pwd)/testing - - dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "DHCTL log file: $dhctl_log_file" - - user_runner_id=$(id -u):$(id -g) - echo "user_runner_id $user_runner_id" - echo "Start waiting ssh connection string script" - comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}" - echo "Full comment url for updating ${comment_url}" - - ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT - - bastion_ip_file="" - if [[ "${PROVIDER}" == "Static" ]] ; then - bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then - bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - fi - - echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT - - $(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 & - - - docker run --rm \ - -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ - -e PREFIX=${PREFIX} \ - -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ - -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ - -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ - -e CRI=${CRI} \ - -e PROVIDER=${PROVIDER:-not_provided} \ - -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ - -e LAYOUT=${LAYOUT:-not_provided} \ - -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ - -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ - -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ - -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ - -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ - -e 
LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ - -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ - -e USER_RUNNER_ID=${user_runner_id} \ - -v $(pwd)/testing:/deckhouse/testing \ - -v $(pwd)/release.yaml:/deckhouse/release.yaml \ - -v ${TMP_DIR_PATH}:/tmp \ - -w /deckhouse \ - ${INSTALL_IMAGE_NAME} \ - bash /deckhouse/testing/cloud_layouts/script.sh run-test - - # - - name: Read connection string - if: ${{ failure() || cancelled() }} - id: check_stay_failed_cluster - uses: actions/github-script@v6.4.1 - env: - SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }} - SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }} - with: - # it sets `should_run` output var if e2e/failed/stay label - script: | - const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup'); - await e2e_cleanup.readConnectionScript({core, context, github}); - - - name: Label pr if e2e failed - if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }} - uses: actions-ecosystem/action-add-labels@v1 - with: - github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }} - number: ${{ needs.git_info.outputs.pr_number }} - labels: "e2e/cluster/failed" - - - name: Cleanup bootstrapped cluster - if: success() - id: cleanup_cluster - timeout-minutes: 60 - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.29" - LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} - LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} - TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} - PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} - INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} - DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} - # - LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} - LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} - LAYOUT_STATIC_BASTION_IP: 80.249.129.56 - LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} - LAYOUT_VCD_ORG: ${{ 
secrets.LAYOUT_VCD_ORG }} - COMMENT_ID: ${{ inputs.comment_id }} - GITHUB_API_SERVER: ${{ github.api_url }} - REPOSITORY: ${{ github.repository }} - DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} - GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - run: | - echo "Execute 'script.sh cleanup' via 'docker run', using environment: - INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} - DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} - INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} - PREFIX=${PREFIX} - PROVIDER=${PROVIDER} - CRI=${CRI} - LAYOUT=${LAYOUT} - KUBERNETES_VERSION=${KUBERNETES_VERSION} - TMP_DIR_PATH=${TMP_DIR_PATH} - " - - ls -lh $(pwd)/testing - - dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "DHCTL log file: $dhctl_log_file" - - user_runner_id=$(id -u):$(id -g) - echo "user_runner_id $user_runner_id" - - docker run --rm \ - -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ - -e PREFIX=${PREFIX} \ - -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ - -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ - -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ - -e CRI=${CRI} \ - -e PROVIDER=${PROVIDER:-not_provided} \ - -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ - -e LAYOUT=${LAYOUT:-not_provided} \ - -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ - -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ - -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ - -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ - -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ - -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ - -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ - -e USER_RUNNER_ID=${user_runner_id} \ - -v $(pwd)/testing:/deckhouse/testing \ - -v $(pwd)/release.yaml:/deckhouse/release.yaml \ - -v ${TMP_DIR_PATH}:/tmp \ - -w /deckhouse \ - ${INSTALL_IMAGE_NAME} \ - bash /deckhouse/testing/cloud_layouts/script.sh cleanup - - # - - - name: Save dhctl state - id: save_failed_cluster_state - if: ${{ 
failure() }} - uses: actions/upload-artifact@v3.1.2 - with: - name: failed_cluster_state_vclouddirector_containerd_1_29 - path: | - ${{ steps.setup.outputs.tmp-dir-path}}/dhctl - ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate - ${{ steps.setup.outputs.tmp-dir-path}}/logs - - - name: Save test results - if: ${{ steps.setup.outputs.dhctl-log-file }} - uses: actions/upload-artifact@v3.1.2 - with: - name: test_output_vclouddirector_containerd_1_29 - path: | - ${{ steps.setup.outputs.dhctl-log-file}}* - ${{ steps.setup.outputs.tmp-dir-path}}/logs - testing/cloud_layouts/ - !testing/cloud_layouts/**/sshkey - - - name: Cleanup temp directory - if: always() - env: - TMPPATH: ${{ steps.setup.outputs.tmp-dir-path}} - run: | - echo "Remove temporary directory '${TMPPATH}' ..." - if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then - rm -rf "${TMPPATH}" - else - echo Not a directory. - fi - if [ -n "$USER_RUNNER_ID" ]; then - echo "Fix temp directories owner..." - chown -R $USER_RUNNER_ID "$(pwd)/testing" || true - chown -R $USER_RUNNER_ID "/deckhouse/testing" || true - chown -R $USER_RUNNER_ID /tmp || true - else - echo "Fix temp directories permissions..." 
- chmod -f -R 777 "$(pwd)/testing" || true - chmod -f -R 777 "/deckhouse/testing" || true - chmod -f -R 777 /tmp || true - fi - # - - name: Update comment on finish - id: update_comment_on_finish - if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - env: - NEEDS_CONTEXT: ${{ toJSON(needs) }} - JOB_CONTEXT: ${{ toJSON(job) }} - STEPS_CONTEXT: ${{ toJSON(steps) }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const statusConfig = 'job,separate'; - const name = 'e2e: vCloudDirector, Containerd, Kubernetes 1.29'; - const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); - const jobContext = JSON.parse(process.env.JOB_CONTEXT); - const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); - let jobNames = null - if (process.env.JOB_NAMES) { - jobNames = JSON.parse(process.env.JOB_NAMES); - } - - core.info(`needsContext: ${JSON.stringify(needsContext)}`); - core.info(`jobContext: ${JSON.stringify(jobContext)}`); - core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); - core.info(`jobNames: ${JSON.stringify(jobNames)}`); - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); - # - # - - # - run_containerd_1_30: - name: "e2e: vCloudDirector, Containerd, Kubernetes 1.30" - needs: - - check_e2e_labels - - git_info - if: needs.check_e2e_labels.outputs.run_containerd_1_30 == 'true' - outputs: - ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }} - ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }} - run_id: ${{ github.run_id }} - # need for find state in artifact - cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: "vclouddirector;Standard;containerd;1.30" - failed_cluster_stayed: ${{ 
steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} - issue_number: ${{ inputs.issue_number }} - install_image_path: ${{ steps.setup.outputs.install-image-path }} - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.30" - EVENT_LABEL: ${{ github.event.label.name }} - runs-on: [self-hosted, e2e-common] - steps: - - # - - name: Job started timestamp - id: started_at - run: | - unixTimestamp=$(date +%s) - echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT - # - - # - - name: Checkout sources - uses: actions/checkout@v3.5.2 - with: - ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }} - fetch-depth: 0 - # - # - - name: Update comment on start - if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const name = 'e2e: vCloudDirector, Containerd, Kubernetes 1.30'; - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnStart({github, context, core, name}) - - # - - - # - - name: Check dev registry credentials - id: check_dev_registry - env: - HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}} - run: | - if [[ -n $HOST ]]; then - echo "has_credentials=true" >> $GITHUB_OUTPUT - echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT - fi - - name: Login to dev registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }} - with: - registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }} - username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }} - password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }} - logout: false - # - - # - - name: Check rw registry credentials - id: check_rw_registry - env: - HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} - run: | - if [[ -n $HOST ]]; then - echo "has_credentials=true" >> $GITHUB_OUTPUT - echo 
"web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT - fi - - name: Login to rw registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }} - with: - registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }} - username: ${{ secrets.DECKHOUSE_REGISTRY_USER }} - password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }} - logout: false - - name: Login to Github Container Registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }} - with: - registry: ghcr.io - username: ${{ secrets.GHCR_IO_REGISTRY_USER }} - password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }} - logout: false - # - - # - - name: Install werf CLI - uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e - with: - channel: ${{env.WERF_CHANNEL}} - # - - - name: Setup - id: setup - env: - DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} - CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}} - CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}} - CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}} - REF_FULL: ${{needs.git_info.outputs.ref_full}} - INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }} - MANUAL_RUN: "true" - run: | - # Calculate unique prefix for e2e test. - # GITHUB_RUN_ID is a unique number for each workflow run. - # GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository. - # Add CRI and KUBERNETES_VERSION to create unique directory for each job. - # CRI and PROVIDER values are trimmed to reduce prefix length. 
- if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then - KUBERNETES_VERSION_SUF="auto" - else - KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION} - fi - DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}") - if [[ "${MANUAL_RUN}" == "false" ]] ; then - # for jobs which run multiple providers concurrency (daily e2e, for example) - # add provider suffix to prevent "directory already exists" error - DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)" - fi - # converts to DNS-like (all letters in lower case and replace all dots to dash) - # because it prefix will use for k8s resources names (nodes, for example) - DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]') - - # Create tmppath for test script. - TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX} - if [[ -d "${TMP_DIR_PATH}" ]] ; then - echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!" - ls -la ${TMP_DIR_PATH} - exit 1 - else - echo "Create temporary dir for job: ${TMP_DIR_PATH}." - mkdir -p "${TMP_DIR_PATH}" - fi - - ## Source: ci_templates/build.yml - - # Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'. - REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-} - if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then - # REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'. - REPO_SUFFIX= - fi - - # Use dev-registry for Git branches. - BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" - # Use rw-registry for Git tags. - SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse" - - if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then - # DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo. - # Use dev-regisry for branches and Github Container Registry for semver tags. - BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" - SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}" - fi - - # Prepare initial image tag for deploy/deckhouse to test switching from previous release. 
- INITIAL_IMAGE_TAG= - if [[ -n ${INITIAL_REF_SLUG} ]] ; then - INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} - fi - - # Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh). - # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. - # Use it as image tag. Add suffix to not overlap with PRs in main repo. - IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} - - INSTALL_IMAGE_NAME= - if [[ -n ${CI_COMMIT_BRANCH} ]]; then - # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. - INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG} - fi - if [[ -n ${CI_COMMIT_TAG} ]] ; then - REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe - INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG} - fi - if [[ -n ${INITIAL_REF_SLUG} ]] ; then - INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG} - git fetch origin ${INITIAL_REF_SLUG} - git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts - fi - SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]') - echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}" - - # Print image name in uppercase to prevent hiding non-secret registry host stored in secret. - echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'." 
- docker pull "${INSTALL_IMAGE_NAME}" - - IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}" - - echo '::echo::on' - echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT - echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT - echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT - echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT - echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT - - echo '::echo::off' - - - name: "Run e2e test: vCloudDirector/Containerd/1.30" - id: e2e_test_run - timeout-minutes: 80 - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.30" - LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} - LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} - TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} - PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} - INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} - DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} - INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} - # - LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} - LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} - LAYOUT_STATIC_BASTION_IP: 80.249.129.56 - LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} - LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} - COMMENT_ID: ${{ inputs.comment_id }} - GITHUB_API_SERVER: ${{ github.api_url }} - REPOSITORY: ${{ github.repository }} - DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} - GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - run: | - echo "Execute 'script.sh run-test' via 'docker run', using environment: - INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} - DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} - INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} - PREFIX=${PREFIX} - PROVIDER=${PROVIDER} - CRI=${CRI} - LAYOUT=${LAYOUT} - 
KUBERNETES_VERSION=${KUBERNETES_VERSION} - TMP_DIR_PATH=${TMP_DIR_PATH} - " - - ls -lh $(pwd)/testing - - dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "DHCTL log file: $dhctl_log_file" - - user_runner_id=$(id -u):$(id -g) - echo "user_runner_id $user_runner_id" - echo "Start waiting ssh connection string script" - comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}" - echo "Full comment url for updating ${comment_url}" - - ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT - - bastion_ip_file="" - if [[ "${PROVIDER}" == "Static" ]] ; then - bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then - bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - fi - - echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT - - $(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 & - - - docker run --rm \ - -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ - -e PREFIX=${PREFIX} \ - -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ - -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ - -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ - -e CRI=${CRI} \ - -e PROVIDER=${PROVIDER:-not_provided} \ - -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ - -e LAYOUT=${LAYOUT:-not_provided} \ - -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ - -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ - -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ - -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ - -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ - -e 
LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ - -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ - -e USER_RUNNER_ID=${user_runner_id} \ - -v $(pwd)/testing:/deckhouse/testing \ - -v $(pwd)/release.yaml:/deckhouse/release.yaml \ - -v ${TMP_DIR_PATH}:/tmp \ - -w /deckhouse \ - ${INSTALL_IMAGE_NAME} \ - bash /deckhouse/testing/cloud_layouts/script.sh run-test - - # - - name: Read connection string - if: ${{ failure() || cancelled() }} - id: check_stay_failed_cluster - uses: actions/github-script@v6.4.1 - env: - SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }} - SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }} - with: - # it sets `should_run` output var if e2e/failed/stay label - script: | - const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup'); - await e2e_cleanup.readConnectionScript({core, context, github}); - - - name: Label pr if e2e failed - if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }} - uses: actions-ecosystem/action-add-labels@v1 - with: - github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }} - number: ${{ needs.git_info.outputs.pr_number }} - labels: "e2e/cluster/failed" - - - name: Cleanup bootstrapped cluster - if: success() - id: cleanup_cluster - timeout-minutes: 60 - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "1.30" - LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} - LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} - TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} - PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} - INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} - DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} - # - LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} - LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} - LAYOUT_STATIC_BASTION_IP: 80.249.129.56 - LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} - LAYOUT_VCD_ORG: ${{ 
secrets.LAYOUT_VCD_ORG }} - COMMENT_ID: ${{ inputs.comment_id }} - GITHUB_API_SERVER: ${{ github.api_url }} - REPOSITORY: ${{ github.repository }} - DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} - GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - run: | - echo "Execute 'script.sh cleanup' via 'docker run', using environment: - INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} - DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} - INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} - PREFIX=${PREFIX} - PROVIDER=${PROVIDER} - CRI=${CRI} - LAYOUT=${LAYOUT} - KUBERNETES_VERSION=${KUBERNETES_VERSION} - TMP_DIR_PATH=${TMP_DIR_PATH} - " - - ls -lh $(pwd)/testing - - dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "DHCTL log file: $dhctl_log_file" - - user_runner_id=$(id -u):$(id -g) - echo "user_runner_id $user_runner_id" - - docker run --rm \ - -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ - -e PREFIX=${PREFIX} \ - -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ - -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ - -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ - -e CRI=${CRI} \ - -e PROVIDER=${PROVIDER:-not_provided} \ - -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ - -e LAYOUT=${LAYOUT:-not_provided} \ - -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ - -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ - -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ - -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ - -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ - -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ - -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ - -e USER_RUNNER_ID=${user_runner_id} \ - -v $(pwd)/testing:/deckhouse/testing \ - -v $(pwd)/release.yaml:/deckhouse/release.yaml \ - -v ${TMP_DIR_PATH}:/tmp \ - -w /deckhouse \ - ${INSTALL_IMAGE_NAME} \ - bash /deckhouse/testing/cloud_layouts/script.sh cleanup - - # - - - name: Save dhctl state - id: save_failed_cluster_state - if: ${{ 
failure() }} - uses: actions/upload-artifact@v3.1.2 - with: - name: failed_cluster_state_vclouddirector_containerd_1_30 - path: | - ${{ steps.setup.outputs.tmp-dir-path}}/dhctl - ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate - ${{ steps.setup.outputs.tmp-dir-path}}/logs - - - name: Save test results - if: ${{ steps.setup.outputs.dhctl-log-file }} - uses: actions/upload-artifact@v3.1.2 - with: - name: test_output_vclouddirector_containerd_1_30 - path: | - ${{ steps.setup.outputs.dhctl-log-file}}* - ${{ steps.setup.outputs.tmp-dir-path}}/logs - testing/cloud_layouts/ - !testing/cloud_layouts/**/sshkey - - - name: Cleanup temp directory - if: always() - env: - TMPPATH: ${{ steps.setup.outputs.tmppath}} - run: | - echo "Remove temporary directory '${TMPPATH}' ..." - if [[ -d "${TMPPATH}" && ${#TMPPATH} > 1 ]] ; then - rm -rf "${TMPPATH}" - else - echo Not a directory. - fi - if [ -n $USER_RUNNER_ID ]; then - echo "Fix temp directories owner..." - chown -R $USER_RUNNER_ID "$(pwd)/testing" || true - chown -R $USER_RUNNER_ID "/deckhouse/testing" || true - chown -R $USER_RUNNER_ID /tmp || true - else - echo "Fix temp directories permissions..." 
- chmod -f -R 777 "$(pwd)/testing" || true - chmod -f -R 777 "/deckhouse/testing" || true - chmod -f -R 777 /tmp || true - fi - # - - name: Update comment on finish - id: update_comment_on_finish - if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - env: - NEEDS_CONTEXT: ${{ toJSON(needs) }} - JOB_CONTEXT: ${{ toJSON(job) }} - STEPS_CONTEXT: ${{ toJSON(steps) }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const statusConfig = 'job,separate'; - const name = 'e2e: vCloudDirector, Containerd, Kubernetes 1.30'; - const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); - const jobContext = JSON.parse(process.env.JOB_CONTEXT); - const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); - let jobNames = null - if (process.env.JOB_NAMES) { - jobNames = JSON.parse(process.env.JOB_NAMES); - } - - core.info(`needsContext: ${JSON.stringify(needsContext)}`); - core.info(`jobContext: ${JSON.stringify(jobContext)}`); - core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); - core.info(`jobNames: ${JSON.stringify(jobNames)}`); - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); - # - # - - # - run_containerd_Automatic: - name: "e2e: vCloudDirector, Containerd, Kubernetes Automatic" - needs: - - check_e2e_labels - - git_info - if: needs.check_e2e_labels.outputs.run_containerd_Automatic == 'true' - outputs: - ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }} - ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }} - run_id: ${{ github.run_id }} - # need for find state in artifact - cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }} - ran_for: "vclouddirector;Standard;containerd;Automatic" - 
failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }} - issue_number: ${{ inputs.issue_number }} - install_image_path: ${{ steps.setup.outputs.install-image-path }} - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "Automatic" - EVENT_LABEL: ${{ github.event.label.name }} - runs-on: [self-hosted, e2e-common] - steps: - - # - - name: Job started timestamp - id: started_at - run: | - unixTimestamp=$(date +%s) - echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT - # - - # - - name: Checkout sources - uses: actions/checkout@v3.5.2 - with: - ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }} - fetch-depth: 0 - # - # - - name: Update comment on start - if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const name = 'e2e: vCloudDirector, Containerd, Kubernetes Automatic'; - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnStart({github, context, core, name}) - - # - - - # - - name: Check dev registry credentials - id: check_dev_registry - env: - HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}} - run: | - if [[ -n $HOST ]]; then - echo "has_credentials=true" >> $GITHUB_OUTPUT - echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT - fi - - name: Login to dev registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }} - with: - registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }} - username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }} - password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }} - logout: false - # - - # - - name: Check rw registry credentials - id: check_rw_registry - env: - HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} - run: | - if [[ -n $HOST ]]; then - echo 
"has_credentials=true" >> $GITHUB_OUTPUT - echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT - fi - - name: Login to rw registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }} - with: - registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }} - username: ${{ secrets.DECKHOUSE_REGISTRY_USER }} - password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }} - logout: false - - name: Login to Github Container Registry - uses: docker/login-action@v2.1.0 - if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }} - with: - registry: ghcr.io - username: ${{ secrets.GHCR_IO_REGISTRY_USER }} - password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }} - logout: false - # - - # - - name: Install werf CLI - uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e - with: - channel: ${{env.WERF_CHANNEL}} - # - - - name: Setup - id: setup - env: - DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}} - CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}} - CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}} - CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}} - REF_FULL: ${{needs.git_info.outputs.ref_full}} - INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }} - MANUAL_RUN: "true" - run: | - # Calculate unique prefix for e2e test. - # GITHUB_RUN_ID is a unique number for each workflow run. - # GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository. - # Add CRI and KUBERNETES_VERSION to create unique directory for each job. - # CRI and PROVIDER values are trimmed to reduce prefix length. 
- if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then - KUBERNETES_VERSION_SUF="auto" - else - KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION} - fi - DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}") - if [[ "${MANUAL_RUN}" == "false" ]] ; then - # for jobs which run multiple providers concurrency (daily e2e, for example) - # add provider suffix to prevent "directory already exists" error - DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)" - fi - # converts to DNS-like (all letters in lower case and replace all dots to dash) - # because it prefix will use for k8s resources names (nodes, for example) - DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]') - - # Create tmppath for test script. - TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX} - if [[ -d "${TMP_DIR_PATH}" ]] ; then - echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!" - ls -la ${TMP_DIR_PATH} - exit 1 - else - echo "Create temporary dir for job: ${TMP_DIR_PATH}." - mkdir -p "${TMP_DIR_PATH}" - fi - - ## Source: ci_templates/build.yml - - # Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'. - REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-} - if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then - # REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'. - REPO_SUFFIX= - fi - - # Use dev-registry for Git branches. - BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" - # Use rw-registry for Git tags. - SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse" - - if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then - # DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo. - # Use dev-regisry for branches and Github Container Registry for semver tags. - BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}" - SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}" - fi - - # Prepare initial image tag for deploy/deckhouse to test switching from previous release. 
- INITIAL_IMAGE_TAG= - if [[ -n ${INITIAL_REF_SLUG} ]] ; then - INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} - fi - - # Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh). - # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. - # Use it as image tag. Add suffix to not overlap with PRs in main repo. - IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}} - - INSTALL_IMAGE_NAME= - if [[ -n ${CI_COMMIT_BRANCH} ]]; then - # CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch. - INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG} - fi - if [[ -n ${CI_COMMIT_TAG} ]] ; then - REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe - INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG} - fi - if [[ -n ${INITIAL_REF_SLUG} ]] ; then - INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG} - git fetch origin ${INITIAL_REF_SLUG} - git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts - fi - SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]') - echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}" - - # Print image name in uppercase to prevent hiding non-secret registry host stored in secret. - echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'." 
- docker pull "${INSTALL_IMAGE_NAME}" - - IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}" - - echo '::echo::on' - echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT - echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT - echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT - echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT - echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT - - echo '::echo::off' - - - name: "Run e2e test: vCloudDirector/Containerd/Automatic" - id: e2e_test_run - timeout-minutes: 80 - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "Automatic" - LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} - LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} - TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} - PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} - INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} - DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} - INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }} - # - LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} - LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} - LAYOUT_STATIC_BASTION_IP: 80.249.129.56 - LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} - LAYOUT_VCD_ORG: ${{ secrets.LAYOUT_VCD_ORG }} - COMMENT_ID: ${{ inputs.comment_id }} - GITHUB_API_SERVER: ${{ github.api_url }} - REPOSITORY: ${{ github.repository }} - DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} - GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - run: | - echo "Execute 'script.sh run-test' via 'docker run', using environment: - INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} - DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} - INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} - PREFIX=${PREFIX} - PROVIDER=${PROVIDER} - CRI=${CRI} - LAYOUT=${LAYOUT} - 
KUBERNETES_VERSION=${KUBERNETES_VERSION} - TMP_DIR_PATH=${TMP_DIR_PATH} - " - - ls -lh $(pwd)/testing - - dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "DHCTL log file: $dhctl_log_file" - - user_runner_id=$(id -u):$(id -g) - echo "user_runner_id $user_runner_id" - echo "Start waiting ssh connection string script" - comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}" - echo "Full comment url for updating ${comment_url}" - - ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT - - bastion_ip_file="" - if [[ "${PROVIDER}" == "Static" ]] ; then - bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then - bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - fi - - echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT - - $(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 & - - - docker run --rm \ - -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ - -e PREFIX=${PREFIX} \ - -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ - -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ - -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ - -e CRI=${CRI} \ - -e PROVIDER=${PROVIDER:-not_provided} \ - -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ - -e LAYOUT=${LAYOUT:-not_provided} \ - -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ - -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ - -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ - -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ - -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ - -e 
LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ - -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ - -e USER_RUNNER_ID=${user_runner_id} \ - -v $(pwd)/testing:/deckhouse/testing \ - -v $(pwd)/release.yaml:/deckhouse/release.yaml \ - -v ${TMP_DIR_PATH}:/tmp \ - -w /deckhouse \ - ${INSTALL_IMAGE_NAME} \ - bash /deckhouse/testing/cloud_layouts/script.sh run-test - - # - - name: Read connection string - if: ${{ failure() || cancelled() }} - id: check_stay_failed_cluster - uses: actions/github-script@v6.4.1 - env: - SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }} - SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }} - with: - # it sets `should_run` output var if e2e/failed/stay label - script: | - const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup'); - await e2e_cleanup.readConnectionScript({core, context, github}); - - - name: Label pr if e2e failed - if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }} - uses: actions-ecosystem/action-add-labels@v1 - with: - github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }} - number: ${{ needs.git_info.outputs.pr_number }} - labels: "e2e/cluster/failed" - - - name: Cleanup bootstrapped cluster - if: success() - id: cleanup_cluster - timeout-minutes: 60 - env: - PROVIDER: vCloudDirector - CRI: Containerd - LAYOUT: Standard - KUBERNETES_VERSION: "Automatic" - LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }} - LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}} - TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}} - PREFIX: ${{ steps.setup.outputs.dhctl-prefix}} - INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }} - DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }} - # - LAYOUT_VCD_PASSWORD: ${{ secrets.LAYOUT_VCD_PASSWORD }} - LAYOUT_VCD_USERNAME: ${{ secrets.LAYOUT_VCD_USERNAME }} - LAYOUT_STATIC_BASTION_IP: 80.249.129.56 - LAYOUT_VCD_SERVER: ${{ secrets.LAYOUT_VCD_SERVER }} - LAYOUT_VCD_ORG: ${{ 
secrets.LAYOUT_VCD_ORG }} - COMMENT_ID: ${{ inputs.comment_id }} - GITHUB_API_SERVER: ${{ github.api_url }} - REPOSITORY: ${{ github.repository }} - DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}} - GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - run: | - echo "Execute 'script.sh cleanup' via 'docker run', using environment: - INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME} - DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} - INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} - PREFIX=${PREFIX} - PROVIDER=${PROVIDER} - CRI=${CRI} - LAYOUT=${LAYOUT} - KUBERNETES_VERSION=${KUBERNETES_VERSION} - TMP_DIR_PATH=${TMP_DIR_PATH} - " - - ls -lh $(pwd)/testing - - dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - echo "DHCTL log file: $dhctl_log_file" - - user_runner_id=$(id -u):$(id -g) - echo "user_runner_id $user_runner_id" - - docker run --rm \ - -e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \ - -e PREFIX=${PREFIX} \ - -e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \ - -e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \ - -e KUBERNETES_VERSION=${KUBERNETES_VERSION} \ - -e CRI=${CRI} \ - -e PROVIDER=${PROVIDER:-not_provided} \ - -e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \ - -e LAYOUT=${LAYOUT:-not_provided} \ - -e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \ - -e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \ - -e LAYOUT_VCD_PASSWORD=${LAYOUT_VCD_PASSWORD:-not_provided} \ - -e LAYOUT_VCD_USERNAME=${LAYOUT_VCD_USERNAME:-not_provided} \ - -e LAYOUT_VCD_SERVER=${LAYOUT_VCD_SERVER:-not_provided} \ - -e LAYOUT_VCD_ORG=${LAYOUT_VCD_ORG:-not_provided} \ - -e LAYOUT_STATIC_BASTION_IP=80.249.129.56 \ - -e USER_RUNNER_ID=${user_runner_id} \ - -v $(pwd)/testing:/deckhouse/testing \ - -v $(pwd)/release.yaml:/deckhouse/release.yaml \ - -v ${TMP_DIR_PATH}:/tmp \ - -w /deckhouse \ - ${INSTALL_IMAGE_NAME} \ - bash /deckhouse/testing/cloud_layouts/script.sh cleanup - - # - - - name: Save dhctl state - id: save_failed_cluster_state - if: ${{ 
failure() }} - uses: actions/upload-artifact@v3.1.2 - with: - name: failed_cluster_state_vclouddirector_containerd_Automatic - path: | - ${{ steps.setup.outputs.tmp-dir-path}}/dhctl - ${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate - ${{ steps.setup.outputs.tmp-dir-path}}/logs - - - name: Save test results - if: ${{ steps.setup.outputs.dhctl-log-file }} - uses: actions/upload-artifact@v3.1.2 - with: - name: test_output_vclouddirector_containerd_Automatic - path: | - ${{ steps.setup.outputs.dhctl-log-file}}* - ${{ steps.setup.outputs.tmp-dir-path}}/logs - testing/cloud_layouts/ - !testing/cloud_layouts/**/sshkey - - - name: Cleanup temp directory - if: always() - env: - TMPPATH: ${{ steps.setup.outputs.tmppath}} - run: | - echo "Remove temporary directory '${TMPPATH}' ..." - if [[ -d "${TMPPATH}" && ${#TMPPATH} > 1 ]] ; then - rm -rf "${TMPPATH}" - else - echo Not a directory. - fi - if [ -n $USER_RUNNER_ID ]; then - echo "Fix temp directories owner..." - chown -R $USER_RUNNER_ID "$(pwd)/testing" || true - chown -R $USER_RUNNER_ID "/deckhouse/testing" || true - chown -R $USER_RUNNER_ID /tmp || true - else - echo "Fix temp directories permissions..." 
- chmod -f -R 777 "$(pwd)/testing" || true - chmod -f -R 777 "/deckhouse/testing" || true - chmod -f -R 777 /tmp || true - fi - # - - name: Update comment on finish - id: update_comment_on_finish - if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - env: - NEEDS_CONTEXT: ${{ toJSON(needs) }} - JOB_CONTEXT: ${{ toJSON(job) }} - STEPS_CONTEXT: ${{ toJSON(steps) }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const statusConfig = 'job,separate'; - const name = 'e2e: vCloudDirector, Containerd, Kubernetes Automatic'; - const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); - const jobContext = JSON.parse(process.env.JOB_CONTEXT); - const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); - let jobNames = null - if (process.env.JOB_NAMES) { - jobNames = JSON.parse(process.env.JOB_NAMES); - } - - core.info(`needsContext: ${JSON.stringify(needsContext)}`); - core.info(`jobContext: ${JSON.stringify(jobContext)}`); - core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); - core.info(`jobNames: ${JSON.stringify(jobNames)}`); - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); - # - # - - - last_comment: - name: Update comment on finish - needs: ["started_at","git_info","run_containerd_1_26","run_containerd_1_27","run_containerd_1_28","run_containerd_1_29","run_containerd_1_30","run_containerd_Automatic"] - if: ${{ always() }} - runs-on: ubuntu-latest - env: - JOB_NAMES: | - {"run_containerd_1_26":"e2e: vCloudDirector, Containerd, Kubernetes 1.26","run_containerd_1_27":"e2e: vCloudDirector, Containerd, Kubernetes 1.27","run_containerd_1_28":"e2e: vCloudDirector, Containerd, Kubernetes 1.28","run_containerd_1_29":"e2e: vCloudDirector, Containerd, Kubernetes 1.29","run_containerd_1_30":"e2e: 
vCloudDirector, Containerd, Kubernetes 1.30","run_containerd_Automatic":"e2e: vCloudDirector, Containerd, Kubernetes Automatic"} - steps: - - # - - name: Checkout sources - uses: actions/checkout@v3.5.2 - - # - # - - name: Update comment on finish - id: update_comment_on_finish - if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }} - env: - NEEDS_CONTEXT: ${{ toJSON(needs) }} - JOB_CONTEXT: ${{ toJSON(job) }} - STEPS_CONTEXT: ${{ toJSON(steps) }} - uses: actions/github-script@v6.4.1 - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - retries: 3 - script: | - const statusConfig = 'workflow,final,no-skipped,restore-separate'; - const name = 'e2e: vCloudDirector'; - const needsContext = JSON.parse(process.env.NEEDS_CONTEXT); - const jobContext = JSON.parse(process.env.JOB_CONTEXT); - const stepsContext = JSON.parse(process.env.STEPS_CONTEXT); - let jobNames = null - if (process.env.JOB_NAMES) { - jobNames = JSON.parse(process.env.JOB_NAMES); - } - - core.info(`needsContext: ${JSON.stringify(needsContext)}`); - core.info(`jobContext: ${JSON.stringify(jobContext)}`); - core.info(`stepsContext: ${JSON.stringify(stepsContext)}`); - core.info(`jobNames: ${JSON.stringify(jobNames)}`); - - const ci = require('./.github/scripts/js/ci'); - return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames}); - # - - - # - - name: Set commit status after e2e run - id: set_e2e_requirement_status - if: ${{ always() }} - uses: actions/github-script@v6.4.1 - env: - JOB_STATUS: ${{ job.status }} - STATUS_TARGET_COMMIT: ${{needs.git_info.outputs.github_sha}} - with: - github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}} - script: | - const e2eStatus = require('./.github/scripts/js/e2e-commit-status'); - - await e2eStatus.setStatusAfterE2eRun({github, context, core}); - # -# diff --git a/.github/workflows/e2e-vsphere.yml b/.github/workflows/e2e-vsphere.yml index 
fde1640465..1e4cf114b9 100644 --- a/.github/workflows/e2e-vsphere.yml +++ b/.github/workflows/e2e-vsphere.yml @@ -466,7 +466,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -937,7 +937,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1408,7 +1408,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1879,7 +1879,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2350,7 +2350,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2821,7 +2821,7 
@@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi diff --git a/.github/workflows/e2e-yandex-cloud.yml b/.github/workflows/e2e-yandex-cloud.yml index 94d78eaf32..9f58ad9b12 100644 --- a/.github/workflows/e2e-yandex-cloud.yml +++ b/.github/workflows/e2e-yandex-cloud.yml @@ -467,7 +467,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -942,7 +942,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1417,7 +1417,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -1892,7 +1892,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then 
bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2367,7 +2367,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi @@ -2842,7 +2842,7 @@ jobs: bastion_ip_file="" if [[ "${PROVIDER}" == "Static" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" - elif [[ "${PROVIDER}" == "vCloudDirector" ]] ; then + elif [[ "${PROVIDER}" == "VCD" ]] ; then bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}" fi From 15ce2c7c8145abffeabf5b9b8db2ad744bae85a2 Mon Sep 17 00:00:00 2001 From: borg-z Date: Mon, 5 Aug 2024 17:09:59 +0300 Subject: [PATCH 5/6] $RANDOM Signed-off-by: borg-z --- .github/test | 1 + 1 file changed, 1 insertion(+) create mode 100644 .github/test diff --git a/.github/test b/.github/test new file mode 100644 index 0000000000..3e7f390c22 --- /dev/null +++ b/.github/test @@ -0,0 +1 @@ +4240 From dc65af550bcde6fe31bf9a3c3b8d04b70905d9f0 Mon Sep 17 00:00:00 2001 From: borg-z Date: Mon, 5 Aug 2024 17:10:43 +0300 Subject: [PATCH 6/6] . Signed-off-by: borg-z --- .github/test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/test b/.github/test index 3e7f390c22..3107b0c7cb 100644 --- a/.github/test +++ b/.github/test @@ -1 +1 @@ -4240 +6346