diff --git a/common/.ansible-lint b/common/.ansible-lint new file mode 100644 index 00000000..0522976e --- /dev/null +++ b/common/.ansible-lint @@ -0,0 +1,20 @@ +# Vim filetype=yaml +--- +offline: false +skip_list: + - name[template] # Allow Jinja templating inside task and play names + - template-instead-of-copy # Templated files should use template instead of copy + - yaml[line-length] # Allow long lines + - yaml[indentation] # Forcing lists to be always indented by 2 chars is silly IMO + - var-naming[no-role-prefix] # This would be too much churn for very little gain + - no-changed-when + +# ansible-lint gh workflow cannot find ansible.cfg hence fails to import vault_utils role +exclude_paths: + - ./ansible/playbooks/vault/vault.yaml + - ./ansible/playbooks/iib-ci/iib-ci.yaml + - ./ansible/playbooks/k8s_secrets/k8s_secrets.yml + - ./ansible/playbooks/process_secrets/process_secrets.yml + - ./ansible/playbooks/write-token-kubeconfig/write-token-kubeconfig.yml + - ./ansible/playbooks/process_secrets/display_secrets_info.yml + - ./ansible/roles/vault_utils/tests/test.yml diff --git a/common/.github/dependabot.yml b/common/.github/dependabot.yml new file mode 100644 index 00000000..a175e666 --- /dev/null +++ b/common/.github/dependabot.yml @@ -0,0 +1,9 @@ +--- +version: 2 +updates: + # Check for updates to GitHub Actions every week + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + diff --git a/common/.github/linters/.gitleaks.toml b/common/.github/linters/.gitleaks.toml new file mode 100644 index 00000000..9ad74347 --- /dev/null +++ b/common/.github/linters/.gitleaks.toml @@ -0,0 +1,4 @@ +[whitelist] +# As of v4, gitleaks only matches against filename, not path in the +# files directive. Leaving content for backwards compatibility.
+files = [ ] diff --git a/common/.github/linters/.markdown-lint.yml b/common/.github/linters/.markdown-lint.yml new file mode 100644 index 00000000..a0bc47d1 --- /dev/null +++ b/common/.github/linters/.markdown-lint.yml @@ -0,0 +1,6 @@ +{ + "default": true, + "MD003": false, + "MD013": false, + "MD033": false +} \ No newline at end of file diff --git a/common/.github/workflows/linter.yml b/common/.github/workflows/linter.yml new file mode 100644 index 00000000..f82194ee --- /dev/null +++ b/common/.github/workflows/linter.yml @@ -0,0 +1,65 @@ +--- +name: Unit test common + +# +# Documentation: +# https://help.github.com/en/articles/workflow-syntax-for-github-actions +# + +############################# +# Start the job on all push # +############################# +on: [push, pull_request] + +############### +# Set the Job # +############### +jobs: + build: + # Name the Job + name: Unit common/ Code Base + # Set the agent to run on + runs-on: ubuntu-latest + + ################## + # Load all steps # + ################## + steps: + ########################## + # Checkout the code base # + ########################## + - name: Checkout Code + uses: actions/checkout@v4 + with: + # Full git history is needed to get a proper list of changed files within `super-linter` + fetch-depth: 0 + - name: Setup helm + uses: azure/setup-helm@v4 + with: + version: 'v3.14.0' + + + ################################ + # Run Linter against code base # + ################################ + # - name: Lint Code Base + # uses: github/super-linter@v4 + # env: + # VALIDATE_ALL_CODEBASE: false + # DEFAULT_BRANCH: main + # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Run make test + run: | + make test + + - name: Run make helmlint + run: | + make helmlint + + # For now disable this until we have a nice and simple process to update the schemas in our repo + # - name: Run make helm kubeconform + # run: | + # curl -L -O https://github.com/yannh/kubeconform/releases/download/v0.4.13/kubeconform-linux-amd64.tar.gz + # tar xf kubeconform-linux-amd64.tar.gz + # sudo mv -v kubeconform /usr/local/bin + # make kubeconform diff --git a/common/.github/workflows/superlinter.yml b/common/.github/workflows/superlinter.yml new file mode 100644 index 00000000..03b6fff9 --- /dev/null +++ b/common/.github/workflows/superlinter.yml @@ -0,0 +1,44 @@ +--- +name: Super linter + +on: [push, pull_request] + +jobs: + build: + # Name the Job + name: Super linter + # Set the agent to run on + runs-on: ubuntu-latest + + steps: + - name: Checkout Code + uses: actions/checkout@v4 + with: + # Full git history is needed to get a proper list of changed files within `super-linter` + fetch-depth: 0 + + ################################ + # Run Linter against code base # + ################################ + - name: Lint Code Base + uses: super-linter/super-linter/slim@v7 + env: + VALIDATE_ALL_CODEBASE: true + DEFAULT_BRANCH: main + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # These are the validations we disable at the moment + VALIDATE_ANSIBLE: false + VALIDATE_BASH: false + VALIDATE_CHECKOV: false + VALIDATE_JSCPD: false + VALIDATE_JSON_PRETTIER: false + VALIDATE_MARKDOWN_PRETTIER: false + VALIDATE_KUBERNETES_KUBECONFORM: false + VALIDATE_PYTHON_PYLINT: false + VALIDATE_SHELL_SHFMT: false + VALIDATE_YAML: false + VALIDATE_YAML_PRETTIER: false + # VALIDATE_DOCKERFILE_HADOLINT: false + # VALIDATE_MARKDOWN: false + # VALIDATE_NATURAL_LANGUAGE: false + # VALIDATE_TEKTON: false diff --git a/common/.gitignore b/common/.gitignore new file mode 100644 index
00000000..454efc9e --- /dev/null +++ b/common/.gitignore @@ -0,0 +1,13 @@ +__pycache__/ +*.py[cod] +*~ +*.swp +*.swo +values-secret.yaml +.*.expected.yaml +.vscode +pattern-vault.init +pattern-vault.init.bak +super-linter.log +golang-external-secrets/Chart.lock +hashicorp-vault/Chart.lock diff --git a/common/.gitleaks.toml b/common/.gitleaks.toml new file mode 120000 index 00000000..c05303b9 --- /dev/null +++ b/common/.gitleaks.toml @@ -0,0 +1 @@ +.github/linters/.gitleaks.toml \ No newline at end of file diff --git a/common/Changes.md b/common/Changes.md new file mode 100644 index 00000000..c12f1755 --- /dev/null +++ b/common/Changes.md @@ -0,0 +1,153 @@ +# Changes + +## Sep 24, 2024 + +* Ansible has been moved out of the common code tree; you must use a clustergroup chart that is >= 0.9.1 + +## Sep 6, 2024 + +* Most charts have been removed from the tree. To get the charts, you now have to point to them + +## Sep 25, 2023 + +* Upgraded ESO to v0.9.5 + +## Aug 17, 2023 + +* Introduced support for multisource applications via .chart + .chartVersion + +## Jul 8, 2023 + +* Introduced a default of 20 for sync failure retries in argo applications (global override via global.options.applicationRetryLimit + and per-app override via .syncPolicy) + +## May 22, 2023 + +* Upgraded ESO to 0.8.2 +* *Important* we now use the newly blessed sso config for argo. This means that gitops < 1.8 is *unsupported* + +## May 18, 2023 + +* Introduce an EXTRA_HELM_OPTS env variable that will be passed to the helm invocations + +## April 21, 2023 + +* Added labels and annotation support to namespaces.yaml template + +## Apr 11, 2023 + +* Apply the ACM ocp-gitops-policy everywhere but the hub + +## Apr 7, 2023 + +* Moved to gitops-1.8 channel by default (stable is unmaintained and will be dropped starting with ocp-4.13) + +## March 20, 2023 + +* Upgraded ESO to 0.8.1 + +## February 9, 2023 + +* Add support for /values-.yaml and for /values--.yaml + +## January 29, 2023 + +* Stop extracting the HUB's CA via an imperative job running on the imported cluster. + Just use ACM to push the HUB's CA out to the managed clusters. + +## January 23, 2023 + +* Add initial support for running ESO on ACM-imported clusters + +## January 18, 2023 + +* Add validate-schema target + +## January 13, 2023 + +* Simplify the secrets paths when using argo hosted sites + +## January 10, 2023 + +* vaultPrefixes is now optional in the v2 secret spec and defaults to ["hub"] + +## December 9, 2022 + +* Dropped insecureUnsealVaultInsideCluster (and file_unseal) entirely. Now + vault is always unsealed via a cronjob in the cluster. It is recommended to + store the imperative/vaultkeys secret offline securely and then delete it. + +## December 8, 2022 + +* Removed the legacy installation targets: + `deploy upgrade legacy-deploy legacy-upgrade`. + Patterns must now use the operator-based installation + +## November 29, 2022 + +* Upgraded vault-helm to 0.23.0 +* Enable vault-ssl by default + +## November 22, 2022 + +* Implemented a new format for the values-secret.yaml. An example can be found in the examples/ folder +* Now the order of values-secret file lookup is the following: + 1. ~/values-secret-.yaml + 2. ~/values-secret.yaml + 3. /values-secret.yaml.template +* Add support for ansible vault encrypted values-secret files. You can now encrypt your values-secret file + at rest with `ansible-vault encrypt ~/values-secret.yaml`.
When running `make load-secrets`, if an encrypted + file is encountered, the user will be prompted automatically for the password to decrypt it. + +## November 6, 2022 + +* Add support for /values--.yaml (e.g. /values-AWS-group-one.yaml) + +## October 28, 2022 + +* Updated vault helm chart to v0.22.1 and vault containers to 1.12.0 + +## October 25, 2022 + +* Updated External Secrets Operator to v0.6.0 +* Moved to -UBI based ESO containers + +## October 13, 2022 + +* Added global.clusterVersion as a new helm variable which represents the OCP + Major.Minor cluster version. By default now a user can add a + values--.yaml file to have specific cluster version + overrides (e.g. values-4.10-hub.yaml). Will need Validated Patterns Operator >= 0.0.6 + when deploying with the operator. Note: When using the ArgoCD Hub and spoke model, + you cannot have spokes with a different version of OCP than the hub. + +## October 4, 2022 + +* Extended the values-secret.yaml file to support multiple vault paths and re-wrote + the push_secrets feature as a Python module plugin. This requires the following line + in a pattern's ansible.cfg's '[defaults]' stanza: + + `library=~/.ansible/plugins/modules:./ansible/plugins/modules:./common/ansible/plugins/modules:/usr/share/ansible/plugins/modules` + +## October 3, 2022 + +* Restore the ability to install a non-default site: `make TARGET_SITE=mysite install` +* Revised tests (new output and filenames, requires adding new result files to Git) +* ACM 2.6 required for ACM-based managed sites +* Introduced global.clusterDomain template variable (without the `apps.` prefix) +* Removed the ability to send specific charts to another cluster; use hosted argo sites instead +* Added the ability to have the hub host `values-{site}.yaml` for spoke clusters. + + The following example would deploy the namespaces, subscriptions, and + applications defined in `values-group-one.yaml` to the `perth` cluster + directly from ArgoCD on the hub. + + ```yaml + managedClusterGroups: + - name: group-one + hostedArgoSites: + - name: perth + domain: perth1.beekhof.net + bearerKeyPath: secret/data/hub/cluster_perth + caKeyPath: secret/data/hub/cluster_perth_ca + ``` diff --git a/common/LICENSE b/common/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/common/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/common/Makefile b/common/Makefile new file mode 100644 index 00000000..ce4a4a91 --- /dev/null +++ b/common/Makefile @@ -0,0 +1,265 @@ +NAME ?= $(shell basename "`pwd`") + +ifneq ($(origin TARGET_SITE), undefined) + TARGET_SITE_OPT=--set main.clusterGroupName=$(TARGET_SITE) +endif + +# This variable can be set in order to pass additional helm arguments from +# the command line. I.e.
we can set things without having to tweak values files +EXTRA_HELM_OPTS ?= + +# INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:394248 +# or +# INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:394248,registry-proxy.engineering.redhat.com/rh-osbs/iib:394249 +INDEX_IMAGES ?= + +TARGET_ORIGIN ?= origin +# This is to ensure that whether we start with a git@ or https:// URL, we end up with an https:// URL +# This is because we expect to use tokens for repo authentication as opposed to SSH keys +TARGET_REPO=$(shell git ls-remote --get-url --symref $(TARGET_ORIGIN) | sed -e 's/.*URL:[[:space:]]*//' -e 's%^git@%%' -e 's%^https://%%' -e 's%:%/%' -e 's%^%https://%') +# git branch --show-current is also available as of git 2.22, but we will use this for compatibility +TARGET_BRANCH=$(shell git rev-parse --abbrev-ref HEAD) + +UUID_FILE ?= ~/.config/validated-patterns/pattern-uuid +UUID_HELM_OPTS ?= + +# --set values always take precedence over the contents of -f +ifneq ("$(wildcard $(UUID_FILE))","") + UUID := $(shell cat $(UUID_FILE)) + UUID_HELM_OPTS := --set main.analyticsUUID=$(UUID) +endif + +# Set the secret name *and* its namespace when deploying from private repositories +# The format of said secret is documented here: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#repositories +TOKEN_SECRET ?= +TOKEN_NAMESPACE ?= + +ifeq ($(TOKEN_SECRET),) + HELM_OPTS=-f values-global.yaml --set main.git.repoURL="$(TARGET_REPO)" --set main.git.revision=$(TARGET_BRANCH) $(TARGET_SITE_OPT) $(UUID_HELM_OPTS) $(EXTRA_HELM_OPTS) +else + # When we are working with a private repository we do not escape the git URL as it might be using an ssh secret which does not use https:// + TARGET_CLEAN_REPO=$(shell git ls-remote --get-url --symref $(TARGET_ORIGIN)) + HELM_OPTS=-f values-global.yaml --set main.tokenSecret=$(TOKEN_SECRET) --set main.tokenSecretNamespace=$(TOKEN_NAMESPACE) --set main.git.repoURL="$(TARGET_CLEAN_REPO)" --set main.git.revision=$(TARGET_BRANCH) $(TARGET_SITE_OPT) $(UUID_HELM_OPTS) $(EXTRA_HELM_OPTS) +endif + +# Helm does the right thing and fetches all the tags and detects the newest one +PATTERN_INSTALL_CHART ?= oci://quay.io/hybridcloudpatterns/pattern-install + +##@ Pattern Common Tasks + +.PHONY: help +help: ## This help message + @echo "Pattern: $(NAME)" + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^(\s|[a-zA-Z_0-9-])+:.*?##/ { printf " \033[36m%-35s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +# Makefiles in the individual patterns should call these targets explicitly +# e.g. 
from industrial-edge: make -f common/Makefile show +.PHONY: show +show: ## show the starting template without installing it + helm template $(PATTERN_INSTALL_CHART) --name-template $(NAME) $(HELM_OPTS) + +preview-all: ## (EXPERIMENTAL) Previews all applications on hub and managed clusters + @echo "NOTE: This is just a tentative approximation of rendering all hub and managed clusters templates" + @common/scripts/preview-all.sh $(TARGET_REPO) $(TARGET_BRANCH) + +preview-%: + $(eval CLUSTERGROUP ?= $(shell yq ".main.clusterGroupName" values-global.yaml)) + @common/scripts/preview.sh $(CLUSTERGROUP) $* $(TARGET_REPO) $(TARGET_BRANCH) + +.PHONY: operator-deploy +operator-deploy operator-upgrade: validate-prereq validate-origin validate-cluster ## runs helm install + @set -e -o pipefail + # Retry five times because the CRD might not be fully installed yet + for i in {1..5}; do \ + helm template --include-crds --name-template $(NAME) $(PATTERN_INSTALL_CHART) $(HELM_OPTS) | oc apply -f- && break || sleep 10; \ + done + +.PHONY: uninstall +uninstall: ## runs helm uninstall + $(eval CSV := $(shell oc get subscriptions -n openshift-operators openshift-gitops-operator -ojsonpath={.status.currentCSV})) + helm uninstall $(NAME) + @oc delete csv -n openshift-operators $(CSV) + +.PHONY: load-secrets +load-secrets: ## loads the secrets into the backend determined by values-global setting + common/scripts/process-secrets.sh $(NAME) + +.PHONY: legacy-load-secrets +legacy-load-secrets: ## loads the secrets into vault (only) + common/scripts/vault-utils.sh push_secrets $(NAME) + +.PHONY: secrets-backend-vault +secrets-backend-vault: ## Edits values files to use default Vault+ESO secrets config + common/scripts/set-secret-backend.sh vault + common/scripts/manage-secret-app.sh vault present + common/scripts/manage-secret-app.sh golang-external-secrets present + common/scripts/manage-secret-namespace.sh validated-patterns-secrets absent + @git diff --exit-code || echo "Secrets backend set to vault, please review changes, commit, and push to activate in the pattern" + +.PHONY: secrets-backend-kubernetes +secrets-backend-kubernetes: ## Edits values file to use Kubernetes+ESO secrets config + common/scripts/set-secret-backend.sh kubernetes + common/scripts/manage-secret-namespace.sh validated-patterns-secrets present + common/scripts/manage-secret-app.sh vault absent + common/scripts/manage-secret-app.sh golang-external-secrets present + @git diff --exit-code || echo "Secrets backend set to kubernetes, please review changes, commit, and push to activate in the pattern" + +.PHONY: secrets-backend-none +secrets-backend-none: ## Edits values files to remove secrets manager + ESO + common/scripts/set-secret-backend.sh none + common/scripts/manage-secret-app.sh vault absent + common/scripts/manage-secret-app.sh golang-external-secrets absent + common/scripts/manage-secret-namespace.sh validated-patterns-secrets absent + @git diff --exit-code || echo "Secrets backend set to none, please review changes, commit, and push to activate in the pattern" + +.PHONY: load-iib +load-iib: ## CI target to install Index Image Bundles + @set -e; if [ x$(INDEX_IMAGES) != x ]; then \ + ansible-playbook rhvp.cluster_utils.iib-ci; \ + else \ + echo "No INDEX_IMAGES defined. 
Bailing out"; \ + exit 1; \ + fi + +.PHONY: token-kubeconfig +token-kubeconfig: ## Create a local ~/.kube/config with password (not usually needed) + common/scripts/write-token-kubeconfig.sh + +##@ Validation Tasks + +# We only check the remote ssh git branch's existance if we're not running inside a container +# as getting ssh auth working inside a container seems a bit brittle +.PHONY: validate-origin +validate-origin: ## verify the git origin is available + @echo "Checking repository:" + @echo -n " $(TARGET_REPO) - branch '$(TARGET_BRANCH)': " + @git ls-remote --exit-code --heads $(TARGET_REPO) $(TARGET_BRANCH) >/dev/null &&\ + echo "OK" || (echo "NOT FOUND"; exit 1) + +.PHONY: validate-cluster +validate-cluster: ## Do some cluster validations before installing + @echo "Checking cluster:" + @echo -n " cluster-info: " + @oc cluster-info >/dev/null && echo "OK" || (echo "Error"; exit 1) + @echo -n " storageclass: " + @if [ `oc get storageclass -o go-template='{{printf "%d\n" (len .items)}}'` -eq 0 ]; then\ + echo "None Found"; exit 1;\ + else\ + echo "OK";\ + fi + + +.PHONY: validate-schema +validate-schema: ## validates values files against schema in common/clustergroup + $(eval VAL_PARAMS := $(shell for i in ./values-*.yaml; do echo -n "$${i} "; done)) + @echo -n "Validating clustergroup schema of: " + @set -e; for i in $(VAL_PARAMS); do echo -n " $$i"; helm template common/clustergroup $(HELM_OPTS) -f "$${i}" >/dev/null; done + @echo + +.PHONY: validate-prereq +validate-prereq: ## verify pre-requisites + @if [ ! -f /run/.containerenv ]; then\ + echo "Checking prerequisites:";\ + for t in $(EXECUTABLES); do if ! which $$t > /dev/null 2>&1; then echo "No $$t in PATH"; exit 1; fi; done;\ + echo " Check for '$(EXECUTABLES)': OK";\ + echo -n " Check for python-kubernetes: ";\ + if ! ansible -m ansible.builtin.command -a "{{ ansible_python_interpreter }} -c 'import kubernetes'" localhost > /dev/null 2>&1; then echo "Not found"; exit 1; fi;\ + echo "OK";\ + echo -n " Check for kubernetes.core collection: ";\ + if ! ansible-galaxy collection list | grep kubernetes.core > /dev/null 2>&1; then echo "Not found"; exit 1; fi;\ + echo "OK";\ + else\ + if [ -f values-global.yaml ]; then\ + OUT=`yq -r '.main.multiSourceConfig.enabled // (.main.multiSourceConfig.enabled = "false")' values-global.yaml`;\ + if [ "$${OUT,,}" = "false" ]; then\ + echo "You must set \".main.multiSourceConfig.enabled: true\" in your 'values-global.yaml' file";\ + echo "because your common subfolder is the slimmed down version with no helm charts in it";\ + exit 1;\ + fi;\ + fi;\ + fi + +.PHONY: argo-healthcheck +argo-healthcheck: ## Checks if all argo applications are synced + @echo "Checking argo applications" + $(eval APPS := $(shell oc get applications -A -o jsonpath='{range .items[*]}{@.metadata.namespace}{","}{@.metadata.name}{"\n"}{end}')) + @NOTOK=0; \ + for i in $(APPS); do\ + n=`echo "$${i}" | cut -f1 -d,`;\ + a=`echo "$${i}" | cut -f2 -d,`;\ + STATUS=`oc get -n "$${n}" application/"$${a}" -o jsonpath='{.status.sync.status}'`;\ + if [[ $$STATUS != "Synced" ]]; then\ + NOTOK=$$(( $${NOTOK} + 1));\ + fi;\ + HEALTH=`oc get -n "$${n}" application/"$${a}" -o jsonpath='{.status.health.status}'`;\ + if [[ $$HEALTH != "Healthy" ]]; then\ + NOTOK=$$(( $${NOTOK} + 1));\ + fi;\ + echo "$${n} $${a} -> Sync: $${STATUS} - Health: $${HEALTH}";\ + done;\ + if [ $${NOTOK} -gt 0 ]; then\ + echo "Some applications are not synced or are unhealthy";\ + exit 1;\ + fi + + +##@ Test and Linters Tasks + +CHARTS=$(shell find . 
-type f -iname 'Chart.yaml' -exec dirname "{}" \; | grep -v examples | sed -e 's/.\///') +# Section related to tests and linting +TEST_OPTS= -f values-global.yaml \ + --set global.repoURL="https://github.com/pattern-clone/mypattern" \ + --set main.git.repoURL="https://github.com/pattern-clone/mypattern" \ + --set main.git.revision=main --set global.pattern="mypattern" \ + --set global.namespace="pattern-namespace" \ + --set global.hubClusterDomain=apps.hub.example.com \ + --set global.localClusterDomain=apps.region.example.com \ + --set global.clusterDomain=region.example.com \ + --set global.clusterVersion="4.12" \ + --set global.clusterPlatform=aws \ + --set "clusterGroup.imperative.jobs[0].name"="test" \ + --set "clusterGroup.imperative.jobs[0].playbook"="rhvp.cluster_utils.test" +PATTERN_OPTS=-f common/examples/values-example.yaml +EXECUTABLES=git helm oc ansible + +.PHONY: test +test: ## run helm tests + @for t in $(CHARTS); do common/scripts/test.sh $$t all "$(TEST_OPTS)"; if [ $$? != 0 ]; then exit 1; fi; done + +.PHONY: helmlint +helmlint: ## run helm lint + @for t in $(CHARTS); do common/scripts/lint.sh $$t $(TEST_OPTS); if [ $$? != 0 ]; then exit 1; fi; done + +API_URL ?= https://raw.githubusercontent.com/hybrid-cloud-patterns/ocp-schemas/main/openshift/4.10/ +KUBECONFORM_SKIP ?= -skip 'CustomResourceDefinition,ClusterIssuer,CertManager,Certificate,ArgoCD' + +# We need to skip 'CustomResourceDefinition' as openapi2jsonschema seems to be unable to generate them at the moment +.PHONY: kubeconform +kubeconform: ## run helm kubeconform + @for t in $(CHARTS); do helm template $(TEST_OPTS) $(PATTERN_OPTS) $$t | kubeconform -strict $(KUBECONFORM_SKIP) -verbose -schema-location $(API_URL); if [ $$? != 0 ]; then exit 1; fi; done + +.PHONY: super-linter +super-linter: ## Runs super linter locally + rm -rf .mypy_cache + podman run -e RUN_LOCAL=true -e USE_FIND_ALGORITHM=true \ + -e VALIDATE_ANSIBLE=false \ + -e VALIDATE_BASH=false \ + -e VALIDATE_CHECKOV=false \ + -e VALIDATE_DOCKERFILE_HADOLINT=false \ + -e VALIDATE_JSCPD=false \ + -e VALIDATE_JSON_PRETTIER=false \ + -e VALIDATE_MARKDOWN_PRETTIER=false \ + -e VALIDATE_KUBERNETES_KUBECONFORM=false \ + -e VALIDATE_PYTHON_PYLINT=false \ + -e VALIDATE_SHELL_SHFMT=false \ + -e VALIDATE_TEKTON=false \ + -e VALIDATE_YAML=false \ + -e VALIDATE_YAML_PRETTIER=false \ + $(DISABLE_LINTERS) \ + -v $(PWD):/tmp/lint:rw,z \ + -w /tmp/lint \ + ghcr.io/super-linter/super-linter:slim-v7 + +.PHONY: deploy upgrade legacy-deploy legacy-upgrade +deploy upgrade legacy-deploy legacy-upgrade: + @echo "UNSUPPORTED TARGET: please switch to 'operator-deploy'"; exit 1 diff --git a/common/README.md b/common/README.md new file mode 100644 index 00000000..41223529 --- /dev/null +++ b/common/README.md @@ -0,0 +1,51 @@ +# Validated Patterns common/ repository + +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) + +## Note + +This is the `main` branch of common and it assumes that the pattern is fully +multisource (meaning that any charts used from VP are actually referenced from +either a helm chart repository or a quay repository). I.e. there are no helm +charts contained in this branch of common, nor any ansible code. + +The helm charts now live in separate repositories under the VP +[organization](https://github.com/validatedpatterns) on GitHub.
The repositories are: + +- clustergroup-chart +- pattern-install-chart +- hashicorp-vault-chart +- golang-external-secrets-chart +- acm-chart +- letsencrypt-chart + +The ansible bits live in this [repository](https://github.com/validatedpatterns/rhvp.cluster_utils) + +In order to be able to use this "slimmed-down" main branch of common you *must* +use a 0.9.* clustergroup-chart. Add the following to your `values-global.yaml`: + +```yaml +main: + multiSourceConfig: + enabled: true + clusterGroupChartVersion: 0.9.* +``` + +## Start Here + +This repository is never used standalone. It is usually imported into each pattern as a subtree. +In order to import common/ for the very first time you can use +`https://github.com/validatedpatterns/multicloud-gitops/blob/main/common/scripts/make_common_subtree.sh` + +In order to update your common subtree inside your pattern repository you can either use +`https://github.com/validatedpatterns/utilities/blob/main/scripts/update-common-everywhere.sh` or +do it manually by doing the following: + +```sh +git remote add -f upstream-common https://github.com/validatedpatterns/common.git +git merge -s subtree -Xtheirs -Xsubtree=common upstream-common/main +``` + +## Secrets + +There are two different secret formats parsed by the ansible bits. Both are documented [here](https://github.com/validatedpatterns/common/tree/main/ansible/roles/vault_utils/README.md) diff --git a/common/examples/blank/Chart.yaml b/common/examples/blank/Chart.yaml new file mode 100644 index 00000000..c552610d --- /dev/null +++ b/common/examples/blank/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +description: An empty Helm chart +keywords: +- pattern +name: blank +version: 0.0.1 diff --git a/common/examples/blank/templates/manifest.yaml b/common/examples/blank/templates/manifest.yaml new file mode 100644 index 00000000..3f160b02 --- /dev/null +++ b/common/examples/blank/templates/manifest.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: example diff --git a/common/examples/blank/values.yaml b/common/examples/blank/values.yaml new file mode 100644 index 00000000..35e4a6f4 --- /dev/null +++ b/common/examples/blank/values.yaml @@ -0,0 +1,2 @@ +tree: + of: "values" diff --git a/common/examples/industrial-edge-factory.yaml b/common/examples/industrial-edge-factory.yaml new file mode 100644 index 00000000..c60d0960 --- /dev/null +++ b/common/examples/industrial-edge-factory.yaml @@ -0,0 +1,112 @@ +clusterGroup: + name: factory + isHubCluster: false + + namespaces: + - manuela-stormshift-line-dashboard + - manuela-stormshift-machine-sensor + - manuela-stormshift-messaging + - manuela-factory-ml-workspace + + operatorgroupExcludes: + - manuela-factory-ml-workspace + + subscriptions: + - name: opendatahub-operator + channel: stable + source: community-operators + + - name: seldon-operator + namespace: manuela-stormshift-messaging + channel: stable + source: community-operators + + - name: amq-streams + namespace: manuela-stormshift-messaging + channel: stable + + - name: amq-broker-rhel8 + namespace: manuela-stormshift-messaging + channel: 7.x + + - name: red-hat-camel-k + namespace: manuela-stormshift-messaging + channel: stable + + projects: + - factory + + argoCD: + configManagementPlugins: + - name: helm-with-kustomize + image: quay.io/hybridcloudpatterns/utility-container:latest + pluginArgs: + - '--loglevel=debug' + pluginConfig: | + apiVersion: argoproj.io/v1alpha1 + kind: ConfigManagementPlugin + metadata: + name: helm-with-kustomize + spec: + preserveFileMode:
true + init: + command: ["/bin/sh", "-c"] + args: ["helm dependency build"] + generate: + command: ["/bin/bash", "-c"] + args: ["helm template . --name-template ${ARGOCD_APP_NAME:0:52} + -f $(git rev-parse --show-toplevel)/values-global.yaml + -f $(git rev-parse --show-toplevel)/values-{{ .Values.clusterGroup.name }}.yaml + --set global.repoURL=$ARGOCD_APP_SOURCE_REPO_URL + --set global.targetRevision=$ARGOCD_APP_SOURCE_TARGET_REVISION + --set global.namespace=$ARGOCD_APP_NAMESPACE + --set global.pattern={{ .Values.global.pattern }} + --set global.clusterDomain={{ .Values.global.clusterDomain }} + --set global.hubClusterDomain={{ .Values.global.hubClusterDomain }} + --set global.localClusterDomain={{ coalesce .Values.global.localClusterDomain .Values.global.hubClusterDomain }} + --set clusterGroup.name={{ .Values.clusterGroup.name }} + --post-renderer ./kustomize"] + + applications: + - name: stormshift + project: factory + path: charts/factory/manuela-stormshift + plugin: + name: helm-with-kustomize + + - name: odh + namespace: manuela-factory-ml-workspace + project: factory + path: charts/datacenter/opendatahub + +# +# To have apps in multiple flavors, use namespaces and use helm overrides as appropriate +# +# - name: pipelines +# namespace: production +# project: datacenter +# path: applications/pipeline +# repoURL: https://github.com/you/applications.git +# targetRevision: stable +# overrides: +# - name: myparam +# value: myparam +# +# - name: pipelines +# namespace: staging +# project: datacenter +# path: applications/pipeline +# repoURL: https://github.com/you/applications.git +# targetRevision: main +# +# Additional applications +# Be sure to include additional resources your apps will require +# +X machines +# +Y RAM +# +Z CPU +# - name: vendor-app +# namespace: default +# project: vendor +# path: path/to/myapp +# repoURL: https://github.com/vendor/applications.git +# targetRevision: main diff --git a/common/examples/industrial-edge-hub.yaml b/common/examples/industrial-edge-hub.yaml new file mode 100644 index 00000000..e48c4013 --- /dev/null +++ b/common/examples/industrial-edge-hub.yaml @@ -0,0 +1,241 @@ +clusterGroup: + name: datacenter + isHubCluster: true + + namespaces: + - golang-external-secrets + - external-secrets + - open-cluster-management + - manuela-ml-workspace + - manuela-tst-all + - manuela-ci + - manuela-data-lake + - staging + - vault + + operatorgroupExcludes: + - manuela-ml-workspace + + subscriptions: + acm: + name: advanced-cluster-management + namespace: open-cluster-management + channel: release-2.6 + + amqbroker-prod: + name: amq-broker-rhel8 + namespace: manuela-tst-all + channel: 7.x + + amqstreams-prod-dev: + name: amq-streams + namespaces: + - manuela-data-lake + - manuela-tst-all + channel: stable + + camelk-prod-dev: + name: red-hat-camel-k + namespaces: + - manuela-data-lake + - manuela-tst-all + channel: stable + + seldon-prod-dev: + name: seldon-operator + namespaces: + - manuela-ml-workspace + - manuela-tst-all + channel: stable + source: community-operators + + pipelines: + name: openshift-pipelines-operator-rh + channel: latest + source: redhat-operators + + odh: + name: opendatahub-operator + channel: stable + source: community-operators + + projects: + - datacenter + - production-datalake + - golang-external-secrets + - vault + + argoCD: + configManagementPlugins: + - name: helm-with-kustomize + image: quay.io/hybridcloudpatterns/utility-container:latest + pluginArgs: + - '--loglevel=debug' + pluginConfig: | + apiVersion: argoproj.io/v1alpha1 
+ kind: ConfigManagementPlugin + metadata: + name: helm-with-kustomize + spec: + preserveFileMode: true + init: + command: ["/bin/sh", "-c"] + args: ["helm dependency build"] + generate: + command: ["/bin/bash", "-c"] + args: ["helm template . --name-template ${ARGOCD_APP_NAME:0:52} + -f $(git rev-parse --show-toplevel)/values-global.yaml + -f $(git rev-parse --show-toplevel)/values-{{ .Values.clusterGroup.name }}.yaml + --set global.repoURL=$ARGOCD_APP_SOURCE_REPO_URL + --set global.targetRevision=$ARGOCD_APP_SOURCE_TARGET_REVISION + --set global.namespace=$ARGOCD_APP_NAMESPACE + --set global.pattern={{ .Values.global.pattern }} + --set global.clusterDomain={{ .Values.global.clusterDomain }} + --set global.hubClusterDomain={{ .Values.global.hubClusterDomain }} + --set global.localClusterDomain={{ coalesce .Values.global.localClusterDomain .Values.global.hubClusterDomain }} + --set clusterGroup.name={{ .Values.clusterGroup.name }} + --post-renderer ./kustomize"] + + applications: + acm: + name: acm + namespace: open-cluster-management + project: datacenter + path: common/acm + ignoreDifferences: + - group: internal.open-cluster-management.io + kind: ManagedClusterInfo + jsonPointers: + - /spec/loggingCA + + odh: + name: odh + namespace: manuela-ml-workspace + project: datacenter + path: charts/datacenter/opendatahub + + pipelines: + name: pipelines + namespace: manuela-ci + project: datacenter + path: charts/datacenter/pipelines + + production-data-lake: + name: production-data-lake + namespace: manuela-data-lake + project: production-datalake + path: charts/datacenter/manuela-data-lake + ignoreDifferences: + - group: apps + kind: Deployment + jsonPointers: + - /spec/replicas + - group: route.openshift.io + kind: Route + jsonPointers: + - /status + - group: image.openshift.io + kind: ImageStream + jsonPointers: + - /spec/tags + - group: apps.openshift.io + kind: DeploymentConfig + jsonPointers: + - /spec/template/spec/containers/0/image + + test: + name: manuela-test + namespace: manuela-tst-all + project: datacenter + path: charts/datacenter/manuela-tst + plugin: + name: helm-with-kustomize + + vault: + name: vault + namespace: vault + project: datacenter + chart: vault + repoURL: https://helm.releases.hashicorp.com + targetRevision: v0.20.1 + overrides: + - name: global.openshift + value: "true" + - name: injector.enabled + value: "false" + - name: ui.enabled + value: "true" + - name: ui.serviceType + value: LoadBalancer + - name: server.route.enabled + value: "true" + - name: server.route.host + value: null + - name: server.route.tls.termination + value: edge + - name: server.image.repository + value: "registry.connect.redhat.com/hashicorp/vault" + - name: server.image.tag + value: "1.10.3-ubi" + + secrets-operator: + name: golang-external-secrets + namespace: golang-external-secrets + project: golang-external-secrets + path: common/golang-external-secrets + + secrets: + name: external-secrets + namespace: external-secrets + project: golang-external-secrets + path: charts/datacenter/external-secrets + +# To have apps in multiple flavors, use namespaces and use helm overrides as appropriate +# +# - name: pipelines +# namespace: production +# project: datacenter +# path: applications/pipeline +# repoURL: https://github.com/you/applications.git +# targetRevision: stable +# overrides: +# - name: myparam +# value: myparam +# +# - name: pipelines +# namespace: staging +# project: datacenter +# path: applications/pipeline +# repoURL: https://github.com/you/applications.git +# targetRevision: 
main +# +# Additional applications +# Be sure to include additional resources your apps will require +# +X machines +# +Y RAM +# +Z CPU +# - name: vendor-app +# namespace: default +# project: vendor +# path: path/to/myapp +# repoURL: https://github.com/vendor/applications.git +# targetRevision: main + + managedClusterGroups: + factory: + name: factory + # repoURL: https://github.com/dagger-refuse-cool/manuela-factory.git + # targetRevision: main + helmOverrides: + # Values must be strings! + - name: clusterGroup.isHubCluster + value: "false" + clusterSelector: + matchLabels: + clusterGroup: factory + matchExpressions: + - key: vendor + operator: In + values: + - OpenShift + diff --git a/common/examples/kustomize-renderer/Chart.yaml b/common/examples/kustomize-renderer/Chart.yaml new file mode 100644 index 00000000..88a786c9 --- /dev/null +++ b/common/examples/kustomize-renderer/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +description: A Helm chart to demonstrate how to use it with kustomize +keywords: +- pattern +name: example +version: 0.0.1 diff --git a/common/examples/kustomize-renderer/environment.yaml b/common/examples/kustomize-renderer/environment.yaml new file mode 100644 index 00000000..de4c48a9 --- /dev/null +++ b/common/examples/kustomize-renderer/environment.yaml @@ -0,0 +1,34 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: environment +data: + IMAGE_PROVIDER: {{ .Values.global.imageregistry.hostname }} + IMAGE_ACCOUNT: {{ .Values.global.imageregistry.account }} + GIT_EMAIL: {{ .Values.global.git.email }} + GIT_DEV_REPO_URL: https://{{ .Values.global.git.hostname }}/{{ .Values.global.git.account }}/manuela-dev.git + GIT_DEV_REPO_REVISION: {{ .Values.global.git.dev_revision }} + GIT_OPS_REPO_TEST_URL: {{ .Values.global.repoURL }} + GIT_OPS_REPO_TEST_REVISION: {{ .Values.global.targetRevision }} + GIT_OPS_REPO_PROD_URL: {{ .Values.global.repoURL }} + GIT_OPS_REPO_PROD_REVISION: {{ .Values.global.targetRevision }} + IOT_CONSUMER_IMAGE: iot-consumer + IOT_CONSUMER_YAML_PATH: images.(name==messaging).newTag + IOT_CONSUMER_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml + IOT_CONSUMER_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/messaging/kustomization.yaml + IOT_CONSUMER_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/messaging/messaging-is.yaml + IOT_FRONTEND_IMAGE: iot-frontend + IOT_FRONTEND_YAML_PATH: images.(name==line-dashboard).newTag + IOT_FRONTEND_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml + IOT_FRONTEND_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/line-dashboard/kustomization.yaml + IOT_FRONTEND_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/line-dashboard/line-dashboard-is.yaml + IOT_SWSENSOR_IMAGE: iot-software-sensor + IOT_SWSENSOR_YAML_PATH: images.(name==machine-sensor).newTag + IOT_SWSENSOR_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml + IOT_SWSENSOR_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/machine-sensor/kustomization.yaml + IOT_SWSENSOR_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/machine-sensor/machine-sensor-is.yaml + IOT_ANOMALY_IMAGE: iot-anomaly-detection + IOT_ANOMALY_YAML_PATH: images.(name==anomaly-detection).newTag + IOT_ANOMALY_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml + IOT_ANOMALY_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/anomaly-detection/kustomization.yaml + IOT_ANOMALY_PROD_IMAGESTREAM_PATH:
charts/factory/manuela-stormshift/anomaly-detection/anomaly-detection-is.yaml diff --git a/common/examples/kustomize-renderer/kustomization.yaml b/common/examples/kustomize-renderer/kustomization.yaml new file mode 100644 index 00000000..8d8bcd10 --- /dev/null +++ b/common/examples/kustomize-renderer/kustomization.yaml @@ -0,0 +1,5 @@ +resources: + - environment.yaml + +patches: +- helm.patch.yaml diff --git a/common/examples/kustomize-renderer/kustomize b/common/examples/kustomize-renderer/kustomize new file mode 100755 index 00000000..3266d453 --- /dev/null +++ b/common/examples/kustomize-renderer/kustomize @@ -0,0 +1,14 @@ +#!/bin/bash -x + +BASE=`dirname $0` +if [ $BASE = $PWD ]; then + BASE=./ +fi + +cat <&0 > "$BASE/helm.yaml" + +# Including at least one log to stderr allows us to see the full -x output +echo $HOME $PWD 1>&2 +ls -al 1>&2 + +kustomize build "$BASE" && rm "$BASE/helm.yaml" diff --git a/common/examples/kustomize-renderer/templates/environment.yaml b/common/examples/kustomize-renderer/templates/environment.yaml new file mode 100644 index 00000000..de4c48a9 --- /dev/null +++ b/common/examples/kustomize-renderer/templates/environment.yaml @@ -0,0 +1,34 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: environment +data: + IMAGE_PROVIDER: {{ .Values.global.imageregistry.hostname }} + IMAGE_ACCOUNT: {{ .Values.global.imageregistry.account }} + GIT_EMAIL: {{ .Values.global.git.email }} + GIT_DEV_REPO_URL: https://{{ .Values.global.git.hostname }}/{{ .Values.global.git.account }}/manuela-dev.git + GIT_DEV_REPO_REVISION: {{ .Values.global.git.dev_revision }} + GIT_OPS_REPO_TEST_URL: {{ .Values.global.repoURL }} + GIT_OPS_REPO_TEST_REVISION: {{ .Values.global.targetRevision }} + GIT_OPS_REPO_PROD_URL: {{ .Values.global.repoURL }} + GIT_OPS_REPO_PROD_REVISION: {{ .Values.global.targetRevision }} + IOT_CONSUMER_IMAGE: iot-consumer + IOT_CONSUMER_YAML_PATH: images.(name==messaging).newTag + IOT_CONSUMER_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml + IOT_CONSUMER_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/messaging/kustomization.yaml + IOT_CONSUMER_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/messaging/messaging-is.yaml + IOT_FRONTEND_IMAGE: iot-frontend + IOT_FRONTEND_YAML_PATH: images.(name==line-dashboard).newTag + IOT_FRONTEND_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml + IOT_FRONTEND_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/line-dashboard/kustomization.yaml + IOT_FRONTEND_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/line-dashboard/line-dashboard-is.yaml + IOT_SWSENSOR_IMAGE: iot-software-sensor + IOT_SWSENSOR_YAML_PATH: images.(name==machine-sensor).newTag + IOT_SWSENSOR_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml + IOT_SWSENSOR_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/machine-sensor/kustomization.yaml + IOT_SWSENSOR_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/machine-sensor/machine-sensor-is.yaml + IOT_ANOMALY_IMAGE: iot-anomaly-detection + IOT_ANOMALY_YAML_PATH: images.(name==anomaly-detection).newTag + IOT_ANOMALY_TEST_KUSTOMIZATION_PATH: charts/datacenter/manuela-tst/kustomization.yaml + IOT_ANOMALY_PROD_KUSTOMIZATION_PATH: charts/factory/manuela-stormshift/anomaly-detection/kustomization.yaml + IOT_ANOMALY_PROD_IMAGESTREAM_PATH: charts/factory/manuela-stormshift/anomaly-detection/anomaly-detection-is.yaml diff --git a/common/examples/kustomize-renderer/values.yaml 
b/common/examples/kustomize-renderer/values.yaml new file mode 100644 index 00000000..cb80a03a --- /dev/null +++ b/common/examples/kustomize-renderer/values.yaml @@ -0,0 +1,12 @@ +global: + git: + provider: github.com + account: PLAINTEXT + username: PLAINTEXT + email: SOMEWHERE@EXAMPLE.COM + dev_revision: main + + imageregistry: + provider: quay.io + account: PLAINTEXT + diff --git a/common/examples/medical-diagnosis-hub.yaml b/common/examples/medical-diagnosis-hub.yaml new file mode 100644 index 00000000..8bde30d0 --- /dev/null +++ b/common/examples/medical-diagnosis-hub.yaml @@ -0,0 +1,228 @@ +clusterGroup: + name: hub + isHubCluster: true + + namespaces: + - open-cluster-management + - openshift-serverless + - opendatahub + - openshift-storage + - xraylab-1 + - knative-serving + - staging + - vault + - golang-external-secrets + + subscriptions: + amq-streams: + name: amq-streams + namespace: xraylab-1 + channel: stable + + grafana: + name: grafana-operator + namespace: xraylab-1 + channel: v4 + source: community-operators + + odf: + name: odf-operator + namespace: openshift-storage + channel: stable-4.11 + + serverless: + name: serverless-operator + channel: stable + + opendatahub: + name: opendatahub-operator + source: community-operators + + projects: + - hub + - medical-diagnosis + + applications: + vault: + name: vault + namespace: vault + project: hub + chart: vault + repoURL: https://helm.releases.hashicorp.com + targetRevision: v0.20.1 + overrides: + - name: global.openshift + value: "true" + - name: injector.enabled + value: "false" + - name: ui.enabled + value: "true" + - name: ui.serviceType + value: LoadBalancer + - name: server.route.enabled + value: "true" + - name: server.route.host + value: null + - name: server.route.tls.termination + value: edge + - name: server.image.repository + value: "registry.connect.redhat.com/hashicorp/vault" + - name: server.image.tag + value: "1.10.3-ubi" + + golang-external-secrets: + name: golang-external-secrets + namespace: golang-external-secrets + project: hub + path: common/golang-external-secrets + + opendatahub: + name: odh + namespace: opendatahub + project: medical-diagnosis + path: charts/all/opendatahub + + openshift-data-foundations: + name: odf + namespace: openshift-storage + project: medical-diagnosis + path: charts/all/openshift-data-foundations + + openshift-serverless: + name: serverless + namespace: xraylab-1 + project: medical-diagnosis + path: charts/all/openshift-serverless + + kafka: + name: kafka + namespace: xraylab-1 + project: medical-diagnosis + path: charts/all/kafka + + kafdrop: + name: kafdrop + namespace: xraylab-1 + project: medical-diagnosis + path: charts/all/kafdrop + + service-account: + name: xraylab-service-account + namespace: xraylab-1 + project: medical-diagnosis + path: charts/all/medical-diagnosis/service-account + + xraylab-init: + name: xraylab-init + namespace: xraylab-1 + project: medical-diagnosis + path: charts/all/medical-diagnosis/xray-init + + xraylab-database: + name: xraylab-database + namespace: xraylab-1 + project: medical-diagnosis + path: charts/all/medical-diagnosis/database + + xraylab-grafana-dashboards: + name: xraylab-grafana-dashboards + namespace: xraylab-1 + project: medical-diagnosis + path: charts/all/medical-diagnosis/grafana + + xraylab-image-server: + name: xraylab-image-server + namespace: xraylab-1 + project: medical-diagnosis + path: charts/all/medical-diagnosis/image-server + ignoreDifferences: + - group: apps.openshift.io + kind: DeploymentConfig + jqPathExpressions:
+ - '.spec.template.spec.containers[].image' + + xraylab-image-generator: + name: xraylab-image-generator + namespace: xraylab-1 + project: medical-diagnosis + path: charts/all/medical-diagnosis/image-generator + ignoreDifferences: + - group: apps.openshift.io + kind: DeploymentConfig + jqPathExpressions: + - '.spec.template.spec.containers[].image' + + imperative: + # NOTE: We *must* use lists and not hashes, as hashes lose ordering once parsed by helm + # The default schedule is every 10 minutes: imperative.schedule + # Total timeout of all jobs is 1h: imperative.activeDeadlineSeconds + # imagePullPolicy is set to always: imperative.imagePullPolicy + # For additional overrides that apply to the jobs, please refer to + # https://hybrid-cloud-patterns.io/imperative-actions/#additional-job-customizations + jobs: + - name: regional-ca + # ansible playbook to be run + playbook: ansible/playbooks/on-hub-get-regional-ca.yml + # per playbook timeout in seconds + timeout: 234 + # verbosity: "-v" + + managedClusterGroups: + region-one: + name: region-one + helmOverrides: + - name: clusterGroup.isHubCluster + value: false + clusterSelector: + matchLabels: + clusterGroup: region-one + +# To have apps in multiple flavors, use namespaces and use helm overrides as appropriate +# +# pipelines: +# name: pipelines +# namespace: production +# project: datacenter +# path: applications/pipeline +# repoURL: https://github.com/you/applications.git +# targetRevision: stable +# overrides: +# - name: myparam +# value: myparam +# +# pipelines_staging: +# - name: pipelines +# namespace: staging +# project: datacenter +# path: applications/pipeline +# repoURL: https://github.com/you/applications.git +# targetRevision: main +# +# Additional applications +# Be sure to include additional resources your apps will require +# +X machines +# +Y RAM +# +Z CPU +# vendor-app: +# name: vendor-app +# namespace: default +# project: vendor +# path: path/to/myapp +# repoURL: https://github.com/vendor/applications.git +# targetRevision: main + +# managedSites: +# factory: +# name: factory +# # repoURL: https://github.com/dagger-refuse-cool/manuela-factory.git +# targetRevision: main +# path: applications/factory +# helmOverrides: +# - name: site.isHubCluster +# value: false +# clusterSelector: +# matchExpressions: +# - key: vendor +# operator: In +# values: +# - OpenShift diff --git a/common/examples/secrets/values-secret.v1.yaml b/common/examples/secrets/values-secret.v1.yaml new file mode 100644 index 00000000..c04e8262 --- /dev/null +++ b/common/examples/secrets/values-secret.v1.yaml @@ -0,0 +1,33 @@ +--- +# By default, when a top-level 'version' is missing, it is assumed to be '1.0' +# NEVER COMMIT THESE VALUES TO GIT + +secrets: + # These secrets will be pushed into the vault at secret/hub/test. The vault will + # have secret/hub/test with secret1 and secret2 as keys with their associated + # values (secrets) + test: + secret1: foo + secret2: bar + + # This ends up as the s3Secret attribute to the path secret/hub/aws + aws: + s3Secret: test-secret + +# This will create the vault key secret/hub/testfoo which will have two +# properties 'b64content' and 'content' which will be the base64-encoded +# content and the normal content respectively +files: + testfoo: ~/ca.crt +# These secrets will be pushed into the vault at secret/region1/test. The vault will +# have secret/region1/test with secret1 and secret2 as keys with their associated +# values (secrets) +secrets.region1: + test: + secret1: foo1 + secret2: bar1 +# This will
+# This will create the vault key secret/region2/testbar, which will have the
+# two properties 'b64content' and 'content': the base64-encoded content and
+# the plain content respectively
+files.region2:
+  testbar: ~/ca.crt
diff --git a/common/examples/secrets/values-secret.v2.yaml b/common/examples/secrets/values-secret.v2.yaml
new file mode 100644
index 00000000..eab81a38
--- /dev/null
+++ b/common/examples/secrets/values-secret.v2.yaml
@@ -0,0 +1,114 @@
+# NEVER COMMIT THESE VALUES TO GIT (unless your file only uses generated
+# passwords or only points to files)
+
+# NOTE: If you edit this file, make sure to also reflect the changes in the corresponding
+# schema file
+
+# Needed to specify the new format (a missing version means the old version: 1.0 by default)
+version: "2.0"
+
+backingStore: vault # 'vault' is the default when omitted
+
+# These are the vault policies to be created in the vault.
+# They are used when we let the vault generate the passwords
+# by setting the 'onMissingValue' attribute to 'generate'.
+# See https://developer.hashicorp.com/vault/docs/concepts/password-policies
+vaultPolicies:
+  basicPolicy: |
+    length=10
+    rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+    rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+    rule "charset" { charset = "0123456789" min-chars = 1 }
+
+  advancedPolicy: |
+    length=20
+    rule "charset" { charset = "abcdefghijklmnopqrstuvwxyz" min-chars = 1 }
+    rule "charset" { charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" min-chars = 1 }
+    rule "charset" { charset = "0123456789" min-chars = 1 }
+    rule "charset" { charset = "!@#$%^&*" min-chars = 1 }
+
+# This is the mandatory top-level secrets entry
+secrets:
+  - name: aws
+    fields:
+      - name: aws_access_key_id
+        ini_file: ~/.aws/credentials
+        ini_key: aws_access_key_id
+        # You can actually omit this as it is the default.
+        # It is here because the json schema validator appears to have a bug
+        # (it ignores the default value of onMissingValue in the allOf if checks)
+        onMissingValue: error
+        # ini_section: default
+      - name: aws_secret_access_key
+        onMissingValue: error
+        ini_file: ~/.aws/credentials
+        ini_key: aws_secret_access_key
+        # ini_section: default
+
+  - name: config-demo
+    vaultMount: secret
+    vaultPrefixes:
+      - region-one
+      - snowflake.blueprints.rhecoeng.com
+    fields:
+      - name: secret
+        onMissingValue: generate
+        override: true
+        vaultPolicy: basicPolicy
+      - name: secretprompt
+        value: null
+        onMissingValue: prompt
+        prompt: "Please specify the password for application ABC"
+      - name: secretprompt2
+        value: defaultvalue
+        onMissingValue: prompt
+        prompt: "Please specify the API key for XYZ"
+      - name: secretfile
+        path: /tmp/ca.crt
+        onMissingValue: prompt
+        prompt: "Insert path to Certificate Authority"
+      - name: ca_crt
+        path: /tmp/ca.crt
+        onMissingValue: error
+      - name: ca_crt_b64
+        path: /tmp/ca.crt
+        base64: true # defaults to false
+        onMissingValue: prompt
+
+  - name: config-demo2
+    vaultPrefixes:
+      - region-one
+      - snowflake.blueprints.rhecoeng.com
+    fields:
+      - name: ca_crt2
+        path: null
+        onMissingValue: prompt
+      - name: ca_crt
+        path: /tmp/ca.crt
+        onMissingValue: error
+
+  # This will be uploaded to the 'hub' vaultPrefix as it is the default when
+  # omitted
+  - name: config-demo3
+    fields:
+      - name: ca_crt2
+        path: null
+        onMissingValue: prompt
+      - name: ca_crt
+        path: /tmp/ca.crt
+        onMissingValue: error
+  #
+  # The cluster_xxxx pattern is used for creating externalSecrets that
+  # will be used by ArgoCD to push manifests to other clusters.
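+  # For example, for the hypothetical cluster 'foocluster' below, the hub's
+  # root CA can be extracted with the command shown underneath before this
+  # file is processed: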
+  #
+  # oc extract -n openshift-config cm/kube-root-ca.crt --to=/home/user/ --keys=ca.crt --confirm
+  - name: cluster_foocluster
+    fields:
+      - name: bearerToken
+        value:
+        onMissingValue: error
+      - name: caCert
+        # See command above
+        path: /home/user/ca.crt
+        onMissingValue: error
+        base64: true
diff --git a/common/examples/values-example.yaml b/common/examples/values-example.yaml
new file mode 100644
index 00000000..312155b9
--- /dev/null
+++ b/common/examples/values-example.yaml
@@ -0,0 +1,207 @@
+global:
+  options:
+    useCSV: False
+    syncPolicy: Automatic
+    installPlanApproval: Automatic
+    multiClusterTarget: all
+
+#enabled: all
+
+clusterGroup:
+  name: example
+  #insecureUnsealVaultInsideCluster: false
+  isHubCluster: true
+  sharedValueFiles:
+    - /values/{{ .Values.global.clusterPlatform }}.yaml
+    - /values/{{ .Values.global.clusterVersion }}.yaml
+
+  scheduler:
+    mastersSchedulable: true
+
+  # You can define namespaces using hashes rather than a list, like so:
+  # namespaces:
+  #   open-cluster-management:
+  #     labels:
+  #       openshift.io/node-selector: ""
+  #       kubernetes.io/os: linux
+  #     annotations:
+  #       openshift.io/cluster-monitoring: "true"
+  #       owner: "namespace owner"
+  #   application-ci:
+  # You cannot mix lists and hashes when defining namespaces
+  namespaces:
+    - open-cluster-management:
+        labels:
+          openshift.io/node-selector: ""
+          kubernetes.io/os: linux
+        annotations:
+          openshift.io/cluster-monitoring: "true"
+          owner: "namespace owner"
+    - application-ci:
+        operatorGroup: true
+        targetNamespaces:
+          - application-ci
+          - other-namespace
+    - exclude-targetns:
+        operatorGroup: true
+        targetNamespaces:
+          - include-ci
+    - exclude-og
+    - totally-exclude-og:
+        operatorGroup: false
+    - include-default-og:
+        operatorGroup: true
+
+  nodes:
+    - m-m00.cluster.example.tld:
+        labels:
+          cluster.ocs.openshift.io/openshift-storage: ""
+    - m-m01.cluster.example.tld:
+        labels:
+          cluster.ocs.openshift.io/openshift-storage: ""
+    - m-m02.cluster.example.tld:
+        labels:
+          cluster.ocs.openshift.io/openshift-storage: ""
+
+  operatorgroupExcludes:
+    - exclude-og
+
+  subscriptions:
+    acm:
+      name: advanced-cluster-management
+      namespace: open-cluster-management
+      channel: release-2.4
+      csv: advanced-cluster-management.v2.4.1
+
+    odh:
+      name: opendatahub-operator
+      source: community-operators
+      csv: opendatahub-operator.v1.1.0
+      disabled: true
+
+    pipelines:
+      name: openshift-pipelines-operator-rh
+      csv: redhat-openshift-pipelines.v1.5.2
+
+  #
+  # You can define projects using hashes like so:
+  # projects:
+  #   hub:
+  #   datacenter:
+  # You cannot mix lists and hashes when defining projects.
+  projects:
+    - datacenter
+
+  applications:
+    acm:
+      name: acm
+      namespace: open-cluster-management
+      project: datacenter
+      path: common/acm
+      ignoreDifferences:
+        - group: internal.open-cluster-management.io
+          kind: ManagedClusterInfo
+          jsonPointers:
+            - /spec/loggingCA
+    pipe:
+      name: pipelines
+      namespace: application-ci
+      project: datacenter
+      path: charts/datacenter/pipelines
+      extraValueFiles:
+        - /values/{{ .Values.global.clusterVersion }}/{{ .Values.global.clusterPlatform }}.yaml
+
+  imperative:
+    namespace: imperative
+    # NOTE: We *must* use lists and not hashes, as hashes lose ordering once parsed by Helm.
+    # The default schedule is every 10 minutes: imperative.schedule
+    # Total timeout of all jobs is 1h: imperative.activeDeadlineSeconds
+    # imagePullPolicy is set to always: imperative.imagePullPolicy
+    # For additional overrides that apply to the jobs, please refer to
+    # https://hybrid-cloud-patterns.io/imperative-actions/#additional-job-customizations
+    jobs:
+      - name: regional-ca
+        # Ansible playbook to be run
+        playbook: ansible/playbooks/on-hub-get-regional-ca.yml
+        # Per-playbook timeout in seconds
+        timeout: 234
+        # verbosity: "-v"
+
+  managedClusterGroups:
+    - name: acm-edge
+      # Optional - Point to a different repo
+      # repoURL: https://github.com/hybrid-cloud-patterns/mySite.git
+      # Must contain values-{clustergroupname}.yaml at the top level
+      targetRevision: main
+      helmOverrides:
+        # Values must be strings!
+        - name: clusterGroup.isHubCluster
+          value: "false"
+      acmlabels:
+        - name: clusterGroup
+          value: acm-region
+    - name: acm-provision-edge
+      targetRevision: main
+      helmOverrides:
+        - name: clusterGroup.isHubCluster
+          value: "false"
+      clusterPools:
+        exampleAWSPool:
+          size: 3
+          name: aws-ap
+          openshiftVersion: 4.10.18
+          baseDomain: blueprints.rhecoeng.com
+          controlPlane:
+            count: 1
+            platform:
+              aws:
+                type: m5.xlarge
+          workers:
+            count: 0
+          platform:
+            aws:
+              region: ap-southeast-2
+        exampleAzurePool:
+          name: azure-us
+          openshiftVersion: 4.10.18
+          baseDomain: blueprints.rhecoeng.com
+          platform:
+            azure:
+              baseDomainResourceGroupName: dojo-dns-zones
+              region: eastus
+          clusters:
+            - Two
+            - three
+      clusterDeployments:
+        myFirstCluster:
+          name: aws-cd-one-w-pool
+          openshiftVersion: 4.10.18
+          baseDomain: blueprints.rhecoeng.com
+          platform:
+            aws:
+              region: ap-southeast-1
+      acmlabels:
+        - name: clusterGroup
+          value: region
+    - name: acm-provision-on-deploy
+      clusterDeployments:
+        mySecondCluster:
+          name: aws-cd-two-wo-pool
+          openshiftVersion: 4.10.18
+          baseDomain: blueprints.rhecoeng.com
+          platform:
+            aws:
+              region: ap-southeast-3
+    - name: argo-edge
+      hostedArgoSites:
+        - name: perth
+          domain: perth1.beekhof.net
+          # The default is secret/data/hub/cluster_
+          #secretsPath: secret/data/hub/cluster_perth
+        - name: sydney
+          domain: syd.beekhof.net
+          # The default is secret/data/hub/cluster_
+          #secretsPath: secret/data/hub/cluster_sydney
+      helmOverrides:
+        - name: clusterGroup.isHubCluster
+          value: "false"
diff --git a/common/requirements.yml b/common/requirements.yml
new file mode 100644
index 00000000..cb11ca24
--- /dev/null
+++ b/common/requirements.yml
@@ -0,0 +1,4 @@
+---
+# Define Ansible collection requirements here
+collections:
+  - name: git+https://github.com/validatedpatterns/rhvp.cluster_utils.git,v1
diff --git a/common/scripts/determine-main-clustergroup.sh b/common/scripts/determine-main-clustergroup.sh
new file mode 100755
index 00000000..6271dbad
--- /dev/null
+++ b/common/scripts/determine-main-clustergroup.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+PATTERN_DIR="$1"
+
+if [ -z "$PATTERN_DIR" ]; then
+    PATTERN_DIR="."
+fi
+
+CGNAME=$(yq '.main.clusterGroupName' "$PATTERN_DIR/values-global.yaml")
+
+if [ -z "$CGNAME" ] || [ "$CGNAME" == "null" ]; then
+    echo "Error - cannot determine clusterGroupName"
+    exit 1
+fi
+
+echo "$CGNAME"
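A quick usage sketch (assuming yq v4 on the PATH and a pattern checkout whose values-global.yaml sets main.clusterGroupName, as in the example file at the end of this change):

    $ common/scripts/determine-main-clustergroup.sh .
    example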
diff --git a/common/scripts/determine-pattern-name.sh b/common/scripts/determine-pattern-name.sh
new file mode 100755
index 00000000..fb503fe6
--- /dev/null
+++ b/common/scripts/determine-pattern-name.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+PATTERN_DIR="$1"
+
+if [ -z "$PATTERN_DIR" ]; then
+    PATTERN_DIR="."
+fi
+
+PATNAME=$(yq '.global.pattern' "$PATTERN_DIR/values-global.yaml" 2>/dev/null)
+
+if [ -z "$PATNAME" ] || [ "$PATNAME" = "null" ]; then
+    PATNAME="$(basename "$PWD")"
+fi
+
+echo "$PATNAME"
diff --git a/common/scripts/determine-secretstore-backend.sh b/common/scripts/determine-secretstore-backend.sh
new file mode 100755
index 00000000..ef784790
--- /dev/null
+++ b/common/scripts/determine-secretstore-backend.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+PATTERN_DIR="$1"
+
+if [ -z "$PATTERN_DIR" ]; then
+    PATTERN_DIR="."
+fi
+
+BACKEND=$(yq '.global.secretStore.backend' "$PATTERN_DIR/values-global.yaml" 2>/dev/null)
+
+if [ -z "$BACKEND" ] || [ "$BACKEND" = "null" ]; then
+    BACKEND="vault"
+fi
+
+echo "$BACKEND"
diff --git a/common/scripts/display-secrets-info.sh b/common/scripts/display-secrets-info.sh
new file mode 100755
index 00000000..d9915855
--- /dev/null
+++ b/common/scripts/display-secrets-info.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+set -eu
+
+get_abs_filename() {
+  # $1 : relative filename
+  echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
+}
+
+SCRIPT=$(get_abs_filename "$0")
+SCRIPTPATH=$(dirname "${SCRIPT}")
+COMMONPATH=$(dirname "${SCRIPTPATH}")
+PATTERNPATH=$(dirname "${COMMONPATH}")
+
+if [ "$#" -ge 1 ]; then
+  export VALUES_SECRET=$(get_abs_filename "${1}")
+fi
+
+if [[ "$#" == 2 ]]; then
+  SECRETS_BACKING_STORE="$2"
+else
+  SECRETS_BACKING_STORE="$($SCRIPTPATH/determine-secretstore-backend.sh)"
+fi
+
+PATTERN_NAME=$(basename "`pwd`")
+
+ansible-playbook -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" -e secrets_backing_store="${SECRETS_BACKING_STORE}" -e override_no_log=false "rhvp.cluster_utils.display_secrets_info"
diff --git a/common/scripts/lint.sh b/common/scripts/lint.sh
new file mode 100755
index 00000000..3a3d8e6f
--- /dev/null
+++ b/common/scripts/lint.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# helm template (even with --dry-run) can interact with the cluster.
+# This won't protect us if a user has ~/.kube
+# Also call helm template with a non-existent --kubeconfig while we're at it
+unset KUBECONFIG
+target=$1
+shift
+name=$(echo $target | sed -e s@/@-@g -e s@charts-@@)
+
+# Test the charts as the pattern would drive them
+INPUTS=$(ls -1 common/examples/*.yaml | grep -v secret)
+for input in $INPUTS; do
+  helm lint $* -f $input $target
+  if [ $? != 0 ]; then exit 1; fi
+done
+
+exit 0
diff --git a/common/scripts/load-k8s-secrets.sh b/common/scripts/load-k8s-secrets.sh
new file mode 100755
index 00000000..9219f92f
--- /dev/null
+++ b/common/scripts/load-k8s-secrets.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+set -eu
+
+get_abs_filename() {
+  # $1 : relative filename
+  echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
+}
+
+SCRIPT=$(get_abs_filename "$0")
+SCRIPTPATH=$(dirname "${SCRIPT}")
+COMMONPATH=$(dirname "${SCRIPTPATH}")
+PATTERNPATH=$(dirname "${COMMONPATH}")
+
+PATTERN_NAME=${1:-$(basename "`pwd`")}
+
+ansible-playbook -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" "rhvp.cluster_utils.k8s_secrets"
diff --git a/common/scripts/make_common_subtree.sh b/common/scripts/make_common_subtree.sh
new file mode 100755
index 00000000..a5e406d8
--- /dev/null
+++ b/common/scripts/make_common_subtree.sh
@@ -0,0 +1,76 @@
+#!/bin/sh
+
+if [ "$1" = "-h" ]; then
+  echo "This script will convert common into a subtree and add a remote to help manage it."
+  echo "The script takes three positional arguments, as follows:"
+  echo
+  echo "$0 <subtree_repo> <subtree_branch> <subtree_remote>"
+  echo
+  echo "Run without arguments, the script runs as if these arguments had been passed:"
+  echo "$0 https://github.com/hybrid-cloud-patterns/common.git main common-subtree"
+  echo
+  echo "Please ensure the git subtree command is available. On RHEL/Fedora, the git subtree command"
+  echo "is in a separate package called git-subtree"
+  exit 1
+fi
+
+if [ -f '/etc/redhat-release' ]; then
+  rpm -qa | grep git-subtree 2>&1
+  if [ ! $? = 0 ]; then
+    echo "You need to install git-subtree"
+    echo "Would you like to install it now?"
+    select ANS in yes no
+    do
+      case $ANS in
+        yes)
+          sudo dnf install git-subtree -y
+          break
+          ;;
+        no)
+          exit
+          break
+          ;;
+        *)
+          echo "You must enter yes or no"
+          ;;
+      esac
+    done
+  fi
+fi
+
+if [ "$1" ]; then
+  subtree_repo=$1
+else
+  subtree_repo=https://github.com/hybrid-cloud-patterns/common.git
+fi
+
+if [ "$2" ]; then
+  subtree_branch=$2
+else
+  subtree_branch=main
+fi
+
+if [ "$3" ]; then
+  subtree_remote=$3
+else
+  subtree_remote=common-subtree
+fi
+
+git diff --quiet || (echo "This script must be run on a clean working tree" && exit 1)
+
+echo "Changing directory to project root"
+cd `git rev-parse --show-toplevel`
+
+echo "Removing existing common and replacing it with subtree from $subtree_repo $subtree_remote"
+rm -rf common
+
+echo "Committing removal of common"
+(git add -A :/ && git commit -m "Removed previous version of common to convert to subtree from $subtree_repo $subtree_branch") || exit 1
+
+echo "Adding (possibly replacing) subtree remote $subtree_remote"
+git remote rm "$subtree_remote"
+git remote add -f "$subtree_remote" "$subtree_repo" || exit 1
+git subtree add --prefix=common "$subtree_remote" "$subtree_branch" || exit 1
+
+echo "Complete. You may now push these results if you are satisfied"
+exit 0
diff --git a/common/scripts/manage-secret-app.sh b/common/scripts/manage-secret-app.sh
new file mode 100755
index 00000000..1ea0d0bb
--- /dev/null
+++ b/common/scripts/manage-secret-app.sh
@@ -0,0 +1,49 @@
+#!/bin/sh
+
+APP=$1
+STATE=$2
+
+MAIN_CLUSTERGROUP_FILE="./values-$(common/scripts/determine-main-clustergroup.sh).yaml"
+MAIN_CLUSTERGROUP_PROJECT="$(common/scripts/determine-main-clustergroup.sh)"
+
+case "$APP" in
+  "vault")
+    APP_NAME="vault"
+    NAMESPACE="vault"
+    PROJECT="$MAIN_CLUSTERGROUP_PROJECT"
+    CHART_LOCATION="common/hashicorp-vault"
+    ;;
+  "golang-external-secrets")
+    APP_NAME="golang-external-secrets"
+    NAMESPACE="golang-external-secrets"
+    PROJECT="$MAIN_CLUSTERGROUP_PROJECT"
+    CHART_LOCATION="common/golang-external-secrets"
+    ;;
+  *)
+    echo "Error - cannot manage $APP: can only manage vault and golang-external-secrets"
+    exit 1
+    ;;
+esac
+
+case "$STATE" in
+  "present")
+    common/scripts/manage-secret-namespace.sh "$NAMESPACE" "$STATE"
+
+    RES=$(yq ".clusterGroup.applications[] | select(.path == \"$CHART_LOCATION\")" "$MAIN_CLUSTERGROUP_FILE" 2>/dev/null)
+    if [ -z "$RES" ]; then
+      echo "Application with chart location $CHART_LOCATION not found, adding"
+      yq -i ".clusterGroup.applications.$APP_NAME = { \"name\": \"$APP_NAME\", \"namespace\": \"$NAMESPACE\", \"project\": \"$PROJECT\", \"path\": \"$CHART_LOCATION\" }" "$MAIN_CLUSTERGROUP_FILE"
+    fi
+    ;;
+  "absent")
+    common/scripts/manage-secret-namespace.sh "$NAMESPACE" "$STATE"
+    echo "Removing application with chart location $CHART_LOCATION"
+    yq -i "del(.clusterGroup.applications[] | select(.path == \"$CHART_LOCATION\"))" "$MAIN_CLUSTERGROUP_FILE"
+    ;;
+  *)
+    echo "$STATE not supported"
+    exit 1
+    ;;
+esac
+
+exit 0
diff --git a/common/scripts/manage-secret-namespace.sh b/common/scripts/manage-secret-namespace.sh
new file mode 100755
index 00000000..bcb06742
--- /dev/null
+++ b/common/scripts/manage-secret-namespace.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+NAMESPACE=$1
+STATE=$2
+
+MAIN_CLUSTERGROUP_FILE="./values-$(common/scripts/determine-main-clustergroup.sh).yaml"
+MAIN_CLUSTERGROUP_PROJECT="$(common/scripts/determine-main-clustergroup.sh)"
+
+case "$STATE" in
+  "present")
+
+    RES=$(yq ".clusterGroup.namespaces[] | select(. == \"$NAMESPACE\")" "$MAIN_CLUSTERGROUP_FILE" 2>/dev/null)
+    if [ -z "$RES" ]; then
+      echo "Namespace $NAMESPACE not found, adding"
+      yq -i ".clusterGroup.namespaces += [ \"$NAMESPACE\" ]" "$MAIN_CLUSTERGROUP_FILE"
+    fi
+    ;;
+  "absent")
+    echo "Removing namespace $NAMESPACE"
+    yq -i "del(.clusterGroup.namespaces[] | select(. == \"$NAMESPACE\"))" "$MAIN_CLUSTERGROUP_FILE"
+    ;;
+  *)
+    echo "$STATE not supported"
+    exit 1
+    ;;
+esac
+
+exit 0
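A usage sketch for the two helper scripts above (run from the pattern's top-level directory, since they resolve values-<main-clustergroup>.yaml relative to it):

    $ common/scripts/manage-secret-app.sh vault present   # adds the vault namespace and application
    $ common/scripts/manage-secret-app.sh vault absent    # removes them again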
diff --git a/common/scripts/pattern-util.sh b/common/scripts/pattern-util.sh
new file mode 100755
index 00000000..cb7fc873
--- /dev/null
+++ b/common/scripts/pattern-util.sh
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+function is_available {
+  command -v $1 >/dev/null 2>&1 || { echo >&2 "$1 is required but it's not installed. Aborting."; exit 1; }
+}
+
+function version {
+  echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'
+}
+
+if [ -z "$PATTERN_UTILITY_CONTAINER" ]; then
+  PATTERN_UTILITY_CONTAINER="quay.io/hybridcloudpatterns/utility-container"
+fi
+
+readonly commands=(podman)
+for cmd in ${commands[@]}; do is_available "$cmd"; done
+
+UNSUPPORTED_PODMAN_VERSIONS="1.6 1.5"
+PODMAN_VERSION_STR=$(podman --version)
+for i in ${UNSUPPORTED_PODMAN_VERSIONS}; do
+  # Match the version on a word boundary (so that e.g. 1.5 does not match 41.5)
+  if echo "${PODMAN_VERSION_STR}" | grep -q -E "\b${i}"; then
+    echo "Unsupported podman version. We recommend > 4.3.0"
+    podman --version
+    exit 1
+  fi
+done
+
+# podman --version outputs:
+# podman version 4.8.2
+PODMAN_VERSION=$(echo "${PODMAN_VERSION_STR}" | awk '{ print $NF }')
+
+# podman < 4.3.0 does not support keep-id:uid=...
+if [ $(version "${PODMAN_VERSION}") -lt $(version "4.3.0") ]; then
+  PODMAN_ARGS="-v ${HOME}:/root"
+else
+  # We do not rely on bash's $UID and $GID because on macOS $GID is not set
+  MYNAME=$(id -n -u)
+  MYUID=$(id -u)
+  MYGID=$(id -g)
+  PODMAN_ARGS="--passwd-entry ${MYNAME}:x:${MYUID}:${MYGID}::/pattern-home:/bin/bash --user ${MYUID}:${MYGID} --userns keep-id:uid=${MYUID},gid=${MYGID}"
+fi
+
+if [ -n "$KUBECONFIG" ]; then
+  if [[ ! "${KUBECONFIG}" =~ ^$HOME* ]]; then
+    echo "${KUBECONFIG} is pointing outside of the HOME folder, this will make it unavailable from the container."
+    echo "Please move it somewhere inside your $HOME folder, as that is what gets bind-mounted inside the container"
+    exit 1
+  fi
+fi
+
+# Detect whether we use podman machine. If we do not, then we bind-mount the local host's ssl folders;
+# if we are using podman machine then we do not bind-mount anything (for now!)
+REMOTE_PODMAN=$(podman system connection list -q | wc -l)
+if [ $REMOTE_PODMAN -eq 0 ]; then # If we are not using podman machine we check the host's folders
+  # Use /etc/pki by default and try a couple of fallbacks if it does not exist
+  if [ -d /etc/pki ]; then
+    PKI_HOST_MOUNT_ARGS="-v /etc/pki:/etc/pki:ro"
+  elif [ -d /etc/ssl ]; then
+    PKI_HOST_MOUNT_ARGS="-v /etc/ssl:/etc/ssl:ro"
+  else
+    PKI_HOST_MOUNT_ARGS="-v /usr/share/ca-certificates:/usr/share/ca-certificates:ro"
+  fi
+else
+  PKI_HOST_MOUNT_ARGS=""
+fi
+
+# Copy the kubeconfig from the current environment. The utilities will pick up ~/.kube/config if set, so it's not mandatory
+# $HOME is mounted as itself for any files that are referenced with absolute paths
+# $HOME is mounted to /root because the UID in the container is 0 and that's where SSH looks for credentials
+
+podman run -it --rm --pull=newer \
+  --security-opt label=disable \
+  -e EXTRA_HELM_OPTS \
+  -e EXTRA_PLAYBOOK_OPTS \
+  -e TARGET_ORIGIN \
+  -e NAME \
+  -e TOKEN_SECRET \
+  -e TOKEN_NAMESPACE \
+  -e VALUES_SECRET \
+  -e KUBECONFIG \
+  -e K8S_AUTH_HOST \
+  -e K8S_AUTH_VERIFY_SSL \
+  -e K8S_AUTH_SSL_CA_CERT \
+  -e K8S_AUTH_USERNAME \
+  -e K8S_AUTH_PASSWORD \
+  -e K8S_AUTH_TOKEN \
+  ${PKI_HOST_MOUNT_ARGS} \
+  -v "${HOME}":"${HOME}" \
+  -v "${HOME}":/pattern-home \
+  ${PODMAN_ARGS} \
+  ${EXTRA_ARGS} \
+  -w "$(pwd)" \
+  "$PATTERN_UTILITY_CONTAINER" \
+  "$@"
diff --git a/common/scripts/preview-all.sh b/common/scripts/preview-all.sh
new file mode 100755
index 00000000..4bf59322
--- /dev/null
+++ b/common/scripts/preview-all.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+REPO=$1; shift;
+TARGET_BRANCH=$1; shift
+
+HUB=$( yq ".main.clusterGroupName" values-global.yaml )
+MANAGED_CLUSTERS=$( yq ".clusterGroup.managedClusterGroups.[].name" values-$HUB.yaml )
+ALL_CLUSTERS=( $HUB $MANAGED_CLUSTERS )
+
+CLUSTER_INFO_OUT=$(oc cluster-info 2>&1)
+CLUSTER_INFO_RET=$?
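+# Previews render against the live cluster (preview.sh looks up the platform,
+# version and domain with oc), so bail out early if the cluster is unreachable.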
+if [ $CLUSTER_INFO_RET -ne 0 ]; then
+  echo "Could not access the cluster:"
+  echo "${CLUSTER_INFO_OUT}"
+  exit 1
+fi
+
+for cluster in ${ALL_CLUSTERS[@]}; do
+  # We always add clustergroup as it is the entry point and it gets special-cased in preview.sh.
+  APPS="clustergroup $( yq ".clusterGroup.applications.[].name" values-$cluster.yaml )"
+  for app in $APPS; do
+    printf "# Parsing application $app from cluster $cluster\n"
+    common/scripts/preview.sh $cluster $app $REPO $TARGET_BRANCH
+  done
+done
diff --git a/common/scripts/preview.sh b/common/scripts/preview.sh
new file mode 100755
index 00000000..b9839c51
--- /dev/null
+++ b/common/scripts/preview.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+# DISCLAIMER
+#
+# - Parsing of applications needs to be more clever. Currently the code assumes that all
+#   targets will be local charts. This is not true, for example, in industrial-edge.
+# - There is currently no mechanism to actually preview against multiple clusters
+#   (i.e. a hub and a remote). All previews will be done against the current cluster.
+# - Make output can end up included in the YAML.
+
+SITE=$1; shift
+APPNAME=$1; shift
+GIT_REPO=$1; shift
+GIT_BRANCH=$1; shift
+
+if [ "${APPNAME}" != "clustergroup" ]; then
+  # This covers the following case:
+  # foobar:
+  #   name: foo
+  #   namespace: foo
+  #   project: foo
+  #   path: charts/all/foo
+  # So we retrieve the actual index ("foobar") given the name attribute of the application
+  APP=$(yq ".clusterGroup.applications | with_entries(select(.value.name == \"$APPNAME\")) | keys | .[0]" values-$SITE.yaml)
+  chart=$(yq ".clusterGroup.applications.$APP.path" values-$SITE.yaml)
+  namespace=$(yq ".clusterGroup.applications.$APP.namespace" values-$SITE.yaml)
+else
+  APP=$APPNAME
+  chart="common/clustergroup"
+  namespace="openshift-operators"
+fi
+pattern=$(yq ".global.pattern" values-global.yaml)
+
+# You can override the default lookups by using OCP_{PLATFORM,VERSION,DOMAIN}
+# Note that when using the utility container you need to pass in the above variables
+# by exporting EXTRA_ARGS="-e OCP_PLATFORM -e OCP_VERSION -e OCP_DOMAIN" before
+# invoking pattern-util.sh
+platform=${OCP_PLATFORM:-$(oc get Infrastructure.config.openshift.io/cluster -o jsonpath='{.spec.platformSpec.type}')}
+ocpversion=${OCP_VERSION:-$(oc get clusterversion/version -o jsonpath='{.status.desired.version}' | awk -F. '{print $1"."$2}')}
+domain=${OCP_DOMAIN:-$(oc get Ingress.config.openshift.io/cluster -o jsonpath='{.spec.domain}' | sed 's/^apps.//')}
+
+function replaceGlobals() {
+  output=$( echo $1 | sed -e 's/ //g' -e 's/\$//g' -e s@^-@@g -e s@\'@@g )
+
+  output=$(echo $output | sed "s@{{.Values.global.clusterPlatform}}@${platform}@g")
+  output=$(echo $output | sed "s@{{.Values.global.clusterVersion}}@${ocpversion}@g")
+  output=$(echo $output | sed "s@{{.Values.global.clusterDomain}}@${domain}@g")
+
+  echo $output
+}
+
+function getOverrides() {
+  overrides=''
+  overrides=$( yq ".clusterGroup.applications.$APP.overrides[]" "values-$SITE.yaml" )
+  overrides=$( echo "$overrides" | tr -d '\n' )
+  overrides=$( echo "$overrides" | sed -e 's/name:/ --set/g; s/value: /=/g' )
+  if [ -n "$overrides" ]; then
+    echo "$overrides"
+  fi
+}
+
+CLUSTER_OPTS=""
+CLUSTER_OPTS="$CLUSTER_OPTS --set global.pattern=$pattern"
+CLUSTER_OPTS="$CLUSTER_OPTS --set global.repoURL=$GIT_REPO"
+CLUSTER_OPTS="$CLUSTER_OPTS --set main.git.repoURL=$GIT_REPO"
+CLUSTER_OPTS="$CLUSTER_OPTS --set main.git.revision=$GIT_BRANCH"
+CLUSTER_OPTS="$CLUSTER_OPTS --set global.namespace=$namespace"
+CLUSTER_OPTS="$CLUSTER_OPTS --set global.hubClusterDomain=apps.$domain"
+CLUSTER_OPTS="$CLUSTER_OPTS --set global.localClusterDomain=apps.$domain"
+CLUSTER_OPTS="$CLUSTER_OPTS --set global.clusterDomain=$domain"
+CLUSTER_OPTS="$CLUSTER_OPTS --set global.clusterVersion=$ocpversion"
+CLUSTER_OPTS="$CLUSTER_OPTS --set global.clusterPlatform=$platform"
+
+sharedValueFiles=$(yq ".clusterGroup.sharedValueFiles" values-$SITE.yaml)
+appValueFiles=$(yq ".clusterGroup.applications.$APP.extraValueFiles" values-$SITE.yaml)
+isKustomize=$(yq ".clusterGroup.applications.$APP.kustomize" values-$SITE.yaml)
+OVERRIDES=$( getOverrides )
+
+VALUE_FILES="-f values-global.yaml -f values-$SITE.yaml"
+IFS=$'\n'
+for line in $sharedValueFiles; do
+  if [ $line != "null" ] && [ -f $line ]; then
+    file=$(replaceGlobals $line)
+    VALUE_FILES="$VALUE_FILES -f $PWD$file"
+  fi
+done
+
+for line in $appValueFiles; do
+  if [ $line != "null" ] && [ -f $line ]; then
+    file=$(replaceGlobals $line)
+    VALUE_FILES="$VALUE_FILES -f $PWD$file"
+  fi
+done
+
+if [ $isKustomize == "true" ]; then
+  kustomizePath=$(yq ".clusterGroup.applications.$APP.path" values-$SITE.yaml)
+  repoURL=$(yq ".clusterGroup.applications.$APP.repoURL" values-$SITE.yaml)
+  if [[ $repoURL == http* ]] || [[ $repoURL == git@* ]]; then
+    kustomizePath="${repoURL}/${kustomizePath}"
+  fi
+  cmd="oc kustomize ${kustomizePath}"
+  eval "$cmd"
+else
+  cmd="helm template $chart --name-template ${APP} -n ${namespace} ${VALUE_FILES} ${OVERRIDES} ${CLUSTER_OPTS}"
+  eval "$cmd"
+fi
diff --git a/common/scripts/process-secrets.sh b/common/scripts/process-secrets.sh
new file mode 100755
index 00000000..47eff7fa
--- /dev/null
+++ b/common/scripts/process-secrets.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+set -eu
+
+get_abs_filename() {
+  # $1 : relative filename
+  echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
+}
+
+SCRIPT=$(get_abs_filename "$0")
+SCRIPTPATH=$(dirname "${SCRIPT}")
+COMMONPATH=$(dirname "${SCRIPTPATH}")
+PATTERNPATH=$(dirname "${COMMONPATH}")
+
+PATTERN_NAME=${1:-$(basename "`pwd`")}
+SECRETS_BACKING_STORE="$($SCRIPTPATH/determine-secretstore-backend.sh)"
+
+ansible-playbook -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" -e secrets_backing_store="${SECRETS_BACKING_STORE}" "rhvp.cluster_utils.process_secrets"
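A usage sketch (assuming the rhvp.cluster_utils collection from common/requirements.yml is installed and a values-secret file is in place; 'mypattern' is a placeholder pattern name):

    $ ansible-galaxy collection install -r common/requirements.yml
    $ common/scripts/process-secrets.sh mypattern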
diff --git a/common/scripts/set-secret-backend.sh b/common/scripts/set-secret-backend.sh
new file mode 100755
index 00000000..e07b15bf
--- /dev/null
+++ b/common/scripts/set-secret-backend.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+BACKEND=$1
+
+yq -i ".global.secretStore.backend = \"$BACKEND\"" values-global.yaml
diff --git a/common/scripts/test.sh b/common/scripts/test.sh
new file mode 100755
index 00000000..a2aae9f4
--- /dev/null
+++ b/common/scripts/test.sh
@@ -0,0 +1,129 @@
+#!/bin/bash
+
+# helm template (even with --dry-run) can interact with the cluster.
+# This won't protect us if a user has ~/.kube
+# Also call helm template with a non-existent --kubeconfig while we're at it
+unset KUBECONFIG
+target=$1
+name=$(echo $1 | sed -e s@/@-@g -e s@charts-@@)
+
+function doTest() {
+  TEST_VARIANT=$1
+  CHART_OPTS="$2"
+  TESTDIR=tests
+  TEST=${name}-${TEST_VARIANT}
+  FILENAME=${TEST}.expected.yaml
+  OUTPUT=${TESTDIR}/.${FILENAME}
+  REFERENCE=${TESTDIR}/${FILENAME}
+
+  echo -e "\nTesting $name chart (${TEST_VARIANT}) with opts [$CHART_OPTS]" >&2
+  helm template --kubeconfig /tmp/doesnotexistever $target --name-template $name ${CHART_OPTS} > ${OUTPUT}
+  rc=$?
+  if [ $rc -ne 0 ]; then
+    echo "FAIL on helm template $target --name-template $name ${CHART_OPTS}" >&2
+    exit 1
+  fi
+  if [ ! -e ${REFERENCE} ]; then
+    cp ${OUTPUT} ${REFERENCE}
+    git add ${REFERENCE}
+    echo -e "\n\n#### Created test output\007\n\n\007" >&2
+  fi
+  diff -u ${REFERENCE} ${OUTPUT}
+  rc=$?
+  if [ $rc = 0 ]; then
+    rm -f ${OUTPUT}
+    echo "PASS" >&2
+  elif [ -z $GITHUB_ACTIONS ]; then
+    read -p "Are these changes expected? [y/N] " EXPECTED
+    case $EXPECTED in
+      y*|Y*)
+        echo "Updating ${REFERENCE}"
+        cp ${OUTPUT} ${REFERENCE}
+        rm -f ${OUTPUT}
+        rc=0
+        ;;
+      *) ;;
+    esac
+  fi
+  if [ $rc != 0 ]; then
+    echo "FAIL" >&2
+    exit $rc
+  fi
+}
+
+function doTestCompare() {
+  TEST_VARIANT="differences"
+  TESTDIR=tests
+  TEST=${name}
+  FILENAME=${TEST}.expected.yaml
+  OUTPUT=${TESTDIR}/.${FILENAME}
+  REFERENCE=${TESTDIR}/${FILENAME}
+
+  echo -e "\nTesting $name chart (${TEST_VARIANT})" >&2
+  # Another method of finding variables missing from values.yaml, e.g.
+  # -  name: -datacenter
+  # +  name: pattern-name-datacenter
+
+  TEST=${name}
+  FILENAME=${TEST}.expected.diff
+  OUTPUT=${TESTDIR}/.${FILENAME}
+  REFERENCE=${TESTDIR}/${FILENAME}
+
+  # Drop the date from the diff output, it will not be stable
+  diff -u ${TESTDIR}/${name}-naked.expected.yaml ${TESTDIR}/${name}-normal.expected.yaml | sed 's/\.yaml.*20[0-9][0-9].*/.yaml/g' > ${OUTPUT}
+
+  if [ ! -e ${REFERENCE} -a -z $GITHUB_ACTIONS ]; then
+    cp ${OUTPUT} ${REFERENCE}
+    git add ${REFERENCE}
+    echo -e "\n\n#### Created test output\007\n\n\007" >&2
+  fi
+
+  diff -u ${REFERENCE} ${OUTPUT}
+  rc=$?
+
+  if [ $rc = 0 ]; then
+    rm -f ${OUTPUT}
+    echo "PASS" >&2
+  elif [ -z $GITHUB_ACTIONS ]; then
+    read -p "Are these changes expected? [y/N] " EXPECTED
+    case $EXPECTED in
+      y*|Y*)
+        echo "Updating ${REFERENCE}"
+        cp ${OUTPUT} ${REFERENCE}
+        rm -f ${OUTPUT}
+        rc=0
+        ;;
+      *) ;;
+    esac
+  fi
+  if [ $rc != 0 ]; then
+    echo "FAIL" >&2
+    exit $rc
+  fi
+}
+
+if [ $2 = "all" ]; then
+  echo -e "\n#####################" >&2
+  echo "### ${name}" >&2
+  echo "#####################" >&2
+
+  # Test that all values used by the chart are in values.yaml with the same defaults as the pattern
+  doTest naked
+
+  # Test the charts as the pattern would drive them
+  INPUTS=$(ls -1 common/examples/*.yaml | grep -v secret)
+  for input in $INPUTS; do
+    variant=normal
+    if [ "$input" != "common/examples/values-example.yaml" ]; then
+      variant=$(echo $input | sed -e 's@.*/@@' -e 's@\.yaml@@')
+    fi
+    doTest $variant "$3 -f $input"
+  done
+
+  # Ensure the differences between the two results are also stable
+  #doTestCompare
+else
+  doTest $2 "$3"
+fi
+
+exit 0
diff --git a/common/scripts/vault-utils.sh b/common/scripts/vault-utils.sh
new file mode 100755
index 00000000..b014e5a4
--- /dev/null
+++ b/common/scripts/vault-utils.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+set -eu
+
+get_abs_filename() {
+  # $1 : relative filename
+  echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
+}
+
+SCRIPT=$(get_abs_filename "$0")
+SCRIPTPATH=$(dirname "${SCRIPT}")
+COMMONPATH=$(dirname "${SCRIPTPATH}")
+PATTERNPATH=$(dirname "${COMMONPATH}")
+
+# Parse arguments
+if [ $# -lt 1 ]; then
+  echo "Specify at least the command ($#): $*"
+  exit 1
+fi
+
+TASK="${1}"
+PATTERN_NAME=${2:-$(basename "`pwd`")}
+
+if [ -z "${TASK}" ]; then
+  echo "Task is unset"
+  exit 1
+fi
+
+ansible-playbook -t "${TASK}" -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" "rhvp.cluster_utils.vault"
diff --git a/common/scripts/write-token-kubeconfig.sh b/common/scripts/write-token-kubeconfig.sh
new file mode 100755
index 00000000..7544fac2
--- /dev/null
+++ b/common/scripts/write-token-kubeconfig.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+set -eu
+
+OUTPUTFILE=${1:-"$HOME/.kube/config"}  # use $HOME: a quoted "~" would not expand
+
+get_abs_filename() {
+  # $1 : relative filename
+  echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
+}
+
+SCRIPT=$(get_abs_filename "$0")
+SCRIPTPATH=$(dirname "${SCRIPT}")
+COMMONPATH=$(dirname "${SCRIPTPATH}")
+PATTERNPATH=$(dirname "${COMMONPATH}")
+
+ansible-playbook -e pattern_dir="${PATTERNPATH}" -e kubeconfig_file="${OUTPUTFILE}" "rhvp.cluster_utils.write-token-kubeconfig"
diff --git a/common/values-global.yaml b/common/values-global.yaml
new file mode 100644
index 00000000..684f89f2
--- /dev/null
+++ b/common/values-global.yaml
@@ -0,0 +1,20 @@
+global:
+  options:
+    useCSV: True
+    syncPolicy: Manual
+    installPlanApproval: Automatic
+    applicationRetryLimit: 20
+
+  git:
+    hostname: github.com
+    # Account is the user or organization under which the pattern repos live
+    account: hybrid-cloud-patterns
+    email: someone@somewhere.com
+    dev_revision: main
+
+  secretStore:
+    backend: vault
+
+main:
+  clusterGroupName: example
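As a final sanity check of the values-global.yaml plumbing above (a sketch, assuming yq v4 and a run from the directory containing values-global.yaml; 'kubernetes' is just an example backend value):

    $ common/scripts/determine-secretstore-backend.sh .
    vault
    $ common/scripts/set-secret-backend.sh kubernetes
    $ common/scripts/determine-secretstore-backend.sh .
    kubernetes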