
Commit df16621

Get S3 bucket object name from user (#439)
* Add option to pass s3 bucket object
* update zip_and_upload_to_s3
1 parent 143cc9d commit df16621

File tree

9 files changed: +22 -18 lines changed

README.md (+4 -2)

@@ -24,7 +24,7 @@ Action also can be passed to the CLI as `--action create/destroy` instead of spe
 * `--clusters-install-data-directory`: Clusters configurations are written to `<clusters-install-data-directory><platform><cluster name>`; write permissions are needed.
   * `<cluster directory>/auth` contains `kubeconfig` and `kubeadmin-password` files
 * `--parallel`: To create / destroy clusters in parallel
-* Pass `--s3-bucket-name` (and optionally `--s3-bucket-path`) to backup <cluster directory> in an S3 bucket.
+* Pass `--s3-bucket-name` (and optionally `--s3-bucket-path` and `--s3-bucket-object-name`) to back up <cluster directory> in an S3 bucket.
 * `--ocm-token`: OCM token, defaults to `OCM_TOKEN` environment variable.
 * `--must-gather-output-dir`: Path to must-gather output dir. `must-gather` will try to collect data when cluster installation fails and cluster can be accessed.

@@ -211,6 +211,7 @@ podman run quay.io/redhat_msi/openshift-cli-installer \
     --registry-config-file=registry-config.json \
     --s3-bucket-name=openshift-cli-installer \
     --s3-bucket-path=install-folders \
+    --s3-bucket-object-name=cluster-backup \
     --cluster 'name=ipi1;base-domain=gcp.interop.ccitredhat.com;platform=gcp;region=us-east1;version=4.14.0-ec.2;worker-flavor=custom-4-16384;log_level=info'
 ```
 * Default `log_level=error` is set for cluster config to hide the openshift-installer logs which contains kubeadmin password.

@@ -260,7 +261,8 @@ podman run quay.io/redhat_msi/openshift-cli-installer \
     --action create \
     --registry-config-file=registry-config.json \
     --s3-bucket-name=openshift-cli-installer \
-    --s3-bucket-path=install-folders \ --cluster 'name=hyper1;platform=hypershift;region=us-west-2;version=4.13.4;compute-machine-type=m5.4xlarge;replicas=6;channel-group=candidate;expiration-time=2h;timeout=1h' \
+    --s3-bucket-path=install-folders \
+    --cluster 'name=hyper1;platform=hypershift;region=us-west-2;version=4.13.4;compute-machine-type=m5.4xlarge;replicas=6;channel-group=candidate;expiration-time=2h;timeout=1h' \
     --ocm-token=$OCM_TOKEN \

     --cluster 'name=ipi1;base-domain=aws.interop.ccitredhat.com;platform=aws;region=us-east-2;version=4.14.0-ec.2;worker-flavor=m5.xlarge' \
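
Taken together with the naming change in `ocp_cluster.py` below, the flags in this example produce a predictable backup location; a short illustration (values taken from the README example above):

```python
# Illustration only: with --s3-bucket-name=openshift-cli-installer,
# --s3-bucket-path=install-folders and --s3-bucket-object-name=cluster-backup,
# the <cluster directory> backup is uploaded to:
#   s3://openshift-cli-installer/install-folders/cluster-backup.zip
# When --s3-bucket-object-name is omitted, the object name falls back to
# "<cluster name>-<shortuuid>.zip".
```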

openshift_cli_installer/cli.py (+5)

@@ -88,6 +88,11 @@
     help="S3 bucket path UUID to append to the S3 zip file name",
     show_default=True,
 )
+@click.option(
+    "--s3-bucket-object-name",
+    help="S3 bucket object name; Will be saved as a zip file",
+    show_default=True,
+)
 @click.option(
     "--ocm-token",
     help="OCM token.",

openshift_cli_installer/libs/clusters/ipi_cluster.py (+1 -2)

@@ -170,8 +170,7 @@ def _rollback_on_error(_ex=None):
             zip_and_upload_to_s3(
                 install_dir=self.cluster_info["cluster-dir"],
                 s3_bucket_name=self.s3_bucket_name,
-                s3_bucket_path=self.s3_bucket_path,
-                uuid=self.cluster_info["shortuuid"],
+                s3_bucket_object_name=self.cluster_info["s3-object-name"],
             )

     def destroy_cluster(self):

openshift_cli_installer/libs/clusters/ocp_cluster.py (+2 -2)

@@ -140,9 +140,9 @@ def get_ocm_client(self):
         ).client

     def _add_s3_bucket_data(self):
+        object_name = self.s3_bucket_object_name or f"{self.cluster_info['name']}-{self.cluster_info['shortuuid']}"
         self.cluster_info["s3-object-name"] = (
-            f"{f'{self.s3_bucket_path}/' if self.s3_bucket_path else ''}"
-            f"{self.cluster_info['name']}-{self.cluster_info['shortuuid']}.zip"
+            f"{f'{self.s3_bucket_path}/' if self.s3_bucket_path else ''}{object_name}.zip"
         )

     def check_and_assign_aws_cluster_region(self):
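
A worked example of the new naming logic (a standalone sketch; `build_s3_object_name` and the sample values are made up for illustration). A user-supplied `--s3-bucket-object-name` wins; otherwise the previous `<name>-<shortuuid>` scheme remains the default:

```python
def build_s3_object_name(s3_bucket_object_name, s3_bucket_path, cluster_info):
    # Mirrors _add_s3_bucket_data: prefer the user-supplied object name,
    # fall back to "<cluster name>-<shortuuid>".
    object_name = s3_bucket_object_name or f"{cluster_info['name']}-{cluster_info['shortuuid']}"
    return f"{f'{s3_bucket_path}/' if s3_bucket_path else ''}{object_name}.zip"


cluster_info = {"name": "ipi1", "shortuuid": "a1b2c3"}  # hypothetical values

print(build_s3_object_name("cluster-backup", "install-folders", cluster_info))
# install-folders/cluster-backup.zip
print(build_s3_object_name(None, "install-folders", cluster_info))
# install-folders/ipi1-a1b2c3.zip
```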

openshift_cli_installer/libs/clusters/osd_cluster.py (+1 -2)

@@ -87,8 +87,7 @@ def create_cluster(self):
             zip_and_upload_to_s3(
                 install_dir=self.cluster_info["cluster-dir"],
                 s3_bucket_name=self.s3_bucket_name,
-                s3_bucket_path=self.s3_bucket_path,
-                uuid=self.cluster_info["shortuuid"],
+                s3_bucket_object_name=self.cluster_info["s3-object-name"],
             )

     def destroy_cluster(self):

openshift_cli_installer/libs/clusters/rosa_cluster.py (+1 -2)

@@ -226,10 +226,9 @@ def create_cluster(self):

         if self.s3_bucket_name:
             zip_and_upload_to_s3(
-                uuid=self.cluster_info["shortuuid"],
                 install_dir=self.cluster_info["cluster-dir"],
                 s3_bucket_name=self.s3_bucket_name,
-                s3_bucket_path=self.s3_bucket_path,
+                s3_bucket_object_name=self.cluster_info["s3-object-name"],
             )

     def destroy_cluster(self):

openshift_cli_installer/libs/user_input.py (+1)

@@ -52,6 +52,7 @@ def __init__(self, **kwargs):
         self.s3_bucket_name = self.user_kwargs.get("s3_bucket_name")
         self.s3_bucket_path = self.user_kwargs.get("s3_bucket_path")
         self.s3_bucket_path_uuid = self.user_kwargs.get("s3_bucket_path_uuid")
+        self.s3_bucket_object_name = self.user_kwargs.get("s3_bucket_object_name")
         self.destroy_clusters_from_s3_bucket = self.user_kwargs.get("destroy_clusters_from_s3_bucket")
         self.destroy_clusters_from_s3_bucket_query = self.user_kwargs.get("destroy_clusters_from_s3_bucket_query")
         self.destroy_clusters_from_install_data_directory = self.user_kwargs.get(

openshift_cli_installer/scripts/openshift-cli-installer-build-command.py (+2)

@@ -35,6 +35,8 @@ def main():
         cmd += f" --s3-bucket-path={os_env['S3_BUCKET_PATH']}"
     if os_env.get("S3_BUCKET_PATH_UUID"):
         cmd += f" --s3-bucket-path-uuid={os_env['S3_BUCKET_PATH_UUID']}"
+    if os_env.get("S3_BUCKET_OBJECT_NAME"):
+        cmd += f" --s3-bucket-object-name={os_env['S3_BUCKET_OBJECT_NAME']}"
     if os_env.get("AWS_ACCESS_KEY_ID"):
         cmd += f" --aws-access-key-id={os_env['AWS_ACCESS_KEY_ID']}"
     if os_env.get("AWS_SECRET_ACCESS_KEY"):

openshift_cli_installer/utils/general.py (+5 -8)

@@ -4,6 +4,7 @@
 import shutil
 from functools import wraps
 from importlib.util import find_spec
+from pathlib import Path
 from time import sleep

 import click

@@ -54,17 +55,13 @@ def inner(*args, **kwargs):


 @ignore_exceptions()
-def zip_and_upload_to_s3(install_dir, s3_bucket_name, uuid, s3_bucket_path=None):
+def zip_and_upload_to_s3(install_dir, s3_bucket_name, s3_bucket_object_name):
     remove_terraform_folder_from_install_dir(install_dir=install_dir)

-    _base_name = f"{install_dir}-{uuid}"
+    zip_file = shutil.make_archive(base_name=Path(s3_bucket_object_name).stem, format="zip", root_dir=install_dir)

-    zip_file = shutil.make_archive(base_name=_base_name, format="zip", root_dir=install_dir)
-    bucket_key = os.path.join(s3_bucket_path or "", os.path.split(zip_file)[-1])
-    LOGGER.info(f"Upload {zip_file} file to S3 {s3_bucket_name}, path {bucket_key}")
-    s3_client().upload_file(Filename=zip_file, Bucket=s3_bucket_name, Key=bucket_key)
-
-    return _base_name
+    LOGGER.info(f"Upload {zip_file} file to S3 {s3_bucket_name}, path {s3_bucket_object_name}")
+    s3_client().upload_file(Filename=zip_file, Bucket=s3_bucket_name, Key=s3_bucket_object_name)


 def get_manifests_path():
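
For reference, a minimal sketch of a call under the new signature (the install dir and bucket values are placeholders). Note that `Path(s3_bucket_object_name).stem` drops both the bucket path and the `.zip` suffix before `shutil.make_archive` re-appends the extension, while the full `s3_bucket_object_name` is used unchanged as the upload key:

```python
from openshift_cli_installer.utils.general import zip_and_upload_to_s3

# Hypothetical call site, mirroring the cluster classes above:
zip_and_upload_to_s3(
    install_dir="/tmp/clusters/ipi1",  # <cluster directory> (placeholder)
    s3_bucket_name="openshift-cli-installer",  # --s3-bucket-name
    s3_bucket_object_name="install-folders/cluster-backup.zip",  # cluster_info["s3-object-name"]
)
# make_archive(base_name="cluster-backup", ...) writes ./cluster-backup.zip,
# which is uploaded to s3://openshift-cli-installer/install-folders/cluster-backup.zip
```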
