diff --git a/tests/s3_mock_server_s3express_provider_test.c b/tests/s3_mock_server_s3express_provider_test.c
index 693c9fdf8..bd7d3d873 100644
--- a/tests/s3_mock_server_s3express_provider_test.c
+++ b/tests/s3_mock_server_s3express_provider_test.c
@@ -614,9 +614,8 @@ TEST_CASE(s3express_provider_long_running_session_refresh) {
 
     /* 300 secs to make sure we will refresh it at least once. */
     size_t num_requests = 600;
-    char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com";
     struct aws_credentials_properties_s3express property = {
-        .host = aws_byte_cursor_from_c_str(endpoint),
+        .host = g_test_s3express_bucket_use1_az4_endpoint,
     };
 
     for (size_t i = 0; i < num_requests; i++) {
diff --git a/tests/s3_s3express_client_test.c b/tests/s3_s3express_client_test.c
index cad5b81d2..b8f771a7f 100644
--- a/tests/s3_s3express_client_test.c
+++ b/tests/s3_s3express_client_test.c
@@ -228,8 +228,6 @@ static int s_s3express_client_put_test_helper(struct aws_allocator *allocator, s
 
     struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1");
 
-    char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com";
-    struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(endpoint);
     struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-test");
 
     struct aws_s3_client_config client_config = {
@@ -243,13 +241,23 @@ static int s_s3express_client_put_test_helper(struct aws_allocator *allocator, s
     struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config);
 
     ASSERT_SUCCESS(s_s3express_put_object_request(
-        allocator, client, content_length, &tester, host_cursor, key_cursor, region_cursor));
+        allocator,
+        client,
+        content_length,
+        &tester,
+        g_test_s3express_bucket_use1_az4_endpoint,
+        key_cursor,
+        region_cursor));
 
     struct aws_byte_cursor west2_region_cursor = aws_byte_cursor_from_c_str("us-west-2");
-    char west2_endpoint[] = "crts-west2--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com";
-    struct aws_byte_cursor west2_host_cursor = aws_byte_cursor_from_c_str(west2_endpoint);
     ASSERT_SUCCESS(s_s3express_put_object_request(
-        allocator, client, content_length, &tester, west2_host_cursor, key_cursor, west2_region_cursor));
+        allocator,
+        client,
+        content_length,
+        &tester,
+        g_test_s3express_bucket_usw2_az1_endpoint,
+        key_cursor,
+        west2_region_cursor));
 
     aws_s3_client_release(client);
     aws_s3_tester_clean_up(&tester);
@@ -280,13 +288,9 @@ TEST_CASE(s3express_client_put_object_multipart_multiple) {
 
     struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1");
 
-    char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com";
-    struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(endpoint);
     struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-test");
 
     struct aws_byte_cursor west2_region_cursor = aws_byte_cursor_from_c_str("us-west-2");
-    char west2_endpoint[] = "crts-west2--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com";
-    struct aws_byte_cursor west2_host_cursor = aws_byte_cursor_from_c_str(west2_endpoint);
 
     struct aws_s3_client_config client_config = {
         .part_size = MB_TO_BYTES(5),
@@ -302,11 +306,11 @@ TEST_CASE(s3express_client_put_object_multipart_multiple) {
         input_streams[i] = aws_s3_test_input_stream_new(allocator, MB_TO_BYTES(10));
 
         struct aws_byte_cursor request_region = region_cursor;
-        struct aws_byte_cursor request_host = host_cursor;
+        struct aws_byte_cursor request_host = g_test_s3express_bucket_use1_az4_endpoint;
         if (i % 2 == 0) {
             /* Make half of request to east1 and rest half to west2 */
             request_region = west2_region_cursor;
-            request_host = west2_host_cursor;
+            request_host = g_test_s3express_bucket_usw2_az1_endpoint;
         }
 
         struct aws_http_message *message = aws_s3_test_put_object_request_new(
@@ -401,8 +405,6 @@ TEST_CASE(s3express_client_put_object_long_running_session_refresh) {
 
     struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1");
 
-    char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com";
-    struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(endpoint);
     struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-test");
 
     struct aws_s3_client_config client_config = {
@@ -421,7 +423,12 @@ TEST_CASE(s3express_client_put_object_long_running_session_refresh) {
         struct aws_input_stream *upload_stream = aws_s3_test_input_stream_new(allocator, MB_TO_BYTES(10));
 
         struct aws_http_message *message = aws_s3_test_put_object_request_new(
-            allocator, &host_cursor, key_cursor, g_test_body_content_type, upload_stream, 0);
+            allocator,
+            &g_test_s3express_bucket_use1_az4_endpoint,
+            key_cursor,
+            g_test_body_content_type,
+            upload_stream,
+            0);
         struct aws_s3_meta_request_options options;
         AWS_ZERO_STRUCT(options);
         options.type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT;
@@ -473,10 +480,6 @@ TEST_CASE(s3express_client_get_object) {
 
     struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1");
 
-    char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com";
-    struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(endpoint);
-    struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-download-10MB");
-
     struct aws_s3_client_config client_config = {
         .part_size = MB_TO_BYTES(5),
         .enable_s3express = true,
@@ -487,7 +490,8 @@ TEST_CASE(s3express_client_get_object) {
 
     struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config);
 
-    struct aws_http_message *message = aws_s3_test_get_object_request_new(allocator, host_cursor, key_cursor);
+    struct aws_http_message *message = aws_s3_test_get_object_request_new(
+        allocator, g_test_s3express_bucket_use1_az4_endpoint, g_pre_existing_object_10MB);
 
     struct aws_s3_meta_request_options options;
     AWS_ZERO_STRUCT(options);
@@ -532,10 +536,6 @@ TEST_CASE(s3express_client_get_object_multiple) {
 
     struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1");
 
-    char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com";
-    struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(endpoint);
-    struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-download-10MB");
-
     struct aws_s3_client_config client_config = {
         .part_size = MB_TO_BYTES(5),
         .enable_s3express = true,
@@ -548,7 +548,8 @@ TEST_CASE(s3express_client_get_object_multiple) {
 
     for (size_t i = 0; i < num_meta_requests; ++i) {
 
-        struct aws_http_message *message = aws_s3_test_get_object_request_new(allocator, host_cursor, key_cursor);
+        struct aws_http_message *message = aws_s3_test_get_object_request_new(
+            allocator, g_test_s3express_bucket_use1_az4_endpoint, g_pre_existing_object_10MB);
 
         struct aws_s3_meta_request_options options;
         AWS_ZERO_STRUCT(options);
diff --git a/tests/s3_tester.c b/tests/s3_tester.c
index d1e4e6321..e41192d75 100644
--- a/tests/s3_tester.c
+++ b/tests/s3_tester.c
@@ -64,6 +64,16 @@ struct aws_byte_cursor g_test_bucket_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERA
 /* If `$CRT_S3_TEST_BUCKET_NAME` envrionment variable is set, use `$CRT_S3_TEST_BUCKET_NAME-public`; otherwise, use
  * aws-c-s3-test-bucket-public */
 struct aws_byte_cursor g_test_public_bucket_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-c-s3-test-bucket-public");
+/* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use
+ * `$CRT_S3_TEST_BUCKET_NAME--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com`; otherwise, use
+ * aws-c-s3-test-bucket--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com */
+struct aws_byte_cursor g_test_s3express_bucket_usw2_az1_endpoint = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(
+    "aws-c-s3-test-bucket--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com");
+/* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use
+ * `$CRT_S3_TEST_BUCKET_NAME--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com`; otherwise, use
+ * aws-c-s3-test-bucket--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com */
+struct aws_byte_cursor g_test_s3express_bucket_use1_az4_endpoint = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(
+    "aws-c-s3-test-bucket--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com");
 
 #ifdef BYO_CRYPTO
 /* Under BYO_CRYPTO, this function currently needs to be defined by the user. Defining a null implementation here so
@@ -355,6 +365,28 @@ int aws_s3_tester_init(struct aws_allocator *allocator, struct aws_s3_tester *te
             AWS_BYTE_CURSOR_PRI(g_test_bucket_name));
         tester->public_bucket_name = aws_string_new_from_c_str(allocator, public_bucket_name_buffer);
         g_test_public_bucket_name = aws_byte_cursor_from_string(tester->public_bucket_name);
+
+        char s3express_bucket_usw2_az1_endpoint_buffer[512] = "";
+        snprintf(
+            s3express_bucket_usw2_az1_endpoint_buffer,
+            sizeof(s3express_bucket_usw2_az1_endpoint_buffer),
+            "" PRInSTR "--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com",
+            AWS_BYTE_CURSOR_PRI(g_test_bucket_name));
+        tester->s3express_bucket_usw2_az1_endpoint =
+            aws_string_new_from_c_str(allocator, s3express_bucket_usw2_az1_endpoint_buffer);
+        g_test_s3express_bucket_usw2_az1_endpoint =
+            aws_byte_cursor_from_string(tester->s3express_bucket_usw2_az1_endpoint);
+
+        char s3express_bucket_use1_az4_name_buffer[128] = "";
+        snprintf(
+            s3express_bucket_use1_az4_name_buffer,
+            sizeof(s3express_bucket_use1_az4_name_buffer),
+            "" PRInSTR "--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com",
+            AWS_BYTE_CURSOR_PRI(g_test_bucket_name));
+        tester->s3express_bucket_use1_az4_endpoint =
+            aws_string_new_from_c_str(allocator, s3express_bucket_use1_az4_name_buffer);
+        g_test_s3express_bucket_use1_az4_endpoint =
+            aws_byte_cursor_from_string(tester->s3express_bucket_use1_az4_endpoint);
     }
 
     aws_s3_library_init(allocator);
@@ -698,6 +730,8 @@ void aws_s3_tester_clean_up(struct aws_s3_tester *tester) {
     }
     aws_string_destroy(tester->bucket_name);
     aws_string_destroy(tester->public_bucket_name);
+    aws_string_destroy(tester->s3express_bucket_usw2_az1_endpoint);
+    aws_string_destroy(tester->s3express_bucket_use1_az4_endpoint);
 
     aws_credentials_release(tester->anonymous_creds);
 
diff --git a/tests/s3_tester.h b/tests/s3_tester.h
index a2df2817b..a0f5e5c40 100644
--- a/tests/s3_tester.h
+++ b/tests/s3_tester.h
@@ -100,6 +100,8 @@ struct aws_s3_tester {
 
     struct aws_string *bucket_name;
     struct aws_string *public_bucket_name;
+    struct aws_string *s3express_bucket_usw2_az1_endpoint;
+    struct aws_string *s3express_bucket_use1_az4_endpoint;
 
     struct {
         struct aws_mutex lock;
@@ -489,5 +491,13 @@ extern struct aws_byte_cursor g_test_bucket_name;
  * aws-c-s3-test-bucket-public */
 extern struct aws_byte_cursor g_test_public_bucket_name;
 
+/* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use
+ * `$CRT_S3_TEST_BUCKET_NAME--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com`; otherwise, use
+ * aws-c-s3-test-bucket--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com */
+extern struct aws_byte_cursor g_test_s3express_bucket_usw2_az1_endpoint;
+/* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use
+ * `$CRT_S3_TEST_BUCKET_NAME--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com`; otherwise, use
+ * aws-c-s3-test-bucket--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com */
+extern struct aws_byte_cursor g_test_s3express_bucket_use1_az4_endpoint;
 
 #endif /* AWS_S3_TESTER_H */
diff --git a/tests/test_helper/README.md b/tests/test_helper/README.md
index 6fb9627e9..91432460a 100644
--- a/tests/test_helper/README.md
+++ b/tests/test_helper/README.md
@@ -32,13 +32,22 @@ python3 test_helper.py clean
     + `pre-existing-10MB`
     + `pre-existing-1MB`
     + `pre-existing-empty`
+
 * Create `<BUCKET_NAME>-public` in us-west-2
 * Upload files:
     + `pre-existing-1MB` 1MB file with public read access.
 
+* Create directory bucket `<BUCKET_NAME>--usw2-az1--x-s3` in us-west-2
+* Upload files:
+    + `pre-existing-10MB` 10MB file.
+
+* Create directory bucket `<BUCKET_NAME>--use1-az4--x-s3` in us-east-1
+* Upload files:
+    + `pre-existing-10MB` 10MB file.
+
 ### `clean` action
 
-* Delete the `<BUCKET_NAME>` and `<BUCKET_NAME>-public` and every object inside them
+* Delete the buckets created by the `init` action and every object inside them.
 
 ## BUCKET_NAME
 
diff --git a/tests/test_helper/test_helper.py b/tests/test_helper/test_helper.py
index 29e65d909..9056dd97a 100755
--- a/tests/test_helper/test_helper.py
+++ b/tests/test_helper/test_helper.py
@@ -9,17 +9,18 @@ import os
 import random
 
+print(boto3.__version__)
+REGION = 'us-west-2'
+REGION_EAST_1 = 'us-east-1'
 
 s3 = boto3.resource('s3')
-s3_client = boto3.client('s3')
+s3_client = boto3.client('s3', region_name=REGION)
+s3_client_east1 = boto3.client('s3', region_name=REGION_EAST_1)
 s3_control_client = boto3.client('s3control')
 
-REGION = 'us-west-2'
-
-
 MB = 1024*1024
 GB = 1024*1024*1024
 
@@ -37,23 +38,23 @@ args = parser.parse_args()
 
 
 if args.bucket_name is not None:
-    BUCKET_NAME = args.bucket_name
+    BUCKET_NAME_BASE = args.bucket_name
 elif "CRT_S3_TEST_BUCKET_NAME" in os.environ:
-    BUCKET_NAME = os.environ['CRT_S3_TEST_BUCKET_NAME']
+    BUCKET_NAME_BASE = os.environ['CRT_S3_TEST_BUCKET_NAME']
 else:
     # Generate a random bucket name
-    BUCKET_NAME = 'aws-c-s3-test-bucket-' + str(random.random())[2:8]
+    BUCKET_NAME_BASE = 'aws-c-s3-test-bucket-' + str(random.random())[2:8]
 
-PUBLIC_BUCKET_NAME = BUCKET_NAME + "-public"
+PUBLIC_BUCKET_NAME = BUCKET_NAME_BASE + "-public"
 
 
 def create_bytes(size):
     return bytearray([1] * size)
 
 
-def put_pre_existing_objects(size, keyname, bucket=BUCKET_NAME, sse=None, public_read=False):
+def put_pre_existing_objects(size, keyname, bucket=BUCKET_NAME_BASE, sse=None, public_read=False, client=s3_client):
     if size == 0:
-        s3_client.put_object(Bucket=bucket, Key=keyname)
+        client.put_object(Bucket=bucket, Key=keyname)
         print(f"Object {keyname} uploaded")
         return
 
@@ -72,7 +73,7 @@ def put_pre_existing_objects(size, keyname, bucket=BUCKET_NAME, sse=None, public
     if public_read:
         args['ACL'] = 'public-read'
     try:
-        s3_client.put_object(**args)
+        client.put_object(**args)
     except botocore.exceptions.ClientError as e:
         print(f"Object {keyname} failed to upload, with exception: {e}")
         if public_read and e.response['Error']['Code'] == 'AccessDenied':
@@ -81,55 +82,84 @@ def put_pre_existing_objects(size, keyname, bucket=BUCKET_NAME, sse=None, public
     print(f"Object {keyname} uploaded")
 
 
-def create_bucket_with_lifecycle():
+def create_bucket_with_lifecycle(availability_zone=None, client=s3_client):
     try:
         # Create the bucket. This returns an error if the bucket already exists.
-        s3_client.create_bucket(
-            Bucket=BUCKET_NAME, CreateBucketConfiguration={'LocationConstraint': REGION})
-        s3_client.put_bucket_lifecycle_configuration(
-            Bucket=BUCKET_NAME,
-            LifecycleConfiguration={
-                'Rules': [
-                    {
-                        'ID': 'clean up non-pre-existing objects',
-                        'Expiration': {
-                            'Days': 1,
-                        },
-                        'Filter': {
-                            'Prefix': 'upload/',
-                        },
-                        'Status': 'Enabled',
-                        'NoncurrentVersionExpiration': {
-                            'NoncurrentDays': 1,
-                        },
-                        'AbortIncompleteMultipartUpload': {
-                            'DaysAfterInitiation': 1,
+
+        if availability_zone is not None:
+            bucket_config = {
+                'Location': {
+                    'Type': 'AvailabilityZone',
+                    'Name': availability_zone
+                },
+                'Bucket': {
+                    'Type': 'Directory',
+                    'DataRedundancy': 'SingleAvailabilityZone'
+                }
+            }
+            bucket_name = BUCKET_NAME_BASE+f"--{availability_zone}--x-s3"
+        else:
+            bucket_config = {'LocationConstraint': REGION}
+            bucket_name = BUCKET_NAME_BASE
+
+        client.create_bucket(
+            Bucket=bucket_name, CreateBucketConfiguration=bucket_config)
+        if availability_zone is None:
+            client.put_bucket_lifecycle_configuration(
+                Bucket=bucket_name,
+                LifecycleConfiguration={
+                    'Rules': [
+                        {
+                            'ID': 'clean up non-pre-existing objects',
+                            'Expiration': {
+                                'Days': 1,
+                            },
+                            'Filter': {
+                                'Prefix': 'upload/',
+                            },
+                            'Status': 'Enabled',
+                            'NoncurrentVersionExpiration': {
+                                'NoncurrentDays': 1,
+                            },
+                            'AbortIncompleteMultipartUpload': {
+                                'DaysAfterInitiation': 1,
+                            },
                         },
-                    },
-                ],
-            },
-        )
-        print(f"Bucket {BUCKET_NAME} created", file=sys.stderr)
-        put_pre_existing_objects(
-            10*MB, 'pre-existing-10MB-aes256-c', sse='aes256-c')
-        put_pre_existing_objects(
-            10*MB, 'pre-existing-10MB-aes256', sse='aes256')
+                    ],
+                },
+            )
+        print(f"Bucket {bucket_name} created", file=sys.stderr)
+
         put_pre_existing_objects(
-            10*MB, 'pre-existing-10MB-kms', sse='kms')
-        put_pre_existing_objects(256*MB, 'pre-existing-256MB')
-        put_pre_existing_objects(256*MB, 'pre-existing-256MB-@')
-        put_pre_existing_objects(2*GB, 'pre-existing-2GB')
-        put_pre_existing_objects(2*GB, 'pre-existing-2GB-@')
-        put_pre_existing_objects(10*MB, 'pre-existing-10MB')
-        put_pre_existing_objects(1*MB, 'pre-existing-1MB')
-        put_pre_existing_objects(1*MB, 'pre-existing-1MB-@')
-        put_pre_existing_objects(0, 'pre-existing-empty')
+            10*MB, 'pre-existing-10MB', bucket=bucket_name, client=client)
+
+        if availability_zone is None:
+            put_pre_existing_objects(
+                10*MB, 'pre-existing-10MB-aes256-c', sse='aes256-c', bucket=bucket_name)
+            put_pre_existing_objects(
+                10*MB, 'pre-existing-10MB-aes256', sse='aes256', bucket=bucket_name)
+            put_pre_existing_objects(
+                10*MB, 'pre-existing-10MB-kms', sse='kms', bucket=bucket_name)
+            put_pre_existing_objects(
+                256*MB, 'pre-existing-256MB', bucket=bucket_name)
+            put_pre_existing_objects(
+                256*MB, 'pre-existing-256MB-@', bucket=bucket_name)
+            put_pre_existing_objects(
+                2*GB, 'pre-existing-2GB', bucket=bucket_name)
+            put_pre_existing_objects(
+                2*GB, 'pre-existing-2GB-@', bucket=bucket_name)
+            put_pre_existing_objects(
+                1*MB, 'pre-existing-1MB', bucket=bucket_name)
+            put_pre_existing_objects(
+                1*MB, 'pre-existing-1MB-@', bucket=bucket_name)
+            put_pre_existing_objects(
+                0, 'pre-existing-empty', bucket=bucket_name)
     except botocore.exceptions.ClientError as e:
         # The bucket already exists. That's fine.
         if e.response['Error']['Code'] == 'BucketAlreadyOwnedByYou' or e.response['Error']['Code'] == 'BucketAlreadyExists':
             print(
-                f"Bucket {BUCKET_NAME} not created, skip initializing.", file=sys.stderr)
+                f"Bucket {bucket_name} not created, skip initializing.", file=sys.stderr)
             return
         raise e
 
@@ -159,34 +189,43 @@ def create_bucket_with_public_object():
         raise e
 
 
-def cleanup(bucket_name):
-    bucket = s3.Bucket(bucket_name)
-    bucket.objects.all().delete()
-    s3_client.delete_bucket(Bucket=bucket_name)
+def cleanup(bucket_name, availability_zone=None, client=s3_client):
+    if availability_zone is not None:
+        bucket_name = bucket_name+f"--{availability_zone}--x-s3"
+
+    objects = client.list_objects_v2(Bucket=bucket_name)["Contents"]
+    objects = list(map(lambda x: {"Key": x["Key"]}, objects))
+    client.delete_objects(Bucket=bucket_name, Delete={"Objects": objects})
+    client.delete_bucket(Bucket=bucket_name)
     print(f"Bucket {bucket_name} deleted", file=sys.stderr)
 
 
 if args.action == 'init':
     try:
-        print(BUCKET_NAME + " " + PUBLIC_BUCKET_NAME + " initializing...")
+        print(BUCKET_NAME_BASE + " " + PUBLIC_BUCKET_NAME + " initializing...")
+        create_bucket_with_lifecycle("use1-az4", s3_client_east1)
+        create_bucket_with_lifecycle("usw2-az1")
         create_bucket_with_lifecycle()
         create_bucket_with_public_object()
-        if os.environ.get('CRT_S3_TEST_BUCKET_NAME') != BUCKET_NAME:
+        if os.environ.get('CRT_S3_TEST_BUCKET_NAME') != BUCKET_NAME_BASE:
             print(
-                f"* Please set the environment variable $CRT_S3_TEST_BUCKET_NAME to {BUCKET_NAME} before running the tests.")
+                f"* Please set the environment variable $CRT_S3_TEST_BUCKET_NAME to {BUCKET_NAME_BASE} before running the tests.")
     except Exception as e:
         print(e)
         try:
             # Try to clean up the bucket created, when initialization failed.
-            cleanup(BUCKET_NAME)
+            cleanup(BUCKET_NAME_BASE, "use1-az4", s3_client_east1)
+            cleanup(BUCKET_NAME_BASE, "usw2-az1")
+            cleanup(BUCKET_NAME_BASE)
             cleanup(PUBLIC_BUCKET_NAME)
-        except Exception as e:
+        except Exception as e2:
             exit(-1)
-        raise e
         exit(-1)
 elif args.action == 'clean':
     if "CRT_S3_TEST_BUCKET_NAME" not in os.environ and args.bucket_name is None:
         print("Set the environment variable CRT_S3_TEST_BUCKET_NAME before clean up, or pass in bucket_name as argument.")
         exit(-1)
-    cleanup(BUCKET_NAME)
+    cleanup(BUCKET_NAME_BASE, "use1-az4", s3_client_east1)
+    cleanup(BUCKET_NAME_BASE, "usw2-az1")
+    cleanup(BUCKET_NAME_BASE)
     cleanup(PUBLIC_BUCKET_NAME)
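
Note (not part of the patch): the C globals, the tester init code, and the Python helper above all derive the directory-bucket name and the S3 Express endpoint from the same base bucket name plus an availability-zone ID. A minimal sketch of that naming convention follows, using the zone/region pairs hard-coded in the tests (`use1-az4`/`us-east-1` and `usw2-az1`/`us-west-2`); the helper function names below are illustrative only and do not exist in the repository.

```python
# Illustrative sketch of the naming convention the patch relies on.
# The zone/region pairs mirror the ones used by the tests; the function
# names are hypothetical and are not part of test_helper.py.

def directory_bucket_name(base_name: str, availability_zone: str) -> str:
    # e.g. "aws-c-s3-test-bucket--use1-az4--x-s3"
    return f"{base_name}--{availability_zone}--x-s3"

def s3express_endpoint(base_name: str, availability_zone: str, region: str) -> str:
    # e.g. "aws-c-s3-test-bucket--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com"
    bucket = directory_bucket_name(base_name, availability_zone)
    return f"{bucket}.s3express-{availability_zone}.{region}.amazonaws.com"

if __name__ == "__main__":
    base = "aws-c-s3-test-bucket"  # or the value of $CRT_S3_TEST_BUCKET_NAME
    print(s3express_endpoint(base, "use1-az4", "us-east-1"))
    print(s3express_endpoint(base, "usw2-az1", "us-west-2"))
```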