From fcd7a104df3b9b78e2dccd5cfb27773a06a09e90 Mon Sep 17 00:00:00 2001
From: Dengke Tang
Date: Thu, 7 Dec 2023 15:59:36 -0800
Subject: [PATCH] Create session error code (#392)

---
 docs/memory_aware_request_execution.md        | 14 ++---
 include/aws/s3/private/s3_util.h              |  8 ---
 include/aws/s3/s3.h                           |  1 +
 source/s3.c                                   |  1 +
 source/s3_auto_ranged_get.c                   |  4 +-
 source/s3_auto_ranged_put.c                   |  4 +-
 source/s3_copy_object.c                       |  2 +-
 source/s3_meta_request.c                      | 11 ++--
 source/s3express_credentials_provider.c       | 23 ++++++--
 tests/CMakeLists.txt                          |  1 +
 .../s3_mock_server_s3express_provider_test.c  |  2 +
 tests/s3_mock_server_tests.c                  |  2 +-
 tests/s3_retry_tests.c                        |  2 +-
 tests/s3_s3express_client_test.c              | 55 +++++++++++++++++++
 14 files changed, 97 insertions(+), 33 deletions(-)

diff --git a/docs/memory_aware_request_execution.md b/docs/memory_aware_request_execution.md
index 9d2dd4934..2b104ce44 100644
--- a/docs/memory_aware_request_execution.md
+++ b/docs/memory_aware_request_execution.md
@@ -3,12 +3,12 @@ scales resource usage, such as number of parallel requests in flight, to achieve
 target throughput. The client creates buffers to hold data it is sending or
 receiving for each request and scaling requests in flight has direct impact on
 memory used. In practice, setting high target throughput or larger part size can
-lead to high observed memory usage. 
+lead to high observed memory usage.
 
 To mitigate high memory usages, memory reuse improvements were recently added to
 the client along with options to limit max memory used. The following sections
 will go into more detail on aspects of those changes and how the affect the
-client. 
+client.
 
 ### Memory Reuse
 At the basic level, CRT S3 client starts with a meta request for operation like
@@ -18,7 +18,7 @@ requests and release it right after the request was done. That approach,
 resulted in a lot of very short lived allocations and allocator thrashing,
 overall leading to memory use spikes considerably higher than whats needed. To
 address that, the client is switching to a pooled buffer approach, discussed
-below. 
+below.
 
 Note: approach described below is work in progress and concentrates on improving
 the common cases (default 8mb part sizes and part sizes smaller than 64mb).
@@ -29,7 +29,7 @@ Several observations about the client usage of buffers:
 - Get operations always use either the configured part size or default of 8mb.
   Part size for get is not adjusted, since there is no 10,000 part limitation.
 - Both Put and Get operations go through fill and drain phases. Ex. for Put, the
-  client first schedules a number of reads to 'fil' the buffers from the source
+  client first schedules a number of reads to 'fill' the buffers from the source
   and as those reads complete, the buffer are send over to the networking layer
   are 'drained'
 - individual uploadParts or ranged gets operations typically have a similar
@@ -37,16 +37,16 @@ Several observations about the client usage of buffers:
   lifespan (with some caveats). As a result, major usage pattern is: allocate N
   buffers in bulk at the same time
 
 The buffer pooling takes advantage of some of those allocation patterns and
-works as follows. 
+works as follows.
 
 The memory is split into primary and secondary areas. Secondary area is used for
 requests with part size bigger than a predefined value (currently 4 times part size)
 allocations from it got directly to allocator and are effectively old way of
-doing things. 
+doing things.
 
 Primary memory area is split into blocks of fixed size (part size if defined or
 8mb if not times 16). Blocks are allocated on demand. Each block is logically
 subdivided into part sized chunks. Pool allocates and releases in chunk sizes
-only, and supports acquiring several chunks (up to 4) at once. 
+only, and supports acquiring several chunks (up to 4) at once.
 
 Blocks are kept around while there are ongoing requests and are released async,
 when there is low pressure on memory.
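Note: for readers skimming the doc change above, the pooling scheme it describes (fixed-size blocks of 16 part-sized chunks, acquisitions of up to 4 chunks at once) maps naturally onto a small per-block bitmask. The sketch below is illustrative only; the chunk_block type and function names are hypothetical, and the real pool additionally handles block allocation, the secondary area, and async release.

#include <stddef.h>
#include <stdint.h>

/* One primary-area block: 16 part-sized chunks tracked by a bitmask. */
struct chunk_block {
    uint8_t *mem;       /* block memory, 16 * chunk_size bytes */
    uint16_t used_mask; /* bit i set => chunk i is in use */
};

/* Acquire `count` (1..4) contiguous chunks; NULL means this block is full or
 * too fragmented, and the caller moves on to another (or a new) block. */
static uint8_t *chunk_block_acquire(struct chunk_block *block, size_t chunk_size, size_t count) {
    uint16_t want = (uint16_t)((1u << count) - 1); /* `count` low bits set */
    for (size_t i = 0; i + count <= 16; ++i) {
        uint16_t mask = (uint16_t)(want << i);
        if ((block->used_mask & mask) == 0) {
            block->used_mask |= mask;
            return block->mem + i * chunk_size;
        }
    }
    return NULL;
}

static void chunk_block_release(struct chunk_block *block, size_t chunk_size, uint8_t *ptr, size_t count) {
    size_t first = (size_t)(ptr - block->mem) / chunk_size;
    block->used_mask &= (uint16_t)~(((1u << count) - 1) << first);
    /* A block whose used_mask drops to 0 is a candidate for the async
     * release-under-low-memory-pressure behavior described above. */
}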
diff --git a/include/aws/s3/private/s3_util.h b/include/aws/s3/private/s3_util.h
index 046386131..f92c17d79 100644
--- a/include/aws/s3/private/s3_util.h
+++ b/include/aws/s3/private/s3_util.h
@@ -35,14 +35,6 @@ struct aws_http_headers;
 struct aws_http_message;
 struct aws_s3_client;
 
-enum aws_s3_response_status {
-    AWS_S3_RESPONSE_STATUS_SUCCESS = 200,
-    AWS_S3_RESPONSE_STATUS_NO_CONTENT_SUCCESS = 204,
-    AWS_S3_RESPONSE_STATUS_RANGE_SUCCESS = 206,
-    AWS_S3_RESPONSE_STATUS_INTERNAL_ERROR = 500,
-    AWS_S3_RESPONSE_STATUS_SLOW_DOWN = 503,
-};
-
 struct aws_cached_signing_config_aws {
     struct aws_allocator *allocator;
     struct aws_string *service;
diff --git a/include/aws/s3/s3.h b/include/aws/s3/s3.h
index 68015d392..12ce6b1d2 100644
--- a/include/aws/s3/s3.h
+++ b/include/aws/s3/s3.h
@@ -43,6 +43,7 @@ enum aws_s3_errors {
     AWS_ERROR_S3_FILE_MODIFIED,
     AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT,
     AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG,
+    AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED,
 
     AWS_ERROR_S3_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_S3_PACKAGE_ID)
 };
diff --git a/source/s3.c b/source/s3.c
index 44cc8bb00..b1b3716b7 100644
--- a/source/s3.c
+++ b/source/s3.c
@@ -44,6 +44,7 @@ static struct aws_error_info s_errors[] = {
     AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT, "Request was not created due to used memory exceeding memory limit."),
     AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG, "Specified memory configuration is invalid for the system. "
         "Memory limit should be at least 1GiB. Part size and max part size should be smaller than memory limit."),
+    AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED, "CreateSession call failed when signing with S3 Express."),
 };
 /* clang-format on */
 
diff --git a/source/s3_auto_ranged_get.c b/source/s3_auto_ranged_get.c
index 5e72b3a74..7e9c2b27f 100644
--- a/source/s3_auto_ranged_get.c
+++ b/source/s3_auto_ranged_get.c
@@ -46,10 +46,10 @@ static int s_s3_auto_ranged_get_success_status(struct aws_s3_meta_request *meta_
     AWS_PRECONDITION(auto_ranged_get);
 
     if (auto_ranged_get->initial_message_has_range_header) {
-        return AWS_S3_RESPONSE_STATUS_RANGE_SUCCESS;
+        return AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT;
     }
 
-    return AWS_S3_RESPONSE_STATUS_SUCCESS;
+    return AWS_HTTP_STATUS_CODE_200_OK;
 }
 
 /* Allocate a new auto-ranged-get meta request. */
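Note: the swap from the removed private aws_s3_response_status enum to AWS_HTTP_STATUS_CODE_* is behavior-preserving; the deleted constants pinned the same numeric values that aws-c-http already exposes in <aws/http/status_code.h>. A quick self-contained check, for illustration only:

#include <aws/http/status_code.h>
#include <assert.h>

int main(void) {
    /* Same values the deleted enum hard-coded in s3_util.h. */
    assert(AWS_HTTP_STATUS_CODE_200_OK == 200);
    assert(AWS_HTTP_STATUS_CODE_204_NO_CONTENT == 204);
    assert(AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT == 206);
    assert(AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR == 500);
    assert(AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE == 503);
    return 0;
}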
diff --git a/source/s3_auto_ranged_put.c b/source/s3_auto_ranged_put.c
index c77cbce5f..b1f6ea235 100644
--- a/source/s3_auto_ranged_put.c
+++ b/source/s3_auto_ranged_put.c
@@ -689,7 +689,7 @@ static bool s_s3_auto_ranged_put_update(
     }
 
     if (!work_remaining) {
-        aws_s3_meta_request_set_success_synced(meta_request, AWS_S3_RESPONSE_STATUS_SUCCESS);
+        aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK);
     }
 
     aws_s3_meta_request_unlock_synced_data(meta_request);
@@ -1412,7 +1412,7 @@ static void s_s3_auto_ranged_put_request_finished(
                 "(request finished prior to being paused?)",
                 (void *)meta_request);
 
-            aws_s3_meta_request_set_success_synced(meta_request, AWS_S3_RESPONSE_STATUS_SUCCESS);
+            aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK);
         } else {
             aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
         }
diff --git a/source/s3_copy_object.c b/source/s3_copy_object.c
index 2bb76a985..4c039a949 100644
--- a/source/s3_copy_object.c
+++ b/source/s3_copy_object.c
@@ -324,7 +324,7 @@ static bool s_s3_copy_object_update(
     }
 
     if (!work_remaining) {
-        aws_s3_meta_request_set_success_synced(meta_request, AWS_S3_RESPONSE_STATUS_SUCCESS);
+        aws_s3_meta_request_set_success_synced(meta_request, AWS_HTTP_STATUS_CODE_200_OK);
     }
 
     aws_s3_meta_request_unlock_synced_data(meta_request);
diff --git a/source/s3_meta_request.c b/source/s3_meta_request.c
index 4647e5381..71a19eee3 100644
--- a/source/s3_meta_request.c
+++ b/source/s3_meta_request.c
@@ -1088,15 +1088,16 @@ static int s_s3_meta_request_error_code_from_response_status(int response_status
     int error_code = AWS_ERROR_UNKNOWN;
 
     switch (response_status) {
-        case AWS_S3_RESPONSE_STATUS_SUCCESS:
-        case AWS_S3_RESPONSE_STATUS_RANGE_SUCCESS:
-        case AWS_S3_RESPONSE_STATUS_NO_CONTENT_SUCCESS:
+        case AWS_HTTP_STATUS_CODE_200_OK:
+        case AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT:
+        case AWS_HTTP_STATUS_CODE_204_NO_CONTENT:
             error_code = AWS_ERROR_SUCCESS;
             break;
-        case AWS_S3_RESPONSE_STATUS_INTERNAL_ERROR:
+        case AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR:
             error_code = AWS_ERROR_S3_INTERNAL_ERROR;
             break;
-        case AWS_S3_RESPONSE_STATUS_SLOW_DOWN:
+        case AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE:
+            /* S3 response 503 for throttling, slow down the sending */
             error_code = AWS_ERROR_S3_SLOW_DOWN;
             break;
         default:
diff --git a/source/s3express_credentials_provider.c b/source/s3express_credentials_provider.c
index 1dbf9b969..7b0f10020 100644
--- a/source/s3express_credentials_provider.c
+++ b/source/s3express_credentials_provider.c
@@ -314,16 +314,25 @@ static void s_on_request_finished(
     struct aws_credentials *credentials = NULL;
     int error_code = meta_request_result->error_code;
 
-    if (error_code == AWS_ERROR_SUCCESS && meta_request_result->response_status != 200) {
-        error_code = AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE;
-    }
-
     AWS_LOGF_DEBUG(
         AWS_LS_AUTH_CREDENTIALS_PROVIDER,
-        "CreateSession call completed with http status %d and error code %s",
+        "(id=%p): CreateSession call completed with http status: %d and error code %s",
+        (void *)session_creator->provider,
         meta_request_result->response_status,
         aws_error_debug_str(error_code));
 
+    if (error_code && meta_request_result->error_response_body && meta_request_result->error_response_body->len > 0) {
+        /* The Create Session failed with an error response from S3, provide a specific error code for user. */
+        error_code = AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED;
+        AWS_LOGF_ERROR(
+            AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+            "(id=%p): CreateSession call failed with http status: %d, and error response body is: %.*s",
+            (void *)session_creator->provider,
+            meta_request_result->response_status,
+            (int)meta_request_result->error_response_body->len,
+            meta_request_result->error_response_body->buffer);
+    }
+
     if (error_code == AWS_ERROR_SUCCESS) {
         credentials = s_parse_s3express_xml(
             session_creator->allocator, aws_byte_cursor_from_buf(&session_creator->response_buf), session_creator);
@@ -331,7 +340,9 @@ static void s_on_request_finished(
         if (!credentials) {
             error_code = AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE;
             AWS_LOGF_ERROR(
-                AWS_LS_AUTH_CREDENTIALS_PROVIDER, "failed to read credentials from document, treating as an error.");
+                AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+                "(id=%p): failed to read credentials from document, treating as an error.",
+                (void *)session_creator->provider);
         }
     }
     { /* BEGIN CRITICAL SECTION */
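Note: the net effect of the provider change above can be summarized in a few lines. When CreateSession fails and S3 returned an error document, the generic error code is replaced with the new AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED, so callers can tell a rejected session request apart from, say, a connection failure. A condensed restatement of the logic, with a hypothetical helper name, mirroring the hunk above:

#include <aws/common/byte_buf.h>
#include <aws/common/error.h>
#include <aws/s3/s3.h>

static int s_classify_create_session_result(int error_code, const struct aws_byte_buf *error_response_body) {
    /* An error body from S3 means the CreateSession call itself was rejected;
     * transport-level failures (no body) keep their original error code. */
    if (error_code != AWS_ERROR_SUCCESS && error_response_body != NULL && error_response_body->len > 0) {
        return AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED;
    }
    return error_code;
}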
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 39bfb81f5..a4f81da52 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -302,6 +302,7 @@ add_net_test_case(s3express_client_put_object_multipart_multiple)
 add_net_test_case(s3express_client_put_object_long_running_session_refresh)
 add_net_test_case(s3express_client_get_object)
 add_net_test_case(s3express_client_get_object_multiple)
+add_net_test_case(s3express_client_get_object_create_session_error)
 
 add_net_test_case(meta_request_auto_ranged_get_new_error_handling)
 add_net_test_case(meta_request_auto_ranged_put_new_error_handling)
diff --git a/tests/s3_mock_server_s3express_provider_test.c b/tests/s3_mock_server_s3express_provider_test.c
index bd7d3d873..7748babf0 100644
--- a/tests/s3_mock_server_s3express_provider_test.c
+++ b/tests/s3_mock_server_s3express_provider_test.c
@@ -298,10 +298,12 @@ TEST_CASE(s3express_provider_get_credentials_cancel_mock_server) {
     ASSERT_SUCCESS(aws_s3express_credentials_provider_get_credentials(
         provider, tester.anonymous_creds, &property, s_get_credentials_callback, &s_s3express_tester));
 
+    /* Release the provider right after we fetch the credentials, which will cancel the create session call. */
     aws_s3express_credentials_provider_release(provider);
     s_aws_wait_for_provider_shutdown_callback();
 
     s_aws_wait_for_credentials_result(1);
+    /* The error code will be AWS_ERROR_S3_CANCELED. */
     ASSERT_UINT_EQUALS(AWS_ERROR_S3_CANCELED, s_s3express_tester.error_code);
     ASSERT_SUCCESS(s_s3express_tester_cleanup());
     aws_s3_tester_clean_up(&tester);
diff --git a/tests/s3_mock_server_tests.c b/tests/s3_mock_server_tests.c
index 5d24e5a9e..6b288de19 100644
--- a/tests/s3_mock_server_tests.c
+++ b/tests/s3_mock_server_tests.c
@@ -329,7 +329,7 @@ TEST_CASE(async_access_denied_from_complete_multipart_mock_server) {
     ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results));
 
     ASSERT_UINT_EQUALS(AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR, out_results.finished_error_code);
-    ASSERT_UINT_EQUALS(AWS_S3_RESPONSE_STATUS_SUCCESS, out_results.finished_response_status);
+    ASSERT_UINT_EQUALS(AWS_HTTP_STATUS_CODE_200_OK, out_results.finished_response_status);
     ASSERT_TRUE(out_results.error_response_body.len != 0);
 
     ASSERT_STR_EQUALS("CompleteMultipartUpload", aws_string_c_str(out_results.error_response_operation_name));
diff --git a/tests/s3_retry_tests.c b/tests/s3_retry_tests.c
index 229b0829d..2548b2a7b 100644
--- a/tests/s3_retry_tests.c
+++ b/tests/s3_retry_tests.c
@@ -359,7 +359,7 @@ static void s_s3_meta_request_send_request_finish_fail_first(
 
     if (aws_s3_tester_inc_counter2(tester) == 1) {
         AWS_ASSERT(connection->request->send_data.response_status == 404);
-        connection->request->send_data.response_status = AWS_S3_RESPONSE_STATUS_INTERNAL_ERROR;
+        connection->request->send_data.response_status = AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR;
     }
 
     struct aws_s3_meta_request_vtable *original_meta_request_vtable =
diff --git a/tests/s3_s3express_client_test.c b/tests/s3_s3express_client_test.c
index b8f771a7f..cfdb42b12 100644
--- a/tests/s3_s3express_client_test.c
+++ b/tests/s3_s3express_client_test.c
@@ -590,3 +590,58 @@ TEST_CASE(s3express_client_get_object_multiple) {
     aws_s3_tester_clean_up(&tester);
     return AWS_OP_SUCCESS;
 }
+
+TEST_CASE(s3express_client_get_object_create_session_error) {
+    (void)ctx;
+
+    struct aws_s3_tester tester;
+    ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));
+
+    struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1");
+
+    struct aws_s3_client_config client_config = {
+        .part_size = MB_TO_BYTES(5),
+        .enable_s3express = true,
+        .region = region_cursor,
+    };
+
+    ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_SIGNING));
+
+    struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config);
+
+    struct aws_byte_cursor my_dummy_endpoint = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(
+        "non-exist-bucket-test--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com");
+
+    struct aws_http_message *message =
+        aws_s3_test_get_object_request_new(allocator, my_dummy_endpoint, g_pre_existing_object_10MB);
+
+    struct aws_s3_meta_request_options options;
+    AWS_ZERO_STRUCT(options);
+    options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT;
+    options.message = message;
+    struct aws_signing_config_aws s3express_signing_config = {
+        .algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS,
+        .service = g_s3express_service_name,
+    };
+    options.signing_config = &s3express_signing_config;
+    struct aws_s3_meta_request_test_results meta_request_test_results;
+    aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator);
+
+    ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results));
+
+    struct aws_s3_meta_request *meta_request = aws_s3_client_make_meta_request(client, &options);
+    ASSERT_TRUE(meta_request != NULL);
+    /* Wait for the request to finish. */
+    aws_s3_tester_wait_for_meta_request_finish(&tester);
+    ASSERT_UINT_EQUALS(meta_request_test_results.finished_error_code, AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED);
+
+    meta_request = aws_s3_meta_request_release(meta_request);
+    aws_s3_tester_wait_for_meta_request_shutdown(&tester);
+
+    aws_s3_meta_request_test_results_clean_up(&meta_request_test_results);
+
+    aws_http_message_release(message);
+    aws_s3_client_release(client);
+    aws_s3_tester_clean_up(&tester);
+    return AWS_OP_SUCCESS;
+}
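Note: for completeness, this is roughly how an application (rather than the test harness above) would observe the new error code. The callback name and the stderr handling are hypothetical; the signature follows aws-c-s3's aws_s3_meta_request_finish_fn, wired up through the finish_callback member of struct aws_s3_meta_request_options.

#include <aws/common/error.h>
#include <aws/s3/s3.h>
#include <aws/s3/s3_client.h>
#include <stdio.h>

static void s_on_meta_request_finish(
    struct aws_s3_meta_request *meta_request,
    const struct aws_s3_meta_request_result *result,
    void *user_data) {
    (void)meta_request;
    (void)user_data;

    if (result->error_code == AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED) {
        /* The S3 Express session could not be created; report the
         * CreateSession HTTP status instead of a generic failure. */
        fprintf(
            stderr,
            "CreateSession failed (HTTP %d): %s\n",
            result->response_status,
            aws_error_debug_str(result->error_code));
    }
}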