From 7f14806acc1eef13cfec05c52f07864f830477c1 Mon Sep 17 00:00:00 2001
From: Dmitriy Musatkin
Date: Mon, 20 Nov 2023 11:51:25 -0500
Subject: [PATCH] lint and update docs

---
 docs/memory_aware_request_execution.md | 15 +++++----------
 source/s3_buffer_pool.c                |  6 +++---
 source/s3_client.c                     | 18 +++++++++---------
 tests/s3_buffer_pool_tests.c           |  2 --
 4 files changed, 17 insertions(+), 24 deletions(-)

diff --git a/docs/memory_aware_request_execution.md b/docs/memory_aware_request_execution.md
index b4a97273d..9d2dd4934 100644
--- a/docs/memory_aware_request_execution.md
+++ b/docs/memory_aware_request_execution.md
@@ -39,19 +39,14 @@ Several observations about the client usage of buffers:
 The buffer pooling takes advantage of some of those allocation patterns and
 works as follows.
 The memory is split into primary and secondary areas. Secondary area is used for
-requests with part size bigger than a predefined value (currently 128mb)
+requests with part size bigger than a predefined value (currently 4 times the part size);
 allocations from it got directly to allocator and are effectively old way of
 doing things.

-Primary memory area is split into blocks of fixed size (currently 128mb). Blocks
-are allocated on demand. Buffers are 'acquired' from those block, either by
-finding an empty spot in existing block or creating a new block. Allocations
-from blocks are simplified to not have to worry about gaps in block, by
-allocating only from the back of the block - i.e. each allocation moves the
-available space pointer in the block forward and increments the number of
-allocations in that block. Once available space pointer reaches end of block, no
-more allocations are done from that block until all the previous allocations are
-released (i.e. block is put back into use, when allocation count reaches 0).
+Primary memory area is split into blocks of fixed size (16 times the part size,
+or 16 times 8mb if no part size is specified). Blocks are allocated on demand.
+Each block is logically subdivided into part-sized chunks. The pool allocates and
+releases whole chunks only, and supports acquiring several chunks (up to 4) at once.

 Blocks are kept around while there are ongoing requests and are released async,
 when there is low pressure on memory.
diff --git a/source/s3_buffer_pool.c b/source/s3_buffer_pool.c
index e64d9dd76..8fa0e0112 100644
--- a/source/s3_buffer_pool.c
+++ b/source/s3_buffer_pool.c
@@ -81,17 +81,17 @@ struct s3_buffer_pool_block {
 };

 static inline uint16_t s_set_bit_n(uint16_t num, size_t position, size_t n) {
-    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n) ;
+    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n);
     return num | (mask << position) ;
 }

 static inline uint16_t s_clear_bit_n(uint16_t num, size_t position, size_t n) {
-    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n) ;
+    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n);
     return num & ~ (mask << position);
 }

 static inline bool s_check_bit_n(uint16_t num, size_t position, size_t n) {
-    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n) ;
+    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n);
     return (num >> position) & mask;
 }

diff --git a/source/s3_client.c b/source/s3_client.c
index f8aa98066..74b7108f0 100644
--- a/source/s3_client.c
+++ b/source/s3_client.c
@@ -81,9 +81,6 @@ static size_t s_dns_host_address_ttl_seconds = 5 * 60;
  * 30 seconds mirrors the value currently used by the Java SDK. */
 static const uint32_t s_default_throughput_failure_interval_seconds = 30;

-/* Default size of buffer pool blocks. */
-static const size_t s_buffer_pool_default_chunk_size = MB_TO_BYTES(8);
-
 /* Default multiplier between max part size and memory limit */
 static const size_t s_default_max_part_size_to_mem_lim_multiplier = 4;

@@ -304,7 +301,14 @@ struct aws_s3_client *aws_s3_client_new(
         mem_limit = client_config->memory_limit_in_bytes;
     }

-    client->buffer_pool = aws_s3_buffer_pool_new(allocator, s_buffer_pool_default_chunk_size, mem_limit);
+    size_t part_size;
+    if (client_config->part_size != 0) {
+        part_size = (size_t)client_config->part_size;
+    } else {
+        part_size = s_default_part_size;
+    }
+
+    client->buffer_pool = aws_s3_buffer_pool_new(allocator, part_size, mem_limit);

     client->vtable = &s_s3_client_default_vtable;

@@ -342,11 +346,7 @@ struct aws_s3_client *aws_s3_client_new(
     /* Make a copy of the region string. */
     client->region = aws_string_new_from_array(allocator, client_config->region.ptr, client_config->region.len);

-    if (client_config->part_size != 0) {
-        *((size_t *)&client->part_size) = (size_t)client_config->part_size;
-    } else {
-        *((size_t *)&client->part_size) = s_default_part_size;
-    }
+    *((size_t *)&client->part_size) = part_size;

     if (client_config->max_part_size != 0) {
         *((uint64_t *)&client->max_part_size) = client_config->max_part_size;
diff --git a/tests/s3_buffer_pool_tests.c b/tests/s3_buffer_pool_tests.c
index 0dfb41ac7..904c5e27f 100644
--- a/tests/s3_buffer_pool_tests.c
+++ b/tests/s3_buffer_pool_tests.c
@@ -138,8 +138,6 @@ static int s_test_s3_buffer_pool_trim(struct aws_allocator *allocator, void *ctx

     struct aws_s3_buffer_pool_usage_stats stats_after = aws_s3_buffer_pool_get_usage(buffer_pool);

-    AWS_LOGF_DEBUG(0, "foo %zu", stats_after.primary_num_blocks);
-
     ASSERT_TRUE(stats_before.primary_num_blocks > stats_after.primary_num_blocks);

     for (size_t i = 20; i < 40; ++i) {
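
Note (not part of the patch): below is a minimal, self-contained sketch of the chunk
bookkeeping the updated docs describe. The three bit helpers are adapted from
source/s3_buffer_pool.c as they read after this change; everything else (the main()
driver, the assumption that a block tracks its 16 part-sized chunks in a single
uint16_t mask, and the specific chunk positions used) is illustrative only and not
code from the repository.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Helpers adapted from source/s3_buffer_pool.c (post-patch formatting). */
static inline uint16_t s_set_bit_n(uint16_t num, size_t position, size_t n) {
    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n);
    return num | (mask << position);
}

static inline uint16_t s_clear_bit_n(uint16_t num, size_t position, size_t n) {
    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n);
    return num & ~(mask << position);
}

static inline bool s_check_bit_n(uint16_t num, size_t position, size_t n) {
    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n);
    return (num >> position) & mask;
}

int main(void) {
    /* Hypothetical per-block state: one bit per part-sized chunk, 16 chunks per block. */
    uint16_t alloc_bit_mask = 0;

    /* Acquire 4 contiguous chunks starting at chunk 0 (the "up to 4 chunks at once" case). */
    alloc_bit_mask = s_set_bit_n(alloc_bit_mask, 0, 4);
    assert(s_check_bit_n(alloc_bit_mask, 0, 4)); /* chunks 0-3 now marked as used */

    /* Acquire a single chunk at position 6. */
    alloc_bit_mask = s_set_bit_n(alloc_bit_mask, 6, 1);

    /* Chunks 4-5 are still free, so a 2-chunk acquisition would fit there. */
    assert(!s_check_bit_n(alloc_bit_mask, 4, 2));

    /* Release both buffers; an all-zero mask means the block is fully free again. */
    alloc_bit_mask = s_clear_bit_n(alloc_bit_mask, 0, 4);
    alloc_bit_mask = s_clear_bit_n(alloc_bit_mask, 6, 1);
    printf("final mask: 0x%04x\n", (unsigned)alloc_bit_mask); /* prints 0x0000 */

    return 0;
}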