lint and update docs
DmitriyMusatkin committed Nov 20, 2023
1 parent e0b0ab7 commit 7f14806
Showing 4 changed files with 17 additions and 24 deletions.
15 changes: 5 additions & 10 deletions docs/memory_aware_request_execution.md
@@ -39,19 +39,14 @@ Several observations about the client usage of buffers:
The buffer pooling takes advantage of some of those allocation patterns and
works as follows.
The memory is split into primary and secondary areas. The secondary area is used for
-requests with part size bigger than a predefined value (currently 128mb)
+requests with a part size bigger than a predefined value (currently 4 times the part size);
allocations from it go directly to the allocator and are effectively the old way of
doing things.

-Primary memory area is split into blocks of fixed size (currently 128mb). Blocks
-are allocated on demand. Buffers are 'acquired' from those block, either by
-finding an empty spot in existing block or creating a new block. Allocations
-from blocks are simplified to not have to worry about gaps in block, by
-allocating only from the back of the block - i.e. each allocation moves the
-available space pointer in the block forward and increments the number of
-allocations in that block. Once available space pointer reaches end of block, no
-more allocations are done from that block until all the previous allocations are
-released (i.e. block is put back into use, when allocation count reaches 0).
+The primary memory area is split into blocks of a fixed size: 16 times the part
+size (the configured part size, or 8 MB if none is set). Blocks are allocated on
+demand. Each block is logically subdivided into part-sized chunks. The pool
+allocates and releases whole chunks only, and supports acquiring several chunks
+(up to 4) at once.

Blocks are kept around while there are ongoing requests and are released
asynchronously when there is low pressure on memory.
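
To make the sizing rules above concrete, here is a minimal sketch; the names
(`block_size_for`, `uses_secondary_area`, `CHUNKS_PER_BLOCK`) are hypothetical
and only encode the multipliers quoted in this document.

```c
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical constants mirroring the documented layout: a primary block
 * holds 16 part-sized chunks, and buffers bigger than 4 parts are assumed
 * to come from the secondary area. */
enum {
    CHUNKS_PER_BLOCK = 16,
    SECONDARY_THRESHOLD_PARTS = 4,
};

/* Size of one primary block for a given part size (8 MB assumed default). */
static size_t block_size_for(size_t part_size) {
    if (part_size == 0) {
        part_size = 8 * 1024 * 1024;
    }
    return part_size * CHUNKS_PER_BLOCK;
}

/* Whether a buffer of this size would bypass the chunked primary blocks and
 * go straight to the allocator (the secondary area). */
static bool uses_secondary_area(size_t buffer_size, size_t part_size) {
    return buffer_size > (size_t)SECONDARY_THRESHOLD_PARTS * part_size;
}
```

With the default 8 MB part size this gives the same 128 MB block size the old
text hard-coded, while the secondary threshold becomes 4 parts (32 MB at the
default) instead of a fixed 128 MB.
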
6 changes: 3 additions & 3 deletions source/s3_buffer_pool.c
@@ -81,17 +81,17 @@ struct s3_buffer_pool_block {
};

static inline uint16_t s_set_bit_n(uint16_t num, size_t position, size_t n) {
-    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n) ;
+    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n);
    return num | (mask << position);
}

static inline uint16_t s_clear_bit_n(uint16_t num, size_t position, size_t n) {
-    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n) ;
+    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n);
    return num & ~(mask << position);
}

static inline bool s_check_bit_n(uint16_t num, size_t position, size_t n) {
-    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n) ;
+    uint16_t mask = ((uint16_t)0x00FF) >> (8 - n);
    return (num >> position) & mask;
}
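
As a hedged usage sketch (the helper calls are the ones defined above; the
occupancy-mask variable and chunk indices are hypothetical), these helpers can
track which chunks of a block are in use, one bit per chunk. Note the
`0x00FF`-based mask supports runs of at most 8 bits, which is enough since the
pool acquires at most 4 chunks at a time.

```c
#include <assert.h>
#include <stdint.h>

static void s_occupancy_mask_example(void) {
    uint16_t alloc_mask = 0; /* bits 0..15 stand for a block's 16 chunks */

    alloc_mask = s_set_bit_n(alloc_mask, 4, 3);   /* acquire chunks 4, 5, 6 */
    assert(s_check_bit_n(alloc_mask, 4, 3));      /* the run reads back as set */
    assert(!s_check_bit_n(alloc_mask, 8, 2));     /* chunks 8 and 9 are still free */

    alloc_mask = s_clear_bit_n(alloc_mask, 4, 3); /* release the same run */
    assert(alloc_mask == 0);
}
```
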

18 changes: 9 additions & 9 deletions source/s3_client.c
@@ -81,9 +81,6 @@ static size_t s_dns_host_address_ttl_seconds = 5 * 60;
* 30 seconds mirrors the value currently used by the Java SDK. */
static const uint32_t s_default_throughput_failure_interval_seconds = 30;

-/* Default size of buffer pool blocks. */
-static const size_t s_buffer_pool_default_chunk_size = MB_TO_BYTES(8);

/* Default multiplier between max part size and memory limit */
static const size_t s_default_max_part_size_to_mem_lim_multiplier = 4;

@@ -304,7 +301,14 @@ struct aws_s3_client *aws_s3_client_new(
        mem_limit = client_config->memory_limit_in_bytes;
    }

-    client->buffer_pool = aws_s3_buffer_pool_new(allocator, s_buffer_pool_default_chunk_size, mem_limit);
+    size_t part_size;
+    if (client_config->part_size != 0) {
+        part_size = (size_t)client_config->part_size;
+    } else {
+        part_size = s_default_part_size;
+    }
+
+    client->buffer_pool = aws_s3_buffer_pool_new(allocator, part_size, mem_limit);

    client->vtable = &s_s3_client_default_vtable;
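
For a sense of scale, a small hedged calculation (the 16-chunk block layout and
the 8 MB default part size come from the documentation updated in this commit;
the 2 GB memory limit is only an example value):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint64_t mb = 1024 * 1024;
    uint64_t part_size = 8 * mb;        /* assumed default part size */
    uint64_t mem_limit = 2 * 1024 * mb; /* example 2 GB memory limit */

    uint64_t block_size = 16 * part_size; /* 16 part-sized chunks per block */
    printf("block size: %" PRIu64 " MB\n", block_size / mb);
    printf("rough upper bound on primary blocks: %" PRIu64 "\n", mem_limit / block_size);
    return 0;
}
```

This prints a 128 MB block size and at most 16 primary blocks under the example
limit; with a larger configured part size, both figures scale accordingly.
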

@@ -342,11 +346,7 @@ struct aws_s3_client *aws_s3_client_new(
    /* Make a copy of the region string. */
    client->region = aws_string_new_from_array(allocator, client_config->region.ptr, client_config->region.len);

-    if (client_config->part_size != 0) {
-        *((size_t *)&client->part_size) = (size_t)client_config->part_size;
-    } else {
-        *((size_t *)&client->part_size) = s_default_part_size;
-    }
+    *((size_t *)&client->part_size) = part_size;

    if (client_config->max_part_size != 0) {
        *((uint64_t *)&client->max_part_size) = client_config->max_part_size;
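
The assignments through casts in this hunk exist because the corresponding
client fields are presumably declared `const`, so they can only be written once
during construction. A minimal sketch of that idiom, using a hypothetical
struct rather than the real `aws_s3_client`:

```c
#include <stdlib.h>

/* Hypothetical example type; the field is const so ordinary code cannot
 * reassign it after construction. */
struct widget {
    const size_t part_size;
};

struct widget *widget_new(size_t part_size) {
    struct widget *w = calloc(1, sizeof(struct widget));
    if (w == NULL) {
        return NULL;
    }
    /* The heap storage itself is writable, so the constructor casts away
     * const for this one-time initialization. */
    *((size_t *)&w->part_size) = part_size;
    return w;
}
```
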
2 changes: 0 additions & 2 deletions tests/s3_buffer_pool_tests.c
@@ -138,8 +138,6 @@ static int s_test_s3_buffer_pool_trim(struct aws_allocator *allocator, void *ctx

    struct aws_s3_buffer_pool_usage_stats stats_after = aws_s3_buffer_pool_get_usage(buffer_pool);

-    AWS_LOGF_DEBUG(0, "foo %zu", stats_after.primary_num_blocks);
-
    ASSERT_TRUE(stats_before.primary_num_blocks > stats_after.primary_num_blocks);

    for (size_t i = 20; i < 40; ++i) {
