Skip to content

Commit

Permalink
support customizing part_size per meta-request
Browse files Browse the repository at this point in the history
  • Loading branch information
graebm committed Dec 5, 2023
1 parent de36fee commit ba42827
Showing 1 changed file with 43 additions and 6 deletions.
49 changes: 43 additions & 6 deletions include/aws/s3/s3_client.h
Original file line number Diff line number Diff line change
Expand Up @@ -389,21 +389,36 @@ struct aws_s3_client_config {
*/
struct aws_signing_config_aws *signing_config;

/* Size of parts the files will be downloaded or uploaded in. */
/**
* Optional.
* Size of parts the object will be downloaded or uploaded in, in bytes.
* This only affects AWS_S3_META_REQUEST_TYPE_GET_OBJECT and AWS_S3_META_REQUEST_TYPE_PUT_OBJECT.
* If set, this should be at least 5MiB.
* If not set, this defaults to 8MiB.
*
* You can also set this per meta-request, via `aws_s3_meta_request_options.part_size`.
*/
uint64_t part_size;

/* If the part size needs to be adjusted for service limits, this is the maximum size it will be adjusted to. On a
 * 32-bit machine, it will be forced to SIZE_MAX, which is around 4GiB. The server limit is 5GiB, but the object size
 * limit is 5TiB for now, so this should be sufficient for all cases. */
uint64_t max_part_size;

/* The size threshold in bytes for when to use multipart uploads for an AWS_S3_META_REQUEST_TYPE_PUT_OBJECT meta
 * request. Uploads over this size will automatically use a multipart upload strategy, while uploads smaller than or
 * equal to this threshold will use a single request to upload the whole object. If not set, `part_size` will be
 * used as the threshold. */
/**
* Optional.
* The size threshold in bytes for when to use multipart uploads.
* Uploads larger than this will use the multipart upload strategy.
* Uploads smaller than or equal to this will use a single HTTP request.
* This only affects AWS_S3_META_REQUEST_TYPE_PUT_OBJECT.
* If set, this should be at least `part_size`.
* If not set, `part_size` will be used as the threshold.
*
* You can also set this per meta-request, via `aws_s3_meta_request_options.multipart_upload_threshold`.
*/
uint64_t multipart_upload_threshold;

/* Throughput target in Gbps that we are trying to reach. */
/* Throughput target in gigabits per second (Gbps) that we are trying to reach. */
double throughput_target_gbps;

/* How much memory can we use. */
Expand Down Expand Up @@ -612,6 +627,28 @@ struct aws_s3_meta_request_options {
*/
const struct aws_s3_checksum_config *checksum_config;

/**
* Optional.
* Size of parts the object will be downloaded or uploaded in, in bytes.
* This only affects AWS_S3_META_REQUEST_TYPE_GET_OBJECT and AWS_S3_META_REQUEST_TYPE_PUT_OBJECT.
* If set, this should be at least 5MiB.
* If not set, the value from `aws_s3_client_config.part_size` is used, which defaults to 8MiB.
*/
uint64_t part_size;

/**
* Optional.
* The size threshold in bytes for when to use multipart uploads.
* Uploads larger than this will use the multipart upload strategy.
* Uploads smaller than or equal to this will use a single HTTP request.
* This only affects AWS_S3_META_REQUEST_TYPE_PUT_OBJECT.
* If set, this should be at least `part_size`.
* If not set, `part_size` will be used as the threshold.
* If both `part_size` and `multipart_upload_threshold` are not set,
* the values from `aws_s3_client_config` are used, which default to 8MiB.
*/
uint64_t multipart_upload_threshold;

/* User data for all callbacks. */
void *user_data;

Expand Down

0 comments on commit ba42827

Please sign in to comment.