diff --git a/include/aws/s3/s3_client.h b/include/aws/s3/s3_client.h
index 71e7b95a7..7aefdb9c3 100644
--- a/include/aws/s3/s3_client.h
+++ b/include/aws/s3/s3_client.h
@@ -389,7 +389,15 @@ struct aws_s3_client_config {
      */
     struct aws_signing_config_aws *signing_config;
 
-    /* Size of parts the files will be downloaded or uploaded in. */
+    /**
+     * Optional.
+     * Size of parts the object will be downloaded or uploaded in, in bytes.
+     * This only affects AWS_S3_META_REQUEST_TYPE_GET_OBJECT and AWS_S3_META_REQUEST_TYPE_PUT_OBJECT.
+     * If set, this should be at least 5MiB.
+     * If not set, this defaults to 8MiB.
+     *
+     * You can also set this per meta-request, via `aws_s3_meta_request_options.part_size`.
+     */
     uint64_t part_size;
 
     /* If the part size needs to be adjusted for service limits, this is the maximum size it will be adjusted to. On 32
@@ -397,13 +405,20 @@ struct aws_s3_client_config {
      * is 5TiB for now. We should be good enough for all the cases. */
     uint64_t max_part_size;
 
-    /* The size threshold in bytes for when to use multipart uploads for a AWS_S3_META_REQUEST_TYPE_PUT_OBJECT meta
-     * request. Uploads over this size will automatically use a multipart upload strategy,while uploads smaller or
-     * equal to this threshold will use a single request to upload the whole object. If not set, `part_size` will be
-     * used as threshold. */
+    /**
+     * Optional.
+     * The size threshold in bytes for when to use multipart uploads.
+     * Uploads larger than this will use the multipart upload strategy.
+     * Uploads smaller than or equal to this will use a single HTTP request.
+     * This only affects AWS_S3_META_REQUEST_TYPE_PUT_OBJECT.
+     * If set, this should be at least `part_size`.
+     * If not set, `part_size` will be used as the threshold.
+     *
+     * You can also set this per meta-request, via `aws_s3_meta_request_options.multipart_upload_threshold`.
+     */
     uint64_t multipart_upload_threshold;
 
-    /* Throughput target in Gbps that we are trying to reach. */
+    /* Throughput target in gigabits per second (Gbps) that we are trying to reach. */
     double throughput_target_gbps;
 
     /* How much memory can we use. */
@@ -612,6 +627,28 @@ struct aws_s3_meta_request_options {
      */
     const struct aws_s3_checksum_config *checksum_config;
 
+    /**
+     * Optional.
+     * Size of parts the object will be downloaded or uploaded in, in bytes.
+     * This only affects AWS_S3_META_REQUEST_TYPE_GET_OBJECT and AWS_S3_META_REQUEST_TYPE_PUT_OBJECT.
+     * If set, this should be at least 5MiB.
+     * If not set, the value from `aws_s3_client_config.part_size` is used, which defaults to 8MiB.
+     */
+    uint64_t part_size;
+
+    /**
+     * Optional.
+     * The size threshold in bytes for when to use multipart uploads.
+     * Uploads larger than this will use the multipart upload strategy.
+     * Uploads smaller than or equal to this will use a single HTTP request.
+     * This only affects AWS_S3_META_REQUEST_TYPE_PUT_OBJECT.
+     * If set, this should be at least `part_size`.
+     * If not set, `part_size` will be used as the threshold.
+     * If both `part_size` and `multipart_upload_threshold` are not set,
+     * the values from `aws_s3_client_config` are used, which default to 8MiB.
+     */
+    uint64_t multipart_upload_threshold;
+
     /* User data for all callbacks. */
     void *user_data;
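
Illustrative usage (not part of the patch): a minimal sketch of how a caller might override the new per-meta-request fields for a single upload. It assumes an `aws_s3_client` and a prepared PUT `aws_http_message` already exist; the helper name `s_make_large_upload` and the chosen sizes are hypothetical.

#include <aws/s3/s3_client.h>

/* Hypothetical helper: start one PutObject meta request with request-specific
 * part size and multipart threshold. The client and HTTP message are assumed
 * to have been created elsewhere (bootstrap, signing, body stream, etc.). */
static struct aws_s3_meta_request *s_make_large_upload(
    struct aws_s3_client *client,
    struct aws_http_message *put_message) {

    struct aws_s3_meta_request_options options = {
        .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
        .message = put_message,
        /* Per-request overrides introduced by this change: 16 MiB parts for
         * this request only, and objects of 64 MiB or smaller are sent as a
         * single PutObject rather than a multipart upload. */
        .part_size = 16ULL * 1024 * 1024,
        .multipart_upload_threshold = 64ULL * 1024 * 1024,
    };

    /* Returns NULL on failure. Other meta requests on the same client keep
     * using the client-level `aws_s3_client_config` values. */
    return aws_s3_client_make_meta_request(client, &options);
}

Leaving both fields zero preserves the fallback described in the doc comments above: the client's `part_size` (default 8MiB) is used, and `part_size` doubles as the multipart threshold when no threshold is set.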