
Commit

Merge branch 'main' into update-cmake
# Conflicts:
#	include/aws/s3/s3.h
#	source/s3.c
waahm7 committed Oct 16, 2024
2 parents 7d35925 + 1670150 commit e77b411
Showing 9 changed files with 195 additions and 39 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -68,3 +68,6 @@ cmake-build*
# js package locks irrelevant to the overall package's security
benchmarks/benchmarks-stack/benchmarks-stack/package-lock.json
benchmarks/dashboard-stack/package-lock.json

# virtual environment
.venv/
1 change: 1 addition & 0 deletions include/aws/s3/s3.h
@@ -48,6 +48,7 @@ enum aws_s3_errors {
AWS_ERROR_S3_REQUEST_HAS_COMPLETED,
AWS_ERROR_S3_RECV_FILE_ALREADY_EXISTS,
AWS_ERROR_S3_RECV_FILE_NOT_FOUND,
AWS_ERROR_S3_REQUEST_TIMEOUT,

AWS_ERROR_S3_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_S3_PACKAGE_ID)
};
1 change: 1 addition & 0 deletions source/s3.c
@@ -51,6 +51,7 @@ static struct aws_error_info s_errors[] = {
AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_REQUEST_HAS_COMPLETED, "Request has already completed, action cannot be performed."),
AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_RECV_FILE_ALREADY_EXISTS, "File already exists, cannot create as new."),
AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_RECV_FILE_NOT_FOUND, "The receive file doesn't exist, cannot create as configuration required."),
AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_REQUEST_TIMEOUT, "RequestTimeout error received from S3."),
};
/* clang-format on */

88 changes: 55 additions & 33 deletions source/s3_client.c
@@ -346,7 +346,7 @@ struct aws_s3_client *aws_s3_client_new(
client->buffer_pool = aws_s3_buffer_pool_new(allocator, part_size, mem_limit);

if (client->buffer_pool == NULL) {
goto on_early_fail;
goto on_error;
}

struct aws_s3_buffer_pool_usage_stats pool_usage = aws_s3_buffer_pool_get_usage(client->buffer_pool);
@@ -357,15 +357,15 @@
"Cannot create client from client_config; configured max part size should not exceed memory limit."
"size.");
aws_raise_error(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG);
goto on_early_fail;
goto on_error;
}

client->vtable = &s_s3_client_default_vtable;

aws_ref_count_init(&client->ref_count, client, (aws_simple_completion_callback *)s_s3_client_start_destroy);

if (aws_mutex_init(&client->synced_data.lock) != AWS_OP_SUCCESS) {
goto on_early_fail;
goto on_error;
}

aws_linked_list_init(&client->synced_data.pending_meta_request_work);
@@ -488,6 +488,44 @@ struct aws_s3_client *aws_s3_client_new(
}
}

client->num_network_interface_names = client_config->num_network_interface_names;
if (client_config->num_network_interface_names > 0) {
AWS_LOGF_DEBUG(
AWS_LS_S3_CLIENT,
"id=%p Client received network interface names array with length %zu.",
(void *)client,
client->num_network_interface_names);
aws_array_list_init_dynamic(
&client->network_interface_names,
client->allocator,
client_config->num_network_interface_names,
sizeof(struct aws_string *));
client->network_interface_names_cursor_array = aws_mem_calloc(
client->allocator, client_config->num_network_interface_names, sizeof(struct aws_byte_cursor));
for (size_t i = 0; i < client_config->num_network_interface_names; i++) {
struct aws_byte_cursor interface_name = client_config->network_interface_names_array[i];
struct aws_string *interface_name_str = aws_string_new_from_cursor(client->allocator, &interface_name);
aws_array_list_push_back(&client->network_interface_names, &interface_name_str);
if (aws_is_network_interface_name_valid(aws_string_c_str(interface_name_str)) == false) {
AWS_LOGF_ERROR(
AWS_LS_S3_CLIENT,
"id=%p network_interface_names_array[%zu]=" PRInSTR " is not valid.",
(void *)client,
i,
AWS_BYTE_CURSOR_PRI(interface_name));
aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
goto on_error;
}
client->network_interface_names_cursor_array[i] = aws_byte_cursor_from_string(interface_name_str);
AWS_LOGF_DEBUG(
AWS_LS_S3_CLIENT,
"id=%p network_interface_names_array[%zu]=" PRInSTR "",
(void *)client,
i,
AWS_BYTE_CURSOR_PRI(client->network_interface_names_cursor_array[i]));
}
}

/* Set up body streaming ELG */
{
uint16_t num_event_loops =
@@ -579,34 +617,6 @@ struct aws_s3_client *aws_s3_client_new(
*((bool *)&client->enable_read_backpressure) = client_config->enable_read_backpressure;
*((size_t *)&client->initial_read_window) = client_config->initial_read_window;

client->num_network_interface_names = client_config->num_network_interface_names;
if (client_config->num_network_interface_names > 0) {
AWS_LOGF_DEBUG(
AWS_LS_S3_CLIENT,
"id=%p Client received network interface names array with length %zu.",
(void *)client,
client->num_network_interface_names);
aws_array_list_init_dynamic(
&client->network_interface_names,
client->allocator,
client_config->num_network_interface_names,
sizeof(struct aws_string *));
client->network_interface_names_cursor_array = aws_mem_calloc(
client->allocator, client_config->num_network_interface_names, sizeof(struct aws_byte_cursor));
for (size_t i = 0; i < client_config->num_network_interface_names; i++) {
struct aws_byte_cursor interface_name = client_config->network_interface_names_array[i];
struct aws_string *interface_name_str = aws_string_new_from_cursor(client->allocator, &interface_name);
aws_array_list_push_back(&client->network_interface_names, &interface_name_str);
client->network_interface_names_cursor_array[i] = aws_byte_cursor_from_string(interface_name_str);
AWS_LOGF_DEBUG(
AWS_LS_S3_CLIENT,
"id=%p network_interface_names_array[%zu]=" PRInSTR "",
(void *)client,
i,
AWS_BYTE_CURSOR_PRI(client->network_interface_names_cursor_array[i]));
}
}

return client;

on_error:
@@ -628,10 +638,22 @@ struct aws_s3_client *aws_s3_client_new(
aws_mem_release(client->allocator, client->proxy_ev_settings);
aws_mem_release(client->allocator, client->tcp_keep_alive_options);

aws_event_loop_group_release(client->client_bootstrap->event_loop_group);
if (client->client_bootstrap != NULL) {
aws_event_loop_group_release(client->client_bootstrap->event_loop_group);
}
aws_client_bootstrap_release(client->client_bootstrap);
aws_mutex_clean_up(&client->synced_data.lock);
on_early_fail:

aws_mem_release(client->allocator, client->network_interface_names_cursor_array);
for (size_t i = 0; i < aws_array_list_length(&client->network_interface_names); i++) {
struct aws_string *interface_name = NULL;
aws_array_list_get_at(&client->network_interface_names, &interface_name, i);
aws_string_destroy(interface_name);
}

aws_array_list_clean_up(&client->network_interface_names);
aws_s3_buffer_pool_destroy(client->buffer_pool);

aws_mem_release(client->allocator, client);
return NULL;
}
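For reference, a minimal sketch (not part of this commit) of how the new up-front validation is reached through the public config fields used above; the interface names, allocator, and the rest of the client setup are placeholders:

/* Illustrative only: "eth0"/"eth1" are placeholder names, and the rest of the
 * client configuration (bootstrap, region, signing) is omitted; `allocator`
 * is assumed to come from the surrounding code. */
struct aws_byte_cursor nic_names[] = {
    aws_byte_cursor_from_c_str("eth0"),
    aws_byte_cursor_from_c_str("eth1"),
};

struct aws_s3_client_config client_config;
AWS_ZERO_STRUCT(client_config);
client_config.network_interface_names_array = nic_names;
client_config.num_network_interface_names = AWS_ARRAY_SIZE(nic_names);

/* With this change, aws_s3_client_new() validates every name before creating
 * any connections and returns NULL (raising AWS_ERROR_INVALID_ARGUMENT) if a
 * name is not a valid network interface. */
struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config);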
5 changes: 5 additions & 0 deletions source/s3_util.c
@@ -687,6 +687,11 @@ int aws_s3_crt_error_code_from_recoverable_server_error_code_string(struct aws_b
if (aws_byte_cursor_eq_c_str_ignore_case(&error_code_string, "RequestTimeTooSkewed")) {
return AWS_ERROR_S3_REQUEST_TIME_TOO_SKEWED;
}

if (aws_byte_cursor_eq_c_str_ignore_case(&error_code_string, "RequestTimeout")) {
return AWS_ERROR_S3_REQUEST_TIMEOUT;
}

return AWS_ERROR_UNKNOWN;
}

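For reference, a minimal sketch of what the new branch does; it assumes the function takes the error-code cursor by value, as the hunk above suggests, and is illustrative rather than part of the commit:

/* Illustrative only: map the S3 "RequestTimeout" error-code string to the new
 * CRT error code so the failed request can be classified as recoverable. */
struct aws_byte_cursor code = aws_byte_cursor_from_c_str("RequestTimeout");
int crt_error = aws_s3_crt_error_code_from_recoverable_server_error_code_string(code);
AWS_ASSERT(crt_error == AWS_ERROR_S3_REQUEST_TIMEOUT);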
3 changes: 3 additions & 0 deletions tests/CMakeLists.txt
@@ -27,6 +27,8 @@ add_test_case(test_s3_abort_multipart_upload_message_new)

add_net_test_case(test_s3_client_create_destroy)
add_net_test_case(test_s3_client_create_error)
add_net_test_case(test_s3_client_create_error_with_invalid_memory_limit_config)
add_net_test_case(test_s3_client_create_error_with_invalid_network_interface)
add_net_test_case(test_s3_client_monitoring_options_override)
add_net_test_case(test_s3_client_proxy_ev_settings_override)
add_net_test_case(test_s3_client_tcp_keep_alive_options_override)
@@ -337,6 +339,7 @@ if(ENABLE_MOCK_SERVER_TESTS)
add_net_test_case(s3express_client_sanity_test_mock_server)
add_net_test_case(s3express_client_sanity_override_test_mock_server)
add_net_test_case(request_time_too_skewed_mock_server)
add_net_test_case(request_timeout_error_mock_server)
endif()

add_net_test_case(s3express_provider_long_running_session_refresh)
15 changes: 15 additions & 0 deletions tests/mock_s3_server/CreateMultipartUpload/request_timeout.json
@@ -0,0 +1,15 @@

{
"status": 400,
"headers": {"x-amz-request-id": "12345"},
"body": [
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>",
"",
"<Error>",
"<Code>RequestTimeout</Code>",
"<Message>Your socket connection to the server was not read from or written to within the timeout period. Idle connections will be closed.</Message>",
"<RequestId>1234</RequestId>",
"<HostId>asdf</HostId>",
"</Error>"
]
}
55 changes: 55 additions & 0 deletions tests/s3_data_plane_tests.c
@@ -77,6 +77,61 @@ static int s_test_s3_client_create_error(struct aws_allocator *allocator, void *
return 0;
}

AWS_TEST_CASE(
test_s3_client_create_error_with_invalid_memory_limit_config,
s_test_s3_client_create_error_with_invalid_memory_limit_config)
static int s_test_s3_client_create_error_with_invalid_memory_limit_config(struct aws_allocator *allocator, void *ctx) {
(void)ctx;

struct aws_s3_tester tester;
AWS_ZERO_STRUCT(tester);
ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));

struct aws_s3_client_config client_config;
AWS_ZERO_STRUCT(client_config);
ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION));

client_config.memory_limit_in_bytes = 100;
struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config);
ASSERT_TRUE(client == NULL);
client_config.memory_limit_in_bytes = GB_TO_BYTES(1);
client_config.max_part_size = GB_TO_BYTES(2);
client = aws_s3_client_new(allocator, &client_config);
ASSERT_TRUE(client == NULL);

tester.bound_to_client = false;
aws_s3_tester_clean_up(&tester);
return 0;
}

AWS_TEST_CASE(
test_s3_client_create_error_with_invalid_network_interface,
s_test_s3_client_create_error_with_invalid_network_interface)
static int s_test_s3_client_create_error_with_invalid_network_interface(struct aws_allocator *allocator, void *ctx) {
(void)ctx;

struct aws_s3_tester tester;
AWS_ZERO_STRUCT(tester);
ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));

struct aws_s3_client_config client_config;
AWS_ZERO_STRUCT(client_config);
ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_REGION));

struct aws_byte_cursor *interface_names_array = aws_mem_calloc(allocator, 1, sizeof(struct aws_byte_cursor));
interface_names_array[0] = aws_byte_cursor_from_c_str("invalid-nic");
client_config.network_interface_names_array = interface_names_array;
client_config.num_network_interface_names = 1;

struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config);
ASSERT_TRUE(client == NULL);
tester.bound_to_client = false;

aws_s3_tester_clean_up(&tester);
aws_mem_release(allocator, interface_names_array);
return 0;
}

AWS_TEST_CASE(test_s3_client_monitoring_options_override, s_test_s3_client_monitoring_options_override)
static int s_test_s3_client_monitoring_options_override(struct aws_allocator *allocator, void *ctx) {
(void)ctx;
63 changes: 57 additions & 6 deletions tests/s3_mock_server_tests.c
@@ -169,16 +169,19 @@ TEST_CASE(multipart_upload_mock_server) {

TEST_CASE(multipart_upload_with_network_interface_names_mock_server) {
(void)ctx;

#if defined(AWS_OS_WINDOWS)
(void)allocator;
return AWS_OP_SKIP;
#else
struct aws_s3_tester tester;
ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));
struct aws_byte_cursor *interface_names_array = aws_mem_calloc(allocator, 2, sizeof(struct aws_byte_cursor));
char *localhost_interface = "\0";
#if defined(AWS_OS_APPLE)
# if defined(AWS_OS_APPLE)
localhost_interface = "lo0";
#else
# else
localhost_interface = "lo";
#endif
# endif
interface_names_array[0] = aws_byte_cursor_from_c_str(localhost_interface);
interface_names_array[1] = aws_byte_cursor_from_c_str(localhost_interface);

@@ -212,11 +215,11 @@ TEST_CASE(multipart_upload_with_network_interface_names_mock_server) {
aws_s3_meta_request_test_results_init(&out_results, allocator);
ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results));
if (out_results.finished_error_code != 0) {
#if !defined(AWS_OS_APPLE) && !defined(AWS_OS_LINUX)
# if !defined(AWS_OS_APPLE) && !defined(AWS_OS_LINUX)
if (out_results.finished_error_code == AWS_ERROR_PLATFORM_NOT_SUPPORTED) {
return AWS_OP_SKIP;
}
#endif
# endif
ASSERT_TRUE(false, "aws_s3_tester_send_meta_request_with_options() failed");
}
aws_s3_meta_request_test_results_clean_up(&out_results);
@@ -225,6 +228,7 @@ TEST_CASE(multipart_upload_with_network_interface_names_mock_server) {
aws_mem_release(allocator, interface_names_array);

return AWS_OP_SUCCESS;
#endif
}

TEST_CASE(multipart_upload_checksum_with_retry_mock_server) {
Expand Down Expand Up @@ -1074,3 +1078,50 @@ TEST_CASE(request_time_too_skewed_mock_server) {

return AWS_OP_SUCCESS;
}

TEST_CASE(request_timeout_error_mock_server) {
(void)ctx;

struct aws_s3_tester tester;
ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester));
struct aws_s3_tester_client_options client_options = {
.part_size = MB_TO_BYTES(5),
.tls_usage = AWS_S3_TLS_DISABLED,
};

struct aws_s3_client *client = NULL;
ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client));

struct aws_byte_cursor object_path = aws_byte_cursor_from_c_str("/request_timeout");
struct aws_s3_meta_request_test_results out_results;
aws_s3_meta_request_test_results_init(&out_results, allocator);

struct aws_s3_tester_meta_request_options put_options = {
.allocator = allocator,
.meta_request_type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
.client = client,
.checksum_algorithm = AWS_SCA_CRC32,
.validate_get_response_checksum = false,
.put_options =
{
.object_size_mb = 10,
.object_path_override = object_path,
},
.mock_server = true,
.validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_FAILURE,
};

ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &put_options, &out_results));

ASSERT_UINT_EQUALS(AWS_ERROR_S3_REQUEST_TIMEOUT, out_results.finished_error_code);

    /* The default retry strategy allows up to 5 retries, so there are 6 requests in total: the initial attempt plus 5 retries. */
size_t result_num = aws_array_list_length(&out_results.synced_data.metrics);
ASSERT_UINT_EQUALS(6, result_num);

aws_s3_meta_request_test_results_clean_up(&out_results);
aws_s3_client_release(client);
aws_s3_tester_clean_up(&tester);

return AWS_OP_SUCCESS;
}
