From aa4f9668ac192630e8019f93952614f21dddfee5 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Mon, 26 Aug 2024 13:33:14 -0700 Subject: [PATCH 01/39] Provide I/O operation status back to event loop --- include/aws/io/event_loop.h | 19 +++++++++++++++++++ source/bsd/kqueue_event_loop.c | 32 ++++++++++++++++++++++++++++++++ source/event_loop.c | 9 +++++++++ source/socket_channel_handler.c | 10 ++++++++++ 4 files changed, 70 insertions(+) diff --git a/include/aws/io/event_loop.h b/include/aws/io/event_loop.h index 58041a4c7..9279c7841 100644 --- a/include/aws/io/event_loop.h +++ b/include/aws/io/event_loop.h @@ -25,6 +25,7 @@ enum aws_io_event_type { struct aws_event_loop; struct aws_task; struct aws_thread_options; +struct aws_event_loop_io_op_result; #if AWS_USE_IO_COMPLETION_PORTS @@ -99,6 +100,10 @@ struct aws_event_loop_vtable { void *user_data); #endif int (*unsubscribe_from_io_events)(struct aws_event_loop *event_loop, struct aws_io_handle *handle); + void (*feedback_io_result)( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + const struct aws_event_loop_io_op_result *io_op_result); void (*free_io_event_resources)(void *user_data); bool (*is_on_callers_thread)(struct aws_event_loop *event_loop); }; @@ -140,6 +145,11 @@ struct aws_event_loop_group { struct aws_shutdown_callback_options shutdown_options; }; +struct aws_event_loop_io_op_result { + size_t read_bytes; + int error_code; +}; + AWS_EXTERN_C_BEGIN #ifdef AWS_USE_IO_COMPLETION_PORTS @@ -366,6 +376,15 @@ int aws_event_loop_subscribe_to_io_events( AWS_IO_API int aws_event_loop_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); +/** + * Update the I/O operation completion status on the given I/O handle. + */ +AWS_IO_API +void aws_event_loop_feedback_io_op_result( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + const struct aws_event_loop_io_op_result *io_op_result); + /** * Cleans up resources (user_data) associated with the I/O eventing subsystem for a given handle. 
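 * (Concretely, this calls the vtable's free_io_event_resources on the handle's additional_data;
 * see aws_event_loop_free_io_event_resources() in source/event_loop.c.)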
This should only * ever be necessary in the case where you are cleaning up an event loop during shutdown and its thread has already diff --git a/source/bsd/kqueue_event_loop.c b/source/bsd/kqueue_event_loop.c index 33a517e7b..4c00ff284 100644 --- a/source/bsd/kqueue_event_loop.c +++ b/source/bsd/kqueue_event_loop.c @@ -39,6 +39,10 @@ static int s_subscribe_to_io_events( aws_event_loop_on_event_fn *on_event, void *user_data); static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); +static void s_feedback_io_result( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + const struct aws_event_loop_io_op_result *io_op_result); static void s_free_io_event_resources(void *user_data); static bool s_is_event_thread(struct aws_event_loop *event_loop); @@ -110,6 +114,8 @@ struct handle_data { struct aws_task subscribe_task; struct aws_task cleanup_task; + + struct aws_event_loop_io_op_result last_io_operation_result; }; enum { @@ -127,6 +133,7 @@ struct aws_event_loop_vtable s_kqueue_vtable = { .subscribe_to_io_events = s_subscribe_to_io_events, .cancel_task = s_cancel_task, .unsubscribe_from_io_events = s_unsubscribe_from_io_events, + .feedback_io_result = s_feedback_io_result, .free_io_event_resources = s_free_io_event_resources, .is_on_callers_thread = s_is_event_thread, }; @@ -135,6 +142,7 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, const struct aws_event_loop_options *options) { AWS_ASSERT(alloc); + // FIXME Remove this assert. AWS_ASSERT(clock); AWS_ASSERT(options); AWS_ASSERT(options->clock); @@ -725,6 +733,23 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc return AWS_OP_SUCCESS; } +static void s_feedback_io_result( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + const struct aws_event_loop_io_op_result *io_op_result) { + AWS_ASSERT(handle->additional_data); + struct handle_data *handle_data = handle->additional_data; + AWS_ASSERT(event_loop == handle_data->event_loop); + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p: got feedback on I/O operation for fd %d: status %s", + (void *)event_loop, + handle->data.fd, + aws_error_str(io_op_result->error_code)); + handle_data->last_io_operation_result.read_bytes = io_op_result->read_bytes; + handle_data->last_io_operation_result.error_code = io_op_result->error_code; +} + static bool s_is_event_thread(struct aws_event_loop *event_loop) { struct kqueue_loop *impl = event_loop->impl_data; @@ -931,6 +956,13 @@ static void aws_event_loop_thread(void *user_data) { handle_data->owner->data.fd); handle_data->on_event( event_loop, handle_data->owner, handle_data->events_this_loop, handle_data->on_event_user_data); + AWS_LOGF_INFO( + AWS_LS_IO_EVENT_LOOP, + "id=%p: on_event completion status is %d (%s); read %lu bytes", + (void *)event_loop, + handle_data->last_io_operation_result.error_code, + aws_error_str(handle_data->last_io_operation_result.error_code), + handle_data->last_io_operation_result.read_bytes); } handle_data->events_this_loop = 0; diff --git a/source/event_loop.c b/source/event_loop.c index 1e7aef676..1c598d240 100644 --- a/source/event_loop.c +++ b/source/event_loop.c @@ -514,6 +514,15 @@ int aws_event_loop_unsubscribe_from_io_events(struct aws_event_loop *event_loop, return event_loop->vtable->unsubscribe_from_io_events(event_loop, handle); } +void aws_event_loop_feedback_io_op_result( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + const 
struct aws_event_loop_io_op_result *io_op_result) { + if (event_loop->vtable->feedback_io_result) { + event_loop->vtable->feedback_io_result(event_loop, handle, io_op_result); + } +} + void aws_event_loop_free_io_event_resources(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { AWS_ASSERT(event_loop && event_loop->vtable->free_io_event_resources); event_loop->vtable->free_io_event_resources(handle->additional_data); diff --git a/source/socket_channel_handler.c b/source/socket_channel_handler.c index e8c9c5499..220f83cdf 100644 --- a/source/socket_channel_handler.c +++ b/source/socket_channel_handler.c @@ -183,6 +183,9 @@ static void s_do_read(struct socket_handler *socket_handler) { AWS_ASSERT(last_error != 0); if (last_error != AWS_IO_READ_WOULD_BLOCK) { + struct aws_event_loop_io_op_result io_op_result = {total_read, last_error}; + aws_event_loop_feedback_io_op_result( + socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); aws_channel_shutdown(socket_handler->slot->channel, last_error); } else { AWS_LOGF_TRACE( @@ -190,6 +193,9 @@ static void s_do_read(struct socket_handler *socket_handler) { "id=%p: out of data to read on socket. " "Waiting on event-loop notification.", (void *)socket_handler->slot->handler); + struct aws_event_loop_io_op_result io_op_result = {total_read, AWS_IO_READ_WOULD_BLOCK}; + aws_event_loop_feedback_io_op_result( + socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); } return; } @@ -206,6 +212,10 @@ static void s_do_read(struct socket_handler *socket_handler) { &socket_handler->read_task_storage, s_read_task, socket_handler, "socket_handler_re_read"); aws_channel_schedule_task_now(socket_handler->slot->channel, &socket_handler->read_task_storage); } + + struct aws_event_loop_io_op_result io_op_result = {total_read, AWS_ERROR_SUCCESS}; + aws_event_loop_feedback_io_op_result( + socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); } /* the socket is either readable or errored out. If it's readable, kick off s_do_read() to do its thing. 
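 * Either way, s_do_read() above reports the outcome back to the event loop through
 * aws_event_loop_feedback_io_op_result(), covering the success, would-block, and error paths.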
*/ From abe7747931922f0581d5348d8166b143dcabfb7f Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Mon, 26 Aug 2024 13:48:16 -0700 Subject: [PATCH 02/39] Add flag for last io result --- source/bsd/kqueue_event_loop.c | 9 +++++++++ source/socket_channel_handler.c | 10 ++++------ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/source/bsd/kqueue_event_loop.c b/source/bsd/kqueue_event_loop.c index 4c00ff284..a5c2f3610 100644 --- a/source/bsd/kqueue_event_loop.c +++ b/source/bsd/kqueue_event_loop.c @@ -116,6 +116,7 @@ struct handle_data { struct aws_task cleanup_task; struct aws_event_loop_io_op_result last_io_operation_result; + bool last_io_operation_is_updated; }; enum { @@ -740,6 +741,7 @@ static void s_feedback_io_result( AWS_ASSERT(handle->additional_data); struct handle_data *handle_data = handle->additional_data; AWS_ASSERT(event_loop == handle_data->event_loop); + AWS_ASSERT(handle_data->last_io_operation_is_updated == 0); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: got feedback on I/O operation for fd %d: status %s", @@ -748,6 +750,7 @@ static void s_feedback_io_result( aws_error_str(io_op_result->error_code)); handle_data->last_io_operation_result.read_bytes = io_op_result->read_bytes; handle_data->last_io_operation_result.error_code = io_op_result->error_code; + handle_data->last_io_operation_is_updated = 1; } static bool s_is_event_thread(struct aws_event_loop *event_loop) { @@ -954,8 +957,14 @@ static void aws_event_loop_thread(void *user_data) { "id=%p: activity on fd %d, invoking handler.", (void *)event_loop, handle_data->owner->data.fd); + + // Reset last I/O operation result, so if a channel forgets to update its value, we can catch it. + handle_data->last_io_operation_is_updated = 0; + handle_data->on_event( event_loop, handle_data->owner, handle_data->events_this_loop, handle_data->on_event_user_data); + + AWS_ASSERT(handle_data->last_io_operation_is_updated == 1); AWS_LOGF_INFO( AWS_LS_IO_EVENT_LOOP, "id=%p: on_event completion status is %d (%s); read %lu bytes", diff --git a/source/socket_channel_handler.c b/source/socket_channel_handler.c index 220f83cdf..01fb4c2e0 100644 --- a/source/socket_channel_handler.c +++ b/source/socket_channel_handler.c @@ -182,10 +182,11 @@ static void s_do_read(struct socket_handler *socket_handler) { if (total_read < max_to_read) { AWS_ASSERT(last_error != 0); + struct aws_event_loop_io_op_result io_op_result = {total_read, last_error}; + aws_event_loop_feedback_io_op_result( + socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); + if (last_error != AWS_IO_READ_WOULD_BLOCK) { - struct aws_event_loop_io_op_result io_op_result = {total_read, last_error}; - aws_event_loop_feedback_io_op_result( - socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); aws_channel_shutdown(socket_handler->slot->channel, last_error); } else { AWS_LOGF_TRACE( @@ -193,9 +194,6 @@ static void s_do_read(struct socket_handler *socket_handler) { "id=%p: out of data to read on socket. 
" "Waiting on event-loop notification.", (void *)socket_handler->slot->handler); - struct aws_event_loop_io_op_result io_op_result = {total_read, AWS_IO_READ_WOULD_BLOCK}; - aws_event_loop_feedback_io_op_result( - socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); } return; } From 8f7cbffb27b15424d6c137457709852eba203834 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 27 Aug 2024 13:49:16 -0700 Subject: [PATCH 03/39] Add callback to io handle --- CMakeLists.txt | 7 +- include/aws/io/event_loop.h | 19 - include/aws/io/io.h | 21 + source/event_loop.c | 9 - source/qnx/host_resolver.c | 121 ++ source/qnx/kqueue_event_loop.c | 1035 ++++++++++++++++ source/qnx/pipe.c | 596 +++++++++ source/qnx/shared_library.c | 66 + source/qnx/socket.c | 2058 +++++++++++++++++++++++++++++++ source/socket_channel_handler.c | 37 +- tests/socket_test.c | 2 + 11 files changed, 3933 insertions(+), 38 deletions(-) create mode 100644 source/qnx/host_resolver.c create mode 100644 source/qnx/kqueue_event_loop.c create mode 100644 source/qnx/pipe.c create mode 100644 source/qnx/shared_library.c create mode 100644 source/qnx/socket.c diff --git a/CMakeLists.txt b/CMakeLists.txt index dc3395853..286fb6e68 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -115,8 +115,7 @@ elseif (APPLE) ) file(GLOB AWS_IO_OS_SRC - "source/bsd/*.c" - "source/posix/*.c" + "source/qnx/*.c" "source/darwin/*.c" ) @@ -127,7 +126,7 @@ elseif (APPLE) #No choice on TLS for apple, darwinssl will always be used. list(APPEND PLATFORM_LIBS "-framework Security") - set(EVENT_LOOP_DEFINE "KQUEUE") + set(EVENT_LOOP_DEFINE "ON_EVENT_WITH_RESULT") elseif (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" OR CMAKE_SYSTEM_NAME STREQUAL "NetBSD" OR CMAKE_SYSTEM_NAME STREQUAL "OpenBSD") file(GLOB AWS_IO_OS_HEADERS @@ -141,6 +140,8 @@ elseif (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" OR CMAKE_SYSTEM_NAME STREQUAL "NetB set(EVENT_LOOP_DEFINE "KQUEUE") set(USE_S2N ON) +elseif(CMAKE_SYSTEM_NAME STREQUAL "QNX") + set(EVENT_LOOP_DEFINE "ON_EVENT_WITH_RESULT") endif() if (BYO_CRYPTO) diff --git a/include/aws/io/event_loop.h b/include/aws/io/event_loop.h index 9279c7841..58041a4c7 100644 --- a/include/aws/io/event_loop.h +++ b/include/aws/io/event_loop.h @@ -25,7 +25,6 @@ enum aws_io_event_type { struct aws_event_loop; struct aws_task; struct aws_thread_options; -struct aws_event_loop_io_op_result; #if AWS_USE_IO_COMPLETION_PORTS @@ -100,10 +99,6 @@ struct aws_event_loop_vtable { void *user_data); #endif int (*unsubscribe_from_io_events)(struct aws_event_loop *event_loop, struct aws_io_handle *handle); - void (*feedback_io_result)( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - const struct aws_event_loop_io_op_result *io_op_result); void (*free_io_event_resources)(void *user_data); bool (*is_on_callers_thread)(struct aws_event_loop *event_loop); }; @@ -145,11 +140,6 @@ struct aws_event_loop_group { struct aws_shutdown_callback_options shutdown_options; }; -struct aws_event_loop_io_op_result { - size_t read_bytes; - int error_code; -}; - AWS_EXTERN_C_BEGIN #ifdef AWS_USE_IO_COMPLETION_PORTS @@ -376,15 +366,6 @@ int aws_event_loop_subscribe_to_io_events( AWS_IO_API int aws_event_loop_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); -/** - * Update the I/O operation completion status on the given I/O handle. 
- */ -AWS_IO_API -void aws_event_loop_feedback_io_op_result( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - const struct aws_event_loop_io_op_result *io_op_result); - /** * Cleans up resources (user_data) associated with the I/O eventing subsystem for a given handle. This should only * ever be necessary in the case where you are cleaning up an event loop during shutdown and its thread has already diff --git a/include/aws/io/io.h b/include/aws/io/io.h index 011e1a779..27f6f0c9d 100644 --- a/include/aws/io/io.h +++ b/include/aws/io/io.h @@ -14,12 +14,33 @@ AWS_PUSH_SANE_WARNING_LEVEL #define AWS_C_IO_PACKAGE_ID 1 +struct aws_io_handle; + +#if AWS_USE_ON_EVENT_WITH_RESULT +struct aws_event_loop; + +struct aws_io_handle_io_op_result { + size_t read_bytes; + size_t written_bytes; + int read_error_code; + int write_error_code; +}; + +typedef void(aws_io_handle_update_io_results_fn)( + struct aws_event_loop *, + struct aws_io_handle *, + const struct aws_io_handle_io_op_result *); +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ + struct aws_io_handle { union { int fd; void *handle; } data; void *additional_data; +#if AWS_USE_ON_EVENT_WITH_RESULT + aws_io_handle_update_io_results_fn *update_io_result; +#endif }; enum aws_io_message_type { diff --git a/source/event_loop.c b/source/event_loop.c index 1c598d240..1e7aef676 100644 --- a/source/event_loop.c +++ b/source/event_loop.c @@ -514,15 +514,6 @@ int aws_event_loop_unsubscribe_from_io_events(struct aws_event_loop *event_loop, return event_loop->vtable->unsubscribe_from_io_events(event_loop, handle); } -void aws_event_loop_feedback_io_op_result( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - const struct aws_event_loop_io_op_result *io_op_result) { - if (event_loop->vtable->feedback_io_result) { - event_loop->vtable->feedback_io_result(event_loop, handle, io_op_result); - } -} - void aws_event_loop_free_io_event_resources(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { AWS_ASSERT(event_loop && event_loop->vtable->free_io_event_resources); event_loop->vtable->free_io_event_resources(handle->additional_data); diff --git a/source/qnx/host_resolver.c b/source/qnx/host_resolver.c new file mode 100644 index 000000000..e4aafb838 --- /dev/null +++ b/source/qnx/host_resolver.c @@ -0,0 +1,121 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include + +#include + +#include + +#include +#include +#include +#include + +int aws_default_dns_resolve( + struct aws_allocator *allocator, + const struct aws_string *host_name, + struct aws_array_list *output_addresses, + void *user_data) { + + (void)user_data; + struct addrinfo *result = NULL; + struct addrinfo *iter = NULL; + /* max string length for ipv6. 
*/ + socklen_t max_len = INET6_ADDRSTRLEN; + char address_buffer[max_len]; + + const char *hostname_cstr = aws_string_c_str(host_name); + AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolving host %s", hostname_cstr); + + /* Android would prefer NO HINTS IF YOU DON'T MIND, SIR */ +#if defined(ANDROID) + int err_code = getaddrinfo(hostname_cstr, NULL, NULL, &result); +#else + struct addrinfo hints; + AWS_ZERO_STRUCT(hints); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; +# if !defined(__OpenBSD__) + hints.ai_flags = AI_ALL | AI_V4MAPPED; +# endif /* __OpenBSD__ */ + + int err_code = getaddrinfo(hostname_cstr, NULL, &hints, &result); +#endif + + if (err_code) { + AWS_LOGF_ERROR( + AWS_LS_IO_DNS, "static: getaddrinfo failed with error_code %d: %s", err_code, gai_strerror(err_code)); + goto clean_up; + } + + for (iter = result; iter != NULL; iter = iter->ai_next) { + struct aws_host_address host_address; + + AWS_ZERO_ARRAY(address_buffer); + + if (iter->ai_family == AF_INET6) { + host_address.record_type = AWS_ADDRESS_RECORD_TYPE_AAAA; + inet_ntop(iter->ai_family, &((struct sockaddr_in6 *)iter->ai_addr)->sin6_addr, address_buffer, max_len); + } else { + host_address.record_type = AWS_ADDRESS_RECORD_TYPE_A; + inet_ntop(iter->ai_family, &((struct sockaddr_in *)iter->ai_addr)->sin_addr, address_buffer, max_len); + } + + size_t address_len = strlen(address_buffer); + const struct aws_string *address = + aws_string_new_from_array(allocator, (const uint8_t *)address_buffer, address_len); + + if (!address) { + goto clean_up; + } + + const struct aws_string *host_cpy = aws_string_new_from_string(allocator, host_name); + + if (!host_cpy) { + aws_string_destroy((void *)address); + goto clean_up; + } + + AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolved record: %s", address_buffer); + + host_address.address = address; + host_address.weight = 0; + host_address.allocator = allocator; + host_address.use_count = 0; + host_address.connection_failure_count = 0; + host_address.host = host_cpy; + + if (aws_array_list_push_back(output_addresses, &host_address)) { + aws_host_address_clean_up(&host_address); + goto clean_up; + } + } + + freeaddrinfo(result); + return AWS_OP_SUCCESS; + +clean_up: + if (result) { + freeaddrinfo(result); + } + + if (err_code) { + switch (err_code) { + case EAI_FAIL: + case EAI_AGAIN: + return aws_raise_error(AWS_IO_DNS_QUERY_FAILED); + case EAI_MEMORY: + return aws_raise_error(AWS_ERROR_OOM); + case EAI_NONAME: + case EAI_SERVICE: + return aws_raise_error(AWS_IO_DNS_INVALID_NAME); + default: + return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); + } + } + + return AWS_OP_ERR; +} diff --git a/source/qnx/kqueue_event_loop.c b/source/qnx/kqueue_event_loop.c new file mode 100644 index 000000000..6374eea6e --- /dev/null +++ b/source/qnx/kqueue_event_loop.c @@ -0,0 +1,1035 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include + +#include + +#include +#include +#include +#include +#include +#include + +#if defined(__FreeBSD__) || defined(__NetBSD__) +# define __BSD_VISIBLE 1 +# include +#endif + +#include + +#include +#include +#include + +static void s_destroy(struct aws_event_loop *event_loop); +static int s_run(struct aws_event_loop *event_loop); +static int s_stop(struct aws_event_loop *event_loop); +static int s_wait_for_stop_completion(struct aws_event_loop *event_loop); +static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task); +static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos); +static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task); +static int s_subscribe_to_io_events( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + int events, + aws_event_loop_on_event_fn *on_event, + void *user_data); +static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); +static void s_free_io_event_resources(void *user_data); +static bool s_is_event_thread(struct aws_event_loop *event_loop); + +static void aws_event_loop_thread(void *user_data); + +int aws_open_nonblocking_posix_pipe(int pipe_fds[2]); + +enum event_thread_state { + EVENT_THREAD_STATE_READY_TO_RUN, + EVENT_THREAD_STATE_RUNNING, + EVENT_THREAD_STATE_STOPPING, +}; + +enum pipe_fd_index { + READ_FD, + WRITE_FD, +}; + +struct kqueue_loop { + /* thread_created_on is the handle to the event loop thread. */ + struct aws_thread thread_created_on; + /* thread_joined_to is used by the thread destroying the event loop. */ + aws_thread_id_t thread_joined_to; + /* running_thread_id is NULL if the event loop thread is stopped or points-to the thread_id of the thread running + * the event loop (either thread_created_on or thread_joined_to). Atomic because of concurrent writes (e.g., + * run/stop) and reads (e.g., is_event_loop_thread). + * An aws_thread_id_t variable itself cannot be atomic because it is an opaque type that is platform-dependent. */ + struct aws_atomic_var running_thread_id; + int kq_fd; /* kqueue file descriptor */ + + /* Pipe for signaling to event-thread that cross_thread_data has changed. */ + int cross_thread_signal_pipe[2]; + + /* cross_thread_data holds things that must be communicated across threads. + * When the event-thread is running, the mutex must be locked while anyone touches anything in cross_thread_data. + * If this data is modified outside the thread, the thread is signaled via activity on a pipe. */ + struct { + struct aws_mutex mutex; + bool thread_signaled; /* whether thread has been signaled about changes to cross_thread_data */ + struct aws_linked_list tasks_to_schedule; + enum event_thread_state state; + } cross_thread_data; + + /* thread_data holds things which, when the event-thread is running, may only be touched by the thread */ + struct { + struct aws_task_scheduler scheduler; + + int connected_handle_count; + + /* These variables duplicate ones in cross_thread_data. 
We move values out while holding the mutex and operate
+         * on them later */
+        enum event_thread_state state;
+    } thread_data;
+
+    struct aws_thread_options thread_options;
+};
+
+/* Data attached to aws_io_handle while the handle is subscribed to io events */
+struct handle_data {
+    struct aws_io_handle *owner;
+    struct aws_event_loop *event_loop;
+    aws_event_loop_on_event_fn *on_event;
+    void *on_event_user_data;
+
+    int events_subscribed; /* aws_io_event_types this handle should be subscribed to */
+    int events_this_loop;  /* aws_io_event_types received during current loop of the event-thread */
+
+    enum { HANDLE_STATE_SUBSCRIBING, HANDLE_STATE_SUBSCRIBED, HANDLE_STATE_UNSUBSCRIBED } state;
+
+    struct aws_task subscribe_task;
+    struct aws_task cleanup_task;
+};
+
+enum {
+    DEFAULT_TIMEOUT_SEC = 100, /* Max kevent() timeout per loop of the event-thread */
+    MAX_EVENTS = 100,          /* Max kevents to process per loop of the event-thread */
+};
+
+struct aws_event_loop_vtable s_kqueue_vtable = {
+    .destroy = s_destroy,
+    .run = s_run,
+    .stop = s_stop,
+    .wait_for_stop_completion = s_wait_for_stop_completion,
+    .schedule_task_now = s_schedule_task_now,
+    .schedule_task_future = s_schedule_task_future,
+    .subscribe_to_io_events = s_subscribe_to_io_events,
+    .cancel_task = s_cancel_task,
+    .unsubscribe_from_io_events = s_unsubscribe_from_io_events,
+    .free_io_event_resources = s_free_io_event_resources,
+    .is_on_callers_thread = s_is_event_thread,
+};
+
+static void s_update_io_result(
+    struct aws_event_loop *event_loop,
+    struct aws_io_handle *handle,
+    const struct aws_io_handle_io_op_result *io_op_result) {
+    AWS_ASSERT(handle->additional_data);
+    struct handle_data *handle_data = handle->additional_data;
+    AWS_ASSERT(event_loop == handle_data->event_loop);
+    AWS_LOGF_TRACE(
+        AWS_LS_IO_EVENT_LOOP,
+        "id=%p: got feedback on I/O operation for fd %d: read: status %d (%s), %lu bytes; write: status %d (%s), %lu "
+        "bytes",
+        (void *)event_loop,
+        handle->data.fd,
+        io_op_result->read_error_code,
+        aws_error_str(io_op_result->read_error_code),
+        io_op_result->read_bytes,
+        io_op_result->write_error_code,
+        aws_error_str(io_op_result->write_error_code),
+        io_op_result->written_bytes);
+}
+
+struct aws_event_loop *aws_event_loop_new_default_with_options(
+    struct aws_allocator *alloc,
+    const struct aws_event_loop_options *options) {
+    AWS_ASSERT(alloc);
+    // FIXME Remove this assert.
+    AWS_ASSERT(clock);
+    AWS_ASSERT(options);
+    AWS_ASSERT(options->clock);
+
+    bool clean_up_event_loop_mem = false;
+    bool clean_up_event_loop_base = false;
+    bool clean_up_impl_mem = false;
+    bool clean_up_thread = false;
+    bool clean_up_kqueue = false;
+    bool clean_up_signal_pipe = false;
+    bool clean_up_signal_kevent = false;
+    bool clean_up_mutex = false;
+
+    struct aws_event_loop *event_loop = aws_mem_acquire(alloc, sizeof(struct aws_event_loop));
+    if (!event_loop) {
+        return NULL;
+    }
+
+    AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Initializing edge-triggered kqueue", (void *)event_loop);
+    clean_up_event_loop_mem = true;
+
+    int err = aws_event_loop_init_base(event_loop, alloc, options->clock);
+    if (err) {
+        goto clean_up;
+    }
+    clean_up_event_loop_base = true;
+
+    struct kqueue_loop *impl = aws_mem_calloc(alloc, 1, sizeof(struct kqueue_loop));
+    if (!impl) {
+        goto clean_up;
+    }
+
+    if (options->thread_options) {
+        impl->thread_options = *options->thread_options;
+    } else {
+        impl->thread_options = *aws_default_thread_options();
+    }
+
+    /* initialize thread id to NULL.
It will be set when the event loop thread starts. */ + aws_atomic_init_ptr(&impl->running_thread_id, NULL); + clean_up_impl_mem = true; + + err = aws_thread_init(&impl->thread_created_on, alloc); + if (err) { + goto clean_up; + } + clean_up_thread = true; + + impl->kq_fd = kqueue(); + if (impl->kq_fd == -1) { + AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: Failed to open kqueue handle.", (void *)event_loop); + aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); + goto clean_up; + } + clean_up_kqueue = true; + + err = aws_open_nonblocking_posix_pipe(impl->cross_thread_signal_pipe); + if (err) { + AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: failed to open pipe handle.", (void *)event_loop); + goto clean_up; + } + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p: pipe descriptors read %d, write %d.", + (void *)event_loop, + impl->cross_thread_signal_pipe[READ_FD], + impl->cross_thread_signal_pipe[WRITE_FD]); + clean_up_signal_pipe = true; + + /* Set up kevent to handle activity on the cross_thread_signal_pipe */ + struct kevent thread_signal_kevent; + EV_SET( + &thread_signal_kevent, + impl->cross_thread_signal_pipe[READ_FD], + EVFILT_READ /*filter*/, + EV_ADD | EV_CLEAR /*flags*/, + 0 /*fflags*/, + 0 /*data*/, + NULL /*udata*/); + + int res = kevent( + impl->kq_fd, + &thread_signal_kevent /*changelist*/, + 1 /*nchanges*/, + NULL /*eventlist*/, + 0 /*nevents*/, + NULL /*timeout*/); + + if (res == -1) { + AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: failed to create cross-thread signal kevent.", (void *)event_loop); + aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); + goto clean_up; + } + clean_up_signal_kevent = true; + + err = aws_mutex_init(&impl->cross_thread_data.mutex); + if (err) { + goto clean_up; + } + clean_up_mutex = true; + + impl->cross_thread_data.thread_signaled = false; + + aws_linked_list_init(&impl->cross_thread_data.tasks_to_schedule); + + impl->cross_thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; + + err = aws_task_scheduler_init(&impl->thread_data.scheduler, alloc); + if (err) { + goto clean_up; + } + + impl->thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; + + event_loop->impl_data = impl; + + event_loop->vtable = &s_kqueue_vtable; + + /* success */ + return event_loop; + +clean_up: + if (clean_up_mutex) { + aws_mutex_clean_up(&impl->cross_thread_data.mutex); + } + if (clean_up_signal_kevent) { + thread_signal_kevent.flags = EV_DELETE; + kevent( + impl->kq_fd, + &thread_signal_kevent /*changelist*/, + 1 /*nchanges*/, + NULL /*eventlist*/, + 0 /*nevents*/, + NULL /*timeout*/); + } + if (clean_up_signal_pipe) { + close(impl->cross_thread_signal_pipe[READ_FD]); + close(impl->cross_thread_signal_pipe[WRITE_FD]); + } + if (clean_up_kqueue) { + close(impl->kq_fd); + } + if (clean_up_thread) { + aws_thread_clean_up(&impl->thread_created_on); + } + if (clean_up_impl_mem) { + aws_mem_release(alloc, impl); + } + if (clean_up_event_loop_base) { + aws_event_loop_clean_up_base(event_loop); + } + if (clean_up_event_loop_mem) { + aws_mem_release(alloc, event_loop); + } + return NULL; +} + +static void s_destroy(struct aws_event_loop *event_loop) { + AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: destroying event_loop", (void *)event_loop); + struct kqueue_loop *impl = event_loop->impl_data; + + /* Stop the event-thread. This might have already happened. It's safe to call multiple times. 
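(s_stop() merely flips cross_thread_data.state under the mutex and signals the pipe, so repeated calls are harmless.)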
*/
+    s_stop(event_loop);
+    int err = s_wait_for_stop_completion(event_loop);
+    if (err) {
+        AWS_LOGF_WARN(
+            AWS_LS_IO_EVENT_LOOP,
+            "id=%p: failed to destroy event-thread, resources have been leaked",
+            (void *)event_loop);
+        AWS_ASSERT("Failed to destroy event-thread, resources have been leaked." == NULL);
+        return;
+    }
+    /* setting this so that canceled tasks don't blow up when asking if they're on the event-loop thread. */
+    impl->thread_joined_to = aws_thread_current_thread_id();
+    aws_atomic_store_ptr(&impl->running_thread_id, &impl->thread_joined_to);
+
+    /* Clean up task-related stuff first. It's possible that a cancelled task adds further tasks to this event_loop.
+     * Tasks added in this way will be in cross_thread_data.tasks_to_schedule, so we clean that up last */
+
+    aws_task_scheduler_clean_up(&impl->thread_data.scheduler); /* Tasks in scheduler get cancelled*/
+
+    while (!aws_linked_list_empty(&impl->cross_thread_data.tasks_to_schedule)) {
+        struct aws_linked_list_node *node = aws_linked_list_pop_front(&impl->cross_thread_data.tasks_to_schedule);
+        struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node);
+        task->fn(task, task->arg, AWS_TASK_STATUS_CANCELED);
+    }
+
+    /* Warn user if aws_io_handle was subscribed, but never unsubscribed. This would cause memory leaks. */
+    AWS_ASSERT(impl->thread_data.connected_handle_count == 0);
+
+    /* Clean up everything else */
+    aws_mutex_clean_up(&impl->cross_thread_data.mutex);
+
+    struct kevent thread_signal_kevent;
+    EV_SET(
+        &thread_signal_kevent,
+        impl->cross_thread_signal_pipe[READ_FD],
+        EVFILT_READ /*filter*/,
+        EV_DELETE /*flags*/,
+        0 /*fflags*/,
+        0 /*data*/,
+        NULL /*udata*/);
+
+    kevent(
+        impl->kq_fd,
+        &thread_signal_kevent /*changelist*/,
+        1 /*nchanges*/,
+        NULL /*eventlist*/,
+        0 /*nevents*/,
+        NULL /*timeout*/);
+
+    close(impl->cross_thread_signal_pipe[READ_FD]);
+    close(impl->cross_thread_signal_pipe[WRITE_FD]);
+    close(impl->kq_fd);
+    aws_thread_clean_up(&impl->thread_created_on);
+    aws_mem_release(event_loop->alloc, impl);
+    aws_event_loop_clean_up_base(event_loop);
+    aws_mem_release(event_loop->alloc, event_loop);
+}
+
+static int s_run(struct aws_event_loop *event_loop) {
+    struct kqueue_loop *impl = event_loop->impl_data;
+
+    AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: starting event-loop thread.", (void *)event_loop);
+    /* to re-run, call stop() and wait_for_stop_completion() */
+    AWS_ASSERT(impl->cross_thread_data.state == EVENT_THREAD_STATE_READY_TO_RUN);
+    AWS_ASSERT(impl->thread_data.state == EVENT_THREAD_STATE_READY_TO_RUN);
+
+    /* Since thread isn't running it's ok to touch thread_data,
+     * and it's ok to touch cross_thread_data without locking the mutex */
+    impl->cross_thread_data.state = EVENT_THREAD_STATE_RUNNING;
+
+    aws_thread_increment_unjoined_count();
+    int err =
+        aws_thread_launch(&impl->thread_created_on, aws_event_loop_thread, (void *)event_loop, &impl->thread_options);
+
+    if (err) {
+        aws_thread_decrement_unjoined_count();
+        AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: thread creation failed.", (void *)event_loop);
+        goto clean_up;
+    }
+
+    return AWS_OP_SUCCESS;
+
+clean_up:
+    impl->cross_thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN;
+    return AWS_OP_ERR;
+}
+
+/* This function can't fail, we're relying on the thread responding to critical messages (ex: stop thread) */
+void signal_cross_thread_data_changed(struct aws_event_loop *event_loop) {
+    struct kqueue_loop *impl = event_loop->impl_data;
+
+    AWS_LOGF_TRACE(
+        AWS_LS_IO_EVENT_LOOP,
+        "id=%p: signaling event-loop that
cross-thread tasks need to be scheduled.", + (void *)event_loop); + /* Doesn't actually matter what we write, any activity on pipe signals that cross_thread_data has changed, + * If the pipe is full and the write fails, that's fine, the event-thread will get the signal from some previous + * write */ + uint32_t write_whatever = 0xC0FFEE; + write(impl->cross_thread_signal_pipe[WRITE_FD], &write_whatever, sizeof(write_whatever)); +} + +static int s_stop(struct aws_event_loop *event_loop) { + struct kqueue_loop *impl = event_loop->impl_data; + + bool signal_thread = false; + + { /* Begin critical section */ + aws_mutex_lock(&impl->cross_thread_data.mutex); + if (impl->cross_thread_data.state == EVENT_THREAD_STATE_RUNNING) { + impl->cross_thread_data.state = EVENT_THREAD_STATE_STOPPING; + signal_thread = !impl->cross_thread_data.thread_signaled; + impl->cross_thread_data.thread_signaled = true; + } + aws_mutex_unlock(&impl->cross_thread_data.mutex); + } /* End critical section */ + + if (signal_thread) { + signal_cross_thread_data_changed(event_loop); + } + + return AWS_OP_SUCCESS; +} + +static int s_wait_for_stop_completion(struct aws_event_loop *event_loop) { + struct kqueue_loop *impl = event_loop->impl_data; + +#ifdef DEBUG_BUILD + aws_mutex_lock(&impl->cross_thread_data.mutex); + /* call stop() before wait_for_stop_completion() or you'll wait forever */ + AWS_ASSERT(impl->cross_thread_data.state != EVENT_THREAD_STATE_RUNNING); + aws_mutex_unlock(&impl->cross_thread_data.mutex); +#endif + + int err = aws_thread_join(&impl->thread_created_on); + aws_thread_decrement_unjoined_count(); + if (err) { + return AWS_OP_ERR; + } + + /* Since thread is no longer running it's ok to touch thread_data, + * and it's ok to touch cross_thread_data without locking the mutex */ + impl->cross_thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; + impl->thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; + + return AWS_OP_SUCCESS; +} + +/* Common functionality for "now" and "future" task scheduling. + * If `run_at_nanos` is zero then the task is scheduled as a "now" task. 
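+ *
+ * A minimal usage sketch of the public wrappers built on top of this (the task function and
+ * my_user_data are hypothetical; the APIs are the ones used elsewhere in this codebase):
+ *
+ *     static void s_my_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) {
+ *         if (status == AWS_TASK_STATUS_RUN_READY) {
+ *             ... do work on the event thread ...
+ *         }
+ *     }
+ *
+ *     struct aws_task task;
+ *     aws_task_init(&task, s_my_task_fn, my_user_data, "my_task");
+ *     aws_event_loop_schedule_task_now(event_loop, &task);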
*/ +static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { + AWS_ASSERT(task); + struct kqueue_loop *impl = event_loop->impl_data; + + /* If we're on the event-thread, just schedule it directly */ + if (s_is_event_thread(event_loop)) { + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p: scheduling task %p in-thread for timestamp %llu", + (void *)event_loop, + (void *)task, + (unsigned long long)run_at_nanos); + if (run_at_nanos == 0) { + aws_task_scheduler_schedule_now(&impl->thread_data.scheduler, task); + } else { + aws_task_scheduler_schedule_future(&impl->thread_data.scheduler, task, run_at_nanos); + } + return; + } + + /* Otherwise, add it to cross_thread_data.tasks_to_schedule and signal the event-thread to process it */ + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p: scheduling task %p cross-thread for timestamp %llu", + (void *)event_loop, + (void *)task, + (unsigned long long)run_at_nanos); + task->timestamp = run_at_nanos; + bool should_signal_thread = false; + + /* Begin critical section */ + aws_mutex_lock(&impl->cross_thread_data.mutex); + aws_linked_list_push_back(&impl->cross_thread_data.tasks_to_schedule, &task->node); + + /* Signal thread that cross_thread_data has changed (unless it's been signaled already) */ + if (!impl->cross_thread_data.thread_signaled) { + should_signal_thread = true; + impl->cross_thread_data.thread_signaled = true; + } + + aws_mutex_unlock(&impl->cross_thread_data.mutex); + /* End critical section */ + + if (should_signal_thread) { + signal_cross_thread_data_changed(event_loop); + } +} + +static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task) { + s_schedule_task_common(event_loop, task, 0); /* Zero is used to denote "now" tasks */ +} + +static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { + s_schedule_task_common(event_loop, task, run_at_nanos); +} + +static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { + struct kqueue_loop *kqueue_loop = event_loop->impl_data; + AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: cancelling task %p", (void *)event_loop, (void *)task); + aws_task_scheduler_cancel_task(&kqueue_loop->thread_data.scheduler, task); +} + +/* Scheduled task that connects aws_io_handle with the kqueue */ +static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_task_status status) { + (void)task; + struct handle_data *handle_data = user_data; + struct aws_event_loop *event_loop = handle_data->event_loop; + struct kqueue_loop *impl = handle_data->event_loop->impl_data; + + impl->thread_data.connected_handle_count++; + + /* if task was cancelled, nothing to do */ + if (status == AWS_TASK_STATUS_CANCELED) { + return; + } + + /* If handle was unsubscribed before this task could execute, nothing to do */ + if (handle_data->state == HANDLE_STATE_UNSUBSCRIBED) { + return; + } + + AWS_ASSERT(handle_data->state == HANDLE_STATE_SUBSCRIBING); + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, "id=%p: subscribing to events on fd %d", (void *)event_loop, handle_data->owner->data.fd); + + /* In order to monitor both reads and writes, kqueue requires you to add two separate kevents. + * If we're adding two separate kevents, but one of those fails, we need to remove the other kevent. + * Therefore we use the EV_RECEIPT flag. This causes kevent() to tell whether each EV_ADD succeeded, + * rather than the usual behavior of telling us about recent events. 
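+     * (With EV_RECEIPT, every entry in the output eventlist comes back with EV_ERROR set in its
+     * flags and with the data field holding 0 on success or an errno value on failure; the loop
+     * below checks exactly that.)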
*/ + struct kevent changelist[2]; + AWS_ZERO_ARRAY(changelist); + + int changelist_size = 0; + + if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_READABLE) { + EV_SET( + &changelist[changelist_size++], + handle_data->owner->data.fd, + EVFILT_READ /*filter*/, + EV_ADD | EV_RECEIPT | EV_CLEAR /*flags*/, + 0 /*fflags*/, + 0 /*data*/, + handle_data /*udata*/); + } + if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_WRITABLE) { + EV_SET( + &changelist[changelist_size++], + handle_data->owner->data.fd, + EVFILT_WRITE /*filter*/, + EV_ADD | EV_RECEIPT | EV_CLEAR /*flags*/, + 0 /*fflags*/, + 0 /*data*/, + handle_data /*udata*/); + } + + int num_events = kevent( + impl->kq_fd, + changelist /*changelist*/, + changelist_size /*nchanges*/, + changelist /*eventlist. It's OK to re-use the same memory for changelist input and eventlist output*/, + changelist_size /*nevents*/, + NULL /*timeout*/); + if (num_events == -1) { + goto subscribe_failed; + } + + /* Look through results to see if any failed */ + for (int i = 0; i < num_events; ++i) { + /* Every result should be flagged as error, that's just how EV_RECEIPT works */ + AWS_ASSERT(changelist[i].flags & EV_ERROR); + + /* If a real error occurred, .data contains the error code */ + if (changelist[i].data != 0) { + goto subscribe_failed; + } + } + + /* Success */ + handle_data->state = HANDLE_STATE_SUBSCRIBED; + handle_data->owner->update_io_result = s_update_io_result; + return; + +subscribe_failed: + AWS_LOGF_ERROR( + AWS_LS_IO_EVENT_LOOP, + "id=%p: failed to subscribe to events on fd %d", + (void *)event_loop, + handle_data->owner->data.fd); + /* Remove any related kevents that succeeded */ + for (int i = 0; i < num_events; ++i) { + if (changelist[i].data == 0) { + changelist[i].flags = EV_DELETE; + kevent( + impl->kq_fd, + &changelist[i] /*changelist*/, + 1 /*nchanges*/, + NULL /*eventlist*/, + 0 /*nevents*/, + NULL /*timeout*/); + } + } + + /* We can't return an error code because this was a scheduled task. + * Notify the user of the failed subscription by passing AWS_IO_EVENT_TYPE_ERROR to the callback. */ + handle_data->on_event(event_loop, handle_data->owner, AWS_IO_EVENT_TYPE_ERROR, handle_data->on_event_user_data); +} + +static int s_subscribe_to_io_events( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + int events, + aws_event_loop_on_event_fn *on_event, + void *user_data) { + + AWS_ASSERT(event_loop); + AWS_ASSERT(handle->data.fd != -1); + AWS_ASSERT(handle->additional_data == NULL); + AWS_ASSERT(on_event); + /* Must subscribe for read, write, or both */ + AWS_ASSERT(events & (AWS_IO_EVENT_TYPE_READABLE | AWS_IO_EVENT_TYPE_WRITABLE)); + + struct handle_data *handle_data = aws_mem_calloc(event_loop->alloc, 1, sizeof(struct handle_data)); + if (!handle_data) { + return AWS_OP_ERR; + } + + handle_data->owner = handle; + handle_data->event_loop = event_loop; + handle_data->on_event = on_event; + handle_data->on_event_user_data = user_data; + handle_data->events_subscribed = events; + handle_data->state = HANDLE_STATE_SUBSCRIBING; + + handle->additional_data = handle_data; + + /* We schedule a task to perform the actual changes to the kqueue, read on for an explanation why... + * + * kqueue requires separate registrations for read and write events. + * If the user wants to know about both read and write, we need register once for read and once for write. + * If the first registration succeeds, but the second registration fails, we need to delete the first registration. 
+     * If this all happened outside the event-thread, the successful registration's events could begin processing
+     * in the brief window of time before the registration is deleted. */
+
+    aws_task_init(&handle_data->subscribe_task, s_subscribe_task, handle_data, "kqueue_event_loop_subscribe");
+    s_schedule_task_now(event_loop, &handle_data->subscribe_task);
+
+    return AWS_OP_SUCCESS;
+}
+
+static void s_free_io_event_resources(void *user_data) {
+    struct handle_data *handle_data = user_data;
+    struct kqueue_loop *impl = handle_data->event_loop->impl_data;
+
+    impl->thread_data.connected_handle_count--;
+
+    aws_mem_release(handle_data->event_loop->alloc, handle_data);
+}
+
+static void s_clean_up_handle_data_task(struct aws_task *task, void *user_data, enum aws_task_status status) {
+    (void)task;
+    (void)status;
+
+    struct handle_data *handle_data = user_data;
+    s_free_io_event_resources(handle_data);
+}
+
+static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) {
+    AWS_LOGF_TRACE(
+        AWS_LS_IO_EVENT_LOOP, "id=%p: un-subscribing from events on fd %d", (void *)event_loop, handle->data.fd);
+    AWS_ASSERT(handle->additional_data);
+    struct handle_data *handle_data = handle->additional_data;
+    struct kqueue_loop *impl = event_loop->impl_data;
+
+    AWS_ASSERT(event_loop == handle_data->event_loop);
+
+    /* If the handle was successfully subscribed to kqueue, then remove it. */
+    if (handle_data->state == HANDLE_STATE_SUBSCRIBED) {
+        struct kevent changelist[2];
+        int changelist_size = 0;
+
+        if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_READABLE) {
+            EV_SET(
+                &changelist[changelist_size++],
+                handle_data->owner->data.fd,
+                EVFILT_READ /*filter*/,
+                EV_DELETE /*flags*/,
+                0 /*fflags*/,
+                0 /*data*/,
+                handle_data /*udata*/);
+        }
+        if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_WRITABLE) {
+            EV_SET(
+                &changelist[changelist_size++],
+                handle_data->owner->data.fd,
+                EVFILT_WRITE /*filter*/,
+                EV_DELETE /*flags*/,
+                0 /*fflags*/,
+                0 /*data*/,
+                handle_data /*udata*/);
+        }
+
+        kevent(impl->kq_fd, changelist, changelist_size, NULL /*eventlist*/, 0 /*nevents*/, NULL /*timeout*/);
+    }
+
+    /* Schedule a task to clean up the memory. This is done in a task to prevent the following scenario:
+     * - While processing a batch of events, some callback unsubscribes another aws_io_handle.
+     * - One of the other events in this batch belongs to that other aws_io_handle.
+     * - If the handle_data were already deleted, there would be an access to invalid memory. */
+
+    aws_task_init(
+        &handle_data->cleanup_task, s_clean_up_handle_data_task, handle_data, "kqueue_event_loop_clean_up_handle_data");
+    aws_event_loop_schedule_task_now(event_loop, &handle_data->cleanup_task);
+
+    handle_data->state = HANDLE_STATE_UNSUBSCRIBED;
+    handle->additional_data = NULL;
+
+    return AWS_OP_SUCCESS;
+}
+
+static bool s_is_event_thread(struct aws_event_loop *event_loop) {
+    struct kqueue_loop *impl = event_loop->impl_data;
+
+    aws_thread_id_t *thread_id = aws_atomic_load_ptr(&impl->running_thread_id);
+    return thread_id && aws_thread_thread_id_equal(*thread_id, aws_thread_current_thread_id());
+}
+
+/* Called from thread.
+ * Takes tasks from tasks_to_schedule and adds them to the scheduler.
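+ * A timestamp of 0 marks a task that was scheduled as "now"; anything else is a "future" task.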
*/ +static void s_process_tasks_to_schedule(struct aws_event_loop *event_loop, struct aws_linked_list *tasks_to_schedule) { + struct kqueue_loop *impl = event_loop->impl_data; + AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: processing cross-thread tasks", (void *)event_loop); + + while (!aws_linked_list_empty(tasks_to_schedule)) { + struct aws_linked_list_node *node = aws_linked_list_pop_front(tasks_to_schedule); + struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); + + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p: task %p pulled to event-loop, scheduling now.", + (void *)event_loop, + (void *)task); + /* Timestamp 0 is used to denote "now" tasks */ + if (task->timestamp == 0) { + aws_task_scheduler_schedule_now(&impl->thread_data.scheduler, task); + } else { + aws_task_scheduler_schedule_future(&impl->thread_data.scheduler, task, task->timestamp); + } + } +} + +static void s_process_cross_thread_data(struct aws_event_loop *event_loop) { + struct kqueue_loop *impl = event_loop->impl_data; + + AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: notified of cross-thread data to process", (void *)event_loop); + /* If there are tasks to schedule, grab them all out of synced_data.tasks_to_schedule. + * We'll process them later, so that we minimize time spent holding the mutex. */ + struct aws_linked_list tasks_to_schedule; + aws_linked_list_init(&tasks_to_schedule); + + { /* Begin critical section */ + aws_mutex_lock(&impl->cross_thread_data.mutex); + impl->cross_thread_data.thread_signaled = false; + + bool initiate_stop = (impl->cross_thread_data.state == EVENT_THREAD_STATE_STOPPING) && + (impl->thread_data.state == EVENT_THREAD_STATE_RUNNING); + if (AWS_UNLIKELY(initiate_stop)) { + impl->thread_data.state = EVENT_THREAD_STATE_STOPPING; + } + + aws_linked_list_swap_contents(&impl->cross_thread_data.tasks_to_schedule, &tasks_to_schedule); + + aws_mutex_unlock(&impl->cross_thread_data.mutex); + } /* End critical section */ + + s_process_tasks_to_schedule(event_loop, &tasks_to_schedule); +} + +static int s_aws_event_flags_from_kevent(struct kevent *kevent) { + int event_flags = 0; + + if (kevent->flags & EV_ERROR) { + event_flags |= AWS_IO_EVENT_TYPE_ERROR; + } else if (kevent->filter == EVFILT_READ) { + if (kevent->data != 0) { + event_flags |= AWS_IO_EVENT_TYPE_READABLE; + } + + if (kevent->flags & EV_EOF) { + event_flags |= AWS_IO_EVENT_TYPE_CLOSED; + } + } else if (kevent->filter == EVFILT_WRITE) { + if (kevent->data != 0) { + event_flags |= AWS_IO_EVENT_TYPE_WRITABLE; + } + + if (kevent->flags & EV_EOF) { + event_flags |= AWS_IO_EVENT_TYPE_CLOSED; + } + } + + return event_flags; +} + +/** + * This just calls kevent() + * + * We broke this out into its own function so that the stacktrace clearly shows + * what this thread is doing. We've had a lot of cases where users think this + * thread is deadlocked because it's stuck here. We want it to be clear + * that it's doing nothing on purpose. It's waiting for events to happen... 
+ */ +AWS_NO_INLINE +static int aws_event_loop_listen_for_io_events(int kq_fd, struct kevent kevents[MAX_EVENTS], struct timespec *timeout) { + return kevent(kq_fd, NULL /*changelist*/, 0 /*nchanges*/, kevents /*eventlist*/, MAX_EVENTS /*nevents*/, timeout); +} + +static void s_aws_kqueue_cleanup_aws_lc_thread_local_state(void *user_data) { + (void)user_data; + + aws_cal_thread_clean_up(); +} + +static void aws_event_loop_thread(void *user_data) { + struct aws_event_loop *event_loop = user_data; + AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: main loop started", (void *)event_loop); + struct kqueue_loop *impl = event_loop->impl_data; + + /* set thread id to the event-loop's thread. */ + aws_atomic_store_ptr(&impl->running_thread_id, &impl->thread_created_on.thread_id); + + AWS_ASSERT(impl->thread_data.state == EVENT_THREAD_STATE_READY_TO_RUN); + impl->thread_data.state = EVENT_THREAD_STATE_RUNNING; + + struct kevent kevents[MAX_EVENTS]; + + /* A single aws_io_handle could have two separate kevents if subscribed for both read and write. + * If both the read and write kevents fire in the same loop of the event-thread, + * combine the event-flags and deliver them in a single callback. + * This makes the kqueue_event_loop behave more like the other platform implementations. */ + struct handle_data *io_handle_events[MAX_EVENTS]; + + struct timespec timeout = { + .tv_sec = DEFAULT_TIMEOUT_SEC, + .tv_nsec = 0, + }; + + AWS_LOGF_INFO( + AWS_LS_IO_EVENT_LOOP, + "id=%p: default timeout %ds, and max events to process per tick %d", + (void *)event_loop, + DEFAULT_TIMEOUT_SEC, + MAX_EVENTS); + + aws_thread_current_at_exit(s_aws_kqueue_cleanup_aws_lc_thread_local_state, NULL); + + while (impl->thread_data.state == EVENT_THREAD_STATE_RUNNING) { + int num_io_handle_events = 0; + bool should_process_cross_thread_data = false; + + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p: waiting for a maximum of %ds %lluns", + (void *)event_loop, + (int)timeout.tv_sec, + (unsigned long long)timeout.tv_nsec); + + /* Process kqueue events */ + int num_kevents = aws_event_loop_listen_for_io_events(impl->kq_fd, kevents, &timeout); + + aws_event_loop_register_tick_start(event_loop); + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, "id=%p: wake up with %d events to process.", (void *)event_loop, num_kevents); + if (num_kevents == -1) { + /* Raise an error, in case this is interesting to anyone monitoring, + * and continue on with this loop. We can't process events, + * but we can still process scheduled tasks */ + aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); + + /* Force the cross_thread_data to be processed. + * There might be valuable info in there, like the message to stop the thread. + * It's fine to do this even if nothing has changed, it just costs a mutex lock/unlock. */ + should_process_cross_thread_data = true; + } + + for (int i = 0; i < num_kevents; ++i) { + struct kevent *kevent = &kevents[i]; + + /* Was this event to signal that cross_thread_data has changed? */ + if ((int)kevent->ident == impl->cross_thread_signal_pipe[READ_FD]) { + should_process_cross_thread_data = true; + + /* Drain whatever data was written to the signaling pipe */ + uint32_t read_whatever; + while (read((int)kevent->ident, &read_whatever, sizeof(read_whatever)) > 0) { + } + + continue; + } + + /* Otherwise this was a normal event on a subscribed handle. Figure out which flags to report. 
*/
+            int event_flags = s_aws_event_flags_from_kevent(kevent);
+            if (event_flags == 0) {
+                continue;
+            }
+
+            /* Combine flags, in case multiple kevents correspond to one handle. (see notes at top of function) */
+            struct handle_data *handle_data = kevent->udata;
+            if (handle_data->events_this_loop == 0) {
+                io_handle_events[num_io_handle_events++] = handle_data;
+            }
+            handle_data->events_this_loop |= event_flags;
+        }
+
+        /* Invoke each handle's event callback (unless the handle has been unsubscribed) */
+        for (int i = 0; i < num_io_handle_events; ++i) {
+            struct handle_data *handle_data = io_handle_events[i];
+
+            if (handle_data->state == HANDLE_STATE_SUBSCRIBED) {
+                AWS_LOGF_TRACE(
+                    AWS_LS_IO_EVENT_LOOP,
+                    "id=%p: activity on fd %d, invoking handler.",
+                    (void *)event_loop,
+                    handle_data->owner->data.fd);
+                handle_data->on_event(
+                    event_loop, handle_data->owner, handle_data->events_this_loop, handle_data->on_event_user_data);
+
+                // AWS_LOGF_INFO(
+                //     AWS_LS_IO_EVENT_LOOP,
+                //     "id=%p: on_event completion status: read: status %d (%s), %lu bytes; "
+                //     "write: status %d (%s), %lu bytes",
+                //     (void *)event_loop,
+                //     io_op_result.read_error_code,
+                //     aws_error_str(io_op_result.read_error_code),
+                //     io_op_result.read_bytes,
+                //     io_op_result.write_error_code,
+                //     aws_error_str(io_op_result.write_error_code),
+                //     io_op_result.written_bytes);
+            }
+
+            handle_data->events_this_loop = 0;
+        }
+
+        /* Process cross_thread_data */
+        if (should_process_cross_thread_data) {
+            s_process_cross_thread_data(event_loop);
+        }
+
+        /* Run scheduled tasks */
+        uint64_t now_ns = 0;
+        event_loop->clock(&now_ns); /* If clock fails, now_ns will be 0 and tasks scheduled for a specific time
+                                       will not be run. That's ok, we'll handle them next time around. */
+        AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: running scheduled tasks.", (void *)event_loop);
+        aws_task_scheduler_run_all(&impl->thread_data.scheduler, now_ns);
+
+        /* Set timeout for next kevent() call.
+         * If clock fails, or scheduler has no tasks, use default timeout */
+        bool use_default_timeout = false;
+
+        int err = event_loop->clock(&now_ns);
+        if (err) {
+            use_default_timeout = true;
+        }
+
+        uint64_t next_run_time_ns;
+        if (!aws_task_scheduler_has_tasks(&impl->thread_data.scheduler, &next_run_time_ns)) {
+
+            use_default_timeout = true;
+        }
+
+        if (use_default_timeout) {
+            AWS_LOGF_TRACE(
+                AWS_LS_IO_EVENT_LOOP, "id=%p: no more scheduled tasks using default timeout.", (void *)event_loop);
+            timeout.tv_sec = DEFAULT_TIMEOUT_SEC;
+            timeout.tv_nsec = 0;
+        } else {
+            /* Convert from timestamp in nanoseconds, to timeout in seconds with nanosecond remainder */
+            uint64_t timeout_ns = next_run_time_ns > now_ns ? next_run_time_ns - now_ns : 0;
+
+            uint64_t timeout_remainder_ns = 0;
+            uint64_t timeout_sec =
+                aws_timestamp_convert(timeout_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &timeout_remainder_ns);
+
+            if (timeout_sec > LONG_MAX) { /* Check for overflow.
On Darwin, these values are stored as longs */ + timeout_sec = LONG_MAX; + timeout_remainder_ns = 0; + } + + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p: detected more scheduled tasks with the next occurring at " + "%llu using timeout of %ds %lluns.", + (void *)event_loop, + (unsigned long long)timeout_ns, + (int)timeout_sec, + (unsigned long long)timeout_remainder_ns); + timeout.tv_sec = (time_t)(timeout_sec); + timeout.tv_nsec = (long)(timeout_remainder_ns); + } + + aws_event_loop_register_tick_end(event_loop); + } + + AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: exiting main loop", (void *)event_loop); + /* reset to NULL. This should be updated again during destroy before tasks are canceled. */ + aws_atomic_store_ptr(&impl->running_thread_id, NULL); +} diff --git a/source/qnx/pipe.c b/source/qnx/pipe.c new file mode 100644 index 000000000..6beca8355 --- /dev/null +++ b/source/qnx/pipe.c @@ -0,0 +1,596 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include + +#include + +#ifdef __GLIBC__ +# define __USE_GNU +#endif + +/* TODO: move this detection to CMAKE and a config header */ +#if !defined(COMPAT_MODE) && defined(__GLIBC__) && ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 9) || __GLIBC__ > 2) +# define HAVE_PIPE2 1 +#else +# define HAVE_PIPE2 0 +#endif + +#include +#include +#include + +/* This isn't defined on ancient linux distros (breaking the builds). + * However, if this is a prebuild, we purposely build on an ancient system, but + * we want the kernel calls to still be the same as a modern build since that's likely the target of the application + * calling this code. Just define this if it isn't there already. GlibC and the kernel don't really care how the flag + * gets passed as long as it does. + */ +#ifndef O_CLOEXEC +# define O_CLOEXEC 02000000 +#endif + +struct read_end_impl { + struct aws_allocator *alloc; + struct aws_io_handle handle; + struct aws_event_loop *event_loop; + aws_pipe_on_readable_fn *on_readable_user_callback; + void *on_readable_user_data; + + /* Used in handshake for detecting whether user callback resulted in read-end being cleaned up. + * If clean_up() sees that the pointer is set, the bool it points to will get set true. */ + bool *did_user_callback_clean_up_read_end; + + bool is_subscribed; +}; + +struct pipe_write_request { + struct aws_byte_cursor original_cursor; + struct aws_byte_cursor cursor; /* tracks progress of write */ + size_t num_bytes_written; + aws_pipe_on_write_completed_fn *user_callback; + void *user_data; + struct aws_linked_list_node list_node; + + /* True if the write-end is cleaned up while the user callback is being invoked */ + bool did_user_callback_clean_up_write_end; +}; + +struct write_end_impl { + struct aws_allocator *alloc; + struct aws_io_handle handle; + struct aws_event_loop *event_loop; + struct aws_linked_list write_list; + + /* Valid while invoking user callback on a completed write request. 
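+     * (This lets the code detect the write-end being cleaned up from inside its own completion
+     * callback; see did_user_callback_clean_up_write_end on pipe_write_request.)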
+struct write_end_impl {
+    struct aws_allocator *alloc;
+    struct aws_io_handle handle;
+    struct aws_event_loop *event_loop;
+    struct aws_linked_list write_list;
+
+    /* Valid while invoking user callback on a completed write request. */
+    struct pipe_write_request *currently_invoking_write_callback;
+
+    bool is_writable;
+
+    /* Future optimization idea: avoid an allocation on each write by keeping 1 pre-allocated pipe_write_request around
+     * and re-using it whenever possible */
+};
+
+static void s_write_end_on_event(
+    struct aws_event_loop *event_loop,
+    struct aws_io_handle *handle,
+    int events,
+    void *user_data);
+
+static int s_translate_posix_error(int err) {
+    AWS_ASSERT(err);
+
+    switch (err) {
+        case EPIPE:
+            return AWS_IO_BROKEN_PIPE;
+        default:
+            return AWS_ERROR_SYS_CALL_FAILURE;
+    }
+}
+
+static int s_raise_posix_error(int err) {
+    return aws_raise_error(s_translate_posix_error(err));
+}
+
+AWS_IO_API int aws_open_nonblocking_posix_pipe(int pipe_fds[2]) {
+    int err;
+
+#if HAVE_PIPE2
+    err = pipe2(pipe_fds, O_NONBLOCK | O_CLOEXEC);
+    if (err) {
+        return s_raise_posix_error(errno); /* report the errno value, not pipe2()'s -1 */
+    }
+
+    return AWS_OP_SUCCESS;
+#else
+    err = pipe(pipe_fds);
+    if (err) {
+        return s_raise_posix_error(errno);
+    }
+
+    for (int i = 0; i < 2; ++i) {
+        int flags = fcntl(pipe_fds[i], F_GETFL);
+        if (flags == -1) {
+            s_raise_posix_error(errno);
+            goto error;
+        }
+
+        flags |= O_NONBLOCK | O_CLOEXEC;
+        if (fcntl(pipe_fds[i], F_SETFL, flags) == -1) {
+            s_raise_posix_error(errno);
+            goto error;
+        }
+    }
+
+    return AWS_OP_SUCCESS;
+error:
+    close(pipe_fds[0]);
+    close(pipe_fds[1]);
+    return AWS_OP_ERR;
+#endif
+}
+
+int aws_pipe_init(
+    struct aws_pipe_read_end *read_end,
+    struct aws_event_loop *read_end_event_loop,
+    struct aws_pipe_write_end *write_end,
+    struct aws_event_loop *write_end_event_loop,
+    struct aws_allocator *allocator) {
+
+    AWS_ASSERT(read_end);
+    AWS_ASSERT(read_end_event_loop);
+    AWS_ASSERT(write_end);
+    AWS_ASSERT(write_end_event_loop);
+    AWS_ASSERT(allocator);
+
+    AWS_ZERO_STRUCT(*read_end);
+    AWS_ZERO_STRUCT(*write_end);
+
+    struct read_end_impl *read_impl = NULL;
+    struct write_end_impl *write_impl = NULL;
+    int err;
+
+    /* Open pipe */
+    int pipe_fds[2];
+    err = aws_open_nonblocking_posix_pipe(pipe_fds);
+    if (err) {
+        return AWS_OP_ERR;
+    }
+
+    /* Init read-end */
+    read_impl = aws_mem_calloc(allocator, 1, sizeof(struct read_end_impl));
+    if (!read_impl) {
+        goto error;
+    }
+
+    read_impl->alloc = allocator;
+    read_impl->handle.data.fd = pipe_fds[0];
+    read_impl->event_loop = read_end_event_loop;
+
+    /* Init write-end */
+    write_impl = aws_mem_calloc(allocator, 1, sizeof(struct write_end_impl));
+    if (!write_impl) {
+        goto error;
+    }
+
+    write_impl->alloc = allocator;
+    write_impl->handle.data.fd = pipe_fds[1];
+    write_impl->event_loop = write_end_event_loop;
+    write_impl->is_writable = true; /* Assume pipe is writable to start.
Even if it's not, things shouldn't break */ + aws_linked_list_init(&write_impl->write_list); + + read_end->impl_data = read_impl; + write_end->impl_data = write_impl; + + err = aws_event_loop_subscribe_to_io_events( + write_end_event_loop, &write_impl->handle, AWS_IO_EVENT_TYPE_WRITABLE, s_write_end_on_event, write_end); + if (err) { + goto error; + } + + return AWS_OP_SUCCESS; + +error: + close(pipe_fds[0]); + close(pipe_fds[1]); + + if (read_impl) { + aws_mem_release(allocator, read_impl); + } + + if (write_impl) { + aws_mem_release(allocator, write_impl); + } + + read_end->impl_data = NULL; + write_end->impl_data = NULL; + + return AWS_OP_ERR; +} + +int aws_pipe_clean_up_read_end(struct aws_pipe_read_end *read_end) { + struct read_end_impl *read_impl = read_end->impl_data; + if (!read_impl) { + return aws_raise_error(AWS_IO_BROKEN_PIPE); + } + + if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { + return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); + } + + if (read_impl->is_subscribed) { + int err = aws_pipe_unsubscribe_from_readable_events(read_end); + if (err) { + return AWS_OP_ERR; + } + } + + /* If the event-handler is invoking a user callback, let it know that the read-end was cleaned up */ + if (read_impl->did_user_callback_clean_up_read_end) { + *read_impl->did_user_callback_clean_up_read_end = true; + } + + close(read_impl->handle.data.fd); + + aws_mem_release(read_impl->alloc, read_impl); + AWS_ZERO_STRUCT(*read_end); + return AWS_OP_SUCCESS; +} + +struct aws_event_loop *aws_pipe_get_read_end_event_loop(const struct aws_pipe_read_end *read_end) { + const struct read_end_impl *read_impl = read_end->impl_data; + if (!read_impl) { + aws_raise_error(AWS_IO_BROKEN_PIPE); + return NULL; + } + + return read_impl->event_loop; +} + +struct aws_event_loop *aws_pipe_get_write_end_event_loop(const struct aws_pipe_write_end *write_end) { + const struct write_end_impl *write_impl = write_end->impl_data; + if (!write_impl) { + aws_raise_error(AWS_IO_BROKEN_PIPE); + return NULL; + } + + return write_impl->event_loop; +} + +int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_buffer, size_t *num_bytes_read) { + AWS_ASSERT(dst_buffer && dst_buffer->buffer); + + struct read_end_impl *read_impl = read_end->impl_data; + if (!read_impl) { + return aws_raise_error(AWS_IO_BROKEN_PIPE); + } + + if (num_bytes_read) { + *num_bytes_read = 0; + } + + size_t num_bytes_to_read = dst_buffer->capacity - dst_buffer->len; + + ssize_t read_val = read(read_impl->handle.data.fd, dst_buffer->buffer + dst_buffer->len, num_bytes_to_read); + + if (read_val < 0) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { + return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); + } + return s_raise_posix_error(errno_value); + } + + /* Success */ + dst_buffer->len += read_val; + + if (num_bytes_read) { + *num_bytes_read = read_val; + } + + return AWS_OP_SUCCESS; +} + +static void s_read_end_on_event( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + int events, + void *user_data) { + + (void)event_loop; + (void)handle; + + /* Note that it should be impossible for this to run after read-end has been unsubscribed or cleaned up */ + struct aws_pipe_read_end *read_end = user_data; + struct read_end_impl *read_impl = read_end->impl_data; + AWS_ASSERT(read_impl); + AWS_ASSERT(read_impl->event_loop == event_loop); + AWS_ASSERT(&read_impl->handle == handle); + 
AWS_ASSERT(read_impl->is_subscribed); + AWS_ASSERT(events != 0); + AWS_ASSERT(read_impl->did_user_callback_clean_up_read_end == NULL); + + /* Set up handshake, so we can be informed if the read-end is cleaned up while invoking a user callback */ + bool did_user_callback_clean_up_read_end = false; + read_impl->did_user_callback_clean_up_read_end = &did_user_callback_clean_up_read_end; + + /* If readable event received, tell user to try and read, even if "error" events have also occurred. */ + if (events & AWS_IO_EVENT_TYPE_READABLE) { + read_impl->on_readable_user_callback(read_end, AWS_ERROR_SUCCESS, read_impl->on_readable_user_data); + + if (did_user_callback_clean_up_read_end) { + return; + } + + events &= ~AWS_IO_EVENT_TYPE_READABLE; + } + + if (events) { + /* Check that user didn't unsubscribe in the previous callback */ + if (read_impl->is_subscribed) { + read_impl->on_readable_user_callback(read_end, AWS_IO_BROKEN_PIPE, read_impl->on_readable_user_data); + + if (did_user_callback_clean_up_read_end) { + return; + } + } + } + + read_impl->did_user_callback_clean_up_read_end = NULL; +} + +int aws_pipe_subscribe_to_readable_events( + struct aws_pipe_read_end *read_end, + aws_pipe_on_readable_fn *on_readable, + void *user_data) { + + AWS_ASSERT(on_readable); + + struct read_end_impl *read_impl = read_end->impl_data; + if (!read_impl) { + return aws_raise_error(AWS_IO_BROKEN_PIPE); + } + + if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { + return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); + } + + if (read_impl->is_subscribed) { + return aws_raise_error(AWS_ERROR_IO_ALREADY_SUBSCRIBED); + } + + read_impl->is_subscribed = true; + read_impl->on_readable_user_callback = on_readable; + read_impl->on_readable_user_data = user_data; + + int err = aws_event_loop_subscribe_to_io_events( + read_impl->event_loop, &read_impl->handle, AWS_IO_EVENT_TYPE_READABLE, s_read_end_on_event, read_end); + if (err) { + read_impl->is_subscribed = false; + read_impl->on_readable_user_callback = NULL; + read_impl->on_readable_user_data = NULL; + + return AWS_OP_ERR; + } + + return AWS_OP_SUCCESS; +} + +int aws_pipe_unsubscribe_from_readable_events(struct aws_pipe_read_end *read_end) { + struct read_end_impl *read_impl = read_end->impl_data; + if (!read_impl) { + return aws_raise_error(AWS_IO_BROKEN_PIPE); + } + + if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { + return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); + } + + if (!read_impl->is_subscribed) { + return aws_raise_error(AWS_ERROR_IO_NOT_SUBSCRIBED); + } + + int err = aws_event_loop_unsubscribe_from_io_events(read_impl->event_loop, &read_impl->handle); + if (err) { + return AWS_OP_ERR; + } + + read_impl->is_subscribed = false; + read_impl->on_readable_user_callback = NULL; + read_impl->on_readable_user_data = NULL; + + return AWS_OP_SUCCESS; +} + +/* Pop front write request, invoke its callback, and delete it. 
+ * Returns whether the callback resulted in the write-end getting cleaned up */ +static bool s_write_end_complete_front_write_request(struct aws_pipe_write_end *write_end, int error_code) { + struct write_end_impl *write_impl = write_end->impl_data; + + AWS_ASSERT(!aws_linked_list_empty(&write_impl->write_list)); + struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list); + struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); + + struct aws_allocator *alloc = write_impl->alloc; + + /* Let the write-end know that a callback is in process, so the write-end can inform the callback + * whether it resulted in clean_up() being called. */ + bool write_end_cleaned_up_during_callback = false; + struct pipe_write_request *prev_invoking_request = write_impl->currently_invoking_write_callback; + write_impl->currently_invoking_write_callback = request; + + if (request->user_callback) { + request->user_callback(write_end, error_code, request->original_cursor, request->user_data); + write_end_cleaned_up_during_callback = request->did_user_callback_clean_up_write_end; + } + + if (!write_end_cleaned_up_during_callback) { + write_impl->currently_invoking_write_callback = prev_invoking_request; + } + + aws_mem_release(alloc, request); + + return write_end_cleaned_up_during_callback; +} + +/* Process write requests as long as the pipe remains writable */ +static void s_write_end_process_requests( + struct aws_pipe_write_end *write_end) { + struct write_end_impl *write_impl = write_end->impl_data; + AWS_ASSERT(write_impl); + AWS_ASSERT(write_impl->handle.update_io_result); + + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + + while (!aws_linked_list_empty(&write_impl->write_list)) { + struct aws_linked_list_node *node = aws_linked_list_front(&write_impl->write_list); + struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); + + int completed_error_code = AWS_ERROR_SUCCESS; + + if (request->cursor.len > 0) { + ssize_t write_val = write(write_impl->handle.data.fd, request->cursor.ptr, request->cursor.len); + + if (write_val < 0) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { + /* The pipe is no longer writable. Bail out */ + write_impl->is_writable = false; + io_op_result.write_error_code = errno_value; + write_impl->handle.update_io_result(write_impl->event_loop, &write_impl->handle, &io_op_result); + return; + } + + /* A non-recoverable error occurred during this write */ + completed_error_code = s_translate_posix_error(errno_value); + + } else { + aws_byte_cursor_advance(&request->cursor, write_val); + + io_op_result.written_bytes += (size_t)write_val; + + if (request->cursor.len > 0) { + /* There was a partial write, loop again to try and write the rest. */ + continue; + } + } + } + + /* If we got this far in the loop, then the write request is complete. + * Note that the callback may result in the pipe being cleaned up. */ + // TODO Call update. + bool write_end_cleaned_up = s_write_end_complete_front_write_request(write_end, completed_error_code); + if (write_end_cleaned_up) { + /* Bail out! 
Any remaining requests were canceled during clean_up() */
+            return;
+        }
+    }
+}
+
+/* Handle events on the write-end's file handle */
+static void s_write_end_on_event(
+    struct aws_event_loop *event_loop,
+    struct aws_io_handle *handle,
+    int events,
+    void *user_data) {
+
+    (void)event_loop;
+    (void)handle;
+
+    /* Note that it should be impossible for this to run after write-end has been unsubscribed or cleaned up */
+    struct aws_pipe_write_end *write_end = user_data;
+    struct write_end_impl *write_impl = write_end->impl_data;
+    AWS_ASSERT(write_impl);
+    AWS_ASSERT(write_impl->event_loop == event_loop);
+    AWS_ASSERT(&write_impl->handle == handle);
+
+    /* Only care about the writable event. */
+    if ((events & AWS_IO_EVENT_TYPE_WRITABLE) == 0) {
+        return;
+    }
+
+    write_impl->is_writable = true;
+
+    s_write_end_process_requests(write_end);
+}
+
+int aws_pipe_write(
+    struct aws_pipe_write_end *write_end,
+    struct aws_byte_cursor src_buffer,
+    aws_pipe_on_write_completed_fn *on_completed,
+    void *user_data) {
+
+    AWS_ASSERT(src_buffer.ptr);
+
+    struct write_end_impl *write_impl = write_end->impl_data;
+    if (!write_impl) {
+        return aws_raise_error(AWS_IO_BROKEN_PIPE);
+    }
+
+    if (!aws_event_loop_thread_is_callers_thread(write_impl->event_loop)) {
+        return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY);
+    }
+
+    struct pipe_write_request *request = aws_mem_calloc(write_impl->alloc, 1, sizeof(struct pipe_write_request));
+    if (!request) {
+        return AWS_OP_ERR;
+    }
+
+    request->original_cursor = src_buffer;
+    request->cursor = src_buffer;
+    request->user_callback = on_completed;
+    request->user_data = user_data;
+
+    aws_linked_list_push_back(&write_impl->write_list, &request->list_node);
+
+    /* If the pipe is writable, process the request (unless pipe is already in the middle of processing, which could
+     * happen if this aws_pipe_write() call was made by another write's completion callback) */
+    if (write_impl->is_writable && !write_impl->currently_invoking_write_callback) {
+        s_write_end_process_requests(write_end);
+    }
+
+    return AWS_OP_SUCCESS;
+}
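+
+/* A minimal usage sketch for the write path (hypothetical caller code, not part of this file;
+ * must run on the write-end's event-loop thread):
+ *
+ *     static void s_on_write_done(
+ *         struct aws_pipe_write_end *write_end, int error_code, struct aws_byte_cursor src, void *user_data) {
+ *         // error_code is AWS_ERROR_SUCCESS, or e.g. AWS_IO_BROKEN_PIPE if the write-end was cleaned up
+ *     }
+ *
+ *     struct aws_byte_cursor msg = aws_byte_cursor_from_c_str("hello");
+ *     aws_pipe_write(&write_end, msg, s_on_write_done, NULL);
+ */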
+int aws_pipe_clean_up_write_end(struct aws_pipe_write_end *write_end) {
+    struct write_end_impl *write_impl = write_end->impl_data;
+    if (!write_impl) {
+        return aws_raise_error(AWS_IO_BROKEN_PIPE);
+    }
+
+    if (!aws_event_loop_thread_is_callers_thread(write_impl->event_loop)) {
+        return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY);
+    }
+
+    int err = aws_event_loop_unsubscribe_from_io_events(write_impl->event_loop, &write_impl->handle);
+    if (err) {
+        return AWS_OP_ERR;
+    }
+
+    close(write_impl->handle.data.fd);
+
+    /* Zero out write-end before invoking user callbacks so that it won't work anymore with public functions. */
+    AWS_ZERO_STRUCT(*write_end);
+
+    /* If a request callback is currently being invoked, let it know that the write-end was cleaned up */
+    if (write_impl->currently_invoking_write_callback) {
+        write_impl->currently_invoking_write_callback->did_user_callback_clean_up_write_end = true;
+    }
+
+    /* Force any outstanding write requests to complete with an error status. */
+    while (!aws_linked_list_empty(&write_impl->write_list)) {
+        struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list);
+        struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node);
+        if (request->user_callback) {
+            request->user_callback(NULL, AWS_IO_BROKEN_PIPE, request->original_cursor, request->user_data);
+        }
+        aws_mem_release(write_impl->alloc, request);
+    }
+
+    aws_mem_release(write_impl->alloc, write_impl);
+    return AWS_OP_SUCCESS;
+}
diff --git a/source/qnx/shared_library.c b/source/qnx/shared_library.c
new file mode 100644
index 000000000..751c99bc2
--- /dev/null
+++ b/source/qnx/shared_library.c
@@ -0,0 +1,66 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/io/shared_library.h>
+
+#include <aws/io/logging.h>
+
+#include <dlfcn.h>
+
+static const char *s_null = "<NULL>";
+static const char *s_unknown_error = "<Unknown>";
+
+int aws_shared_library_init(struct aws_shared_library *library, const char *library_path) {
+    AWS_ZERO_STRUCT(*library);
+
+    library->library_handle = dlopen(library_path, RTLD_LAZY);
+    if (library->library_handle == NULL) {
+        const char *error = dlerror();
+        AWS_LOGF_ERROR(
+            AWS_LS_IO_SHARED_LIBRARY,
+            "id=%p: Failed to load shared library at path \"%s\" with error: %s",
+            (void *)library,
+            library_path ? library_path : s_null,
+            error ? error : s_unknown_error);
+        return aws_raise_error(AWS_IO_SHARED_LIBRARY_LOAD_FAILURE);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+void aws_shared_library_clean_up(struct aws_shared_library *library) {
+    if (library && library->library_handle) {
+        dlclose(library->library_handle);
+        library->library_handle = NULL;
+    }
+}
+
+int aws_shared_library_find_function(
+    struct aws_shared_library *library,
+    const char *symbol_name,
+    aws_generic_function *function_address) {
+    if (library == NULL || library->library_handle == NULL) {
+        return aws_raise_error(AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE);
+    }
+
+    /*
+     * Suggested work around for (undefined behavior) cast from void * to function pointer
+     * in POSIX.1-2003 standard, at least according to dlsym man page code sample.
+     */
+    *(void **)(function_address) = dlsym(library->library_handle, symbol_name);
+
+    if (*function_address == NULL) {
+        const char *error = dlerror();
+        AWS_LOGF_ERROR(
+            AWS_LS_IO_SHARED_LIBRARY,
+            "id=%p: Failed to find shared library symbol \"%s\" with error: %s",
+            (void *)library,
+            symbol_name ? symbol_name : s_null,
+            error ? error : s_unknown_error);
+        return aws_raise_error(AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE);
+    }
+
+    return AWS_OP_SUCCESS;
+}
diff --git a/source/qnx/socket.c b/source/qnx/socket.c
new file mode 100644
index 000000000..a3d59f946
--- /dev/null
+++ b/source/qnx/socket.c
@@ -0,0 +1,2058 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/io/socket.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/condition_variable.h>
+#include <aws/common/mutex.h>
+#include <aws/common/string.h>
+#include <aws/common/uuid.h>
+
+#include <aws/io/event_loop.h>
+#include <aws/io/logging.h>
+
+#include <arpa/inet.h>
+#include <aws/io/io.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <netinet/tcp.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+/*
+ * On OsX, suppress NoPipe signals via flags to setsockopt()
+ * On Linux, suppress NoPipe signals via flags to send()
+ */
+#if defined(__MACH__)
+#    define NO_SIGNAL_SOCK_OPT SO_NOSIGPIPE
+#    define NO_SIGNAL_SEND 0
+#    define TCP_KEEPIDLE TCP_KEEPALIVE
+#else
+#    define NO_SIGNAL_SEND MSG_NOSIGNAL
+#endif
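+
+/* NO_SIGNAL_SEND exists so that writes to a closed peer surface as an EPIPE errno rather than a
+ * fatal SIGPIPE. A sketch of the intended call shape (the actual send path sits further down in
+ * this file, past the portion quoted in this patch excerpt):
+ *
+ *     ssize_t written = send(socket->io_handle.data.fd, cursor->ptr, cursor->len, NO_SIGNAL_SEND);
+ */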
+/* This isn't defined on ancient linux distros (breaking the builds).
+ * However, if this is a prebuild, we purposely build on an ancient system, but
+ * we want the kernel calls to still be the same as a modern build since that's likely the target of the application
+ * calling this code. Just define this if it isn't there already. GlibC and the kernel don't really care how the flag
+ * gets passed as long as it does.
+ */
+#ifndef O_CLOEXEC
+#    define O_CLOEXEC 02000000
+#endif
+
+#ifdef USE_VSOCK
+#    if defined(__linux__) && defined(AF_VSOCK)
+#        include <linux/vm_sockets.h>
+#    else
+#        error "USE_VSOCK not supported on current platform"
+#    endif
+#endif
+
+/* other than CONNECTED_READ | CONNECTED_WRITE
+ * a socket is only in one of these states at a time. */
+enum socket_state {
+    INIT = 0x01,
+    CONNECTING = 0x02,
+    CONNECTED_READ = 0x04,
+    CONNECTED_WRITE = 0x08,
+    BOUND = 0x10,
+    LISTENING = 0x20,
+    TIMEDOUT = 0x40,
+    ERROR = 0x80,
+    CLOSED,
+};
+
+static int s_convert_domain(enum aws_socket_domain domain) {
+    switch (domain) {
+        case AWS_SOCKET_IPV4:
+            return AF_INET;
+        case AWS_SOCKET_IPV6:
+            return AF_INET6;
+        case AWS_SOCKET_LOCAL:
+            return AF_UNIX;
+#ifdef USE_VSOCK
+        case AWS_SOCKET_VSOCK:
+            return AF_VSOCK;
+#endif
+        default:
+            AWS_ASSERT(0);
+            return AF_INET;
+    }
+}
+
+static int s_convert_type(enum aws_socket_type type) {
+    switch (type) {
+        case AWS_SOCKET_STREAM:
+            return SOCK_STREAM;
+        case AWS_SOCKET_DGRAM:
+            return SOCK_DGRAM;
+        default:
+            AWS_ASSERT(0);
+            return SOCK_STREAM;
+    }
+}
+
+static int s_determine_socket_error(int error) {
+    switch (error) {
+        case ECONNREFUSED:
+            return AWS_IO_SOCKET_CONNECTION_REFUSED;
+        case ECONNRESET:
+            return AWS_IO_SOCKET_CLOSED;
+        case ETIMEDOUT:
+            return AWS_IO_SOCKET_TIMEOUT;
+        case EHOSTUNREACH:
+        case ENETUNREACH:
+            return AWS_IO_SOCKET_NO_ROUTE_TO_HOST;
+        case EADDRNOTAVAIL:
+            return AWS_IO_SOCKET_INVALID_ADDRESS;
+        case ENETDOWN:
+            return AWS_IO_SOCKET_NETWORK_DOWN;
+        case ECONNABORTED:
+            return AWS_IO_SOCKET_CONNECT_ABORTED;
+        case EADDRINUSE:
+            return AWS_IO_SOCKET_ADDRESS_IN_USE;
+        case ENOBUFS:
+        case ENOMEM:
+            return AWS_ERROR_OOM;
+        case EAGAIN:
+            return AWS_IO_READ_WOULD_BLOCK;
+        case EMFILE:
+        case ENFILE:
+            return AWS_ERROR_MAX_FDS_EXCEEDED;
+        case ENOENT:
+        case EINVAL:
+            return AWS_ERROR_FILE_INVALID_PATH;
+        case EAFNOSUPPORT:
+            return AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY;
+        case EACCES:
+            return AWS_ERROR_NO_PERMISSION;
+        default:
+            return AWS_IO_SOCKET_NOT_CONNECTED;
+    }
+}
+
+static int s_create_socket(struct aws_socket *sock, const struct aws_socket_options *options) {
+
+    int fd = socket(s_convert_domain(options->domain), s_convert_type(options->type), 0);
+    int errno_value = errno; /* Always cache errno before potential side-effect */
+
+    AWS_LOGF_DEBUG(
+        AWS_LS_IO_SOCKET,
+        "id=%p fd=%d: initializing with domain %d and type %d",
+        (void *)sock,
+        fd,
+        options->domain,
+        options->type);
+    if (fd != -1) {
+        int flags = fcntl(fd, F_GETFL, 0);
+        flags |= O_NONBLOCK | O_CLOEXEC;
+        int success = fcntl(fd, F_SETFL, flags);
+        (void)success;
+        sock->io_handle.data.fd = fd;
+        sock->io_handle.additional_data = NULL;
+        return aws_socket_set_options(sock, options);
+    }
+
+    int aws_error = s_determine_socket_error(errno_value);
+    return aws_raise_error(aws_error);
+}
+
+struct posix_socket_connect_args {
+    struct aws_task task;
+    struct aws_allocator *allocator;
+    struct aws_socket *socket;
+};
+
+struct posix_socket {
+    struct aws_linked_list write_queue;
+    struct aws_linked_list written_queue;
+    struct aws_task written_task;
+    struct posix_socket_connect_args *connect_args;
+    /* Note that only the
posix_socket impl part is refcounted. + * The public aws_socket can be a stack variable and cleaned up synchronously + * (by blocking until the event-loop cleans up the impl part). + * In hindsight, aws_socket should have been heap-allocated and refcounted, but alas */ + struct aws_ref_count internal_refcount; + struct aws_allocator *allocator; + bool written_task_scheduled; + bool currently_subscribed; + bool continue_accept; + bool *close_happened; +}; + +static void s_socket_destroy_impl(void *user_data) { + struct posix_socket *socket_impl = user_data; + aws_mem_release(socket_impl->allocator, socket_impl); +} + +static int s_socket_init( + struct aws_socket *socket, + struct aws_allocator *alloc, + const struct aws_socket_options *options, + int existing_socket_fd) { + AWS_ASSERT(options); + AWS_ZERO_STRUCT(*socket); + + struct posix_socket *posix_socket = aws_mem_calloc(alloc, 1, sizeof(struct posix_socket)); + if (!posix_socket) { + socket->impl = NULL; + return AWS_OP_ERR; + } + + socket->allocator = alloc; + socket->io_handle.data.fd = -1; + socket->state = INIT; + socket->options = *options; + + if (existing_socket_fd < 0) { + int err = s_create_socket(socket, options); + if (err) { + aws_mem_release(alloc, posix_socket); + socket->impl = NULL; + return AWS_OP_ERR; + } + } else { + socket->io_handle = (struct aws_io_handle){ + .data = {.fd = existing_socket_fd}, + .additional_data = NULL, + }; + aws_socket_set_options(socket, options); + } + + aws_linked_list_init(&posix_socket->write_queue); + aws_linked_list_init(&posix_socket->written_queue); + posix_socket->currently_subscribed = false; + posix_socket->continue_accept = false; + aws_ref_count_init(&posix_socket->internal_refcount, posix_socket, s_socket_destroy_impl); + posix_socket->allocator = alloc; + posix_socket->connect_args = NULL; + posix_socket->close_happened = NULL; + socket->impl = posix_socket; + return AWS_OP_SUCCESS; +} + +int aws_socket_init(struct aws_socket *socket, struct aws_allocator *alloc, const struct aws_socket_options *options) { + AWS_ASSERT(options); + return s_socket_init(socket, alloc, options, -1); +} + +void aws_socket_clean_up(struct aws_socket *socket) { + if (!socket->impl) { + /* protect from double clean */ + return; + } + + int fd_for_logging = socket->io_handle.data.fd; /* socket's fd gets reset before final log */ + (void)fd_for_logging; + + if (aws_socket_is_open(socket)) { + AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: is still open, closing...", (void *)socket, fd_for_logging); + aws_socket_close(socket); + } + struct posix_socket *socket_impl = socket->impl; + + if (aws_ref_count_release(&socket_impl->internal_refcount) != 0) { + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: is still pending io letting it dangle and cleaning up later.", + (void *)socket, + fd_for_logging); + } + + AWS_ZERO_STRUCT(*socket); + socket->io_handle.data.fd = -1; +} + +/* Update socket->local_endpoint based on the results of getsockname() */ +static int s_update_local_endpoint(struct aws_socket *socket) { + struct aws_socket_endpoint tmp_endpoint; + AWS_ZERO_STRUCT(tmp_endpoint); + + struct sockaddr_storage address; + AWS_ZERO_STRUCT(address); + socklen_t address_size = sizeof(address); + + if (getsockname(socket->io_handle.data.fd, (struct sockaddr *)&address, &address_size) != 0) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: getsockname() failed with error %d", + (void *)socket, + socket->io_handle.data.fd, 
+ errno_value); + int aws_error = s_determine_socket_error(errno_value); + return aws_raise_error(aws_error); + } + + if (address.ss_family == AF_INET) { + struct sockaddr_in *s = (struct sockaddr_in *)&address; + tmp_endpoint.port = ntohs(s->sin_port); + if (inet_ntop(AF_INET, &s->sin_addr, tmp_endpoint.address, sizeof(tmp_endpoint.address)) == NULL) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: inet_ntop() failed with error %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + int aws_error = s_determine_socket_error(errno_value); + return aws_raise_error(aws_error); + } + } else if (address.ss_family == AF_INET6) { + struct sockaddr_in6 *s = (struct sockaddr_in6 *)&address; + tmp_endpoint.port = ntohs(s->sin6_port); + if (inet_ntop(AF_INET6, &s->sin6_addr, tmp_endpoint.address, sizeof(tmp_endpoint.address)) == NULL) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: inet_ntop() failed with error %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + int aws_error = s_determine_socket_error(errno_value); + return aws_raise_error(aws_error); + } + } else if (address.ss_family == AF_UNIX) { + struct sockaddr_un *s = (struct sockaddr_un *)&address; + + /* Ensure there's a null-terminator. + * On some platforms it may be missing when the path gets very long. See: + * https://man7.org/linux/man-pages/man7/unix.7.html#BUGS + * But let's keep it simple, and not deal with that madness until someone demands it. */ + size_t sun_len; + if (aws_secure_strlen(s->sun_path, sizeof(tmp_endpoint.address), &sun_len)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: UNIX domain socket name is too long", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); + } + memcpy(tmp_endpoint.address, s->sun_path, sun_len); +#if USE_VSOCK + } else if (address.ss_family == AF_VSOCK) { + struct sockaddr_vm *s = (struct sockaddr_vm *)&address; + + tmp_endpoint.port = s->svm_port; + + snprintf(tmp_endpoint.address, sizeof(tmp_endpoint.address), "%" PRIu32, s->svm_cid); + return AWS_OP_SUCCESS; +#endif /* USE_VSOCK */ + } else { + AWS_ASSERT(0); + return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); + } + + socket->local_endpoint = tmp_endpoint; + return AWS_OP_SUCCESS; +} + +static void s_on_connection_error(struct aws_socket *socket, int error); + +static int s_on_connection_success(struct aws_socket *socket) { + + struct aws_event_loop *event_loop = socket->event_loop; + struct posix_socket *socket_impl = socket->impl; + + if (socket_impl->currently_subscribed) { + aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); + socket_impl->currently_subscribed = false; + } + + socket->event_loop = NULL; + + int connect_result; + socklen_t result_length = sizeof(connect_result); + + if (getsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_ERROR, &connect_result, &result_length) < 0) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: failed to determine connection error %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + int aws_error = s_determine_socket_error(errno_value); + aws_raise_error(aws_error); + s_on_connection_error(socket, aws_error); + return AWS_OP_ERR; + } + + if (connect_result) { + AWS_LOGF_DEBUG( + 
AWS_LS_IO_SOCKET,
+            "id=%p fd=%d: connection error %d",
+            (void *)socket,
+            socket->io_handle.data.fd,
+            connect_result);
+        int aws_error = s_determine_socket_error(connect_result);
+        aws_raise_error(aws_error);
+        s_on_connection_error(socket, aws_error);
+        return AWS_OP_ERR;
+    }
+
+    AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: connection success", (void *)socket, socket->io_handle.data.fd);
+
+    if (s_update_local_endpoint(socket)) {
+        s_on_connection_error(socket, aws_last_error());
+        return AWS_OP_ERR;
+    }
+
+    socket->state = CONNECTED_WRITE | CONNECTED_READ;
+
+    if (aws_socket_assign_to_event_loop(socket, event_loop)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_IO_SOCKET,
+            "id=%p fd=%d: assignment to event loop %p failed with error %d",
+            (void *)socket,
+            socket->io_handle.data.fd,
+            (void *)event_loop,
+            aws_last_error());
+        s_on_connection_error(socket, aws_last_error());
+        return AWS_OP_ERR;
+    }
+
+    socket->connection_result_fn(socket, AWS_ERROR_SUCCESS, socket->connect_accept_user_data);
+
+    return AWS_OP_SUCCESS;
+}
+
+static void s_on_connection_error(struct aws_socket *socket, int error) {
+    socket->state = ERROR;
+    AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: connection failure", (void *)socket, socket->io_handle.data.fd);
+    if (socket->connection_result_fn) {
+        socket->connection_result_fn(socket, error, socket->connect_accept_user_data);
+    } else if (socket->accept_result_fn) {
+        socket->accept_result_fn(socket, error, NULL, socket->connect_accept_user_data);
+    }
+}
+
+/* the next two callbacks compete based on which one runs first. if s_socket_connect_event
+ * comes back first, then we set socket_args->socket = NULL and continue on with the connection.
+ * if s_handle_socket_timeout() runs first, it sees socket_args->socket is NULL and just cleans up its memory.
+ * s_handle_socket_timeout() will always run so the memory for socket_connect_args is always cleaned up there. */
+static void s_socket_connect_event(
+    struct aws_event_loop *event_loop,
+    struct aws_io_handle *handle,
+    int events,
+    void *user_data) {
+
+    (void)event_loop;
+    (void)handle;
+
+    AWS_ASSERT(handle->update_io_result);
+    AWS_LOGF_TRACE(
+        AWS_LS_IO_SOCKET, "fd=%d: update I/O results callback: %p", handle->data.fd, (void *)handle->update_io_result);
+
+    struct posix_socket_connect_args *socket_args = (struct posix_socket_connect_args *)user_data;
+    AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "fd=%d: connection activity handler triggered ", handle->data.fd);
+
+    if (socket_args->socket) {
+        AWS_LOGF_TRACE(
+            AWS_LS_IO_SOCKET,
+            "id=%p fd=%d: has not timed out yet proceeding with connection.",
+            (void *)socket_args->socket,
+            handle->data.fd);
+
+        struct posix_socket *socket_impl = socket_args->socket->impl;
+        if (!(events & AWS_IO_EVENT_TYPE_ERROR || events & AWS_IO_EVENT_TYPE_CLOSED) &&
+            (events & AWS_IO_EVENT_TYPE_READABLE || events & AWS_IO_EVENT_TYPE_WRITABLE)) {
+            struct aws_socket *socket = socket_args->socket;
+            socket_args->socket = NULL;
+            socket_impl->connect_args = NULL;
+            s_on_connection_success(socket);
+            struct aws_io_handle_io_op_result io_op_result;
+            memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result));
+            // TODO Update?
+            return;
+        }
+
+        int aws_error = aws_socket_get_error(socket_args->socket);
+        /* we'll get another notification. */
+        if (aws_error == AWS_IO_READ_WOULD_BLOCK) {
+            AWS_LOGF_TRACE(
+                AWS_LS_IO_SOCKET,
+                "id=%p fd=%d: spurious event, waiting for another notification.",
+                (void *)socket_args->socket,
+                handle->data.fd);
+            struct aws_io_handle_io_op_result io_op_result;
+            memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result));
+            io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK;
+            // handle->update_io_result(event_loop, handle, &io_op_result);
+            return;
+        }
+
+        struct aws_socket *socket = socket_args->socket;
+        socket_args->socket = NULL;
+        socket_impl->connect_args = NULL;
+        aws_raise_error(aws_error);
+        s_on_connection_error(socket, aws_error);
+    }
+
+    struct aws_io_handle_io_op_result io_op_result;
+    memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result));
+    // TODO Update?
+}
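+
+/* Shape of a non-blocking connect, for orientation (both pieces are armed by aws_socket_connect()
+ * further down in this file):
+ *
+ *     connect() returns -1/EINPROGRESS
+ *         -> subscribe the fd for AWS_IO_EVENT_TYPE_WRITABLE    (s_socket_connect_event, above)
+ *         -> schedule a task at now + connect_timeout_ms        (s_handle_socket_timeout, below)
+ *
+ * Whichever of the two runs first nulls out socket_args->socket, so the one that runs second
+ * only cleans up. */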
+
+static void s_handle_socket_timeout(struct aws_task *task, void *args, aws_task_status status) {
+    (void)task;
+
+    struct posix_socket_connect_args *socket_args = args;
+
+    AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "task_id=%p: timeout task triggered, evaluating timeouts.", (void *)task);
+    /* successful connection will have nulled out connect_args->socket */
+    if (socket_args->socket) {
+        AWS_LOGF_ERROR(
+            AWS_LS_IO_SOCKET,
+            "id=%p fd=%d: timed out, shutting down.",
+            (void *)socket_args->socket,
+            socket_args->socket->io_handle.data.fd);
+
+        socket_args->socket->state = TIMEDOUT;
+        int error_code = AWS_IO_SOCKET_TIMEOUT;
+
+        if (status == AWS_TASK_STATUS_RUN_READY) {
+            aws_event_loop_unsubscribe_from_io_events(socket_args->socket->event_loop, &socket_args->socket->io_handle);
+        } else {
+            error_code = AWS_IO_EVENT_LOOP_SHUTDOWN;
+            aws_event_loop_free_io_event_resources(socket_args->socket->event_loop, &socket_args->socket->io_handle);
+        }
+        socket_args->socket->event_loop = NULL;
+        struct posix_socket *socket_impl = socket_args->socket->impl;
+        socket_impl->currently_subscribed = false;
+        aws_raise_error(error_code);
+        struct aws_socket *socket = socket_args->socket;
+        /* socket close sets socket_args->socket to NULL and
+         * socket_impl->connect_args to NULL. */
+        aws_socket_close(socket);
+        s_on_connection_error(socket, error_code);
+    }
+
+    aws_mem_release(socket_args->allocator, socket_args);
+}
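+
+/* Note on aws_task_status in the connect-related tasks here: AWS_TASK_STATUS_RUN_READY means the
+ * event loop is live, so it is safe to touch fd subscriptions; any other status means the loop
+ * is shutting down, and only resource cleanup (free_io_event_resources) is appropriate. */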
+/* this is used simply for moving a connect_success callback when the connect finished immediately
+ * (like for unix domain sockets) into the event loop's thread. Also note, in that case there was no
+ * timeout task scheduled, so in this case the socket_args are cleaned up. */
+static void s_run_connect_success(struct aws_task *task, void *arg, enum aws_task_status status) {
+    (void)task;
+    struct posix_socket_connect_args *socket_args = arg;
+
+    if (socket_args->socket) {
+        struct posix_socket *socket_impl = socket_args->socket->impl;
+        if (status == AWS_TASK_STATUS_RUN_READY) {
+            s_on_connection_success(socket_args->socket);
+        } else {
+            aws_raise_error(AWS_IO_SOCKET_CONNECT_ABORTED);
+            socket_args->socket->event_loop = NULL;
+            s_on_connection_error(socket_args->socket, AWS_IO_SOCKET_CONNECT_ABORTED);
+        }
+        socket_impl->connect_args = NULL;
+    }
+
+    aws_mem_release(socket_args->allocator, socket_args);
+}
+
+static inline int s_convert_pton_error(int pton_code, int errno_value) {
+    if (pton_code == 0) {
+        return AWS_IO_SOCKET_INVALID_ADDRESS;
+    }
+
+    return s_determine_socket_error(errno_value);
+}
+
+struct socket_address {
+    union sock_addr_types {
+        struct sockaddr_in addr_in;
+        struct sockaddr_in6 addr_in6;
+        struct sockaddr_un un_addr;
+#ifdef USE_VSOCK
+        struct sockaddr_vm vm_addr;
+#endif
+    } sock_addr_types;
+};
+
+#ifdef USE_VSOCK
+/** Convert a string to a VSOCK CID. Respects the calling convention of inet_pton:
+ * 0 on error, 1 on success. */
+static int parse_cid(const char *cid_str, unsigned int *value) {
+    if (cid_str == NULL || value == NULL) {
+        errno = EINVAL;
+        return 0;
+    }
+    /* strtoll returns 0 as both error and correct value */
+    errno = 0;
+    /* long long to handle edge cases in convention explicitly */
+    long long cid = strtoll(cid_str, NULL, 10);
+    if (errno != 0) {
+        return 0;
+    }
+
+    /* -1U means any, so it's a valid value, but it needs to be converted to
+     * unsigned int. */
+    if (cid == -1) {
+        *value = VMADDR_CID_ANY;
+        return 1;
+    }
+
+    if (cid < 0 || cid > UINT_MAX) {
+        errno = ERANGE;
+        return 0;
+    }
+
+    /* cast is safe here, edge cases already checked */
+    *value = (unsigned int)cid;
+    return 1;
+}
+#endif
+
+int aws_socket_connect(
+    struct aws_socket *socket,
+    const struct aws_socket_endpoint *remote_endpoint,
+    struct aws_event_loop *event_loop,
+    aws_socket_on_connection_result_fn *on_connection_result,
+    void *user_data) {
+    AWS_ASSERT(event_loop);
+    AWS_ASSERT(!socket->event_loop);
+
+    AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: beginning connect.", (void *)socket, socket->io_handle.data.fd);
+
+    if (socket->event_loop) {
+        return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED);
+    }
+
+    if (socket->options.type != AWS_SOCKET_DGRAM) {
+        AWS_ASSERT(on_connection_result);
+        if (socket->state != INIT) {
+            return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE);
+        }
+    } else { /* UDP socket */
+        /* UDP sockets jump to CONNECT_READ if bind is called first */
+        if (socket->state != CONNECTED_READ && socket->state != INIT) {
+            return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE);
+        }
+    }
+
+    size_t address_strlen;
+    if (aws_secure_strlen(remote_endpoint->address, AWS_ADDRESS_MAX_LEN, &address_strlen)) {
+        return AWS_OP_ERR;
+    }
+
+    if (aws_socket_validate_port_for_connect(remote_endpoint->port, socket->options.domain)) {
+        return AWS_OP_ERR;
+    }
+
+    struct socket_address address;
+    AWS_ZERO_STRUCT(address);
+    socklen_t sock_size = 0;
+    int pton_err = 1;
+    if (socket->options.domain == AWS_SOCKET_IPV4) {
+        pton_err = inet_pton(AF_INET, remote_endpoint->address, &address.sock_addr_types.addr_in.sin_addr);
+        address.sock_addr_types.addr_in.sin_port = htons((uint16_t)remote_endpoint->port);
+        address.sock_addr_types.addr_in.sin_family = AF_INET;
+        sock_size =
sizeof(address.sock_addr_types.addr_in); + } else if (socket->options.domain == AWS_SOCKET_IPV6) { + pton_err = inet_pton(AF_INET6, remote_endpoint->address, &address.sock_addr_types.addr_in6.sin6_addr); + address.sock_addr_types.addr_in6.sin6_port = htons((uint16_t)remote_endpoint->port); + address.sock_addr_types.addr_in6.sin6_family = AF_INET6; + sock_size = sizeof(address.sock_addr_types.addr_in6); + } else if (socket->options.domain == AWS_SOCKET_LOCAL) { + address.sock_addr_types.un_addr.sun_family = AF_UNIX; + strncpy(address.sock_addr_types.un_addr.sun_path, remote_endpoint->address, AWS_ADDRESS_MAX_LEN); + sock_size = sizeof(address.sock_addr_types.un_addr); +#ifdef USE_VSOCK + } else if (socket->options.domain == AWS_SOCKET_VSOCK) { + pton_err = parse_cid(remote_endpoint->address, &address.sock_addr_types.vm_addr.svm_cid); + address.sock_addr_types.vm_addr.svm_family = AF_VSOCK; + address.sock_addr_types.vm_addr.svm_port = remote_endpoint->port; + sock_size = sizeof(address.sock_addr_types.vm_addr); +#endif + } else { + AWS_ASSERT(0); + return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); + } + + if (pton_err != 1) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: failed to parse address %s:%u.", + (void *)socket, + socket->io_handle.data.fd, + remote_endpoint->address, + remote_endpoint->port); + return aws_raise_error(s_convert_pton_error(pton_err, errno_value)); + } + + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: connecting to endpoint %s:%u.", + (void *)socket, + socket->io_handle.data.fd, + remote_endpoint->address, + remote_endpoint->port); + + socket->state = CONNECTING; + socket->remote_endpoint = *remote_endpoint; + socket->connect_accept_user_data = user_data; + socket->connection_result_fn = on_connection_result; + + struct posix_socket *socket_impl = socket->impl; + + socket_impl->connect_args = aws_mem_calloc(socket->allocator, 1, sizeof(struct posix_socket_connect_args)); + if (!socket_impl->connect_args) { + return AWS_OP_ERR; + } + + socket_impl->connect_args->socket = socket; + socket_impl->connect_args->allocator = socket->allocator; + + socket_impl->connect_args->task.fn = s_handle_socket_timeout; + socket_impl->connect_args->task.arg = socket_impl->connect_args; + + int error_code = connect(socket->io_handle.data.fd, (struct sockaddr *)&address.sock_addr_types, sock_size); + socket->event_loop = event_loop; + + if (!error_code) { + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: connected immediately, not scheduling timeout.", + (void *)socket, + socket->io_handle.data.fd); + socket_impl->connect_args->task.fn = s_run_connect_success; + /* the subscription for IO will happen once we setup the connection in the task. Since we already + * know the connection succeeded, we don't need to register for events yet. 
*/ + aws_event_loop_schedule_task_now(event_loop, &socket_impl->connect_args->task); + } + + if (error_code) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + if (errno_value == EINPROGRESS || errno_value == EALREADY) { + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: connection pending waiting on event-loop notification or timeout.", + (void *)socket, + socket->io_handle.data.fd); + /* cache the timeout task; it is possible for the IO subscription to come back virtually immediately + * and null out the connect args */ + struct aws_task *timeout_task = &socket_impl->connect_args->task; + + socket_impl->currently_subscribed = true; + /* This event is for when the connection finishes. (the fd will flip writable). */ + if (aws_event_loop_subscribe_to_io_events( + event_loop, + &socket->io_handle, + AWS_IO_EVENT_TYPE_WRITABLE, + s_socket_connect_event, + socket_impl->connect_args)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: failed to register with event-loop %p.", + (void *)socket, + socket->io_handle.data.fd, + (void *)event_loop); + socket_impl->currently_subscribed = false; + socket->event_loop = NULL; + goto err_clean_up; + } + + /* schedule a task to run at the connect timeout interval, if this task runs before the connect + * happens, we consider that a timeout. */ + uint64_t timeout = 0; + aws_event_loop_current_clock_time(event_loop, &timeout); + timeout += aws_timestamp_convert( + socket->options.connect_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: scheduling timeout task for %llu.", + (void *)socket, + socket->io_handle.data.fd, + (unsigned long long)timeout); + aws_event_loop_schedule_task_future(event_loop, timeout_task, timeout); + } else { + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: connect failed with error code %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + int aws_error = s_determine_socket_error(errno_value); + aws_raise_error(aws_error); + socket->event_loop = NULL; + socket_impl->currently_subscribed = false; + goto err_clean_up; + } + } + return AWS_OP_SUCCESS; + +err_clean_up: + aws_mem_release(socket->allocator, socket_impl->connect_args); + socket_impl->connect_args = NULL; + return AWS_OP_ERR; +} + +int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint) { + if (socket->state != INIT) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: invalid state for bind operation.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + + size_t address_strlen; + if (aws_secure_strlen(local_endpoint->address, AWS_ADDRESS_MAX_LEN, &address_strlen)) { + return AWS_OP_ERR; + } + + if (aws_socket_validate_port_for_bind(local_endpoint->port, socket->options.domain)) { + return AWS_OP_ERR; + } + + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: binding to %s:%u.", + (void *)socket, + socket->io_handle.data.fd, + local_endpoint->address, + local_endpoint->port); + + struct socket_address address; + AWS_ZERO_STRUCT(address); + socklen_t sock_size = 0; + int pton_err = 1; + if (socket->options.domain == AWS_SOCKET_IPV4) { + pton_err = inet_pton(AF_INET, local_endpoint->address, &address.sock_addr_types.addr_in.sin_addr); + address.sock_addr_types.addr_in.sin_port = htons((uint16_t)local_endpoint->port); + address.sock_addr_types.addr_in.sin_family = AF_INET; + sock_size = sizeof(address.sock_addr_types.addr_in); + } else 
if (socket->options.domain == AWS_SOCKET_IPV6) { + pton_err = inet_pton(AF_INET6, local_endpoint->address, &address.sock_addr_types.addr_in6.sin6_addr); + address.sock_addr_types.addr_in6.sin6_port = htons((uint16_t)local_endpoint->port); + address.sock_addr_types.addr_in6.sin6_family = AF_INET6; + sock_size = sizeof(address.sock_addr_types.addr_in6); + } else if (socket->options.domain == AWS_SOCKET_LOCAL) { + address.sock_addr_types.un_addr.sun_family = AF_UNIX; + strncpy(address.sock_addr_types.un_addr.sun_path, local_endpoint->address, AWS_ADDRESS_MAX_LEN); + sock_size = sizeof(address.sock_addr_types.un_addr); +#ifdef USE_VSOCK + } else if (socket->options.domain == AWS_SOCKET_VSOCK) { + pton_err = parse_cid(local_endpoint->address, &address.sock_addr_types.vm_addr.svm_cid); + address.sock_addr_types.vm_addr.svm_family = AF_VSOCK; + address.sock_addr_types.vm_addr.svm_port = local_endpoint->port; + sock_size = sizeof(address.sock_addr_types.vm_addr); +#endif + } else { + AWS_ASSERT(0); + return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); + } + + if (pton_err != 1) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: failed to parse address %s:%u.", + (void *)socket, + socket->io_handle.data.fd, + local_endpoint->address, + local_endpoint->port); + return aws_raise_error(s_convert_pton_error(pton_err, errno_value)); + } + + if (bind(socket->io_handle.data.fd, (struct sockaddr *)&address.sock_addr_types, sock_size) != 0) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: bind failed with error code %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + + aws_raise_error(s_determine_socket_error(errno_value)); + goto error; + } + + if (s_update_local_endpoint(socket)) { + goto error; + } + + if (socket->options.type == AWS_SOCKET_STREAM) { + socket->state = BOUND; + } else { + /* e.g. UDP is now readable */ + socket->state = CONNECTED_READ; + } + + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: successfully bound to %s:%u", + (void *)socket, + socket->io_handle.data.fd, + socket->local_endpoint.address, + socket->local_endpoint.port); + + return AWS_OP_SUCCESS; + +error: + socket->state = ERROR; + return AWS_OP_ERR; +} + +int aws_socket_get_bound_address(const struct aws_socket *socket, struct aws_socket_endpoint *out_address) { + if (socket->local_endpoint.address[0] == 0) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: Socket has no local address. Socket must be bound first.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + *out_address = socket->local_endpoint; + return AWS_OP_SUCCESS; +} + +int aws_socket_listen(struct aws_socket *socket, int backlog_size) { + if (socket->state != BOUND) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: invalid state for listen operation. 
You must call bind first.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + + int error_code = listen(socket->io_handle.data.fd, backlog_size); + + if (!error_code) { + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, "id=%p fd=%d: successfully listening", (void *)socket, socket->io_handle.data.fd); + socket->state = LISTENING; + return AWS_OP_SUCCESS; + } + + int errno_value = errno; /* Always cache errno before potential side-effect */ + + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: listen failed with error code %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + + socket->state = ERROR; + + return aws_raise_error(s_determine_socket_error(errno_value)); +} + +/* this is called by the event loop handler that was installed in start_accept(). It runs once the FD goes readable, + * accepts as many as it can and then returns control to the event loop. */ +static void s_socket_accept_event( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + int events, + void *user_data) { + + (void)event_loop; + + AWS_ASSERT(handle->update_io_result); + + struct aws_socket *socket = user_data; + struct posix_socket *socket_impl = socket->impl; + + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: listening event received: %p", + (void *)socket, + socket->io_handle.data.fd, + (void *)handle->update_io_result); + + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + + if (socket_impl->continue_accept && events & AWS_IO_EVENT_TYPE_READABLE) { + int in_fd = 0; + while (socket_impl->continue_accept && in_fd != -1) { + struct sockaddr_storage in_addr; + socklen_t in_len = sizeof(struct sockaddr_storage); + + in_fd = accept(handle->data.fd, (struct sockaddr *)&in_addr, &in_len); + if (in_fd == -1) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + + if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { + io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; + break; + } + + int aws_error = aws_socket_get_error(socket); + aws_raise_error(aws_error); + s_on_connection_error(socket, aws_error); + io_op_result.read_error_code = aws_error; + break; + } + + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, "id=%p fd=%d: incoming connection", (void *)socket, socket->io_handle.data.fd); + + struct aws_socket *new_sock = aws_mem_acquire(socket->allocator, sizeof(struct aws_socket)); + + if (!new_sock) { + close(in_fd); + s_on_connection_error(socket, aws_last_error()); + continue; + } + + if (s_socket_init(new_sock, socket->allocator, &socket->options, in_fd)) { + aws_mem_release(socket->allocator, new_sock); + s_on_connection_error(socket, aws_last_error()); + continue; + } + + new_sock->local_endpoint = socket->local_endpoint; + new_sock->state = CONNECTED_READ | CONNECTED_WRITE; + uint32_t port = 0; + + /* get the info on the incoming socket's address */ + if (in_addr.ss_family == AF_INET) { + struct sockaddr_in *s = (struct sockaddr_in *)&in_addr; + port = ntohs(s->sin_port); + /* this came from the kernel, a.) it won't fail. b.) even if it does + * its not fatal. come back and add logging later. */ + if (!inet_ntop( + AF_INET, + &s->sin_addr, + new_sock->remote_endpoint.address, + sizeof(new_sock->remote_endpoint.address))) { + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d:. 
Failed to determine remote address.", + (void *)socket, + socket->io_handle.data.fd); + } + new_sock->options.domain = AWS_SOCKET_IPV4; + } else if (in_addr.ss_family == AF_INET6) { + /* this came from the kernel, a.) it won't fail. b.) even if it does + * its not fatal. come back and add logging later. */ + struct sockaddr_in6 *s = (struct sockaddr_in6 *)&in_addr; + port = ntohs(s->sin6_port); + if (!inet_ntop( + AF_INET6, + &s->sin6_addr, + new_sock->remote_endpoint.address, + sizeof(new_sock->remote_endpoint.address))) { + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d:. Failed to determine remote address.", + (void *)socket, + socket->io_handle.data.fd); + } + new_sock->options.domain = AWS_SOCKET_IPV6; + } else if (in_addr.ss_family == AF_UNIX) { + new_sock->remote_endpoint = socket->local_endpoint; + new_sock->options.domain = AWS_SOCKET_LOCAL; + } + + new_sock->remote_endpoint.port = port; + + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: connected to %s:%d, incoming fd %d", + (void *)socket, + socket->io_handle.data.fd, + new_sock->remote_endpoint.address, + new_sock->remote_endpoint.port, + in_fd); + + int flags = fcntl(in_fd, F_GETFL, 0); + + flags |= O_NONBLOCK | O_CLOEXEC; + fcntl(in_fd, F_SETFL, flags); + + bool close_occurred = false; + socket_impl->close_happened = &close_occurred; + socket->accept_result_fn(socket, AWS_ERROR_SUCCESS, new_sock, socket->connect_accept_user_data); + + if (close_occurred) { + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: trying to update: %p", + (void *)socket, + socket->io_handle.data.fd, + (void *)handle->update_io_result); + return; + } + + socket_impl->close_happened = NULL; + } + } + + handle->update_io_result(event_loop, handle, &io_op_result); + + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: finished processing incoming connections, " + "waiting on event-loop notification", + (void *)socket, + socket->io_handle.data.fd); +} + +int aws_socket_start_accept( + struct aws_socket *socket, + struct aws_event_loop *accept_loop, + aws_socket_on_accept_result_fn *on_accept_result, + void *user_data) { + AWS_ASSERT(on_accept_result); + AWS_ASSERT(accept_loop); + + if (socket->event_loop) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: is already assigned to event-loop %p.", + (void *)socket, + socket->io_handle.data.fd, + (void *)socket->event_loop); + return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); + } + + if (socket->state != LISTENING) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: invalid state for start_accept operation. 
You must call listen first.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + + socket->accept_result_fn = on_accept_result; + socket->connect_accept_user_data = user_data; + socket->event_loop = accept_loop; + struct posix_socket *socket_impl = socket->impl; + socket_impl->continue_accept = true; + socket_impl->currently_subscribed = true; + + if (aws_event_loop_subscribe_to_io_events( + socket->event_loop, &socket->io_handle, AWS_IO_EVENT_TYPE_READABLE, s_socket_accept_event, socket)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: failed to subscribe to event-loop %p.", + (void *)socket, + socket->io_handle.data.fd, + (void *)socket->event_loop); + socket_impl->continue_accept = false; + socket_impl->currently_subscribed = false; + socket->event_loop = NULL; + + return AWS_OP_ERR; + } + + return AWS_OP_SUCCESS; +} + +struct stop_accept_args { + struct aws_task task; + struct aws_mutex mutex; + struct aws_condition_variable condition_variable; + struct aws_socket *socket; + int ret_code; + bool invoked; +}; + +static bool s_stop_accept_pred(void *arg) { + struct stop_accept_args *stop_accept_args = arg; + return stop_accept_args->invoked; +} + +static void s_stop_accept_task(struct aws_task *task, void *arg, enum aws_task_status status) { + (void)task; + (void)status; + + struct stop_accept_args *stop_accept_args = arg; + aws_mutex_lock(&stop_accept_args->mutex); + stop_accept_args->ret_code = AWS_OP_SUCCESS; + if (aws_socket_stop_accept(stop_accept_args->socket)) { + stop_accept_args->ret_code = aws_last_error(); + } + stop_accept_args->invoked = true; + aws_condition_variable_notify_one(&stop_accept_args->condition_variable); + aws_mutex_unlock(&stop_accept_args->mutex); +} + +int aws_socket_stop_accept(struct aws_socket *socket) { + if (socket->state != LISTENING) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: is not in a listening state, can't stop_accept.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, "id=%p fd=%d: stopping accepting new connections", (void *)socket, socket->io_handle.data.fd); + + if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { + struct stop_accept_args args = { + .mutex = AWS_MUTEX_INIT, + .condition_variable = AWS_CONDITION_VARIABLE_INIT, + .invoked = false, + .socket = socket, + .ret_code = AWS_OP_SUCCESS, + .task = {.fn = s_stop_accept_task}, + }; + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: stopping accepting new connections from a different thread than " + "the socket is running from. Blocking until it shuts down.", + (void *)socket, + socket->io_handle.data.fd); + /* Look.... I know what I'm doing.... trust me, I'm an engineer. + * We wait on the completion before 'args' goes out of scope. 
 +int aws_socket_stop_accept(struct aws_socket *socket) { + if (socket->state != LISTENING) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: is not in a listening state, can't stop_accept.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, "id=%p fd=%d: stopping accepting new connections", (void *)socket, socket->io_handle.data.fd); + + if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { + struct stop_accept_args args = { + .mutex = AWS_MUTEX_INIT, + .condition_variable = AWS_CONDITION_VARIABLE_INIT, + .invoked = false, + .socket = socket, + .ret_code = AWS_OP_SUCCESS, + .task = {.fn = s_stop_accept_task}, + }; + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: stopping accepting new connections from a different thread than " + "the socket is running from. Blocking until it shuts down.", + (void *)socket, + socket->io_handle.data.fd); + /* Look.... I know what I'm doing.... trust me, I'm an engineer. + * We wait on the completion before 'args' goes out of scope. + * NOLINTNEXTLINE */ + args.task.arg = &args; + aws_mutex_lock(&args.mutex); + aws_event_loop_schedule_task_now(socket->event_loop, &args.task); + aws_condition_variable_wait_pred(&args.condition_variable, &args.mutex, s_stop_accept_pred, &args); + aws_mutex_unlock(&args.mutex); + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: stop accept task finished running.", + (void *)socket, + socket->io_handle.data.fd); + + if (args.ret_code) { + return aws_raise_error(args.ret_code); + } + return AWS_OP_SUCCESS; + } + + int ret_val = AWS_OP_SUCCESS; + struct posix_socket *socket_impl = socket->impl; + if (socket_impl->currently_subscribed) { + ret_val = aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); + socket_impl->currently_subscribed = false; + socket_impl->continue_accept = false; + socket->event_loop = NULL; + } + + return ret_val; +} + +int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_options *options) { + if (socket->options.domain != options->domain || socket->options.type != options->type) { + return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); + } + + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setting socket options to: keep-alive %d, keep idle %d, keep-alive interval %d, keep-alive probe " + "count %d.", + (void *)socket, + socket->io_handle.data.fd, + (int)options->keepalive, + (int)options->keep_alive_timeout_sec, + (int)options->keep_alive_interval_sec, + (int)options->keep_alive_max_failed_probes); + + socket->options = *options; + +#ifdef NO_SIGNAL_SOCK_OPT + int option_value = 1; + if (AWS_UNLIKELY(setsockopt( + socket->io_handle.data.fd, SOL_SOCKET, NO_SIGNAL_SOCK_OPT, &option_value, sizeof(option_value)))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() for NO_SIGNAL_SOCK_OPT failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + } +#endif /* NO_SIGNAL_SOCK_OPT */ + + int reuse = 1; + if (AWS_UNLIKELY(setsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(int)))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() for SO_REUSEADDR failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + } + size_t network_interface_length = 0; + if (aws_secure_strlen(options->network_interface_name, AWS_NETWORK_INTERFACE_NAME_MAX, &network_interface_length)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: network_interface_name must be null-terminated and at most %d bytes long", + (void *)socket, + socket->io_handle.data.fd, + AWS_NETWORK_INTERFACE_NAME_MAX); + return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); + } + if (network_interface_length != 0) { +#if defined(SO_BINDTODEVICE) + if (setsockopt( + socket->io_handle.data.fd, + SOL_SOCKET, + SO_BINDTODEVICE, + options->network_interface_name, + network_interface_length)) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() with SO_BINDTODEVICE for \"%s\" failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + options->network_interface_name, + errno_value); + return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); + } +#elif defined(IP_BOUND_IF) + /* + * If SO_BINDTODEVICE is not supported, the alternative is IP_BOUND_IF which requires an index instead + * of a 
name. We are not using this everywhere because it requires two system calls instead of one, and it + * depends on the socket type, so it cannot be used with AWS_SOCKET_LOCAL. As a future optimization, we can + * look into caching the result of if_nametoindex. + */ + unsigned int network_interface_index = if_nametoindex(options->network_interface_name); + if (network_interface_index == 0) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: network_interface_name \"%s\" not found. if_nametoindex() failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + options->network_interface_name, + errno_value); + return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); + } + if (options->domain == AWS_SOCKET_IPV6) { + if (setsockopt( + socket->io_handle.data.fd, + IPPROTO_IPV6, + IPV6_BOUND_IF, + &network_interface_index, + sizeof(network_interface_index))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() with IPV6_BOUND_IF for \"%s\" failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + options->network_interface_name, + errno_value); + return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); + } + } else if (setsockopt( + socket->io_handle.data.fd, + IPPROTO_IP, + IP_BOUND_IF, + &network_interface_index, + sizeof(network_interface_index))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() with IP_BOUND_IF for \"%s\" failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + options->network_interface_name, + errno_value); + return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); + } +#else + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: network_interface_name is not supported on this platform.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +#endif + } + if (options->type == AWS_SOCKET_STREAM && options->domain != AWS_SOCKET_LOCAL) { + if (socket->options.keepalive) { + int keep_alive = 1; + if (AWS_UNLIKELY( + setsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_KEEPALIVE, &keep_alive, sizeof(int)))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() for enabling SO_KEEPALIVE failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + } + } + +#if !defined(__OpenBSD__) + if (socket->options.keep_alive_interval_sec && socket->options.keep_alive_timeout_sec) { + int ival_in_secs = socket->options.keep_alive_interval_sec; + if (AWS_UNLIKELY(setsockopt( + socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPIDLE, &ival_in_secs, sizeof(ival_in_secs)))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() for enabling TCP_KEEPIDLE for TCP failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + } + + ival_in_secs = socket->options.keep_alive_timeout_sec; + if (AWS_UNLIKELY(setsockopt( + socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPINTVL, &ival_in_secs, sizeof(ival_in_secs)))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() for enabling TCP_KEEPINTVL for TCP failed with 
errno %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + } + } + + if (socket->options.keep_alive_max_failed_probes) { + int max_probes = socket->options.keep_alive_max_failed_probes; + if (AWS_UNLIKELY( + setsockopt(socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPCNT, &max_probes, sizeof(max_probes)))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() for enabling TCP_KEEPCNT for TCP failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + } + } +#endif /* __OpenBSD__ */ + } + + return AWS_OP_SUCCESS; +} + +struct socket_write_request { + struct aws_byte_cursor cursor_cpy; + aws_socket_on_write_completed_fn *written_fn; + void *write_user_data; + struct aws_linked_list_node node; + size_t original_buffer_len; + int error_code; +}; + +struct posix_socket_close_args { + struct aws_mutex mutex; + struct aws_condition_variable condition_variable; + struct aws_socket *socket; + bool invoked; + int ret_code; +}; + +static bool s_close_predicate(void *arg) { + struct posix_socket_close_args *close_args = arg; + return close_args->invoked; +} + +static void s_close_task(struct aws_task *task, void *arg, enum aws_task_status status) { + (void)task; + (void)status; + + struct posix_socket_close_args *close_args = arg; + aws_mutex_lock(&close_args->mutex); + close_args->ret_code = AWS_OP_SUCCESS; + + if (aws_socket_close(close_args->socket)) { + close_args->ret_code = aws_last_error(); + } + + close_args->invoked = true; + aws_condition_variable_notify_one(&close_args->condition_variable); + aws_mutex_unlock(&close_args->mutex); +} + +int aws_socket_close(struct aws_socket *socket) { + struct posix_socket *socket_impl = socket->impl; + AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: closing", (void *)socket, socket->io_handle.data.fd); + struct aws_event_loop *event_loop = socket->event_loop; + if (socket->event_loop) { + /* don't freak out on me, this almost never happens, and never occurs inside a channel + * it only gets hit from a listening socket shutting down or from a unit test. */ + if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: closing from a different thread than " + "the socket is running from. 
Blocking until it closes down.", + (void *)socket, + socket->io_handle.data.fd); + /* the only time we allow this kind of thing is when you're a listener.*/ + if (socket->state != LISTENING) { + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + + struct posix_socket_close_args args = { + .mutex = AWS_MUTEX_INIT, + .condition_variable = AWS_CONDITION_VARIABLE_INIT, + .socket = socket, + .ret_code = AWS_OP_SUCCESS, + .invoked = false, + }; + + struct aws_task close_task = { + .fn = s_close_task, + .arg = &args, + }; + + int fd_for_logging = socket->io_handle.data.fd; /* socket's fd gets reset before final log */ + (void)fd_for_logging; + + aws_mutex_lock(&args.mutex); + aws_event_loop_schedule_task_now(socket->event_loop, &close_task); + aws_condition_variable_wait_pred(&args.condition_variable, &args.mutex, s_close_predicate, &args); + aws_mutex_unlock(&args.mutex); + AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: close task completed.", (void *)socket, fd_for_logging); + if (args.ret_code) { + return aws_raise_error(args.ret_code); + } + + return AWS_OP_SUCCESS; + } + + if (socket_impl->currently_subscribed) { + if (socket->state & LISTENING) { + aws_socket_stop_accept(socket); + } else { + int err_code = aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); + + if (err_code) { + return AWS_OP_ERR; + } + } + socket_impl->currently_subscribed = false; + socket->event_loop = NULL; + } + } + + if (socket_impl->close_happened) { + *socket_impl->close_happened = true; + } + + if (socket_impl->connect_args) { + socket_impl->connect_args->socket = NULL; + socket_impl->connect_args = NULL; + } + + if (aws_socket_is_open(socket)) { + close(socket->io_handle.data.fd); + socket->io_handle.data.fd = -1; + socket->state = CLOSED; + + /* ensure callbacks for pending writes fire (in order) before this close function returns */ + + if (socket_impl->written_task_scheduled) { + aws_event_loop_cancel_task(event_loop, &socket_impl->written_task); + } + + while (!aws_linked_list_empty(&socket_impl->written_queue)) { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue); + struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); + size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; + write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data); + aws_mem_release(socket->allocator, write_request); + } + + while (!aws_linked_list_empty(&socket_impl->write_queue)) { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->write_queue); + struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); + size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; + write_request->written_fn(socket, AWS_IO_SOCKET_CLOSED, bytes_written, write_request->write_user_data); + aws_mem_release(socket->allocator, write_request); + } + } + + return AWS_OP_SUCCESS; +} +
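The drain loops in aws_socket_close() guarantee that completion callbacks for every queued write run, in order, before close returns. A hedged sketch of what a caller on the socket's event-loop thread can therefore rely on; s_on_written and write_then_close are illustrative names, not part of this patch:

/* Sketch (assumes both calls happen on the socket's event-loop thread):
 * a write queued before close() sees its completion callback, possibly with
 * AWS_IO_SOCKET_CLOSED, before aws_socket_close() returns. */
static void s_on_written(struct aws_socket *socket, int error_code, size_t bytes_written, void *user_data) {
    (void)socket;
    (void)bytes_written;
    (void)error_code; /* AWS_ERROR_SUCCESS if sent, AWS_IO_SOCKET_CLOSED if still queued at close */
    bool *completed = user_data;
    *completed = true;
}

static void write_then_close(struct aws_socket *socket, struct aws_byte_cursor data) {
    bool completed = false;
    if (aws_socket_write(socket, &data, s_on_written, &completed) == AWS_OP_SUCCESS) {
        aws_socket_close(socket);
        AWS_ASSERT(completed); /* drained by the close path shown above */
    }
}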
+int aws_socket_shutdown_dir(struct aws_socket *socket, enum aws_channel_direction dir) { + int how = dir == AWS_CHANNEL_DIR_READ ? SHUT_RD : SHUT_WR; + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, "id=%p fd=%d: shutting down in direction %d", (void *)socket, socket->io_handle.data.fd, dir); + if (shutdown(socket->io_handle.data.fd, how)) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + int aws_error = s_determine_socket_error(errno_value); + return aws_raise_error(aws_error); + } + + if (dir == AWS_CHANNEL_DIR_READ) { + socket->state &= ~CONNECTED_READ; + } else { + socket->state &= ~CONNECTED_WRITE; + } + + return AWS_OP_SUCCESS; +} + +static void s_written_task(struct aws_task *task, void *arg, enum aws_task_status status) { + (void)task; + (void)status; + + struct aws_socket *socket = arg; + struct posix_socket *socket_impl = socket->impl; + + socket_impl->written_task_scheduled = false; + + /* this is to handle a race condition when a callback kicks off a cleanup, or the user decides + * to close the socket based on something they read (SSL validation failed for example). + * if clean_up happens when internal_refcount > 0, socket_impl is kept dangling */ + aws_ref_count_acquire(&socket_impl->internal_refcount); + + /* Notes about weird loop: + * 1) Only process the initial contents of queue when this task is run, + * ignoring any writes queued during delivery. + * If we simply looped until the queue was empty, we could get into a + * synchronous loop of completing and writing and completing and writing... + * and it would be tough for multiple sockets to share an event-loop fairly. + * 2) Check if queue is empty with each iteration. + * If user calls close() from the callback, close() will process all + * nodes in the written_queue, and the queue will be empty when the + * callstack gets back to here. */ + if (!aws_linked_list_empty(&socket_impl->written_queue)) { + struct aws_linked_list_node *stop_after = aws_linked_list_back(&socket_impl->written_queue); + do { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue); + struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); + size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; + write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data); + aws_mem_release(socket_impl->allocator, write_request); + if (node == stop_after) { + break; + } + } while (!aws_linked_list_empty(&socket_impl->written_queue)); + } + + aws_ref_count_release(&socket_impl->internal_refcount); +}
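The "weird loop" above is a bounded drain: it caps the pass at whichever node was at the back of the queue when the task started. The same pattern in isolation, as a sketch over a generic aws_linked_list (the handler body is left abstract):

/* Sketch of the bounded-drain pattern used by s_written_task(): handle only
 * the nodes present when the pass begins, so nodes appended by callbacks are
 * deferred to a later task and one socket cannot monopolize the event loop. */
static void drain_initial_contents(struct aws_linked_list *list) {
    if (aws_linked_list_empty(list)) {
        return;
    }
    struct aws_linked_list_node *stop_after = aws_linked_list_back(list);
    do {
        struct aws_linked_list_node *node = aws_linked_list_pop_front(list);
        (void)node; /* ... deliver this node's callback; it may push new nodes ... */
        if (node == stop_after) {
            break;
        }
    } while (!aws_linked_list_empty(list));
}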
 + +/* this gets called in two scenarios. + * 1st scenario, someone called aws_socket_write() and we want to try writing now, so an error can be returned + * immediately if something bad has happened to the socket. In this case, `parent_request` is set. + * 2nd scenario, the event loop notified us that the socket went writable. In this case `parent_request` is NULL */ +static int s_process_socket_write_requests(struct aws_socket *socket, struct socket_write_request *parent_request) { + struct posix_socket *socket_impl = socket->impl; + + AWS_ASSERT(socket->io_handle.update_io_result); + + if (parent_request) { + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: processing write requests, called from aws_socket_write", + (void *)socket, + socket->io_handle.data.fd); + } else { + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: processing write requests, invoked by the event-loop", + (void *)socket, + socket->io_handle.data.fd); + } + + bool purge = false; + int aws_error = AWS_OP_SUCCESS; + bool parent_request_failed = false; + bool pushed_to_written_queue = false; + + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + + /* if a close call happens in the middle, this queue will have been cleaned out from under us. */ + while (!aws_linked_list_empty(&socket_impl->write_queue)) { + struct aws_linked_list_node *node = aws_linked_list_front(&socket_impl->write_queue); + struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); + + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: dequeued write request of size %llu, remaining to write %llu", + (void *)socket, + socket->io_handle.data.fd, + (unsigned long long)write_request->original_buffer_len, + (unsigned long long)write_request->cursor_cpy.len); + + ssize_t written = send( + socket->io_handle.data.fd, write_request->cursor_cpy.ptr, write_request->cursor_cpy.len, NO_SIGNAL_SEND); + int errno_value = errno; /* Always cache errno before potential side-effect */ + + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: send written size %d", + (void *)socket, + socket->io_handle.data.fd, + (int)written); + + if (written < 0) { + if (errno_value == EAGAIN) { + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, "id=%p fd=%d: returned would block", (void *)socket, socket->io_handle.data.fd); + io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; /* TODO Add AWS_IO_WRITE_EAGAIN code. 
*/ + break; + } + + if (errno_value == EPIPE) { + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: already closed before write", + (void *)socket, + socket->io_handle.data.fd); + aws_error = AWS_IO_SOCKET_CLOSED; + aws_raise_error(aws_error); + purge = true; + io_op_result.write_error_code = aws_error; + break; + } + + purge = true; + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: write error with error code %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + aws_error = s_determine_socket_error(errno_value); + aws_raise_error(aws_error); + io_op_result.write_error_code = aws_error; + break; + } + + io_op_result.written_bytes += (size_t)written; + + size_t remaining_to_write = write_request->cursor_cpy.len; + + aws_byte_cursor_advance(&write_request->cursor_cpy, (size_t)written); + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: remaining write request to write %llu", + (void *)socket, + socket->io_handle.data.fd, + (unsigned long long)write_request->cursor_cpy.len); + + if ((size_t)written == remaining_to_write) { + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, "id=%p fd=%d: write request completed", (void *)socket, socket->io_handle.data.fd); + + aws_linked_list_remove(node); + write_request->error_code = AWS_ERROR_SUCCESS; + aws_linked_list_push_back(&socket_impl->written_queue, node); + pushed_to_written_queue = true; + } + } + + if (purge) { + while (!aws_linked_list_empty(&socket_impl->write_queue)) { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->write_queue); + struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); + + /* If this fn was invoked directly from aws_socket_write(), don't invoke the error callback + * as the user will be able to rely on the return value from aws_socket_write() */ + if (write_request == parent_request) { + parent_request_failed = true; + aws_mem_release(socket->allocator, write_request); + } else { + write_request->error_code = aws_error; + aws_linked_list_push_back(&socket_impl->written_queue, node); + pushed_to_written_queue = true; + } + } + } + + if (pushed_to_written_queue && !socket_impl->written_task_scheduled) { + socket_impl->written_task_scheduled = true; + aws_task_init(&socket_impl->written_task, s_written_task, socket, "socket_written_task"); + aws_event_loop_schedule_task_now(socket->event_loop, &socket_impl->written_task); + } + + socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result); + + /* Only report error if aws_socket_write() invoked this function and its write_request failed */ + if (!parent_request_failed) { + return AWS_OP_SUCCESS; + } + + aws_raise_error(aws_error); + return AWS_OP_ERR; +}
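s_process_socket_write_requests() illustrates the reporting contract this patch series introduces: the handler accumulates byte counts and error codes in an aws_io_handle_io_op_result and pushes them to the event loop once per invocation via the handle's update_io_result callback. Reduced to a skeleton, as a sketch rather than a second implementation:

/* Sketch of the feedback contract: zero the result, accumulate while doing
 * I/O, then report exactly once through the handle's update_io_result hook. */
static void report_write_result(struct aws_socket *socket, size_t written_bytes, int write_error_code) {
    struct aws_io_handle_io_op_result io_op_result;
    memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result));
    io_op_result.written_bytes = written_bytes;
    io_op_result.write_error_code = write_error_code; /* e.g. AWS_IO_READ_WOULD_BLOCK on EAGAIN, per the code above */
    socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result);
}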
 + +static void s_on_socket_io_event( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + int events, + void *user_data) { + (void)event_loop; + (void)handle; + struct aws_socket *socket = user_data; + struct posix_socket *socket_impl = socket->impl; + + /* this is to handle a race condition when an error kicks off a cleanup, or the user decides + * to close the socket based on something they read (SSL validation failed for example). + * if clean_up happens when internal_refcount > 0, socket_impl is kept dangling but currently + * subscribed is set to false. */ + aws_ref_count_acquire(&socket_impl->internal_refcount); + + /* NOTE: READABLE|WRITABLE|HANG_UP events might arrive simultaneously + * (e.g. peer sends last few bytes and immediately hangs up). + * Notify user of READABLE|WRITABLE events first, so they try to read any remaining bytes. */ + + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + + if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_READABLE) { + AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is readable", (void *)socket, socket->io_handle.data.fd); + if (socket->readable_fn) { + socket->readable_fn(socket, AWS_OP_SUCCESS, socket->readable_user_data); + } + } + /* if socket closed in between these branches, the currently_subscribed will be false and socket_impl will not + * have been cleaned up, so this next branch is safe. */ + if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_WRITABLE) { + AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is writable", (void *)socket, socket->io_handle.data.fd); + s_process_socket_write_requests(socket, NULL); + } + + if (events & AWS_IO_EVENT_TYPE_REMOTE_HANG_UP || events & AWS_IO_EVENT_TYPE_CLOSED) { + aws_raise_error(AWS_IO_SOCKET_CLOSED); + AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: closed remotely", (void *)socket, socket->io_handle.data.fd); + if (socket->readable_fn) { + socket->readable_fn(socket, AWS_IO_SOCKET_CLOSED, socket->readable_user_data); + } + goto end_check; + } + + if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_ERROR) { + int aws_error = aws_socket_get_error(socket); + aws_raise_error(aws_error); + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, "id=%p fd=%d: error event occurred", (void *)socket, socket->io_handle.data.fd); + if (socket->readable_fn) { + socket->readable_fn(socket, aws_error, socket->readable_user_data); + } + goto end_check; + } + +end_check: + aws_ref_count_release(&socket_impl->internal_refcount); + AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "====== s_on_socket_io_event"); +} + +int aws_socket_assign_to_event_loop(struct aws_socket *socket, struct aws_event_loop *event_loop) { + if (!socket->event_loop) { + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: assigning to event loop %p", + (void *)socket, + socket->io_handle.data.fd, + (void *)event_loop); + socket->event_loop = event_loop; + struct posix_socket *socket_impl = socket->impl; + socket_impl->currently_subscribed = true; + if (aws_event_loop_subscribe_to_io_events( + event_loop, + &socket->io_handle, + AWS_IO_EVENT_TYPE_WRITABLE | AWS_IO_EVENT_TYPE_READABLE, + s_on_socket_io_event, + socket)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: assigning to event loop %p failed with error %d", + (void *)socket, + socket->io_handle.data.fd, + (void *)event_loop, + aws_last_error()); + socket_impl->currently_subscribed = false; + socket->event_loop = NULL; + return AWS_OP_ERR; + } + + return AWS_OP_SUCCESS; + } + + return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); +} + +struct aws_event_loop *aws_socket_get_event_loop(struct aws_socket *socket) { + return socket->event_loop; +}
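A hedged usage sketch tying these functions together for a freshly accepted connection; attach_socket and s_on_readable are illustrative names only, not part of this patch:

/* Sketch: typical order of operations for a new socket. Assign it to a loop
 * (which subscribes s_on_socket_io_event), then register a readable callback. */
static void s_on_readable(struct aws_socket *socket, int error_code, void *user_data) {
    (void)socket;
    (void)error_code; /* AWS_OP_SUCCESS, or e.g. AWS_IO_SOCKET_CLOSED on hang-up */
    (void)user_data;
}

static int attach_socket(struct aws_socket *new_socket, struct aws_event_loop *loop) {
    if (aws_socket_assign_to_event_loop(new_socket, loop)) {
        return AWS_OP_ERR; /* already assigned, or the subscribe failed */
    }
    return aws_socket_subscribe_to_readable_events(new_socket, s_on_readable, NULL /*user_data*/);
}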
 + +int aws_socket_subscribe_to_readable_events( + struct aws_socket *socket, + aws_socket_on_readable_fn *on_readable, + void *user_data) { + + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, "id=%p fd=%d: subscribing to readable events", (void *)socket, socket->io_handle.data.fd); + if (!(socket->state & CONNECTED_READ)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: can't subscribe to readable events since the socket is not connected", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); + } + + if (socket->readable_fn) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: can't subscribe to readable events since it is already subscribed", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_ERROR_IO_ALREADY_SUBSCRIBED); + } + + AWS_ASSERT(on_readable); + socket->readable_user_data = user_data; + socket->readable_fn = on_readable; + + return AWS_OP_SUCCESS; +} + +int aws_socket_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read) { + AWS_ASSERT(amount_read); + + if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: cannot read from a different thread than event loop %p", + (void *)socket, + socket->io_handle.data.fd, + (void *)socket->event_loop); + return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); + } + + if (!(socket->state & CONNECTED_READ)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: cannot read because it is not connected", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); + } + + ssize_t read_val = read(socket->io_handle.data.fd, buffer->buffer + buffer->len, buffer->capacity - buffer->len); + int errno_value = errno; /* Always cache errno before potential side-effect */ + + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, "id=%p fd=%d: read of %d", (void *)socket, socket->io_handle.data.fd, (int)read_val); + + if (read_val > 0) { + *amount_read = (size_t)read_val; + buffer->len += *amount_read; + return AWS_OP_SUCCESS; + } + + /* read_val of 0 means EOF which we'll treat as AWS_IO_SOCKET_CLOSED */ + if (read_val == 0) { + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, "id=%p fd=%d: zero read, socket is closed", (void *)socket, socket->io_handle.data.fd); + *amount_read = 0; + + if (buffer->capacity - buffer->len > 0) { + return aws_raise_error(AWS_IO_SOCKET_CLOSED); + } + + return AWS_OP_SUCCESS; + } + +#if defined(EWOULDBLOCK) + if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { +#else + if (errno_value == EAGAIN) { +#endif + AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: read would block", (void *)socket, socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); + } + + if (errno_value == EPIPE || errno_value == ECONNRESET) { + AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: socket is closed.", (void *)socket, socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_CLOSED); + } + + if (errno_value == ETIMEDOUT) { + AWS_LOGF_ERROR(AWS_LS_IO_SOCKET, "id=%p fd=%d: socket timed out.", (void *)socket, socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_TIMEOUT); + } + + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: read failed with error: %s", + (void *)socket, + socket->io_handle.data.fd, + strerror(errno_value)); + return aws_raise_error(s_determine_socket_error(errno_value)); +}
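Given the semantics above (a zero read is EOF mapped to AWS_IO_SOCKET_CLOSED, EAGAIN is mapped to AWS_IO_READ_WOULD_BLOCK), a caller's read loop on the event-loop thread would look roughly like this sketch; drain_readable is a hypothetical helper name:

/* Sketch: drain the socket until it would block or the buffer fills.
 * Must run on the socket's event-loop thread, per the check above. */
static int drain_readable(struct aws_socket *socket, struct aws_byte_buf *buffer) {
    while (buffer->len < buffer->capacity) {
        size_t amount_read = 0;
        if (aws_socket_read(socket, buffer, &amount_read)) {
            if (aws_last_error() == AWS_IO_READ_WOULD_BLOCK) {
                return AWS_OP_SUCCESS; /* wait for the next readable notification */
            }
            return AWS_OP_ERR; /* AWS_IO_SOCKET_CLOSED, AWS_IO_SOCKET_TIMEOUT, etc. */
        }
    }
    return AWS_OP_SUCCESS; /* buffer full; the caller decides whether to read again */
}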
 + +int aws_socket_write( + struct aws_socket *socket, + const struct aws_byte_cursor *cursor, + aws_socket_on_write_completed_fn *written_fn, + void *user_data) { + if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { + return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); + } + + if (!(socket->state & CONNECTED_WRITE)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: cannot write because it is not connected", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); + } + + AWS_ASSERT(written_fn); + struct posix_socket *socket_impl = socket->impl; + struct socket_write_request *write_request = + aws_mem_calloc(socket->allocator, 1, sizeof(struct socket_write_request)); + + if (!write_request) { + return AWS_OP_ERR; + } + + write_request->original_buffer_len = cursor->len; + write_request->written_fn = written_fn; + write_request->write_user_data = user_data; + write_request->cursor_cpy = *cursor; + aws_linked_list_push_back(&socket_impl->write_queue, &write_request->node); + + return s_process_socket_write_requests(socket, write_request); +} + +int aws_socket_get_error(struct aws_socket *socket) { + int connect_result; + socklen_t result_length = sizeof(connect_result); + + if (getsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_ERROR, &connect_result, &result_length) < 0) { + return s_determine_socket_error(errno); + } + + if (connect_result) { + return s_determine_socket_error(connect_result); + } + + return AWS_OP_SUCCESS; +} + +bool aws_socket_is_open(struct aws_socket *socket) { + return socket->io_handle.data.fd >= 0; +} + +void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint) { + struct aws_uuid uuid; + AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); + char uuid_str[AWS_UUID_STR_LEN] = {0}; + struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); + AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); + snprintf(endpoint->address, sizeof(endpoint->address), "testsock" PRInSTR ".sock", AWS_BYTE_BUF_PRI(uuid_buf)); +}
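One subtlety of aws_socket_write(): cursor_cpy copies only the cursor, not the bytes it points at, so the payload must stay valid until the completion callback runs. A hedged usage sketch; the callback and wrapper names are hypothetical:

/* Sketch: queue a write from the event-loop thread. The bytes referenced by
 * `data` must remain valid until s_on_write_complete runs. */
static void s_on_write_complete(struct aws_socket *socket, int error_code, size_t bytes_written, void *user_data) {
    (void)socket;
    (void)user_data;
    (void)bytes_written;
    if (error_code != AWS_ERROR_SUCCESS) {
        /* e.g. AWS_IO_SOCKET_CLOSED if the socket was closed with the request still queued */
    }
}

static int queue_write(struct aws_socket *socket, const struct aws_byte_cursor *data) {
    return aws_socket_write(socket, data, s_on_write_complete, NULL /*user_data*/);
}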
" "Waiting on event-loop notification.", (void *)socket_handler->slot->handler); + io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; } + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET_HANDLER, + "=== s_do_read 1: %d %lu %d %lu", + io_op_result.read_error_code, + io_op_result.read_bytes, + io_op_result.write_error_code, + io_op_result.written_bytes); + AWS_ASSERT(socket_handler->socket->io_handle.update_io_result); + socket_handler->socket->io_handle.update_io_result( + socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); return; } /* in this case, everything was fine, but there's still pending reads. We need to schedule a task to do the read @@ -211,12 +226,20 @@ static void s_do_read(struct socket_handler *socket_handler) { aws_channel_schedule_task_now(socket_handler->slot->channel, &socket_handler->read_task_storage); } - struct aws_event_loop_io_op_result io_op_result = {total_read, AWS_ERROR_SUCCESS}; - aws_event_loop_feedback_io_op_result( + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET_HANDLER, + "id=%p: === s_do_read update I/O results: %d %lu %d %lu", + (void *)socket_handler->slot->handler, + io_op_result.read_error_code, + io_op_result.read_bytes, + io_op_result.write_error_code, + io_op_result.written_bytes); + socket_handler->socket->io_handle.update_io_result( socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); } -/* the socket is either readable or errored out. If it's readable, kick off s_do_read() to do its thing. */ +/* the socket is either readable or errored out. If it's readable, kick off s_do_read() to do its thing. + * If an error, start the channel shutdown process. */ static void s_on_readable_notification(struct aws_socket *socket, int error_code, void *user_data) { (void)socket; diff --git a/tests/socket_test.c b/tests/socket_test.c index ecc47c2b2..22ec38604 100644 --- a/tests/socket_test.c +++ b/tests/socket_test.c @@ -22,6 +22,8 @@ # include #endif +// #if AWS_USE_ON_EVENT_WITH_RESULT + struct local_listener_args { struct aws_socket *incoming; struct aws_mutex *mutex; From 50bca0d7ba384cb34ab15f17ff01a7199d00754c Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 27 Aug 2024 13:54:08 -0700 Subject: [PATCH 04/39] Revert unrelated changes --- source/bsd/kqueue_event_loop.c | 41 ---------------------------------- source/qnx/pipe.c | 3 +-- source/qnx/socket.c | 1 + 3 files changed, 2 insertions(+), 43 deletions(-) diff --git a/source/bsd/kqueue_event_loop.c b/source/bsd/kqueue_event_loop.c index a5c2f3610..33a517e7b 100644 --- a/source/bsd/kqueue_event_loop.c +++ b/source/bsd/kqueue_event_loop.c @@ -39,10 +39,6 @@ static int s_subscribe_to_io_events( aws_event_loop_on_event_fn *on_event, void *user_data); static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); -static void s_feedback_io_result( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - const struct aws_event_loop_io_op_result *io_op_result); static void s_free_io_event_resources(void *user_data); static bool s_is_event_thread(struct aws_event_loop *event_loop); @@ -114,9 +110,6 @@ struct handle_data { struct aws_task subscribe_task; struct aws_task cleanup_task; - - struct aws_event_loop_io_op_result last_io_operation_result; - bool last_io_operation_is_updated; }; enum { @@ -134,7 +127,6 @@ struct aws_event_loop_vtable s_kqueue_vtable = { .subscribe_to_io_events = s_subscribe_to_io_events, .cancel_task = s_cancel_task, .unsubscribe_from_io_events = 
s_unsubscribe_from_io_events, - .feedback_io_result = s_feedback_io_result, .free_io_event_resources = s_free_io_event_resources, .is_on_callers_thread = s_is_event_thread, }; @@ -143,7 +135,6 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, const struct aws_event_loop_options *options) { AWS_ASSERT(alloc); - // FIXME Remove this assert. AWS_ASSERT(clock); AWS_ASSERT(options); AWS_ASSERT(options->clock); @@ -734,25 +725,6 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc return AWS_OP_SUCCESS; } -static void s_feedback_io_result( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - const struct aws_event_loop_io_op_result *io_op_result) { - AWS_ASSERT(handle->additional_data); - struct handle_data *handle_data = handle->additional_data; - AWS_ASSERT(event_loop == handle_data->event_loop); - AWS_ASSERT(handle_data->last_io_operation_is_updated == 0); - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, - "id=%p: got feedback on I/O operation for fd %d: status %s", - (void *)event_loop, - handle->data.fd, - aws_error_str(io_op_result->error_code)); - handle_data->last_io_operation_result.read_bytes = io_op_result->read_bytes; - handle_data->last_io_operation_result.error_code = io_op_result->error_code; - handle_data->last_io_operation_is_updated = 1; -} - static bool s_is_event_thread(struct aws_event_loop *event_loop) { struct kqueue_loop *impl = event_loop->impl_data; @@ -957,21 +929,8 @@ static void aws_event_loop_thread(void *user_data) { "id=%p: activity on fd %d, invoking handler.", (void *)event_loop, handle_data->owner->data.fd); - - // Reset last I/O operation result, so if a channel forgets to update its value, we can catch it. - handle_data->last_io_operation_is_updated = 0; - handle_data->on_event( event_loop, handle_data->owner, handle_data->events_this_loop, handle_data->on_event_user_data); - - AWS_ASSERT(handle_data->last_io_operation_is_updated == 1); - AWS_LOGF_INFO( - AWS_LS_IO_EVENT_LOOP, - "id=%p: on_event completion status is %d (%s); read %lu bytes", - (void *)event_loop, - handle_data->last_io_operation_result.error_code, - aws_error_str(handle_data->last_io_operation_result.error_code), - handle_data->last_io_operation_result.read_bytes); } handle_data->events_this_loop = 0; diff --git a/source/qnx/pipe.c b/source/qnx/pipe.c index 6beca8355..a56790b35 100644 --- a/source/qnx/pipe.c +++ b/source/qnx/pipe.c @@ -436,8 +436,7 @@ static bool s_write_end_complete_front_write_request(struct aws_pipe_write_end * } /* Process write requests as long as the pipe remains writable */ -static void s_write_end_process_requests( - struct aws_pipe_write_end *write_end) { +static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { struct write_end_impl *write_impl = write_end->impl_data; AWS_ASSERT(write_impl); AWS_ASSERT(write_impl->handle.update_io_result); diff --git a/source/qnx/socket.c b/source/qnx/socket.c index a3d59f946..2d8884237 100644 --- a/source/qnx/socket.c +++ b/source/qnx/socket.c @@ -18,6 +18,7 @@ #include #include #include +#include /* Required when VSOCK is used */ #include #include #include From 29d9c0402626db9a8afd361c95327d39324d5b1f Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 27 Aug 2024 13:58:22 -0700 Subject: [PATCH 05/39] Use shared sources --- CMakeLists.txt | 3 +- source/bsd/kqueue_event_loop.c | 33 + source/posix/pipe.c | 10 + source/posix/socket.c | 52 +- source/qnx/host_resolver.c | 121 -- source/qnx/kqueue_event_loop.c | 1035 
---------------- source/qnx/pipe.c | 595 --------- source/qnx/shared_library.c | 66 - source/qnx/socket.c | 2059 -------------------------------- 9 files changed, 96 insertions(+), 3878 deletions(-) delete mode 100644 source/qnx/host_resolver.c delete mode 100644 source/qnx/kqueue_event_loop.c delete mode 100644 source/qnx/pipe.c delete mode 100644 source/qnx/shared_library.c delete mode 100644 source/qnx/socket.c diff --git a/CMakeLists.txt b/CMakeLists.txt index 286fb6e68..5fa20037c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -115,7 +115,8 @@ elseif (APPLE) ) file(GLOB AWS_IO_OS_SRC - "source/qnx/*.c" + "source/bsd/*.c" + "source/posix/*.c" "source/darwin/*.c" ) diff --git a/source/bsd/kqueue_event_loop.c b/source/bsd/kqueue_event_loop.c index 33a517e7b..6374eea6e 100644 --- a/source/bsd/kqueue_event_loop.c +++ b/source/bsd/kqueue_event_loop.c @@ -131,10 +131,32 @@ struct aws_event_loop_vtable s_kqueue_vtable = { .is_on_callers_thread = s_is_event_thread, }; +static void s_update_io_result( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + const struct aws_io_handle_io_op_result *io_op_result) { + AWS_ASSERT(handle->additional_data); + struct handle_data *handle_data = handle->additional_data; + AWS_ASSERT(event_loop == handle_data->event_loop); + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p: got feedback on I/O operation for fd %d: read: status %d (%s), %lu bytes; write: status %d (%s), %lu " + "bytes", + (void *)event_loop, + handle->data.fd, + io_op_result->read_error_code, + aws_error_str(io_op_result->read_error_code), + io_op_result->read_bytes, + io_op_result->write_error_code, + aws_error_str(io_op_result->write_error_code), + io_op_result->written_bytes); +} +
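As written, s_update_io_result() only logs the feedback. A sketch of how the loop could also persist it per handle, assuming a last_io_operation_result field on handle_data like the one an earlier (since reverted) revision of this series used; the field is hypothetical in this revision:

/* Sketch (assumption): store the latest feedback on the handle's bookkeeping
 * struct so the event-loop thread can inspect it after on_event returns. */
static void s_update_io_result_stored(
    struct aws_event_loop *event_loop,
    struct aws_io_handle *handle,
    const struct aws_io_handle_io_op_result *io_op_result) {
    (void)event_loop;
    struct handle_data *handle_data = handle->additional_data;
    handle_data->last_io_operation_result = *io_op_result; /* hypothetical field */
}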
struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, const struct aws_event_loop_options *options) { AWS_ASSERT(alloc); + // FIXME Remove this assert. AWS_ASSERT(clock); AWS_ASSERT(options); AWS_ASSERT(options->clock); @@ -586,6 +608,7 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta /* Success */ handle_data->state = HANDLE_STATE_SUBSCRIBED; + handle_data->owner->update_io_result = s_update_io_result; return; subscribe_failed: @@ -931,6 +954,16 @@ static void aws_event_loop_thread(void *user_data) { handle_data->owner->data.fd); handle_data->on_event( event_loop, handle_data->owner, handle_data->events_this_loop, handle_data->on_event_user_data); + + // AWS_LOGF_INFO( + // AWS_LS_IO_EVENT_LOOP, + // "id=%p: on_event completion status: read: status %d (%s), %lu bytes; write: status + // %d (%s), %lu " "bytes", (void *)event_loop, io_op_result.read_error_code, + // aws_error_str(io_op_result.read_error_code), + // io_op_result.read_bytes, + // io_op_result.write_error_code, + // aws_error_str(io_op_result.write_error_code), + // io_op_result.written_bytes); } handle_data->events_this_loop = 0; diff --git a/source/posix/pipe.c b/source/posix/pipe.c index f727b021c..a56790b35 100644 --- a/source/posix/pipe.c +++ b/source/posix/pipe.c @@ -439,6 +439,10 @@ static bool s_write_end_complete_front_write_request(struct aws_pipe_write_end * static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { struct write_end_impl *write_impl = write_end->impl_data; AWS_ASSERT(write_impl); + AWS_ASSERT(write_impl->handle.update_io_result); + + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); while (!aws_linked_list_empty(&write_impl->write_list)) { struct aws_linked_list_node *node = aws_linked_list_front(&write_impl->write_list); @@ -454,6 +458,8 @@ static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { /* The pipe is no longer writable. Bail out */ write_impl->is_writable = false; + io_op_result.write_error_code = errno_value; + write_impl->handle.update_io_result(write_impl->event_loop, &write_impl->handle, &io_op_result); return; } @@ -463,6 +469,8 @@ static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { } else { aws_byte_cursor_advance(&request->cursor, write_val); + io_op_result.written_bytes += (size_t)write_val; + if (request->cursor.len > 0) { /* There was a partial write, loop again to try and write the rest. */ continue; } } /* If we got this far in the loop, then the write request is complete. * Note that the callback may result in the pipe being cleaned up. */ + // TODO Call update. bool write_end_cleaned_up = s_write_end_complete_front_write_request(write_end, completed_error_code); + if (write_end_cleaned_up) { /* Bail out! 
Any remaining requests were canceled during clean_up() */ diff --git a/source/posix/socket.c index dbbf62657..2d8884237 100644 --- a/source/posix/socket.c +++ b/source/posix/socket.c @@ -448,6 +448,10 @@ static void s_socket_connect_event( (void)event_loop; (void)handle; + AWS_ASSERT(handle->update_io_result); + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, "fd=%d: update I/O results callback: %p", handle->data.fd, (void *)handle->update_io_result); + struct posix_socket_connect_args *socket_args = (struct posix_socket_connect_args *)user_data; AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "fd=%d: connection activity handler triggered ", handle->data.fd); @@ -465,6 +469,9 @@ static void s_socket_connect_event( socket_args->socket = NULL; socket_impl->connect_args = NULL; s_on_connection_success(socket); + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + // TODO Update? return; } @@ -476,6 +483,10 @@ static void s_socket_connect_event( "id=%p fd=%d: spurious event, waiting for another notification.", (void *)socket_args->socket, handle->data.fd); + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; + // handle->update_io_result(event_loop, handle, &io_op_result); return; } @@ -485,6 +496,10 @@ static void s_socket_connect_event( aws_raise_error(aws_error); s_on_connection_error(socket, aws_error); } + + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + // TODO Update? }
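The "// TODO Update?" placeholders above leave the connect paths unreported. One way the TODO might be resolved on the success path, offered as an assumption rather than what the patch currently does:

/* Sketch (assumption, not in the patch): report a completed connect with zero
 * bytes and no error, so the loop records that the handle made progress. */
static void report_connect_success(struct aws_event_loop *event_loop, struct aws_io_handle *handle) {
    struct aws_io_handle_io_op_result io_op_result;
    memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result));
    handle->update_io_result(event_loop, handle, &io_op_result);
}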
 + static void s_handle_socket_timeout(struct aws_task *task, void *args, aws_task_status status) { @@ -949,11 +964,20 @@ static void s_socket_accept_event( (void)event_loop; + AWS_ASSERT(handle->update_io_result); + struct aws_socket *socket = user_data; struct posix_socket *socket_impl = socket->impl; AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, "id=%p fd=%d: listening event received", (void *)socket, socket->io_handle.data.fd); + AWS_LS_IO_SOCKET, + "id=%p fd=%d: listening event received: %p", + (void *)socket, + socket->io_handle.data.fd, + (void *)handle->update_io_result); + + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); if (socket_impl->continue_accept && events & AWS_IO_EVENT_TYPE_READABLE) { int in_fd = 0; @@ -966,12 +990,14 @@ static void s_socket_accept_event( int errno_value = errno; /* Always cache errno before potential side-effect */ if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { + io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; break; } int aws_error = aws_socket_get_error(socket); aws_raise_error(aws_error); s_on_connection_error(socket, aws_error); + io_op_result.read_error_code = aws_error; break; } @@ -1057,6 +1083,12 @@ static void s_socket_accept_event( socket->accept_result_fn(socket, AWS_ERROR_SUCCESS, new_sock, socket->connect_accept_user_data); if (close_occurred) { + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: trying to update: %p", + (void *)socket, + socket->io_handle.data.fd, + (void *)handle->update_io_result); return; } @@ -1064,6 +1096,8 @@ static void s_socket_accept_event( } } + handle->update_io_result(event_loop, handle, &io_op_result); + AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: finished processing incoming connections, " "waiting on event-loop notification", @@ -1613,6 +1647,8 @@ static void s_written_task(struct aws_task *task, void *arg, enum aws_task_statu static int s_process_socket_write_requests(struct aws_socket *socket, struct socket_write_request *parent_request) { struct posix_socket *socket_impl = socket->impl; + AWS_ASSERT(socket->io_handle.update_io_result); + if (parent_request) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, @@ -1632,6 +1668,9 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc bool parent_request_failed = false; bool pushed_to_written_queue = false; + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + /* if a close call happens in the middle, this queue will have been cleaned out from under us. */ while (!aws_linked_list_empty(&socket_impl->write_queue)) { struct aws_linked_list_node *node = aws_linked_list_front(&socket_impl->write_queue); @@ -1660,6 +1699,7 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc if (errno_value == EAGAIN) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: returned would block", (void *)socket, socket->io_handle.data.fd); + io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; /* TODO Add AWS_IO_WRITE_EAGAIN code. 
*/ break; } @@ -1672,6 +1712,7 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc aws_error = AWS_IO_SOCKET_CLOSED; aws_raise_error(aws_error); purge = true; + io_op_result.write_error_code = aws_error; break; } @@ -1684,9 +1725,12 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc errno_value); aws_error = s_determine_socket_error(errno_value); aws_raise_error(aws_error); + io_op_result.write_error_code = aws_error; break; } + io_op_result.written_bytes += (size_t)written; + size_t remaining_to_write = write_request->cursor_cpy.len; aws_byte_cursor_advance(&write_request->cursor_cpy, (size_t)written); @@ -1732,6 +1776,8 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc aws_event_loop_schedule_task_now(socket->event_loop, &socket_impl->written_task); } + socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result); + /* Only report error if aws_socket_write() invoked this function and its write_request failed */ if (!parent_request_failed) { return AWS_OP_SUCCESS; @@ -1761,6 +1807,9 @@ static void s_on_socket_io_event( * (e.g. peer sends last few bytes and immediately hangs up). * Notify user of READABLE|WRITABLE events first, so they try to read any remaining bytes. */ + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_READABLE) { AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is readable", (void *)socket, socket->io_handle.data.fd); if (socket->readable_fn) { @@ -1796,6 +1845,7 @@ static void s_on_socket_io_event( end_check: aws_ref_count_release(&socket_impl->internal_refcount); + AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "====== s_on_socket_io_event"); } int aws_socket_assign_to_event_loop(struct aws_socket *socket, struct aws_event_loop *event_loop) { diff --git a/source/qnx/host_resolver.c b/source/qnx/host_resolver.c deleted file mode 100644 index e4aafb838..000000000 --- a/source/qnx/host_resolver.c +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0. - */ - -#include - -#include - -#include - -#include -#include -#include -#include - -int aws_default_dns_resolve( - struct aws_allocator *allocator, - const struct aws_string *host_name, - struct aws_array_list *output_addresses, - void *user_data) { - - (void)user_data; - struct addrinfo *result = NULL; - struct addrinfo *iter = NULL; - /* max string length for ipv6. 
*/ - socklen_t max_len = INET6_ADDRSTRLEN; - char address_buffer[max_len]; - - const char *hostname_cstr = aws_string_c_str(host_name); - AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolving host %s", hostname_cstr); - - /* Android would prefer NO HINTS IF YOU DON'T MIND, SIR */ -#if defined(ANDROID) - int err_code = getaddrinfo(hostname_cstr, NULL, NULL, &result); -#else - struct addrinfo hints; - AWS_ZERO_STRUCT(hints); - hints.ai_family = AF_UNSPEC; - hints.ai_socktype = SOCK_STREAM; -# if !defined(__OpenBSD__) - hints.ai_flags = AI_ALL | AI_V4MAPPED; -# endif /* __OpenBSD__ */ - - int err_code = getaddrinfo(hostname_cstr, NULL, &hints, &result); -#endif - - if (err_code) { - AWS_LOGF_ERROR( - AWS_LS_IO_DNS, "static: getaddrinfo failed with error_code %d: %s", err_code, gai_strerror(err_code)); - goto clean_up; - } - - for (iter = result; iter != NULL; iter = iter->ai_next) { - struct aws_host_address host_address; - - AWS_ZERO_ARRAY(address_buffer); - - if (iter->ai_family == AF_INET6) { - host_address.record_type = AWS_ADDRESS_RECORD_TYPE_AAAA; - inet_ntop(iter->ai_family, &((struct sockaddr_in6 *)iter->ai_addr)->sin6_addr, address_buffer, max_len); - } else { - host_address.record_type = AWS_ADDRESS_RECORD_TYPE_A; - inet_ntop(iter->ai_family, &((struct sockaddr_in *)iter->ai_addr)->sin_addr, address_buffer, max_len); - } - - size_t address_len = strlen(address_buffer); - const struct aws_string *address = - aws_string_new_from_array(allocator, (const uint8_t *)address_buffer, address_len); - - if (!address) { - goto clean_up; - } - - const struct aws_string *host_cpy = aws_string_new_from_string(allocator, host_name); - - if (!host_cpy) { - aws_string_destroy((void *)address); - goto clean_up; - } - - AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolved record: %s", address_buffer); - - host_address.address = address; - host_address.weight = 0; - host_address.allocator = allocator; - host_address.use_count = 0; - host_address.connection_failure_count = 0; - host_address.host = host_cpy; - - if (aws_array_list_push_back(output_addresses, &host_address)) { - aws_host_address_clean_up(&host_address); - goto clean_up; - } - } - - freeaddrinfo(result); - return AWS_OP_SUCCESS; - -clean_up: - if (result) { - freeaddrinfo(result); - } - - if (err_code) { - switch (err_code) { - case EAI_FAIL: - case EAI_AGAIN: - return aws_raise_error(AWS_IO_DNS_QUERY_FAILED); - case EAI_MEMORY: - return aws_raise_error(AWS_ERROR_OOM); - case EAI_NONAME: - case EAI_SERVICE: - return aws_raise_error(AWS_IO_DNS_INVALID_NAME); - default: - return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); - } - } - - return AWS_OP_ERR; -} diff --git a/source/qnx/kqueue_event_loop.c b/source/qnx/kqueue_event_loop.c deleted file mode 100644 index 6374eea6e..000000000 --- a/source/qnx/kqueue_event_loop.c +++ /dev/null @@ -1,1035 +0,0 @@ -/** - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0. 
- */ - -#include - -#include - -#include -#include -#include -#include -#include -#include - -#if defined(__FreeBSD__) || defined(__NetBSD__) -# define __BSD_VISIBLE 1 -# include -#endif - -#include - -#include -#include -#include - -static void s_destroy(struct aws_event_loop *event_loop); -static int s_run(struct aws_event_loop *event_loop); -static int s_stop(struct aws_event_loop *event_loop); -static int s_wait_for_stop_completion(struct aws_event_loop *event_loop); -static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task); -static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos); -static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task); -static int s_subscribe_to_io_events( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - aws_event_loop_on_event_fn *on_event, - void *user_data); -static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); -static void s_free_io_event_resources(void *user_data); -static bool s_is_event_thread(struct aws_event_loop *event_loop); - -static void aws_event_loop_thread(void *user_data); - -int aws_open_nonblocking_posix_pipe(int pipe_fds[2]); - -enum event_thread_state { - EVENT_THREAD_STATE_READY_TO_RUN, - EVENT_THREAD_STATE_RUNNING, - EVENT_THREAD_STATE_STOPPING, -}; - -enum pipe_fd_index { - READ_FD, - WRITE_FD, -}; - -struct kqueue_loop { - /* thread_created_on is the handle to the event loop thread. */ - struct aws_thread thread_created_on; - /* thread_joined_to is used by the thread destroying the event loop. */ - aws_thread_id_t thread_joined_to; - /* running_thread_id is NULL if the event loop thread is stopped or points-to the thread_id of the thread running - * the event loop (either thread_created_on or thread_joined_to). Atomic because of concurrent writes (e.g., - * run/stop) and reads (e.g., is_event_loop_thread). - * An aws_thread_id_t variable itself cannot be atomic because it is an opaque type that is platform-dependent. */ - struct aws_atomic_var running_thread_id; - int kq_fd; /* kqueue file descriptor */ - - /* Pipe for signaling to event-thread that cross_thread_data has changed. */ - int cross_thread_signal_pipe[2]; - - /* cross_thread_data holds things that must be communicated across threads. - * When the event-thread is running, the mutex must be locked while anyone touches anything in cross_thread_data. - * If this data is modified outside the thread, the thread is signaled via activity on a pipe. */ - struct { - struct aws_mutex mutex; - bool thread_signaled; /* whether thread has been signaled about changes to cross_thread_data */ - struct aws_linked_list tasks_to_schedule; - enum event_thread_state state; - } cross_thread_data; - - /* thread_data holds things which, when the event-thread is running, may only be touched by the thread */ - struct { - struct aws_task_scheduler scheduler; - - int connected_handle_count; - - /* These variables duplicate ones in cross_thread_data. 
-/* Data attached to aws_io_handle while the handle is subscribed to io events */ -struct handle_data { - struct aws_io_handle *owner; - struct aws_event_loop *event_loop; - aws_event_loop_on_event_fn *on_event; - void *on_event_user_data; - - int events_subscribed; /* aws_io_event_types this handle should be subscribed to */ - int events_this_loop; /* aws_io_event_types received during current loop of the event-thread */ - - enum { HANDLE_STATE_SUBSCRIBING, HANDLE_STATE_SUBSCRIBED, HANDLE_STATE_UNSUBSCRIBED } state; - - struct aws_task subscribe_task; - struct aws_task cleanup_task; -}; - -enum { - DEFAULT_TIMEOUT_SEC = 100, /* Max kevent() timeout per loop of the event-thread */ - MAX_EVENTS = 100, /* Max kevents to process per loop of the event-thread */ -}; - -struct aws_event_loop_vtable s_kqueue_vtable = { - .destroy = s_destroy, - .run = s_run, - .stop = s_stop, - .wait_for_stop_completion = s_wait_for_stop_completion, - .schedule_task_now = s_schedule_task_now, - .schedule_task_future = s_schedule_task_future, - .subscribe_to_io_events = s_subscribe_to_io_events, - .cancel_task = s_cancel_task, - .unsubscribe_from_io_events = s_unsubscribe_from_io_events, - .free_io_event_resources = s_free_io_event_resources, - .is_on_callers_thread = s_is_event_thread, -}; - -static void s_update_io_result( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - const struct aws_io_handle_io_op_result *io_op_result) { - AWS_ASSERT(handle->additional_data); - struct handle_data *handle_data = handle->additional_data; - AWS_ASSERT(event_loop == handle_data->event_loop); - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, - "id=%p: got feedback on I/O operation for fd %d: read: status %d (%s), %lu bytes; write: status %d (%s), %lu " - "bytes", - (void *)event_loop, - handle->data.fd, - io_op_result->read_error_code, - aws_error_str(io_op_result->read_error_code), - io_op_result->read_bytes, - io_op_result->write_error_code, - aws_error_str(io_op_result->write_error_code), - io_op_result->written_bytes); -} - -struct aws_event_loop *aws_event_loop_new_default_with_options( - struct aws_allocator *alloc, - const struct aws_event_loop_options *options) { - AWS_ASSERT(alloc); - AWS_ASSERT(options); - AWS_ASSERT(options->clock); - - bool clean_up_event_loop_mem = false; - bool clean_up_event_loop_base = false; - bool clean_up_impl_mem = false; - bool clean_up_thread = false; - bool clean_up_kqueue = false; - bool clean_up_signal_pipe = false; - bool clean_up_signal_kevent = false; - bool clean_up_mutex = false; - - struct aws_event_loop *event_loop = aws_mem_acquire(alloc, sizeof(struct aws_event_loop)); - if (!event_loop) { - return NULL; - } - - AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Initializing edge-triggered kqueue", (void *)event_loop); - clean_up_event_loop_mem = true; - - int err = aws_event_loop_init_base(event_loop, alloc, options->clock); - if (err) { - goto clean_up; - } - clean_up_event_loop_base = true; - - struct kqueue_loop *impl = aws_mem_calloc(alloc, 1, sizeof(struct kqueue_loop)); - if (!impl) { - goto clean_up; - } - - if (options->thread_options) { - impl->thread_options = *options->thread_options; - } else { - impl->thread_options = *aws_default_thread_options(); - } - - /* initialize thread id to NULL.
It will be set when the event loop thread starts. */ - aws_atomic_init_ptr(&impl->running_thread_id, NULL); - clean_up_impl_mem = true; - - err = aws_thread_init(&impl->thread_created_on, alloc); - if (err) { - goto clean_up; - } - clean_up_thread = true; - - impl->kq_fd = kqueue(); - if (impl->kq_fd == -1) { - AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: Failed to open kqueue handle.", (void *)event_loop); - aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); - goto clean_up; - } - clean_up_kqueue = true; - - err = aws_open_nonblocking_posix_pipe(impl->cross_thread_signal_pipe); - if (err) { - AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: failed to open pipe handle.", (void *)event_loop); - goto clean_up; - } - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, - "id=%p: pipe descriptors read %d, write %d.", - (void *)event_loop, - impl->cross_thread_signal_pipe[READ_FD], - impl->cross_thread_signal_pipe[WRITE_FD]); - clean_up_signal_pipe = true; - - /* Set up kevent to handle activity on the cross_thread_signal_pipe */ - struct kevent thread_signal_kevent; - EV_SET( - &thread_signal_kevent, - impl->cross_thread_signal_pipe[READ_FD], - EVFILT_READ /*filter*/, - EV_ADD | EV_CLEAR /*flags*/, - 0 /*fflags*/, - 0 /*data*/, - NULL /*udata*/); - - int res = kevent( - impl->kq_fd, - &thread_signal_kevent /*changelist*/, - 1 /*nchanges*/, - NULL /*eventlist*/, - 0 /*nevents*/, - NULL /*timeout*/); - - if (res == -1) { - AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: failed to create cross-thread signal kevent.", (void *)event_loop); - aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); - goto clean_up; - } - clean_up_signal_kevent = true; - - err = aws_mutex_init(&impl->cross_thread_data.mutex); - if (err) { - goto clean_up; - } - clean_up_mutex = true; - - impl->cross_thread_data.thread_signaled = false; - - aws_linked_list_init(&impl->cross_thread_data.tasks_to_schedule); - - impl->cross_thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; - - err = aws_task_scheduler_init(&impl->thread_data.scheduler, alloc); - if (err) { - goto clean_up; - } - - impl->thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; - - event_loop->impl_data = impl; - - event_loop->vtable = &s_kqueue_vtable; - - /* success */ - return event_loop; - -clean_up: - if (clean_up_mutex) { - aws_mutex_clean_up(&impl->cross_thread_data.mutex); - } - if (clean_up_signal_kevent) { - thread_signal_kevent.flags = EV_DELETE; - kevent( - impl->kq_fd, - &thread_signal_kevent /*changelist*/, - 1 /*nchanges*/, - NULL /*eventlist*/, - 0 /*nevents*/, - NULL /*timeout*/); - } - if (clean_up_signal_pipe) { - close(impl->cross_thread_signal_pipe[READ_FD]); - close(impl->cross_thread_signal_pipe[WRITE_FD]); - } - if (clean_up_kqueue) { - close(impl->kq_fd); - } - if (clean_up_thread) { - aws_thread_clean_up(&impl->thread_created_on); - } - if (clean_up_impl_mem) { - aws_mem_release(alloc, impl); - } - if (clean_up_event_loop_base) { - aws_event_loop_clean_up_base(event_loop); - } - if (clean_up_event_loop_mem) { - aws_mem_release(alloc, event_loop); - } - return NULL; -} - -static void s_destroy(struct aws_event_loop *event_loop) { - AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: destroying event_loop", (void *)event_loop); - struct kqueue_loop *impl = event_loop->impl_data; - - /* Stop the event-thread. This might have already happened. It's safe to call multiple times. 
*/ - s_stop(event_loop); - int err = s_wait_for_stop_completion(event_loop); - if (err) { - AWS_LOGF_WARN( - AWS_LS_IO_EVENT_LOOP, - "id=%p: failed to destroy event-thread, resources have been leaked", - (void *)event_loop); - AWS_ASSERT("Failed to destroy event-thread, resources have been leaked." == NULL); - return; - } - /* setting this so that canceled tasks don't blow up when asking if they're on the event-loop thread. */ - impl->thread_joined_to = aws_thread_current_thread_id(); - aws_atomic_store_ptr(&impl->running_thread_id, &impl->thread_joined_to); - - /* Clean up task-related stuff first. It's possible that a cancelled task adds further tasks to this event_loop. - * Tasks added in this way will be in cross_thread_data.tasks_to_schedule, so we clean that up last */ - - aws_task_scheduler_clean_up(&impl->thread_data.scheduler); /* Tasks in scheduler get cancelled */ - - while (!aws_linked_list_empty(&impl->cross_thread_data.tasks_to_schedule)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&impl->cross_thread_data.tasks_to_schedule); - struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); - task->fn(task, task->arg, AWS_TASK_STATUS_CANCELED); - } - - /* Warn user if aws_io_handle was subscribed, but never unsubscribed. This would cause memory leaks. */ - AWS_ASSERT(impl->thread_data.connected_handle_count == 0); - - /* Clean up everything else */ - aws_mutex_clean_up(&impl->cross_thread_data.mutex); - - struct kevent thread_signal_kevent; - EV_SET( - &thread_signal_kevent, - impl->cross_thread_signal_pipe[READ_FD], - EVFILT_READ /*filter*/, - EV_DELETE /*flags*/, - 0 /*fflags*/, - 0 /*data*/, - NULL /*udata*/); - - kevent( - impl->kq_fd, - &thread_signal_kevent /*changelist*/, - 1 /*nchanges*/, - NULL /*eventlist*/, - 0 /*nevents*/, - NULL /*timeout*/); - - close(impl->cross_thread_signal_pipe[READ_FD]); - close(impl->cross_thread_signal_pipe[WRITE_FD]); - close(impl->kq_fd); - aws_thread_clean_up(&impl->thread_created_on); - aws_mem_release(event_loop->alloc, impl); - aws_event_loop_clean_up_base(event_loop); - aws_mem_release(event_loop->alloc, event_loop); -} - -static int s_run(struct aws_event_loop *event_loop) { - struct kqueue_loop *impl = event_loop->impl_data; - - AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: starting event-loop thread.", (void *)event_loop); - /* to re-run, call stop() and wait_for_stop_completion() */ - AWS_ASSERT(impl->cross_thread_data.state == EVENT_THREAD_STATE_READY_TO_RUN); - AWS_ASSERT(impl->thread_data.state == EVENT_THREAD_STATE_READY_TO_RUN); - - /* Since thread isn't running it's ok to touch thread_data, - * and it's ok to touch cross_thread_data without locking the mutex */ - impl->cross_thread_data.state = EVENT_THREAD_STATE_RUNNING; - - aws_thread_increment_unjoined_count(); - int err = - aws_thread_launch(&impl->thread_created_on, aws_event_loop_thread, (void *)event_loop, &impl->thread_options); - - if (err) { - aws_thread_decrement_unjoined_count(); - AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: thread creation failed.", (void *)event_loop); - goto clean_up; - } - - return AWS_OP_SUCCESS; - -clean_up: - impl->cross_thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; - return AWS_OP_ERR; -} - -/* This function can't fail, we're relying on the thread responding to critical messages (ex: stop thread) */ -void signal_cross_thread_data_changed(struct aws_event_loop *event_loop) { - struct kqueue_loop *impl = event_loop->impl_data; - - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, - "id=%p: signaling event-loop that
cross-thread tasks need to be scheduled.", - (void *)event_loop); - /* Doesn't actually matter what we write, any activity on pipe signals that cross_thread_data has changed, - * If the pipe is full and the write fails, that's fine, the event-thread will get the signal from some previous - * write */ - uint32_t write_whatever = 0xC0FFEE; - write(impl->cross_thread_signal_pipe[WRITE_FD], &write_whatever, sizeof(write_whatever)); -} - -static int s_stop(struct aws_event_loop *event_loop) { - struct kqueue_loop *impl = event_loop->impl_data; - - bool signal_thread = false; - - { /* Begin critical section */ - aws_mutex_lock(&impl->cross_thread_data.mutex); - if (impl->cross_thread_data.state == EVENT_THREAD_STATE_RUNNING) { - impl->cross_thread_data.state = EVENT_THREAD_STATE_STOPPING; - signal_thread = !impl->cross_thread_data.thread_signaled; - impl->cross_thread_data.thread_signaled = true; - } - aws_mutex_unlock(&impl->cross_thread_data.mutex); - } /* End critical section */ - - if (signal_thread) { - signal_cross_thread_data_changed(event_loop); - } - - return AWS_OP_SUCCESS; -} - -static int s_wait_for_stop_completion(struct aws_event_loop *event_loop) { - struct kqueue_loop *impl = event_loop->impl_data; - -#ifdef DEBUG_BUILD - aws_mutex_lock(&impl->cross_thread_data.mutex); - /* call stop() before wait_for_stop_completion() or you'll wait forever */ - AWS_ASSERT(impl->cross_thread_data.state != EVENT_THREAD_STATE_RUNNING); - aws_mutex_unlock(&impl->cross_thread_data.mutex); -#endif - - int err = aws_thread_join(&impl->thread_created_on); - aws_thread_decrement_unjoined_count(); - if (err) { - return AWS_OP_ERR; - } - - /* Since thread is no longer running it's ok to touch thread_data, - * and it's ok to touch cross_thread_data without locking the mutex */ - impl->cross_thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; - impl->thread_data.state = EVENT_THREAD_STATE_READY_TO_RUN; - - return AWS_OP_SUCCESS; -} - -/* Common functionality for "now" and "future" task scheduling. - * If `run_at_nanos` is zero then the task is scheduled as a "now" task. 
*/ -static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { - AWS_ASSERT(task); - struct kqueue_loop *impl = event_loop->impl_data; - - /* If we're on the event-thread, just schedule it directly */ - if (s_is_event_thread(event_loop)) { - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, - "id=%p: scheduling task %p in-thread for timestamp %llu", - (void *)event_loop, - (void *)task, - (unsigned long long)run_at_nanos); - if (run_at_nanos == 0) { - aws_task_scheduler_schedule_now(&impl->thread_data.scheduler, task); - } else { - aws_task_scheduler_schedule_future(&impl->thread_data.scheduler, task, run_at_nanos); - } - return; - } - - /* Otherwise, add it to cross_thread_data.tasks_to_schedule and signal the event-thread to process it */ - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, - "id=%p: scheduling task %p cross-thread for timestamp %llu", - (void *)event_loop, - (void *)task, - (unsigned long long)run_at_nanos); - task->timestamp = run_at_nanos; - bool should_signal_thread = false; - - /* Begin critical section */ - aws_mutex_lock(&impl->cross_thread_data.mutex); - aws_linked_list_push_back(&impl->cross_thread_data.tasks_to_schedule, &task->node); - - /* Signal thread that cross_thread_data has changed (unless it's been signaled already) */ - if (!impl->cross_thread_data.thread_signaled) { - should_signal_thread = true; - impl->cross_thread_data.thread_signaled = true; - } - - aws_mutex_unlock(&impl->cross_thread_data.mutex); - /* End critical section */ - - if (should_signal_thread) { - signal_cross_thread_data_changed(event_loop); - } -} - -static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task) { - s_schedule_task_common(event_loop, task, 0); /* Zero is used to denote "now" tasks */ -} - -static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { - s_schedule_task_common(event_loop, task, run_at_nanos); -} - -static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { - struct kqueue_loop *kqueue_loop = event_loop->impl_data; - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: cancelling task %p", (void *)event_loop, (void *)task); - aws_task_scheduler_cancel_task(&kqueue_loop->thread_data.scheduler, task); -} - -/* Scheduled task that connects aws_io_handle with the kqueue */ -static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_task_status status) { - (void)task; - struct handle_data *handle_data = user_data; - struct aws_event_loop *event_loop = handle_data->event_loop; - struct kqueue_loop *impl = handle_data->event_loop->impl_data; - - impl->thread_data.connected_handle_count++; - - /* if task was cancelled, nothing to do */ - if (status == AWS_TASK_STATUS_CANCELED) { - return; - } - - /* If handle was unsubscribed before this task could execute, nothing to do */ - if (handle_data->state == HANDLE_STATE_UNSUBSCRIBED) { - return; - } - - AWS_ASSERT(handle_data->state == HANDLE_STATE_SUBSCRIBING); - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, "id=%p: subscribing to events on fd %d", (void *)event_loop, handle_data->owner->data.fd); - - /* In order to monitor both reads and writes, kqueue requires you to add two separate kevents. - * If we're adding two separate kevents, but one of those fails, we need to remove the other kevent. - * Therefore we use the EV_RECEIPT flag. This causes kevent() to tell whether each EV_ADD succeeded, - * rather than the usual behavior of telling us about recent events. 
*/ - struct kevent changelist[2]; - AWS_ZERO_ARRAY(changelist); - - int changelist_size = 0; - - if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_READABLE) { - EV_SET( - &changelist[changelist_size++], - handle_data->owner->data.fd, - EVFILT_READ /*filter*/, - EV_ADD | EV_RECEIPT | EV_CLEAR /*flags*/, - 0 /*fflags*/, - 0 /*data*/, - handle_data /*udata*/); - } - if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_WRITABLE) { - EV_SET( - &changelist[changelist_size++], - handle_data->owner->data.fd, - EVFILT_WRITE /*filter*/, - EV_ADD | EV_RECEIPT | EV_CLEAR /*flags*/, - 0 /*fflags*/, - 0 /*data*/, - handle_data /*udata*/); - } - - int num_events = kevent( - impl->kq_fd, - changelist /*changelist*/, - changelist_size /*nchanges*/, - changelist /*eventlist. It's OK to re-use the same memory for changelist input and eventlist output*/, - changelist_size /*nevents*/, - NULL /*timeout*/); - if (num_events == -1) { - goto subscribe_failed; - } - - /* Look through results to see if any failed */ - for (int i = 0; i < num_events; ++i) { - /* Every result should be flagged as error, that's just how EV_RECEIPT works */ - AWS_ASSERT(changelist[i].flags & EV_ERROR); - - /* If a real error occurred, .data contains the error code */ - if (changelist[i].data != 0) { - goto subscribe_failed; - } - } - - /* Success */ - handle_data->state = HANDLE_STATE_SUBSCRIBED; - handle_data->owner->update_io_result = s_update_io_result; - return; - -subscribe_failed: - AWS_LOGF_ERROR( - AWS_LS_IO_EVENT_LOOP, - "id=%p: failed to subscribe to events on fd %d", - (void *)event_loop, - handle_data->owner->data.fd); - /* Remove any related kevents that succeeded */ - for (int i = 0; i < num_events; ++i) { - if (changelist[i].data == 0) { - changelist[i].flags = EV_DELETE; - kevent( - impl->kq_fd, - &changelist[i] /*changelist*/, - 1 /*nchanges*/, - NULL /*eventlist*/, - 0 /*nevents*/, - NULL /*timeout*/); - } - } - - /* We can't return an error code because this was a scheduled task. - * Notify the user of the failed subscription by passing AWS_IO_EVENT_TYPE_ERROR to the callback. */ - handle_data->on_event(event_loop, handle_data->owner, AWS_IO_EVENT_TYPE_ERROR, handle_data->on_event_user_data); -}
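For reference, the EV_RECEIPT handshake used by s_subscribe_task above, distilled into a standalone helper. A sketch only (not part of the patch); it assumes kq and fd are already-valid descriptors:

#include <assert.h>
#include <sys/types.h>
#include <sys/event.h>

/* Registers both read and write filters; returns 0 only if both succeed,
 * otherwise rolls back whichever one did succeed and returns -1. */
static int register_rw(int kq, int fd) {
    struct kevent changes[2];
    EV_SET(&changes[0], fd, EVFILT_READ, EV_ADD | EV_RECEIPT | EV_CLEAR, 0, 0, NULL);
    EV_SET(&changes[1], fd, EVFILT_WRITE, EV_ADD | EV_RECEIPT | EV_CLEAR, 0, 0, NULL);

    /* With EV_RECEIPT, each change comes back as its own "event" describing
     * whether that change was applied, instead of reporting pending I/O. */
    int n = kevent(kq, changes, 2, changes, 2, NULL);
    if (n == -1) {
        return -1;
    }

    int any_failed = 0;
    for (int i = 0; i < n; ++i) {
        assert(changes[i].flags & EV_ERROR); /* EV_RECEIPT always sets EV_ERROR */
        if (changes[i].data != 0) {          /* .data carries an errno, or 0 on success */
            any_failed = 1;
        }
    }
    if (any_failed) {
        for (int i = 0; i < n; ++i) {
            if (changes[i].data == 0) { /* undo the registration that succeeded */
                changes[i].flags = EV_DELETE;
                kevent(kq, &changes[i], 1, NULL, 0, NULL);
            }
        }
        return -1;
    }
    return 0;
}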
-static int s_subscribe_to_io_events( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - aws_event_loop_on_event_fn *on_event, - void *user_data) { - - AWS_ASSERT(event_loop); - AWS_ASSERT(handle->data.fd != -1); - AWS_ASSERT(handle->additional_data == NULL); - AWS_ASSERT(on_event); - /* Must subscribe for read, write, or both */ - AWS_ASSERT(events & (AWS_IO_EVENT_TYPE_READABLE | AWS_IO_EVENT_TYPE_WRITABLE)); - - struct handle_data *handle_data = aws_mem_calloc(event_loop->alloc, 1, sizeof(struct handle_data)); - if (!handle_data) { - return AWS_OP_ERR; - } - - handle_data->owner = handle; - handle_data->event_loop = event_loop; - handle_data->on_event = on_event; - handle_data->on_event_user_data = user_data; - handle_data->events_subscribed = events; - handle_data->state = HANDLE_STATE_SUBSCRIBING; - - handle->additional_data = handle_data; - - /* We schedule a task to perform the actual changes to the kqueue, read on for an explanation why... - * - * kqueue requires separate registrations for read and write events. - * If the user wants to know about both read and write, we need to register once for read and once for write. - * If the first registration succeeds, but the second registration fails, we need to delete the first registration. - * If this all happened outside the event-thread, the successful registration's events could begin processing - * in the brief window of time before the registration is deleted. */ - - aws_task_init(&handle_data->subscribe_task, s_subscribe_task, handle_data, "kqueue_event_loop_subscribe"); - s_schedule_task_now(event_loop, &handle_data->subscribe_task); - - return AWS_OP_SUCCESS; -} - -static void s_free_io_event_resources(void *user_data) { - struct handle_data *handle_data = user_data; - struct kqueue_loop *impl = handle_data->event_loop->impl_data; - - impl->thread_data.connected_handle_count--; - - aws_mem_release(handle_data->event_loop->alloc, handle_data); -} - -static void s_clean_up_handle_data_task(struct aws_task *task, void *user_data, enum aws_task_status status) { - (void)task; - (void)status; - - struct handle_data *handle_data = user_data; - s_free_io_event_resources(handle_data); -} - -static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, "id=%p: un-subscribing from events on fd %d", (void *)event_loop, handle->data.fd); - AWS_ASSERT(handle->additional_data); - struct handle_data *handle_data = handle->additional_data; - struct kqueue_loop *impl = event_loop->impl_data; - - AWS_ASSERT(event_loop == handle_data->event_loop); - - /* If the handle was successfully subscribed to kqueue, then remove it. */ - if (handle_data->state == HANDLE_STATE_SUBSCRIBED) { - struct kevent changelist[2]; - int changelist_size = 0; - - if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_READABLE) { - EV_SET( - &changelist[changelist_size++], - handle_data->owner->data.fd, - EVFILT_READ /*filter*/, - EV_DELETE /*flags*/, - 0 /*fflags*/, - 0 /*data*/, - handle_data /*udata*/); - } - if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_WRITABLE) { - EV_SET( - &changelist[changelist_size++], - handle_data->owner->data.fd, - EVFILT_WRITE /*filter*/, - EV_DELETE /*flags*/, - 0 /*fflags*/, - 0 /*data*/, - handle_data /*udata*/); - } - - kevent(impl->kq_fd, changelist, changelist_size, NULL /*eventlist*/, 0 /*nevents*/, NULL /*timeout*/); - } - - /* Schedule a task to clean up the memory. This is done in a task to prevent the following scenario: - * - While processing a batch of events, some callback unsubscribes another aws_io_handle. - * - One of the other events in this batch belongs to that other aws_io_handle. - * - If the handle_data were already deleted, there would be an access of invalid memory. */ - - aws_task_init( - &handle_data->cleanup_task, s_clean_up_handle_data_task, handle_data, "kqueue_event_loop_clean_up_handle_data"); - aws_event_loop_schedule_task_now(event_loop, &handle_data->cleanup_task); - - handle_data->state = HANDLE_STATE_UNSUBSCRIBED; - handle->additional_data = NULL; - - return AWS_OP_SUCCESS; -} - -static bool s_is_event_thread(struct aws_event_loop *event_loop) { - struct kqueue_loop *impl = event_loop->impl_data; - - aws_thread_id_t *thread_id = aws_atomic_load_ptr(&impl->running_thread_id); - return thread_id && aws_thread_thread_id_equal(*thread_id, aws_thread_current_thread_id()); -}
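The running_thread_id scheme that s_is_event_thread relies on is worth spelling out: aws_thread_id_t is opaque and platform-dependent, so it cannot itself be atomic; instead the code publishes an atomic pointer to it. Roughly the same idea in plain C11 and pthreads (illustrative sketch, not part of the patch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static _Atomic(pthread_t *) s_running_thread_id = NULL;
static pthread_t s_thread_id_storage;

/* Called once from the loop thread after it starts running. */
static void publish_current_thread(void) {
    s_thread_id_storage = pthread_self();
    atomic_store(&s_running_thread_id, &s_thread_id_storage);
}

/* Safe to call from any thread; NULL means "loop not running". */
static bool is_on_loop_thread(void) {
    pthread_t *id = atomic_load(&s_running_thread_id);
    return id && pthread_equal(*id, pthread_self());
}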
-/* Called from thread. - * Takes tasks from tasks_to_schedule and adds them to the scheduler. - */ -static void s_process_tasks_to_schedule(struct aws_event_loop *event_loop, struct aws_linked_list *tasks_to_schedule) { - struct kqueue_loop *impl = event_loop->impl_data; - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: processing cross-thread tasks", (void *)event_loop); - - while (!aws_linked_list_empty(tasks_to_schedule)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(tasks_to_schedule); - struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); - - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, - "id=%p: task %p pulled to event-loop, scheduling now.", - (void *)event_loop, - (void *)task); - /* Timestamp 0 is used to denote "now" tasks */ - if (task->timestamp == 0) { - aws_task_scheduler_schedule_now(&impl->thread_data.scheduler, task); - } else { - aws_task_scheduler_schedule_future(&impl->thread_data.scheduler, task, task->timestamp); - } - } -} - -static void s_process_cross_thread_data(struct aws_event_loop *event_loop) { - struct kqueue_loop *impl = event_loop->impl_data; - - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: notified of cross-thread data to process", (void *)event_loop); - /* If there are tasks to schedule, grab them all out of cross_thread_data.tasks_to_schedule. - * We'll process them later, so that we minimize time spent holding the mutex. */ - struct aws_linked_list tasks_to_schedule; - aws_linked_list_init(&tasks_to_schedule); - - { /* Begin critical section */ - aws_mutex_lock(&impl->cross_thread_data.mutex); - impl->cross_thread_data.thread_signaled = false; - - bool initiate_stop = (impl->cross_thread_data.state == EVENT_THREAD_STATE_STOPPING) && - (impl->thread_data.state == EVENT_THREAD_STATE_RUNNING); - if (AWS_UNLIKELY(initiate_stop)) { - impl->thread_data.state = EVENT_THREAD_STATE_STOPPING; - } - - aws_linked_list_swap_contents(&impl->cross_thread_data.tasks_to_schedule, &tasks_to_schedule); - - aws_mutex_unlock(&impl->cross_thread_data.mutex); - } /* End critical section */ - - s_process_tasks_to_schedule(event_loop, &tasks_to_schedule); -} - -static int s_aws_event_flags_from_kevent(struct kevent *kevent) { - int event_flags = 0; - - if (kevent->flags & EV_ERROR) { - event_flags |= AWS_IO_EVENT_TYPE_ERROR; - } else if (kevent->filter == EVFILT_READ) { - if (kevent->data != 0) { - event_flags |= AWS_IO_EVENT_TYPE_READABLE; - } - - if (kevent->flags & EV_EOF) { - event_flags |= AWS_IO_EVENT_TYPE_CLOSED; - } - } else if (kevent->filter == EVFILT_WRITE) { - if (kevent->data != 0) { - event_flags |= AWS_IO_EVENT_TYPE_WRITABLE; - } - - if (kevent->flags & EV_EOF) { - event_flags |= AWS_IO_EVENT_TYPE_CLOSED; - } - } - - return event_flags; -} - -/** - * This just calls kevent() - * - * We broke this out into its own function so that the stacktrace clearly shows - * what this thread is doing. We've had a lot of cases where users think this - * thread is deadlocked because it's stuck here. We want it to be clear - * that it's doing nothing on purpose. It's waiting for events to happen...
- */ -AWS_NO_INLINE -static int aws_event_loop_listen_for_io_events(int kq_fd, struct kevent kevents[MAX_EVENTS], struct timespec *timeout) { - return kevent(kq_fd, NULL /*changelist*/, 0 /*nchanges*/, kevents /*eventlist*/, MAX_EVENTS /*nevents*/, timeout); -} - -static void s_aws_kqueue_cleanup_aws_lc_thread_local_state(void *user_data) { - (void)user_data; - - aws_cal_thread_clean_up(); -} - -static void aws_event_loop_thread(void *user_data) { - struct aws_event_loop *event_loop = user_data; - AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: main loop started", (void *)event_loop); - struct kqueue_loop *impl = event_loop->impl_data; - - /* set thread id to the event-loop's thread. */ - aws_atomic_store_ptr(&impl->running_thread_id, &impl->thread_created_on.thread_id); - - AWS_ASSERT(impl->thread_data.state == EVENT_THREAD_STATE_READY_TO_RUN); - impl->thread_data.state = EVENT_THREAD_STATE_RUNNING; - - struct kevent kevents[MAX_EVENTS]; - - /* A single aws_io_handle could have two separate kevents if subscribed for both read and write. - * If both the read and write kevents fire in the same loop of the event-thread, - * combine the event-flags and deliver them in a single callback. - * This makes the kqueue_event_loop behave more like the other platform implementations. */ - struct handle_data *io_handle_events[MAX_EVENTS]; - - struct timespec timeout = { - .tv_sec = DEFAULT_TIMEOUT_SEC, - .tv_nsec = 0, - }; - - AWS_LOGF_INFO( - AWS_LS_IO_EVENT_LOOP, - "id=%p: default timeout %ds, and max events to process per tick %d", - (void *)event_loop, - DEFAULT_TIMEOUT_SEC, - MAX_EVENTS); - - aws_thread_current_at_exit(s_aws_kqueue_cleanup_aws_lc_thread_local_state, NULL); - - while (impl->thread_data.state == EVENT_THREAD_STATE_RUNNING) { - int num_io_handle_events = 0; - bool should_process_cross_thread_data = false; - - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, - "id=%p: waiting for a maximum of %ds %lluns", - (void *)event_loop, - (int)timeout.tv_sec, - (unsigned long long)timeout.tv_nsec); - - /* Process kqueue events */ - int num_kevents = aws_event_loop_listen_for_io_events(impl->kq_fd, kevents, &timeout); - - aws_event_loop_register_tick_start(event_loop); - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, "id=%p: wake up with %d events to process.", (void *)event_loop, num_kevents); - if (num_kevents == -1) { - /* Raise an error, in case this is interesting to anyone monitoring, - * and continue on with this loop. We can't process events, - * but we can still process scheduled tasks */ - aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); - - /* Force the cross_thread_data to be processed. - * There might be valuable info in there, like the message to stop the thread. - * It's fine to do this even if nothing has changed, it just costs a mutex lock/unlock. */ - should_process_cross_thread_data = true; - } - - for (int i = 0; i < num_kevents; ++i) { - struct kevent *kevent = &kevents[i]; - - /* Was this event to signal that cross_thread_data has changed? */ - if ((int)kevent->ident == impl->cross_thread_signal_pipe[READ_FD]) { - should_process_cross_thread_data = true; - - /* Drain whatever data was written to the signaling pipe */ - uint32_t read_whatever; - while (read((int)kevent->ident, &read_whatever, sizeof(read_whatever)) > 0) { - } - - continue; - } - - /* Otherwise this was a normal event on a subscribed handle. Figure out which flags to report. 
*/ - int event_flags = s_aws_event_flags_from_kevent(kevent); - if (event_flags == 0) { - continue; - } - - /* Combine flags, in case multiple kevents correspond to one handle. (see notes at top of function) */ - struct handle_data *handle_data = kevent->udata; - if (handle_data->events_this_loop == 0) { - io_handle_events[num_io_handle_events++] = handle_data; - } - handle_data->events_this_loop |= event_flags; - } - - /* Invoke each handle's event callback (unless the handle has been unsubscribed) */ - for (int i = 0; i < num_io_handle_events; ++i) { - struct handle_data *handle_data = io_handle_events[i]; - - if (handle_data->state == HANDLE_STATE_SUBSCRIBED) { - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, - "id=%p: activity on fd %d, invoking handler.", - (void *)event_loop, - handle_data->owner->data.fd); - handle_data->on_event( - event_loop, handle_data->owner, handle_data->events_this_loop, handle_data->on_event_user_data); - } - - handle_data->events_this_loop = 0; - } - - /* Process cross_thread_data */ - if (should_process_cross_thread_data) { - s_process_cross_thread_data(event_loop); - } - - /* Run scheduled tasks */ - uint64_t now_ns = 0; - event_loop->clock(&now_ns); /* If clock fails, now_ns will be 0 and tasks scheduled for a specific time - will not be run. That's ok, we'll handle them next time around. */ - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: running scheduled tasks.", (void *)event_loop); - aws_task_scheduler_run_all(&impl->thread_data.scheduler, now_ns); - - /* Set timeout for next kevent() call. - * If clock fails, or scheduler has no tasks, use default timeout */ - bool use_default_timeout = false; - - int err = event_loop->clock(&now_ns); - if (err) { - use_default_timeout = true; - } - - uint64_t next_run_time_ns; - if (!aws_task_scheduler_has_tasks(&impl->thread_data.scheduler, &next_run_time_ns)) { - - use_default_timeout = true; - } - - if (use_default_timeout) { - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, "id=%p: no more scheduled tasks, using default timeout.", (void *)event_loop); - timeout.tv_sec = DEFAULT_TIMEOUT_SEC; - timeout.tv_nsec = 0; - } else { - /* Convert from timestamp in nanoseconds, to timeout in seconds with nanosecond remainder */ - uint64_t timeout_ns = next_run_time_ns > now_ns ? next_run_time_ns - now_ns : 0; - - uint64_t timeout_remainder_ns = 0; - uint64_t timeout_sec = - aws_timestamp_convert(timeout_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &timeout_remainder_ns); - - if (timeout_sec > LONG_MAX) { /* Check for overflow.
On Darwin, these values are stored as longs */ - timeout_sec = LONG_MAX; - timeout_remainder_ns = 0; - } - - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, - "id=%p: detected more scheduled tasks with the next occurring at " - "%llu using timeout of %ds %lluns.", - (void *)event_loop, - (unsigned long long)timeout_ns, - (int)timeout_sec, - (unsigned long long)timeout_remainder_ns); - timeout.tv_sec = (time_t)(timeout_sec); - timeout.tv_nsec = (long)(timeout_remainder_ns); - } - - aws_event_loop_register_tick_end(event_loop); - } - - AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: exiting main loop", (void *)event_loop); - /* reset to NULL. This should be updated again during destroy before tasks are canceled. */ - aws_atomic_store_ptr(&impl->running_thread_id, NULL); -}
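That closes the deleted QNX kqueue loop. For orientation, this is roughly how an on_event handler is expected to feed its result back through the update_io_result callback that s_subscribe_task installs on each handle. A hedged sketch only: the handler name is hypothetical, and it assumes struct aws_io_handle_io_op_result and the callback member live in the aws/io headers as this series suggests:

#include <aws/io/event_loop.h>
#include <aws/io/io.h>

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

static void s_example_on_event(
    struct aws_event_loop *event_loop,
    struct aws_io_handle *handle,
    int events,
    void *user_data) {
    (void)events;
    (void)user_data;

    uint8_t buffer[512];
    struct aws_io_handle_io_op_result io_op_result;
    memset(&io_op_result, 0, sizeof(io_op_result));

    ssize_t read_val = read(handle->data.fd, buffer, sizeof(buffer));
    if (read_val >= 0) {
        io_op_result.read_bytes = (size_t)read_val;
    } else {
        io_op_result.read_error_code = errno; /* note: the series is still inconsistent
                                               * about raw errno vs aws error codes here */
    }

    /* Hand the result back so the loop can track per-handle I/O status. */
    if (handle->update_io_result) {
        handle->update_io_result(event_loop, handle, &io_op_result);
    }
}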
diff --git a/source/qnx/pipe.c b/source/qnx/pipe.c deleted file mode 100644 index a56790b35..000000000 --- a/source/qnx/pipe.c +++ /dev/null @@ -1,595 +0,0 @@ -/** - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0. - */ - -#include <aws/io/pipe.h> - -#include <aws/io/event_loop.h> - -#ifdef __GLIBC__ -# define __USE_GNU -#endif - -/* TODO: move this detection to CMAKE and a config header */ -#if !defined(COMPAT_MODE) && defined(__GLIBC__) && ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 9) || __GLIBC__ > 2) -# define HAVE_PIPE2 1 -#else -# define HAVE_PIPE2 0 -#endif - -#include <errno.h> -#include <fcntl.h> -#include <unistd.h> - -/* This isn't defined on ancient linux distros (breaking the builds). - * However, if this is a prebuild, we purposely build on an ancient system, but - * we want the kernel calls to still be the same as a modern build since that's likely the target of the application - * calling this code. Just define this if it isn't there already. GlibC and the kernel don't really care how the flag - * gets passed as long as it does. - */ -#ifndef O_CLOEXEC -# define O_CLOEXEC 02000000 -#endif - -struct read_end_impl { - struct aws_allocator *alloc; - struct aws_io_handle handle; - struct aws_event_loop *event_loop; - aws_pipe_on_readable_fn *on_readable_user_callback; - void *on_readable_user_data; - - /* Used in handshake for detecting whether user callback resulted in read-end being cleaned up. - * If clean_up() sees that the pointer is set, the bool it points to will get set true. */ - bool *did_user_callback_clean_up_read_end; - - bool is_subscribed; -}; - -struct pipe_write_request { - struct aws_byte_cursor original_cursor; - struct aws_byte_cursor cursor; /* tracks progress of write */ - size_t num_bytes_written; - aws_pipe_on_write_completed_fn *user_callback; - void *user_data; - struct aws_linked_list_node list_node; - - /* True if the write-end is cleaned up while the user callback is being invoked */ - bool did_user_callback_clean_up_write_end; -}; - -struct write_end_impl { - struct aws_allocator *alloc; - struct aws_io_handle handle; - struct aws_event_loop *event_loop; - struct aws_linked_list write_list; - - /* Valid while invoking user callback on a completed write request. */ - struct pipe_write_request *currently_invoking_write_callback; - - bool is_writable; - - /* Future optimization idea: avoid an allocation on each write by keeping 1 pre-allocated pipe_write_request around - * and re-using it whenever possible */ -}; - -static void s_write_end_on_event( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - void *user_data); - -static int s_translate_posix_error(int err) { - AWS_ASSERT(err); - - switch (err) { - case EPIPE: - return AWS_IO_BROKEN_PIPE; - default: - return AWS_ERROR_SYS_CALL_FAILURE; - } -} - -static int s_raise_posix_error(int err) { - return aws_raise_error(s_translate_posix_error(err)); -} - -AWS_IO_API int aws_open_nonblocking_posix_pipe(int pipe_fds[2]) { - int err; - -#if HAVE_PIPE2 - err = pipe2(pipe_fds, O_NONBLOCK | O_CLOEXEC); - if (err) { - return s_raise_posix_error(errno); - } - - return AWS_OP_SUCCESS; -#else - err = pipe(pipe_fds); - if (err) { - return s_raise_posix_error(errno); - } - - for (int i = 0; i < 2; ++i) { - int flags = fcntl(pipe_fds[i], F_GETFL); - if (flags == -1) { - s_raise_posix_error(errno); - goto error; - } - - flags |= O_NONBLOCK | O_CLOEXEC; - if (fcntl(pipe_fds[i], F_SETFL, flags) == -1) { - s_raise_posix_error(errno); - goto error; - } - } - - return AWS_OP_SUCCESS; -error: - close(pipe_fds[0]); - close(pipe_fds[1]); - return AWS_OP_ERR; -#endif -} - -int aws_pipe_init( - struct aws_pipe_read_end *read_end, - struct aws_event_loop *read_end_event_loop, - struct aws_pipe_write_end *write_end, - struct aws_event_loop *write_end_event_loop, - struct aws_allocator *allocator) { - - AWS_ASSERT(read_end); - AWS_ASSERT(read_end_event_loop); - AWS_ASSERT(write_end); - AWS_ASSERT(write_end_event_loop); - AWS_ASSERT(allocator); - - AWS_ZERO_STRUCT(*read_end); - AWS_ZERO_STRUCT(*write_end); - - struct read_end_impl *read_impl = NULL; - struct write_end_impl *write_impl = NULL; - int err; - - /* Open pipe */ - int pipe_fds[2]; - err = aws_open_nonblocking_posix_pipe(pipe_fds); - if (err) { - return AWS_OP_ERR; - } - - /* Init read-end */ - read_impl = aws_mem_calloc(allocator, 1, sizeof(struct read_end_impl)); - if (!read_impl) { - goto error; - } - - read_impl->alloc = allocator; - read_impl->handle.data.fd = pipe_fds[0]; - read_impl->event_loop = read_end_event_loop; - - /* Init write-end */ - write_impl = aws_mem_calloc(allocator, 1, sizeof(struct write_end_impl)); - if (!write_impl) { - goto error; - } - - write_impl->alloc = allocator; - write_impl->handle.data.fd = pipe_fds[1]; - write_impl->event_loop = write_end_event_loop; - write_impl->is_writable = true; /* Assume pipe is writable to start.
Even if it's not, things shouldn't break */ - aws_linked_list_init(&write_impl->write_list); - - read_end->impl_data = read_impl; - write_end->impl_data = write_impl; - - err = aws_event_loop_subscribe_to_io_events( - write_end_event_loop, &write_impl->handle, AWS_IO_EVENT_TYPE_WRITABLE, s_write_end_on_event, write_end); - if (err) { - goto error; - } - - return AWS_OP_SUCCESS; - -error: - close(pipe_fds[0]); - close(pipe_fds[1]); - - if (read_impl) { - aws_mem_release(allocator, read_impl); - } - - if (write_impl) { - aws_mem_release(allocator, write_impl); - } - - read_end->impl_data = NULL; - write_end->impl_data = NULL; - - return AWS_OP_ERR; -} - -int aws_pipe_clean_up_read_end(struct aws_pipe_read_end *read_end) { - struct read_end_impl *read_impl = read_end->impl_data; - if (!read_impl) { - return aws_raise_error(AWS_IO_BROKEN_PIPE); - } - - if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - if (read_impl->is_subscribed) { - int err = aws_pipe_unsubscribe_from_readable_events(read_end); - if (err) { - return AWS_OP_ERR; - } - } - - /* If the event-handler is invoking a user callback, let it know that the read-end was cleaned up */ - if (read_impl->did_user_callback_clean_up_read_end) { - *read_impl->did_user_callback_clean_up_read_end = true; - } - - close(read_impl->handle.data.fd); - - aws_mem_release(read_impl->alloc, read_impl); - AWS_ZERO_STRUCT(*read_end); - return AWS_OP_SUCCESS; -} - -struct aws_event_loop *aws_pipe_get_read_end_event_loop(const struct aws_pipe_read_end *read_end) { - const struct read_end_impl *read_impl = read_end->impl_data; - if (!read_impl) { - aws_raise_error(AWS_IO_BROKEN_PIPE); - return NULL; - } - - return read_impl->event_loop; -} - -struct aws_event_loop *aws_pipe_get_write_end_event_loop(const struct aws_pipe_write_end *write_end) { - const struct write_end_impl *write_impl = write_end->impl_data; - if (!write_impl) { - aws_raise_error(AWS_IO_BROKEN_PIPE); - return NULL; - } - - return write_impl->event_loop; -} - -int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_buffer, size_t *num_bytes_read) { - AWS_ASSERT(dst_buffer && dst_buffer->buffer); - - struct read_end_impl *read_impl = read_end->impl_data; - if (!read_impl) { - return aws_raise_error(AWS_IO_BROKEN_PIPE); - } - - if (num_bytes_read) { - *num_bytes_read = 0; - } - - size_t num_bytes_to_read = dst_buffer->capacity - dst_buffer->len; - - ssize_t read_val = read(read_impl->handle.data.fd, dst_buffer->buffer + dst_buffer->len, num_bytes_to_read); - - if (read_val < 0) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { - return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); - } - return s_raise_posix_error(errno_value); - } - - /* Success */ - dst_buffer->len += read_val; - - if (num_bytes_read) { - *num_bytes_read = read_val; - } - - return AWS_OP_SUCCESS; -} - -static void s_read_end_on_event( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - void *user_data) { - - (void)event_loop; - (void)handle; - - /* Note that it should be impossible for this to run after read-end has been unsubscribed or cleaned up */ - struct aws_pipe_read_end *read_end = user_data; - struct read_end_impl *read_impl = read_end->impl_data; - AWS_ASSERT(read_impl); - AWS_ASSERT(read_impl->event_loop == event_loop); - AWS_ASSERT(&read_impl->handle == handle); - 
AWS_ASSERT(read_impl->is_subscribed); - AWS_ASSERT(events != 0); - AWS_ASSERT(read_impl->did_user_callback_clean_up_read_end == NULL); - - /* Set up handshake, so we can be informed if the read-end is cleaned up while invoking a user callback */ - bool did_user_callback_clean_up_read_end = false; - read_impl->did_user_callback_clean_up_read_end = &did_user_callback_clean_up_read_end; - - /* If readable event received, tell user to try and read, even if "error" events have also occurred. */ - if (events & AWS_IO_EVENT_TYPE_READABLE) { - read_impl->on_readable_user_callback(read_end, AWS_ERROR_SUCCESS, read_impl->on_readable_user_data); - - if (did_user_callback_clean_up_read_end) { - return; - } - - events &= ~AWS_IO_EVENT_TYPE_READABLE; - } - - if (events) { - /* Check that user didn't unsubscribe in the previous callback */ - if (read_impl->is_subscribed) { - read_impl->on_readable_user_callback(read_end, AWS_IO_BROKEN_PIPE, read_impl->on_readable_user_data); - - if (did_user_callback_clean_up_read_end) { - return; - } - } - } - - read_impl->did_user_callback_clean_up_read_end = NULL; -} - -int aws_pipe_subscribe_to_readable_events( - struct aws_pipe_read_end *read_end, - aws_pipe_on_readable_fn *on_readable, - void *user_data) { - - AWS_ASSERT(on_readable); - - struct read_end_impl *read_impl = read_end->impl_data; - if (!read_impl) { - return aws_raise_error(AWS_IO_BROKEN_PIPE); - } - - if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - if (read_impl->is_subscribed) { - return aws_raise_error(AWS_ERROR_IO_ALREADY_SUBSCRIBED); - } - - read_impl->is_subscribed = true; - read_impl->on_readable_user_callback = on_readable; - read_impl->on_readable_user_data = user_data; - - int err = aws_event_loop_subscribe_to_io_events( - read_impl->event_loop, &read_impl->handle, AWS_IO_EVENT_TYPE_READABLE, s_read_end_on_event, read_end); - if (err) { - read_impl->is_subscribed = false; - read_impl->on_readable_user_callback = NULL; - read_impl->on_readable_user_data = NULL; - - return AWS_OP_ERR; - } - - return AWS_OP_SUCCESS; -} - -int aws_pipe_unsubscribe_from_readable_events(struct aws_pipe_read_end *read_end) { - struct read_end_impl *read_impl = read_end->impl_data; - if (!read_impl) { - return aws_raise_error(AWS_IO_BROKEN_PIPE); - } - - if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - if (!read_impl->is_subscribed) { - return aws_raise_error(AWS_ERROR_IO_NOT_SUBSCRIBED); - } - - int err = aws_event_loop_unsubscribe_from_io_events(read_impl->event_loop, &read_impl->handle); - if (err) { - return AWS_OP_ERR; - } - - read_impl->is_subscribed = false; - read_impl->on_readable_user_callback = NULL; - read_impl->on_readable_user_data = NULL; - - return AWS_OP_SUCCESS; -} - -/* Pop front write request, invoke its callback, and delete it. 
- * Returns whether the callback resulted in the write-end getting cleaned up */ -static bool s_write_end_complete_front_write_request(struct aws_pipe_write_end *write_end, int error_code) { - struct write_end_impl *write_impl = write_end->impl_data; - - AWS_ASSERT(!aws_linked_list_empty(&write_impl->write_list)); - struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list); - struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); - - struct aws_allocator *alloc = write_impl->alloc; - - /* Let the write-end know that a callback is in process, so the write-end can inform the callback - * whether it resulted in clean_up() being called. */ - bool write_end_cleaned_up_during_callback = false; - struct pipe_write_request *prev_invoking_request = write_impl->currently_invoking_write_callback; - write_impl->currently_invoking_write_callback = request; - - if (request->user_callback) { - request->user_callback(write_end, error_code, request->original_cursor, request->user_data); - write_end_cleaned_up_during_callback = request->did_user_callback_clean_up_write_end; - } - - if (!write_end_cleaned_up_during_callback) { - write_impl->currently_invoking_write_callback = prev_invoking_request; - } - - aws_mem_release(alloc, request); - - return write_end_cleaned_up_during_callback; -}
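s_write_end_process_requests, which follows, is the classic drain-until-EAGAIN loop; stripped of the request bookkeeping it reduces to the sketch below (standalone, not part of the patch):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

/* Writes as much of data[0..len) as the nonblocking fd will take.
 * Returns 0 on success or would-block (partial progress in *out_written),
 * -1 on a hard error. */
static int write_all_nonblocking(int fd, const uint8_t *data, size_t len, size_t *out_written) {
    size_t total = 0;
    while (total < len) {
        ssize_t n = write(fd, data + total, len - total);
        if (n < 0) {
            if (errno == EAGAIN || errno == EWOULDBLOCK) {
                break; /* fd not writable; caller resumes on the next writable event */
            }
            *out_written = total;
            return -1;
        }
        total += (size_t)n; /* partial write: loop again for the remainder */
    }
    *out_written = total;
    return 0;
}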
-/* Process write requests as long as the pipe remains writable */ -static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { - struct write_end_impl *write_impl = write_end->impl_data; - AWS_ASSERT(write_impl); - AWS_ASSERT(write_impl->handle.update_io_result); - - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - - while (!aws_linked_list_empty(&write_impl->write_list)) { - struct aws_linked_list_node *node = aws_linked_list_front(&write_impl->write_list); - struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); - - int completed_error_code = AWS_ERROR_SUCCESS; - - if (request->cursor.len > 0) { - ssize_t write_val = write(write_impl->handle.data.fd, request->cursor.ptr, request->cursor.len); - - if (write_val < 0) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { - /* The pipe is no longer writable. Bail out */ - write_impl->is_writable = false; - io_op_result.write_error_code = errno_value; - write_impl->handle.update_io_result(write_impl->event_loop, &write_impl->handle, &io_op_result); - return; - } - - /* A non-recoverable error occurred during this write */ - completed_error_code = s_translate_posix_error(errno_value); - - } else { - aws_byte_cursor_advance(&request->cursor, write_val); - - io_op_result.written_bytes += (size_t)write_val; - - if (request->cursor.len > 0) { - /* There was a partial write, loop again to try and write the rest. */ - continue; - } - } - } - - /* If we got this far in the loop, then the write request is complete. - * Note that the callback may result in the pipe being cleaned up. */ - // TODO Call update. - bool write_end_cleaned_up = s_write_end_complete_front_write_request(write_end, completed_error_code); - if (write_end_cleaned_up) { - /* Bail out! Any remaining requests were canceled during clean_up() */ - return; - } - } -} - -/* Handle events on the write-end's file handle */ -static void s_write_end_on_event( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - void *user_data) { - - (void)event_loop; - (void)handle; - - /* Note that it should be impossible for this to run after write-end has been unsubscribed or cleaned up */ - struct aws_pipe_write_end *write_end = user_data; - struct write_end_impl *write_impl = write_end->impl_data; - AWS_ASSERT(write_impl); - AWS_ASSERT(write_impl->event_loop == event_loop); - AWS_ASSERT(&write_impl->handle == handle); - - /* Only care about the writable event. */ - if ((events & AWS_IO_EVENT_TYPE_WRITABLE) == 0) { - return; - } - - write_impl->is_writable = true; - - s_write_end_process_requests(write_end); -} - -int aws_pipe_write( - struct aws_pipe_write_end *write_end, - struct aws_byte_cursor src_buffer, - aws_pipe_on_write_completed_fn *on_completed, - void *user_data) { - - AWS_ASSERT(src_buffer.ptr); - - struct write_end_impl *write_impl = write_end->impl_data; - if (!write_impl) { - return aws_raise_error(AWS_IO_BROKEN_PIPE); - } - - if (!aws_event_loop_thread_is_callers_thread(write_impl->event_loop)) { - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - struct pipe_write_request *request = aws_mem_calloc(write_impl->alloc, 1, sizeof(struct pipe_write_request)); - if (!request) { - return AWS_OP_ERR; - } - - request->original_cursor = src_buffer; - request->cursor = src_buffer; - request->user_callback = on_completed; - request->user_data = user_data; - - aws_linked_list_push_back(&write_impl->write_list, &request->list_node); - - /* If the pipe is writable, process the request (unless pipe is already in the middle of processing, which could - * happen if this aws_pipe_write() call was made by another write's completion callback) */ - if (write_impl->is_writable && !write_impl->currently_invoking_write_callback) { - s_write_end_process_requests(write_end); - } - - return AWS_OP_SUCCESS; -} - -int aws_pipe_clean_up_write_end(struct aws_pipe_write_end *write_end) { - struct write_end_impl *write_impl = write_end->impl_data; - if (!write_impl) { - return aws_raise_error(AWS_IO_BROKEN_PIPE); - } - - if (!aws_event_loop_thread_is_callers_thread(write_impl->event_loop)) { - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - int err = aws_event_loop_unsubscribe_from_io_events(write_impl->event_loop, &write_impl->handle); - if (err) { - return AWS_OP_ERR; - } - - close(write_impl->handle.data.fd); - - /* Zero out write-end before invoking user callbacks so that it won't work anymore with public functions. */ - AWS_ZERO_STRUCT(*write_end); - - /* If a request callback is currently being invoked, let it know that the write-end was cleaned up */ - if (write_impl->currently_invoking_write_callback) { - write_impl->currently_invoking_write_callback->did_user_callback_clean_up_write_end = true; - } - - /* Force any outstanding write requests to complete with an error status.
*/ - while (!aws_linked_list_empty(&write_impl->write_list)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list); - struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); - if (request->user_callback) { - request->user_callback(NULL, AWS_IO_BROKEN_PIPE, request->original_cursor, request->user_data); - } - aws_mem_release(write_impl->alloc, request); - } - - aws_mem_release(write_impl->alloc, write_impl); - return AWS_OP_SUCCESS; -} diff --git a/source/qnx/shared_library.c b/source/qnx/shared_library.c deleted file mode 100644 index 751c99bc2..000000000 --- a/source/qnx/shared_library.c +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0. - */ - -#include <aws/io/shared_library.h> - -#include <aws/io/logging.h> - -#include <dlfcn.h> - -static const char *s_null = "<NULL>"; -static const char *s_unknown_error = "<unknown error>"; - -int aws_shared_library_init(struct aws_shared_library *library, const char *library_path) { - AWS_ZERO_STRUCT(*library); - - library->library_handle = dlopen(library_path, RTLD_LAZY); - if (library->library_handle == NULL) { - const char *error = dlerror(); - AWS_LOGF_ERROR( - AWS_LS_IO_SHARED_LIBRARY, - "id=%p: Failed to load shared library at path \"%s\" with error: %s", - (void *)library, - library_path ? library_path : s_null, - error ? error : s_unknown_error); - return aws_raise_error(AWS_IO_SHARED_LIBRARY_LOAD_FAILURE); - } - - return AWS_OP_SUCCESS; -} - -void aws_shared_library_clean_up(struct aws_shared_library *library) { - if (library && library->library_handle) { - dlclose(library->library_handle); - library->library_handle = NULL; - } -} - -int aws_shared_library_find_function( - struct aws_shared_library *library, - const char *symbol_name, - aws_generic_function *function_address) { - if (library == NULL || library->library_handle == NULL) { - return aws_raise_error(AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE); - } - - /* - * Suggested work around for (undefined behavior) cast from void * to function pointer - * in POSIX.1-2003 standard, at least according to dlsym man page code sample. - */ - *(void **)(function_address) = dlsym(library->library_handle, symbol_name); - - if (*function_address == NULL) { - const char *error = dlerror(); - AWS_LOGF_ERROR( - AWS_LS_IO_SHARED_LIBRARY, - "id=%p: Failed to find shared library symbol \"%s\" with error: %s", - (void *)library, - symbol_name ? symbol_name : s_null, - error ? error : s_unknown_error); - return aws_raise_error(AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE); - } - - return AWS_OP_SUCCESS; -} diff --git a/source/qnx/socket.c b/source/qnx/socket.c deleted file mode 100644 index 2d8884237..000000000 --- a/source/qnx/socket.c +++ /dev/null @@ -1,2059 +0,0 @@ -/** - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0. - */ - -#include <aws/io/socket.h> - -#include <aws/common/clock.h> -#include <aws/common/condition_variable.h> -#include <aws/common/mutex.h> -#include <aws/common/string.h> -#include <aws/common/uuid.h> - -#include <aws/io/event_loop.h> -#include <aws/io/logging.h> - -#include <arpa/inet.h> -#include <aws/io/io.h> -#include <errno.h> -#include <fcntl.h> -#include <netinet/in.h> /* Required when VSOCK is used */ -#include <netinet/tcp.h> -#include <sys/socket.h> -#include <sys/types.h> -#include <sys/un.h> -#include <unistd.h> - -/* - * On OsX, suppress NoPipe signals via flags to setsockopt() - * On Linux, suppress NoPipe signals via flags to send() - */ -#if defined(__MACH__) -# define NO_SIGNAL_SOCK_OPT SO_NOSIGPIPE -# define NO_SIGNAL_SEND 0 -# define TCP_KEEPIDLE TCP_KEEPALIVE -#else -# define NO_SIGNAL_SEND MSG_NOSIGNAL -#endif
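The NO_SIGNAL_* macros above capture the platform split for suppressing SIGPIPE. A sketch of how they end up being used (illustrative, not part of the patch; it leans on the macros defined above, and sets SO_NOSIGPIPE inline for brevity where the real code would set it once per socket):

#include <sys/types.h>
#include <sys/socket.h>

static ssize_t send_no_sigpipe(int fd, const void *buf, size_t len) {
#if defined(__MACH__)
    /* Apple: suppress SIGPIPE on the socket itself; send() takes no flag here. */
    int set = 1;
    setsockopt(fd, SOL_SOCKET, NO_SIGNAL_SOCK_OPT, &set, sizeof(set));
#endif
    /* Elsewhere NO_SIGNAL_SEND expands to MSG_NOSIGNAL, suppressing it per call. */
    return send(fd, buf, len, NO_SIGNAL_SEND);
}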
-/* This isn't defined on ancient linux distros (breaking the builds). - * However, if this is a prebuild, we purposely build on an ancient system, but - * we want the kernel calls to still be the same as a modern build since that's likely the target of the application - * calling this code. Just define this if it isn't there already. GlibC and the kernel don't really care how the flag - * gets passed as long as it does. - */ -#ifndef O_CLOEXEC -# define O_CLOEXEC 02000000 -#endif - -#ifdef USE_VSOCK -# if defined(__linux__) && defined(AF_VSOCK) -# include <linux/vm_sockets.h> -# else -# error "USE_VSOCK not supported on current platform" -# endif -#endif - -/* other than CONNECTED_READ | CONNECTED_WRITE - * a socket is only in one of these states at a time. */ -enum socket_state { - INIT = 0x01, - CONNECTING = 0x02, - CONNECTED_READ = 0x04, - CONNECTED_WRITE = 0x08, - BOUND = 0x10, - LISTENING = 0x20, - TIMEDOUT = 0x40, - ERROR = 0x80, - CLOSED, -}; - -static int s_convert_domain(enum aws_socket_domain domain) { - switch (domain) { - case AWS_SOCKET_IPV4: - return AF_INET; - case AWS_SOCKET_IPV6: - return AF_INET6; - case AWS_SOCKET_LOCAL: - return AF_UNIX; -#ifdef USE_VSOCK - case AWS_SOCKET_VSOCK: - return AF_VSOCK; -#endif - default: - AWS_ASSERT(0); - return AF_INET; - } -} - -static int s_convert_type(enum aws_socket_type type) { - switch (type) { - case AWS_SOCKET_STREAM: - return SOCK_STREAM; - case AWS_SOCKET_DGRAM: - return SOCK_DGRAM; - default: - AWS_ASSERT(0); - return SOCK_STREAM; - } -} - -static int s_determine_socket_error(int error) { - switch (error) { - case ECONNREFUSED: - return AWS_IO_SOCKET_CONNECTION_REFUSED; - case ECONNRESET: - return AWS_IO_SOCKET_CLOSED; - case ETIMEDOUT: - return AWS_IO_SOCKET_TIMEOUT; - case EHOSTUNREACH: - case ENETUNREACH: - return AWS_IO_SOCKET_NO_ROUTE_TO_HOST; - case EADDRNOTAVAIL: - return AWS_IO_SOCKET_INVALID_ADDRESS; - case ENETDOWN: - return AWS_IO_SOCKET_NETWORK_DOWN; - case ECONNABORTED: - return AWS_IO_SOCKET_CONNECT_ABORTED; - case EADDRINUSE: - return AWS_IO_SOCKET_ADDRESS_IN_USE; - case ENOBUFS: - case ENOMEM: - return AWS_ERROR_OOM; - case EAGAIN: - return AWS_IO_READ_WOULD_BLOCK; - case EMFILE: - case ENFILE: - return AWS_ERROR_MAX_FDS_EXCEEDED; - case ENOENT: - case EINVAL: - return AWS_ERROR_FILE_INVALID_PATH; - case EAFNOSUPPORT: - return AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY; - case EACCES: - return AWS_ERROR_NO_PERMISSION; - default: - return AWS_IO_SOCKET_NOT_CONNECTED; - } -} - -static int s_create_socket(struct aws_socket *sock, const struct aws_socket_options *options) { - - int fd = socket(s_convert_domain(options->domain), s_convert_type(options->type), 0); - int errno_value = errno; /* Always cache errno before potential side-effect */ - - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: initializing with domain %d and type %d", - (void *)sock, - fd, - options->domain, - options->type); - if (fd != -1) { - int flags = fcntl(fd, F_GETFL, 0); - flags |= O_NONBLOCK | O_CLOEXEC; - int success = fcntl(fd, F_SETFL, flags); - (void)success; - sock->io_handle.data.fd = fd; - sock->io_handle.additional_data = NULL; - return aws_socket_set_options(sock, options); - } - - int aws_error = s_determine_socket_error(errno_value); - return aws_raise_error(aws_error); -} - -struct posix_socket_connect_args { - struct aws_task task; - struct aws_allocator *allocator; - struct aws_socket *socket; -}; - -struct posix_socket { - struct aws_linked_list write_queue; - struct aws_linked_list written_queue; - struct aws_task written_task; - struct posix_socket_connect_args *connect_args; - /* Note that only the
posix_socket impl part is refcounted. - * The public aws_socket can be a stack variable and cleaned up synchronously - * (by blocking until the event-loop cleans up the impl part). - * In hindsight, aws_socket should have been heap-allocated and refcounted, but alas */ - struct aws_ref_count internal_refcount; - struct aws_allocator *allocator; - bool written_task_scheduled; - bool currently_subscribed; - bool continue_accept; - bool *close_happened; -}; - -static void s_socket_destroy_impl(void *user_data) { - struct posix_socket *socket_impl = user_data; - aws_mem_release(socket_impl->allocator, socket_impl); -} - -static int s_socket_init( - struct aws_socket *socket, - struct aws_allocator *alloc, - const struct aws_socket_options *options, - int existing_socket_fd) { - AWS_ASSERT(options); - AWS_ZERO_STRUCT(*socket); - - struct posix_socket *posix_socket = aws_mem_calloc(alloc, 1, sizeof(struct posix_socket)); - if (!posix_socket) { - socket->impl = NULL; - return AWS_OP_ERR; - } - - socket->allocator = alloc; - socket->io_handle.data.fd = -1; - socket->state = INIT; - socket->options = *options; - - if (existing_socket_fd < 0) { - int err = s_create_socket(socket, options); - if (err) { - aws_mem_release(alloc, posix_socket); - socket->impl = NULL; - return AWS_OP_ERR; - } - } else { - socket->io_handle = (struct aws_io_handle){ - .data = {.fd = existing_socket_fd}, - .additional_data = NULL, - }; - aws_socket_set_options(socket, options); - } - - aws_linked_list_init(&posix_socket->write_queue); - aws_linked_list_init(&posix_socket->written_queue); - posix_socket->currently_subscribed = false; - posix_socket->continue_accept = false; - aws_ref_count_init(&posix_socket->internal_refcount, posix_socket, s_socket_destroy_impl); - posix_socket->allocator = alloc; - posix_socket->connect_args = NULL; - posix_socket->close_happened = NULL; - socket->impl = posix_socket; - return AWS_OP_SUCCESS; -} - -int aws_socket_init(struct aws_socket *socket, struct aws_allocator *alloc, const struct aws_socket_options *options) { - AWS_ASSERT(options); - return s_socket_init(socket, alloc, options, -1); -} - -void aws_socket_clean_up(struct aws_socket *socket) { - if (!socket->impl) { - /* protect from double clean */ - return; - } - - int fd_for_logging = socket->io_handle.data.fd; /* socket's fd gets reset before final log */ - (void)fd_for_logging; - - if (aws_socket_is_open(socket)) { - AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: is still open, closing...", (void *)socket, fd_for_logging); - aws_socket_close(socket); - } - struct posix_socket *socket_impl = socket->impl; - - if (aws_ref_count_release(&socket_impl->internal_refcount) != 0) { - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: is still pending io letting it dangle and cleaning up later.", - (void *)socket, - fd_for_logging); - } - - AWS_ZERO_STRUCT(*socket); - socket->io_handle.data.fd = -1; -} - -/* Update socket->local_endpoint based on the results of getsockname() */ -static int s_update_local_endpoint(struct aws_socket *socket) { - struct aws_socket_endpoint tmp_endpoint; - AWS_ZERO_STRUCT(tmp_endpoint); - - struct sockaddr_storage address; - AWS_ZERO_STRUCT(address); - socklen_t address_size = sizeof(address); - - if (getsockname(socket->io_handle.data.fd, (struct sockaddr *)&address, &address_size) != 0) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: getsockname() failed with error %d", - (void *)socket, - socket->io_handle.data.fd, 
- errno_value); - int aws_error = s_determine_socket_error(errno_value); - return aws_raise_error(aws_error); - } - - if (address.ss_family == AF_INET) { - struct sockaddr_in *s = (struct sockaddr_in *)&address; - tmp_endpoint.port = ntohs(s->sin_port); - if (inet_ntop(AF_INET, &s->sin_addr, tmp_endpoint.address, sizeof(tmp_endpoint.address)) == NULL) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: inet_ntop() failed with error %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - int aws_error = s_determine_socket_error(errno_value); - return aws_raise_error(aws_error); - } - } else if (address.ss_family == AF_INET6) { - struct sockaddr_in6 *s = (struct sockaddr_in6 *)&address; - tmp_endpoint.port = ntohs(s->sin6_port); - if (inet_ntop(AF_INET6, &s->sin6_addr, tmp_endpoint.address, sizeof(tmp_endpoint.address)) == NULL) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: inet_ntop() failed with error %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - int aws_error = s_determine_socket_error(errno_value); - return aws_raise_error(aws_error); - } - } else if (address.ss_family == AF_UNIX) { - struct sockaddr_un *s = (struct sockaddr_un *)&address; - - /* Ensure there's a null-terminator. - * On some platforms it may be missing when the path gets very long. See: - * https://man7.org/linux/man-pages/man7/unix.7.html#BUGS - * But let's keep it simple, and not deal with that madness until someone demands it. */ - size_t sun_len; - if (aws_secure_strlen(s->sun_path, sizeof(tmp_endpoint.address), &sun_len)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: UNIX domain socket name is too long", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); - } - memcpy(tmp_endpoint.address, s->sun_path, sun_len); -#if USE_VSOCK - } else if (address.ss_family == AF_VSOCK) { - struct sockaddr_vm *s = (struct sockaddr_vm *)&address; - - tmp_endpoint.port = s->svm_port; - - snprintf(tmp_endpoint.address, sizeof(tmp_endpoint.address), "%" PRIu32, s->svm_cid); - return AWS_OP_SUCCESS; -#endif /* USE_VSOCK */ - } else { - AWS_ASSERT(0); - return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); - } - - socket->local_endpoint = tmp_endpoint; - return AWS_OP_SUCCESS; -} - -static void s_on_connection_error(struct aws_socket *socket, int error); - -static int s_on_connection_success(struct aws_socket *socket) { - - struct aws_event_loop *event_loop = socket->event_loop; - struct posix_socket *socket_impl = socket->impl; - - if (socket_impl->currently_subscribed) { - aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); - socket_impl->currently_subscribed = false; - } - - socket->event_loop = NULL; - - int connect_result; - socklen_t result_length = sizeof(connect_result); - - if (getsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_ERROR, &connect_result, &result_length) < 0) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: failed to determine connection error %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - int aws_error = s_determine_socket_error(errno_value); - aws_raise_error(aws_error); - s_on_connection_error(socket, aws_error); - return AWS_OP_ERR; - } - - if (connect_result) { - AWS_LOGF_DEBUG( - 
AWS_LS_IO_SOCKET,
-            "id=%p fd=%d: connection error %d",
-            (void *)socket,
-            socket->io_handle.data.fd,
-            connect_result);
-        int aws_error = s_determine_socket_error(connect_result);
-        aws_raise_error(aws_error);
-        s_on_connection_error(socket, aws_error);
-        return AWS_OP_ERR;
-    }
-
-    AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: connection success", (void *)socket, socket->io_handle.data.fd);
-
-    if (s_update_local_endpoint(socket)) {
-        s_on_connection_error(socket, aws_last_error());
-        return AWS_OP_ERR;
-    }
-
-    socket->state = CONNECTED_WRITE | CONNECTED_READ;
-
-    if (aws_socket_assign_to_event_loop(socket, event_loop)) {
-        AWS_LOGF_ERROR(
-            AWS_LS_IO_SOCKET,
-            "id=%p fd=%d: assignment to event loop %p failed with error %d",
-            (void *)socket,
-            socket->io_handle.data.fd,
-            (void *)event_loop,
-            aws_last_error());
-        s_on_connection_error(socket, aws_last_error());
-        return AWS_OP_ERR;
-    }
-
-    socket->connection_result_fn(socket, AWS_ERROR_SUCCESS, socket->connect_accept_user_data);
-
-    return AWS_OP_SUCCESS;
-}
-
-static void s_on_connection_error(struct aws_socket *socket, int error) {
-    socket->state = ERROR;
-    AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: connection failure", (void *)socket, socket->io_handle.data.fd);
-    if (socket->connection_result_fn) {
-        socket->connection_result_fn(socket, error, socket->connect_accept_user_data);
-    } else if (socket->accept_result_fn) {
-        socket->accept_result_fn(socket, error, NULL, socket->connect_accept_user_data);
-    }
-}
-
-/* the next two callbacks compete based on which one runs first. if s_socket_connect_event
- * comes back first, then we set socket_args->socket = NULL and continue on with the connection.
- * when s_handle_socket_timeout() runs afterwards, it sees socket_args->socket is NULL and just cleans up its memory.
- * s_handle_socket_timeout() will always run, so the memory for socket_connect_args is always cleaned up there. */
-static void s_socket_connect_event(
-    struct aws_event_loop *event_loop,
-    struct aws_io_handle *handle,
-    int events,
-    void *user_data) {
-
-    (void)event_loop;
-    (void)handle;
-
-    AWS_ASSERT(handle->update_io_result);
-    AWS_LOGF_TRACE(
-        AWS_LS_IO_SOCKET, "fd=%d: update I/O results callback: %p", handle->data.fd, (void *)handle->update_io_result);
-
-    struct posix_socket_connect_args *socket_args = (struct posix_socket_connect_args *)user_data;
-    AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "fd=%d: connection activity handler triggered", handle->data.fd);
-
-    if (socket_args->socket) {
-        AWS_LOGF_TRACE(
-            AWS_LS_IO_SOCKET,
-            "id=%p fd=%d: has not timed out yet, proceeding with connection.",
-            (void *)socket_args->socket,
-            handle->data.fd);
-
-        struct posix_socket *socket_impl = socket_args->socket->impl;
-        if (!(events & AWS_IO_EVENT_TYPE_ERROR || events & AWS_IO_EVENT_TYPE_CLOSED) &&
-            (events & AWS_IO_EVENT_TYPE_READABLE || events & AWS_IO_EVENT_TYPE_WRITABLE)) {
-            struct aws_socket *socket = socket_args->socket;
-            socket_args->socket = NULL;
-            socket_impl->connect_args = NULL;
-            s_on_connection_success(socket);
-            struct aws_io_handle_io_op_result io_op_result;
-            memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result));
-            // TODO Update?
-            return;
-        }
-
-        int aws_error = aws_socket_get_error(socket_args->socket);
-        /* we'll get another notification.
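
For context, the deferred-connect check that s_on_connection_success() performs with getsockopt(SO_ERROR) reduces to plain poll() as in this sketch; check_connect_result is an illustrative name, not CRT API:

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

/* Returns 0 if the pending non-blocking connect() on fd finished successfully,
 * otherwise the socket error (e.g. ECONNREFUSED). */
static int check_connect_result(int fd) {
    struct pollfd pfd = {.fd = fd, .events = POLLOUT};
    if (poll(&pfd, 1, -1) < 0) {
        return errno;
    }
    int err = 0;
    socklen_t len = sizeof(err);
    /* SO_ERROR reports (and clears) the result of the deferred connect(). */
    if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0) {
        return errno;
    }
    return err;
}
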
*/ - if (aws_error == AWS_IO_READ_WOULD_BLOCK) { - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: spurious event, waiting for another notification.", - (void *)socket_args->socket, - handle->data.fd); - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; - // handle->update_io_result(event_loop, handle, &io_op_result); - return; - } - - struct aws_socket *socket = socket_args->socket; - socket_args->socket = NULL; - socket_impl->connect_args = NULL; - aws_raise_error(aws_error); - s_on_connection_error(socket, aws_error); - } - - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - // TODO Update? -} - -static void s_handle_socket_timeout(struct aws_task *task, void *args, aws_task_status status) { - (void)task; - (void)status; - - struct posix_socket_connect_args *socket_args = args; - - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "task_id=%p: timeout task triggered, evaluating timeouts.", (void *)task); - /* successful connection will have nulled out connect_args->socket */ - if (socket_args->socket) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: timed out, shutting down.", - (void *)socket_args->socket, - socket_args->socket->io_handle.data.fd); - - socket_args->socket->state = TIMEDOUT; - int error_code = AWS_IO_SOCKET_TIMEOUT; - - if (status == AWS_TASK_STATUS_RUN_READY) { - aws_event_loop_unsubscribe_from_io_events(socket_args->socket->event_loop, &socket_args->socket->io_handle); - } else { - error_code = AWS_IO_EVENT_LOOP_SHUTDOWN; - aws_event_loop_free_io_event_resources(socket_args->socket->event_loop, &socket_args->socket->io_handle); - } - socket_args->socket->event_loop = NULL; - struct posix_socket *socket_impl = socket_args->socket->impl; - socket_impl->currently_subscribed = false; - aws_raise_error(error_code); - struct aws_socket *socket = socket_args->socket; - /*socket close sets socket_args->socket to NULL and - * socket_impl->connect_args to NULL. */ - aws_socket_close(socket); - s_on_connection_error(socket, error_code); - } - - aws_mem_release(socket_args->allocator, socket_args); -} - -/* this is used simply for moving a connect_success callback when the connect finished immediately - * (like for unix domain sockets) into the event loop's thread. Also note, in that case there was no - * timeout task scheduled, so in this case the socket_args are cleaned up. 
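
The ownership protocol described above can be shown as a self-contained toy; fake_socket, s_complete, and s_timeout are hypothetical stand-ins, not CRT types:

#include <stdio.h>
#include <stdlib.h>

struct fake_socket { int fd; };

struct connect_args {
    struct fake_socket *socket; /* NULLed by whichever callback runs first */
};

static void s_complete(struct fake_socket *s) { printf("fd %d connected\n", s->fd); }
static void s_timeout(struct fake_socket *s)  { printf("fd %d timed out\n", s->fd); }

/* Runs when the fd becomes writable. */
static void on_connect_event(struct connect_args *args) {
    if (args->socket != NULL) {
        struct fake_socket *s = args->socket;
        args->socket = NULL; /* claim it so the timeout task becomes a no-op */
        s_complete(s);
    }
    /* args are NOT freed here: the timeout task always runs and owns them. */
}

/* Runs when the timeout task fires (always, even after success). */
static void on_timeout_task(struct connect_args *args) {
    if (args->socket != NULL) {
        s_timeout(args->socket);
        args->socket = NULL;
    }
    free(args); /* single, unconditional cleanup point */
}

int main(void) {
    struct fake_socket s = {.fd = 3};
    struct connect_args *args = malloc(sizeof(*args));
    args->socket = &s;
    on_connect_event(args); /* connect wins the race */
    on_timeout_task(args);  /* timeout task still runs, only frees args */
    return 0;
}
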
*/ -static void s_run_connect_success(struct aws_task *task, void *arg, enum aws_task_status status) { - (void)task; - struct posix_socket_connect_args *socket_args = arg; - - if (socket_args->socket) { - struct posix_socket *socket_impl = socket_args->socket->impl; - if (status == AWS_TASK_STATUS_RUN_READY) { - s_on_connection_success(socket_args->socket); - } else { - aws_raise_error(AWS_IO_SOCKET_CONNECT_ABORTED); - socket_args->socket->event_loop = NULL; - s_on_connection_error(socket_args->socket, AWS_IO_SOCKET_CONNECT_ABORTED); - } - socket_impl->connect_args = NULL; - } - - aws_mem_release(socket_args->allocator, socket_args); -} - -static inline int s_convert_pton_error(int pton_code, int errno_value) { - if (pton_code == 0) { - return AWS_IO_SOCKET_INVALID_ADDRESS; - } - - return s_determine_socket_error(errno_value); -} - -struct socket_address { - union sock_addr_types { - struct sockaddr_in addr_in; - struct sockaddr_in6 addr_in6; - struct sockaddr_un un_addr; -#ifdef USE_VSOCK - struct sockaddr_vm vm_addr; -#endif - } sock_addr_types; -}; - -#ifdef USE_VSOCK -/** Convert a string to a VSOCK CID. Respects the calling convetion of inet_pton: - * 0 on error, 1 on success. */ -static int parse_cid(const char *cid_str, unsigned int *value) { - if (cid_str == NULL || value == NULL) { - errno = EINVAL; - return 0; - } - /* strtoll returns 0 as both error and correct value */ - errno = 0; - /* unsigned long long to handle edge cases in convention explicitly */ - long long cid = strtoll(cid_str, NULL, 10); - if (errno != 0) { - return 0; - } - - /* -1U means any, so it's a valid value, but it needs to be converted to - * unsigned int. */ - if (cid == -1) { - *value = VMADDR_CID_ANY; - return 1; - } - - if (cid < 0 || cid > UINT_MAX) { - errno = ERANGE; - return 0; - } - - /* cast is safe here, edge cases already checked */ - *value = (unsigned int)cid; - return 1; -} -#endif - -int aws_socket_connect( - struct aws_socket *socket, - const struct aws_socket_endpoint *remote_endpoint, - struct aws_event_loop *event_loop, - aws_socket_on_connection_result_fn *on_connection_result, - void *user_data) { - AWS_ASSERT(event_loop); - AWS_ASSERT(!socket->event_loop); - - AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: beginning connect.", (void *)socket, socket->io_handle.data.fd); - - if (socket->event_loop) { - return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); - } - - if (socket->options.type != AWS_SOCKET_DGRAM) { - AWS_ASSERT(on_connection_result); - if (socket->state != INIT) { - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - } else { /* UDP socket */ - /* UDP sockets jump to CONNECT_READ if bind is called first */ - if (socket->state != CONNECTED_READ && socket->state != INIT) { - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - } - - size_t address_strlen; - if (aws_secure_strlen(remote_endpoint->address, AWS_ADDRESS_MAX_LEN, &address_strlen)) { - return AWS_OP_ERR; - } - - if (aws_socket_validate_port_for_connect(remote_endpoint->port, socket->options.domain)) { - return AWS_OP_ERR; - } - - struct socket_address address; - AWS_ZERO_STRUCT(address); - socklen_t sock_size = 0; - int pton_err = 1; - if (socket->options.domain == AWS_SOCKET_IPV4) { - pton_err = inet_pton(AF_INET, remote_endpoint->address, &address.sock_addr_types.addr_in.sin_addr); - address.sock_addr_types.addr_in.sin_port = htons((uint16_t)remote_endpoint->port); - address.sock_addr_types.addr_in.sin_family = AF_INET; - sock_size = 
sizeof(address.sock_addr_types.addr_in); - } else if (socket->options.domain == AWS_SOCKET_IPV6) { - pton_err = inet_pton(AF_INET6, remote_endpoint->address, &address.sock_addr_types.addr_in6.sin6_addr); - address.sock_addr_types.addr_in6.sin6_port = htons((uint16_t)remote_endpoint->port); - address.sock_addr_types.addr_in6.sin6_family = AF_INET6; - sock_size = sizeof(address.sock_addr_types.addr_in6); - } else if (socket->options.domain == AWS_SOCKET_LOCAL) { - address.sock_addr_types.un_addr.sun_family = AF_UNIX; - strncpy(address.sock_addr_types.un_addr.sun_path, remote_endpoint->address, AWS_ADDRESS_MAX_LEN); - sock_size = sizeof(address.sock_addr_types.un_addr); -#ifdef USE_VSOCK - } else if (socket->options.domain == AWS_SOCKET_VSOCK) { - pton_err = parse_cid(remote_endpoint->address, &address.sock_addr_types.vm_addr.svm_cid); - address.sock_addr_types.vm_addr.svm_family = AF_VSOCK; - address.sock_addr_types.vm_addr.svm_port = remote_endpoint->port; - sock_size = sizeof(address.sock_addr_types.vm_addr); -#endif - } else { - AWS_ASSERT(0); - return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); - } - - if (pton_err != 1) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: failed to parse address %s:%u.", - (void *)socket, - socket->io_handle.data.fd, - remote_endpoint->address, - remote_endpoint->port); - return aws_raise_error(s_convert_pton_error(pton_err, errno_value)); - } - - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: connecting to endpoint %s:%u.", - (void *)socket, - socket->io_handle.data.fd, - remote_endpoint->address, - remote_endpoint->port); - - socket->state = CONNECTING; - socket->remote_endpoint = *remote_endpoint; - socket->connect_accept_user_data = user_data; - socket->connection_result_fn = on_connection_result; - - struct posix_socket *socket_impl = socket->impl; - - socket_impl->connect_args = aws_mem_calloc(socket->allocator, 1, sizeof(struct posix_socket_connect_args)); - if (!socket_impl->connect_args) { - return AWS_OP_ERR; - } - - socket_impl->connect_args->socket = socket; - socket_impl->connect_args->allocator = socket->allocator; - - socket_impl->connect_args->task.fn = s_handle_socket_timeout; - socket_impl->connect_args->task.arg = socket_impl->connect_args; - - int error_code = connect(socket->io_handle.data.fd, (struct sockaddr *)&address.sock_addr_types, sock_size); - socket->event_loop = event_loop; - - if (!error_code) { - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: connected immediately, not scheduling timeout.", - (void *)socket, - socket->io_handle.data.fd); - socket_impl->connect_args->task.fn = s_run_connect_success; - /* the subscription for IO will happen once we setup the connection in the task. Since we already - * know the connection succeeded, we don't need to register for events yet. 
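
For reference, the endpoint parsing earlier in this function is plain inet_pton(); a minimal standalone IPv4 example (the address and port values are arbitrary):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(8080);

    /* inet_pton() returns 1 on success, 0 for a malformed address string,
     * and -1 (with errno set) for an unsupported address family. */
    int rc = inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
    printf("inet_pton returned %d\n", rc);
    return rc == 1 ? 0 : 1;
}
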
*/ - aws_event_loop_schedule_task_now(event_loop, &socket_impl->connect_args->task); - } - - if (error_code) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - if (errno_value == EINPROGRESS || errno_value == EALREADY) { - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: connection pending waiting on event-loop notification or timeout.", - (void *)socket, - socket->io_handle.data.fd); - /* cache the timeout task; it is possible for the IO subscription to come back virtually immediately - * and null out the connect args */ - struct aws_task *timeout_task = &socket_impl->connect_args->task; - - socket_impl->currently_subscribed = true; - /* This event is for when the connection finishes. (the fd will flip writable). */ - if (aws_event_loop_subscribe_to_io_events( - event_loop, - &socket->io_handle, - AWS_IO_EVENT_TYPE_WRITABLE, - s_socket_connect_event, - socket_impl->connect_args)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: failed to register with event-loop %p.", - (void *)socket, - socket->io_handle.data.fd, - (void *)event_loop); - socket_impl->currently_subscribed = false; - socket->event_loop = NULL; - goto err_clean_up; - } - - /* schedule a task to run at the connect timeout interval, if this task runs before the connect - * happens, we consider that a timeout. */ - uint64_t timeout = 0; - aws_event_loop_current_clock_time(event_loop, &timeout); - timeout += aws_timestamp_convert( - socket->options.connect_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: scheduling timeout task for %llu.", - (void *)socket, - socket->io_handle.data.fd, - (unsigned long long)timeout); - aws_event_loop_schedule_task_future(event_loop, timeout_task, timeout); - } else { - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: connect failed with error code %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - int aws_error = s_determine_socket_error(errno_value); - aws_raise_error(aws_error); - socket->event_loop = NULL; - socket_impl->currently_subscribed = false; - goto err_clean_up; - } - } - return AWS_OP_SUCCESS; - -err_clean_up: - aws_mem_release(socket->allocator, socket_impl->connect_args); - socket_impl->connect_args = NULL; - return AWS_OP_ERR; -} - -int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint) { - if (socket->state != INIT) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: invalid state for bind operation.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - - size_t address_strlen; - if (aws_secure_strlen(local_endpoint->address, AWS_ADDRESS_MAX_LEN, &address_strlen)) { - return AWS_OP_ERR; - } - - if (aws_socket_validate_port_for_bind(local_endpoint->port, socket->options.domain)) { - return AWS_OP_ERR; - } - - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: binding to %s:%u.", - (void *)socket, - socket->io_handle.data.fd, - local_endpoint->address, - local_endpoint->port); - - struct socket_address address; - AWS_ZERO_STRUCT(address); - socklen_t sock_size = 0; - int pton_err = 1; - if (socket->options.domain == AWS_SOCKET_IPV4) { - pton_err = inet_pton(AF_INET, local_endpoint->address, &address.sock_addr_types.addr_in.sin_addr); - address.sock_addr_types.addr_in.sin_port = htons((uint16_t)local_endpoint->port); - address.sock_addr_types.addr_in.sin_family = AF_INET; - sock_size = sizeof(address.sock_addr_types.addr_in); - } else 
if (socket->options.domain == AWS_SOCKET_IPV6) { - pton_err = inet_pton(AF_INET6, local_endpoint->address, &address.sock_addr_types.addr_in6.sin6_addr); - address.sock_addr_types.addr_in6.sin6_port = htons((uint16_t)local_endpoint->port); - address.sock_addr_types.addr_in6.sin6_family = AF_INET6; - sock_size = sizeof(address.sock_addr_types.addr_in6); - } else if (socket->options.domain == AWS_SOCKET_LOCAL) { - address.sock_addr_types.un_addr.sun_family = AF_UNIX; - strncpy(address.sock_addr_types.un_addr.sun_path, local_endpoint->address, AWS_ADDRESS_MAX_LEN); - sock_size = sizeof(address.sock_addr_types.un_addr); -#ifdef USE_VSOCK - } else if (socket->options.domain == AWS_SOCKET_VSOCK) { - pton_err = parse_cid(local_endpoint->address, &address.sock_addr_types.vm_addr.svm_cid); - address.sock_addr_types.vm_addr.svm_family = AF_VSOCK; - address.sock_addr_types.vm_addr.svm_port = local_endpoint->port; - sock_size = sizeof(address.sock_addr_types.vm_addr); -#endif - } else { - AWS_ASSERT(0); - return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); - } - - if (pton_err != 1) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: failed to parse address %s:%u.", - (void *)socket, - socket->io_handle.data.fd, - local_endpoint->address, - local_endpoint->port); - return aws_raise_error(s_convert_pton_error(pton_err, errno_value)); - } - - if (bind(socket->io_handle.data.fd, (struct sockaddr *)&address.sock_addr_types, sock_size) != 0) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: bind failed with error code %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - - aws_raise_error(s_determine_socket_error(errno_value)); - goto error; - } - - if (s_update_local_endpoint(socket)) { - goto error; - } - - if (socket->options.type == AWS_SOCKET_STREAM) { - socket->state = BOUND; - } else { - /* e.g. UDP is now readable */ - socket->state = CONNECTED_READ; - } - - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: successfully bound to %s:%u", - (void *)socket, - socket->io_handle.data.fd, - socket->local_endpoint.address, - socket->local_endpoint.port); - - return AWS_OP_SUCCESS; - -error: - socket->state = ERROR; - return AWS_OP_ERR; -} - -int aws_socket_get_bound_address(const struct aws_socket *socket, struct aws_socket_endpoint *out_address) { - if (socket->local_endpoint.address[0] == 0) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: Socket has no local address. Socket must be bound first.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - *out_address = socket->local_endpoint; - return AWS_OP_SUCCESS; -} - -int aws_socket_listen(struct aws_socket *socket, int backlog_size) { - if (socket->state != BOUND) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: invalid state for listen operation. 
You must call bind first.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - - int error_code = listen(socket->io_handle.data.fd, backlog_size); - - if (!error_code) { - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, "id=%p fd=%d: successfully listening", (void *)socket, socket->io_handle.data.fd); - socket->state = LISTENING; - return AWS_OP_SUCCESS; - } - - int errno_value = errno; /* Always cache errno before potential side-effect */ - - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: listen failed with error code %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - - socket->state = ERROR; - - return aws_raise_error(s_determine_socket_error(errno_value)); -} - -/* this is called by the event loop handler that was installed in start_accept(). It runs once the FD goes readable, - * accepts as many as it can and then returns control to the event loop. */ -static void s_socket_accept_event( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - void *user_data) { - - (void)event_loop; - - AWS_ASSERT(handle->update_io_result); - - struct aws_socket *socket = user_data; - struct posix_socket *socket_impl = socket->impl; - - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: listening event received: %p", - (void *)socket, - socket->io_handle.data.fd, - (void *)handle->update_io_result); - - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - - if (socket_impl->continue_accept && events & AWS_IO_EVENT_TYPE_READABLE) { - int in_fd = 0; - while (socket_impl->continue_accept && in_fd != -1) { - struct sockaddr_storage in_addr; - socklen_t in_len = sizeof(struct sockaddr_storage); - - in_fd = accept(handle->data.fd, (struct sockaddr *)&in_addr, &in_len); - if (in_fd == -1) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - - if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { - io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; - break; - } - - int aws_error = aws_socket_get_error(socket); - aws_raise_error(aws_error); - s_on_connection_error(socket, aws_error); - io_op_result.read_error_code = aws_error; - break; - } - - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, "id=%p fd=%d: incoming connection", (void *)socket, socket->io_handle.data.fd); - - struct aws_socket *new_sock = aws_mem_acquire(socket->allocator, sizeof(struct aws_socket)); - - if (!new_sock) { - close(in_fd); - s_on_connection_error(socket, aws_last_error()); - continue; - } - - if (s_socket_init(new_sock, socket->allocator, &socket->options, in_fd)) { - aws_mem_release(socket->allocator, new_sock); - s_on_connection_error(socket, aws_last_error()); - continue; - } - - new_sock->local_endpoint = socket->local_endpoint; - new_sock->state = CONNECTED_READ | CONNECTED_WRITE; - uint32_t port = 0; - - /* get the info on the incoming socket's address */ - if (in_addr.ss_family == AF_INET) { - struct sockaddr_in *s = (struct sockaddr_in *)&in_addr; - port = ntohs(s->sin_port); - /* this came from the kernel, a.) it won't fail. b.) even if it does - * its not fatal. come back and add logging later. */ - if (!inet_ntop( - AF_INET, - &s->sin_addr, - new_sock->remote_endpoint.address, - sizeof(new_sock->remote_endpoint.address))) { - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d:. 
Failed to determine remote address.", - (void *)socket, - socket->io_handle.data.fd); - } - new_sock->options.domain = AWS_SOCKET_IPV4; - } else if (in_addr.ss_family == AF_INET6) { - /* this came from the kernel, a.) it won't fail. b.) even if it does - * its not fatal. come back and add logging later. */ - struct sockaddr_in6 *s = (struct sockaddr_in6 *)&in_addr; - port = ntohs(s->sin6_port); - if (!inet_ntop( - AF_INET6, - &s->sin6_addr, - new_sock->remote_endpoint.address, - sizeof(new_sock->remote_endpoint.address))) { - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d:. Failed to determine remote address.", - (void *)socket, - socket->io_handle.data.fd); - } - new_sock->options.domain = AWS_SOCKET_IPV6; - } else if (in_addr.ss_family == AF_UNIX) { - new_sock->remote_endpoint = socket->local_endpoint; - new_sock->options.domain = AWS_SOCKET_LOCAL; - } - - new_sock->remote_endpoint.port = port; - - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: connected to %s:%d, incoming fd %d", - (void *)socket, - socket->io_handle.data.fd, - new_sock->remote_endpoint.address, - new_sock->remote_endpoint.port, - in_fd); - - int flags = fcntl(in_fd, F_GETFL, 0); - - flags |= O_NONBLOCK | O_CLOEXEC; - fcntl(in_fd, F_SETFL, flags); - - bool close_occurred = false; - socket_impl->close_happened = &close_occurred; - socket->accept_result_fn(socket, AWS_ERROR_SUCCESS, new_sock, socket->connect_accept_user_data); - - if (close_occurred) { - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: trying to update: %p", - (void *)socket, - socket->io_handle.data.fd, - (void *)handle->update_io_result); - return; - } - - socket_impl->close_happened = NULL; - } - } - - handle->update_io_result(event_loop, handle, &io_op_result); - - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: finished processing incoming connections, " - "waiting on event-loop notification", - (void *)socket, - socket->io_handle.data.fd); -} - -int aws_socket_start_accept( - struct aws_socket *socket, - struct aws_event_loop *accept_loop, - aws_socket_on_accept_result_fn *on_accept_result, - void *user_data) { - AWS_ASSERT(on_accept_result); - AWS_ASSERT(accept_loop); - - if (socket->event_loop) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: is already assigned to event-loop %p.", - (void *)socket, - socket->io_handle.data.fd, - (void *)socket->event_loop); - return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); - } - - if (socket->state != LISTENING) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: invalid state for start_accept operation. 
You must call listen first.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - - socket->accept_result_fn = on_accept_result; - socket->connect_accept_user_data = user_data; - socket->event_loop = accept_loop; - struct posix_socket *socket_impl = socket->impl; - socket_impl->continue_accept = true; - socket_impl->currently_subscribed = true; - - if (aws_event_loop_subscribe_to_io_events( - socket->event_loop, &socket->io_handle, AWS_IO_EVENT_TYPE_READABLE, s_socket_accept_event, socket)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: failed to subscribe to event-loop %p.", - (void *)socket, - socket->io_handle.data.fd, - (void *)socket->event_loop); - socket_impl->continue_accept = false; - socket_impl->currently_subscribed = false; - socket->event_loop = NULL; - - return AWS_OP_ERR; - } - - return AWS_OP_SUCCESS; -} - -struct stop_accept_args { - struct aws_task task; - struct aws_mutex mutex; - struct aws_condition_variable condition_variable; - struct aws_socket *socket; - int ret_code; - bool invoked; -}; - -static bool s_stop_accept_pred(void *arg) { - struct stop_accept_args *stop_accept_args = arg; - return stop_accept_args->invoked; -} - -static void s_stop_accept_task(struct aws_task *task, void *arg, enum aws_task_status status) { - (void)task; - (void)status; - - struct stop_accept_args *stop_accept_args = arg; - aws_mutex_lock(&stop_accept_args->mutex); - stop_accept_args->ret_code = AWS_OP_SUCCESS; - if (aws_socket_stop_accept(stop_accept_args->socket)) { - stop_accept_args->ret_code = aws_last_error(); - } - stop_accept_args->invoked = true; - aws_condition_variable_notify_one(&stop_accept_args->condition_variable); - aws_mutex_unlock(&stop_accept_args->mutex); -} - -int aws_socket_stop_accept(struct aws_socket *socket) { - if (socket->state != LISTENING) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: is not in a listening state, can't stop_accept.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, "id=%p fd=%d: stopping accepting new connections", (void *)socket, socket->io_handle.data.fd); - - if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { - struct stop_accept_args args = { - .mutex = AWS_MUTEX_INIT, - .condition_variable = AWS_CONDITION_VARIABLE_INIT, - .invoked = false, - .socket = socket, - .ret_code = AWS_OP_SUCCESS, - .task = {.fn = s_stop_accept_task}, - }; - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: stopping accepting new connections from a different thread than " - "the socket is running from. Blocking until it shuts down.", - (void *)socket, - socket->io_handle.data.fd); - /* Look.... I know what I'm doing.... trust me, I'm an engineer. - * We wait on the completion before 'args' goes out of scope. 
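
The same block-until-signaled pattern, sketched with raw pthreads (which aws_mutex and aws_condition_variable roughly wrap on POSIX platforms); all names here are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stack-allocated args are safe to hand to another thread only because the
 * caller blocks until `invoked` flips true, as the stop_accept path does. */
struct blocking_args {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    bool invoked;
};

static void *worker(void *arg) {
    struct blocking_args *args = arg;
    pthread_mutex_lock(&args->mutex);
    args->invoked = true; /* do the real work here, then signal completion */
    pthread_cond_signal(&args->cond);
    pthread_mutex_unlock(&args->mutex);
    return NULL;
}

int main(void) {
    struct blocking_args args = {
        .mutex = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
        .invoked = false,
    };
    pthread_t thread;
    pthread_create(&thread, NULL, worker, &args);

    pthread_mutex_lock(&args.mutex);
    while (!args.invoked) { /* predicate loop guards against spurious wakeups */
        pthread_cond_wait(&args.cond, &args.mutex);
    }
    pthread_mutex_unlock(&args.mutex);
    pthread_join(&thread, NULL);
    printf("worker finished\n");
    return 0;
}
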
- * NOLINTNEXTLINE */ - args.task.arg = &args; - aws_mutex_lock(&args.mutex); - aws_event_loop_schedule_task_now(socket->event_loop, &args.task); - aws_condition_variable_wait_pred(&args.condition_variable, &args.mutex, s_stop_accept_pred, &args); - aws_mutex_unlock(&args.mutex); - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: stop accept task finished running.", - (void *)socket, - socket->io_handle.data.fd); - - if (args.ret_code) { - return aws_raise_error(args.ret_code); - } - return AWS_OP_SUCCESS; - } - - int ret_val = AWS_OP_SUCCESS; - struct posix_socket *socket_impl = socket->impl; - if (socket_impl->currently_subscribed) { - ret_val = aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); - socket_impl->currently_subscribed = false; - socket_impl->continue_accept = false; - socket->event_loop = NULL; - } - - return ret_val; -} - -int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_options *options) { - if (socket->options.domain != options->domain || socket->options.type != options->type) { - return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); - } - - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setting socket options to: keep-alive %d, keep idle %d, keep-alive interval %d, keep-alive probe " - "count %d.", - (void *)socket, - socket->io_handle.data.fd, - (int)options->keepalive, - (int)options->keep_alive_timeout_sec, - (int)options->keep_alive_interval_sec, - (int)options->keep_alive_max_failed_probes); - - socket->options = *options; - -#ifdef NO_SIGNAL_SOCK_OPT - int option_value = 1; - if (AWS_UNLIKELY(setsockopt( - socket->io_handle.data.fd, SOL_SOCKET, NO_SIGNAL_SOCK_OPT, &option_value, sizeof(option_value)))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() for NO_SIGNAL_SOCK_OPT failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - } -#endif /* NO_SIGNAL_SOCK_OPT */ - - int reuse = 1; - if (AWS_UNLIKELY(setsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(int)))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() for SO_REUSEADDR failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - } - size_t network_interface_length = 0; - if (aws_secure_strlen(options->network_interface_name, AWS_NETWORK_INTERFACE_NAME_MAX, &network_interface_length)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: network_interface_name max length must be %d length and NULL terminated", - (void *)socket, - socket->io_handle.data.fd, - AWS_NETWORK_INTERFACE_NAME_MAX); - return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); - } - if (network_interface_length != 0) { -#if defined(SO_BINDTODEVICE) - if (setsockopt( - socket->io_handle.data.fd, - SOL_SOCKET, - SO_BINDTODEVICE, - options->network_interface_name, - network_interface_length)) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() with SO_BINDTODEVICE for \"%s\" failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - options->network_interface_name, - errno_value); - return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); - } -#elif defined(IP_BOUND_IF) - /* - * If SO_BINDTODEVICE is not supported, the alternative is IP_BOUND_IF which requires an index instead - * of a 
name. We are not using this everywhere because this requires 2 system calls instead of 1, and is - * dependent upon the type of sockets, which doesn't support AWS_SOCKET_LOCAL. As a future optimization, we can - * look into caching the result of if_nametoindex. - */ - uint network_interface_index = if_nametoindex(options->network_interface_name); - if (network_interface_index == 0) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: network_interface_name \"%s\" not found. if_nametoindex() failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - options->network_interface_name, - errno_value); - return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); - } - if (options->domain == AWS_SOCKET_IPV6) { - if (setsockopt( - socket->io_handle.data.fd, - IPPROTO_IPV6, - IPV6_BOUND_IF, - &network_interface_index, - sizeof(network_interface_index))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() with IPV6_BOUND_IF for \"%s\" failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - options->network_interface_name, - errno_value); - return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); - } - } else if (setsockopt( - socket->io_handle.data.fd, - IPPROTO_IP, - IP_BOUND_IF, - &network_interface_index, - sizeof(network_interface_index))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() with IP_BOUND_IF for \"%s\" failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - options->network_interface_name, - errno_value); - return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); - } -#else - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: network_interface_name is not supported on this platform.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); -#endif - } - if (options->type == AWS_SOCKET_STREAM && options->domain != AWS_SOCKET_LOCAL) { - if (socket->options.keepalive) { - int keep_alive = 1; - if (AWS_UNLIKELY( - setsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_KEEPALIVE, &keep_alive, sizeof(int)))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() for enabling SO_KEEPALIVE failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - } - } - -#if !defined(__OpenBSD__) - if (socket->options.keep_alive_interval_sec && socket->options.keep_alive_timeout_sec) { - int ival_in_secs = socket->options.keep_alive_interval_sec; - if (AWS_UNLIKELY(setsockopt( - socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPIDLE, &ival_in_secs, sizeof(ival_in_secs)))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() for enabling TCP_KEEPIDLE for TCP failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - } - - ival_in_secs = socket->options.keep_alive_timeout_sec; - if (AWS_UNLIKELY(setsockopt( - socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPINTVL, &ival_in_secs, sizeof(ival_in_secs)))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() for enabling TCP_KEEPINTVL for TCP failed with 
errno %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - } - } - - if (socket->options.keep_alive_max_failed_probes) { - int max_probes = socket->options.keep_alive_max_failed_probes; - if (AWS_UNLIKELY( - setsockopt(socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPCNT, &max_probes, sizeof(max_probes)))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() for enabling TCP_KEEPCNT for TCP failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - } - } -#endif /* __OpenBSD__ */ - } - - return AWS_OP_SUCCESS; -} - -struct socket_write_request { - struct aws_byte_cursor cursor_cpy; - aws_socket_on_write_completed_fn *written_fn; - void *write_user_data; - struct aws_linked_list_node node; - size_t original_buffer_len; - int error_code; -}; - -struct posix_socket_close_args { - struct aws_mutex mutex; - struct aws_condition_variable condition_variable; - struct aws_socket *socket; - bool invoked; - int ret_code; -}; - -static bool s_close_predicate(void *arg) { - struct posix_socket_close_args *close_args = arg; - return close_args->invoked; -} - -static void s_close_task(struct aws_task *task, void *arg, enum aws_task_status status) { - (void)task; - (void)status; - - struct posix_socket_close_args *close_args = arg; - aws_mutex_lock(&close_args->mutex); - close_args->ret_code = AWS_OP_SUCCESS; - - if (aws_socket_close(close_args->socket)) { - close_args->ret_code = aws_last_error(); - } - - close_args->invoked = true; - aws_condition_variable_notify_one(&close_args->condition_variable); - aws_mutex_unlock(&close_args->mutex); -} - -int aws_socket_close(struct aws_socket *socket) { - struct posix_socket *socket_impl = socket->impl; - AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: closing", (void *)socket, socket->io_handle.data.fd); - struct aws_event_loop *event_loop = socket->event_loop; - if (socket->event_loop) { - /* don't freak out on me, this almost never happens, and never occurs inside a channel - * it only gets hit from a listening socket shutting down or from a unit test. */ - if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: closing from a different thread than " - "the socket is running from. 
Blocking until it closes down.", - (void *)socket, - socket->io_handle.data.fd); - /* the only time we allow this kind of thing is when you're a listener.*/ - if (socket->state != LISTENING) { - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - - struct posix_socket_close_args args = { - .mutex = AWS_MUTEX_INIT, - .condition_variable = AWS_CONDITION_VARIABLE_INIT, - .socket = socket, - .ret_code = AWS_OP_SUCCESS, - .invoked = false, - }; - - struct aws_task close_task = { - .fn = s_close_task, - .arg = &args, - }; - - int fd_for_logging = socket->io_handle.data.fd; /* socket's fd gets reset before final log */ - (void)fd_for_logging; - - aws_mutex_lock(&args.mutex); - aws_event_loop_schedule_task_now(socket->event_loop, &close_task); - aws_condition_variable_wait_pred(&args.condition_variable, &args.mutex, s_close_predicate, &args); - aws_mutex_unlock(&args.mutex); - AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: close task completed.", (void *)socket, fd_for_logging); - if (args.ret_code) { - return aws_raise_error(args.ret_code); - } - - return AWS_OP_SUCCESS; - } - - if (socket_impl->currently_subscribed) { - if (socket->state & LISTENING) { - aws_socket_stop_accept(socket); - } else { - int err_code = aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); - - if (err_code) { - return AWS_OP_ERR; - } - } - socket_impl->currently_subscribed = false; - socket->event_loop = NULL; - } - } - - if (socket_impl->close_happened) { - *socket_impl->close_happened = true; - } - - if (socket_impl->connect_args) { - socket_impl->connect_args->socket = NULL; - socket_impl->connect_args = NULL; - } - - if (aws_socket_is_open(socket)) { - close(socket->io_handle.data.fd); - socket->io_handle.data.fd = -1; - socket->state = CLOSED; - - /* ensure callbacks for pending writes fire (in order) before this close function returns */ - - if (socket_impl->written_task_scheduled) { - aws_event_loop_cancel_task(event_loop, &socket_impl->written_task); - } - - while (!aws_linked_list_empty(&socket_impl->written_queue)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue); - struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); - size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; - write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data); - aws_mem_release(socket->allocator, write_request); - } - - while (!aws_linked_list_empty(&socket_impl->write_queue)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->write_queue); - struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); - size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; - write_request->written_fn(socket, AWS_IO_SOCKET_CLOSED, bytes_written, write_request->write_user_data); - aws_mem_release(socket->allocator, write_request); - } - } - - return AWS_OP_SUCCESS; -} - -int aws_socket_shutdown_dir(struct aws_socket *socket, enum aws_channel_direction dir) { - int how = dir == AWS_CHANNEL_DIR_READ ? 
0 : 1; - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, "id=%p fd=%d: shutting down in direction %d", (void *)socket, socket->io_handle.data.fd, dir); - if (shutdown(socket->io_handle.data.fd, how)) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - int aws_error = s_determine_socket_error(errno_value); - return aws_raise_error(aws_error); - } - - if (dir == AWS_CHANNEL_DIR_READ) { - socket->state &= ~CONNECTED_READ; - } else { - socket->state &= ~CONNECTED_WRITE; - } - - return AWS_OP_SUCCESS; -} - -static void s_written_task(struct aws_task *task, void *arg, enum aws_task_status status) { - (void)task; - (void)status; - - struct aws_socket *socket = arg; - struct posix_socket *socket_impl = socket->impl; - - socket_impl->written_task_scheduled = false; - - /* this is to handle a race condition when a callback kicks off a cleanup, or the user decides - * to close the socket based on something they read (SSL validation failed for example). - * if clean_up happens when internal_refcount > 0, socket_impl is kept dangling */ - aws_ref_count_acquire(&socket_impl->internal_refcount); - - /* Notes about weird loop: - * 1) Only process the initial contents of queue when this task is run, - * ignoring any writes queued during delivery. - * If we simply looped until the queue was empty, we could get into a - * synchronous loop of completing and writing and completing and writing... - * and it would be tough for multiple sockets to share an event-loop fairly. - * 2) Check if queue is empty with each iteration. - * If user calls close() from the callback, close() will process all - * nodes in the written_queue, and the queue will be empty when the - * callstack gets back to here. */ - if (!aws_linked_list_empty(&socket_impl->written_queue)) { - struct aws_linked_list_node *stop_after = aws_linked_list_back(&socket_impl->written_queue); - do { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue); - struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); - size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; - write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data); - aws_mem_release(socket_impl->allocator, write_request); - if (node == stop_after) { - break; - } - } while (!aws_linked_list_empty(&socket_impl->written_queue)); - } - - aws_ref_count_release(&socket_impl->internal_refcount); -} - -/* this gets called in two scenarios. - * 1st scenario, someone called aws_socket_write() and we want to try writing now, so an error can be returned - * immediately if something bad has happened to the socket. In this case, `parent_request` is set. - * 2nd scenario, the event loop notified us that the socket went writable. 
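
The bounded drain in s_written_task above reduces to this toy snapshot walk over a singly-linked queue; the node type and printf stand in for the CRT's aws_linked_list and the completion callbacks:

#include <stdio.h>

struct node { struct node *next; int value; };

/* Pop and complete only the nodes present when the drain started; anything a
 * completion callback appends during iteration waits for the next run. */
static void drain_snapshot(struct node **head, struct node *tail) {
    struct node *stop_after = tail;
    while (*head != NULL) {
        struct node *n = *head;
        *head = n->next;
        printf("completed %d\n", n->value);
        if (n == stop_after) {
            break;
        }
    }
}

int main(void) {
    struct node c = {NULL, 3}, b = {&c, 2}, a = {&b, 1};
    struct node *head = &a;
    drain_snapshot(&head, &b); /* snapshot ends at b; c stays queued */
    printf("remaining: %d\n", head ? head->value : -1);
    return 0;
}
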
In this case `parent_request` is NULL */
-static int s_process_socket_write_requests(struct aws_socket *socket, struct socket_write_request *parent_request) {
-    struct posix_socket *socket_impl = socket->impl;
-
-    AWS_ASSERT(socket->io_handle.update_io_result);
-
-    if (parent_request) {
-        AWS_LOGF_TRACE(
-            AWS_LS_IO_SOCKET,
-            "id=%p fd=%d: processing write requests, called from aws_socket_write",
-            (void *)socket,
-            socket->io_handle.data.fd);
-    } else {
-        AWS_LOGF_TRACE(
-            AWS_LS_IO_SOCKET,
-            "id=%p fd=%d: processing write requests, invoked by the event-loop",
-            (void *)socket,
-            socket->io_handle.data.fd);
-    }
-
-    bool purge = false;
-    int aws_error = AWS_OP_SUCCESS;
-    bool parent_request_failed = false;
-    bool pushed_to_written_queue = false;
-
-    struct aws_io_handle_io_op_result io_op_result;
-    memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result));
-
-    /* if a close call happens in the middle, this queue will have been cleaned out from under us. */
-    while (!aws_linked_list_empty(&socket_impl->write_queue)) {
-        struct aws_linked_list_node *node = aws_linked_list_front(&socket_impl->write_queue);
-        struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node);
-
-        AWS_LOGF_TRACE(
-            AWS_LS_IO_SOCKET,
-            "id=%p fd=%d: dequeued write request of size %llu, remaining to write %llu",
-            (void *)socket,
-            socket->io_handle.data.fd,
-            (unsigned long long)write_request->original_buffer_len,
-            (unsigned long long)write_request->cursor_cpy.len);
-
-        ssize_t written = send(
-            socket->io_handle.data.fd, write_request->cursor_cpy.ptr, write_request->cursor_cpy.len, NO_SIGNAL_SEND);
-        int errno_value = errno; /* Always cache errno before potential side-effect */
-
-        AWS_LOGF_TRACE(
-            AWS_LS_IO_SOCKET,
-            "id=%p fd=%d: send() returned %d",
-            (void *)socket,
-            socket->io_handle.data.fd,
-            (int)written);
-
-        if (written < 0) {
-            if (errno_value == EAGAIN) {
-                AWS_LOGF_TRACE(
-                    AWS_LS_IO_SOCKET, "id=%p fd=%d: returned would block", (void *)socket, socket->io_handle.data.fd);
-                io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; /* TODO Add AWS_IO_WRITE_EAGAIN code.
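
Reduced to a standalone helper, the partial-write handling above looks roughly like this; send_some is an illustrative name, and MSG_NOSIGNAL is the Linux spelling that the NO_SIGNAL_SEND macro abstracts:

#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Send as much as the kernel will take. A return short of len means the
 * socket buffer filled (EAGAIN) and the remainder must stay queued until the
 * event loop reports the fd writable again. Returns -1 on a real error. */
static ssize_t send_some(int fd, const unsigned char *buf, size_t len) {
    size_t total = 0;
    while (total < len) {
        ssize_t n = send(fd, buf + total, len - total, MSG_NOSIGNAL);
        if (n < 0) {
            if (errno == EAGAIN || errno == EWOULDBLOCK) {
                break; /* try again on the next writable event */
            }
            return -1;
        }
        total += (size_t)n;
    }
    return (ssize_t)total;
}
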
*/ - break; - } - - if (errno_value == EPIPE) { - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: already closed before write", - (void *)socket, - socket->io_handle.data.fd); - aws_error = AWS_IO_SOCKET_CLOSED; - aws_raise_error(aws_error); - purge = true; - io_op_result.write_error_code = aws_error; - break; - } - - purge = true; - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: write error with error code %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - aws_error = s_determine_socket_error(errno_value); - aws_raise_error(aws_error); - io_op_result.write_error_code = aws_error; - break; - } - - io_op_result.written_bytes += (size_t)written; - - size_t remaining_to_write = write_request->cursor_cpy.len; - - aws_byte_cursor_advance(&write_request->cursor_cpy, (size_t)written); - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: remaining write request to write %llu", - (void *)socket, - socket->io_handle.data.fd, - (unsigned long long)write_request->cursor_cpy.len); - - if ((size_t)written == remaining_to_write) { - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, "id=%p fd=%d: write request completed", (void *)socket, socket->io_handle.data.fd); - - aws_linked_list_remove(node); - write_request->error_code = AWS_ERROR_SUCCESS; - aws_linked_list_push_back(&socket_impl->written_queue, node); - pushed_to_written_queue = true; - } - } - - if (purge) { - while (!aws_linked_list_empty(&socket_impl->write_queue)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->write_queue); - struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); - - /* If this fn was invoked directly from aws_socket_write(), don't invoke the error callback - * as the user will be able to rely on the return value from aws_socket_write() */ - if (write_request == parent_request) { - parent_request_failed = true; - aws_mem_release(socket->allocator, write_request); - } else { - write_request->error_code = aws_error; - aws_linked_list_push_back(&socket_impl->written_queue, node); - pushed_to_written_queue = true; - } - } - } - - if (pushed_to_written_queue && !socket_impl->written_task_scheduled) { - socket_impl->written_task_scheduled = true; - aws_task_init(&socket_impl->written_task, s_written_task, socket, "socket_written_task"); - aws_event_loop_schedule_task_now(socket->event_loop, &socket_impl->written_task); - } - - socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result); - - /* Only report error if aws_socket_write() invoked this function and its write_request failed */ - if (!parent_request_failed) { - return AWS_OP_SUCCESS; - } - - aws_raise_error(aws_error); - return AWS_OP_ERR; -} - -static void s_on_socket_io_event( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - void *user_data) { - (void)event_loop; - (void)handle; - struct aws_socket *socket = user_data; - struct posix_socket *socket_impl = socket->impl; - - /* this is to handle a race condition when an error kicks off a cleanup, or the user decides - * to close the socket based on something they read (SSL validation failed for example). - * if clean_up happens when internal_refcount > 0, socket_impl is kept dangling but currently - * subscribed is set to false. */ - aws_ref_count_acquire(&socket_impl->internal_refcount); - - /* NOTE: READABLE|WRITABLE|HANG_UP events might arrive simultaneously - * (e.g. peer sends last few bytes and immediately hangs up). 
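
That simultaneous-arrival case maps onto plain poll() as in this sketch, which assumes a non-blocking fd; handle_revents is an illustrative name:

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static void handle_revents(int fd, short revents) {
    if (revents & POLLIN) {
        char buf[512];
        ssize_t n;
        /* Drain pending bytes first so a simultaneous hangup does not
         * discard the peer's final data. */
        while ((n = read(fd, buf, sizeof(buf))) > 0) {
            printf("read %zd bytes\n", n);
        }
    }
    if (revents & (POLLHUP | POLLERR)) {
        close(fd);
    }
}
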
- * Notify user of READABLE|WRITABLE events first, so they try to read any remaining bytes. */ - - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - - if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_READABLE) { - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is readable", (void *)socket, socket->io_handle.data.fd); - if (socket->readable_fn) { - socket->readable_fn(socket, AWS_OP_SUCCESS, socket->readable_user_data); - } - } - /* if socket closed in between these branches, the currently_subscribed will be false and socket_impl will not - * have been cleaned up, so this next branch is safe. */ - if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_WRITABLE) { - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is writable", (void *)socket, socket->io_handle.data.fd); - s_process_socket_write_requests(socket, NULL); - } - - if (events & AWS_IO_EVENT_TYPE_REMOTE_HANG_UP || events & AWS_IO_EVENT_TYPE_CLOSED) { - aws_raise_error(AWS_IO_SOCKET_CLOSED); - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: closed remotely", (void *)socket, socket->io_handle.data.fd); - if (socket->readable_fn) { - socket->readable_fn(socket, AWS_IO_SOCKET_CLOSED, socket->readable_user_data); - } - goto end_check; - } - - if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_ERROR) { - int aws_error = aws_socket_get_error(socket); - aws_raise_error(aws_error); - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, "id=%p fd=%d: error event occurred", (void *)socket, socket->io_handle.data.fd); - if (socket->readable_fn) { - socket->readable_fn(socket, aws_error, socket->readable_user_data); - } - goto end_check; - } - -end_check: - aws_ref_count_release(&socket_impl->internal_refcount); - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "====== s_on_socket_io_event"); -} - -int aws_socket_assign_to_event_loop(struct aws_socket *socket, struct aws_event_loop *event_loop) { - if (!socket->event_loop) { - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: assigning to event loop %p", - (void *)socket, - socket->io_handle.data.fd, - (void *)event_loop); - socket->event_loop = event_loop; - struct posix_socket *socket_impl = socket->impl; - socket_impl->currently_subscribed = true; - if (aws_event_loop_subscribe_to_io_events( - event_loop, - &socket->io_handle, - AWS_IO_EVENT_TYPE_WRITABLE | AWS_IO_EVENT_TYPE_READABLE, - s_on_socket_io_event, - socket)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: assigning to event loop %p failed with error %d", - (void *)socket, - socket->io_handle.data.fd, - (void *)event_loop, - aws_last_error()); - socket_impl->currently_subscribed = false; - socket->event_loop = NULL; - return AWS_OP_ERR; - } - - return AWS_OP_SUCCESS; - } - - return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); -} - -struct aws_event_loop *aws_socket_get_event_loop(struct aws_socket *socket) { - return socket->event_loop; -} - -int aws_socket_subscribe_to_readable_events( - struct aws_socket *socket, - aws_socket_on_readable_fn *on_readable, - void *user_data) { - - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, " id=%p fd=%d: subscribing to readable events", (void *)socket, socket->io_handle.data.fd); - if (!(socket->state & CONNECTED_READ)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: can't subscribe to readable events since the socket is not connected", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); - } - - if (socket->readable_fn) { - AWS_LOGF_ERROR( - 
AWS_LS_IO_SOCKET, - "id=%p fd=%d: can't subscribe to readable events since it is already subscribed", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_ERROR_IO_ALREADY_SUBSCRIBED); - } - - AWS_ASSERT(on_readable); - socket->readable_user_data = user_data; - socket->readable_fn = on_readable; - - return AWS_OP_SUCCESS; -} - -int aws_socket_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read) { - AWS_ASSERT(amount_read); - - if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: cannot read from a different thread than event loop %p", - (void *)socket, - socket->io_handle.data.fd, - (void *)socket->event_loop); - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - if (!(socket->state & CONNECTED_READ)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: cannot read because it is not connected", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); - } - - ssize_t read_val = read(socket->io_handle.data.fd, buffer->buffer + buffer->len, buffer->capacity - buffer->len); - int errno_value = errno; /* Always cache errno before potential side-effect */ - - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, "id=%p fd=%d: read of %d", (void *)socket, socket->io_handle.data.fd, (int)read_val); - - if (read_val > 0) { - *amount_read = (size_t)read_val; - buffer->len += *amount_read; - return AWS_OP_SUCCESS; - } - - /* read_val of 0 means EOF which we'll treat as AWS_IO_SOCKET_CLOSED */ - if (read_val == 0) { - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, "id=%p fd=%d: zero read, socket is closed", (void *)socket, socket->io_handle.data.fd); - *amount_read = 0; - - if (buffer->capacity - buffer->len > 0) { - return aws_raise_error(AWS_IO_SOCKET_CLOSED); - } - - return AWS_OP_SUCCESS; - } - -#if defined(EWOULDBLOCK) - if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { -#else - if (errno_value == EAGAIN) { -#endif - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: read would block", (void *)socket, socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); - } - - if (errno_value == EPIPE || errno_value == ECONNRESET) { - AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: socket is closed.", (void *)socket, socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_CLOSED); - } - - if (errno_value == ETIMEDOUT) { - AWS_LOGF_ERROR(AWS_LS_IO_SOCKET, "id=%p fd=%d: socket timed out.", (void *)socket, socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_TIMEOUT); - } - - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: read failed with error: %s", - (void *)socket, - socket->io_handle.data.fd, - strerror(errno_value)); - return aws_raise_error(s_determine_socket_error(errno_value)); -} - -int aws_socket_write( - struct aws_socket *socket, - const struct aws_byte_cursor *cursor, - aws_socket_on_write_completed_fn *written_fn, - void *user_data) { - if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - if (!(socket->state & CONNECTED_WRITE)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: cannot write to because it is not connected", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); - } - - AWS_ASSERT(written_fn); - struct posix_socket *socket_impl = socket->impl; - struct socket_write_request *write_request = - aws_mem_calloc(socket->allocator, 1, 
sizeof(struct socket_write_request)); - - if (!write_request) { - return AWS_OP_ERR; - } - - write_request->original_buffer_len = cursor->len; - write_request->written_fn = written_fn; - write_request->write_user_data = user_data; - write_request->cursor_cpy = *cursor; - aws_linked_list_push_back(&socket_impl->write_queue, &write_request->node); - - return s_process_socket_write_requests(socket, write_request); -} - -int aws_socket_get_error(struct aws_socket *socket) { - int connect_result; - socklen_t result_length = sizeof(connect_result); - - if (getsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_ERROR, &connect_result, &result_length) < 0) { - return s_determine_socket_error(errno); - } - - if (connect_result) { - return s_determine_socket_error(connect_result); - } - - return AWS_OP_SUCCESS; -} - -bool aws_socket_is_open(struct aws_socket *socket) { - return socket->io_handle.data.fd >= 0; -} - -void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint) { - struct aws_uuid uuid; - AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); - char uuid_str[AWS_UUID_STR_LEN] = {0}; - struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); - AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); - snprintf(endpoint->address, sizeof(endpoint->address), "testsock" PRInSTR ".sock", AWS_BYTE_BUF_PRI(uuid_buf)); -} From ad1262d1dd496ef934309ee36f986d2dd693e757 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 27 Aug 2024 14:51:56 -0700 Subject: [PATCH 06/39] fixup --- CMakeLists.txt | 2 ++ source/bsd/kqueue_event_loop.c | 16 ++++----- source/posix/socket.c | 59 +++++++++++++++++---------------- source/socket_channel_handler.c | 27 ++++----------- tests/socket_test.c | 2 -- 5 files changed, 45 insertions(+), 61 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5fa20037c..36bdd4d96 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -127,6 +127,8 @@ elseif (APPLE) #No choice on TLS for apple, darwinssl will always be used. list(APPEND PLATFORM_LIBS "-framework Security") + set(EVENT_LOOP_DEFINE "KQUEUE") + # FIXME For debugging. set(EVENT_LOOP_DEFINE "ON_EVENT_WITH_RESULT") elseif (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" OR CMAKE_SYSTEM_NAME STREQUAL "NetBSD" OR CMAKE_SYSTEM_NAME STREQUAL "OpenBSD") diff --git a/source/bsd/kqueue_event_loop.c b/source/bsd/kqueue_event_loop.c index 6374eea6e..cbd45264a 100644 --- a/source/bsd/kqueue_event_loop.c +++ b/source/bsd/kqueue_event_loop.c @@ -131,6 +131,9 @@ struct aws_event_loop_vtable s_kqueue_vtable = { .is_on_callers_thread = s_is_event_thread, }; +/** + * FIXME kqueue is used for debugging/demonstration purposes. It's going to be reverted. + */ static void s_update_io_result( struct aws_event_loop *event_loop, struct aws_io_handle *handle, @@ -150,13 +153,14 @@ static void s_update_io_result( io_op_result->write_error_code, aws_error_str(io_op_result->write_error_code), io_op_result->written_bytes); + + /* Here a handle IO status should be updated. It'll be used from the event loop. */ } struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, const struct aws_event_loop_options *options) { AWS_ASSERT(alloc); - // FIXME Remove this assert. 
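Aside: the fixup above leaves open what "a handle IO status should be updated" means for the kqueue backend. As a minimal sketch only (picking up the parameters of s_update_io_result a few lines up, and assuming a hypothetical last_io_op_result field on the loop's private handle_data; that field name is illustrative, not from this series), the backend could simply cache the reported result per handle:

    /* Cache the latest reported result on the loop's per-handle bookkeeping so
     * the next iteration of the event loop thread can inspect it. */
    struct handle_data *handle_data = handle->additional_data;
    handle_data->last_io_op_result = *io_op_result; /* plain struct copy */

With kqueue's edge-triggered EVFILT_READ/EVFILT_WRITE filters, a stored AWS_IO_READ_WOULD_BLOCK mostly confirms the fd was fully drained; no explicit rearm call is needed there, which is why this backend only logs for demonstration.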
AWS_ASSERT(clock); AWS_ASSERT(options); AWS_ASSERT(options->clock); @@ -955,15 +959,7 @@ static void aws_event_loop_thread(void *user_data) { handle_data->on_event( event_loop, handle_data->owner, handle_data->events_this_loop, handle_data->on_event_user_data); - // AWS_LOGF_INFO( - // AWS_LS_IO_EVENT_LOOP, - // "id=%p: on_event completion status: read: status %d (%s), %lu bytes; write: status - // %d (%s), %lu " "bytes", (void *)event_loop, io_op_result.read_error_code, - // aws_error_str(io_op_result.read_error_code), - // io_op_result.read_bytes, - // io_op_result.write_error_code, - // aws_error_str(io_op_result.write_error_code), - // io_op_result.written_bytes); + /* It's possible to check for IO result here. */ } handle_data->events_this_loop = 0; diff --git a/source/posix/socket.c b/source/posix/socket.c index 2d8884237..3f1446b6b 100644 --- a/source/posix/socket.c +++ b/source/posix/socket.c @@ -448,10 +448,6 @@ static void s_socket_connect_event( (void)event_loop; (void)handle; - AWS_ASSERT(handle->update_io_result); - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, "fd=%d: update I/O results callback: %p", handle->data.fd, (void *)handle->update_io_result); - struct posix_socket_connect_args *socket_args = (struct posix_socket_connect_args *)user_data; AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "fd=%d: connection activity handler triggered ", handle->data.fd); @@ -469,9 +465,6 @@ static void s_socket_connect_event( socket_args->socket = NULL; socket_impl->connect_args = NULL; s_on_connection_success(socket); - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - // TODO Update? return; } @@ -483,10 +476,15 @@ static void s_socket_connect_event( "id=%p fd=%d: spurious event, waiting for another notification.", (void *)socket_args->socket, handle->data.fd); + +#if AWS_USE_ON_EVENT_WITH_RESULT struct aws_io_handle_io_op_result io_op_result; memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; - // handle->update_io_result(event_loop, handle, &io_op_result); + AWS_ASSERT(handle->update_io_result); + handle->update_io_result(event_loop, handle, &io_op_result); +#endif + return; } @@ -496,10 +494,6 @@ static void s_socket_connect_event( aws_raise_error(aws_error); s_on_connection_error(socket, aws_error); } - - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - // TODO Update? 
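The guarded reporting pattern used above (zero the struct, set a would-block code, invoke update_io_result) recurs at every call site touched by this series. A small helper could cut the duplication; this is a sketch under the same AWS_USE_ON_EVENT_WITH_RESULT assumptions, not code from the patch:

    /* Report a would-block read so an edge-triggered backend knows the fd is
     * drained and can be re-armed. No-op when feedback is compiled out or the
     * event loop did not install a callback. */
    static void s_report_read_would_block(struct aws_event_loop *event_loop, struct aws_io_handle *handle) {
    #if AWS_USE_ON_EVENT_WITH_RESULT
        if (handle->update_io_result) {
            struct aws_io_handle_io_op_result io_op_result;
            AWS_ZERO_STRUCT(io_op_result);
            io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK;
            handle->update_io_result(event_loop, handle, &io_op_result);
        }
    #else
        (void)event_loop;
        (void)handle;
    #endif
    }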
} static void s_handle_socket_timeout(struct aws_task *task, void *args, aws_task_status status) { @@ -964,20 +958,16 @@ static void s_socket_accept_event( (void)event_loop; - AWS_ASSERT(handle->update_io_result); - struct aws_socket *socket = user_data; struct posix_socket *socket_impl = socket->impl; AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: listening event received: %p", - (void *)socket, - socket->io_handle.data.fd, - (void *)handle->update_io_result); + AWS_LS_IO_SOCKET, "id=%p fd=%d: listening event received", (void *)socket, socket->io_handle.data.fd); +#if AWS_USE_ON_EVENT_WITH_RESULT struct aws_io_handle_io_op_result io_op_result; memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); +#endif if (socket_impl->continue_accept && events & AWS_IO_EVENT_TYPE_READABLE) { int in_fd = 0; @@ -990,14 +980,18 @@ static void s_socket_accept_event( int errno_value = errno; /* Always cache errno before potential side-effect */ if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { +#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; +#endif break; } int aws_error = aws_socket_get_error(socket); aws_raise_error(aws_error); s_on_connection_error(socket, aws_error); +#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = aws_error; +#endif break; } @@ -1083,12 +1077,6 @@ static void s_socket_accept_event( socket->accept_result_fn(socket, AWS_ERROR_SUCCESS, new_sock, socket->connect_accept_user_data); if (close_occurred) { - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: trying to update: %p", - (void *)socket, - socket->io_handle.data.fd, - (void *)handle->update_io_result); return; } @@ -1096,7 +1084,10 @@ static void s_socket_accept_event( } } +#if AWS_USE_ON_EVENT_WITH_RESULT + AWS_ASSERT(handle->update_io_result); handle->update_io_result(event_loop, handle, &io_op_result); +#endif AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, @@ -1647,8 +1638,6 @@ static void s_written_task(struct aws_task *task, void *arg, enum aws_task_statu static int s_process_socket_write_requests(struct aws_socket *socket, struct socket_write_request *parent_request) { struct posix_socket *socket_impl = socket->impl; - AWS_ASSERT(socket->io_handle.update_io_result); - if (parent_request) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, @@ -1668,8 +1657,10 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc bool parent_request_failed = false; bool pushed_to_written_queue = false; +#if AWS_USE_ON_EVENT_WITH_RESULT struct aws_io_handle_io_op_result io_op_result; memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); +#endif /* if a close call happens in the middle, this queue will have been cleaned out from under us. */ while (!aws_linked_list_empty(&socket_impl->write_queue)) { @@ -1699,7 +1690,9 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc if (errno_value == EAGAIN) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: returned would block", (void *)socket, socket->io_handle.data.fd); - io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; /* TODO Add AWS_IO_WRITE_EAGAIN ode. */ +#if AWS_USE_ON_EVENT_WITH_RESULT + io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; /* TODO Add AWS_IO_WRITE_EAGAIN code. 
*/ +#endif break; } @@ -1712,7 +1705,9 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc aws_error = AWS_IO_SOCKET_CLOSED; aws_raise_error(aws_error); purge = true; +#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.write_error_code = aws_error; +#endif break; } @@ -1725,11 +1720,15 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc errno_value); aws_error = s_determine_socket_error(errno_value); aws_raise_error(aws_error); +#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.write_error_code = aws_error; +#endif break; } +#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.written_bytes += (size_t)written; +#endif size_t remaining_to_write = write_request->cursor_cpy.len; @@ -1776,7 +1775,10 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc aws_event_loop_schedule_task_now(socket->event_loop, &socket_impl->written_task); } +#if AWS_USE_ON_EVENT_WITH_RESULT + AWS_ASSERT(socket->io_handle.update_io_result); socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result); +#endif /* Only report error if aws_socket_write() invoked this function and its write_request failed */ if (!parent_request_failed) { @@ -1845,7 +1847,6 @@ static void s_on_socket_io_event( end_check: aws_ref_count_release(&socket_impl->internal_refcount); - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "====== s_on_socket_io_event"); } int aws_socket_assign_to_event_loop(struct aws_socket *socket, struct aws_event_loop *event_loop) { diff --git a/source/socket_channel_handler.c b/source/socket_channel_handler.c index 66f8efe00..8264ef3e6 100644 --- a/source/socket_channel_handler.c +++ b/source/socket_channel_handler.c @@ -138,12 +138,14 @@ static void s_do_read(struct socket_handler *socket_handler) { (unsigned long long)max_to_read); if (max_to_read == 0) { - // TODO Set to ewouldblock? + /* TODO Set to ewouldblock? */ return; } - +#if AWS_USE_ON_EVENT_WITH_RESULT struct aws_io_handle_io_op_result io_op_result; memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + AWS_ASSERT(socket_handler->socket->io_handle.update_io_result); +#endif size_t total_read = 0; size_t read = 0; @@ -200,14 +202,6 @@ static void s_do_read(struct socket_handler *socket_handler) { (void *)socket_handler->slot->handler); io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; } - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET_HANDLER, - "=== s_do_read 1: %d %lu %d %lu", - io_op_result.read_error_code, - io_op_result.read_bytes, - io_op_result.write_error_code, - io_op_result.written_bytes); - AWS_ASSERT(socket_handler->socket->io_handle.update_io_result); socket_handler->socket->io_handle.update_io_result( socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); return; @@ -226,20 +220,13 @@ static void s_do_read(struct socket_handler *socket_handler) { aws_channel_schedule_task_now(socket_handler->slot->channel, &socket_handler->read_task_storage); } - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET_HANDLER, - "id=%p: === s_do_read update I/O results: %d %lu %d %lu", - (void *)socket_handler->slot->handler, - io_op_result.read_error_code, - io_op_result.read_bytes, - io_op_result.write_error_code, - io_op_result.written_bytes); +#if AWS_USE_ON_EVENT_WITH_RESULT socket_handler->socket->io_handle.update_io_result( socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); +#endif } -/* the socket is either readable or errored out. If it's readable, kick off s_do_read() to do its thing. 
- * If an error, start the channel shutdown process. */ +/* the socket is either readable or errored out. If it's readable, kick off s_do_read() to do its thing. */ static void s_on_readable_notification(struct aws_socket *socket, int error_code, void *user_data) { (void)socket; diff --git a/tests/socket_test.c b/tests/socket_test.c index 22ec38604..ecc47c2b2 100644 --- a/tests/socket_test.c +++ b/tests/socket_test.c @@ -22,8 +22,6 @@ # include #endif -// #if AWS_USE_ON_EVENT_WITH_RESULT - struct local_listener_args { struct aws_socket *incoming; struct aws_mutex *mutex; From 28e55e1a0cc52994f206b1e3a24db079b69d98b7 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 27 Aug 2024 14:58:29 -0700 Subject: [PATCH 07/39] fixup --- source/posix/pipe.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/source/posix/pipe.c b/source/posix/pipe.c index a56790b35..0463824e4 100644 --- a/source/posix/pipe.c +++ b/source/posix/pipe.c @@ -278,6 +278,12 @@ int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_b if (read_val < 0) { int errno_value = errno; /* Always cache errno before potential side-effect */ if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { +#if AWS_USE_ON_EVENT_WITH_RESULT + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; + read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result); +#endif return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } return s_raise_posix_error(errno_value); @@ -441,9 +447,6 @@ static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { AWS_ASSERT(write_impl); AWS_ASSERT(write_impl->handle.update_io_result); - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - while (!aws_linked_list_empty(&write_impl->write_list)) { struct aws_linked_list_node *node = aws_linked_list_front(&write_impl->write_list); struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); @@ -458,8 +461,15 @@ static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { /* The pipe is no longer writable. Bail out */ write_impl->is_writable = false; - io_op_result.write_error_code = errno_value; + +#if AWS_USE_ON_EVENT_WITH_RESULT + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; + AWS_ASSERT(write_impl->handle.update_io_result); write_impl->handle.update_io_result(write_impl->event_loop, &write_impl->handle, &io_op_result); +#endif + return; } @@ -469,8 +479,6 @@ static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { } else { aws_byte_cursor_advance(&request->cursor, write_val); - io_op_result.written_bytes += (size_t)write_val; - if (request->cursor.len > 0) { /* There was a partial write, loop again to try and write the rest. */ continue; @@ -480,7 +488,6 @@ static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { /* If we got this far in the loop, then the write request is complete. * Note that the callback may result in the pipe being cleaned up. */ - // TODO Call update. 
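Note that none of this feedback changes the caller-visible contract of the pipe API: AWS_IO_READ_WOULD_BLOCK, raised as the aws error, is still how an empty pipe is reported. A short usage sketch of that contract (assumed usage, not part of the patch):

    /* Drain a pipe until it would block, growing buf up to its capacity. */
    static int s_drain_pipe(struct aws_pipe_read_end *read_end, struct aws_byte_buf *buf) {
        while (buf->len < buf->capacity) {
            size_t num_read = 0;
            if (aws_pipe_read(read_end, buf, &num_read)) {
                /* Would-block means "caught up"; anything else is a real error. */
                return aws_last_error() == AWS_IO_READ_WOULD_BLOCK ? AWS_OP_SUCCESS : AWS_OP_ERR;
            }
        }
        return AWS_OP_SUCCESS;
    }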
bool write_end_cleaned_up = s_write_end_complete_front_write_request(write_end, completed_error_code); if (write_end_cleaned_up) { /* Bail out! Any remaining requests were canceled during clean_up() */ @@ -548,7 +555,6 @@ int aws_pipe_write( /* If the pipe is writable, process the request (unless pipe is already in the middle of processing, which could * happen if a this aws_pipe_write() call was made by another write's completion callback */ if (write_impl->is_writable && !write_impl->currently_invoking_write_callback) { - struct aws_io_handle_io_op_result io_op_result; s_write_end_process_requests(write_end); } From c5f9e0093e8ead541ed786ffd9dbc6916bb193f8 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 27 Aug 2024 15:04:08 -0700 Subject: [PATCH 08/39] Use #if everywhere --- source/posix/pipe.c | 1 - source/posix/socket.c | 3 --- source/socket_channel_handler.c | 13 ++++++++++++- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/source/posix/pipe.c b/source/posix/pipe.c index 0463824e4..9313030c4 100644 --- a/source/posix/pipe.c +++ b/source/posix/pipe.c @@ -445,7 +445,6 @@ static bool s_write_end_complete_front_write_request(struct aws_pipe_write_end * static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { struct write_end_impl *write_impl = write_end->impl_data; AWS_ASSERT(write_impl); - AWS_ASSERT(write_impl->handle.update_io_result); while (!aws_linked_list_empty(&write_impl->write_list)) { struct aws_linked_list_node *node = aws_linked_list_front(&write_impl->write_list); diff --git a/source/posix/socket.c b/source/posix/socket.c index 3f1446b6b..734858f41 100644 --- a/source/posix/socket.c +++ b/source/posix/socket.c @@ -1809,9 +1809,6 @@ static void s_on_socket_io_event( * (e.g. peer sends last few bytes and immediately hangs up). * Notify user of READABLE|WRITABLE events first, so they try to read any remaining bytes. */ - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_READABLE) { AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is readable", (void *)socket, socket->io_handle.data.fd); if (socket->readable_fn) { diff --git a/source/socket_channel_handler.c b/source/socket_channel_handler.c index 8264ef3e6..a523a49ed 100644 --- a/source/socket_channel_handler.c +++ b/source/socket_channel_handler.c @@ -138,7 +138,6 @@ static void s_do_read(struct socket_handler *socket_handler) { (unsigned long long)max_to_read); if (max_to_read == 0) { - /* TODO Set to ewouldblock? 
*/ return; } #if AWS_USE_ON_EVENT_WITH_RESULT @@ -159,12 +158,16 @@ static void s_do_read(struct socket_handler *socket_handler) { if (aws_socket_read(socket_handler->socket, &message->message_data, &read)) { last_error = aws_last_error(); aws_mem_release(message->allocator, message); +#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = last_error; +#endif break; } total_read += read; +#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_bytes += read; +#endif AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "id=%p: read %llu from socket", @@ -174,7 +177,9 @@ static void s_do_read(struct socket_handler *socket_handler) { if (aws_channel_slot_send_message(socket_handler->slot, message, AWS_CHANNEL_DIR_READ)) { last_error = aws_last_error(); aws_mem_release(message->allocator, message); +#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = last_error; +#endif break; } } @@ -192,7 +197,9 @@ static void s_do_read(struct socket_handler *socket_handler) { AWS_ASSERT(last_error != 0); if (last_error != AWS_IO_READ_WOULD_BLOCK) { +#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = last_error; +#endif aws_channel_shutdown(socket_handler->slot->channel, last_error); } else { AWS_LOGF_TRACE( @@ -200,10 +207,14 @@ static void s_do_read(struct socket_handler *socket_handler) { "id=%p: out of data to read on socket. " "Waiting on event-loop notification.", (void *)socket_handler->slot->handler); +#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; +#endif } +#if AWS_USE_ON_EVENT_WITH_RESULT socket_handler->socket->io_handle.update_io_result( socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); +#endif return; } /* in this case, everything was fine, but there's still pending reads. We need to schedule a task to do the read From 4ed5a87ca426b8f31d45a2fc1f136d6724c80411 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 27 Aug 2024 15:12:45 -0700 Subject: [PATCH 09/39] Fix kqueue --- include/aws/io/io.h | 3 +++ source/bsd/kqueue_event_loop.c | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/include/aws/io/io.h b/include/aws/io/io.h index 27f6f0c9d..2baf7db1a 100644 --- a/include/aws/io/io.h +++ b/include/aws/io/io.h @@ -19,6 +19,9 @@ struct aws_io_handle; #if AWS_USE_ON_EVENT_WITH_RESULT struct aws_event_loop; +/** + * Results of the I/O operation(s) performed on the aws_io_handle. + */ struct aws_io_handle_io_op_result { size_t read_bytes; size_t written_bytes; diff --git a/source/bsd/kqueue_event_loop.c b/source/bsd/kqueue_event_loop.c index cbd45264a..ed1e82063 100644 --- a/source/bsd/kqueue_event_loop.c +++ b/source/bsd/kqueue_event_loop.c @@ -131,6 +131,7 @@ struct aws_event_loop_vtable s_kqueue_vtable = { .is_on_callers_thread = s_is_event_thread, }; +#if AWS_USE_ON_EVENT_WITH_RESULT /** * FIXME kqueue is used for debugging/demonstration purposes. It's going to be reverted. */ @@ -140,6 +141,7 @@ static void s_update_io_result( const struct aws_io_handle_io_op_result *io_op_result) { AWS_ASSERT(handle->additional_data); struct handle_data *handle_data = handle->additional_data; + (void)handle_data; AWS_ASSERT(event_loop == handle_data->event_loop); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, @@ -154,8 +156,9 @@ static void s_update_io_result( aws_error_str(io_op_result->write_error_code), io_op_result->written_bytes); - /* Here a handle IO status should be updated. It'll be used from the event loop. */ + /* Here, the handle IO status should be updated. 
It'll be used in the event loop. */ } +#endif struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, @@ -612,7 +615,9 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta /* Success */ handle_data->state = HANDLE_STATE_SUBSCRIBED; +#if AWS_USE_ON_EVENT_WITH_RESULT handle_data->owner->update_io_result = s_update_io_result; +#endif return; subscribe_failed: From 619319ffa48aea09561fb88efd663b091236f0a3 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 27 Aug 2024 15:31:21 -0700 Subject: [PATCH 10/39] Fix pipe tests --- source/posix/pipe.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/source/posix/pipe.c b/source/posix/pipe.c index 9313030c4..319e6621b 100644 --- a/source/posix/pipe.c +++ b/source/posix/pipe.c @@ -279,10 +279,12 @@ int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_b int errno_value = errno; /* Always cache errno before potential side-effect */ if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { #if AWS_USE_ON_EVENT_WITH_RESULT - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; - read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result); + if (read_impl->handle.update_io_result) { + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; + read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result); + } #endif return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } From be85d892e9b9e0ff43f3e82f943c88718fcd3c5f Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 3 Sep 2024 08:41:23 -0700 Subject: [PATCH 11/39] Remove AWS_ASSERT, use AWS_ZERO_STRUCT --- source/posix/pipe.c | 14 +++++++------- source/posix/socket.c | 26 ++++++++++++++------------ 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/source/posix/pipe.c b/source/posix/pipe.c index 319e6621b..5b6d9e303 100644 --- a/source/posix/pipe.c +++ b/source/posix/pipe.c @@ -281,7 +281,7 @@ int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_b #if AWS_USE_ON_EVENT_WITH_RESULT if (read_impl->handle.update_io_result) { struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + AWS_ZERO_STRUCT(io_op_result); io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result); } @@ -464,13 +464,13 @@ static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { write_impl->is_writable = false; #if AWS_USE_ON_EVENT_WITH_RESULT - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; - AWS_ASSERT(write_impl->handle.update_io_result); - write_impl->handle.update_io_result(write_impl->event_loop, &write_impl->handle, &io_op_result); + if (write_impl->handle.update_io_result) { + struct aws_io_handle_io_op_result io_op_result; + AWS_ZERO_STRUCT(io_op_result); + io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; + write_impl->handle.update_io_result(write_impl->event_loop, &write_impl->handle, &io_op_result); + } #endif - return; } diff --git 
a/source/posix/socket.c b/source/posix/socket.c index 734858f41..2aa027981 100644 --- a/source/posix/socket.c +++ b/source/posix/socket.c @@ -478,13 +478,13 @@ static void s_socket_connect_event( handle->data.fd); #if AWS_USE_ON_EVENT_WITH_RESULT - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; - AWS_ASSERT(handle->update_io_result); - handle->update_io_result(event_loop, handle, &io_op_result); + if (handle->update_io_result) { + struct aws_io_handle_io_op_result io_op_result; + AWS_ZERO_STRUCT(io_op_result); + io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; + handle->update_io_result(event_loop, handle, &io_op_result); + } #endif - return; } @@ -966,7 +966,7 @@ static void s_socket_accept_event( #if AWS_USE_ON_EVENT_WITH_RESULT struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + AWS_ZERO_STRUCT(io_op_result); #endif if (socket_impl->continue_accept && events & AWS_IO_EVENT_TYPE_READABLE) { @@ -1085,8 +1085,9 @@ static void s_socket_accept_event( } #if AWS_USE_ON_EVENT_WITH_RESULT - AWS_ASSERT(handle->update_io_result); - handle->update_io_result(event_loop, handle, &io_op_result); + if (handle->update_io_result) { + handle->update_io_result(event_loop, handle, &io_op_result); + } #endif AWS_LOGF_TRACE( @@ -1659,7 +1660,7 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc #if AWS_USE_ON_EVENT_WITH_RESULT struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + AWS_ZERO_STRUCT(io_op_result); #endif /* if a close call happens in the middle, this queue will have been cleaned out from under us. */ @@ -1776,8 +1777,9 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc } #if AWS_USE_ON_EVENT_WITH_RESULT - AWS_ASSERT(socket->io_handle.update_io_result); - socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result); + if (socket->io_handle.update_io_result) { + socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result); + } #endif /* Only report error if aws_socket_write() invoked this function and its write_request failed */ From 739e0f68f9b05e1e4e582e1b95fc8e0107ccad9c Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Mon, 16 Sep 2024 10:23:20 -0700 Subject: [PATCH 12/39] Remove changes made to kqueue --- CMakeLists.txt | 2 -- source/bsd/kqueue_event_loop.c | 34 ---------------------------------- 2 files changed, 36 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 36bdd4d96..e56d7d7aa 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -128,8 +128,6 @@ elseif (APPLE) #No choice on TLS for apple, darwinssl will always be used. list(APPEND PLATFORM_LIBS "-framework Security") set(EVENT_LOOP_DEFINE "KQUEUE") - # FIXME For debugging. 
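Restoring KQUEUE here undoes the debugging override from the earlier fixup. The EVENT_LOOP_DEFINE value is presumably surfaced to sources as an AWS_USE_<value> compile definition (the same convention behind AWS_USE_IO_COMPLETION_PORTS and the AWS_USE_ON_EVENT_WITH_RESULT guards used throughout this series), so each platform selects exactly one backend at compile time. A sketch of that assumed contract:

    /* Compile-time selection contract assumed by the guards in this series. */
    #if AWS_USE_ON_EVENT_WITH_RESULT
        /* feedback-capable backend: I/O callers report results via update_io_result */
    #elif AWS_USE_KQUEUE
        /* classic kqueue backend: edge-triggered semantics only, no feedback */
    #endif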
- set(EVENT_LOOP_DEFINE "ON_EVENT_WITH_RESULT") elseif (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" OR CMAKE_SYSTEM_NAME STREQUAL "NetBSD" OR CMAKE_SYSTEM_NAME STREQUAL "OpenBSD") file(GLOB AWS_IO_OS_HEADERS diff --git a/source/bsd/kqueue_event_loop.c b/source/bsd/kqueue_event_loop.c index ed1e82063..33a517e7b 100644 --- a/source/bsd/kqueue_event_loop.c +++ b/source/bsd/kqueue_event_loop.c @@ -131,35 +131,6 @@ struct aws_event_loop_vtable s_kqueue_vtable = { .is_on_callers_thread = s_is_event_thread, }; -#if AWS_USE_ON_EVENT_WITH_RESULT -/** - * FIXME kqueue is used for debugging/demonstration purposes. It's going to be reverted. - */ -static void s_update_io_result( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - const struct aws_io_handle_io_op_result *io_op_result) { - AWS_ASSERT(handle->additional_data); - struct handle_data *handle_data = handle->additional_data; - (void)handle_data; - AWS_ASSERT(event_loop == handle_data->event_loop); - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, - "id=%p: got feedback on I/O operation for fd %d: read: status %d (%s), %lu bytes; write: status %d (%s), %lu " - "bytes", - (void *)event_loop, - handle->data.fd, - io_op_result->read_error_code, - aws_error_str(io_op_result->read_error_code), - io_op_result->read_bytes, - io_op_result->write_error_code, - aws_error_str(io_op_result->write_error_code), - io_op_result->written_bytes); - - /* Here, the handle IO status should be updated. It'll be used in the event loop. */ -} -#endif - struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, const struct aws_event_loop_options *options) { @@ -615,9 +586,6 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta /* Success */ handle_data->state = HANDLE_STATE_SUBSCRIBED; -#if AWS_USE_ON_EVENT_WITH_RESULT - handle_data->owner->update_io_result = s_update_io_result; -#endif return; subscribe_failed: @@ -963,8 +931,6 @@ static void aws_event_loop_thread(void *user_data) { handle_data->owner->data.fd); handle_data->on_event( event_loop, handle_data->owner, handle_data->events_this_loop, handle_data->on_event_user_data); - - /* It's possible to check for IO result here. 
*/ } handle_data->events_this_loop = 0; From 4036be9654126f4eddd952ee1064c318c432ff05 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Mon, 16 Sep 2024 10:41:36 -0700 Subject: [PATCH 13/39] Add ionotify event loop (#670) --- CMakeLists.txt | 11 +- include/aws/io/io.h | 6 +- source/posix/pipe.c | 15 +- source/posix/socket.c | 28 +- source/qnx/ionotify_event_loop.c | 974 +++++++++++++++++++++++++++++++ source/socket_channel_handler.c | 16 +- tests/event_loop_test.c | 12 + tests/pipe_test.c | 5 + 8 files changed, 1042 insertions(+), 25 deletions(-) create mode 100644 source/qnx/ionotify_event_loop.c diff --git a/CMakeLists.txt b/CMakeLists.txt index e56d7d7aa..1e7a6a875 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -142,7 +142,16 @@ elseif (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" OR CMAKE_SYSTEM_NAME STREQUAL "NetB set(USE_S2N ON) elseif(CMAKE_SYSTEM_NAME STREQUAL "QNX") + file(GLOB AWS_IO_OS_HEADERS + ) + + file(GLOB AWS_IO_OS_SRC + "source/posix/*.c" + "source/qnx/*.c" + ) set(EVENT_LOOP_DEFINE "ON_EVENT_WITH_RESULT") + set(USE_S2N ON) + list(APPEND PLATFORM_LIBS "socket") endif() if (BYO_CRYPTO) @@ -233,7 +242,7 @@ install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" DESTINATION "${LIBRARY_DIRECTORY}/${PROJECT_NAME}/cmake/" COMPONENT Development) -if (NOT CMAKE_CROSSCOMPILING) +if (NOT CMAKE_CROSSCOMPILING OR AWS_BUILD_QNX_TESTS) if (BUILD_TESTING) add_subdirectory(tests) endif() diff --git a/include/aws/io/io.h b/include/aws/io/io.h index 2baf7db1a..890b3dfae 100644 --- a/include/aws/io/io.h +++ b/include/aws/io/io.h @@ -25,7 +25,11 @@ struct aws_event_loop; struct aws_io_handle_io_op_result { size_t read_bytes; size_t written_bytes; + /** Error codes representing generic errors happening on I/O handles. */ + int error_code; + /** Error codes specific to reading operations. */ int read_error_code; + /** Error codes specific to writing operations. 
*/ int write_error_code; }; @@ -43,7 +47,7 @@ struct aws_io_handle { void *additional_data; #if AWS_USE_ON_EVENT_WITH_RESULT aws_io_handle_update_io_results_fn *update_io_result; -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ }; enum aws_io_message_type { diff --git a/source/posix/pipe.c b/source/posix/pipe.c index 5b6d9e303..4fe189d95 100644 --- a/source/posix/pipe.c +++ b/source/posix/pipe.c @@ -285,11 +285,21 @@ int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_b io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result); } -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } return s_raise_posix_error(errno_value); } +#if AWS_USE_ON_EVENT_WITH_RESULT + else if (read_val == 0) { + if (read_impl->handle.update_io_result) { + struct aws_io_handle_io_op_result io_op_result; + memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); + io_op_result.error_code = AWS_IO_SOCKET_CLOSED; + read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result); + } + } +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ /* Success */ dst_buffer->len += read_val; @@ -470,7 +480,8 @@ static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; write_impl->handle.update_io_result(write_impl->event_loop, &write_impl->handle, &io_op_result); } -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ + return; } diff --git a/source/posix/socket.c b/source/posix/socket.c index 2aa027981..8ccf2401a 100644 --- a/source/posix/socket.c +++ b/source/posix/socket.c @@ -484,7 +484,8 @@ static void s_socket_connect_event( io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; handle->update_io_result(event_loop, handle, &io_op_result); } -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ + return; } @@ -967,7 +968,7 @@ static void s_socket_accept_event( #if AWS_USE_ON_EVENT_WITH_RESULT struct aws_io_handle_io_op_result io_op_result; AWS_ZERO_STRUCT(io_op_result); -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ if (socket_impl->continue_accept && events & AWS_IO_EVENT_TYPE_READABLE) { int in_fd = 0; @@ -982,7 +983,7 @@ static void s_socket_accept_event( if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { #if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } @@ -991,7 +992,7 @@ static void s_socket_accept_event( s_on_connection_error(socket, aws_error); #if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = aws_error; -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } @@ -1088,7 +1089,7 @@ static void s_socket_accept_event( if (handle->update_io_result) { handle->update_io_result(event_loop, handle, &io_op_result); } -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, @@ -1661,7 +1662,7 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc #if AWS_USE_ON_EVENT_WITH_RESULT struct aws_io_handle_io_op_result io_op_result; AWS_ZERO_STRUCT(io_op_result); -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ /* if a close call happens in the middle, this queue will have been cleaned out from under us. 
*/ while (!aws_linked_list_empty(&socket_impl->write_queue)) { @@ -1692,8 +1693,8 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: returned would block", (void *)socket, socket->io_handle.data.fd); #if AWS_USE_ON_EVENT_WITH_RESULT - io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; /* TODO Add AWS_IO_WRITE_EAGAIN code. */ -#endif + io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } @@ -1708,7 +1709,7 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc purge = true; #if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.write_error_code = aws_error; -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } @@ -1723,13 +1724,13 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc aws_raise_error(aws_error); #if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.write_error_code = aws_error; -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } #if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.written_bytes += (size_t)written; -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ size_t remaining_to_write = write_request->cursor_cpy.len; @@ -1780,7 +1781,7 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc if (socket->io_handle.update_io_result) { socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result); } -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ /* Only report error if aws_socket_write() invoked this function and its write_request failed */ if (!parent_request_failed) { @@ -2055,5 +2056,6 @@ void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint char uuid_str[AWS_UUID_STR_LEN] = {0}; struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); - snprintf(endpoint->address, sizeof(endpoint->address), "testsock" PRInSTR ".sock", AWS_BYTE_BUF_PRI(uuid_buf)); + /* TODO QNX allows creating a socket file only in /tmp directory. */ + snprintf(endpoint->address, sizeof(endpoint->address), "/tmp/testsock" PRInSTR ".sock", AWS_BYTE_BUF_PRI(uuid_buf)); } diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c new file mode 100644 index 000000000..1d9148162 --- /dev/null +++ b/source/qnx/ionotify_event_loop.c @@ -0,0 +1,974 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +static void s_destroy(struct aws_event_loop *event_loop); +static int s_run(struct aws_event_loop *event_loop); +static int s_stop(struct aws_event_loop *event_loop); +static int s_wait_for_stop_completion(struct aws_event_loop *event_loop); +static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task); +static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos); +static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task); +static int s_subscribe_to_io_events( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + int events, + aws_event_loop_on_event_fn *on_event, + void *user_data); +static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle); +static void s_free_io_event_resources(void *user_data); +static bool s_is_on_callers_thread(struct aws_event_loop *event_loop); + +static void aws_event_loop_thread(void *args); + +static struct aws_event_loop_vtable s_vtable = { + .destroy = s_destroy, + .run = s_run, + .stop = s_stop, + .wait_for_stop_completion = s_wait_for_stop_completion, + .schedule_task_now = s_schedule_task_now, + .schedule_task_future = s_schedule_task_future, + .cancel_task = s_cancel_task, + .subscribe_to_io_events = s_subscribe_to_io_events, + .unsubscribe_from_io_events = s_unsubscribe_from_io_events, + .free_io_event_resources = s_free_io_event_resources, + .is_on_callers_thread = s_is_on_callers_thread, +}; + +struct ionotify_loop { + struct aws_task_scheduler scheduler; + struct aws_thread thread_created_on; + struct aws_thread_options thread_options; + aws_thread_id_t thread_joined_to; + struct aws_atomic_var running_thread_id; + /* Channel to receive I/O events. Resource managers open connections to this channel to send their events. */ + int io_events_channel_id; + /* Connection to the events channel opened by the event loop. It's used by ionotify and some event loop logic (e.g. + * cross-thread and I/O results notifications) to send pulses to the pulse channel. */ + int pulse_connection_id; + struct aws_mutex task_pre_queue_mutex; + struct aws_linked_list task_pre_queue; + struct aws_task stop_task; + struct aws_atomic_var stop_task_ptr; + bool should_continue; + /* ionotify forces to choose one of the following as user-provided data associated with each received event: + * 1. A pointer. But events won't contain the triggered flags (i.e. your code has to figure out itself if it was + * _NOTIFY_COND_INPUT or _NOTIFY_COND_HUP). + * 2. Some bits of a special field of type int (28 bits on x86_64). QNX will use the remaining bits (4 bits in + * QNX 8.0) in this field to specify the types of the triggered events. + * + * Since event loop must know the types of received I/O events, the second options is used. 28-bit IDs are mapped to + * each subscribed aws_io_handle. The mapping is stored in this hash table. + */ + struct aws_hash_table handles; + int last_handle_id; +}; + +/* Data associated with a subscribed I/O handle. */ +struct ionotify_event_data { + struct aws_allocator *alloc; + struct aws_io_handle *handle; + struct aws_event_loop *event_loop; + aws_event_loop_on_event_fn *on_event; + int events_subscribed; + /* enum aws_io_event_type */ + int latest_io_event_types; + /* Connection opened on the events channel. 
Used to send pulses to the main event loop. */ + int pulse_connection_id; + struct sigevent event; + void *user_data; + struct aws_task subscribe_task; + struct aws_task cleanup_task; + /* ID with a value that can fit into pulse user data field (only _NOTIFY_COND_MASK bits can be used). */ + int handle_id; + /* False when handle is unsubscribed, but this struct hasn't been cleaned up yet. */ + bool is_subscribed; +}; + +/* SI_NOTIFY is a QNX special sigev code requesting resource managers to return active event type along with the event + * itself. */ +static short IO_EVENT_PULSE_SIGEV_CODE = SI_NOTIFY; +static short CROSS_THREAD_PULSE_SIGEV_CODE = _PULSE_CODE_MINAVAIL; +static short IO_EVENT_KICKSTART_SIGEV_CODE = _PULSE_CODE_MINAVAIL + 1; +static short IO_EVENT_UPDATE_ERROR_SIGEV_CODE = _PULSE_CODE_MINAVAIL + 2; + +/* Setup edge triggered ionotify with a scheduler. */ +struct aws_event_loop *aws_event_loop_new_default_with_options( + struct aws_allocator *alloc, + const struct aws_event_loop_options *options) { + AWS_PRECONDITION(options); + AWS_PRECONDITION(options->clock); + + struct aws_event_loop *event_loop = aws_mem_calloc(alloc, 1, sizeof(struct aws_event_loop)); + + AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Initializing edge-triggered ionotify", (void *)event_loop); + if (aws_event_loop_init_base(event_loop, alloc, options->clock)) { + goto clean_up_loop; + } + + struct ionotify_loop *ionotify_loop = aws_mem_calloc(alloc, 1, sizeof(struct ionotify_loop)); + + if (options->thread_options) { + ionotify_loop->thread_options = *options->thread_options; + } else { + ionotify_loop->thread_options = *aws_default_thread_options(); + } + + /* initialize thread id to NULL, it should be updated when the event loop thread starts. */ + aws_atomic_init_ptr(&ionotify_loop->running_thread_id, NULL); + + aws_linked_list_init(&ionotify_loop->task_pre_queue); + ionotify_loop->task_pre_queue_mutex = (struct aws_mutex)AWS_MUTEX_INIT; + aws_atomic_init_ptr(&ionotify_loop->stop_task_ptr, NULL); + + if (aws_thread_init(&ionotify_loop->thread_created_on, alloc)) { + goto clean_up_ionotify; + } + + /* Setup channel to receive cross-thread pulses and pulses from resource managers. */ + ionotify_loop->io_events_channel_id = ChannelCreate(0); + int errno_value = errno; /* Always cache errno before potential side-effect */ + if (ionotify_loop->io_events_channel_id == -1) { + printf("ChannelCreate failed with errno %d (%s)\n", errno_value, strerror(errno_value)); + goto clean_up_thread; + } + AWS_LOGF_DEBUG( + AWS_LS_IO_EVENT_LOOP, + "id=%p: Opened QNX channel with ID %d", + (void *)event_loop, + ionotify_loop->io_events_channel_id); + + /* Open connection over the QNX channel for pulses. 
*/ + ionotify_loop->pulse_connection_id = ConnectAttach(0, 0, ionotify_loop->io_events_channel_id, _NTO_SIDE_CHANNEL, 0); + if (ionotify_loop->pulse_connection_id == -1) { + goto clean_up_thread; + } + + if (aws_task_scheduler_init(&ionotify_loop->scheduler, alloc)) { + goto clean_up_thread; + } + + ionotify_loop->should_continue = false; + + event_loop->impl_data = ionotify_loop; + event_loop->vtable = &s_vtable; + + if (aws_hash_table_init(&ionotify_loop->handles, alloc, 32, aws_hash_ptr, aws_ptr_eq, NULL, NULL)) { + goto clean_up_thread; + } + + return event_loop; + +clean_up_thread: + aws_thread_clean_up(&ionotify_loop->thread_created_on); + +clean_up_ionotify: + aws_mem_release(alloc, ionotify_loop); + +clean_up_loop: + aws_mem_release(alloc, event_loop); + + return NULL; +} + +static void s_destroy(struct aws_event_loop *event_loop) { + AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Destroying event_loop", (void *)event_loop); + + struct ionotify_loop *ionotify_loop = event_loop->impl_data; + + /* we don't know if stop() has been called by someone else, + * just call stop() again and wait for event-loop to finish. */ + aws_event_loop_stop(event_loop); + s_wait_for_stop_completion(event_loop); + + /* setting this so that canceled tasks don't blow up when asking if they're on the event-loop thread. */ + ionotify_loop->thread_joined_to = aws_thread_current_thread_id(); + aws_atomic_store_ptr(&ionotify_loop->running_thread_id, &ionotify_loop->thread_joined_to); + aws_task_scheduler_clean_up(&ionotify_loop->scheduler); + + while (!aws_linked_list_empty(&ionotify_loop->task_pre_queue)) { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&ionotify_loop->task_pre_queue); + struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); + task->fn(task, task->arg, AWS_TASK_STATUS_CANCELED); + } + + aws_thread_clean_up(&ionotify_loop->thread_created_on); + + aws_hash_table_clean_up(&ionotify_loop->handles); + + aws_mem_release(event_loop->alloc, ionotify_loop); + aws_event_loop_clean_up_base(event_loop); + aws_mem_release(event_loop->alloc, event_loop); +} + +static int s_run(struct aws_event_loop *event_loop) { + struct ionotify_loop *ionotify_loop = event_loop->impl_data; + + AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Starting event-loop thread.", (void *)event_loop); + + ionotify_loop->should_continue = true; + aws_thread_increment_unjoined_count(); + if (aws_thread_launch( + &ionotify_loop->thread_created_on, &aws_event_loop_thread, event_loop, &ionotify_loop->thread_options)) { + + aws_thread_decrement_unjoined_count(); + AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: Thread creation failed.", (void *)event_loop); + ionotify_loop->should_continue = false; + return AWS_OP_ERR; + } + + return AWS_OP_SUCCESS; +} + +static void s_stop_task(struct aws_task *task, void *args, enum aws_task_status status) { + (void)task; + struct aws_event_loop *event_loop = args; + struct ionotify_loop *ionotify_loop = event_loop->impl_data; + + /* now okay to reschedule stop tasks. */ + aws_atomic_store_ptr(&ionotify_loop->stop_task_ptr, NULL); + if (status == AWS_TASK_STATUS_RUN_READY) { + /* this allows the event loop to invoke the callback once the event loop has completed. 
*/
+        ionotify_loop->should_continue = false;
+    }
+}
+
+static int s_stop(struct aws_event_loop *event_loop) {
+    struct ionotify_loop *ionotify_loop = event_loop->impl_data;
+
+    void *expected_ptr = NULL;
+    bool update_succeeded =
+        aws_atomic_compare_exchange_ptr(&ionotify_loop->stop_task_ptr, &expected_ptr, &ionotify_loop->stop_task);
+    if (!update_succeeded) {
+        /* the stop task is already scheduled. */
+        return AWS_OP_SUCCESS;
+    }
+    AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Stopping event-loop thread", (void *)event_loop);
+    aws_task_init(&ionotify_loop->stop_task, s_stop_task, event_loop, "ionotify_event_loop_stop");
+    s_schedule_task_now(event_loop, &ionotify_loop->stop_task);
+
+    return AWS_OP_SUCCESS;
+}
+
+static int s_wait_for_stop_completion(struct aws_event_loop *event_loop) {
+    struct ionotify_loop *ionotify_loop = event_loop->impl_data;
+    int result = aws_thread_join(&ionotify_loop->thread_created_on);
+    aws_thread_decrement_unjoined_count();
+    return result;
+}
+
+static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) {
+    struct ionotify_loop *ionotify_loop = event_loop->impl_data;
+
+    /* if the event loop and the caller are the same thread, just schedule and be done with it. */
+    if (s_is_on_callers_thread(event_loop)) {
+        AWS_LOGF_TRACE(
+            AWS_LS_IO_EVENT_LOOP,
+            "id=%p: Scheduling task %p in-thread for timestamp %llu",
+            (void *)event_loop,
+            (void *)task,
+            (unsigned long long)run_at_nanos);
+        if (run_at_nanos == 0) {
+            /* zero denotes "now" task */
+            aws_task_scheduler_schedule_now(&ionotify_loop->scheduler, task);
+        } else {
+            aws_task_scheduler_schedule_future(&ionotify_loop->scheduler, task, run_at_nanos);
+        }
+        return;
+    }
+
+    AWS_LOGF_TRACE(
+        AWS_LS_IO_EVENT_LOOP,
+        "id=%p: Scheduling task %p cross-thread for timestamp %llu",
+        (void *)event_loop,
+        (void *)task,
+        (unsigned long long)run_at_nanos);
+    task->timestamp = run_at_nanos;
+
+    aws_mutex_lock(&ionotify_loop->task_pre_queue_mutex);
+    bool is_first_task = aws_linked_list_empty(&ionotify_loop->task_pre_queue);
+    aws_linked_list_push_back(&ionotify_loop->task_pre_queue, &task->node);
+    aws_mutex_unlock(&ionotify_loop->task_pre_queue_mutex);
+
+    /* If the list was not empty, we already sent a cross-thread pulse. No need to send it again. */
+    if (is_first_task) {
+        AWS_LOGF_TRACE(
+            AWS_LS_IO_EVENT_LOOP,
+            "id=%p: Waking up event-loop thread by sending pulse to connection ID %d",
+            (void *)event_loop,
+            ionotify_loop->pulse_connection_id);
+        /* The pulse itself is enough for cross-thread notifications. */
+        int user_data_value = 0;
+        int rc = MsgSendPulse(ionotify_loop->pulse_connection_id, -1, CROSS_THREAD_PULSE_SIGEV_CODE, user_data_value);
+        int errno_value = errno;
+        if (rc == -1) {
+            /* The task was scheduled, but we couldn't notify the main loop about it. According to QNX docs, inability
+             * to send a pulse indicates that there is no available memory left for the process. Failing to notify the
+             * loop is a minor concern in that scenario, so just log the error.
*/ + AWS_LOGF_ERROR( + AWS_LS_IO_EVENT_LOOP, + "id=%p: Failed to send cross-thread pulse: %d (%s)", + (void *)event_loop, + errno_value, + strerror(errno_value)); + } + } +} + +static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task) { + s_schedule_task_common(event_loop, task, 0 /* zero denotes "now" task */); +} + +static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { + s_schedule_task_common(event_loop, task, run_at_nanos); +} + +static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { + AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Cancelling task %p", (void *)event_loop, (void *)task); + struct ionotify_loop *ionotify_loop = event_loop->impl_data; + aws_task_scheduler_cancel_task(&ionotify_loop->scheduler, task); +} + +/* Map ionotify_event_data to internal ID. */ +static int s_add_handle(struct ionotify_loop *ionotify_loop, struct ionotify_event_data *ionotify_event_data) { + AWS_ASSERT(s_is_on_callers_thread(ionotify_event_data->event_loop)); + + /* Special constant, _NOTIFY_COND_MASK, limits the maximum value that can be used as user data in I/O events. */ + int max_handle_id = _NOTIFY_COND_MASK; + + if (AWS_UNLIKELY(aws_hash_table_get_entry_count(&ionotify_loop->handles) == (size_t)max_handle_id)) { + AWS_LOGF_ERROR( + AWS_LS_IO_EVENT_LOOP, + "id=%p: Maximum number of registered handles reached", + (void *)ionotify_event_data->event_loop); + return AWS_OP_ERR; + } + + struct aws_hash_element *elem = NULL; + int next_handle_id = ionotify_loop->last_handle_id; + int was_created = 0; + do { + ++next_handle_id; + if (next_handle_id > max_handle_id) { + next_handle_id = 1; + } + aws_hash_table_create(&ionotify_loop->handles, (void *)next_handle_id, &elem, &was_created); + /* next_handle_id is already present in the hash table, skip it. */ + if (was_created == 0) { + elem = NULL; + } + } while (elem == NULL); + + ionotify_event_data->handle_id = next_handle_id; + ionotify_loop->last_handle_id = next_handle_id; + elem->value = ionotify_event_data; + + return AWS_OP_SUCCESS; +} + +struct ionotify_event_data *s_find_handle( + struct aws_event_loop *event_loop, + struct ionotify_loop *ionotify_loop, + int handle_id) { + AWS_ASSERT(s_is_on_callers_thread(event_loop)); + (void)event_loop; + struct ionotify_event_data *ionotify_event_data = NULL; + struct aws_hash_element *elem = NULL; + aws_hash_table_find(&ionotify_loop->handles, (void *)handle_id, &elem); + if (elem != NULL) { + ionotify_event_data = elem->value; + } + return ionotify_event_data; +} + +static void s_remove_handle(struct aws_event_loop *event_loop, struct ionotify_loop *ionotify_loop, int handle_id) { + AWS_ASSERT(s_is_on_callers_thread(event_loop)); + (void)event_loop; + aws_hash_table_remove(&ionotify_loop->handles, (void *)handle_id, NULL, NULL); +} + +/* Scheduled task that performs the actual subscription using ionotify. */ +static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_task_status status) { + (void)task; + + /* If task was cancelled, nothing to do. 
*/ + if (status == AWS_TASK_STATUS_CANCELED) { + return; + } + + struct ionotify_event_data *ionotify_event_data = user_data; + struct aws_event_loop *event_loop = ionotify_event_data->event_loop; + struct ionotify_loop *ionotify_loop = event_loop->impl_data; + + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p: Subscribing to events on fd %d for events %d", + (void *)event_loop, + ionotify_event_data->handle->data.fd, + ionotify_event_data->events_subscribed); + + /* Map ionotify_event_data to ID. This ID will be returned with the I/O events from ionotify. */ + if (ionotify_event_data->handle_id == 0) { + s_add_handle(ionotify_loop, ionotify_event_data); + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p: Mapped fd %d to handle ID %u", + (void *)event_loop, + ionotify_event_data->handle->data.fd, + ionotify_event_data->handle_id); + /* I/O events from ionotify will be delivered as pulses with a user-defined 28-bit ID. + * SIGEV_PULSE_PRIO_INHERIT means the thread that receives the pulse will run at the initial priority of the + * process. */ + short pulse_priority = SIGEV_PULSE_PRIO_INHERIT; + short pulse_sigev_code = IO_EVENT_PULSE_SIGEV_CODE; + SIGEV_PULSE_INT_INIT( + &ionotify_event_data->event, + ionotify_event_data->pulse_connection_id, + pulse_priority, + pulse_sigev_code, + ionotify_event_data->handle_id); + + /* From the iomgr.h header: + * If extended conditions are requested, and they need to be returned in an armed event, the negative of the + * satisfied conditions are returned in (io_notify_t).i.event.sigev_code. + * Extended conditions are the ones starting with _NOTIFY_CONDE_. + * For that feature to work, special bits in the event structure must be set. */ + ionotify_event_data->event.sigev_notify |= SIGEV_FLAG_CODE_UPDATEABLE; + SIGEV_MAKE_UPDATEABLE(&ionotify_event_data->event); + + /* The application must register the event by calling MsgRegisterEvent() with the fd processed in ionotify(). + * See: + * https://www.qnx.com/developers/docs/8.0/com.qnx.doc.neutrino.lib_ref/topic/i/ionotify.html + * https://www.qnx.com/developers/docs/8.0/com.qnx.doc.neutrino.lib_ref/topic/m/msgregisterevent.html + * + * It's enough to register an event only once and then reuse it on followup ionotify rearming calls. + * NOTE: If you create a new sigevent for the same file descriptor, with the same flags, you HAVE to register + * it again. */ + MsgRegisterEvent(&ionotify_event_data->event, ionotify_event_data->handle->data.fd); + } + + ionotify_event_data->is_subscribed = true; + + /* Everyone is always registered for errors. */ + int event_mask = _NOTIFY_COND_EXTEN | _NOTIFY_CONDE_ERR | _NOTIFY_CONDE_HUP | _NOTIFY_CONDE_NVAL; + if (ionotify_event_data->events_subscribed & AWS_IO_EVENT_TYPE_READABLE) { + event_mask |= _NOTIFY_COND_INPUT; + event_mask |= _NOTIFY_COND_OBAND; + } + if (ionotify_event_data->events_subscribed & AWS_IO_EVENT_TYPE_WRITABLE) { + event_mask |= _NOTIFY_COND_OUTPUT; + } + + /* Arm resource manager associated with a given file descriptor in edge-triggered mode. + * After this call, a corresponding resource manager starts sending events. 
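Because _NOTIFY_ACTION_EDGEARM arms a one-shot, edge-triggered notification, the consumer contract implied above is: drain the descriptor until EAGAIN, rearm, then handle any conditions ionotify() reports as already active. A compressed sketch of that cycle (illustrative only; it assumes a sigevent already registered via MsgRegisterEvent(), as done above):

    /* Illustrative sketch, not part of this patch: the edge-triggered consume/rearm cycle. */
    #include <errno.h>
    #include <stdint.h>
    #include <unistd.h>

    #include <sys/iomsg.h>
    #include <sys/neutrino.h>

    static void s_example_drain_and_rearm(int fd, struct sigevent *registered_event) {
        uint8_t buf[512];
        for (;;) {
            ssize_t n = read(fd, buf, sizeof(buf));
            if (n > 0) {
                continue; /* keep draining: an edge won't fire again for data already pending */
            }
            if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
                /* Drained. Rearm; the return value reports conditions that are already active. */
                int active = ionotify(fd, _NOTIFY_ACTION_EDGEARM, _NOTIFY_COND_INPUT, registered_event);
                if (active != -1 && (active & _NOTIFY_COND_INPUT)) {
                    continue; /* data raced in between the last read and the rearm */
                }
                break; /* nothing pending: wait for the pulse */
            }
            break; /* EOF or hard error: handled elsewhere */
        }
    }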
*/
+    int rc =
+        ionotify(ionotify_event_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, event_mask, &ionotify_event_data->event);
+    int errno_value = errno;
+    if (rc == -1) {
+        AWS_LOGF_ERROR(
+            AWS_LS_IO_EVENT_LOOP,
+            "id=%p: Failed to subscribe to events on fd %d: error %d (%s)",
+            (void *)event_loop,
+            ionotify_event_data->handle->data.fd,
+            errno_value,
+            strerror(errno_value));
+        ionotify_event_data->on_event(
+            event_loop, ionotify_event_data->handle, AWS_IO_EVENT_TYPE_ERROR, ionotify_event_data->user_data);
+        return;
+    }
+
+    /* ionotify can return conditions that are already active, if they were among the requested ones. Send a
+     * notification to kick-start processing of the fd if it has desired conditions. */
+
+    /* The user-provided field has no space for extended conditions, so set a field in ionotify_event_data instead. */
+    if (rc & (_NOTIFY_CONDE_ERR | _NOTIFY_CONDE_NVAL)) {
+        ionotify_event_data->latest_io_event_types |= AWS_IO_EVENT_TYPE_ERROR;
+    }
+    if (rc & _NOTIFY_CONDE_HUP) {
+        ionotify_event_data->latest_io_event_types |= AWS_IO_EVENT_TYPE_CLOSED;
+    }
+
+    if ((rc & (_NOTIFY_COND_OBAND | _NOTIFY_COND_INPUT | _NOTIFY_COND_OUTPUT)) ||
+        ionotify_event_data->latest_io_event_types) {
+        AWS_LOGF_TRACE(
+            AWS_LS_IO_EVENT_LOOP,
+            "id=%p: Sending pulse for fd %d because it has desired I/O conditions (rc is %d)",
+            (void *)event_loop,
+            ionotify_event_data->handle->data.fd,
+            rc);
+        /* Combine the condition bits with the handle ID, the same as ionotify does, so the main loop can process all
+         * pulses in a unified manner. */
+        int kick_start_event_mask = rc & _NOTIFY_COND_MASK;
+        kick_start_event_mask |= ionotify_event_data->handle_id;
+        int send_rc =
+            MsgSendPulse(ionotify_loop->pulse_connection_id, -1, IO_EVENT_KICKSTART_SIGEV_CODE, kick_start_event_mask);
+        if (send_rc == -1) {
+            AWS_LOGF_ERROR(
+                AWS_LS_IO_EVENT_LOOP,
+                "id=%p: Failed to send pulse for fd %d",
+                (void *)event_loop,
+                ionotify_event_data->handle->data.fd);
+        }
+    }
+}
+
+/* Processes results of I/O operations. Must run on the event-loop thread. */
+static void s_process_io_result(
+    struct aws_event_loop *event_loop,
+    struct aws_io_handle *handle,
+    const struct aws_io_handle_io_op_result *io_op_result) {
+
+    AWS_ASSERT(s_is_on_callers_thread(event_loop));
+
+    AWS_ASSERT(handle->additional_data);
+    struct ionotify_event_data *ionotify_event_data = handle->additional_data;
+
+    AWS_LOGF_TRACE(
+        AWS_LS_IO_EVENT_LOOP,
+        "id=%p: Processing I/O operation result for fd %d: status %d (%s); read status %d (%s); write status %d (%s)",
+        (void *)event_loop,
+        handle->data.fd,
+        io_op_result->error_code,
+        aws_error_str(io_op_result->error_code),
+        io_op_result->read_error_code,
+        aws_error_str(io_op_result->read_error_code),
+        io_op_result->write_error_code,
+        aws_error_str(io_op_result->write_error_code));
+
+    int event_types = 0;
+    if (io_op_result->error_code == AWS_IO_SOCKET_CLOSED) {
+        ionotify_event_data->latest_io_event_types = AWS_IO_EVENT_TYPE_CLOSED;
+    }
+    if (io_op_result->read_error_code == AWS_IO_READ_WOULD_BLOCK) {
+        event_types |= AWS_IO_EVENT_TYPE_READABLE;
+    }
+    if (io_op_result->write_error_code == AWS_IO_READ_WOULD_BLOCK) {
+        event_types |= AWS_IO_EVENT_TYPE_WRITABLE;
+    }
+
+    /* Rearm resource manager. */
+    if (event_types != 0) {
+        AWS_LOGF_TRACE(
+            AWS_LS_IO_EVENT_LOOP, "id=%p: Got EWOULDBLOCK for fd %d, rearming it", (void *)event_loop, handle->data.fd);
+        /* We're on the event-loop thread, so run the subscribe task directly.
*/
+        ionotify_event_data->events_subscribed = event_types;
+        s_subscribe_task(NULL, ionotify_event_data, AWS_TASK_STATUS_RUN_READY);
+    }
+
+    /* Notify event loop of error conditions. */
+    if (ionotify_event_data->latest_io_event_types != 0) {
+        AWS_LOGF_TRACE(
+            AWS_LS_IO_EVENT_LOOP,
+            "id=%p: fd errored, sending pulse for fd %d",
+            (void *)event_loop,
+            ionotify_event_data->handle->data.fd);
+        struct ionotify_loop *ionotify_loop = event_loop->impl_data;
+        int send_rc = MsgSendPulse(
+            ionotify_loop->pulse_connection_id, -1, IO_EVENT_UPDATE_ERROR_SIGEV_CODE, ionotify_event_data->handle_id);
+        int errno_value = errno;
+        if (send_rc == -1) {
+            AWS_LOGF_ERROR(
+                AWS_LS_IO_EVENT_LOOP,
+                "id=%p: Failed to send UPDATE_ERROR pulse for fd %d: error %d (%s)",
+                (void *)event_loop,
+                ionotify_event_data->handle->data.fd,
+                errno_value,
+                strerror(errno_value));
+        }
+    }
+}
+
+struct ionotify_io_op_results {
+    struct aws_io_handle_io_op_result io_op_result;
+    struct aws_event_loop *event_loop;
+    struct aws_io_handle *handle;
+};
+
+static void s_update_io_result_task(struct aws_task *task, void *user_data, enum aws_task_status status) {
+    struct ionotify_io_op_results *ionotify_io_op_results = user_data;
+    struct aws_event_loop *event_loop = ionotify_io_op_results->event_loop;
+
+    aws_mem_release(event_loop->alloc, task);
+
+    /* If task was cancelled, nothing to do. */
+    if (status == AWS_TASK_STATUS_CANCELED) {
+        aws_mem_release(event_loop->alloc, ionotify_io_op_results);
+        return;
+    }
+
+    s_process_io_result(event_loop, ionotify_io_op_results->handle, &ionotify_io_op_results->io_op_result);
+
+    aws_mem_release(event_loop->alloc, ionotify_io_op_results);
+}
+
+/* This callback is called by I/O operations to notify about their results. */
+static void s_update_io_result(
+    struct aws_event_loop *event_loop,
+    struct aws_io_handle *handle,
+    const struct aws_io_handle_io_op_result *io_op_result) {
+
+    if (!s_is_on_callers_thread(event_loop)) {
+        /* Move processing of I/O operation results to the event-loop thread if the operation was performed on another
+         * thread. */
+        AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Got I/O operation result from another thread", (void *)event_loop);
+        struct aws_task *task = aws_mem_calloc(event_loop->alloc, 1, sizeof(struct aws_task));
+        struct ionotify_io_op_results *ionotify_io_op_results =
+            aws_mem_calloc(event_loop->alloc, 1, sizeof(struct ionotify_io_op_results));
+        ionotify_io_op_results->event_loop = event_loop;
+        ionotify_io_op_results->handle = handle;
+        memcpy(&ionotify_io_op_results->io_op_result, io_op_result, sizeof(struct aws_io_handle_io_op_result));
+        aws_task_init(task, s_update_io_result_task, ionotify_io_op_results, "ionotify_event_loop_resubscribe_ct");
+        struct ionotify_loop *ionotify_loop = event_loop->impl_data;
+        aws_task_scheduler_schedule_now(&ionotify_loop->scheduler, task);
+        return;
+    }
+
+    s_process_io_result(event_loop, handle, io_op_result);
+}
+
+static int s_subscribe_to_io_events(
+    struct aws_event_loop *event_loop,
+    struct aws_io_handle *handle,
+    int events,
+    aws_event_loop_on_event_fn *on_event,
+    void *user_data) {
+
+    struct ionotify_loop *ionotify_loop = event_loop->impl_data;
+
+    AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Subscribing to events on fd %d", (void *)event_loop, handle->data.fd);
+    struct ionotify_event_data *ionotify_event_data =
+        aws_mem_calloc(event_loop->alloc, 1, sizeof(struct ionotify_event_data));
+    handle->additional_data = ionotify_event_data;
+
+    ionotify_event_data->alloc = event_loop->alloc;
+
ionotify_event_data->handle = handle;
+    ionotify_event_data->event_loop = event_loop;
+    ionotify_event_data->on_event = on_event;
+    ionotify_event_data->events_subscribed = events;
+    ionotify_event_data->pulse_connection_id = ionotify_loop->pulse_connection_id;
+    ionotify_event_data->user_data = user_data;
+    ionotify_event_data->handle->update_io_result = s_update_io_result;
+
+    aws_task_init(
+        &ionotify_event_data->subscribe_task, s_subscribe_task, ionotify_event_data, "ionotify_event_loop_subscribe");
+    s_schedule_task_now(event_loop, &ionotify_event_data->subscribe_task);
+
+    return AWS_OP_SUCCESS;
+}
+
+static void s_free_io_event_resources(void *user_data) {
+    struct ionotify_event_data *event_data = user_data;
+    AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "Releasing ionotify_event_data at %p", user_data);
+    aws_mem_release(event_data->alloc, (void *)event_data);
+}
+
+static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) {
+    AWS_LOGF_TRACE(
+        AWS_LS_IO_EVENT_LOOP, "id=%p: un-subscribing from events on fd %d", (void *)event_loop, handle->data.fd);
+
+    struct ionotify_loop *ionotify_loop = event_loop->impl_data;
+
+    AWS_ASSERT(handle->additional_data);
+    struct ionotify_event_data *ionotify_event_data = handle->additional_data;
+
+    /* Disarm resource manager for a given fd. */
+    int event_mask = _NOTIFY_COND_EXTEN | _NOTIFY_CONDE_ERR | _NOTIFY_CONDE_HUP | _NOTIFY_CONDE_NVAL;
+    event_mask |= _NOTIFY_COND_INPUT | _NOTIFY_CONDE_RDNORM | _NOTIFY_COND_OBAND;
+    event_mask |= _NOTIFY_COND_OUTPUT | _NOTIFY_CONDE_WRNORM;
+    int rc = ionotify(ionotify_event_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, event_mask, NULL);
+    int errno_value = errno;
+    if (rc == -1) {
+        AWS_LOGF_ERROR(
+            AWS_LS_IO_EVENT_LOOP,
+            "id=%p: Failed to unsubscribe from events on fd %d: error %d (%s)",
+            (void *)event_loop,
+            ionotify_event_data->handle->data.fd,
+            errno_value,
+            strerror(errno_value));
+        return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
+    }
+
+    /* Events for this handle may already be in flight; mark it as unsubscribed so the main loop ignores them if they
+     * race with this cleanup. */
+    ionotify_event_data->is_subscribed = false;
+
+    AWS_LOGF_TRACE(
+        AWS_LS_IO_EVENT_LOOP,
+        "id=%p: Removing from handles map using ID %u",
+        (void *)event_loop,
+        ionotify_event_data->handle_id);
+    s_remove_handle(event_loop, ionotify_loop, ionotify_event_data->handle_id);
+
+    handle->additional_data = NULL;
+    handle->update_io_result = NULL;
+
+    /* The main loop obtains ionotify_event_data instances from the hash map, so it's safe to release this one right
+     * here.
*/ + s_free_io_event_resources(ionotify_event_data); + + return AWS_OP_SUCCESS; +} + +static bool s_is_on_callers_thread(struct aws_event_loop *event_loop) { + struct ionotify_loop *ionotify_loop = event_loop->impl_data; + + aws_thread_id_t *thread_id = aws_atomic_load_ptr(&ionotify_loop->running_thread_id); + return thread_id && aws_thread_thread_id_equal(*thread_id, aws_thread_current_thread_id()); +} + +static void s_process_task_pre_queue(struct aws_event_loop *event_loop) { + struct ionotify_loop *ionotify_loop = event_loop->impl_data; + + AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Processing cross-thread tasks", (void *)event_loop); + + struct aws_linked_list task_pre_queue; + aws_linked_list_init(&task_pre_queue); + + aws_mutex_lock(&ionotify_loop->task_pre_queue_mutex); + aws_linked_list_swap_contents(&ionotify_loop->task_pre_queue, &task_pre_queue); + aws_mutex_unlock(&ionotify_loop->task_pre_queue_mutex); + + while (!aws_linked_list_empty(&task_pre_queue)) { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&task_pre_queue); + struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p: task %p pulled to event-loop, scheduling now.", + (void *)event_loop, + (void *)task); + /* Timestamp 0 is used to denote "now" tasks */ + if (task->timestamp == 0) { + aws_task_scheduler_schedule_now(&ionotify_loop->scheduler, task); + } else { + aws_task_scheduler_schedule_future(&ionotify_loop->scheduler, task, task->timestamp); + } + } +} + +/** + * This just calls MsgReceive(). + * + * We broke this out into its own function so that the stacktrace clearly shows + * what this thread is doing. We've had a lot of cases where users think this + * thread is deadlocked because it's stuck here. We want it to be clear + * that it's doing nothing on purpose. It's waiting for events to happen... + */ +AWS_NO_INLINE +static rcvid_t aws_event_loop_listen_for_io_events( + int io_events_channel_id, + const uint64_t *timeout, + struct _pulse *pulse, + int *errno_value) { + /* Event of type SIGEV_UNBLOCK makes the timed-out kernel call fail with an error of ETIMEDOUT. 
*/
+    struct sigevent notify;
+    SIGEV_UNBLOCK_INIT(&notify);
+    int rc = TimerTimeout(CLOCK_MONOTONIC, _NTO_TIMEOUT_RECEIVE, &notify, timeout, NULL);
+    if (rc == -1) {
+        *errno_value = errno;
+        return rc;
+    }
+    rcvid_t rcvid = MsgReceive(io_events_channel_id, pulse, sizeof(*pulse), NULL);
+    if (rcvid == -1) {
+        *errno_value = errno;
+    }
+    return rcvid;
+}
+
+static void s_aws_ionotify_cleanup_aws_lc_thread_local_state(void *user_data) {
+    (void)user_data;
+    aws_cal_thread_clean_up();
+}
+
+static void s_process_pulse(
+    struct aws_event_loop *event_loop,
+    const struct _pulse *pulse,
+    bool *should_process_cross_thread_tasks) {
+    if (pulse->code == CROSS_THREAD_PULSE_SIGEV_CODE) {
+        AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: MsgReceived got cross-thread pulse", (void *)event_loop);
+        *should_process_cross_thread_tasks = true;
+        return;
+    }
+
+    int user_data = pulse->value.sival_int;
+
+    int handle_id = user_data & _NOTIFY_DATA_MASK;
+    if (handle_id == 0) {
+        AWS_LOGF_ERROR(AWS_LS_IO_EVENT_LOOP, "id=%p: Got pulse with empty handle ID, ignoring it", (void *)event_loop);
+        return;
+    }
+
+    AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Got pulse for handle ID %u", (void *)event_loop, handle_id);
+
+    struct ionotify_loop *ionotify_loop = event_loop->impl_data;
+    struct ionotify_event_data *ionotify_event_data = s_find_handle(event_loop, ionotify_loop, handle_id);
+    if (ionotify_event_data == NULL) {
+        /* This situation is totally OK when the corresponding fd is already unsubscribed. */
+        AWS_LOGF_DEBUG(
+            AWS_LS_IO_EVENT_LOOP,
+            "id=%p: No mapped data found for handle ID %d, fd must be already unsubscribed",
+            (void *)event_loop,
+            handle_id);
+        return;
+    }
+
+    if (!ionotify_event_data->is_subscribed) {
+        return;
+    }
+
+    AWS_LOGF_TRACE(
+        AWS_LS_IO_EVENT_LOOP,
+        "id=%p: Processing fd %d: pulse code %d",
+        (void *)event_loop,
+        ionotify_event_data->handle->data.fd,
+        pulse->code);
+    int event_mask = 0;
+    if (pulse->value.sival_int & _NOTIFY_COND_OBAND) {
+        AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: fd got out-of-band data", (void *)event_loop);
+        event_mask |= AWS_IO_EVENT_TYPE_READABLE;
+    }
+    if (pulse->value.sival_int & _NOTIFY_COND_INPUT) {
+        AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: fd is readable", (void *)event_loop);
+        event_mask |= AWS_IO_EVENT_TYPE_READABLE;
+    }
+    if (pulse->value.sival_int & _NOTIFY_COND_OUTPUT) {
+        AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: fd is writable", (void *)event_loop);
+        event_mask |= AWS_IO_EVENT_TYPE_WRITABLE;
+    }
+    if (pulse->value.sival_int & _NOTIFY_COND_EXTEN) {
+        AWS_LOGF_TRACE(
+            AWS_LS_IO_EVENT_LOOP,
+            "id=%p: fd has extended condition: %d %d",
+            (void *)event_loop,
+            pulse->code,
+            ionotify_event_data->event.sigev_code);
+        if (pulse->code != IO_EVENT_PULSE_SIGEV_CODE) {
+            event_mask |= AWS_IO_EVENT_TYPE_ERROR;
+        }
+    }
+
+    if (ionotify_event_data->latest_io_event_types == AWS_IO_EVENT_TYPE_CLOSED) {
+        AWS_LOGF_TRACE(
+            AWS_LS_IO_EVENT_LOOP, "id=%p: latest_io_event_types is AWS_IO_EVENT_TYPE_CLOSED", (void *)event_loop);
+        event_mask |= AWS_IO_EVENT_TYPE_CLOSED;
+    }
+
+    /* Reset the I/O operation code to not process it twice.
*/
+    ionotify_event_data->latest_io_event_types = 0;
+
+    ionotify_event_data->on_event(event_loop, ionotify_event_data->handle, event_mask, ionotify_event_data->user_data);
+}
+
+static void aws_event_loop_thread(void *args) {
+    struct aws_event_loop *event_loop = args;
+    AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: main loop started", (void *)event_loop);
+    struct ionotify_loop *ionotify_loop = event_loop->impl_data;
+
+    /* set thread id to the thread of the event loop */
+    aws_atomic_store_ptr(&ionotify_loop->running_thread_id, &ionotify_loop->thread_created_on.thread_id);
+
+    aws_thread_current_at_exit(s_aws_ionotify_cleanup_aws_lc_thread_local_state, NULL);
+
+    /* Default timeout is 100 seconds. */
+    static uint64_t DEFAULT_TIMEOUT_NS = 100ULL * AWS_TIMESTAMP_NANOS;
+
+    uint64_t timeout = DEFAULT_TIMEOUT_NS;
+
+    AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Default timeout %" PRIu64, (void *)event_loop, timeout);
+
+    /* Until stop is called:
+     * - Call MsgReceive. If a task is scheduled, or a file descriptor has activity, it will return.
+     * - Process all I/O events.
+     * - Run all scheduled tasks.
+     * - Process queued subscription cleanups.
+     */
+    while (ionotify_loop->should_continue) {
+        bool should_process_cross_thread_tasks = false;
+
+        AWS_LOGF_TRACE(
+            AWS_LS_IO_EVENT_LOOP, "id=%p: Waiting for a maximum of %" PRIu64 " ns", (void *)event_loop, timeout);
+        struct _pulse pulse;
+        int errno_value;
+        rcvid_t rcvid =
+            aws_event_loop_listen_for_io_events(ionotify_loop->io_events_channel_id, &timeout, &pulse, &errno_value);
+        aws_event_loop_register_tick_start(event_loop);
+
+        AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Wake up with rcvid %ld", (void *)event_loop, rcvid);
+        if (rcvid == 0) {
+            s_process_pulse(event_loop, &pulse, &should_process_cross_thread_tasks);
+        } else if (rcvid > 0) {
+            AWS_LOGF_WARN(AWS_LS_IO_EVENT_LOOP, "id=%p: Received message, ignoring it", (void *)event_loop);
+        } else {
+            if (errno_value == ETIMEDOUT) {
+                AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Woke up by timeout", (void *)event_loop);
+            } else {
+                AWS_LOGF_ERROR(
+                    AWS_LS_IO_EVENT_LOOP,
+                    "id=%p: Listening for I/O events failed with error %d (%s)",
+                    (void *)event_loop,
+                    errno_value,
+                    strerror(errno_value));
+            }
+        }
+
+        /* Run scheduled tasks. */
+        if (should_process_cross_thread_tasks) {
+            AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Processing prequeued tasks", (void *)event_loop);
+            s_process_task_pre_queue(event_loop);
+        }
+
+        uint64_t now_ns = 0;
+        event_loop->clock(&now_ns);
+        AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Running scheduled tasks", (void *)event_loop);
+        aws_task_scheduler_run_all(&ionotify_loop->scheduler, now_ns);
+
+        /* Set timeout for next MsgReceive call.
+         * If clock fails, or scheduler has no tasks, use default timeout. */
+        bool use_default_timeout = false;
+
+        if (event_loop->clock(&now_ns)) {
+            use_default_timeout = true;
+        }
+
+        uint64_t next_run_time_ns;
+        if (!aws_task_scheduler_has_tasks(&ionotify_loop->scheduler, &next_run_time_ns)) {
+            use_default_timeout = true;
+        }
+
+        if (use_default_timeout) {
+            AWS_LOGF_TRACE(
+                AWS_LS_IO_EVENT_LOOP, "id=%p: No more scheduled tasks, using default timeout.", (void *)event_loop);
+            timeout = DEFAULT_TIMEOUT_NS;
+        } else {
+            timeout = (next_run_time_ns > now_ns) ?
(next_run_time_ns - now_ns) : 0; + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p: Detected more scheduled tasks with the next occurring at %" PRIu64 + ", using timeout of %" PRIu64, + (void *)event_loop, + next_run_time_ns, + timeout); + } + + aws_event_loop_register_tick_end(event_loop); + } + + AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "id=%p: Exiting main loop", (void *)event_loop); + /* set thread id back to NULL. This should be updated again in destroy, before tasks are canceled. */ + aws_atomic_store_ptr(&ionotify_loop->running_thread_id, NULL); +} diff --git a/source/socket_channel_handler.c b/source/socket_channel_handler.c index a523a49ed..4332e8637 100644 --- a/source/socket_channel_handler.c +++ b/source/socket_channel_handler.c @@ -144,7 +144,7 @@ static void s_do_read(struct socket_handler *socket_handler) { struct aws_io_handle_io_op_result io_op_result; memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); AWS_ASSERT(socket_handler->socket->io_handle.update_io_result); -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ size_t total_read = 0; size_t read = 0; @@ -160,14 +160,14 @@ static void s_do_read(struct socket_handler *socket_handler) { aws_mem_release(message->allocator, message); #if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = last_error; -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } total_read += read; #if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_bytes += read; -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "id=%p: read %llu from socket", @@ -179,7 +179,7 @@ static void s_do_read(struct socket_handler *socket_handler) { aws_mem_release(message->allocator, message); #if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = last_error; -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } } @@ -199,7 +199,7 @@ static void s_do_read(struct socket_handler *socket_handler) { if (last_error != AWS_IO_READ_WOULD_BLOCK) { #if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = last_error; -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ aws_channel_shutdown(socket_handler->slot->channel, last_error); } else { AWS_LOGF_TRACE( @@ -209,12 +209,12 @@ static void s_do_read(struct socket_handler *socket_handler) { (void *)socket_handler->slot->handler); #if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ } #if AWS_USE_ON_EVENT_WITH_RESULT socket_handler->socket->io_handle.update_io_result( socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ return; } /* in this case, everything was fine, but there's still pending reads. We need to schedule a task to do the read @@ -234,7 +234,7 @@ static void s_do_read(struct socket_handler *socket_handler) { #if AWS_USE_ON_EVENT_WITH_RESULT socket_handler->socket->io_handle.update_io_result( socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); -#endif +#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ } /* the socket is either readable or errored out. If it's readable, kick off s_do_read() to do its thing. 
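The hunks above all serve one pattern inside s_do_read(): accumulate the outcome of each read iteration into a single aws_io_handle_io_op_result, then report it once on exit. Reduced to a skeleton (illustrative only; s_example_read_loop and its scratch buffer are hypothetical, and update_io_result is the handle callback this patch series introduces):

    /* Illustrative sketch, not part of this patch: one feedback report per drain loop. */
    static void s_example_read_loop(struct aws_socket *socket) {
        struct aws_io_handle_io_op_result io_op_result;
        AWS_ZERO_STRUCT(io_op_result);

        uint8_t storage[1024];
        struct aws_byte_buf buf = aws_byte_buf_from_empty_array(storage, sizeof(storage));
        size_t read = 0;

        while (aws_socket_read(socket, &buf, &read) == AWS_OP_SUCCESS) {
            io_op_result.read_bytes += read; /* accumulate; don't report per syscall */
            buf.len = 0;                     /* reuse the scratch buffer */
        }
        io_op_result.read_error_code = aws_last_error(); /* e.g. AWS_IO_READ_WOULD_BLOCK */

        /* A single report per invocation keeps the feedback channel cheap. */
        if (socket->io_handle.update_io_result) {
            socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result);
        }
    }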
*/ diff --git a/tests/event_loop_test.c b/tests/event_loop_test.c index e86448c8b..5daf6390b 100644 --- a/tests/event_loop_test.c +++ b/tests/event_loop_test.c @@ -313,6 +313,7 @@ AWS_TEST_CASE(event_loop_completion_events, s_test_event_loop_completion_events) #else /* !AWS_USE_IO_COMPLETION_PORTS */ +# include # include int aws_open_nonblocking_posix_pipe(int pipe_fds[2]); @@ -835,6 +836,17 @@ static int s_state_read_until_blocked(struct thread_tester *tester) { uint8_t buffer[512]; while (simple_pipe_read(&tester->read_handle, buffer, sizeof(buffer)) > 0) { } +# if AWS_USE_ON_EVENT_WITH_RESULT + if (errno == EAGAIN) { + if (tester->read_handle.update_io_result != NULL) { + struct aws_io_handle_io_op_result io_op_result; + AWS_ZERO_STRUCT(io_op_result); + io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; + tester->read_handle.update_io_result(tester->event_loop, &tester->read_handle, &io_op_result); + } else { + } + } +# endif return AWS_OP_SUCCESS; } diff --git a/tests/pipe_test.c b/tests/pipe_test.c index 053c5aefd..6fc9ebd4d 100644 --- a/tests/pipe_test.c +++ b/tests/pipe_test.c @@ -429,6 +429,11 @@ static void s_on_readable_event(struct aws_pipe_read_end *read_end, int error_co } s_signal_done_on_read_end_closed(state); } + } else { + /* Some event loop implementations (only QNX, to be fair) can't detect pipe closing one of its ends without + * performing operation on the other end. So, this read operation should notify event loop that the writing end + * is closed. */ + aws_pipe_read(&state->read_end, &state->buffers.dst, NULL); } return; From d709f28896530709d82239cec158ebfb65a91998 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Mon, 16 Sep 2024 14:04:57 -0700 Subject: [PATCH 14/39] Remove pipe test fix --- CMakeLists.txt | 3 --- tests/event_loop_test.c | 1 - tests/pipe_test.c | 5 ----- 3 files changed, 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1e7a6a875..9a4b93429 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -142,9 +142,6 @@ elseif (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" OR CMAKE_SYSTEM_NAME STREQUAL "NetB set(USE_S2N ON) elseif(CMAKE_SYSTEM_NAME STREQUAL "QNX") - file(GLOB AWS_IO_OS_HEADERS - ) - file(GLOB AWS_IO_OS_SRC "source/posix/*.c" "source/qnx/*.c" diff --git a/tests/event_loop_test.c b/tests/event_loop_test.c index 5daf6390b..1f988a24a 100644 --- a/tests/event_loop_test.c +++ b/tests/event_loop_test.c @@ -843,7 +843,6 @@ static int s_state_read_until_blocked(struct thread_tester *tester) { AWS_ZERO_STRUCT(io_op_result); io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; tester->read_handle.update_io_result(tester->event_loop, &tester->read_handle, &io_op_result); - } else { } } # endif diff --git a/tests/pipe_test.c b/tests/pipe_test.c index 6fc9ebd4d..053c5aefd 100644 --- a/tests/pipe_test.c +++ b/tests/pipe_test.c @@ -429,11 +429,6 @@ static void s_on_readable_event(struct aws_pipe_read_end *read_end, int error_co } s_signal_done_on_read_end_closed(state); } - } else { - /* Some event loop implementations (only QNX, to be fair) can't detect pipe closing one of its ends without - * performing operation on the other end. So, this read operation should notify event loop that the writing end - * is closed. 
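For context on the workaround being removed by this commit: QNX resource managers surface a peer's close only when some operation touches the descriptor, which is why the test previously nudged the read end. A successful zero-byte read is the tell, as the QNX pipe implementation added later in this series shows. A sketch of that check (illustrative only; s_example_peer_closed is hypothetical):

    /* Illustrative sketch, not part of this patch: detecting a closed writer by reading. */
    static bool s_example_peer_closed(struct aws_pipe_read_end *read_end) {
        uint8_t storage[32];
        struct aws_byte_buf dst = aws_byte_buf_from_empty_array(storage, sizeof(storage));
        size_t num_read = 0;
        /* A successful read of zero bytes means the write end is gone; the QNX pipe
         * implementation also feeds AWS_IO_SOCKET_CLOSED back via update_io_result. */
        return aws_pipe_read(read_end, &dst, &num_read) == AWS_OP_SUCCESS && num_read == 0;
    }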
*/ - aws_pipe_read(&state->read_end, &state->buffers.dst, NULL); } return; From d8a8085adaa165916f592a931961e2d0f9839a6c Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Mon, 16 Sep 2024 14:19:55 -0700 Subject: [PATCH 15/39] Copy posix stuff to qnx --- CMakeLists.txt | 1 - source/posix/pipe.c | 28 - source/posix/socket.c | 54 +- source/qnx/host_resolver.c | 121 +++ source/qnx/pipe.c | 610 +++++++++++ source/qnx/shared_library.c | 66 ++ source/qnx/socket.c | 2041 +++++++++++++++++++++++++++++++++++ 7 files changed, 2839 insertions(+), 82 deletions(-) create mode 100644 source/qnx/host_resolver.c create mode 100644 source/qnx/pipe.c create mode 100644 source/qnx/shared_library.c create mode 100644 source/qnx/socket.c diff --git a/CMakeLists.txt b/CMakeLists.txt index 9a4b93429..7b1890002 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -143,7 +143,6 @@ elseif (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" OR CMAKE_SYSTEM_NAME STREQUAL "NetB elseif(CMAKE_SYSTEM_NAME STREQUAL "QNX") file(GLOB AWS_IO_OS_SRC - "source/posix/*.c" "source/qnx/*.c" ) set(EVENT_LOOP_DEFINE "ON_EVENT_WITH_RESULT") diff --git a/source/posix/pipe.c b/source/posix/pipe.c index 4fe189d95..f727b021c 100644 --- a/source/posix/pipe.c +++ b/source/posix/pipe.c @@ -278,28 +278,10 @@ int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_b if (read_val < 0) { int errno_value = errno; /* Always cache errno before potential side-effect */ if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { -#if AWS_USE_ON_EVENT_WITH_RESULT - if (read_impl->handle.update_io_result) { - struct aws_io_handle_io_op_result io_op_result; - AWS_ZERO_STRUCT(io_op_result); - io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; - read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result); - } -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } return s_raise_posix_error(errno_value); } -#if AWS_USE_ON_EVENT_WITH_RESULT - else if (read_val == 0) { - if (read_impl->handle.update_io_result) { - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - io_op_result.error_code = AWS_IO_SOCKET_CLOSED; - read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result); - } - } -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ /* Success */ dst_buffer->len += read_val; @@ -472,16 +454,6 @@ static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { /* The pipe is no longer writable. 
Bail out */ write_impl->is_writable = false; - -#if AWS_USE_ON_EVENT_WITH_RESULT - if (write_impl->handle.update_io_result) { - struct aws_io_handle_io_op_result io_op_result; - AWS_ZERO_STRUCT(io_op_result); - io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; - write_impl->handle.update_io_result(write_impl->event_loop, &write_impl->handle, &io_op_result); - } -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ - return; } diff --git a/source/posix/socket.c b/source/posix/socket.c index 8ccf2401a..dbbf62657 100644 --- a/source/posix/socket.c +++ b/source/posix/socket.c @@ -476,16 +476,6 @@ static void s_socket_connect_event( "id=%p fd=%d: spurious event, waiting for another notification.", (void *)socket_args->socket, handle->data.fd); - -#if AWS_USE_ON_EVENT_WITH_RESULT - if (handle->update_io_result) { - struct aws_io_handle_io_op_result io_op_result; - AWS_ZERO_STRUCT(io_op_result); - io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; - handle->update_io_result(event_loop, handle, &io_op_result); - } -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ - return; } @@ -965,11 +955,6 @@ static void s_socket_accept_event( AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: listening event received", (void *)socket, socket->io_handle.data.fd); -#if AWS_USE_ON_EVENT_WITH_RESULT - struct aws_io_handle_io_op_result io_op_result; - AWS_ZERO_STRUCT(io_op_result); -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ - if (socket_impl->continue_accept && events & AWS_IO_EVENT_TYPE_READABLE) { int in_fd = 0; while (socket_impl->continue_accept && in_fd != -1) { @@ -981,18 +966,12 @@ static void s_socket_accept_event( int errno_value = errno; /* Always cache errno before potential side-effect */ if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { -#if AWS_USE_ON_EVENT_WITH_RESULT - io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } int aws_error = aws_socket_get_error(socket); aws_raise_error(aws_error); s_on_connection_error(socket, aws_error); -#if AWS_USE_ON_EVENT_WITH_RESULT - io_op_result.read_error_code = aws_error; -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } @@ -1085,12 +1064,6 @@ static void s_socket_accept_event( } } -#if AWS_USE_ON_EVENT_WITH_RESULT - if (handle->update_io_result) { - handle->update_io_result(event_loop, handle, &io_op_result); - } -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ - AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: finished processing incoming connections, " @@ -1659,11 +1632,6 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc bool parent_request_failed = false; bool pushed_to_written_queue = false; -#if AWS_USE_ON_EVENT_WITH_RESULT - struct aws_io_handle_io_op_result io_op_result; - AWS_ZERO_STRUCT(io_op_result); -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ - /* if a close call happens in the middle, this queue will have been cleaned out from under us. 
*/ while (!aws_linked_list_empty(&socket_impl->write_queue)) { struct aws_linked_list_node *node = aws_linked_list_front(&socket_impl->write_queue); @@ -1692,9 +1660,6 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc if (errno_value == EAGAIN) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: returned would block", (void *)socket, socket->io_handle.data.fd); -#if AWS_USE_ON_EVENT_WITH_RESULT - io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } @@ -1707,9 +1672,6 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc aws_error = AWS_IO_SOCKET_CLOSED; aws_raise_error(aws_error); purge = true; -#if AWS_USE_ON_EVENT_WITH_RESULT - io_op_result.write_error_code = aws_error; -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } @@ -1722,16 +1684,9 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc errno_value); aws_error = s_determine_socket_error(errno_value); aws_raise_error(aws_error); -#if AWS_USE_ON_EVENT_WITH_RESULT - io_op_result.write_error_code = aws_error; -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } -#if AWS_USE_ON_EVENT_WITH_RESULT - io_op_result.written_bytes += (size_t)written; -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ - size_t remaining_to_write = write_request->cursor_cpy.len; aws_byte_cursor_advance(&write_request->cursor_cpy, (size_t)written); @@ -1777,12 +1732,6 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc aws_event_loop_schedule_task_now(socket->event_loop, &socket_impl->written_task); } -#if AWS_USE_ON_EVENT_WITH_RESULT - if (socket->io_handle.update_io_result) { - socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result); - } -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ - /* Only report error if aws_socket_write() invoked this function and its write_request failed */ if (!parent_request_failed) { return AWS_OP_SUCCESS; @@ -2056,6 +2005,5 @@ void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint char uuid_str[AWS_UUID_STR_LEN] = {0}; struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); - /* TODO QNX allows creating a socket file only in /tmp directory. */ - snprintf(endpoint->address, sizeof(endpoint->address), "/tmp/testsock" PRInSTR ".sock", AWS_BYTE_BUF_PRI(uuid_buf)); + snprintf(endpoint->address, sizeof(endpoint->address), "testsock" PRInSTR ".sock", AWS_BYTE_BUF_PRI(uuid_buf)); } diff --git a/source/qnx/host_resolver.c b/source/qnx/host_resolver.c new file mode 100644 index 000000000..e4aafb838 --- /dev/null +++ b/source/qnx/host_resolver.c @@ -0,0 +1,121 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include + +#include + +#include + +#include +#include +#include +#include + +int aws_default_dns_resolve( + struct aws_allocator *allocator, + const struct aws_string *host_name, + struct aws_array_list *output_addresses, + void *user_data) { + + (void)user_data; + struct addrinfo *result = NULL; + struct addrinfo *iter = NULL; + /* max string length for ipv6. 
*/ + socklen_t max_len = INET6_ADDRSTRLEN; + char address_buffer[max_len]; + + const char *hostname_cstr = aws_string_c_str(host_name); + AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolving host %s", hostname_cstr); + + /* Android would prefer NO HINTS IF YOU DON'T MIND, SIR */ +#if defined(ANDROID) + int err_code = getaddrinfo(hostname_cstr, NULL, NULL, &result); +#else + struct addrinfo hints; + AWS_ZERO_STRUCT(hints); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; +# if !defined(__OpenBSD__) + hints.ai_flags = AI_ALL | AI_V4MAPPED; +# endif /* __OpenBSD__ */ + + int err_code = getaddrinfo(hostname_cstr, NULL, &hints, &result); +#endif + + if (err_code) { + AWS_LOGF_ERROR( + AWS_LS_IO_DNS, "static: getaddrinfo failed with error_code %d: %s", err_code, gai_strerror(err_code)); + goto clean_up; + } + + for (iter = result; iter != NULL; iter = iter->ai_next) { + struct aws_host_address host_address; + + AWS_ZERO_ARRAY(address_buffer); + + if (iter->ai_family == AF_INET6) { + host_address.record_type = AWS_ADDRESS_RECORD_TYPE_AAAA; + inet_ntop(iter->ai_family, &((struct sockaddr_in6 *)iter->ai_addr)->sin6_addr, address_buffer, max_len); + } else { + host_address.record_type = AWS_ADDRESS_RECORD_TYPE_A; + inet_ntop(iter->ai_family, &((struct sockaddr_in *)iter->ai_addr)->sin_addr, address_buffer, max_len); + } + + size_t address_len = strlen(address_buffer); + const struct aws_string *address = + aws_string_new_from_array(allocator, (const uint8_t *)address_buffer, address_len); + + if (!address) { + goto clean_up; + } + + const struct aws_string *host_cpy = aws_string_new_from_string(allocator, host_name); + + if (!host_cpy) { + aws_string_destroy((void *)address); + goto clean_up; + } + + AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolved record: %s", address_buffer); + + host_address.address = address; + host_address.weight = 0; + host_address.allocator = allocator; + host_address.use_count = 0; + host_address.connection_failure_count = 0; + host_address.host = host_cpy; + + if (aws_array_list_push_back(output_addresses, &host_address)) { + aws_host_address_clean_up(&host_address); + goto clean_up; + } + } + + freeaddrinfo(result); + return AWS_OP_SUCCESS; + +clean_up: + if (result) { + freeaddrinfo(result); + } + + if (err_code) { + switch (err_code) { + case EAI_FAIL: + case EAI_AGAIN: + return aws_raise_error(AWS_IO_DNS_QUERY_FAILED); + case EAI_MEMORY: + return aws_raise_error(AWS_ERROR_OOM); + case EAI_NONAME: + case EAI_SERVICE: + return aws_raise_error(AWS_IO_DNS_INVALID_NAME); + default: + return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); + } + } + + return AWS_OP_ERR; +} diff --git a/source/qnx/pipe.c b/source/qnx/pipe.c new file mode 100644 index 000000000..78578baf2 --- /dev/null +++ b/source/qnx/pipe.c @@ -0,0 +1,610 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include + +#include + +#ifdef __GLIBC__ +# define __USE_GNU +#endif + +/* TODO: move this detection to CMAKE and a config header */ +#if !defined(COMPAT_MODE) && defined(__GLIBC__) && ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 9) || __GLIBC__ > 2) +# define HAVE_PIPE2 1 +#else +# define HAVE_PIPE2 0 +#endif + +#include +#include +#include + +/* This isn't defined on ancient linux distros (breaking the builds). + * However, if this is a prebuild, we purposely build on an ancient system, but + * we want the kernel calls to still be the same as a modern build since that's likely the target of the application + * calling this code. 
Just define this if it isn't there already. GlibC and the kernel don't really care how the flag + * gets passed as long as it does. + */ +#ifndef O_CLOEXEC +# define O_CLOEXEC 02000000 +#endif + +struct read_end_impl { + struct aws_allocator *alloc; + struct aws_io_handle handle; + struct aws_event_loop *event_loop; + aws_pipe_on_readable_fn *on_readable_user_callback; + void *on_readable_user_data; + + /* Used in handshake for detecting whether user callback resulted in read-end being cleaned up. + * If clean_up() sees that the pointer is set, the bool it points to will get set true. */ + bool *did_user_callback_clean_up_read_end; + + bool is_subscribed; +}; + +struct pipe_write_request { + struct aws_byte_cursor original_cursor; + struct aws_byte_cursor cursor; /* tracks progress of write */ + size_t num_bytes_written; + aws_pipe_on_write_completed_fn *user_callback; + void *user_data; + struct aws_linked_list_node list_node; + + /* True if the write-end is cleaned up while the user callback is being invoked */ + bool did_user_callback_clean_up_write_end; +}; + +struct write_end_impl { + struct aws_allocator *alloc; + struct aws_io_handle handle; + struct aws_event_loop *event_loop; + struct aws_linked_list write_list; + + /* Valid while invoking user callback on a completed write request. */ + struct pipe_write_request *currently_invoking_write_callback; + + bool is_writable; + + /* Future optimization idea: avoid an allocation on each write by keeping 1 pre-allocated pipe_write_request around + * and re-using it whenever possible */ +}; + +static void s_write_end_on_event( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + int events, + void *user_data); + +static int s_translate_posix_error(int err) { + AWS_ASSERT(err); + + switch (err) { + case EPIPE: + return AWS_IO_BROKEN_PIPE; + default: + return AWS_ERROR_SYS_CALL_FAILURE; + } +} + +static int s_raise_posix_error(int err) { + return aws_raise_error(s_translate_posix_error(err)); +} + +AWS_IO_API int aws_open_nonblocking_posix_pipe(int pipe_fds[2]) { + int err; + +#if HAVE_PIPE2 + err = pipe2(pipe_fds, O_NONBLOCK | O_CLOEXEC); + if (err) { + return s_raise_posix_error(err); + } + + return AWS_OP_SUCCESS; +#else + err = pipe(pipe_fds); + if (err) { + return s_raise_posix_error(err); + } + + for (int i = 0; i < 2; ++i) { + int flags = fcntl(pipe_fds[i], F_GETFL); + if (flags == -1) { + s_raise_posix_error(err); + goto error; + } + + flags |= O_NONBLOCK | O_CLOEXEC; + if (fcntl(pipe_fds[i], F_SETFL, flags) == -1) { + s_raise_posix_error(err); + goto error; + } + } + + return AWS_OP_SUCCESS; +error: + close(pipe_fds[0]); + close(pipe_fds[1]); + return AWS_OP_ERR; +#endif +} + +int aws_pipe_init( + struct aws_pipe_read_end *read_end, + struct aws_event_loop *read_end_event_loop, + struct aws_pipe_write_end *write_end, + struct aws_event_loop *write_end_event_loop, + struct aws_allocator *allocator) { + + AWS_ASSERT(read_end); + AWS_ASSERT(read_end_event_loop); + AWS_ASSERT(write_end); + AWS_ASSERT(write_end_event_loop); + AWS_ASSERT(allocator); + + AWS_ZERO_STRUCT(*read_end); + AWS_ZERO_STRUCT(*write_end); + + struct read_end_impl *read_impl = NULL; + struct write_end_impl *write_impl = NULL; + int err; + + /* Open pipe */ + int pipe_fds[2]; + err = aws_open_nonblocking_posix_pipe(pipe_fds); + if (err) { + return AWS_OP_ERR; + } + + /* Init read-end */ + read_impl = aws_mem_calloc(allocator, 1, sizeof(struct read_end_impl)); + if (!read_impl) { + goto error; + } + + read_impl->alloc = allocator; + 
read_impl->handle.data.fd = pipe_fds[0]; + read_impl->event_loop = read_end_event_loop; + + /* Init write-end */ + write_impl = aws_mem_calloc(allocator, 1, sizeof(struct write_end_impl)); + if (!write_impl) { + goto error; + } + + write_impl->alloc = allocator; + write_impl->handle.data.fd = pipe_fds[1]; + write_impl->event_loop = write_end_event_loop; + write_impl->is_writable = true; /* Assume pipe is writable to start. Even if it's not, things shouldn't break */ + aws_linked_list_init(&write_impl->write_list); + + read_end->impl_data = read_impl; + write_end->impl_data = write_impl; + + err = aws_event_loop_subscribe_to_io_events( + write_end_event_loop, &write_impl->handle, AWS_IO_EVENT_TYPE_WRITABLE, s_write_end_on_event, write_end); + if (err) { + goto error; + } + + return AWS_OP_SUCCESS; + +error: + close(pipe_fds[0]); + close(pipe_fds[1]); + + if (read_impl) { + aws_mem_release(allocator, read_impl); + } + + if (write_impl) { + aws_mem_release(allocator, write_impl); + } + + read_end->impl_data = NULL; + write_end->impl_data = NULL; + + return AWS_OP_ERR; +} + +int aws_pipe_clean_up_read_end(struct aws_pipe_read_end *read_end) { + struct read_end_impl *read_impl = read_end->impl_data; + if (!read_impl) { + return aws_raise_error(AWS_IO_BROKEN_PIPE); + } + + if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { + return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); + } + + if (read_impl->is_subscribed) { + int err = aws_pipe_unsubscribe_from_readable_events(read_end); + if (err) { + return AWS_OP_ERR; + } + } + + /* If the event-handler is invoking a user callback, let it know that the read-end was cleaned up */ + if (read_impl->did_user_callback_clean_up_read_end) { + *read_impl->did_user_callback_clean_up_read_end = true; + } + + close(read_impl->handle.data.fd); + + aws_mem_release(read_impl->alloc, read_impl); + AWS_ZERO_STRUCT(*read_end); + return AWS_OP_SUCCESS; +} + +struct aws_event_loop *aws_pipe_get_read_end_event_loop(const struct aws_pipe_read_end *read_end) { + const struct read_end_impl *read_impl = read_end->impl_data; + if (!read_impl) { + aws_raise_error(AWS_IO_BROKEN_PIPE); + return NULL; + } + + return read_impl->event_loop; +} + +struct aws_event_loop *aws_pipe_get_write_end_event_loop(const struct aws_pipe_write_end *write_end) { + const struct write_end_impl *write_impl = write_end->impl_data; + if (!write_impl) { + aws_raise_error(AWS_IO_BROKEN_PIPE); + return NULL; + } + + return write_impl->event_loop; +} + +int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_buffer, size_t *num_bytes_read) { + AWS_ASSERT(dst_buffer && dst_buffer->buffer); + + struct read_end_impl *read_impl = read_end->impl_data; + if (!read_impl) { + return aws_raise_error(AWS_IO_BROKEN_PIPE); + } + + if (num_bytes_read) { + *num_bytes_read = 0; + } + + size_t num_bytes_to_read = dst_buffer->capacity - dst_buffer->len; + + ssize_t read_val = read(read_impl->handle.data.fd, dst_buffer->buffer + dst_buffer->len, num_bytes_to_read); + + if (read_val < 0) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { + // Return results back to event loop. 
+            if (read_impl->handle.update_io_result) {
+                struct aws_io_handle_io_op_result io_op_result;
+                AWS_ZERO_STRUCT(io_op_result);
+                io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK;
+                read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result);
+            }
+            return aws_raise_error(AWS_IO_READ_WOULD_BLOCK);
+        }
+        return s_raise_posix_error(errno_value);
+    } else if (read_val == 0) {
+        // Return results back to event loop.
+        if (read_impl->handle.update_io_result) {
+            struct aws_io_handle_io_op_result io_op_result;
+            memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result));
+            io_op_result.error_code = AWS_IO_SOCKET_CLOSED;
+            read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result);
+        }
+    }
+
+    /* Success */
+    dst_buffer->len += read_val;
+
+    if (num_bytes_read) {
+        *num_bytes_read = read_val;
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+static void s_read_end_on_event(
+    struct aws_event_loop *event_loop,
+    struct aws_io_handle *handle,
+    int events,
+    void *user_data) {
+
+    (void)event_loop;
+    (void)handle;
+
+    /* Note that it should be impossible for this to run after read-end has been unsubscribed or cleaned up */
+    struct aws_pipe_read_end *read_end = user_data;
+    struct read_end_impl *read_impl = read_end->impl_data;
+    AWS_ASSERT(read_impl);
+    AWS_ASSERT(read_impl->event_loop == event_loop);
+    AWS_ASSERT(&read_impl->handle == handle);
+    AWS_ASSERT(read_impl->is_subscribed);
+    AWS_ASSERT(events != 0);
+    AWS_ASSERT(read_impl->did_user_callback_clean_up_read_end == NULL);
+
+    /* Set up handshake, so we can be informed if the read-end is cleaned up while invoking a user callback */
+    bool did_user_callback_clean_up_read_end = false;
+    read_impl->did_user_callback_clean_up_read_end = &did_user_callback_clean_up_read_end;
+
+    /* If readable event received, tell user to try and read, even if "error" events have also occurred.
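The did_user_callback_clean_up_read_end handshake above is a general re-entrancy pattern: before invoking a callback that may destroy the object, the caller plants a pointer to a stack-local flag; the destructor sets it, and the caller checks it before touching the object again. Stripped of the pipe specifics (hypothetical widget type, illustrative only):

    /* Illustrative sketch, not part of this patch: the destroyed-during-callback handshake. */
    #include <stdbool.h>
    #include <stdlib.h>

    struct widget {
        bool *cleanup_flag; /* points at a caller's stack flag while a callback runs */
    };

    static void s_widget_destroy(struct widget *w) {
        if (w->cleanup_flag) {
            *w->cleanup_flag = true; /* tell the in-progress caller we are gone */
        }
        free(w);
    }

    static void s_widget_invoke(struct widget *w, void (*callback)(struct widget *)) {
        bool destroyed = false;
        w->cleanup_flag = &destroyed;

        callback(w); /* may call s_widget_destroy(w) re-entrantly */

        if (destroyed) {
            return; /* w is freed: touching it now would be a use-after-free */
        }
        w->cleanup_flag = NULL;
    }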
*/ + if (events & AWS_IO_EVENT_TYPE_READABLE) { + read_impl->on_readable_user_callback(read_end, AWS_ERROR_SUCCESS, read_impl->on_readable_user_data); + + if (did_user_callback_clean_up_read_end) { + return; + } + + events &= ~AWS_IO_EVENT_TYPE_READABLE; + } + + if (events) { + /* Check that user didn't unsubscribe in the previous callback */ + if (read_impl->is_subscribed) { + read_impl->on_readable_user_callback(read_end, AWS_IO_BROKEN_PIPE, read_impl->on_readable_user_data); + + if (did_user_callback_clean_up_read_end) { + return; + } + } + } + + read_impl->did_user_callback_clean_up_read_end = NULL; +} + +int aws_pipe_subscribe_to_readable_events( + struct aws_pipe_read_end *read_end, + aws_pipe_on_readable_fn *on_readable, + void *user_data) { + + AWS_ASSERT(on_readable); + + struct read_end_impl *read_impl = read_end->impl_data; + if (!read_impl) { + return aws_raise_error(AWS_IO_BROKEN_PIPE); + } + + if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { + return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); + } + + if (read_impl->is_subscribed) { + return aws_raise_error(AWS_ERROR_IO_ALREADY_SUBSCRIBED); + } + + read_impl->is_subscribed = true; + read_impl->on_readable_user_callback = on_readable; + read_impl->on_readable_user_data = user_data; + + int err = aws_event_loop_subscribe_to_io_events( + read_impl->event_loop, &read_impl->handle, AWS_IO_EVENT_TYPE_READABLE, s_read_end_on_event, read_end); + if (err) { + read_impl->is_subscribed = false; + read_impl->on_readable_user_callback = NULL; + read_impl->on_readable_user_data = NULL; + + return AWS_OP_ERR; + } + + return AWS_OP_SUCCESS; +} + +int aws_pipe_unsubscribe_from_readable_events(struct aws_pipe_read_end *read_end) { + struct read_end_impl *read_impl = read_end->impl_data; + if (!read_impl) { + return aws_raise_error(AWS_IO_BROKEN_PIPE); + } + + if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { + return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); + } + + if (!read_impl->is_subscribed) { + return aws_raise_error(AWS_ERROR_IO_NOT_SUBSCRIBED); + } + + int err = aws_event_loop_unsubscribe_from_io_events(read_impl->event_loop, &read_impl->handle); + if (err) { + return AWS_OP_ERR; + } + + read_impl->is_subscribed = false; + read_impl->on_readable_user_callback = NULL; + read_impl->on_readable_user_data = NULL; + + return AWS_OP_SUCCESS; +} + +/* Pop front write request, invoke its callback, and delete it. + * Returns whether the callback resulted in the write-end getting cleaned up */ +static bool s_write_end_complete_front_write_request(struct aws_pipe_write_end *write_end, int error_code) { + struct write_end_impl *write_impl = write_end->impl_data; + + AWS_ASSERT(!aws_linked_list_empty(&write_impl->write_list)); + struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list); + struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); + + struct aws_allocator *alloc = write_impl->alloc; + + /* Let the write-end know that a callback is in process, so the write-end can inform the callback + * whether it resulted in clean_up() being called. 
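From the caller's side, the subscribe/unsubscribe pair above is used roughly like this (illustrative sketch; s_on_pipe_readable and its drain logic are hypothetical):

    /* Illustrative sketch, not part of this patch: consuming readable events. */
    static void s_on_pipe_readable(struct aws_pipe_read_end *read_end, int error_code, void *user_data) {
        (void)user_data;
        if (error_code != AWS_ERROR_SUCCESS) {
            return; /* e.g. AWS_IO_BROKEN_PIPE: stop reading and clean up */
        }
        /* Drain with aws_pipe_read() until it raises AWS_IO_READ_WOULD_BLOCK. */
        (void)read_end;
    }

    static int s_example_start_reading(struct aws_pipe_read_end *read_end) {
        /* Must run on the read end's event-loop thread, or the call raises
         * AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY, as the checks above enforce. */
        return aws_pipe_subscribe_to_readable_events(read_end, s_on_pipe_readable, NULL);
    }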
*/ + bool write_end_cleaned_up_during_callback = false; + struct pipe_write_request *prev_invoking_request = write_impl->currently_invoking_write_callback; + write_impl->currently_invoking_write_callback = request; + + if (request->user_callback) { + request->user_callback(write_end, error_code, request->original_cursor, request->user_data); + write_end_cleaned_up_during_callback = request->did_user_callback_clean_up_write_end; + } + + if (!write_end_cleaned_up_during_callback) { + write_impl->currently_invoking_write_callback = prev_invoking_request; + } + + aws_mem_release(alloc, request); + + return write_end_cleaned_up_during_callback; +} + +/* Process write requests as long as the pipe remains writable */ +static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { + struct write_end_impl *write_impl = write_end->impl_data; + AWS_ASSERT(write_impl); + + while (!aws_linked_list_empty(&write_impl->write_list)) { + struct aws_linked_list_node *node = aws_linked_list_front(&write_impl->write_list); + struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); + + int completed_error_code = AWS_ERROR_SUCCESS; + + if (request->cursor.len > 0) { + ssize_t write_val = write(write_impl->handle.data.fd, request->cursor.ptr, request->cursor.len); + + if (write_val < 0) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { + /* The pipe is no longer writable. Bail out */ + write_impl->is_writable = false; + + // Return results back to event loop. + if (write_impl->handle.update_io_result) { + struct aws_io_handle_io_op_result io_op_result; + AWS_ZERO_STRUCT(io_op_result); + io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; + write_impl->handle.update_io_result(write_impl->event_loop, &write_impl->handle, &io_op_result); + } + + return; + } + + /* A non-recoverable error occurred during this write */ + completed_error_code = s_translate_posix_error(errno_value); + + } else { + aws_byte_cursor_advance(&request->cursor, write_val); + + if (request->cursor.len > 0) { + /* There was a partial write, loop again to try and write the rest. */ + continue; + } + } + } + + /* If we got this far in the loop, then the write request is complete. + * Note that the callback may result in the pipe being cleaned up. */ + bool write_end_cleaned_up = s_write_end_complete_front_write_request(write_end, completed_error_code); + if (write_end_cleaned_up) { + /* Bail out! Any remaining requests were canceled during clean_up() */ + return; + } + } +} + +/* Handle events on the write-end's file handle */ +static void s_write_end_on_event( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + int events, + void *user_data) { + + (void)event_loop; + (void)handle; + + /* Note that it should be impossible for this to run after write-end has been unsubscribed or cleaned up */ + struct aws_pipe_write_end *write_end = user_data; + struct write_end_impl *write_impl = write_end->impl_data; + AWS_ASSERT(write_impl); + AWS_ASSERT(write_impl->event_loop == event_loop); + AWS_ASSERT(&write_impl->handle == handle); + + /* Only care about the writable event. 
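The write path above relies on aws_byte_cursor to track partial progress: each short write advances the request's cursor, and the request stays queued until the cursor is empty. The core bookkeeping in isolation (illustrative only; the fd and the error routing are placeholders):

    /* Illustrative sketch, not part of this patch: cursor-based partial writes. */
    #include <errno.h>
    #include <unistd.h>

    #include <aws/common/byte_buf.h>

    static void s_example_partial_write(int fd, struct aws_byte_cursor *cursor) {
        while (cursor->len > 0) {
            ssize_t written = write(fd, cursor->ptr, cursor->len);
            if (written < 0) {
                if (errno == EAGAIN || errno == EWOULDBLOCK) {
                    return; /* not writable: resume from *cursor on the next writable event */
                }
                return; /* hard error: surface it via the request's completion callback */
            }
            /* Consume exactly what the kernel accepted; the remainder stays pending. */
            aws_byte_cursor_advance(cursor, (size_t)written);
        }
    }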
*/ + if ((events & AWS_IO_EVENT_TYPE_WRITABLE) == 0) { + return; + } + + write_impl->is_writable = true; + + s_write_end_process_requests(write_end); +} + +int aws_pipe_write( + struct aws_pipe_write_end *write_end, + struct aws_byte_cursor src_buffer, + aws_pipe_on_write_completed_fn *on_completed, + void *user_data) { + + AWS_ASSERT(src_buffer.ptr); + + struct write_end_impl *write_impl = write_end->impl_data; + if (!write_impl) { + return aws_raise_error(AWS_IO_BROKEN_PIPE); + } + + if (!aws_event_loop_thread_is_callers_thread(write_impl->event_loop)) { + return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); + } + + struct pipe_write_request *request = aws_mem_calloc(write_impl->alloc, 1, sizeof(struct pipe_write_request)); + if (!request) { + return AWS_OP_ERR; + } + + request->original_cursor = src_buffer; + request->cursor = src_buffer; + request->user_callback = on_completed; + request->user_data = user_data; + + aws_linked_list_push_back(&write_impl->write_list, &request->list_node); + + /* If the pipe is writable, process the request (unless pipe is already in the middle of processing, which could + * happen if a this aws_pipe_write() call was made by another write's completion callback */ + if (write_impl->is_writable && !write_impl->currently_invoking_write_callback) { + s_write_end_process_requests(write_end); + } + + return AWS_OP_SUCCESS; +} + +int aws_pipe_clean_up_write_end(struct aws_pipe_write_end *write_end) { + struct write_end_impl *write_impl = write_end->impl_data; + if (!write_impl) { + return aws_raise_error(AWS_IO_BROKEN_PIPE); + } + + if (!aws_event_loop_thread_is_callers_thread(write_impl->event_loop)) { + return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); + } + + int err = aws_event_loop_unsubscribe_from_io_events(write_impl->event_loop, &write_impl->handle); + if (err) { + return AWS_OP_ERR; + } + + close(write_impl->handle.data.fd); + + /* Zero out write-end before invoking user callbacks so that it won't work anymore with public functions. */ + AWS_ZERO_STRUCT(*write_end); + + /* If a request callback is currently being invoked, let it know that the write-end was cleaned up */ + if (write_impl->currently_invoking_write_callback) { + write_impl->currently_invoking_write_callback->did_user_callback_clean_up_write_end = true; + } + + /* Force any outstanding write requests to complete with an error status. */ + while (!aws_linked_list_empty(&write_impl->write_list)) { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list); + struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); + if (request->user_callback) { + request->user_callback(NULL, AWS_IO_BROKEN_PIPE, request->original_cursor, request->user_data); + } + aws_mem_release(write_impl->alloc, request); + } + + aws_mem_release(write_impl->alloc, write_impl); + return AWS_OP_SUCCESS; +} diff --git a/source/qnx/shared_library.c b/source/qnx/shared_library.c new file mode 100644 index 000000000..751c99bc2 --- /dev/null +++ b/source/qnx/shared_library.c @@ -0,0 +1,66 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */
+
+#include <aws/io/shared_library.h>
+
+#include <aws/io/logging.h>
+
+#include <dlfcn.h>
+
+static const char *s_null = "<NULL>";
+static const char *s_unknown_error = "<Unknown>";
+
+int aws_shared_library_init(struct aws_shared_library *library, const char *library_path) {
+    AWS_ZERO_STRUCT(*library);
+
+    library->library_handle = dlopen(library_path, RTLD_LAZY);
+    if (library->library_handle == NULL) {
+        const char *error = dlerror();
+        AWS_LOGF_ERROR(
+            AWS_LS_IO_SHARED_LIBRARY,
+            "id=%p: Failed to load shared library at path \"%s\" with error: %s",
+            (void *)library,
+            library_path ? library_path : s_null,
+            error ? error : s_unknown_error);
+        return aws_raise_error(AWS_IO_SHARED_LIBRARY_LOAD_FAILURE);
+    }
+
+    return AWS_OP_SUCCESS;
+}
+
+void aws_shared_library_clean_up(struct aws_shared_library *library) {
+    if (library && library->library_handle) {
+        dlclose(library->library_handle);
+        library->library_handle = NULL;
+    }
+}
+
+int aws_shared_library_find_function(
+    struct aws_shared_library *library,
+    const char *symbol_name,
+    aws_generic_function *function_address) {
+    if (library == NULL || library->library_handle == NULL) {
+        return aws_raise_error(AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE);
+    }
+
+    /*
+     * Suggested work around for (undefined behavior) cast from void * to function pointer
+     * in POSIX.1-2003 standard, at least according to dlsym man page code sample.
+     */
+    *(void **)(function_address) = dlsym(library->library_handle, symbol_name);
+
+    if (*function_address == NULL) {
+        const char *error = dlerror();
+        AWS_LOGF_ERROR(
+            AWS_LS_IO_SHARED_LIBRARY,
+            "id=%p: Failed to find shared library symbol \"%s\" with error: %s",
+            (void *)library,
+            symbol_name ? symbol_name : s_null,
+            error ? error : s_unknown_error);
+        return aws_raise_error(AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE);
+    }
+
+    return AWS_OP_SUCCESS;
+}
diff --git a/source/qnx/socket.c b/source/qnx/socket.c
new file mode 100644
index 000000000..fa131b393
--- /dev/null
+++ b/source/qnx/socket.c
@@ -0,0 +1,2041 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/io/socket.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/condition_variable.h>
+#include <aws/common/mutex.h>
+#include <aws/common/string.h>
+#include <aws/common/task_scheduler.h>
+
+#include <aws/io/event_loop.h>
+#include <aws/io/logging.h>
+
+#include <arpa/inet.h>
+#include <aws/io/io.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <netinet/in.h> /* Required when VSOCK is used */
+#include <netinet/tcp.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+/*
+ * On macOS, suppress SIGPIPE via flags to setsockopt()
+ * On Linux, suppress SIGPIPE via flags to send()
+ */
+#if defined(__MACH__)
+#    define NO_SIGNAL_SOCK_OPT SO_NOSIGPIPE
+#    define NO_SIGNAL_SEND 0
+#    define TCP_KEEPIDLE TCP_KEEPALIVE
+#else
+#    define NO_SIGNAL_SEND MSG_NOSIGNAL
+#endif
+
+/* This isn't defined on ancient Linux distros (breaking the builds).
+ * However, if this is a prebuild, we purposely build on an ancient system, but
+ * we want the kernel calls to still be the same as a modern build since that's likely the target of the application
+ * calling this code. Just define this if it isn't there already. glibc and the kernel don't really care how the flag
+ * gets passed as long as it does.
+ */
+#ifndef O_CLOEXEC
+#    define O_CLOEXEC 02000000
+#endif
+
+#ifdef USE_VSOCK
+#    if defined(__linux__) && defined(AF_VSOCK)
+#        include <linux/vm_sockets.h>
+#    else
+#        error "USE_VSOCK not supported on current platform"
+#    endif
+#endif
+
+/* other than CONNECTED_READ | CONNECTED_WRITE
+ * a socket is only in one of these states at a time.
*/ +enum socket_state { + INIT = 0x01, + CONNECTING = 0x02, + CONNECTED_READ = 0x04, + CONNECTED_WRITE = 0x08, + BOUND = 0x10, + LISTENING = 0x20, + TIMEDOUT = 0x40, + ERROR = 0x80, + CLOSED, +}; + +static int s_convert_domain(enum aws_socket_domain domain) { + switch (domain) { + case AWS_SOCKET_IPV4: + return AF_INET; + case AWS_SOCKET_IPV6: + return AF_INET6; + case AWS_SOCKET_LOCAL: + return AF_UNIX; +#ifdef USE_VSOCK + case AWS_SOCKET_VSOCK: + return AF_VSOCK; +#endif + default: + AWS_ASSERT(0); + return AF_INET; + } +} + +static int s_convert_type(enum aws_socket_type type) { + switch (type) { + case AWS_SOCKET_STREAM: + return SOCK_STREAM; + case AWS_SOCKET_DGRAM: + return SOCK_DGRAM; + default: + AWS_ASSERT(0); + return SOCK_STREAM; + } +} + +static int s_determine_socket_error(int error) { + switch (error) { + case ECONNREFUSED: + return AWS_IO_SOCKET_CONNECTION_REFUSED; + case ECONNRESET: + return AWS_IO_SOCKET_CLOSED; + case ETIMEDOUT: + return AWS_IO_SOCKET_TIMEOUT; + case EHOSTUNREACH: + case ENETUNREACH: + return AWS_IO_SOCKET_NO_ROUTE_TO_HOST; + case EADDRNOTAVAIL: + return AWS_IO_SOCKET_INVALID_ADDRESS; + case ENETDOWN: + return AWS_IO_SOCKET_NETWORK_DOWN; + case ECONNABORTED: + return AWS_IO_SOCKET_CONNECT_ABORTED; + case EADDRINUSE: + return AWS_IO_SOCKET_ADDRESS_IN_USE; + case ENOBUFS: + case ENOMEM: + return AWS_ERROR_OOM; + case EAGAIN: + return AWS_IO_READ_WOULD_BLOCK; + case EMFILE: + case ENFILE: + return AWS_ERROR_MAX_FDS_EXCEEDED; + case ENOENT: + case EINVAL: + return AWS_ERROR_FILE_INVALID_PATH; + case EAFNOSUPPORT: + return AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY; + case EACCES: + return AWS_ERROR_NO_PERMISSION; + default: + return AWS_IO_SOCKET_NOT_CONNECTED; + } +} + +static int s_create_socket(struct aws_socket *sock, const struct aws_socket_options *options) { + + int fd = socket(s_convert_domain(options->domain), s_convert_type(options->type), 0); + int errno_value = errno; /* Always cache errno before potential side-effect */ + + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: initializing with domain %d and type %d", + (void *)sock, + fd, + options->domain, + options->type); + if (fd != -1) { + int flags = fcntl(fd, F_GETFL, 0); + flags |= O_NONBLOCK | O_CLOEXEC; + int success = fcntl(fd, F_SETFL, flags); + (void)success; + sock->io_handle.data.fd = fd; + sock->io_handle.additional_data = NULL; + return aws_socket_set_options(sock, options); + } + + int aws_error = s_determine_socket_error(errno_value); + return aws_raise_error(aws_error); +} + +struct posix_socket_connect_args { + struct aws_task task; + struct aws_allocator *allocator; + struct aws_socket *socket; +}; + +struct posix_socket { + struct aws_linked_list write_queue; + struct aws_linked_list written_queue; + struct aws_task written_task; + struct posix_socket_connect_args *connect_args; + /* Note that only the posix_socket impl part is refcounted. + * The public aws_socket can be a stack variable and cleaned up synchronously + * (by blocking until the event-loop cleans up the impl part). 
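+     * aws_socket_clean_up() drops the initial reference; callbacks still in
+     * flight hold extra references, and s_socket_destroy_impl() runs once the
+     * count reaches zero.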
+ * In hindsight, aws_socket should have been heap-allocated and refcounted, but alas */ + struct aws_ref_count internal_refcount; + struct aws_allocator *allocator; + bool written_task_scheduled; + bool currently_subscribed; + bool continue_accept; + bool *close_happened; +}; + +static void s_socket_destroy_impl(void *user_data) { + struct posix_socket *socket_impl = user_data; + aws_mem_release(socket_impl->allocator, socket_impl); +} + +static int s_socket_init( + struct aws_socket *socket, + struct aws_allocator *alloc, + const struct aws_socket_options *options, + int existing_socket_fd) { + AWS_ASSERT(options); + AWS_ZERO_STRUCT(*socket); + + struct posix_socket *posix_socket = aws_mem_calloc(alloc, 1, sizeof(struct posix_socket)); + if (!posix_socket) { + socket->impl = NULL; + return AWS_OP_ERR; + } + + socket->allocator = alloc; + socket->io_handle.data.fd = -1; + socket->state = INIT; + socket->options = *options; + + if (existing_socket_fd < 0) { + int err = s_create_socket(socket, options); + if (err) { + aws_mem_release(alloc, posix_socket); + socket->impl = NULL; + return AWS_OP_ERR; + } + } else { + socket->io_handle = (struct aws_io_handle){ + .data = {.fd = existing_socket_fd}, + .additional_data = NULL, + }; + aws_socket_set_options(socket, options); + } + + aws_linked_list_init(&posix_socket->write_queue); + aws_linked_list_init(&posix_socket->written_queue); + posix_socket->currently_subscribed = false; + posix_socket->continue_accept = false; + aws_ref_count_init(&posix_socket->internal_refcount, posix_socket, s_socket_destroy_impl); + posix_socket->allocator = alloc; + posix_socket->connect_args = NULL; + posix_socket->close_happened = NULL; + socket->impl = posix_socket; + return AWS_OP_SUCCESS; +} + +int aws_socket_init(struct aws_socket *socket, struct aws_allocator *alloc, const struct aws_socket_options *options) { + AWS_ASSERT(options); + return s_socket_init(socket, alloc, options, -1); +} + +void aws_socket_clean_up(struct aws_socket *socket) { + if (!socket->impl) { + /* protect from double clean */ + return; + } + + int fd_for_logging = socket->io_handle.data.fd; /* socket's fd gets reset before final log */ + (void)fd_for_logging; + + if (aws_socket_is_open(socket)) { + AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: is still open, closing...", (void *)socket, fd_for_logging); + aws_socket_close(socket); + } + struct posix_socket *socket_impl = socket->impl; + + if (aws_ref_count_release(&socket_impl->internal_refcount) != 0) { + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: is still pending io letting it dangle and cleaning up later.", + (void *)socket, + fd_for_logging); + } + + AWS_ZERO_STRUCT(*socket); + socket->io_handle.data.fd = -1; +} + +/* Update socket->local_endpoint based on the results of getsockname() */ +static int s_update_local_endpoint(struct aws_socket *socket) { + struct aws_socket_endpoint tmp_endpoint; + AWS_ZERO_STRUCT(tmp_endpoint); + + struct sockaddr_storage address; + AWS_ZERO_STRUCT(address); + socklen_t address_size = sizeof(address); + + if (getsockname(socket->io_handle.data.fd, (struct sockaddr *)&address, &address_size) != 0) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: getsockname() failed with error %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + int aws_error = s_determine_socket_error(errno_value); + return aws_raise_error(aws_error); + } + + if (address.ss_family == AF_INET) { + struct sockaddr_in *s = 
(struct sockaddr_in *)&address; + tmp_endpoint.port = ntohs(s->sin_port); + if (inet_ntop(AF_INET, &s->sin_addr, tmp_endpoint.address, sizeof(tmp_endpoint.address)) == NULL) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: inet_ntop() failed with error %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + int aws_error = s_determine_socket_error(errno_value); + return aws_raise_error(aws_error); + } + } else if (address.ss_family == AF_INET6) { + struct sockaddr_in6 *s = (struct sockaddr_in6 *)&address; + tmp_endpoint.port = ntohs(s->sin6_port); + if (inet_ntop(AF_INET6, &s->sin6_addr, tmp_endpoint.address, sizeof(tmp_endpoint.address)) == NULL) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: inet_ntop() failed with error %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + int aws_error = s_determine_socket_error(errno_value); + return aws_raise_error(aws_error); + } + } else if (address.ss_family == AF_UNIX) { + struct sockaddr_un *s = (struct sockaddr_un *)&address; + + /* Ensure there's a null-terminator. + * On some platforms it may be missing when the path gets very long. See: + * https://man7.org/linux/man-pages/man7/unix.7.html#BUGS + * But let's keep it simple, and not deal with that madness until someone demands it. */ + size_t sun_len; + if (aws_secure_strlen(s->sun_path, sizeof(tmp_endpoint.address), &sun_len)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: UNIX domain socket name is too long", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); + } + memcpy(tmp_endpoint.address, s->sun_path, sun_len); +#if USE_VSOCK + } else if (address.ss_family == AF_VSOCK) { + struct sockaddr_vm *s = (struct sockaddr_vm *)&address; + + tmp_endpoint.port = s->svm_port; + + snprintf(tmp_endpoint.address, sizeof(tmp_endpoint.address), "%" PRIu32, s->svm_cid); + return AWS_OP_SUCCESS; +#endif /* USE_VSOCK */ + } else { + AWS_ASSERT(0); + return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); + } + + socket->local_endpoint = tmp_endpoint; + return AWS_OP_SUCCESS; +} + +static void s_on_connection_error(struct aws_socket *socket, int error); + +static int s_on_connection_success(struct aws_socket *socket) { + + struct aws_event_loop *event_loop = socket->event_loop; + struct posix_socket *socket_impl = socket->impl; + + if (socket_impl->currently_subscribed) { + aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); + socket_impl->currently_subscribed = false; + } + + socket->event_loop = NULL; + + int connect_result; + socklen_t result_length = sizeof(connect_result); + + if (getsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_ERROR, &connect_result, &result_length) < 0) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: failed to determine connection error %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + int aws_error = s_determine_socket_error(errno_value); + aws_raise_error(aws_error); + s_on_connection_error(socket, aws_error); + return AWS_OP_ERR; + } + + if (connect_result) { + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: connection error %d", + (void *)socket, + socket->io_handle.data.fd, + connect_result); + int aws_error = s_determine_socket_error(connect_result); + 
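+        /* Raise the translated error on this thread, then report it through
+         * the user's connection callback via s_on_connection_error(). */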
+        aws_raise_error(aws_error);
+        s_on_connection_error(socket, aws_error);
+        return AWS_OP_ERR;
+    }
+
+    AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: connection success", (void *)socket, socket->io_handle.data.fd);
+
+    if (s_update_local_endpoint(socket)) {
+        s_on_connection_error(socket, aws_last_error());
+        return AWS_OP_ERR;
+    }
+
+    socket->state = CONNECTED_WRITE | CONNECTED_READ;
+
+    if (aws_socket_assign_to_event_loop(socket, event_loop)) {
+        AWS_LOGF_ERROR(
+            AWS_LS_IO_SOCKET,
+            "id=%p fd=%d: assignment to event loop %p failed with error %d",
+            (void *)socket,
+            socket->io_handle.data.fd,
+            (void *)event_loop,
+            aws_last_error());
+        s_on_connection_error(socket, aws_last_error());
+        return AWS_OP_ERR;
+    }
+
+    socket->connection_result_fn(socket, AWS_ERROR_SUCCESS, socket->connect_accept_user_data);
+
+    return AWS_OP_SUCCESS;
+}
+
+static void s_on_connection_error(struct aws_socket *socket, int error) {
+    socket->state = ERROR;
+    AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: connection failure", (void *)socket, socket->io_handle.data.fd);
+    if (socket->connection_result_fn) {
+        socket->connection_result_fn(socket, error, socket->connect_accept_user_data);
+    } else if (socket->accept_result_fn) {
+        socket->accept_result_fn(socket, error, NULL, socket->connect_accept_user_data);
+    }
+}
+
+/* the next two callbacks compete based on which one runs first. if s_socket_connect_event
+ * comes back first, then we set socket_args->socket = NULL and continue on with the connection;
+ * when s_handle_socket_timeout() runs later, it sees socket_args->socket is NULL and just cleans up its memory.
+ * s_handle_socket_timeout() will always run, so the memory for socket_connect_args is always cleaned up there. */
+static void s_socket_connect_event(
+    struct aws_event_loop *event_loop,
+    struct aws_io_handle *handle,
+    int events,
+    void *user_data) {
+
+    (void)event_loop;
+    (void)handle;
+
+    struct posix_socket_connect_args *socket_args = (struct posix_socket_connect_args *)user_data;
+    AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "fd=%d: connection activity handler triggered", handle->data.fd);
+
+    if (socket_args->socket) {
+        AWS_LOGF_TRACE(
+            AWS_LS_IO_SOCKET,
+            "id=%p fd=%d: has not timed out yet, proceeding with connection.",
+            (void *)socket_args->socket,
+            handle->data.fd);
+
+        struct posix_socket *socket_impl = socket_args->socket->impl;
+        if (!(events & AWS_IO_EVENT_TYPE_ERROR || events & AWS_IO_EVENT_TYPE_CLOSED) &&
+            (events & AWS_IO_EVENT_TYPE_READABLE || events & AWS_IO_EVENT_TYPE_WRITABLE)) {
+            struct aws_socket *socket = socket_args->socket;
+            socket_args->socket = NULL;
+            socket_impl->connect_args = NULL;
+            s_on_connection_success(socket);
+            return;
+        }
+
+        int aws_error = aws_socket_get_error(socket_args->socket);
+        /* we'll get another notification. */
+        if (aws_error == AWS_IO_READ_WOULD_BLOCK) {
+            AWS_LOGF_TRACE(
+                AWS_LS_IO_SOCKET,
+                "id=%p fd=%d: spurious event, waiting for another notification.",
+                (void *)socket_args->socket,
+                handle->data.fd);
+
+            // Return results back to event loop.
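+            // update_io_result is an optional hook: event loops that do not
+            // consume I/O feedback leave it NULL, so check before calling.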
+            if (handle->update_io_result) {
+                struct aws_io_handle_io_op_result io_op_result;
+                AWS_ZERO_STRUCT(io_op_result);
+                io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK;
+                handle->update_io_result(event_loop, handle, &io_op_result);
+            }
+
+            return;
+        }
+
+        struct aws_socket *socket = socket_args->socket;
+        socket_args->socket = NULL;
+        socket_impl->connect_args = NULL;
+        aws_raise_error(aws_error);
+        s_on_connection_error(socket, aws_error);
+    }
+}
+
+static void s_handle_socket_timeout(struct aws_task *task, void *args, enum aws_task_status status) {
+    (void)task;
+    (void)status;
+
+    struct posix_socket_connect_args *socket_args = args;
+
+    AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "task_id=%p: timeout task triggered, evaluating timeouts.", (void *)task);
+    /* successful connection will have nulled out connect_args->socket */
+    if (socket_args->socket) {
+        AWS_LOGF_ERROR(
+            AWS_LS_IO_SOCKET,
+            "id=%p fd=%d: timed out, shutting down.",
+            (void *)socket_args->socket,
+            socket_args->socket->io_handle.data.fd);
+
+        socket_args->socket->state = TIMEDOUT;
+        int error_code = AWS_IO_SOCKET_TIMEOUT;
+
+        if (status == AWS_TASK_STATUS_RUN_READY) {
+            aws_event_loop_unsubscribe_from_io_events(socket_args->socket->event_loop, &socket_args->socket->io_handle);
+        } else {
+            error_code = AWS_IO_EVENT_LOOP_SHUTDOWN;
+            aws_event_loop_free_io_event_resources(socket_args->socket->event_loop, &socket_args->socket->io_handle);
+        }
+        socket_args->socket->event_loop = NULL;
+        struct posix_socket *socket_impl = socket_args->socket->impl;
+        socket_impl->currently_subscribed = false;
+        aws_raise_error(error_code);
+        struct aws_socket *socket = socket_args->socket;
+        /* socket close sets socket_args->socket to NULL and
+         * socket_impl->connect_args to NULL. */
+        aws_socket_close(socket);
+        s_on_connection_error(socket, error_code);
+    }
+
+    aws_mem_release(socket_args->allocator, socket_args);
+}
+
+/* this is used simply for moving a connect_success callback when the connect finished immediately
+ * (like for unix domain sockets) into the event loop's thread. Also note, in that case there was no
+ * timeout task scheduled, so in this case the socket_args are cleaned up here. */
+static void s_run_connect_success(struct aws_task *task, void *arg, enum aws_task_status status) {
+    (void)task;
+    struct posix_socket_connect_args *socket_args = arg;
+
+    if (socket_args->socket) {
+        struct posix_socket *socket_impl = socket_args->socket->impl;
+        if (status == AWS_TASK_STATUS_RUN_READY) {
+            s_on_connection_success(socket_args->socket);
+        } else {
+            aws_raise_error(AWS_IO_SOCKET_CONNECT_ABORTED);
+            socket_args->socket->event_loop = NULL;
+            s_on_connection_error(socket_args->socket, AWS_IO_SOCKET_CONNECT_ABORTED);
+        }
+        socket_impl->connect_args = NULL;
+    }
+
+    aws_mem_release(socket_args->allocator, socket_args);
+}
+
+static inline int s_convert_pton_error(int pton_code, int errno_value) {
+    if (pton_code == 0) {
+        return AWS_IO_SOCKET_INVALID_ADDRESS;
+    }
+
+    return s_determine_socket_error(errno_value);
+}
+
+struct socket_address {
+    union sock_addr_types {
+        struct sockaddr_in addr_in;
+        struct sockaddr_in6 addr_in6;
+        struct sockaddr_un un_addr;
+#ifdef USE_VSOCK
+        struct sockaddr_vm vm_addr;
+#endif
+    } sock_addr_types;
+};
+
+#ifdef USE_VSOCK
+/** Convert a string to a VSOCK CID. Respects the calling convention of inet_pton:
+ * 0 on error, 1 on success.
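+ * For example, parse_cid("-1", &cid) succeeds and maps cid to VMADDR_CID_ANY,
+ * while parse_cid("3", &cid) stores the literal CID 3.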
+ */
+static int parse_cid(const char *cid_str, unsigned int *value) {
+    if (cid_str == NULL || value == NULL) {
+        errno = EINVAL;
+        return 0;
+    }
+    /* strtoll returns 0 as both error and correct value */
+    errno = 0;
+    /* parse into a signed long long so the edge cases (-1, out of range) can be handled explicitly */
+    long long cid = strtoll(cid_str, NULL, 10);
+    if (errno != 0) {
+        return 0;
+    }
+
+    /* -1U means any, so it's a valid value, but it needs to be converted to
+     * unsigned int. */
+    if (cid == -1) {
+        *value = VMADDR_CID_ANY;
+        return 1;
+    }
+
+    if (cid < 0 || cid > UINT_MAX) {
+        errno = ERANGE;
+        return 0;
+    }
+
+    /* cast is safe here, edge cases already checked */
+    *value = (unsigned int)cid;
+    return 1;
+}
+#endif
+
+int aws_socket_connect(
+    struct aws_socket *socket,
+    const struct aws_socket_endpoint *remote_endpoint,
+    struct aws_event_loop *event_loop,
+    aws_socket_on_connection_result_fn *on_connection_result,
+    void *user_data) {
+    AWS_ASSERT(event_loop);
+    AWS_ASSERT(!socket->event_loop);
+
+    AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: beginning connect.", (void *)socket, socket->io_handle.data.fd);
+
+    if (socket->event_loop) {
+        return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED);
+    }
+
+    if (socket->options.type != AWS_SOCKET_DGRAM) {
+        AWS_ASSERT(on_connection_result);
+        if (socket->state != INIT) {
+            return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE);
+        }
+    } else { /* UDP socket */
+        /* UDP sockets jump to CONNECTED_READ if bind is called first */
+        if (socket->state != CONNECTED_READ && socket->state != INIT) {
+            return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE);
+        }
+    }
+
+    size_t address_strlen;
+    if (aws_secure_strlen(remote_endpoint->address, AWS_ADDRESS_MAX_LEN, &address_strlen)) {
+        return AWS_OP_ERR;
+    }
+
+    if (aws_socket_validate_port_for_connect(remote_endpoint->port, socket->options.domain)) {
+        return AWS_OP_ERR;
+    }
+
+    struct socket_address address;
+    AWS_ZERO_STRUCT(address);
+    socklen_t sock_size = 0;
+    int pton_err = 1;
+    if (socket->options.domain == AWS_SOCKET_IPV4) {
+        pton_err = inet_pton(AF_INET, remote_endpoint->address, &address.sock_addr_types.addr_in.sin_addr);
+        address.sock_addr_types.addr_in.sin_port = htons((uint16_t)remote_endpoint->port);
+        address.sock_addr_types.addr_in.sin_family = AF_INET;
+        sock_size = sizeof(address.sock_addr_types.addr_in);
+    } else if (socket->options.domain == AWS_SOCKET_IPV6) {
+        pton_err = inet_pton(AF_INET6, remote_endpoint->address, &address.sock_addr_types.addr_in6.sin6_addr);
+        address.sock_addr_types.addr_in6.sin6_port = htons((uint16_t)remote_endpoint->port);
+        address.sock_addr_types.addr_in6.sin6_family = AF_INET6;
+        sock_size = sizeof(address.sock_addr_types.addr_in6);
+    } else if (socket->options.domain == AWS_SOCKET_LOCAL) {
+        address.sock_addr_types.un_addr.sun_family = AF_UNIX;
+        strncpy(address.sock_addr_types.un_addr.sun_path, remote_endpoint->address, AWS_ADDRESS_MAX_LEN);
+        sock_size = sizeof(address.sock_addr_types.un_addr);
+#ifdef USE_VSOCK
+    } else if (socket->options.domain == AWS_SOCKET_VSOCK) {
+        pton_err = parse_cid(remote_endpoint->address, &address.sock_addr_types.vm_addr.svm_cid);
+        address.sock_addr_types.vm_addr.svm_family = AF_VSOCK;
+        address.sock_addr_types.vm_addr.svm_port = remote_endpoint->port;
+        sock_size = sizeof(address.sock_addr_types.vm_addr);
+#endif
+    } else {
+        AWS_ASSERT(0);
+        return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY);
+    }
+
+    if (pton_err != 1) {
+        int errno_value = errno; /* Always cache errno before potential 
side-effect */ + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: failed to parse address %s:%u.", + (void *)socket, + socket->io_handle.data.fd, + remote_endpoint->address, + remote_endpoint->port); + return aws_raise_error(s_convert_pton_error(pton_err, errno_value)); + } + + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: connecting to endpoint %s:%u.", + (void *)socket, + socket->io_handle.data.fd, + remote_endpoint->address, + remote_endpoint->port); + + socket->state = CONNECTING; + socket->remote_endpoint = *remote_endpoint; + socket->connect_accept_user_data = user_data; + socket->connection_result_fn = on_connection_result; + + struct posix_socket *socket_impl = socket->impl; + + socket_impl->connect_args = aws_mem_calloc(socket->allocator, 1, sizeof(struct posix_socket_connect_args)); + if (!socket_impl->connect_args) { + return AWS_OP_ERR; + } + + socket_impl->connect_args->socket = socket; + socket_impl->connect_args->allocator = socket->allocator; + + socket_impl->connect_args->task.fn = s_handle_socket_timeout; + socket_impl->connect_args->task.arg = socket_impl->connect_args; + + int error_code = connect(socket->io_handle.data.fd, (struct sockaddr *)&address.sock_addr_types, sock_size); + socket->event_loop = event_loop; + + if (!error_code) { + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: connected immediately, not scheduling timeout.", + (void *)socket, + socket->io_handle.data.fd); + socket_impl->connect_args->task.fn = s_run_connect_success; + /* the subscription for IO will happen once we setup the connection in the task. Since we already + * know the connection succeeded, we don't need to register for events yet. */ + aws_event_loop_schedule_task_now(event_loop, &socket_impl->connect_args->task); + } + + if (error_code) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + if (errno_value == EINPROGRESS || errno_value == EALREADY) { + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: connection pending waiting on event-loop notification or timeout.", + (void *)socket, + socket->io_handle.data.fd); + /* cache the timeout task; it is possible for the IO subscription to come back virtually immediately + * and null out the connect args */ + struct aws_task *timeout_task = &socket_impl->connect_args->task; + + socket_impl->currently_subscribed = true; + /* This event is for when the connection finishes. (the fd will flip writable). */ + if (aws_event_loop_subscribe_to_io_events( + event_loop, + &socket->io_handle, + AWS_IO_EVENT_TYPE_WRITABLE, + s_socket_connect_event, + socket_impl->connect_args)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: failed to register with event-loop %p.", + (void *)socket, + socket->io_handle.data.fd, + (void *)event_loop); + socket_impl->currently_subscribed = false; + socket->event_loop = NULL; + goto err_clean_up; + } + + /* schedule a task to run at the connect timeout interval, if this task runs before the connect + * happens, we consider that a timeout. 
*/ + uint64_t timeout = 0; + aws_event_loop_current_clock_time(event_loop, &timeout); + timeout += aws_timestamp_convert( + socket->options.connect_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: scheduling timeout task for %llu.", + (void *)socket, + socket->io_handle.data.fd, + (unsigned long long)timeout); + aws_event_loop_schedule_task_future(event_loop, timeout_task, timeout); + } else { + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: connect failed with error code %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + int aws_error = s_determine_socket_error(errno_value); + aws_raise_error(aws_error); + socket->event_loop = NULL; + socket_impl->currently_subscribed = false; + goto err_clean_up; + } + } + return AWS_OP_SUCCESS; + +err_clean_up: + aws_mem_release(socket->allocator, socket_impl->connect_args); + socket_impl->connect_args = NULL; + return AWS_OP_ERR; +} + +int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint) { + if (socket->state != INIT) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: invalid state for bind operation.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + + size_t address_strlen; + if (aws_secure_strlen(local_endpoint->address, AWS_ADDRESS_MAX_LEN, &address_strlen)) { + return AWS_OP_ERR; + } + + if (aws_socket_validate_port_for_bind(local_endpoint->port, socket->options.domain)) { + return AWS_OP_ERR; + } + + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: binding to %s:%u.", + (void *)socket, + socket->io_handle.data.fd, + local_endpoint->address, + local_endpoint->port); + + struct socket_address address; + AWS_ZERO_STRUCT(address); + socklen_t sock_size = 0; + int pton_err = 1; + if (socket->options.domain == AWS_SOCKET_IPV4) { + pton_err = inet_pton(AF_INET, local_endpoint->address, &address.sock_addr_types.addr_in.sin_addr); + address.sock_addr_types.addr_in.sin_port = htons((uint16_t)local_endpoint->port); + address.sock_addr_types.addr_in.sin_family = AF_INET; + sock_size = sizeof(address.sock_addr_types.addr_in); + } else if (socket->options.domain == AWS_SOCKET_IPV6) { + pton_err = inet_pton(AF_INET6, local_endpoint->address, &address.sock_addr_types.addr_in6.sin6_addr); + address.sock_addr_types.addr_in6.sin6_port = htons((uint16_t)local_endpoint->port); + address.sock_addr_types.addr_in6.sin6_family = AF_INET6; + sock_size = sizeof(address.sock_addr_types.addr_in6); + } else if (socket->options.domain == AWS_SOCKET_LOCAL) { + address.sock_addr_types.un_addr.sun_family = AF_UNIX; + strncpy(address.sock_addr_types.un_addr.sun_path, local_endpoint->address, AWS_ADDRESS_MAX_LEN); + sock_size = sizeof(address.sock_addr_types.un_addr); +#ifdef USE_VSOCK + } else if (socket->options.domain == AWS_SOCKET_VSOCK) { + pton_err = parse_cid(local_endpoint->address, &address.sock_addr_types.vm_addr.svm_cid); + address.sock_addr_types.vm_addr.svm_family = AF_VSOCK; + address.sock_addr_types.vm_addr.svm_port = local_endpoint->port; + sock_size = sizeof(address.sock_addr_types.vm_addr); +#endif + } else { + AWS_ASSERT(0); + return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); + } + + if (pton_err != 1) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: failed to parse address %s:%u.", + (void *)socket, + socket->io_handle.data.fd, + 
local_endpoint->address, + local_endpoint->port); + return aws_raise_error(s_convert_pton_error(pton_err, errno_value)); + } + + if (bind(socket->io_handle.data.fd, (struct sockaddr *)&address.sock_addr_types, sock_size) != 0) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: bind failed with error code %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + + aws_raise_error(s_determine_socket_error(errno_value)); + goto error; + } + + if (s_update_local_endpoint(socket)) { + goto error; + } + + if (socket->options.type == AWS_SOCKET_STREAM) { + socket->state = BOUND; + } else { + /* e.g. UDP is now readable */ + socket->state = CONNECTED_READ; + } + + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: successfully bound to %s:%u", + (void *)socket, + socket->io_handle.data.fd, + socket->local_endpoint.address, + socket->local_endpoint.port); + + return AWS_OP_SUCCESS; + +error: + socket->state = ERROR; + return AWS_OP_ERR; +} + +int aws_socket_get_bound_address(const struct aws_socket *socket, struct aws_socket_endpoint *out_address) { + if (socket->local_endpoint.address[0] == 0) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: Socket has no local address. Socket must be bound first.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + *out_address = socket->local_endpoint; + return AWS_OP_SUCCESS; +} + +int aws_socket_listen(struct aws_socket *socket, int backlog_size) { + if (socket->state != BOUND) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: invalid state for listen operation. You must call bind first.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + + int error_code = listen(socket->io_handle.data.fd, backlog_size); + + if (!error_code) { + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, "id=%p fd=%d: successfully listening", (void *)socket, socket->io_handle.data.fd); + socket->state = LISTENING; + return AWS_OP_SUCCESS; + } + + int errno_value = errno; /* Always cache errno before potential side-effect */ + + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: listen failed with error code %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + + socket->state = ERROR; + + return aws_raise_error(s_determine_socket_error(errno_value)); +} + +/* this is called by the event loop handler that was installed in start_accept(). It runs once the FD goes readable, + * accepts as many as it can and then returns control to the event loop. 
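+ * Draining the backlog in a loop matters for edge-triggered event loops,
+ * which will not re-notify until accept() has returned EAGAIN/EWOULDBLOCK.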
+ */
+static void s_socket_accept_event(
+    struct aws_event_loop *event_loop,
+    struct aws_io_handle *handle,
+    int events,
+    void *user_data) {
+
+    (void)event_loop;
+
+    struct aws_socket *socket = user_data;
+    struct posix_socket *socket_impl = socket->impl;
+
+    AWS_LOGF_DEBUG(
+        AWS_LS_IO_SOCKET, "id=%p fd=%d: listening event received", (void *)socket, socket->io_handle.data.fd);
+
+    struct aws_io_handle_io_op_result io_op_result;
+    AWS_ZERO_STRUCT(io_op_result);
+
+    if (socket_impl->continue_accept && events & AWS_IO_EVENT_TYPE_READABLE) {
+        int in_fd = 0;
+        while (socket_impl->continue_accept && in_fd != -1) {
+            struct sockaddr_storage in_addr;
+            socklen_t in_len = sizeof(struct sockaddr_storage);
+
+            in_fd = accept(handle->data.fd, (struct sockaddr *)&in_addr, &in_len);
+            if (in_fd == -1) {
+                int errno_value = errno; /* Always cache errno before potential side-effect */
+
+                if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) {
+                    io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK;
+                    break;
+                }
+
+                int aws_error = aws_socket_get_error(socket);
+                aws_raise_error(aws_error);
+                s_on_connection_error(socket, aws_error);
+                io_op_result.read_error_code = aws_error;
+                break;
+            }
+
+            AWS_LOGF_DEBUG(
+                AWS_LS_IO_SOCKET, "id=%p fd=%d: incoming connection", (void *)socket, socket->io_handle.data.fd);
+
+            struct aws_socket *new_sock = aws_mem_acquire(socket->allocator, sizeof(struct aws_socket));
+
+            if (!new_sock) {
+                close(in_fd);
+                s_on_connection_error(socket, aws_last_error());
+                continue;
+            }
+
+            if (s_socket_init(new_sock, socket->allocator, &socket->options, in_fd)) {
+                aws_mem_release(socket->allocator, new_sock);
+                s_on_connection_error(socket, aws_last_error());
+                continue;
+            }
+
+            new_sock->local_endpoint = socket->local_endpoint;
+            new_sock->state = CONNECTED_READ | CONNECTED_WRITE;
+            uint32_t port = 0;
+
+            /* get the info on the incoming socket's address */
+            if (in_addr.ss_family == AF_INET) {
+                struct sockaddr_in *s = (struct sockaddr_in *)&in_addr;
+                port = ntohs(s->sin_port);
+                /* this came from the kernel, a.) it won't fail. b.) even if it does,
+                 * it's not fatal; we just log a warning. */
+                if (!inet_ntop(
+                        AF_INET,
+                        &s->sin_addr,
+                        new_sock->remote_endpoint.address,
+                        sizeof(new_sock->remote_endpoint.address))) {
+                    AWS_LOGF_WARN(
+                        AWS_LS_IO_SOCKET,
+                        "id=%p fd=%d: Failed to determine remote address.",
+                        (void *)socket,
+                        socket->io_handle.data.fd);
+                }
+                new_sock->options.domain = AWS_SOCKET_IPV4;
+            } else if (in_addr.ss_family == AF_INET6) {
+                /* this came from the kernel, a.) it won't fail. b.) even if it does,
+                 * it's not fatal; we just log a warning. */
+                struct sockaddr_in6 *s = (struct sockaddr_in6 *)&in_addr;
+                port = ntohs(s->sin6_port);
+                if (!inet_ntop(
+                        AF_INET6,
+                        &s->sin6_addr,
+                        new_sock->remote_endpoint.address,
+                        sizeof(new_sock->remote_endpoint.address))) {
+                    AWS_LOGF_WARN(
+                        AWS_LS_IO_SOCKET,
+                        "id=%p fd=%d: 
Failed to determine remote address.", + (void *)socket, + socket->io_handle.data.fd); + } + new_sock->options.domain = AWS_SOCKET_IPV6; + } else if (in_addr.ss_family == AF_UNIX) { + new_sock->remote_endpoint = socket->local_endpoint; + new_sock->options.domain = AWS_SOCKET_LOCAL; + } + + new_sock->remote_endpoint.port = port; + + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: connected to %s:%d, incoming fd %d", + (void *)socket, + socket->io_handle.data.fd, + new_sock->remote_endpoint.address, + new_sock->remote_endpoint.port, + in_fd); + + int flags = fcntl(in_fd, F_GETFL, 0); + + flags |= O_NONBLOCK | O_CLOEXEC; + fcntl(in_fd, F_SETFL, flags); + + bool close_occurred = false; + socket_impl->close_happened = &close_occurred; + socket->accept_result_fn(socket, AWS_ERROR_SUCCESS, new_sock, socket->connect_accept_user_data); + + if (close_occurred) { + return; + } + + socket_impl->close_happened = NULL; + } + } + + // Return results back to event loop. + if (handle->update_io_result) { + handle->update_io_result(event_loop, handle, &io_op_result); + } + + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: finished processing incoming connections, " + "waiting on event-loop notification", + (void *)socket, + socket->io_handle.data.fd); +} + +int aws_socket_start_accept( + struct aws_socket *socket, + struct aws_event_loop *accept_loop, + aws_socket_on_accept_result_fn *on_accept_result, + void *user_data) { + AWS_ASSERT(on_accept_result); + AWS_ASSERT(accept_loop); + + if (socket->event_loop) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: is already assigned to event-loop %p.", + (void *)socket, + socket->io_handle.data.fd, + (void *)socket->event_loop); + return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); + } + + if (socket->state != LISTENING) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: invalid state for start_accept operation. 
You must call listen first.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + + socket->accept_result_fn = on_accept_result; + socket->connect_accept_user_data = user_data; + socket->event_loop = accept_loop; + struct posix_socket *socket_impl = socket->impl; + socket_impl->continue_accept = true; + socket_impl->currently_subscribed = true; + + if (aws_event_loop_subscribe_to_io_events( + socket->event_loop, &socket->io_handle, AWS_IO_EVENT_TYPE_READABLE, s_socket_accept_event, socket)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: failed to subscribe to event-loop %p.", + (void *)socket, + socket->io_handle.data.fd, + (void *)socket->event_loop); + socket_impl->continue_accept = false; + socket_impl->currently_subscribed = false; + socket->event_loop = NULL; + + return AWS_OP_ERR; + } + + return AWS_OP_SUCCESS; +} + +struct stop_accept_args { + struct aws_task task; + struct aws_mutex mutex; + struct aws_condition_variable condition_variable; + struct aws_socket *socket; + int ret_code; + bool invoked; +}; + +static bool s_stop_accept_pred(void *arg) { + struct stop_accept_args *stop_accept_args = arg; + return stop_accept_args->invoked; +} + +static void s_stop_accept_task(struct aws_task *task, void *arg, enum aws_task_status status) { + (void)task; + (void)status; + + struct stop_accept_args *stop_accept_args = arg; + aws_mutex_lock(&stop_accept_args->mutex); + stop_accept_args->ret_code = AWS_OP_SUCCESS; + if (aws_socket_stop_accept(stop_accept_args->socket)) { + stop_accept_args->ret_code = aws_last_error(); + } + stop_accept_args->invoked = true; + aws_condition_variable_notify_one(&stop_accept_args->condition_variable); + aws_mutex_unlock(&stop_accept_args->mutex); +} + +int aws_socket_stop_accept(struct aws_socket *socket) { + if (socket->state != LISTENING) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: is not in a listening state, can't stop_accept.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, "id=%p fd=%d: stopping accepting new connections", (void *)socket, socket->io_handle.data.fd); + + if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { + struct stop_accept_args args = { + .mutex = AWS_MUTEX_INIT, + .condition_variable = AWS_CONDITION_VARIABLE_INIT, + .invoked = false, + .socket = socket, + .ret_code = AWS_OP_SUCCESS, + .task = {.fn = s_stop_accept_task}, + }; + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: stopping accepting new connections from a different thread than " + "the socket is running from. Blocking until it shuts down.", + (void *)socket, + socket->io_handle.data.fd); + /* Look.... I know what I'm doing.... trust me, I'm an engineer. + * We wait on the completion before 'args' goes out of scope. 
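+         * The predicate-guarded condition-variable wait also protects against
+         * spurious wakeups.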
+ * NOLINTNEXTLINE */ + args.task.arg = &args; + aws_mutex_lock(&args.mutex); + aws_event_loop_schedule_task_now(socket->event_loop, &args.task); + aws_condition_variable_wait_pred(&args.condition_variable, &args.mutex, s_stop_accept_pred, &args); + aws_mutex_unlock(&args.mutex); + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: stop accept task finished running.", + (void *)socket, + socket->io_handle.data.fd); + + if (args.ret_code) { + return aws_raise_error(args.ret_code); + } + return AWS_OP_SUCCESS; + } + + int ret_val = AWS_OP_SUCCESS; + struct posix_socket *socket_impl = socket->impl; + if (socket_impl->currently_subscribed) { + ret_val = aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); + socket_impl->currently_subscribed = false; + socket_impl->continue_accept = false; + socket->event_loop = NULL; + } + + return ret_val; +} + +int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_options *options) { + if (socket->options.domain != options->domain || socket->options.type != options->type) { + return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); + } + + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setting socket options to: keep-alive %d, keep idle %d, keep-alive interval %d, keep-alive probe " + "count %d.", + (void *)socket, + socket->io_handle.data.fd, + (int)options->keepalive, + (int)options->keep_alive_timeout_sec, + (int)options->keep_alive_interval_sec, + (int)options->keep_alive_max_failed_probes); + + socket->options = *options; + +#ifdef NO_SIGNAL_SOCK_OPT + int option_value = 1; + if (AWS_UNLIKELY(setsockopt( + socket->io_handle.data.fd, SOL_SOCKET, NO_SIGNAL_SOCK_OPT, &option_value, sizeof(option_value)))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() for NO_SIGNAL_SOCK_OPT failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + } +#endif /* NO_SIGNAL_SOCK_OPT */ + + int reuse = 1; + if (AWS_UNLIKELY(setsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(int)))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() for SO_REUSEADDR failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + } + size_t network_interface_length = 0; + if (aws_secure_strlen(options->network_interface_name, AWS_NETWORK_INTERFACE_NAME_MAX, &network_interface_length)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: network_interface_name max length must be %d length and NULL terminated", + (void *)socket, + socket->io_handle.data.fd, + AWS_NETWORK_INTERFACE_NAME_MAX); + return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); + } + if (network_interface_length != 0) { +#if defined(SO_BINDTODEVICE) + if (setsockopt( + socket->io_handle.data.fd, + SOL_SOCKET, + SO_BINDTODEVICE, + options->network_interface_name, + network_interface_length)) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() with SO_BINDTODEVICE for \"%s\" failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + options->network_interface_name, + errno_value); + return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); + } +#elif defined(IP_BOUND_IF) + /* + * If SO_BINDTODEVICE is not supported, the alternative is IP_BOUND_IF which requires an index instead + * of a 
name. We are not using this everywhere because this requires 2 system calls instead of 1, and is + * dependent upon the type of sockets, which doesn't support AWS_SOCKET_LOCAL. As a future optimization, we can + * look into caching the result of if_nametoindex. + */ + uint network_interface_index = if_nametoindex(options->network_interface_name); + if (network_interface_index == 0) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: network_interface_name \"%s\" not found. if_nametoindex() failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + options->network_interface_name, + errno_value); + return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); + } + if (options->domain == AWS_SOCKET_IPV6) { + if (setsockopt( + socket->io_handle.data.fd, + IPPROTO_IPV6, + IPV6_BOUND_IF, + &network_interface_index, + sizeof(network_interface_index))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() with IPV6_BOUND_IF for \"%s\" failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + options->network_interface_name, + errno_value); + return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); + } + } else if (setsockopt( + socket->io_handle.data.fd, + IPPROTO_IP, + IP_BOUND_IF, + &network_interface_index, + sizeof(network_interface_index))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() with IP_BOUND_IF for \"%s\" failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + options->network_interface_name, + errno_value); + return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); + } +#else + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: network_interface_name is not supported on this platform.", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); +#endif + } + if (options->type == AWS_SOCKET_STREAM && options->domain != AWS_SOCKET_LOCAL) { + if (socket->options.keepalive) { + int keep_alive = 1; + if (AWS_UNLIKELY( + setsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_KEEPALIVE, &keep_alive, sizeof(int)))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() for enabling SO_KEEPALIVE failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + } + } + +#if !defined(__OpenBSD__) + if (socket->options.keep_alive_interval_sec && socket->options.keep_alive_timeout_sec) { + int ival_in_secs = socket->options.keep_alive_interval_sec; + if (AWS_UNLIKELY(setsockopt( + socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPIDLE, &ival_in_secs, sizeof(ival_in_secs)))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() for enabling TCP_KEEPIDLE for TCP failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + } + + ival_in_secs = socket->options.keep_alive_timeout_sec; + if (AWS_UNLIKELY(setsockopt( + socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPINTVL, &ival_in_secs, sizeof(ival_in_secs)))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() for enabling TCP_KEEPINTVL for TCP failed with 
errno %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + } + } + + if (socket->options.keep_alive_max_failed_probes) { + int max_probes = socket->options.keep_alive_max_failed_probes; + if (AWS_UNLIKELY( + setsockopt(socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPCNT, &max_probes, sizeof(max_probes)))) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + AWS_LOGF_WARN( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: setsockopt() for enabling TCP_KEEPCNT for TCP failed with errno %d.", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + } + } +#endif /* __OpenBSD__ */ + } + + return AWS_OP_SUCCESS; +} + +struct socket_write_request { + struct aws_byte_cursor cursor_cpy; + aws_socket_on_write_completed_fn *written_fn; + void *write_user_data; + struct aws_linked_list_node node; + size_t original_buffer_len; + int error_code; +}; + +struct posix_socket_close_args { + struct aws_mutex mutex; + struct aws_condition_variable condition_variable; + struct aws_socket *socket; + bool invoked; + int ret_code; +}; + +static bool s_close_predicate(void *arg) { + struct posix_socket_close_args *close_args = arg; + return close_args->invoked; +} + +static void s_close_task(struct aws_task *task, void *arg, enum aws_task_status status) { + (void)task; + (void)status; + + struct posix_socket_close_args *close_args = arg; + aws_mutex_lock(&close_args->mutex); + close_args->ret_code = AWS_OP_SUCCESS; + + if (aws_socket_close(close_args->socket)) { + close_args->ret_code = aws_last_error(); + } + + close_args->invoked = true; + aws_condition_variable_notify_one(&close_args->condition_variable); + aws_mutex_unlock(&close_args->mutex); +} + +int aws_socket_close(struct aws_socket *socket) { + struct posix_socket *socket_impl = socket->impl; + AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: closing", (void *)socket, socket->io_handle.data.fd); + struct aws_event_loop *event_loop = socket->event_loop; + if (socket->event_loop) { + /* don't freak out on me, this almost never happens, and never occurs inside a channel + * it only gets hit from a listening socket shutting down or from a unit test. */ + if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: closing from a different thread than " + "the socket is running from. 
Blocking until it closes down.", + (void *)socket, + socket->io_handle.data.fd); + /* the only time we allow this kind of thing is when you're a listener.*/ + if (socket->state != LISTENING) { + return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); + } + + struct posix_socket_close_args args = { + .mutex = AWS_MUTEX_INIT, + .condition_variable = AWS_CONDITION_VARIABLE_INIT, + .socket = socket, + .ret_code = AWS_OP_SUCCESS, + .invoked = false, + }; + + struct aws_task close_task = { + .fn = s_close_task, + .arg = &args, + }; + + int fd_for_logging = socket->io_handle.data.fd; /* socket's fd gets reset before final log */ + (void)fd_for_logging; + + aws_mutex_lock(&args.mutex); + aws_event_loop_schedule_task_now(socket->event_loop, &close_task); + aws_condition_variable_wait_pred(&args.condition_variable, &args.mutex, s_close_predicate, &args); + aws_mutex_unlock(&args.mutex); + AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: close task completed.", (void *)socket, fd_for_logging); + if (args.ret_code) { + return aws_raise_error(args.ret_code); + } + + return AWS_OP_SUCCESS; + } + + if (socket_impl->currently_subscribed) { + if (socket->state & LISTENING) { + aws_socket_stop_accept(socket); + } else { + int err_code = aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); + + if (err_code) { + return AWS_OP_ERR; + } + } + socket_impl->currently_subscribed = false; + socket->event_loop = NULL; + } + } + + if (socket_impl->close_happened) { + *socket_impl->close_happened = true; + } + + if (socket_impl->connect_args) { + socket_impl->connect_args->socket = NULL; + socket_impl->connect_args = NULL; + } + + if (aws_socket_is_open(socket)) { + close(socket->io_handle.data.fd); + socket->io_handle.data.fd = -1; + socket->state = CLOSED; + + /* ensure callbacks for pending writes fire (in order) before this close function returns */ + + if (socket_impl->written_task_scheduled) { + aws_event_loop_cancel_task(event_loop, &socket_impl->written_task); + } + + while (!aws_linked_list_empty(&socket_impl->written_queue)) { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue); + struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); + size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; + write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data); + aws_mem_release(socket->allocator, write_request); + } + + while (!aws_linked_list_empty(&socket_impl->write_queue)) { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->write_queue); + struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); + size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; + write_request->written_fn(socket, AWS_IO_SOCKET_CLOSED, bytes_written, write_request->write_user_data); + aws_mem_release(socket->allocator, write_request); + } + } + + return AWS_OP_SUCCESS; +} + +int aws_socket_shutdown_dir(struct aws_socket *socket, enum aws_channel_direction dir) { + int how = dir == AWS_CHANNEL_DIR_READ ? 
0 : 1; + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, "id=%p fd=%d: shutting down in direction %d", (void *)socket, socket->io_handle.data.fd, dir); + if (shutdown(socket->io_handle.data.fd, how)) { + int errno_value = errno; /* Always cache errno before potential side-effect */ + int aws_error = s_determine_socket_error(errno_value); + return aws_raise_error(aws_error); + } + + if (dir == AWS_CHANNEL_DIR_READ) { + socket->state &= ~CONNECTED_READ; + } else { + socket->state &= ~CONNECTED_WRITE; + } + + return AWS_OP_SUCCESS; +} + +static void s_written_task(struct aws_task *task, void *arg, enum aws_task_status status) { + (void)task; + (void)status; + + struct aws_socket *socket = arg; + struct posix_socket *socket_impl = socket->impl; + + socket_impl->written_task_scheduled = false; + + /* this is to handle a race condition when a callback kicks off a cleanup, or the user decides + * to close the socket based on something they read (SSL validation failed for example). + * if clean_up happens when internal_refcount > 0, socket_impl is kept dangling */ + aws_ref_count_acquire(&socket_impl->internal_refcount); + + /* Notes about weird loop: + * 1) Only process the initial contents of queue when this task is run, + * ignoring any writes queued during delivery. + * If we simply looped until the queue was empty, we could get into a + * synchronous loop of completing and writing and completing and writing... + * and it would be tough for multiple sockets to share an event-loop fairly. + * 2) Check if queue is empty with each iteration. + * If user calls close() from the callback, close() will process all + * nodes in the written_queue, and the queue will be empty when the + * callstack gets back to here. */ + if (!aws_linked_list_empty(&socket_impl->written_queue)) { + struct aws_linked_list_node *stop_after = aws_linked_list_back(&socket_impl->written_queue); + do { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue); + struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); + size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; + write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data); + aws_mem_release(socket_impl->allocator, write_request); + if (node == stop_after) { + break; + } + } while (!aws_linked_list_empty(&socket_impl->written_queue)); + } + + aws_ref_count_release(&socket_impl->internal_refcount); +} + +/* this gets called in two scenarios. + * 1st scenario, someone called aws_socket_write() and we want to try writing now, so an error can be returned + * immediately if something bad has happened to the socket. In this case, `parent_request` is set. + * 2nd scenario, the event loop notified us that the socket went writable. 
In this case `parent_request` is NULL */ +static int s_process_socket_write_requests(struct aws_socket *socket, struct socket_write_request *parent_request) { + struct posix_socket *socket_impl = socket->impl; + + if (parent_request) { + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: processing write requests, called from aws_socket_write", + (void *)socket, + socket->io_handle.data.fd); + } else { + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: processing write requests, invoked by the event-loop", + (void *)socket, + socket->io_handle.data.fd); + } + + bool purge = false; + int aws_error = AWS_OP_SUCCESS; + bool parent_request_failed = false; + bool pushed_to_written_queue = false; + + struct aws_io_handle_io_op_result io_op_result; + AWS_ZERO_STRUCT(io_op_result); + + /* if a close call happens in the middle, this queue will have been cleaned out from under us. */ + while (!aws_linked_list_empty(&socket_impl->write_queue)) { + struct aws_linked_list_node *node = aws_linked_list_front(&socket_impl->write_queue); + struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); + + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: dequeued write request of size %llu, remaining to write %llu", + (void *)socket, + socket->io_handle.data.fd, + (unsigned long long)write_request->original_buffer_len, + (unsigned long long)write_request->cursor_cpy.len); + + ssize_t written = send( + socket->io_handle.data.fd, write_request->cursor_cpy.ptr, write_request->cursor_cpy.len, NO_SIGNAL_SEND); + int errno_value = errno; /* Always cache errno before potential side-effect */ + + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: send written size %d", + (void *)socket, + socket->io_handle.data.fd, + (int)written); + + if (written < 0) { + if (errno_value == EAGAIN) { + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, "id=%p fd=%d: returned would block", (void *)socket, socket->io_handle.data.fd); + io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; + break; + } + + if (errno_value == EPIPE) { + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: already closed before write", + (void *)socket, + socket->io_handle.data.fd); + aws_error = AWS_IO_SOCKET_CLOSED; + aws_raise_error(aws_error); + purge = true; + io_op_result.write_error_code = aws_error; + break; + } + + purge = true; + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: write error with error code %d", + (void *)socket, + socket->io_handle.data.fd, + errno_value); + aws_error = s_determine_socket_error(errno_value); + aws_raise_error(aws_error); + io_op_result.write_error_code = aws_error; + break; + } + + io_op_result.written_bytes += (size_t)written; + + size_t remaining_to_write = write_request->cursor_cpy.len; + + aws_byte_cursor_advance(&write_request->cursor_cpy, (size_t)written); + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: remaining write request to write %llu", + (void *)socket, + socket->io_handle.data.fd, + (unsigned long long)write_request->cursor_cpy.len); + + if ((size_t)written == remaining_to_write) { + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, "id=%p fd=%d: write request completed", (void *)socket, socket->io_handle.data.fd); + + aws_linked_list_remove(node); + write_request->error_code = AWS_ERROR_SUCCESS; + aws_linked_list_push_back(&socket_impl->written_queue, node); + pushed_to_written_queue = true; + } + } + + if (purge) { + while (!aws_linked_list_empty(&socket_impl->write_queue)) { + struct aws_linked_list_node *node = 
aws_linked_list_pop_front(&socket_impl->write_queue); + struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); + + /* If this fn was invoked directly from aws_socket_write(), don't invoke the error callback + * as the user will be able to rely on the return value from aws_socket_write() */ + if (write_request == parent_request) { + parent_request_failed = true; + aws_mem_release(socket->allocator, write_request); + } else { + write_request->error_code = aws_error; + aws_linked_list_push_back(&socket_impl->written_queue, node); + pushed_to_written_queue = true; + } + } + } + + if (pushed_to_written_queue && !socket_impl->written_task_scheduled) { + socket_impl->written_task_scheduled = true; + aws_task_init(&socket_impl->written_task, s_written_task, socket, "socket_written_task"); + aws_event_loop_schedule_task_now(socket->event_loop, &socket_impl->written_task); + } + + // Return results back to event loop. + if (socket->io_handle.update_io_result) { + socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result); + } + + /* Only report error if aws_socket_write() invoked this function and its write_request failed */ + if (!parent_request_failed) { + return AWS_OP_SUCCESS; + } + + aws_raise_error(aws_error); + return AWS_OP_ERR; +} + +static void s_on_socket_io_event( + struct aws_event_loop *event_loop, + struct aws_io_handle *handle, + int events, + void *user_data) { + (void)event_loop; + (void)handle; + struct aws_socket *socket = user_data; + struct posix_socket *socket_impl = socket->impl; + + /* this is to handle a race condition when an error kicks off a cleanup, or the user decides + * to close the socket based on something they read (SSL validation failed for example). + * if clean_up happens when internal_refcount > 0, socket_impl is kept dangling but currently + * subscribed is set to false. */ + aws_ref_count_acquire(&socket_impl->internal_refcount); + + /* NOTE: READABLE|WRITABLE|HANG_UP events might arrive simultaneously + * (e.g. peer sends last few bytes and immediately hangs up). + * Notify user of READABLE|WRITABLE events first, so they try to read any remaining bytes. */ + + if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_READABLE) { + AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is readable", (void *)socket, socket->io_handle.data.fd); + if (socket->readable_fn) { + socket->readable_fn(socket, AWS_OP_SUCCESS, socket->readable_user_data); + } + } + /* if socket closed in between these branches, the currently_subscribed will be false and socket_impl will not + * have been cleaned up, so this next branch is safe. 
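 *
 * To illustrate why this guard matters (hypothetical user callback, not part of this patch): a
 * readable_fn may close the socket from inside the READABLE branch above, e.g.
 *
 *     static void s_my_on_readable(struct aws_socket *sock, int error_code, void *user_data) {
 *         (void)user_data;
 *         if (error_code != AWS_OP_SUCCESS) {
 *             aws_socket_close(sock); // unsubscribes and clears currently_subscribed
 *         }
 *     }
 *
 * after which currently_subscribed is false and the writable branch below is safely skipped.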
*/ + if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_WRITABLE) { + AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is writable", (void *)socket, socket->io_handle.data.fd); + s_process_socket_write_requests(socket, NULL); + } + + if (events & AWS_IO_EVENT_TYPE_REMOTE_HANG_UP || events & AWS_IO_EVENT_TYPE_CLOSED) { + aws_raise_error(AWS_IO_SOCKET_CLOSED); + AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: closed remotely", (void *)socket, socket->io_handle.data.fd); + if (socket->readable_fn) { + socket->readable_fn(socket, AWS_IO_SOCKET_CLOSED, socket->readable_user_data); + } + goto end_check; + } + + if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_ERROR) { + int aws_error = aws_socket_get_error(socket); + aws_raise_error(aws_error); + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, "id=%p fd=%d: error event occurred", (void *)socket, socket->io_handle.data.fd); + if (socket->readable_fn) { + socket->readable_fn(socket, aws_error, socket->readable_user_data); + } + goto end_check; + } + +end_check: + aws_ref_count_release(&socket_impl->internal_refcount); +} + +int aws_socket_assign_to_event_loop(struct aws_socket *socket, struct aws_event_loop *event_loop) { + if (!socket->event_loop) { + AWS_LOGF_DEBUG( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: assigning to event loop %p", + (void *)socket, + socket->io_handle.data.fd, + (void *)event_loop); + socket->event_loop = event_loop; + struct posix_socket *socket_impl = socket->impl; + socket_impl->currently_subscribed = true; + if (aws_event_loop_subscribe_to_io_events( + event_loop, + &socket->io_handle, + AWS_IO_EVENT_TYPE_WRITABLE | AWS_IO_EVENT_TYPE_READABLE, + s_on_socket_io_event, + socket)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: assigning to event loop %p failed with error %d", + (void *)socket, + socket->io_handle.data.fd, + (void *)event_loop, + aws_last_error()); + socket_impl->currently_subscribed = false; + socket->event_loop = NULL; + return AWS_OP_ERR; + } + + return AWS_OP_SUCCESS; + } + + return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); +} + +struct aws_event_loop *aws_socket_get_event_loop(struct aws_socket *socket) { + return socket->event_loop; +} + +int aws_socket_subscribe_to_readable_events( + struct aws_socket *socket, + aws_socket_on_readable_fn *on_readable, + void *user_data) { + + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, " id=%p fd=%d: subscribing to readable events", (void *)socket, socket->io_handle.data.fd); + if (!(socket->state & CONNECTED_READ)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: can't subscribe to readable events since the socket is not connected", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); + } + + if (socket->readable_fn) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: can't subscribe to readable events since it is already subscribed", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_ERROR_IO_ALREADY_SUBSCRIBED); + } + + AWS_ASSERT(on_readable); + socket->readable_user_data = user_data; + socket->readable_fn = on_readable; + + return AWS_OP_SUCCESS; +} + +int aws_socket_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read) { + AWS_ASSERT(amount_read); + + if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: cannot read from a different thread than event loop %p", + (void *)socket, + socket->io_handle.data.fd, + (void *)socket->event_loop); + return 
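/* Editorial illustration (hypothetical helper, not part of this patch): a caller on another
 * thread is expected to hop onto the event loop with a task instead, e.g.
 *
 *     struct my_read_ctx {
 *         struct aws_task task;
 *         struct aws_socket *socket;
 *         struct aws_byte_buf *buffer;
 *     };
 *
 *     static void s_my_read_task(struct aws_task *task, void *arg, enum aws_task_status status) {
 *         (void)task;
 *         struct my_read_ctx *ctx = arg;
 *         if (status == AWS_TASK_STATUS_RUN_READY) {
 *             size_t amount_read = 0;
 *             aws_socket_read(ctx->socket, ctx->buffer, &amount_read);
 *         }
 *     }
 *
 *     aws_task_init(&ctx->task, s_my_read_task, ctx, "my_socket_read_task");
 *     aws_event_loop_schedule_task_now(socket->event_loop, &ctx->task);
 */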
aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); + } + + if (!(socket->state & CONNECTED_READ)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: cannot read because it is not connected", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); + } + + ssize_t read_val = read(socket->io_handle.data.fd, buffer->buffer + buffer->len, buffer->capacity - buffer->len); + int errno_value = errno; /* Always cache errno before potential side-effect */ + + AWS_LOGF_TRACE( + AWS_LS_IO_SOCKET, "id=%p fd=%d: read of %d", (void *)socket, socket->io_handle.data.fd, (int)read_val); + + if (read_val > 0) { + *amount_read = (size_t)read_val; + buffer->len += *amount_read; + return AWS_OP_SUCCESS; + } + + /* read_val of 0 means EOF which we'll treat as AWS_IO_SOCKET_CLOSED */ + if (read_val == 0) { + AWS_LOGF_INFO( + AWS_LS_IO_SOCKET, "id=%p fd=%d: zero read, socket is closed", (void *)socket, socket->io_handle.data.fd); + *amount_read = 0; + + if (buffer->capacity - buffer->len > 0) { + return aws_raise_error(AWS_IO_SOCKET_CLOSED); + } + + return AWS_OP_SUCCESS; + } + +#if defined(EWOULDBLOCK) + if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { +#else + if (errno_value == EAGAIN) { +#endif + AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: read would block", (void *)socket, socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); + } + + if (errno_value == EPIPE || errno_value == ECONNRESET) { + AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: socket is closed.", (void *)socket, socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_CLOSED); + } + + if (errno_value == ETIMEDOUT) { + AWS_LOGF_ERROR(AWS_LS_IO_SOCKET, "id=%p fd=%d: socket timed out.", (void *)socket, socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_TIMEOUT); + } + + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: read failed with error: %s", + (void *)socket, + socket->io_handle.data.fd, + strerror(errno_value)); + return aws_raise_error(s_determine_socket_error(errno_value)); +} + +int aws_socket_write( + struct aws_socket *socket, + const struct aws_byte_cursor *cursor, + aws_socket_on_write_completed_fn *written_fn, + void *user_data) { + if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { + return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); + } + + if (!(socket->state & CONNECTED_WRITE)) { + AWS_LOGF_ERROR( + AWS_LS_IO_SOCKET, + "id=%p fd=%d: cannot write to because it is not connected", + (void *)socket, + socket->io_handle.data.fd); + return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); + } + + AWS_ASSERT(written_fn); + struct posix_socket *socket_impl = socket->impl; + struct socket_write_request *write_request = + aws_mem_calloc(socket->allocator, 1, sizeof(struct socket_write_request)); + + if (!write_request) { + return AWS_OP_ERR; + } + + write_request->original_buffer_len = cursor->len; + write_request->written_fn = written_fn; + write_request->write_user_data = user_data; + write_request->cursor_cpy = *cursor; + aws_linked_list_push_back(&socket_impl->write_queue, &write_request->node); + + return s_process_socket_write_requests(socket, write_request); +} + +int aws_socket_get_error(struct aws_socket *socket) { + int connect_result; + socklen_t result_length = sizeof(connect_result); + + if (getsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_ERROR, &connect_result, &result_length) < 0) { + return s_determine_socket_error(errno); + } + + if (connect_result) { + return 
s_determine_socket_error(connect_result); + } + + return AWS_OP_SUCCESS; +} + +bool aws_socket_is_open(struct aws_socket *socket) { + return socket->io_handle.data.fd >= 0; +} + +void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint) { + struct aws_uuid uuid; + AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); + char uuid_str[AWS_UUID_STR_LEN] = {0}; + struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); + AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); + snprintf(endpoint->address, sizeof(endpoint->address), "/tmp/testsock" PRInSTR ".sock", AWS_BYTE_BUF_PRI(uuid_buf)); +} From 702312afb18a954f06765f7f3d01a1a25bf5692d Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 17 Sep 2024 08:54:42 -0700 Subject: [PATCH 16/39] fixup --- source/qnx/ionotify_event_loop.c | 5 +++-- source/qnx/pipe.c | 3 +-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index 1d9148162..d13afe45e 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -536,7 +536,7 @@ static void s_process_io_result( struct aws_io_handle *handle, const struct aws_io_handle_io_op_result *io_op_result) { - AWS_ASSERT(!s_is_on_callers_thread(event_loop)); + AWS_ASSERT(s_is_on_callers_thread(event_loop)); AWS_ASSERT(handle->additional_data); struct ionotify_event_data *ionotify_event_data = handle->additional_data; @@ -570,7 +570,8 @@ static void s_process_io_result( AWS_LS_IO_EVENT_LOOP, "id=%p: Got EWOULDBLOCK for fd %d, rearming it", (void *)event_loop, handle->data.fd); /* We're on the event loop thread, just schedule subscribing task. */ ionotify_event_data->events_subscribed = event_types; - s_subscribe_task(NULL, ionotify_event_data, AWS_TASK_STATUS_RUN_READY); + struct ionotify_loop *ionotify_loop = event_loop->impl_data; + aws_task_scheduler_schedule_now(&ionotify_loop->scheduler, &ionotify_event_data->subscribe_task); } /* Notify event loop of error conditions. */ diff --git a/source/qnx/pipe.c b/source/qnx/pipe.c index 78578baf2..3410a5197 100644 --- a/source/qnx/pipe.c +++ b/source/qnx/pipe.c @@ -288,8 +288,7 @@ int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_b return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } return s_raise_posix_error(errno_value); - } - else if (read_val == 0) { + } else if (read_val == 0) { // Return results back to event loop. if (read_impl->handle.update_io_result) { struct aws_io_handle_io_op_result io_op_result; From eede270775cad2bf520437a0ec57753851b2b66b Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 17 Sep 2024 10:42:43 -0700 Subject: [PATCH 17/39] fixup --- source/qnx/ionotify_event_loop.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index d13afe45e..49871b7a0 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -97,7 +97,7 @@ struct ionotify_event_data { void *user_data; struct aws_task subscribe_task; struct aws_task cleanup_task; - /* ID with a value that can fit into pulse user data field (only _NOTIFY_COND_MASK bits can be used). */ + /* ID with a value that can fit into pulse user data field (only _NOTIFY_DATA_MASK bits can be used). */ int handle_id; /* False when handle is unsubscribed, but this struct hasn't been cleaned up yet. 
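 * (Illustrative lifecycle, per the unsubscribe handling added later in this series: unsubscribing
 * flips is_subscribed to false, and the struct is freed by a cleanup task queued on the loop, so
 * tasks still in flight can check this flag and bail out.)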
 */
    bool is_subscribed;
@@ -351,8 +351,8 @@ static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *ta
 static int s_add_handle(struct ionotify_loop *ionotify_loop, struct ionotify_event_data *ionotify_event_data) {
     AWS_ASSERT(s_is_on_callers_thread(ionotify_event_data->event_loop));
-    /* Special constant, _NOTIFY_COND_MASK, limits the maximum value that can be used as user data in I/O events. */
-    int max_handle_id = _NOTIFY_COND_MASK;
+    /* Special constant, _NOTIFY_DATA_MASK, limits the maximum value that can be used as user data in I/O events. */
+    int max_handle_id = _NOTIFY_DATA_MASK;
     if (AWS_UNLIKELY(aws_hash_table_get_entry_count(&ionotify_loop->handles) == (size_t)max_handle_id)) {
         AWS_LOGF_ERROR(

From 8bc5d1b70f2aa11ec8f08ef28a4b2c98cd3514f2 Mon Sep 17 00:00:00 2001
From: Igor Abdrakhimov
Date: Tue, 17 Sep 2024 10:42:59 -0700
Subject: [PATCH 18/39] Fix pipe missing events issue

---
 source/qnx/ionotify_event_loop.c | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c
index 49871b7a0..b03418aed 100644
--- a/source/qnx/ionotify_event_loop.c
+++ b/source/qnx/ionotify_event_loop.c
@@ -527,6 +527,38 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta
             (void *)event_loop,
             ionotify_event_data->handle->data.fd);
     }
+
+    /* The QNX resource manager for POSIX pipes has a bug (or undocumented behavior): under specific conditions, it
+     * stops sending requested events. Below are the details.
+     *
+     * First, a quote from the ionotify docs for _NOTIFY_ACTION_EDGEARM:
+     *     Conditions are considered as met only if a change occurs since the last call to
+     *     ionotify(..., _NOTIFY_ACTION_EDGEARM, ...). Met conditions are returned; a notification is armed for unmet
+     *     conditions.
+     *
+     * Now, the issue. If ionotify arms the writing end of the pipe while it still has buffer space for data, the
+     * return code contains the _NOTIFY_COND_OBAND event. This is expected and correct behavior. According to the
+     * docs, the writing end of the pipe should not be armed in the resource manager in such a case. However, after
+     * that, the resource manager stops returning _NOTIFY_COND_OBAND for the writing end altogether (i.e. the
+     * follow-up ionotify calls do not return _NOTIFY_COND_OBAND). It seems the resource manager actually arms the
+     * writing end, but does so incorrectly.
+     *
+     * Disarming the met conditions fixes the issue.
+     *
+     * NOTE: Sockets are not affected by this issue. Since disarming non-armed conditions shouldn't cause any side
+     * effects, perform it for everyone.
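 *
 * A minimal sketch of the ionotify() convention relied on above (per the docs quoted earlier,
 * with a pulse sigevent assumed to be registered for the fd): the return value reports the
 * conditions that are already met, e.g.
 *
 *     struct sigevent event; // pulse sigevent registered for this fd
 *     int rc = ionotify(fd, _NOTIFY_ACTION_EDGEARM, _NOTIFY_COND_OUTPUT, &event);
 *     if (rc & _NOTIFY_COND_OUTPUT) {
 *         // fd is writable right now; no pulse will be delivered for this condition
 *     }
 *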
+ */ + int active_events = rc & (_NOTIFY_COND_OBAND | _NOTIFY_COND_INPUT | _NOTIFY_COND_OUTPUT); + if (active_events) { + rc = ionotify(ionotify_event_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, active_events, NULL); + if (rc == -1) { + AWS_LOGF_ERROR( + AWS_LS_IO_EVENT_LOOP, + "id=%p: Failed to disarm events for fd %d", + (void *)event_loop, + ionotify_event_data->handle->data.fd); + } + } } } From 692d1253e397d857d1ff4e3f95e5a5f0db9a1f27 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 17 Sep 2024 11:12:09 -0700 Subject: [PATCH 19/39] Handle unsubscribing in a task --- source/qnx/ionotify_event_loop.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index b03418aed..b58b0df44 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -418,6 +418,10 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta struct aws_event_loop *event_loop = ionotify_event_data->event_loop; struct ionotify_loop *ionotify_loop = event_loop->impl_data; + if (!ionotify_event_data->is_subscribed) { + return; + } + AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: Subscribing to events on fd %d for events %d", @@ -573,6 +577,10 @@ static void s_process_io_result( AWS_ASSERT(handle->additional_data); struct ionotify_event_data *ionotify_event_data = handle->additional_data; + if (!ionotify_event_data->is_subscribed) { + return; + } + AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: Processing I/O operation result for fd %d: status %d (%s); read status %d (%s); write status %d (%s)", @@ -712,9 +720,16 @@ static void s_free_io_event_resources(void *user_data) { aws_mem_release(event_data->alloc, (void *)event_data); } +static void s_unsubscribe_cleanup_task(struct aws_task *task, void *arg, enum aws_task_status status) { + (void)task; + (void)status; + struct ionotify_event_data *ionotify_event_data = (struct ionotify_event_data *)arg; + s_free_io_event_resources(ionotify_event_data); +} + static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, "id=%p: un-subscribing from events on fd %d", (void *)event_loop, handle->data.fd); + AWS_LS_IO_EVENT_LOOP, "id=%p: Unsubscribing from events on fd %d", (void *)event_loop, handle->data.fd); struct ionotify_loop *ionotify_loop = event_loop->impl_data; @@ -752,8 +767,13 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc handle->additional_data = NULL; handle->update_io_result = NULL; - /* Main loop obtains ionotify_event_data instance from hash map, so it's safe to release it right here. */ - s_free_io_event_resources(ionotify_event_data); + /* There might be pending tasks for ionotify_event_data, so put a cleanup task. 
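 * (The hazard this avoids, for illustration: a resubscribing task scheduled from
 * s_process_io_result() may still hold a pointer to ionotify_event_data; freeing it inline would
 * leave that task dangling, while a cleanup task runs strictly after everything already queued.)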
*/ + aws_task_init( + &ionotify_event_data->cleanup_task, + s_unsubscribe_cleanup_task, + ionotify_event_data, + "ionotify_event_loop_unsubscribe_cleanup"); + s_schedule_task_now(event_loop, &ionotify_event_data->cleanup_task); return AWS_OP_SUCCESS; } From c79772e9c29d91a31ef0a3e572158a71dbb73343 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 17 Sep 2024 11:22:40 -0700 Subject: [PATCH 20/39] Handle is_subscribed only in resubscriptions --- source/qnx/ionotify_event_loop.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index b58b0df44..8ffc1d8d4 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -418,10 +418,6 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta struct aws_event_loop *event_loop = ionotify_event_data->event_loop; struct ionotify_loop *ionotify_loop = event_loop->impl_data; - if (!ionotify_event_data->is_subscribed) { - return; - } - AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: Subscribing to events on fd %d for events %d", @@ -467,6 +463,9 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta * NOTE: If you create a new sigevent for the same file descriptor, with the same flags, you HAVE to register * it again. */ MsgRegisterEvent(&ionotify_event_data->event, ionotify_event_data->handle->data.fd); + } else if (!ionotify_event_data->is_subscribed) { + /* This is a resubscribing task, but unsubscribe happened, so ignore it. */ + return; } ionotify_event_data->is_subscribed = true; From 2bddf95c40c86bfc81dc5b2561a24c1aebfee6fb Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 17 Sep 2024 15:34:19 -0700 Subject: [PATCH 21/39] Add aws_pipe_read to tests --- tests/pipe_test.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/pipe_test.c b/tests/pipe_test.c index 053c5aefd..9607f1f3c 100644 --- a/tests/pipe_test.c +++ b/tests/pipe_test.c @@ -429,6 +429,8 @@ static void s_on_readable_event(struct aws_pipe_read_end *read_end, int error_co } s_signal_done_on_read_end_closed(state); } + } else if (error_code == AWS_ERROR_SUCCESS) { + aws_pipe_read(&state->read_end, &state->buffers.dst, NULL); } return; From dd34f87ef7471f833d51759f47c5e31b470aaae1 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Fri, 20 Sep 2024 15:13:34 -0700 Subject: [PATCH 22/39] Fix race condition, fix pulse error code --- source/qnx/ionotify_event_loop.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index 8ffc1d8d4..dbd31621e 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -675,8 +675,7 @@ static void s_update_io_result( ionotify_io_op_results->handle = handle; memcpy(&ionotify_io_op_results->io_op_result, io_op_result, sizeof(struct aws_io_handle_io_op_result)); aws_task_init(task, s_update_io_result_task, ionotify_io_op_results, "ionotify_event_loop_resubscribe_ct"); - struct ionotify_loop *ionotify_loop = event_loop->impl_data; - aws_task_scheduler_schedule_now(&ionotify_loop->scheduler, task); + s_schedule_task_now(event_loop, task); return; } @@ -909,9 +908,6 @@ static void s_process_pulse( (void *)event_loop, pulse->code, ionotify_event_data->event.sigev_code); - if (pulse->code != IO_EVENT_PULSE_SIGEV_CODE) { - event_mask |= AWS_IO_EVENT_TYPE_ERROR; - } } if (ionotify_event_data->latest_io_event_types == AWS_IO_EVENT_TYPE_CLOSED) { From 
c4dceeacf050bd2a4a0900b31b5e84fe093084e7 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Fri, 20 Sep 2024 15:16:32 -0700 Subject: [PATCH 23/39] Use separate task for resubscribing --- source/qnx/ionotify_event_loop.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index dbd31621e..1c323103c 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -96,6 +96,7 @@ struct ionotify_event_data { struct sigevent event; void *user_data; struct aws_task subscribe_task; + struct aws_task resubscribe_task; struct aws_task cleanup_task; /* ID with a value that can fit into pulse user data field (only _NOTIFY_DATA_MASK bits can be used). */ int handle_id; @@ -610,7 +611,8 @@ static void s_process_io_result( /* We're on the event loop thread, just schedule subscribing task. */ ionotify_event_data->events_subscribed = event_types; struct ionotify_loop *ionotify_loop = event_loop->impl_data; - aws_task_scheduler_schedule_now(&ionotify_loop->scheduler, &ionotify_event_data->subscribe_task); + aws_task_scheduler_cancel_task(&ionotify_loop->scheduler, &ionotify_event_data->resubscribe_task); + aws_task_scheduler_schedule_now(&ionotify_loop->scheduler, &ionotify_event_data->resubscribe_task); } /* Notify event loop of error conditions. */ @@ -705,6 +707,12 @@ static int s_subscribe_to_io_events( ionotify_event_data->user_data = user_data; ionotify_event_data->handle->update_io_result = s_update_io_result; + aws_task_init( + &ionotify_event_data->resubscribe_task, + s_subscribe_task, + ionotify_event_data, + "ionotify_event_loop_resubscribe"); + aws_task_init( &ionotify_event_data->subscribe_task, s_subscribe_task, ionotify_event_data, "ionotify_event_loop_subscribe"); s_schedule_task_now(event_loop, &ionotify_event_data->subscribe_task); From 29f900e90c2720046350d6d46f59a5915809f46b Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Fri, 20 Sep 2024 15:19:26 -0700 Subject: [PATCH 24/39] Add QNX paths --- source/s2n/s2n_tls_channel_handler.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/source/s2n/s2n_tls_channel_handler.c b/source/s2n/s2n_tls_channel_handler.c index 355a64b1b..5aded10bd 100644 --- a/source/s2n/s2n_tls_channel_handler.c +++ b/source/s2n/s2n_tls_channel_handler.c @@ -101,6 +101,7 @@ AWS_STATIC_STRING_FROM_LITERAL(s_rhel_path, "/etc/pki/tls/certs"); AWS_STATIC_STRING_FROM_LITERAL(s_android_path, "/system/etc/security/cacerts"); AWS_STATIC_STRING_FROM_LITERAL(s_free_bsd_path, "/usr/local/share/certs"); AWS_STATIC_STRING_FROM_LITERAL(s_net_bsd_path, "/etc/openssl/certs"); +AWS_STATIC_STRING_FROM_LITERAL(s_qnx_path, "/usr/ssl/certs"); AWS_IO_API const char *aws_determine_default_pki_dir(void) { /* debian variants; OpenBSD (although the directory doesn't exist by default) */ @@ -128,6 +129,11 @@ AWS_IO_API const char *aws_determine_default_pki_dir(void) { return aws_string_c_str(s_net_bsd_path); } + /* QNX */ + if (aws_path_exists(s_qnx_path)) { + return aws_string_c_str(s_qnx_path); + } + return NULL; } @@ -137,6 +143,7 @@ AWS_STATIC_STRING_FROM_LITERAL(s_open_suse_ca_file_path, "/etc/ssl/ca-bundle.pem AWS_STATIC_STRING_FROM_LITERAL(s_open_elec_ca_file_path, "/etc/pki/tls/cacert.pem"); AWS_STATIC_STRING_FROM_LITERAL(s_modern_rhel_ca_file_path, "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem"); AWS_STATIC_STRING_FROM_LITERAL(s_openbsd_ca_file_path, "/etc/ssl/cert.pem"); +AWS_STATIC_STRING_FROM_LITERAL(s_qnx_ca_file_path, 
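/* Editorial note with a usage sketch (hypothetical path, not part of this patch): images that
 * keep certificates elsewhere can bypass this detection through the existing TLS options API:
 *
 *     struct aws_tls_ctx_options opts;
 *     aws_tls_ctx_options_init_default_client(&opts, allocator);
 *     aws_tls_ctx_options_override_default_trust_store_from_path(&opts, NULL, "/etc/my/ca.pem");
 */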
"/usr/ssl/certs/cacert.pem"); AWS_IO_API const char *aws_determine_default_pki_ca_file(void) { /* debian variants */ @@ -169,6 +176,11 @@ AWS_IO_API const char *aws_determine_default_pki_ca_file(void) { return aws_string_c_str(s_openbsd_ca_file_path); } + /* QNX */ + if (aws_path_exists(s_qnx_ca_file_path)) { + return aws_string_c_str(s_qnx_ca_file_path); + } + return NULL; } From a8d8366c34d2c69f2dfb3324e3606c276925e0ea Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Mon, 23 Sep 2024 10:46:33 -0700 Subject: [PATCH 25/39] Remove non-qnx specifics from posix copies --- source/qnx/host_resolver.c | 7 -- source/qnx/pipe.c | 22 +---- source/qnx/socket.c | 169 +------------------------------------ 3 files changed, 3 insertions(+), 195 deletions(-) diff --git a/source/qnx/host_resolver.c b/source/qnx/host_resolver.c index e4aafb838..46c3bd1a8 100644 --- a/source/qnx/host_resolver.c +++ b/source/qnx/host_resolver.c @@ -30,20 +30,13 @@ int aws_default_dns_resolve( const char *hostname_cstr = aws_string_c_str(host_name); AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolving host %s", hostname_cstr); - /* Android would prefer NO HINTS IF YOU DON'T MIND, SIR */ -#if defined(ANDROID) - int err_code = getaddrinfo(hostname_cstr, NULL, NULL, &result); -#else struct addrinfo hints; AWS_ZERO_STRUCT(hints); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; -# if !defined(__OpenBSD__) hints.ai_flags = AI_ALL | AI_V4MAPPED; -# endif /* __OpenBSD__ */ int err_code = getaddrinfo(hostname_cstr, NULL, &hints, &result); -#endif if (err_code) { AWS_LOGF_ERROR( diff --git a/source/qnx/pipe.c b/source/qnx/pipe.c index 3410a5197..19eac19b1 100644 --- a/source/qnx/pipe.c +++ b/source/qnx/pipe.c @@ -7,31 +7,13 @@ #include -#ifdef __GLIBC__ -# define __USE_GNU -#endif - -/* TODO: move this detection to CMAKE and a config header */ -#if !defined(COMPAT_MODE) && defined(__GLIBC__) && ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 9) || __GLIBC__ > 2) -# define HAVE_PIPE2 1 -#else -# define HAVE_PIPE2 0 -#endif +/* TODO Verified for QNX 8.0 only. */ +#define HAVE_PIPE2 1 #include #include #include -/* This isn't defined on ancient linux distros (breaking the builds). - * However, if this is a prebuild, we purposely build on an ancient system, but - * we want the kernel calls to still be the same as a modern build since that's likely the target of the application - * calling this code. Just define this if it isn't there already. GlibC and the kernel don't really care how the flag - * gets passed as long as it does. - */ -#ifndef O_CLOEXEC -# define O_CLOEXEC 02000000 -#endif - struct read_end_impl { struct aws_allocator *alloc; struct aws_io_handle handle; diff --git a/source/qnx/socket.c b/source/qnx/socket.c index fa131b393..593b8fe23 100644 --- a/source/qnx/socket.c +++ b/source/qnx/socket.c @@ -18,43 +18,12 @@ #include #include #include -#include /* Required when VSOCK is used */ #include #include #include #include #include -/* - * On OsX, suppress NoPipe signals via flags to setsockopt() - * On Linux, suppress NoPipe signals via flags to send() - */ -#if defined(__MACH__) -# define NO_SIGNAL_SOCK_OPT SO_NOSIGPIPE -# define NO_SIGNAL_SEND 0 -# define TCP_KEEPIDLE TCP_KEEPALIVE -#else -# define NO_SIGNAL_SEND MSG_NOSIGNAL -#endif - -/* This isn't defined on ancient linux distros (breaking the builds). 
- * However, if this is a prebuild, we purposely build on an ancient system, but - * we want the kernel calls to still be the same as a modern build since that's likely the target of the application - * calling this code. Just define this if it isn't there already. GlibC and the kernel don't really care how the flag - * gets passed as long as it does. - */ -#ifndef O_CLOEXEC -# define O_CLOEXEC 02000000 -#endif - -#ifdef USE_VSOCK -# if defined(__linux__) && defined(AF_VSOCK) -# include -# else -# error "USE_VSOCK not supported on current platform" -# endif -#endif - /* other than CONNECTED_READ | CONNECTED_WRITE * a socket is only in one of these states at a time. */ enum socket_state { @@ -77,10 +46,6 @@ static int s_convert_domain(enum aws_socket_domain domain) { return AF_INET6; case AWS_SOCKET_LOCAL: return AF_UNIX; -#ifdef USE_VSOCK - case AWS_SOCKET_VSOCK: - return AF_VSOCK; -#endif default: AWS_ASSERT(0); return AF_INET; @@ -337,15 +302,6 @@ static int s_update_local_endpoint(struct aws_socket *socket) { return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); } memcpy(tmp_endpoint.address, s->sun_path, sun_len); -#if USE_VSOCK - } else if (address.ss_family == AF_VSOCK) { - struct sockaddr_vm *s = (struct sockaddr_vm *)&address; - - tmp_endpoint.port = s->svm_port; - - snprintf(tmp_endpoint.address, sizeof(tmp_endpoint.address), "%" PRIu32, s->svm_cid); - return AWS_OP_SUCCESS; -#endif /* USE_VSOCK */ } else { AWS_ASSERT(0); return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); @@ -569,46 +525,9 @@ struct socket_address { struct sockaddr_in addr_in; struct sockaddr_in6 addr_in6; struct sockaddr_un un_addr; -#ifdef USE_VSOCK - struct sockaddr_vm vm_addr; -#endif } sock_addr_types; }; -#ifdef USE_VSOCK -/** Convert a string to a VSOCK CID. Respects the calling convetion of inet_pton: - * 0 on error, 1 on success. */ -static int parse_cid(const char *cid_str, unsigned int *value) { - if (cid_str == NULL || value == NULL) { - errno = EINVAL; - return 0; - } - /* strtoll returns 0 as both error and correct value */ - errno = 0; - /* unsigned long long to handle edge cases in convention explicitly */ - long long cid = strtoll(cid_str, NULL, 10); - if (errno != 0) { - return 0; - } - - /* -1U means any, so it's a valid value, but it needs to be converted to - * unsigned int. 
*/ - if (cid == -1) { - *value = VMADDR_CID_ANY; - return 1; - } - - if (cid < 0 || cid > UINT_MAX) { - errno = ERANGE; - return 0; - } - - /* cast is safe here, edge cases already checked */ - *value = (unsigned int)cid; - return 1; -} -#endif - int aws_socket_connect( struct aws_socket *socket, const struct aws_socket_endpoint *remote_endpoint, @@ -663,13 +582,6 @@ int aws_socket_connect( address.sock_addr_types.un_addr.sun_family = AF_UNIX; strncpy(address.sock_addr_types.un_addr.sun_path, remote_endpoint->address, AWS_ADDRESS_MAX_LEN); sock_size = sizeof(address.sock_addr_types.un_addr); -#ifdef USE_VSOCK - } else if (socket->options.domain == AWS_SOCKET_VSOCK) { - pton_err = parse_cid(remote_endpoint->address, &address.sock_addr_types.vm_addr.svm_cid); - address.sock_addr_types.vm_addr.svm_family = AF_VSOCK; - address.sock_addr_types.vm_addr.svm_port = remote_endpoint->port; - sock_size = sizeof(address.sock_addr_types.vm_addr); -#endif } else { AWS_ASSERT(0); return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); @@ -839,13 +751,6 @@ int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint address.sock_addr_types.un_addr.sun_family = AF_UNIX; strncpy(address.sock_addr_types.un_addr.sun_path, local_endpoint->address, AWS_ADDRESS_MAX_LEN); sock_size = sizeof(address.sock_addr_types.un_addr); -#ifdef USE_VSOCK - } else if (socket->options.domain == AWS_SOCKET_VSOCK) { - pton_err = parse_cid(local_endpoint->address, &address.sock_addr_types.vm_addr.svm_cid); - address.sock_addr_types.vm_addr.svm_family = AF_VSOCK; - address.sock_addr_types.vm_addr.svm_port = local_endpoint->port; - sock_size = sizeof(address.sock_addr_types.vm_addr); -#endif } else { AWS_ASSERT(0); return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); @@ -1250,20 +1155,6 @@ int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_op socket->options = *options; -#ifdef NO_SIGNAL_SOCK_OPT - int option_value = 1; - if (AWS_UNLIKELY(setsockopt( - socket->io_handle.data.fd, SOL_SOCKET, NO_SIGNAL_SOCK_OPT, &option_value, sizeof(option_value)))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() for NO_SIGNAL_SOCK_OPT failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - } -#endif /* NO_SIGNAL_SOCK_OPT */ - int reuse = 1; if (AWS_UNLIKELY(setsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(int)))) { int errno_value = errno; /* Always cache errno before potential side-effect */ @@ -1302,58 +1193,6 @@ int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_op errno_value); return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); } -#elif defined(IP_BOUND_IF) - /* - * If SO_BINDTODEVICE is not supported, the alternative is IP_BOUND_IF which requires an index instead - * of a name. We are not using this everywhere because this requires 2 system calls instead of 1, and is - * dependent upon the type of sockets, which doesn't support AWS_SOCKET_LOCAL. As a future optimization, we can - * look into caching the result of if_nametoindex. - */ - uint network_interface_index = if_nametoindex(options->network_interface_name); - if (network_interface_index == 0) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: network_interface_name \"%s\" not found. 
if_nametoindex() failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - options->network_interface_name, - errno_value); - return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); - } - if (options->domain == AWS_SOCKET_IPV6) { - if (setsockopt( - socket->io_handle.data.fd, - IPPROTO_IPV6, - IPV6_BOUND_IF, - &network_interface_index, - sizeof(network_interface_index))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() with IPV6_BOUND_IF for \"%s\" failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - options->network_interface_name, - errno_value); - return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); - } - } else if (setsockopt( - socket->io_handle.data.fd, - IPPROTO_IP, - IP_BOUND_IF, - &network_interface_index, - sizeof(network_interface_index))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() with IP_BOUND_IF for \"%s\" failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - options->network_interface_name, - errno_value); - return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); - } #else AWS_LOGF_ERROR( AWS_LS_IO_SOCKET, @@ -1378,7 +1217,6 @@ int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_op } } -#if !defined(__OpenBSD__) if (socket->options.keep_alive_interval_sec && socket->options.keep_alive_timeout_sec) { int ival_in_secs = socket->options.keep_alive_interval_sec; if (AWS_UNLIKELY(setsockopt( @@ -1418,7 +1256,6 @@ int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_op errno_value); } } -#endif /* __OpenBSD__ */ } return AWS_OP_SUCCESS; @@ -1668,7 +1505,7 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc (unsigned long long)write_request->cursor_cpy.len); ssize_t written = send( - socket->io_handle.data.fd, write_request->cursor_cpy.ptr, write_request->cursor_cpy.len, NO_SIGNAL_SEND); + socket->io_handle.data.fd, write_request->cursor_cpy.ptr, write_request->cursor_cpy.len, MSG_NOSIGNAL); int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_TRACE( @@ -1948,11 +1785,7 @@ int aws_socket_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size return AWS_OP_SUCCESS; } -#if defined(EWOULDBLOCK) if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { -#else - if (errno_value == EAGAIN) { -#endif AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: read would block", (void *)socket, socket->io_handle.data.fd); return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } From 6af39ec74e9e608044e404185bcdfa3d4c595538 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Mon, 23 Sep 2024 11:28:29 -0700 Subject: [PATCH 26/39] Improve comments and logging --- source/qnx/ionotify_event_loop.c | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index 1c323103c..bc6242c1b 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -62,7 +62,7 @@ struct ionotify_loop { /* Channel to receive I/O events. Resource managers open connections to this channel to send their events. */ int io_events_channel_id; /* Connection to the events channel opened by the event loop. It's used by ionotify and some event loop logic (e.g. 
- * cross-thread and I/O results notifications) to send pulses to the pulse channel. */ + * cross-thread and I/O results notifications) to send pulses to the events channel. */ int pulse_connection_id; struct aws_mutex task_pre_queue_mutex; struct aws_linked_list task_pre_queue; @@ -76,8 +76,7 @@ struct ionotify_loop { * QNX 8.0) in this field to specify the types of the triggered events. * * Since event loop must know the types of received I/O events, the second options is used. 28-bit IDs are mapped to - * each subscribed aws_io_handle. The mapping is stored in this hash table. - */ + * each subscribed aws_io_handle. The mapping is stored in this hash table. */ struct aws_hash_table handles; int last_handle_id; }; @@ -89,7 +88,9 @@ struct ionotify_event_data { struct aws_event_loop *event_loop; aws_event_loop_on_event_fn *on_event; int events_subscribed; - /* enum aws_io_event_type */ + /* A QNX event notification can use only 4 bits for I/O event types (input data, output data, out-of-band data, and + * extended flag indicating that additional events happened). So, the latest_io_event_types field contains these + * additional event types converted to CRT event loop domain (enum aws_io_event_type). */ int latest_io_event_types; /* Connection opened on the events channel. Used to send pulses to the main event loop. */ int pulse_connection_id; @@ -144,11 +145,16 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( goto clean_up_ionotify; } - /* Setup channel to receive cross-thread pulses and pulses from resource managers. */ + /* Setup channel to receive events from resource managers. */ ionotify_loop->io_events_channel_id = ChannelCreate(0); int errno_value = errno; /* Always cache errno before potential side-effect */ if (ionotify_loop->io_events_channel_id == -1) { - printf("ChannelCreate failed with errno %d (%s)\n", errno_value, strerror(errno_value)); + AWS_LOGF_ERROR( + AWS_LS_IO_EVENT_LOOP, + "id=%p: ChannelCreate failed with errno %d (%s)\n", + (void *)event_loop, + errno_value, + strerror(errno_value)); goto clean_up_thread; } AWS_LOGF_DEBUG( @@ -159,11 +165,19 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( /* Open connection over the QNX channel for pulses. 
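 * (For illustration, the delivery mechanism assumed here: senders raise a pulse on this
 * connection and the loop thread collects it with MsgReceive(), e.g.
 *
 *     MsgSendPulse(ionotify_loop->pulse_connection_id, -1, CROSS_THREAD_PULSE_SIGEV_CODE, 0);
 *
 * where priority -1 inherits the calling thread's priority.)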
*/ ionotify_loop->pulse_connection_id = ConnectAttach(0, 0, ionotify_loop->io_events_channel_id, _NTO_SIDE_CHANNEL, 0); + errno_value = errno; /* Always cache errno before potential side-effect */ if (ionotify_loop->pulse_connection_id == -1) { + AWS_LOGF_ERROR( + AWS_LS_IO_EVENT_LOOP, + "id=%p: ConnectAttach failed with errno %d (%s)\n", + (void *)event_loop, + errno_value, + strerror(errno_value)); goto clean_up_thread; } if (aws_task_scheduler_init(&ionotify_loop->scheduler, alloc)) { + AWS_LOGF_ERROR(AWS_LS_IO_EVENT_LOOP, "id=%p: aws_task_scheduler_init failed\n", (void *)event_loop); goto clean_up_thread; } @@ -859,7 +873,7 @@ static void s_process_pulse( const struct _pulse *pulse, bool *should_process_cross_thread_tasks) { if (pulse->code == CROSS_THREAD_PULSE_SIGEV_CODE) { - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: MsgReceived got cross-thread pulse", (void *)event_loop); + AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: MsgReceive got cross-thread pulse", (void *)event_loop); *should_process_cross_thread_tasks = true; return; } From 9c35c72d1d64832078090db7471aabfdc467f056 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Mon, 23 Sep 2024 11:38:44 -0700 Subject: [PATCH 27/39] Fix latest_io_event_types --- source/qnx/ionotify_event_loop.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index bc6242c1b..677d9f809 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -932,15 +932,13 @@ static void s_process_pulse( ionotify_event_data->event.sigev_code); } - if (ionotify_event_data->latest_io_event_types == AWS_IO_EVENT_TYPE_CLOSED) { - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, "id=%p: latest_io_event_types is AWS_IO_EVENT_TYPE_CLOSED", (void *)event_loop); - event_mask |= AWS_IO_EVENT_TYPE_CLOSED; + if (ionotify_event_data->latest_io_event_types) { + AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: latest_io_event_types is non-empty", (void *)event_loop); + event_mask |= ionotify_event_data->latest_io_event_types; + /* Reset additional I/O event types to not process them twice. */ + ionotify_event_data->latest_io_event_types = 0; } - /* Reset the I/O operation code to not process it twice. 
*/ - ionotify_event_data->latest_io_event_types = 0; - ionotify_event_data->on_event(event_loop, ionotify_event_data->handle, event_mask, ionotify_event_data->user_data); } From bc653f6cdc53b9d98feb3de710251a58810f1026 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Mon, 23 Sep 2024 11:55:42 -0700 Subject: [PATCH 28/39] Fix format --- source/qnx/socket.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/qnx/socket.c b/source/qnx/socket.c index 593b8fe23..30dd12de2 100644 --- a/source/qnx/socket.c +++ b/source/qnx/socket.c @@ -1504,8 +1504,8 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc (unsigned long long)write_request->original_buffer_len, (unsigned long long)write_request->cursor_cpy.len); - ssize_t written = send( - socket->io_handle.data.fd, write_request->cursor_cpy.ptr, write_request->cursor_cpy.len, MSG_NOSIGNAL); + ssize_t written = + send(socket->io_handle.data.fd, write_request->cursor_cpy.ptr, write_request->cursor_cpy.len, MSG_NOSIGNAL); int errno_value = errno; /* Always cache errno before potential side-effect */ AWS_LOGF_TRACE( From 48134e8b1625a30b037cbb47a071c62fe96013dc Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Tue, 24 Sep 2024 13:01:28 -0700 Subject: [PATCH 29/39] Merge posix and qnx sources --- CMakeLists.txt | 1 + include/aws/io/io.h | 5 +- source/posix/pipe.c | 21 + source/posix/socket.c | 29 + source/qnx/host_resolver.c | 114 -- source/qnx/pipe.c | 591 ---------- source/qnx/shared_library.c | 66 -- source/qnx/socket.c | 1874 ------------------------------- source/socket_channel_handler.c | 32 +- tests/event_loop_test.c | 2 - 10 files changed, 62 insertions(+), 2673 deletions(-) delete mode 100644 source/qnx/host_resolver.c delete mode 100644 source/qnx/pipe.c delete mode 100644 source/qnx/shared_library.c delete mode 100644 source/qnx/socket.c diff --git a/CMakeLists.txt b/CMakeLists.txt index 7b1890002..d83201e51 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -144,6 +144,7 @@ elseif (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD" OR CMAKE_SYSTEM_NAME STREQUAL "NetB elseif(CMAKE_SYSTEM_NAME STREQUAL "QNX") file(GLOB AWS_IO_OS_SRC "source/qnx/*.c" + "source/posix/*.c" ) set(EVENT_LOOP_DEFINE "ON_EVENT_WITH_RESULT") set(USE_S2N ON) diff --git a/include/aws/io/io.h b/include/aws/io/io.h index 890b3dfae..d39d42cf2 100644 --- a/include/aws/io/io.h +++ b/include/aws/io/io.h @@ -16,7 +16,6 @@ AWS_PUSH_SANE_WARNING_LEVEL struct aws_io_handle; -#if AWS_USE_ON_EVENT_WITH_RESULT struct aws_event_loop; /** @@ -37,7 +36,6 @@ typedef void(aws_io_handle_update_io_results_fn)( struct aws_event_loop *, struct aws_io_handle *, const struct aws_io_handle_io_op_result *); -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ struct aws_io_handle { union { @@ -45,9 +43,8 @@ struct aws_io_handle { void *handle; } data; void *additional_data; -#if AWS_USE_ON_EVENT_WITH_RESULT + /* Optional callback to return results of I/O operations performed on this handle. 
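 *
 * For illustration (hypothetical locals, field values are an example only), an I/O path reports
 * back like this:
 *
 *     struct aws_io_handle_io_op_result result;
 *     AWS_ZERO_STRUCT(result);
 *     result.written_bytes = bytes_sent;
 *     result.write_error_code = AWS_IO_READ_WOULD_BLOCK; // partial write, wait for next event
 *     if (handle->update_io_result) {
 *         handle->update_io_result(event_loop, handle, &result);
 *     }
 *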
*/ aws_io_handle_update_io_results_fn *update_io_result; -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ }; enum aws_io_message_type { diff --git a/source/posix/pipe.c b/source/posix/pipe.c index f727b021c..f5a72c8d6 100644 --- a/source/posix/pipe.c +++ b/source/posix/pipe.c @@ -278,9 +278,22 @@ int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_b if (read_val < 0) { int errno_value = errno; /* Always cache errno before potential side-effect */ if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { + if (read_impl->handle.update_io_result) { + struct aws_io_handle_io_op_result io_op_result; + AWS_ZERO_STRUCT(io_op_result); + io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; + read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result); + } return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); } return s_raise_posix_error(errno_value); + } else if (read_val == 0) { + if (read_impl->handle.update_io_result) { + struct aws_io_handle_io_op_result io_op_result; + AWS_ZERO_STRUCT(io_op_result); + io_op_result.error_code = AWS_IO_SOCKET_CLOSED; + read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result); + } } /* Success */ @@ -454,6 +467,14 @@ static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { /* The pipe is no longer writable. Bail out */ write_impl->is_writable = false; + + if (write_impl->handle.update_io_result) { + struct aws_io_handle_io_op_result io_op_result; + AWS_ZERO_STRUCT(io_op_result); + io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; + write_impl->handle.update_io_result(write_impl->event_loop, &write_impl->handle, &io_op_result); + } + return; } diff --git a/source/posix/socket.c b/source/posix/socket.c index dbbf62657..1c1b5a617 100644 --- a/source/posix/socket.c +++ b/source/posix/socket.c @@ -476,6 +476,14 @@ static void s_socket_connect_event( "id=%p fd=%d: spurious event, waiting for another notification.", (void *)socket_args->socket, handle->data.fd); + + if (handle->update_io_result) { + struct aws_io_handle_io_op_result io_op_result; + AWS_ZERO_STRUCT(io_op_result); + io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; + handle->update_io_result(event_loop, handle, &io_op_result); + } + return; } @@ -955,6 +963,9 @@ static void s_socket_accept_event( AWS_LOGF_DEBUG( AWS_LS_IO_SOCKET, "id=%p fd=%d: listening event received", (void *)socket, socket->io_handle.data.fd); + struct aws_io_handle_io_op_result io_op_result; + AWS_ZERO_STRUCT(io_op_result); + if (socket_impl->continue_accept && events & AWS_IO_EVENT_TYPE_READABLE) { int in_fd = 0; while (socket_impl->continue_accept && in_fd != -1) { @@ -966,12 +977,14 @@ static void s_socket_accept_event( int errno_value = errno; /* Always cache errno before potential side-effect */ if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { + io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; break; } int aws_error = aws_socket_get_error(socket); aws_raise_error(aws_error); s_on_connection_error(socket, aws_error); + io_op_result.read_error_code = aws_error; break; } @@ -1064,6 +1077,10 @@ static void s_socket_accept_event( } } + if (handle->update_io_result) { + handle->update_io_result(event_loop, handle, &io_op_result); + } + AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: finished processing incoming connections, " @@ -1632,6 +1649,9 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc bool 
parent_request_failed = false; bool pushed_to_written_queue = false; + struct aws_io_handle_io_op_result io_op_result; + AWS_ZERO_STRUCT(io_op_result); + /* if a close call happens in the middle, this queue will have been cleaned out from under us. */ while (!aws_linked_list_empty(&socket_impl->write_queue)) { struct aws_linked_list_node *node = aws_linked_list_front(&socket_impl->write_queue); @@ -1660,6 +1680,7 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc if (errno_value == EAGAIN) { AWS_LOGF_TRACE( AWS_LS_IO_SOCKET, "id=%p fd=%d: returned would block", (void *)socket, socket->io_handle.data.fd); + io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; break; } @@ -1672,6 +1693,7 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc aws_error = AWS_IO_SOCKET_CLOSED; aws_raise_error(aws_error); purge = true; + io_op_result.write_error_code = aws_error; break; } @@ -1684,9 +1706,12 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc errno_value); aws_error = s_determine_socket_error(errno_value); aws_raise_error(aws_error); + io_op_result.write_error_code = aws_error; break; } + io_op_result.written_bytes += (size_t)written; + size_t remaining_to_write = write_request->cursor_cpy.len; aws_byte_cursor_advance(&write_request->cursor_cpy, (size_t)written); @@ -1732,6 +1757,10 @@ static int s_process_socket_write_requests(struct aws_socket *socket, struct soc aws_event_loop_schedule_task_now(socket->event_loop, &socket_impl->written_task); } + if (socket->io_handle.update_io_result) { + socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result); + } + /* Only report error if aws_socket_write() invoked this function and its write_request failed */ if (!parent_request_failed) { return AWS_OP_SUCCESS; diff --git a/source/qnx/host_resolver.c b/source/qnx/host_resolver.c deleted file mode 100644 index 46c3bd1a8..000000000 --- a/source/qnx/host_resolver.c +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0. - */ - -#include - -#include - -#include - -#include -#include -#include -#include - -int aws_default_dns_resolve( - struct aws_allocator *allocator, - const struct aws_string *host_name, - struct aws_array_list *output_addresses, - void *user_data) { - - (void)user_data; - struct addrinfo *result = NULL; - struct addrinfo *iter = NULL; - /* max string length for ipv6. 
*/ - socklen_t max_len = INET6_ADDRSTRLEN; - char address_buffer[max_len]; - - const char *hostname_cstr = aws_string_c_str(host_name); - AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolving host %s", hostname_cstr); - - struct addrinfo hints; - AWS_ZERO_STRUCT(hints); - hints.ai_family = AF_UNSPEC; - hints.ai_socktype = SOCK_STREAM; - hints.ai_flags = AI_ALL | AI_V4MAPPED; - - int err_code = getaddrinfo(hostname_cstr, NULL, &hints, &result); - - if (err_code) { - AWS_LOGF_ERROR( - AWS_LS_IO_DNS, "static: getaddrinfo failed with error_code %d: %s", err_code, gai_strerror(err_code)); - goto clean_up; - } - - for (iter = result; iter != NULL; iter = iter->ai_next) { - struct aws_host_address host_address; - - AWS_ZERO_ARRAY(address_buffer); - - if (iter->ai_family == AF_INET6) { - host_address.record_type = AWS_ADDRESS_RECORD_TYPE_AAAA; - inet_ntop(iter->ai_family, &((struct sockaddr_in6 *)iter->ai_addr)->sin6_addr, address_buffer, max_len); - } else { - host_address.record_type = AWS_ADDRESS_RECORD_TYPE_A; - inet_ntop(iter->ai_family, &((struct sockaddr_in *)iter->ai_addr)->sin_addr, address_buffer, max_len); - } - - size_t address_len = strlen(address_buffer); - const struct aws_string *address = - aws_string_new_from_array(allocator, (const uint8_t *)address_buffer, address_len); - - if (!address) { - goto clean_up; - } - - const struct aws_string *host_cpy = aws_string_new_from_string(allocator, host_name); - - if (!host_cpy) { - aws_string_destroy((void *)address); - goto clean_up; - } - - AWS_LOGF_DEBUG(AWS_LS_IO_DNS, "static: resolved record: %s", address_buffer); - - host_address.address = address; - host_address.weight = 0; - host_address.allocator = allocator; - host_address.use_count = 0; - host_address.connection_failure_count = 0; - host_address.host = host_cpy; - - if (aws_array_list_push_back(output_addresses, &host_address)) { - aws_host_address_clean_up(&host_address); - goto clean_up; - } - } - - freeaddrinfo(result); - return AWS_OP_SUCCESS; - -clean_up: - if (result) { - freeaddrinfo(result); - } - - if (err_code) { - switch (err_code) { - case EAI_FAIL: - case EAI_AGAIN: - return aws_raise_error(AWS_IO_DNS_QUERY_FAILED); - case EAI_MEMORY: - return aws_raise_error(AWS_ERROR_OOM); - case EAI_NONAME: - case EAI_SERVICE: - return aws_raise_error(AWS_IO_DNS_INVALID_NAME); - default: - return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); - } - } - - return AWS_OP_ERR; -} diff --git a/source/qnx/pipe.c b/source/qnx/pipe.c deleted file mode 100644 index 19eac19b1..000000000 --- a/source/qnx/pipe.c +++ /dev/null @@ -1,591 +0,0 @@ -/** - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0. - */ - -#include - -#include - -/* TODO Verified for QNX 8.0 only. */ -#define HAVE_PIPE2 1 - -#include -#include -#include - -struct read_end_impl { - struct aws_allocator *alloc; - struct aws_io_handle handle; - struct aws_event_loop *event_loop; - aws_pipe_on_readable_fn *on_readable_user_callback; - void *on_readable_user_data; - - /* Used in handshake for detecting whether user callback resulted in read-end being cleaned up. - * If clean_up() sees that the pointer is set, the bool it points to will get set true. 
*/ - bool *did_user_callback_clean_up_read_end; - - bool is_subscribed; -}; - -struct pipe_write_request { - struct aws_byte_cursor original_cursor; - struct aws_byte_cursor cursor; /* tracks progress of write */ - size_t num_bytes_written; - aws_pipe_on_write_completed_fn *user_callback; - void *user_data; - struct aws_linked_list_node list_node; - - /* True if the write-end is cleaned up while the user callback is being invoked */ - bool did_user_callback_clean_up_write_end; -}; - -struct write_end_impl { - struct aws_allocator *alloc; - struct aws_io_handle handle; - struct aws_event_loop *event_loop; - struct aws_linked_list write_list; - - /* Valid while invoking user callback on a completed write request. */ - struct pipe_write_request *currently_invoking_write_callback; - - bool is_writable; - - /* Future optimization idea: avoid an allocation on each write by keeping 1 pre-allocated pipe_write_request around - * and re-using it whenever possible */ -}; - -static void s_write_end_on_event( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - void *user_data); - -static int s_translate_posix_error(int err) { - AWS_ASSERT(err); - - switch (err) { - case EPIPE: - return AWS_IO_BROKEN_PIPE; - default: - return AWS_ERROR_SYS_CALL_FAILURE; - } -} - -static int s_raise_posix_error(int err) { - return aws_raise_error(s_translate_posix_error(err)); -} - -AWS_IO_API int aws_open_nonblocking_posix_pipe(int pipe_fds[2]) { - int err; - -#if HAVE_PIPE2 - err = pipe2(pipe_fds, O_NONBLOCK | O_CLOEXEC); - if (err) { - return s_raise_posix_error(err); - } - - return AWS_OP_SUCCESS; -#else - err = pipe(pipe_fds); - if (err) { - return s_raise_posix_error(err); - } - - for (int i = 0; i < 2; ++i) { - int flags = fcntl(pipe_fds[i], F_GETFL); - if (flags == -1) { - s_raise_posix_error(err); - goto error; - } - - flags |= O_NONBLOCK | O_CLOEXEC; - if (fcntl(pipe_fds[i], F_SETFL, flags) == -1) { - s_raise_posix_error(err); - goto error; - } - } - - return AWS_OP_SUCCESS; -error: - close(pipe_fds[0]); - close(pipe_fds[1]); - return AWS_OP_ERR; -#endif -} - -int aws_pipe_init( - struct aws_pipe_read_end *read_end, - struct aws_event_loop *read_end_event_loop, - struct aws_pipe_write_end *write_end, - struct aws_event_loop *write_end_event_loop, - struct aws_allocator *allocator) { - - AWS_ASSERT(read_end); - AWS_ASSERT(read_end_event_loop); - AWS_ASSERT(write_end); - AWS_ASSERT(write_end_event_loop); - AWS_ASSERT(allocator); - - AWS_ZERO_STRUCT(*read_end); - AWS_ZERO_STRUCT(*write_end); - - struct read_end_impl *read_impl = NULL; - struct write_end_impl *write_impl = NULL; - int err; - - /* Open pipe */ - int pipe_fds[2]; - err = aws_open_nonblocking_posix_pipe(pipe_fds); - if (err) { - return AWS_OP_ERR; - } - - /* Init read-end */ - read_impl = aws_mem_calloc(allocator, 1, sizeof(struct read_end_impl)); - if (!read_impl) { - goto error; - } - - read_impl->alloc = allocator; - read_impl->handle.data.fd = pipe_fds[0]; - read_impl->event_loop = read_end_event_loop; - - /* Init write-end */ - write_impl = aws_mem_calloc(allocator, 1, sizeof(struct write_end_impl)); - if (!write_impl) { - goto error; - } - - write_impl->alloc = allocator; - write_impl->handle.data.fd = pipe_fds[1]; - write_impl->event_loop = write_end_event_loop; - write_impl->is_writable = true; /* Assume pipe is writable to start. 
Even if it's not, things shouldn't break */ - aws_linked_list_init(&write_impl->write_list); - - read_end->impl_data = read_impl; - write_end->impl_data = write_impl; - - err = aws_event_loop_subscribe_to_io_events( - write_end_event_loop, &write_impl->handle, AWS_IO_EVENT_TYPE_WRITABLE, s_write_end_on_event, write_end); - if (err) { - goto error; - } - - return AWS_OP_SUCCESS; - -error: - close(pipe_fds[0]); - close(pipe_fds[1]); - - if (read_impl) { - aws_mem_release(allocator, read_impl); - } - - if (write_impl) { - aws_mem_release(allocator, write_impl); - } - - read_end->impl_data = NULL; - write_end->impl_data = NULL; - - return AWS_OP_ERR; -} - -int aws_pipe_clean_up_read_end(struct aws_pipe_read_end *read_end) { - struct read_end_impl *read_impl = read_end->impl_data; - if (!read_impl) { - return aws_raise_error(AWS_IO_BROKEN_PIPE); - } - - if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - if (read_impl->is_subscribed) { - int err = aws_pipe_unsubscribe_from_readable_events(read_end); - if (err) { - return AWS_OP_ERR; - } - } - - /* If the event-handler is invoking a user callback, let it know that the read-end was cleaned up */ - if (read_impl->did_user_callback_clean_up_read_end) { - *read_impl->did_user_callback_clean_up_read_end = true; - } - - close(read_impl->handle.data.fd); - - aws_mem_release(read_impl->alloc, read_impl); - AWS_ZERO_STRUCT(*read_end); - return AWS_OP_SUCCESS; -} - -struct aws_event_loop *aws_pipe_get_read_end_event_loop(const struct aws_pipe_read_end *read_end) { - const struct read_end_impl *read_impl = read_end->impl_data; - if (!read_impl) { - aws_raise_error(AWS_IO_BROKEN_PIPE); - return NULL; - } - - return read_impl->event_loop; -} - -struct aws_event_loop *aws_pipe_get_write_end_event_loop(const struct aws_pipe_write_end *write_end) { - const struct write_end_impl *write_impl = write_end->impl_data; - if (!write_impl) { - aws_raise_error(AWS_IO_BROKEN_PIPE); - return NULL; - } - - return write_impl->event_loop; -} - -int aws_pipe_read(struct aws_pipe_read_end *read_end, struct aws_byte_buf *dst_buffer, size_t *num_bytes_read) { - AWS_ASSERT(dst_buffer && dst_buffer->buffer); - - struct read_end_impl *read_impl = read_end->impl_data; - if (!read_impl) { - return aws_raise_error(AWS_IO_BROKEN_PIPE); - } - - if (num_bytes_read) { - *num_bytes_read = 0; - } - - size_t num_bytes_to_read = dst_buffer->capacity - dst_buffer->len; - - ssize_t read_val = read(read_impl->handle.data.fd, dst_buffer->buffer + dst_buffer->len, num_bytes_to_read); - - if (read_val < 0) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { - // Return results back to event loop. - if (read_impl->handle.update_io_result) { - struct aws_io_handle_io_op_result io_op_result; - AWS_ZERO_STRUCT(io_op_result); - io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; - read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result); - } - return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); - } - return s_raise_posix_error(errno_value); - } else if (read_val == 0) { - // Return results back to event loop. 
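        /* A 0-byte read() on a pipe means the write end was closed (EOF). The feedback
         * below reports that to the event loop; AWS_IO_SOCKET_CLOSED is, as far as this
         * file's error set goes, the closest existing code for "peer went away". */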
- if (read_impl->handle.update_io_result) { - struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - io_op_result.error_code = AWS_IO_SOCKET_CLOSED; - read_impl->handle.update_io_result(read_impl->event_loop, &read_impl->handle, &io_op_result); - } - } - - /* Success */ - dst_buffer->len += read_val; - - if (num_bytes_read) { - *num_bytes_read = read_val; - } - - return AWS_OP_SUCCESS; -} - -static void s_read_end_on_event( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - void *user_data) { - - (void)event_loop; - (void)handle; - - /* Note that it should be impossible for this to run after read-end has been unsubscribed or cleaned up */ - struct aws_pipe_read_end *read_end = user_data; - struct read_end_impl *read_impl = read_end->impl_data; - AWS_ASSERT(read_impl); - AWS_ASSERT(read_impl->event_loop == event_loop); - AWS_ASSERT(&read_impl->handle == handle); - AWS_ASSERT(read_impl->is_subscribed); - AWS_ASSERT(events != 0); - AWS_ASSERT(read_impl->did_user_callback_clean_up_read_end == NULL); - - /* Set up handshake, so we can be informed if the read-end is cleaned up while invoking a user callback */ - bool did_user_callback_clean_up_read_end = false; - read_impl->did_user_callback_clean_up_read_end = &did_user_callback_clean_up_read_end; - - /* If readable event received, tell user to try and read, even if "error" events have also occurred. */ - if (events & AWS_IO_EVENT_TYPE_READABLE) { - read_impl->on_readable_user_callback(read_end, AWS_ERROR_SUCCESS, read_impl->on_readable_user_data); - - if (did_user_callback_clean_up_read_end) { - return; - } - - events &= ~AWS_IO_EVENT_TYPE_READABLE; - } - - if (events) { - /* Check that user didn't unsubscribe in the previous callback */ - if (read_impl->is_subscribed) { - read_impl->on_readable_user_callback(read_end, AWS_IO_BROKEN_PIPE, read_impl->on_readable_user_data); - - if (did_user_callback_clean_up_read_end) { - return; - } - } - } - - read_impl->did_user_callback_clean_up_read_end = NULL; -} - -int aws_pipe_subscribe_to_readable_events( - struct aws_pipe_read_end *read_end, - aws_pipe_on_readable_fn *on_readable, - void *user_data) { - - AWS_ASSERT(on_readable); - - struct read_end_impl *read_impl = read_end->impl_data; - if (!read_impl) { - return aws_raise_error(AWS_IO_BROKEN_PIPE); - } - - if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - if (read_impl->is_subscribed) { - return aws_raise_error(AWS_ERROR_IO_ALREADY_SUBSCRIBED); - } - - read_impl->is_subscribed = true; - read_impl->on_readable_user_callback = on_readable; - read_impl->on_readable_user_data = user_data; - - int err = aws_event_loop_subscribe_to_io_events( - read_impl->event_loop, &read_impl->handle, AWS_IO_EVENT_TYPE_READABLE, s_read_end_on_event, read_end); - if (err) { - read_impl->is_subscribed = false; - read_impl->on_readable_user_callback = NULL; - read_impl->on_readable_user_data = NULL; - - return AWS_OP_ERR; - } - - return AWS_OP_SUCCESS; -} - -int aws_pipe_unsubscribe_from_readable_events(struct aws_pipe_read_end *read_end) { - struct read_end_impl *read_impl = read_end->impl_data; - if (!read_impl) { - return aws_raise_error(AWS_IO_BROKEN_PIPE); - } - - if (!aws_event_loop_thread_is_callers_thread(read_impl->event_loop)) { - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - if (!read_impl->is_subscribed) { - return 
aws_raise_error(AWS_ERROR_IO_NOT_SUBSCRIBED); - } - - int err = aws_event_loop_unsubscribe_from_io_events(read_impl->event_loop, &read_impl->handle); - if (err) { - return AWS_OP_ERR; - } - - read_impl->is_subscribed = false; - read_impl->on_readable_user_callback = NULL; - read_impl->on_readable_user_data = NULL; - - return AWS_OP_SUCCESS; -} - -/* Pop front write request, invoke its callback, and delete it. - * Returns whether the callback resulted in the write-end getting cleaned up */ -static bool s_write_end_complete_front_write_request(struct aws_pipe_write_end *write_end, int error_code) { - struct write_end_impl *write_impl = write_end->impl_data; - - AWS_ASSERT(!aws_linked_list_empty(&write_impl->write_list)); - struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list); - struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); - - struct aws_allocator *alloc = write_impl->alloc; - - /* Let the write-end know that a callback is in process, so the write-end can inform the callback - * whether it resulted in clean_up() being called. */ - bool write_end_cleaned_up_during_callback = false; - struct pipe_write_request *prev_invoking_request = write_impl->currently_invoking_write_callback; - write_impl->currently_invoking_write_callback = request; - - if (request->user_callback) { - request->user_callback(write_end, error_code, request->original_cursor, request->user_data); - write_end_cleaned_up_during_callback = request->did_user_callback_clean_up_write_end; - } - - if (!write_end_cleaned_up_during_callback) { - write_impl->currently_invoking_write_callback = prev_invoking_request; - } - - aws_mem_release(alloc, request); - - return write_end_cleaned_up_during_callback; -} - -/* Process write requests as long as the pipe remains writable */ -static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) { - struct write_end_impl *write_impl = write_end->impl_data; - AWS_ASSERT(write_impl); - - while (!aws_linked_list_empty(&write_impl->write_list)) { - struct aws_linked_list_node *node = aws_linked_list_front(&write_impl->write_list); - struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); - - int completed_error_code = AWS_ERROR_SUCCESS; - - if (request->cursor.len > 0) { - ssize_t write_val = write(write_impl->handle.data.fd, request->cursor.ptr, request->cursor.len); - - if (write_val < 0) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { - /* The pipe is no longer writable. Bail out */ - write_impl->is_writable = false; - - // Return results back to event loop. - if (write_impl->handle.update_io_result) { - struct aws_io_handle_io_op_result io_op_result; - AWS_ZERO_STRUCT(io_op_result); - io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; - write_impl->handle.update_io_result(write_impl->event_loop, &write_impl->handle, &io_op_result); - } - - return; - } - - /* A non-recoverable error occurred during this write */ - completed_error_code = s_translate_posix_error(errno_value); - - } else { - aws_byte_cursor_advance(&request->cursor, write_val); - - if (request->cursor.len > 0) { - /* There was a partial write, loop again to try and write the rest. */ - continue; - } - } - } - - /* If we got this far in the loop, then the write request is complete. - * Note that the callback may result in the pipe being cleaned up. 
*/ - bool write_end_cleaned_up = s_write_end_complete_front_write_request(write_end, completed_error_code); - if (write_end_cleaned_up) { - /* Bail out! Any remaining requests were canceled during clean_up() */ - return; - } - } -} - -/* Handle events on the write-end's file handle */ -static void s_write_end_on_event( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - void *user_data) { - - (void)event_loop; - (void)handle; - - /* Note that it should be impossible for this to run after write-end has been unsubscribed or cleaned up */ - struct aws_pipe_write_end *write_end = user_data; - struct write_end_impl *write_impl = write_end->impl_data; - AWS_ASSERT(write_impl); - AWS_ASSERT(write_impl->event_loop == event_loop); - AWS_ASSERT(&write_impl->handle == handle); - - /* Only care about the writable event. */ - if ((events & AWS_IO_EVENT_TYPE_WRITABLE) == 0) { - return; - } - - write_impl->is_writable = true; - - s_write_end_process_requests(write_end); -} - -int aws_pipe_write( - struct aws_pipe_write_end *write_end, - struct aws_byte_cursor src_buffer, - aws_pipe_on_write_completed_fn *on_completed, - void *user_data) { - - AWS_ASSERT(src_buffer.ptr); - - struct write_end_impl *write_impl = write_end->impl_data; - if (!write_impl) { - return aws_raise_error(AWS_IO_BROKEN_PIPE); - } - - if (!aws_event_loop_thread_is_callers_thread(write_impl->event_loop)) { - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - struct pipe_write_request *request = aws_mem_calloc(write_impl->alloc, 1, sizeof(struct pipe_write_request)); - if (!request) { - return AWS_OP_ERR; - } - - request->original_cursor = src_buffer; - request->cursor = src_buffer; - request->user_callback = on_completed; - request->user_data = user_data; - - aws_linked_list_push_back(&write_impl->write_list, &request->list_node); - - /* If the pipe is writable, process the request (unless the pipe is already in the middle of processing, which could - * happen if this aws_pipe_write() call was made by another write's completion callback) */ - if (write_impl->is_writable && !write_impl->currently_invoking_write_callback) { - s_write_end_process_requests(write_end); - } - - return AWS_OP_SUCCESS; -} - -int aws_pipe_clean_up_write_end(struct aws_pipe_write_end *write_end) { - struct write_end_impl *write_impl = write_end->impl_data; - if (!write_impl) { - return aws_raise_error(AWS_IO_BROKEN_PIPE); - } - - if (!aws_event_loop_thread_is_callers_thread(write_impl->event_loop)) { - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - int err = aws_event_loop_unsubscribe_from_io_events(write_impl->event_loop, &write_impl->handle); - if (err) { - return AWS_OP_ERR; - } - - close(write_impl->handle.data.fd); - - /* Zero out write-end before invoking user callbacks so that it won't work anymore with public functions. */ - AWS_ZERO_STRUCT(*write_end); - - /* If a request callback is currently being invoked, let it know that the write-end was cleaned up */ - if (write_impl->currently_invoking_write_callback) { - write_impl->currently_invoking_write_callback->did_user_callback_clean_up_write_end = true; - } - - /* Force any outstanding write requests to complete with an error status.
*/ - while (!aws_linked_list_empty(&write_impl->write_list)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list); - struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node); - if (request->user_callback) { - request->user_callback(NULL, AWS_IO_BROKEN_PIPE, request->original_cursor, request->user_data); - } - aws_mem_release(write_impl->alloc, request); - } - - aws_mem_release(write_impl->alloc, write_impl); - return AWS_OP_SUCCESS; -} diff --git a/source/qnx/shared_library.c b/source/qnx/shared_library.c deleted file mode 100644 index 751c99bc2..000000000 --- a/source/qnx/shared_library.c +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0. - */ - -#include <aws/io/shared_library.h> - -#include <aws/io/logging.h> - -#include <dlfcn.h> - -static const char *s_null = "<NULL>"; -static const char *s_unknown_error = "<UNKNOWN>"; - -int aws_shared_library_init(struct aws_shared_library *library, const char *library_path) { - AWS_ZERO_STRUCT(*library); - - library->library_handle = dlopen(library_path, RTLD_LAZY); - if (library->library_handle == NULL) { - const char *error = dlerror(); - AWS_LOGF_ERROR( - AWS_LS_IO_SHARED_LIBRARY, - "id=%p: Failed to load shared library at path \"%s\" with error: %s", - (void *)library, - library_path ? library_path : s_null, - error ? error : s_unknown_error); - return aws_raise_error(AWS_IO_SHARED_LIBRARY_LOAD_FAILURE); - } - - return AWS_OP_SUCCESS; -} - -void aws_shared_library_clean_up(struct aws_shared_library *library) { - if (library && library->library_handle) { - dlclose(library->library_handle); - library->library_handle = NULL; - } -} - -int aws_shared_library_find_function( - struct aws_shared_library *library, - const char *symbol_name, - aws_generic_function *function_address) { - if (library == NULL || library->library_handle == NULL) { - return aws_raise_error(AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE); - } - - /* - * Suggested work around for (undefined behavior) cast from void * to function pointer - * in POSIX.1-2003 standard, at least according to dlsym man page code sample. - */ - *(void **)(function_address) = dlsym(library->library_handle, symbol_name); - - if (*function_address == NULL) { - const char *error = dlerror(); - AWS_LOGF_ERROR( - AWS_LS_IO_SHARED_LIBRARY, - "id=%p: Failed to find shared library symbol \"%s\" with error: %s", - (void *)library, - symbol_name ? symbol_name : s_null, - error ? error : s_unknown_error); - return aws_raise_error(AWS_IO_SHARED_LIBRARY_FIND_SYMBOL_FAILURE); - } - - return AWS_OP_SUCCESS; -} diff --git a/source/qnx/socket.c b/source/qnx/socket.c deleted file mode 100644 index 30dd12de2..000000000 --- a/source/qnx/socket.c +++ /dev/null @@ -1,1874 +0,0 @@ -/** - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * SPDX-License-Identifier: Apache-2.0. - */ - -#include <aws/io/socket.h> - -#include <aws/common/clock.h> -#include <aws/common/condition_variable.h> -#include <aws/common/mutex.h> -#include <aws/common/string.h> - -#include <aws/io/event_loop.h> -#include <aws/io/logging.h> - -#include <arpa/inet.h> -#include <errno.h> -#include <fcntl.h> -#include <netinet/tcp.h> -#include <sys/socket.h> -#include <sys/types.h> -#include <sys/un.h> -#include <unistd.h> - -/* other than CONNECTED_READ | CONNECTED_WRITE, - * a socket is only in one of these states at a time.
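 *
 * A short sketch of how the flags combine (it mirrors aws_socket_shutdown_dir()
 * and the connect path later in this file):
 *
 *     socket->state = CONNECTED_READ | CONNECTED_WRITE;  // full-duplex connection
 *     socket->state &= ~CONNECTED_READ;                  // after shutdown in the read direction
 *     bool can_write = (socket->state & CONNECTED_WRITE) != 0;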
*/ -enum socket_state { - INIT = 0x01, - CONNECTING = 0x02, - CONNECTED_READ = 0x04, - CONNECTED_WRITE = 0x08, - BOUND = 0x10, - LISTENING = 0x20, - TIMEDOUT = 0x40, - ERROR = 0x80, - CLOSED, -}; - -static int s_convert_domain(enum aws_socket_domain domain) { - switch (domain) { - case AWS_SOCKET_IPV4: - return AF_INET; - case AWS_SOCKET_IPV6: - return AF_INET6; - case AWS_SOCKET_LOCAL: - return AF_UNIX; - default: - AWS_ASSERT(0); - return AF_INET; - } -} - -static int s_convert_type(enum aws_socket_type type) { - switch (type) { - case AWS_SOCKET_STREAM: - return SOCK_STREAM; - case AWS_SOCKET_DGRAM: - return SOCK_DGRAM; - default: - AWS_ASSERT(0); - return SOCK_STREAM; - } -} - -static int s_determine_socket_error(int error) { - switch (error) { - case ECONNREFUSED: - return AWS_IO_SOCKET_CONNECTION_REFUSED; - case ECONNRESET: - return AWS_IO_SOCKET_CLOSED; - case ETIMEDOUT: - return AWS_IO_SOCKET_TIMEOUT; - case EHOSTUNREACH: - case ENETUNREACH: - return AWS_IO_SOCKET_NO_ROUTE_TO_HOST; - case EADDRNOTAVAIL: - return AWS_IO_SOCKET_INVALID_ADDRESS; - case ENETDOWN: - return AWS_IO_SOCKET_NETWORK_DOWN; - case ECONNABORTED: - return AWS_IO_SOCKET_CONNECT_ABORTED; - case EADDRINUSE: - return AWS_IO_SOCKET_ADDRESS_IN_USE; - case ENOBUFS: - case ENOMEM: - return AWS_ERROR_OOM; - case EAGAIN: - return AWS_IO_READ_WOULD_BLOCK; - case EMFILE: - case ENFILE: - return AWS_ERROR_MAX_FDS_EXCEEDED; - case ENOENT: - case EINVAL: - return AWS_ERROR_FILE_INVALID_PATH; - case EAFNOSUPPORT: - return AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY; - case EACCES: - return AWS_ERROR_NO_PERMISSION; - default: - return AWS_IO_SOCKET_NOT_CONNECTED; - } -} - -static int s_create_socket(struct aws_socket *sock, const struct aws_socket_options *options) { - - int fd = socket(s_convert_domain(options->domain), s_convert_type(options->type), 0); - int errno_value = errno; /* Always cache errno before potential side-effect */ - - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: initializing with domain %d and type %d", - (void *)sock, - fd, - options->domain, - options->type); - if (fd != -1) { - int flags = fcntl(fd, F_GETFL, 0); - flags |= O_NONBLOCK | O_CLOEXEC; - int success = fcntl(fd, F_SETFL, flags); - (void)success; - sock->io_handle.data.fd = fd; - sock->io_handle.additional_data = NULL; - return aws_socket_set_options(sock, options); - } - - int aws_error = s_determine_socket_error(errno_value); - return aws_raise_error(aws_error); -} - -struct posix_socket_connect_args { - struct aws_task task; - struct aws_allocator *allocator; - struct aws_socket *socket; -}; - -struct posix_socket { - struct aws_linked_list write_queue; - struct aws_linked_list written_queue; - struct aws_task written_task; - struct posix_socket_connect_args *connect_args; - /* Note that only the posix_socket impl part is refcounted. - * The public aws_socket can be a stack variable and cleaned up synchronously - * (by blocking until the event-loop cleans up the impl part). 
- * In hindsight, aws_socket should have been heap-allocated and refcounted, but alas */ - struct aws_ref_count internal_refcount; - struct aws_allocator *allocator; - bool written_task_scheduled; - bool currently_subscribed; - bool continue_accept; - bool *close_happened; -}; - -static void s_socket_destroy_impl(void *user_data) { - struct posix_socket *socket_impl = user_data; - aws_mem_release(socket_impl->allocator, socket_impl); -} - -static int s_socket_init( - struct aws_socket *socket, - struct aws_allocator *alloc, - const struct aws_socket_options *options, - int existing_socket_fd) { - AWS_ASSERT(options); - AWS_ZERO_STRUCT(*socket); - - struct posix_socket *posix_socket = aws_mem_calloc(alloc, 1, sizeof(struct posix_socket)); - if (!posix_socket) { - socket->impl = NULL; - return AWS_OP_ERR; - } - - socket->allocator = alloc; - socket->io_handle.data.fd = -1; - socket->state = INIT; - socket->options = *options; - - if (existing_socket_fd < 0) { - int err = s_create_socket(socket, options); - if (err) { - aws_mem_release(alloc, posix_socket); - socket->impl = NULL; - return AWS_OP_ERR; - } - } else { - socket->io_handle = (struct aws_io_handle){ - .data = {.fd = existing_socket_fd}, - .additional_data = NULL, - }; - aws_socket_set_options(socket, options); - } - - aws_linked_list_init(&posix_socket->write_queue); - aws_linked_list_init(&posix_socket->written_queue); - posix_socket->currently_subscribed = false; - posix_socket->continue_accept = false; - aws_ref_count_init(&posix_socket->internal_refcount, posix_socket, s_socket_destroy_impl); - posix_socket->allocator = alloc; - posix_socket->connect_args = NULL; - posix_socket->close_happened = NULL; - socket->impl = posix_socket; - return AWS_OP_SUCCESS; -} - -int aws_socket_init(struct aws_socket *socket, struct aws_allocator *alloc, const struct aws_socket_options *options) { - AWS_ASSERT(options); - return s_socket_init(socket, alloc, options, -1); -} - -void aws_socket_clean_up(struct aws_socket *socket) { - if (!socket->impl) { - /* protect from double clean */ - return; - } - - int fd_for_logging = socket->io_handle.data.fd; /* socket's fd gets reset before final log */ - (void)fd_for_logging; - - if (aws_socket_is_open(socket)) { - AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: is still open, closing...", (void *)socket, fd_for_logging); - aws_socket_close(socket); - } - struct posix_socket *socket_impl = socket->impl; - - if (aws_ref_count_release(&socket_impl->internal_refcount) != 0) { - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: is still pending io letting it dangle and cleaning up later.", - (void *)socket, - fd_for_logging); - } - - AWS_ZERO_STRUCT(*socket); - socket->io_handle.data.fd = -1; -} - -/* Update socket->local_endpoint based on the results of getsockname() */ -static int s_update_local_endpoint(struct aws_socket *socket) { - struct aws_socket_endpoint tmp_endpoint; - AWS_ZERO_STRUCT(tmp_endpoint); - - struct sockaddr_storage address; - AWS_ZERO_STRUCT(address); - socklen_t address_size = sizeof(address); - - if (getsockname(socket->io_handle.data.fd, (struct sockaddr *)&address, &address_size) != 0) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: getsockname() failed with error %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - int aws_error = s_determine_socket_error(errno_value); - return aws_raise_error(aws_error); - } - - if (address.ss_family == AF_INET) { - struct sockaddr_in *s = 
(struct sockaddr_in *)&address; - tmp_endpoint.port = ntohs(s->sin_port); - if (inet_ntop(AF_INET, &s->sin_addr, tmp_endpoint.address, sizeof(tmp_endpoint.address)) == NULL) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: inet_ntop() failed with error %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - int aws_error = s_determine_socket_error(errno_value); - return aws_raise_error(aws_error); - } - } else if (address.ss_family == AF_INET6) { - struct sockaddr_in6 *s = (struct sockaddr_in6 *)&address; - tmp_endpoint.port = ntohs(s->sin6_port); - if (inet_ntop(AF_INET6, &s->sin6_addr, tmp_endpoint.address, sizeof(tmp_endpoint.address)) == NULL) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: inet_ntop() failed with error %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - int aws_error = s_determine_socket_error(errno_value); - return aws_raise_error(aws_error); - } - } else if (address.ss_family == AF_UNIX) { - struct sockaddr_un *s = (struct sockaddr_un *)&address; - - /* Ensure there's a null-terminator. - * On some platforms it may be missing when the path gets very long. See: - * https://man7.org/linux/man-pages/man7/unix.7.html#BUGS - * But let's keep it simple, and not deal with that madness until someone demands it. */ - size_t sun_len; - if (aws_secure_strlen(s->sun_path, sizeof(tmp_endpoint.address), &sun_len)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: UNIX domain socket name is too long", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_INVALID_ADDRESS); - } - memcpy(tmp_endpoint.address, s->sun_path, sun_len); - } else { - AWS_ASSERT(0); - return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); - } - - socket->local_endpoint = tmp_endpoint; - return AWS_OP_SUCCESS; -} - -static void s_on_connection_error(struct aws_socket *socket, int error); - -static int s_on_connection_success(struct aws_socket *socket) { - - struct aws_event_loop *event_loop = socket->event_loop; - struct posix_socket *socket_impl = socket->impl; - - if (socket_impl->currently_subscribed) { - aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); - socket_impl->currently_subscribed = false; - } - - socket->event_loop = NULL; - - int connect_result; - socklen_t result_length = sizeof(connect_result); - - if (getsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_ERROR, &connect_result, &result_length) < 0) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: failed to determine connection error %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - int aws_error = s_determine_socket_error(errno_value); - aws_raise_error(aws_error); - s_on_connection_error(socket, aws_error); - return AWS_OP_ERR; - } - - if (connect_result) { - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: connection error %d", - (void *)socket, - socket->io_handle.data.fd, - connect_result); - int aws_error = s_determine_socket_error(connect_result); - aws_raise_error(aws_error); - s_on_connection_error(socket, aws_error); - return AWS_OP_ERR; - } - - AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: connection success", (void *)socket, socket->io_handle.data.fd); - - if (s_update_local_endpoint(socket)) { - s_on_connection_error(socket, 
aws_last_error()); - return AWS_OP_ERR; - } - - socket->state = CONNECTED_WRITE | CONNECTED_READ; - - if (aws_socket_assign_to_event_loop(socket, event_loop)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: assignment to event loop %p failed with error %d", - (void *)socket, - socket->io_handle.data.fd, - (void *)event_loop, - aws_last_error()); - s_on_connection_error(socket, aws_last_error()); - return AWS_OP_ERR; - } - - socket->connection_result_fn(socket, AWS_ERROR_SUCCESS, socket->connect_accept_user_data); - - return AWS_OP_SUCCESS; -} - -static void s_on_connection_error(struct aws_socket *socket, int error) { - socket->state = ERROR; - AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: connection failure", (void *)socket, socket->io_handle.data.fd); - if (socket->connection_result_fn) { - socket->connection_result_fn(socket, error, socket->connect_accept_user_data); - } else if (socket->accept_result_fn) { - socket->accept_result_fn(socket, error, NULL, socket->connect_accept_user_data); - } -} - -/* the next two callbacks compete based on which one runs first. if s_socket_connect_event - * comes back first, then we set socket_args->socket = NULL and continue on with the connection. - * if s_handle_socket_timeout() runs first, it sees socket_args->socket is NULL and just cleans up its memory. - * s_handle_socket_timeout() will always run so the memory for socket_connect_args is always cleaned up there. */ -static void s_socket_connect_event( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - void *user_data) { - - (void)event_loop; - (void)handle; - - struct posix_socket_connect_args *socket_args = (struct posix_socket_connect_args *)user_data; - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "fd=%d: connection activity handler triggered", handle->data.fd); - - if (socket_args->socket) { - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: has not timed out yet, proceeding with connection.", - (void *)socket_args->socket, - handle->data.fd); - - struct posix_socket *socket_impl = socket_args->socket->impl; - if (!(events & AWS_IO_EVENT_TYPE_ERROR || events & AWS_IO_EVENT_TYPE_CLOSED) && - (events & AWS_IO_EVENT_TYPE_READABLE || events & AWS_IO_EVENT_TYPE_WRITABLE)) { - struct aws_socket *socket = socket_args->socket; - socket_args->socket = NULL; - socket_impl->connect_args = NULL; - s_on_connection_success(socket); - return; - } - - int aws_error = aws_socket_get_error(socket_args->socket); - /* we'll get another notification. */ - if (aws_error == AWS_IO_READ_WOULD_BLOCK) { - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: spurious event, waiting for another notification.", - (void *)socket_args->socket, - handle->data.fd); - - // Return results back to event loop.
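            /* A spurious wake-up made no progress; reporting AWS_IO_READ_WOULD_BLOCK
             * through the feedback hook below lets the event loop record that this
             * readiness notification completed nothing. */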
- if (handle->update_io_result) { - struct aws_io_handle_io_op_result io_op_result; - AWS_ZERO_STRUCT(io_op_result); - io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; - handle->update_io_result(event_loop, handle, &io_op_result); - } - - return; - } - - struct aws_socket *socket = socket_args->socket; - socket_args->socket = NULL; - socket_impl->connect_args = NULL; - aws_raise_error(aws_error); - s_on_connection_error(socket, aws_error); - } -} - -static void s_handle_socket_timeout(struct aws_task *task, void *args, aws_task_status status) { - (void)task; - (void)status; - - struct posix_socket_connect_args *socket_args = args; - - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "task_id=%p: timeout task triggered, evaluating timeouts.", (void *)task); - /* successful connection will have nulled out connect_args->socket */ - if (socket_args->socket) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: timed out, shutting down.", - (void *)socket_args->socket, - socket_args->socket->io_handle.data.fd); - - socket_args->socket->state = TIMEDOUT; - int error_code = AWS_IO_SOCKET_TIMEOUT; - - if (status == AWS_TASK_STATUS_RUN_READY) { - aws_event_loop_unsubscribe_from_io_events(socket_args->socket->event_loop, &socket_args->socket->io_handle); - } else { - error_code = AWS_IO_EVENT_LOOP_SHUTDOWN; - aws_event_loop_free_io_event_resources(socket_args->socket->event_loop, &socket_args->socket->io_handle); - } - socket_args->socket->event_loop = NULL; - struct posix_socket *socket_impl = socket_args->socket->impl; - socket_impl->currently_subscribed = false; - aws_raise_error(error_code); - struct aws_socket *socket = socket_args->socket; - /*socket close sets socket_args->socket to NULL and - * socket_impl->connect_args to NULL. */ - aws_socket_close(socket); - s_on_connection_error(socket, error_code); - } - - aws_mem_release(socket_args->allocator, socket_args); -} - -/* this is used simply for moving a connect_success callback when the connect finished immediately - * (like for unix domain sockets) into the event loop's thread. Also note, in that case there was no - * timeout task scheduled, so in this case the socket_args are cleaned up. 
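 *
 * The ownership rule these connect callbacks follow, sketched (names from this file):
 *
 *     if (socket_args->socket) {           // nobody has claimed the socket yet
 *         struct aws_socket *socket = socket_args->socket;
 *         socket_args->socket = NULL;      // claim it; the competing callback becomes a no-op
 *         s_on_connection_success(socket); // (or the error/timeout path, as appropriate)
 *     }
 *     aws_mem_release(socket_args->allocator, socket_args); // the scheduled task always frees the args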
*/ -static void s_run_connect_success(struct aws_task *task, void *arg, enum aws_task_status status) { - (void)task; - struct posix_socket_connect_args *socket_args = arg; - - if (socket_args->socket) { - struct posix_socket *socket_impl = socket_args->socket->impl; - if (status == AWS_TASK_STATUS_RUN_READY) { - s_on_connection_success(socket_args->socket); - } else { - aws_raise_error(AWS_IO_SOCKET_CONNECT_ABORTED); - socket_args->socket->event_loop = NULL; - s_on_connection_error(socket_args->socket, AWS_IO_SOCKET_CONNECT_ABORTED); - } - socket_impl->connect_args = NULL; - } - - aws_mem_release(socket_args->allocator, socket_args); -} - -static inline int s_convert_pton_error(int pton_code, int errno_value) { - if (pton_code == 0) { - return AWS_IO_SOCKET_INVALID_ADDRESS; - } - - return s_determine_socket_error(errno_value); -} - -struct socket_address { - union sock_addr_types { - struct sockaddr_in addr_in; - struct sockaddr_in6 addr_in6; - struct sockaddr_un un_addr; - } sock_addr_types; -}; - -int aws_socket_connect( - struct aws_socket *socket, - const struct aws_socket_endpoint *remote_endpoint, - struct aws_event_loop *event_loop, - aws_socket_on_connection_result_fn *on_connection_result, - void *user_data) { - AWS_ASSERT(event_loop); - AWS_ASSERT(!socket->event_loop); - - AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: beginning connect.", (void *)socket, socket->io_handle.data.fd); - - if (socket->event_loop) { - return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); - } - - if (socket->options.type != AWS_SOCKET_DGRAM) { - AWS_ASSERT(on_connection_result); - if (socket->state != INIT) { - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - } else { /* UDP socket */ - /* UDP sockets jump to CONNECT_READ if bind is called first */ - if (socket->state != CONNECTED_READ && socket->state != INIT) { - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - } - - size_t address_strlen; - if (aws_secure_strlen(remote_endpoint->address, AWS_ADDRESS_MAX_LEN, &address_strlen)) { - return AWS_OP_ERR; - } - - if (aws_socket_validate_port_for_connect(remote_endpoint->port, socket->options.domain)) { - return AWS_OP_ERR; - } - - struct socket_address address; - AWS_ZERO_STRUCT(address); - socklen_t sock_size = 0; - int pton_err = 1; - if (socket->options.domain == AWS_SOCKET_IPV4) { - pton_err = inet_pton(AF_INET, remote_endpoint->address, &address.sock_addr_types.addr_in.sin_addr); - address.sock_addr_types.addr_in.sin_port = htons((uint16_t)remote_endpoint->port); - address.sock_addr_types.addr_in.sin_family = AF_INET; - sock_size = sizeof(address.sock_addr_types.addr_in); - } else if (socket->options.domain == AWS_SOCKET_IPV6) { - pton_err = inet_pton(AF_INET6, remote_endpoint->address, &address.sock_addr_types.addr_in6.sin6_addr); - address.sock_addr_types.addr_in6.sin6_port = htons((uint16_t)remote_endpoint->port); - address.sock_addr_types.addr_in6.sin6_family = AF_INET6; - sock_size = sizeof(address.sock_addr_types.addr_in6); - } else if (socket->options.domain == AWS_SOCKET_LOCAL) { - address.sock_addr_types.un_addr.sun_family = AF_UNIX; - strncpy(address.sock_addr_types.un_addr.sun_path, remote_endpoint->address, AWS_ADDRESS_MAX_LEN); - sock_size = sizeof(address.sock_addr_types.un_addr); - } else { - AWS_ASSERT(0); - return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); - } - - if (pton_err != 1) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p 
fd=%d: failed to parse address %s:%u.", - (void *)socket, - socket->io_handle.data.fd, - remote_endpoint->address, - remote_endpoint->port); - return aws_raise_error(s_convert_pton_error(pton_err, errno_value)); - } - - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: connecting to endpoint %s:%u.", - (void *)socket, - socket->io_handle.data.fd, - remote_endpoint->address, - remote_endpoint->port); - - socket->state = CONNECTING; - socket->remote_endpoint = *remote_endpoint; - socket->connect_accept_user_data = user_data; - socket->connection_result_fn = on_connection_result; - - struct posix_socket *socket_impl = socket->impl; - - socket_impl->connect_args = aws_mem_calloc(socket->allocator, 1, sizeof(struct posix_socket_connect_args)); - if (!socket_impl->connect_args) { - return AWS_OP_ERR; - } - - socket_impl->connect_args->socket = socket; - socket_impl->connect_args->allocator = socket->allocator; - - socket_impl->connect_args->task.fn = s_handle_socket_timeout; - socket_impl->connect_args->task.arg = socket_impl->connect_args; - - int error_code = connect(socket->io_handle.data.fd, (struct sockaddr *)&address.sock_addr_types, sock_size); - socket->event_loop = event_loop; - - if (!error_code) { - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: connected immediately, not scheduling timeout.", - (void *)socket, - socket->io_handle.data.fd); - socket_impl->connect_args->task.fn = s_run_connect_success; - /* the subscription for IO will happen once we setup the connection in the task. Since we already - * know the connection succeeded, we don't need to register for events yet. */ - aws_event_loop_schedule_task_now(event_loop, &socket_impl->connect_args->task); - } - - if (error_code) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - if (errno_value == EINPROGRESS || errno_value == EALREADY) { - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: connection pending waiting on event-loop notification or timeout.", - (void *)socket, - socket->io_handle.data.fd); - /* cache the timeout task; it is possible for the IO subscription to come back virtually immediately - * and null out the connect args */ - struct aws_task *timeout_task = &socket_impl->connect_args->task; - - socket_impl->currently_subscribed = true; - /* This event is for when the connection finishes. (the fd will flip writable). */ - if (aws_event_loop_subscribe_to_io_events( - event_loop, - &socket->io_handle, - AWS_IO_EVENT_TYPE_WRITABLE, - s_socket_connect_event, - socket_impl->connect_args)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: failed to register with event-loop %p.", - (void *)socket, - socket->io_handle.data.fd, - (void *)event_loop); - socket_impl->currently_subscribed = false; - socket->event_loop = NULL; - goto err_clean_up; - } - - /* schedule a task to run at the connect timeout interval, if this task runs before the connect - * happens, we consider that a timeout. 
*/ - uint64_t timeout = 0; - aws_event_loop_current_clock_time(event_loop, &timeout); - timeout += aws_timestamp_convert( - socket->options.connect_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL); - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: scheduling timeout task for %llu.", - (void *)socket, - socket->io_handle.data.fd, - (unsigned long long)timeout); - aws_event_loop_schedule_task_future(event_loop, timeout_task, timeout); - } else { - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: connect failed with error code %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - int aws_error = s_determine_socket_error(errno_value); - aws_raise_error(aws_error); - socket->event_loop = NULL; - socket_impl->currently_subscribed = false; - goto err_clean_up; - } - } - return AWS_OP_SUCCESS; - -err_clean_up: - aws_mem_release(socket->allocator, socket_impl->connect_args); - socket_impl->connect_args = NULL; - return AWS_OP_ERR; -} - -int aws_socket_bind(struct aws_socket *socket, const struct aws_socket_endpoint *local_endpoint) { - if (socket->state != INIT) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: invalid state for bind operation.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - - size_t address_strlen; - if (aws_secure_strlen(local_endpoint->address, AWS_ADDRESS_MAX_LEN, &address_strlen)) { - return AWS_OP_ERR; - } - - if (aws_socket_validate_port_for_bind(local_endpoint->port, socket->options.domain)) { - return AWS_OP_ERR; - } - - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: binding to %s:%u.", - (void *)socket, - socket->io_handle.data.fd, - local_endpoint->address, - local_endpoint->port); - - struct socket_address address; - AWS_ZERO_STRUCT(address); - socklen_t sock_size = 0; - int pton_err = 1; - if (socket->options.domain == AWS_SOCKET_IPV4) { - pton_err = inet_pton(AF_INET, local_endpoint->address, &address.sock_addr_types.addr_in.sin_addr); - address.sock_addr_types.addr_in.sin_port = htons((uint16_t)local_endpoint->port); - address.sock_addr_types.addr_in.sin_family = AF_INET; - sock_size = sizeof(address.sock_addr_types.addr_in); - } else if (socket->options.domain == AWS_SOCKET_IPV6) { - pton_err = inet_pton(AF_INET6, local_endpoint->address, &address.sock_addr_types.addr_in6.sin6_addr); - address.sock_addr_types.addr_in6.sin6_port = htons((uint16_t)local_endpoint->port); - address.sock_addr_types.addr_in6.sin6_family = AF_INET6; - sock_size = sizeof(address.sock_addr_types.addr_in6); - } else if (socket->options.domain == AWS_SOCKET_LOCAL) { - address.sock_addr_types.un_addr.sun_family = AF_UNIX; - strncpy(address.sock_addr_types.un_addr.sun_path, local_endpoint->address, AWS_ADDRESS_MAX_LEN); - sock_size = sizeof(address.sock_addr_types.un_addr); - } else { - AWS_ASSERT(0); - return aws_raise_error(AWS_IO_SOCKET_UNSUPPORTED_ADDRESS_FAMILY); - } - - if (pton_err != 1) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: failed to parse address %s:%u.", - (void *)socket, - socket->io_handle.data.fd, - local_endpoint->address, - local_endpoint->port); - return aws_raise_error(s_convert_pton_error(pton_err, errno_value)); - } - - if (bind(socket->io_handle.data.fd, (struct sockaddr *)&address.sock_addr_types, sock_size) != 0) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: bind 
failed with error code %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - - aws_raise_error(s_determine_socket_error(errno_value)); - goto error; - } - - if (s_update_local_endpoint(socket)) { - goto error; - } - - if (socket->options.type == AWS_SOCKET_STREAM) { - socket->state = BOUND; - } else { - /* e.g. UDP is now readable */ - socket->state = CONNECTED_READ; - } - - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: successfully bound to %s:%u", - (void *)socket, - socket->io_handle.data.fd, - socket->local_endpoint.address, - socket->local_endpoint.port); - - return AWS_OP_SUCCESS; - -error: - socket->state = ERROR; - return AWS_OP_ERR; -} - -int aws_socket_get_bound_address(const struct aws_socket *socket, struct aws_socket_endpoint *out_address) { - if (socket->local_endpoint.address[0] == 0) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: Socket has no local address. Socket must be bound first.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - *out_address = socket->local_endpoint; - return AWS_OP_SUCCESS; -} - -int aws_socket_listen(struct aws_socket *socket, int backlog_size) { - if (socket->state != BOUND) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: invalid state for listen operation. You must call bind first.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - - int error_code = listen(socket->io_handle.data.fd, backlog_size); - - if (!error_code) { - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, "id=%p fd=%d: successfully listening", (void *)socket, socket->io_handle.data.fd); - socket->state = LISTENING; - return AWS_OP_SUCCESS; - } - - int errno_value = errno; /* Always cache errno before potential side-effect */ - - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: listen failed with error code %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - - socket->state = ERROR; - - return aws_raise_error(s_determine_socket_error(errno_value)); -} - -/* this is called by the event loop handler that was installed in start_accept(). It runs once the FD goes readable, - * accepts as many as it can and then returns control to the event loop. 
*/ -static void s_socket_accept_event( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - void *user_data) { - - (void)event_loop; - - struct aws_socket *socket = user_data; - struct posix_socket *socket_impl = socket->impl; - - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, "id=%p fd=%d: listening event received", (void *)socket, socket->io_handle.data.fd); - - struct aws_io_handle_io_op_result io_op_result; - AWS_ZERO_STRUCT(io_op_result); - - if (socket_impl->continue_accept && events & AWS_IO_EVENT_TYPE_READABLE) { - int in_fd = 0; - while (socket_impl->continue_accept && in_fd != -1) { - struct sockaddr_storage in_addr; - socklen_t in_len = sizeof(struct sockaddr_storage); - - in_fd = accept(handle->data.fd, (struct sockaddr *)&in_addr, &in_len); - if (in_fd == -1) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - - if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { - io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; - break; - } - - int aws_error = aws_socket_get_error(socket); - aws_raise_error(aws_error); - s_on_connection_error(socket, aws_error); - io_op_result.read_error_code = aws_error; - break; - } - - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, "id=%p fd=%d: incoming connection", (void *)socket, socket->io_handle.data.fd); - - struct aws_socket *new_sock = aws_mem_acquire(socket->allocator, sizeof(struct aws_socket)); - - if (!new_sock) { - close(in_fd); - s_on_connection_error(socket, aws_last_error()); - continue; - } - - if (s_socket_init(new_sock, socket->allocator, &socket->options, in_fd)) { - aws_mem_release(socket->allocator, new_sock); - s_on_connection_error(socket, aws_last_error()); - continue; - } - - new_sock->local_endpoint = socket->local_endpoint; - new_sock->state = CONNECTED_READ | CONNECTED_WRITE; - uint32_t port = 0; - - /* get the info on the incoming socket's address */ - if (in_addr.ss_family == AF_INET) { - struct sockaddr_in *s = (struct sockaddr_in *)&in_addr; - port = ntohs(s->sin_port); - /* this came from the kernel, a.) it won't fail. b.) even if it does - * it's not fatal. come back and add logging later. */ - if (!inet_ntop( - AF_INET, - &s->sin_addr, - new_sock->remote_endpoint.address, - sizeof(new_sock->remote_endpoint.address))) { - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: Failed to determine remote address.", - (void *)socket, - socket->io_handle.data.fd); - } - new_sock->options.domain = AWS_SOCKET_IPV4; - } else if (in_addr.ss_family == AF_INET6) { - /* this came from the kernel, a.) it won't fail. b.) even if it does - * it's not fatal. come back and add logging later. */ - struct sockaddr_in6 *s = (struct sockaddr_in6 *)&in_addr; - port = ntohs(s->sin6_port); - if (!inet_ntop( - AF_INET6, - &s->sin6_addr, - new_sock->remote_endpoint.address, - sizeof(new_sock->remote_endpoint.address))) { - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d:
Failed to determine remote address.", - (void *)socket, - socket->io_handle.data.fd); - } - new_sock->options.domain = AWS_SOCKET_IPV6; - } else if (in_addr.ss_family == AF_UNIX) { - new_sock->remote_endpoint = socket->local_endpoint; - new_sock->options.domain = AWS_SOCKET_LOCAL; - } - - new_sock->remote_endpoint.port = port; - - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: connected to %s:%d, incoming fd %d", - (void *)socket, - socket->io_handle.data.fd, - new_sock->remote_endpoint.address, - new_sock->remote_endpoint.port, - in_fd); - - int flags = fcntl(in_fd, F_GETFL, 0); - - flags |= O_NONBLOCK | O_CLOEXEC; - fcntl(in_fd, F_SETFL, flags); - - bool close_occurred = false; - socket_impl->close_happened = &close_occurred; - socket->accept_result_fn(socket, AWS_ERROR_SUCCESS, new_sock, socket->connect_accept_user_data); - - if (close_occurred) { - return; - } - - socket_impl->close_happened = NULL; - } - } - - // Return results back to event loop. - if (handle->update_io_result) { - handle->update_io_result(event_loop, handle, &io_op_result); - } - - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: finished processing incoming connections, " - "waiting on event-loop notification", - (void *)socket, - socket->io_handle.data.fd); -} - -int aws_socket_start_accept( - struct aws_socket *socket, - struct aws_event_loop *accept_loop, - aws_socket_on_accept_result_fn *on_accept_result, - void *user_data) { - AWS_ASSERT(on_accept_result); - AWS_ASSERT(accept_loop); - - if (socket->event_loop) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: is already assigned to event-loop %p.", - (void *)socket, - socket->io_handle.data.fd, - (void *)socket->event_loop); - return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); - } - - if (socket->state != LISTENING) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: invalid state for start_accept operation. 
You must call listen first.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - - socket->accept_result_fn = on_accept_result; - socket->connect_accept_user_data = user_data; - socket->event_loop = accept_loop; - struct posix_socket *socket_impl = socket->impl; - socket_impl->continue_accept = true; - socket_impl->currently_subscribed = true; - - if (aws_event_loop_subscribe_to_io_events( - socket->event_loop, &socket->io_handle, AWS_IO_EVENT_TYPE_READABLE, s_socket_accept_event, socket)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: failed to subscribe to event-loop %p.", - (void *)socket, - socket->io_handle.data.fd, - (void *)socket->event_loop); - socket_impl->continue_accept = false; - socket_impl->currently_subscribed = false; - socket->event_loop = NULL; - - return AWS_OP_ERR; - } - - return AWS_OP_SUCCESS; -} - -struct stop_accept_args { - struct aws_task task; - struct aws_mutex mutex; - struct aws_condition_variable condition_variable; - struct aws_socket *socket; - int ret_code; - bool invoked; -}; - -static bool s_stop_accept_pred(void *arg) { - struct stop_accept_args *stop_accept_args = arg; - return stop_accept_args->invoked; -} - -static void s_stop_accept_task(struct aws_task *task, void *arg, enum aws_task_status status) { - (void)task; - (void)status; - - struct stop_accept_args *stop_accept_args = arg; - aws_mutex_lock(&stop_accept_args->mutex); - stop_accept_args->ret_code = AWS_OP_SUCCESS; - if (aws_socket_stop_accept(stop_accept_args->socket)) { - stop_accept_args->ret_code = aws_last_error(); - } - stop_accept_args->invoked = true; - aws_condition_variable_notify_one(&stop_accept_args->condition_variable); - aws_mutex_unlock(&stop_accept_args->mutex); -} - -int aws_socket_stop_accept(struct aws_socket *socket) { - if (socket->state != LISTENING) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: is not in a listening state, can't stop_accept.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, "id=%p fd=%d: stopping accepting new connections", (void *)socket, socket->io_handle.data.fd); - - if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { - struct stop_accept_args args = { - .mutex = AWS_MUTEX_INIT, - .condition_variable = AWS_CONDITION_VARIABLE_INIT, - .invoked = false, - .socket = socket, - .ret_code = AWS_OP_SUCCESS, - .task = {.fn = s_stop_accept_task}, - }; - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: stopping accepting new connections from a different thread than " - "the socket is running from. Blocking until it shuts down.", - (void *)socket, - socket->io_handle.data.fd); - /* Look.... I know what I'm doing.... trust me, I'm an engineer. - * We wait on the completion before 'args' goes out of scope. 
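 * (No deadlock is possible here: this blocking path is only taken when the
 * caller is NOT on the socket's event-loop thread, so that thread remains free
 * to run s_stop_accept_task and signal the condition variable.)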
- * NOLINTNEXTLINE */ - args.task.arg = &args; - aws_mutex_lock(&args.mutex); - aws_event_loop_schedule_task_now(socket->event_loop, &args.task); - aws_condition_variable_wait_pred(&args.condition_variable, &args.mutex, s_stop_accept_pred, &args); - aws_mutex_unlock(&args.mutex); - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: stop accept task finished running.", - (void *)socket, - socket->io_handle.data.fd); - - if (args.ret_code) { - return aws_raise_error(args.ret_code); - } - return AWS_OP_SUCCESS; - } - - int ret_val = AWS_OP_SUCCESS; - struct posix_socket *socket_impl = socket->impl; - if (socket_impl->currently_subscribed) { - ret_val = aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); - socket_impl->currently_subscribed = false; - socket_impl->continue_accept = false; - socket->event_loop = NULL; - } - - return ret_val; -} - -int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_options *options) { - if (socket->options.domain != options->domain || socket->options.type != options->type) { - return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); - } - - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setting socket options to: keep-alive %d, keep idle %d, keep-alive interval %d, keep-alive probe " - "count %d.", - (void *)socket, - socket->io_handle.data.fd, - (int)options->keepalive, - (int)options->keep_alive_timeout_sec, - (int)options->keep_alive_interval_sec, - (int)options->keep_alive_max_failed_probes); - - socket->options = *options; - - int reuse = 1; - if (AWS_UNLIKELY(setsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(int)))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() for SO_REUSEADDR failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - } - size_t network_interface_length = 0; - if (aws_secure_strlen(options->network_interface_name, AWS_NETWORK_INTERFACE_NAME_MAX, &network_interface_length)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: network_interface_name max length must be %d length and NULL terminated", - (void *)socket, - socket->io_handle.data.fd, - AWS_NETWORK_INTERFACE_NAME_MAX); - return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); - } - if (network_interface_length != 0) { -#if defined(SO_BINDTODEVICE) - if (setsockopt( - socket->io_handle.data.fd, - SOL_SOCKET, - SO_BINDTODEVICE, - options->network_interface_name, - network_interface_length)) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() with SO_BINDTODEVICE for \"%s\" failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - options->network_interface_name, - errno_value); - return aws_raise_error(AWS_IO_SOCKET_INVALID_OPTIONS); - } -#else - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: network_interface_name is not supported on this platform.", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED); -#endif - } - if (options->type == AWS_SOCKET_STREAM && options->domain != AWS_SOCKET_LOCAL) { - if (socket->options.keepalive) { - int keep_alive = 1; - if (AWS_UNLIKELY( - setsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_KEEPALIVE, &keep_alive, sizeof(int)))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p 
fd=%d: setsockopt() for enabling SO_KEEPALIVE failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - } - } - - if (socket->options.keep_alive_interval_sec && socket->options.keep_alive_timeout_sec) { - int ival_in_secs = socket->options.keep_alive_interval_sec; - if (AWS_UNLIKELY(setsockopt( - socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPIDLE, &ival_in_secs, sizeof(ival_in_secs)))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() for enabling TCP_KEEPIDLE for TCP failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - } - - ival_in_secs = socket->options.keep_alive_timeout_sec; - if (AWS_UNLIKELY(setsockopt( - socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPINTVL, &ival_in_secs, sizeof(ival_in_secs)))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() for enabling TCP_KEEPINTVL for TCP failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - } - } - - if (socket->options.keep_alive_max_failed_probes) { - int max_probes = socket->options.keep_alive_max_failed_probes; - if (AWS_UNLIKELY( - setsockopt(socket->io_handle.data.fd, IPPROTO_TCP, TCP_KEEPCNT, &max_probes, sizeof(max_probes)))) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - AWS_LOGF_WARN( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: setsockopt() for enabling TCP_KEEPCNT for TCP failed with errno %d.", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - } - } - } - - return AWS_OP_SUCCESS; -} - -struct socket_write_request { - struct aws_byte_cursor cursor_cpy; - aws_socket_on_write_completed_fn *written_fn; - void *write_user_data; - struct aws_linked_list_node node; - size_t original_buffer_len; - int error_code; -}; - -struct posix_socket_close_args { - struct aws_mutex mutex; - struct aws_condition_variable condition_variable; - struct aws_socket *socket; - bool invoked; - int ret_code; -}; - -static bool s_close_predicate(void *arg) { - struct posix_socket_close_args *close_args = arg; - return close_args->invoked; -} - -static void s_close_task(struct aws_task *task, void *arg, enum aws_task_status status) { - (void)task; - (void)status; - - struct posix_socket_close_args *close_args = arg; - aws_mutex_lock(&close_args->mutex); - close_args->ret_code = AWS_OP_SUCCESS; - - if (aws_socket_close(close_args->socket)) { - close_args->ret_code = aws_last_error(); - } - - close_args->invoked = true; - aws_condition_variable_notify_one(&close_args->condition_variable); - aws_mutex_unlock(&close_args->mutex); -} - -int aws_socket_close(struct aws_socket *socket) { - struct posix_socket *socket_impl = socket->impl; - AWS_LOGF_DEBUG(AWS_LS_IO_SOCKET, "id=%p fd=%d: closing", (void *)socket, socket->io_handle.data.fd); - struct aws_event_loop *event_loop = socket->event_loop; - if (socket->event_loop) { - /* don't freak out on me, this almost never happens, and never occurs inside a channel - * it only gets hit from a listening socket shutting down or from a unit test. */ - if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: closing from a different thread than " - "the socket is running from. 
Blocking until it closes down.", - (void *)socket, - socket->io_handle.data.fd); - /* the only time we allow this kind of thing is when you're a listener.*/ - if (socket->state != LISTENING) { - return aws_raise_error(AWS_IO_SOCKET_ILLEGAL_OPERATION_FOR_STATE); - } - - struct posix_socket_close_args args = { - .mutex = AWS_MUTEX_INIT, - .condition_variable = AWS_CONDITION_VARIABLE_INIT, - .socket = socket, - .ret_code = AWS_OP_SUCCESS, - .invoked = false, - }; - - struct aws_task close_task = { - .fn = s_close_task, - .arg = &args, - }; - - int fd_for_logging = socket->io_handle.data.fd; /* socket's fd gets reset before final log */ - (void)fd_for_logging; - - aws_mutex_lock(&args.mutex); - aws_event_loop_schedule_task_now(socket->event_loop, &close_task); - aws_condition_variable_wait_pred(&args.condition_variable, &args.mutex, s_close_predicate, &args); - aws_mutex_unlock(&args.mutex); - AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: close task completed.", (void *)socket, fd_for_logging); - if (args.ret_code) { - return aws_raise_error(args.ret_code); - } - - return AWS_OP_SUCCESS; - } - - if (socket_impl->currently_subscribed) { - if (socket->state & LISTENING) { - aws_socket_stop_accept(socket); - } else { - int err_code = aws_event_loop_unsubscribe_from_io_events(socket->event_loop, &socket->io_handle); - - if (err_code) { - return AWS_OP_ERR; - } - } - socket_impl->currently_subscribed = false; - socket->event_loop = NULL; - } - } - - if (socket_impl->close_happened) { - *socket_impl->close_happened = true; - } - - if (socket_impl->connect_args) { - socket_impl->connect_args->socket = NULL; - socket_impl->connect_args = NULL; - } - - if (aws_socket_is_open(socket)) { - close(socket->io_handle.data.fd); - socket->io_handle.data.fd = -1; - socket->state = CLOSED; - - /* ensure callbacks for pending writes fire (in order) before this close function returns */ - - if (socket_impl->written_task_scheduled) { - aws_event_loop_cancel_task(event_loop, &socket_impl->written_task); - } - - while (!aws_linked_list_empty(&socket_impl->written_queue)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue); - struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); - size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; - write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data); - aws_mem_release(socket->allocator, write_request); - } - - while (!aws_linked_list_empty(&socket_impl->write_queue)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->write_queue); - struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); - size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; - write_request->written_fn(socket, AWS_IO_SOCKET_CLOSED, bytes_written, write_request->write_user_data); - aws_mem_release(socket->allocator, write_request); - } - } - - return AWS_OP_SUCCESS; -} - -int aws_socket_shutdown_dir(struct aws_socket *socket, enum aws_channel_direction dir) { - int how = dir == AWS_CHANNEL_DIR_READ ? 
0 : 1; - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, "id=%p fd=%d: shutting down in direction %d", (void *)socket, socket->io_handle.data.fd, dir); - if (shutdown(socket->io_handle.data.fd, how)) { - int errno_value = errno; /* Always cache errno before potential side-effect */ - int aws_error = s_determine_socket_error(errno_value); - return aws_raise_error(aws_error); - } - - if (dir == AWS_CHANNEL_DIR_READ) { - socket->state &= ~CONNECTED_READ; - } else { - socket->state &= ~CONNECTED_WRITE; - } - - return AWS_OP_SUCCESS; -} - -static void s_written_task(struct aws_task *task, void *arg, enum aws_task_status status) { - (void)task; - (void)status; - - struct aws_socket *socket = arg; - struct posix_socket *socket_impl = socket->impl; - - socket_impl->written_task_scheduled = false; - - /* this is to handle a race condition when a callback kicks off a cleanup, or the user decides - * to close the socket based on something they read (SSL validation failed for example). - * if clean_up happens when internal_refcount > 0, socket_impl is kept dangling */ - aws_ref_count_acquire(&socket_impl->internal_refcount); - - /* Notes about weird loop: - * 1) Only process the initial contents of queue when this task is run, - * ignoring any writes queued during delivery. - * If we simply looped until the queue was empty, we could get into a - * synchronous loop of completing and writing and completing and writing... - * and it would be tough for multiple sockets to share an event-loop fairly. - * 2) Check if queue is empty with each iteration. - * If user calls close() from the callback, close() will process all - * nodes in the written_queue, and the queue will be empty when the - * callstack gets back to here. */ - if (!aws_linked_list_empty(&socket_impl->written_queue)) { - struct aws_linked_list_node *stop_after = aws_linked_list_back(&socket_impl->written_queue); - do { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue); - struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); - size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len; - write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data); - aws_mem_release(socket_impl->allocator, write_request); - if (node == stop_after) { - break; - } - } while (!aws_linked_list_empty(&socket_impl->written_queue)); - } - - aws_ref_count_release(&socket_impl->internal_refcount); -} - -/* this gets called in two scenarios. - * 1st scenario, someone called aws_socket_write() and we want to try writing now, so an error can be returned - * immediately if something bad has happened to the socket. In this case, `parent_request` is set. - * 2nd scenario, the event loop notified us that the socket went writable. 
In this case `parent_request` is NULL */ -static int s_process_socket_write_requests(struct aws_socket *socket, struct socket_write_request *parent_request) { - struct posix_socket *socket_impl = socket->impl; - - if (parent_request) { - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: processing write requests, called from aws_socket_write", - (void *)socket, - socket->io_handle.data.fd); - } else { - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: processing write requests, invoked by the event-loop", - (void *)socket, - socket->io_handle.data.fd); - } - - bool purge = false; - int aws_error = AWS_OP_SUCCESS; - bool parent_request_failed = false; - bool pushed_to_written_queue = false; - - struct aws_io_handle_io_op_result io_op_result; - AWS_ZERO_STRUCT(io_op_result); - - /* if a close call happens in the middle, this queue will have been cleaned out from under us. */ - while (!aws_linked_list_empty(&socket_impl->write_queue)) { - struct aws_linked_list_node *node = aws_linked_list_front(&socket_impl->write_queue); - struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); - - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: dequeued write request of size %llu, remaining to write %llu", - (void *)socket, - socket->io_handle.data.fd, - (unsigned long long)write_request->original_buffer_len, - (unsigned long long)write_request->cursor_cpy.len); - - ssize_t written = - send(socket->io_handle.data.fd, write_request->cursor_cpy.ptr, write_request->cursor_cpy.len, MSG_NOSIGNAL); - int errno_value = errno; /* Always cache errno before potential side-effect */ - - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: send written size %d", - (void *)socket, - socket->io_handle.data.fd, - (int)written); - - if (written < 0) { - if (errno_value == EAGAIN) { - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, "id=%p fd=%d: returned would block", (void *)socket, socket->io_handle.data.fd); - io_op_result.write_error_code = AWS_IO_READ_WOULD_BLOCK; - break; - } - - if (errno_value == EPIPE) { - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: already closed before write", - (void *)socket, - socket->io_handle.data.fd); - aws_error = AWS_IO_SOCKET_CLOSED; - aws_raise_error(aws_error); - purge = true; - io_op_result.write_error_code = aws_error; - break; - } - - purge = true; - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: write error with error code %d", - (void *)socket, - socket->io_handle.data.fd, - errno_value); - aws_error = s_determine_socket_error(errno_value); - aws_raise_error(aws_error); - io_op_result.write_error_code = aws_error; - break; - } - - io_op_result.written_bytes += (size_t)written; - - size_t remaining_to_write = write_request->cursor_cpy.len; - - aws_byte_cursor_advance(&write_request->cursor_cpy, (size_t)written); - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: remaining write request to write %llu", - (void *)socket, - socket->io_handle.data.fd, - (unsigned long long)write_request->cursor_cpy.len); - - if ((size_t)written == remaining_to_write) { - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, "id=%p fd=%d: write request completed", (void *)socket, socket->io_handle.data.fd); - - aws_linked_list_remove(node); - write_request->error_code = AWS_ERROR_SUCCESS; - aws_linked_list_push_back(&socket_impl->written_queue, node); - pushed_to_written_queue = true; - } - } - - if (purge) { - while (!aws_linked_list_empty(&socket_impl->write_queue)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->write_queue); 
- struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node); - - /* If this fn was invoked directly from aws_socket_write(), don't invoke the error callback - * as the user will be able to rely on the return value from aws_socket_write() */ - if (write_request == parent_request) { - parent_request_failed = true; - aws_mem_release(socket->allocator, write_request); - } else { - write_request->error_code = aws_error; - aws_linked_list_push_back(&socket_impl->written_queue, node); - pushed_to_written_queue = true; - } - } - } - - if (pushed_to_written_queue && !socket_impl->written_task_scheduled) { - socket_impl->written_task_scheduled = true; - aws_task_init(&socket_impl->written_task, s_written_task, socket, "socket_written_task"); - aws_event_loop_schedule_task_now(socket->event_loop, &socket_impl->written_task); - } - - // Return results back to event loop. - if (socket->io_handle.update_io_result) { - socket->io_handle.update_io_result(socket->event_loop, &socket->io_handle, &io_op_result); - } - - /* Only report error if aws_socket_write() invoked this function and its write_request failed */ - if (!parent_request_failed) { - return AWS_OP_SUCCESS; - } - - aws_raise_error(aws_error); - return AWS_OP_ERR; -} - -static void s_on_socket_io_event( - struct aws_event_loop *event_loop, - struct aws_io_handle *handle, - int events, - void *user_data) { - (void)event_loop; - (void)handle; - struct aws_socket *socket = user_data; - struct posix_socket *socket_impl = socket->impl; - - /* this is to handle a race condition when an error kicks off a cleanup, or the user decides - * to close the socket based on something they read (SSL validation failed for example). - * if clean_up happens when internal_refcount > 0, socket_impl is kept dangling but currently - * subscribed is set to false. */ - aws_ref_count_acquire(&socket_impl->internal_refcount); - - /* NOTE: READABLE|WRITABLE|HANG_UP events might arrive simultaneously - * (e.g. peer sends last few bytes and immediately hangs up). - * Notify user of READABLE|WRITABLE events first, so they try to read any remaining bytes. */ - - if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_READABLE) { - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is readable", (void *)socket, socket->io_handle.data.fd); - if (socket->readable_fn) { - socket->readable_fn(socket, AWS_OP_SUCCESS, socket->readable_user_data); - } - } - /* if socket closed in between these branches, the currently_subscribed will be false and socket_impl will not - * have been cleaned up, so this next branch is safe. 
*/ - if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_WRITABLE) { - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is writable", (void *)socket, socket->io_handle.data.fd); - s_process_socket_write_requests(socket, NULL); - } - - if (events & AWS_IO_EVENT_TYPE_REMOTE_HANG_UP || events & AWS_IO_EVENT_TYPE_CLOSED) { - aws_raise_error(AWS_IO_SOCKET_CLOSED); - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: closed remotely", (void *)socket, socket->io_handle.data.fd); - if (socket->readable_fn) { - socket->readable_fn(socket, AWS_IO_SOCKET_CLOSED, socket->readable_user_data); - } - goto end_check; - } - - if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_ERROR) { - int aws_error = aws_socket_get_error(socket); - aws_raise_error(aws_error); - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, "id=%p fd=%d: error event occurred", (void *)socket, socket->io_handle.data.fd); - if (socket->readable_fn) { - socket->readable_fn(socket, aws_error, socket->readable_user_data); - } - goto end_check; - } - -end_check: - aws_ref_count_release(&socket_impl->internal_refcount); -} - -int aws_socket_assign_to_event_loop(struct aws_socket *socket, struct aws_event_loop *event_loop) { - if (!socket->event_loop) { - AWS_LOGF_DEBUG( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: assigning to event loop %p", - (void *)socket, - socket->io_handle.data.fd, - (void *)event_loop); - socket->event_loop = event_loop; - struct posix_socket *socket_impl = socket->impl; - socket_impl->currently_subscribed = true; - if (aws_event_loop_subscribe_to_io_events( - event_loop, - &socket->io_handle, - AWS_IO_EVENT_TYPE_WRITABLE | AWS_IO_EVENT_TYPE_READABLE, - s_on_socket_io_event, - socket)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: assigning to event loop %p failed with error %d", - (void *)socket, - socket->io_handle.data.fd, - (void *)event_loop, - aws_last_error()); - socket_impl->currently_subscribed = false; - socket->event_loop = NULL; - return AWS_OP_ERR; - } - - return AWS_OP_SUCCESS; - } - - return aws_raise_error(AWS_IO_EVENT_LOOP_ALREADY_ASSIGNED); -} - -struct aws_event_loop *aws_socket_get_event_loop(struct aws_socket *socket) { - return socket->event_loop; -} - -int aws_socket_subscribe_to_readable_events( - struct aws_socket *socket, - aws_socket_on_readable_fn *on_readable, - void *user_data) { - - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, " id=%p fd=%d: subscribing to readable events", (void *)socket, socket->io_handle.data.fd); - if (!(socket->state & CONNECTED_READ)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: can't subscribe to readable events since the socket is not connected", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); - } - - if (socket->readable_fn) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: can't subscribe to readable events since it is already subscribed", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_ERROR_IO_ALREADY_SUBSCRIBED); - } - - AWS_ASSERT(on_readable); - socket->readable_user_data = user_data; - socket->readable_fn = on_readable; - - return AWS_OP_SUCCESS; -} - -int aws_socket_read(struct aws_socket *socket, struct aws_byte_buf *buffer, size_t *amount_read) { - AWS_ASSERT(amount_read); - - if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: cannot read from a different thread than event loop %p", - (void *)socket, - socket->io_handle.data.fd, - (void *)socket->event_loop); - return 
aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - if (!(socket->state & CONNECTED_READ)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: cannot read because it is not connected", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); - } - - ssize_t read_val = read(socket->io_handle.data.fd, buffer->buffer + buffer->len, buffer->capacity - buffer->len); - int errno_value = errno; /* Always cache errno before potential side-effect */ - - AWS_LOGF_TRACE( - AWS_LS_IO_SOCKET, "id=%p fd=%d: read of %d", (void *)socket, socket->io_handle.data.fd, (int)read_val); - - if (read_val > 0) { - *amount_read = (size_t)read_val; - buffer->len += *amount_read; - return AWS_OP_SUCCESS; - } - - /* read_val of 0 means EOF which we'll treat as AWS_IO_SOCKET_CLOSED */ - if (read_val == 0) { - AWS_LOGF_INFO( - AWS_LS_IO_SOCKET, "id=%p fd=%d: zero read, socket is closed", (void *)socket, socket->io_handle.data.fd); - *amount_read = 0; - - if (buffer->capacity - buffer->len > 0) { - return aws_raise_error(AWS_IO_SOCKET_CLOSED); - } - - return AWS_OP_SUCCESS; - } - - if (errno_value == EAGAIN || errno_value == EWOULDBLOCK) { - AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: read would block", (void *)socket, socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_READ_WOULD_BLOCK); - } - - if (errno_value == EPIPE || errno_value == ECONNRESET) { - AWS_LOGF_INFO(AWS_LS_IO_SOCKET, "id=%p fd=%d: socket is closed.", (void *)socket, socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_CLOSED); - } - - if (errno_value == ETIMEDOUT) { - AWS_LOGF_ERROR(AWS_LS_IO_SOCKET, "id=%p fd=%d: socket timed out.", (void *)socket, socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_TIMEOUT); - } - - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: read failed with error: %s", - (void *)socket, - socket->io_handle.data.fd, - strerror(errno_value)); - return aws_raise_error(s_determine_socket_error(errno_value)); -} - -int aws_socket_write( - struct aws_socket *socket, - const struct aws_byte_cursor *cursor, - aws_socket_on_write_completed_fn *written_fn, - void *user_data) { - if (!aws_event_loop_thread_is_callers_thread(socket->event_loop)) { - return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY); - } - - if (!(socket->state & CONNECTED_WRITE)) { - AWS_LOGF_ERROR( - AWS_LS_IO_SOCKET, - "id=%p fd=%d: cannot write to because it is not connected", - (void *)socket, - socket->io_handle.data.fd); - return aws_raise_error(AWS_IO_SOCKET_NOT_CONNECTED); - } - - AWS_ASSERT(written_fn); - struct posix_socket *socket_impl = socket->impl; - struct socket_write_request *write_request = - aws_mem_calloc(socket->allocator, 1, sizeof(struct socket_write_request)); - - if (!write_request) { - return AWS_OP_ERR; - } - - write_request->original_buffer_len = cursor->len; - write_request->written_fn = written_fn; - write_request->write_user_data = user_data; - write_request->cursor_cpy = *cursor; - aws_linked_list_push_back(&socket_impl->write_queue, &write_request->node); - - return s_process_socket_write_requests(socket, write_request); -} - -int aws_socket_get_error(struct aws_socket *socket) { - int connect_result; - socklen_t result_length = sizeof(connect_result); - - if (getsockopt(socket->io_handle.data.fd, SOL_SOCKET, SO_ERROR, &connect_result, &result_length) < 0) { - return s_determine_socket_error(errno); - } - - if (connect_result) { - return s_determine_socket_error(connect_result); - } - - return AWS_OP_SUCCESS; -} - -bool 
aws_socket_is_open(struct aws_socket *socket) { - return socket->io_handle.data.fd >= 0; -} - -void aws_socket_endpoint_init_local_address_for_test(struct aws_socket_endpoint *endpoint) { - struct aws_uuid uuid; - AWS_FATAL_ASSERT(aws_uuid_init(&uuid) == AWS_OP_SUCCESS); - char uuid_str[AWS_UUID_STR_LEN] = {0}; - struct aws_byte_buf uuid_buf = aws_byte_buf_from_empty_array(uuid_str, sizeof(uuid_str)); - AWS_FATAL_ASSERT(aws_uuid_to_str(&uuid, &uuid_buf) == AWS_OP_SUCCESS); - snprintf(endpoint->address, sizeof(endpoint->address), "/tmp/testsock" PRInSTR ".sock", AWS_BYTE_BUF_PRI(uuid_buf)); -} diff --git a/source/socket_channel_handler.c b/source/socket_channel_handler.c index 4332e8637..eb8f47568 100644 --- a/source/socket_channel_handler.c +++ b/source/socket_channel_handler.c @@ -140,11 +140,8 @@ static void s_do_read(struct socket_handler *socket_handler) { if (max_to_read == 0) { return; } -#if AWS_USE_ON_EVENT_WITH_RESULT struct aws_io_handle_io_op_result io_op_result; - memset(&io_op_result, 0, sizeof(struct aws_io_handle_io_op_result)); - AWS_ASSERT(socket_handler->socket->io_handle.update_io_result); -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ + AWS_ZERO_STRUCT(io_op_result); size_t total_read = 0; size_t read = 0; @@ -158,16 +155,12 @@ static void s_do_read(struct socket_handler *socket_handler) { if (aws_socket_read(socket_handler->socket, &message->message_data, &read)) { last_error = aws_last_error(); aws_mem_release(message->allocator, message); -#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = last_error; -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } total_read += read; -#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_bytes += read; -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ AWS_LOGF_TRACE( AWS_LS_IO_SOCKET_HANDLER, "id=%p: read %llu from socket", @@ -177,9 +170,7 @@ static void s_do_read(struct socket_handler *socket_handler) { if (aws_channel_slot_send_message(socket_handler->slot, message, AWS_CHANNEL_DIR_READ)) { last_error = aws_last_error(); aws_mem_release(message->allocator, message); -#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = last_error; -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ break; } } @@ -197,9 +188,7 @@ static void s_do_read(struct socket_handler *socket_handler) { AWS_ASSERT(last_error != 0); if (last_error != AWS_IO_READ_WOULD_BLOCK) { -#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = last_error; -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ aws_channel_shutdown(socket_handler->slot->channel, last_error); } else { AWS_LOGF_TRACE( @@ -207,14 +196,13 @@ static void s_do_read(struct socket_handler *socket_handler) { "id=%p: out of data to read on socket. " "Waiting on event-loop notification.", (void *)socket_handler->slot->handler); -#if AWS_USE_ON_EVENT_WITH_RESULT io_op_result.read_error_code = AWS_IO_READ_WOULD_BLOCK; -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ } -#if AWS_USE_ON_EVENT_WITH_RESULT - socket_handler->socket->io_handle.update_io_result( - socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ + + if (socket_handler->socket->io_handle.update_io_result) { + socket_handler->socket->io_handle.update_io_result( + socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); + } return; } /* in this case, everything was fine, but there's still pending reads. 
We need to schedule a task to do the read @@ -231,10 +219,10 @@ static void s_do_read(struct socket_handler *socket_handler) { aws_channel_schedule_task_now(socket_handler->slot->channel, &socket_handler->read_task_storage); } -#if AWS_USE_ON_EVENT_WITH_RESULT - socket_handler->socket->io_handle.update_io_result( - socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); -#endif /* AWS_USE_ON_EVENT_WITH_RESULT */ + if (socket_handler->socket->io_handle.update_io_result) { + socket_handler->socket->io_handle.update_io_result( + socket_handler->socket->event_loop, &socket_handler->socket->io_handle, &io_op_result); + } } /* the socket is either readable or errored out. If it's readable, kick off s_do_read() to do its thing. */ diff --git a/tests/event_loop_test.c b/tests/event_loop_test.c index 1f988a24a..9e51ffc4e 100644 --- a/tests/event_loop_test.c +++ b/tests/event_loop_test.c @@ -836,7 +836,6 @@ static int s_state_read_until_blocked(struct thread_tester *tester) { uint8_t buffer[512]; while (simple_pipe_read(&tester->read_handle, buffer, sizeof(buffer)) > 0) { } -# if AWS_USE_ON_EVENT_WITH_RESULT if (errno == EAGAIN) { if (tester->read_handle.update_io_result != NULL) { struct aws_io_handle_io_op_result io_op_result; @@ -845,7 +844,6 @@ static int s_state_read_until_blocked(struct thread_tester *tester) { tester->read_handle.update_io_result(tester->event_loop, &tester->read_handle, &io_op_result); } } -# endif return AWS_OP_SUCCESS; } From 4b59301f6b9f6b44bc9208172b2fd526bc960572 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Wed, 25 Sep 2024 11:28:28 -0700 Subject: [PATCH 30/39] Fix naming and comments --- CMakeLists.txt | 2 +- source/qnx/ionotify_event_loop.c | 206 +++++++++++++++++-------------- tests/pipe_test.c | 3 + 3 files changed, 115 insertions(+), 96 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d83201e51..a53efc64e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -146,7 +146,7 @@ elseif(CMAKE_SYSTEM_NAME STREQUAL "QNX") "source/qnx/*.c" "source/posix/*.c" ) - set(EVENT_LOOP_DEFINE "ON_EVENT_WITH_RESULT") + set(EVENT_LOOP_DEFINE "IONOTIFY") set(USE_S2N ON) list(APPEND PLATFORM_LIBS "socket") endif() diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index 677d9f809..aedd27ebc 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -53,7 +53,9 @@ static struct aws_event_loop_vtable s_vtable = { .is_on_callers_thread = s_is_on_callers_thread, }; -struct ionotify_loop { +struct ionotify_event_loop { + struct aws_allocator *allocator; + struct aws_event_loop base; struct aws_task_scheduler scheduler; struct aws_thread thread_created_on; struct aws_thread_options thread_options; @@ -126,29 +128,29 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( goto clean_up_loop; } - struct ionotify_loop *ionotify_loop = aws_mem_calloc(alloc, 1, sizeof(struct ionotify_loop)); + struct ionotify_event_loop *ionotify_event_loop = aws_mem_calloc(alloc, 1, sizeof(struct ionotify_event_loop)); if (options->thread_options) { - ionotify_loop->thread_options = *options->thread_options; + ionotify_event_loop->thread_options = *options->thread_options; } else { - ionotify_loop->thread_options = *aws_default_thread_options(); + ionotify_event_loop->thread_options = *aws_default_thread_options(); } /* initialize thread id to NULL, it should be updated when the event loop thread starts. 
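     * (With a NULL running_thread_id, s_is_on_callers_thread() answers false, which is the correct answer before the thread launches.)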
*/ - aws_atomic_init_ptr(&ionotify_loop->running_thread_id, NULL); + aws_atomic_init_ptr(&ionotify_event_loop->running_thread_id, NULL); - aws_linked_list_init(&ionotify_loop->task_pre_queue); - ionotify_loop->task_pre_queue_mutex = (struct aws_mutex)AWS_MUTEX_INIT; - aws_atomic_init_ptr(&ionotify_loop->stop_task_ptr, NULL); + aws_linked_list_init(&ionotify_event_loop->task_pre_queue); + ionotify_event_loop->task_pre_queue_mutex = (struct aws_mutex)AWS_MUTEX_INIT; + aws_atomic_init_ptr(&ionotify_event_loop->stop_task_ptr, NULL); - if (aws_thread_init(&ionotify_loop->thread_created_on, alloc)) { + if (aws_thread_init(&ionotify_event_loop->thread_created_on, alloc)) { goto clean_up_ionotify; } /* Setup channel to receive events from resource managers. */ - ionotify_loop->io_events_channel_id = ChannelCreate(0); + ionotify_event_loop->io_events_channel_id = ChannelCreate(0); int errno_value = errno; /* Always cache errno before potential side-effect */ - if (ionotify_loop->io_events_channel_id == -1) { + if (ionotify_event_loop->io_events_channel_id == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: ChannelCreate failed with errno %d (%s)\n", @@ -161,12 +163,13 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( AWS_LS_IO_EVENT_LOOP, "id=%p: Opened QNX channel with ID %d", (void *)event_loop, - ionotify_loop->io_events_channel_id); + ionotify_event_loop->io_events_channel_id); /* Open connection over the QNX channel for pulses. */ - ionotify_loop->pulse_connection_id = ConnectAttach(0, 0, ionotify_loop->io_events_channel_id, _NTO_SIDE_CHANNEL, 0); + ionotify_event_loop->pulse_connection_id = + ConnectAttach(0, 0, ionotify_event_loop->io_events_channel_id, _NTO_SIDE_CHANNEL, 0); errno_value = errno; /* Always cache errno before potential side-effect */ - if (ionotify_loop->pulse_connection_id == -1) { + if (ionotify_event_loop->pulse_connection_id == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: ConnectAttach failed with errno %d (%s)\n", @@ -176,27 +179,27 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( goto clean_up_thread; } - if (aws_task_scheduler_init(&ionotify_loop->scheduler, alloc)) { + if (aws_task_scheduler_init(&ionotify_event_loop->scheduler, alloc)) { AWS_LOGF_ERROR(AWS_LS_IO_EVENT_LOOP, "id=%p: aws_task_scheduler_init failed\n", (void *)event_loop); goto clean_up_thread; } - ionotify_loop->should_continue = false; + ionotify_event_loop->should_continue = false; - event_loop->impl_data = ionotify_loop; + event_loop->impl_data = ionotify_event_loop; event_loop->vtable = &s_vtable; - if (aws_hash_table_init(&ionotify_loop->handles, alloc, 32, aws_hash_ptr, aws_ptr_eq, NULL, NULL)) { + if (aws_hash_table_init(&ionotify_event_loop->handles, alloc, 32, aws_hash_ptr, aws_ptr_eq, NULL, NULL)) { goto clean_up_thread; } return event_loop; clean_up_thread: - aws_thread_clean_up(&ionotify_loop->thread_created_on); + aws_thread_clean_up(&ionotify_event_loop->thread_created_on); clean_up_ionotify: - aws_mem_release(alloc, ionotify_loop); + aws_mem_release(alloc, ionotify_event_loop); clean_up_loop: aws_mem_release(alloc, event_loop); @@ -207,7 +210,7 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( static void s_destroy(struct aws_event_loop *event_loop) { AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Destroying event_loop", (void *)event_loop); - struct ionotify_loop *ionotify_loop = event_loop->impl_data; + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; /* we don't know if stop() has been called by someone 
else, * just call stop() again and wait for event-loop to finish. */ @@ -215,38 +218,41 @@ static void s_destroy(struct aws_event_loop *event_loop) { s_wait_for_stop_completion(event_loop); /* setting this so that canceled tasks don't blow up when asking if they're on the event-loop thread. */ - ionotify_loop->thread_joined_to = aws_thread_current_thread_id(); - aws_atomic_store_ptr(&ionotify_loop->running_thread_id, &ionotify_loop->thread_joined_to); - aws_task_scheduler_clean_up(&ionotify_loop->scheduler); + ionotify_event_loop->thread_joined_to = aws_thread_current_thread_id(); + aws_atomic_store_ptr(&ionotify_event_loop->running_thread_id, &ionotify_event_loop->thread_joined_to); + aws_task_scheduler_clean_up(&ionotify_event_loop->scheduler); - while (!aws_linked_list_empty(&ionotify_loop->task_pre_queue)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&ionotify_loop->task_pre_queue); + while (!aws_linked_list_empty(&ionotify_event_loop->task_pre_queue)) { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&ionotify_event_loop->task_pre_queue); struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); task->fn(task, task->arg, AWS_TASK_STATUS_CANCELED); } - aws_thread_clean_up(&ionotify_loop->thread_created_on); + aws_thread_clean_up(&ionotify_event_loop->thread_created_on); - aws_hash_table_clean_up(&ionotify_loop->handles); + aws_hash_table_clean_up(&ionotify_event_loop->handles); - aws_mem_release(event_loop->alloc, ionotify_loop); + aws_mem_release(event_loop->alloc, ionotify_event_loop); aws_event_loop_clean_up_base(event_loop); aws_mem_release(event_loop->alloc, event_loop); } static int s_run(struct aws_event_loop *event_loop) { - struct ionotify_loop *ionotify_loop = event_loop->impl_data; + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Starting event-loop thread.", (void *)event_loop); - ionotify_loop->should_continue = true; + ionotify_event_loop->should_continue = true; aws_thread_increment_unjoined_count(); if (aws_thread_launch( - &ionotify_loop->thread_created_on, &aws_event_loop_thread, event_loop, &ionotify_loop->thread_options)) { + &ionotify_event_loop->thread_created_on, + &aws_event_loop_thread, + event_loop, + &ionotify_event_loop->thread_options)) { aws_thread_decrement_unjoined_count(); AWS_LOGF_FATAL(AWS_LS_IO_EVENT_LOOP, "id=%p: Thread creation failed.", (void *)event_loop); - ionotify_loop->should_continue = false; + ionotify_event_loop->should_continue = false; return AWS_OP_ERR; } @@ -256,42 +262,42 @@ static int s_run(struct aws_event_loop *event_loop) { static void s_stop_task(struct aws_task *task, void *args, enum aws_task_status status) { (void)task; struct aws_event_loop *event_loop = args; - struct ionotify_loop *ionotify_loop = event_loop->impl_data; + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; /* now okay to reschedule stop tasks. */ - aws_atomic_store_ptr(&ionotify_loop->stop_task_ptr, NULL); + aws_atomic_store_ptr(&ionotify_event_loop->stop_task_ptr, NULL); if (status == AWS_TASK_STATUS_RUN_READY) { /* this allows the event loop to invoke the callback once the event loop has completed. 
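         * The main loop re-checks should_continue at the top of every iteration, so clearing it here ends the thread after the current tick.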
*/ - ionotify_loop->should_continue = false; + ionotify_event_loop->should_continue = false; } } static int s_stop(struct aws_event_loop *event_loop) { - struct ionotify_loop *ionotify_loop = event_loop->impl_data; + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; void *expected_ptr = NULL; - bool update_succeeded = - aws_atomic_compare_exchange_ptr(&ionotify_loop->stop_task_ptr, &expected_ptr, &ionotify_loop->stop_task); + bool update_succeeded = aws_atomic_compare_exchange_ptr( + &ionotify_event_loop->stop_task_ptr, &expected_ptr, &ionotify_event_loop->stop_task); if (!update_succeeded) { /* the stop task is already scheduled. */ return AWS_OP_SUCCESS; } AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Stopping event-loop thread", (void *)event_loop); - aws_task_init(&ionotify_loop->stop_task, s_stop_task, event_loop, "ionotify_event_loop_stop"); - s_schedule_task_now(event_loop, &ionotify_loop->stop_task); + aws_task_init(&ionotify_event_loop->stop_task, s_stop_task, event_loop, "ionotify_event_loop_stop"); + s_schedule_task_now(event_loop, &ionotify_event_loop->stop_task); return AWS_OP_SUCCESS; } static int s_wait_for_stop_completion(struct aws_event_loop *event_loop) { - struct ionotify_loop *ionotify_loop = event_loop->impl_data; - int result = aws_thread_join(&ionotify_loop->thread_created_on); + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + int result = aws_thread_join(&ionotify_event_loop->thread_created_on); aws_thread_decrement_unjoined_count(); return result; } static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { - struct ionotify_loop *ionotify_loop = event_loop->impl_data; + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; /* if event loop and the caller are the same thread, just schedule and be done with it. */ if (s_is_on_callers_thread(event_loop)) { @@ -303,9 +309,9 @@ static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws (unsigned long long)run_at_nanos); if (run_at_nanos == 0) { /* zero denotes "now" task */ - aws_task_scheduler_schedule_now(&ionotify_loop->scheduler, task); + aws_task_scheduler_schedule_now(&ionotify_event_loop->scheduler, task); } else { - aws_task_scheduler_schedule_future(&ionotify_loop->scheduler, task, run_at_nanos); + aws_task_scheduler_schedule_future(&ionotify_event_loop->scheduler, task, run_at_nanos); } return; } @@ -318,10 +324,10 @@ static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws (unsigned long long)run_at_nanos); task->timestamp = run_at_nanos; - aws_mutex_lock(&ionotify_loop->task_pre_queue_mutex); - bool is_first_task = aws_linked_list_empty(&ionotify_loop->task_pre_queue); - aws_linked_list_push_back(&ionotify_loop->task_pre_queue, &task->node); - aws_mutex_unlock(&ionotify_loop->task_pre_queue_mutex); + aws_mutex_lock(&ionotify_event_loop->task_pre_queue_mutex); + bool is_first_task = aws_linked_list_empty(&ionotify_event_loop->task_pre_queue); + aws_linked_list_push_back(&ionotify_event_loop->task_pre_queue, &task->node); + aws_mutex_unlock(&ionotify_event_loop->task_pre_queue_mutex); /* If the list was not empty, we already sent a cross-thread pulse. No need to send it again. 
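     * A single pulse per batch is enough: s_process_task_pre_queue() drains the whole queue when the loop wakes up.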
*/ if (is_first_task) { @@ -329,10 +335,11 @@ static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws AWS_LS_IO_EVENT_LOOP, "id=%p: Waking up event-loop thread by sending pulse to connection ID %d", (void *)event_loop, - ionotify_loop->pulse_connection_id); + ionotify_event_loop->pulse_connection_id); /* The pulse itself is enough for cross-thread notifications. */ int user_data_value = 0; - int rc = MsgSendPulse(ionotify_loop->pulse_connection_id, -1, CROSS_THREAD_PULSE_SIGEV_CODE, user_data_value); + int rc = + MsgSendPulse(ionotify_event_loop->pulse_connection_id, -1, CROSS_THREAD_PULSE_SIGEV_CODE, user_data_value); int errno_value = errno; if (rc == -1) { /* The task was scheduled, but we couldn't notify the main loop about it. According to QNX docs, inability @@ -358,18 +365,20 @@ static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Cancelling task %p", (void *)event_loop, (void *)task); - struct ionotify_loop *ionotify_loop = event_loop->impl_data; - aws_task_scheduler_cancel_task(&ionotify_loop->scheduler, task); + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + aws_task_scheduler_cancel_task(&ionotify_event_loop->scheduler, task); } /* Map ionotify_event_data to internal ID. */ -static int s_add_handle(struct ionotify_loop *ionotify_loop, struct ionotify_event_data *ionotify_event_data) { +static int s_add_handle( + struct ionotify_event_loop *ionotify_event_loop, + struct ionotify_event_data *ionotify_event_data) { AWS_ASSERT(s_is_on_callers_thread(ionotify_event_data->event_loop)); /* Special constant, _NOTIFY_DATA_MASK, limits the maximum value that can be used as user data in I/O events. */ int max_handle_id = _NOTIFY_DATA_MASK; - if (AWS_UNLIKELY(aws_hash_table_get_entry_count(&ionotify_loop->handles) == (size_t)max_handle_id)) { + if (AWS_UNLIKELY(aws_hash_table_get_entry_count(&ionotify_event_loop->handles) == (size_t)max_handle_id)) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: Maximum number of registered handles reached", @@ -378,14 +387,14 @@ static int s_add_handle(struct ionotify_loop *ionotify_loop, struct ionotify_eve } struct aws_hash_element *elem = NULL; - int next_handle_id = ionotify_loop->last_handle_id; + int next_handle_id = ionotify_event_loop->last_handle_id; int was_created = 0; do { ++next_handle_id; if (next_handle_id > max_handle_id) { next_handle_id = 1; } - aws_hash_table_create(&ionotify_loop->handles, (void *)next_handle_id, &elem, &was_created); + aws_hash_table_create(&ionotify_event_loop->handles, (void *)next_handle_id, &elem, &was_created); /* next_handle_id is already present in the hash table, skip it. 
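         * Handle IDs wrap around at _NOTIFY_DATA_MASK, so an ID picked here may still belong to a long-lived subscription and must be probed before reuse.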
*/ if (was_created == 0) { elem = NULL; @@ -393,7 +402,7 @@ static int s_add_handle(struct ionotify_loop *ionotify_loop, struct ionotify_eve } while (elem == NULL); ionotify_event_data->handle_id = next_handle_id; - ionotify_loop->last_handle_id = next_handle_id; + ionotify_event_loop->last_handle_id = next_handle_id; elem->value = ionotify_event_data; return AWS_OP_SUCCESS; @@ -401,23 +410,26 @@ static int s_add_handle(struct ionotify_loop *ionotify_loop, struct ionotify_eve struct ionotify_event_data *s_find_handle( struct aws_event_loop *event_loop, - struct ionotify_loop *ionotify_loop, + struct ionotify_event_loop *ionotify_event_loop, int handle_id) { AWS_ASSERT(s_is_on_callers_thread(event_loop)); (void)event_loop; struct ionotify_event_data *ionotify_event_data = NULL; struct aws_hash_element *elem = NULL; - aws_hash_table_find(&ionotify_loop->handles, (void *)handle_id, &elem); + aws_hash_table_find(&ionotify_event_loop->handles, (void *)handle_id, &elem); if (elem != NULL) { ionotify_event_data = elem->value; } return ionotify_event_data; } -static void s_remove_handle(struct aws_event_loop *event_loop, struct ionotify_loop *ionotify_loop, int handle_id) { +static void s_remove_handle( + struct aws_event_loop *event_loop, + struct ionotify_event_loop *ionotify_event_loop, + int handle_id) { AWS_ASSERT(s_is_on_callers_thread(event_loop)); (void)event_loop; - aws_hash_table_remove(&ionotify_loop->handles, (void *)handle_id, NULL, NULL); + aws_hash_table_remove(&ionotify_event_loop->handles, (void *)handle_id, NULL, NULL); } /* Scheduled task that performs the actual subscription using ionotify. */ @@ -431,7 +443,7 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta struct ionotify_event_data *ionotify_event_data = user_data; struct aws_event_loop *event_loop = ionotify_event_data->event_loop; - struct ionotify_loop *ionotify_loop = event_loop->impl_data; + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, @@ -442,7 +454,7 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta /* Map ionotify_event_data to ID. This ID will be returned with the I/O events from ionotify. */ if (ionotify_event_data->handle_id == 0) { - s_add_handle(ionotify_loop, ionotify_event_data); + s_add_handle(ionotify_event_loop, ionotify_event_data); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: Mapped fd %d to handle ID %u", @@ -536,8 +548,8 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta * unified manner. */ int kick_start_event_mask = rc & _NOTIFY_COND_MASK; kick_start_event_mask |= ionotify_event_data->handle_id; - int send_rc = - MsgSendPulse(ionotify_loop->pulse_connection_id, -1, IO_EVENT_KICKSTART_SIGEV_CODE, kick_start_event_mask); + int send_rc = MsgSendPulse( + ionotify_event_loop->pulse_connection_id, -1, IO_EVENT_KICKSTART_SIGEV_CODE, kick_start_event_mask); if (send_rc == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, @@ -624,9 +636,9 @@ static void s_process_io_result( AWS_LS_IO_EVENT_LOOP, "id=%p: Got EWOULDBLOCK for fd %d, rearming it", (void *)event_loop, handle->data.fd); /* We're on the event loop thread, just schedule subscribing task. 
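         * Cancelling any already-pending resubscribe task first keeps the same task from being scheduled twice.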
*/ ionotify_event_data->events_subscribed = event_types; - struct ionotify_loop *ionotify_loop = event_loop->impl_data; - aws_task_scheduler_cancel_task(&ionotify_loop->scheduler, &ionotify_event_data->resubscribe_task); - aws_task_scheduler_schedule_now(&ionotify_loop->scheduler, &ionotify_event_data->resubscribe_task); + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + aws_task_scheduler_cancel_task(&ionotify_event_loop->scheduler, &ionotify_event_data->resubscribe_task); + aws_task_scheduler_schedule_now(&ionotify_event_loop->scheduler, &ionotify_event_data->resubscribe_task); } /* Notify event loop of error conditions. */ @@ -636,9 +648,12 @@ static void s_process_io_result( "id=%p: fd errored, sending pulse for fd %d", (void *)event_loop, ionotify_event_data->handle->data.fd); - struct ionotify_loop *ionotify_loop = event_loop->impl_data; + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; int send_rc = MsgSendPulse( - ionotify_loop->pulse_connection_id, -1, IO_EVENT_UPDATE_ERROR_SIGEV_CODE, ionotify_event_data->handle_id); + ionotify_event_loop->pulse_connection_id, + -1, + IO_EVENT_UPDATE_ERROR_SIGEV_CODE, + ionotify_event_data->handle_id); int errno_value = errno; if (send_rc == -1) { AWS_LOGF_ERROR( @@ -682,7 +697,8 @@ static void s_update_io_result( const struct aws_io_handle_io_op_result *io_op_result) { if (!s_is_on_callers_thread(event_loop)) { - /* Move processing I/O operation results to the epoll thread if the operation is performed in another thread.*/ + /* Move processing I/O operation results to the event loop thread if the operation is performed in another + * thread.*/ AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Got I/O operation result from another thread", (void *)event_loop); struct aws_task *task = aws_mem_calloc(event_loop->alloc, 1, sizeof(struct aws_task)); struct ionotify_io_op_results *ionotify_io_op_results = @@ -705,7 +721,7 @@ static int s_subscribe_to_io_events( aws_event_loop_on_event_fn *on_event, void *user_data) { - struct ionotify_loop *ionotify_loop = event_loop->impl_data; + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Subscribing to events on fd %d", (void *)event_loop, handle->data.fd); struct ionotify_event_data *ionotify_event_data = @@ -717,7 +733,7 @@ static int s_subscribe_to_io_events( ionotify_event_data->event_loop = event_loop; ionotify_event_data->on_event = on_event; ionotify_event_data->events_subscribed = events; - ionotify_event_data->pulse_connection_id = ionotify_loop->pulse_connection_id; + ionotify_event_data->pulse_connection_id = ionotify_event_loop->pulse_connection_id; ionotify_event_data->user_data = user_data; ionotify_event_data->handle->update_io_result = s_update_io_result; @@ -751,7 +767,7 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: Unsubscribing from events on fd %d", (void *)event_loop, handle->data.fd); - struct ionotify_loop *ionotify_loop = event_loop->impl_data; + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; AWS_ASSERT(handle->additional_data); struct ionotify_event_data *ionotify_event_data = handle->additional_data; @@ -782,7 +798,7 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc "id=%p: Removing from handles map using ID %u", (void *)event_loop, ionotify_event_data->handle_id); - s_remove_handle(event_loop, ionotify_loop, 
ionotify_event_data->handle_id); + s_remove_handle(event_loop, ionotify_event_loop, ionotify_event_data->handle_id); handle->additional_data = NULL; handle->update_io_result = NULL; @@ -799,23 +815,23 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc } static bool s_is_on_callers_thread(struct aws_event_loop *event_loop) { - struct ionotify_loop *ionotify_loop = event_loop->impl_data; + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; - aws_thread_id_t *thread_id = aws_atomic_load_ptr(&ionotify_loop->running_thread_id); + aws_thread_id_t *thread_id = aws_atomic_load_ptr(&ionotify_event_loop->running_thread_id); return thread_id && aws_thread_thread_id_equal(*thread_id, aws_thread_current_thread_id()); } static void s_process_task_pre_queue(struct aws_event_loop *event_loop) { - struct ionotify_loop *ionotify_loop = event_loop->impl_data; + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Processing cross-thread tasks", (void *)event_loop); struct aws_linked_list task_pre_queue; aws_linked_list_init(&task_pre_queue); - aws_mutex_lock(&ionotify_loop->task_pre_queue_mutex); - aws_linked_list_swap_contents(&ionotify_loop->task_pre_queue, &task_pre_queue); - aws_mutex_unlock(&ionotify_loop->task_pre_queue_mutex); + aws_mutex_lock(&ionotify_event_loop->task_pre_queue_mutex); + aws_linked_list_swap_contents(&ionotify_event_loop->task_pre_queue, &task_pre_queue); + aws_mutex_unlock(&ionotify_event_loop->task_pre_queue_mutex); while (!aws_linked_list_empty(&task_pre_queue)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(&task_pre_queue); @@ -827,9 +843,9 @@ static void s_process_task_pre_queue(struct aws_event_loop *event_loop) { (void *)task); /* Timestamp 0 is used to denote "now" tasks */ if (task->timestamp == 0) { - aws_task_scheduler_schedule_now(&ionotify_loop->scheduler, task); + aws_task_scheduler_schedule_now(&ionotify_event_loop->scheduler, task); } else { - aws_task_scheduler_schedule_future(&ionotify_loop->scheduler, task, task->timestamp); + aws_task_scheduler_schedule_future(&ionotify_event_loop->scheduler, task, task->timestamp); } } } @@ -888,8 +904,8 @@ static void s_process_pulse( AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Got pulse for handle ID %u", (void *)event_loop, handle_id); - struct ionotify_loop *ionotify_loop = event_loop->impl_data; - struct ionotify_event_data *ionotify_event_data = s_find_handle(event_loop, ionotify_loop, handle_id); + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct ionotify_event_data *ionotify_event_data = s_find_handle(event_loop, ionotify_event_loop, handle_id); if (ionotify_event_data == NULL) { /* This situation is totally OK when the corresponding fd is already unsubscribed. 
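         * A notification pulse can still be in flight while unsubscribe removes the ID from the handles map; such stale pulses are simply dropped.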
*/ AWS_LOGF_DEBUG( @@ -945,10 +961,10 @@ static void s_process_pulse( static void aws_event_loop_thread(void *args) { struct aws_event_loop *event_loop = args; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: main loop started", (void *)event_loop); - struct ionotify_loop *ionotify_loop = event_loop->impl_data; + struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; /* set thread id to the thread of the event loop */ - aws_atomic_store_ptr(&ionotify_loop->running_thread_id, &ionotify_loop->thread_created_on.thread_id); + aws_atomic_store_ptr(&ionotify_event_loop->running_thread_id, &ionotify_event_loop->thread_created_on.thread_id); aws_thread_current_at_exit(s_aws_ionotify_cleanup_aws_lc_thread_local_state, NULL); @@ -965,15 +981,15 @@ static void aws_event_loop_thread(void *args) { * - Run all scheduled tasks. * - Process queued subscription cleanups. */ - while (ionotify_loop->should_continue) { + while (ionotify_event_loop->should_continue) { bool should_process_cross_thread_tasks = false; AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: Waiting for a maximum of %" PRIu64 " ns", (void *)event_loop, timeout); struct _pulse pulse; int errno_value; - rcvid_t rcvid = - aws_event_loop_listen_for_io_events(ionotify_loop->io_events_channel_id, &timeout, &pulse, &errno_value); + rcvid_t rcvid = aws_event_loop_listen_for_io_events( + ionotify_event_loop->io_events_channel_id, &timeout, &pulse, &errno_value); aws_event_loop_register_tick_start(event_loop); AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Wake up with rcvid %ld\n", (void *)event_loop, rcvid); @@ -1003,7 +1019,7 @@ static void aws_event_loop_thread(void *args) { uint64_t now_ns = 0; event_loop->clock(&now_ns); AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Running scheduled tasks", (void *)event_loop); - aws_task_scheduler_run_all(&ionotify_loop->scheduler, now_ns); + aws_task_scheduler_run_all(&ionotify_event_loop->scheduler, now_ns); /* Set timeout for next MsgReceive call. * If clock fails, or scheduler has no tasks, use default timeout. */ @@ -1014,7 +1030,7 @@ static void aws_event_loop_thread(void *args) { uint64_t next_run_time_ns; - if (!aws_task_scheduler_has_tasks(&ionotify_loop->scheduler, &next_run_time_ns)) { + if (!aws_task_scheduler_has_tasks(&ionotify_event_loop->scheduler, &next_run_time_ns)) { use_default_timeout = true; } @@ -1038,5 +1054,5 @@ static void aws_event_loop_thread(void *args) { AWS_LOGF_DEBUG(AWS_LS_IO_EVENT_LOOP, "id=%p: Exiting main loop", (void *)event_loop); /* set thread id back to NULL. This should be updated again in destroy, before tasks are canceled. */ - aws_atomic_store_ptr(&ionotify_loop->running_thread_id, NULL); + aws_atomic_store_ptr(&ionotify_event_loop->running_thread_id, NULL); } diff --git a/tests/pipe_test.c b/tests/pipe_test.c index 9607f1f3c..e057fd87a 100644 --- a/tests/pipe_test.c +++ b/tests/pipe_test.c @@ -430,6 +430,9 @@ static void s_on_readable_event(struct aws_pipe_read_end *read_end, int error_co s_signal_done_on_read_end_closed(state); } } else if (error_code == AWS_ERROR_SUCCESS) { + /* Some event loop implementations (only QNX, so far) can't detect that one end of a pipe has been closed + * without performing an operation on the other end. So, this read operation should notify the event loop that + * the writing end is closed. 
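         * Without this extra read, the close notification might never arrive on QNX and the test could hang.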
*/ aws_pipe_read(&state->read_end, &state->buffers.dst, NULL); } From 1700c7641168a1318a0052223f3b3c9385097dd7 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Wed, 25 Sep 2024 15:49:27 -0700 Subject: [PATCH 31/39] Use single destroy --- source/qnx/ionotify_event_loop.c | 215 ++++++++++++++++++------------- 1 file changed, 123 insertions(+), 92 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index aedd27ebc..beea2a761 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -53,14 +53,19 @@ static struct aws_event_loop_vtable s_vtable = { .is_on_callers_thread = s_is_on_callers_thread, }; -struct ionotify_event_loop { +enum aws_ionotify_event_loop_state { AIELS_BASE_UNINITIALIZED, AIELS_LOOP_STOPPED, AIELS_LOOP_STARTED }; + +struct aws_ionotify_event_loop { struct aws_allocator *allocator; struct aws_event_loop base; + struct aws_task_scheduler scheduler; struct aws_thread thread_created_on; struct aws_thread_options thread_options; aws_thread_id_t thread_joined_to; struct aws_atomic_var running_thread_id; + enum aws_ionotify_event_loop_state event_loop_state; + /* Channel to receive I/O events. Resource managers open connections to this channel to send their events. */ int io_events_channel_id; /* Connection to the events channel opened by the event loop. It's used by ionotify and some event loop logic (e.g. @@ -84,7 +89,7 @@ struct ionotify_event_loop { }; /* Data associated with a subscribed I/O handle. */ -struct ionotify_event_data { +struct aws_ionotify_event_data { struct aws_allocator *alloc; struct aws_io_handle *handle; struct aws_event_loop *event_loop; @@ -114,6 +119,61 @@ static short CROSS_THREAD_PULSE_SIGEV_CODE = _PULSE_CODE_MINAVAIL; static short IO_EVENT_KICKSTART_SIGEV_CODE = _PULSE_CODE_MINAVAIL + 1; static short IO_EVENT_UPDATE_ERROR_SIGEV_CODE = _PULSE_CODE_MINAVAIL + 2; +static void s_destroy(struct aws_event_loop *event_loop) { + AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Destroying event_loop", (void *)event_loop); + + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + + /* FIXME Data race */ + if (ionotify_event_loop->event_loop_state == AIELS_LOOP_STARTED) { + /* we don't know if stop() has been called by someone else, + * just call stop() again and wait for event-loop to finish. */ + aws_event_loop_stop(event_loop); + s_wait_for_stop_completion(event_loop); + } + + aws_hash_table_clean_up(&ionotify_event_loop->handles); + + if (aws_task_scheduler_is_valid(&ionotify_event_loop->scheduler)) { + /* setting this so that canceled tasks don't blow up when asking if they're on the event-loop thread. 
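         * With running_thread_id pointing at the joining thread, s_is_on_callers_thread() reports true while the canceled tasks run.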
*/ +        ionotify_event_loop->thread_joined_to = aws_thread_current_thread_id(); +        aws_atomic_store_ptr(&ionotify_event_loop->running_thread_id, &ionotify_event_loop->thread_joined_to); +        aws_task_scheduler_clean_up(&ionotify_event_loop->scheduler); + +        while (!aws_linked_list_empty(&ionotify_event_loop->task_pre_queue)) { +            struct aws_linked_list_node *node = aws_linked_list_pop_front(&ionotify_event_loop->task_pre_queue); +            struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); +            task->fn(task, task->arg, AWS_TASK_STATUS_CANCELED); +        } +    } + +    if (ionotify_event_loop->pulse_connection_id != 0 && ionotify_event_loop->pulse_connection_id != -1) { +        int rc = ConnectDetach(ionotify_event_loop->pulse_connection_id); +        int errno_value = errno; +        if (rc == -1) { +            AWS_LOGF_WARN( +                AWS_LS_IO_EVENT_LOOP, "id=%p: ConnectDetach failed with errno %d", (void *)event_loop, errno_value); +        } +    } + +    if (ionotify_event_loop->io_events_channel_id != 0 && ionotify_event_loop->io_events_channel_id != -1) { +        int rc = ChannelDestroy(ionotify_event_loop->io_events_channel_id); +        int errno_value = errno; +        if (rc == -1) { +            AWS_LOGF_WARN( +                AWS_LS_IO_EVENT_LOOP, "id=%p: ChannelDestroy failed with errno %d", (void *)event_loop, errno_value); +        } +    } + +    aws_thread_clean_up(&ionotify_event_loop->thread_created_on); + +    if (ionotify_event_loop->event_loop_state != AIELS_BASE_UNINITIALIZED) { +        aws_event_loop_clean_up_base(event_loop); +    } + +    aws_mem_release(ionotify_event_loop->allocator, ionotify_event_loop); +} + /* Setup edge triggered ionotify with a scheduler. */ struct aws_event_loop *aws_event_loop_new_default_with_options( struct aws_allocator *alloc, @@ -121,14 +181,18 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( AWS_PRECONDITION(options); AWS_PRECONDITION(options->clock); -    struct aws_event_loop *event_loop = aws_mem_calloc(alloc, 1, sizeof(struct aws_event_loop)); +    struct aws_ionotify_event_loop *ionotify_event_loop = +        aws_mem_calloc(alloc, 1, sizeof(struct aws_ionotify_event_loop)); +    struct aws_event_loop *base_event_loop = &ionotify_event_loop->base; +    /* Record the allocator up front; s_destroy() releases the impl through this field on every path. */ +    ionotify_event_loop->allocator = alloc; -    AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Initializing edge-triggered ionotify", (void *)event_loop); -    if (aws_event_loop_init_base(event_loop, alloc, options->clock)) { -        goto clean_up_loop; +    ionotify_event_loop->event_loop_state = AIELS_BASE_UNINITIALIZED; + +    AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Initializing edge-triggered ionotify", (void *)base_event_loop); +    if (aws_event_loop_init_base(&ionotify_event_loop->base, alloc, options->clock)) { +        /* Too early for s_destroy(): impl_data is not set yet, so release the impl manually. */ +        aws_mem_release(alloc, ionotify_event_loop); +        return NULL; } -    struct ionotify_event_loop *ionotify_event_loop = aws_mem_calloc(alloc, 1, sizeof(struct ionotify_event_loop)); +    /* Publish impl_data immediately so s_destroy() can find the impl on any later error path. */ +    ionotify_event_loop->base.impl_data = ionotify_event_loop; +    ionotify_event_loop->event_loop_state = AIELS_LOOP_STOPPED; if (options->thread_options) { ionotify_event_loop->thread_options = *options->thread_options; @@ -144,101 +208,65 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( aws_atomic_init_ptr(&ionotify_event_loop->stop_task_ptr, NULL); if (aws_thread_init(&ionotify_event_loop->thread_created_on, alloc)) { -        goto clean_up_ionotify; +        goto error; } -    /* Setup channel to receive events from resource managers. */ +    /* Setup QNX channel to receive events from resource managers. 
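+     * Resource managers attach to this channel and deliver their I/O notifications to the loop as pulses.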
*/ ionotify_event_loop->io_events_channel_id = ChannelCreate(0); int errno_value = errno; /* Always cache errno before potential side-effect */ if (ionotify_event_loop->io_events_channel_id == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, - "id=%p: ChannelCreate failed with errno %d (%s)\n", - (void *)event_loop, + "id=%p: ChannelCreate failed with errno %d\n", + (void *)base_event_loop, errno_value, strerror(errno_value)); - goto clean_up_thread; + goto error; } AWS_LOGF_DEBUG( AWS_LS_IO_EVENT_LOOP, "id=%p: Opened QNX channel with ID %d", - (void *)event_loop, + (void *)base_event_loop, ionotify_event_loop->io_events_channel_id); - /* Open connection over the QNX channel for pulses. */ + /* Open connection over the QNX channel for sending pulses. */ + int owner_pid = 0; /* PID of the owner of the channel, 0 means the calling process. */ ionotify_event_loop->pulse_connection_id = - ConnectAttach(0, 0, ionotify_event_loop->io_events_channel_id, _NTO_SIDE_CHANNEL, 0); + ConnectAttach(0 /* reserved */, owner_pid, ionotify_event_loop->io_events_channel_id, _NTO_SIDE_CHANNEL, 0); errno_value = errno; /* Always cache errno before potential side-effect */ if (ionotify_event_loop->pulse_connection_id == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: ConnectAttach failed with errno %d (%s)\n", - (void *)event_loop, + (void *)ionotify_event_loop, errno_value, strerror(errno_value)); - goto clean_up_thread; + goto error; } if (aws_task_scheduler_init(&ionotify_event_loop->scheduler, alloc)) { - AWS_LOGF_ERROR(AWS_LS_IO_EVENT_LOOP, "id=%p: aws_task_scheduler_init failed\n", (void *)event_loop); - goto clean_up_thread; + AWS_LOGF_ERROR(AWS_LS_IO_EVENT_LOOP, "id=%p: aws_task_scheduler_init failed\n", (void *)base_event_loop); + goto error; } - ionotify_event_loop->should_continue = false; - - event_loop->impl_data = ionotify_event_loop; - event_loop->vtable = &s_vtable; - if (aws_hash_table_init(&ionotify_event_loop->handles, alloc, 32, aws_hash_ptr, aws_ptr_eq, NULL, NULL)) { - goto clean_up_thread; + goto error; } - return event_loop; - -clean_up_thread: - aws_thread_clean_up(&ionotify_event_loop->thread_created_on); + ionotify_event_loop->should_continue = false; -clean_up_ionotify: - aws_mem_release(alloc, ionotify_event_loop); + ionotify_event_loop->base.impl_data = ionotify_event_loop; + ionotify_event_loop->base.vtable = &s_vtable; -clean_up_loop: - aws_mem_release(alloc, event_loop); + return &ionotify_event_loop->base; +error: + s_destroy(&ionotify_event_loop->base); return NULL; } -static void s_destroy(struct aws_event_loop *event_loop) { - AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Destroying event_loop", (void *)event_loop); - - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; - - /* we don't know if stop() has been called by someone else, - * just call stop() again and wait for event-loop to finish. */ - aws_event_loop_stop(event_loop); - s_wait_for_stop_completion(event_loop); - - /* setting this so that canceled tasks don't blow up when asking if they're on the event-loop thread. 
*/ - ionotify_event_loop->thread_joined_to = aws_thread_current_thread_id(); - aws_atomic_store_ptr(&ionotify_event_loop->running_thread_id, &ionotify_event_loop->thread_joined_to); - aws_task_scheduler_clean_up(&ionotify_event_loop->scheduler); - - while (!aws_linked_list_empty(&ionotify_event_loop->task_pre_queue)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&ionotify_event_loop->task_pre_queue); - struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); - task->fn(task, task->arg, AWS_TASK_STATUS_CANCELED); - } - - aws_thread_clean_up(&ionotify_event_loop->thread_created_on); - - aws_hash_table_clean_up(&ionotify_event_loop->handles); - - aws_mem_release(event_loop->alloc, ionotify_event_loop); - aws_event_loop_clean_up_base(event_loop); - aws_mem_release(event_loop->alloc, event_loop); -} - static int s_run(struct aws_event_loop *event_loop) { - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Starting event-loop thread.", (void *)event_loop); @@ -256,13 +284,15 @@ static int s_run(struct aws_event_loop *event_loop) { return AWS_OP_ERR; } + ionotify_event_loop->event_loop_state = AIELS_LOOP_STARTED; + return AWS_OP_SUCCESS; } static void s_stop_task(struct aws_task *task, void *args, enum aws_task_status status) { (void)task; struct aws_event_loop *event_loop = args; - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; /* now okay to reschedule stop tasks. */ aws_atomic_store_ptr(&ionotify_event_loop->stop_task_ptr, NULL); @@ -273,7 +303,7 @@ static void s_stop_task(struct aws_task *task, void *args, enum aws_task_status } static int s_stop(struct aws_event_loop *event_loop) { - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; void *expected_ptr = NULL; bool update_succeeded = aws_atomic_compare_exchange_ptr( @@ -290,14 +320,15 @@ static int s_stop(struct aws_event_loop *event_loop) { } static int s_wait_for_stop_completion(struct aws_event_loop *event_loop) { - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; int result = aws_thread_join(&ionotify_event_loop->thread_created_on); aws_thread_decrement_unjoined_count(); + ionotify_event_loop->event_loop_state = AIELS_LOOP_STOPPED; return result; } static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos) { - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; /* if event loop and the caller are the same thread, just schedule and be done with it. 
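
For context on the branch below: when the caller is not on the loop thread, the task is parked in the mutex-guarded pre-queue and the loop thread is woken with a cross-thread pulse. Roughly, as a simplified sketch against the fields of this file (error handling elided; not verbatim patch code):

    /* Sketch of the cross-thread scheduling path. */
    static void s_schedule_cross_thread_sketch(
        struct aws_ionotify_event_loop *loop,
        struct aws_task *task) {
        /* Hand the task to the event-loop thread under the pre-queue mutex. */
        aws_mutex_lock(&loop->task_pre_queue_mutex);
        aws_linked_list_push_back(&loop->task_pre_queue, &task->node);
        aws_mutex_unlock(&loop->task_pre_queue_mutex);
        /* Wake the loop thread: its MsgReceive() returns rcvid 0, it sees
         * CROSS_THREAD_PULSE_SIGEV_CODE, and then drains the pre-queue. */
        MsgSendPulse(loop->pulse_connection_id, -1, CROSS_THREAD_PULSE_SIGEV_CODE, 0);
    }
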
*/ if (s_is_on_callers_thread(event_loop)) { @@ -365,14 +396,14 @@ static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Cancelling task %p", (void *)event_loop, (void *)task); - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; aws_task_scheduler_cancel_task(&ionotify_event_loop->scheduler, task); } /* Map ionotify_event_data to internal ID. */ static int s_add_handle( - struct ionotify_event_loop *ionotify_event_loop, - struct ionotify_event_data *ionotify_event_data) { + struct aws_ionotify_event_loop *ionotify_event_loop, + struct aws_ionotify_event_data *ionotify_event_data) { AWS_ASSERT(s_is_on_callers_thread(ionotify_event_data->event_loop)); /* Special constant, _NOTIFY_DATA_MASK, limits the maximum value that can be used as user data in I/O events. */ @@ -408,13 +439,13 @@ static int s_add_handle( return AWS_OP_SUCCESS; } -struct ionotify_event_data *s_find_handle( +struct aws_ionotify_event_data *s_find_handle( struct aws_event_loop *event_loop, - struct ionotify_event_loop *ionotify_event_loop, + struct aws_ionotify_event_loop *ionotify_event_loop, int handle_id) { AWS_ASSERT(s_is_on_callers_thread(event_loop)); (void)event_loop; - struct ionotify_event_data *ionotify_event_data = NULL; + struct aws_ionotify_event_data *ionotify_event_data = NULL; struct aws_hash_element *elem = NULL; aws_hash_table_find(&ionotify_event_loop->handles, (void *)handle_id, &elem); if (elem != NULL) { @@ -425,7 +456,7 @@ struct ionotify_event_data *s_find_handle( static void s_remove_handle( struct aws_event_loop *event_loop, - struct ionotify_event_loop *ionotify_event_loop, + struct aws_ionotify_event_loop *ionotify_event_loop, int handle_id) { AWS_ASSERT(s_is_on_callers_thread(event_loop)); (void)event_loop; @@ -441,9 +472,9 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta return; } - struct ionotify_event_data *ionotify_event_data = user_data; + struct aws_ionotify_event_data *ionotify_event_data = user_data; struct aws_event_loop *event_loop = ionotify_event_data->event_loop; - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, @@ -601,7 +632,7 @@ static void s_process_io_result( AWS_ASSERT(s_is_on_callers_thread(event_loop)); AWS_ASSERT(handle->additional_data); - struct ionotify_event_data *ionotify_event_data = handle->additional_data; + struct aws_ionotify_event_data *ionotify_event_data = handle->additional_data; if (!ionotify_event_data->is_subscribed) { return; @@ -636,7 +667,7 @@ static void s_process_io_result( AWS_LS_IO_EVENT_LOOP, "id=%p: Got EWOULDBLOCK for fd %d, rearming it", (void *)event_loop, handle->data.fd); /* We're on the event loop thread, just schedule subscribing task. 
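
The rearm below exists because _NOTIFY_ACTION_EDGEARM is edge-triggered: once a reader drains the fd to EWOULDBLOCK, no further event fires until the fd is armed again. A hypothetical caller-side drain loop feeding this path (assumes <unistd.h>/<errno.h>; the update_io_result callback shape is inferred from its use in this file, so treat it as an assumption):

    static void s_drain_readable_sketch(struct aws_event_loop *event_loop, struct aws_io_handle *handle) {
        uint8_t buf[512];
        ssize_t n;
        do {
            n = read(handle->data.fd, buf, sizeof(buf)); /* consume until empty or EOF */
        } while (n > 0);

        struct aws_io_handle_io_op_result result;
        AWS_ZERO_STRUCT(result);
        if (n < 0 && errno == EWOULDBLOCK) {
            result.read_error_code = AWS_IO_READ_WOULD_BLOCK; /* drives the resubscribe task above */
        }
        handle->update_io_result(event_loop, handle, &result);
    }
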
*/ ionotify_event_data->events_subscribed = event_types; - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; aws_task_scheduler_cancel_task(&ionotify_event_loop->scheduler, &ionotify_event_data->resubscribe_task); aws_task_scheduler_schedule_now(&ionotify_event_loop->scheduler, &ionotify_event_data->resubscribe_task); } @@ -648,7 +679,7 @@ static void s_process_io_result( "id=%p: fd errored, sending pulse for fd %d", (void *)event_loop, ionotify_event_data->handle->data.fd); - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; int send_rc = MsgSendPulse( ionotify_event_loop->pulse_connection_id, -1, @@ -721,11 +752,11 @@ static int s_subscribe_to_io_events( aws_event_loop_on_event_fn *on_event, void *user_data) { - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Subscribing to events on fd %d", (void *)event_loop, handle->data.fd); - struct ionotify_event_data *ionotify_event_data = - aws_mem_calloc(event_loop->alloc, 1, sizeof(struct ionotify_event_data)); + struct aws_ionotify_event_data *ionotify_event_data = + aws_mem_calloc(event_loop->alloc, 1, sizeof(struct aws_ionotify_event_data)); handle->additional_data = ionotify_event_data; ionotify_event_data->alloc = event_loop->alloc; @@ -751,7 +782,7 @@ static int s_subscribe_to_io_events( } static void s_free_io_event_resources(void *user_data) { - struct ionotify_event_data *event_data = user_data; + struct aws_ionotify_event_data *event_data = user_data; AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "Releasing ionotify_event_data at %p", user_data); aws_mem_release(event_data->alloc, (void *)event_data); } @@ -759,7 +790,7 @@ static void s_free_io_event_resources(void *user_data) { static void s_unsubscribe_cleanup_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)task; (void)status; - struct ionotify_event_data *ionotify_event_data = (struct ionotify_event_data *)arg; + struct aws_ionotify_event_data *ionotify_event_data = (struct aws_ionotify_event_data *)arg; s_free_io_event_resources(ionotify_event_data); } @@ -767,10 +798,10 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p: Unsubscribing from events on fd %d", (void *)event_loop, handle->data.fd); - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; AWS_ASSERT(handle->additional_data); - struct ionotify_event_data *ionotify_event_data = handle->additional_data; + struct aws_ionotify_event_data *ionotify_event_data = handle->additional_data; /* Disarm resource manager for a given fd. 
*/ int event_mask = _NOTIFY_COND_EXTEN | _NOTIFY_CONDE_ERR | _NOTIFY_CONDE_HUP | _NOTIFY_CONDE_NVAL; @@ -815,14 +846,14 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc } static bool s_is_on_callers_thread(struct aws_event_loop *event_loop) { - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; aws_thread_id_t *thread_id = aws_atomic_load_ptr(&ionotify_event_loop->running_thread_id); return thread_id && aws_thread_thread_id_equal(*thread_id, aws_thread_current_thread_id()); } static void s_process_task_pre_queue(struct aws_event_loop *event_loop) { - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Processing cross-thread tasks", (void *)event_loop); @@ -904,8 +935,8 @@ static void s_process_pulse( AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Got pulse for handle ID %u", (void *)event_loop, handle_id); - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; - struct ionotify_event_data *ionotify_event_data = s_find_handle(event_loop, ionotify_event_loop, handle_id); + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_data *ionotify_event_data = s_find_handle(event_loop, ionotify_event_loop, handle_id); if (ionotify_event_data == NULL) { /* This situation is totally OK when the corresponding fd is already unsubscribed. */ AWS_LOGF_DEBUG( @@ -961,7 +992,7 @@ static void s_process_pulse( static void aws_event_loop_thread(void *args) { struct aws_event_loop *event_loop = args; AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: main loop started", (void *)event_loop); - struct ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; /* set thread id to the thread of the event loop */ aws_atomic_store_ptr(&ionotify_event_loop->running_thread_id, &ionotify_event_loop->thread_created_on.thread_id); From 88aff4deff48c73773f0b4f0893fc6415505408a Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Wed, 25 Sep 2024 17:27:42 -0700 Subject: [PATCH 32/39] Use atomic for event loop state --- source/qnx/ionotify_event_loop.c | 164 ++++++++++++++++++------------- 1 file changed, 93 insertions(+), 71 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index beea2a761..a36fddff2 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -64,7 +64,7 @@ struct aws_ionotify_event_loop { struct aws_thread_options thread_options; aws_thread_id_t thread_joined_to; struct aws_atomic_var running_thread_id; - enum aws_ionotify_event_loop_state event_loop_state; + struct aws_atomic_var event_loop_state; /* Channel to receive I/O events. Resource managers open connections to this channel to send their events. 
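
Switching event_loop_state from a plain enum to struct aws_atomic_var (this hunk) makes the run/stop/destroy handshake safe to read from any thread. The intended transition, compressed into a sketch (values from the state enum; this mirrors the s_run() change later in this patch rather than replacing it):

    /* Sketch: run() only proceeds from STOPPED; the joiner republishes STOPPED. */
    static int s_run_sketch(struct aws_atomic_var *state) {
        if (aws_atomic_load_int(state) != AIELS_LOOP_STOPPED) {
            return AWS_OP_ERR; /* already started, or the base was never initialized */
        }
        /* ... spawn the event-loop thread here ... */
        aws_atomic_store_int(state, AIELS_LOOP_STARTED);
        return AWS_OP_SUCCESS;
    }
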
*/ int io_events_channel_id; @@ -119,80 +119,29 @@ static short CROSS_THREAD_PULSE_SIGEV_CODE = _PULSE_CODE_MINAVAIL; static short IO_EVENT_KICKSTART_SIGEV_CODE = _PULSE_CODE_MINAVAIL + 1; static short IO_EVENT_UPDATE_ERROR_SIGEV_CODE = _PULSE_CODE_MINAVAIL + 2; -static void s_destroy(struct aws_event_loop *event_loop) { - AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Destroying event_loop", (void *)event_loop); - - struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; - - /* FIXME Data race */ - if (ionotify_event_loop->event_loop_state == AIELS_LOOP_STARTED) { - /* we don't know if stop() has been called by someone else, - * just call stop() again and wait for event-loop to finish. */ - aws_event_loop_stop(event_loop); - s_wait_for_stop_completion(event_loop); - } - - aws_hash_table_clean_up(&ionotify_event_loop->handles); - - if (aws_task_scheduler_is_valid(&ionotify_event_loop->scheduler)) { - /* setting this so that canceled tasks don't blow up when asking if they're on the event-loop thread. */ - ionotify_event_loop->thread_joined_to = aws_thread_current_thread_id(); - aws_atomic_store_ptr(&ionotify_event_loop->running_thread_id, &ionotify_event_loop->thread_joined_to); - aws_task_scheduler_clean_up(&ionotify_event_loop->scheduler); - - while (!aws_linked_list_empty(&ionotify_event_loop->task_pre_queue)) { - struct aws_linked_list_node *node = aws_linked_list_pop_front(&ionotify_event_loop->task_pre_queue); - struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); - task->fn(task, task->arg, AWS_TASK_STATUS_CANCELED); - } - } - - if (ionotify_event_loop->pulse_connection_id != 0 && ionotify_event_loop->pulse_connection_id != -1) { - int rc = ConnectDetach(ionotify_event_loop->pulse_connection_id); - int errno_value = errno; - if (rc == -1) { - AWS_LOGF_WARN( - AWS_LS_IO_EVENT_LOOP, "id=%p: ConnectDetach failed with errno %d", (void *)event_loop, errno_value); - } - } - - if (ionotify_event_loop->io_events_channel_id != 0 && ionotify_event_loop->io_events_channel_id != -1) { - int rc = ChannelDestroy(ionotify_event_loop->io_events_channel_id); - int errno_value = errno; - if (rc == -1) { - AWS_LOGF_WARN( - AWS_LS_IO_EVENT_LOOP, "id=%p: ChannelDestroy failed with errno %d", (void *)event_loop, errno_value); - } - } - - aws_thread_clean_up(&ionotify_event_loop->thread_created_on); - - if (ionotify_event_loop->event_loop_state != AIELS_BASE_UNINITIALIZED) { - aws_event_loop_clean_up_base(event_loop); - } - - aws_mem_release(ionotify_event_loop->allocator, ionotify_event_loop); -} +static void s_destroy_ionotify_event_loop(struct aws_ionotify_event_loop *ionotify_event_loop); /* Setup edge triggered ionotify with a scheduler. 
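
The constructor below leans on the "single destroy" idea this series introduced: every teardown step first checks whether its resource was actually created, so any constructor failure can jump straight to one cleanup routine. The pattern in isolation (a sketch with illustrative names against aws-c-common types; not patch code):

    /* sketch_obj is calloc'd, so unset fields are zero. */
    struct sketch_obj {
        struct aws_allocator *allocator;
        struct aws_task_scheduler scheduler; /* valid only after init succeeded */
        int channel_id;                      /* 0 = never created, -1 = creation failed */
    };

    static void s_sketch_obj_destroy(struct sketch_obj *obj) {
        if (obj == NULL) {
            return;
        }
        if (aws_task_scheduler_is_valid(&obj->scheduler)) {
            aws_task_scheduler_clean_up(&obj->scheduler);
        }
        if (obj->channel_id != 0 && obj->channel_id != -1) {
            ChannelDestroy(obj->channel_id);
        }
        aws_mem_release(obj->allocator, obj);
    }
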
*/ struct aws_event_loop *aws_event_loop_new_default_with_options( - struct aws_allocator *alloc, + struct aws_allocator *allocator, const struct aws_event_loop_options *options) { AWS_PRECONDITION(options); AWS_PRECONDITION(options->clock); struct aws_ionotify_event_loop *ionotify_event_loop = - aws_mem_calloc(alloc, 1, sizeof(struct aws_ionotify_event_loop)); + aws_mem_calloc(allocator, 1, sizeof(struct aws_ionotify_event_loop)); + ionotify_event_loop->allocator = allocator; struct aws_event_loop *base_event_loop = &ionotify_event_loop->base; - ionotify_event_loop->event_loop_state = AIELS_BASE_UNINITIALIZED; + aws_atomic_store_int_explicit( + &ionotify_event_loop->event_loop_state, AIELS_BASE_UNINITIALIZED, aws_memory_order_relaxed); AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Initializing edge-triggered ionotify", (void *)base_event_loop); - if (aws_event_loop_init_base(&ionotify_event_loop->base, alloc, options->clock)) { + if (aws_event_loop_init_base(&ionotify_event_loop->base, allocator, options->clock)) { goto error; } - ionotify_event_loop->event_loop_state = AIELS_LOOP_STOPPED; + aws_atomic_store_int_explicit(&ionotify_event_loop->event_loop_state, AIELS_LOOP_STOPPED, aws_memory_order_relaxed); if (options->thread_options) { ionotify_event_loop->thread_options = *options->thread_options; @@ -207,7 +156,7 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( ionotify_event_loop->task_pre_queue_mutex = (struct aws_mutex)AWS_MUTEX_INIT; aws_atomic_init_ptr(&ionotify_event_loop->stop_task_ptr, NULL); - if (aws_thread_init(&ionotify_event_loop->thread_created_on, alloc)) { + if (aws_thread_init(&ionotify_event_loop->thread_created_on, allocator)) { goto error; } @@ -216,11 +165,7 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( int errno_value = errno; /* Always cache errno before potential side-effect */ if (ionotify_event_loop->io_events_channel_id == -1) { AWS_LOGF_ERROR( - AWS_LS_IO_EVENT_LOOP, - "id=%p: ChannelCreate failed with errno %d\n", - (void *)base_event_loop, - errno_value, - strerror(errno_value)); + AWS_LS_IO_EVENT_LOOP, "id=%p: ChannelCreate failed with errno %d\n", (void *)base_event_loop, errno_value); goto error; } AWS_LOGF_DEBUG( @@ -244,12 +189,12 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( goto error; } - if (aws_task_scheduler_init(&ionotify_event_loop->scheduler, alloc)) { + if (aws_task_scheduler_init(&ionotify_event_loop->scheduler, allocator)) { AWS_LOGF_ERROR(AWS_LS_IO_EVENT_LOOP, "id=%p: aws_task_scheduler_init failed\n", (void *)base_event_loop); goto error; } - if (aws_hash_table_init(&ionotify_event_loop->handles, alloc, 32, aws_hash_ptr, aws_ptr_eq, NULL, NULL)) { + if (aws_hash_table_init(&ionotify_event_loop->handles, allocator, 32, aws_hash_ptr, aws_ptr_eq, NULL, NULL)) { goto error; } @@ -261,13 +206,90 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( return &ionotify_event_loop->base; error: - s_destroy(&ionotify_event_loop->base); + s_destroy_ionotify_event_loop(ionotify_event_loop); return NULL; } +static void s_destroy_ionotify_event_loop(struct aws_ionotify_event_loop *ionotify_event_loop) { + struct aws_event_loop *base_event_loop = &ionotify_event_loop->base; + AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Destroying event_loop", (void *)base_event_loop); + + int event_loop_state = (int)aws_atomic_load_int(&ionotify_event_loop->event_loop_state); + + if (event_loop_state == AIELS_LOOP_STARTED) { + /* we don't know if stop() has been called by someone else, + * just call 
stop() again and wait for event-loop to finish. */ + aws_event_loop_stop(base_event_loop); + s_wait_for_stop_completion(base_event_loop); + } + + if (aws_hash_table_is_valid(&ionotify_event_loop->handles)) { + aws_hash_table_clean_up(&ionotify_event_loop->handles); + } + + if (aws_task_scheduler_is_valid(&ionotify_event_loop->scheduler)) { + /* setting this so that canceled tasks don't blow up when asking if they're on the event-loop thread. */ + ionotify_event_loop->thread_joined_to = aws_thread_current_thread_id(); + aws_atomic_store_ptr(&ionotify_event_loop->running_thread_id, &ionotify_event_loop->thread_joined_to); + aws_task_scheduler_clean_up(&ionotify_event_loop->scheduler); + + while (!aws_linked_list_empty(&ionotify_event_loop->task_pre_queue)) { + struct aws_linked_list_node *node = aws_linked_list_pop_front(&ionotify_event_loop->task_pre_queue); + struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); + task->fn(task, task->arg, AWS_TASK_STATUS_CANCELED); + } + } + + if (ionotify_event_loop->pulse_connection_id != 0 && ionotify_event_loop->pulse_connection_id != -1) { + int rc = ConnectDetach(ionotify_event_loop->pulse_connection_id); + int errno_value = errno; + if (rc == -1) { + AWS_LOGF_WARN( + AWS_LS_IO_EVENT_LOOP, + "id=%p: ConnectDetach failed with errno %d", + (void *)base_event_loop, + errno_value); + } + } + + if (ionotify_event_loop->io_events_channel_id != 0 && ionotify_event_loop->io_events_channel_id != -1) { + int rc = ChannelDestroy(ionotify_event_loop->io_events_channel_id); + int errno_value = errno; + if (rc == -1) { + AWS_LOGF_WARN( + AWS_LS_IO_EVENT_LOOP, + "id=%p: ChannelDestroy failed with errno %d", + (void *)base_event_loop, + errno_value); + } + } + + aws_thread_clean_up(&ionotify_event_loop->thread_created_on); + + if (event_loop_state != AIELS_BASE_UNINITIALIZED) { + aws_event_loop_clean_up_base(base_event_loop); + } + + aws_mem_release(ionotify_event_loop->allocator, ionotify_event_loop); +} + +static void s_destroy(struct aws_event_loop *event_loop) { + s_destroy_ionotify_event_loop(event_loop->impl_data); +} + static int s_run(struct aws_event_loop *event_loop) { struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; + int current_state = (int)aws_atomic_load_int(&ionotify_event_loop->event_loop_state); + if (current_state != AIELS_LOOP_STOPPED) { + AWS_LOGF_ERROR( + AWS_LS_IO_EVENT_LOOP, + "id=%p: Failed to start event-loop thread: event loop state is %d", + (void *)event_loop, + current_state); + return AWS_OP_ERR; + } + AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Starting event-loop thread.", (void *)event_loop); ionotify_event_loop->should_continue = true; @@ -284,7 +306,7 @@ static int s_run(struct aws_event_loop *event_loop) { return AWS_OP_ERR; } - ionotify_event_loop->event_loop_state = AIELS_LOOP_STARTED; + aws_atomic_store_int(&ionotify_event_loop->event_loop_state, AIELS_LOOP_STARTED); return AWS_OP_SUCCESS; } @@ -323,7 +345,7 @@ static int s_wait_for_stop_completion(struct aws_event_loop *event_loop) { struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; int result = aws_thread_join(&ionotify_event_loop->thread_created_on); aws_thread_decrement_unjoined_count(); - ionotify_event_loop->event_loop_state = AIELS_LOOP_STOPPED; + aws_atomic_store_int(&ionotify_event_loop->event_loop_state, AIELS_LOOP_STOPPED); return result; } From 78b4d3855a4b214c34cdfb99d06f4c7b723bee47 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Thu, 26 Sep 2024 09:32:33 -0700 Subject: [PATCH 33/39] 
Use AWS_FATAL_ASSERT --- source/qnx/ionotify_event_loop.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index a36fddff2..e09e32a60 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -426,7 +426,7 @@ static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *ta static int s_add_handle( struct aws_ionotify_event_loop *ionotify_event_loop, struct aws_ionotify_event_data *ionotify_event_data) { - AWS_ASSERT(s_is_on_callers_thread(ionotify_event_data->event_loop)); + AWS_FATAL_ASSERT(s_is_on_callers_thread(ionotify_event_data->event_loop)); /* Special constant, _NOTIFY_DATA_MASK, limits the maximum value that can be used as user data in I/O events. */ int max_handle_id = _NOTIFY_DATA_MASK; @@ -465,7 +465,7 @@ struct aws_ionotify_event_data *s_find_handle( struct aws_event_loop *event_loop, struct aws_ionotify_event_loop *ionotify_event_loop, int handle_id) { - AWS_ASSERT(s_is_on_callers_thread(event_loop)); + AWS_FATAL_ASSERT(s_is_on_callers_thread(event_loop)); (void)event_loop; struct aws_ionotify_event_data *ionotify_event_data = NULL; struct aws_hash_element *elem = NULL; @@ -480,7 +480,7 @@ static void s_remove_handle( struct aws_event_loop *event_loop, struct aws_ionotify_event_loop *ionotify_event_loop, int handle_id) { - AWS_ASSERT(s_is_on_callers_thread(event_loop)); + AWS_FATAL_ASSERT(s_is_on_callers_thread(event_loop)); (void)event_loop; aws_hash_table_remove(&ionotify_event_loop->handles, (void *)handle_id, NULL, NULL); } @@ -651,9 +651,9 @@ static void s_process_io_result( struct aws_io_handle *handle, const struct aws_io_handle_io_op_result *io_op_result) { - AWS_ASSERT(s_is_on_callers_thread(event_loop)); + AWS_FATAL_ASSERT(s_is_on_callers_thread(event_loop)); - AWS_ASSERT(handle->additional_data); + AWS_FATAL_ASSERT(handle->additional_data); struct aws_ionotify_event_data *ionotify_event_data = handle->additional_data; if (!ionotify_event_data->is_subscribed) { @@ -822,7 +822,7 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; - AWS_ASSERT(handle->additional_data); + AWS_FATAL_ASSERT(handle->additional_data); struct aws_ionotify_event_data *ionotify_event_data = handle->additional_data; /* Disarm resource manager for a given fd. 
*/ From 22f3e24bdeb7098c5fdefc7efb00b64c3faec047 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Thu, 26 Sep 2024 09:32:49 -0700 Subject: [PATCH 34/39] Fix log format string --- source/qnx/ionotify_event_loop.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index e09e32a60..47c366869 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -510,7 +510,7 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta s_add_handle(ionotify_event_loop, ionotify_event_data); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, - "id=%p: Mapped fd %d to handle ID %u", + "id=%p: Mapped fd %d to handle ID %d", (void *)event_loop, ionotify_event_data->handle->data.fd, ionotify_event_data->handle_id); @@ -848,7 +848,7 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, - "id=%p: Removing from handles map using ID %u", + "id=%p: Removing from handles map using ID %d", (void *)event_loop, ionotify_event_data->handle_id); s_remove_handle(event_loop, ionotify_event_loop, ionotify_event_data->handle_id); @@ -955,7 +955,7 @@ static void s_process_pulse( return; } - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Got pulse for handle ID %u", (void *)event_loop, handle_id); + AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Got pulse for handle ID %d", (void *)event_loop, handle_id); struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; struct aws_ionotify_event_data *ionotify_event_data = s_find_handle(event_loop, ionotify_event_loop, handle_id); From 52110fb525b9319af7b48822e95830cbf76f27b9 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Thu, 26 Sep 2024 10:23:37 -0700 Subject: [PATCH 35/39] Remove strerror --- source/qnx/ionotify_event_loop.c | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index 47c366869..7263f2bbb 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -182,10 +182,9 @@ struct aws_event_loop *aws_event_loop_new_default_with_options( if (ionotify_event_loop->pulse_connection_id == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, - "id=%p: ConnectAttach failed with errno %d (%s)\n", + "id=%p: ConnectAttach failed with errno %d\n", (void *)ionotify_event_loop, - errno_value, - strerror(errno_value)); + errno_value); goto error; } @@ -400,10 +399,9 @@ static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws * is the minor thing in such a scenario. So, just log the error. 
*/ AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, - "id=%p: Failed to send cross-thread pulse: %d (%s)", + "id=%p: Failed to send cross-thread pulse with errno %d", (void *)event_loop, - errno_value, - strerror(errno_value)); + errno_value); } } } @@ -568,11 +566,10 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta if (rc == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, - "id=%p: Failed to subscribe to events on fd %d: error %d (%s)", + "id=%p: Failed to subscribe to events on fd %d with errno %d", (void *)event_loop, ionotify_event_data->handle->data.fd, - errno_value, - strerror(errno_value)); + errno_value); ionotify_event_data->on_event( event_loop, ionotify_event_data->handle, AWS_IO_EVENT_TYPE_ERROR, ionotify_event_data->user_data); return; @@ -711,11 +708,10 @@ static void s_process_io_result( if (send_rc == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, - "id=%p: Failed to send UPDATE_ERROR pulse for fd %d: error %d (%s)", + "id=%p: Failed to send UPDATE_ERROR pulse for fd %d with errno %d", (void *)event_loop, ionotify_event_data->handle->data.fd, - errno_value, - strerror(errno_value)); + errno_value); } } } @@ -834,11 +830,10 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc if (rc == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, - "id=%p: Failed to unsubscribe from events on fd %d: error %d (%s)", + "id=%p: Failed to unsubscribe from events on fd %d with errno %d", (void *)event_loop, ionotify_event_data->handle->data.fd, - errno_value, - strerror(errno_value)); + errno_value); return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE); } @@ -1056,10 +1051,9 @@ static void aws_event_loop_thread(void *args) { } else { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, - "id=%p: Listening for I/O events failed with error %d (%s)", + "id=%p: Listening for I/O events failed with errno %d", (void *)event_loop, - errno_value, - strerror(errno_value)); + errno_value); } } From 93c0c39765b2315647185c7ca4e0b1f0505172ee Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Thu, 26 Sep 2024 11:46:34 -0700 Subject: [PATCH 36/39] Add MsgUnregisterEvent on unsubscribing --- source/qnx/ionotify_event_loop.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index 7263f2bbb..6da14b6e1 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -540,7 +540,21 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta * It's enough to register an event only once and then reuse it on followup ionotify rearming calls. * NOTE: If you create a new sigevent for the same file descriptor, with the same flags, you HAVE to register * it again. */ - MsgRegisterEvent(&ionotify_event_data->event, ionotify_event_data->handle->data.fd); + int rc = MsgRegisterEvent(&ionotify_event_data->event, ionotify_event_data->handle->data.fd); + int errno_value = errno; + if (rc == -1) { + AWS_LOGF_ERROR( + AWS_LS_IO_EVENT_LOOP, + "id=%p fd=%d: Failed to register sigevent, errno %d", + (void *)event_loop, + ionotify_event_data->handle->data.fd, + errno_value); + /* With sigevent not registered in the system, I/O events from QNX resource managers can't be delivered + * to the event loop. Notify about error via a callback and stop subscribing. 
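
This patch pairs every successful MsgRegisterEvent() with a MsgUnregisterEvent() (added to the unsubscribe cleanup task just below). The lifecycle in isolation, as a QNX-only sketch (fd and event are illustrative):

    static int s_event_lifecycle_sketch(struct sigevent *event, int fd) {
        /* Register the sigevent with the kernel once per (event, fd) pair... */
        if (MsgRegisterEvent(event, fd) == -1) {
            return -1; /* report AWS_IO_EVENT_TYPE_ERROR, as the subscribe task does */
        }
        /* ...then the same sigevent can be re-armed repeatedly with ionotify()
         * for the fd's whole lifetime. */
        MsgUnregisterEvent(event); /* the teardown half added by this patch */
        return 0;
    }
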
*/ + ionotify_event_data->on_event( + event_loop, ionotify_event_data->handle, AWS_IO_EVENT_TYPE_ERROR, ionotify_event_data->user_data); + return; + } } else if (!ionotify_event_data->is_subscribed) { /* This is a resubscribing task, but unsubscribe happened, so ignore it. */ return; @@ -809,6 +823,19 @@ static void s_unsubscribe_cleanup_task(struct aws_task *task, void *arg, enum aw (void)task; (void)status; struct aws_ionotify_event_data *ionotify_event_data = (struct aws_ionotify_event_data *)arg; + + int rc = MsgUnregisterEvent(&ionotify_event_data->event); + int errno_value = errno; + if (rc == -1) { + /* Not much can be done here, so just log error. */ + AWS_LOGF_ERROR( + AWS_LS_IO_EVENT_LOOP, + "id=%p fd=%d: Failed to unregister sigevent, errno %d", + (void *)ionotify_event_data->event_loop, + ionotify_event_data->handle->data.fd, + errno_value); + } + s_free_io_event_resources(ionotify_event_data); } From 477a071440433723cc737a43c624108a961334f0 Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Thu, 26 Sep 2024 14:36:58 -0700 Subject: [PATCH 37/39] Fix logs, comments, code style --- source/qnx/ionotify_event_loop.c | 96 ++++++++++++++++++++------------ 1 file changed, 59 insertions(+), 37 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index 6da14b6e1..4e784363e 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -498,7 +498,7 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, - "id=%p: Subscribing to events on fd %d for events %d", + "id=%p fd=%d: Subscribing to events, event mask is %d", (void *)event_loop, ionotify_event_data->handle->data.fd, ionotify_event_data->events_subscribed); @@ -508,7 +508,7 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta s_add_handle(ionotify_event_loop, ionotify_event_data); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, - "id=%p: Mapped fd %d to handle ID %d", + "id=%p fd=%d: Mapped fd to handle ID %d", (void *)event_loop, ionotify_event_data->handle->data.fd, ionotify_event_data->handle_id); @@ -580,7 +580,7 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta if (rc == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, - "id=%p: Failed to subscribe to events on fd %d with errno %d", + "id=%p fd=%d: Failed to subscribe to I/O events, errno %d", (void *)event_loop, ionotify_event_data->handle->data.fd, errno_value); @@ -604,7 +604,7 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta ionotify_event_data->latest_io_event_types) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, - "id=%p: Sending pulse for fd %d because it has desired I/O conditions (rc is %d)", + "id=%p fd=%d: Sending a kick-start pulse because fd has desired I/O conditions (rc is %d)", (void *)event_loop, ionotify_event_data->handle->data.fd, rc); @@ -617,7 +617,7 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta if (send_rc == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, - "id=%p: Failed to send pulse for fd %d", + "id=%p fd=%d: Failed to send a kick-start pulse", (void *)event_loop, ionotify_event_data->handle->data.fd); } @@ -648,7 +648,7 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta if (rc == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, - "id=%p: Failed to disarm events for fd %d", + "id=%p fd=%d: Failed to disarm events", (void *)event_loop, ionotify_event_data->handle->data.fd); } @@ 
-673,7 +673,7 @@ static void s_process_io_result( AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, - "id=%p: Processing I/O operation result for fd %d: status %d (%s); read status %d (%s); write status %d (%s)", + "id=%p fd=%d: Processing I/O operation result: status %d (%s); read status %d (%s); write status %d (%s)", (void *)event_loop, handle->data.fd, io_op_result->error_code, @@ -697,7 +697,7 @@ static void s_process_io_result( /* Rearm resource manager. */ if (event_types != 0) { AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, "id=%p: Got EWOULDBLOCK for fd %d, rearming it", (void *)event_loop, handle->data.fd); + AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Got EWOULDBLOCK, rearming fd", (void *)event_loop, handle->data.fd); /* We're on the event loop thread, just schedule subscribing task. */ ionotify_event_data->events_subscribed = event_types; struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; @@ -709,7 +709,7 @@ static void s_process_io_result( if (ionotify_event_data->latest_io_event_types != 0) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, - "id=%p: fd errored, sending pulse for fd %d", + "id=%p fd=%d: fd errored, sending UPDATE_ERROR pulse", (void *)event_loop, ionotify_event_data->handle->data.fd); struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; @@ -722,7 +722,7 @@ static void s_process_io_result( if (send_rc == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, - "id=%p: Failed to send UPDATE_ERROR pulse for fd %d with errno %d", + "id=%p fd=%d: Failed to send UPDATE_ERROR pulse, errno %d", (void *)event_loop, ionotify_event_data->handle->data.fd, errno_value); @@ -762,7 +762,11 @@ static void s_update_io_result( if (!s_is_on_callers_thread(event_loop)) { /* Move processing I/O operation results to the event loop thread if the operation is performed in another * thread.*/ - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Got I/O operation result from another thread", (void *)event_loop); + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p fd=%d: Got I/O operation result from another thread", + (void *)event_loop, + handle->data.fd); struct aws_task *task = aws_mem_calloc(event_loop->alloc, 1, sizeof(struct aws_task)); struct ionotify_io_op_results *ionotify_io_op_results = aws_mem_calloc(event_loop->alloc, 1, sizeof(struct ionotify_io_op_results)); @@ -786,7 +790,8 @@ static int s_subscribe_to_io_events( struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Subscribing to events on fd %d", (void *)event_loop, handle->data.fd); + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Initiate subscription to events", (void *)event_loop, handle->data.fd); struct aws_ionotify_event_data *ionotify_event_data = aws_mem_calloc(event_loop->alloc, 1, sizeof(struct aws_ionotify_event_data)); handle->additional_data = ionotify_event_data; @@ -840,8 +845,7 @@ static void s_unsubscribe_cleanup_task(struct aws_task *task, void *arg, enum aw } static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) { - AWS_LOGF_TRACE( - AWS_LS_IO_EVENT_LOOP, "id=%p: Unsubscribing from events on fd %d", (void *)event_loop, handle->data.fd); + AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Unsubscribing from events", (void *)event_loop, handle->data.fd); struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; @@ -857,7 +861,7 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc if (rc == -1) { AWS_LOGF_ERROR( 
AWS_LS_IO_EVENT_LOOP, - "id=%p: Failed to unsubscribe from events on fd %d with errno %d", + "id=%p fd=%d: Failed to unsubscribe from events, errno %d", (void *)event_loop, ionotify_event_data->handle->data.fd, errno_value); @@ -870,8 +874,9 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, - "id=%p: Removing from handles map using ID %d", + "id=%p fd=%d: Removing from handles map using ID %d", (void *)event_loop, + handle->data.fd, ionotify_event_data->handle_id); s_remove_handle(event_loop, ionotify_event_loop, ionotify_event_data->handle_id); @@ -913,7 +918,7 @@ static void s_process_task_pre_queue(struct aws_event_loop *event_loop) { struct aws_task *task = AWS_CONTAINER_OF(node, struct aws_task, node); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, - "id=%p: task %p pulled to event-loop, scheduling now.", + "id=%p: Task %p pulled to event-loop, scheduling now.", (void *)event_loop, (void *)task); /* Timestamp 0 is used to denote "now" tasks */ @@ -959,15 +964,7 @@ static void s_aws_ionotify_cleanup_aws_lc_thread_local_state(void *user_data) { aws_cal_thread_clean_up(); } -static void s_process_pulse( - struct aws_event_loop *event_loop, - const struct _pulse *pulse, - bool *should_process_cross_thread_tasks) { - if (pulse->code == CROSS_THREAD_PULSE_SIGEV_CODE) { - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: MsgReceive got cross-thread pulse", (void *)event_loop); - *should_process_cross_thread_tasks = true; - return; - } +static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pulse *pulse) { int user_data = pulse->value.sival_int; @@ -997,34 +994,54 @@ static void s_process_pulse( AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, - "id=%p: Processing fd %d: pulse code %d", + "id=%p fd=%d: Processing pulse with code %d", (void *)event_loop, ionotify_event_data->handle->data.fd, pulse->code); int event_mask = 0; if (pulse->value.sival_int & _NOTIFY_COND_OBAND) { - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: fd got out-of-band data", (void *)event_loop); + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p fd=%d: fd got out-of-band data", + (void *)event_loop, + ionotify_event_data->handle->data.fd); event_mask |= AWS_IO_EVENT_TYPE_READABLE; } if (pulse->value.sival_int & _NOTIFY_COND_INPUT) { - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: fd is readable", (void *)event_loop); + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p fd=%d: fd is readable", + (void *)event_loop, + ionotify_event_data->handle->data.fd); event_mask |= AWS_IO_EVENT_TYPE_READABLE; } if (pulse->value.sival_int & _NOTIFY_COND_OUTPUT) { - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: fd is writable", (void *)event_loop); + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p fd=%d: fd is writable", + (void *)event_loop, + ionotify_event_data->handle->data.fd); event_mask |= AWS_IO_EVENT_TYPE_WRITABLE; } if (pulse->value.sival_int & _NOTIFY_COND_EXTEN) { + /* "If extended conditions are requested, and they need to be returned in an armed event, the negative of the + * satisfied conditions are returned in (io_notify_t).i.event.sigev_code" - a quote from iomgr.h. + * pulse.code value is changed whenever fd has the _NOTIFY_COND_EXTEN flag. However, not one bit corresponding + * to any extended flag (or its negation) is ever set in this field. 
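
To summarize the decoding this function performs: a pulse payload is a single int that packs the condition bits together with the handle ID. A compressed sketch (the mask split is inferred from s_add_handle() and the kick-start path, so treat the details as illustrative):

    static void s_decode_pulse_sketch(const struct _pulse *pulse) {
        int payload = pulse->value.sival_int;
        int handle_id = payload & _NOTIFY_DATA_MASK; /* ID assigned by s_add_handle() */
        int event_mask = 0;
        if (payload & (_NOTIFY_COND_INPUT | _NOTIFY_COND_OBAND)) {
            event_mask |= AWS_IO_EVENT_TYPE_READABLE;
        }
        if (payload & _NOTIFY_COND_OUTPUT) {
            event_mask |= AWS_IO_EVENT_TYPE_WRITABLE;
        }
        (void)handle_id;  /* resolved through the handles map via s_find_handle() */
        (void)event_mask; /* handed to the subscriber's on_event callback */
    }
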
*/ AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, - "id=%p: fd has extended condition: %d %d", + "id=%p fd=%d: fd has extended condition, pulse code is %d", (void *)event_loop, - pulse->code, - ionotify_event_data->event.sigev_code); + ionotify_event_data->handle->data.fd, + pulse->code); } if (ionotify_event_data->latest_io_event_types) { - AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: latest_io_event_types is non-empty", (void *)event_loop); + AWS_LOGF_TRACE( + AWS_LS_IO_EVENT_LOOP, + "id=%p fd=%d: latest_io_event_types is non-empty", + (void *)event_loop, + ionotify_event_data->handle->data.fd); event_mask |= ionotify_event_data->latest_io_event_types; /* Reset additional I/O event types to not process them twice. */ ionotify_event_data->latest_io_event_types = 0; @@ -1035,7 +1052,7 @@ static void s_process_pulse( static void aws_event_loop_thread(void *args) { struct aws_event_loop *event_loop = args; - AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: main loop started", (void *)event_loop); + AWS_LOGF_INFO(AWS_LS_IO_EVENT_LOOP, "id=%p: Main loop started", (void *)event_loop); struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; /* set thread id to the thread of the event loop */ @@ -1069,9 +1086,14 @@ static void aws_event_loop_thread(void *args) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Wake up with rcvid %ld\n", (void *)event_loop, rcvid); if (rcvid == 0) { - s_process_pulse(event_loop, &pulse, &should_process_cross_thread_tasks); + if (pulse.code == CROSS_THREAD_PULSE_SIGEV_CODE) { + AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: MsgReceive got cross-thread pulse", (void *)event_loop); + should_process_cross_thread_tasks = true; + } else { + s_process_pulse(event_loop, &pulse); + } } else if (rcvid > 0) { - AWS_LOGF_WARN(AWS_LS_IO_EVENT_LOOP, "id=%p: Received message, ignoring it\n", (void *)event_loop); + AWS_LOGF_WARN(AWS_LS_IO_EVENT_LOOP, "id=%p: Received QNX message, ignoring it\n", (void *)event_loop); } else { if (errno_value == ETIMEDOUT) { AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Woke up by timeout\n", (void *)event_loop); From bda976c6ec14cb0b447b66084e596bf9efe0c10d Mon Sep 17 00:00:00 2001 From: Igor Abdrakhimov Date: Thu, 26 Sep 2024 15:41:50 -0700 Subject: [PATCH 38/39] Fix naming --- source/qnx/ionotify_event_loop.c | 209 ++++++++++++++++--------------- 1 file changed, 105 insertions(+), 104 deletions(-) diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c index 4e784363e..dd61edea3 100644 --- a/source/qnx/ionotify_event_loop.c +++ b/source/qnx/ionotify_event_loop.c @@ -89,7 +89,7 @@ struct aws_ionotify_event_loop { }; /* Data associated with a subscribed I/O handle. */ -struct aws_ionotify_event_data { +struct aws_ionotify_handle_data { struct aws_allocator *alloc; struct aws_io_handle *handle; struct aws_event_loop *event_loop; @@ -420,11 +420,11 @@ static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *ta aws_task_scheduler_cancel_task(&ionotify_event_loop->scheduler, task); } -/* Map ionotify_event_data to internal ID. */ +/* Map ionotify_handle_data to internal ID. 
*/ static int s_add_handle( struct aws_ionotify_event_loop *ionotify_event_loop, - struct aws_ionotify_event_data *ionotify_event_data) { - AWS_FATAL_ASSERT(s_is_on_callers_thread(ionotify_event_data->event_loop)); + struct aws_ionotify_handle_data *ionotify_handle_data) { + AWS_FATAL_ASSERT(s_is_on_callers_thread(ionotify_handle_data->event_loop)); /* Special constant, _NOTIFY_DATA_MASK, limits the maximum value that can be used as user data in I/O events. */ int max_handle_id = _NOTIFY_DATA_MASK; @@ -433,7 +433,7 @@ static int s_add_handle( AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p: Maximum number of registered handles reached", - (void *)ionotify_event_data->event_loop); + (void *)ionotify_handle_data->event_loop); return AWS_OP_ERR; } @@ -452,26 +452,26 @@ static int s_add_handle( } } while (elem == NULL); - ionotify_event_data->handle_id = next_handle_id; + ionotify_handle_data->handle_id = next_handle_id; ionotify_event_loop->last_handle_id = next_handle_id; - elem->value = ionotify_event_data; + elem->value = ionotify_handle_data; return AWS_OP_SUCCESS; } -struct aws_ionotify_event_data *s_find_handle( +struct aws_ionotify_handle_data *s_find_handle( struct aws_event_loop *event_loop, struct aws_ionotify_event_loop *ionotify_event_loop, int handle_id) { AWS_FATAL_ASSERT(s_is_on_callers_thread(event_loop)); (void)event_loop; - struct aws_ionotify_event_data *ionotify_event_data = NULL; + struct aws_ionotify_handle_data *ionotify_handle_data = NULL; struct aws_hash_element *elem = NULL; aws_hash_table_find(&ionotify_event_loop->handles, (void *)handle_id, &elem); if (elem != NULL) { - ionotify_event_data = elem->value; + ionotify_handle_data = elem->value; } - return ionotify_event_data; + return ionotify_handle_data; } static void s_remove_handle( @@ -492,45 +492,45 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta return; } - struct aws_ionotify_event_data *ionotify_event_data = user_data; - struct aws_event_loop *event_loop = ionotify_event_data->event_loop; + struct aws_ionotify_handle_data *ionotify_handle_data = user_data; + struct aws_event_loop *event_loop = ionotify_handle_data->event_loop; struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Subscribing to events, event mask is %d", (void *)event_loop, - ionotify_event_data->handle->data.fd, - ionotify_event_data->events_subscribed); + ionotify_handle_data->handle->data.fd, + ionotify_handle_data->events_subscribed); - /* Map ionotify_event_data to ID. This ID will be returned with the I/O events from ionotify. */ - if (ionotify_event_data->handle_id == 0) { - s_add_handle(ionotify_event_loop, ionotify_event_data); + /* Map ionotify_handle_data to ID. This ID will be returned with the I/O events from ionotify. */ + if (ionotify_handle_data->handle_id == 0) { + s_add_handle(ionotify_event_loop, ionotify_handle_data); AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Mapped fd to handle ID %d", (void *)event_loop, - ionotify_event_data->handle->data.fd, - ionotify_event_data->handle_id); + ionotify_handle_data->handle->data.fd, + ionotify_handle_data->handle_id); /* I/O events from ionotify will be delivered as pulses with a user-defined 28-bit ID. * SIGEV_PULSE_PRIO_INHERIT means the thread that receives the pulse will run at the initial priority of the * process. 
*/ short pulse_priority = SIGEV_PULSE_PRIO_INHERIT; short pulse_sigev_code = IO_EVENT_PULSE_SIGEV_CODE; SIGEV_PULSE_INT_INIT( - &ionotify_event_data->event, - ionotify_event_data->pulse_connection_id, + &ionotify_handle_data->event, + ionotify_handle_data->pulse_connection_id, pulse_priority, pulse_sigev_code, - ionotify_event_data->handle_id); + ionotify_handle_data->handle_id); /* From the iomgr.h header: * If extended conditions are requested, and they need to be returned in an armed event, the negative of the * satisfied conditions are returned in (io_notify_t).i.event.sigev_code. * Extended conditions are the ones starting with _NOTIFY_CONDE_. * For that feature to work, special bits in the event structure must be set. */ - ionotify_event_data->event.sigev_notify |= SIGEV_FLAG_CODE_UPDATEABLE; - SIGEV_MAKE_UPDATEABLE(&ionotify_event_data->event); + ionotify_handle_data->event.sigev_notify |= SIGEV_FLAG_CODE_UPDATEABLE; + SIGEV_MAKE_UPDATEABLE(&ionotify_handle_data->event); /* The application must register the event by calling MsgRegisterEvent() with the fd processed in ionotify(). * See: @@ -540,78 +540,78 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta * It's enough to register an event only once and then reuse it on followup ionotify rearming calls. * NOTE: If you create a new sigevent for the same file descriptor, with the same flags, you HAVE to register * it again. */ - int rc = MsgRegisterEvent(&ionotify_event_data->event, ionotify_event_data->handle->data.fd); + int rc = MsgRegisterEvent(&ionotify_handle_data->event, ionotify_handle_data->handle->data.fd); int errno_value = errno; if (rc == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Failed to register sigevent, errno %d", (void *)event_loop, - ionotify_event_data->handle->data.fd, + ionotify_handle_data->handle->data.fd, errno_value); /* With sigevent not registered in the system, I/O events from QNX resource managers can't be delivered * to the event loop. Notify about error via a callback and stop subscribing. */ - ionotify_event_data->on_event( - event_loop, ionotify_event_data->handle, AWS_IO_EVENT_TYPE_ERROR, ionotify_event_data->user_data); + ionotify_handle_data->on_event( + event_loop, ionotify_handle_data->handle, AWS_IO_EVENT_TYPE_ERROR, ionotify_handle_data->user_data); return; } - } else if (!ionotify_event_data->is_subscribed) { + } else if (!ionotify_handle_data->is_subscribed) { /* This is a resubscribing task, but unsubscribe happened, so ignore it. */ return; } - ionotify_event_data->is_subscribed = true; + ionotify_handle_data->is_subscribed = true; /* Everyone is always registered for errors. */ int event_mask = _NOTIFY_COND_EXTEN | _NOTIFY_CONDE_ERR | _NOTIFY_CONDE_HUP | _NOTIFY_CONDE_NVAL; - if (ionotify_event_data->events_subscribed & AWS_IO_EVENT_TYPE_READABLE) { + if (ionotify_handle_data->events_subscribed & AWS_IO_EVENT_TYPE_READABLE) { event_mask |= _NOTIFY_COND_INPUT; event_mask |= _NOTIFY_COND_OBAND; } - if (ionotify_event_data->events_subscribed & AWS_IO_EVENT_TYPE_WRITABLE) { + if (ionotify_handle_data->events_subscribed & AWS_IO_EVENT_TYPE_WRITABLE) { event_mask |= _NOTIFY_COND_OUTPUT; } /* Arm resource manager associated with a given file descriptor in edge-triggered mode. * After this call, a corresponding resource manager starts sending events. 
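
For reference, the arm call below in its smallest usable form (a QNX-only sketch assuming <sys/neutrino.h>, <sys/iomgr.h>, and <sys/siginfo.h>; fd, chid, coid, and the payload 7 are illustrative and assumed already created as in the constructor):

    static int s_arm_and_wait_sketch(int fd, int chid, int coid) {
        struct sigevent event;
        /* Deliver a pulse with payload 7 when fd becomes readable. */
        SIGEV_PULSE_INT_INIT(&event, coid, SIGEV_PULSE_PRIO_INHERIT, _PULSE_CODE_MINAVAIL, 7);
        if (MsgRegisterEvent(&event, fd) == -1) {
            return -1;
        }
        int rc = ionotify(fd, _NOTIFY_ACTION_EDGEARM, _NOTIFY_COND_INPUT, &event);
        if (rc == -1) {
            return -1;
        }
        if (rc & _NOTIFY_COND_INPUT) {
            return 0; /* fd already readable: reported via rc, no pulse fires for this edge */
        }
        struct _pulse pulse;
        return MsgReceive(chid, &pulse, sizeof(pulse), NULL) == 0 ? 0 : -1;
    }
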
*/ - int rc = - ionotify(ionotify_event_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, event_mask, &ionotify_event_data->event); + int rc = ionotify( + ionotify_handle_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, event_mask, &ionotify_handle_data->event); int errno_value = errno; if (rc == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Failed to subscribe to I/O events, errno %d", (void *)event_loop, - ionotify_event_data->handle->data.fd, + ionotify_handle_data->handle->data.fd, errno_value); - ionotify_event_data->on_event( - event_loop, ionotify_event_data->handle, AWS_IO_EVENT_TYPE_ERROR, ionotify_event_data->user_data); + ionotify_handle_data->on_event( + event_loop, ionotify_handle_data->handle, AWS_IO_EVENT_TYPE_ERROR, ionotify_handle_data->user_data); return; } /* ionotify can return active conditions if they are among specified. Send notification to kick-start processing fd * if it has desired conditions. */ - /* User-provided field has no space for extended conditions, so set field in ionotify_event_data. */ + /* User-provided field has no space for extended conditions, so set field in ionotify_handle_data. */ if (rc & (_NOTIFY_CONDE_ERR | _NOTIFY_CONDE_NVAL)) { - ionotify_event_data->latest_io_event_types |= AWS_IO_EVENT_TYPE_ERROR; + ionotify_handle_data->latest_io_event_types |= AWS_IO_EVENT_TYPE_ERROR; } if (rc & _NOTIFY_CONDE_HUP) { - ionotify_event_data->latest_io_event_types |= AWS_IO_EVENT_TYPE_CLOSED; + ionotify_handle_data->latest_io_event_types |= AWS_IO_EVENT_TYPE_CLOSED; } if ((rc & (_NOTIFY_COND_OBAND | _NOTIFY_COND_INPUT | _NOTIFY_COND_OUTPUT)) || - ionotify_event_data->latest_io_event_types) { + ionotify_handle_data->latest_io_event_types) { AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Sending a kick-start pulse because fd has desired I/O conditions (rc is %d)", (void *)event_loop, - ionotify_event_data->handle->data.fd, + ionotify_handle_data->handle->data.fd, rc); /* Set _NOTIFY_COND_MASK low bits to ID, the same as ionotify does, so the main loop can process all pulses in * unified manner. 
*/ int kick_start_event_mask = rc & _NOTIFY_COND_MASK; - kick_start_event_mask |= ionotify_event_data->handle_id; + kick_start_event_mask |= ionotify_handle_data->handle_id; int send_rc = MsgSendPulse( ionotify_event_loop->pulse_connection_id, -1, IO_EVENT_KICKSTART_SIGEV_CODE, kick_start_event_mask); if (send_rc == -1) { @@ -619,7 +619,7 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Failed to send a kick-start pulse", (void *)event_loop, - ionotify_event_data->handle->data.fd); + ionotify_handle_data->handle->data.fd); } /* QNX resource manager for POSIX pipes has a bug/undocumented behavior when under specific conditions it stops @@ -644,13 +644,13 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta */ int active_events = rc & (_NOTIFY_COND_OBAND | _NOTIFY_COND_INPUT | _NOTIFY_COND_OUTPUT); if (active_events) { - rc = ionotify(ionotify_event_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, active_events, NULL); + rc = ionotify(ionotify_handle_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, active_events, NULL); if (rc == -1) { AWS_LOGF_ERROR( AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Failed to disarm events", (void *)event_loop, - ionotify_event_data->handle->data.fd); + ionotify_handle_data->handle->data.fd); } } } @@ -665,9 +665,9 @@ static void s_process_io_result( AWS_FATAL_ASSERT(s_is_on_callers_thread(event_loop)); AWS_FATAL_ASSERT(handle->additional_data); - struct aws_ionotify_event_data *ionotify_event_data = handle->additional_data; + struct aws_ionotify_handle_data *ionotify_handle_data = handle->additional_data; - if (!ionotify_event_data->is_subscribed) { + if (!ionotify_handle_data->is_subscribed) { return; } @@ -685,7 +685,7 @@ static void s_process_io_result( int event_types = 0; if (io_op_result->error_code == AWS_IO_SOCKET_CLOSED) { - ionotify_event_data->latest_io_event_types = AWS_IO_EVENT_TYPE_CLOSED; + ionotify_handle_data->latest_io_event_types = AWS_IO_EVENT_TYPE_CLOSED; } if (io_op_result->read_error_code == AWS_IO_READ_WOULD_BLOCK) { event_types |= AWS_IO_EVENT_TYPE_READABLE; @@ -699,32 +699,32 @@ static void s_process_io_result( AWS_LOGF_TRACE( AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Got EWOULDBLOCK, rearming fd", (void *)event_loop, handle->data.fd); /* We're on the event loop thread, just schedule subscribing task. */ - ionotify_event_data->events_subscribed = event_types; + ionotify_handle_data->events_subscribed = event_types; struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data; - aws_task_scheduler_cancel_task(&ionotify_event_loop->scheduler, &ionotify_event_data->resubscribe_task); - aws_task_scheduler_schedule_now(&ionotify_event_loop->scheduler, &ionotify_event_data->resubscribe_task); + aws_task_scheduler_cancel_task(&ionotify_event_loop->scheduler, &ionotify_handle_data->resubscribe_task); + aws_task_scheduler_schedule_now(&ionotify_event_loop->scheduler, &ionotify_handle_data->resubscribe_task); } /* Notify event loop of error conditions. 
     /* Notify event loop of error conditions. */
-    if (ionotify_event_data->latest_io_event_types != 0) {
+    if (ionotify_handle_data->latest_io_event_types != 0) {
         AWS_LOGF_TRACE(
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: fd errored, sending UPDATE_ERROR pulse",
             (void *)event_loop,
-            ionotify_event_data->handle->data.fd);
+            ionotify_handle_data->handle->data.fd);
         struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data;
         int send_rc = MsgSendPulse(
             ionotify_event_loop->pulse_connection_id,
             -1,
             IO_EVENT_UPDATE_ERROR_SIGEV_CODE,
-            ionotify_event_data->handle_id);
+            ionotify_handle_data->handle_id);
         int errno_value = errno;
         if (send_rc == -1) {
             AWS_LOGF_ERROR(
                 AWS_LS_IO_EVENT_LOOP,
                 "id=%p fd=%d: Failed to send UPDATE_ERROR pulse, errno %d",
                 (void *)event_loop,
-                ionotify_event_data->handle->data.fd,
+                ionotify_handle_data->handle->data.fd,
                 errno_value);
         }
     }
@@ -792,56 +792,56 @@ static int s_subscribe_to_io_events(
     AWS_LOGF_TRACE(
         AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Initiate subscription to events", (void *)event_loop, handle->data.fd);

-    struct aws_ionotify_event_data *ionotify_event_data =
-        aws_mem_calloc(event_loop->alloc, 1, sizeof(struct aws_ionotify_event_data));
-    handle->additional_data = ionotify_event_data;
-
-    ionotify_event_data->alloc = event_loop->alloc;
-    ionotify_event_data->handle = handle;
-    ionotify_event_data->event_loop = event_loop;
-    ionotify_event_data->on_event = on_event;
-    ionotify_event_data->events_subscribed = events;
-    ionotify_event_data->pulse_connection_id = ionotify_event_loop->pulse_connection_id;
-    ionotify_event_data->user_data = user_data;
-    ionotify_event_data->handle->update_io_result = s_update_io_result;
+    struct aws_ionotify_handle_data *ionotify_handle_data =
+        aws_mem_calloc(event_loop->alloc, 1, sizeof(struct aws_ionotify_handle_data));
+    handle->additional_data = ionotify_handle_data;
+
+    ionotify_handle_data->alloc = event_loop->alloc;
+    ionotify_handle_data->handle = handle;
+    ionotify_handle_data->event_loop = event_loop;
+    ionotify_handle_data->on_event = on_event;
+    ionotify_handle_data->events_subscribed = events;
+    ionotify_handle_data->pulse_connection_id = ionotify_event_loop->pulse_connection_id;
+    ionotify_handle_data->user_data = user_data;
+    ionotify_handle_data->handle->update_io_result = s_update_io_result;

     aws_task_init(
-        &ionotify_event_data->resubscribe_task,
+        &ionotify_handle_data->resubscribe_task,
         s_subscribe_task,
-        ionotify_event_data,
+        ionotify_handle_data,
         "ionotify_event_loop_resubscribe");

     aws_task_init(
-        &ionotify_event_data->subscribe_task, s_subscribe_task, ionotify_event_data, "ionotify_event_loop_subscribe");
-    s_schedule_task_now(event_loop, &ionotify_event_data->subscribe_task);
+        &ionotify_handle_data->subscribe_task, s_subscribe_task, ionotify_handle_data, "ionotify_event_loop_subscribe");
+    s_schedule_task_now(event_loop, &ionotify_handle_data->subscribe_task);

     return AWS_OP_SUCCESS;
 }

 static void s_free_io_event_resources(void *user_data) {
-    struct aws_ionotify_event_data *event_data = user_data;
-    AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "Releasing ionotify_event_data at %p", user_data);
-    aws_mem_release(event_data->alloc, (void *)event_data);
+    struct aws_ionotify_handle_data *handle_data = user_data;
+    AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "Releasing ionotify_handle_data at %p", user_data);
+    aws_mem_release(handle_data->alloc, (void *)handle_data);
 }

 static void s_unsubscribe_cleanup_task(struct aws_task *task, void *arg, enum aws_task_status status) {
     (void)task;
     (void)status;
-    struct aws_ionotify_event_data *ionotify_event_data = (struct aws_ionotify_event_data *)arg;
+    struct aws_ionotify_handle_data *ionotify_handle_data = (struct aws_ionotify_handle_data *)arg;

-    int rc = MsgUnregisterEvent(&ionotify_event_data->event);
+    int rc = MsgUnregisterEvent(&ionotify_handle_data->event);
     int errno_value = errno;
     if (rc == -1) {
         /* Not much can be done here, so just log the error. */
         AWS_LOGF_ERROR(
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: Failed to unregister sigevent, errno %d",
-            (void *)ionotify_event_data->event_loop,
-            ionotify_event_data->handle->data.fd,
+            (void *)ionotify_handle_data->event_loop,
+            ionotify_handle_data->handle->data.fd,
             errno_value);
     }

-    s_free_io_event_resources(ionotify_event_data);
+    s_free_io_event_resources(ionotify_handle_data);
 }

 static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) {
@@ -850,46 +850,46 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc
     struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data;

     AWS_FATAL_ASSERT(handle->additional_data);
-    struct aws_ionotify_event_data *ionotify_event_data = handle->additional_data;
+    struct aws_ionotify_handle_data *ionotify_handle_data = handle->additional_data;

     /* Disarm resource manager for a given fd. */
     int event_mask = _NOTIFY_COND_EXTEN | _NOTIFY_CONDE_ERR | _NOTIFY_CONDE_HUP | _NOTIFY_CONDE_NVAL;
     event_mask |= _NOTIFY_COND_INPUT | _NOTIFY_CONDE_RDNORM | _NOTIFY_COND_OBAND;
     event_mask |= _NOTIFY_COND_OUTPUT | _NOTIFY_CONDE_WRNORM;
-    int rc = ionotify(ionotify_event_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, event_mask, NULL);
+    int rc = ionotify(ionotify_handle_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, event_mask, NULL);
     int errno_value = errno;
     if (rc == -1) {
         AWS_LOGF_ERROR(
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: Failed to unsubscribe from events, errno %d",
             (void *)event_loop,
-            ionotify_event_data->handle->data.fd,
+            ionotify_handle_data->handle->data.fd,
             errno_value);
         return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
     }

     /* We can't clean up yet because there are scheduled tasks and more events to process, so mark the handle as
      * unsubscribed and schedule a cleanup task. */
-    ionotify_event_data->is_subscribed = false;
+    ionotify_handle_data->is_subscribed = false;

     AWS_LOGF_TRACE(
         AWS_LS_IO_EVENT_LOOP,
         "id=%p fd=%d: Removing from handles map using ID %d",
         (void *)event_loop,
         handle->data.fd,
-        ionotify_event_data->handle_id);
-    s_remove_handle(event_loop, ionotify_event_loop, ionotify_event_data->handle_id);
+        ionotify_handle_data->handle_id);
+    s_remove_handle(event_loop, ionotify_event_loop, ionotify_handle_data->handle_id);
     handle->additional_data = NULL;
     handle->update_io_result = NULL;

-    /* There might be pending tasks for ionotify_event_data, so put a cleanup task. */
+    /* There might be pending tasks for ionotify_handle_data, so put a cleanup task. */
     aws_task_init(
-        &ionotify_event_data->cleanup_task,
+        &ionotify_handle_data->cleanup_task,
         s_unsubscribe_cleanup_task,
-        ionotify_event_data,
+        ionotify_handle_data,
         "ionotify_event_loop_unsubscribe_cleanup");
-    s_schedule_task_now(event_loop, &ionotify_event_data->cleanup_task);
+    s_schedule_task_now(event_loop, &ionotify_handle_data->cleanup_task);

     return AWS_OP_SUCCESS;
 }
@@ -977,8 +977,8 @@ static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pul
     AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Got pulse for handle ID %d", (void *)event_loop, handle_id);

     struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data;
-    struct aws_ionotify_event_data *ionotify_event_data = s_find_handle(event_loop, ionotify_event_loop, handle_id);
-    if (ionotify_event_data == NULL) {
+    struct aws_ionotify_handle_data *ionotify_handle_data = s_find_handle(event_loop, ionotify_event_loop, handle_id);
+    if (ionotify_handle_data == NULL) {
         /* This situation is expected when the corresponding fd is already unsubscribed. */
         AWS_LOGF_DEBUG(
             AWS_LS_IO_EVENT_LOOP,
@@ -988,7 +988,7 @@ static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pul
         return;
     }

-    if (!ionotify_event_data->is_subscribed) {
+    if (!ionotify_handle_data->is_subscribed) {
         return;
     }
@@ -996,7 +996,7 @@ static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pul
         AWS_LS_IO_EVENT_LOOP,
         "id=%p fd=%d: Processing pulse with code %d",
         (void *)event_loop,
-        ionotify_event_data->handle->data.fd,
+        ionotify_handle_data->handle->data.fd,
         pulse->code);
     int event_mask = 0;
     if (pulse->value.sival_int & _NOTIFY_COND_OBAND) {
@@ -1004,7 +1004,7 @@ static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pul
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: fd got out-of-band data",
             (void *)event_loop,
-            ionotify_event_data->handle->data.fd);
+            ionotify_handle_data->handle->data.fd);
         event_mask |= AWS_IO_EVENT_TYPE_READABLE;
     }
     if (pulse->value.sival_int & _NOTIFY_COND_INPUT) {
@@ -1012,7 +1012,7 @@ static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pul
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: fd is readable",
             (void *)event_loop,
-            ionotify_event_data->handle->data.fd);
+            ionotify_handle_data->handle->data.fd);
         event_mask |= AWS_IO_EVENT_TYPE_READABLE;
     }
     if (pulse->value.sival_int & _NOTIFY_COND_OUTPUT) {
@@ -1020,7 +1020,7 @@ static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pul
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: fd is writable",
             (void *)event_loop,
-            ionotify_event_data->handle->data.fd);
+            ionotify_handle_data->handle->data.fd);
         event_mask |= AWS_IO_EVENT_TYPE_WRITABLE;
     }
     if (pulse->value.sival_int & _NOTIFY_COND_EXTEN) {
@@ -1032,22 +1032,23 @@ static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pul
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: fd has extended condition, pulse code is %d",
             (void *)event_loop,
-            ionotify_event_data->handle->data.fd,
+            ionotify_handle_data->handle->data.fd,
             pulse->code);
     }

-    if (ionotify_event_data->latest_io_event_types) {
+    if (ionotify_handle_data->latest_io_event_types) {
         AWS_LOGF_TRACE(
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: latest_io_event_types is non-empty",
             (void *)event_loop,
-            ionotify_event_data->handle->data.fd);
-        event_mask |= ionotify_event_data->latest_io_event_types;
+            ionotify_handle_data->handle->data.fd);
+        event_mask |= ionotify_handle_data->latest_io_event_types;
         /* Reset additional I/O event types so they are not processed twice. */
-        ionotify_event_data->latest_io_event_types = 0;
+        ionotify_handle_data->latest_io_event_types = 0;
     }

-    ionotify_event_data->on_event(event_loop, ionotify_event_data->handle, event_mask, ionotify_event_data->user_data);
+    ionotify_handle_data->on_event(
+        event_loop, ionotify_handle_data->handle, event_mask, ionotify_handle_data->user_data);
 }

 static void aws_event_loop_thread(void *args) {

From be74788797f2727dcd9a008401cb99a1a53625f7 Mon Sep 17 00:00:00 2001
From: Igor Abdrakhimov
Date: Sun, 29 Sep 2024 15:43:14 -0700
Subject: [PATCH 39/39] Refactor cleaning up and resubscribing

---
 source/qnx/ionotify_event_loop.c | 421 +++++++++++++++++--------------
 1 file changed, 229 insertions(+), 192 deletions(-)

diff --git a/source/qnx/ionotify_event_loop.c b/source/qnx/ionotify_event_loop.c
index dd61edea3..1da273566 100644
--- a/source/qnx/ionotify_event_loop.c
+++ b/source/qnx/ionotify_event_loop.c
@@ -55,6 +55,8 @@ static struct aws_event_loop_vtable s_vtable = {

 enum aws_ionotify_event_loop_state { AIELS_BASE_UNINITIALIZED, AIELS_LOOP_STOPPED, AIELS_LOOP_STARTED };

+enum aws_ionotify_subscription_state { AISS_INITIATED, AISS_SIGEVENT_REGISTERED };
+
 struct aws_ionotify_event_loop {
     struct aws_allocator *allocator;
     struct aws_event_loop base;
@@ -95,6 +97,7 @@ struct aws_ionotify_handle_data {
     struct aws_event_loop *event_loop;
     aws_event_loop_on_event_fn *on_event;
     int events_subscribed;
+    int events_to_resubscribe;
     /* A QNX event notification can use only 4 bits for I/O event types (input data, output data, out-of-band data,
      * and the extended flag indicating that additional events happened). So, the latest_io_event_types field contains
      * these additional event types converted to the CRT event loop domain (enum aws_io_event_type). */
@@ -110,6 +113,7 @@ struct aws_ionotify_handle_data {
     int handle_id;
     /* False when handle is unsubscribed, but this struct hasn't been cleaned up yet. */
     bool is_subscribed;
+    enum aws_ionotify_subscription_state subscription_state;
 };

 /* SI_NOTIFY is a QNX special sigev code requesting resource managers to return active event type along with the event
@@ -420,11 +424,11 @@ static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *ta
     aws_task_scheduler_cancel_task(&ionotify_event_loop->scheduler, task);
 }

-/* Map ionotify_handle_data to internal ID. */
+/* Map aws_ionotify_handle_data to internal ID. */
 static int s_add_handle(
     struct aws_ionotify_event_loop *ionotify_event_loop,
-    struct aws_ionotify_handle_data *ionotify_handle_data) {
-    AWS_FATAL_ASSERT(s_is_on_callers_thread(ionotify_handle_data->event_loop));
+    struct aws_ionotify_handle_data *handle_data) {
+    AWS_FATAL_ASSERT(s_is_on_callers_thread(handle_data->event_loop));

     /* Special constant, _NOTIFY_DATA_MASK, limits the maximum value that can be used as user data in I/O events. */
     int max_handle_id = _NOTIFY_DATA_MASK;
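Worth spelling out: because s_process_ionotify later ORs _NOTIFY_COND_* bits into the same pulse payload as the handle ID, every ID handed out here must stay within _NOTIFY_DATA_MASK. A one-line illustrative invariant (not part of the patch):

    /* A handle ID must never overlap the condition bits packed into the same pulse payload. */
    AWS_FATAL_ASSERT((handle_data->handle_id & ~_NOTIFY_DATA_MASK) == 0);
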
@@ -433,7 +437,7 @@ static int s_add_handle(
         AWS_LOGF_ERROR(
             AWS_LS_IO_EVENT_LOOP,
             "id=%p: Maximum number of registered handles reached",
-            (void *)ionotify_handle_data->event_loop);
+            (void *)handle_data->event_loop);
         return AWS_OP_ERR;
     }

@@ -452,9 +456,9 @@ static int s_add_handle(
         }
     } while (elem == NULL);

-    ionotify_handle_data->handle_id = next_handle_id;
+    handle_data->handle_id = next_handle_id;
     ionotify_event_loop->last_handle_id = next_handle_id;
-    elem->value = ionotify_handle_data;
+    elem->value = handle_data;

     return AWS_OP_SUCCESS;
 }
@@ -465,13 +469,13 @@ struct aws_ionotify_handle_data *s_find_handle(
     int handle_id) {
     AWS_FATAL_ASSERT(s_is_on_callers_thread(event_loop));
     (void)event_loop;
-    struct aws_ionotify_handle_data *ionotify_handle_data = NULL;
+    struct aws_ionotify_handle_data *handle_data = NULL;
     struct aws_hash_element *elem = NULL;
     aws_hash_table_find(&ionotify_event_loop->handles, (void *)handle_id, &elem);
     if (elem != NULL) {
-        ionotify_handle_data = elem->value;
+        handle_data = elem->value;
     }
-    return ionotify_handle_data;
+    return handle_data;
 }

 static void s_remove_handle(
@@ -483,135 +487,50 @@ static void s_remove_handle(
     aws_hash_table_remove(&ionotify_event_loop->handles, (void *)handle_id, NULL, NULL);
 }

-/* Scheduled task that performs the actual subscription using ionotify. */
-static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_task_status status) {
-    (void)task;
-
-    /* If task was cancelled, nothing to do. */
-    if (status == AWS_TASK_STATUS_CANCELED) {
-        return;
-    }
-
-    struct aws_ionotify_handle_data *ionotify_handle_data = user_data;
-    struct aws_event_loop *event_loop = ionotify_handle_data->event_loop;
-    struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data;
-
-    AWS_LOGF_TRACE(
-        AWS_LS_IO_EVENT_LOOP,
-        "id=%p fd=%d: Subscribing to events, event mask is %d",
-        (void *)event_loop,
-        ionotify_handle_data->handle->data.fd,
-        ionotify_handle_data->events_subscribed);
-
-    /* Map ionotify_handle_data to ID. This ID will be returned with the I/O events from ionotify. */
-    if (ionotify_handle_data->handle_id == 0) {
-        s_add_handle(ionotify_event_loop, ionotify_handle_data);
-        AWS_LOGF_TRACE(
-            AWS_LS_IO_EVENT_LOOP,
-            "id=%p fd=%d: Mapped fd to handle ID %d",
-            (void *)event_loop,
-            ionotify_handle_data->handle->data.fd,
-            ionotify_handle_data->handle_id);
-        /* I/O events from ionotify will be delivered as pulses with a user-defined 28-bit ID.
-         * SIGEV_PULSE_PRIO_INHERIT means the thread that receives the pulse will run at the initial priority of the
-         * process. */
-        short pulse_priority = SIGEV_PULSE_PRIO_INHERIT;
-        short pulse_sigev_code = IO_EVENT_PULSE_SIGEV_CODE;
-        SIGEV_PULSE_INT_INIT(
-            &ionotify_handle_data->event,
-            ionotify_handle_data->pulse_connection_id,
-            pulse_priority,
-            pulse_sigev_code,
-            ionotify_handle_data->handle_id);
-
-        /* From the iomgr.h header:
-         * If extended conditions are requested, and they need to be returned in an armed event, the negative of the
-         * satisfied conditions are returned in (io_notify_t).i.event.sigev_code.
-         * Extended conditions are the ones starting with _NOTIFY_CONDE_.
-         * For that feature to work, special bits in the event structure must be set. */
-        ionotify_handle_data->event.sigev_notify |= SIGEV_FLAG_CODE_UPDATEABLE;
-        SIGEV_MAKE_UPDATEABLE(&ionotify_handle_data->event);
-
-        /* The application must register the event by calling MsgRegisterEvent() with the fd processed in ionotify().
-         * See:
-         * https://www.qnx.com/developers/docs/8.0/com.qnx.doc.neutrino.lib_ref/topic/i/ionotify.html
-         * https://www.qnx.com/developers/docs/8.0/com.qnx.doc.neutrino.lib_ref/topic/m/msgregisterevent.html
-         *
-         * It's enough to register an event only once and then reuse it on followup ionotify rearming calls.
-         * NOTE: If you create a new sigevent for the same file descriptor, with the same flags, you HAVE to register
-         * it again. */
-        int rc = MsgRegisterEvent(&ionotify_handle_data->event, ionotify_handle_data->handle->data.fd);
-        int errno_value = errno;
-        if (rc == -1) {
-            AWS_LOGF_ERROR(
-                AWS_LS_IO_EVENT_LOOP,
-                "id=%p fd=%d: Failed to register sigevent, errno %d",
-                (void *)event_loop,
-                ionotify_handle_data->handle->data.fd,
-                errno_value);
-            /* With sigevent not registered in the system, I/O events from QNX resource managers can't be delivered
-             * to the event loop. Notify about the error via a callback and stop subscribing. */
-            ionotify_handle_data->on_event(
-                event_loop, ionotify_handle_data->handle, AWS_IO_EVENT_TYPE_ERROR, ionotify_handle_data->user_data);
-            return;
-        }
-    } else if (!ionotify_handle_data->is_subscribed) {
-        /* This is a resubscribing task, but unsubscribe happened, so ignore it. */
-        return;
-    }
-
-    ionotify_handle_data->is_subscribed = true;
+static void s_process_ionotify(
+    struct aws_ionotify_event_loop *ionotify_event_loop,
+    struct aws_ionotify_handle_data *handle_data,
+    int event_mask) {

-    /* Everyone is always registered for errors. */
-    int event_mask = _NOTIFY_COND_EXTEN | _NOTIFY_CONDE_ERR | _NOTIFY_CONDE_HUP | _NOTIFY_CONDE_NVAL;
-    if (ionotify_handle_data->events_subscribed & AWS_IO_EVENT_TYPE_READABLE) {
-        event_mask |= _NOTIFY_COND_INPUT;
-        event_mask |= _NOTIFY_COND_OBAND;
-    }
-    if (ionotify_handle_data->events_subscribed & AWS_IO_EVENT_TYPE_WRITABLE) {
-        event_mask |= _NOTIFY_COND_OUTPUT;
-    }
+    struct aws_event_loop *event_loop = &ionotify_event_loop->base;

     /* Arm resource manager associated with a given file descriptor in edge-triggered mode.
      * After this call, a corresponding resource manager starts sending events. */
-    int rc = ionotify(
-        ionotify_handle_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, event_mask, &ionotify_handle_data->event);
+    int rc = ionotify(handle_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, event_mask, &handle_data->event);
     int errno_value = errno;
     if (rc == -1) {
         AWS_LOGF_ERROR(
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: Failed to subscribe to I/O events, errno %d",
             (void *)event_loop,
-            ionotify_handle_data->handle->data.fd,
+            handle_data->handle->data.fd,
             errno_value);
-        ionotify_handle_data->on_event(
-            event_loop, ionotify_handle_data->handle, AWS_IO_EVENT_TYPE_ERROR, ionotify_handle_data->user_data);
+        handle_data->on_event(event_loop, handle_data->handle, AWS_IO_EVENT_TYPE_ERROR, handle_data->user_data);
         return;
     }

     /* ionotify can return active conditions if they are among those specified. Send a notification to kick-start
      * processing the fd if it has desired conditions. */
-    /* User-provided field has no space for extended conditions, so set field in ionotify_handle_data. */
+    /* User-provided field has no space for extended conditions, so set field in aws_ionotify_handle_data. */
     if (rc & (_NOTIFY_CONDE_ERR | _NOTIFY_CONDE_NVAL)) {
-        ionotify_handle_data->latest_io_event_types |= AWS_IO_EVENT_TYPE_ERROR;
+        handle_data->latest_io_event_types |= AWS_IO_EVENT_TYPE_ERROR;
     }
     if (rc & _NOTIFY_CONDE_HUP) {
-        ionotify_handle_data->latest_io_event_types |= AWS_IO_EVENT_TYPE_CLOSED;
+        handle_data->latest_io_event_types |= AWS_IO_EVENT_TYPE_CLOSED;
     }
-    if ((rc & (_NOTIFY_COND_OBAND | _NOTIFY_COND_INPUT | _NOTIFY_COND_OUTPUT)) ||
-        ionotify_handle_data->latest_io_event_types) {
+    if ((rc & (_NOTIFY_COND_OBAND | _NOTIFY_COND_INPUT | _NOTIFY_COND_OUTPUT)) || handle_data->latest_io_event_types) {
         AWS_LOGF_TRACE(
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: Sending a kick-start pulse because fd has desired I/O conditions (rc is %d)",
             (void *)event_loop,
-            ionotify_handle_data->handle->data.fd,
+            handle_data->handle->data.fd,
             rc);
         /* Set _NOTIFY_COND_MASK low bits to ID, the same as ionotify does, so the main loop can process all pulses in
          * a unified manner. */
         int kick_start_event_mask = rc & _NOTIFY_COND_MASK;
-        kick_start_event_mask |= ionotify_handle_data->handle_id;
+        kick_start_event_mask |= handle_data->handle_id;
         int send_rc = MsgSendPulse(
             ionotify_event_loop->pulse_connection_id, -1, IO_EVENT_KICKSTART_SIGEV_CODE, kick_start_event_mask);
         if (send_rc == -1) {
@@ -619,7 +538,7 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta
                 AWS_LS_IO_EVENT_LOOP,
                 "id=%p fd=%d: Failed to send a kick-start pulse",
                 (void *)event_loop,
-                ionotify_handle_data->handle->data.fd);
+                handle_data->handle->data.fd);
         }

     /* QNX resource manager for POSIX pipes has a bug/undocumented behavior: under specific conditions, it stops
@@ -644,19 +563,148 @@ static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_ta
      */
     int active_events = rc & (_NOTIFY_COND_OBAND | _NOTIFY_COND_INPUT | _NOTIFY_COND_OUTPUT);
     if (active_events) {
-        rc = ionotify(ionotify_handle_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, active_events, NULL);
+        rc = ionotify(handle_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, active_events, NULL);
         if (rc == -1) {
             AWS_LOGF_ERROR(
                 AWS_LS_IO_EVENT_LOOP,
                 "id=%p fd=%d: Failed to disarm events",
                 (void *)event_loop,
-                ionotify_handle_data->handle->data.fd);
+                handle_data->handle->data.fd);
         }
     }
 }

+/* Scheduled task that performs the actual subscription using ionotify. */
+static void s_subscribe_task(struct aws_task *task, void *user_data, enum aws_task_status status) {
+    (void)task;
+
+    /* If task was cancelled, nothing to do. */
+    if (status == AWS_TASK_STATUS_CANCELED) {
+        return;
+    }
+
+    struct aws_ionotify_handle_data *handle_data = user_data;
+    struct aws_event_loop *event_loop = handle_data->event_loop;
+    struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data;
+
+    AWS_LOGF_TRACE(
+        AWS_LS_IO_EVENT_LOOP,
+        "id=%p fd=%d: Subscribing to events, event mask is %d",
+        (void *)event_loop,
+        handle_data->handle->data.fd,
+        handle_data->events_subscribed);
+
+    /* Map aws_ionotify_handle_data to ID. This ID will be returned with the I/O events from ionotify. */
+    if (s_add_handle(ionotify_event_loop, handle_data)) {
+        return;
+    }
+
+    AWS_LOGF_TRACE(
+        AWS_LS_IO_EVENT_LOOP,
+        "id=%p fd=%d: Mapped fd to handle ID %d",
+        (void *)event_loop,
+        handle_data->handle->data.fd,
+        handle_data->handle_id);
+    /* I/O events from ionotify will be delivered as pulses with a user-defined 28-bit ID.
+     * SIGEV_PULSE_PRIO_INHERIT means the thread that receives the pulse will run at the initial priority of the
+     * process. */
+    short pulse_priority = SIGEV_PULSE_PRIO_INHERIT;
+    short pulse_sigev_code = IO_EVENT_PULSE_SIGEV_CODE;
+    SIGEV_PULSE_INT_INIT(
+        &handle_data->event,
+        handle_data->pulse_connection_id,
+        pulse_priority,
+        pulse_sigev_code,
+        handle_data->handle_id);
+
+    /* From the iomgr.h header:
+     * If extended conditions are requested, and they need to be returned in an armed event, the negative of the
+     * satisfied conditions are returned in (io_notify_t).i.event.sigev_code.
+     * Extended conditions are the ones starting with _NOTIFY_CONDE_.
+     * For that feature to work, special bits in the event structure must be set. */
+    handle_data->event.sigev_notify |= SIGEV_FLAG_CODE_UPDATEABLE;
+    SIGEV_MAKE_UPDATEABLE(&handle_data->event);
+
+    /* The application must register the event by calling MsgRegisterEvent() with the fd processed in ionotify().
+     * See:
+     * https://www.qnx.com/developers/docs/8.0/com.qnx.doc.neutrino.lib_ref/topic/i/ionotify.html
+     * https://www.qnx.com/developers/docs/8.0/com.qnx.doc.neutrino.lib_ref/topic/m/msgregisterevent.html
+     *
+     * It's enough to register an event only once and then reuse it on follow-up ionotify rearming calls.
+     * NOTE: If you create a new sigevent for the same file descriptor, with the same flags, you HAVE to register
+     * it again. */
+    int rc = MsgRegisterEvent(&handle_data->event, handle_data->handle->data.fd);
+    int errno_value = errno;
+    if (rc == -1) {
+        AWS_LOGF_ERROR(
+            AWS_LS_IO_EVENT_LOOP,
+            "id=%p fd=%d: Failed to register sigevent, errno %d",
+            (void *)event_loop,
+            handle_data->handle->data.fd,
+            errno_value);
+        /* With the sigevent not registered in the system, I/O events can't be delivered to the event loop. Notify
+         * about the error via a callback and stop subscribing. */
+        handle_data->on_event(event_loop, handle_data->handle, AWS_IO_EVENT_TYPE_ERROR, handle_data->user_data);
+        return;
+    }
+
+    handle_data->subscription_state = AISS_SIGEVENT_REGISTERED;
+
+    handle_data->is_subscribed = true;
+
+    /* Everyone is always registered for errors. */
+    int event_mask = _NOTIFY_COND_EXTEN | _NOTIFY_CONDE_ERR | _NOTIFY_CONDE_HUP | _NOTIFY_CONDE_NVAL;
+    if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_READABLE) {
+        event_mask |= _NOTIFY_COND_INPUT;
+        event_mask |= _NOTIFY_COND_OBAND;
+    }
+    if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_WRITABLE) {
+        event_mask |= _NOTIFY_COND_OUTPUT;
+    }
+
+    s_process_ionotify(ionotify_event_loop, handle_data, event_mask);
+}
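A condensed view of the registration/rearm sequence the task above performs (the QNX calls are the ones shown in the patch; variable setup and error handling are omitted, so treat this as a sketch):

    /* One-time setup: route I/O events for this fd to the loop's pulse channel. */
    SIGEV_PULSE_INT_INIT(&event, pulse_connection_id, SIGEV_PULSE_PRIO_INHERIT, IO_EVENT_PULSE_SIGEV_CODE, handle_id);
    SIGEV_MAKE_UPDATEABLE(&event);
    MsgRegisterEvent(&event, fd);

    /* Repeated: edge-arm before each wait; the registered sigevent is reused as-is. */
    ionotify(fd, _NOTIFY_ACTION_EDGEARM, _NOTIFY_COND_INPUT | _NOTIFY_COND_EXTEN, &event);
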
+
+static void s_resubscribe_task(struct aws_task *task, void *user_data, enum aws_task_status status) {
+    (void)task;
+
+    /* If task was cancelled, nothing to do. */
+    if (status == AWS_TASK_STATUS_CANCELED) {
+        return;
+    }
+
+    struct aws_ionotify_handle_data *handle_data = user_data;
+    struct aws_event_loop *event_loop = handle_data->event_loop;
+    struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data;
+
+    if (!handle_data->is_subscribed) {
+        return;
+    }
+
+    AWS_LOGF_TRACE(
+        AWS_LS_IO_EVENT_LOOP,
+        "id=%p fd=%d: Resubscribing to events, event mask is %d",
+        (void *)event_loop,
+        handle_data->handle->data.fd,
+        handle_data->events_to_resubscribe);
+
+    handle_data->is_subscribed = true;
+
+    int event_mask = 0;
+    if (handle_data->events_to_resubscribe & AWS_IO_EVENT_TYPE_READABLE) {
+        event_mask |= _NOTIFY_COND_INPUT;
+        event_mask |= _NOTIFY_COND_OBAND;
+    }
+    if (handle_data->events_to_resubscribe & AWS_IO_EVENT_TYPE_WRITABLE) {
+        event_mask |= _NOTIFY_COND_OUTPUT;
+    }
+
+    handle_data->events_to_resubscribe = 0;
+
+    s_process_ionotify(ionotify_event_loop, handle_data, event_mask);
+}
+
 static void s_process_io_result(
     struct aws_event_loop *event_loop,
     struct aws_io_handle *handle,
@@ -665,9 +713,9 @@ static void s_process_io_result(
     AWS_FATAL_ASSERT(s_is_on_callers_thread(event_loop));

     AWS_FATAL_ASSERT(handle->additional_data);
-    struct aws_ionotify_handle_data *ionotify_handle_data = handle->additional_data;
+    struct aws_ionotify_handle_data *handle_data = handle->additional_data;

-    if (!ionotify_handle_data->is_subscribed) {
+    if (!handle_data->is_subscribed) {
         return;
     }
@@ -685,7 +733,7 @@ static void s_process_io_result(
     int event_types = 0;
     if (io_op_result->error_code == AWS_IO_SOCKET_CLOSED) {
-        ionotify_handle_data->latest_io_event_types = AWS_IO_EVENT_TYPE_CLOSED;
+        handle_data->latest_io_event_types = AWS_IO_EVENT_TYPE_CLOSED;
     }
     if (io_op_result->read_error_code == AWS_IO_READ_WOULD_BLOCK) {
         event_types |= AWS_IO_EVENT_TYPE_READABLE;
@@ -698,33 +746,31 @@ static void s_process_io_result(
     if (event_types != 0) {
         AWS_LOGF_TRACE(
             AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Got EWOULDBLOCK, rearming fd", (void *)event_loop, handle->data.fd);
-        /* We're on the event loop thread, so just schedule the subscribing task. */
-        ionotify_handle_data->events_subscribed = event_types;
+        /* Mark a newly appeared event for resubscribing and reschedule the resubscribing task in case it's already
+         * scheduled. */
+        handle_data->events_to_resubscribe |= event_types;
         struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data;
-        aws_task_scheduler_cancel_task(&ionotify_event_loop->scheduler, &ionotify_handle_data->resubscribe_task);
-        aws_task_scheduler_schedule_now(&ionotify_event_loop->scheduler, &ionotify_handle_data->resubscribe_task);
+        aws_task_scheduler_cancel_task(&ionotify_event_loop->scheduler, &handle_data->resubscribe_task);
+        aws_task_scheduler_schedule_now(&ionotify_event_loop->scheduler, &handle_data->resubscribe_task);
     }
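One property of the coalescing above deserves a note: each report ORs into events_to_resubscribe and then cancels and reschedules the same task, so any number of op results delivered within one loop iteration collapse into a single ionotify rearm. Schematically (a hypothetical sequence, not code from the patch):

    /* Read path drained the socket on this tick... */
    handle_data->events_to_resubscribe |= AWS_IO_EVENT_TYPE_READABLE;
    /* ...and the write path filled the kernel buffer on the same tick. */
    handle_data->events_to_resubscribe |= AWS_IO_EVENT_TYPE_WRITABLE;
    /* s_resubscribe_task then runs once and rearms both conditions in one ionotify call. */
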
     /* Notify event loop of error conditions. */
-    if (ionotify_handle_data->latest_io_event_types != 0) {
+    if (handle_data->latest_io_event_types != 0) {
         AWS_LOGF_TRACE(
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: fd errored, sending UPDATE_ERROR pulse",
             (void *)event_loop,
-            ionotify_handle_data->handle->data.fd);
+            handle_data->handle->data.fd);
         struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data;
         int send_rc = MsgSendPulse(
-            ionotify_event_loop->pulse_connection_id,
-            -1,
-            IO_EVENT_UPDATE_ERROR_SIGEV_CODE,
-            ionotify_handle_data->handle_id);
+            ionotify_event_loop->pulse_connection_id, -1, IO_EVENT_UPDATE_ERROR_SIGEV_CODE, handle_data->handle_id);
         int errno_value = errno;
         if (send_rc == -1) {
             AWS_LOGF_ERROR(
                 AWS_LS_IO_EVENT_LOOP,
                 "id=%p fd=%d: Failed to send UPDATE_ERROR pulse, errno %d",
                 (void *)event_loop,
-                ionotify_handle_data->handle->data.fd,
+                handle_data->handle->data.fd,
                 errno_value);
         }
     }
@@ -792,56 +838,53 @@ static int s_subscribe_to_io_events(
     AWS_LOGF_TRACE(
         AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: Initiate subscription to events", (void *)event_loop, handle->data.fd);

-    struct aws_ionotify_handle_data *ionotify_handle_data =
+    struct aws_ionotify_handle_data *handle_data =
         aws_mem_calloc(event_loop->alloc, 1, sizeof(struct aws_ionotify_handle_data));
-    handle->additional_data = ionotify_handle_data;
+    handle->additional_data = handle_data;

-    ionotify_handle_data->alloc = event_loop->alloc;
-    ionotify_handle_data->handle = handle;
-    ionotify_handle_data->event_loop = event_loop;
-    ionotify_handle_data->on_event = on_event;
-    ionotify_handle_data->events_subscribed = events;
-    ionotify_handle_data->pulse_connection_id = ionotify_event_loop->pulse_connection_id;
-    ionotify_handle_data->user_data = user_data;
-    ionotify_handle_data->handle->update_io_result = s_update_io_result;
+    handle_data->alloc = event_loop->alloc;
+    handle_data->handle = handle;
+    handle_data->event_loop = event_loop;
+    handle_data->on_event = on_event;
+    handle_data->events_subscribed = events;
+    handle_data->pulse_connection_id = ionotify_event_loop->pulse_connection_id;
+    handle_data->user_data = user_data;
+    handle_data->handle->update_io_result = s_update_io_result;

-    aws_task_init(
-        &ionotify_handle_data->resubscribe_task,
-        s_subscribe_task,
-        ionotify_handle_data,
-        "ionotify_event_loop_resubscribe");
+    aws_task_init(&handle_data->resubscribe_task, s_resubscribe_task, handle_data, "ionotify_event_loop_resubscribe");

-    aws_task_init(
-        &ionotify_handle_data->subscribe_task, s_subscribe_task, ionotify_handle_data, "ionotify_event_loop_subscribe");
-    s_schedule_task_now(event_loop, &ionotify_handle_data->subscribe_task);
+    aws_task_init(&handle_data->subscribe_task, s_subscribe_task, handle_data, "ionotify_event_loop_subscribe");
+    s_schedule_task_now(event_loop, &handle_data->subscribe_task);

     return AWS_OP_SUCCESS;
 }

 static void s_free_io_event_resources(void *user_data) {
     struct aws_ionotify_handle_data *handle_data = user_data;
-    AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "Releasing ionotify_handle_data at %p", user_data);
+    AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "Releasing aws_ionotify_handle_data at %p", user_data);
     aws_mem_release(handle_data->alloc, (void *)handle_data);
 }

 static void s_unsubscribe_cleanup_task(struct aws_task *task, void *arg, enum aws_task_status status) {
     (void)task;
     (void)status;
-    struct aws_ionotify_handle_data *ionotify_handle_data = (struct aws_ionotify_handle_data *)arg;
+    struct aws_ionotify_handle_data *handle_data = (struct aws_ionotify_handle_data *)arg;

-    int rc = MsgUnregisterEvent(&ionotify_handle_data->event);
-    int errno_value = errno;
-    if (rc == -1) {
-        /* Not much can be done here, so just log the error. */
-        AWS_LOGF_ERROR(
-            AWS_LS_IO_EVENT_LOOP,
-            "id=%p fd=%d: Failed to unregister sigevent, errno %d",
-            (void *)ionotify_handle_data->event_loop,
-            ionotify_handle_data->handle->data.fd,
-            errno_value);
+    if (handle_data->subscription_state == AISS_SIGEVENT_REGISTERED) {
+        int rc = MsgUnregisterEvent(&handle_data->event);
+        int errno_value = errno;
+        if (rc == -1) {
+            /* Not much can be done at this point, just log the error. */
+            AWS_LOGF_ERROR(
+                AWS_LS_IO_EVENT_LOOP,
+                "id=%p fd=%d: Failed to unregister sigevent, errno %d",
+                (void *)handle_data->event_loop,
+                handle_data->handle->data.fd,
+                errno_value);
+        }
     }

-    s_free_io_event_resources(ionotify_handle_data);
+    s_free_io_event_resources(handle_data);
 }

 static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) {
@@ -850,46 +893,47 @@ static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struc
     struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data;

     AWS_FATAL_ASSERT(handle->additional_data);
-    struct aws_ionotify_handle_data *ionotify_handle_data = handle->additional_data;
+    struct aws_ionotify_handle_data *handle_data = handle->additional_data;

     /* Disarm resource manager for a given fd. */
     int event_mask = _NOTIFY_COND_EXTEN | _NOTIFY_CONDE_ERR | _NOTIFY_CONDE_HUP | _NOTIFY_CONDE_NVAL;
-    event_mask |= _NOTIFY_COND_INPUT | _NOTIFY_CONDE_RDNORM | _NOTIFY_COND_OBAND;
-    event_mask |= _NOTIFY_COND_OUTPUT | _NOTIFY_CONDE_WRNORM;
-    int rc = ionotify(ionotify_handle_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, event_mask, NULL);
+    if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_READABLE) {
+        event_mask |= _NOTIFY_COND_INPUT;
+        event_mask |= _NOTIFY_COND_OBAND;
+    }
+    if (handle_data->events_subscribed & AWS_IO_EVENT_TYPE_WRITABLE) {
+        event_mask |= _NOTIFY_COND_OUTPUT;
+    }
+    int rc = ionotify(handle_data->handle->data.fd, _NOTIFY_ACTION_EDGEARM, event_mask, NULL);
     int errno_value = errno;
     if (rc == -1) {
         AWS_LOGF_ERROR(
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: Failed to unsubscribe from events, errno %d",
             (void *)event_loop,
-            ionotify_handle_data->handle->data.fd,
+            handle_data->handle->data.fd,
             errno_value);
-        return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
     }

     /* We can't clean up yet because there are scheduled tasks and more events to process, so mark the handle as
      * unsubscribed and schedule a cleanup task. */
-    ionotify_handle_data->is_subscribed = false;
+    handle_data->is_subscribed = false;

     AWS_LOGF_TRACE(
         AWS_LS_IO_EVENT_LOOP,
         "id=%p fd=%d: Removing from handles map using ID %d",
         (void *)event_loop,
         handle->data.fd,
-        ionotify_handle_data->handle_id);
-    s_remove_handle(event_loop, ionotify_event_loop, ionotify_handle_data->handle_id);
+        handle_data->handle_id);
+    s_remove_handle(event_loop, ionotify_event_loop, handle_data->handle_id);
     handle->additional_data = NULL;
     handle->update_io_result = NULL;

-    /* There might be pending tasks for ionotify_handle_data, so put a cleanup task. */
+    /* There might be pending tasks for handle_data, so put a cleanup task. */
     aws_task_init(
-        &ionotify_handle_data->cleanup_task,
-        s_unsubscribe_cleanup_task,
-        ionotify_handle_data,
-        "ionotify_event_loop_unsubscribe_cleanup");
-    s_schedule_task_now(event_loop, &ionotify_handle_data->cleanup_task);
+        &handle_data->cleanup_task, s_unsubscribe_cleanup_task, handle_data, "ionotify_event_loop_unsubscribe_cleanup");
+    s_schedule_task_now(event_loop, &handle_data->cleanup_task);

     return AWS_OP_SUCCESS;
 }
@@ -977,8 +1021,8 @@ static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pul
     AWS_LOGF_TRACE(AWS_LS_IO_EVENT_LOOP, "id=%p: Got pulse for handle ID %d", (void *)event_loop, handle_id);

     struct aws_ionotify_event_loop *ionotify_event_loop = event_loop->impl_data;
-    struct aws_ionotify_handle_data *ionotify_handle_data = s_find_handle(event_loop, ionotify_event_loop, handle_id);
-    if (ionotify_handle_data == NULL) {
+    struct aws_ionotify_handle_data *handle_data = s_find_handle(event_loop, ionotify_event_loop, handle_id);
+    if (handle_data == NULL) {
         /* This situation is expected when the corresponding fd is already unsubscribed. */
         AWS_LOGF_DEBUG(
             AWS_LS_IO_EVENT_LOOP,
@@ -988,7 +1032,7 @@ static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pul
         return;
     }

-    if (!ionotify_handle_data->is_subscribed) {
+    if (!handle_data->is_subscribed) {
         return;
     }
@@ -996,7 +1040,7 @@ static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pul
         AWS_LS_IO_EVENT_LOOP,
         "id=%p fd=%d: Processing pulse with code %d",
         (void *)event_loop,
-        ionotify_handle_data->handle->data.fd,
+        handle_data->handle->data.fd,
         pulse->code);
     int event_mask = 0;
     if (pulse->value.sival_int & _NOTIFY_COND_OBAND) {
@@ -1004,7 +1048,7 @@ static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pul
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: fd got out-of-band data",
             (void *)event_loop,
-            ionotify_handle_data->handle->data.fd);
+            handle_data->handle->data.fd);
         event_mask |= AWS_IO_EVENT_TYPE_READABLE;
     }
     if (pulse->value.sival_int & _NOTIFY_COND_INPUT) {
         AWS_LOGF_TRACE(
-            AWS_LS_IO_EVENT_LOOP,
-            "id=%p fd=%d: fd is readable",
-            (void *)event_loop,
-            ionotify_handle_data->handle->data.fd);
+            AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: fd is readable", (void *)event_loop, handle_data->handle->data.fd);
         event_mask |= AWS_IO_EVENT_TYPE_READABLE;
     }
     if (pulse->value.sival_int & _NOTIFY_COND_OUTPUT) {
         AWS_LOGF_TRACE(
-            AWS_LS_IO_EVENT_LOOP,
-            "id=%p fd=%d: fd is writable",
-            (void *)event_loop,
-            ionotify_handle_data->handle->data.fd);
+            AWS_LS_IO_EVENT_LOOP, "id=%p fd=%d: fd is writable", (void *)event_loop, handle_data->handle->data.fd);
         event_mask |= AWS_IO_EVENT_TYPE_WRITABLE;
     }
     if (pulse->value.sival_int & _NOTIFY_COND_EXTEN) {
@@ -1032,22 +1070,22 @@ static void s_process_pulse(struct aws_event_loop *event_loop, const struct _pul
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: fd has extended condition, pulse code is %d",
             (void *)event_loop,
-            ionotify_handle_data->handle->data.fd,
+            handle_data->handle->data.fd,
             pulse->code);
     }

-    if (ionotify_handle_data->latest_io_event_types) {
+    if (handle_data->latest_io_event_types) {
         AWS_LOGF_TRACE(
             AWS_LS_IO_EVENT_LOOP,
             "id=%p fd=%d: latest_io_event_types is non-empty",
             (void *)event_loop,
-            ionotify_handle_data->handle->data.fd);
-        event_mask |= ionotify_handle_data->latest_io_event_types;
+            handle_data->handle->data.fd);
+        event_mask |= handle_data->latest_io_event_types;
         /* Reset additional I/O event types so they are not processed twice. */
-        ionotify_handle_data->latest_io_event_types = 0;
+        handle_data->latest_io_event_types = 0;
     }

-    ionotify_handle_data->on_event(
-        event_loop, ionotify_handle_data->handle, event_mask, ionotify_handle_data->user_data);
+    handle_data->on_event(event_loop, handle_data->handle, event_mask, handle_data->user_data);
 }

 static void aws_event_loop_thread(void *args) {
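
For context, this is how a consumer wires a handle into the refactored loop. aws_event_loop_subscribe_to_io_events is the public entry point that lands in s_subscribe_to_io_events above; the callback body and the pipe fd are illustrative:

    static void s_on_pipe_readable(
        struct aws_event_loop *event_loop,
        struct aws_io_handle *handle,
        int events,
        void *user_data) {
        (void)event_loop;
        (void)user_data;
        if (events & AWS_IO_EVENT_TYPE_READABLE) {
            uint8_t buffer[512];
            ssize_t read_rc = read(handle->data.fd, buffer, sizeof(buffer));
            (void)read_rc; /* process data; report EWOULDBLOCK back so the loop rearms the fd */
        }
    }

    int subscribe_pipe_read_end(struct aws_event_loop *event_loop, struct aws_io_handle *pipe_handle) {
        return aws_event_loop_subscribe_to_io_events(
            event_loop, pipe_handle, AWS_IO_EVENT_TYPE_READABLE, s_on_pipe_readable, NULL);
    }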