Fix C++ One Definition Rule (ODR) Violations #528

Merged: 1 commit, Nov 8, 2022
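
Background (added context, not part of the PR itself): source/posix/pipe.c, source/posix/socket.c, and source/windows/iocp/pipe.c each declared their own struct write_request with different members. That is unproblematic in isolation, but once the translation units are linked into one program and held to the C++ One Definition Rule (for example when the sources are built as C++, or when LTO-based checkers such as gcc -flto with -Wodr or -Wlto-type-mismatch compare type layouts), two differing definitions of the same struct tag are reported as ODR violations. Renaming the structs to pipe_write_request and socket_write_request gives each translation unit its own type name. A minimal, hypothetical sketch of the collision (file names and condensed fields invented for illustration):

/* tu_pipe.c -- one translation unit's view of the type */
struct write_request {
    unsigned long num_bytes_written; /* pipe-style progress tracking */
};

unsigned long pipe_progress(struct write_request *r) {
    return r->num_bytes_written;
}

/* tu_socket.c -- a second translation unit reuses the same tag with a different layout */
struct write_request {
    void *write_user_data; /* socket-style callback state */
    int error_code;
};

int socket_error(struct write_request *r) {
    return r->error_code;
}

/* Linking tu_pipe.o and tu_socket.o is tolerated by plain C linkage, but the same two
 * definitions violate the C++ One Definition Rule, and LTO type-merging diagnostics
 * can flag the mismatch as well. */
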
source/posix/pipe.c (16 changes: 8 additions & 8 deletions)

@@ -46,7 +46,7 @@ struct read_end_impl {
bool is_subscribed;
};

- struct write_request {
+ struct pipe_write_request {
struct aws_byte_cursor original_cursor;
struct aws_byte_cursor cursor; /* tracks progress of write */
size_t num_bytes_written;
@@ -65,11 +65,11 @@ struct write_end_impl {
struct aws_linked_list write_list;

/* Valid while invoking user callback on a completed write request. */
- struct write_request *currently_invoking_write_callback;
+ struct pipe_write_request *currently_invoking_write_callback;

bool is_writable;

- /* Future optimization idea: avoid an allocation on each write by keeping 1 pre-allocated write_request around
+ /* Future optimization idea: avoid an allocation on each write by keeping 1 pre-allocated pipe_write_request around
* and re-using it whenever possible */
};

@@ -410,14 +410,14 @@ static bool s_write_end_complete_front_write_request(struct aws_pipe_write_end *

AWS_ASSERT(!aws_linked_list_empty(&write_impl->write_list));
struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list);
- struct write_request *request = AWS_CONTAINER_OF(node, struct write_request, list_node);
+ struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node);

struct aws_allocator *alloc = write_impl->alloc;

/* Let the write-end know that a callback is in process, so the write-end can inform the callback
* whether it resulted in clean_up() being called. */
bool write_end_cleaned_up_during_callback = false;
- struct write_request *prev_invoking_request = write_impl->currently_invoking_write_callback;
+ struct pipe_write_request *prev_invoking_request = write_impl->currently_invoking_write_callback;
write_impl->currently_invoking_write_callback = request;

if (request->user_callback) {
@@ -441,7 +441,7 @@ static void s_write_end_process_requests(struct aws_pipe_write_end *write_end) {

while (!aws_linked_list_empty(&write_impl->write_list)) {
struct aws_linked_list_node *node = aws_linked_list_front(&write_impl->write_list);
- struct write_request *request = AWS_CONTAINER_OF(node, struct write_request, list_node);
+ struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node);

int completed_error_code = AWS_ERROR_SUCCESS;

@@ -522,7 +522,7 @@ int aws_pipe_write(
return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY);
}

- struct write_request *request = aws_mem_calloc(write_impl->alloc, 1, sizeof(struct write_request));
+ struct pipe_write_request *request = aws_mem_calloc(write_impl->alloc, 1, sizeof(struct pipe_write_request));
if (!request) {
return AWS_OP_ERR;
}
@@ -571,7 +571,7 @@ int aws_pipe_clean_up_write_end(struct aws_pipe_write_end *write_end) {
/* Force any outstanding write requests to complete with an error status. */
while (!aws_linked_list_empty(&write_impl->write_list)) {
struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list);
- struct write_request *request = AWS_CONTAINER_OF(node, struct write_request, list_node);
+ struct pipe_write_request *request = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node);
if (request->user_callback) {
request->user_callback(NULL, AWS_IO_BROKEN_PIPE, request->original_cursor, request->user_data);
}
source/posix/socket.c (21 changes: 11 additions & 10 deletions)

@@ -1296,7 +1296,7 @@ int aws_socket_set_options(struct aws_socket *socket, const struct aws_socket_op
return AWS_OP_SUCCESS;
}

- struct write_request {
+ struct socket_write_request {
struct aws_byte_cursor cursor_cpy;
aws_socket_on_write_completed_fn *written_fn;
void *write_user_data;
@@ -1419,15 +1419,15 @@ int aws_socket_close(struct aws_socket *socket) {

while (!aws_linked_list_empty(&socket_impl->written_queue)) {
struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue);
- struct write_request *write_request = AWS_CONTAINER_OF(node, struct write_request, node);
+ struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node);
size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len;
write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data);
aws_mem_release(socket->allocator, write_request);
}

while (!aws_linked_list_empty(&socket_impl->write_queue)) {
struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->write_queue);
- struct write_request *write_request = AWS_CONTAINER_OF(node, struct write_request, node);
+ struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node);
size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len;
write_request->written_fn(socket, AWS_IO_SOCKET_CLOSED, bytes_written, write_request->write_user_data);
aws_mem_release(socket->allocator, write_request);
@@ -1483,7 +1483,7 @@ static void s_written_task(struct aws_task *task, void *arg, enum aws_task_statu
struct aws_linked_list_node *stop_after = aws_linked_list_back(&socket_impl->written_queue);
do {
struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->written_queue);
- struct write_request *write_request = AWS_CONTAINER_OF(node, struct write_request, node);
+ struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node);
size_t bytes_written = write_request->original_buffer_len - write_request->cursor_cpy.len;
write_request->written_fn(socket, write_request->error_code, bytes_written, write_request->write_user_data);
aws_mem_release(socket_impl->allocator, write_request);
@@ -1500,7 +1500,7 @@ static void s_written_task(struct aws_task *task, void *arg, enum aws_task_statu
* 1st scenario, someone called aws_socket_write() and we want to try writing now, so an error can be returned
* immediately if something bad has happened to the socket. In this case, `parent_request` is set.
* 2nd scenario, the event loop notified us that the socket went writable. In this case `parent_request` is NULL */
- static int s_process_write_requests(struct aws_socket *socket, struct write_request *parent_request) {
+ static int s_process_socket_write_requests(struct aws_socket *socket, struct socket_write_request *parent_request) {
struct posix_socket *socket_impl = socket->impl;

if (parent_request) {
@@ -1525,7 +1525,7 @@ static int s_process_write_requests(struct aws_socket *socket, struct write_requ
/* if a close call happens in the middle, this queue will have been cleaned out from under us. */
while (!aws_linked_list_empty(&socket_impl->write_queue)) {
struct aws_linked_list_node *node = aws_linked_list_front(&socket_impl->write_queue);
- struct write_request *write_request = AWS_CONTAINER_OF(node, struct write_request, node);
+ struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node);

AWS_LOGF_TRACE(
AWS_LS_IO_SOCKET,
@@ -1601,7 +1601,7 @@ static int s_process_write_requests(struct aws_socket *socket, struct write_requ
if (purge) {
while (!aws_linked_list_empty(&socket_impl->write_queue)) {
struct aws_linked_list_node *node = aws_linked_list_pop_front(&socket_impl->write_queue);
- struct write_request *write_request = AWS_CONTAINER_OF(node, struct write_request, node);
+ struct socket_write_request *write_request = AWS_CONTAINER_OF(node, struct socket_write_request, node);

/* If this fn was invoked directly from aws_socket_write(), don't invoke the error callback
* as the user will be able to rely on the return value from aws_socket_write() */
@@ -1677,7 +1677,7 @@ static void s_on_socket_io_event(
* have been cleaned up, so this next branch is safe. */
if (socket_impl->currently_subscribed && events & AWS_IO_EVENT_TYPE_WRITABLE) {
AWS_LOGF_TRACE(AWS_LS_IO_SOCKET, "id=%p fd=%d: is writable", (void *)socket, socket->io_handle.data.fd);
- s_process_write_requests(socket, NULL);
+ s_process_socket_write_requests(socket, NULL);
}

end_check:
@@ -1850,7 +1850,7 @@ int aws_socket_write(

AWS_ASSERT(written_fn);
struct posix_socket *socket_impl = socket->impl;
- struct write_request *write_request = aws_mem_calloc(socket->allocator, 1, sizeof(struct write_request));
+ struct socket_write_request *write_request =
+     aws_mem_calloc(socket->allocator, 1, sizeof(struct socket_write_request));

if (!write_request) {
return AWS_OP_ERR;
@@ -1862,7 +1863,7 @@
write_request->cursor_cpy = *cursor;
aws_linked_list_push_back(&socket_impl->write_queue, &write_request->node);

- return s_process_write_requests(socket, write_request);
+ return s_process_socket_write_requests(socket, write_request);
}

int aws_socket_get_error(struct aws_socket *socket) {
source/windows/iocp/pipe.c (12 changes: 6 additions & 6 deletions)

@@ -90,7 +90,7 @@ enum write_end_state {
};

/* Data describing an async write request */
- struct write_request {
+ struct pipe_write_request {
struct aws_byte_cursor original_cursor;
aws_pipe_on_write_completed_fn *user_callback;
void *user_data;
@@ -106,10 +106,10 @@ struct write_end_impl {
struct aws_io_handle handle;
struct aws_event_loop *event_loop;

- /* List of currently active write_requests */
+ /* List of currently active pipe_write_requests */
struct aws_linked_list write_list;

- /* Future optimization idea: avoid an allocation on each write by keeping 1 pre-allocated write_request around
+ /* Future optimization idea: avoid an allocation on each write by keeping 1 pre-allocated pipe_write_request around
* and re-using it whenever possible */
};

@@ -709,7 +709,7 @@ int aws_pipe_clean_up_write_end(struct aws_pipe_write_end *write_end) {
/* Inform outstanding writes about the clean up. */
while (!aws_linked_list_empty(&write_impl->write_list)) {
struct aws_linked_list_node *node = aws_linked_list_pop_front(&write_impl->write_list);
- struct write_request *write_req = AWS_CONTAINER_OF(node, struct write_request, list_node);
+ struct pipe_write_request *write_req = AWS_CONTAINER_OF(node, struct pipe_write_request, list_node);
write_req->is_write_end_cleaned_up = true;
}

@@ -739,7 +739,7 @@ int aws_pipe_write(
}
DWORD num_bytes_to_write = (DWORD)src_buffer.len;

- struct write_request *write = aws_mem_acquire(write_impl->alloc, sizeof(struct write_request));
+ struct pipe_write_request *write = aws_mem_acquire(write_impl->alloc, sizeof(struct pipe_write_request));
if (!write) {
return AWS_OP_ERR;
}
@@ -778,7 +778,7 @@ void s_write_end_on_write_completion(
(void)event_loop;
(void)num_bytes_transferred;

- struct write_request *write_request = AWS_CONTAINER_OF(overlapped, struct write_request, overlapped);
+ struct pipe_write_request *write_request = AWS_CONTAINER_OF(overlapped, struct pipe_write_request, overlapped);
struct aws_pipe_write_end *write_end = write_request->is_write_end_cleaned_up ? NULL : overlapped->user_data;

AWS_ASSERT((num_bytes_transferred == write_request->original_cursor.len) || status_code);
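
A note on the AWS_CONTAINER_OF calls that appear in nearly every changed line: the write queues here are intrusive linked lists, so each request embeds its aws_linked_list_node and the owning struct is recovered from the node pointer. A simplified sketch of that idiom (generic container-of macro with a hypothetical example struct, not the aws-c-common definition):

#include <stddef.h>

/* Recover a pointer to the enclosing struct from a pointer to one of its members. */
#define CONTAINER_OF(ptr, type, member) ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node {
    struct list_node *next;
};

/* Hypothetical stand-in for pipe_write_request: the list node lives inside the
 * request, so queuing a request needs no separate allocation. */
struct example_write_request {
    unsigned long num_bytes_written;
    struct list_node list_node;
};

/* Given a node popped from a queue, get back to the request that contains it. */
struct example_write_request *request_from_node(struct list_node *node) {
    return CONTAINER_OF(node, struct example_write_request, list_node);
}
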