diff --git a/api/s2n.h b/api/s2n.h index 0f78ff458e5..7c120391a11 100644 --- a/api/s2n.h +++ b/api/s2n.h @@ -1812,6 +1812,65 @@ S2N_API extern int s2n_connection_prefer_throughput(struct s2n_connection *conn) */ S2N_API extern int s2n_connection_prefer_low_latency(struct s2n_connection *conn); +/** + * Configure the connection to reduce potentially expensive calls to recv. + * + * If this setting is disabled, s2n-tls will call read twice for every TLS record, + * which can be expensive but ensures that s2n-tls will always attempt to read the + * exact number of bytes it requires. If this setting is enabled, s2n-tls will + * instead reduce the number of calls to read by attempting to read as much data + * as possible in each read call, storing the extra in the existing IO buffers. + * This may cause it to request more data than will ever actually be available. + * + * There is no additional memory cost of enabling this setting. It reuses the + * existing IO buffers. + * + * This setting is disabled by default. Depending on how your application detects + * data available for reading, buffering reads may break your event loop. + * In particular, note that: + * + * 1. File descriptor reads or calls to your custom s2n_recv_cb may request more + * data than is available. Reads must return partial data when available rather + * than blocking until all requested data is available. + * + * 2. s2n_negotiate may read and buffer application data records. + * You must call s2n_recv at least once after negotiation to ensure that you + * handle any buffered data. + * + * 3. s2n_recv may read and buffer more records than it parses and decrypts. + * You must call s2n_recv until it reports S2N_ERR_T_BLOCKED, rather than just + * until it reports S2N_SUCCESS. + * + * 4. s2n_peek reports available decrypted data. It does not report any data + * buffered by this feature. + * + * 5. s2n_connection_release_buffers will not release the input buffer if it + * contains buffered data. 
+ * + * For example: if your event loop uses `poll`, you will receive a POLLIN event + * for your read file descriptor when new data is available. When you call s2n_recv + * to read that data, s2n-tls reads one or more TLS records from the file descriptor. + * If you stop calling s2n_recv before it reports S2N_ERR_T_BLOCKED, some of those + * records may remain in s2n-tls's read buffer. If you read part of a record, + * s2n_peek will report the remainder of that record as available. But if you don't + * read any of a record, it remains encrypted and is not reported by s2n_peek. + * And because the data is buffered in s2n-tls instead of in the file descriptor, + * another call to `poll` will NOT report any more data available. Your application + * may hang waiting for more data. + * + * @warning This feature cannot be enabled for a connection that will enable kTLS for receiving. + * + * @warning This feature may work with blocking IO, if used carefully. Your blocking + * IO must support partial reads (so MSG_WAITALL cannot be used). You will need + * to know how much data will eventually be available rather than relying on + * S2N_ERR_T_BLOCKED as noted in #3 above. + * + * @param conn The connection object being updated + * @param enabled Set to `true` to enable, `false` to disable. + * @returns S2N_SUCCESS on success. S2N_FAILURE on failure + */ +S2N_API extern int s2n_connection_set_recv_buffering(struct s2n_connection *conn, bool enabled); + /** * Configure the connection to free IO buffers when they are not currently in use. * diff --git a/api/unstable/ktls.h b/api/unstable/ktls.h index 0d9c28fd588..289698000bc 100644 --- a/api/unstable/ktls.h +++ b/api/unstable/ktls.h @@ -38,6 +38,7 @@ * The TLS kernel module currently doesn't support renegotiation. * - By default, you must negotiate TLS1.2. See s2n_config_ktls_enable_tls13 * for the requirements to also support TLS1.3. 
+ * - You must not use s2n_connection_set_recv_buffering */ /** diff --git a/stuffer/s2n_stuffer.c b/stuffer/s2n_stuffer.c index 6e6814cd54a..5820a55c69b 100644 --- a/stuffer/s2n_stuffer.c +++ b/stuffer/s2n_stuffer.c @@ -215,7 +215,7 @@ int s2n_stuffer_wipe_n(struct s2n_stuffer *stuffer, const uint32_t size) bool s2n_stuffer_is_consumed(struct s2n_stuffer *stuffer) { - return stuffer && (stuffer->read_cursor == stuffer->write_cursor); + return stuffer && (stuffer->read_cursor == stuffer->write_cursor) && !stuffer->tainted; } int s2n_stuffer_wipe(struct s2n_stuffer *stuffer) diff --git a/tests/cbmc/proofs/s2n_stuffer_is_consumed/s2n_stuffer_is_consumed_harness.c b/tests/cbmc/proofs/s2n_stuffer_is_consumed/s2n_stuffer_is_consumed_harness.c index 4ec0db70de7..16cba19b8b7 100644 --- a/tests/cbmc/proofs/s2n_stuffer_is_consumed/s2n_stuffer_is_consumed_harness.c +++ b/tests/cbmc/proofs/s2n_stuffer_is_consumed/s2n_stuffer_is_consumed_harness.c @@ -33,10 +33,13 @@ void s2n_stuffer_is_consumed_harness() save_byte_from_blob(&stuffer->blob, &old_byte_from_stuffer); /* Operation under verification. */ - if (s2n_stuffer_is_consumed(stuffer)) { - assert(stuffer->read_cursor == old_stuffer.write_cursor); + bool result = s2n_stuffer_is_consumed(stuffer); + if (old_stuffer.read_cursor != old_stuffer.write_cursor) { + assert(result == false); + } else if (old_stuffer.tainted) { + assert(result == false); } else { - assert(stuffer->read_cursor != old_stuffer.write_cursor); + assert(result == true); } /* Post-conditions. */ diff --git a/tests/unit/s2n_connection_size_test.c b/tests/unit/s2n_connection_size_test.c index 9e53914b4cd..7847ad7de8b 100644 --- a/tests/unit/s2n_connection_size_test.c +++ b/tests/unit/s2n_connection_size_test.c @@ -45,7 +45,7 @@ int main(int argc, char **argv) } /* Carefully consider any increases to this number. 
*/ - const uint16_t max_connection_size = 4290; + const uint16_t max_connection_size = 4350; const uint16_t min_connection_size = max_connection_size * 0.9; size_t connection_size = sizeof(struct s2n_connection); diff --git a/tests/unit/s2n_ktls_io_test.c b/tests/unit/s2n_ktls_io_test.c index eeaefad05f7..2f5aafcd922 100644 --- a/tests/unit/s2n_ktls_io_test.c +++ b/tests/unit/s2n_ktls_io_test.c @@ -1118,7 +1118,8 @@ int main(int argc, char **argv) EXPECT_SUCCESS(s2n_ktls_read_full_record(conn, &record_type)); EXPECT_EQUAL(record_type, TLS_ALERT); - EXPECT_EQUAL(conn->in.blob.allocated, max_frag_len); + EXPECT_EQUAL(conn->buffer_in.blob.allocated, max_frag_len); + EXPECT_EQUAL(conn->in.blob.size, max_frag_len); EXPECT_EQUAL(s2n_stuffer_data_available(&conn->in), max_frag_len); uint8_t *read = s2n_stuffer_raw_read(&conn->in, max_frag_len); EXPECT_BYTEARRAY_EQUAL(read, test_data, max_frag_len); @@ -1152,7 +1153,8 @@ int main(int argc, char **argv) /* Verify that conn->in reflects the correct size of the "record" * read and doesn't just assume the maximum read size. 
*/ - EXPECT_EQUAL(conn->in.blob.allocated, max_frag_len); + EXPECT_EQUAL(conn->buffer_in.blob.allocated, max_frag_len); + EXPECT_EQUAL(conn->in.blob.size, small_frag_len); EXPECT_EQUAL(s2n_stuffer_data_available(&conn->in), small_frag_len); uint8_t *read = s2n_stuffer_raw_read(&conn->in, small_frag_len); EXPECT_BYTEARRAY_EQUAL(read, test_data, small_frag_len); @@ -1172,6 +1174,8 @@ int main(int argc, char **argv) /* Write half the test data into conn->in */ const size_t offset = sizeof(test_data) / 2; EXPECT_SUCCESS(s2n_stuffer_write_bytes(&conn->in, test_data, offset)); + /* Resize conn->buffer_in to match conn->in */ + EXPECT_SUCCESS(s2n_stuffer_resize(&conn->buffer_in, offset)); /* Write the other half into a new record */ size_t written = 0; @@ -1201,6 +1205,41 @@ int main(int argc, char **argv) read = s2n_stuffer_raw_read(&conn->in, offset_iovec.iov_len); EXPECT_BYTEARRAY_EQUAL(read, offset_iovec.iov_base, offset_iovec.iov_len); }; + + /* Test: Receive multiple records */ + { + const size_t small_frag_len = 10; + EXPECT_TRUE(small_frag_len < max_frag_len); + EXPECT_TRUE(small_frag_len < sizeof(test_data)); + struct iovec small_test_iovec = test_iovec; + small_test_iovec.iov_len = small_frag_len; + + DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_CLIENT), + s2n_connection_ptr_free); + EXPECT_NOT_NULL(conn); + + DEFER_CLEANUP(struct s2n_test_ktls_io_stuffer_pair pair = { 0 }, + s2n_ktls_io_stuffer_pair_free); + EXPECT_OK(s2n_test_init_ktls_io_stuffer(conn, conn, &pair)); + struct s2n_test_ktls_io_stuffer *ctx = &pair.client_in; + + for (size_t i = 0; i < 100; i++) { + size_t written = 0; + EXPECT_OK(s2n_ktls_sendmsg(ctx, TLS_ALERT, &small_test_iovec, 1, &blocked, &written)); + EXPECT_EQUAL(written, small_frag_len); + + uint8_t record_type = 0; + EXPECT_SUCCESS(s2n_ktls_read_full_record(conn, &record_type)); + EXPECT_EQUAL(record_type, TLS_ALERT); + EXPECT_EQUAL(s2n_stuffer_data_available(&conn->in), written); + uint8_t *read = 
s2n_stuffer_raw_read(&conn->in, small_frag_len); + EXPECT_BYTEARRAY_EQUAL(read, test_data, small_frag_len); + + EXPECT_OK(s2n_record_wipe(conn)); + size_t space_remaining = s2n_stuffer_space_remaining(&conn->buffer_in); + EXPECT_EQUAL(space_remaining, max_frag_len); + } + }; }; /* Test: key encryption limit tracked */ diff --git a/tests/unit/s2n_ktls_test.c b/tests/unit/s2n_ktls_test.c index a57254c4432..3ea6cbbefbd 100644 --- a/tests/unit/s2n_ktls_test.c +++ b/tests/unit/s2n_ktls_test.c @@ -387,6 +387,22 @@ int main(int argc, char **argv) EXPECT_SUCCESS(s2n_connection_ktls_enable_recv(server_conn)); }; + /* Fail if buffer_in contains any data. + * A connection that will enable ktls needs to disable recv_buffering + */ + { + DEFER_CLEANUP(struct s2n_connection *server_conn = s2n_connection_new(S2N_SERVER), + s2n_connection_ptr_free); + EXPECT_OK(s2n_test_configure_connection_for_ktls(server_conn)); + + EXPECT_SUCCESS(s2n_stuffer_write_uint8(&server_conn->buffer_in, 1)); + EXPECT_FAILURE_WITH_ERRNO(s2n_connection_ktls_enable_recv(server_conn), + S2N_ERR_KTLS_UNSUPPORTED_CONN); + + EXPECT_SUCCESS(s2n_stuffer_skip_read(&server_conn->buffer_in, 1)); + EXPECT_SUCCESS(s2n_connection_ktls_enable_recv(server_conn)); + }; + /* Fail if not using managed IO for send */ { DEFER_CLEANUP(struct s2n_connection *server_conn = s2n_connection_new(S2N_SERVER), diff --git a/tests/unit/s2n_quic_support_io_test.c b/tests/unit/s2n_quic_support_io_test.c index 5bb7b02d5f5..8a2b744cfb7 100644 --- a/tests/unit/s2n_quic_support_io_test.c +++ b/tests/unit/s2n_quic_support_io_test.c @@ -244,6 +244,87 @@ int main(int argc, char **argv) EXPECT_SUCCESS(s2n_stuffer_free(&stuffer)); EXPECT_SUCCESS(s2n_connection_free(conn)); }; + + /* Succeeds for a handshake message larger than the input buffer */ + { + DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_CLIENT), + s2n_connection_ptr_free); + EXPECT_NOT_NULL(conn); + + DEFER_CLEANUP(struct s2n_stuffer stuffer = { 0 }, 
s2n_stuffer_free); + EXPECT_SUCCESS(s2n_stuffer_growable_alloc(&stuffer, 0)); + EXPECT_SUCCESS(s2n_connection_set_io_stuffers(&stuffer, &stuffer, conn)); + + uint8_t actual_message_type = 0; + + /* Read a small message to initialize the input buffer */ + const size_t small_message_size = 10; + EXPECT_SUCCESS(s2n_stuffer_write_uint8(&stuffer, 7)); + EXPECT_SUCCESS(s2n_stuffer_write_uint24(&stuffer, small_message_size)); + EXPECT_SUCCESS(s2n_stuffer_skip_write(&stuffer, small_message_size)); + EXPECT_OK(s2n_quic_read_handshake_message(conn, &actual_message_type)); + EXPECT_EQUAL(s2n_stuffer_data_available(&conn->in), small_message_size); + + EXPECT_SUCCESS(s2n_stuffer_wipe(&conn->handshake.io)); + EXPECT_OK(s2n_record_wipe(conn)); + const size_t max_buffer_size = s2n_stuffer_space_remaining(&conn->buffer_in); + EXPECT_TRUE(max_buffer_size > small_message_size); + + /* Read a large message to force the input buffer to resize */ + const size_t large_message_size = max_buffer_size + 10; + EXPECT_SUCCESS(s2n_stuffer_write_uint8(&stuffer, 7)); + EXPECT_SUCCESS(s2n_stuffer_write_uint24(&stuffer, large_message_size)); + EXPECT_SUCCESS(s2n_stuffer_skip_write(&stuffer, large_message_size)); + EXPECT_OK(s2n_quic_read_handshake_message(conn, &actual_message_type)); + EXPECT_EQUAL(s2n_stuffer_data_available(&conn->in), large_message_size); + + EXPECT_SUCCESS(s2n_stuffer_wipe(&conn->handshake.io)); + EXPECT_OK(s2n_record_wipe(conn)); + const size_t resized_buffer_size = s2n_stuffer_space_remaining(&conn->buffer_in); + EXPECT_TRUE(resized_buffer_size >= large_message_size); + + /* Read another message to check that the resize doesn't prevent future reads */ + EXPECT_SUCCESS(s2n_stuffer_write_uint8(&stuffer, 7)); + EXPECT_SUCCESS(s2n_stuffer_write_uint24(&stuffer, TEST_DATA_SIZE)); + EXPECT_SUCCESS(s2n_stuffer_write_bytes(&stuffer, TEST_DATA, TEST_DATA_SIZE)); + EXPECT_OK(s2n_quic_read_handshake_message(conn, &actual_message_type)); + 
EXPECT_EQUAL(s2n_stuffer_data_available(&conn->in), TEST_DATA_SIZE); + EXPECT_BYTEARRAY_EQUAL(s2n_stuffer_raw_read(&conn->in, TEST_DATA_SIZE), + TEST_DATA, sizeof(TEST_DATA)); + }; + + /* Succeeds for multiple messages */ + { + DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_CLIENT), + s2n_connection_ptr_free); + EXPECT_NOT_NULL(conn); + + DEFER_CLEANUP(struct s2n_stuffer stuffer = { 0 }, s2n_stuffer_free); + EXPECT_SUCCESS(s2n_stuffer_growable_alloc(&stuffer, 0)); + EXPECT_SUCCESS(s2n_connection_set_io_stuffers(&stuffer, &stuffer, conn)); + + uint8_t actual_message_type = 0; + size_t expected_buffer_size = 0; + for (size_t i = 0; i < 100; i++) { + EXPECT_SUCCESS(s2n_stuffer_write_uint8(&stuffer, 7)); + EXPECT_SUCCESS(s2n_stuffer_write_uint24(&stuffer, TEST_DATA_SIZE)); + EXPECT_SUCCESS(s2n_stuffer_write_bytes(&stuffer, TEST_DATA, TEST_DATA_SIZE)); + EXPECT_OK(s2n_quic_read_handshake_message(conn, &actual_message_type)); + EXPECT_EQUAL(s2n_stuffer_data_available(&conn->in), TEST_DATA_SIZE); + EXPECT_BYTEARRAY_EQUAL(s2n_stuffer_raw_read(&conn->in, TEST_DATA_SIZE), + TEST_DATA, sizeof(TEST_DATA)); + + EXPECT_SUCCESS(s2n_stuffer_wipe(&conn->handshake.io)); + EXPECT_OK(s2n_record_wipe(conn)); + + /* Ensure buffer size stays constant */ + const size_t buffer_size = s2n_stuffer_space_remaining(&conn->buffer_in); + if (i == 0) { + expected_buffer_size = buffer_size; + } + EXPECT_EQUAL(expected_buffer_size, buffer_size); + } + }; }; /* Functional Tests */ diff --git a/tests/unit/s2n_recv_buffering_test.c b/tests/unit/s2n_recv_buffering_test.c new file mode 100644 index 00000000000..60b4401671f --- /dev/null +++ b/tests/unit/s2n_recv_buffering_test.c @@ -0,0 +1,518 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +#include "api/s2n.h" +#include "api/unstable/renegotiate.h" +#include "s2n_test.h" +#include "testlib/s2n_ktls_test_utils.h" +#include "testlib/s2n_testlib.h" +#include "utils/s2n_random.h" + +struct s2n_recv_wrapper { + size_t count; + s2n_recv_fn *inner_recv; + void *inner_recv_ctx; +}; + +static int s2n_counting_read(void *io_context, uint8_t *buf, uint32_t len) +{ + struct s2n_recv_wrapper *context = (struct s2n_recv_wrapper *) io_context; + context->count++; + return context->inner_recv(context->inner_recv_ctx, buf, len); +} + +static S2N_RESULT s2n_connection_set_counting_read(struct s2n_connection *reader, + struct s2n_recv_wrapper *wrapper) +{ + /* We'd need to handle cleanup for managed IO */ + RESULT_ENSURE(!reader->managed_recv_io, S2N_ERR_SAFETY); + + wrapper->inner_recv = reader->recv; + reader->recv = s2n_counting_read; + wrapper->inner_recv_ctx = reader->recv_io_context; + reader->recv_io_context = wrapper; + wrapper->count = 0; + return S2N_RESULT_OK; +} + +int main(int argc, char **argv) +{ + BEGIN_TEST(); + + const uint8_t test_data[20] = "hello world"; + const size_t buffer_in_size = S2N_LARGE_FRAGMENT_LENGTH; + + DEFER_CLEANUP(struct s2n_cert_chain_and_key * chain_and_key, + s2n_cert_chain_and_key_ptr_free); + EXPECT_SUCCESS(s2n_test_cert_chain_and_key_new(&chain_and_key, + S2N_DEFAULT_ECDSA_TEST_CERT_CHAIN, S2N_DEFAULT_ECDSA_TEST_PRIVATE_KEY)); + + DEFER_CLEANUP(struct s2n_config *config = s2n_config_new(), + s2n_config_ptr_free); + EXPECT_SUCCESS(s2n_config_add_cert_chain_and_key_to_store(config, chain_and_key)); + EXPECT_SUCCESS(s2n_config_set_cipher_preferences(config, 
"default_tls13")); + EXPECT_SUCCESS(s2n_config_disable_x509_verification(config)); + + /* Test: Read a single record */ + uint32_t test_record_size_val = 0; + { + DEFER_CLEANUP(struct s2n_connection *client = s2n_connection_new(S2N_CLIENT), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(client, config)); + DEFER_CLEANUP(struct s2n_connection *server = s2n_connection_new(S2N_SERVER), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(server, config)); + + DEFER_CLEANUP(struct s2n_test_io_stuffer_pair io_pair = { 0 }, s2n_io_stuffer_pair_free); + EXPECT_OK(s2n_io_stuffer_pair_init(&io_pair)); + EXPECT_OK(s2n_connections_set_io_stuffer_pair(client, server, &io_pair)); + EXPECT_SUCCESS(s2n_negotiate_test_server_and_client(server, client)); + + struct s2n_recv_wrapper counter = { 0 }; + EXPECT_OK(s2n_connection_set_counting_read(server, &counter)); + + s2n_blocked_status blocked = S2N_NOT_BLOCKED; + EXPECT_EQUAL(s2n_send(client, test_data, sizeof(test_data), &blocked), sizeof(test_data)); + test_record_size_val = s2n_stuffer_data_available(&io_pair.server_in); + EXPECT_TRUE(test_record_size_val > sizeof(test_data)); + + uint8_t buffer[sizeof(test_data)] = { 0 }; + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(server, true)); + EXPECT_EQUAL(s2n_recv(server, buffer, sizeof(test_data), &blocked), sizeof(test_data)); + EXPECT_BYTEARRAY_EQUAL(buffer, test_data, sizeof(test_data)); + EXPECT_EQUAL(counter.count, 1); + } + const uint32_t test_record_size = test_record_size_val; + + /* Test: Read the handshake */ + { + DEFER_CLEANUP(struct s2n_connection *client = s2n_connection_new(S2N_CLIENT), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(client, config)); + DEFER_CLEANUP(struct s2n_connection *server = s2n_connection_new(S2N_SERVER), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(server, config)); + + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(client, true)); + 
EXPECT_SUCCESS(s2n_connection_set_recv_buffering(server, true)); + + DEFER_CLEANUP(struct s2n_test_io_stuffer_pair io_pair = { 0 }, s2n_io_stuffer_pair_free); + EXPECT_OK(s2n_io_stuffer_pair_init(&io_pair)); + EXPECT_OK(s2n_connections_set_io_stuffer_pair(client, server, &io_pair)); + EXPECT_SUCCESS(s2n_negotiate_test_server_and_client(server, client)); + } + + /* Test: Read a record larger than the input buffer */ + { + DEFER_CLEANUP(struct s2n_connection *client = s2n_connection_new(S2N_CLIENT), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(client, config)); + client->max_outgoing_fragment_length = UINT16_MAX; + DEFER_CLEANUP(struct s2n_connection *server = s2n_connection_new(S2N_SERVER), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(server, config)); + + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(client, true)); + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(server, true)); + + DEFER_CLEANUP(struct s2n_test_io_stuffer_pair io_pair = { 0 }, s2n_io_stuffer_pair_free); + EXPECT_OK(s2n_io_stuffer_pair_init(&io_pair)); + EXPECT_OK(s2n_connections_set_io_stuffer_pair(client, server, &io_pair)); + EXPECT_SUCCESS(s2n_negotiate_test_server_and_client(server, client)); + + struct s2n_recv_wrapper counter = { 0 }; + EXPECT_OK(s2n_connection_set_counting_read(server, &counter)); + + s2n_blocked_status blocked = S2N_NOT_BLOCKED; + DEFER_CLEANUP(struct s2n_blob max_fragment_buffer = { 0 }, s2n_free); + EXPECT_SUCCESS(s2n_alloc(&max_fragment_buffer, S2N_LARGE_FRAGMENT_LENGTH)); + + /* Send a record that won't fit in the default input buffer */ + EXPECT_EQUAL( + s2n_send(client, max_fragment_buffer.data, max_fragment_buffer.size, &blocked), + max_fragment_buffer.size); + size_t record_size = s2n_stuffer_data_available(&io_pair.server_in); + size_t fragment_size = record_size - S2N_TLS_RECORD_HEADER_LENGTH; + EXPECT_TRUE(fragment_size > buffer_in_size); + + /* Test that the record can be received and the input buffer 
resized */ + EXPECT_EQUAL( + s2n_recv(server, max_fragment_buffer.data, max_fragment_buffer.size, &blocked), + max_fragment_buffer.size); + EXPECT_TRUE(s2n_stuffer_space_remaining(&server->buffer_in) > fragment_size); + /* The header fits on the first read, but the rest of the data doesn't. + * We need a (large) shift + read to get the rest of the data. + */ + EXPECT_EQUAL(counter.count, 2); + + /* Check that another record can be received afterwards */ + uint8_t buffer[sizeof(test_data)] = { 0 }; + EXPECT_EQUAL(s2n_send(client, test_data, sizeof(test_data), &blocked), sizeof(test_data)); + EXPECT_EQUAL(s2n_recv(server, buffer, sizeof(test_data), &blocked), sizeof(test_data)); + EXPECT_BYTEARRAY_EQUAL(buffer, test_data, sizeof(test_data)); + EXPECT_EQUAL(counter.count, 3); + } + + /* Test: Read multiple small records */ + for (size_t greedy = 0; greedy <= 1; greedy++) { + DEFER_CLEANUP(struct s2n_connection *client = s2n_connection_new(S2N_CLIENT), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(client, config)); + DEFER_CLEANUP(struct s2n_connection *server = s2n_connection_new(S2N_SERVER), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(server, config)); + + if (greedy) { + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(client, true)); + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(server, true)); + } + + DEFER_CLEANUP(struct s2n_test_io_stuffer_pair io_pair = { 0 }, s2n_io_stuffer_pair_free); + EXPECT_OK(s2n_io_stuffer_pair_init(&io_pair)); + EXPECT_OK(s2n_connections_set_io_stuffer_pair(client, server, &io_pair)); + EXPECT_SUCCESS(s2n_negotiate_test_server_and_client(server, client)); + + struct s2n_recv_wrapper counter = { 0 }; + EXPECT_OK(s2n_connection_set_counting_read(server, &counter)); + + s2n_blocked_status blocked = S2N_NOT_BLOCKED; + for (size_t i = 1; i <= sizeof(test_data); i++) { + EXPECT_EQUAL(s2n_send(client, test_data, i, &blocked), i); + } + + uint8_t buffer[sizeof(test_data)] = { 0 }; + for 
(size_t i = 1; i <= sizeof(test_data); i++) { + EXPECT_EQUAL(s2n_recv(server, buffer, i, &blocked), i); + EXPECT_BYTEARRAY_EQUAL(buffer, test_data, i); + + if (greedy) { + /* All our small records combined are smaller than the maximum + * TLS record size, so they should all be buffered immediately. + * Only one read is ever necessary. + */ + EXPECT_EQUAL(counter.count, 1); + } else { + /* We call recv twice for every record */ + EXPECT_EQUAL(counter.count, i * 2); + } + } + + /* The input buffer size does not change with greedy vs not greedy */ + EXPECT_EQUAL(server->buffer_in.blob.allocated, buffer_in_size); + + /* If all data is consumed, the input buffer can be released */ + EXPECT_SUCCESS(s2n_connection_release_buffers(server)); + EXPECT_EQUAL(server->buffer_in.blob.allocated, 0); + } + + /* Test: Read the rest of a partial record */ + for (size_t i = 0; i < test_record_size; i++) { + DEFER_CLEANUP(struct s2n_connection *client = s2n_connection_new(S2N_CLIENT), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(client, config)); + DEFER_CLEANUP(struct s2n_connection *server = s2n_connection_new(S2N_SERVER), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(server, config)); + + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(client, true)); + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(server, true)); + + DEFER_CLEANUP(struct s2n_test_io_stuffer_pair io_pair = { 0 }, s2n_io_stuffer_pair_free); + EXPECT_OK(s2n_io_stuffer_pair_init(&io_pair)); + EXPECT_OK(s2n_connections_set_io_stuffer_pair(client, server, &io_pair)); + EXPECT_SUCCESS(s2n_negotiate_test_server_and_client(server, client)); + + struct s2n_recv_wrapper counter = { 0 }; + EXPECT_OK(s2n_connection_set_counting_read(server, &counter)); + + size_t expected_count = 0; + + /* Test: manually copy some of the record into the read buffer */ + { + s2n_blocked_status blocked = S2N_NOT_BLOCKED; + EXPECT_EQUAL(s2n_send(client, test_data, sizeof(test_data), 
&blocked), sizeof(test_data)); + EXPECT_EQUAL(s2n_stuffer_data_available(&io_pair.server_in), test_record_size); + EXPECT_SUCCESS(s2n_stuffer_copy(&io_pair.server_in, &server->buffer_in, i)); + + uint8_t buffer[sizeof(test_data)] = { 0 }; + EXPECT_EQUAL(s2n_recv(server, buffer, sizeof(buffer), &blocked), sizeof(test_data)); + EXPECT_BYTEARRAY_EQUAL(buffer, test_data, sizeof(test_data)); + expected_count++; + EXPECT_EQUAL(counter.count, expected_count); + } + + /* Test: force the first recv to return partial data */ + { + s2n_blocked_status blocked = S2N_NOT_BLOCKED; + EXPECT_EQUAL(s2n_stuffer_data_available(&io_pair.server_in), 0); + EXPECT_EQUAL(s2n_send(client, test_data, sizeof(test_data), &blocked), sizeof(test_data)); + EXPECT_EQUAL(s2n_stuffer_data_available(&io_pair.server_in), test_record_size); + + io_pair.server_in.write_cursor -= (test_record_size - i); + + uint8_t buffer[sizeof(test_data)] = { 0 }; + EXPECT_FAILURE_WITH_ERRNO(s2n_recv(server, buffer, sizeof(buffer), &blocked), + S2N_ERR_IO_BLOCKED); + expected_count++; + /* If the first call returns any data, then a second call is made. + * The second call blocks. 
*/ + if (i != 0) { + expected_count++; + } + EXPECT_EQUAL(counter.count, expected_count); + + io_pair.server_in.write_cursor += (test_record_size - i); + + EXPECT_EQUAL(s2n_recv(server, buffer, sizeof(buffer), &blocked), sizeof(test_data)); + EXPECT_BYTEARRAY_EQUAL(buffer, test_data, sizeof(test_data)); + expected_count++; + EXPECT_EQUAL(counter.count, expected_count); + } + } + + /* Test: read a single record one byte at a time */ + { + DEFER_CLEANUP(struct s2n_connection *client = s2n_connection_new(S2N_CLIENT), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(client, config)); + DEFER_CLEANUP(struct s2n_connection *server = s2n_connection_new(S2N_SERVER), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(server, config)); + + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(client, true)); + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(server, true)); + + DEFER_CLEANUP(struct s2n_test_io_stuffer_pair io_pair = { 0 }, s2n_io_stuffer_pair_free); + EXPECT_OK(s2n_io_stuffer_pair_init(&io_pair)); + EXPECT_OK(s2n_connections_set_io_stuffer_pair(client, server, &io_pair)); + EXPECT_SUCCESS(s2n_negotiate_test_server_and_client(server, client)); + + struct s2n_recv_wrapper counter = { 0 }; + EXPECT_OK(s2n_connection_set_counting_read(server, &counter)); + + s2n_blocked_status blocked = S2N_NOT_BLOCKED; + EXPECT_EQUAL(s2n_stuffer_data_available(&io_pair.server_in), 0); + EXPECT_EQUAL(s2n_send(client, test_data, sizeof(test_data), &blocked), sizeof(test_data)); + EXPECT_EQUAL(s2n_stuffer_data_available(&io_pair.server_in), test_record_size); + io_pair.server_in.write_cursor -= test_record_size; + + size_t expected_count = 0; + uint8_t buffer[sizeof(test_data)] = { 0 }; + for (size_t i = 1; i < test_record_size; i++) { + /* Reads no additional data-- just blocks */ + EXPECT_FAILURE_WITH_ERRNO(s2n_recv(server, buffer, sizeof(buffer), &blocked), + S2N_ERR_IO_BLOCKED); + expected_count++; + EXPECT_EQUAL(counter.count, 
expected_count); + + /* Reads the next byte, then blocks again */ + io_pair.server_in.write_cursor++; + EXPECT_FAILURE_WITH_ERRNO(s2n_recv(server, buffer, sizeof(buffer), &blocked), + S2N_ERR_IO_BLOCKED); + expected_count += 2; + EXPECT_EQUAL(counter.count, expected_count); + } + + /* Reads the final byte and succeeds */ + io_pair.server_in.write_cursor++; + EXPECT_EQUAL(s2n_recv(server, buffer, sizeof(buffer), &blocked), sizeof(test_data)); + EXPECT_BYTEARRAY_EQUAL(buffer, test_data, sizeof(test_data)); + expected_count++; + EXPECT_EQUAL(counter.count, expected_count); + } + + /* Test: Read into a buffer that already contains data from a previous read */ + const struct { + /* The offset the current record should begin at */ + uint16_t offset; + /* Assert that shifting occurred if necessary */ + uint16_t final_offset; + /* Most offsets result in a single read */ + uint8_t reads; + } test_offsets[] = { + /* Basic small offsets: single read, no shifting */ + { .offset = 0, .final_offset = test_record_size, .reads = 1 }, + { .offset = 10, .final_offset = 10 + test_record_size, .reads = 1 }, + { .offset = 1000, .final_offset = 1000 + test_record_size, .reads = 1 }, + /* Exactly enough space remaining in the buffer, so no shift or second read. + * We wipe the buffer after: the extra byte we add to avoid the wipe isn't + * read because we read exactly as much data as we need. + */ + { + .offset = buffer_in_size - test_record_size, + .final_offset = 0, + .reads = 1, + }, + /* If we have enough space in the buffer for the next header, + * but not enough for the next fragment, then we must still read twice. 
+ */ + { + .offset = buffer_in_size - S2N_TLS_RECORD_HEADER_LENGTH, + .final_offset = test_record_size - S2N_TLS_RECORD_HEADER_LENGTH, + .reads = 2, + }, + { + .offset = buffer_in_size - S2N_TLS_RECORD_HEADER_LENGTH - 1, + .final_offset = test_record_size - S2N_TLS_RECORD_HEADER_LENGTH, + .reads = 2, + }, + /* Not enough space in the buffer for the header or the fragment. + * We have to shift but don't need a second read. + */ + { .offset = buffer_in_size - 3, .final_offset = test_record_size, .reads = 1 }, + { .offset = buffer_in_size - 1, .final_offset = test_record_size, .reads = 1 }, + { .offset = buffer_in_size, .final_offset = test_record_size, .reads = 1 }, + }; + for (size_t i = 0; i < s2n_array_len(test_offsets); i++) { + DEFER_CLEANUP(struct s2n_connection *client = s2n_connection_new(S2N_CLIENT), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(client, config)); + DEFER_CLEANUP(struct s2n_connection *server = s2n_connection_new(S2N_SERVER), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(server, config)); + + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(client, true)); + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(server, true)); + + DEFER_CLEANUP(struct s2n_test_io_stuffer_pair io_pair = { 0 }, s2n_io_stuffer_pair_free); + EXPECT_OK(s2n_io_stuffer_pair_init(&io_pair)); + EXPECT_OK(s2n_connections_set_io_stuffer_pair(client, server, &io_pair)); + EXPECT_SUCCESS(s2n_negotiate_test_server_and_client(server, client)); + + struct s2n_recv_wrapper counter = { 0 }; + EXPECT_OK(s2n_connection_set_counting_read(server, &counter)); + + s2n_blocked_status blocked = S2N_NOT_BLOCKED; + EXPECT_EQUAL(s2n_send(client, test_data, sizeof(test_data), &blocked), sizeof(test_data)); + EXPECT_EQUAL(s2n_stuffer_data_available(&io_pair.server_in), test_record_size); + /* Write one more byte so that we won't wipe buffer_in after the read. + * This will let us better examine the state of the buffer. 
+ */ + EXPECT_SUCCESS(s2n_stuffer_write_uint8(&io_pair.server_in, 0)); + + uint16_t offset = test_offsets[i].offset; + EXPECT_SUCCESS(s2n_stuffer_wipe(&server->buffer_in)); + EXPECT_SUCCESS(s2n_stuffer_skip_write(&server->buffer_in, offset)); + EXPECT_SUCCESS(s2n_stuffer_skip_read(&server->buffer_in, offset)); + if (offset < buffer_in_size) { + /* Preemptively copy one byte of the next record into buffer_in. + * If we don't do this, we just wipe buffer_in before the read, + * making this test trivial. + */ + EXPECT_SUCCESS(s2n_stuffer_copy(&io_pair.server_in, &server->buffer_in, 1)); + } + + uint8_t buffer[sizeof(test_data)] = { 0 }; + + EXPECT_EQUAL(s2n_recv(server, buffer, sizeof(buffer), &blocked), sizeof(test_data)); + EXPECT_BYTEARRAY_EQUAL(buffer, test_data, sizeof(test_data)); + EXPECT_EQUAL(counter.count, test_offsets[i].reads); + uint32_t expected_final_offset = test_offsets[i].final_offset; + /* If there is an offset, consider the extra byte we added to avoid the final wipe. */ + if (expected_final_offset != 0) { + expected_final_offset++; + } + EXPECT_EQUAL(server->buffer_in.write_cursor, expected_final_offset); + } + + /* Test: Toggle recv_buffering while reading */ + { + DEFER_CLEANUP(struct s2n_connection *client = s2n_connection_new(S2N_CLIENT), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(client, config)); + DEFER_CLEANUP(struct s2n_connection *server = s2n_connection_new(S2N_SERVER), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(server, config)); + + DEFER_CLEANUP(struct s2n_test_io_stuffer_pair io_pair = { 0 }, s2n_io_stuffer_pair_free); + EXPECT_OK(s2n_io_stuffer_pair_init(&io_pair)); + EXPECT_OK(s2n_connections_set_io_stuffer_pair(client, server, &io_pair)); + EXPECT_SUCCESS(s2n_negotiate_test_server_and_client(server, client)); + EXPECT_SUCCESS(s2n_stuffer_wipe(&io_pair.server_in)); + + s2n_blocked_status blocked = S2N_NOT_BLOCKED; + uint8_t buffer[sizeof(test_data)] = { 0 }; + + /* Send many 
records */ + const size_t records_count = 100; + for (size_t i = 0; i < records_count; i++) { + EXPECT_EQUAL(s2n_send(client, test_data, sizeof(test_data), &blocked), sizeof(test_data)); + EXPECT_EQUAL(s2n_send(client, test_data, sizeof(test_data), &blocked), sizeof(test_data)); + } + + for (size_t i = 0; i < records_count / 2; i++) { + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(server, true)); + EXPECT_EQUAL(s2n_recv(server, buffer, sizeof(buffer), &blocked), sizeof(test_data)); + EXPECT_BYTEARRAY_EQUAL(buffer, test_data, sizeof(test_data)); + EXPECT_TRUE(s2n_stuffer_data_available(&server->buffer_in)); + + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(server, false)); + EXPECT_EQUAL(s2n_recv(server, buffer, sizeof(buffer), &blocked), sizeof(test_data)); + EXPECT_BYTEARRAY_EQUAL(buffer, test_data, sizeof(test_data)); + } + } + + /* Test: s2n_connection_release_buffers with data remaining in buffer_in */ + { + DEFER_CLEANUP(struct s2n_connection *client = s2n_connection_new(S2N_CLIENT), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(client, config)); + DEFER_CLEANUP(struct s2n_connection *server = s2n_connection_new(S2N_SERVER), + s2n_connection_ptr_free); + EXPECT_SUCCESS(s2n_connection_set_config(server, config)); + + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(client, true)); + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(server, true)); + + DEFER_CLEANUP(struct s2n_test_io_stuffer_pair io_pair = { 0 }, s2n_io_stuffer_pair_free); + EXPECT_OK(s2n_io_stuffer_pair_init(&io_pair)); + EXPECT_OK(s2n_connections_set_io_stuffer_pair(client, server, &io_pair)); + EXPECT_SUCCESS(s2n_negotiate_test_server_and_client(server, client)); + EXPECT_SUCCESS(s2n_stuffer_wipe(&io_pair.server_in)); + + /* Send two records */ + s2n_blocked_status blocked = S2N_NOT_BLOCKED; + EXPECT_EQUAL(s2n_send(client, test_data, sizeof(test_data), &blocked), sizeof(test_data)); + EXPECT_EQUAL(s2n_send(client, test_data, sizeof(test_data), &blocked), 
sizeof(test_data)); + + /* Only consume a partial record */ + io_pair.server_in.write_cursor = test_record_size / 2; + uint8_t buffer[sizeof(test_data)] = { 0 }; + EXPECT_FAILURE_WITH_ERRNO( + s2n_recv(server, buffer, sizeof(test_data), &blocked), + S2N_ERR_IO_BLOCKED); + EXPECT_TRUE(s2n_stuffer_data_available(&server->in)); + EXPECT_FAILURE_WITH_ERRNO( + s2n_connection_release_buffers(server), + S2N_ERR_STUFFER_HAS_UNPROCESSED_DATA); + + /* Consume the full first record */ + /* cppcheck-suppress redundantAssignment */ + io_pair.server_in.write_cursor = test_record_size * 2; + EXPECT_EQUAL(s2n_recv(server, buffer, sizeof(buffer), &blocked), sizeof(test_data)); + EXPECT_BYTEARRAY_EQUAL(buffer, test_data, sizeof(test_data)); + + /* Release buffers */ + EXPECT_TRUE(s2n_stuffer_data_available(&server->buffer_in)); + EXPECT_SUCCESS(s2n_connection_release_buffers(server)); + EXPECT_TRUE(s2n_stuffer_data_available(&server->buffer_in)); + + /* Consume the full second record */ + EXPECT_EQUAL(s2n_recv(server, buffer, sizeof(buffer), &blocked), sizeof(test_data)); + EXPECT_BYTEARRAY_EQUAL(buffer, test_data, sizeof(test_data)); + } + + END_TEST(); +} diff --git a/tests/unit/s2n_renegotiate_test.c b/tests/unit/s2n_renegotiate_test.c index d599b1e949f..80a880daadc 100644 --- a/tests/unit/s2n_renegotiate_test.c +++ b/tests/unit/s2n_renegotiate_test.c @@ -452,6 +452,41 @@ int main(int argc, char *argv[]) EXPECT_SUCCESS(s2n_renegotiate_wipe(client_conn)); }; + + /* Wipe with next record buffered allowed, and data preserved */ + { + DEFER_CLEANUP(struct s2n_connection *server = s2n_connection_new(S2N_SERVER), s2n_connection_ptr_free); + EXPECT_NOT_NULL(server); + EXPECT_SUCCESS(s2n_connection_set_config(server, config)); + + DEFER_CLEANUP(struct s2n_connection *client = s2n_connection_new(S2N_CLIENT), s2n_connection_ptr_free); + EXPECT_NOT_NULL(client); + EXPECT_SUCCESS(s2n_connection_set_config(client, config)); + EXPECT_SUCCESS(s2n_connection_set_recv_buffering(client, true)); + 
+ DEFER_CLEANUP(struct s2n_test_io_stuffer_pair io_pair = { 0 }, s2n_io_stuffer_pair_free); + EXPECT_OK(s2n_io_stuffer_pair_init(&io_pair)); + EXPECT_OK(s2n_connections_set_io_stuffer_pair(client, server, &io_pair)); + + EXPECT_SUCCESS(s2n_negotiate_test_server_and_client(server, client)); + + /* Write two records, but only receive one. + * Due to recv buffering, the second record will be read and buffered + * at the same time as the first record, but not processed yet. + */ + uint8_t recv_buffer[sizeof(app_data)] = { 0 }; + EXPECT_EQUAL(s2n_send(server, app_data, sizeof(app_data), &blocked), sizeof(app_data)); + EXPECT_EQUAL(s2n_send(server, app_data, sizeof(app_data), &blocked), sizeof(app_data)); + EXPECT_EQUAL(s2n_recv(client, recv_buffer, sizeof(app_data), &blocked), sizeof(app_data)); + EXPECT_EQUAL(s2n_peek(client), 0); + EXPECT_TRUE(s2n_stuffer_data_available(&client->buffer_in)); + + EXPECT_SUCCESS(s2n_renegotiate_wipe(client)); + + /* The second record is still available to read after the wipe */ + EXPECT_EQUAL(s2n_recv(client, recv_buffer, sizeof(app_data), &blocked), sizeof(app_data)); + EXPECT_BYTEARRAY_EQUAL(recv_buffer, app_data, sizeof(app_data)); + }; }; /* Test the basic renegotiation mechanism with a variety of connection parameters. 
diff --git a/tests/unit/s2n_self_talk_io_mem_test.c b/tests/unit/s2n_self_talk_io_mem_test.c index 2453150b03c..f28a1d1f8fc 100644 --- a/tests/unit/s2n_self_talk_io_mem_test.c +++ b/tests/unit/s2n_self_talk_io_mem_test.c @@ -184,8 +184,8 @@ int main(int argc, char **argv) EXPECT_SUCCESS(s2n_connection_set_config(server_conn, config)); /* All IO buffers empty */ - EXPECT_EQUAL(client_conn->in.blob.size, 0); - EXPECT_EQUAL(server_conn->in.blob.size, 0); + EXPECT_EQUAL(client_conn->buffer_in.blob.size, 0); + EXPECT_EQUAL(server_conn->buffer_in.blob.size, 0); EXPECT_EQUAL(client_conn->out.blob.size, 0); EXPECT_EQUAL(server_conn->out.blob.size, 0); @@ -199,16 +199,16 @@ int main(int argc, char **argv) EXPECT_OK(s2n_negotiate_test_server_and_client_until_message(server_conn, client_conn, SERVER_CERT)); /* All IO buffers not empty */ - EXPECT_NOT_EQUAL(client_conn->in.blob.size, 0); - EXPECT_NOT_EQUAL(server_conn->in.blob.size, 0); + EXPECT_NOT_EQUAL(client_conn->buffer_in.blob.size, 0); + EXPECT_NOT_EQUAL(server_conn->buffer_in.blob.size, 0); EXPECT_NOT_EQUAL(client_conn->out.blob.size, 0); EXPECT_NOT_EQUAL(server_conn->out.blob.size, 0); /* Wipe connections */ EXPECT_SUCCESS(s2n_connection_wipe(client_conn)); EXPECT_SUCCESS(s2n_connection_wipe(server_conn)); - EXPECT_EQUAL(client_conn->in.blob.size, 0); - EXPECT_EQUAL(server_conn->in.blob.size, 0); + EXPECT_EQUAL(client_conn->buffer_in.blob.size, 0); + EXPECT_EQUAL(server_conn->buffer_in.blob.size, 0); EXPECT_EQUAL(client_conn->out.blob.size, 0); EXPECT_EQUAL(server_conn->out.blob.size, 0); @@ -244,9 +244,9 @@ int main(int argc, char **argv) EXPECT_SUCCESS(s2n_negotiate_test_server_and_client(server_conn, client_conn)); /* all IO buffers should be empty after the handshake */ - EXPECT_EQUAL(client_conn->in.blob.size, 0); + EXPECT_EQUAL(client_conn->buffer_in.blob.size, 0); EXPECT_EQUAL(client_conn->out.blob.size, 0); - EXPECT_EQUAL(server_conn->in.blob.size, 0); + EXPECT_EQUAL(server_conn->buffer_in.blob.size, 0); 
EXPECT_EQUAL(server_conn->out.blob.size, 0); /* block the server from sending */ @@ -283,13 +283,13 @@ int main(int argc, char **argv) EXPECT_EQUAL(s2n_recv(client_conn, &buf, s2n_array_len(buf) / 2, &blocked), s2n_array_len(buf) / 2); /* the `in` buffer should not be freed until it's completely flushed to the application */ - EXPECT_NOT_EQUAL(client_conn->in.blob.size, 0); + EXPECT_NOT_EQUAL(client_conn->buffer_in.blob.size, 0); /* Receive the second half of the payload on the second call */ EXPECT_EQUAL(s2n_recv(client_conn, &buf, s2n_array_len(buf) / 2, &blocked), s2n_array_len(buf) / 2); /* at this point the application has received the full message and the `in` buffer should be freed */ - EXPECT_EQUAL(client_conn->in.blob.size, 0); + EXPECT_EQUAL(client_conn->buffer_in.blob.size, 0); }; EXPECT_SUCCESS(s2n_config_free(config)); diff --git a/tls/s2n_connection.c b/tls/s2n_connection.c index 1a555bdc054..2ac788d4e18 100644 --- a/tls/s2n_connection.c +++ b/tls/s2n_connection.c @@ -102,7 +102,7 @@ struct s2n_connection *s2n_connection_new(s2n_mode mode) PTR_GUARD_POSIX(s2n_blob_init(&blob, conn->header_in_data, S2N_TLS_RECORD_HEADER_LENGTH)); PTR_GUARD_POSIX(s2n_stuffer_init(&conn->header_in, &blob)); PTR_GUARD_POSIX(s2n_stuffer_growable_alloc(&conn->out, 0)); - PTR_GUARD_POSIX(s2n_stuffer_growable_alloc(&conn->in, 0)); + PTR_GUARD_POSIX(s2n_stuffer_growable_alloc(&conn->buffer_in, 0)); PTR_GUARD_POSIX(s2n_stuffer_growable_alloc(&conn->handshake.io, 0)); PTR_GUARD_RESULT(s2n_timer_start(conn->config, &conn->write_timer)); @@ -262,6 +262,7 @@ int s2n_connection_free(struct s2n_connection *conn) POSIX_GUARD(s2n_free(&conn->peer_quic_transport_parameters)); POSIX_GUARD(s2n_free(&conn->server_early_data_context)); POSIX_GUARD(s2n_free(&conn->tls13_ticket_fields.session_secret)); + POSIX_GUARD(s2n_stuffer_free(&conn->buffer_in)); POSIX_GUARD(s2n_stuffer_free(&conn->in)); POSIX_GUARD(s2n_stuffer_free(&conn->out)); POSIX_GUARD(s2n_stuffer_free(&conn->handshake.io)); @@ 
-407,7 +408,9 @@ int s2n_connection_release_buffers(struct s2n_connection *conn) POSIX_GUARD(s2n_stuffer_resize(&conn->out, 0)); POSIX_ENSURE(s2n_stuffer_is_consumed(&conn->in), S2N_ERR_STUFFER_HAS_UNPROCESSED_DATA); - POSIX_GUARD(s2n_stuffer_resize(&conn->in, 0)); + if (s2n_stuffer_is_consumed(&conn->buffer_in)) { + POSIX_GUARD(s2n_stuffer_resize(&conn->buffer_in, 0)); + } POSIX_ENSURE(s2n_stuffer_is_consumed(&conn->post_handshake.in), S2N_ERR_STUFFER_HAS_UNPROCESSED_DATA); POSIX_GUARD(s2n_stuffer_free(&conn->post_handshake.in)); @@ -464,7 +467,7 @@ int s2n_connection_wipe(struct s2n_connection *conn) struct s2n_stuffer client_ticket_to_decrypt = { 0 }; struct s2n_stuffer handshake_io = { 0 }; struct s2n_stuffer header_in = { 0 }; - struct s2n_stuffer in = { 0 }; + struct s2n_stuffer buffer_in = { 0 }; struct s2n_stuffer out = { 0 }; /* Some required structures might have been freed to conserve memory between handshakes. @@ -501,11 +504,12 @@ int s2n_connection_wipe(struct s2n_connection *conn) POSIX_GUARD(s2n_stuffer_wipe(&conn->post_handshake.in)); POSIX_GUARD(s2n_blob_zero(&conn->client_hello.raw_message)); POSIX_GUARD(s2n_stuffer_wipe(&conn->header_in)); - POSIX_GUARD(s2n_stuffer_wipe(&conn->in)); + POSIX_GUARD(s2n_stuffer_wipe(&conn->buffer_in)); POSIX_GUARD(s2n_stuffer_wipe(&conn->out)); /* Free stuffers we plan to just recreate */ POSIX_GUARD(s2n_stuffer_free(&conn->post_handshake.in)); + POSIX_GUARD(s2n_stuffer_free(&conn->in)); POSIX_GUARD_RESULT(s2n_psk_parameters_wipe(&conn->psk_params)); @@ -526,7 +530,7 @@ int s2n_connection_wipe(struct s2n_connection *conn) /* Truncate the message buffers to save memory, we will dynamically resize it as needed */ POSIX_GUARD(s2n_free(&conn->client_hello.raw_message)); - POSIX_GUARD(s2n_stuffer_resize(&conn->in, 0)); + POSIX_GUARD(s2n_stuffer_resize(&conn->buffer_in, 0)); POSIX_GUARD(s2n_stuffer_resize(&conn->out, 0)); /* Remove context associated with connection */ @@ -545,7 +549,7 @@ int s2n_connection_wipe(struct 
s2n_connection *conn) POSIX_CHECKED_MEMCPY(&client_ticket_to_decrypt, &conn->client_ticket_to_decrypt, sizeof(struct s2n_stuffer)); POSIX_CHECKED_MEMCPY(&handshake_io, &conn->handshake.io, sizeof(struct s2n_stuffer)); POSIX_CHECKED_MEMCPY(&header_in, &conn->header_in, sizeof(struct s2n_stuffer)); - POSIX_CHECKED_MEMCPY(&in, &conn->in, sizeof(struct s2n_stuffer)); + POSIX_CHECKED_MEMCPY(&buffer_in, &conn->buffer_in, sizeof(struct s2n_stuffer)); POSIX_CHECKED_MEMCPY(&out, &conn->out, sizeof(struct s2n_stuffer)); #ifdef S2N_DIAGNOSTICS_POP_SUPPORTED #pragma GCC diagnostic pop @@ -557,9 +561,14 @@ int s2n_connection_wipe(struct s2n_connection *conn) POSIX_CHECKED_MEMCPY(&conn->client_ticket_to_decrypt, &client_ticket_to_decrypt, sizeof(struct s2n_stuffer)); POSIX_CHECKED_MEMCPY(&conn->handshake.io, &handshake_io, sizeof(struct s2n_stuffer)); POSIX_CHECKED_MEMCPY(&conn->header_in, &header_in, sizeof(struct s2n_stuffer)); - POSIX_CHECKED_MEMCPY(&conn->in, &in, sizeof(struct s2n_stuffer)); + POSIX_CHECKED_MEMCPY(&conn->buffer_in, &buffer_in, sizeof(struct s2n_stuffer)); POSIX_CHECKED_MEMCPY(&conn->out, &out, sizeof(struct s2n_stuffer)); + /* conn->in will eventually point to part of conn->buffer_in, but we initialize + * it as growable and allocated to support legacy tests. 
+ */ + POSIX_GUARD(s2n_stuffer_growable_alloc(&conn->in, 0)); + conn->handshake.hashes = handshake_hashes; conn->prf_space = prf_workspace; conn->initial = initial; @@ -1628,13 +1637,13 @@ S2N_RESULT s2n_connection_dynamic_free_in_buffer(struct s2n_connection *conn) { RESULT_ENSURE_REF(conn); - /* free the `in` buffer if we're in dynamic mode and it's completely flushed */ - if (conn->dynamic_buffers && s2n_stuffer_is_consumed(&conn->in)) { + /* free `buffer_in` if we're in dynamic mode and it's completely flushed */ + if (conn->dynamic_buffers && s2n_stuffer_is_consumed(&conn->buffer_in)) { /* when copying the buffer into the application, we use `s2n_stuffer_erase_and_read`, which already zeroes the memory */ - RESULT_GUARD_POSIX(s2n_stuffer_free_without_wipe(&conn->in)); + RESULT_GUARD_POSIX(s2n_stuffer_free_without_wipe(&conn->buffer_in)); /* reset the stuffer to its initial state */ - RESULT_GUARD_POSIX(s2n_stuffer_growable_alloc(&conn->in, 0)); + RESULT_GUARD_POSIX(s2n_stuffer_growable_alloc(&conn->buffer_in, 0)); } return S2N_RESULT_OK; @@ -1726,3 +1735,12 @@ int s2n_connection_get_key_update_counts(struct s2n_connection *conn, *recv_key_updates = conn->recv_key_updated; return S2N_SUCCESS; } + +int s2n_connection_set_recv_buffering(struct s2n_connection *conn, bool enabled) +{ + POSIX_ENSURE_REF(conn); + /* QUIC support is not currently compatible with recv_buffering */ + POSIX_ENSURE(!s2n_connection_is_quic_enabled(conn), S2N_ERR_INVALID_STATE); + conn->recv_buffering = enabled; + return S2N_SUCCESS; +} diff --git a/tls/s2n_connection.h b/tls/s2n_connection.h index 8c85ac9783c..58c4757cac8 100644 --- a/tls/s2n_connection.h +++ b/tls/s2n_connection.h @@ -137,6 +137,14 @@ struct s2n_connection { * and therefore knowledge of the original handshake is limited. */ unsigned deserialized_conn : 1; + /* Indicates s2n_recv should reduce read calls by attempting to buffer more + * data than is required for a single record. 
+ * + * This is more efficient, but will break applications that expect exact reads, + * for example any custom IO that behaves like MSG_WAITALL. + */ + unsigned recv_buffering : 1; + /* The configuration (cert, key .. etc ) */ struct s2n_config *config; @@ -244,6 +252,7 @@ struct s2n_connection { */ uint8_t header_in_data[S2N_TLS_RECORD_HEADER_LENGTH]; struct s2n_stuffer header_in; + struct s2n_stuffer buffer_in; struct s2n_stuffer in; struct s2n_stuffer out; enum { diff --git a/tls/s2n_ktls.c b/tls/s2n_ktls.c index 15caac00337..d0f571e3747 100644 --- a/tls/s2n_ktls.c +++ b/tls/s2n_ktls.c @@ -98,13 +98,14 @@ static S2N_RESULT s2n_ktls_validate(struct s2n_connection *conn, s2n_ktls_mode k case S2N_KTLS_MODE_SEND: RESULT_ENSURE(conn->managed_send_io, S2N_ERR_KTLS_MANAGED_IO); /* The output stuffer should be empty before enabling kTLS. */ - RESULT_ENSURE(s2n_stuffer_data_available(&conn->out) == 0, S2N_ERR_RECORD_STUFFER_NEEDS_DRAINING); + RESULT_ENSURE(s2n_stuffer_is_consumed(&conn->out), S2N_ERR_RECORD_STUFFER_NEEDS_DRAINING); break; case S2N_KTLS_MODE_RECV: RESULT_ENSURE(conn->managed_recv_io, S2N_ERR_KTLS_MANAGED_IO); /* The input stuffers should be empty before enabling kTLS. 
*/ - RESULT_ENSURE(s2n_stuffer_data_available(&conn->header_in) == 0, S2N_ERR_RECORD_STUFFER_NEEDS_DRAINING); - RESULT_ENSURE(s2n_stuffer_data_available(&conn->in) == 0, S2N_ERR_RECORD_STUFFER_NEEDS_DRAINING); + RESULT_ENSURE(s2n_stuffer_is_consumed(&conn->header_in), S2N_ERR_RECORD_STUFFER_NEEDS_DRAINING); + RESULT_ENSURE(s2n_stuffer_is_consumed(&conn->in), S2N_ERR_RECORD_STUFFER_NEEDS_DRAINING); + RESULT_ENSURE(s2n_stuffer_is_consumed(&conn->buffer_in), S2N_ERR_KTLS_UNSUPPORTED_CONN); break; default: RESULT_BAIL(S2N_ERR_SAFETY); diff --git a/tls/s2n_ktls_io.c b/tls/s2n_ktls_io.c index 34a1de6f02d..ad1d984c6e2 100644 --- a/tls/s2n_ktls_io.c +++ b/tls/s2n_ktls_io.c @@ -532,9 +532,9 @@ int s2n_ktls_read_full_record(struct s2n_connection *conn, uint8_t *record_type) return S2N_SUCCESS; } - POSIX_GUARD(s2n_stuffer_resize_if_empty(&conn->in, S2N_DEFAULT_FRAGMENT_LENGTH)); + POSIX_GUARD(s2n_stuffer_resize_if_empty(&conn->buffer_in, S2N_DEFAULT_FRAGMENT_LENGTH)); - struct s2n_stuffer record_stuffer = conn->in; + struct s2n_stuffer record_stuffer = conn->buffer_in; size_t len = s2n_stuffer_space_remaining(&record_stuffer); uint8_t *buf = s2n_stuffer_raw_write(&record_stuffer, len); POSIX_ENSURE_REF(buf); @@ -549,6 +549,12 @@ int s2n_ktls_read_full_record(struct s2n_connection *conn, uint8_t *record_type) buf, len, &blocked, &bytes_read); WITH_ERROR_BLINDING(conn, POSIX_GUARD_RESULT(result)); - POSIX_GUARD(s2n_stuffer_skip_write(&conn->in, bytes_read)); + POSIX_GUARD(s2n_stuffer_skip_write(&conn->buffer_in, bytes_read)); + + /* We don't care about returning a full fragment because we don't need to decrypt. + * kTLS handled decryption already. + * So we can always set conn->in equal to the full buffer_in. 
+ */ + POSIX_GUARD_RESULT(s2n_recv_in_init(conn, bytes_read, bytes_read)); + return S2N_SUCCESS; } diff --git a/tls/s2n_quic_support.c b/tls/s2n_quic_support.c index ba146eacbc9..881aa313796 100644 --- a/tls/s2n_quic_support.c +++ b/tls/s2n_quic_support.c @@ -45,6 +45,8 @@ int s2n_connection_enable_quic(struct s2n_connection *conn) { POSIX_ENSURE_REF(conn); POSIX_GUARD_RESULT(s2n_connection_validate_tls13_support(conn)); + /* QUIC support is not currently compatible with recv_buffering */ + POSIX_ENSURE(!conn->recv_buffering, S2N_ERR_INVALID_STATE); conn->quic_enabled = true; return S2N_SUCCESS; } @@ -130,9 +132,13 @@ int s2n_recv_quic_post_handshake_message(struct s2n_connection *conn, s2n_blocke S2N_RESULT s2n_quic_read_handshake_message(struct s2n_connection *conn, uint8_t *message_type) { RESULT_ENSURE_REF(conn); + /* The use of handshake.io here would complicate recv_buffering, and there's + * no real use case for recv_buffering when QUIC is handling the IO. + */ + RESULT_ENSURE(!conn->recv_buffering, S2N_ERR_INVALID_STATE); /* Allocate stuffer space now so that we don't have to realloc later in the handshake. */ - RESULT_GUARD_POSIX(s2n_stuffer_resize_if_empty(&conn->in, S2N_EXPECTED_QUIC_MESSAGE_SIZE)); + RESULT_GUARD_POSIX(s2n_stuffer_resize_if_empty(&conn->buffer_in, S2N_EXPECTED_QUIC_MESSAGE_SIZE)); RESULT_GUARD(s2n_read_in_bytes(conn, &conn->handshake.io, TLS_HANDSHAKE_HEADER_LENGTH)); @@ -141,8 +147,14 @@ S2N_RESULT s2n_quic_read_handshake_message(struct s2n_connection *conn, uint8_t RESULT_GUARD_POSIX(s2n_stuffer_reread(&conn->handshake.io)); RESULT_ENSURE(message_len < S2N_MAXIMUM_HANDSHAKE_MESSAGE_LENGTH, S2N_ERR_BAD_MESSAGE); - RESULT_GUARD(s2n_read_in_bytes(conn, &conn->in, message_len)); + RESULT_GUARD(s2n_read_in_bytes(conn, &conn->buffer_in, message_len)); + /* Although we call s2n_read_in_bytes, recv_buffering is always disabled for quic. + * Therefore buffer_in will always contain exactly message_len bytes of data. 
+ * So we don't need to handle the possibility of extra data in buffer_in. + */ + RESULT_ENSURE_EQ(s2n_stuffer_data_available(&conn->buffer_in), message_len); + RESULT_GUARD(s2n_recv_in_init(conn, message_len, message_len)); return S2N_RESULT_OK; } diff --git a/tls/s2n_record_read.c b/tls/s2n_record_read.c index 5281e1734f3..13323a41470 100644 --- a/tls/s2n_record_read.c +++ b/tls/s2n_record_read.c @@ -274,5 +274,16 @@ S2N_RESULT s2n_record_wipe(struct s2n_connection *conn) RESULT_GUARD_POSIX(s2n_stuffer_wipe(&conn->header_in)); RESULT_GUARD_POSIX(s2n_stuffer_wipe(&conn->in)); conn->in_status = ENCRYPTED; + + /* Release the memory in conn->in, which un-taints buffer_in */ + RESULT_GUARD_POSIX(s2n_stuffer_free(&conn->in)); + conn->buffer_in.tainted = false; + + /* Reclaim any memory in buffer_in if possible. + * We want to avoid an expensive shift / copy later if possible. + */ + if (s2n_stuffer_is_consumed(&conn->buffer_in)) { + RESULT_GUARD_POSIX(s2n_stuffer_rewrite(&conn->buffer_in)); + } return S2N_RESULT_OK; } diff --git a/tls/s2n_recv.c b/tls/s2n_recv.c index 323c4b5edef..9cdbd849cf8 100644 --- a/tls/s2n_recv.c +++ b/tls/s2n_recv.c @@ -36,11 +36,32 @@ #include "utils/s2n_safety.h" #include "utils/s2n_socket.h" +S2N_RESULT s2n_recv_in_init(struct s2n_connection *conn, uint32_t written, uint32_t total) +{ + RESULT_ENSURE_REF(conn); + + /* If we're going to initialize conn->in to point to more memory than + * is actually readable, make sure that the additional memory exists. 
+ */ + RESULT_ENSURE_LTE(written, total); + uint32_t remaining = total - written; + RESULT_ENSURE_LTE(remaining, s2n_stuffer_space_remaining(&conn->buffer_in)); + + uint8_t *data = s2n_stuffer_raw_read(&conn->buffer_in, written); + RESULT_ENSURE_REF(data); + RESULT_GUARD_POSIX(s2n_stuffer_free(&conn->in)); + RESULT_GUARD_POSIX(s2n_blob_init(&conn->in.blob, data, total)); + RESULT_GUARD_POSIX(s2n_stuffer_skip_write(&conn->in, written)); + return S2N_RESULT_OK; +} + S2N_RESULT s2n_read_in_bytes(struct s2n_connection *conn, struct s2n_stuffer *output, uint32_t length) { while (s2n_stuffer_data_available(output) < length) { uint32_t remaining = length - s2n_stuffer_data_available(output); - + if (conn->recv_buffering) { + remaining = MAX(remaining, s2n_stuffer_space_remaining(output)); + } errno = 0; int r = s2n_connection_recv_stuffer(output, conn, remaining); if (r == 0) { @@ -53,6 +74,20 @@ S2N_RESULT s2n_read_in_bytes(struct s2n_connection *conn, struct s2n_stuffer *ou return S2N_RESULT_OK; } +static S2N_RESULT s2n_recv_buffer_in(struct s2n_connection *conn, size_t min_size) +{ + RESULT_GUARD_POSIX(s2n_stuffer_resize_if_empty(&conn->buffer_in, S2N_LARGE_FRAGMENT_LENGTH)); + uint32_t buffer_in_available = s2n_stuffer_data_available(&conn->buffer_in); + if (buffer_in_available < min_size) { + uint32_t remaining = min_size - buffer_in_available; + if (s2n_stuffer_space_remaining(&conn->buffer_in) < remaining) { + RESULT_GUARD_POSIX(s2n_stuffer_shift(&conn->buffer_in)); + } + RESULT_GUARD(s2n_read_in_bytes(conn, &conn->buffer_in, min_size)); + } + return S2N_RESULT_OK; +} + int s2n_read_full_record(struct s2n_connection *conn, uint8_t *record_type, int *isSSLv2) { *isSSLv2 = 0; @@ -67,11 +102,17 @@ int s2n_read_full_record(struct s2n_connection *conn, uint8_t *record_type, int *record_type = TLS_APPLICATION_DATA; return S2N_SUCCESS; } - POSIX_GUARD(s2n_stuffer_resize_if_empty(&conn->in, S2N_LARGE_FRAGMENT_LENGTH)); /* Read the record until we at least have a header */ 
POSIX_GUARD(s2n_stuffer_reread(&conn->header_in)); - POSIX_GUARD_RESULT(s2n_read_in_bytes(conn, &conn->header_in, S2N_TLS_RECORD_HEADER_LENGTH)); + uint32_t header_available = s2n_stuffer_data_available(&conn->header_in); + if (header_available < S2N_TLS_RECORD_HEADER_LENGTH) { + uint32_t header_remaining = S2N_TLS_RECORD_HEADER_LENGTH - header_available; + s2n_result ret = s2n_recv_buffer_in(conn, header_remaining); + uint32_t header_read = MIN(header_remaining, s2n_stuffer_data_available(&conn->buffer_in)); + POSIX_GUARD(s2n_stuffer_copy(&conn->buffer_in, &conn->header_in, header_read)); + POSIX_GUARD_RESULT(ret); + } uint16_t fragment_length = 0; @@ -84,7 +125,14 @@ int s2n_read_full_record(struct s2n_connection *conn, uint8_t *record_type, int } /* Read enough to have the whole record */ - POSIX_GUARD_RESULT(s2n_read_in_bytes(conn, &conn->in, fragment_length)); + uint32_t fragment_available = s2n_stuffer_data_available(&conn->in); + if (fragment_available < fragment_length || fragment_length == 0) { + POSIX_GUARD(s2n_stuffer_rewind_read(&conn->buffer_in, fragment_available)); + s2n_result ret = s2n_recv_buffer_in(conn, fragment_length); + uint32_t fragment_read = MIN(fragment_length, s2n_stuffer_data_available(&conn->buffer_in)); + POSIX_GUARD_RESULT(s2n_recv_in_init(conn, fragment_read, fragment_length)); + POSIX_GUARD_RESULT(ret); + } if (*isSSLv2) { return 0; diff --git a/tls/s2n_renegotiate.c b/tls/s2n_renegotiate.c index 84765e44722..34389b5a741 100644 --- a/tls/s2n_renegotiate.c +++ b/tls/s2n_renegotiate.c @@ -72,6 +72,11 @@ int s2n_renegotiate_wipe(struct s2n_connection *conn) POSIX_ENSURE(s2n_stuffer_data_available(&conn->in) == 0, S2N_ERR_INVALID_STATE); POSIX_ENSURE(s2n_stuffer_data_available(&conn->out) == 0, S2N_ERR_INVALID_STATE); + /* buffer_in might contain data needed to read the next records. 
 */ + DEFER_CLEANUP(struct s2n_stuffer buffer_in = conn->buffer_in, s2n_stuffer_free); + conn->buffer_in = (struct s2n_stuffer){ 0 }; + POSIX_GUARD(s2n_stuffer_growable_alloc(&conn->buffer_in, 0)); + /* Save the crypto parameters. * We need to continue encrypting / decrypting with the old secure parameters. */ @@ -152,6 +157,8 @@ int s2n_renegotiate_wipe(struct s2n_connection *conn) conn->recv = recv_fn; conn->recv_io_context = recv_ctx; conn->secure_renegotiation = secure_renegotiation; + conn->buffer_in = buffer_in; + ZERO_TO_DISABLE_DEFER_CLEANUP(buffer_in); conn->handshake.renegotiation = true; return S2N_SUCCESS; diff --git a/tls/s2n_tls.h b/tls/s2n_tls.h index ff7670535b7..3f7b6344ee2 100644 --- a/tls/s2n_tls.h +++ b/tls/s2n_tls.h @@ -85,6 +85,7 @@ S2N_RESULT s2n_handshake_parse_header(struct s2n_stuffer *io, uint8_t *message_t int s2n_read_full_record(struct s2n_connection *conn, uint8_t *record_type, int *isSSLv2); S2N_RESULT s2n_sendv_with_offset_total_size(const struct iovec *bufs, ssize_t count, ssize_t offs, ssize_t *total_size_out); +S2N_RESULT s2n_recv_in_init(struct s2n_connection *conn, uint32_t written, uint32_t total); extern uint16_t mfl_code_to_length[5];