diff --git a/libudpard/_udpard_cavl.h b/libudpard/_udpard_cavl.h index 18b5e7a..e8fe2e2 100644 --- a/libudpard/_udpard_cavl.h +++ b/libudpard/_udpard_cavl.h @@ -47,7 +47,7 @@ typedef struct UdpardTreeNode Cavl; /// Returns POSITIVE if the search target is GREATER than the provided node, negative if smaller, zero on match (found). /// Values other than {-1, 0, +1} are not recommended to avoid overflow during the narrowing conversion of the result. -typedef int8_t (*CavlPredicate)(void* user_reference, const Cavl* node); +typedef int_fast8_t (*CavlPredicate)(void* user_reference, const Cavl* node); /// If provided, the factory will be invoked when the sought node does not exist in the tree. /// It is expected to return a new node that will be inserted immediately (without the need to traverse the tree again). @@ -117,13 +117,13 @@ static inline void cavlPrivateRotate(Cavl* const x, const bool r) static inline Cavl* cavlPrivateAdjustBalance(Cavl* const x, const bool increment) { CAVL_ASSERT((x != NULL) && ((x->bf >= -1) && (x->bf <= +1))); - Cavl* out = x; - const int8_t new_bf = (int8_t) (x->bf + (increment ? +1 : -1)); + Cavl* out = x; + const int_fast8_t new_bf = (int_fast8_t) (x->bf + (increment ? +1 : -1)); if ((new_bf < -1) || (new_bf > 1)) { - const bool r = new_bf < 0; // bf<0 if left-heavy --> right rotation is needed. - const int8_t sign = r ? +1 : -1; // Positive if we are rotating right. - Cavl* const z = x->lr[!r]; + const bool r = new_bf < 0; // bf<0 if left-heavy --> right rotation is needed. + const int_fast8_t sign = r ? +1 : -1; // Positive if we are rotating right. + Cavl* const z = x->lr[!r]; CAVL_ASSERT(z != NULL); // Heavy side cannot be empty. // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) if ((z->bf * sign) <= 0) // Parent and child are heavy on the same side or the child is balanced. 
@@ -132,8 +132,8 @@ static inline Cavl* cavlPrivateAdjustBalance(Cavl* const x, const bool increment cavlPrivateRotate(x, r); if (0 == z->bf) { - x->bf = (int8_t) (-sign); - z->bf = (int8_t) (+sign); + x->bf = (int_fast8_t) (-sign); + z->bf = (int_fast8_t) (+sign); } else { @@ -150,7 +150,7 @@ static inline Cavl* cavlPrivateAdjustBalance(Cavl* const x, const bool increment cavlPrivateRotate(x, r); if ((y->bf * sign) < 0) { - x->bf = (int8_t) (+sign); + x->bf = (int_fast8_t) (+sign); y->bf = 0; z->bf = 0; } @@ -158,7 +158,7 @@ static inline Cavl* cavlPrivateAdjustBalance(Cavl* const x, const bool increment { x->bf = 0; y->bf = 0; - z->bf = (int8_t) (-sign); + z->bf = (int_fast8_t) (-sign); } else { @@ -209,7 +209,7 @@ static inline Cavl* cavlSearch(Cavl** const root, Cavl** n = root; while (*n != NULL) { - const int8_t cmp = predicate(user_reference, *n); + const int_fast8_t cmp = predicate(user_reference, *n); if (0 == cmp) { out = *n; diff --git a/libudpard/udpard.c b/libudpard/udpard.c index 3bfc7de..123208a 100644 --- a/libudpard/udpard.c +++ b/libudpard/udpard.c @@ -35,6 +35,11 @@ typedef uint_least8_t byte_t; ///< For compatibility with platforms where byte static const uint_fast8_t ByteWidth = 8U; static const byte_t ByteMask = 0xFFU; +#define RX_SLOT_COUNT 2 +#define TIMESTAMP_UNSET UINT64_MAX +#define FRAME_INDEX_UNSET UINT32_MAX +#define TRANSFER_ID_UNSET UINT64_MAX + typedef struct { enum UdpardPriority priority; @@ -99,9 +104,48 @@ static inline size_t larger(const size_t a, const size_t b) return (a > b) ? a : b; } -static inline bool isValidMemoryResource(const struct UdpardMemoryResource* const memory) +static inline uint32_t max32(const uint32_t a, const uint32_t b) +{ + return (a > b) ? a : b; +} + +/// Returns the sign of the subtraction of the operands; zero if equal. This is useful for AVL search. 
+static inline int_fast8_t compare32(const uint32_t a, const uint32_t b) +{ + int_fast8_t result = 0; + if (a > b) + { + result = +1; + } + if (a < b) + { + result = -1; + } + return result; +} + +static inline void* memAlloc(const struct UdpardMemoryResource memory, const size_t size) +{ + UDPARD_ASSERT(memory.allocate != NULL); + return memory.allocate(memory.user_reference, size); +} + +static inline void memFree(const struct UdpardMemoryResource memory, const size_t size, void* const data) +{ + UDPARD_ASSERT(memory.deallocate != NULL); + memory.deallocate(memory.user_reference, size, data); +} + +static inline void memFreePayload(const struct UdpardMemoryDeleter memory, const struct UdpardMutablePayload payload) { - return (memory != NULL) && (memory->allocate != NULL) && (memory->free != NULL); + UDPARD_ASSERT(memory.deallocate != NULL); + memory.deallocate(memory.user_reference, payload.size, payload.data); +} + +static inline void memZero(const size_t size, void* const data) +{ + // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling) + (void) memset(data, 0, size); } // --------------------------------------------- HEADER CRC --------------------------------------------- @@ -158,6 +202,7 @@ static inline uint16_t headerCRCCompute(const size_t size, const void* const dat #define TRANSFER_CRC_INITIAL 0xFFFFFFFFUL #define TRANSFER_CRC_OUTPUT_XOR 0xFFFFFFFFUL #define TRANSFER_CRC_RESIDUE_BEFORE_OUTPUT_XOR 0xB798B438UL +#define TRANSFER_CRC_RESIDUE_AFTER_OUTPUT_XOR (TRANSFER_CRC_RESIDUE_BEFORE_OUTPUT_XOR ^ TRANSFER_CRC_OUTPUT_XOR) #define TRANSFER_CRC_SIZE_BYTES 4U static inline uint32_t transferCRCAddByte(const uint32_t crc, const byte_t byte) @@ -244,16 +289,15 @@ typedef struct size_t count; } TxChain; -static inline TxItem* txNewItem(struct UdpardMemoryResource* const memory, - const uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_MAX + 1U], - const UdpardMicrosecond deadline_usec, - const enum UdpardPriority priority, - 
const struct UdpardUDPIPEndpoint endpoint, - const size_t datagram_payload_size, - void* const user_transfer_reference) +static inline TxItem* txNewItem(const struct UdpardMemoryResource memory, + const uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_MAX + 1U], + const UdpardMicrosecond deadline_usec, + const enum UdpardPriority priority, + const struct UdpardUDPIPEndpoint endpoint, + const size_t datagram_payload_size, + void* const user_transfer_reference) { - UDPARD_ASSERT(memory != NULL); - TxItem* const out = (TxItem*) memory->allocate(memory, sizeof(TxItem) + datagram_payload_size); + TxItem* const out = (TxItem*) memAlloc(memory, sizeof(TxItem) + datagram_payload_size); if (out != NULL) { // No tree linkage by default. @@ -278,8 +322,8 @@ static inline TxItem* txNewItem(struct UdpardMemoryResource* const memory, /// Frames with identical weight are processed in the FIFO order. /// Frames with higher weight compare smaller (i.e., put on the left side of the tree). -static inline int8_t txAVLPredicate(void* const user_reference, // NOSONAR Cavl API requires pointer to non-const. - const struct UdpardTreeNode* const node) +static inline int_fast8_t txAVLPredicate(void* const user_reference, // NOSONAR Cavl API requires pointer to non-const. + const struct UdpardTreeNode* const node) { const TxItem* const target = (const TxItem*) user_reference; const TxItem* const other = (const TxItem*) (const void*) node; @@ -342,16 +386,15 @@ static inline byte_t* txSerializeHeader(byte_t* const destination_buffe /// Produces a chain of Tx queue items for later insertion into the Tx queue. The tail is NULL if OOM. /// The caller is responsible for freeing the memory allocated for the chain. 
-static inline TxChain txMakeChain(struct UdpardMemoryResource* const memory, - const uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_MAX + 1U], - const size_t mtu, - const UdpardMicrosecond deadline_usec, - const TransferMetadata meta, - const struct UdpardUDPIPEndpoint endpoint, - const struct UdpardConstPayload payload, - void* const user_transfer_reference) -{ - UDPARD_ASSERT(memory != NULL); +static inline TxChain txMakeChain(const struct UdpardMemoryResource memory, + const uint_least8_t dscp_value_per_priority[UDPARD_PRIORITY_MAX + 1U], + const size_t mtu, + const UdpardMicrosecond deadline_usec, + const TransferMetadata meta, + const struct UdpardUDPIPEndpoint endpoint, + const struct UdpardPayload payload, + void* const user_transfer_reference) +{ UDPARD_ASSERT(mtu > 0); UDPARD_ASSERT((payload.data != NULL) || (payload.size == 0U)); const size_t payload_size_with_crc = payload.size + TRANSFER_CRC_SIZE_BYTES; @@ -417,7 +460,7 @@ static inline int32_t txPush(struct UdpardTx* const tx, const UdpardMicrosecond deadline_usec, const TransferMetadata meta, const struct UdpardUDPIPEndpoint endpoint, - const struct UdpardConstPayload payload, + const struct UdpardPayload payload, void* const user_transfer_reference) { UDPARD_ASSERT(tx != NULL); @@ -470,7 +513,7 @@ static inline int32_t txPush(struct UdpardTx* const tx, while (head != NULL) { struct UdpardTxItem* const next = head->next_in_transfer; - tx->memory->free(tx->memory, sizeof(TxItem) + head->datagram_payload.size, head); + memFree(tx->memory, sizeof(TxItem) + head->datagram_payload.size, head); head = next; } } @@ -479,17 +522,16 @@ static inline int32_t txPush(struct UdpardTx* const tx, return out; } -int8_t udpardTxInit(struct UdpardTx* const self, - const UdpardNodeID* const local_node_id, - const size_t queue_capacity, - struct UdpardMemoryResource* const memory) +int_fast8_t udpardTxInit(struct UdpardTx* const self, + const UdpardNodeID* const local_node_id, + const size_t queue_capacity, + 
const struct UdpardMemoryResource memory) { - int8_t ret = -UDPARD_ERROR_ARGUMENT; - if ((NULL != self) && (NULL != local_node_id) && isValidMemoryResource(memory)) + int_fast8_t ret = -UDPARD_ERROR_ARGUMENT; + if ((NULL != self) && (NULL != local_node_id) && (memory.allocate != NULL) && (memory.deallocate != NULL)) { ret = 0; - // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling) - (void) memset(self, 0, sizeof(*self)); + memZero(sizeof(*self), self); self->local_node_id = local_node_id; self->queue_capacity = queue_capacity; self->mtu = UDPARD_MTU_DEFAULT; @@ -501,13 +543,13 @@ int8_t udpardTxInit(struct UdpardTx* const self, return ret; } -int32_t udpardTxPublish(struct UdpardTx* const self, - const UdpardMicrosecond deadline_usec, - const enum UdpardPriority priority, - const UdpardPortID subject_id, - UdpardTransferID* const transfer_id, - const struct UdpardConstPayload payload, - void* const user_transfer_reference) +int32_t udpardTxPublish(struct UdpardTx* const self, + const UdpardMicrosecond deadline_usec, + const enum UdpardPriority priority, + const UdpardPortID subject_id, + UdpardTransferID* const transfer_id, + const struct UdpardPayload payload, + void* const user_transfer_reference) { int32_t out = -UDPARD_ERROR_ARGUMENT; const bool args_ok = (self != NULL) && (self->local_node_id != NULL) && (priority <= UDPARD_PRIORITY_MAX) && @@ -535,14 +577,14 @@ int32_t udpardTxPublish(struct UdpardTx* const self, return out; } -int32_t udpardTxRequest(struct UdpardTx* const self, - const UdpardMicrosecond deadline_usec, - const enum UdpardPriority priority, - const UdpardPortID service_id, - const UdpardNodeID server_node_id, - UdpardTransferID* const transfer_id, - const struct UdpardConstPayload payload, - void* const user_transfer_reference) +int32_t udpardTxRequest(struct UdpardTx* const self, + const UdpardMicrosecond deadline_usec, + const enum UdpardPriority priority, + const UdpardPortID service_id, + const 
UdpardNodeID server_node_id, + UdpardTransferID* const transfer_id, + const struct UdpardPayload payload, + void* const user_transfer_reference) { int32_t out = -UDPARD_ERROR_ARGUMENT; const bool args_ok = (self != NULL) && (self->local_node_id != NULL) && (priority <= UDPARD_PRIORITY_MAX) && @@ -571,14 +613,14 @@ int32_t udpardTxRequest(struct UdpardTx* const self, return out; } -int32_t udpardTxRespond(struct UdpardTx* const self, - const UdpardMicrosecond deadline_usec, - const enum UdpardPriority priority, - const UdpardPortID service_id, - const UdpardNodeID client_node_id, - const UdpardTransferID transfer_id, - const struct UdpardConstPayload payload, - void* const user_transfer_reference) +int32_t udpardTxRespond(struct UdpardTx* const self, + const UdpardMicrosecond deadline_usec, + const enum UdpardPriority priority, + const UdpardPortID service_id, + const UdpardNodeID client_node_id, + const UdpardTransferID transfer_id, + const struct UdpardPayload payload, + void* const user_transfer_reference) { int32_t out = -UDPARD_ERROR_ARGUMENT; const bool args_ok = (self != NULL) && (self->local_node_id != NULL) && (priority <= UDPARD_PRIORITY_MAX) && @@ -634,11 +676,11 @@ struct UdpardTxItem* udpardTxPop(struct UdpardTx* const self, const struct Udpar return out; } -void udpardTxFree(struct UdpardMemoryResource* const memory, struct UdpardTxItem* const item) +void udpardTxFree(const struct UdpardMemoryResource memory, struct UdpardTxItem* const item) { - if ((memory != NULL) && (item != NULL)) + if (item != NULL) { - memory->free(memory, sizeof(TxItem) + item->datagram_payload.size, item); + memFree(memory, sizeof(TxItem) + item->datagram_payload.size, item); } } @@ -646,28 +688,21 @@ void udpardTxFree(struct UdpardMemoryResource* const memory, struct UdpardTxItem // ================================================= RX PIPELINE ================================================= // 
===================================================================================================================== +/// All but the transfer metadata. typedef struct { - TransferMetadata meta; - uint32_t index; - bool end_of_transfer; - struct UdpardConstPayload payload; ///< Also contains the transfer CRC (but not the header CRC). -} RxFrame; + uint32_t index; + bool end_of_transfer; + struct UdpardPayload payload; ///< Also contains the transfer CRC (but not the header CRC). + struct UdpardMutablePayload origin; ///< The entirety of the free-able buffer passed from the application. +} RxFrameBase; +/// Full frame state. typedef struct { - struct UdpardTreeNode base; - struct RxFragment* owner; // This is needed only to avoid pointer arithmetic. Ugly but safe. -} RxFragmentTreeNode; - -/// This is designed to be convertible to/from UdpardPayloadFragmentHandle, so that the application could be -/// given a linked list of these objects represented as a list of UdpardPayloadFragmentHandle. -typedef struct RxFragment -{ - struct UdpardPayloadFragmentHandle base; - RxFragmentTreeNode tree; - uint32_t frame_index; -} RxFragment; + RxFrameBase base; + TransferMetadata meta; +} RxFrame; /// The primitive deserialization functions are endian-agnostic. static inline const byte_t* txDeserializeU16(const byte_t* const source_buffer, uint16_t* const out_value) @@ -708,10 +743,11 @@ static inline const byte_t* txDeserializeU64(const byte_t* const source_buffer, } /// This is roughly the inverse of the txSerializeHeader function, but it also handles the frame payload. 
-static inline bool rxParseFrame(const struct UdpardConstPayload datagram_payload, RxFrame* const out) +static inline bool rxParseFrame(const struct UdpardMutablePayload datagram_payload, RxFrame* const out) { UDPARD_ASSERT((out != NULL) && (datagram_payload.data != NULL)); - bool ok = false; + out->base.origin = datagram_payload; + bool ok = false; if (datagram_payload.size > 0) // HEADER_SIZE_BYTES may change in the future depending on the header version. { const byte_t* ptr = (const byte_t*) datagram_payload.data; @@ -720,50 +756,898 @@ static inline bool rxParseFrame(const struct UdpardConstPayload datagram_payload if ((datagram_payload.size > HEADER_SIZE_BYTES) && (version == HEADER_VERSION) && (headerCRCCompute(HEADER_SIZE_BYTES, datagram_payload.data) == HEADER_CRC_RESIDUE)) { - const uint_fast8_t prio = *ptr++; - if (prio <= UDPARD_PRIORITY_MAX) + const uint_fast8_t priority = *ptr++; + if (priority <= UDPARD_PRIORITY_MAX) { - out->meta.priority = (enum UdpardPriority) prio; - ptr = txDeserializeU16(ptr, &out->meta.src_node_id); - ptr = txDeserializeU16(ptr, &out->meta.dst_node_id); - ptr = txDeserializeU16(ptr, &out->meta.data_specifier); - ptr = txDeserializeU64(ptr, &out->meta.transfer_id); - uint32_t index_eot = 0; - ptr = txDeserializeU32(ptr, &index_eot); - out->index = (uint32_t) (index_eot & HEADER_FRAME_INDEX_MASK); - out->end_of_transfer = (index_eot & HEADER_FRAME_INDEX_EOT_MASK) != 0U; + out->meta.priority = (enum UdpardPriority) priority; + ptr = txDeserializeU16(ptr, &out->meta.src_node_id); + ptr = txDeserializeU16(ptr, &out->meta.dst_node_id); + ptr = txDeserializeU16(ptr, &out->meta.data_specifier); + ptr = txDeserializeU64(ptr, &out->meta.transfer_id); + uint32_t index_eot = 0; + ptr = txDeserializeU32(ptr, &index_eot); + out->base.index = (uint32_t) (index_eot & HEADER_FRAME_INDEX_MASK); + out->base.end_of_transfer = (index_eot & HEADER_FRAME_INDEX_EOT_MASK) != 0U; ptr += 2; // Opaque user data. 
ptr += HEADER_CRC_SIZE_BYTES; - out->payload.data = ptr; - out->payload.size = datagram_payload.size - HEADER_SIZE_BYTES; - ok = true; + out->base.payload.data = ptr; + out->base.payload.size = datagram_payload.size - HEADER_SIZE_BYTES; + ok = true; UDPARD_ASSERT((ptr == (((const byte_t*) datagram_payload.data) + HEADER_SIZE_BYTES)) && - (out->payload.size > 0U)); + (out->base.payload.size > 0U)); } } + // Parsers for other header versions may be added here later. } - // Parsers for other header versions may be added here later. if (ok) // Version-agnostic semantics check. { - UDPARD_ASSERT(out->payload.size > 0); // Follows from the prior checks. + UDPARD_ASSERT(out->base.payload.size > 0); // Follows from the prior checks. const bool anonymous = out->meta.src_node_id == UDPARD_NODE_ID_UNSET; const bool broadcast = out->meta.dst_node_id == UDPARD_NODE_ID_UNSET; const bool service = (out->meta.data_specifier & DATA_SPECIFIER_SERVICE_NOT_MESSAGE_MASK) != 0; - const bool single_frame = (out->index == 0) && out->end_of_transfer; + const bool single_frame = (out->base.index == 0) && out->base.end_of_transfer; ok = service ? ((!broadcast) && (!anonymous)) : (broadcast && ((!anonymous) || single_frame)); } return ok; } -int8_t udpardRxSubscriptionInit(struct UdpardRxSubscription* const self, - const UdpardPortID subject_id, - const size_t extent, - const struct UdpardRxMemoryResources memory) +/// This helper is needed to minimize the risk of argument swapping when passing these two resources around, +/// as they almost always go side by side. +typedef struct +{ + struct UdpardMemoryResource fragment; + struct UdpardMemoryDeleter payload; +} RxMemory; + +typedef struct +{ + struct UdpardTreeNode base; + struct RxFragment* this; // This is needed to avoid pointer arithmetic with multiple inheritance. 
+} RxFragmentTreeNode; + +/// This is designed to be convertible to/from UdpardFragment, so that the application could be +/// given a linked list of these objects represented as a list of UdpardFragment. +typedef struct RxFragment +{ + struct UdpardFragment base; + RxFragmentTreeNode tree; + uint32_t frame_index; +} RxFragment; + +/// Internally, the RX pipeline is arranged as follows: +/// +/// - There is one port per subscription or an RPC-service listener. Within the port, there are N sessions, +/// one session per remote node emitting transfers on this port (i.e., on this subject, or sending +/// request/response of this service). Sessions are constructed dynamically in memory provided by +/// UdpardMemoryResource. +/// +/// - Per session, there are UDPARD_NETWORK_INTERFACE_COUNT_MAX interface states to support interface redundancy. +/// +/// - Per interface, there are RX_SLOT_COUNT slots; a slot keeps the state of a transfer in the process of being +/// reassembled which includes its payload fragments. +/// +/// Port -> Session -> Interface -> Slot -> Fragments. +/// +/// Consider the following examples, where A,B,C denote distinct multi-frame transfers: +/// +/// A0 A1 A2 B0 B1 B2 -- two transfers without OOO frames; both accepted +/// A2 A0 A1 B0 B2 B1 -- two transfers with OOO frames; both accepted +/// A0 A1 B0 A2 B1 B2 -- two transfers with interleaved frames; both accepted (this is why we need 2 slots) +/// B1 A2 A0 C0 B0 A1 C1 -- B evicted by C; A and C accepted, B dropped (to accept B we would need 3 slots) +/// B0 A0 A1 C0 B1 A2 C1 -- ditto +/// A0 A1 C0 B0 A2 C1 B1 -- A evicted by B; B and C accepted, A dropped +/// +/// In this implementation we postpone the implicit truncation until all fragments of a transfer are received. +/// Early truncation such that excess payload is not stored in memory at all is difficult to implement if +/// out-of-order reassembly is a requirement. 
+/// To implement early truncation with out-of-order reassembly, we need to deduce the MTU of the sender per transfer +/// (which is easy as we only need to take note of the payload size of any non-last frame of the transfer), +/// then, based on the MTU, determine the maximum frame index we should accept (higher indexes will be dropped); +/// then, for each fragment (i.e., frame) we need to compute the CRC (including those that are discarded). +/// At the end, when all frames have been observed, combine all CRCs to obtain the final transfer CRC +/// (this is possible because all common CRC functions are linear). +typedef struct +{ + UdpardMicrosecond ts_usec; ///< Timestamp of the earliest frame; TIMESTAMP_UNSET upon restart. + UdpardTransferID transfer_id; ///< When first constructed, this shall be set to UINT64_MAX (unreachable value). + uint32_t max_index; ///< Maximum observed frame index in this transfer (so far); zero upon restart. + uint32_t eot_index; ///< Frame index where the EOT flag was observed; FRAME_INDEX_UNSET upon restart. + uint32_t accepted_frames; ///< Number of frames accepted so far. + size_t payload_size; + RxFragmentTreeNode* fragments; +} RxSlot; + +typedef struct +{ + UdpardMicrosecond ts_usec; ///< The timestamp of the last valid transfer to arrive on this interface. + RxSlot slots[RX_SLOT_COUNT]; +} RxIface; + +/// This type is forward-declared externally, hence why it has such a long name with the "udpard" prefix. +/// Keep in mind that we have a dedicated session object per remote node per port; this means that the states +/// kept here -- the timestamp and the transfer-ID -- are specific per remote node, as it should be. +struct UdpardInternalRxSession +{ + struct UdpardTreeNode base; + /// The remote node-ID is needed here as this is the ordering/search key. + UdpardNodeID remote_node_id; + /// This shared state is used for redundant transfer deduplication. 
+ /// Redundancies occur as a result of the use of multiple network interfaces, spurious frame duplication along + /// the network path, and trivial forward error correction through duplication (if used by the sender). + UdpardMicrosecond last_ts_usec; + UdpardTransferID last_transfer_id; + /// Each redundant interface maintains its own session state independently. + /// The first interface to receive a transfer takes precedence, thus the redundant group always operates + /// at the speed of the fastest interface. Duplicate transfers delivered by the slower interfaces are discarded. + RxIface ifaces[UDPARD_NETWORK_INTERFACE_COUNT_MAX]; +}; + +// -------------------------------------------------- RX FRAGMENT -------------------------------------------------- + +/// Frees all fragments in the tree and their payload buffers. Destroys the passed fragment. +/// This is meant to be invoked on the root of the tree. +/// The maximum recursion depth is ceil(1.44*log2(FRAME_INDEX_MAX+1)-0.328) = 22 levels. +// NOLINTNEXTLINE(misc-no-recursion) MISRA C:2012 rule 17.2 +static inline void rxFragmentDestroyTree(RxFragment* const self, const RxMemory memory) +{ + UDPARD_ASSERT(self != NULL); + memFreePayload(memory.payload, self->base.origin); + for (uint_fast8_t i = 0; i < 2; i++) + { + RxFragmentTreeNode* const child = (RxFragmentTreeNode*) self->tree.base.lr[i]; + if (child != NULL) + { + UDPARD_ASSERT(child->base.up == &self->tree.base); + rxFragmentDestroyTree(child->this, memory); // NOSONAR recursion + } + } + memFree(memory.fragment, sizeof(RxFragment), self); // self-destruct +} + +/// Frees all fragments in the list and their payload buffers. Destroys the passed fragment. +/// This is meant to be invoked on the head of the list. +/// This function is needed because when a fragment tree is transformed into a list, the tree structure itself +/// is invalidated and cannot be used to free the fragments anymore. 
+static inline void rxFragmentDestroyList(struct UdpardFragment* const head, const RxMemory memory) +{ + struct UdpardFragment* handle = head; + while (handle != NULL) + { + struct UdpardFragment* const next = handle->next; + memFreePayload(memory.payload, handle->origin); // May be NULL, is okay. + memFree(memory.fragment, sizeof(RxFragment), handle); + handle = next; + } +} + +// -------------------------------------------------- RX SLOT -------------------------------------------------- + +static inline void rxSlotFree(RxSlot* const self, const RxMemory memory) +{ + UDPARD_ASSERT(self != NULL); + if (self->fragments != NULL) + { + rxFragmentDestroyTree(self->fragments->this, memory); + self->fragments = NULL; + } +} + +static inline void rxSlotRestart(RxSlot* const self, const UdpardTransferID transfer_id, const RxMemory memory) +{ + UDPARD_ASSERT(self != NULL); + rxSlotFree(self, memory); + self->ts_usec = TIMESTAMP_UNSET; // Will be assigned when the first frame of the transfer has arrived. + self->transfer_id = transfer_id; + self->max_index = 0; + self->eot_index = FRAME_INDEX_UNSET; + self->accepted_frames = 0; + self->payload_size = 0; +} + +/// This is a helper for rxSlotRestart that restarts the transfer for the next transfer-ID value. +/// The transfer-ID increment is necessary to weed out duplicate transfers. +static inline void rxSlotRestartAdvance(RxSlot* const self, const RxMemory memory) +{ + rxSlotRestart(self, self->transfer_id + 1U, memory); +} + +typedef struct +{ + uint32_t frame_index; + bool accepted; + struct UdpardMemoryResource memory_fragment; +} RxSlotUpdateContext; + +static inline int_fast8_t rxSlotFragmentSearch(void* const user_reference, // NOSONAR Cavl API requires non-const. 
+ const struct UdpardTreeNode* node) +{ + UDPARD_ASSERT((user_reference != NULL) && (node != NULL)); + return compare32(((const RxSlotUpdateContext*) user_reference)->frame_index, + ((const RxFragmentTreeNode*) node)->this->frame_index); +} + +static inline struct UdpardTreeNode* rxSlotFragmentFactory(void* const user_reference) +{ + RxSlotUpdateContext* const ctx = (RxSlotUpdateContext*) user_reference; + UDPARD_ASSERT((ctx != NULL) && (ctx->memory_fragment.allocate != NULL) && + (ctx->memory_fragment.deallocate != NULL)); + struct UdpardTreeNode* out = NULL; + RxFragment* const frag = memAlloc(ctx->memory_fragment, sizeof(RxFragment)); + if (frag != NULL) + { + memZero(sizeof(RxFragment), frag); + out = &frag->tree.base; // this is not an escape bug, we retain the pointer via "this" + frag->frame_index = ctx->frame_index; + frag->tree.this = frag; // <-- right here, see? + ctx->accepted = true; + } + return out; // OOM handled by the caller +} + +/// States outliving each level of recursion while ejecting the transfer from the fragment tree. +typedef struct +{ + struct UdpardFragment* head; // Points to the first fragment in the list. + struct UdpardFragment* predecessor; + uint32_t crc; + size_t retain_size; + size_t offset; + RxMemory memory; +} RxSlotEjectContext; + +/// See rxSlotEject() for details. +/// The maximum recursion depth is ceil(1.44*log2(FRAME_INDEX_MAX+1)-0.328) = 22 levels. 
+/// NOLINTNEXTLINE(misc-no-recursion) MISRA C:2012 rule 17.2 +static inline void rxSlotEjectFragment(RxFragment* const frag, RxSlotEjectContext* const ctx) +{ + UDPARD_ASSERT((frag != NULL) && (ctx != NULL)); + if (frag->tree.base.lr[0] != NULL) + { + RxFragment* const child = ((RxFragmentTreeNode*) frag->tree.base.lr[0])->this; + UDPARD_ASSERT(child->frame_index < frag->frame_index); + UDPARD_ASSERT(child->tree.base.up == &frag->tree.base); + rxSlotEjectFragment(child, ctx); // NOSONAR recursion + } + const size_t fragment_size = frag->base.view.size; + frag->base.next = NULL; // Default state; may be overwritten. + ctx->crc = transferCRCAdd(ctx->crc, fragment_size, frag->base.view.data); + // Truncate unnecessary payload past the specified limit. This enforces the extent and removes the transfer CRC. + const bool retain = ctx->offset < ctx->retain_size; + if (retain) + { + UDPARD_ASSERT(ctx->retain_size >= ctx->offset); + ctx->head = (ctx->head == NULL) ? &frag->base : ctx->head; + frag->base.view.size = smaller(frag->base.view.size, ctx->retain_size - ctx->offset); + if (ctx->predecessor != NULL) + { + ctx->predecessor->next = &frag->base; + } + ctx->predecessor = &frag->base; + } + // Adjust the offset of the next fragment and descend into it. Keep the sub-tree alive for now even if not needed. + ctx->offset += fragment_size; + if (frag->tree.base.lr[1] != NULL) + { + RxFragment* const child = ((RxFragmentTreeNode*) frag->tree.base.lr[1])->this; + UDPARD_ASSERT(child->frame_index > frag->frame_index); + UDPARD_ASSERT(child->tree.base.up == &frag->tree.base); + rxSlotEjectFragment(child, ctx); // NOSONAR recursion + } + // Drop the unneeded fragments and their handles after the sub-tree is fully traversed. 
+ if (!retain) + { + memFreePayload(ctx->memory.payload, frag->base.origin); + memFree(ctx->memory.fragment, sizeof(RxFragment), frag); + } +} + +/// This function finalizes the fragmented transfer payload by doing multiple things in one pass through the tree: +/// +/// - Compute the transfer-CRC. The caller should verify the result. +/// - Build a linked list of fragments ordered by frame index, as the application would expect it. +/// - Truncate the payload according to the specified size limit. +/// - Free the tree nodes and their payload buffers past the size limit. +/// +/// It is guaranteed that the output list is sorted by frame index. It may be empty. +/// After this function is invoked, the tree will be destroyed and cannot be used anymore; +/// hence, in the event of invalid transfer being received (bad CRC), the fragments will have to be freed +/// by traversing the linked list instead of the tree. +/// +/// The payload shall contain at least the transfer CRC, so the minimum size is TRANSFER_CRC_SIZE_BYTES. +/// There shall be at least one fragment (because a Cyphal transfer contains at least one frame). +/// +/// The return value indicates whether the transfer is valid (CRC is correct). +static inline bool rxSlotEject(size_t* const out_payload_size, + struct UdpardFragment* const out_payload_head, + RxFragmentTreeNode* const fragment_tree, + const size_t received_total_size, // With CRC. 
+ const size_t extent, + const RxMemory memory) +{ + UDPARD_ASSERT((received_total_size >= TRANSFER_CRC_SIZE_BYTES) && (fragment_tree != NULL) && + (out_payload_size != NULL) && (out_payload_head != NULL)); + bool result = false; + RxSlotEjectContext eject_ctx = { + .head = NULL, + .predecessor = NULL, + .crc = TRANSFER_CRC_INITIAL, + .retain_size = smaller(received_total_size - TRANSFER_CRC_SIZE_BYTES, extent), + .offset = 0, + .memory = memory, + }; + rxSlotEjectFragment(fragment_tree->this, &eject_ctx); + UDPARD_ASSERT(eject_ctx.offset == received_total_size); // Ensure we have traversed the entire tree. + if (TRANSFER_CRC_RESIDUE_BEFORE_OUTPUT_XOR == eject_ctx.crc) + { + result = true; + *out_payload_size = eject_ctx.retain_size; + *out_payload_head = (eject_ctx.head != NULL) + ? (*eject_ctx.head) // Slice off the derived type fields as they are not needed. + : (struct UdpardFragment){.next = NULL, .view = {0, NULL}, .origin = {0, NULL}}; + // This is the single-frame transfer optimization suggested by Scott: we free the first fragment handle + // early by moving the contents into the rx_transfer structure by value. + // No need to free the payload buffer because it has been transferred to the transfer. + memFree(memory.fragment, sizeof(RxFragment), eject_ctx.head); // May be empty. + } + else // The transfer turned out to be invalid. We have to free the fragments. Can't use the tree anymore. + { + rxFragmentDestroyList(eject_ctx.head, memory); + } + return result; +} + +/// Update the frame count discovery state in this transfer. +/// Returns true on success, false if inconsistencies are detected and the slot should be restarted. 
+static inline bool rxSlotAccept_UpdateFrameCount(RxSlot* const self, const RxFrameBase frame) +{ + UDPARD_ASSERT((self != NULL) && (frame.payload.size > 0)); + bool ok = true; + self->max_index = max32(self->max_index, frame.index); + if (frame.end_of_transfer) + { + if ((self->eot_index != FRAME_INDEX_UNSET) && (self->eot_index != frame.index)) + { + ok = false; // Inconsistent EOT flag, could be a node-ID conflict. + } + self->eot_index = frame.index; + } + UDPARD_ASSERT(frame.index <= self->max_index); + if (self->max_index > self->eot_index) + { + ok = false; // Frames past EOT found, discard the entire transfer because we don't trust it anymore. + } + return ok; +} + +/// Insert the fragment into the fragment tree. If it already exists, drop and free the duplicate. +/// Returns 0 if the fragment is not needed, 1 if it is needed, negative on error. +/// The fragment shall be deallocated unless the return value is 1. +static inline int_fast8_t rxSlotAccept_InsertFragment(RxSlot* const self, + const RxFrameBase frame, + const RxMemory memory) +{ + UDPARD_ASSERT((self != NULL) && (frame.payload.size > 0) && (self->max_index <= self->eot_index) && + (self->accepted_frames <= self->eot_index)); + RxSlotUpdateContext update_ctx = {.frame_index = frame.index, + .accepted = false, + .memory_fragment = memory.fragment}; + RxFragmentTreeNode* const frag = (RxFragmentTreeNode*) cavlSearch((struct UdpardTreeNode**) &self->fragments, // + &update_ctx, + &rxSlotFragmentSearch, + &rxSlotFragmentFactory); + int_fast8_t result = update_ctx.accepted ? 1 : 0; + if (frag == NULL) + { + UDPARD_ASSERT(!update_ctx.accepted); + result = -UDPARD_ERROR_MEMORY; + // No restart because there is hope that there will be enough memory when we receive a duplicate. 
+ } + UDPARD_ASSERT(self->max_index <= self->eot_index); + if (update_ctx.accepted) + { + UDPARD_ASSERT((result > 0) && (frag->this->frame_index == frame.index)); + frag->this->base.view = frame.payload; + frag->this->base.origin = frame.origin; + self->payload_size += frame.payload.size; + self->accepted_frames++; + } + return result; +} + +/// Detect transfer completion. If complete, eject the payload from the fragment tree and check its CRC. +/// The return value is passed over from rxSlotEject. +static inline int_fast8_t rxSlotAccept_FinalizeMaybe(RxSlot* const self, + size_t* const out_transfer_payload_size, + struct UdpardFragment* const out_transfer_payload_head, + const size_t extent, + const RxMemory memory) +{ + UDPARD_ASSERT((self != NULL) && (out_transfer_payload_size != NULL) && (out_transfer_payload_head != NULL) && + (self->fragments != NULL)); + int_fast8_t result = 0; + if (self->accepted_frames > self->eot_index) // Mind the off-by-one: cardinal vs. ordinal. + { + if (self->payload_size >= TRANSFER_CRC_SIZE_BYTES) + { + result = rxSlotEject(out_transfer_payload_size, + out_transfer_payload_head, + self->fragments, + self->payload_size, + extent, + memory) + ? 1 + : 0; + // The tree is now unusable and the data is moved into rx_transfer. + self->fragments = NULL; + } + rxSlotRestartAdvance(self, memory); // Restart needed even if invalid. + } + return result; +} + +/// This function will either move the frame payload into the session, or free it if it can't be used. +/// Upon return, certain state fields may be overwritten, so the caller should not rely on them. +/// Returns: 1 -- transfer available, payload written; 0 -- transfer not yet available; <0 -- error. 
+static inline int_fast8_t rxSlotAccept(RxSlot* const self, + size_t* const out_transfer_payload_size, + struct UdpardFragment* const out_transfer_payload_head, + const RxFrameBase frame, + const size_t extent, + const RxMemory memory) +{ + UDPARD_ASSERT((self != NULL) && (frame.payload.size > 0) && (out_transfer_payload_size != NULL) && + (out_transfer_payload_head != NULL)); + int_fast8_t result = 0; + bool release = true; + if (rxSlotAccept_UpdateFrameCount(self, frame)) + { + result = rxSlotAccept_InsertFragment(self, frame, memory); + UDPARD_ASSERT(result <= 1); + if (result > 0) + { + release = false; + result = rxSlotAccept_FinalizeMaybe(self, // + out_transfer_payload_size, + out_transfer_payload_head, + extent, + memory); + } + } + else + { + rxSlotRestartAdvance(self, memory); + } + if (release) + { + memFreePayload(memory.payload, frame.origin); + } + UDPARD_ASSERT(result <= 1); + return result; +} + +// -------------------------------------------------- RX IFACE -------------------------------------------------- + +/// Whether the supplied transfer-ID is greater than all transfer-IDs in the RX slots. +/// This indicates that the new transfer is not a duplicate and should be accepted. +static inline bool rxIfaceIsFutureTransferID(const RxIface* const self, const UdpardTransferID transfer_id) +{ + bool is_future_tid = true; + for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) // Expected to be unrolled by the compiler. + { + is_future_tid = is_future_tid && ((self->slots[i].transfer_id < transfer_id) || + (self->slots[i].transfer_id == TRANSFER_ID_UNSET)); + } + return is_future_tid; +} + +/// Whether the time that has passed since the last accepted first frame of a transfer exceeds the TID timeout. +/// This indicates that the transfer should be accepted even if its transfer-ID is not greater than all transfer-IDs +/// in the RX slots. 
+static inline bool rxIfaceCheckTransferIDTimeout(const RxIface* const self, + const UdpardMicrosecond ts_usec, + const UdpardMicrosecond transfer_id_timeout_usec) +{ + // We use the RxIface state here because the RxSlot state is reset between transfers. + // If there is reassembly in progress, we want to use the timestamps from these in-progress transfers, + // as that eliminates the risk of a false-positive TID-timeout detection. + UdpardMicrosecond most_recent_ts_usec = self->ts_usec; + for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) // Expected to be unrolled by the compiler. + { + if ((most_recent_ts_usec == TIMESTAMP_UNSET) || + ((self->slots[i].ts_usec != TIMESTAMP_UNSET) && (self->slots[i].ts_usec > most_recent_ts_usec))) + { + most_recent_ts_usec = self->slots[i].ts_usec; + } + } + return (most_recent_ts_usec == TIMESTAMP_UNSET) || + ((ts_usec >= most_recent_ts_usec) && ((ts_usec - most_recent_ts_usec) >= transfer_id_timeout_usec)); +} + +/// Traverses the list of slots trying to find a slot with a matching transfer-ID that is already IN PROGRESS. +/// If there is no such slot, tries again without the IN PROGRESS requirement. +/// The purpose of this complicated dual check is to support the case where multiple slots have the same +/// transfer-ID, which may occur with interleaved transfers. +static inline RxSlot* rxIfaceFindMatchingSlot(RxSlot slots[RX_SLOT_COUNT], const UdpardTransferID transfer_id) +{ + RxSlot* slot = NULL; + for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) + { + if ((slots[i].transfer_id == transfer_id) && (slots[i].ts_usec != TIMESTAMP_UNSET)) + { + slot = &slots[i]; + break; + } + } + if (slot == NULL) + { + for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) + { + if (slots[i].transfer_id == transfer_id) + { + slot = &slots[i]; + break; + } + } + } + return slot; +} + +/// This function is invoked when a new datagram pertaining to a certain session is received on an interface. 
+/// This function will either move the frame payload into the session, or free it if it cannot be made use of. +/// Returns: 1 -- transfer available; 0 -- transfer not yet available; <0 -- error. +static inline int_fast8_t rxIfaceAccept(RxIface* const self, + const UdpardMicrosecond ts_usec, + const RxFrame frame, + const size_t extent, + const UdpardMicrosecond transfer_id_timeout_usec, + const RxMemory memory, + struct UdpardRxTransfer* const out_transfer) +{ + UDPARD_ASSERT((self != NULL) && (frame.base.payload.size > 0) && (out_transfer != NULL)); + RxSlot* slot = rxIfaceFindMatchingSlot(self->slots, frame.meta.transfer_id); + // If there is no suitable slot, we should check if the transfer is a future one (high transfer-ID), + // or a transfer-ID timeout has occurred. In this case we sacrifice the oldest slot. + if (slot == NULL) + { + // The timestamp is UNSET when the slot is waiting for the next transfer. + // Such slots are the best candidates for replacement because reusing them does not cause loss of + // transfers that are in the process of being reassembled. If there are no such slots, we must + // sacrifice the one whose first frame has arrived the longest time ago. + RxSlot* victim = &self->slots[0]; + for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) // Expected to be unrolled by the compiler. + { + if ((self->slots[i].ts_usec == TIMESTAMP_UNSET) || + ((victim->ts_usec != TIMESTAMP_UNSET) && (self->slots[i].ts_usec < victim->ts_usec))) + { + victim = &self->slots[i]; + } + } + if (rxIfaceIsFutureTransferID(self, frame.meta.transfer_id) || + rxIfaceCheckTransferIDTimeout(self, ts_usec, transfer_id_timeout_usec)) + { + rxSlotRestart(victim, frame.meta.transfer_id, memory); + slot = victim; + UDPARD_ASSERT(slot != NULL); + } + } + // If there is a suitable slot (perhaps a newly created one for this frame), update it. + // If there is neither a suitable slot nor a new one was created, the frame cannot be used. 
+ int_fast8_t result = 0; + if (slot != NULL) + { + if (slot->ts_usec == TIMESTAMP_UNSET) + { + slot->ts_usec = ts_usec; // Transfer timestamp is the timestamp of the earliest frame. + } + const UdpardMicrosecond ts = slot->ts_usec; + UDPARD_ASSERT(slot->transfer_id == frame.meta.transfer_id); + result = rxSlotAccept(slot, // May invalidate state variables such as timestamp or transfer-ID. + &out_transfer->payload_size, + &out_transfer->payload, + frame.base, + extent, + memory); + if (result > 0) // Transfer successfully received, populate the transfer descriptor for the client. + { + self->ts_usec = ts; // Update the last valid transfer timestamp on this iface. + out_transfer->timestamp_usec = ts; + out_transfer->priority = frame.meta.priority; + out_transfer->source_node_id = frame.meta.src_node_id; + out_transfer->transfer_id = frame.meta.transfer_id; + } + } + else + { + memFreePayload(memory.payload, frame.base.origin); + } + return result; +} + +static inline void rxIfaceInit(RxIface* const self, const RxMemory memory) +{ + UDPARD_ASSERT(self != NULL); + memZero(sizeof(*self), self); + self->ts_usec = TIMESTAMP_UNSET; + for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) + { + self->slots[i].fragments = NULL; + rxSlotRestart(&self->slots[i], TRANSFER_ID_UNSET, memory); + } +} + +/// Frees the iface and all slots in it. The iface instance itself is not freed. +static inline void rxIfaceFree(RxIface* const self, const RxMemory memory) +{ + UDPARD_ASSERT(self != NULL); + for (uint_fast8_t i = 0; i < RX_SLOT_COUNT; i++) + { + rxSlotFree(&self->slots[i], memory); + } +} + +// -------------------------------------------------- RX SESSION -------------------------------------------------- + +/// Checks if the given transfer should be accepted. If not, the transfer is freed. +/// Internal states are updated. 
+static inline bool rxSessionDeduplicate(struct UdpardInternalRxSession* const self, + const UdpardMicrosecond transfer_id_timeout_usec, + struct UdpardRxTransfer* const transfer, + const RxMemory memory) +{ + UDPARD_ASSERT((self != NULL) && (transfer != NULL)); + const bool future_tid = (self->last_transfer_id == TRANSFER_ID_UNSET) || // + (transfer->transfer_id > self->last_transfer_id); + const bool tid_timeout = (self->last_ts_usec == TIMESTAMP_UNSET) || + ((transfer->timestamp_usec >= self->last_ts_usec) && + ((transfer->timestamp_usec - self->last_ts_usec) >= transfer_id_timeout_usec)); + const bool accept = future_tid || tid_timeout; + if (accept) + { + self->last_ts_usec = transfer->timestamp_usec; + self->last_transfer_id = transfer->transfer_id; + } + else // This is a duplicate: received from another interface, a FEC retransmission, or a network glitch. + { + memFreePayload(memory.payload, transfer->payload.origin); + rxFragmentDestroyList(transfer->payload.next, memory); + transfer->payload_size = 0; + transfer->payload = (struct UdpardFragment){.next = NULL, + .view = {.size = 0, .data = NULL}, + .origin = {.size = 0, .data = NULL}}; + } + return accept; +} + +/// Takes ownership of the frame payload buffer. 
+static inline int_fast8_t rxSessionAccept(struct UdpardInternalRxSession* const self, + const uint_fast8_t redundant_iface_index, + const UdpardMicrosecond ts_usec, + const RxFrame frame, + const size_t extent, + const UdpardMicrosecond transfer_id_timeout_usec, + const RxMemory memory, + struct UdpardRxTransfer* const out_transfer) +{ + UDPARD_ASSERT((self != NULL) && (redundant_iface_index < UDPARD_NETWORK_INTERFACE_COUNT_MAX) && + (out_transfer != NULL)); + int_fast8_t result = rxIfaceAccept(&self->ifaces[redundant_iface_index], + ts_usec, + frame, + extent, + transfer_id_timeout_usec, + memory, + out_transfer); + UDPARD_ASSERT(result <= 1); + if (result > 0) + { + result = rxSessionDeduplicate(self, transfer_id_timeout_usec, out_transfer, memory) ? 1 : 0; + } + return result; +} + +static inline void rxSessionInit(struct UdpardInternalRxSession* const self, const RxMemory memory) +{ + UDPARD_ASSERT(self != NULL); + memZero(sizeof(*self), self); + self->remote_node_id = UDPARD_NODE_ID_UNSET; + self->last_ts_usec = TIMESTAMP_UNSET; + self->last_transfer_id = TRANSFER_ID_UNSET; + for (uint_fast8_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) + { + rxIfaceInit(&self->ifaces[i], memory); + } +} + +/// Frees all ifaces in the session, all children in the session tree recursively, and destroys the session itself. +/// The maximum recursion depth is ceil(1.44*log2(UDPARD_NODE_ID_MAX+1)-0.328) = 23 levels. 
+// NOLINTNEXTLINE(*-no-recursion) MISRA C:2012 rule 17.2 +static inline void rxSessionDestroyTree(struct UdpardInternalRxSession* const self, + const struct UdpardRxMemoryResources memory) +{ + for (uint_fast8_t i = 0; i < UDPARD_NETWORK_INTERFACE_COUNT_MAX; i++) + { + rxIfaceFree(&self->ifaces[i], (RxMemory){.fragment = memory.fragment, .payload = memory.payload}); + } + for (uint_fast8_t i = 0; i < 2; i++) + { + struct UdpardInternalRxSession* const child = (struct UdpardInternalRxSession*) (void*) self->base.lr[i]; + if (child != NULL) + { + UDPARD_ASSERT(child->base.up == &self->base); + rxSessionDestroyTree(child, memory); // NOSONAR recursion + } + } + memFree(memory.session, sizeof(struct UdpardInternalRxSession), self); +} + +// -------------------------------------------------- RX PORT -------------------------------------------------- + +typedef struct +{ + UdpardNodeID remote_node_id; + struct UdpardRxMemoryResources memory; +} RxPortSessionSearchContext; + +static inline int_fast8_t rxPortSessionSearch(void* const user_reference, // NOSONAR non-const API + const struct UdpardTreeNode* node) +{ + UDPARD_ASSERT((user_reference != NULL) && (node != NULL)); + return compare32(((const RxPortSessionSearchContext*) user_reference)->remote_node_id, + ((const struct UdpardInternalRxSession*) (const void*) node)->remote_node_id); +} + +static inline struct UdpardTreeNode* rxPortSessionFactory(void* const user_reference) // NOSONAR non-const API +{ + const RxPortSessionSearchContext* const ctx = (const RxPortSessionSearchContext*) user_reference; + UDPARD_ASSERT((ctx != NULL) && (ctx->remote_node_id <= UDPARD_NODE_ID_MAX)); + struct UdpardTreeNode* out = NULL; + struct UdpardInternalRxSession* const session = + memAlloc(ctx->memory.session, sizeof(struct UdpardInternalRxSession)); + if (session != NULL) + { + rxSessionInit(session, (RxMemory){.payload = ctx->memory.payload, .fragment = ctx->memory.fragment}); + session->remote_node_id = ctx->remote_node_id; + out 
= &session->base; + } + return out; // OOM handled by the caller +} + +/// Accepts a frame into a port, possibly creating a new session along the way. +/// The frame shall not be anonymous. Takes ownership of the frame payload buffer. +static inline int_fast8_t rxPortAccept(struct UdpardRxPort* const self, + const uint_fast8_t redundant_iface_index, + const UdpardMicrosecond ts_usec, + const RxFrame frame, + const struct UdpardRxMemoryResources memory, + struct UdpardRxTransfer* const out_transfer) +{ + UDPARD_ASSERT((self != NULL) && (redundant_iface_index < UDPARD_NETWORK_INTERFACE_COUNT_MAX) && + (out_transfer != NULL) && (frame.meta.src_node_id != UDPARD_NODE_ID_UNSET)); + int_fast8_t result = 0; + struct UdpardInternalRxSession* const session = (struct UdpardInternalRxSession*) (void*) + cavlSearch((struct UdpardTreeNode**) &self->sessions, + &(RxPortSessionSearchContext){.remote_node_id = frame.meta.src_node_id, .memory = memory}, + &rxPortSessionSearch, + &rxPortSessionFactory); + if (session != NULL) + { + UDPARD_ASSERT(session->remote_node_id == frame.meta.src_node_id); + result = rxSessionAccept(session, // The callee takes ownership of the memory. + redundant_iface_index, + ts_usec, + frame, + self->extent, + self->transfer_id_timeout_usec, + (RxMemory){.payload = memory.payload, .fragment = memory.fragment}, + out_transfer); + } + else // Failed to allocate a new session. + { + result = -UDPARD_ERROR_MEMORY; + memFreePayload(memory.payload, frame.base.origin); + } + return result; +} + +/// A special case of rxPortAccept() for anonymous transfers. Accepts all transfers unconditionally. +/// Does not allocate new memory. Takes ownership of the frame payload buffer. 
+static inline int_fast8_t rxPortAcceptAnonymous(const UdpardMicrosecond ts_usec, + const RxFrame frame, + const struct UdpardMemoryDeleter memory, + struct UdpardRxTransfer* const out_transfer) +{ + UDPARD_ASSERT((out_transfer != NULL) && (frame.meta.src_node_id == UDPARD_NODE_ID_UNSET)); + int_fast8_t result = 0; + const bool size_ok = frame.base.payload.size >= TRANSFER_CRC_SIZE_BYTES; + const bool crc_ok = + transferCRCCompute(frame.base.payload.size, frame.base.payload.data) == TRANSFER_CRC_RESIDUE_AFTER_OUTPUT_XOR; + if (size_ok && crc_ok) + { + result = 1; + memZero(sizeof(*out_transfer), out_transfer); + // Copy relevant metadata from the frame. Remember that anonymous transfers are always single-frame. + out_transfer->timestamp_usec = ts_usec; + out_transfer->priority = frame.meta.priority; + out_transfer->source_node_id = frame.meta.src_node_id; + out_transfer->transfer_id = frame.meta.transfer_id; + // Manually set up the transfer payload to point to the relevant slice inside the frame payload. + out_transfer->payload.next = NULL; + out_transfer->payload.view.size = frame.base.payload.size - TRANSFER_CRC_SIZE_BYTES; + out_transfer->payload.view.data = frame.base.payload.data; + out_transfer->payload.origin = frame.base.origin; + out_transfer->payload_size = out_transfer->payload.view.size; + } + else + { + memFreePayload(memory, frame.base.origin); + } + return result; +} + +/// Accepts a raw frame and, if valid, passes it on to rxPortAccept() for further processing. +/// Takes ownership of the frame payload buffer. 
+static inline int_fast8_t rxPortAcceptFrame(struct UdpardRxPort* const self, + const uint_fast8_t redundant_iface_index, + const UdpardMicrosecond ts_usec, + const struct UdpardMutablePayload datagram_payload, + const struct UdpardRxMemoryResources memory, + struct UdpardRxTransfer* const out_transfer) +{ + int_fast8_t result = 0; + RxFrame frame = {0}; + if (rxParseFrame(datagram_payload, &frame)) + { + if (frame.meta.src_node_id != UDPARD_NODE_ID_UNSET) + { + result = rxPortAccept(self, redundant_iface_index, ts_usec, frame, memory, out_transfer); + } + else + { + result = rxPortAcceptAnonymous(ts_usec, frame, memory.payload, out_transfer); + } + } + else // Malformed datagram or unsupported header version, drop. + { + memFreePayload(memory.payload, datagram_payload); + } + return result; +} + +static inline void rxPortInit(struct UdpardRxPort* const self) +{ + memZero(sizeof(*self), self); + self->extent = SIZE_MAX; // Unlimited extent by default. + self->transfer_id_timeout_usec = UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC; + self->sessions = NULL; +} + +static inline void rxPortFree(struct UdpardRxPort* const self, const struct UdpardRxMemoryResources memory) +{ + rxSessionDestroyTree(self->sessions, memory); +} + +// -------------------------------------------------- RX API -------------------------------------------------- + +void udpardRxFragmentFree(const struct UdpardFragment head, + const struct UdpardMemoryResource memory_fragment, + const struct UdpardMemoryDeleter memory_payload) +{ + // The head is not heap-allocated so not freed. + memFreePayload(memory_payload, head.origin); // May be NULL, is okay. 
+ rxFragmentDestroyList(head.next, (RxMemory){.fragment = memory_fragment, .payload = memory_payload}); +} + +int_fast8_t udpardRxSubscriptionInit(struct UdpardRxSubscription* const self, + const UdpardPortID subject_id, + const size_t extent, + const struct UdpardRxMemoryResources memory) { (void) self; (void) subject_id; (void) extent; (void) memory; - (void) rxParseFrame; + (void) &rxPortAcceptFrame; + (void) &rxPortInit; + (void) &rxPortFree; return 0; } diff --git a/libudpard/udpard.h b/libudpard/udpard.h index 9d167bf..0f18753 100644 --- a/libudpard/udpard.h +++ b/libudpard/udpard.h @@ -71,6 +71,9 @@ /// As will be shown below, a typical application with R redundant network interfaces and S topic subscriptions needs /// R*(S+2) sockets (or equivalent abstractions provided by the underlying UDP/IP stack). /// +/// As a matter of convention, resource disposal functions are named "free" if the memory of the resource itself is +/// not deallocated, and "destroy" if the memory is deallocated. +/// /// /// Transmission pipeline /// @@ -110,7 +113,9 @@ /// Said pipelines are entirely independent from each other and can be operated from different threads, /// as they share no resources. /// -/// The reception pipeline is able to accept datagrams with arbitrary MTU. +/// The reception pipeline is able to accept datagrams with arbitrary MTU, frames delivered out-of-order (OOO) with +/// arbitrary duplication, including duplication of non-adjacent frames, and/or frames interleaved between adjacent +/// transfers. The support for OOO reassembly is particularly interesting when simple repetition coding FEC is used. /// /// The application should instantiate one subscription instance per subject it needs to receive messages from, /// irrespective of the number of redundant interfaces. 
There needs to be one socket (or a similar abstraction @@ -170,7 +175,7 @@ /// /// - (MTU+library overhead) blocks for the TX and RX pipelines (usually less than 2048 bytes); /// - RX session object sized blocks for the RX pipeline (less than 512 bytes); -/// - RX payload fragment handle sized blocks for the RX pipeline (less than 128 bytes). +/// - RX fragment handle sized blocks for the RX pipeline (less than 128 bytes). /// /// The detailed information is given in the API documentation. /// @@ -237,10 +242,10 @@ extern "C" { /// The library supports at most this many redundant network interfaces per Cyphal node. #define UDPARD_NETWORK_INTERFACE_COUNT_MAX 3U -typedef uint64_t UdpardMicrosecond; +typedef uint64_t UdpardMicrosecond; ///< UINT64_MAX is not a valid timestamp value. typedef uint16_t UdpardPortID; typedef uint16_t UdpardNodeID; -typedef uint64_t UdpardTransferID; +typedef uint64_t UdpardTransferID; ///< UINT64_MAX is not a valid transfer-ID value. /// Transfer priority level mnemonics per the recommendations given in the Cyphal Specification. /// For outgoing transfers they are mapped to DSCP values as configured per redundant interface (per UdpardTx instance). @@ -262,7 +267,7 @@ struct UdpardTreeNode { struct UdpardTreeNode* up; ///< Do not access this field. struct UdpardTreeNode* lr[2]; ///< Left and right children of this node may be accessed for tree traversal. - int8_t bf; ///< Do not access this field. + int_fast8_t bf; ///< Do not access this field. }; struct UdpardMutablePayload @@ -271,7 +276,7 @@ struct UdpardMutablePayload void* data; }; -struct UdpardConstPayload +struct UdpardPayload { size_t size; const void* data; @@ -282,27 +287,27 @@ struct UdpardConstPayload /// as well as the payload structure itself, assuming that it is also heap-allocated. /// The model is as follows: /// -/// (payload header) ---> UdpardPayloadFragmentHandle: -/// next ---> UdpardPayloadFragmentHandle... 
-/// owner ---> (the free()able payload data buffer) -/// view ---> (somewhere inside the payload data buffer) +/// (payload header) ---> UdpardFragment: +/// next ---> UdpardFragment... +/// origin ---> (the free()able payload data buffer) +/// view ---> (somewhere inside the payload data buffer) /// /// Payloads of received transfers are represented using this type, where each fragment corresponds to a frame. /// The application can either consume them directly or to copy the data into a contiguous buffer beforehand /// at the expense of extra time and memory utilization. -struct UdpardPayloadFragmentHandle +struct UdpardFragment { /// Points to the next fragment in the fragmented buffer; NULL if this is the last fragment. - struct UdpardPayloadFragmentHandle* next; + struct UdpardFragment* next; /// Contains the actual data to be used by the application. /// The memory pointed to by this fragment shall not be freed by the application. - struct UdpardConstPayload view; + struct UdpardPayload view; /// This entity points to the base buffer that contains this fragment. /// The application can use this pointer to free the outer buffer after the payload has been consumed. /// In the most simple case this field is identical to the "view" field above, but it is not always the case. - struct UdpardMutablePayload owner; + struct UdpardMutablePayload origin; }; /// Cyphal/UDP uses only multicast traffic. @@ -317,38 +322,51 @@ struct UdpardUDPIPEndpoint // ================================================= MEMORY RESOURCE ================================================= // ===================================================================================================================== -struct UdpardMemoryResource; - /// A pointer to the memory allocation function. The semantics are similar to malloc(): /// - The returned pointer shall point to an uninitialized block of memory that is at least "size" bytes large. 
/// - If there is not enough memory, the returned pointer shall be NULL. /// - The memory shall be aligned at least at max_align_t. /// - The execution time should be constant (O(1)). /// - The worst-case memory consumption (worst fragmentation) should be understood by the developer. +/// /// If the standard dynamic memory manager of the target platform does not satisfy the above requirements, -/// consider using O1Heap: https://github.com/pavel-kirienko/o1heap. -typedef void* (*UdpardMemoryAllocate)(struct UdpardMemoryResource* const self, const size_t size); +/// consider using O1Heap: https://github.com/pavel-kirienko/o1heap. Alternatively, some applications may prefer to +/// use a set of fixed-size block pool allocators (see the high-level overview for details). +/// +/// The API documentation is written on the assumption that the memory management functions have constant +/// complexity and are non-blocking. +/// +/// The value of the user reference is taken from the corresponding field of the memory resource structure. +typedef void* (*UdpardMemoryAllocate)(void* const user_reference, const size_t size); /// The counterpart of the above -- this function is invoked to return previously allocated memory to the allocator. -/// The size argument contains the amount of memory that was originally requested via the allocation function. +/// The size argument contains the amount of memory that was originally requested via the allocation function; +/// its value is undefined if the pointer is NULL. /// The semantics are similar to free(): /// - The pointer was previously returned by the allocation function. /// - The pointer may be NULL, in which case the function shall have no effect. /// - The execution time should be constant (O(1)). -typedef void (*UdpardMemoryFree)(struct UdpardMemoryResource* const self, const size_t size, void* const pointer); +/// +/// The value of the user reference is taken from the corresponding field of the memory resource structure. 
+typedef void (*UdpardMemoryDeallocate)(void* const user_reference, const size_t size, void* const pointer); + +/// A kind of memory resource that can only be used to free memory previously allocated by the user. +/// Instances are mostly intended to be passed by value. +struct UdpardMemoryDeleter +{ + void* user_reference; ///< Passed as the first argument. + UdpardMemoryDeallocate deallocate; ///< Shall be a valid pointer. +}; /// A memory resource encapsulates the dynamic memory allocation and deallocation facilities. -/// The time complexity models given in the API documentation are made on the assumption that the memory management -/// functions have constant complexity O(1). /// Note that the library allocates a large amount of small fixed-size objects for bookkeeping purposes; -/// allocators for them can be implemented using fixed-size block pools to eliminate memory fragmentation. +/// allocators for them can be implemented using fixed-size block pools to eliminate extrinsic memory fragmentation. +/// Instances are mostly intended to be passed by value. struct UdpardMemoryResource { - /// The function pointers shall be valid at all times. - UdpardMemoryAllocate allocate; - UdpardMemoryFree free; - /// This is an opaque pointer that can be freely utilized by the user for arbitrary needs. - void* user_reference; + void* user_reference; ///< Passed as the first argument. + UdpardMemoryDeallocate deallocate; ///< Shall be a valid pointer. + UdpardMemoryAllocate allocate; ///< Shall be a valid pointer. }; // ===================================================================================================================== @@ -410,8 +428,8 @@ struct UdpardTx /// There is exactly one allocation per enqueued item, each allocation contains both the UdpardTxItem /// and its payload, hence the size is variable. /// In a simple application there would be just one memory resource shared by all parts of the library. 
- /// If the application knows its MTU, it can use block allocation to avoid fragmentation. - struct UdpardMemoryResource* memory; + /// If the application knows its MTU, it can use block allocation to avoid extrinsic fragmentation. + struct UdpardMemoryResource memory; /// The number of frames that are currently contained in the queue, initially zero. /// READ-ONLY @@ -471,10 +489,10 @@ struct UdpardTxItem /// To safely discard it, simply pop all enqueued frames from it. /// /// The time complexity is constant. This function does not invoke the dynamic memory manager. -int8_t udpardTxInit(struct UdpardTx* const self, - const UdpardNodeID* const local_node_id, - const size_t queue_capacity, - struct UdpardMemoryResource* const memory); +int_fast8_t udpardTxInit(struct UdpardTx* const self, + const UdpardNodeID* const local_node_id, + const size_t queue_capacity, + const struct UdpardMemoryResource memory); /// This function serializes a message transfer into a sequence of UDP datagrams and inserts them into the prioritized /// transmission queue at the appropriate position. Afterwards, the application is supposed to take the enqueued frames @@ -527,13 +545,13 @@ int8_t udpardTxInit(struct UdpardTx* const self, /// /// The time complexity is O(p + log e), where p is the amount of payload in the transfer, and e is the number of /// frames already enqueued in the transmission queue. 
-int32_t udpardTxPublish(struct UdpardTx* const self, - const UdpardMicrosecond deadline_usec, - const enum UdpardPriority priority, - const UdpardPortID subject_id, - UdpardTransferID* const transfer_id, - const struct UdpardConstPayload payload, - void* const user_transfer_reference); +int32_t udpardTxPublish(struct UdpardTx* const self, + const UdpardMicrosecond deadline_usec, + const enum UdpardPriority priority, + const UdpardPortID subject_id, + UdpardTransferID* const transfer_id, + const struct UdpardPayload payload, + void* const user_transfer_reference); /// This is similar to udpardTxPublish except that it is intended for service request transfers. /// It takes the node-ID of the server that is intended to receive the request. @@ -550,27 +568,27 @@ int32_t udpardTxPublish(struct UdpardTx* const self, /// - UDPARD_ERROR_ANONYMOUS if the local node is anonymous (the local node-ID is unset). /// /// Other considerations are the same as for udpardTxPublish. -int32_t udpardTxRequest(struct UdpardTx* const self, - const UdpardMicrosecond deadline_usec, - const enum UdpardPriority priority, - const UdpardPortID service_id, - const UdpardNodeID server_node_id, - UdpardTransferID* const transfer_id, - const struct UdpardConstPayload payload, - void* const user_transfer_reference); +int32_t udpardTxRequest(struct UdpardTx* const self, + const UdpardMicrosecond deadline_usec, + const enum UdpardPriority priority, + const UdpardPortID service_id, + const UdpardNodeID server_node_id, + UdpardTransferID* const transfer_id, + const struct UdpardPayload payload, + void* const user_transfer_reference); /// This is similar to udpardTxRequest except that it takes the node-ID of the client instead of server, /// and the transfer-ID is passed by value rather than by pointer. 
/// The transfer-ID is passed by value because when responding to an RPC-service request, the server must /// reuse the transfer-ID value of the request (this is to allow the client to match responses with their requests). -int32_t udpardTxRespond(struct UdpardTx* const self, - const UdpardMicrosecond deadline_usec, - const enum UdpardPriority priority, - const UdpardPortID service_id, - const UdpardNodeID client_node_id, - const UdpardTransferID transfer_id, - const struct UdpardConstPayload payload, - void* const user_transfer_reference); +int32_t udpardTxRespond(struct UdpardTx* const self, + const UdpardMicrosecond deadline_usec, + const enum UdpardPriority priority, + const UdpardPortID service_id, + const UdpardNodeID client_node_id, + const UdpardTransferID transfer_id, + const struct UdpardPayload payload, + void* const user_transfer_reference); /// This function accesses the enqueued UDP datagram scheduled for transmission next. The queue itself is not modified /// (i.e., the accessed element is not removed). The application should invoke this function to collect the datagrams @@ -612,8 +630,8 @@ struct UdpardTxItem* udpardTxPop(struct UdpardTx* const self, const struct Udpar /// This is a simple helper that frees the memory allocated for the item with the correct size. /// It is needed because the application does not have access to the required context to compute the size. /// If the chosen allocator does not leverage the size information, the deallocation function can be invoked directly. -/// If any of the arguments are NULL, the function has no effect. The time complexity is constant. -void udpardTxFree(struct UdpardMemoryResource* const memory, struct UdpardTxItem* const item); +/// If the item argument is NULL, the function has no effect. The time complexity is constant. 
+void udpardTxFree(const struct UdpardMemoryResource memory, struct UdpardTxItem* const item); // ===================================================================================================================== // ================================================= RX PIPELINE ================================================= @@ -626,10 +644,6 @@ void udpardTxFree(struct UdpardMemoryResource* const memory, struct UdpardTxItem /// redundant network interfaces. struct UdpardRxPort { - /// For subject ports this is the subject-ID. For RPC-service ports this is the service-ID. - /// READ-ONLY - UdpardPortID port_id; - /// The maximum payload size that can be accepted at this port. /// The rest will be truncated away following the implicit truncation rule defined in the Cyphal specification. /// READ-ONLY @@ -654,7 +668,7 @@ struct UdpardRxPort /// Each session instance takes sizeof(UdpardInternalRxSession) bytes of dynamic memory for itself, /// which is at most 512 bytes on wide-word platforms (on small word size platforms it is usually much smaller). /// On top of that, each session instance holds memory for the transfer payload fragments and small fixed-size - /// metadata objects called "payload fragment handles" (at most 128 bytes large, usually much smaller, + /// metadata objects called "fragment handles" (at most 128 bytes large, usually much smaller, /// depending on the pointer width and the word size), one handle per fragment. /// /// The transfer payload memory is not allocated by the library but rather moved from the application @@ -664,9 +678,9 @@ struct UdpardRxPort /// of the entire datagram payload (including all overheads such as the Cyphal/UDP frame header and possible /// data that spills over the configured extent value for this port). /// If the library does not need the datagram to reassemble the transfer, its payload buffer is freed immediately. 
- /// There is a 1-to-1 correspondence between the payload fragment handles and the payload fragments. + /// There is a 1-to-1 correspondence between the fragment handles and the payload fragments. /// Remote nodes that emit highly fragmented transfers cause a higher memory utilization in the local node - /// because of the increased number of payload fragment handles and per-datagram overheads. + /// because of the increased number of fragment handles and per-datagram overheads. /// /// In the worst case, the library may keep up to two full transfer payloads in memory at the same time /// (two transfer states are kept to allow acceptance of interleaved frames). @@ -698,18 +712,17 @@ struct UdpardRxMemoryResources { /// The session memory resource is used to provide memory for the session instances described above. /// Each instance is fixed-size, so a trivial zero-fragmentation block allocator is sufficient. - struct UdpardMemoryResource* session; + struct UdpardMemoryResource session; - /// The payload fragment handles are allocated per payload fragment; each handle contains a pointer to its fragment. + /// The fragment handles are allocated per payload fragment; each handle contains a pointer to its fragment. /// Each instance is of a very small fixed size, so a trivial zero-fragmentation block allocator is sufficient. - struct UdpardMemoryResource* payload_fragment_handle; + struct UdpardMemoryResource fragment; /// The library never allocates payload buffers itself, as they are handed over by the application via /// udpardRx*Receive. Once a buffer is handed over, the library may choose to keep it if it is deemed to be /// necessary to complete a transfer reassembly, or to discard it if it is deemed to be unnecessary. - /// Discarded payload buffers are freed using this memory resource. - /// As this resource is never used to allocate memory, the "allocate" pointer can be NULL. 
- struct UdpardMemoryResource* payload; + /// Discarded payload buffers are freed using this object. + struct UdpardMemoryDeleter payload; }; /// Represents a received Cyphal transfer. @@ -732,18 +745,23 @@ struct UdpardRxTransfer /// this requires freeing both the handles and the payload buffers they point to. /// Beware that different memory resources may have been used to allocate the handles and the payload buffers; /// the application is responsible for freeing them using the correct memory resource. - size_t payload_size; - struct UdpardPayloadFragmentHandle payload; + /// + /// If the payload is empty, the corresponding buffer pointers may be NULL. + size_t payload_size; + struct UdpardFragment payload; }; /// This is, essentially, a helper that frees the memory allocated for the payload and its fragment headers -/// using the correct memory resource. The application can do the same thing manually if it has access to the +/// using the correct memory resources. The application can do the same thing manually if it has access to the /// required context to compute the size, or if the memory resource implementation does not require deallocation size. /// +/// The head of the fragment list is passed by value so it is not freed. This is in line with the UdpardRxTransfer +/// design, where the head is stored by value to reduce indirection in small transfers. We call it Scott's Head. +/// /// If any of the arguments are NULL, the function has no effect. 
-void udpardRxTransferFree(struct UdpardRxTransfer* const self, - struct UdpardMemoryResource* const memory_payload_fragment_handle, - struct UdpardMemoryResource* const memory_payload); +void udpardRxFragmentFree(const struct UdpardFragment head, + const struct UdpardMemoryResource memory_fragment, + const struct UdpardMemoryDeleter memory_payload); // --------------------------------------------- SUBJECTS --------------------------------------------- @@ -800,15 +818,15 @@ struct UdpardRxSubscription /// The return value is a negated UDPARD_ERROR_ARGUMENT if any of the input arguments are invalid. /// /// The time complexity is constant. This function does not invoke the dynamic memory manager. -int8_t udpardRxSubscriptionInit(struct UdpardRxSubscription* const self, - const UdpardPortID subject_id, - const size_t extent, - const struct UdpardRxMemoryResources memory); +int_fast8_t udpardRxSubscriptionInit(struct UdpardRxSubscription* const self, + const UdpardPortID subject_id, + const size_t extent, + const struct UdpardRxMemoryResources memory); /// Frees all memory held by the subscription instance. /// After invoking this function, the instance is no longer usable. /// Do not forget to close the sockets that were opened for this subscription. -void udpardRxSubscriptionDestroy(struct UdpardRxSubscription* const self); +void udpardRxSubscriptionFree(struct UdpardRxSubscription* const self); /// Datagrams received from the sockets of this subscription are fed into this function. /// @@ -822,18 +840,20 @@ void udpardRxSubscriptionDestroy(struct UdpardRxSubscription* const self); /// /// The function takes ownership of the passed datagram payload buffer. The library will either store it as a /// fragment of the reassembled transfer payload or free it using the corresponding memory resource -/// (see UdpardRxMemoryResources) if the datagram is not needed for reassembly. +/// (see UdpardRxMemoryResources) if the datagram is not needed for reassembly. 
Because of the ownership transfer, +/// the datagram payload buffer has to be mutable (non-const). /// /// The accepted datagram may either be invalid, carry a non-final part of a multi-frame transfer, /// carry a final part of a valid multi-frame transfer, or carry a valid single-frame transfer. /// The last two cases are said to complete a transfer. /// -/// If the datagram completes a transfer, the received_transfer argument is filled with the transfer details +/// If the datagram completes a transfer, the out_transfer argument is filled with the transfer details /// and the return value is one. /// The caller is assigned ownership of the transfer payload buffer memory; it has to be freed after use as described /// in the documentation for UdpardRxTransfer. +/// The memory pointed to by out_transfer may be mutated arbitrarily if no transfer is completed. /// -/// If the datagram does not complete a transfer or is malformed, the function returns zero and the received_transfer +/// If the datagram does not complete a transfer or is malformed, the function returns zero and the out_transfer /// is not modified. Observe that malformed frames are not treated as errors, as the local application is not /// responsible for the behavior of external agents producing the datagrams. /// @@ -841,7 +861,7 @@ void udpardRxSubscriptionDestroy(struct UdpardRxSubscription* const self); /// /// 1. A new session state instance is allocated when a new session is initiated. /// -/// 2. A new transfer payload fragment handle is allocated when a new transfer fragment is accepted. +/// 2. A new transfer fragment handle is allocated when a new transfer fragment is accepted. /// /// 3. Allocated objects may occasionally be deallocated at the discretion of the library. 
/// This behavior does not increase the worst case execution time and does not improve the worst case memory @@ -850,14 +870,17 @@ void udpardRxSubscriptionDestroy(struct UdpardRxSubscription* const self); /// /// The time complexity is O(log n) where n is the number of remote notes publishing on this subject (topic). /// No data copy takes place. Malformed frames are discarded in constant time. +/// Linear time is spent on the CRC verification of the transfer payload when the transfer is complete. +/// +/// This function performs log(n) of recursive calls internally, where n is the number of frames in a transfer. /// /// UDPARD_ERROR_MEMORY is returned if the function fails to allocate memory. /// UDPARD_ERROR_ARGUMENT is returned if any of the input arguments are invalid. -int8_t udpardRxSubscriptionReceive(struct UdpardRxSubscription* const self, - const UdpardMicrosecond timestamp_usec, - const struct UdpardConstPayload datagram_payload, - const uint_fast8_t redundant_iface_index, - struct UdpardRxTransfer* const received_transfer); +int_fast8_t udpardRxSubscriptionReceive(struct UdpardRxSubscription* const self, + const UdpardMicrosecond timestamp_usec, + const struct UdpardMutablePayload datagram_payload, + const uint_fast8_t redundant_iface_index, + struct UdpardRxTransfer* const out_transfer); // --------------------------------------------- RPC-SERVICES --------------------------------------------- @@ -868,6 +891,9 @@ struct UdpardRxRPC /// READ-ONLY struct UdpardTreeNode base; + /// READ-ONLY + UdpardPortID service_id; + /// See UdpardRxPort. /// Use this to change the transfer-ID timeout value for this RPC-service port. struct UdpardRxPort port; @@ -935,14 +961,14 @@ struct UdpardRxRPCTransfer /// The return value is a negated UDPARD_ERROR_ARGUMENT if any of the input arguments are invalid. /// /// The time complexity is constant. This function does not invoke the dynamic memory manager. 
-int8_t udpardRxRPCDispatcherInit(struct UdpardRxRPCDispatcher* const self, - const UdpardNodeID local_node_id, - const struct UdpardRxMemoryResources memory); +int_fast8_t udpardRxRPCDispatcherInit(struct UdpardRxRPCDispatcher* const self, + const UdpardNodeID local_node_id, + const struct UdpardRxMemoryResources memory); /// Frees all memory held by the RPC-service dispatcher instance. /// After invoking this function, the instance is no longer usable. /// Do not forget to close the sockets that were opened for this instance. -void udpardRxRPCDispatcherDestroy(struct UdpardRxRPCDispatcher* const self); +void udpardRxRPCDispatcherFree(struct UdpardRxRPCDispatcher* const self); /// This function lets the application register its interest in a particular service-ID and kind (request/response) /// by creating an RPC-service RX port. The service pointer shall retain validity until its unregistration or until @@ -965,11 +991,11 @@ void udpardRxRPCDispatcherDestroy(struct UdpardRxRPCDispatcher* const self); /// (request or response). /// This function does not allocate new memory. The function may deallocate memory if such registration already /// existed; the deallocation behavior is specified in the documentation for udpardRxRPCDispatcherCancel. -int8_t udpardRxRPCDispatcherListen(struct UdpardRxRPCDispatcher* const self, - struct UdpardRxRPC* const service, - const UdpardPortID service_id, - const bool is_request, - const size_t extent); +int_fast8_t udpardRxRPCDispatcherListen(struct UdpardRxRPCDispatcher* const self, + struct UdpardRxRPC* const service, + const UdpardPortID service_id, + const bool is_request, + const size_t extent); /// This function reverses the effect of udpardRxRPCDispatcherListen. /// If the registration is found, all its memory is de-allocated (session states and payload buffers). 
@@ -981,19 +1007,19 @@ int8_t udpardRxRPCDispatcherListen(struct UdpardRxRPCDispatcher* const self, /// /// The time complexity is logarithmic from the number of current registration under the specified transfer kind. /// This function does not allocate new memory. -int8_t udpardRxRPCDispatcherCancel(struct UdpardRxRPCDispatcher* const self, - const UdpardPortID service_id, - const bool is_request); +int_fast8_t udpardRxRPCDispatcherCancel(struct UdpardRxRPCDispatcher* const self, + const UdpardPortID service_id, + const bool is_request); /// Datagrams received from the sockets of this service dispatcher are fed into this function. /// It is the analog of udpardRxSubscriptionReceive for RPC-service transfers. /// Please refer to the documentation of udpardRxSubscriptionReceive for the usage information. -int8_t udpardRxRPCDispatcherReceive(struct UdpardRxRPCDispatcher* const self, - struct UdpardRxRPC** const service, - const UdpardMicrosecond timestamp_usec, - const struct UdpardConstPayload datagram_payload, - const uint_fast8_t redundant_iface_index, - struct UdpardRxRPCTransfer* const received_transfer); +int_fast8_t udpardRxRPCDispatcherReceive(struct UdpardRxRPCDispatcher* const self, + struct UdpardRxRPC** const service, + const UdpardMicrosecond timestamp_usec, + const struct UdpardMutablePayload datagram_payload, + const uint_fast8_t redundant_iface_index, + struct UdpardRxRPCTransfer* const out_transfer); #ifdef __cplusplus } diff --git a/tests/.idea/dictionaries/pavel.xml b/tests/.idea/dictionaries/pavel.xml index 2c334dd..9c14631 100644 --- a/tests/.idea/dictionaries/pavel.xml +++ b/tests/.idea/dictionaries/pavel.xml @@ -2,13 +2,21 @@ baremetal + cavl cfamily + cyphal deallocation + deduplicator discardment dscp + dsdl dudpard + ffee ghcr + iface + ifaces intravehicular + kirienko libcanard libgtest libudpard @@ -18,7 +26,11 @@ mmcu nosonar opencyphal + pard + prio profraw + pycyphal + spdx stringmakers udpard udpip diff --git a/tests/src/helpers.h 
b/tests/src/helpers.h index 0a81967..87d881e 100644 --- a/tests/src/helpers.h +++ b/tests/src/helpers.h @@ -38,17 +38,17 @@ extern "C" { } \ } while (0) -static inline void* dummyAllocatorAllocate(struct UdpardMemoryResource* const self, const size_t size) +static inline void* dummyAllocatorAllocate(void* const user_reference, const size_t size) { - (void) self; + (void) user_reference; (void) size; return NULL; } -static inline void dummyAllocatorFree(struct UdpardMemoryResource* const self, const size_t size, void* const pointer) +static inline void dummyAllocatorDeallocate(void* const user_reference, const size_t size, void* const pointer) { + (void) user_reference; (void) size; - TEST_PANIC_UNLESS(self != NULL); TEST_PANIC_UNLESS(pointer == NULL); } @@ -57,22 +57,23 @@ static inline void dummyAllocatorFree(struct UdpardMemoryResource* const self, c #define INSTRUMENTED_ALLOCATOR_CANARY_SIZE 1024U typedef struct { - struct UdpardMemoryResource base; - uint_least8_t canary[INSTRUMENTED_ALLOCATOR_CANARY_SIZE]; + uint_least8_t canary[INSTRUMENTED_ALLOCATOR_CANARY_SIZE]; /// The limit can be changed at any moment to control the maximum amount of memory that can be allocated. /// It may be set to a value less than the currently allocated amount. + size_t limit_fragments; size_t limit_bytes; /// The current state of the allocator. 
size_t allocated_fragments; size_t allocated_bytes; } InstrumentedAllocator; -static inline void* instrumentedAllocatorAllocate(struct UdpardMemoryResource* const base, const size_t size) +static inline void* instrumentedAllocatorAllocate(void* const user_reference, const size_t size) { - InstrumentedAllocator* const self = (InstrumentedAllocator*) base; - TEST_PANIC_UNLESS(self->base.allocate == &instrumentedAllocatorAllocate); - void* result = NULL; - if ((size > 0U) && ((self->allocated_bytes + size) <= self->limit_bytes)) + InstrumentedAllocator* const self = (InstrumentedAllocator*) user_reference; + void* result = NULL; + if ((size > 0U) && // + ((self->allocated_bytes + size) <= self->limit_bytes) && // + ((self->allocated_fragments + 1U) <= self->limit_fragments)) { const size_t size_with_canaries = size + ((size_t) INSTRUMENTED_ALLOCATOR_CANARY_SIZE * 2U); void* origin = malloc(size_with_canaries); @@ -98,13 +99,9 @@ static inline void* instrumentedAllocatorAllocate(struct UdpardMemoryResource* c return result; } -static inline void instrumentedAllocatorFree(struct UdpardMemoryResource* const base, - const size_t size, - void* const pointer) +static inline void instrumentedAllocatorDeallocate(void* const user_reference, const size_t size, void* const pointer) { - InstrumentedAllocator* const self = (InstrumentedAllocator*) base; - TEST_PANIC_UNLESS(self->base.allocate == &instrumentedAllocatorAllocate); - TEST_PANIC_UNLESS(self->base.free == &instrumentedAllocatorFree); + InstrumentedAllocator* const self = (InstrumentedAllocator*) user_reference; if (pointer != NULL) { uint_least8_t* p = ((uint_least8_t*) pointer) - INSTRUMENTED_ALLOCATOR_CANARY_SIZE; @@ -135,18 +132,32 @@ static inline void instrumentedAllocatorFree(struct UdpardMemoryResource* const /// By default, the limit is unrestricted (set to the maximum possible value). 
static inline void instrumentedAllocatorNew(InstrumentedAllocator* const self) { - self->base.allocate = &instrumentedAllocatorAllocate; - self->base.free = &instrumentedAllocatorFree; - self->base.user_reference = NULL; for (size_t i = 0; i < INSTRUMENTED_ALLOCATOR_CANARY_SIZE; i++) { self->canary[i] = (uint_least8_t) (rand() % (UINT_LEAST8_MAX + 1)); } + self->limit_fragments = SIZE_MAX; self->limit_bytes = SIZE_MAX; self->allocated_fragments = 0U; self->allocated_bytes = 0U; } +static inline struct UdpardMemoryResource instrumentedAllocatorMakeMemoryResource( + const InstrumentedAllocator* const self) +{ + const struct UdpardMemoryResource out = {.user_reference = (void*) self, + .deallocate = &instrumentedAllocatorDeallocate, + .allocate = &instrumentedAllocatorAllocate}; + return out; +} + +static inline struct UdpardMemoryDeleter instrumentedAllocatorMakeMemoryDeleter(const InstrumentedAllocator* const self) +{ + const struct UdpardMemoryDeleter out = {.user_reference = (void*) self, + .deallocate = &instrumentedAllocatorDeallocate}; + return out; +} + #ifdef __cplusplus } #endif diff --git a/tests/src/hexdump.hpp b/tests/src/hexdump.hpp index 3e44e63..d27bc83 100644 --- a/tests/src/hexdump.hpp +++ b/tests/src/hexdump.hpp @@ -11,14 +11,16 @@ namespace hexdump { -template +using Byte = std::uint_least8_t; + +template [[nodiscard]] std::string hexdump(InputIterator begin, const InputIterator end) { static_assert(BytesPerRow > 0); - static constexpr std::pair PrintableASCIIRange{32, 126}; - std::uint32_t offset = 0; - std::ostringstream output; - bool first = true; + static constexpr std::pair PrintableASCIIRange{32, 126}; + std::uint32_t offset = 0; + std::ostringstream output; + bool first = true; output << std::hex << std::setfill('0'); do { @@ -33,7 +35,7 @@ template output << std::setw(8) << offset << " "; offset += BytesPerRow; auto it = begin; - for (std::uint8_t i = 0; i < BytesPerRow; ++i) + for (Byte i = 0; i < BytesPerRow; ++i) { if (i == 8) { @@ 
-50,7 +52,7 @@ template } } output << " "; - for (std::uint8_t i = 0; i < BytesPerRow; ++i) + for (Byte i = 0; i < BytesPerRow; ++i) { if (begin != end) { @@ -76,6 +78,6 @@ template [[nodiscard]] inline auto hexdump(const void* const data, const std::size_t size) { - return hexdump(static_cast(data), static_cast(data) + size); + return hexdump(static_cast(data), static_cast(data) + size); } } // namespace hexdump diff --git a/tests/src/test_cavl.cpp b/tests/src/test_cavl.cpp index c944f09..8d816b6 100644 --- a/tests/src/test_cavl.cpp +++ b/tests/src/test_cavl.cpp @@ -31,10 +31,10 @@ struct Node final : Cavl T value{}; - auto checkLinkageUpLeftRightBF(const Cavl* const check_up, - const Cavl* const check_le, - const Cavl* const check_ri, - const std::int8_t check_bf) const -> bool + auto checkLinkageUpLeftRightBF(const Cavl* const check_up, + const Cavl* const check_le, + const Cavl* const check_ri, + const std::int_fast8_t check_bf) const -> bool { return (up == check_up) && // (lr[0] == check_le) && (lr[1] == check_ri) && // @@ -64,7 +64,7 @@ auto search(Node** const root, const Predicate& predicate, const Factory& fac Predicate predicate; Factory factory; - static auto callPredicate(void* const user_reference, const Cavl* const node) -> std::int8_t + static auto callPredicate(void* const user_reference, const Cavl* const node) -> std::int_fast8_t { const auto ret = static_cast(user_reference)->predicate(static_cast&>(*node)); if (ret > 0) @@ -101,20 +101,20 @@ void remove(Node** const root, const Node* const n) } template -auto getHeight(const Node* const n) -> std::uint8_t // NOLINT recursion +auto getHeight(const Node* const n) -> std::uint_fast8_t // NOLINT recursion { - return (n != nullptr) ? static_cast(1U + std::max(getHeight(static_cast*>(n->lr[0])), - getHeight(static_cast*>(n->lr[1])))) + return (n != nullptr) ? 
static_cast(1U + std::max(getHeight(static_cast*>(n->lr[0])), + getHeight(static_cast*>(n->lr[1])))) : 0; } template -void print(const Node* const nd, const std::uint8_t depth = 0, const char marker = 'T') // NOLINT recursion +void print(const Node* const nd, const std::uint_fast8_t depth = 0, const char marker = 'T') // NOLINT recursion { TEST_ASSERT(10 > getHeight(nd)); // Fail early for malformed cyclic trees, do not overwhelm stdout. if (nd != nullptr) { - print(static_cast*>(nd->lr[0]), static_cast(depth + 1U), 'L'); + print(static_cast*>(nd->lr[0]), static_cast(depth + 1U), 'L'); for (std::uint16_t i = 1U; i < depth; i++) { std::cout << " "; @@ -133,7 +133,7 @@ void print(const Node* const nd, const std::uint8_t depth = 0, const char mar } std::cout << marker << "=" << static_cast(nd->value) // << " [" << static_cast(nd->bf) << "]" << std::endl; - print(static_cast*>(nd->lr[1]), static_cast(depth + 1U), 'R'); + print(static_cast*>(nd->lr[1]), static_cast(depth + 1U), 'R'); } } @@ -211,7 +211,7 @@ auto findBrokenBalanceFactor(const Node* const n) -> const Cavl* // NOLINT r void testCheckAscension() { - using N = Node; + using N = Node; N t{2}; N l{1}; N r{3}; @@ -238,7 +238,7 @@ void testCheckAscension() void testRotation() { - using N = Node; + using N = Node; // Original state: // x.left = a // x.right = z @@ -284,7 +284,7 @@ void testRotation() void testBalancingA() { - using N = Node; + using N = Node; // Double left-right rotation. // X X Y // / ` / ` / ` @@ -334,7 +334,7 @@ void testBalancingA() void testBalancingB() { - using N = Node; + using N = Node; // Without F the handling of Z and Y is more complex; Z flips the sign of its balance factor: // X X Y // / ` / ` / ` @@ -383,7 +383,7 @@ void testBalancingB() void testBalancingC() { - using N = Node; + using N = Node; // Both X and Z are heavy on the same side. 
// X Z // / ` / ` @@ -434,9 +434,9 @@ void testBalancingC() void testRetracingOnGrowth() { - using N = Node; + using N = Node; std::array t{}; - for (std::uint8_t i = 0; i < 100; i++) + for (std::uint_fast8_t i = 0; i < 100; i++) { t[i].value = i; } @@ -640,7 +640,7 @@ void testRetracingOnGrowth() void testSearchTrivial() { - using N = Node; + using N = Node; // A // B C // D E F G @@ -684,7 +684,7 @@ void testSearchTrivial() void testRemovalA() { - using N = Node; + using N = Node; // 4 // / ` // 2 6 @@ -693,7 +693,7 @@ void testRemovalA() // / ` // 7 9 std::array t{}; - for (std::uint8_t i = 0; i < 10; i++) + for (std::uint_fast8_t i = 0; i < 10; i++) { t[i].value = i; } @@ -1005,7 +1005,7 @@ void testRemovalA() void testMutationManual() { - using N = Node; + using N = Node; // Build a tree with 31 elements from 1 to 31 inclusive by adding new elements successively: // 16 // / ` @@ -1017,13 +1017,13 @@ void testMutationManual() // / ` / ` / ` / ` / ` / ` / ` / ` // 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31 std::array t{}; - for (std::uint8_t i = 0; i < 32; i++) + for (std::uint_fast8_t i = 0; i < 32; i++) { t[i].value = i; } // Build the actual tree. 
N* root = nullptr; - for (std::uint8_t i = 1; i < 32; i++) + for (std::uint_fast8_t i = 1; i < 32; i++) { const auto pred = [&](const N& v) { return t.at(i).value - v.value; }; TEST_ASSERT(nullptr == search(&root, pred)); @@ -1276,16 +1276,16 @@ void testMutationManual() auto getRandomByte() { - return static_cast((0xFFLL * std::rand()) / RAND_MAX); + return static_cast((0xFFLL * std::rand()) / RAND_MAX); } void testMutationRandomized() { - using N = Node; + using N = Node; std::array t{}; for (auto i = 0U; i < 256U; i++) { - t.at(i).value = static_cast(i); + t.at(i).value = static_cast(i); } std::array mask{}; std::size_t size = 0; @@ -1307,7 +1307,7 @@ void testMutationRandomized() }; validate(); - const auto add = [&](const std::uint8_t x) { + const auto add = [&](const std::uint_fast8_t x) { const auto predicate = [&](const N& v) { return x - v.value; }; if (N* const existing = search(&root, predicate)) { @@ -1332,7 +1332,7 @@ void testMutationRandomized() } }; - const auto drop = [&](const std::uint8_t x) { + const auto drop = [&](const std::uint_fast8_t x) { const auto predicate = [&](const N& v) { return x - v.value; }; if (N* const existing = search(&root, predicate)) { diff --git a/tests/src/test_helpers.c b/tests/src/test_helpers.c index 73d9369..0fe09ec 100644 --- a/tests/src/test_helpers.c +++ b/tests/src/test_helpers.c @@ -11,41 +11,49 @@ static void testInstrumentedAllocator(void) TEST_ASSERT_EQUAL_size_t(0, al.allocated_fragments); TEST_ASSERT_EQUAL_size_t(SIZE_MAX, al.limit_bytes); - void* a = al.base.allocate(&al.base, 123); + const struct UdpardMemoryResource resource = instrumentedAllocatorMakeMemoryResource(&al); + + void* a = resource.allocate(resource.user_reference, 123); TEST_ASSERT_EQUAL_size_t(1, al.allocated_fragments); TEST_ASSERT_EQUAL_size_t(123, al.allocated_bytes); - void* b = al.base.allocate(&al.base, 456); + void* b = resource.allocate(resource.user_reference, 456); TEST_ASSERT_EQUAL_size_t(2, al.allocated_fragments); 
TEST_ASSERT_EQUAL_size_t(579, al.allocated_bytes); - al.limit_bytes = 600; + al.limit_bytes = 600; + al.limit_fragments = 2; + + TEST_ASSERT_EQUAL_PTR(NULL, resource.allocate(resource.user_reference, 100)); + TEST_ASSERT_EQUAL_size_t(2, al.allocated_fragments); + TEST_ASSERT_EQUAL_size_t(579, al.allocated_bytes); - TEST_ASSERT_EQUAL_PTR(NULL, al.base.allocate(&al.base, 100)); + TEST_ASSERT_EQUAL_PTR(NULL, resource.allocate(resource.user_reference, 21)); TEST_ASSERT_EQUAL_size_t(2, al.allocated_fragments); TEST_ASSERT_EQUAL_size_t(579, al.allocated_bytes); + al.limit_fragments = 4; - void* c = al.base.allocate(&al.base, 21); + void* c = resource.allocate(resource.user_reference, 21); TEST_ASSERT_EQUAL_size_t(3, al.allocated_fragments); TEST_ASSERT_EQUAL_size_t(600, al.allocated_bytes); - al.base.free(&al.base, 123, a); + resource.deallocate(resource.user_reference, 123, a); TEST_ASSERT_EQUAL_size_t(2, al.allocated_fragments); TEST_ASSERT_EQUAL_size_t(477, al.allocated_bytes); - void* d = al.base.allocate(&al.base, 100); + void* d = resource.allocate(resource.user_reference, 100); TEST_ASSERT_EQUAL_size_t(3, al.allocated_fragments); TEST_ASSERT_EQUAL_size_t(577, al.allocated_bytes); - al.base.free(&al.base, 21, c); + resource.deallocate(resource.user_reference, 21, c); TEST_ASSERT_EQUAL_size_t(2, al.allocated_fragments); TEST_ASSERT_EQUAL_size_t(556, al.allocated_bytes); - al.base.free(&al.base, 100, d); + resource.deallocate(resource.user_reference, 100, d); TEST_ASSERT_EQUAL_size_t(1, al.allocated_fragments); TEST_ASSERT_EQUAL_size_t(456, al.allocated_bytes); - al.base.free(&al.base, 456, b); + resource.deallocate(resource.user_reference, 456, b); TEST_ASSERT_EQUAL_size_t(0, al.allocated_fragments); TEST_ASSERT_EQUAL_size_t(0, al.allocated_bytes); } diff --git a/tests/src/test_intrusive_rx.c b/tests/src/test_intrusive_rx.c index 004a37e..8330114 100644 --- a/tests/src/test_intrusive_rx.c +++ b/tests/src/test_intrusive_rx.c @@ -7,6 +7,196 @@ #include "helpers.h" 
#include +// NOLINTBEGIN(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling) + +/// Moves the payload from the origin into a new buffer and attaches is to the newly allocated fragment. +/// This function performs two allocations. This function is infallible. +static RxFragment* makeRxFragment(const RxMemory memory, + const uint32_t frame_index, + const struct UdpardPayload view, + const struct UdpardMutablePayload origin, + RxFragmentTreeNode* const parent) +{ + TEST_PANIC_UNLESS((view.data >= origin.data) && (view.size <= origin.size)); + TEST_PANIC_UNLESS((((const byte_t*) view.data) + view.size) <= (((const byte_t*) origin.data) + origin.size)); + byte_t* const new_origin = (byte_t*) instrumentedAllocatorAllocate(memory.payload.user_reference, origin.size); + RxFragment* const frag = (RxFragment*) memAlloc(memory.fragment, sizeof(RxFragment)); + if ((new_origin != NULL) && (frag != NULL)) + { + (void) memmove(new_origin, origin.data, origin.size); + (void) memset(frag, 0, sizeof(RxFragment)); + frag->tree.base.lr[0] = NULL; + frag->tree.base.lr[1] = NULL; + frag->tree.base.up = &parent->base; + frag->tree.this = frag; + frag->frame_index = frame_index; + frag->base.origin.data = new_origin; + frag->base.origin.size = origin.size; + frag->base.view.data = new_origin + (((const byte_t*) view.data) - ((byte_t*) origin.data)); + frag->base.view.size = view.size; + } + else + { + TEST_PANIC("Failed to allocate RxFragment"); + } + return frag; +} + +/// This is a simple helper wrapper that constructs a new fragment using a null-terminated string as a payload. 
+static RxFragment* makeRxFragmentString(const RxMemory memory, + const uint32_t frame_index, + const char* const payload, + RxFragmentTreeNode* const parent) +{ + const size_t sz = strlen(payload); + return makeRxFragment(memory, + frame_index, + (struct UdpardPayload){.data = payload, .size = sz}, + (struct UdpardMutablePayload){.data = (void*) payload, .size = sz}, + parent); +} + +static bool compareMemory(const size_t expected_size, + const void* const expected, + const size_t actual_size, + const void* const actual) +{ + return (expected_size == actual_size) && (memcmp(expected, actual, expected_size) == 0); +} +static bool compareStringWithPayload(const char* const expected, const struct UdpardPayload payload) +{ + return compareMemory(strlen(expected), expected, payload.size, payload.data); +} + +static RxFrameBase makeRxFrameBase(InstrumentedAllocator* const memory_payload, + const uint32_t frame_index, + const bool end_of_transfer, + const struct UdpardPayload view, + const struct UdpardMutablePayload origin) +{ + TEST_PANIC_UNLESS((view.data >= origin.data) && (view.size <= origin.size)); + TEST_PANIC_UNLESS((((const byte_t*) view.data) + view.size) <= (((const byte_t*) origin.data) + origin.size)); + RxFrameBase out = {0}; + byte_t* const new_origin = (byte_t*) instrumentedAllocatorAllocate(memory_payload, origin.size); + if (new_origin != NULL) + { + (void) memmove(new_origin, origin.data, origin.size); + out.index = frame_index; + out.end_of_transfer = end_of_transfer; + out.origin.data = new_origin; + out.origin.size = origin.size; + out.payload.data = new_origin + (((const byte_t*) view.data) - ((byte_t*) origin.data)); + out.payload.size = view.size; + } + else + { + TEST_PANIC("Failed to allocate payload buffer for RxFrameBase"); + } + return out; +} + +static RxFrameBase makeRxFrameBaseString(InstrumentedAllocator* const memory, + const uint32_t frame_index, + const bool end_of_transfer, + const char* const payload) +{ + return 
makeRxFrameBase(memory, + frame_index, + end_of_transfer, + (struct UdpardPayload){.data = payload, .size = strlen(payload)}, + (struct UdpardMutablePayload){.data = (void*) payload, .size = strlen(payload)}); +} + +static RxFrame makeRxFrameString(InstrumentedAllocator* const memory, + const TransferMetadata meta, + const uint32_t frame_index, + const bool end_of_transfer, + const char* const payload) +{ + return (RxFrame){.base = makeRxFrameBaseString(memory, frame_index, end_of_transfer, payload), .meta = meta}; +} + +static RxMemory makeRxMemory(InstrumentedAllocator* const fragment, InstrumentedAllocator* const payload) +{ + return (RxMemory){.fragment = instrumentedAllocatorMakeMemoryResource(fragment), + .payload = instrumentedAllocatorMakeMemoryDeleter(payload)}; +} + +static struct UdpardMutablePayload makeDatagramPayload(InstrumentedAllocator* const memory, + const TransferMetadata meta, + const uint32_t frame_index, + const bool end_of_transfer, + const struct UdpardPayload payload) +{ + struct UdpardMutablePayload pld = {.size = payload.size + HEADER_SIZE_BYTES}; + pld.data = instrumentedAllocatorAllocate(memory, pld.size); + if (pld.data != NULL) + { + (void) memcpy(txSerializeHeader(pld.data, meta, frame_index, end_of_transfer), payload.data, payload.size); + } + else + { + TEST_PANIC("Failed to allocate datagram payload"); + } + return pld; +} + +static struct UdpardMutablePayload makeDatagramPayloadString(InstrumentedAllocator* const memory, + const TransferMetadata meta, + const uint32_t frame_index, + const bool end_of_transfer, + const char* const string) +{ + return makeDatagramPayload(memory, + meta, + frame_index, + end_of_transfer, + (struct UdpardPayload){.data = string, .size = strlen(string)}); +} + +static struct UdpardMutablePayload makeDatagramPayloadSingleFrame(InstrumentedAllocator* const memory, + const TransferMetadata meta, + const struct UdpardPayload payload) +{ + struct UdpardMutablePayload pld = + makeDatagramPayload(memory, + 
meta, + 0, + true, + (struct UdpardPayload){.data = payload.data, + .size = payload.size + TRANSFER_CRC_SIZE_BYTES}); + TEST_PANIC_UNLESS(pld.size == (payload.size + HEADER_SIZE_BYTES + TRANSFER_CRC_SIZE_BYTES)); + txSerializeU32(((byte_t*) pld.data) + HEADER_SIZE_BYTES + payload.size, + transferCRCCompute(payload.size, payload.data)); + return pld; +} + +static struct UdpardMutablePayload makeDatagramPayloadSingleFrameString(InstrumentedAllocator* const memory, + const TransferMetadata meta, + const char* const payload) +{ + return makeDatagramPayloadSingleFrame(memory, + meta, + (struct UdpardPayload){.data = payload, .size = strlen(payload)}); +} + +// -------------------------------------------------- MISC -------------------------------------------------- + +static void testCompare32(void) +{ + TEST_ASSERT_EQUAL(0, compare32(0, 0)); + TEST_ASSERT_EQUAL(0, compare32(1, 1)); + TEST_ASSERT_EQUAL(0, compare32(0xdeadbeef, 0xdeadbeef)); + TEST_ASSERT_EQUAL(0, compare32(0x0badc0de, 0x0badc0de)); + TEST_ASSERT_EQUAL(0, compare32(0xffffffff, 0xffffffff)); + TEST_ASSERT_EQUAL(+1, compare32(1, 0)); + TEST_ASSERT_EQUAL(+1, compare32(0xffffffff, 0xfffffffe)); + TEST_ASSERT_EQUAL(-1, compare32(0, 1)); + TEST_ASSERT_EQUAL(-1, compare32(0xfffffffe, 0xffffffff)); +} + +// -------------------------------------------------- FRAME PARSING -------------------------------------------------- + // Generate reference data using PyCyphal: // // >>> from pycyphal.transport.udp import UDPFrame @@ -16,34 +206,36 @@ // data_specifier=MessageDataSpecifier(7654), user_data=0) // >>> list(frame.compile_header_and_payload()[0]) // [1, 2, 41, 9, 56, 21, 230, 29, 13, 240, 221, 224, 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 224, 60] -static void testRxParseFrameValidMessage(void) +static void testParseFrameValidMessage(void) { - const byte_t data[] = {1, 2, 41, 9, 255, 255, 230, 29, 13, 240, 221, 224, - 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 30, 179, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - 
TEST_ASSERT(rxParseFrame((struct UdpardConstPayload){.data = data, .size = sizeof(data)}, &rxf)); + byte_t data[] = {1, 2, 41, 9, 255, 255, 230, 29, 13, 240, 221, 224, + 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 30, 179, // + 'a', 'b', 'c'}; + RxFrame rxf = {0}; + TEST_ASSERT(rxParseFrame((struct UdpardMutablePayload){.data = data, .size = sizeof(data)}, &rxf)); TEST_ASSERT_EQUAL_UINT64(UdpardPriorityFast, rxf.meta.priority); TEST_ASSERT_EQUAL_UINT64(2345, rxf.meta.src_node_id); TEST_ASSERT_EQUAL_UINT64(UDPARD_NODE_ID_UNSET, rxf.meta.dst_node_id); TEST_ASSERT_EQUAL_UINT64(7654, rxf.meta.data_specifier); TEST_ASSERT_EQUAL_UINT64(0xbadc0ffee0ddf00d, rxf.meta.transfer_id); - TEST_ASSERT_EQUAL_UINT64(12345, rxf.index); - TEST_ASSERT_FALSE(rxf.end_of_transfer); - TEST_ASSERT_EQUAL_UINT64(3, rxf.payload.size); - TEST_ASSERT_EQUAL_UINT8_ARRAY("abc", rxf.payload.data, 3); + TEST_ASSERT_EQUAL_UINT64(12345, rxf.base.index); + TEST_ASSERT_FALSE(rxf.base.end_of_transfer); + TEST_ASSERT_EQUAL_UINT64(3, rxf.base.payload.size); + TEST_ASSERT_EQUAL_UINT8_ARRAY("abc", rxf.base.payload.data, 3); + TEST_ASSERT_EQUAL_UINT64(sizeof(data), rxf.base.origin.size); + TEST_ASSERT_EQUAL_UINT8_ARRAY(data, rxf.base.origin.data, sizeof(data)); } -static void testRxParseFrameValidRPCService(void) +static void testParseFrameValidRPCService(void) { // frame = UDPFrame(priority=Priority.FAST, transfer_id=0xbadc0ffee0ddf00d, index=6654, end_of_transfer=False, // payload=memoryview(b''), source_node_id=2345, destination_node_id=4567, // data_specifier=ServiceDataSpecifier(role=ServiceDataSpecifier.Role.REQUEST, service_id=123), user_data=0) - const byte_t data[] = {1, 2, 41, 9, 215, 17, 123, 192, 13, 240, 221, 224, - 254, 15, 220, 186, 254, 25, 0, 0, 0, 0, 173, 122, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT(rxParseFrame((struct UdpardConstPayload){.data = data, .size = sizeof(data)}, &rxf)); + byte_t data[] = {1, 2, 41, 9, 215, 17, 123, 192, 13, 240, 221, 224, + 254, 15, 220, 186, 254, 25, 
0, 0, 0, 0, 173, 122, // + 'a', 'b', 'c'}; + RxFrame rxf = {0}; + TEST_ASSERT(rxParseFrame((struct UdpardMutablePayload){.data = data, .size = sizeof(data)}, &rxf)); TEST_ASSERT_EQUAL_UINT64(UdpardPriorityFast, rxf.meta.priority); TEST_ASSERT_EQUAL_UINT64(2345, rxf.meta.src_node_id); TEST_ASSERT_EQUAL_UINT64(4567, rxf.meta.dst_node_id); @@ -51,92 +243,2138 @@ static void testRxParseFrameValidRPCService(void) DATA_SPECIFIER_SERVICE_REQUEST_NOT_RESPONSE_MASK, rxf.meta.data_specifier); TEST_ASSERT_EQUAL_UINT64(0xbadc0ffee0ddf00d, rxf.meta.transfer_id); - TEST_ASSERT_EQUAL_UINT64(6654, rxf.index); - TEST_ASSERT_FALSE(rxf.end_of_transfer); - TEST_ASSERT_EQUAL_UINT64(3, rxf.payload.size); - TEST_ASSERT_EQUAL_UINT8_ARRAY("abc", rxf.payload.data, 3); + TEST_ASSERT_EQUAL_UINT64(6654, rxf.base.index); + TEST_ASSERT_FALSE(rxf.base.end_of_transfer); + TEST_ASSERT_EQUAL_UINT64(3, rxf.base.payload.size); + TEST_ASSERT_EQUAL_UINT8_ARRAY("abc", rxf.base.payload.data, 3); + TEST_ASSERT_EQUAL_UINT64(sizeof(data), rxf.base.origin.size); + TEST_ASSERT_EQUAL_UINT8_ARRAY(data, rxf.base.origin.data, sizeof(data)); } -static void testRxParseFrameValidMessageAnonymous(void) +static void testParseFrameValidMessageAnonymous(void) { - const byte_t data[] = {1, 2, 255, 255, 255, 255, 230, 29, 13, 240, 221, 224, - 254, 15, 220, 186, 0, 0, 0, 128, 0, 0, 168, 92, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT(rxParseFrame((struct UdpardConstPayload){.data = data, .size = sizeof(data)}, &rxf)); + byte_t data[] = {1, 2, 255, 255, 255, 255, 230, 29, 13, 240, 221, 224, + 254, 15, 220, 186, 0, 0, 0, 128, 0, 0, 168, 92, // + 'a', 'b', 'c'}; + RxFrame rxf = {0}; + TEST_ASSERT(rxParseFrame((struct UdpardMutablePayload){.data = data, .size = sizeof(data)}, &rxf)); TEST_ASSERT_EQUAL_UINT64(UdpardPriorityFast, rxf.meta.priority); TEST_ASSERT_EQUAL_UINT64(UDPARD_NODE_ID_UNSET, rxf.meta.src_node_id); TEST_ASSERT_EQUAL_UINT64(UDPARD_NODE_ID_UNSET, rxf.meta.dst_node_id); TEST_ASSERT_EQUAL_UINT64(7654, 
rxf.meta.data_specifier); TEST_ASSERT_EQUAL_UINT64(0xbadc0ffee0ddf00d, rxf.meta.transfer_id); - TEST_ASSERT_EQUAL_UINT64(0, rxf.index); - TEST_ASSERT_TRUE(rxf.end_of_transfer); - TEST_ASSERT_EQUAL_UINT64(3, rxf.payload.size); - TEST_ASSERT_EQUAL_UINT8_ARRAY("abc", rxf.payload.data, 3); + TEST_ASSERT_EQUAL_UINT64(0, rxf.base.index); + TEST_ASSERT_TRUE(rxf.base.end_of_transfer); + TEST_ASSERT_EQUAL_UINT64(3, rxf.base.payload.size); + TEST_ASSERT_EQUAL_UINT8_ARRAY("abc", rxf.base.payload.data, 3); + TEST_ASSERT_EQUAL_UINT64(sizeof(data), rxf.base.origin.size); + TEST_ASSERT_EQUAL_UINT8_ARRAY(data, rxf.base.origin.data, sizeof(data)); } -static void testRxParseFrameRPCServiceAnonymous(void) +static void testParseFrameRPCServiceAnonymous(void) { - const byte_t data[] = {1, 2, 255, 255, 215, 17, 123, 192, 13, 240, 221, 224, - 254, 15, 220, 186, 254, 25, 0, 0, 0, 0, 75, 79, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardConstPayload){.data = data, .size = sizeof(data)}, &rxf)); + byte_t data[] = {1, 2, 255, 255, 215, 17, 123, 192, 13, 240, 221, 224, + 254, 15, 220, 186, 254, 25, 0, 0, 0, 0, 75, 79, // + 'a', 'b', 'c'}; + RxFrame rxf = {0}; + TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload){.data = data, .size = sizeof(data)}, &rxf)); } -static void testRxParseFrameRPCServiceBroadcast(void) +static void testParseFrameRPCServiceBroadcast(void) { - const byte_t data[] = {1, 2, 41, 9, 255, 255, 123, 192, 13, 240, 221, 224, - 254, 15, 220, 186, 254, 25, 0, 0, 0, 0, 248, 152, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardConstPayload){.data = data, .size = sizeof(data)}, &rxf)); + byte_t data[] = {1, 2, 41, 9, 255, 255, 123, 192, 13, 240, 221, 224, + 254, 15, 220, 186, 254, 25, 0, 0, 0, 0, 248, 152, // + 'a', 'b', 'c'}; + RxFrame rxf = {0}; + TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload){.data = data, .size = sizeof(data)}, &rxf)); } -static void 
testRxParseFrameAnonymousNonSingleFrame(void) +static void testParseFrameAnonymousNonSingleFrame(void) { // Invalid anonymous message frame because EOT not set (multi-frame anonymous transfers are not allowed). - const byte_t data[] = {1, 2, 255, 255, 255, 255, 230, 29, 13, 240, 221, 224, - 254, 15, 220, 186, 0, 0, 0, 0, 0, 0, 147, 6, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardConstPayload){.data = data, .size = sizeof(data)}, &rxf)); + byte_t data[] = {1, 2, 255, 255, 255, 255, 230, 29, 13, 240, 221, 224, + 254, 15, 220, 186, 0, 0, 0, 0, 0, 0, 147, 6, // + 'a', 'b', 'c'}; + RxFrame rxf = {0}; + TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload){.data = data, .size = sizeof(data)}, &rxf)); } -static void testRxParseFrameBadHeaderCRC(void) +static void testParseFrameBadHeaderCRC(void) { // Bad header CRC. - const byte_t data[] = {1, 2, 41, 9, 255, 255, 230, 29, 13, 240, 221, 224, - 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 30, 180, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardConstPayload){.data = data, .size = sizeof(data)}, &rxf)); + byte_t data[] = {1, 2, 41, 9, 255, 255, 230, 29, 13, 240, 221, 224, + 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 30, 180, // + 'a', 'b', 'c'}; + RxFrame rxf = {0}; + TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload){.data = data, .size = sizeof(data)}, &rxf)); } -static void testRxParseFrameUnknownHeaderVersion(void) +static void testParseFrameUnknownHeaderVersion(void) { // >>> from pycyphal.transport.commons.crc import CRC16CCITT // >>> list(CRC16CCITT.new(bytes( // [0, 2, 41, 9, 56, 21, 230, 29, 13, 240, 221, 224, 254, 15, 220, 186, 57, 48, 0, 0, 0, 0])).value_as_bytes) - const byte_t data[] = {0, 2, 41, 9, 56, 21, 230, 29, 13, 240, 221, 224, - 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 141, 228, // - 'a', 'b', 'c'}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardConstPayload){.data = data, .size = sizeof(data)}, 
&rxf)); + byte_t data[] = {0, 2, 41, 9, 56, 21, 230, 29, 13, 240, 221, 224, + 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 141, 228, // + 'a', 'b', 'c'}; + RxFrame rxf = {0}; + TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload){.data = data, .size = sizeof(data)}, &rxf)); } -static void testRxParseFrameHeaderWithoutPayload(void) +static void testParseFrameHeaderWithoutPayload(void) { - const byte_t data[] = {1, 2, 41, 9, 255, 255, 230, 29, 13, 240, 221, 224, - 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 30, 179}; - RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardConstPayload){.data = data, .size = sizeof(data)}, &rxf)); + byte_t data[] = {1, 2, 41, 9, 255, 255, 230, 29, 13, 240, 221, 224, 254, 15, 220, 186, 57, 48, 0, 0, 0, 0, 30, 179}; + RxFrame rxf = {0}; + TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload){.data = data, .size = sizeof(data)}, &rxf)); } -static void testRxParseFrameEmpty(void) +static void testParseFrameEmpty(void) { RxFrame rxf = {0}; - TEST_ASSERT_FALSE(rxParseFrame((struct UdpardConstPayload){.data = "", .size = 0}, &rxf)); + TEST_ASSERT_FALSE(rxParseFrame((struct UdpardMutablePayload){.data = "", .size = 0}, &rxf)); } +// -------------------------------------------------- SLOT -------------------------------------------------- + +static void testSlotRestartEmpty(void) +{ + RxSlot slot = { + .ts_usec = 1234567890, + .transfer_id = 0x123456789abcdef0, + .max_index = 546, + .eot_index = 654, + .accepted_frames = 555, + .payload_size = 987, + .fragments = NULL, + }; + InstrumentedAllocator alloc = {0}; + rxSlotRestart(&slot, 0x1122334455667788ULL, makeRxMemory(&alloc, &alloc)); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); + TEST_ASSERT_EQUAL(0x1122334455667788ULL, slot.transfer_id); + TEST_ASSERT_EQUAL(0, slot.max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); + TEST_ASSERT_EQUAL(0, slot.accepted_frames); + TEST_ASSERT_EQUAL(0, slot.payload_size); + TEST_ASSERT_EQUAL(NULL, slot.fragments); +} + 
+static void testSlotRestartNonEmpty(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); + byte_t data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + // + RxSlot slot = { + .ts_usec = 1234567890, + .transfer_id = 0x123456789abcdef0, + .max_index = 546, + .eot_index = 654, + .accepted_frames = 555, + .payload_size = 987, + // + .fragments = &makeRxFragment(mem, + 1, + (struct UdpardPayload){.data = &data[2], .size = 2}, + (struct UdpardMutablePayload){.data = data, .size = sizeof(data)}, + NULL) + ->tree, + }; + slot.fragments->base.lr[0] = &makeRxFragment(mem, + 0, + (struct UdpardPayload){.data = &data[1], .size = 1}, + (struct UdpardMutablePayload){.data = data, .size = sizeof(data)}, + slot.fragments) + ->tree.base; + slot.fragments->base.lr[1] = &makeRxFragment(mem, + 2, + (struct UdpardPayload){.data = &data[3], .size = 3}, + (struct UdpardMutablePayload){.data = data, .size = sizeof(data)}, + slot.fragments) + ->tree.base; + // Initialization done, ensure the memory utilization is as we expect. + TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(sizeof(data) * 3, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(sizeof(RxFragment) * 3, mem_fragment.allocated_bytes); + // Now we reset the slot, causing all memory to be freed correctly. + rxSlotRestart(&slot, 0x1122334455667788ULL, makeRxMemory(&mem_fragment, &mem_payload)); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); + TEST_ASSERT_EQUAL(0x1122334455667788ULL, slot.transfer_id); + TEST_ASSERT_EQUAL(0, slot.max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); + TEST_ASSERT_EQUAL(0, slot.accepted_frames); + TEST_ASSERT_EQUAL(0, slot.payload_size); + TEST_ASSERT_EQUAL(NULL, slot.fragments); + // Ensure all memory was freed. 
+ TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); +} + +static void testSlotEjectValidLarge(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); + //>>> from pycyphal.transport.commons.crc import CRC32C + //>>> CRC32C.new(data_bytes).value_as_bytes + static const size_t PayloadSize = 171; + // Build the fragment tree: + // 2 + // / ` + // 1 3 + // / + // 0 + RxFragment* const root = // + makeRxFragmentString(mem, 2, "Where does Man go? ", NULL); + root->tree.base.lr[0] = // + &makeRxFragmentString(mem, 1, "For example, where does Man come from? ", &root->tree)->tree.base; + root->tree.base.lr[1] = // + &makeRxFragmentString(mem, 3, "Where does the universe come from? xL\xAE\xCB", &root->tree)->tree.base; + root->tree.base.lr[0]->lr[0] = + &makeRxFragmentString(mem, // + 0, + "Da Shi, have you ever... considered certain ultimate philosophical questions? ", + ((RxFragmentTreeNode*) root->tree.base.lr[0])) + ->tree.base; + // Initialization done, ensure the memory utilization is as we expect. + TEST_ASSERT_EQUAL(4, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(PayloadSize + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(4, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(sizeof(RxFragment) * 4, mem_fragment.allocated_bytes); + // Eject and verify the payload. + size_t payload_size = 0; + struct UdpardFragment payload = {0}; + TEST_ASSERT(rxSlotEject(&payload_size, + &payload, + &root->tree, + mem_payload.allocated_bytes, + 1024, + makeRxMemory(&mem_fragment, &mem_payload))); + TEST_ASSERT_EQUAL(PayloadSize, payload_size); // CRC removed! 
+ TEST_ASSERT( // + compareStringWithPayload("Da Shi, have you ever... considered certain ultimate philosophical questions? ", + payload.view)); + TEST_ASSERT(compareStringWithPayload("For example, where does Man come from? ", payload.next->view)); + TEST_ASSERT(compareStringWithPayload("Where does Man go? ", payload.next->next->view)); + TEST_ASSERT(compareStringWithPayload("Where does the universe come from? ", payload.next->next->next->view)); + TEST_ASSERT_NULL(payload.next->next->next->next); + // Check the memory utilization. All payload fragments are still kept, but the first fragment is freed because of + // the Scott's short payload optimization. + TEST_ASSERT_EQUAL(4, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(PayloadSize + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); // One gone!!1 + TEST_ASSERT_EQUAL(sizeof(RxFragment) * 3, mem_fragment.allocated_bytes); // yes yes! + // Now, free the payload as the application would. + udpardRxFragmentFree(payload, mem.fragment, mem.payload); + // All memory shall be free now. As in "free beer". + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); +} + +static void testSlotEjectValidSmall(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); + //>>> from pycyphal.transport.commons.crc import CRC32C + //>>> CRC32C.new(data_bytes).value_as_bytes + static const size_t PayloadSize = 262; + // Build the fragment tree: + // 1 + // / ` + // 0 3 + // / ` + // 2 4 + RxFragment* const root = // + makeRxFragmentString(mem, 1, "You told me that you came from the sea. 
Did you build the sea?\n", NULL); + root->tree.base.lr[0] = // + &makeRxFragmentString(mem, 0, "Did you build this four-dimensional fragment?\n", &root->tree)->tree.base; + root->tree.base.lr[1] = // + &makeRxFragmentString(mem, 3, "this four-dimensional space is like the sea for us?\n", &root->tree)->tree.base; + root->tree.base.lr[1]->lr[0] = // + &makeRxFragmentString(mem, + 2, + "Are you saying that for you, or at least for your creators, ", + ((RxFragmentTreeNode*) root->tree.base.lr[1])) + ->tree.base; + root->tree.base.lr[1]->lr[1] = // + &makeRxFragmentString(mem, + 4, + "More like a puddle. The sea has gone dry.\xA2\x93-\xB2", + ((RxFragmentTreeNode*) root->tree.base.lr[1])) + ->tree.base; + // Initialization done, ensure the memory utilization is as we expect. + TEST_ASSERT_EQUAL(5, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(PayloadSize + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(5, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(sizeof(RxFragment) * 5, mem_fragment.allocated_bytes); + // Eject and verify the payload. Use a small extent and ensure the excess is dropped. + size_t payload_size = 0; + struct UdpardFragment payload = {0}; + TEST_ASSERT(rxSlotEject(&payload_size, + &payload, + &root->tree, + mem_payload.allocated_bytes, + 136, // <-- small extent, rest truncated + makeRxMemory(&mem_fragment, &mem_payload))); + TEST_ASSERT_EQUAL(136, payload_size); // Equals the extent due to the truncation. + TEST_ASSERT(compareStringWithPayload("Did you build this four-dimensional fragment?\n", payload.view)); + TEST_ASSERT(compareStringWithPayload("You told me that you came from the sea. Did you build the sea?\n", + payload.next->view)); + TEST_ASSERT(compareStringWithPayload("Are you saying that for you", payload.next->next->view)); + TEST_ASSERT_NULL(payload.next->next->next); + // Check the memory utilization. 
+ // The first fragment is freed because of the Scott's short payload optimization; + // the two last fragments are freed because of the truncation. + TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(169, mem_payload.allocated_bytes); // The last block is rounded up. + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); // One gone!!1 + TEST_ASSERT_EQUAL(sizeof(RxFragment) * 2, mem_fragment.allocated_bytes); + // Now, free the payload as the application would. + udpardRxFragmentFree(payload, mem.fragment, mem.payload); + // All memory shall be free now. As in "free beer". + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); +} + +static void testSlotEjectValidEmpty(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); + // Build the fragment tree: + // 1 + // / ` + // 0 2 + RxFragment* const root = makeRxFragmentString(mem, 1, "BBB", NULL); + root->tree.base.lr[0] = &makeRxFragmentString(mem, 0, "AAA", &root->tree)->tree.base; + root->tree.base.lr[1] = &makeRxFragmentString(mem, 2, "P\xF5\xA5?", &root->tree)->tree.base; + // Initialization done, ensure the memory utilization is as we expect. + TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(6 + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(sizeof(RxFragment) * 3, mem_fragment.allocated_bytes); + // Eject and verify the payload. The extent is zero, so all payload is removed. 
+ size_t payload_size = 0; + struct UdpardFragment payload = {0}; + TEST_ASSERT(rxSlotEject(&payload_size, + &payload, + &root->tree, + mem_payload.allocated_bytes, + 0, + makeRxMemory(&mem_fragment, &mem_payload))); + TEST_ASSERT_EQUAL(0, payload_size); // Equals the extent due to the truncation. + TEST_ASSERT_NULL(payload.next); + TEST_ASSERT_EQUAL(0, payload.view.size); + // Check the memory utilization. No memory should be in use by this point. + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); + // Now, free the payload as the application would. + udpardRxFragmentFree(payload, mem.fragment, mem.payload); + // No memory is in use anyway, so no change here. + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); +} + +static void testSlotEjectInvalid(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); + // Build the fragment tree; no valid CRC here: + // 1 + // / ` + // 0 2 + RxFragment* const root = makeRxFragmentString(mem, 1, "BBB", NULL); + root->tree.base.lr[0] = &makeRxFragmentString(mem, 0, "AAA", &root->tree)->tree.base; + root->tree.base.lr[1] = &makeRxFragmentString(mem, 2, "CCC", &root->tree)->tree.base; + // Initialization done, ensure the memory utilization is as we expect. 
+ TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(9, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(sizeof(RxFragment) * 3, mem_fragment.allocated_bytes); + // Eject and verify the payload. + size_t payload_size = 0; + struct UdpardFragment payload = {0}; + TEST_ASSERT_FALSE(rxSlotEject(&payload_size, + &payload, + &root->tree, + mem_payload.allocated_bytes, + 1000, + makeRxMemory(&mem_fragment, &mem_payload))); + // The call was unsuccessful, so the memory was freed instead of being handed over to the application. + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); +} + +static void testSlotAcceptA(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); + // Set up the RX slot instance we're going to be working with. + RxSlot slot = { + .ts_usec = 1234567890, + .transfer_id = 0x1122334455667788, + .max_index = 0, + .eot_index = FRAME_INDEX_UNSET, + .accepted_frames = 0, + .payload_size = 0, + .fragments = NULL, + }; + size_t payload_size = 0; + struct UdpardFragment payload = {0}; + + // === TRANSFER === + // Accept a single-frame transfer. Ownership transferred to the payload object. + //>>> from pycyphal.transport.commons.crc import CRC32C + //>>> CRC32C.new(data_bytes).value_as_bytes + TEST_ASSERT_EQUAL(1, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, + 0, + true, + "The fish responsible for drying the sea are not here." + "\x04\x1F\x8C\x1F"), + 1000, + mem)); + // Verify the memory utilization. Note that the small transfer optimization is in effect: head fragment moved. 
+ TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(53 + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); + // Verify the payload and free it. Note the CRC is not part of the payload, obviously. + TEST_ASSERT_EQUAL(53, payload_size); + TEST_ASSERT(compareStringWithPayload("The fish responsible for drying the sea are not here.", payload.view)); + TEST_ASSERT_NULL(payload.next); + udpardRxFragmentFree(payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); + // Ensure the slot has been restarted correctly. + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); + TEST_ASSERT_EQUAL(0x1122334455667789, slot.transfer_id); // INCREMENTED + TEST_ASSERT_EQUAL(0, slot.max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); + TEST_ASSERT_EQUAL(0, slot.accepted_frames); + TEST_ASSERT_EQUAL(0, slot.payload_size); + TEST_ASSERT_NULL(slot.fragments); + + // === TRANSFER === + // Accept a multi-frame transfer. Here, frames arrive in order. + TEST_ASSERT_EQUAL(0, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, + 0, + false, + "We're sorry. What you said is really hard to understand.\n"), + 1000, + mem)); + TEST_ASSERT_EQUAL(0, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, + 1, + false, + "The fish who dried the sea went onto land before they did " + "this. "), + 1000, + mem)); + TEST_ASSERT_EQUAL(1, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, + 2, + true, + "They moved from one dark forest to another dark forest." + "?\xAC(\xBE"), + 1000, + mem)); + // Verify the memory utilization. 
Note that the small transfer optimization is in effect: head fragment moved. + TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(176 + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); // One freed. + TEST_ASSERT_EQUAL(sizeof(RxFragment) * 2, mem_fragment.allocated_bytes); + // Verify the payload and free it. Note the CRC is not part of the payload, obviously. + TEST_ASSERT_EQUAL(176, payload_size); + TEST_ASSERT(compareStringWithPayload("We're sorry. What you said is really hard to understand.\n", payload.view)); + TEST_ASSERT_NOT_NULL(payload.next); + TEST_ASSERT(compareStringWithPayload("The fish who dried the sea went onto land before they did this. ", + payload.next->view)); + TEST_ASSERT_NOT_NULL(payload.next->next); + TEST_ASSERT(compareStringWithPayload("They moved from one dark forest to another dark forest.", // + payload.next->next->view)); + TEST_ASSERT_NULL(payload.next->next->next); + udpardRxFragmentFree(payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); + // Ensure the slot has been restarted correctly. + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); + TEST_ASSERT_EQUAL(0x112233445566778A, slot.transfer_id); // INCREMENTED + TEST_ASSERT_EQUAL(0, slot.max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); + TEST_ASSERT_EQUAL(0, slot.accepted_frames); + TEST_ASSERT_EQUAL(0, slot.payload_size); + TEST_ASSERT_NULL(slot.fragments); + + // === TRANSFER === + // Accept an out-of-order transfer with extent truncation. Frames arrive out-of-order with duplicates. 
+ TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 2, + true, + "Toss it over." + "K(\xBB\xEE"), + 45, + mem)); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 1, + false, + "How do we give it to you?\n"), + 45, + mem)); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 1, + false, + "DUPLICATE #1"), + 45, + mem)); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // NO CHANGE, duplicate discarded. + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 2, + true, + "DUPLICATE #2"), + 45, + mem)); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // NO CHANGE, duplicate discarded. + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(1, // transfer completed + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 0, + false, + "I like fish. Can I have it?\n"), + 45, + mem)); + // Verify the memory utilization. Note that the small transfer optimization is in effect: head fragment moved. + // Due to the implicit truncation (the extent is small), the last fragment is already freed. + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // One freed because of truncation. + TEST_ASSERT_EQUAL(28 + 26, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); // One freed because truncation, one optimized away. 
+ TEST_ASSERT_EQUAL(sizeof(RxFragment) * 1, mem_fragment.allocated_bytes); + // Verify the payload and free it. Note the CRC is not part of the payload, obviously. + TEST_ASSERT_EQUAL(45, payload_size); // Equals the extent. + TEST_ASSERT(compareStringWithPayload("I like fish. Can I have it?\n", payload.view)); + TEST_ASSERT_NOT_NULL(payload.next); + TEST_ASSERT(compareStringWithPayload("How do we give it", payload.next->view)); // TRUNCATED + TEST_ASSERT_NULL(payload.next->next); + udpardRxFragmentFree(payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); + // Ensure the slot has been restarted correctly. + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); + TEST_ASSERT_EQUAL(0x112233445566778B, slot.transfer_id); // INCREMENTED + TEST_ASSERT_EQUAL(0, slot.max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); + TEST_ASSERT_EQUAL(0, slot.accepted_frames); + TEST_ASSERT_EQUAL(0, slot.payload_size); + TEST_ASSERT_NULL(slot.fragments); + + // === TRANSFER === + // Shorter than TRANSFER_CRC_SIZE_BYTES, discarded early. + TEST_ASSERT_EQUAL(0, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, 0, true, ":D"), + 1000, + mem)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); + // Ensure the slot has been restarted correctly. 
+ TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); + TEST_ASSERT_EQUAL(0x112233445566778C, slot.transfer_id); // INCREMENTED + TEST_ASSERT_EQUAL(0, slot.max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); + TEST_ASSERT_EQUAL(0, slot.accepted_frames); + TEST_ASSERT_EQUAL(0, slot.payload_size); + TEST_ASSERT_NULL(slot.fragments); + + // === TRANSFER === + // OOM on reception. Note that the payload allocator does not require restrictions as the library does not + // allocate memory for the payload, only for the fragments. + mem_fragment.limit_fragments = 1; // Can only store one fragment, but the transfer requires more. + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 2, + true, + "Toss it over." + "K(\xBB\xEE"), + 1000, + mem)); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); // Limit reached here. Cannot accept next fragment. + TEST_ASSERT_EQUAL(-UDPARD_ERROR_MEMORY, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 1, + false, + "How do we give it to you?\n"), + 1000, + mem)); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Payload not accepted, cannot alloc fragment. + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + mem_fragment.limit_fragments = 2; // Lift the limit and repeat the same frame, this time it is accepted. + TEST_ASSERT_EQUAL(0, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 0, + false, + "I like fish. Can I have it?\n"), + 1000, + mem)); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // Accepted! + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(-UDPARD_ERROR_MEMORY, // Cannot alloc third fragment. 
+ rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 1, + false, + "How do we give it to you?\n"), + 1000, + mem)); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // Payload not accepted, cannot alloc fragment. + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + mem_fragment.limit_fragments = 3; // Lift the limit and repeat the same frame, this time it is accepted. + TEST_ASSERT_EQUAL(1, // transfer completed + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 1, + false, + "How do we give it to you?\n"), + 1000, + mem)); + // Verify the memory utilization. Note that the small transfer optimization is in effect: head fragment moved. + TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(67 + TRANSFER_CRC_SIZE_BYTES, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(sizeof(RxFragment) * 2, mem_fragment.allocated_bytes); + // Verify the payload and free it. Note the CRC is not part of the payload, obviously. + TEST_ASSERT_EQUAL(67, payload_size); // Equals the extent. + TEST_ASSERT(compareStringWithPayload("I like fish. Can I have it?\n", payload.view)); + TEST_ASSERT_NOT_NULL(payload.next); + TEST_ASSERT(compareStringWithPayload("How do we give it to you?\n", payload.next->view)); + TEST_ASSERT_NOT_NULL(payload.next->next); + TEST_ASSERT(compareStringWithPayload("Toss it over.", payload.next->next->view)); + TEST_ASSERT_NULL(payload.next->next->next); + udpardRxFragmentFree(payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_bytes); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_bytes); + // Ensure the slot has been restarted correctly. 
+ TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); + TEST_ASSERT_EQUAL(0x112233445566778D, slot.transfer_id); // INCREMENTED + TEST_ASSERT_EQUAL(0, slot.max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); + TEST_ASSERT_EQUAL(0, slot.accepted_frames); + TEST_ASSERT_EQUAL(0, slot.payload_size); + TEST_ASSERT_NULL(slot.fragments); + + // === TRANSFER === + // Inconsistent EOT flag. + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, // Just an ordinary transfer passing by, what could go wrong? + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 2, + true, + "Toss it over." + "K(\xBB\xEE"), + 45, + mem)); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Okay, accepted, some data stored... + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 1, // + true, // SURPRISE! EOT is set in distinct frames! + "How do we give it to you?\n"), + 45, + mem)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // This is outrageous. Of course we have to drop everything. + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // Ensure the slot has been restarted correctly. + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); + TEST_ASSERT_EQUAL(0x112233445566778E, slot.transfer_id); // INCREMENTED + TEST_ASSERT_EQUAL(0, slot.max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); + TEST_ASSERT_EQUAL(0, slot.accepted_frames); + TEST_ASSERT_EQUAL(0, slot.payload_size); + TEST_ASSERT_NULL(slot.fragments); + + // === TRANSFER === + // More frames past the EOT; or, in other words, the frame index where EOT is set is not the maximum index. 
+ TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 2, + true, + "Toss it over." + "K(\xBB\xEE"), + 45, + mem)); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Okay, accepted, some data stored... + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, + rxSlotAccept(&slot, + &payload_size, + &payload, + makeRxFrameBaseString(&mem_payload, // + 3, // SURPRISE! Frame #3 while #2 was EOT! + false, + "How do we give it to you?\n"), + 45, + mem)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // This is outrageous. Of course we have to drop everything. + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // Ensure the slot has been restarted correctly. + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, slot.ts_usec); + TEST_ASSERT_EQUAL(0x112233445566778F, slot.transfer_id); // INCREMENTED + TEST_ASSERT_EQUAL(0, slot.max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, slot.eot_index); + TEST_ASSERT_EQUAL(0, slot.accepted_frames); + TEST_ASSERT_EQUAL(0, slot.payload_size); + TEST_ASSERT_NULL(slot.fragments); +} + +// -------------------------------------------------- IFACE -------------------------------------------------- + +static void testIfaceIsFutureTransferID(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + RxIface iface; + rxIfaceInit(&iface, makeRxMemory(&mem_fragment, &mem_payload)); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); + for (size_t i = 0; i < RX_SLOT_COUNT; i++) + { + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.slots[i].ts_usec); + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[i].transfer_id); + TEST_ASSERT_EQUAL(0, iface.slots[i].max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, 
iface.slots[i].eot_index); + TEST_ASSERT_EQUAL(0, iface.slots[i].accepted_frames); + TEST_ASSERT_EQUAL(0, iface.slots[i].payload_size); + TEST_ASSERT_NULL(iface.slots[i].fragments); + } + TEST_ASSERT_TRUE(rxIfaceIsFutureTransferID(&iface, 0)); + TEST_ASSERT_TRUE(rxIfaceIsFutureTransferID(&iface, 0xFFFFFFFFFFFFFFFFULL)); + iface.slots[0].transfer_id = 100; + TEST_ASSERT_FALSE(rxIfaceIsFutureTransferID(&iface, 99)); + TEST_ASSERT_FALSE(rxIfaceIsFutureTransferID(&iface, 100)); + TEST_ASSERT_TRUE(rxIfaceIsFutureTransferID(&iface, 101)); + iface.slots[0].transfer_id = TRANSFER_ID_UNSET; + iface.slots[1].transfer_id = 100; + TEST_ASSERT_FALSE(rxIfaceIsFutureTransferID(&iface, 99)); + TEST_ASSERT_FALSE(rxIfaceIsFutureTransferID(&iface, 100)); + TEST_ASSERT_TRUE(rxIfaceIsFutureTransferID(&iface, 101)); +} + +static void testIfaceCheckTransferIDTimeout(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + RxIface iface; + rxIfaceInit(&iface, makeRxMemory(&mem_fragment, &mem_payload)); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); + for (size_t i = 0; i < RX_SLOT_COUNT; i++) + { + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.slots[i].ts_usec); + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[i].transfer_id); + TEST_ASSERT_EQUAL(0, iface.slots[i].max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, iface.slots[i].eot_index); + TEST_ASSERT_EQUAL(0, iface.slots[i].accepted_frames); + TEST_ASSERT_EQUAL(0, iface.slots[i].payload_size); + TEST_ASSERT_NULL(iface.slots[i].fragments); + } + // No successful transfers so far, and no slots in progress at the moment. + TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 0, 100)); + TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 1000, 100)); + // Suppose we have one successful transfer now. 
+ iface.ts_usec = 1000; + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 500, 100)); // TS is in the past! Check overflows. + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1000, 100)); + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1050, 100)); + TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 1150, 100)); // Yup, this is a timeout. + TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 2150, 100)); // Yup, this is a timeout. + // Suppose there are some slots in progress. + iface.ts_usec = 1000; + iface.slots[0].ts_usec = 2000; + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 500, 100)); // TS is in the past! Check overflows. + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1000, 100)); + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1050, 100)); + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1150, 100)); // No timeout because of the slot in progress. + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 2050, 100)); // Nope. + TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 2150, 100)); // Yeah. + TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 3050, 100)); // Ooh. + // More slots in progress. + iface.ts_usec = 1000; + iface.slots[0].ts_usec = 2000; + iface.slots[1].ts_usec = 3000; + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 500, 100)); // TS is in the past! Check overflows. + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1000, 100)); + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1050, 100)); + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1150, 100)); // No timeout because of the slot in progress. + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 2050, 100)); // Nope. + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 2150, 100)); // The other slot is newer. + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 3050, 100)); // Yes, but not yet. 
+ TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 3150, 100)); // Yes. + // Now suppose there is no successful transfer, but there are some slots in progress. It's all the same. + iface.ts_usec = TIMESTAMP_UNSET; + iface.slots[0].ts_usec = 2000; + iface.slots[1].ts_usec = 3000; + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 500, 100)); // TS is in the past! Check overflows. + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1000, 100)); + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1050, 100)); + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 1150, 100)); // No timeout because of the slot in progress. + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 2050, 100)); // Nope. + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 2150, 100)); // The other slot is newer. + TEST_ASSERT_FALSE(rxIfaceCheckTransferIDTimeout(&iface, 3050, 100)); // Yes, but not yet. + TEST_ASSERT_TRUE(rxIfaceCheckTransferIDTimeout(&iface, 3150, 100)); // Ooh yes. +} + +static void testIfaceFindMatchingSlot(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + RxSlot slots[RX_SLOT_COUNT] = {0}; + rxSlotRestart(&slots[0], 1000, makeRxMemory(&mem_fragment, &mem_payload)); + rxSlotRestart(&slots[1], 1001, makeRxMemory(&mem_fragment, &mem_payload)); + // No matching slot. + TEST_ASSERT_NULL(rxIfaceFindMatchingSlot(slots, 123)); + // Matching slots. + TEST_ASSERT_EQUAL_PTR(&slots[0], rxIfaceFindMatchingSlot(slots, 1000)); + TEST_ASSERT_EQUAL_PTR(&slots[1], rxIfaceFindMatchingSlot(slots, 1001)); + // Identical slots, neither in progress. + slots[0].ts_usec = TIMESTAMP_UNSET; + slots[1].ts_usec = TIMESTAMP_UNSET; + slots[0].transfer_id = 1000; + slots[1].transfer_id = 1000; + TEST_ASSERT_EQUAL_PTR(&slots[0], rxIfaceFindMatchingSlot(slots, 1000)); // First match. 
+ TEST_ASSERT_EQUAL_PTR(NULL, rxIfaceFindMatchingSlot(slots, 1001)); + // Identical slots, one of them in progress. + slots[0].ts_usec = TIMESTAMP_UNSET; + slots[1].ts_usec = 1234567890; + TEST_ASSERT_EQUAL_PTR(&slots[1], rxIfaceFindMatchingSlot(slots, 1000)); + TEST_ASSERT_EQUAL_PTR(NULL, rxIfaceFindMatchingSlot(slots, 1001)); + // The other is in progress now. + slots[0].ts_usec = 1234567890; + slots[1].ts_usec = TIMESTAMP_UNSET; + TEST_ASSERT_EQUAL_PTR(&slots[0], rxIfaceFindMatchingSlot(slots, 1000)); + TEST_ASSERT_EQUAL_PTR(NULL, rxIfaceFindMatchingSlot(slots, 1001)); + // Both in progress, pick first. + slots[0].ts_usec = 1234567890; + slots[1].ts_usec = 2345678901; + TEST_ASSERT_EQUAL_PTR(&slots[0], rxIfaceFindMatchingSlot(slots, 1000)); + TEST_ASSERT_EQUAL_PTR(NULL, rxIfaceFindMatchingSlot(slots, 1001)); +} + +static void testIfaceAcceptA(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); + RxIface iface; + rxIfaceInit(&iface, mem); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); + for (size_t i = 0; i < RX_SLOT_COUNT; i++) + { + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.slots[i].ts_usec); + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[i].transfer_id); + TEST_ASSERT_EQUAL(0, iface.slots[i].max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, iface.slots[i].eot_index); + TEST_ASSERT_EQUAL(0, iface.slots[i].accepted_frames); + TEST_ASSERT_EQUAL(0, iface.slots[i].payload_size); + TEST_ASSERT_NULL(iface.slots[i].fragments); + } + struct UdpardRxTransfer transfer = {0}; + + // === TRANSFER === + // A simple single-frame transfer successfully accepted. 
+ TEST_ASSERT_EQUAL(1, + rxIfaceAccept(&iface, + 1234567890, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 1234, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x1234, + .transfer_id = 0x1122334455667788U}, + 0, + true, + "I am a tomb." + "\x1F\\\xCDs"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Head fragment is not heap-allocated. + // Check the transfer we just accepted. + TEST_ASSERT_EQUAL(1234567890, transfer.timestamp_usec); + TEST_ASSERT_EQUAL(UdpardPriorityHigh, transfer.priority); + TEST_ASSERT_EQUAL(1234, transfer.source_node_id); + TEST_ASSERT_EQUAL(0x1122334455667788U, transfer.transfer_id); + TEST_ASSERT_EQUAL(12, transfer.payload_size); + TEST_ASSERT(compareStringWithPayload("I am a tomb.", transfer.payload.view)); + udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // Check the internal states of the iface. + TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); // Still unused. + TEST_ASSERT_EQUAL(0x1122334455667789U, iface.slots[1].transfer_id); // Incremented. + + // === TRANSFER === + // Send a duplicate and ensure it is rejected. + TEST_ASSERT_EQUAL(0, // No transfer accepted. + rxIfaceAccept(&iface, + 1234567891, // different timestamp but ignored anyway + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 1234, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x1234, + .transfer_id = 0x1122334455667788U}, + 0, + true, + "I am a tomb." 
+ "\x1F\\\xCDs"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // Check the internal states of the iface. + TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); // same old timestamp + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); // Still unused. + TEST_ASSERT_EQUAL(0x1122334455667789U, iface.slots[1].transfer_id); // good ol' transfer id + + // === TRANSFER === + // Send a non-duplicate transfer with an invalid CRC using an in-sequence (matching) transfer-ID. + TEST_ASSERT_EQUAL(0, // No transfer accepted. + rxIfaceAccept(&iface, + 1234567892, // different timestamp but ignored anyway + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 1234, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x1234, + .transfer_id = 0x1122334455667789U}, + 0, + true, + "I am a tomb." + "No CRC here."), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // Check the internal states of the iface. + TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); // same old timestamp + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); // Still unused. + TEST_ASSERT_EQUAL(0x112233445566778AU, iface.slots[1].transfer_id); // Incremented. + + // === TRANSFER === + // Send a non-duplicate transfer with an invalid CRC using an out-of-sequence (non-matching) transfer-ID. + // Transfer-ID jumps forward, no existing slot; will use the second one. + TEST_ASSERT_EQUAL(0, // No transfer accepted. 
+ rxIfaceAccept(&iface, + 1234567893, // different timestamp but ignored anyway + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 1234, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x1234, + .transfer_id = 0x1122334455667790U}, + 0, + true, + "I am a tomb." + "No CRC here, #2."), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // Check the internal states of the iface. + TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); // same old timestamp + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); // Still unused. + TEST_ASSERT_EQUAL(0x1122334455667791U, iface.slots[1].transfer_id); // Replaced the old one, it was unneeded. + + // === TRANSFER === (x2) + // Send two interleaving multi-frame out-of-order transfers: + // A2 B1 A0 B0 A1 + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // A2 + TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000020, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 1111, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x1111, + .transfer_id = 1000U}, + 2, + true, + "A2" + "v\x1E\xBD]"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); // same old timestamp + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); // Still unused. + TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); // Replaced the old one, it was unneeded. + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + // B1 + TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000010, // Transfer-ID timeout. 
+ makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPrioritySlow, + .src_node_id = 2222, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x2222, + .transfer_id = 1001U}, + 1, + true, + "B1" + "g\x8D\x9A\xD7"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); // same old timestamp + TEST_ASSERT_EQUAL(1001, iface.slots[0].transfer_id); // Used for B because the other one is taken. + TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); // Keeps A because it is in-progress, can't discard. + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + // A0 + TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000030, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 1111, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x1111, + .transfer_id = 1000U}, + 0, + false, + "A0"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(1234567890, iface.ts_usec); // same old timestamp + TEST_ASSERT_EQUAL(1001, iface.slots[0].transfer_id); + TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); + TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); + // B0 + TEST_ASSERT_EQUAL(1, + rxIfaceAccept(&iface, + 2000000040, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPrioritySlow, + .src_node_id = 2222, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x2222, + .transfer_id = 1001U}, + 0, + false, + "B0"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + // TRANSFER B RECEIVED, check it. + TEST_ASSERT_EQUAL(2000000010, iface.ts_usec); + TEST_ASSERT_EQUAL(1002, iface.slots[0].transfer_id); // Incremented to meet the next transfer. 
+ TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); + TEST_ASSERT_EQUAL(4, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); // One fragment freed because of the head optimization. + // Check the payload. + TEST_ASSERT_EQUAL(2000000010, transfer.timestamp_usec); + TEST_ASSERT_EQUAL(UdpardPrioritySlow, transfer.priority); + TEST_ASSERT_EQUAL(2222, transfer.source_node_id); + TEST_ASSERT_EQUAL(1001, transfer.transfer_id); + TEST_ASSERT_EQUAL(4, transfer.payload_size); + TEST_ASSERT(compareStringWithPayload("B0", transfer.payload.view)); + TEST_ASSERT_NOT_NULL(transfer.payload.next); + TEST_ASSERT(compareStringWithPayload("B1", transfer.payload.next->view)); + TEST_ASSERT_NULL(transfer.payload.next->next); + udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // Only the remaining A0 A2 are left. + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + // A1 + TEST_ASSERT_EQUAL(1, + rxIfaceAccept(&iface, + 2000000050, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 1111, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x1111, + .transfer_id = 1000U}, + 1, + false, + "A1"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + // TRANSFER A RECEIVED, check it. + TEST_ASSERT_EQUAL(2000000020, iface.ts_usec); // same old timestamp + TEST_ASSERT_EQUAL(1002, iface.slots[0].transfer_id); + TEST_ASSERT_EQUAL(1001, iface.slots[1].transfer_id); // Incremented to meet the next transfer. + // Check the payload. 
+ TEST_ASSERT_EQUAL(2000000020, transfer.timestamp_usec); + TEST_ASSERT_EQUAL(UdpardPriorityHigh, transfer.priority); + TEST_ASSERT_EQUAL(1111, transfer.source_node_id); + TEST_ASSERT_EQUAL(1000, transfer.transfer_id); + TEST_ASSERT_EQUAL(6, transfer.payload_size); + TEST_ASSERT(compareStringWithPayload("A0", transfer.payload.view)); + TEST_ASSERT_NOT_NULL(transfer.payload.next); + TEST_ASSERT(compareStringWithPayload("A1", transfer.payload.next->view)); + TEST_ASSERT_NOT_NULL(transfer.payload.next->next); + TEST_ASSERT(compareStringWithPayload("A2", transfer.payload.next->next->view)); + TEST_ASSERT_NULL(transfer.payload.next->next->next); + udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); +} + +static void testIfaceAcceptB(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); + RxIface iface; + rxIfaceInit(&iface, mem); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); + for (size_t i = 0; i < RX_SLOT_COUNT; i++) + { + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.slots[i].ts_usec); + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[i].transfer_id); + TEST_ASSERT_EQUAL(0, iface.slots[i].max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, iface.slots[i].eot_index); + TEST_ASSERT_EQUAL(0, iface.slots[i].accepted_frames); + TEST_ASSERT_EQUAL(0, iface.slots[i].payload_size); + TEST_ASSERT_NULL(iface.slots[i].fragments); + } + struct UdpardRxTransfer transfer = {0}; + // === TRANSFER === (x3) + // Send three interleaving multi-frame out-of-order transfers (primes for duplicates): + // A2 B1 A0 C0 B0 A1 C0' C1 + // A2 arrives before B1 but its timestamp is higher. 
+ // Transfer B will be evicted by C because by the time C0 arrives, transfer B is the oldest one, + // since its timestamp is inherited from B0. + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // A2 + TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000020, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 1111, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x1111, + .transfer_id = 1000U}, + 2, + true, + "A2" + "v\x1E\xBD]"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); + TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + // B1 + TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000010, // TIME REORDERING -- lower than previous. 
+ makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPrioritySlow, + .src_node_id = 2222, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x2222, + .transfer_id = 1001U}, + 1, + true, + "B1" + "g\x8D\x9A\xD7"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); + TEST_ASSERT_EQUAL(1001, iface.slots[0].transfer_id); + TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + // A0 + TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000030, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 1111, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x1111, + .transfer_id = 1000U}, + 0, + false, + "A0"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); + TEST_ASSERT_EQUAL(1001, iface.slots[0].transfer_id); + TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); + TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); + // C0 + TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000040, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 3333, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x3333, + .transfer_id = 1002U}, + 0, + false, + "C0"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); + TEST_ASSERT_EQUAL(1002, iface.slots[0].transfer_id); // B evicted by C. + TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); + TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); // Payload of B is freed, so the usage is unchanged. 
+ TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); + // B0 + TEST_ASSERT_EQUAL(0, // Cannot be accepted because its slot is taken over by C. + rxIfaceAccept(&iface, + 2000000050, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPrioritySlow, + .src_node_id = 2222, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x2222, + .transfer_id = 1001U}, + 0, + false, + "B0"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); + TEST_ASSERT_EQUAL(1002, iface.slots[0].transfer_id); + TEST_ASSERT_EQUAL(1000, iface.slots[1].transfer_id); + TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); // No increase, frame not accepted. + TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); + // A1 + TEST_ASSERT_EQUAL(1, + rxIfaceAccept(&iface, + 2000000050, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 1111, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x1111, + .transfer_id = 1000U}, + 1, + false, + "A1"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + // TRANSFER A RECEIVED, check it. + TEST_ASSERT_EQUAL(2000000020, iface.ts_usec); // same old timestamp + TEST_ASSERT_EQUAL(1002, iface.slots[0].transfer_id); + TEST_ASSERT_EQUAL(1001, iface.slots[1].transfer_id); // Incremented to meet the next transfer. + // Check the payload. 
+ TEST_ASSERT_EQUAL(2000000020, transfer.timestamp_usec); + TEST_ASSERT_EQUAL(UdpardPriorityHigh, transfer.priority); + TEST_ASSERT_EQUAL(1111, transfer.source_node_id); + TEST_ASSERT_EQUAL(1000, transfer.transfer_id); + TEST_ASSERT_EQUAL(6, transfer.payload_size); + TEST_ASSERT(compareStringWithPayload("A0", transfer.payload.view)); + TEST_ASSERT_NOT_NULL(transfer.payload.next); + TEST_ASSERT(compareStringWithPayload("A1", transfer.payload.next->view)); + TEST_ASSERT_NOT_NULL(transfer.payload.next->next); + TEST_ASSERT(compareStringWithPayload("A2", transfer.payload.next->next->view)); + TEST_ASSERT_NULL(transfer.payload.next->next->next); + udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Some memory is retained for the C0 payload. + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + // C0 DUPLICATE + TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000060, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 3333, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x3333, + .transfer_id = 1002U}, + 0, + false, + "C0 DUPLICATE"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(2000000020, iface.ts_usec); // Last transfer timestamp. + TEST_ASSERT_EQUAL(1002, iface.slots[0].transfer_id); + TEST_ASSERT_EQUAL(1001, iface.slots[1].transfer_id); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Not accepted, so no change. 
+ TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + // C1 + TEST_ASSERT_EQUAL(1, + rxIfaceAccept(&iface, + 2000000070, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityHigh, + .src_node_id = 3333, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0x3333, + .transfer_id = 1002U}, + 1, + true, + "C1" + "\xA8\xBF}\x19"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + // TRANSFER C RECEIVED, check it. + TEST_ASSERT_EQUAL(2000000040, iface.ts_usec); + TEST_ASSERT_EQUAL(1003, iface.slots[0].transfer_id); // Incremented to meet the next transfer. + TEST_ASSERT_EQUAL(1001, iface.slots[1].transfer_id); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // Keeping two fragments of C. + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); // Head optimization in effect. + // Check the payload. + TEST_ASSERT_EQUAL(2000000040, transfer.timestamp_usec); + TEST_ASSERT_EQUAL(UdpardPriorityHigh, transfer.priority); + TEST_ASSERT_EQUAL(3333, transfer.source_node_id); + TEST_ASSERT_EQUAL(1002, transfer.transfer_id); + TEST_ASSERT_EQUAL(4, transfer.payload_size); + TEST_ASSERT(compareStringWithPayload("C0", transfer.payload.view)); + TEST_ASSERT_NOT_NULL(transfer.payload.next); + TEST_ASSERT(compareStringWithPayload("C1", transfer.payload.next->view)); + TEST_ASSERT_NULL(transfer.payload.next->next); + udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // All payload memory freed; nothing is retained. 
+ TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); +} + +static void testIfaceAcceptC(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); + RxIface iface; + rxIfaceInit(&iface, mem); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); + for (size_t i = 0; i < RX_SLOT_COUNT; i++) + { + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.slots[i].ts_usec); + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[i].transfer_id); + TEST_ASSERT_EQUAL(0, iface.slots[i].max_index); + TEST_ASSERT_EQUAL(FRAME_INDEX_UNSET, iface.slots[i].eot_index); + TEST_ASSERT_EQUAL(0, iface.slots[i].accepted_frames); + TEST_ASSERT_EQUAL(0, iface.slots[i].payload_size); + TEST_ASSERT_NULL(iface.slots[i].fragments); + } + struct UdpardRxTransfer transfer = {0}; + // === TRANSFER === + // Send interleaving multi-frame transfers such that in the end slots have the same transfer-ID value + // (primes for duplicates): + // A0 B0 A1 C0 B1 C1 B1' + // The purpose of this test is to ensure that the case of multiple RX slots having the same transfer-ID is + // handled correctly (including correct duplicate detection). 
+ TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // A0 + TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000010, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityOptional, + .src_node_id = 1111, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xA}, + 0, + false, + "A0"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, iface.slots[0].transfer_id); + TEST_ASSERT_EQUAL(0xA, iface.slots[1].transfer_id); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + // B0 + TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000020, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityExceptional, + .src_node_id = 2222, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xB}, + 0, + false, + "B0"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, iface.ts_usec); + TEST_ASSERT_EQUAL(0xB, iface.slots[0].transfer_id); + TEST_ASSERT_EQUAL(0xA, iface.slots[1].transfer_id); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + // A1 + TEST_ASSERT_EQUAL(1, + rxIfaceAccept(&iface, + 2000000030, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityOptional, + .src_node_id = 1111, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xA}, + 1, + true, + "A1" + "\xc7\xac_\x81"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + // Check the received transfer. 
+ TEST_ASSERT_EQUAL(2000000010, iface.ts_usec); + TEST_ASSERT_EQUAL(0xB, iface.slots[0].transfer_id); + TEST_ASSERT_EQUAL(0xB, iface.slots[1].transfer_id); // SAME VALUE!!1 + TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); // Head optimization in effect. + TEST_ASSERT_EQUAL(UdpardPriorityOptional, transfer.priority); + TEST_ASSERT_EQUAL(4, transfer.payload_size); + TEST_ASSERT(compareStringWithPayload("A0", transfer.payload.view)); + TEST_ASSERT_NOT_NULL(transfer.payload.next); + TEST_ASSERT(compareStringWithPayload("A1", transfer.payload.next->view)); + TEST_ASSERT_NULL(transfer.payload.next->next); + udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // B0 still allocated. + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + // C0 + TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000040, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityExceptional, + .src_node_id = 3333, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xC}, + 0, + false, + "C0"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(2000000010, iface.ts_usec); // <- unchanged. + TEST_ASSERT_EQUAL(0xB, iface.slots[0].transfer_id); + TEST_ASSERT_EQUAL(0xC, iface.slots[1].transfer_id); // <- reused for C. + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // Two transfers in transit again: B and C. 
+ TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + // B1 + TEST_ASSERT_EQUAL(1, + rxIfaceAccept(&iface, + 2000000050, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityExceptional, + .src_node_id = 2222, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xB}, + 1, + true, + "B1" + "g\x8D\x9A\xD7"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + // Check the received transfer. + TEST_ASSERT_EQUAL(2000000020, iface.ts_usec); + TEST_ASSERT_EQUAL(0xC, iface.slots[0].transfer_id); // <-- INCREMENTED, SO + TEST_ASSERT_EQUAL(0xC, iface.slots[1].transfer_id); // WE HAVE TWO IDENTICAL VALUES AGAIN! + TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(UdpardPriorityExceptional, transfer.priority); + TEST_ASSERT_EQUAL(4, transfer.payload_size); + TEST_ASSERT(compareStringWithPayload("B0", transfer.payload.view)); + TEST_ASSERT_NOT_NULL(transfer.payload.next); + TEST_ASSERT(compareStringWithPayload("B1", transfer.payload.next->view)); + TEST_ASSERT_NULL(transfer.payload.next->next); + udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // C0 is still allocated. + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + // C1 + // This is the DIFFICULT CASE because we have two RX slots with the same transfer-ID, but THE FIRST ONE IS NOT + // THE ONE THAT WE NEED! Watch what happens next. + TEST_ASSERT_EQUAL(1, + rxIfaceAccept(&iface, + 2000000060, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityExceptional, + .src_node_id = 3333, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xC}, + 1, + true, + "C1" + "\xA8\xBF}\x19"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + // Check the received transfer. 
+ TEST_ASSERT_EQUAL(2000000040, iface.ts_usec); + TEST_ASSERT_EQUAL(0xC, iface.slots[0].transfer_id); // Old, unused. + TEST_ASSERT_EQUAL(0xD, iface.slots[1].transfer_id); // INCREMENTED! + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(UdpardPriorityExceptional, transfer.priority); + TEST_ASSERT_EQUAL(3333, transfer.source_node_id); + TEST_ASSERT_EQUAL(4, transfer.payload_size); + TEST_ASSERT(compareStringWithPayload("C0", transfer.payload.view)); + TEST_ASSERT_NOT_NULL(transfer.payload.next); + TEST_ASSERT(compareStringWithPayload("C1", transfer.payload.next->view)); + TEST_ASSERT_NULL(transfer.payload.next->next); + udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // B0 duplicate multi-frame; shall be rejected. + TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000070, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityExceptional, + .src_node_id = 2222, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xB}, + 0, + false, + "B0"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // B0 duplicate single-frame; shall be rejected. 
+ TEST_ASSERT_EQUAL(0, + rxIfaceAccept(&iface, + 2000000080, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityExceptional, + .src_node_id = 2222, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xB}, + 0, + true, + "B0" + "g\x8D\x9A\xD7"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); +} + +// -------------------------------------------------- SESSION -------------------------------------------------- + +static void testSessionDeduplicate(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); + struct UdpardInternalRxSession session = {0}; + rxSessionInit(&session, mem); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, session.last_ts_usec); + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, session.last_transfer_id); + { + struct UdpardFragment* const head = &makeRxFragmentString(mem, 0, "ABC", NULL)->base; + head->next = &makeRxFragmentString(mem, 1, "DEF", NULL)->base; + struct UdpardRxTransfer transfer = {.timestamp_usec = 10000000, + .transfer_id = 0x0DDC0FFEEBADF00D, + .payload_size = 6, + .payload = *head}; + memFree(mem.fragment, sizeof(RxFragment), head); // Cloned, no longer needed. + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + // The first transfer after initialization is always accepted. + TEST_ASSERT(rxSessionDeduplicate(&session, UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, &transfer, mem)); + // Check the final states. + TEST_ASSERT_EQUAL(6, transfer.payload_size); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // The application shall free the payload. 
+ TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(10000000, session.last_ts_usec); + TEST_ASSERT_EQUAL(0x0DDC0FFEEBADF00D, session.last_transfer_id); + // Feed the same transfer again; now it is a duplicate and so it is rejected and freed. + transfer.timestamp_usec = 10000001; + TEST_ASSERT_FALSE(rxSessionDeduplicate(&session, UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, &transfer, mem)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(10000000, session.last_ts_usec); // Timestamp is not updated. + TEST_ASSERT_EQUAL(0x0DDC0FFEEBADF00D, session.last_transfer_id); + } + { + // Emit a duplicate but after the transfer-ID timeout has occurred. Ensure it is accepted. + struct UdpardFragment* const head = &makeRxFragmentString(mem, 0, "ABC", NULL)->base; + head->next = &makeRxFragmentString(mem, 1, "DEF", NULL)->base; + struct UdpardRxTransfer transfer = {.timestamp_usec = 12000000, // TID timeout. + .transfer_id = 0x0DDC0FFEEBADF000, // transfer-ID reduced. + .payload_size = 6, + .payload = *head}; + memFree(mem.fragment, sizeof(RxFragment), head); // Cloned, no longer needed. + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + // Accepted due to the TID timeout. + TEST_ASSERT(rxSessionDeduplicate(&session, UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, &transfer, mem)); + // Check the final states. + TEST_ASSERT_EQUAL(6, transfer.payload_size); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // The application shall free the payload. + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(12000000, session.last_ts_usec); + TEST_ASSERT_EQUAL(0x0DDC0FFEEBADF000, session.last_transfer_id); + // Feed the same transfer again; now it is a duplicate and so it is rejected and freed. 
+ transfer.timestamp_usec = 12000001; + TEST_ASSERT_FALSE(rxSessionDeduplicate(&session, UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, &transfer, mem)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(12000000, session.last_ts_usec); // Timestamp is not updated. + TEST_ASSERT_EQUAL(0x0DDC0FFEEBADF000, session.last_transfer_id); + } + { + // Ensure another transfer with a greater transfer-ID is accepted immediately. + struct UdpardFragment* const head = &makeRxFragmentString(mem, 0, "ABC", NULL)->base; + head->next = &makeRxFragmentString(mem, 1, "DEF", NULL)->base; + struct UdpardRxTransfer transfer = {.timestamp_usec = 11000000, // Simulate clock jitter. + .transfer_id = 0x0DDC0FFEEBADF001, // Incremented. + .payload_size = 6, + .payload = *head}; + memFree(mem.fragment, sizeof(RxFragment), head); // Cloned, no longer needed. + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + // Accepted because TID greater. + TEST_ASSERT(rxSessionDeduplicate(&session, UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, &transfer, mem)); + // Check the final states. + TEST_ASSERT_EQUAL(6, transfer.payload_size); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); // The application shall free the payload. + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(11000000, session.last_ts_usec); // Updated. + TEST_ASSERT_EQUAL(0x0DDC0FFEEBADF001, session.last_transfer_id); // Updated. + // Feed the same transfer again; now it is a duplicate and so it is rejected and freed. + transfer.timestamp_usec = 11000000; + TEST_ASSERT_FALSE(rxSessionDeduplicate(&session, UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, &transfer, mem)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(11000000, session.last_ts_usec); // Timestamp is not updated. 
+ TEST_ASSERT_EQUAL(0x0DDC0FFEEBADF001, session.last_transfer_id); + } +} + +static void testSessionAcceptA(void) +{ + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + const RxMemory mem = makeRxMemory(&mem_fragment, &mem_payload); + struct UdpardInternalRxSession session = {0}; + rxSessionInit(&session, mem); + TEST_ASSERT_EQUAL(TIMESTAMP_UNSET, session.last_ts_usec); + TEST_ASSERT_EQUAL(TRANSFER_ID_UNSET, session.last_transfer_id); + struct UdpardRxTransfer transfer = {0}; + // Accept a simple transfer through iface #1. + TEST_ASSERT_EQUAL(1, + rxSessionAccept(&session, + 1, + 10000000, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityExceptional, + .src_node_id = 2222, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xB}, + 0, + true, + "Z\xBA\xA1\xBAh"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // Free the payload. + udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // Send the same transfer again through a different iface; it is a duplicate and so it is rejected and freed. 
+ TEST_ASSERT_EQUAL(0, + rxSessionAccept(&session, + 0, + 10000010, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityExceptional, + .src_node_id = 2222, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xB}, + 0, + true, + "Z\xBA\xA1\xBAh"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + // Send a valid transfer that should be accepted but we inject an OOM error. + mem_fragment.limit_fragments = 0; + TEST_ASSERT_EQUAL(-UDPARD_ERROR_MEMORY, + rxSessionAccept(&session, + 2, + 12000020, + makeRxFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityExceptional, + .src_node_id = 2222, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xC}, + 0, + true, + "Z\xBA\xA1\xBAh"), + 1000, + UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, + mem, + &transfer)); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); +} + +// -------------------------------------------------- PORT -------------------------------------------------- + +static inline void testPortAcceptFrameA(void) +{ + InstrumentedAllocator mem_session = {0}; + InstrumentedAllocator mem_fragment = {0}; + InstrumentedAllocator mem_payload = {0}; + instrumentedAllocatorNew(&mem_session); + instrumentedAllocatorNew(&mem_fragment); + instrumentedAllocatorNew(&mem_payload); + const struct UdpardRxMemoryResources mem = {.session = instrumentedAllocatorMakeMemoryResource(&mem_session), // + .fragment = instrumentedAllocatorMakeMemoryResource(&mem_fragment), + .payload = instrumentedAllocatorMakeMemoryDeleter(&mem_payload)}; + struct UdpardRxTransfer transfer = {0}; + // Initialize the port. 
+ struct UdpardRxPort port; + rxPortInit(&port); + TEST_ASSERT_EQUAL(SIZE_MAX, port.extent); + TEST_ASSERT_EQUAL(UDPARD_DEFAULT_TRANSFER_ID_TIMEOUT_USEC, port.transfer_id_timeout_usec); + TEST_ASSERT_NULL(port.sessions); + + // Accept valid non-anonymous transfer. + TEST_ASSERT_EQUAL( + 1, + rxPortAcceptFrame(&port, + 1, + 10000000, + makeDatagramPayloadSingleFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityImmediate, + .src_node_id = 2222, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xB}, + "When will the collapse of space in the vicinity of the " + "Solar System into two dimensions cease?"), + mem, + &transfer)); + TEST_ASSERT_EQUAL(1, mem_session.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Head optimization in effect. + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); + // Check the received transfer. + TEST_ASSERT_EQUAL(10000000, transfer.timestamp_usec); + TEST_ASSERT_EQUAL(UdpardPriorityImmediate, transfer.priority); + TEST_ASSERT_EQUAL(2222, transfer.source_node_id); + TEST_ASSERT_EQUAL(0xB, transfer.transfer_id); + TEST_ASSERT_EQUAL(94, transfer.payload_size); + TEST_ASSERT(compareStringWithPayload("When will the collapse of space in the vicinity of the " + "Solar System into two dimensions cease?", + transfer.payload.view)); + // Free the memory. + udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(1, mem_session.allocated_fragments); // The session remains. + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + + // Send another transfer from another node and see the session count increase. 
+ TEST_ASSERT_EQUAL( + 1, + rxPortAcceptFrame(&port, + 0, + 10000010, + makeDatagramPayloadSingleFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityImmediate, + .src_node_id = 3333, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xC}, + "It will never cease."), + mem, + &transfer)); + TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); // New session created. + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Head optimization in effect. + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); + // Check the received transfer. + TEST_ASSERT_EQUAL(10000010, transfer.timestamp_usec); + TEST_ASSERT_EQUAL(UdpardPriorityImmediate, transfer.priority); + TEST_ASSERT_EQUAL(3333, transfer.source_node_id); + TEST_ASSERT_EQUAL(0xC, transfer.transfer_id); + TEST_ASSERT_EQUAL(20, transfer.payload_size); + TEST_ASSERT(compareStringWithPayload("It will never cease.", transfer.payload.view)); + // Free the memory. + udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); // The sessions remain. + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + + // Try sending another frame with no memory left and see it fail during session allocation. + mem_session.limit_fragments = 0; + TEST_ASSERT_EQUAL( + -UDPARD_ERROR_MEMORY, + rxPortAcceptFrame(&port, + 2, + 10000020, + makeDatagramPayloadSingleFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityImmediate, + .src_node_id = 4444, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xD}, + "Cheng Xin shuddered."), + mem, + &transfer)); + TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); // Not increased. + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Not accepted. + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // Buffer freed. 
+ + // Anonymous transfers are stateless and do not require session allocation. + mem_session.limit_fragments = 0; + TEST_ASSERT_EQUAL( + 1, + rxPortAcceptFrame(&port, + 2, + 10000030, + makeDatagramPayloadSingleFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityImmediate, + .src_node_id = UDPARD_NODE_ID_UNSET, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xD}, + "Cheng Xin shuddered."), + mem, + &transfer)); + TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); // Not increased. + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Head optimization in effect. + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); // Frame passed to the application. + // Check the received transfer. + TEST_ASSERT_EQUAL(10000030, transfer.timestamp_usec); + TEST_ASSERT_EQUAL(UdpardPriorityImmediate, transfer.priority); + TEST_ASSERT_EQUAL(UDPARD_NODE_ID_UNSET, transfer.source_node_id); + TEST_ASSERT_EQUAL(0xD, transfer.transfer_id); + TEST_ASSERT_EQUAL(20, transfer.payload_size); + TEST_ASSERT(compareStringWithPayload("Cheng Xin shuddered.", transfer.payload.view)); + // Free the memory. + udpardRxFragmentFree(transfer.payload, mem.fragment, mem.payload); + TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); // The sessions remain. + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + + // Send invalid anonymous transfers and see them fail. + { // Bad CRC. + struct UdpardMutablePayload datagram = + makeDatagramPayloadSingleFrameString(&mem_payload, // + (TransferMetadata){.priority = UdpardPriorityImmediate, + .src_node_id = UDPARD_NODE_ID_UNSET, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xE}, + "You are scared? Do you think that in this galaxy, in this universe, " + "only the Solar System is collapsing into two dimensions? 
Haha..."); + *(((byte_t*) datagram.data) + HEADER_SIZE_BYTES) = 0x00; // Corrupt the payload, CRC invalid. + TEST_ASSERT_EQUAL(0, rxPortAcceptFrame(&port, 0, 10000040, datagram, mem, &transfer)); + TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + } + { // No payload (transfer CRC is always required). + byte_t* const payload = instrumentedAllocatorAllocate(&mem_payload, HEADER_SIZE_BYTES); + (void) txSerializeHeader(payload, + (TransferMetadata){.priority = UdpardPriorityImmediate, + .src_node_id = UDPARD_NODE_ID_UNSET, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xE}, + 0, + true); + TEST_ASSERT_EQUAL(0, + rxPortAcceptFrame(&port, + 0, + 10000050, + (struct UdpardMutablePayload){.size = HEADER_SIZE_BYTES, .data = payload}, + mem, + &transfer)); + TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); + } + + // Send an invalid frame and make sure the memory is freed. + TEST_ASSERT_EQUAL(0, + rxPortAcceptFrame(&port, + 0, + 10000060, + (struct + UdpardMutablePayload){.size = HEADER_SIZE_BYTES, + .data = + instrumentedAllocatorAllocate(&mem_payload, + HEADER_SIZE_BYTES)}, + mem, + &transfer)); + TEST_ASSERT_EQUAL(2, mem_session.allocated_fragments); // Not increased. + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); // Not accepted. + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); // Buffer freed. + + // Send incomplete transfers to see them cleaned up upon destruction. 
+ mem_session.limit_fragments = SIZE_MAX; + TEST_ASSERT_EQUAL(0, + rxPortAcceptFrame(&port, + 0, + 10000070, + makeDatagramPayloadString(&mem_payload, // + (TransferMetadata){ + .priority = UdpardPriorityImmediate, + .src_node_id = 10000, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xD, + }, + 100, + false, + "What you're saying makes no sense. " + "At least, it doesn't make sense to lower spatial " + "dimensions as a weapon. "), + mem, + &transfer)); + TEST_ASSERT_EQUAL(3, mem_session.allocated_fragments); + TEST_ASSERT_EQUAL(1, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(1, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, + rxPortAcceptFrame(&port, + 0, + 10000080, + makeDatagramPayloadString(&mem_payload, // + (TransferMetadata){ + .priority = UdpardPriorityImmediate, + .src_node_id = 10000, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xD, + }, + 101, + false, + "In the long run, that's the sort of attack that " + "would kill the attacker as well as the target. " + "Eventually, the side that initiated attack would " + "also see their own space fall into the " + "two-dimensional abyss they created."), + mem, + &transfer)); + TEST_ASSERT_EQUAL(3, mem_session.allocated_fragments); // Same session because it comes from the same source. + TEST_ASSERT_EQUAL(2, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(2, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(0, + rxPortAcceptFrame(&port, + 2, + 10000090, + makeDatagramPayloadString(&mem_payload, // + (TransferMetadata){ + .priority = UdpardPriorityImmediate, + .src_node_id = 10001, + .dst_node_id = UDPARD_NODE_ID_UNSET, + .data_specifier = 0, + .transfer_id = 0xD, + }, + 10, + true, + "You're too... kind-hearted."), + mem, + &transfer)); + TEST_ASSERT_EQUAL(4, mem_session.allocated_fragments); // New source. 
+ TEST_ASSERT_EQUAL(3, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(3, mem_payload.allocated_fragments); + TEST_ASSERT_EQUAL(4 * sizeof(struct UdpardInternalRxSession), mem_session.allocated_bytes); + TEST_ASSERT_EQUAL(3 * sizeof(RxFragment), mem_fragment.allocated_bytes); + + // Free the port instance and ensure all ifaces and sessions are cleaned up. + rxPortFree(&port, mem); + TEST_ASSERT_EQUAL(0, mem_session.allocated_fragments); // All gone. + TEST_ASSERT_EQUAL(0, mem_fragment.allocated_fragments); + TEST_ASSERT_EQUAL(0, mem_payload.allocated_fragments); +} + +// --------------------------------------------------------------------------------------------------------------------- + void setUp(void) {} void tearDown(void) {} @@ -144,15 +2382,40 @@ void tearDown(void) {} int main(void) { UNITY_BEGIN(); - RUN_TEST(testRxParseFrameValidMessage); - RUN_TEST(testRxParseFrameValidRPCService); - RUN_TEST(testRxParseFrameValidMessageAnonymous); - RUN_TEST(testRxParseFrameRPCServiceAnonymous); - RUN_TEST(testRxParseFrameRPCServiceBroadcast); - RUN_TEST(testRxParseFrameAnonymousNonSingleFrame); - RUN_TEST(testRxParseFrameBadHeaderCRC); - RUN_TEST(testRxParseFrameUnknownHeaderVersion); - RUN_TEST(testRxParseFrameHeaderWithoutPayload); - RUN_TEST(testRxParseFrameEmpty); + // misc + RUN_TEST(testCompare32); + // frame parser + RUN_TEST(testParseFrameValidMessage); + RUN_TEST(testParseFrameValidRPCService); + RUN_TEST(testParseFrameValidMessageAnonymous); + RUN_TEST(testParseFrameRPCServiceAnonymous); + RUN_TEST(testParseFrameRPCServiceBroadcast); + RUN_TEST(testParseFrameAnonymousNonSingleFrame); + RUN_TEST(testParseFrameBadHeaderCRC); + RUN_TEST(testParseFrameUnknownHeaderVersion); + RUN_TEST(testParseFrameHeaderWithoutPayload); + RUN_TEST(testParseFrameEmpty); + // slot + RUN_TEST(testSlotRestartEmpty); + RUN_TEST(testSlotRestartNonEmpty); + RUN_TEST(testSlotEjectValidLarge); + RUN_TEST(testSlotEjectValidSmall); + RUN_TEST(testSlotEjectValidEmpty); + 
RUN_TEST(testSlotEjectInvalid); + RUN_TEST(testSlotAcceptA); + // iface + RUN_TEST(testIfaceIsFutureTransferID); + RUN_TEST(testIfaceCheckTransferIDTimeout); + RUN_TEST(testIfaceFindMatchingSlot); + RUN_TEST(testIfaceAcceptA); + RUN_TEST(testIfaceAcceptB); + RUN_TEST(testIfaceAcceptC); + // session + RUN_TEST(testSessionDeduplicate); + RUN_TEST(testSessionAcceptA); + // port + RUN_TEST(testPortAcceptFrameA); return UNITY_END(); } + +// NOLINTEND(clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling) diff --git a/tests/src/test_intrusive_tx.c b/tests/src/test_intrusive_tx.c index ac84aca..bdd57d7 100644 --- a/tests/src/test_intrusive_tx.c +++ b/tests/src/test_intrusive_tx.c @@ -26,7 +26,7 @@ static const size_t InterstellarWarSize = sizeof(InterstellarWar) - 1; static const byte_t InterstellarWarCRC[4] = {102, 217, 109, 188}; // These aliases cannot be defined in the public API section: https://github.com/OpenCyphal-Garage/libudpard/issues/36 -typedef struct UdpardConstPayload UdpardConstPayload; +typedef struct UdpardPayload UdpardPayload; typedef struct UdpardUDPIPEndpoint UdpardUDPIPEndpoint; typedef struct UdpardTx UdpardTx; typedef struct UdpardTxItem UdpardTxItem; @@ -94,21 +94,22 @@ static void testMakeChainEmpty(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPriorityFast, - .src_node_id = 1234, - .dst_node_id = 2345, - .data_specifier = 5432, - .transfer_id = 0xBADC0FFEE0DDF00DULL, + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + char user_transfer_referent = '\0'; + const TransferMetadata meta = { + .priority = UdpardPriorityFast, + .src_node_id = 1234, + .dst_node_id = 2345, + .data_specifier = 5432, + .transfer_id = 0xBADC0FFEE0DDF00DULL, }; - const TxChain chain = txMakeChain(&alloc.base, + const TxChain chain = txMakeChain(mem, (byte_t[]){11, 22, 33, 44, 55, 66, 77, 88}, 30, 1234567890, 
meta, (UdpardUDPIPEndpoint){.ip_address = 0x0A0B0C0DU, .udp_port = 0x1234}, - (UdpardConstPayload){.size = 0, .data = ""}, + (UdpardPayload){.size = 0, .data = ""}, &user_transfer_referent); TEST_ASSERT_EQUAL(1, alloc.allocated_fragments); TEST_ASSERT_EQUAL(sizeof(TxItem) + HEADER_SIZE_BYTES + 4, alloc.allocated_bytes); @@ -129,7 +130,7 @@ static void testMakeChainEmpty(void) (byte_t*) (chain.head->base.datagram_payload.data) + HEADER_SIZE_BYTES, 4)); TEST_ASSERT_EQUAL(&user_transfer_referent, chain.head->base.user_transfer_reference); - alloc.base.free(&alloc.base, sizeof(TxItem) + HEADER_SIZE_BYTES + 4, chain.head); + memFree(mem, sizeof(TxItem) + HEADER_SIZE_BYTES + 4, chain.head); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); } @@ -137,21 +138,22 @@ static void testMakeChainSingleMaxMTU(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPrioritySlow, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + char user_transfer_referent = '\0'; + const TransferMetadata meta = { + .priority = UdpardPrioritySlow, + .src_node_id = 4321, + .dst_node_id = 5432, + .data_specifier = 7766, + .transfer_id = 0x0123456789ABCDEFULL, }; - const TxChain chain = txMakeChain(&alloc.base, + const TxChain chain = txMakeChain(mem, (byte_t[]){11, 22, 33, 44, 55, 66, 77, 88}, DetailOfTheCosmosSize + TRANSFER_CRC_SIZE_BYTES, 1234567890, meta, (UdpardUDPIPEndpoint){.ip_address = 0x0A0B0C00U, .udp_port = 7474}, - (UdpardConstPayload){.size = DetailOfTheCosmosSize, .data = DetailOfTheCosmos}, + (UdpardPayload){.size = DetailOfTheCosmosSize, .data = DetailOfTheCosmos}, &user_transfer_referent); TEST_ASSERT_EQUAL(1, alloc.allocated_fragments); TEST_ASSERT_EQUAL(sizeof(TxItem) + HEADER_SIZE_BYTES + DetailOfTheCosmosSize + 
TRANSFER_CRC_SIZE_BYTES, @@ -179,9 +181,7 @@ static void testMakeChainSingleMaxMTU(void) DetailOfTheCosmosSize, TRANSFER_CRC_SIZE_BYTES)); TEST_ASSERT_EQUAL(&user_transfer_referent, chain.head->base.user_transfer_reference); - alloc.base.free(&alloc.base, - sizeof(TxItem) + HEADER_SIZE_BYTES + DetailOfTheCosmosSize + TRANSFER_CRC_SIZE_BYTES, - chain.head); + memFree(mem, sizeof(TxItem) + HEADER_SIZE_BYTES + DetailOfTheCosmosSize + TRANSFER_CRC_SIZE_BYTES, chain.head); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); } @@ -189,21 +189,21 @@ static void testMakeChainSingleFrameDefaultMTU(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - const byte_t payload[UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME + 1] = {0}; + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + const byte_t payload[UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME + 1] = {0}; { // Ensure UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME bytes fit in a single frame with the default MTU. - const TxChain chain = - txMakeChain(&alloc.base, - (byte_t[]){11, 22, 33, 44, 55, 66, 77, 88}, - UDPARD_MTU_DEFAULT, - 1234567890, - (TransferMetadata){.priority = UdpardPrioritySlow, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL}, - (UdpardUDPIPEndpoint){.ip_address = 0x0A0B0C00U, .udp_port = 7474}, - (UdpardConstPayload){.size = UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME, .data = payload}, - NULL); + const TxChain chain = txMakeChain(mem, + (byte_t[]){11, 22, 33, 44, 55, 66, 77, 88}, + UDPARD_MTU_DEFAULT, + 1234567890, + (TransferMetadata){.priority = UdpardPrioritySlow, + .src_node_id = 4321, + .dst_node_id = 5432, + .data_specifier = 7766, + .transfer_id = 0x0123456789ABCDEFULL}, + (UdpardUDPIPEndpoint){.ip_address = 0x0A0B0C00U, .udp_port = 7474}, + (UdpardPayload){.size = UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME, .data = payload}, + NULL); TEST_ASSERT_EQUAL(1, alloc.allocated_fragments); TEST_ASSERT_EQUAL(sizeof(TxItem) + 
HEADER_SIZE_BYTES + UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME + TRANSFER_CRC_SIZE_BYTES, @@ -211,15 +211,14 @@ static void testMakeChainSingleFrameDefaultMTU(void) TEST_ASSERT_EQUAL(1, chain.count); TEST_ASSERT_EQUAL(chain.head, chain.tail); TEST_ASSERT_EQUAL(NULL, chain.head->base.next_in_transfer); - alloc.base.free(&alloc.base, - sizeof(TxItem) + HEADER_SIZE_BYTES + UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME + - TRANSFER_CRC_SIZE_BYTES, - chain.head); + memFree(mem, + sizeof(TxItem) + HEADER_SIZE_BYTES + UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME + TRANSFER_CRC_SIZE_BYTES, + chain.head); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); } { // Increase the payload by 1 byte and ensure it spills over. const TxChain chain = - txMakeChain(&alloc.base, + txMakeChain(mem, (byte_t[]){11, 22, 33, 44, 55, 66, 77, 88}, UDPARD_MTU_DEFAULT, 1234567890, @@ -229,7 +228,7 @@ static void testMakeChainSingleFrameDefaultMTU(void) .data_specifier = 7766, .transfer_id = 0x0123456789ABCDEFULL}, (UdpardUDPIPEndpoint){.ip_address = 0x0A0B0C00U, .udp_port = 7474}, - (UdpardConstPayload){.size = UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME + 1, .data = payload}, + (UdpardPayload){.size = UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME + 1, .data = payload}, NULL); TEST_ASSERT_EQUAL(2, alloc.allocated_fragments); TEST_ASSERT_EQUAL((sizeof(TxItem) + HEADER_SIZE_BYTES) * 2 + UDPARD_MTU_DEFAULT_MAX_SINGLE_FRAME + 1 + @@ -239,8 +238,8 @@ static void testMakeChainSingleFrameDefaultMTU(void) TEST_ASSERT_NOT_EQUAL(chain.head, chain.tail); TEST_ASSERT_EQUAL((UdpardTxItem*) chain.tail, chain.head->base.next_in_transfer); TEST_ASSERT_EQUAL(NULL, chain.tail->base.next_in_transfer); - alloc.base.free(&alloc.base, sizeof(TxItem) + HEADER_SIZE_BYTES + UDPARD_MTU_DEFAULT, chain.head); - alloc.base.free(&alloc.base, alloc.allocated_bytes, chain.tail); + memFree(mem, sizeof(TxItem) + HEADER_SIZE_BYTES + UDPARD_MTU_DEFAULT, chain.head); + memFree(mem, alloc.allocated_bytes, chain.tail); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); } } @@ 
-249,22 +248,23 @@ static void testMakeChainThreeFrames(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + char user_transfer_referent = '\0'; + const TransferMetadata meta = { + .priority = UdpardPriorityNominal, + .src_node_id = 4321, + .dst_node_id = 5432, + .data_specifier = 7766, + .transfer_id = 0x0123456789ABCDEFULL, }; const size_t mtu = (EtherealStrengthSize + 4U + 3U) / 3U; // Force payload split into three frames. - const TxChain chain = txMakeChain(&alloc.base, + const TxChain chain = txMakeChain(mem, (byte_t[]){11, 22, 33, 44, 55, 66, 77, 88}, mtu, 223574680, meta, (UdpardUDPIPEndpoint){.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardConstPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, + (UdpardPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, &user_transfer_referent); TEST_ASSERT_EQUAL(3, alloc.allocated_fragments); TEST_ASSERT_EQUAL(3 * (sizeof(TxItem) + HEADER_SIZE_BYTES) + EtherealStrengthSize + 4U, alloc.allocated_bytes); @@ -320,9 +320,9 @@ static void testMakeChainThreeFrames(void) TEST_ASSERT_EQUAL(&user_transfer_referent, third->user_transfer_reference); // Clean up. 
- alloc.base.free(&alloc.base, sizeof(TxItem) + HEADER_SIZE_BYTES + mtu, (void*) first); - alloc.base.free(&alloc.base, sizeof(TxItem) + HEADER_SIZE_BYTES + mtu, (void*) second); - alloc.base.free(&alloc.base, alloc.allocated_bytes, (void*) third); + memFree(mem, sizeof(TxItem) + HEADER_SIZE_BYTES + mtu, (void*) first); + memFree(mem, sizeof(TxItem) + HEADER_SIZE_BYTES + mtu, (void*) second); + memFree(mem, alloc.allocated_bytes, (void*) third); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); } @@ -330,22 +330,23 @@ static void testMakeChainCRCSpill1(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + char user_transfer_referent = '\0'; + const TransferMetadata meta = { + .priority = UdpardPriorityNominal, + .src_node_id = 4321, + .dst_node_id = 5432, + .data_specifier = 7766, + .transfer_id = 0x0123456789ABCDEFULL, }; const size_t mtu = InterstellarWarSize + 3U; - const TxChain chain = txMakeChain(&alloc.base, + const TxChain chain = txMakeChain(mem, (byte_t[]){11, 22, 33, 44, 55, 66, 77, 88}, mtu, 223574680, meta, (UdpardUDPIPEndpoint){.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardConstPayload){.size = InterstellarWarSize, .data = InterstellarWar}, + (UdpardPayload){.size = InterstellarWarSize, .data = InterstellarWar}, &user_transfer_referent); TEST_ASSERT_EQUAL(2, alloc.allocated_fragments); TEST_ASSERT_EQUAL(2 * (sizeof(TxItem) + HEADER_SIZE_BYTES) + InterstellarWarSize + 4U, alloc.allocated_bytes); @@ -392,8 +393,8 @@ static void testMakeChainCRCSpill1(void) TEST_ASSERT_EQUAL(&user_transfer_referent, chain.tail->base.user_transfer_reference); // Clean up. 
- alloc.base.free(&alloc.base, sizeof(TxItem) + HEADER_SIZE_BYTES + mtu, chain.head); - alloc.base.free(&alloc.base, alloc.allocated_bytes, chain.tail); + memFree(mem, sizeof(TxItem) + HEADER_SIZE_BYTES + mtu, chain.head); + memFree(mem, alloc.allocated_bytes, chain.tail); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); } @@ -401,22 +402,23 @@ static void testMakeChainCRCSpill2(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + char user_transfer_referent = '\0'; + const TransferMetadata meta = { + .priority = UdpardPriorityNominal, + .src_node_id = 4321, + .dst_node_id = 5432, + .data_specifier = 7766, + .transfer_id = 0x0123456789ABCDEFULL, }; const size_t mtu = InterstellarWarSize + 2U; - const TxChain chain = txMakeChain(&alloc.base, + const TxChain chain = txMakeChain(mem, (byte_t[]){11, 22, 33, 44, 55, 66, 77, 88}, mtu, 223574680, meta, (UdpardUDPIPEndpoint){.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardConstPayload){.size = InterstellarWarSize, .data = InterstellarWar}, + (UdpardPayload){.size = InterstellarWarSize, .data = InterstellarWar}, &user_transfer_referent); TEST_ASSERT_EQUAL(2, alloc.allocated_fragments); TEST_ASSERT_EQUAL(2 * (sizeof(TxItem) + HEADER_SIZE_BYTES) + InterstellarWarSize + 4U, alloc.allocated_bytes); @@ -463,8 +465,8 @@ static void testMakeChainCRCSpill2(void) TEST_ASSERT_EQUAL(&user_transfer_referent, chain.tail->base.user_transfer_reference); // Clean up. 
- alloc.base.free(&alloc.base, sizeof(TxItem) + HEADER_SIZE_BYTES + mtu, chain.head); - alloc.base.free(&alloc.base, alloc.allocated_bytes, chain.tail); + memFree(mem, sizeof(TxItem) + HEADER_SIZE_BYTES + mtu, chain.head); + memFree(mem, alloc.allocated_bytes, chain.tail); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); } @@ -472,22 +474,23 @@ static void testMakeChainCRCSpill3(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + char user_transfer_referent = '\0'; + const TransferMetadata meta = { + .priority = UdpardPriorityNominal, + .src_node_id = 4321, + .dst_node_id = 5432, + .data_specifier = 7766, + .transfer_id = 0x0123456789ABCDEFULL, }; const size_t mtu = InterstellarWarSize + 1U; - const TxChain chain = txMakeChain(&alloc.base, + const TxChain chain = txMakeChain(mem, (byte_t[]){11, 22, 33, 44, 55, 66, 77, 88}, mtu, 223574680, meta, (UdpardUDPIPEndpoint){.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardConstPayload){.size = InterstellarWarSize, .data = InterstellarWar}, + (UdpardPayload){.size = InterstellarWarSize, .data = InterstellarWar}, &user_transfer_referent); TEST_ASSERT_EQUAL(2, alloc.allocated_fragments); TEST_ASSERT_EQUAL(2 * (sizeof(TxItem) + HEADER_SIZE_BYTES) + InterstellarWarSize + 4U, alloc.allocated_bytes); @@ -534,8 +537,8 @@ static void testMakeChainCRCSpill3(void) TEST_ASSERT_EQUAL(&user_transfer_referent, chain.tail->base.user_transfer_reference); // Clean up. 
- alloc.base.free(&alloc.base, sizeof(TxItem) + HEADER_SIZE_BYTES + mtu, chain.head); - alloc.base.free(&alloc.base, alloc.allocated_bytes, chain.tail); + memFree(mem, sizeof(TxItem) + HEADER_SIZE_BYTES + mtu, chain.head); + memFree(mem, alloc.allocated_bytes, chain.tail); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); } @@ -543,22 +546,23 @@ static void testMakeChainCRCSpillFull(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - char user_transfer_referent = '\0'; - const TransferMetadata meta = { - .priority = UdpardPriorityNominal, - .src_node_id = 4321, - .dst_node_id = 5432, - .data_specifier = 7766, - .transfer_id = 0x0123456789ABCDEFULL, + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + char user_transfer_referent = '\0'; + const TransferMetadata meta = { + .priority = UdpardPriorityNominal, + .src_node_id = 4321, + .dst_node_id = 5432, + .data_specifier = 7766, + .transfer_id = 0x0123456789ABCDEFULL, }; const size_t mtu = InterstellarWarSize; - const TxChain chain = txMakeChain(&alloc.base, + const TxChain chain = txMakeChain(mem, (byte_t[]){11, 22, 33, 44, 55, 66, 77, 88}, mtu, 223574680, meta, (UdpardUDPIPEndpoint){.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardConstPayload){.size = InterstellarWarSize, .data = InterstellarWar}, + (UdpardPayload){.size = InterstellarWarSize, .data = InterstellarWar}, &user_transfer_referent); TEST_ASSERT_EQUAL(2, alloc.allocated_fragments); TEST_ASSERT_EQUAL(2 * (sizeof(TxItem) + HEADER_SIZE_BYTES) + InterstellarWarSize + 4U, alloc.allocated_bytes); @@ -601,8 +605,8 @@ static void testMakeChainCRCSpillFull(void) TEST_ASSERT_EQUAL(&user_transfer_referent, chain.tail->base.user_transfer_reference); // Clean up. 
- alloc.base.free(&alloc.base, sizeof(TxItem) + HEADER_SIZE_BYTES + mtu, chain.head); - alloc.base.free(&alloc.base, alloc.allocated_bytes, chain.tail); + memFree(mem, sizeof(TxItem) + HEADER_SIZE_BYTES + mtu, chain.head); + memFree(mem, alloc.allocated_bytes, chain.tail); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); } @@ -610,14 +614,15 @@ static void testPushPeekPopFree(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - const UdpardNodeID node_id = 1234; + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + const UdpardNodeID node_id = 1234; // UdpardTx tx = { .local_node_id = &node_id, .queue_capacity = 3, .mtu = (EtherealStrengthSize + 4U + 3U) / 3U, .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = &alloc.base, + .memory = mem, .queue_size = 0, .root = NULL, }; @@ -634,7 +639,7 @@ static void testPushPeekPopFree(void) 1234567890U, meta, (UdpardUDPIPEndpoint){.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardConstPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, + (UdpardPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, &user_transfer_referent)); TEST_ASSERT_EQUAL(3, alloc.allocated_fragments); TEST_ASSERT_EQUAL(3 * (sizeof(TxItem) + HEADER_SIZE_BYTES) + EtherealStrengthSize + 4U, alloc.allocated_bytes); @@ -688,14 +693,15 @@ static void testPushPrioritization(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - const UdpardNodeID node_id = 1234; + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + const UdpardNodeID node_id = 1234; // UdpardTx tx = { .local_node_id = &node_id, .queue_capacity = 7, .mtu = 140, // This is chosen to match the test data. 
.dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = &alloc.base, + .memory = mem, .queue_size = 0, .root = NULL, }; @@ -712,7 +718,7 @@ static void testPushPrioritization(void) 0, meta_a, (UdpardUDPIPEndpoint){.ip_address = 0xAAAAAAAA, .udp_port = 0xAAAA}, - (UdpardConstPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, + (UdpardPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, NULL)); TEST_ASSERT_EQUAL(3, alloc.allocated_fragments); TEST_ASSERT_EQUAL(3, tx.queue_size); @@ -732,7 +738,7 @@ static void testPushPrioritization(void) .transfer_id = 100000, }, (UdpardUDPIPEndpoint){.ip_address = 0xBBBBBBBB, .udp_port = 0xBBBB}, - (UdpardConstPayload){.size = DetailOfTheCosmosSize, .data = DetailOfTheCosmos}, + (UdpardPayload){.size = DetailOfTheCosmosSize, .data = DetailOfTheCosmos}, NULL)); TEST_ASSERT_EQUAL(4, alloc.allocated_fragments); TEST_ASSERT_EQUAL(4, tx.queue_size); @@ -752,7 +758,7 @@ static void testPushPrioritization(void) .transfer_id = 10000, }, (UdpardUDPIPEndpoint){.ip_address = 0xCCCCCCCC, .udp_port = 0xCCCC}, - (UdpardConstPayload){.size = InterstellarWarSize, .data = InterstellarWar}, + (UdpardPayload){.size = InterstellarWarSize, .data = InterstellarWar}, NULL)); TEST_ASSERT_EQUAL(5, alloc.allocated_fragments); TEST_ASSERT_EQUAL(5, tx.queue_size); @@ -772,7 +778,7 @@ static void testPushPrioritization(void) .transfer_id = 10001, }, (UdpardUDPIPEndpoint){.ip_address = 0xDDDDDDDD, .udp_port = 0xDDDD}, - (UdpardConstPayload){.size = InterstellarWarSize, .data = InterstellarWar}, + (UdpardPayload){.size = InterstellarWarSize, .data = InterstellarWar}, NULL)); TEST_ASSERT_EQUAL(6, alloc.allocated_fragments); TEST_ASSERT_EQUAL(6, tx.queue_size); @@ -792,7 +798,7 @@ static void testPushPrioritization(void) .transfer_id = 1000, }, (UdpardUDPIPEndpoint){.ip_address = 0xEEEEEEEE, .udp_port = 0xEEEE}, - (UdpardConstPayload){.size = InterstellarWarSize, .data = InterstellarWar}, + (UdpardPayload){.size = 
InterstellarWarSize, .data = InterstellarWar}, NULL)); TEST_ASSERT_EQUAL(7, alloc.allocated_fragments); TEST_ASSERT_EQUAL(7, tx.queue_size); @@ -858,14 +864,15 @@ static void testPushCapacityLimit(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - const UdpardNodeID node_id = 1234; + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + const UdpardNodeID node_id = 1234; // UdpardTx tx = { .local_node_id = &node_id, .queue_capacity = 2, .mtu = 10U, .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = &alloc.base, + .memory = mem, .queue_size = 0, .root = NULL, }; @@ -881,7 +888,7 @@ static void testPushCapacityLimit(void) 1234567890U, meta, (UdpardUDPIPEndpoint){.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardConstPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, + (UdpardPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, NULL)); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); @@ -892,14 +899,15 @@ static void testPushOOM(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - const UdpardNodeID node_id = 1234; + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + const UdpardNodeID node_id = 1234; // UdpardTx tx = { .local_node_id = &node_id, .queue_capacity = 10000U, .mtu = (EtherealStrengthSize + 4U + 3U) / 3U, .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = &alloc.base, + .memory = mem, .queue_size = 0, .root = NULL, }; @@ -916,7 +924,7 @@ static void testPushOOM(void) 1234567890U, meta, (UdpardUDPIPEndpoint){.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardConstPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, + (UdpardPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, NULL)); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); @@ -927,14 +935,15 @@ static void 
testPushAnonymousMultiFrame(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - const UdpardNodeID node_id = 0xFFFFU; + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + const UdpardNodeID node_id = 0xFFFFU; // UdpardTx tx = { .local_node_id = &node_id, .queue_capacity = 10000U, .mtu = (EtherealStrengthSize + 4U + 3U) / 3U, .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = &alloc.base, + .memory = mem, .queue_size = 0, .root = NULL, }; @@ -950,7 +959,7 @@ static void testPushAnonymousMultiFrame(void) 1234567890U, meta, (UdpardUDPIPEndpoint){.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardConstPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, + (UdpardPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, NULL)); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); @@ -961,14 +970,15 @@ static void testPushAnonymousService(void) { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - const UdpardNodeID node_id = 0xFFFFU; + struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + const UdpardNodeID node_id = 0xFFFFU; // UdpardTx tx = { .local_node_id = &node_id, .queue_capacity = 10000, .mtu = 1500, .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = &alloc.base, + .memory = mem, .queue_size = 0, .root = NULL, }; @@ -984,7 +994,7 @@ static void testPushAnonymousService(void) 1234567890U, meta, (UdpardUDPIPEndpoint){.ip_address = 0xBABADEDAU, .udp_port = 0xD0ED}, - (UdpardConstPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, + (UdpardPayload){.size = EtherealStrengthSize, .data = EtherealStrength}, NULL)); TEST_ASSERT_EQUAL(0, alloc.allocated_fragments); TEST_ASSERT_EQUAL(0, alloc.allocated_bytes); diff --git a/tests/src/test_tx.cpp b/tests/src/test_tx.cpp index 6648833..29a413d 100644 --- a/tests/src/test_tx.cpp +++ b/tests/src/test_tx.cpp @@ -29,49 
+29,49 @@ void testInit() std::monostate user_referent; const UdpardNodeID node_id = 0; { - UdpardMemoryResource memory{ - .allocate = &dummyAllocatorAllocate, - .free = &dummyAllocatorFree, + const UdpardMemoryResource memory{ .user_reference = &user_referent, + .deallocate = &dummyAllocatorDeallocate, + .allocate = &dummyAllocatorAllocate, }; - TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardTxInit(nullptr, &node_id, 0, &memory)); + TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardTxInit(nullptr, &node_id, 0, memory)); } { - UdpardTx tx{}; - UdpardMemoryResource memory{ - .allocate = &dummyAllocatorAllocate, - .free = &dummyAllocatorFree, + UdpardTx tx{}; + const UdpardMemoryResource memory{ .user_reference = &user_referent, + .deallocate = &dummyAllocatorDeallocate, + .allocate = &dummyAllocatorAllocate, }; - TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardTxInit(&tx, nullptr, 0, &memory)); + TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardTxInit(&tx, nullptr, 0, memory)); } { - UdpardTx tx{}; - UdpardMemoryResource memory{ - .allocate = nullptr, - .free = &dummyAllocatorFree, + UdpardTx tx{}; + const UdpardMemoryResource memory{ .user_reference = &user_referent, + .deallocate = &dummyAllocatorDeallocate, + .allocate = nullptr, }; - TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardTxInit(&tx, &node_id, 0, &memory)); + TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardTxInit(&tx, &node_id, 0, memory)); } { - UdpardTx tx{}; - UdpardMemoryResource memory{ - .allocate = &dummyAllocatorAllocate, - .free = nullptr, + UdpardTx tx{}; + const UdpardMemoryResource memory{ .user_reference = &user_referent, + .deallocate = nullptr, + .allocate = &dummyAllocatorAllocate, }; - TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardTxInit(&tx, &node_id, 0, &memory)); + TEST_ASSERT_EQUAL(-UDPARD_ERROR_ARGUMENT, udpardTxInit(&tx, &node_id, 0, memory)); } { - UdpardTx tx{}; - UdpardMemoryResource memory{ - .allocate = &dummyAllocatorAllocate, - .free = &dummyAllocatorFree, + UdpardTx tx{}; 
+ const UdpardMemoryResource memory{ .user_reference = &user_referent, + .deallocate = &dummyAllocatorDeallocate, + .allocate = &dummyAllocatorAllocate, }; - TEST_ASSERT_EQUAL(0, udpardTxInit(&tx, &node_id, 0, &memory)); - TEST_ASSERT_EQUAL(&user_referent, tx.memory->user_reference); + TEST_ASSERT_EQUAL(0, udpardTxInit(&tx, &node_id, 0, memory)); + TEST_ASSERT_EQUAL(&user_referent, tx.memory.user_reference); TEST_ASSERT_EQUAL(UDPARD_MTU_DEFAULT, tx.mtu); } } @@ -80,14 +80,15 @@ void testPublish() { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - const UdpardNodeID node_id = 1234; + const struct UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + const UdpardNodeID node_id = 1234; // UdpardTx tx{ .local_node_id = &node_id, .queue_capacity = 1U, .mtu = UDPARD_MTU_DEFAULT, .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = &alloc.base, + .memory = mem, .queue_size = 0, .root = nullptr, }; @@ -227,14 +228,15 @@ void testRequest() { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - const UdpardNodeID node_id = 1234; + const UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + const UdpardNodeID node_id = 1234; // UdpardTx tx{ .local_node_id = &node_id, .queue_capacity = 1U, .mtu = UDPARD_MTU_DEFAULT, .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = &alloc.base, + .memory = mem, .queue_size = 0, .root = nullptr, }; @@ -394,14 +396,15 @@ void testRespond() { InstrumentedAllocator alloc; instrumentedAllocatorNew(&alloc); - const UdpardNodeID node_id = 1234; + const UdpardMemoryResource mem = instrumentedAllocatorMakeMemoryResource(&alloc); + const UdpardNodeID node_id = 1234; // UdpardTx tx{ .local_node_id = &node_id, .queue_capacity = 1U, .mtu = UDPARD_MTU_DEFAULT, .dscp_value_per_priority = {0, 1, 2, 3, 4, 5, 6, 7}, - .memory = &alloc.base, + .memory = mem, .queue_size = 0, .root = nullptr, }; @@ -541,7 +544,7 @@ void testPeekPopFreeNULL() // Just make sure we 
don't crash. { TEST_ASSERT_EQUAL(nullptr, udpardTxPeek(nullptr)); TEST_ASSERT_EQUAL(nullptr, udpardTxPop(nullptr, nullptr)); - udpardTxFree(nullptr, nullptr); + udpardTxFree({}, nullptr); } } // namespace