Use entire HW ring in ethernet drivers, keep head and tail overflowing
Signed-off-by: Courtney Darville <courtneydarville94@outlook.com>
Courtney3141 committed Sep 23, 2024
1 parent f655259 commit abe510d
Showing 2 changed files with 83 additions and 65 deletions.
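The change drops modulo-wrapped ring indices in favour of free-running 32-bit counters: head and tail only ever increment (and are allowed to overflow), occupancy is tail - head, and an index is reduced to a slot with % capacity only at the point of use. A minimal standalone sketch of the scheme the diff adopts (illustrative names, not the driver's):

#include <stdbool.h>
#include <stdint.h>

/* Free-running ring indices. tail/head increment without bound and rely on
 * unsigned wraparound: tail - head is the occupied-slot count, exact so long
 * as it never exceeds capacity. The slot mapping below stays continuous
 * across the 2^32 wrap only if capacity divides 2^32, i.e. capacity is a
 * power of two. */
typedef struct {
    uint32_t tail;     /* count of slots ever produced */
    uint32_t head;     /* count of slots ever consumed */
    uint32_t capacity; /* number of slots; assumed a power of two */
} ring_t;

static inline bool ring_full(const ring_t *r)  { return r->tail - r->head == r->capacity; }
static inline bool ring_empty(const ring_t *r) { return r->tail - r->head == 0; }
static inline uint32_t ring_slot(const ring_t *r, uint32_t i) { return i % r->capacity; }

The power-of-two requirement on RX_COUNT/TX_COUNT is an assumption this diff does not itself enforce; a _Static_assert((COUNT & (COUNT - 1)) == 0, "...") would make it explicit.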
76 changes: 42 additions & 34 deletions drivers/network/imx/ethernet.c
@@ -42,10 +42,11 @@ struct descriptor {

/* HW ring buffer data type */
typedef struct {
unsigned int tail; /* index to insert at */
unsigned int head; /* index to remove from */
uint32_t tail; /* index to insert at */
uint32_t head; /* index to remove from */
uint32_t capacity; /* capacity of the ring */
volatile struct descriptor *descr; /* buffer descriptor array */
net_buff_desc_t descr_mdata[MAX_COUNT]; /* associated meta data array */
volatile struct descriptor *descr; /* buffer descripter array */
} hw_ring_t;

hw_ring_t rx; /* Rx NIC ring */
@@ -58,14 +59,14 @@ net_queue_handle_t tx_queue;

volatile struct enet_regs *eth;

static inline bool hw_ring_full(hw_ring_t *ring, size_t ring_capacity)
static inline bool hw_ring_full(hw_ring_t *ring)
{
return !((ring->tail - ring->head + 1) % ring_capacity);
return ring->tail - ring->head == ring->capacity;
}

static inline bool hw_ring_empty(hw_ring_t *ring, size_t ring_capacity)
static inline bool hw_ring_empty(hw_ring_t *ring)
{
return !((ring->tail - ring->head) % ring_capacity);
return ring->tail - ring->head == 0;
}
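These checks stay exact even after the counters overflow, because unsigned subtraction is arithmetic modulo 2^32. A quick standalone check of the boundary case (illustrative values, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint32_t capacity = 256;

    /* head sits just below the 32-bit boundary; tail has wrapped past it. */
    uint32_t head = UINT32_MAX - 9;  /* 0xfffffff6 */
    uint32_t tail = head + capacity; /* wraps around to 246 */
    assert(tail - head == capacity); /* still reads as exactly full */

    /* Slot numbering is also continuous across the wrap, because 256
     * divides 2^32: slot 255 is followed by slot 0. */
    uint32_t last = UINT32_MAX, next = last + 1;
    assert(last % capacity == 255 && next % capacity == 0);
    return 0;
}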

static void update_ring_slot(hw_ring_t *ring, unsigned int idx, uintptr_t phys,
@@ -78,38 +79,39 @@ static void update_ring_slot(hw_ring_t *ring, unsigned int idx, uintptr_t phys,
/* Ensure all writes to the descriptor complete, before we set the flags
* that makes hardware aware of this slot.
*/
__sync_synchronize();
THREAD_MEMORY_RELEASE();
d->stat = stat;
}
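The release barrier before d->stat pairs with the THREAD_MEMORY_ACQUIRE() added on the harvest paths below: publish the descriptor fields, then the ownership flag; observe the flag, then read the fields. Assuming sDDF's macros map onto the C11 fences (an assumption about sddf's util headers; note also that C11 fences alone do not order accesses against the device's DMA, which depends on the mapping's memory attributes), the pairing looks like:

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative descriptor and ownership flag; real layouts are device-specific. */
struct desc { uint32_t addr; uint16_t len; uint16_t stat; };
#define OWNED_BY_HW 0x8000

/* Producer: fill the slot, then hand it over. */
static void publish(volatile struct desc *d, uint32_t addr, uint16_t len)
{
    d->addr = addr;
    d->len = len;
    atomic_thread_fence(memory_order_release); /* field writes complete first */
    d->stat = OWNED_BY_HW;
}

/* Consumer: test ownership, then read the rest of the slot. */
static int harvest(volatile struct desc *d, uint16_t *len_out)
{
    if (d->stat & OWNED_BY_HW) {
        return 0;                              /* still owned by hardware */
    }
    atomic_thread_fence(memory_order_acquire); /* field reads happen after */
    *len_out = d->len;
    return 1;
}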

static void rx_provide(void)
{
bool reprocess = true;
while (reprocess) {
while (!hw_ring_full(&rx, RX_COUNT) && !net_queue_empty_free(&rx_queue)) {
while (!hw_ring_full(&rx) && !net_queue_empty_free(&rx_queue)) {
net_buff_desc_t buffer;
int err = net_dequeue_free(&rx_queue, &buffer);
assert(!err);

uint32_t idx = rx.tail % rx.capacity;
uint16_t stat = RXD_EMPTY;
if (rx.tail + 1 == RX_COUNT) {
if (idx + 1 == rx.capacity) {
stat |= WRAP;
}
rx.descr_mdata[rx.tail] = buffer;
update_ring_slot(&rx, rx.tail, buffer.io_or_offset, 0, stat);
rx.tail = (rx.tail + 1) % RX_COUNT;
rx.descr_mdata[idx] = buffer;
update_ring_slot(&rx, idx, buffer.io_or_offset, 0, stat);
rx.tail++;
eth->rdar = RDAR_RDAR;
}

/* Only request a notification from virtualiser if HW ring not full */
if (!hw_ring_full(&rx, RX_COUNT)) {
if (!hw_ring_full(&rx)) {
net_request_signal_free(&rx_queue);
} else {
net_cancel_signal_free(&rx_queue);
}
reprocess = false;

if (!net_queue_empty_free(&rx_queue) && !hw_ring_full(&rx, RX_COUNT)) {
if (!net_queue_empty_free(&rx_queue) && !hw_ring_full(&rx)) {
net_cancel_signal_free(&rx_queue);
reprocess = true;
}
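The end of this reprocess loop is the usual wake-up race guard: after arming the free-queue notification the driver re-checks both the queue and the ring, and cancels the signal and reprocesses if work arrived between the last poll and net_request_signal_free; without the second check it could go idle with buffers pending.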
@@ -119,20 +121,23 @@ static void rx_provide(void)
static void rx_return(void)
{
bool packets_transferred = false;
while (!hw_ring_empty(&rx, RX_COUNT)) {
while (!hw_ring_empty(&rx)) {
/* If buffer slot is still empty, we have processed all packets the device has filled */
volatile struct descriptor *d = &(rx.descr[rx.head]);
uint32_t idx = rx.head % rx.capacity;
volatile struct descriptor *d = &(rx.descr[idx]);
if (d->stat & RXD_EMPTY) {
break;
}

net_buff_desc_t buffer = rx.descr_mdata[rx.head];
THREAD_MEMORY_ACQUIRE();

net_buff_desc_t buffer = rx.descr_mdata[idx];
buffer.len = d->len;
int err = net_enqueue_active(&rx_queue, buffer);
assert(!err);

packets_transferred = true;
rx.head = (rx.head + 1) % RX_COUNT;
rx.head++;
}

if (packets_transferred && net_require_signal_active(&rx_queue)) {
@@ -145,26 +150,26 @@ static void tx_provide(void)
{
bool reprocess = true;
while (reprocess) {
while (!(hw_ring_full(&tx, TX_COUNT)) && !net_queue_empty_active(&tx_queue)) {
while (!(hw_ring_full(&tx)) && !net_queue_empty_active(&tx_queue)) {
net_buff_desc_t buffer;
int err = net_dequeue_active(&tx_queue, &buffer);
assert(!err);

uint32_t idx = tx.tail % tx.capacity;
uint16_t stat = TXD_READY | TXD_ADDCRC | TXD_LAST;
if (tx.tail + 1 == TX_COUNT) {
if (idx + 1 == tx.capacity) {
stat |= WRAP;
}
tx.descr_mdata[tx.tail] = buffer;
update_ring_slot(&tx, tx.tail, buffer.io_or_offset, buffer.len, stat);

tx.tail = (tx.tail + 1) % TX_COUNT;
tx.descr_mdata[idx] = buffer;
update_ring_slot(&tx, idx, buffer.io_or_offset, buffer.len, stat);
tx.tail++;
eth->tdar = TDAR_TDAR;
}

net_request_signal_active(&tx_queue);
reprocess = false;

if (!hw_ring_full(&tx, TX_COUNT) && !net_queue_empty_active(&tx_queue)) {
if (!hw_ring_full(&tx) && !net_queue_empty_active(&tx_queue)) {
net_cancel_signal_active(&tx_queue);
reprocess = true;
}
@@ -174,21 +179,23 @@ static void tx_provide(void)
static void tx_return(void)
{
bool enqueued = false;
while (!hw_ring_empty(&tx, TX_COUNT)) {
while (!hw_ring_empty(&tx)) {
/* Ensure that this buffer has been sent by the device */
volatile struct descriptor *d = &(tx.descr[tx.head]);
uint32_t idx = tx.head % tx.capacity;
volatile struct descriptor *d = &(tx.descr[idx]);
if (d->stat & TXD_READY) {
break;
}

net_buff_desc_t buffer = tx.descr_mdata[tx.head];
buffer.len = 0;

tx.head = (tx.head + 1) % TX_COUNT;
THREAD_MEMORY_ACQUIRE();

net_buff_desc_t buffer = tx.descr_mdata[idx];
buffer.len = 0;
int err = net_enqueue_free(&tx_queue, buffer);
assert(!err);

enqueued = true;
tx.head++;
}

if (enqueued && net_require_signal_free(&tx_queue)) {
@@ -225,7 +232,9 @@ static void eth_setup(void)
uint32_t h = eth->paur;

/* Set up HW rings */
rx.capacity = RX_COUNT;
rx.descr = (volatile struct descriptor *)hw_ring_buffer_vaddr;
tx.capacity = TX_COUNT;
tx.descr = (volatile struct descriptor *)(hw_ring_buffer_vaddr + (sizeof(struct descriptor) * RX_COUNT));

/* Perform reset */
@@ -269,14 +278,13 @@ static void eth_setup(void)
eth->tipg = TIPG;
/* Transmit FIFO Watermark register - store and forward */
eth->tfwr = STRFWD;
/* clear rx store and forward. This must be done for hardware csums*/
/* clear rx store and forward. This must be done for hardware csums */
eth->rsfl = 0;
/* Do not forward frames with errors + check the csum */
eth->racc = RACC_LINEDIS | RACC_IPDIS | RACC_PRODIS;
/* Add the checksum for known IP protocols */
eth->tacc = TACC_PROCHK | TACC_IPCHK;

/* Set RDSR */
eth->rdsr = hw_ring_buffer_paddr;
eth->tdsr = hw_ring_buffer_paddr + (sizeof(struct descriptor) * RX_COUNT);

72 changes: 41 additions & 31 deletions drivers/network/meson/ethernet.c
@@ -44,10 +44,11 @@ _Static_assert((RX_COUNT + TX_COUNT) * sizeof(struct descriptor) <= NET_HW_REGIO
"Expect rx+tx buffers to fit in single 2MB page");

typedef struct {
unsigned int tail; /* index to insert at */
unsigned int head; /* index to remove from */
uint32_t tail; /* index to insert at */
uint32_t head; /* index to remove from */
uint32_t capacity; /* capacity of the ring */
volatile struct descriptor *descr; /* buffer descriptor array */
net_buff_desc_t descr_mdata[MAX_COUNT]; /* associated meta data array */
volatile struct descriptor *descr; /* buffer descripter array */
} hw_ring_t;

hw_ring_t rx;
@@ -59,14 +60,14 @@ net_queue_handle_t tx_queue;
volatile struct eth_mac_regs *eth_mac;
volatile struct eth_dma_regs *eth_dma;

static inline bool hw_ring_full(hw_ring_t *ring, size_t ring_capacity)
static inline bool hw_ring_full(hw_ring_t *ring)
{
return !((ring->tail + 2 - ring->head) % ring_capacity);
return ring->tail - ring->head == ring->capacity;
}

static inline bool hw_ring_empty(hw_ring_t *ring, size_t ring_capacity)
static inline bool hw_ring_empty(hw_ring_t *ring)
{
return !((ring->tail - ring->head) % ring_capacity);
return ring->tail - ring->head == 0;
}
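Worth noting alongside the commit title: the meson driver's old full check, !((ring->tail + 2 - ring->head) % ring_capacity), declared the ring full two slots early (and the imx variant one slot early), so the hardware never saw every descriptor. The new tail - head == capacity test is what lets both drivers use the entire ring.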

static void update_ring_slot(hw_ring_t *ring, unsigned int idx, uint32_t status,
@@ -87,27 +88,28 @@ static void rx_provide()
{
bool reprocess = true;
while (reprocess) {
while (!hw_ring_full(&rx, RX_COUNT) && !net_queue_empty_free(&rx_queue)) {
while (!hw_ring_full(&rx) && !net_queue_empty_free(&rx_queue)) {
net_buff_desc_t buffer;
int err = net_dequeue_free(&rx_queue, &buffer);
assert(!err);

uint32_t idx = rx.tail % rx.capacity;
uint32_t cntl = (MAX_RX_FRAME_SZ << DESC_RXCTRL_SIZE1SHFT) & DESC_RXCTRL_SIZE1MASK;
if (rx.tail + 1 == RX_COUNT) {
if (idx + 1 == rx.capacity) {
cntl |= DESC_RXCTRL_RXRINGEND;
}

rx.descr_mdata[rx.tail] = buffer;
update_ring_slot(&rx, rx.tail, DESC_RXSTS_OWNBYDMA, cntl, buffer.io_or_offset, 0);
rx.descr_mdata[idx] = buffer;
update_ring_slot(&rx, idx, DESC_RXSTS_OWNBYDMA, cntl, buffer.io_or_offset, 0);
eth_dma->rxpolldemand = POLL_DATA;

rx.tail = (rx.tail + 1) % RX_COUNT;
rx.tail++;
}

net_request_signal_free(&rx_queue);
reprocess = false;

if (!net_queue_empty_free(&rx_queue) && !hw_ring_full(&rx, RX_COUNT)) {
if (!net_queue_empty_free(&rx_queue) && !hw_ring_full(&rx)) {
net_cancel_signal_free(&rx_queue);
reprocess = true;
}
@@ -117,33 +119,36 @@ static void rx_provide()
static void rx_return(void)
{
bool packets_transferred = false;
while (!hw_ring_empty(&rx, RX_COUNT)) {
while (!hw_ring_empty(&rx)) {
/* If buffer slot is still empty, we have processed all packets the device has filled */
volatile struct descriptor *d = &(rx.descr[rx.head]);
uint32_t idx = rx.head % rx.capacity;
volatile struct descriptor *d = &(rx.descr[idx]);
if (d->status & DESC_RXSTS_OWNBYDMA) {
break;
}
net_buff_desc_t buffer = rx.descr_mdata[rx.head];

THREAD_MEMORY_ACQUIRE();

net_buff_desc_t buffer = rx.descr_mdata[idx];
if (d->status & DESC_RXSTS_ERROR) {
sddf_dprintf("ETH|ERROR: RX descriptor returned with error status %x\n", d->status);
idx = rx.tail % rx.capacity;
uint32_t cntl = (MAX_RX_FRAME_SZ << DESC_RXCTRL_SIZE1SHFT) & DESC_RXCTRL_SIZE1MASK;
if (rx.tail + 1 == RX_COUNT) {
if (idx + 1 == rx.capacity) {
cntl |= DESC_RXCTRL_RXRINGEND;
}

rx.descr_mdata[rx.tail] = buffer;
update_ring_slot(&rx, rx.tail, DESC_RXSTS_OWNBYDMA, cntl, buffer.io_or_offset, 0);
rx.descr_mdata[idx] = buffer;
update_ring_slot(&rx, idx, DESC_RXSTS_OWNBYDMA, cntl, buffer.io_or_offset, 0);
eth_dma->rxpolldemand = POLL_DATA;
rx.tail = (rx.tail + 1) % RX_COUNT;
rx.tail++;
} else {
buffer.len = (d->status & DESC_RXSTS_LENMSK) >> DESC_RXSTS_LENSHFT;
int err = net_enqueue_active(&rx_queue, buffer);
assert(!err);
packets_transferred = true;
}
rx.head = (rx.head + 1) % RX_COUNT;
rx.head++;
}

if (packets_transferred && net_require_signal_active(&rx_queue)) {
@@ -156,26 +161,27 @@ static void tx_provide(void)
{
bool reprocess = true;
while (reprocess) {
while (!(hw_ring_full(&tx, TX_COUNT)) && !net_queue_empty_active(&tx_queue)) {
while (!(hw_ring_full(&tx)) && !net_queue_empty_active(&tx_queue)) {
net_buff_desc_t buffer;
int err = net_dequeue_active(&tx_queue, &buffer);
assert(!err);

uint32_t idx = tx.tail % tx.capacity;
uint32_t cntl = (((uint32_t) buffer.len) << DESC_TXCTRL_SIZE1SHFT) & DESC_TXCTRL_SIZE1MASK;
cntl |= DESC_TXCTRL_TXLAST | DESC_TXCTRL_TXFIRST | DESC_TXCTRL_TXINT;
if (tx.tail + 1 == TX_COUNT) {
if (idx + 1 == tx.capacity) {
cntl |= DESC_TXCTRL_TXRINGEND;
}
tx.descr_mdata[tx.tail] = buffer;
update_ring_slot(&tx, tx.tail, DESC_TXSTS_OWNBYDMA, cntl, buffer.io_or_offset, 0);
tx.descr_mdata[idx] = buffer;
update_ring_slot(&tx, idx, DESC_TXSTS_OWNBYDMA, cntl, buffer.io_or_offset, 0);

tx.tail = (tx.tail + 1) % TX_COUNT;
tx.tail++;
}

net_request_signal_active(&tx_queue);
reprocess = false;

if (!hw_ring_full(&tx, TX_COUNT) && !net_queue_empty_active(&tx_queue)) {
if (!hw_ring_full(&tx) && !net_queue_empty_active(&tx_queue)) {
net_cancel_signal_active(&tx_queue);
reprocess = true;
}
@@ -186,19 +192,21 @@ static void tx_provide(void)
static void tx_return(void)
{
bool enqueued = false;
while (!hw_ring_empty(&tx, TX_COUNT)) {
while (!hw_ring_empty(&tx)) {
/* Ensure that this buffer has been sent by the device */
volatile struct descriptor *d = &(tx.descr[tx.head]);
uint32_t idx = tx.head % tx.capacity;
volatile struct descriptor *d = &(tx.descr[idx]);
if (d->status & DESC_TXSTS_OWNBYDMA) {
break;
}
net_buff_desc_t buffer = tx.descr_mdata[tx.head];

THREAD_MEMORY_ACQUIRE();

net_buff_desc_t buffer = tx.descr_mdata[idx];
int err = net_enqueue_free(&tx_queue, buffer);
assert(!err);
enqueued = true;
tx.head = (tx.head + 1) % TX_COUNT;
tx.head++;
}

if (enqueued && net_require_signal_free(&tx_queue)) {
@@ -238,7 +246,9 @@ static void eth_setup(void)

assert((hw_ring_buffer_paddr & 0xFFFFFFFF) == hw_ring_buffer_paddr);

rx.capacity = RX_COUNT;
rx.descr = (volatile struct descriptor *)hw_ring_buffer_vaddr;
tx.capacity = TX_COUNT;
tx.descr = (volatile struct descriptor *)(hw_ring_buffer_vaddr + (sizeof(struct descriptor) * RX_COUNT));

/* Perform reset */
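As a final sanity check on the scheme, a host-side harness (standalone, not part of this commit) can drive the indices across the 32-bit boundary and confirm that full/empty detection and slot continuity hold throughout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CAPACITY 256u /* power of two, as the % mapping requires */

int main(void)
{
    /* Start close to UINT32_MAX so head and tail both overflow. */
    uint32_t head = UINT32_MAX - 100, tail = head;
    uint32_t expect = tail % CAPACITY; /* slot the next insert must land in */

    for (int round = 0; round < 4; round++) {
        while (tail - head != CAPACITY) {      /* fill to exactly full */
            assert(tail % CAPACITY == expect); /* slots stay contiguous */
            expect = (expect + 1) % CAPACITY;
            tail++;
        }
        while (tail - head != 0) {             /* drain to exactly empty */
            head++;
        }
    }
    printf("full/empty and slot mapping survive 32-bit wraparound\n");
    return 0;
}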
