69 changes: 51 additions & 18 deletions src/drivers/virtio/virtqueue/mod.rs
@@ -512,27 +512,60 @@ pub enum BufferType {
Indirect,
}

/// MemPool makes it easy to control, request, and provide memory for virtqueues.
///
/// The struct is initialized with a limit of free-running "tracked"
/// memory descriptor IDs. Since virtqueues only allow a limited number of descriptors in their queue,
/// the independent queues can use this to control the number of descriptors.
struct MemPool {
pool: Vec<u16>,
limit: u16,
}
mod index_alloc {
use alloc::boxed::Box;

use align_address::Align;

impl MemPool {
/// Returns a given id to the id pool
fn ret_id(&mut self, id: u16) {
self.pool.push(id);
/// This type allows allocating indices.
///
/// The indices can be used as descriptor IDs.
pub struct IndexAlloc {
/// Zero bits are available.
bits: Box<[usize]>,
}

/// Returns a new instance, with a pool of the specified size.
fn new(size: u16) -> MemPool {
MemPool {
pool: (0..size).collect(),
limit: size,
const USIZE_BITS: usize = usize::BITS as usize;

impl IndexAlloc {
pub fn new(len: usize) -> Self {
let usizes = len.div_ceil(USIZE_BITS);
let extra_bits = len % USIZE_BITS;

let mut bits = vec![0; usizes].into_boxed_slice();

if extra_bits != 0 {
*bits.last_mut().unwrap() = usize::MAX << extra_bits;
}

Self { bits }
}

#[inline]
pub fn allocate(&mut self) -> Option<usize> {
for (word_index, word) in self.bits.iter_mut().enumerate() {
let trailing_ones = word.trailing_ones();
if trailing_ones < usize::BITS {
let mask = 1 << trailing_ones;
*word |= mask;
let index = word_index * USIZE_BITS + usize::try_from(trailing_ones).unwrap();
return Some(index);
}
}

None
}

#[inline]
pub unsafe fn deallocate(&mut self, index: usize) {
let word_index = index / USIZE_BITS;
let bit = index % USIZE_BITS;
let mask = 1 << bit;

debug_assert!(self.bits[word_index] & mask == mask);
unsafe {
*self.bits.get_unchecked_mut(word_index) &= !mask;
}
}
}
}
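
To see the allocation scheme in isolation: below is a minimal, standalone sketch (not part of this diff) that mirrors the `IndexAlloc` logic above — a set bit means the index is allocated, `trailing_ones` locates the lowest free index, and the out-of-range bits of the last word are pre-marked as allocated so they can never be handed out. The name `BitmapAlloc` is illustrative only.

```rust
// Standalone illustration of the bitmap-based index allocator.
struct BitmapAlloc {
    bits: Vec<usize>, // set bit = allocated
}

impl BitmapAlloc {
    fn new(len: usize) -> Self {
        let words = len.div_ceil(usize::BITS as usize);
        let mut bits = vec![0usize; words];
        let extra = len % usize::BITS as usize;
        if extra != 0 {
            // Mark the bits beyond `len` in the last word as permanently allocated.
            *bits.last_mut().unwrap() = usize::MAX << extra;
        }
        Self { bits }
    }

    fn allocate(&mut self) -> Option<usize> {
        for (i, word) in self.bits.iter_mut().enumerate() {
            let ones = word.trailing_ones();
            if ones < usize::BITS {
                // The lowest zero bit is the lowest free index; claim it.
                *word |= 1 << ones;
                return Some(i * usize::BITS as usize + ones as usize);
            }
        }
        None
    }

    fn deallocate(&mut self, index: usize) {
        // Clear the bit so the index can be handed out again.
        self.bits[index / usize::BITS as usize] &= !(1 << (index % usize::BITS as usize));
    }
}

fn main() {
    let mut alloc = BitmapAlloc::new(4);
    assert_eq!(alloc.allocate(), Some(0));
    assert_eq!(alloc.allocate(), Some(1));
    alloc.deallocate(0);
    // Freed indices are reused before untouched ones.
    assert_eq!(alloc.allocate(), Some(0));
    assert_eq!(alloc.allocate(), Some(2));
    assert_eq!(alloc.allocate(), Some(3));
    // All four indices are now in use.
    assert_eq!(alloc.allocate(), None);
}
```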
20 changes: 10 additions & 10 deletions src/drivers/virtio/virtqueue/packed.rs
@@ -22,9 +22,8 @@ use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::{
AvailBufferToken, BufferType, MemPool, TransferToken, UsedBufferToken, Virtq, VirtqPrivate,
};
use super::index_alloc::IndexAlloc;
use super::{AvailBufferToken, BufferType, TransferToken, UsedBufferToken, Virtq, VirtqPrivate};
use crate::arch::mm::paging::{BasePageSize, PageSize};
use crate::mm::device_alloc::DeviceAlloc;

@@ -69,9 +68,8 @@ struct DescriptorRing {
/// See Virtio specification v1.1. - 2.7.1
drv_wc: bool,
dev_wc: bool,
/// Memory pool controls the amount of "free floating" descriptors
/// See [MemPool] docs for detail.
mem_pool: MemPool,
/// This allocates available descriptors.
indexes: IndexAlloc,
}

impl DescriptorRing {
@@ -92,7 +90,7 @@ impl DescriptorRing {
poll_index: 0,
drv_wc: true,
dev_wc: true,
mem_pool: MemPool::new(size),
indexes: IndexAlloc::new(size.into()),
}
}

@@ -186,13 +184,13 @@ impl DescriptorRing {
/// Returns an initialized write controller in order
/// to write the queue correctly.
fn get_write_ctrler(&mut self) -> Result<WriteCtrl<'_>, VirtqError> {
let desc_id = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?;
let desc_id = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
Ok(WriteCtrl {
start: self.write_index,
position: self.write_index,
modulo: u16::try_from(self.ring.len()).unwrap(),
first_flags: DescF::empty(),
buff_id: desc_id,
buff_id: u16::try_from(desc_id).unwrap(),

desc_ring: self,
})
@@ -303,7 +301,9 @@ impl ReadCtrl<'_> {
for _ in 0..tkn.num_consuming_descr() {
self.incrmt();
}
self.desc_ring.mem_pool.ret_id(buff_id);
unsafe {
self.desc_ring.indexes.deallocate(buff_id.into());
}

Some((tkn, write_len))
} else {
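
For context on how the packed ring uses the allocator: a descriptor ID is claimed when a buffer is made available (`get_write_ctrler`) and released only after the device marks the buffer as used (`ReadCtrl`). The hypothetical sketch below shows that lifecycle with illustrative names, not the driver's actual API; since virtqueue sizes are capped at 2^15 by the virtio specification, the conversion to `u16` cannot fail.

```rust
// Hypothetical, simplified descriptor-ID lifecycle for a ring.
struct MiniRing {
    in_use: Vec<bool>, // stands in for the bitmap allocator: true = allocated
}

impl MiniRing {
    fn submit(&mut self) -> Option<u16> {
        // Claim the lowest free descriptor ID when making a buffer available.
        let id = self.in_use.iter().position(|&used| !used)?;
        self.in_use[id] = true;
        // Queue sizes are at most 2^15, so the ID always fits into a u16.
        Some(u16::try_from(id).unwrap())
    }

    fn complete(&mut self, buff_id: u16) {
        // The device reported the buffer as used; the ID may be reused.
        self.in_use[usize::from(buff_id)] = false;
    }
}

fn main() {
    let mut ring = MiniRing { in_use: vec![false; 4] };
    let id = ring.submit().unwrap();
    // ... the device processes the buffer ...
    ring.complete(id);
    // The freed ID is available again.
    assert_eq!(ring.submit(), Some(id));
}
```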
32 changes: 17 additions & 15 deletions src/drivers/virtio/virtqueue/split.rs
@@ -17,16 +17,15 @@ use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
#[cfg(feature = "pci")]
use super::super::transport::pci::{ComCfg, NotifCfg, NotifCtrl};
use super::error::VirtqError;
use super::{
AvailBufferToken, BufferType, MemPool, TransferToken, UsedBufferToken, Virtq, VirtqPrivate,
};
use super::index_alloc::IndexAlloc;
use super::{AvailBufferToken, BufferType, TransferToken, UsedBufferToken, Virtq, VirtqPrivate};
use crate::arch::memory_barrier;
use crate::mm::device_alloc::DeviceAlloc;

struct DescrRing {
read_idx: u16,
token_ring: Box<[Option<Box<TransferToken<virtq::Desc>>>]>,
mem_pool: MemPool,
indexes: IndexAlloc,

descr_table_cell: Box<UnsafeCell<[MaybeUninit<virtq::Desc>]>, DeviceAlloc>,
avail_ring_cell: Box<UnsafeCell<virtq::Avail>, DeviceAlloc>,
@@ -52,8 +51,8 @@ impl DescrRing {
if let Some(ctrl_desc) = tkn.ctrl_desc.as_ref() {
let descriptor = SplitVq::indirect_desc(ctrl_desc.as_ref());

index = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?;
self.descr_table_mut()[usize::from(index)] = MaybeUninit::new(descriptor);
index = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
self.descr_table_mut()[index] = MaybeUninit::new(descriptor);
} else {
let mut rev_all_desc_iter = SplitVq::descriptor_iter(&tkn.buff_tkn)?.rev();

@@ -62,25 +61,26 @@
// If the [AvailBufferToken] is empty, we panic
let descriptor = rev_all_desc_iter.next().unwrap();

index = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?;
self.descr_table_mut()[usize::from(index)] = MaybeUninit::new(descriptor);
index = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
self.descr_table_mut()[index] = MaybeUninit::new(descriptor);
}
for mut descriptor in rev_all_desc_iter {
// We have not updated `index` yet, so it is at this point the index of the previous descriptor that had been written.
descriptor.next = le16::from(index);
descriptor.next = le16::from_ne(index.try_into().unwrap());

index = self.mem_pool.pool.pop().ok_or(VirtqError::NoDescrAvail)?;
self.descr_table_mut()[usize::from(index)] = MaybeUninit::new(descriptor);
index = self.indexes.allocate().ok_or(VirtqError::NoDescrAvail)?;
self.descr_table_mut()[index] = MaybeUninit::new(descriptor);
}
// At this point, `index` is the index of the last element of the reversed iterator,
// thus the head of the descriptor chain.
}

self.token_ring[usize::from(index)] = Some(Box::new(tkn));
self.token_ring[index] = Some(Box::new(tkn));

let len = self.token_ring.len();
let idx = self.avail_ring_mut().idx.to_ne();
self.avail_ring_mut().ring_mut(true)[idx as usize % len] = index.into();
self.avail_ring_mut().ring_mut(true)[idx as usize % len] =
le16::from_ne(index.try_into().unwrap());

memory_barrier();
let next_idx = idx.wrapping_add(1);
@@ -105,7 +105,9 @@ impl DescrRing {
// We return the indices of the now freed ring slots back to `mem_pool.`
let mut id_ret_idx = u16::try_from(used_elem.id.to_ne()).unwrap();
loop {
self.mem_pool.ret_id(id_ret_idx);
unsafe {
self.indexes.deallocate(id_ret_idx.into());
}
let cur_chain_elem =
unsafe { self.descr_table_mut()[usize::from(id_ret_idx)].assume_init() };
if cur_chain_elem.flags.contains(virtq::DescF::NEXT) {
@@ -289,7 +291,7 @@ impl SplitVq {
.take(size.into())
.collect::<Vec<_>>()
.into_boxed_slice(),
mem_pool: MemPool::new(size),
indexes: IndexAlloc::new(size.into()),

descr_table_cell,
avail_ring_cell,
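
The reverse iteration in `push()` above exists because a descriptor's `next` field has to name a slot that has already been written: walking the buffer's descriptors back to front means `index` always holds the slot written in the previous iteration, and the last index allocated becomes the head of the chain. Here is a self-contained, hypothetical sketch of that pattern (illustrative names; the `allocate` closure stands in for `IndexAlloc::allocate`):

```rust
// Hypothetical sketch of building a descriptor chain back to front.
#[derive(Clone, Copy, Default)]
struct Desc {
    next: u16,
    has_next: bool,
}

fn build_chain(
    table: &mut [Option<Desc>],
    mut allocate: impl FnMut() -> usize, // stands in for IndexAlloc::allocate
    descs: Vec<Desc>,
) -> usize {
    let mut iter = descs.into_iter().rev();

    // The tail of the chain is written first and has no successor.
    let mut index = allocate();
    table[index] = Some(iter.next().unwrap());

    for mut desc in iter {
        // `index` still refers to the descriptor written in the previous
        // iteration, which becomes this descriptor's successor.
        desc.next = u16::try_from(index).unwrap();
        desc.has_next = true;
        index = allocate();
        table[index] = Some(desc);
    }

    // `index` now names the slot written last, i.e. the head of the chain.
    index
}

fn main() {
    let mut table: Vec<Option<Desc>> = vec![None; 8];
    let mut next_free = 0;
    let head = build_chain(
        &mut table,
        || {
            let id = next_free;
            next_free += 1;
            id
        },
        vec![Desc::default(); 3],
    );
    // Three descriptors were placed in slots 0, 1, 2; the head is the one written last.
    assert_eq!(head, 2);
}
```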