Objcache: add locking option and drain functionality
This change introduces a new heap sub-type, called caching_heap,
that exports an additional callback to drain memory that has been
cached. The objcache heap is now a caching_heap.
In addition, the objcache heap now implements optional locking (selected
via a new boolean argument to allocate_objcache), which allows safe use
of the drain functionality on concurrently accessed heaps without the
need for an external lock.
Memory cleaner instances have been added in various places where an
objcache is used, so that caching heaps can be drained when the
system is low on memory.

Partially addresses #1494.
francescolavra committed Aug 6, 2022
1 parent fef47c8 commit ffd6df3
Showing 18 changed files with 246 additions and 103 deletions.
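The new interface is declared in src/runtime/heap/heap.h below and used the same way throughout these diffs. A minimal usage sketch, with general_heap, backing_heap, struct my_obj and RETAIN_COUNT invented as placeholders (allocate_objcache, caching_heap, cache_drain and CACHE_DRAIN_ALL are the interfaces touched by this commit):

    /* the new last argument selects built-in locking, so the cache can be
       drained concurrently with allocations without an external lock */
    caching_heap cache = allocate_objcache(general_heap, backing_heap,
                                           sizeof(struct my_obj), PAGESIZE, true);
    assert(cache != INVALID_ADDRESS);

    /* allocation still goes through the plain heap interface, via a cast */
    struct my_obj *o = allocate((heap)cache, sizeof(struct my_obj));
    deallocate((heap)cache, o, sizeof(struct my_obj));

    /* under memory pressure: free up to 2 MB of cached memory, but keep at
       least RETAIN_COUNT objects' worth cached; returns the bytes drained */
    bytes drained = cache_drain(cache, 2 * MB,
                                RETAIN_COUNT * sizeof(struct my_obj));

    /* or drop everything that is cached beyond the retained amount */
    cache_drain(cache, CACHE_DRAIN_ALL, 0);
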
4 changes: 4 additions & 0 deletions src/config.h
@@ -70,6 +70,10 @@
/* don't go below this minimum amount of physical memory when inflating balloon */
#define BALLOON_MEMORY_MINIMUM (16 * MB)

/* Number of objects that should be retained in the cache when a cache drain is requested */
#define NET_RX_BUFFERS_RETAIN 64
#define STORAGE_REQUESTS_RETAIN 64

/* must be large enough for vendor code that use malloc/free interface */
#define MAX_MCACHE_ORDER 16

7 changes: 4 additions & 3 deletions src/drivers/acpi.c
@@ -491,8 +491,8 @@ ACPI_STATUS AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *init_val, ACPI
ACPI_STATUS AcpiOsCreateCache(char *cache_name, UINT16 object_size, UINT16 max_depth,
ACPI_CACHE_T **return_cache)
{
heap h = allocate_objcache(acpi_heap, (heap)heap_linear_backed(get_kernel_heaps()), object_size,
PAGESIZE);
caching_heap h = allocate_objcache(acpi_heap, (heap)heap_linear_backed(get_kernel_heaps()),
object_size, PAGESIZE, false);
if (h == INVALID_ADDRESS)
return AE_NO_MEMORY;
*return_cache = (ACPI_CACHE_T *)h;
@@ -515,7 +515,8 @@ ACPI_STATUS AcpiOsReleaseObject(ACPI_CACHE_T *cache, void *object)

ACPI_STATUS AcpiOsPurgeCache(ACPI_CACHE_T *cache)
{
/* not implemented */
caching_heap ch = (caching_heap)cache;
cache_drain(ch, CACHE_DRAIN_ALL, 0);
return AE_OK;
}

6 changes: 4 additions & 2 deletions src/hyperv/netvsc/hv_net_vsc.h
@@ -958,6 +958,8 @@ typedef struct netvsc_packet_ {
/*
* Device-specific softc structure
*/
declare_closure_struct(0, 1, u64, hn_mem_cleaner,
u64, clean_bytes);
typedef struct hn_softc {
heap general;
heap contiguous; /* physically */
@@ -968,8 +970,8 @@ typedef struct hn_softc {
/* lwIP */
struct netif *netif;
u16 rxbuflen;
heap rxbuffers;
struct spinlock rx_buflock;
caching_heap rxbuffers;
closure_struct(hn_mem_cleaner, mem_cleaner);
} hn_softc_t;


20 changes: 12 additions & 8 deletions src/hyperv/netvsc/netvsc.c
@@ -53,16 +53,13 @@ static void
receive_buffer_release(struct pbuf *p)
{
xpbuf x = (void *)p;
u64 flags = spin_lock_irq(&x->hn->rx_buflock);
deallocate(x->hn->rxbuffers, x, x->hn->rxbuflen + sizeof(struct xpbuf));
spin_unlock_irq(&x->hn->rx_buflock, flags);
deallocate((heap)x->hn->rxbuffers, x, x->hn->rxbuflen + sizeof(struct xpbuf));
}

static xpbuf
receive_buffer_alloc(hn_softc_t *hn)
{
u64 flags = spin_lock_irq(&hn->rx_buflock);
xpbuf x = allocate(hn->rxbuffers, sizeof(struct xpbuf) + hn->rxbuflen);
xpbuf x = allocate((heap)hn->rxbuffers, sizeof(struct xpbuf) + hn->rxbuflen);
assert(x != INVALID_ADDRESS);
x->hn = hn;
x->p.custom_free_function = receive_buffer_release;
@@ -73,7 +70,6 @@ receive_buffer_alloc(hn_softc_t *hn)
&x->p,
x+1,
hn->rxbuflen);
spin_unlock_irq(&hn->rx_buflock, flags);
return x;
}

@@ -222,6 +218,14 @@ low_level_output(struct netif *netif, struct pbuf *p)
return ERR_OK;
}

define_closure_function(0, 1, u64, hn_mem_cleaner,
u64, clean_bytes)
{
hn_softc_t *hn = struct_from_field(closure_self(), hn_softc_t *, mem_cleaner);
return cache_drain(hn->rxbuffers, clean_bytes,
NET_RX_BUFFERS_RETAIN * (sizeof(struct xpbuf) + hn->rxbuflen));
}

static err_t
vmxif_init(struct netif *netif)
{
@@ -257,8 +261,7 @@ netvsc_attach(kernel_heaps kh, hv_device* device)

hn->rxbuflen = NETVSC_RX_MAXSEGSIZE;
hn->rxbuffers = allocate_objcache(hn->general, hn->contiguous,
hn->rxbuflen + sizeof(struct xpbuf), PAGESIZE_2M);
spin_lock_init(&hn->rx_buflock);
hn->rxbuflen + sizeof(struct xpbuf), PAGESIZE_2M, true);

struct netif *netif = allocate(h, sizeof(struct netif));
assert(netif != INVALID_ADDRESS);
@@ -280,6 +283,7 @@ netvsc_attach(kernel_heaps kh, hv_device* device)
ethernet_input);
lwip_unlock();

mm_register_mem_cleaner(init_closure(&hn->mem_cleaner, hn_mem_cleaner));
netvsc_debug("%s: hwaddr %02x:%02x:%02x:%02x:%02x:%02x", __func__,
netif->hwaddr[0], netif->hwaddr[1], netif->hwaddr[2],
netif->hwaddr[3], netif->hwaddr[4], netif->hwaddr[5]);
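The memory-cleaner contract used above (and again in the storvsc driver below) is that the closure receives a byte budget and returns the number of bytes actually freed; the retain argument of cache_drain is likewise a byte count, which is why the per-object retain constants from src/config.h are multiplied by the object size. Condensed into a hypothetical driver with its own receive-buffer cache (my_softc, my_mem_cleaner, MY_RETAIN and MY_OBJ_SIZE are made-up names; the closure macros, cache_drain and mm_register_mem_cleaner are the interfaces used in the diff above):

    declare_closure_struct(0, 1, u64, my_mem_cleaner,
                           u64, clean_bytes);
    struct my_softc {
        caching_heap rxbuffers;                      /* locking objcache */
        closure_struct(my_mem_cleaner, mem_cleaner);
    };

    define_closure_function(0, 1, u64, my_mem_cleaner,
                            u64, clean_bytes)
    {
        struct my_softc *sc = struct_from_field(closure_self(), struct my_softc *,
                                                mem_cleaner);
        /* drain at most clean_bytes, keeping MY_RETAIN objects' worth cached */
        return cache_drain(sc->rxbuffers, clean_bytes, MY_RETAIN * MY_OBJ_SIZE);
    }

    /* at attach time */
    mm_register_mem_cleaner(init_closure(&sc->mem_cleaner, my_mem_cleaner));
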
27 changes: 17 additions & 10 deletions src/hyperv/storvsc/storvsc.c
@@ -143,12 +143,14 @@ struct hv_storvsc_request {
volatile boolean channel_wait_msg_flag;
};

declare_closure_struct(0, 1, u64, storvsc_mem_cleaner,
u64, clean_bytes);
struct storvsc_softc {
heap general;
heap contiguous; /* physically */

heap hcb_objcache;
struct spinlock mem_lock;
caching_heap hcb_objcache;
closure_struct(storvsc_mem_cleaner, mem_cleaner);

struct list hcb_queue;
struct spinlock queue_lock;
@@ -643,19 +645,16 @@ static void storvsc_init_requests(struct storvsc_softc *sc)

static void storvsc_hcb_dealloc(struct storvsc_softc *sc, struct storvsc_hcb *hcb)
{
u64 flags = spin_lock_irq(&sc->mem_lock);
if (hcb->alloc_len) {
deallocate(sc->contiguous, hcb->data, pad(hcb->alloc_len, sc->contiguous->pagesize));
}
deallocate(sc->hcb_objcache, hcb, sizeof(struct storvsc_hcb));
spin_unlock_irq(&sc->mem_lock, flags);
deallocate((heap)sc->hcb_objcache, hcb, sizeof(struct storvsc_hcb));
}

static struct storvsc_hcb *storvsc_hcb_alloc(struct storvsc_softc* sc, u16 target, u16 lun, u8 cmd)
{
int alloc_len = scsi_data_len(cmd);
u64 flags = spin_lock_irq(&sc->mem_lock);
struct storvsc_hcb *hcb = allocate_zero(sc->hcb_objcache, sizeof(struct storvsc_hcb));
struct storvsc_hcb *hcb = allocate_zero((heap)sc->hcb_objcache, sizeof(struct storvsc_hcb));
assert(hcb != INVALID_ADDRESS);
if (alloc_len) {
hcb->data = allocate(sc->contiguous, alloc_len);
@@ -665,7 +664,6 @@ static struct storvsc_hcb *storvsc_hcb_alloc(struct storvsc_softc* sc, u16 targe
hcb->data = 0;
hcb->alloc_len = 0;
}
spin_unlock_irq(&sc->mem_lock, flags);
hcb->cdb[0] = cmd;
return hcb;
}
@@ -910,6 +908,15 @@ static void storvsc_report_luns(struct storvsc_softc *sc, u16 target)
storvsc_action(sc, r, target, 0);
}

define_closure_function(0, 1, u64, storvsc_mem_cleaner,
u64, clean_bytes)
{
struct storvsc_softc *sc = struct_from_field(closure_self(), struct storvsc_softc *,
mem_cleaner);
return cache_drain(sc->hcb_objcache, clean_bytes,
STORAGE_REQUESTS_RETAIN * sizeof(struct storvsc_hcb));
}

/**
* @brief StorVSC attach function
*
@@ -943,8 +950,8 @@ static status storvsc_attach(kernel_heaps kh, hv_device* device, storage_attach
spin_lock_init(&sc->queue_lock);
// setup hcb cache
sc->hcb_objcache = allocate_objcache(sc->general, sc->contiguous,
sizeof(struct storvsc_hcb), PAGESIZE_2M);
spin_lock_init(&sc->mem_lock);
sizeof(struct storvsc_hcb), PAGESIZE_2M, true);
mm_register_mem_cleaner(init_closure(&sc->mem_cleaner, storvsc_mem_cleaner));
sc->sa = a;
sc->disks = allocate_vector(h, 1);
spin_lock_init(&sc->disks_lock);
6 changes: 2 additions & 4 deletions src/kernel/pagecache.c
@@ -1652,10 +1652,8 @@ void init_pagecache(heap general, heap contiguous, heap physical, u64 pagesize)
assert(pc->zero_page != INVALID_ADDRESS);

#ifdef KERNEL
pc->completions =
locking_heap_wrapper(general, allocate_objcache(general, contiguous,
sizeof(struct page_completion),
PAGESIZE));
pc->completions = (heap)allocate_objcache(general, contiguous, sizeof(struct page_completion),
PAGESIZE, true);
assert(pc->completions != INVALID_ADDRESS);
spin_lock_init(&pc->state_lock);
spin_lock_init(&pc->global_lock);
4 changes: 2 additions & 2 deletions src/net/netsyscall.c
@@ -2346,8 +2346,8 @@ boolean netsyscall_init(unix_heaps uh, tuple cfg)
else
so_rcvbuf = DEFAULT_SO_RCVBUF;
kernel_heaps kh = (kernel_heaps)uh;
heap socket_cache = locking_heap_wrapper(heap_general(kh), allocate_objcache(heap_general(kh),
(heap)heap_linear_backed(kh), sizeof(struct netsock), PAGESIZE));
caching_heap socket_cache = allocate_objcache(heap_general(kh), (heap)heap_linear_backed(kh),
sizeof(struct netsock), PAGESIZE, true);
if (socket_cache == INVALID_ADDRESS)
return false;
uh->socket_cache = socket_cache;
15 changes: 12 additions & 3 deletions src/runtime/heap/heap.h
@@ -19,6 +19,15 @@ typedef struct backed_heap {
#define alloc_map(__bh, __l, __p) ((__bh)->alloc_map(__bh, __l, __p))
#define dealloc_unmap(__bh, __v, __p, __l) ((__bh)->dealloc_unmap(__bh, __v, __p, __l))

typedef struct caching_heap {
struct heap h;
bytes (*drain)(struct caching_heap *ch, bytes len, bytes retain);
} *caching_heap;

#define CACHE_DRAIN_ALL ((bytes)-1)

#define cache_drain(__ch, __l, __r) ((__ch)->drain(__ch, __l, __r))

heap debug_heap(heap m, heap p);
heap mem_debug(heap m, heap p, u64 padsize);
heap mem_debug_objcache(heap meta, heap parent, u64 objsize, u64 pagesize);
@@ -44,9 +53,9 @@ static inline value heap_management(heap h)
}

heap wrap_freelist(heap meta, heap parent, bytes size);
heap allocate_objcache(heap meta, heap parent, bytes objsize, bytes pagesize);
heap allocate_wrapped_objcache(heap meta, heap parent, bytes objsize, bytes pagesize, heap wrapper);
heap allocate_objcache_preallocated(heap meta, heap parent, bytes objsize, bytes pagesize, u64 prealloc_count, boolean prealloc_only);
caching_heap allocate_objcache(heap meta, heap parent, bytes objsize, bytes pagesize, boolean locking);
caching_heap allocate_wrapped_objcache(heap meta, heap parent, bytes objsize, bytes pagesize, heap wrapper);
caching_heap allocate_objcache_preallocated(heap meta, heap parent, bytes objsize, bytes pagesize, u64 prealloc_count, boolean prealloc_only);
boolean objcache_validate(heap h);
heap objcache_from_object(u64 obj, bytes parent_pagesize);
heap allocate_mcache(heap meta, heap parent, int min_order, int max_order, bytes pagesize);
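Since struct caching_heap simply embeds a struct heap and adds the drain function pointer, a caching heap only has to supply that one extra callback. A hypothetical sub-type might be wired up roughly as follows (my_cache and its bookkeeping are invented for illustration; only the caching_heap layout, the drain signature and CACHE_DRAIN_ALL come from the header above):

    typedef struct my_cache {
        struct caching_heap ch; /* first member, so a my_cache is usable as a caching_heap */
        heap parent;            /* backing heap that drained pages are returned to */
        /* ... list of cached (fully free) pages, counters, optional lock ... */
    } *my_cache;

    static bytes my_cache_drain(caching_heap h, bytes len, bytes retain)
    {
        my_cache c = (my_cache)h;
        bytes drained = 0;
        /* return free cached pages to c->parent until len bytes have been drained
           or only retain bytes of cache remain; len == CACHE_DRAIN_ALL ((bytes)-1)
           means no upper bound on how much may be drained */
        return drained;
    }

    /* at construction time: c->ch.drain = my_cache_drain; */
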
2 changes: 1 addition & 1 deletion src/runtime/heap/mcache.c
@@ -297,7 +297,7 @@ heap allocate_mcache(heap meta, heap parent, int min_order, int max_order, bytes
#if defined(MEMDEBUG_MCACHE) || defined(MEMDEBUG_ALL)
heap h = mem_debug_objcache(meta, parent, obj_size, pagesize);
#else
heap h = allocate_objcache(meta, parent, obj_size, pagesize);
caching_heap h = allocate_objcache(meta, parent, obj_size, pagesize, false);
#endif
#ifdef MCACHE_DEBUG
rputs(" - cache size ");
2 changes: 1 addition & 1 deletion src/runtime/heap/mem_debug.c
@@ -173,7 +173,7 @@ heap mem_debug_objcache(heap meta, heap parent, u64 objsize, u64 pagesize)
u64 padding = objsize >= PAGESIZE ? PAGESIZE : PAD_MIN;

newsize = objsize + padding * 2;
mdh->parent = allocate_wrapped_objcache(meta, parent, newsize, pagesize, &mdh->h);
mdh->parent = (heap)allocate_wrapped_objcache(meta, parent, newsize, pagesize, &mdh->h);
mdh->h.pagesize = objsize;
mdh->h.alloc = mem_debug_alloc;
mdh->h.dealloc = mem_debug_dealloc;