Support for LFSR replacement policy and isolate functionality for cache levels. #20

Open · wants to merge 2 commits into base: master
63 changes: 59 additions & 4 deletions cachesim/backend.c
@@ -27,6 +27,9 @@ static PyMethodDef cachesim_methods[] = {

#endif

#define LFSR_SEED 1 // initial LFSR state (must be non-zero)
#define LFSR_POLY 0x9 // feedback mask of a maximal-length 4-bit Galois LFSR (0b1001)
long lfsr_state = LFSR_SEED;

#ifndef NO_PYTHON
static void Cache_dealloc(Cache* self) {
@@ -175,7 +178,6 @@ static int Cache__inject(Cache* self, cache_entry* entry) {
// Get cacheline id to be replaced according to replacement strategy
int replace_idx;
cache_entry replace_entry;

if(self->replacement_policy_id == 0 || self->replacement_policy_id == 1) {
// FIFO: replace end of queue
// LRU: replace end of queue
@@ -223,11 +225,20 @@ static int Cache__inject(Cache* self, cache_entry* entry) {
}
}
}
} else { // if(self->replacement_policy_id == 3) {
} else if (self->replacement_policy_id == 3) {
// RR: replace random element
replace_idx = rand() & (self->ways - 1);
replace_entry = self->placement[set_id*self->ways+replace_idx];
} else if (self->replacement_policy_id == 4) {
// LFSR: replace pseudo-randomly, driven by the LFSR state
replace_idx = lfsr_state & (self->ways - 1); // mask keeps the index in [0, ways); assumes ways is a power of two
replace_entry = self->placement[set_id * self->ways + replace_idx]; // entry selected for eviction
// Advance the Galois LFSR by one step
lfsr_state = (lfsr_state >> 1) ^ (-(lfsr_state & 1) & LFSR_POLY);
}
// if (replace_entry.dirty == 1){
// printf("%s REPLACED cl_id=%li invalid=%u dirty=%u\n",
// self->name, replace_entry.cl_id, replace_entry.invalid, replace_entry.dirty);}

// Replace other cacheline according to replacement strategy (using placement order as state)
self->placement[set_id*self->ways+replace_idx] = *entry;
@@ -342,7 +353,7 @@ int Cache__load(Cache* self, addr_range range) {

cache_entry entry = self->placement[set_id*self->ways+location];

if(self->replacement_policy_id == 0 || self->replacement_policy_id == 3) {
if(self->replacement_policy_id == 0 || self->replacement_policy_id == 3 || self->replacement_policy_id == 4) {
// FIFO: nothing to do
// RR: nothing to do
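// LFSR: nothing to do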
placement_idx = self->ways-1;
@@ -780,10 +791,53 @@ static PyObject* Cache_contains(Cache* self, PyObject *args, PyObject *kwds) {
Py_RETURN_FALSE;
}


static PyObject* Dirty_line_clid(Cache* self) {
PyObject* dirtyList = PyList_New(0);

if (dirtyList == NULL) {
// PyList_New has already set an exception
return NULL;
}

for (long i = 0; i < self->ways * self->sets; i++) {
// PySys_WriteStdout("%u inv=%u dirty=%u\n", i, self->placement[i].invalid, self->placement[i].dirty);
if (self->placement[i].dirty == 1) {
// Append the cache-line id of the dirty entry to the list
PyObject* clidObj = PyLong_FromLong(self->placement[i].cl_id);
if (clidObj == NULL) {
Py_DECREF(dirtyList);
return NULL;
}
if (PyList_Append(dirtyList, clidObj) == -1) {
Py_DECREF(clidObj);
Py_DECREF(dirtyList);
return NULL;
}
Py_DECREF(clidObj);
}
}

return dirtyList;
}

static PyObject* Cache_force_write_back(Cache* self) {
// PySys_WriteStdout("%s force_write_back\n", self->name);
int* dirtyArray = NULL;
long dirtyCount = 0;
for(long i=0; i<self->ways*self->sets; i++) {
// PySys_WriteStdout("%i inv=%i dirty=%i\n", i, self->placement[i].invalid, self->placement[i].dirty);
// PySys_WriteStdout("%u inv=%u dirty=%u\n", i, self->placement[i].invalid, self->placement[i].dirty);
if (self->placement[i].dirty == 1) {
// Grow the array and store the placement index of the dirty entry
int* tmp = realloc(dirtyArray, (dirtyCount + 1) * sizeof(int));
if (tmp == NULL) {
free(dirtyArray);
return PyErr_NoMemory();
}
dirtyArray = tmp;
dirtyArray[dirtyCount++] = (int)i;
}
// TODO merge with Cache__inject (last section)?
if(self->placement[i].invalid == 0 && self->placement[i].dirty == 1) {
self->EVICT.count++;
@@ -874,6 +928,7 @@ static PyMethodDef Cache_methods[] = {
{"force_write_back", (PyCFunction)Cache_force_write_back, METH_VARARGS, NULL},
{"reset_stats", (PyCFunction)Cache_reset_stats, METH_VARARGS, NULL},
{"count_invalid_entries", (PyCFunction)Cache_count_invalid_entries, METH_VARARGS, NULL},
{"dirty_lines", (PyCFunction)Dirty_line_clid, METH_VARARGS, NULL},
{"mark_all_invalid", (PyCFunction)Cache_mark_all_invalid, METH_VARARGS, NULL},

/* Sentinel */
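For readers unfamiliar with the replacement rule added above: the following is a minimal, self-contained Python sketch (not part of the patch) that mirrors the masking and the Galois LFSR update from Cache__inject. The helper name lfsr_indices is made up for illustration; like the backend, it assumes the number of ways is a power of two.

LFSR_SEED = 1
LFSR_POLY = 0x9

def lfsr_indices(ways, steps=20, state=LFSR_SEED):
    # Yield `steps` way indices the same way Cache__inject picks them:
    # mask the current state, then advance the Galois LFSR.
    for _ in range(steps):
        yield state & (ways - 1)                            # assumes ways is a power of two
        state = (state >> 1) ^ (-(state & 1) & LFSR_POLY)   # same update rule as in backend.c

print(list(lfsr_indices(ways=4)))

With LFSR_POLY = 0x9 the state walks through all 15 non-zero 4-bit values before repeating, so the eviction index sequence is deterministic but uniformly spread, unlike rand() used by the RR policy.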
27 changes: 19 additions & 8 deletions cachesim/cache.py
@@ -106,12 +106,13 @@ def reset_stats(self):
for c in self.levels(with_mem=False):
c.reset_stats()

def force_write_back(self):
def force_write_back(self, level=None):
"""Write all pending dirty lines back."""
# force_write_back() is acting recursive by it self, but multiple write-back first level
# caches are imaginable. Better safe than sorry:
for c in self.levels(with_mem=False):
c.force_write_back()
if level in c.name:
return c.force_write_back()

def load(self, addr, length=1):
"""
@@ -192,16 +193,26 @@ def levels(self, with_mem=True):
if with_mem:
yield self.main_memory

def count_invalid_entries(self):
def count_invalid_entries(self, level):
"""Sum of all invalid entry counts from cache levels."""
return sum([c.count_invalid_entries() for c in self.levels(with_mem=False)])
for c in self.levels(with_mem=False):
if level in c.name:
return c.count_invalid_entries()
# return sum([c.count_invalid_entries() for c in self.levels(with_mem=False)])

def mark_all_invalid(self):
def mark_all_invalid(self, level):
"""Mark all entries invalid and reset stats."""
for c in self.levels(with_mem=False):
c.mark_all_invalid()
if level in c.name:
c.mark_all_invalid()
self.reset_stats()

def dirty_cl_ids(self, level):
"""Return the ids of dirty cache lines in the matching cache level."""
for c in self.levels(with_mem=False):
if level in c.name:
return c.dirty_lines()


# def draw_array(self, start, width, height, block=1):
# """Return image representation of cache states."""
# length = (width*height)//block
Expand Down Expand Up @@ -243,7 +254,7 @@ def get_backend(cache):
class Cache(object):
"""Cache level object."""

replacement_policy_enum = {"FIFO": 0, "LRU": 1, "MRU": 2, "RR": 3}
replacement_policy_enum = {"FIFO": 0, "LRU": 1, "MRU": 2, "RR": 3, "LFSR": 4}

def __init__(self, name, sets, ways, cl_size,
replacement_policy="LRU",
@@ -258,7 +269,7 @@ def __init__(self, name, sets, ways, cl_size,
:param sets: total number of sets, if 1 cache will be full-associative
:param ways: total number of ways, if 1 cache will be direct mapped
:param cl_size: number of bytes that can be addressed individually
:param replacement_policy: FIFO, LRU (default), MRU or RR
:param replacement_policy: FIFO, LRU (default), MRU, RR or LFSR
:param write_back: if true (default), write back will be done on evict.
Otherwise write-through is used
:param write_allocate: if true (default), a load will be issued on a
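A short usage sketch of the features this PR adds, assuming the usual three-level setup from the pycachesim README; the level names, sizes, and addresses below are illustrative only, and the per-level arguments correspond to the methods changed in cache.py above.

from cachesim import CacheSimulator, Cache, MainMemory

mem = MainMemory()
l3 = Cache("L3", 20480, 16, 64, "LFSR")                         # new LFSR replacement policy
l2 = Cache("L2", 512, 8, 64, "LFSR", store_to=l3, load_from=l3)
l1 = Cache("L1", 64, 8, 64, "LFSR", store_to=l2, load_from=l2)
mem.load_to(l3)
mem.store_from(l3)
cs = CacheSimulator(l1, mem)

cs.load(2342, length=8)
cs.store(512, length=8)

print(cs.count_invalid_entries("L1"))  # invalid entries of L1 only (new `level` argument)
print(cs.dirty_cl_ids("L1"))           # cache-line ids of dirty L1 lines (new method)
cs.force_write_back("L1")              # write back only the matching level (new `level` argument)
cs.print_stats()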