1. Flush the cache once the batch write is done.
2. Put an empty vec as the children when inserting a new hash.
jackzhhuang committed Nov 29, 2024
1 parent 327649c commit 216d7b4
Showing 2 changed files with 33 additions and 10 deletions.
flexidag/src/consensusdb/access.rs (7 additions, 0 deletions)
@@ -108,6 +108,13 @@ where
         Ok(())
     }
 
+    pub fn flush_cache(&self, data: &[(S::Key, S::Value)]) -> Result<(), StoreError> {
+        for (key, value) in data {
+            self.cache.insert(key.clone(), value.clone());
+        }
+        Ok(())
+    }
+
     /// Write directly from an iterator and do not cache any data. NOTE: this action also clears the cache
     pub fn write_many_without_cache(
         &self,
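
The new flush_cache helper only copies key/value pairs into the in-memory cache and never touches the database; it is meant to be called after a batch write has already succeeded, so the cache never gets ahead of the persisted data. Below is a minimal, self-contained sketch of that ordering; CachedStore, write_batch, and the String/Vec<u8> key and value types are illustrative stand-ins, not the crate's actual CachedDbAccess API.

use std::collections::HashMap;
use std::sync::Mutex;

#[derive(Debug)]
struct StoreError(String);

/// Toy stand-in for a store with an in-memory cache in front of a
/// persistent backend (a HashMap plays the role of RocksDB here).
struct CachedStore {
    cache: Mutex<HashMap<String, Vec<u8>>>,
    backend: Mutex<HashMap<String, Vec<u8>>>,
}

impl CachedStore {
    fn new() -> Self {
        Self {
            cache: Mutex::new(HashMap::new()),
            backend: Mutex::new(HashMap::new()),
        }
    }

    /// Write a whole batch to the backend without touching the cache.
    fn write_batch(&self, data: &[(String, Vec<u8>)]) -> Result<(), StoreError> {
        let mut backend = self.backend.lock().map_err(|e| StoreError(e.to_string()))?;
        for (key, value) in data {
            backend.insert(key.clone(), value.clone());
        }
        Ok(())
    }

    /// Mirror already-persisted pairs into the cache, like flush_cache above.
    fn flush_cache(&self, data: &[(String, Vec<u8>)]) -> Result<(), StoreError> {
        let mut cache = self.cache.lock().map_err(|e| StoreError(e.to_string()))?;
        for (key, value) in data {
            cache.insert(key.clone(), value.clone());
        }
        Ok(())
    }
}

fn main() -> Result<(), StoreError> {
    let store = CachedStore::new();
    let rows = vec![("block-a".to_string(), b"parents".to_vec())];

    // Persist first; populate the cache only after the batch write succeeded,
    // so the cache never holds rows the database may have rejected.
    store.write_batch(&rows)?;
    store.flush_cache(&rows)?;
    Ok(())
}
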
flexidag/src/consensusdb/consensus_relations.rs (26 additions, 10 deletions)
@@ -119,6 +119,8 @@ impl RelationsStore for DbRelationsStore {
         }
 
         let mut parent_to_children = HashMap::new();
+        parent_to_children.insert(hash, vec![]);
+
         for parent in parents.iter().cloned() {
             let mut children = match self.get_children(parent) {
                 Ok(children) => (*children).clone(),
@@ -128,10 +130,7 @@ impl RelationsStore for DbRelationsStore {
                 },
             };
             children.push(hash);
-            parent_to_children.insert(
-                parent.to_vec(),
-                bcs_ext::to_bytes(&children).map_err(|e| StoreError::EncodeError(e.to_string()))?,
-            );
+            parent_to_children.insert(parent, children);
         }
 
         let batch = WriteBatchWithColumn {
@@ -141,25 +140,42 @@
                     row_data: WriteBatch::new_with_rows(vec![(
                         hash.to_vec(),
                         WriteOp::Value(
-                            bcs_ext::to_bytes(&parents)
-                                .map_err(|e| StoreError::EncodeError(e.to_string()))?,
+                            <Arc<Vec<Hash>> as ValueCodec<RelationParent>>::encode_value(&parents)?,
                         ),
                     )]),
                 },
                 WriteBatchData {
                     column: CHILDREN_CF.to_string(),
                     row_data: WriteBatch::new_with_rows(
                         parent_to_children
-                            .into_iter()
-                            .map(|(key, value)| (key, WriteOp::Value(value)))
-                            .collect(),
+                            .iter()
+                            .map(|(key, value)| {
+                                std::result::Result::Ok((
+                                    key.to_vec(),
+                                    WriteOp::Value(<Arc<Vec<Hash>> as ValueCodec<
+                                        RelationChildren,
+                                    >>::encode_value(
+                                        &Arc::new(value.clone())
+                                    )?),
+                                ))
+                            })
+                            .collect::<std::result::Result<Vec<_>, StoreError>>()?,
                     ),
                 },
             ],
         };
         self.db
-            .write_batch_with_column(batch)
+            .write_batch_with_column_sync(batch)
             .map_err(|e| StoreError::DBIoError(e.to_string()))?;
 
+        self.parents_access.flush_cache(&[(hash, parents)])?;
+        self.children_access.flush_cache(
+            &parent_to_children
+                .into_iter()
+                .map(|(key, value)| (key, BlockHashes::new(value)))
+                .collect::<Vec<_>>(),
+        )?;
+
         Ok(())
     }
 }
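
The second change pre-seeds parent_to_children with an empty children list for the block being inserted, so the batch also writes a row for the new hash itself and a later get_children on that hash finds an empty list instead of a missing key. A toy sketch of that behaviour follows; the in-memory Relations type, Hash as u64, and the String error are illustrative stand-ins for the RocksDB-backed store.

use std::collections::HashMap;

type Hash = u64; // stand-in for the real block hash type

/// Toy relations table (illustrative only; the real store is RocksDB-backed).
struct Relations {
    children: HashMap<Hash, Vec<Hash>>,
}

impl Relations {
    fn new() -> Self {
        Self { children: HashMap::new() }
    }

    /// Mirrors the fixed insert logic: the new hash gets an explicit empty
    /// children list, then the hash is appended to each parent's children.
    fn insert(&mut self, hash: Hash, parents: &[Hash]) {
        self.children.insert(hash, vec![]);
        for parent in parents {
            self.children.entry(*parent).or_default().push(hash);
        }
    }

    /// Without the empty-vec seeding, a freshly inserted leaf would have no
    /// row at all and this lookup would report a missing key.
    fn get_children(&self, hash: Hash) -> Result<&Vec<Hash>, String> {
        self.children
            .get(&hash)
            .ok_or_else(|| format!("key not found: {hash}"))
    }
}

fn main() {
    let mut relations = Relations::new();
    relations.insert(1, &[]);  // genesis-like block with no parents
    relations.insert(2, &[1]); // child of block 1

    assert_eq!(relations.get_children(1).unwrap(), &vec![2]);
    // Block 2 is a leaf: the pre-seeded empty vec makes this return an
    // empty list rather than a key-not-found error.
    assert_eq!(relations.get_children(2).unwrap(), &Vec::<Hash>::new());
}
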
