Move to mimalloc #361

Merged: 8 commits, Dec 26, 2023
Changes from all commits
29 changes: 29 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

15 changes: 13 additions & 2 deletions Cargo.toml
@@ -53,6 +53,7 @@ members = [
"rothschild",
"metrics/perf_monitor",
"metrics/core",
"utils/alloc",
]

[workspace.package]
@@ -73,6 +74,9 @@ include = [
]

[workspace.dependencies]
mimalloc = { version = "0.1.39", default-features = false, features = [
'override',
] }
# kaspa-testing-integration = { version = "0.13.1", path = "testing/integration" }
kaspa-addresses = { version = "0.13.1", path = "crypto/addresses" }
kaspa-addressmanager = { version = "0.13.1", path = "components/addressmanager" }
@@ -124,6 +128,8 @@ kaspa-wrpc-proxy = { version = "0.13.1", path = "rpc/wrpc/proxy" }
kaspa-wrpc-server = { version = "0.13.1", path = "rpc/wrpc/server" }
kaspa-wrpc-wasm = { version = "0.13.1", path = "rpc/wrpc/wasm" }
kaspad = { version = "0.13.1", path = "kaspad" }
kaspa-alloc = { version = "0.13.1", path = "utils/alloc" }


# external
aes = "0.8.3"
@@ -162,7 +168,9 @@ faster-hex = "0.6.1" # TODO "0.8.1" - fails unit tests
fixedstr = { version = "0.5.4", features = ["serde"] }
flate2 = "1.0.28"
futures = { version = "0.3.29" }
futures-util = { version = "0.3.29", default-features = false, features = [ "alloc", ] }
futures-util = { version = "0.3.29", default-features = false, features = [
"alloc",
] }
getrandom = { version = "0.2.10", features = ["js"] }
h2 = "0.3.21"
heapless = "0.7.16"
@@ -237,7 +245,10 @@ web-sys = "=0.3.64"
xxhash-rust = { version = "0.8.7", features = ["xxh3"] }
zeroize = { version = "1.6.0", default-features = false, features = ["alloc"] }
pin-project-lite = "0.2.13"
tower-http = { version = "0.4.4", features = ["map-response-body", "map-request-body"] }
tower-http = { version = "0.4.4", features = [
"map-response-body",
"map-request-body",
] }
tower = "0.4.7"
hyper = "0.14.27"
# workflow dependencies that are not a part of core libraries
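Note: the new utils/alloc crate registered above is not rendered in this view. Based on what is wired up here (mimalloc with the 'override' feature, and a heap feature consumed below for dhat profiling), its lib.rs plausibly reduces to the sketch below; the exact contents, cfg gates, and settings are assumptions rather than the committed code.

// Hypothetical sketch of utils/alloc/src/lib.rs (not shown in this diff).

// With heap profiling enabled, dhat must own the global allocator so the
// profiler can observe every allocation.
#[cfg(feature = "heap")]
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;

// Otherwise install mimalloc process-wide; the crate's 'override' feature
// additionally replaces the C-level malloc/free entry points.
#[cfg(not(feature = "heap"))]
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;

/// Applies the project's default allocator settings; shown here as a stub.
/// The real function may tune mimalloc runtime options at startup.
pub fn init_allocator_with_default_settings() {}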
4 changes: 3 additions & 1 deletion kaspad/Cargo.toml
@@ -13,6 +13,8 @@ name = "kaspad_lib"
crate-type = ["cdylib", "lib"]

[dependencies]
kaspa-alloc.workspace = true # This changes the global allocator for all subsequent dependencies, so it should be kept first

kaspa-addresses.workspace = true
kaspa-addressmanager.workspace = true
kaspa-consensus-core.workspace = true
@@ -50,5 +52,5 @@ tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] }
workflow-log.workspace = true

[features]
heap = ["dhat"]
heap = ["dhat", "kaspa-alloc/heap"]
devnet-prealloc = ["kaspa-consensus/devnet-prealloc"]
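With heap now forwarding to kaspa-alloc/heap, one flag flips the whole binary between the mimalloc override and dhat-instrumented allocation. A hypothetical invocation from the workspace root, using the package and feature names defined above:

cargo run --release -p kaspad --features heap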
3 changes: 3 additions & 0 deletions kaspad/src/main.rs
@@ -4,6 +4,7 @@ extern crate kaspa_hashes;

use std::sync::Arc;

use kaspa_alloc::init_allocator_with_default_settings;
use kaspa_core::{info, signals::Signals};
use kaspa_utils::fd_budget;
use kaspad_lib::{
@@ -19,6 +20,8 @@ pub fn main() {
#[cfg(feature = "heap")]
let _profiler = dhat::Profiler::builder().file_name("kaspad-heap.json").build();

init_allocator_with_default_settings();

let args = parse_args();

match fd_budget::try_set_fd_limit(DESIRED_DAEMON_SOFT_FD_LIMIT) {
5 changes: 3 additions & 2 deletions simpa/Cargo.toml
@@ -9,6 +9,7 @@ include.workspace = true
license.workspace = true

[dependencies]
kaspa-alloc.workspace = true # This changes the global allocator for all subsequent dependencies, so it should be kept first
kaspa-consensus-core.workspace = true
kaspa-consensus-notify.workspace = true
kaspa-consensus.workspace = true
@@ -20,7 +21,7 @@ kaspa-utils.workspace = true

async-channel.workspace = true
clap.workspace = true
dhat = {workspace = true, optional = true}
dhat = { workspace = true, optional = true }
futures-util.workspace = true
futures.workspace = true
indexmap.workspace = true
@@ -34,4 +35,4 @@ secp256k1.workspace = true
tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] }

[features]
heap = ["dhat"]
heap = ["dhat", "kaspa-alloc/heap"]
3 changes: 3 additions & 0 deletions simpa/src/main.rs
@@ -2,6 +2,7 @@ use async_channel::unbounded;
use clap::Parser;
use futures::{future::try_join_all, Future};
use itertools::Itertools;
use kaspa_alloc::init_allocator_with_default_settings;
use kaspa_consensus::{
config::ConfigBuilder,
consensus::Consensus,
@@ -118,6 +119,8 @@ fn main() {
#[cfg(feature = "heap")]
let _profiler = dhat::Profiler::builder().file_name("simpa-heap.json").build();

init_allocator_with_default_settings();

// Get CLI arguments
let args = Args::parse();

1 change: 1 addition & 0 deletions testing/integration/Cargo.toml
@@ -8,6 +8,7 @@ include.workspace = true
license.workspace = true

[dependencies]
kaspa-alloc.workspace = true # This changes the global allocator for all subsequent dependencies, so it should be kept first
kaspa-addresses.workspace = true
kaspa-consensus-core.workspace = true
kaspa-consensus-notify.workspace = true
23 changes: 23 additions & 0 deletions testing/integration/src/consensus_integration_tests.rs
@@ -3,6 +3,7 @@
//!

use async_channel::unbounded;
use kaspa_alloc::init_allocator_with_default_settings;
use kaspa_consensus::config::genesis::GENESIS;
use kaspa_consensus::config::{Config, ConfigBuilder};
use kaspa_consensus::consensus::factory::Factory as ConsensusFactory;
@@ -173,16 +174,19 @@ fn reachability_stretch_test(use_attack_json: bool) {

#[test]
fn test_attack_json() {
init_allocator_with_default_settings();
reachability_stretch_test(true);
}

#[test]
fn test_noattack_json() {
init_allocator_with_default_settings();
reachability_stretch_test(false);
}

#[tokio::test]
async fn consensus_sanity_test() {
init_allocator_with_default_settings();
let genesis_child: Hash = 2.into();
let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build();
let consensus = TestConsensus::new(&config);
@@ -232,6 +236,7 @@ struct GhostdagTestBlock {

#[tokio::test]
async fn ghostdag_test() {
init_allocator_with_default_settings();
let mut path_strings: Vec<String> =
common::read_dir("testdata/dags").map(|f| f.unwrap().path().to_str().unwrap().to_owned()).collect();
path_strings.sort();
@@ -316,6 +321,7 @@ fn strings_to_hashes(strings: &Vec<String>) -> Vec<Hash> {

#[tokio::test]
async fn block_window_test() {
init_allocator_with_default_settings();
let config = ConfigBuilder::new(MAINNET_PARAMS)
.skip_proof_of_work()
.edit_consensus_params(|p| {
@@ -385,6 +391,7 @@ async fn block_window_test() {

#[tokio::test]
async fn header_in_isolation_validation_test() {
init_allocator_with_default_settings();
let config = Config::new(MAINNET_PARAMS);
let consensus = TestConsensus::new(&config);
let wait_handles = consensus.init();
@@ -453,6 +460,7 @@ async fn header_in_isolation_validation_test() {

#[tokio::test]
async fn incest_test() {
init_allocator_with_default_settings();
let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build();
let consensus = TestConsensus::new(&config);
let wait_handles = consensus.init();
@@ -481,6 +489,7 @@ async fn incest_test() {

#[tokio::test]
async fn missing_parents_test() {
init_allocator_with_default_settings();
let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build();
let consensus = TestConsensus::new(&config);
let wait_handles = consensus.init();
@@ -505,6 +514,7 @@ async fn missing_parents_test() {
// as a known invalid.
#[tokio::test]
async fn known_invalid_test() {
init_allocator_with_default_settings();
let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build();
let consensus = TestConsensus::new(&config);
let wait_handles = consensus.init();
@@ -530,6 +540,7 @@ async fn known_invalid_test() {

#[tokio::test]
async fn median_time_test() {
init_allocator_with_default_settings();
struct Test {
name: &'static str,
config: Config,
@@ -603,6 +614,7 @@ async fn median_time_test() {

#[tokio::test]
async fn mergeset_size_limit_test() {
init_allocator_with_default_settings();
let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build();
let consensus = TestConsensus::new(&config);
let wait_handles = consensus.init();
@@ -828,39 +840,46 @@ impl KaspadGoParams {

#[tokio::test]
async fn goref_custom_pruning_depth_test() {
init_allocator_with_default_settings();
json_test("testdata/dags_for_json_tests/goref_custom_pruning_depth", false).await
}

#[tokio::test]
async fn goref_notx_test() {
init_allocator_with_default_settings();
json_test("testdata/dags_for_json_tests/goref-notx-5000-blocks", false).await
}

#[tokio::test]
async fn goref_notx_concurrent_test() {
init_allocator_with_default_settings();
json_test("testdata/dags_for_json_tests/goref-notx-5000-blocks", true).await
}

#[tokio::test]
async fn goref_tx_small_test() {
init_allocator_with_default_settings();
json_test("testdata/dags_for_json_tests/goref-905-tx-265-blocks", false).await
}

#[tokio::test]
async fn goref_tx_small_concurrent_test() {
init_allocator_with_default_settings();
json_test("testdata/dags_for_json_tests/goref-905-tx-265-blocks", true).await
}

#[ignore]
#[tokio::test]
async fn goref_tx_big_test() {
init_allocator_with_default_settings();
// TODO: add this directory to a data repo and fetch dynamically
json_test("testdata/dags_for_json_tests/goref-1.6M-tx-10K-blocks", false).await
}

#[ignore]
#[tokio::test]
async fn goref_tx_big_concurrent_test() {
init_allocator_with_default_settings();
// TODO: add this file to a data repo and fetch dynamically
json_test("testdata/dags_for_json_tests/goref-1.6M-tx-10K-blocks", true).await
}
@@ -1223,6 +1242,7 @@ fn hex_decode(src: &str) -> Vec<u8> {

#[tokio::test]
async fn bounded_merge_depth_test() {
init_allocator_with_default_settings();
let config = ConfigBuilder::new(DEVNET_PARAMS)
.skip_proof_of_work()
.edit_consensus_params(|p| {
@@ -1302,6 +1322,7 @@ async fn bounded_merge_depth_test() {

#[tokio::test]
async fn difficulty_test() {
init_allocator_with_default_settings();
async fn add_block(consensus: &TestConsensus, block_time: Option<u64>, parents: Vec<Hash>) -> Header {
let selected_parent = consensus.ghostdag_manager().find_selected_parent(parents.iter().copied());
let block_time = block_time.unwrap_or_else(|| {
@@ -1620,6 +1641,7 @@ async fn difficulty_test() {

#[tokio::test]
async fn selected_chain_test() {
init_allocator_with_default_settings();
kaspa_core::log::try_init_logger("info");

let config = ConfigBuilder::new(MAINNET_PARAMS)
@@ -1688,6 +1710,7 @@ fn selected_chain_store_iterator(consensus: &TestConsensus, pruning_point: Hash)

#[tokio::test]
async fn staging_consensus_test() {
init_allocator_with_default_settings();
let config = ConfigBuilder::new(MAINNET_PARAMS).build();

let db_tempdir = get_kaspa_tempdir();
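Every test entry point above calls init_allocator_with_default_settings() first, because Rust test binaries never run kaspad's main(). Since many tests execute within a single process, the function presumably has to tolerate repeated calls; a minimal sketch of such a guard, assuming (it is not shown in this diff) that the real crate does something equivalent:

use std::sync::Once;

static INIT: Once = Once::new();

pub fn init_allocator_with_default_settings() {
    // call_once makes repeated invocations, including from tests running in
    // parallel, a no-op after the first; the closure would hold the actual
    // allocator option tuning.
    INIT.call_once(|| { /* apply default mimalloc settings */ });
}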