From e854c0a6291b55e71622358402e21c65928e1cbc Mon Sep 17 00:00:00 2001 From: EthanYuan Date: Tue, 28 Nov 2023 22:04:44 +0800 Subject: [PATCH 1/3] CKB Text Optimization. --- block-filter/src/filter.rs | 12 +-- chain/src/chain.rs | 20 ++-- ckb-bin/src/helper.rs | 2 +- ckb-bin/src/lib.rs | 4 +- ckb-bin/src/subcommand/init.rs | 30 +++--- ckb-bin/src/subcommand/migrate.rs | 10 +- ckb-bin/src/subcommand/replay.rs | 16 ++-- ckb-bin/src/subcommand/reset_data.rs | 4 +- db-migration/src/lib.rs | 8 +- error/src/internal.rs | 2 +- freezer/src/freezer.rs | 10 +- miner/src/client.rs | 18 ++-- miner/src/worker/eaglesong_simple.rs | 4 +- network/src/compress.rs | 2 +- network/src/network.rs | 34 +++---- network/src/peer_registry.rs | 2 +- network/src/peer_store/peer_store_db.rs | 4 +- network/src/protocols/discovery/mod.rs | 19 ++-- network/src/protocols/discovery/state.rs | 7 +- network/src/protocols/identify/mod.rs | 10 +- network/src/protocols/mod.rs | 6 +- network/src/protocols/ping.rs | 16 ++-- network/src/services/dns_seeding/mod.rs | 8 +- network/src/services/dump_peer_store.rs | 2 +- network/src/services/protocol_type_checker.rs | 2 +- notify/src/lib.rs | 26 ++--- rpc/src/module/chain.rs | 6 +- rpc/src/module/miner.rs | 4 +- rpc/src/module/pool.rs | 6 +- rpc/src/module/test.rs | 4 +- rpc/src/server.rs | 9 +- script/src/error.rs | 2 +- shared/src/shared.rs | 18 ++-- shared/src/shared_builder.rs | 23 +++-- sync/src/net_time_checker.rs | 8 +- sync/src/relayer/mod.rs | 5 +- sync/src/synchronizer/block_fetcher.rs | 4 +- sync/src/synchronizer/get_blocks_process.rs | 4 +- sync/src/synchronizer/get_headers_process.rs | 2 +- sync/src/synchronizer/headers_process.rs | 16 ++-- sync/src/synchronizer/mod.rs | 47 +++++---- sync/src/types/mod.rs | 12 ++- tx-pool/src/block_assembler/mod.rs | 11 ++- tx-pool/src/pool.rs | 8 +- tx-pool/src/process.rs | 13 ++- tx-pool/src/service.rs | 52 +++++----- tx-pool/src/util.rs | 2 +- util/app-config/src/app_config.rs | 6 +- util/app-config/src/cli.rs 
| 96 +++++++++---------- util/app-config/src/configs/network.rs | 4 +- util/crypto/src/secp/error.rs | 10 +- util/fixed-hash/core/src/error.rs | 6 +- util/indexer/src/service.rs | 4 +- util/launcher/src/lib.rs | 12 +-- util/light-client-protocol-server/src/lib.rs | 6 +- .../src/tests/utils/network_context.rs | 2 +- util/memory-tracker/src/process.rs | 4 +- util/metrics-service/src/lib.rs | 4 +- util/multisig/src/error.rs | 4 +- util/multisig/src/secp256k1.rs | 2 +- util/network-alert/src/alert_relayer.rs | 10 +- util/network-alert/src/notifier.rs | 2 +- .../src/tests/generate_alert_signature.rs | 4 +- util/network-alert/src/verifier.rs | 2 +- util/runtime/src/lib.rs | 2 +- util/stop-handler/src/stop_register.rs | 18 ++-- util/types/src/core/error.rs | 6 +- util/types/src/core/tx_pool.rs | 10 +- verification/src/error.rs | 2 +- 69 files changed, 388 insertions(+), 362 deletions(-) diff --git a/block-filter/src/filter.rs b/block-filter/src/filter.rs index 56fcc92f6b..3c8a217557 100644 --- a/block-filter/src/filter.rs +++ b/block-filter/src/filter.rs @@ -78,13 +78,13 @@ impl BlockFilter { let tip_header = snapshot.get_tip_header().expect("tip stored"); let start_number = match snapshot.get_latest_built_filter_data_block_hash() { Some(block_hash) => { - debug!("Latest built block hash {:#x}", block_hash); + debug!("Hash of the latest created block {:#x}", block_hash); if snapshot.is_main_chain(&block_hash) { let header = snapshot .get_block_header(&block_hash) .expect("header stored"); debug!( - "Latest built block is main chain, start from {}", + "Latest created block on the main chain, starting from {}", header.number() + 1 ); header.number() + 1 @@ -99,7 +99,7 @@ impl BlockFilter { .expect("parent header stored"); } debug!( - "Latest built filter data block is fork chain, start from {}", + "Block with the latest built filter data on the forked chain, starting from {}", header.number() ); header.number() @@ -126,7 +126,7 @@ impl BlockFilter { let db = 
self.shared.store(); if db.get_block_filter_hash(&header.hash()).is_some() { debug!( - "Filter data for block {:#x} already exist, skip build", + "Filter data for block {:#x} already exists. Skip building.", header.hash() ); return; } @@ -144,8 +144,8 @@ impl BlockFilter { let (filter_data, missing_out_points) = build_filter_data(provider, &transactions); for out_point in missing_out_points { warn!( - "Can't find input cell for out_point: {:#x}, \ - should only happen in test, skip adding to filter", + "Unable to find the input cell for the out_point: {:#x}, \ + Skip adding it to the filter. This should only happen during testing.", out_point ); } } diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 2f0789b4a4..e6bb6de030 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -345,13 +345,13 @@ impl ChainService { let block_number = block.number(); let block_hash = block.hash(); - debug!("begin processing block: {}-{}", block_number, block_hash); + debug!("Begin processing block: {}-{}", block_number, block_hash); if block_number < 1 { - warn!("receive 0 number block: 0-{}", block_hash); + warn!("Receive 0 number block: 0-{}", block_hash); } self.insert_block(block, switch).map(|ret| { - debug!("finish processing block"); + debug!("Finish processing block"); ret }) } @@ -444,7 +444,7 @@ impl ChainService { let current_total_difficulty = shared_snapshot.total_difficulty().to_owned(); debug!( - "difficulty current = {:#x}, cannon = {:#x}", + "Current difficulty = {:#x}, cannon = {:#x}", current_total_difficulty, cannon_total_difficulty, ); @@ -453,7 +453,7 @@ impl ChainService { if new_best_block { debug!( - "new best block found: {} => {:#x}, difficulty diff = {:#x}", + "Newly found best block: {} => {:#x}, difficulty diff = {:#x}", block.header().number(), block.header().hash(), &cannon_total_difficulty - &current_total_difficulty ); @@ -506,7 +506,7 @@ impl ChainService { fork.detached_proposal_id().clone(), new_snapshot, ) { - error!("notify 
update_tx_pool_for_reorg error {}", e); + error!("Notify update_tx_pool_for_reorg error {}", e); } } @@ -535,7 +535,7 @@ impl ChainService { if tx_pool_controller.service_started() { let block_ref: &BlockView = &block; if let Err(e) = tx_pool_controller.notify_new_uncle(block_ref.as_uncle()) { - error!("notify new_uncle error {}", e); + error!("Notify new_uncle error {}", e); } } } @@ -576,7 +576,7 @@ impl ChainService { let proposal_start = cmp::max(1, (new_tip + 1).saturating_sub(proposal_window.farthest())); - debug!("reload_proposal_table [{}, {}]", proposal_start, common); + debug!("Reload_proposal_table [{}, {}]", proposal_start, common); for bn in proposal_start..=common { let blk = self .shared @@ -930,13 +930,13 @@ impl ChainService { fn print_error(&self, b: &BlockView, err: &Error) { error!( - "block verify error, block number: {}, hash: {}, error: {:?}", + "Block verify error. Block number: {}, hash: {}, error: {:?}", b.header().number(), b.header().hash(), err ); if log_enabled!(ckb_logger::Level::Trace) { - trace!("block {}", b.data()); + trace!("Block {}", b.data()); } } diff --git a/ckb-bin/src/helper.rs b/ckb-bin/src/helper.rs index 21c93732b8..98f1bd5362 100644 --- a/ckb-bin/src/helper.rs +++ b/ckb-bin/src/helper.rs @@ -13,7 +13,7 @@ pub fn deadlock_detection() { use ckb_util::parking_lot::deadlock; use std::{thread, time::Duration}; - info!("deadlock_detection enable"); + info!("deadlock_detection enabled"); let dead_lock_jh = thread::spawn({ let ticker = ckb_channel::tick(Duration::from_secs(10)); let stop_rx = new_crossbeam_exit_rx(); diff --git a/ckb-bin/src/lib.rs b/ckb-bin/src/lib.rs index 81d9376b07..825a95b097 100644 --- a/ckb-bin/src/lib.rs +++ b/ckb-bin/src/lib.rs @@ -80,9 +80,9 @@ pub fn run_app(version: Version) -> Result<(), ExitCode> { handle.drop_guard(); tokio::task::block_in_place(|| { - info!("waiting all tokio tasks exit..."); + info!("Waiting for all tokio tasks to exit..."); handle_stop_rx.blocking_recv(); - info!("all tokio tasks 
and threads have exited, ckb shutdown"); + info!("All tokio tasks and threads have exited. CKB shutdown"); }); } diff --git a/ckb-bin/src/subcommand/init.rs b/ckb-bin/src/subcommand/init.rs index d2214133ee..2cdcc13b26 100644 --- a/ckb-bin/src/subcommand/init.rs +++ b/ckb-bin/src/subcommand/init.rs @@ -26,13 +26,13 @@ pub fn init(args: InitArgs) -> Result<(), ExitCode> { } if args.chain != "dev" && !args.customize_spec.is_unset() { - eprintln!("Customizing consensus parameters for chain spec only works for dev chains."); + eprintln!("Customizing consensus parameters for chain spec; only works for dev chains."); return Err(ExitCode::Failure); } let exported = Resource::exported_in(&args.root_dir); if !args.force && exported { - eprintln!("Config files already exist, use --force to overwrite."); + eprintln!("Config files already exist; use --force to overwrite."); if args.interactive { let input = prompt("Overwrite config files now? "); @@ -103,15 +103,15 @@ pub fn init(args: InitArgs) -> Result<(), ExitCode> { ); } else if *default_code_hash != *hash { eprintln!( - "WARN: the default secp256k1 code hash is `{default_code_hash}`, you are using `{hash}`.\n\ - It will require `ckb run --ba-advanced` to enable this block assembler" + "WARN: Use the default secp256k1 code hash `{default_code_hash}` rather than `{hash}`.\n\ + To enable this block assembler, use `ckb run --ba-advanced`." ); } else if args.block_assembler_args.len() != 1 || args.block_assembler_args[0].len() != SECP256K1_BLAKE160_SIGHASH_ALL_ARG_LEN { eprintln!( - "WARN: the block assembler arg is not a valid secp256k1 pubkey hash.\n\ - It will require `ckb run --ba-advanced` to enable this block assembler" + "WARN: The block assembler arg is not a valid secp256k1 pubkey hash.\n\ + To enable this block assembler, use `ckb run --ba-advanced`. 
" ); } } @@ -129,7 +129,7 @@ pub fn init(args: InitArgs) -> Result<(), ExitCode> { ) } None => { - eprintln!("WARN: mining feature is disabled because of lacking the block assembler config options"); + eprintln!("WARN: Mining feature is disabled because of the lack of the block assembler config options."); format!( "# secp256k1_blake160_sighash_all example:\n\ # [block_assembler]\n\ @@ -175,7 +175,7 @@ pub fn init(args: InitArgs) -> Result<(), ExitCode> { let target_file = specs_dir.join(format!("{}.toml", args.chain)); if spec_file == "-" { - println!("create specs/{}.toml from stdin", args.chain); + println!("Create specs/{}.toml from stdin", args.chain); let mut encoded_content = String::new(); io::stdin().read_to_string(&mut encoded_content)?; let base64_config = @@ -185,11 +185,11 @@ pub fn init(args: InitArgs) -> Result<(), ExitCode> { let spec_content = base64_engine.encode(encoded_content.trim()); fs::write(target_file, spec_content)?; } else { - println!("cp {} specs/{}.toml", spec_file, args.chain); + println!("copy {} to specs/{}.toml", spec_file, args.chain); fs::copy(spec_file, target_file)?; } } else if args.chain == "dev" { - println!("create {SPEC_DEV_FILE_NAME}"); + println!("Create {SPEC_DEV_FILE_NAME}"); let bundled = Resource::bundled(SPEC_DEV_FILE_NAME.to_string()); let kvs = args.customize_spec.key_value_pairs(); let context_spec = @@ -197,18 +197,20 @@ pub fn init(args: InitArgs) -> Result<(), ExitCode> { bundled.export(&context_spec, &args.root_dir)?; } - println!("create {CKB_CONFIG_FILE_NAME}"); + println!("Create {CKB_CONFIG_FILE_NAME}"); Resource::bundled_ckb_config().export(&context, &args.root_dir)?; - println!("create {MINER_CONFIG_FILE_NAME}"); + println!("Create {MINER_CONFIG_FILE_NAME}"); Resource::bundled_miner_config().export(&context, &args.root_dir)?; - println!("create {DB_OPTIONS_FILE_NAME}"); + println!("Create {DB_OPTIONS_FILE_NAME}"); Resource::bundled_db_options().export(&context, &args.root_dir)?; let genesis_hash = 
AppConfig::load_for_subcommand(args.root_dir, cli::CMD_INIT)? .chain_spec()? .build_genesis() .map_err(|err| { - eprintln!("couldn't build genesis from generated chain spec, since {err}"); + eprintln!( + "Couldn't build the genesis block from the generated chain spec, since {err}" + ); ExitCode::Failure })? .hash(); diff --git a/ckb-bin/src/subcommand/migrate.rs b/ckb-bin/src/subcommand/migrate.rs index 1dde58ce6f..c62b4dd47a 100644 --- a/ckb-bin/src/subcommand/migrate.rs +++ b/ckb-bin/src/subcommand/migrate.rs @@ -10,7 +10,7 @@ pub fn migrate(args: MigrateArgs) -> Result<(), ExitCode> { { let read_only_db = migrate.open_read_only_db().map_err(|e| { - eprintln!("migrate error {e}"); + eprintln!("Migration error {e}"); ExitCode::Failure })?; @@ -18,8 +18,8 @@ pub fn migrate(args: MigrateArgs) -> Result<(), ExitCode> { let db_status = migrate.check(&db); if matches!(db_status, Ordering::Greater) { eprintln!( - "The database is created by a higher version CKB executable binary, \n\ - so that the current CKB executable binary couldn't open this database.\n\ + "The database was created by a higher version CKB executable binary \n\ + and cannot be opened by the current binary.\n\ Please download the latest CKB executable binary." 
); return Err(ExitCode::Failure); @@ -50,7 +50,7 @@ pub fn migrate(args: MigrateArgs) -> Result<(), ExitCode> { > ", ); if input.trim().to_lowercase() != "yes" { - eprintln!("The migration was declined since the user didn't confirm."); + eprintln!("Migration was declined since the user didn't confirm."); return Err(ExitCode::Failure); } } else { @@ -62,7 +62,7 @@ pub fn migrate(args: MigrateArgs) -> Result<(), ExitCode> { } let bulk_load_db_db = migrate.open_bulk_load_db().map_err(|e| { - eprintln!("migrate error {e}"); + eprintln!("Migration error {e}"); ExitCode::Failure })?; diff --git a/ckb-bin/src/subcommand/replay.rs b/ckb-bin/src/subcommand/replay.rs index ea84479cf0..4f524f47db 100644 --- a/ckb-bin/src/subcommand/replay.rs +++ b/ckb-bin/src/subcommand/replay.rs @@ -25,13 +25,13 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> { if !args.tmp_target.is_dir() { eprintln!( - "replay error: {:?}", + "Replay error: {:?}", "The specified path does not exist or not directory" ); return Err(ExitCode::Failure); } let tmp_db_dir = tempfile::tempdir_in(args.tmp_target).map_err(|err| { - eprintln!("replay error: {err:?}"); + eprintln!("Replay error: {err:?}"); ExitCode::Failure })?; { @@ -58,7 +58,7 @@ pub fn replay(args: ReplayArgs, async_handle: Handle) -> Result<(), ExitCode> { } } tmp_db_dir.close().map_err(|err| { - eprintln!("replay error: {err:?}"); + eprintln!("Replay error: {err:?}"); ExitCode::Failure })?; @@ -72,7 +72,7 @@ fn profile(shared: Shared, mut chain: ChainService, from: Option, to: Optio .map(|v| std::cmp::min(v, tip_number)) .unwrap_or(tip_number); process_range_block(&shared, &mut chain, 1..from); - println!("start profiling, re-process blocks {from}..{to}:"); + println!("Start profiling; re-process blocks {from}..{to}:"); let now = std::time::Instant::now(); let tx_count = process_range_block(&shared, &mut chain, from..=to); let duration = std::time::Instant::now().saturating_duration_since(now); @@ -136,7 +136,7 
@@ fn sanity_check(shared: Shared, mut chain: ChainService, full_verification: bool let header = block.header(); if let Err(e) = chain.process_block(Arc::new(block), switch) { eprintln!( - "replay sanity-check error: {:?} at block({}-{})", + "Replay sanity-check error: {:?} at block({}-{})", e, header.number(), header.hash(), @@ -152,7 +152,7 @@ fn sanity_check(shared: Shared, mut chain: ChainService, full_verification: bool if cursor != tip_header { eprintln!( - "sanity-check break at block({}-{}), expect tip({}-{})", + "Sanity-check break at block({}-{}); expect tip({}-{})", cursor.number(), cursor.hash(), tip_header.number(), @@ -160,11 +160,11 @@ fn sanity_check(shared: Shared, mut chain: ChainService, full_verification: bool ); } else { println!( - "sanity-check pass, tip({}-{})", + "Sanity-check pass, tip({}-{})", tip_header.number(), tip_header.hash() ); } - println!("replay finishing, please wait..."); + println!("Finishing replay; please wait..."); } diff --git a/ckb-bin/src/subcommand/reset_data.rs b/ckb-bin/src/subcommand/reset_data.rs index 8a8f614859..ee09812fb8 100644 --- a/ckb-bin/src/subcommand/reset_data.rs +++ b/ckb-bin/src/subcommand/reset_data.rs @@ -50,7 +50,7 @@ pub fn reset_data(args: ResetDataArgs) -> Result<(), ExitCode> { for dir in target_dirs.iter() { if dir.exists() { - println!("deleting {}", dir.display()); + println!("Deleting {}", dir.display()); if let Some(e) = fs::remove_dir_all(dir).err() { eprintln!("{e}"); errors_count += 1; @@ -60,7 +60,7 @@ pub fn reset_data(args: ResetDataArgs) -> Result<(), ExitCode> { for file in target_files.iter() { if file.exists() { - println!("deleting {}", file.display()); + println!("Deleting {}", file.display()); if let Some(e) = fs::remove_file(file).err() { eprintln!("{e}"); errors_count += 1; diff --git a/db-migration/src/lib.rs b/db-migration/src/lib.rs index 1dff404b50..e2243f216f 100644 --- a/db-migration/src/lib.rs +++ b/db-migration/src/lib.rs @@ -62,7 +62,7 @@ impl Migrations { } } }; - 
debug!("current database version [{}]", db_version); + debug!("Current database version [{}]", db_version); let latest_version = self .migrations @@ -70,7 +70,7 @@ impl Migrations { .last() .unwrap_or_else(|| panic!("should have at least one version")) .version(); - debug!("latest database version [{}]", latest_version); + debug!("Latest database version [{}]", latest_version); db_version.as_str().cmp(latest_version) } @@ -176,8 +176,8 @@ impl Migrations { if m.version() < v.as_str() { error!( "Database downgrade detected. \ - The database schema version is newer than client schema version,\ - please upgrade to the newer version" + The database schema version is more recent than the client schema version.\ + Please upgrade to the latest client version." ); return Err(internal_error( "Database downgrade is not supported".to_string(), diff --git a/error/src/internal.rs b/error/src/internal.rs index aa52fc54ac..71e99cd214 100644 --- a/error/src/internal.rs +++ b/error/src/internal.rs @@ -7,7 +7,7 @@ use thiserror::Error; /// An error with no reason. #[derive(Error, Debug, Clone, Copy)] -#[error("no reason is provided")] +#[error("No reason provided")] pub struct SilentError; /// An error with only a string as the reason. 
diff --git a/freezer/src/freezer.rs b/freezer/src/freezer.rs index cc8a1db10c..6812e7620a 100644 --- a/freezer/src/freezer.rs +++ b/freezer/src/freezer.rs @@ -91,7 +91,11 @@ impl Freezer { let number = self.number(); let mut guard = self.inner.lock(); let mut ret = BTreeMap::new(); - ckb_logger::trace!("freezer freeze start {} threshold {}", number, threshold); + ckb_logger::trace!( + "Freezer process initiated, starting from {}, threshold {}", + number, + threshold + ); for number in number..threshold { if self.stopped.load(Ordering::SeqCst) { @@ -120,9 +124,9 @@ impl Freezer { (number, block.transactions().len() as u32), ); guard.tip = Some(block.header()); - ckb_logger::trace!("freezer block append {}", number); + ckb_logger::trace!("Freezer block append {}", number); } else { - ckb_logger::error!("freezer block missing {}", number); + ckb_logger::error!("Freezer block missing {}", number); break; } } diff --git a/miner/src/client.rs b/miner/src/client.rs index dda47570a7..1cea7e2ae0 100644 --- a/miner/src/client.rs +++ b/miner/src/client.rs @@ -199,15 +199,15 @@ impl Client { ckb_logger::info!("listen notify mode : {}", addr); ckb_logger::info!( r#" -Please note that ckb-miner runs in notify mode, -and you need to configure the corresponding information in the block assembler of the ckb, -for example +Please note that ckb-miner runs in notify mode. \ +You should configure the corresponding information in CKB block assembler, \ +for example: [block_assembler] ... notify = ["http://{}"] -Otherwise ckb-miner does not work properly and will behave as it stopped committing new valid blocks after a while +Otherwise ckb-miner will malfunction and stop submitting valid blocks after a certain period. 
"#, addr ); @@ -235,7 +235,7 @@ Otherwise ckb-miner does not work properly and will behave as it stopped committ let stop_rx: CancellationToken = new_tokio_exit_rx(); let graceful = server.with_graceful_shutdown(async move { stop_rx.cancelled().await; - info!("Miner client received exit signal, exit now"); + info!("Miner client received exit signal. Exit now"); }); if let Err(e) = graceful.await { @@ -302,10 +302,10 @@ Otherwise ckb-miner does not work properly and will behave as it stopped committ if is_method_not_found { error!( "RPC Method Not Found: \ - please do checks as follow: \ - 1. if the CKB server has enabled the Miner API module; \ - 2. if the CKB server has set `block_assembler`; \ - 3. If the RPC URL for CKB miner is right.", + Please perform the following checks: \ + 1. Ensure that the CKB server has enabled the Miner API module; \ + 2. Verify that the CKB server has set the `block_assembler` correctly; \ + 3. Confirm that the RPC URL for CKB miner is correct.", ); } else { error!("rpc call get_block_template error: {:?}", err); diff --git a/miner/src/worker/eaglesong_simple.rs b/miner/src/worker/eaglesong_simple.rs index 6e097b8538..22e91e70f1 100644 --- a/miner/src/worker/eaglesong_simple.rs +++ b/miner/src/worker/eaglesong_simple.rs @@ -60,7 +60,7 @@ impl EaglesongSimple { } fn solve(&mut self, pow_hash: Byte32, work: Work, nonce: u128) { - debug!("solve, pow_hash {}, nonce {:?}", pow_hash, nonce); + debug!("Solved. 
pow_hash {}, nonce {:?}", pow_hash, nonce); let input = pow_message(&pow_hash, nonce); let output = { let mut output_tmp = [0u8; 32]; @@ -72,7 +72,7 @@ impl EaglesongSimple { }; if U256::from_big_endian(&output[..]).expect("bound checked") <= self.target { debug!( - "send new found nonce, pow_hash {}, nonce {:?}", + "Send newly found nonce, pow_hash {}, nonce {:?}", pow_hash, nonce ); if let Err(err) = self.nonce_tx.send((pow_hash, work, nonce)) { diff --git a/network/src/compress.rs b/network/src/compress.rs index 6193d1dce7..597ee473a7 100644 --- a/network/src/compress.rs +++ b/network/src/compress.rs @@ -72,7 +72,7 @@ impl Message { Ok(decompressed_bytes_len) => { if decompressed_bytes_len > MAX_UNCOMPRESSED_LEN { debug!( - "the maximum uncompressed bytes len limit is exceeded, limit: {}, len: {}", + "The limit for uncompressed bytes len is exceeded. limit: {}, len: {}", MAX_UNCOMPRESSED_LEN, decompressed_bytes_len ); Err(io::ErrorKind::InvalidData.into()) diff --git a/network/src/network.rs b/network/src/network.rs index 78567b02ee..59eb78a87e 100644 --- a/network/src/network.rs +++ b/network/src/network.rs @@ -113,7 +113,7 @@ impl NetworkState { }) }) .collect(); - info!("loading the peer store, which may take a few seconds to complete"); + info!("Loading the peer store. 
This process may take a few seconds to complete."); let peer_store = Mutex::new(PeerStore::load_from_dir_or_default( config.peer_store_path(), )); @@ -168,7 +168,7 @@ impl NetworkState { .filter(|peer| !peer.is_whitelist) .map(|peer| peer.connected_addr.clone()) }) { - trace!("report {:?} because {:?}", addr, behaviour); + trace!("Report {:?} because {:?}", addr, behaviour); let report_result = self.peer_store.lock().report(&addr, behaviour); if report_result.is_banned() { if let Err(err) = disconnect_with_message(p2p_control, session_id, "banned") { @@ -177,7 +177,7 @@ impl NetworkState { } } else { debug!( - "Report {} failed: not in peer registry or it is in the whitelist", + "Report {} failure: not found in peer registry; could be on whitelist", session_id ); } @@ -219,7 +219,7 @@ impl NetworkState { } } else { debug!( - "Ban session({}) failed: not in peer registry or it is in the whitelist", + "Ban session({}) failed: not found in peer registry or on the whitelist", session_id ); } @@ -356,7 +356,7 @@ impl NetworkState { if let Some(dial_started) = self.dialing_addrs.read().get(peer_id) { trace!( - "Do not repeat send dial command to network service: {:?}, {}", + "Do not send repeated dial commands to network service: {:?}, {}", peer_id, addr ); @@ -412,7 +412,7 @@ impl NetworkState { return Err(Error::Dial(format!("ignore dialing addr {addr}"))); } - debug!("dialing {addr}"); + debug!("Dialing {addr}"); p2p_control.dial(addr.clone(), target)?; self.dialing_addrs.write().insert( extract_peer_id(&addr).expect("verified addr"), @@ -461,7 +461,7 @@ impl NetworkState { addr.clone(), TargetProtocol::Single(SupportProtocols::Identify.protocol_id()), ) { - trace!("try_dial_observed_addrs fail {err} on public address") + trace!("try_dial_observed_addrs {err} failed in public address") } } } else { @@ -471,7 +471,7 @@ impl NetworkState { addr, TargetProtocol::Single(SupportProtocols::Identify.protocol_id()), ) { - trace!("try_dial_observed_addrs fail {err} on pending 
observed") + trace!("try_dial_observed_addrs {err} failed in pending observed addresses") } } } @@ -689,14 +689,14 @@ impl ServiceHandle for EventHandler { .with_peer_registry(|reg| reg.is_feeler(&session_context.address)) { debug!( - "feeler connected {} => {}", + "Feeler connected {} => {}", session_context.id, session_context.address, ); } else { match self.network_state.accept_peer(&session_context) { Ok(Some(evicted_peer)) => { debug!( - "evict peer (disconnect it), {} => {}", + "Disconnect peer, {} => {}", evicted_peer.session_id, evicted_peer.connected_addr, ); if let Err(err) = disconnect_with_message( @@ -716,7 +716,7 @@ impl ServiceHandle for EventHandler { ), Err(err) => { debug!( - "registry peer failed {:?} disconnect it, {} => {}", + "Peer registry failed {:?}. Disconnect {} => {}", err, session_context.id, session_context.address, ); if let Err(err) = disconnect_with_message( @@ -745,7 +745,7 @@ impl ServiceHandle for EventHandler { }); if peer_exists { debug!( - "{} closed, remove {} from peer_registry", + "{} closed. 
Remove {} from peer_registry", session_context.id, session_context.address, ); self.network_state.with_peer_store_mut(|peer_store| { @@ -1044,7 +1044,7 @@ impl NetworkService { // dial whitelist_nodes for addr in self.network_state.config.whitelist_peers() { - debug!("dial whitelist_peers {:?}", addr); + debug!("Dial whitelist_peers {:?}", addr); self.network_state.dial_identify(&p2p_control, addr); } @@ -1073,7 +1073,7 @@ impl NetworkService { // dial half bootnodes for addr in bootnodes { - debug!("dial bootnode {:?}", addr); + debug!("Dial bootnode {:?}", addr); self.network_state.dial_identify(&p2p_control, addr); } @@ -1111,7 +1111,7 @@ impl NetworkService { } Err(err) => { warn!( - "listen on address {} failed, due to error: {}", + "Listen on address {} failed, due to error: {}", addr.clone(), err ); @@ -1333,13 +1333,13 @@ impl NetworkController { } Err(SendErrorKind::WouldBlock) => { if Instant::now().saturating_duration_since(now) > P2P_SEND_TIMEOUT { - warn!("broadcast message to {} timeout", proto_id); + warn!("Broadcast message to {} timeout", proto_id); return Err(SendErrorKind::WouldBlock); } thread::sleep(P2P_TRY_SEND_INTERVAL); } Err(err) => { - warn!("broadcast message to {} failed: {:?}", proto_id, err); + warn!("Broadcast message to {} failed: {:?}", proto_id, err); return Err(err); } } diff --git a/network/src/peer_registry.rs b/network/src/peer_registry.rs index 67432058e7..3ff70efaae 100644 --- a/network/src/peer_registry.rs +++ b/network/src/peer_registry.rs @@ -181,7 +181,7 @@ impl PeerRegistry { // randomly evict a peer let mut rng = thread_rng(); evict_group.choose(&mut rng).map(|peer| { - debug!("evict inbound peer {:?}", peer.connected_addr); + debug!("Disconnect inbound peer {:?}", peer.connected_addr); peer.session_id }) } diff --git a/network/src/peer_store/peer_store_db.rs b/network/src/peer_store/peer_store_db.rs index 17fe197409..a3e71d44ce 100644 --- a/network/src/peer_store/peer_store_db.rs +++ 
b/network/src/peer_store/peer_store_db.rs @@ -29,7 +29,7 @@ impl AddrManager { /// Dump address list to disk pub fn dump(&self, mut file: File) -> Result<(), Error> { let addrs: Vec<_> = self.addrs_iter().collect(); - debug!("dump {} addrs", addrs.len()); + debug!("Dump {} addrs", addrs.len()); // empty file and dump the json string to it file.set_len(0) .and_then(|_| serde_json::to_string(&addrs).map_err(Into::into)) @@ -54,7 +54,7 @@ impl BanList { /// Dump ban list to disk pub fn dump(&self, mut file: File) -> Result<(), Error> { let banned_addrs = self.get_banned_addrs(); - debug!("dump {} banned addrs", banned_addrs.len()); + debug!("Dump {} banned addrs", banned_addrs.len()); // empty file and dump the json string to it file.set_len(0) .and_then(|_| serde_json::to_string(&banned_addrs).map_err(Into::into)) diff --git a/network/src/protocols/discovery/mod.rs b/network/src/protocols/discovery/mod.rs index a0ffc51e3b..97f931abf1 100644 --- a/network/src/protocols/discovery/mod.rs +++ b/network/src/protocols/discovery/mod.rs @@ -111,7 +111,7 @@ impl ServiceProtocol for DiscoveryProtocol { if let Some(state) = self.sessions.get_mut(&session.id) { if state.received_get_nodes && check(Misbehavior::DuplicateGetNodes) { if context.disconnect(session.id).await.is_err() { - debug!("disconnect {:?} send fail", session.id) + debug!("Disconnect {:?} msg failed to send", session.id) } return; } @@ -173,7 +173,7 @@ impl ServiceProtocol for DiscoveryProtocol { if let Some(misbehavior) = verify_nodes_message(&nodes) { if check(misbehavior) { if context.disconnect(session.id).await.is_err() { - debug!("disconnect {:?} send fail", session.id) + debug!("Disconnect {:?} msg failed to send", session.id) } return; } @@ -181,11 +181,11 @@ impl ServiceProtocol for DiscoveryProtocol { if let Some(state) = self.sessions.get_mut(&session.id) { if !nodes.announce && state.received_nodes { - warn!("already received Nodes(announce=false) message"); + warn!("Nodes (announce=false) message 
received"); if check(Misbehavior::DuplicateFirstNodes) && context.disconnect(session.id).await.is_err() { - debug!("disconnect {:?} send fail", session.id) + debug!("Disconnect {:?} msg failed to send", session.id) } } else { let addrs = nodes @@ -217,7 +217,7 @@ impl ServiceProtocol for DiscoveryProtocol { .is_disconnect() && context.disconnect(session.id).await.is_err() { - debug!("disconnect {:?} send fail", session.id) + debug!("Disconnect {:?} msg failed to send", session.id) } } } @@ -248,7 +248,7 @@ impl ServiceProtocol for DiscoveryProtocol { for key in keys.iter().take(3) { if let Some(value) = self.sessions.get_mut(key) { trace!( - ">> send {:?} to: {:?}, contains: {}", + ">> send {:?} to: {:?}, containing: {}", announce_multiaddr, value.remote_addr, value.addr_known.contains(&announce_multiaddr) @@ -270,7 +270,10 @@ fn verify_nodes_message(nodes: &Nodes) -> Option { let mut misbehavior = None; if nodes.announce { if nodes.items.len() > ANNOUNCE_THRESHOLD { - warn!("Nodes items more than {}", ANNOUNCE_THRESHOLD); + warn!( + "Number of nodes exceeds announce threshold {}", + ANNOUNCE_THRESHOLD + ); misbehavior = Some(Misbehavior::TooManyItems { announce: nodes.announce, length: nodes.items.len(), @@ -380,7 +383,7 @@ impl AddressManager for DiscoveryAddressManager { Some((paddr.addr, f)) }) .collect(); - trace!("discovery send random addrs: {:?}", addrs); + trace!("Discovered random addrs: {:?}", addrs); addrs } diff --git a/network/src/protocols/discovery/state.rs b/network/src/protocols/discovery/state.rs index 2d33de8996..cba4d41d14 100644 --- a/network/src/protocols/discovery/state.rs +++ b/network/src/protocols/discovery/state.rs @@ -68,7 +68,10 @@ impl SessionState { }); if context.send_message(msg).await.is_err() { - debug!("{:?} send discovery msg GetNode fail", context.session.id) + debug!( + "{:?} sending discovery msg GetNode failed", + context.session.id + ) addr_known.insert(&context.session.address); @@ -120,7 +123,7 @@ impl SessionState { 
items, })); if cx.send_message_to(id, cx.proto_id, msg).await.is_err() { - debug!("{:?} send discovery msg Nodes fail", id) + debug!("{:?} sending discovery msg Nodes failed", id) } } } diff --git a/network/src/protocols/identify/mod.rs b/network/src/protocols/identify/mod.rs index 21dbd29d41..8f2448050d 100644 --- a/network/src/protocols/identify/mod.rs +++ b/network/src/protocols/identify/mod.rs @@ -265,7 +265,7 @@ impl ServiceProtocol for IdentifyProtocol { // Interrupt processing if error, avoid pollution if let MisbehaveResult::Disconnect = self.check_duplicate(&mut context) { error!( - "IdentifyProtocol disconnect session {:?}, reason: duplicate", + "Disconnect IdentifyProtocol session {:?} due to duplication.", session ); let _ = context.disconnect(session.id).await; @@ -277,7 +277,7 @@ impl ServiceProtocol for IdentifyProtocol { .await { error!( - "IdentifyProtocol disconnect session {:?}, reason: invalid identify message", + "Disconnect IdentifyProtocol session {:?} due to invalid identify message.", session, ); let _ = context.disconnect(session.id).await; @@ -287,7 +287,7 @@ impl ServiceProtocol for IdentifyProtocol { self.process_listens(&mut context, message.listen_addrs.clone()) { error!( - "IdentifyProtocol disconnect session {:?}, reason: invalid listen addrs: {:?}", + "Disconnect IdentifyProtocol session {:?} due to invalid listen addrs: {:?}.", session, message.listen_addrs, ); let _ = context.disconnect(session.id).await; @@ -297,7 +297,7 @@ impl ServiceProtocol for IdentifyProtocol { self.process_observed(&mut context, message.observed_addr.clone()) { error!( - "IdentifyProtocol disconnect session {:?}, reason: invalid observed addr: {}", + "Disconnect IdentifyProtocol session {:?} due to invalid observed addr: {}.", session, message.observed_addr, ); let _ = context.disconnect(session.id).await; @@ -489,7 +489,7 @@ impl Callback for IdentifyCallback { .await; } else { // The remote end cannot support all local protocols. 
- warn!("IdentifyProtocol close session, reason: the peer's flag does not meet the requirement"); + warn!("Session closed from IdentifyProtocol due to peer's flag not meeting the requirements"); return MisbehaveResult::Disconnect; } } diff --git a/network/src/protocols/mod.rs b/network/src/protocols/mod.rs index 5697109d15..f2da17e4a0 100644 --- a/network/src/protocols/mod.rs +++ b/network/src/protocols/mod.rs @@ -294,7 +294,7 @@ impl ServiceProtocol for CKBHandler { && context.proto_id != SupportProtocols::RelayV2.protocol_id() { debug!( - "session {}, protocol {} with version {}, not 3, so disconnect it", + "The version of session {}, protocol {} is {}, not 3. It will be disconnected.", context.session.id, context.proto_id, version ); let id = context.session.id; @@ -508,7 +508,7 @@ impl CKBProtocolContext for DefaultCKBProtocolContext { Ok(()) } async fn async_disconnect(&self, peer_index: PeerIndex, message: &str) -> Result<(), Error> { - debug!("disconnect peer: {}, message: {}", peer_index, message); + debug!("Disconnect peer: {}, message: {}", peer_index, message); async_disconnect_with_message(&self.async_p2p_control, peer_index, message).await?; Ok(()) } @@ -586,7 +586,7 @@ impl CKBProtocolContext for DefaultCKBProtocolContext { Ok(()) } fn disconnect(&self, peer_index: PeerIndex, message: &str) -> Result<(), Error> { - debug!("disconnect peer: {}, message: {}", peer_index, message); + debug!("Disconnect peer: {}, message: {}", peer_index, message); disconnect_with_message(&self.p2p_control, peer_index, message)?; Ok(()) } diff --git a/network/src/protocols/ping.rs b/network/src/protocols/ping.rs index 9160f6d436..984dbf2a15 100644 --- a/network/src/protocols/ping.rs +++ b/network/src/protocols/ping.rs @@ -108,7 +108,7 @@ impl PingHandler { .await .is_err() { - debug!("send message fail"); + debug!("Failed to send message"); } } } @@ -150,14 +150,14 @@ impl ServiceProtocol for PingHandler { .await .is_err() { - warn!("start ping fail"); + warn!("Failed to 
start ping"); } if context .set_service_notify(proto_id, self.timeout, CHECK_TIMEOUT_TOKEN) .await .is_err() { - warn!("start ping fail"); + warn!("Failed to start ping"); } } @@ -174,7 +174,7 @@ impl ServiceProtocol for PingHandler { "proto id [{}] open on session [{}], address: [{}], type: [{:?}], version: {}", context.proto_id, session.id, session.address, session.ty, version ); - debug!("connected sessions are: {:?}", self.connected_session_ids); + debug!("Connected sessions are: {:?}", self.connected_session_ids); // Register open ping protocol self.network_state.with_peer_registry_mut(|reg| { reg.get_peer_mut(session.id).map(|peer| { @@ -193,7 +193,7 @@ impl ServiceProtocol for PingHandler { }); }); debug!( - "proto id [{}] close on session [{}]", + "Proto id [{}] closed on session [{}]", context.proto_id, session.id ); } @@ -202,7 +202,7 @@ impl ServiceProtocol for PingHandler { let session = context.session; match PingMessage::decode(data.as_ref()) { None => { - error!("decode message error"); + error!("Message decode error"); if let Err(err) = async_disconnect_with_message(context.control(), session.id, "ping failed") .await @@ -219,7 +219,7 @@ impl ServiceProtocol for PingHandler { .await .is_err() { - debug!("send message fail"); + debug!("Failed to send message"); } } PingPayload::Pong(nonce) => { @@ -258,7 +258,7 @@ impl ServiceProtocol for PingHandler { .iter() .filter(|(_id, ps)| ps.processing && ps.elapsed() >= timeout) { - debug!("ping timeout, {:?}", id); + debug!("Ping timeout, {:?}", id); if let Err(err) = async_disconnect_with_message(context.control(), *id, "ping timeout").await { diff --git a/network/src/services/dns_seeding/mod.rs b/network/src/services/dns_seeding/mod.rs index 749b65a308..2b5432cc69 100644 --- a/network/src/services/dns_seeding/mod.rs +++ b/network/src/services/dns_seeding/mod.rs @@ -57,7 +57,7 @@ impl DnsSeedingService { >= 2 }); if enough_outbound { - debug!("Enough outbound peers"); + debug!("Enough outbound peers 
available"); return Ok(()); } @@ -82,18 +82,18 @@ impl DnsSeedingService { match SeedRecord::decode_with_pubkey(record, &pubkey) { Ok(seed_record) => { let address = seed_record.address(); - trace!("got dns txt address: {}", address); + trace!("Received DNS txt address: {}", address); addrs.push(address); } Err(err) => { debug!( - "decode dns txt record failed: {err:?}, {record:?}" + "DNS txt record decode failed: {err:?}, {record:?}" ); } } } Err(err) => { - debug!("get dns txt record error: {:?}", err); + debug!("DNS txt record retrivial error: {:?}", err); } } } diff --git a/network/src/services/dump_peer_store.rs b/network/src/services/dump_peer_store.rs index 2db8ec8295..735ace3a71 100644 --- a/network/src/services/dump_peer_store.rs +++ b/network/src/services/dump_peer_store.rs @@ -39,7 +39,7 @@ impl DumpPeerStoreService { impl Drop for DumpPeerStoreService { fn drop(&mut self) { - debug!("dump peer store before exit"); + debug!("Dump peer store before exiting"); self.dump_peer_store(); } } diff --git a/network/src/services/protocol_type_checker.rs b/network/src/services/protocol_type_checker.rs index 0b6fda4237..9a469a7d92 100644 --- a/network/src/services/protocol_type_checker.rs +++ b/network/src/services/protocol_type_checker.rs @@ -90,7 +90,7 @@ impl ProtocolTypeCheckerService { // check open protocol type if let Err(err) = self.opened_protocol_type(peer) { debug!( - "close peer {:?} due to open protocols error: {}", + "Close peer {:?} due to open protocols error: {}", peer.connected_addr, err ); if let Err(err) = disconnect_with_message( diff --git a/notify/src/lib.rs b/notify/src/lib.rs index b6a0172a8a..32d09c77e3 100644 --- a/notify/src/lib.rs +++ b/notify/src/lib.rs @@ -225,7 +225,7 @@ impl NotifyService { } fn handle_notify_new_block(&self, block: BlockView) { - trace!("event new block {:?}", block); + trace!("New block event {:?}", block); let block_hash = block.hash(); // notify all subscribers for subscriber in 
self.new_block_subscribers.values() { @@ -233,7 +233,7 @@ impl NotifyService { let subscriber = subscriber.clone(); self.handle.spawn(async move { if let Err(e) = subscriber.send(block).await { - error!("notify new block error {}", e); + error!("Failed to notify new block, error: {}", e); } }); } @@ -241,7 +241,7 @@ impl NotifyService { // notify all watchers for watcher in self.new_block_watchers.values() { if let Err(e) = watcher.send(block_hash.clone()) { - error!("notify new block watcher error {}", e); + error!("Failed to notify new block watcher, error: {}", e); } } @@ -252,9 +252,9 @@ impl NotifyService { let args = [format!("{block_hash:#x}")]; match timeout(script_timeout, Command::new(&script).args(&args).status()).await { Ok(ret) => match ret { - Ok(status) => debug!("the new_block_notify script exited with: {status}"), + Ok(status) => debug!("The new_block_notify script exited with: {status}"), Err(e) => error!( - "failed to run new_block_notify_script: {} {:?}, error: {}", + "Failed to run new_block_notify_script: {} {:?}, error: {}", script, args[0], e ), }, @@ -279,7 +279,7 @@ impl NotifyService { } fn handle_notify_new_transaction(&self, tx_entry: PoolTransactionEntry) { - trace!("event new tx {:?}", tx_entry); + trace!("New tx event {:?}", tx_entry); // notify all subscribers let tx_timeout = self.timeout.tx; // notify all subscribers @@ -288,7 +288,7 @@ impl NotifyService { let subscriber = subscriber.clone(); self.handle.spawn(async move { if let Err(e) = subscriber.send_timeout(tx_entry, tx_timeout).await { - error!("notify new transaction error {}", e); + error!("Failed to notify new transaction, error: {}", e); } }); } @@ -309,7 +309,7 @@ impl NotifyService { } fn handle_notify_proposed_transaction(&self, tx_entry: PoolTransactionEntry) { - trace!("event proposed tx {:?}", tx_entry); + trace!("Proposed tx event {:?}", tx_entry); // notify all subscribers let tx_timeout = self.timeout.tx; // notify all subscribers @@ -318,7 +318,7 @@ impl 
NotifyService { let subscriber = subscriber.clone(); self.handle.spawn(async move { if let Err(e) = subscriber.send_timeout(tx_entry, tx_timeout).await { - error!("notify proposed transaction error {}", e); + error!("Failed to notify proposed transaction, error {}", e); } }); } @@ -339,7 +339,7 @@ impl NotifyService { } fn handle_notify_reject_transaction(&self, tx_entry: (PoolTransactionEntry, Reject)) { - trace!("event reject tx {:?}", tx_entry); + trace!("Tx reject event {:?}", tx_entry); // notify all subscribers let tx_timeout = self.timeout.tx; // notify all subscribers @@ -348,7 +348,7 @@ impl NotifyService { let subscriber = subscriber.clone(); self.handle.spawn(async move { if let Err(e) = subscriber.send_timeout(tx_entry, tx_timeout).await { - error!("notify reject transaction error {}", e); + error!("Failed to notify transaction reject, error: {}", e); } }); } @@ -366,7 +366,7 @@ impl NotifyService { } fn handle_notify_network_alert(&self, alert: Alert) { - trace!("event network alert {:?}", alert); + trace!("Network alert event {:?}", alert); let alert_timeout = self.timeout.alert; let message = alert .as_reader() @@ -381,7 +381,7 @@ impl NotifyService { let alert = alert.clone(); self.handle.spawn(async move { if let Err(e) = subscriber.send_timeout(alert, alert_timeout).await { - error!("notify network_alert error {}", e); + error!("Failed to notify network_alert, error: {}", e); } }); } diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index 3704976f80..fdbab35d67 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -2147,7 +2147,7 @@ impl ChainRpcImpl { let tx_pool = self.shared.tx_pool_controller(); let tx_status = tx_pool.get_tx_status(tx_hash); if let Err(e) = tx_status { - error!("send get_tx_status request error {}", e); + error!("Send get_tx_status request error {}", e); return Err(RPCError::ckb_internal_error(e)); }; let tx_status = tx_status.unwrap(); @@ -2194,13 +2194,13 @@ impl ChainRpcImpl { let tx_pool = 
self.shared.tx_pool_controller(); let transaction_with_status = tx_pool.get_transaction_with_status(tx_hash); if let Err(e) = transaction_with_status { - error!("send get_transaction_with_status request error {}", e); + error!("Send get_transaction_with_status request error {}", e); return Err(RPCError::ckb_internal_error(e)); }; let transaction_with_status = transaction_with_status.unwrap(); if let Err(e) = transaction_with_status { - error!("get transaction_with_status from db error {}", e); + error!("Get transaction_with_status from db error {}", e); return Err(RPCError::ckb_internal_error(e)); }; let transaction_with_status = transaction_with_status.unwrap(); diff --git a/rpc/src/module/miner.rs b/rpc/src/module/miner.rs index 5313fc4c34..a334e88180 100644 --- a/rpc/src/module/miner.rs +++ b/rpc/src/module/miner.rs @@ -243,11 +243,11 @@ impl MinerRpc for MinerRpcImpl { self.shared .get_block_template(bytes_limit, proposals_limit, max_version.map(Into::into)) .map_err(|err| { - error!("send get_block_template request error {}", err); + error!("Send get_block_template request error {}", err); RPCError::ckb_internal_error(err) })? 
.map_err(|err| { - error!("get_block_template result error {}", err); + error!("Get_block_template result error {}", err); RPCError::from_any_error(err) }) } diff --git a/rpc/src/module/pool.rs b/rpc/src/module/pool.rs index 9fb1c0555b..cdad05f947 100644 --- a/rpc/src/module/pool.rs +++ b/rpc/src/module/pool.rs @@ -466,7 +466,7 @@ impl PoolRpc for PoolRpcImpl { let submit_tx = tx_pool.submit_local_tx(tx.clone()); if let Err(e) = submit_tx { - error!("send submit_tx request error {}", e); + error!("Send submit_tx request error {}", e); return Err(RPCError::ckb_internal_error(e)); } @@ -481,7 +481,7 @@ impl PoolRpc for PoolRpcImpl { let tx_pool = self.shared.tx_pool_controller(); tx_pool.remove_local_tx(tx_hash.pack()).map_err(|e| { - error!("send remove_tx request error {}", e); + error!("Send remove_tx request error {}", e); RPCError::ckb_internal_error(e) }) } @@ -490,7 +490,7 @@ impl PoolRpc for PoolRpcImpl { let tx_pool = self.shared.tx_pool_controller(); let get_tx_pool_info = tx_pool.get_tx_pool_info(); if let Err(e) = get_tx_pool_info { - error!("send get_tx_pool_info request error {}", e); + error!("Send get_tx_pool_info request error {}", e); return Err(RPCError::ckb_internal_error(e)); }; diff --git a/rpc/src/module/test.rs b/rpc/src/module/test.rs index e9c563553f..d56c85239f 100644 --- a/rpc/src/module/test.rs +++ b/rpc/src/module/test.rs @@ -605,7 +605,7 @@ impl IntegrationTestRpc for IntegrationTestRpcImpl { let tx_pool = self.shared.tx_pool_controller(); let tx_hash = tx.hash(); if let Err(e) = tx_pool.notify_txs(vec![tx]) { - error!("send notify_txs request error {}", e); + error!("Send notify_txs request error {}", e); return Err(RPCError::ckb_internal_error(e)); } Ok(tx_hash.unpack()) @@ -647,7 +647,7 @@ impl IntegrationTestRpc for IntegrationTestRpcImpl { ) .map_err(|err| { error!( - "resolve transactions error when generating block \ + "Resolve transactions error when generating block \ with block template, error: {:?}", err ); diff --git 
a/rpc/src/server.rs b/rpc/src/server.rs index 44d0cd8328..c778f961c3 100644 --- a/rpc/src/server.rs +++ b/rpc/src/server.rs @@ -47,7 +47,10 @@ impl RpcServer { .expect("config listen_address parsed"), ) .expect("Start Jsonrpc HTTP service"); - info!("Listen HTTP RPCServer on address {}", config.listen_address); + info!( + "Listen HTTP RPC server on address {}", + config.listen_address + ); let _tcp = config .tcp_listen_address @@ -75,7 +78,7 @@ impl RpcServer { .expect("config tcp_listen_address parsed"), ) .expect("Start Jsonrpc TCP service"); - info!("Listen TCP RPCServer on address {}", tcp_listen_address); + info!("Listen TCP RPC server on address {}", tcp_listen_address); tcp_server }); @@ -100,7 +103,7 @@ impl RpcServer { .expect("config ws_listen_address parsed"), ) .expect("Start Jsonrpc WebSocket service"); - info!("Listen WS RPCServer on address {}", ws_listen_address); + info!("Listen WS RPC server on address {}", ws_listen_address); ws_server }); diff --git a/script/src/error.rs b/script/src/error.rs index 74d91014a4..8d09c54b86 100644 --- a/script/src/error.rs +++ b/script/src/error.rs @@ -24,7 +24,7 @@ pub enum ScriptError { MultipleMatches, /// Non-zero exit code returns by script - #[error("ValidationFailure: see the error code {1} in the page https://nervosnetwork.github.io/ckb-script-error-codes/{0}.html#{1}")] + #[error("ValidationFailure: see error code {1} on page https://nervosnetwork.github.io/ckb-script-error-codes/{0}.html#{1}")] ValidationFailure(String, i8), /// Known bugs are detected in transaction script outputs diff --git a/shared/src/shared.rs b/shared/src/shared.rs index 377b941df7..fc3e9fea04 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -83,7 +83,7 @@ impl Shared { /// Spawn freeze background thread that periodically checks and moves ancient data from the kv database into the freezer. 
pub fn spawn_freeze(&self) -> Option { if let Some(freezer) = self.store.freezer() { - ckb_logger::info!("Freezer enable"); + ckb_logger::info!("Freezer enabled"); let signal_receiver = new_crossbeam_exit_rx(); let shared = self.clone(); let freeze_jh = thread::Builder::new() @@ -123,7 +123,7 @@ impl Shared { } if current_epoch <= THRESHOLD_EPOCH { - ckb_logger::trace!("freezer loaf"); + ckb_logger::trace!("Freezer idles"); return Ok(()); } @@ -143,7 +143,7 @@ impl Shared { ); ckb_logger::trace!( - "freezer current_epoch {} number {} threshold {}", + "Freezer current_epoch {} number {} threshold {}", current_epoch, frozen_number, threshold @@ -163,7 +163,7 @@ impl Shared { // Wipe out frozen data self.wipe_out_frozen_data(&snapshot, ret, stopped)?; - ckb_logger::trace!("freezer finish"); + ckb_logger::trace!("Freezer completed"); Ok(()) } @@ -183,7 +183,7 @@ impl Shared { // remain header for (hash, (number, txs)) in &frozen { batch.delete_block_body(*number, hash, *txs).map_err(|e| { - ckb_logger::error!("freezer delete_block_body failed {}", e); + ckb_logger::error!("Freezer delete_block_body failed {}", e); e })?; @@ -206,7 +206,7 @@ impl Shared { } } self.store.write_sync(&batch).map_err(|e| { - ckb_logger::error!("freezer write_batch delete failed {}", e); + ckb_logger::error!("Freezer write_batch delete failed {}", e); e })?; batch.clear()?; @@ -224,13 +224,13 @@ impl Shared { batch .delete_block(number.unpack(), hash, *txs) .map_err(|e| { - ckb_logger::error!("freezer delete_block_body failed {}", e); + ckb_logger::error!("Freezer delete_block_body failed {}", e); e })?; } self.store.write(&batch).map_err(|e| { - ckb_logger::error!("freezer write_batch delete failed {}", e); + ckb_logger::error!("Freezer write_batch delete failed {}", e); e })?; @@ -259,7 +259,7 @@ impl Shared { Some(start_t.as_slice()), Some(end_t.as_slice()), ) { - ckb_logger::error!("freezer compact_range {}-{} error {}", start, end, e); + ckb_logger::error!("Freezer compact_range {}-{} 
error {}", start, end, e); } } diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs index e8bb9f9773..0e6937eb02 100644 --- a/shared/src/shared_builder.rs +++ b/shared/src/shared_builder.rs @@ -55,7 +55,7 @@ pub fn open_or_create_db( let migrate = Migrate::new(&config.path); let read_only_db = migrate.open_read_only_db().map_err(|e| { - eprintln!("migrate error {e}"); + eprintln!("Migration error {e}"); ExitCode::Failure })?; @@ -63,8 +63,8 @@ pub fn open_or_create_db( match migrate.check(&db) { Ordering::Greater => { eprintln!( - "The database is created by a higher version CKB executable binary, \n\ - so that the current CKB executable binary couldn't open this database.\n\ + "The database was created by a higher version CKB executable binary \n\ + and cannot be opened by the current binary.\n\ Please download the latest CKB executable binary." ); Err(ExitCode::Failure) @@ -73,21 +73,20 @@ pub fn open_or_create_db( Ordering::Less => { if migrate.require_expensive(&db) { eprintln!( - "For optimal performance, CKB wants to migrate the data into new format.\n\ - You can use the old version CKB if you don't want to do the migration.\n\ - We strongly recommended you to use the latest stable version of CKB, \ - since the old versions may have unfixed vulnerabilities.\n\ - Run `\"{}\" migrate -C \"{}\"` and confirm by typing \"YES\" to migrate the data.\n\ - We strongly recommend that you backup the data directory before migration.", + "For optimal performance, CKB recommends migrating your data into a new format.\n\ + If you prefer to stick with the older version, \n\ + it's important to note that they may have unfixed vulnerabilities.\n\ + Before migrating, we strongly recommend backuping your data directory. 
+ To migrate, run `\"{}\" migrate -C \"{}\"` and confirm by typing \"YES\".", bin_name, root_dir.display() ); Err(ExitCode::Failure) } else { - info!("process fast migrations ..."); + info!("Processing fast migrations ..."); let bulk_load_db_db = migrate.open_bulk_load_db().map_err(|e| { - eprintln!("migrate error {e}"); + eprintln!("Migration error {e}"); ExitCode::Failure })?; @@ -105,7 +104,7 @@ pub fn open_or_create_db( } else { let db = RocksDB::open(config, COLUMNS); migrate.init_db_version(&db).map_err(|e| { - eprintln!("migrate init_db_version error {e}"); + eprintln!("Migrate init_db_version error {e}"); ExitCode::Failure })?; Ok(db) diff --git a/sync/src/net_time_checker.rs b/sync/src/net_time_checker.rs index 3d306f4102..259a2eae57 100644 --- a/sync/src/net_time_checker.rs +++ b/sync/src/net_time_checker.rs @@ -131,7 +131,7 @@ impl CKBProtocolHandler for NetTimeProtocol { ) { if let Some(true) = nc.get_peer(peer_index).map(|peer| peer.is_inbound()) { info!( - "Peer {} is not outbound but sends us time message", + "Received a time message from a non-outbound peer {}", peer_index ); } @@ -142,7 +142,7 @@ impl CKBProtocolHandler for NetTimeProtocol { { Some(timestamp) => timestamp, None => { - info!("Peer {} sends us malformed message", peer_index); + info!("Received a malformed message from peer {}", peer_index); nc.ban_peer( peer_index, BAD_MESSAGE_BAN_TIME, @@ -155,10 +155,10 @@ impl CKBProtocolHandler for NetTimeProtocol { let now: u64 = ckb_systemtime::unix_time_as_millis(); let offset: i64 = (i128::from(now) - i128::from(timestamp)) as i64; let mut net_time_checker = self.checker.write(); - debug!("new net time offset sample {}ms", offset); + debug!("New net time offset sample {}ms", offset); net_time_checker.add_sample(offset); if let Err(offset) = net_time_checker.check() { - warn!("Please check your computer's local clock({}ms offset from network peers), If your clock is wrong, it may cause unexpected errors.", offset); + warn!("Please check your 
computer's local clock ({}ms offset from network peers). Incorrect time setting may cause unexpected errors.", offset); } } } diff --git a/sync/src/relayer/mod.rs b/sync/src/relayer/mod.rs index 591d4ec477..ac801db511 100644 --- a/sync/src/relayer/mod.rs +++ b/sync/src/relayer/mod.rs @@ -667,7 +667,10 @@ impl Relayer { let message = packed::RelayMessage::new_builder().set(content).build(); let status = send_message_to(nc, peer, &message); if !status.is_ok() { - ckb_logger::error!("break asking for transactions, status: {:?}", status); + ckb_logger::error!( + "interrupted request for transactions, status: {:?}", + status + ); } } } diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 2920db6446..e880716d6d 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -80,7 +80,7 @@ impl BlockFetcher { pub fn fetch(self) -> Option>> { if self.reached_inflight_limit() { trace!( - "[block_fetcher] inflight count reach limit, can't download any more from peer {}", + "[block_fetcher] inflight count has reached the limit, preventing further downloads from peer {}", self.peer ); return None; @@ -110,7 +110,7 @@ impl BlockFetcher { Some(t) => t, None => { debug!( - "peer {} doesn't have best known header, ignore it", + "Peer {} doesn't have best known header; ignore it", self.peer ); return None; } } diff --git a/sync/src/synchronizer/get_blocks_process.rs b/sync/src/synchronizer/get_blocks_process.rs index 3a5912fe1e..b9670d5f85 100644 --- a/sync/src/synchronizer/get_blocks_process.rs +++ b/sync/src/synchronizer/get_blocks_process.rs @@ -59,7 +59,7 @@ impl<'a> GetBlocksProcess<'a> { if !active_chain.contains_block_status(&block_hash, BlockStatus::BLOCK_VALID) { debug!( - "ignoring get_block {} request from peer={} for unverified", + "Ignoring get_block {} request from peer={} as it is unverified.", block_hash, self.peer ); continue; } @@ -83,7 +83,7 @@ impl<'a> GetBlocksProcess<'a> { // We
expect that `block_hashes` is sorted descending by height. // So if we cannot find the current one from local, we cannot find // the next either. - debug!("getblocks stopping since {} is not found", block_hash); + debug!("Stopping getblocks, since {} is not found", block_hash); break; } } diff --git a/sync/src/synchronizer/get_headers_process.rs b/sync/src/synchronizer/get_headers_process.rs index eb10de59bb..3b4b44cf12 100644 --- a/sync/src/synchronizer/get_headers_process.rs +++ b/sync/src/synchronizer/get_headers_process.rs @@ -51,7 +51,7 @@ impl<'a> GetHeadersProcess<'a> { if active_chain.is_initial_block_download() { info!( - "Ignoring getheaders from peer={} because node is in initial block download", + "Ignoring getheaders from peer={} because the node is in initial block download stage.", self.peer ); self.send_in_ibd(); diff --git a/sync/src/synchronizer/headers_process.rs b/sync/src/synchronizer/headers_process.rs index 4126a526a2..1cb5d7e19f 100644 --- a/sync/src/synchronizer/headers_process.rs +++ b/sync/src/synchronizer/headers_process.rs @@ -91,7 +91,7 @@ impl<'a> HeadersProcess<'a> { } pub fn execute(self) -> Status { - debug!("HeadersProcess begin"); + debug!("HeadersProcess begins"); let shared: &SyncShared = self.synchronizer.shared(); let consensus = shared.consensus(); let headers = self @@ -103,7 +103,7 @@ impl<'a> HeadersProcess<'a> { .collect::>(); if headers.len() > MAX_HEADERS_LEN { - warn!("HeadersProcess is oversize"); + warn!("HeadersProcess is oversized"); return StatusCode::HeadersIsInvalid.with_context("oversize"); } @@ -166,7 +166,7 @@ impl<'a> HeadersProcess<'a> { } ValidationState::TemporaryInvalid => { debug!( - "HeadersProcess accept result is temporary invalid, header = {:?}", + "HeadersProcess accept result is temporarily invalid, header = {:?}", header ); return Status::ok(); @@ -201,7 +201,7 @@ impl<'a> HeadersProcess<'a> { && headers.len() != MAX_HEADERS_LEN && (!peer_flags.is_protect && !peer_flags.is_whitelist && 
peer_flags.is_outbound) { - debug!("Disconnect peer({}) is unprotected outbound", self.peer); + debug!("Disconnect an unprotected outbound peer ({})", self.peer); if let Err(err) = self .nc .disconnect(self.peer, "useless outbound peer in IBD") @@ -250,7 +250,7 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { pub fn non_contextual_check(&self, state: &mut ValidationResult) -> Result<(), bool> { self.verifier.verify(self.header).map_err(|error| { debug!( - "HeadersProcess accept {:?} error {:?}", + "HeadersProcess accepted {:?} error {:?}", self.header.number(), error ); @@ -303,7 +303,7 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { if self.prev_block_check(&mut result).is_err() { debug!( - "HeadersProcess reject invalid-parent header: {} {}", + "HeadersProcess rejected invalid-parent header: {} {}", self.header.number(), self.header.hash(), ); @@ -313,7 +313,7 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { if let Some(is_invalid) = self.non_contextual_check(&mut result).err() { debug!( - "HeadersProcess reject non-contextual header: {} {}", + "HeadersProcess rejected non-contextual header: {} {}", self.header.number(), self.header.hash(), ); @@ -325,7 +325,7 @@ impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { if self.version_check(&mut result).is_err() { debug!( - "HeadersProcess reject invalid-version header {} {}", + "HeadersProcess rejected invalid-version header: {} {}", self.header.number(), self.header.hash(), ); diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 817acfb549..8974e10ea2 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -103,9 +103,8 @@ impl BlockFetchCMD { if number != self.number && (number - self.number) % 10000 == 0 { self.number = number; info!( - "best known header number: {}, total difficulty: {:#x}, \ - require min header number on 500_000, min total difficulty: {:#x}, \ - then start to download block", + "The current best 
known header number: {}, total difficulty: {:#x}. \ + Block download minimum requirements: header number: 500_000, total difficulty: {:#x}.", number, best_known.total_difficulty(), self.sync_shared.state().min_chain_work() @@ -126,8 +125,8 @@ impl BlockFetchCMD { self.number = number; info!( "best known header number: {}, hash: {:#?}, \ - can't find assume valid target temporarily, hash: {:#?} \ - please wait", + temporarily can't find assume valid target, hash: {:#?} \ + Please wait", number, best_known.hash(), assume_valid_target @@ -221,7 +220,7 @@ impl BlockFetchCMD { SupportProtocols::Sync.protocol_id(), message.as_bytes(), ) { - debug!("synchronizer send GetBlocks error: {:?}", err); + debug!("synchronizer sending GetBlocks error: {:?}", err); } } } @@ -298,14 +297,14 @@ impl Synchronizer { if let Some(ban_time) = status.should_ban() { error!( - "receive {} from {}, ban {:?} for {}", + "Receive {} from {}. Ban {:?} for {}", item_name, peer, ban_time, status ); nc.ban_peer(peer, ban_time, status.to_string()); } else if status.should_warn() { - warn!("receive {} from {}, {}", item_name, peer, status); + warn!("Receive {} from {}, {}", item_name, peer, status); } else if !status.is_ok() { - debug!("receive {} from {}, {}", item_name, peer, status); + debug!("Receive {} from {}, {}", item_name, peer, status); } } @@ -339,7 +338,7 @@ impl Synchronizer { // NOTE: Filtering `BLOCK_STORED` but not `BLOCK_RECEIVED`, is for avoiding // stopping synchronization even when orphan_pool maintains dirty items by bugs. 
if status.contains(BlockStatus::BLOCK_STORED) { - debug!("block {} already stored", block_hash); + debug!("Block {} already stored", block_hash); Ok(false) } else if status.contains(BlockStatus::HEADER_VALID) { self.shared.insert_new_block(&self.chain, Arc::new(block)) @@ -484,7 +483,7 @@ impl Synchronizer { } } for peer in eviction { - info!("timeout eviction peer={}", peer); + info!("Timeout eviction peer={}", peer); if let Err(err) = nc.disconnect(peer, "sync timeout eviction") { debug!("synchronizer disconnect error: {:?}", err); } @@ -532,7 +531,7 @@ impl Synchronizer { } } - debug!("start sync peer={}", peer); + debug!("Start sync peer={}", peer); active_chain.send_getheaders_to_peer(nc, peer, tip.number_and_hash()); } } @@ -542,7 +541,7 @@ impl Synchronizer { ibd: IBDState, disconnect_list: &HashSet, ) -> Vec { - trace!("poll find_blocks_to_fetch select peers"); + trace!("Poll find_blocks_to_fetch selecting peers"); let state = &self .shared .state() @@ -721,8 +720,8 @@ impl CKBProtocolHandler for Synchronizer { if let packed::SyncMessageUnionReader::SendBlock(ref reader) = item { if reader.has_extra_fields() || reader.block().count_extra_fields() > 1 { info!( - "Peer {} sends us a malformed message: \ - too many fields in SendBlock", + "A malformed message from peer {}: \ + excessive fields detected in SendBlock", peer_index ); nc.ban_peer( @@ -742,8 +741,8 @@ impl CKBProtocolHandler for Synchronizer { Ok(msg) => msg.to_enum(), _ => { info!( - "Peer {} sends us a malformed message: \ - too many fields", + "A malformed message from peer {}: \ + excessive fields", peer_index ); nc.ban_peer( @@ -760,7 +759,7 @@ impl CKBProtocolHandler for Synchronizer { } } _ => { - info!("Peer {} sends us a malformed message", peer_index); + info!("A malformed message from peer {}", peer_index); nc.ban_peer( peer_index, BAD_MESSAGE_BAN_TIME, @@ -770,7 +769,7 @@ impl CKBProtocolHandler for Synchronizer { } }; - debug!("received msg {} from {}", msg.item_name(), peer_index); + 
debug!("Received msg {} from {}", msg.item_name(), peer_index); #[cfg(feature = "with_sentry")] { let sentry_hub = sentry::Hub::current(); @@ -784,7 +783,7 @@ impl CKBProtocolHandler for Synchronizer { let start_time = Instant::now(); tokio::task::block_in_place(|| self.process(nc.as_ref(), peer_index, msg)); debug!( - "process message={}, peer={}, cost={:?}", + "Process message={}, peer={}, cost={:?}", msg.item_name(), peer_index, Instant::now().saturating_duration_since(start_time), @@ -813,7 +812,7 @@ impl CKBProtocolHandler for Synchronizer { async fn notify(&mut self, nc: Arc, token: u64) { if !self.peers().state.is_empty() { let start_time = Instant::now(); - trace!("start notify token={}", token); + trace!("Start notify token={}", token); match token { SEND_GET_HEADERS_TOKEN => { self.start_sync_headers(nc.as_ref()); @@ -827,7 +826,7 @@ impl CKBProtocolHandler for Synchronizer { } self.shared.state().peers().clear_unknown_list(); if nc.remove_notify(IBD_BLOCK_FETCH_TOKEN).await.is_err() { - trace!("remove ibd block fetch fail"); + trace!("Ibd block fetch token removal failed"); } } } @@ -844,12 +843,12 @@ impl CKBProtocolHandler for Synchronizer { } trace!( - "finished notify token={} cost={:?}", + "Finished notify token={} cost={:?}", token, Instant::now().saturating_duration_since(start_time) ); } else if token == NO_PEER_CHECK_TOKEN { - debug!("no peers connected"); + debug!("No peers connected"); } } } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 3171878242..67ab26c1a7 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -204,7 +204,9 @@ impl HeadersSyncController { self.last_updated_tip_ts = now_tip_ts; self.is_close_to_the_end = false; // if the node is behind the estimated tip header too much, sync again; - trace!("headers-sync: send GetHeaders again since we behind the tip too much"); + trace!( + "headers-sync: send GetHeaders again since we are significantly behind the tip" + ); None } else { // ignore timeout 
because the tip already almost reach the real time; @@ -213,7 +215,7 @@ impl HeadersSyncController { } } else if expected_before_finished < inspect_window { self.is_close_to_the_end = true; - trace!("headers-sync: ignore timeout because the tip almost reach the real time"); + trace!("headers-sync: ignore timeout because the tip almost reaches the real time"); Some(false) } else { let spent_since_last_updated = now.saturating_sub(self.last_updated_ts); @@ -249,7 +251,7 @@ impl HeadersSyncController { // the global average speed is too slow trace!( "headers-sync: both the global average speed and the instantaneous speed \ - is slow than expected" + are slower than expected" ); Some(true) } else { @@ -1414,7 +1416,7 @@ impl SyncShared { if self.is_stored(&hash) { let descendants = self.state.remove_orphan_by_parent(&hash); debug!( - "try accepting {} descendant orphan blocks by exist parents hash", + "attempting to accept {} descendant orphan blocks with existing parents hash", descendants.len() ); for block in descendants { @@ -2298,7 +2300,7 @@ impl ActiveChain { { if Instant::now() < *last_time + GET_HEADERS_TIMEOUT { debug!( - "last send get headers from {} less than {:?} ago, ignore it", + "Last get_headers request to peer {} is less than {:?}; Ignore it.", peer, GET_HEADERS_TIMEOUT, ); return; diff --git a/tx-pool/src/block_assembler/mod.rs b/tx-pool/src/block_assembler/mod.rs index 121e79072e..a21319fc5c 100644 --- a/tx-pool/src/block_assembler/mod.rs +++ b/tx-pool/src/block_assembler/mod.rs @@ -602,7 +602,7 @@ impl BlockAssembler { .check(&mut seen_inputs, &overlay_cell_checker, snapshot) { error!( - "resolve transactions when build block template, \ + "Resolving transactions while building block template, \ tip_number: {}, tip_hash: {}, tx_hash: {}, error: {:?}", tip_header.number(), tip_header.hash(), @@ -649,7 +649,10 @@ impl BlockAssembler { timeout(notify_timeout, client.request(req)) .await .map_err(|_| { - ckb_logger::warn!("block assembler notify {} 
timed out", url); + ckb_logger::warn!( + "block assembler notifying {} timed out", + url + ); }); }); } @@ -676,7 +679,9 @@ impl BlockAssembler { Ok(status) => debug!("the command exited with: {}", status), Err(e) => error!("the script {} failed to spawn {}", script, e), }, - Err(_) => ckb_logger::warn!("block assembler notify {} timed out", script), + Err(_) => { + ckb_logger::warn!("block assembler notifying {} timed out", script) + } } }); } diff --git a/tx-pool/src/pool.rs b/tx-pool/src/pool.rs index 732cf996af..d51be1682e 100644 --- a/tx-pool/src/pool.rs +++ b/tx-pool/src/pool.rs @@ -131,14 +131,14 @@ impl TxPool { pub fn update_statics_for_remove_tx(&mut self, tx_size: usize, cycles: Cycle) { let total_tx_size = self.total_tx_size.checked_sub(tx_size).unwrap_or_else(|| { error!( - "total_tx_size {} overflow by sub {}", + "total_tx_size {} overflown by sub {}", self.total_tx_size, tx_size ); 0 }); let total_tx_cycles = self.total_tx_cycles.checked_sub(cycles).unwrap_or_else(|| { error!( - "total_tx_cycles {} overflow by sub {}", + "total_tx_cycles {} overflown by sub {}", self.total_tx_cycles, cycles ); 0 @@ -279,7 +279,7 @@ impl TxPool { for entry in removed { let tx_hash = entry.transaction().hash(); debug!( - "removed by size limit {} timestamp({})", + "Removed by size limit {} timestamp({})", tx_hash, entry.timestamp ); let reject = Reject::Full(format!( @@ -671,7 +671,7 @@ impl TxPool { Ok(recent_reject) => Some(recent_reject), Err(err) => { error!( - "Failed to open recent reject database {:?} {}", + "Failed to open the recent reject database {:?} {}", config.recent_reject, err ); None diff --git a/tx-pool/src/process.rs b/tx-pool/src/process.rs index 6e04b9dcfe..32ec43ece8 100644 --- a/tx-pool/src/process.rs +++ b/tx-pool/src/process.rs @@ -110,7 +110,7 @@ impl TxPoolService { let tip_hash = snapshot.tip_hash(); if pre_resolve_tip != tip_hash { debug!( - "submit_entry {} context changed previous:{} now:{}", + "submit_entry {} context changed. 
previous:{} now:{}", entry.proposal_short_id(), pre_resolve_tip, tip_hash @@ -338,7 +338,10 @@ impl TxPoolService { let mut tx_pool = self.tx_pool.write().await; if let Some(ref mut recent_reject) = tx_pool.recent_reject { if let Err(e) = recent_reject.put(tx_hash, reject.clone()) { - error!("record recent_reject failed {} {} {}", tx_hash, reject, e); + error!( + "Failed to record recent_reject {} {} {}", + tx_hash, reject, e + ); } } } @@ -508,7 +511,7 @@ impl TxPoolService { for orphan in orphans.into_iter() { if orphan.cycle > self.tx_pool_config.max_tx_verify_cycles { debug!( - "process_orphan {} add to chunk, find previous from {}", + "process_orphan {} added to chunk; find previous from {}", orphan.tx.hash(), tx.hash(), ); @@ -1004,7 +1007,7 @@ impl TxPoolService { if let Err(err) = tx_pool.save_into_file() { error!("failed to save pool, error: {:?}", err) } else { - info!("TxPool save successfully") + info!("TxPool saved successfully") } } @@ -1025,7 +1028,7 @@ impl TxPoolService { } } if count != 0 { - info!("{}/{} transactions are failed to process", count, total); + info!("{}/{} transaction process failed.", count, total); } } diff --git a/tx-pool/src/service.rs b/tx-pool/src/service.rs index 033615a2a7..69e183437a 100644 --- a/tx-pool/src/service.rs +++ b/tx-pool/src/service.rs @@ -333,7 +333,7 @@ impl TxPoolController { /// Load persisted txs into pool, assume that all txs are sorted fn load_persisted_data(&self, txs: Vec) -> Result<(), AnyError> { if !txs.is_empty() { - info!("Loading persisted tx-pool data, total {} txs", txs.len()); + info!("Loading persistent tx-pool data, total {} txs", txs.len()); let mut failed_txs = 0; for tx in txs { if self.submit_local_tx(tx)?.is_err() { @@ -341,10 +341,10 @@ impl TxPoolController { } } if failed_txs == 0 { - info!("Persisted tx-pool data is loaded"); + info!("Persistent tx-pool data is loaded"); } else { info!( - "Persisted tx-pool data is loaded, {} stale txs are ignored", + "Persistent tx-pool data is 
loaded, {} stale txs are ignored", failed_txs ); } @@ -471,7 +471,7 @@ impl TxPoolServiceBuilder { Ok(txs) => txs, Err(e) => { error!("{}", e.to_string()); - error!("Failed to load txs from tx-pool persisted data file, all txs are ignored"); + error!("Failed to load txs from tx-pool persistent data file, all txs are ignored"); Vec::new() } }; @@ -533,8 +533,8 @@ impl TxPoolServiceBuilder { // block_assembler.update_interval_millis set zero interval should only be used for tests, // external notification will be disabled. ckb_logger::warn!( - "block_assembler.update_interval_millis set zero interval should only be used for tests, \ - external notification will be disabled." + "block_assembler.update_interval_millis set to zero interval. \ + This should only be used for tests, as external notification will be disabled." ); self.handle.spawn(async move { loop { @@ -626,7 +626,7 @@ impl TxPoolServiceBuilder { }); self.started.store(true, Ordering::Relaxed); if let Err(err) = self.tx_pool_controller.load_persisted_data(txs) { - error!("Failed to import persisted txs, cause: {}", err); + error!("Failed to import persistent txs, cause: {}", err); } } } @@ -672,7 +672,7 @@ async fn process(mut service: TxPoolService, message: Message) { Message::GetTxPoolInfo(Request { responder, .. 
}) => { let info = service.info().await; if let Err(e) = responder.send(info) { - error!("responder send get_tx_pool_info failed {:?}", e); + error!("Responder sending get_tx_pool_info failed {:?}", e); }; } Message::BlockTemplate(Request { @@ -681,7 +681,7 @@ async fn process(mut service: TxPoolService, message: Message) { }) => { let block_template_result = service.get_block_template().await; if let Err(e) = responder.send(block_template_result) { - error!("responder send block_template_result failed {:?}", e); + error!("Responder sending block_template_result failed {:?}", e); }; } Message::SubmitLocalTx(Request { @@ -690,7 +690,7 @@ async fn process(mut service: TxPoolService, message: Message) { }) => { let result = service.resumeble_process_tx(tx, None).await; if let Err(e) = responder.send(result) { - error!("responder send submit_tx result failed {:?}", e); + error!("Responder sending submit_tx result failed {:?}", e); }; } Message::RemoveLocalTx(Request { @@ -699,7 +699,7 @@ async fn process(mut service: TxPoolService, message: Message) { }) => { let result = service.remove_tx(tx_hash).await; if let Err(e) = responder.send(result) { - error!("responder send remove_tx result failed {:?}", e); + error!("Responder sending remove_tx result failed {:?}", e); }; } Message::SubmitRemoteTx(Request { @@ -711,12 +711,12 @@ async fn process(mut service: TxPoolService, message: Message) { .resumeble_process_tx(tx, Some((declared_cycles, peer))) .await; if let Err(e) = responder.send(()) { - error!("responder send submit_tx result failed {:?}", e); + error!("Responder sending submit_tx result failed {:?}", e); }; } else { let _result = service.process_tx(tx, Some((declared_cycles, peer))).await; if let Err(e) = responder.send(()) { - error!("responder send submit_tx result failed {:?}", e); + error!("Responder sending submit_tx result failed {:?}", e); }; } } @@ -732,7 +732,7 @@ async fn process(mut service: TxPoolService, message: Message) { let tx_pool = 
service.tx_pool.read().await; proposals.retain(|id| !tx_pool.contains_proposal_id(id)); if let Err(e) = responder.send(proposals) { - error!("responder send fresh_proposals_filter failed {:?}", e); + error!("Responder sending fresh_proposals_filter failed {:?}", e); }; } Message::GetTxStatus(Request { @@ -769,7 +769,7 @@ async fn process(mut service: TxPoolService, message: Message) { }; if let Err(e) = responder.send(ret) { - error!("responder send get_tx_status failed {:?}", e) + error!("Responder sending get_tx_status failed {:?}", e) }; } Message::GetTransactionWithStatus(Request { @@ -808,7 +808,7 @@ async fn process(mut service: TxPoolService, message: Message) { }; if let Err(e) = responder.send(ret) { - error!("responder send get_tx_status failed {:?}", e) + error!("Responder sending get_tx_status failed {:?}", e) }; } Message::FetchTxs(Request { @@ -825,7 +825,7 @@ async fn process(mut service: TxPoolService, message: Message) { }) .collect(); if let Err(e) = responder.send(txs) { - error!("responder send fetch_txs failed {:?}", e); + error!("Responder sending fetch_txs failed {:?}", e); }; } Message::FetchTxsWithCycles(Request { @@ -842,7 +842,7 @@ async fn process(mut service: TxPoolService, message: Message) { }) .collect(); if let Err(e) = responder.send(txs) { - error!("responder send fetch_txs_with_cycles failed {:?}", e); + error!("Responder sending fetch_txs_with_cycles failed {:?}", e); }; } Message::NewUncle(Notify { arguments: uncle }) => { @@ -854,7 +854,7 @@ async fn process(mut service: TxPoolService, message: Message) { }) => { service.clear_pool(new_snapshot).await; if let Err(e) = responder.send(()) { - error!("responder send clear_pool failed {:?}", e) + error!("Responder sending clear_pool failed {:?}", e) }; } Message::GetPoolTxDetails(Request { @@ -874,20 +874,20 @@ async fn process(mut service: TxPoolService, message: Message) { let tx_pool = service.tx_pool.read().await; let info = tx_pool.get_all_entry_info(); if let Err(e) = 
responder.send(info) { - error!("responder send get_all_entry_info failed {:?}", e) + error!("Responder sending get_all_entry_info failed {:?}", e) }; } Message::GetAllIds(Request { responder, .. }) => { let tx_pool = service.tx_pool.read().await; let ids = tx_pool.get_ids(); if let Err(e) = responder.send(ids) { - error!("responder send get_ids failed {:?}", e) + error!("Responder sending get_ids failed {:?}", e) }; } Message::SavePool(Request { responder, .. }) => { service.save_pool().await; if let Err(e) = responder.send(()) { - error!("responder send save_pool failed {:?}", e) + error!("Responder sending save_pool failed {:?}", e) }; } #[cfg(feature = "internal")] @@ -898,7 +898,7 @@ async fn process(mut service: TxPoolService, message: Message) { service.plug_entry(entries, target).await; if let Err(e) = responder.send(()) { - error!("responder send plug_entry failed {:?}", e); + error!("Responder sending plug_entry failed {:?}", e); }; } #[cfg(feature = "internal")] @@ -914,7 +914,7 @@ async fn process(mut service: TxPoolService, message: Message) { bytes_limit.unwrap_or(max_block_bytes) as usize, ); if let Err(e) = responder.send(txs) { - error!("responder send plug_entry failed {:?}", e); + error!("Responder sending plug_entry failed {:?}", e); }; } } @@ -999,14 +999,14 @@ impl TxPoolService { PlugTarget::Pending => { for entry in entries { if let Err(err) = tx_pool.add_pending(entry) { - error!("plug entry add_pending error {}", err); + error!("Plug entry add_pending error {}", err); } } } PlugTarget::Proposed => { for entry in entries { if let Err(err) = tx_pool.add_proposed(entry) { - error!("plug entry add_proposed error {}", err); + error!("Plug entry add_proposed error {}", err); } } } diff --git a/tx-pool/src/util.rs b/tx-pool/src/util.rs index ea8757e997..c94eab5b25 100644 --- a/tx-pool/src/util.rs +++ b/tx-pool/src/util.rs @@ -47,7 +47,7 @@ pub(crate) fn check_tx_fee( if fee < min_fee { let reject = Reject::LowFeeRate(tx_pool.config.min_fee_rate, 
min_fee.as_u64(), fee.as_u64()); - ckb_logger::debug!("reject tx {}", reject); + ckb_logger::debug!("Reject tx {}", reject); return Err(reject); } Ok(fee) diff --git a/util/app-config/src/app_config.rs b/util/app-config/src/app_config.rs index 476ed1a4e4..963bd7ef18 100644 --- a/util/app-config/src/app_config.rs +++ b/util/app-config/src/app_config.rs @@ -220,7 +220,7 @@ impl AppConfig { match self { AppConfig::CKB(config) => Ok(config), _ => { - eprintln!("unmatched config file"); + eprintln!("Unmatched config file"); Err(ExitCode::Failure) } } @@ -233,7 +233,7 @@ impl AppConfig { match self { AppConfig::Miner(config) => Ok(config), _ => { - eprintln!("unmatched config file"); + eprintln!("Unmatched config file"); Err(ExitCode::Failure) } } @@ -373,7 +373,7 @@ fn ensure_ckb_dir(r: Resource) -> Result { if r.exists() { Ok(r) } else { - eprintln!("Not a CKB directory, initialize one with `ckb init`."); + eprintln!("Not a CKB directory; initialize one with `ckb init`."); Err(ExitCode::Config) } } diff --git a/util/app-config/src/cli.rs b/util/app-config/src/cli.rs index 2c4044f7f7..06bc647076 100644 --- a/util/app-config/src/cli.rs +++ b/util/app-config/src/cli.rs @@ -130,7 +130,7 @@ pub fn basic_app() -> Command { .value_name("path") .action(clap::ArgAction::Set) .help( - "Runs as if ckb was started in instead of the current working directory.", + "Run as if CKB was started in , instead of the current working directory.", ), ) .subcommand(run()) @@ -162,38 +162,38 @@ pub fn get_bin_name_and_matches(version: &Version) -> (String, ArgMatches) { fn run() -> Command { Command::new(CMD_RUN) - .about("Runs ckb node") + .about("Run CKB node") .arg( Arg::new(ARG_BA_ADVANCED) .long(ARG_BA_ADVANCED) .action(clap::ArgAction::SetTrue) - .help("Allows any block assembler code hash and args"), + .help("Allow any block assembler code hash and args"), ) .arg( Arg::new(ARG_SKIP_CHAIN_SPEC_CHECK) .long(ARG_SKIP_CHAIN_SPEC_CHECK) .action(clap::ArgAction::SetTrue) - .help("Skips 
checking the chain spec with the hash stored in the database"), + .help("Skip checking the chain spec with the hash stored in the database"), ).arg( Arg::new(ARG_OVERWRITE_CHAIN_SPEC) .long(ARG_OVERWRITE_CHAIN_SPEC) .action(clap::ArgAction::SetTrue) - .help("Overwrites the chain spec in the database with the present configured chain spec") + .help("Overwrite the chain spec in the database with the present configured chain spec") ).arg( Arg::new(ARG_ASSUME_VALID_TARGET) .long(ARG_ASSUME_VALID_TARGET) .action(clap::ArgAction::Set) .value_parser(is_h256) .help("This parameter specifies the hash of a block. \ - When the height does not reach this block's height, the execution of the script will be disabled, \ - that is, skip verifying the script content. \ - \n\nIt should be noted that when this option is enabled, the header is first synchronized to \ - the highest currently found. During this period, if the assume valid target is found, \ - the download of the block starts; If the assume valid target is not found or it's \ - timestamp within 24 hours of the current time, the target will automatically become invalid, \ - and the download of the block will be started with verify") - ) - .arg( + When the current height does not reach this block's height, script execution will be disabled, \ + meaning it will skip the verification of the script content. \ + \ + Please note that when this option is enabled, the header will be synchronized to \ + the highest block currently found. 
During this period, if the assume valid target is found, \ + the block download starts; \ + if the assume valid target is either absent or has a timestamp within 24 hours of the current time, \ + the target is considered invalid, and the block download proceeds with verification.") + ).arg( Arg::new(ARG_INDEXER) .long(ARG_INDEXER) .action(clap::ArgAction::SetTrue) @@ -202,7 +202,7 @@ fn run() -> Command { } fn miner() -> Command { - Command::new(CMD_MINER).about("Runs ckb miner").arg( + Command::new(CMD_MINER).about("Run CKB miner").arg( Arg::new(ARG_LIMIT) .short('l') .long(ARG_LIMIT) @@ -210,7 +210,7 @@ fn miner() -> Command { .value_parser(clap::value_parser!(u128)) .default_value("0") .help( - "Exit after how many nonces found; \ + "Exit after finding this specific number of nonces; \ 0 means the miner will never exit. [default: 0]", ), ) @@ -271,7 +271,7 @@ fn reset_data() -> Command { pub(crate) fn stats() -> Command { Command::new(CMD_STATS) .about( - "Statics chain information\n\ + "Chain stats\n\ Example:\n\ ckb -C stats --from 1 --to 500", ) @@ -280,26 +280,26 @@ pub(crate) fn stats() -> Command { .long(ARG_FROM) .value_parser(clap::value_parser!(u64)) .action(clap::ArgAction::Set) - .help("Specifies from block number."), + .help("Specify from block number"), ) .arg( Arg::new(ARG_TO) .long(ARG_TO) .value_parser(clap::value_parser!(u64)) .action(clap::ArgAction::Set) - .help("Specifies to block number."), + .help("Specify to block number"), ) } fn replay() -> Command { Command::new(CMD_REPLAY) - .about("replay ckb process block") + .about("Replay CKB process block") .override_help(" --tmp-target --profile 1 10,\n --tmp-target --sanity-check,\n ") .arg(Arg::new(ARG_TMP_TARGET).long(ARG_TMP_TARGET).value_parser(clap::builder::PathBufValueParser::new()).action(clap::ArgAction::Set).required(true).help( - "Specifies a target path, prof command make a temporary directory inside of target and the directory will be automatically deleted when finished", + "Specify 
a target path. The profile command makes a temporary directory within the specified target path. This temporary directory will be automatically deleted when the command completes.", )) .arg(Arg::new(ARG_PROFILE).long(ARG_PROFILE).action(clap::ArgAction::SetTrue).help( "Enable profile", @@ -307,12 +307,12 @@ fn replay() -> Command { .arg( Arg::new(ARG_FROM) .value_parser(clap::value_parser!(u64)) - .help("Specifies profile from block number."), + .help("Specify profile from block number"), ) .arg( Arg::new(ARG_TO) .value_parser(clap::value_parser!(u64)) - .help("Specifies profile to block number."), + .help("Specify profile to block number"), ) .arg( Arg::new(ARG_SANITY_CHECK).long(ARG_SANITY_CHECK).action(clap::ArgAction::SetTrue).help("Enable sanity check") @@ -328,38 +328,38 @@ fn replay() -> Command { } fn export() -> Command { - Command::new(CMD_EXPORT).about("Exports ckb data").arg( + Command::new(CMD_EXPORT).about("Export CKB data").arg( Arg::new(ARG_TARGET) .short('t') .long(ARG_TARGET) .value_name("path") .value_parser(clap::builder::PathBufValueParser::new()) .required(true) - .help("Specifies the export target path."), + .help("Specify the export target path"), ) } fn import() -> Command { - Command::new(CMD_IMPORT).about("Imports ckb data").arg( + Command::new(CMD_IMPORT).about("Import CKB data").arg( Arg::new(ARG_SOURCE) .index(1) .value_name("path") .value_parser(clap::builder::PathBufValueParser::new()) .required(true) - .help("Specifies the exported data path."), + .help("Specify the exported data path"), ) } fn migrate() -> Command { Command::new(CMD_MIGRATE) - .about("Runs ckb migration") + .about("Run CKB migration") .arg( Arg::new(ARG_MIGRATE_CHECK) .long(ARG_MIGRATE_CHECK) .action(clap::ArgAction::SetTrue) .help( - "Perform database version check without migrating, \ - if migration is in need ExitCode(0) is returned,\ + "Perform database version check without migrating. 
\ + If migration is in need, ExitCode(0) is returned; \ otherwise ExitCode(64) is returned", ), ) @@ -368,20 +368,20 @@ fn migrate() -> Command { .long(ARG_FORCE) .action(clap::ArgAction::SetTrue) .conflicts_with(ARG_MIGRATE_CHECK) - .help("Do migration without interactive prompt"), + .help("Migrate without interactive prompt"), ) } fn list_hashes() -> Command { Command::new(CMD_LIST_HASHES) - .about("Lists well known hashes") + .about("List well known hashes") .arg( Arg::new(ARG_BUNDLED) .short('b') .long(ARG_BUNDLED) .action(clap::ArgAction::SetTrue) .help( - "Lists hashes of the bundled chain specs instead of the current effective one.", + "List hashes of the bundled chain specs, instead of the current effective ones.", ), ) .arg( @@ -390,13 +390,13 @@ fn list_hashes() -> Command { .long(ARG_FORMAT) .value_parser(["json", "toml"]) .default_value("toml") - .help("Set the format of the printed hashes."), + .help("Set the format of the printed hashes"), ) } fn init() -> Command { Command::new(CMD_INIT) - .about("Creates a CKB directory or re-initializes an existing one") + .about("Create a CKB directory or re-initialize an existing one") .arg( Arg::new(ARG_INTERACTIVE) .short('i') @@ -409,7 +409,7 @@ fn init() -> Command { .short('l') .long(ARG_LIST_CHAINS) .action(clap::ArgAction::SetTrue) - .help("Lists available options for --chain"), + .help("List available options for --chain"), ) .arg( Arg::new(ARG_CHAIN) @@ -422,14 +422,14 @@ fn init() -> Command { .collect::>(), ) .default_value(DEFAULT_SPEC) - .help("Initializes CKB directory for "), + .help("Initialize CKB directory for "), ) .arg( Arg::new(ARG_IMPORT_SPEC) .long(ARG_IMPORT_SPEC) .action(clap::ArgAction::Set) .help( - "Uses the specifies file as chain spec. Specially, \ + "Use the specified file as the chain spec. 
Specially, \ The dash \"-\" denotes importing the spec from stdin encoded in base64", ), ) @@ -438,26 +438,26 @@ fn init() -> Command { .long(ARG_LOG_TO) .value_parser(["file", "stdout", "both"]) .default_value("both") - .help("Configures where the logs should print"), + .help("Configure where the logs should be printed"), ) .arg( Arg::new(ARG_FORCE) .short('f') .long(ARG_FORCE) .action(clap::ArgAction::SetTrue) - .help("Forces overwriting existing files"), + .help("Enforce overwriting existing files"), ) .arg( Arg::new(ARG_RPC_PORT) .long(ARG_RPC_PORT) .default_value(DEFAULT_RPC_PORT) - .help("Replaces CKB RPC port in the created config file"), + .help("Replace CKB RPC port in the created config file"), ) .arg( Arg::new(ARG_P2P_PORT) .long(ARG_P2P_PORT) .default_value(DEFAULT_P2P_PORT) - .help("Replaces CKB P2P port in the created config file"), + .help("Replace CKB P2P port in the created config file"), ) .arg( Arg::new(ARG_BA_CODE_HASH) @@ -466,7 +466,7 @@ fn init() -> Command { .value_parser(is_h256) .action(clap::ArgAction::Set) .help( - "Sets code_hash in [block_assembler] \ + "Set code_hash in [block_assembler] \ [default: secp256k1 if --ba-arg is present]", ), ) @@ -476,7 +476,7 @@ fn init() -> Command { .value_name("arg") .action(clap::ArgAction::Append) .value_parser(is_hex) - .help("Sets args in [block_assembler]"), + .help("Set args in [block_assembler]"), ) .arg( Arg::new(ARG_BA_HASH_TYPE) @@ -485,7 +485,7 @@ fn init() -> Command { .action(clap::ArgAction::Set) .value_parser(["data", "type", "data1"]) .default_value("type") - .help("Sets hash type in [block_assembler]"), + .help("Set hash type in [block_assembler]"), ) .group( ArgGroup::new(GROUP_BA) @@ -498,7 +498,7 @@ fn init() -> Command { .value_name("message") .value_parser(is_hex) .requires(GROUP_BA) - .help("Sets message in [block_assembler]"), + .help("Set message in [block_assembler]"), ) .arg(Arg::new("export-specs").long("export-specs").hide(true)) 
.arg(Arg::new("list-specs").long("list-specs").hide(true)) @@ -516,8 +516,8 @@ fn init() -> Command { .action(clap::ArgAction::Set) .help( "Specify a string as the genesis message. \ - Only works for dev chains. \ - If no message is provided, use current timestamp.", + This only works for dev chains. \ + If no message is provided, use the current timestamp.", ), ) } diff --git a/util/app-config/src/configs/network.rs b/util/app-config/src/configs/network.rs index d7bf9585c2..42106c6601 100644 --- a/util/app-config/src/configs/network.rs +++ b/util/app-config/src/configs/network.rs @@ -206,8 +206,8 @@ pub(crate) fn read_secret_key(path: PathBuf) -> Result { if block.parent_hash() == tip_hash { - info!("append {}, {}", block.number(), block.hash()); + info!("Append {}, {}", block.number(), block.hash()); indexer.append(&block).expect("append block should be OK"); } else { - info!("rollback {}, {}", tip_number, tip_hash); + info!("Rollback {}, {}", tip_number, tip_hash); indexer.rollback().expect("rollback block should be OK"); } } diff --git a/util/launcher/src/lib.rs b/util/launcher/src/lib.rs index 3c14a139df..c6ff62f5d7 100644 --- a/util/launcher/src/lib.rs +++ b/util/launcher/src/lib.rs @@ -99,8 +99,8 @@ impl Launcher { Some(block_assembler) } else { info!( - "Miner is disabled because block assembler is not a recommended lock format. \ - Edit ckb.toml or use `ckb run --ba-advanced` to use other lock scripts" + "Miner is disabled because block assembler uses a non-recommended lock format. 
\ + Edit ckb.toml or use `ckb run --ba-advanced` for other lock scripts" ); None @@ -108,7 +108,7 @@ impl Launcher { } _ => { - info!("Miner is disabled, edit ckb.toml to enable it"); + info!("Miner is disabled; edit ckb.toml to enable it"); None } @@ -160,9 +160,9 @@ impl Launcher { } else { // stored != configured eprintln!( - "chain_spec_hash mismatch Config({}) storage({}), pass command line argument \ - --skip-spec-check if you are sure that the two different chains are compatible; \ - or pass --overwrite-spec to force overriding stored chain spec with configured chain spec", + "chain_spec_hash mismatch: Config({}), storage({}). \ + If the two chains are compatible, pass command line argument --skip-spec-check; \ + otherwise, pass --overwrite-spec to enforce overriding the stored chain spec with the configured one.", self.args.chain_spec_hash, stored_spec_hash.expect("checked") ); return Err(ExitCode::Config); diff --git a/util/light-client-protocol-server/src/lib.rs b/util/light-client-protocol-server/src/lib.rs index c93b3c8021..d2e6bd938e 100644 --- a/util/light-client-protocol-server/src/lib.rs +++ b/util/light-client-protocol-server/src/lib.rs @@ -80,14 +80,14 @@ impl CKBProtocolHandler for LightClientProtocol { let status = self.try_process(nc.as_ref(), peer, msg); if let Some(ban_time) = status.should_ban() { error!( - "process {} from {}, ban {:?} since result is {}", + "process {} from {}; ban {:?} since result is {}", item_name, peer, ban_time, status ); nc.ban_peer(peer, ban_time, status.to_string()); } else if status.should_warn() { - warn!("process {} from {}, result is {}", item_name, peer, status); + warn!("process {} from {}; result is {}", item_name, peer, status); } else if !status.is_ok() { - debug!("process {} from {}, result is {}", item_name, peer, status); + debug!("process {} from {}; result is {}", item_name, peer, status); } } } diff --git a/util/light-client-protocol-server/src/tests/utils/network_context.rs 
b/util/light-client-protocol-server/src/tests/utils/network_context.rs index 721c5dba80..c84b84d117 100644 --- a/util/light-client-protocol-server/src/tests/utils/network_context.rs +++ b/util/light-client-protocol-server/src/tests/utils/network_context.rs @@ -62,7 +62,7 @@ impl MockNetworkContext { pub(crate) fn not_banned(&self, target: PeerIndex) -> bool { self.has_banned(target) .map(|(_, reason)| { - eprintln!("banned reason is {reason}"); + eprintln!("Banned due to {reason}"); false }) .unwrap_or(true) diff --git a/util/memory-tracker/src/process.rs b/util/memory-tracker/src/process.rs index dee8f4f0db..0a3ad8d392 100644 --- a/util/memory-tracker/src/process.rs +++ b/util/memory-tracker/src/process.rs @@ -58,7 +58,7 @@ pub fn track_current_process Result<(), String> { let make_svc = make_service_fn(move |_conn| async move { Ok::<_, Infallible>(service_fn(start_prometheus_service)) }); - ckb_logger::info!("start prometheus exporter at {}", addr); + ckb_logger::info!("Start prometheus exporter at {}", addr); handle.spawn(async move { let server = Server::bind(&addr) .serve(make_svc) .with_graceful_shutdown(async { let exit_rx: CancellationToken = new_tokio_exit_rx(); exit_rx.cancelled().await; - info!("prometheus server received exit signal, exit now"); + info!("Prometheus server received exit signal; exit now"); }); if let Err(err) = server.await { ckb_logger::error!("prometheus server error: {}", err); diff --git a/util/multisig/src/error.rs b/util/multisig/src/error.rs index 2f0a744044..d414aedd03 100644 --- a/util/multisig/src/error.rs +++ b/util/multisig/src/error.rs @@ -6,10 +6,10 @@ use ckb_error::{def_error_base_on_kind, prelude::*}; #[derive(Error, Copy, Clone, Eq, PartialEq, Debug)] pub enum ErrorKind { /// The count of signatures should be less than the count of private keys. 
- #[error("The count of sigs should less than pks.")] + #[error("The count of sigs should be less than privkeys.")] SigCountOverflow, /// The count of signatures is less than the threshold. - #[error("The count of sigs less than threshold.")] + #[error("The count of sigs is less than threshold.")] SigNotEnough, /// The verified signatures count is less than the threshold. #[error("Failed to meet threshold {threshold}, actual: {pass_sigs}.")] diff --git a/util/multisig/src/secp256k1.rs b/util/multisig/src/secp256k1.rs index d37200939f..b728307542 100644 --- a/util/multisig/src/secp256k1.rs +++ b/util/multisig/src/secp256k1.rs @@ -29,7 +29,7 @@ where .iter() .filter_map(|sig| { trace!( - "recover sig {:x?} with message {:x?}", + "Recover sig {:x?} with message {:x?}", &sig.serialize()[..], message.as_ref() ); diff --git a/util/network-alert/src/alert_relayer.rs b/util/network-alert/src/alert_relayer.rs index c8220ccddb..e4b8f506c8 100644 --- a/util/network-alert/src/alert_relayer.rs +++ b/util/network-alert/src/alert_relayer.rs @@ -87,7 +87,7 @@ impl CKBProtocolHandler for AlertRelayer { self.clear_expired_alerts(); for alert in self.notifier.lock().received_alerts() { let alert_id: u32 = alert.as_reader().raw().id().unpack(); - trace!("send alert {} to peer {}", alert_id, peer_index); + trace!("Send alert {} to peer {}", alert_id, peer_index); if let Err(err) = nc.quick_send_message_to(peer_index, alert.as_bytes()) { debug!("alert_relayer send alert when connected error: {:?}", err); } @@ -120,7 +120,7 @@ impl CKBProtocolHandler for AlertRelayer { alert.to_entity() } else { info!( - "Peer {} sends us malformed message: not utf-8 string", + "A malformed message from peer {}: not utf-8 string", peer_index ); nc.ban_peer( @@ -132,7 +132,7 @@ } } Err(err) => { - info!("Peer {} sends us malformed message: {:?}", peer_index, err); + info!("A malformed message from peer {}: {:?}", peer_index, err); nc.ban_peer( peer_index, 
BAD_MESSAGE_BAN_TIME, @@ -142,7 +142,7 @@ } }; let alert_id = alert.as_reader().raw().id().unpack(); - trace!("receive alert {} from peer {}", alert_id, peer_index); + trace!("Received alert {} from peer {}", alert_id, peer_index); // ignore alert if self.notifier.lock().has_received(alert_id) { return; @@ -150,7 +150,7 @@ // verify if let Err(err) = self.verifier.verify_signatures(&alert) { debug!( - "Peer {} sends us an alert with invalid signatures, error {:?}", + "An alert from peer {} with invalid signatures, error {:?}", peer_index, err ); nc.ban_peer( diff --git a/util/network-alert/src/notifier.rs b/util/network-alert/src/notifier.rs index dedab7598b..363d5b45e3 100644 --- a/util/network-alert/src/notifier.rs +++ b/util/network-alert/src/notifier.rs @@ -105,7 +105,7 @@ impl Notifier { // check conditions, figure out do we need to notice this alert if !self.is_version_effective(alert) { - debug!("received a version ineffective alert {:?}", alert); + debug!("Received a version ineffective alert {:?}", alert); return; } diff --git a/util/network-alert/src/tests/generate_alert_signature.rs b/util/network-alert/src/tests/generate_alert_signature.rs index 601a047791..7dd04f6762 100644 --- a/util/network-alert/src/tests/generate_alert_signature.rs +++ b/util/network-alert/src/tests/generate_alert_signature.rs @@ -47,7 +47,7 @@ fn test_signing_alert_using_dummy_keypair() { .build(); let alert_json = Alert::from(alert.clone()); println!( - "alert:\n{}", + "Alert:\n{}", serde_json::to_string_pretty(&alert_json).unwrap() ); println!("raw hash: 0x{hash:x}"); @@ -86,7 +86,7 @@ fn test_alert_20230001() { .build(); let alert_json = Alert::from(alert.clone()); println!( - "alert:\n{}", + "Alert:\n{}", serde_json::to_string_pretty(&alert_json).unwrap() ); assert!(verifier.verify_signatures(&alert).is_ok()); diff --git a/util/network-alert/src/verifier.rs 
b/util/network-alert/src/verifier.rs index 38b0ac5716..5eddd9d6ba 100644 --- a/util/network-alert/src/verifier.rs +++ b/util/network-alert/src/verifier.rs @@ -31,7 +31,7 @@ impl Verifier { /// Verify signatures pub fn verify_signatures(&self, alert: &packed::Alert) -> Result<(), AnyError> { - trace!("verify alert {:?}", alert); + trace!("Verifying alert {:?}", alert); let message = Message::from_slice(alert.calc_alert_hash().as_slice())?; let signatures: Vec = alert .signatures() diff --git a/util/runtime/src/lib.rs b/util/runtime/src/lib.rs index 17a60a9d53..0e50cbc68a 100644 --- a/util/runtime/src/lib.rs +++ b/util/runtime/src/lib.rs @@ -142,7 +142,7 @@ pub fn new_background_runtime() -> Handle { .name("GlobalRtBuilder".to_string()) .spawn(move || { let ret = runtime.block_on(async move { handle_stop_rx.recv().await }); - ckb_logger::debug!("global runtime finish {:?}", ret); + ckb_logger::debug!("Global runtime finished {:?}", ret); }) .expect("tokio runtime started"); diff --git a/util/stop-handler/src/stop_register.rs b/util/stop-handler/src/stop_register.rs index 403ab24d83..f508a75dcf 100644 --- a/util/stop-handler/src/stop_register.rs +++ b/util/stop-handler/src/stop_register.rs @@ -9,22 +9,22 @@ struct CkbServiceHandles { /// Wait all ckb services exit pub fn wait_all_ckb_services_exit() { - info!("waiting exit signal..."); + info!("Waiting exit signal..."); let exit_signal = new_crossbeam_exit_rx(); let _ = exit_signal.recv(); let mut handles = CKB_HANDLES.lock(); - debug!("wait_all_ckb_services_exit wait all threads to exit"); + debug!("wait_all_ckb_services_exit waiting all threads to exit"); for (name, join_handle) in handles.thread_handles.drain(..) 
{ match join_handle.join() { Ok(_) => { - info!("wait thread {} done", name); + info!("Waiting thread {} done.", name); } Err(e) => { - warn!("wait thread {}: ERROR: {:?}", name, e) + warn!("Waiting thread {}: ERROR: {:?}", name, e) } } } - debug!("all ckb threads have been stopped"); + debug!("All ckb threads have been stopped."); } static CKB_HANDLES: once_cell::sync::Lazy> = @@ -54,14 +54,14 @@ pub fn new_crossbeam_exit_rx() -> ckb_channel::Receiver<()> { /// Broadcast exit signals to all threads and all tokio tasks pub fn broadcast_exit_signals() { - debug!("received exit signal, broadcasting exit signal to all threads"); + debug!("Received exit signal; broadcasting exit signal to all threads"); TOKIO_EXIT.cancel(); CROSSBEAM_EXIT_SENDERS .lock() .iter() .for_each(|tx| match tx.try_send(()) { Ok(_) => {} - Err(TrySendError::Full(_)) => info!("ckb process has received exit signal"), + Err(TrySendError::Full(_)) => info!("Ckb process has received exit signal"), Err(TrySendError::Disconnected(_)) => { debug!("broadcast thread: channel is disconnected") } @@ -70,10 +70,10 @@ pub fn broadcast_exit_signals() { /// Register a thread `JoinHandle` to `CKB_HANDLES` pub fn register_thread(name: &str, thread_handle: std::thread::JoinHandle<()>) { - trace!("register thread {}", name); + trace!("Registering thread {}", name); CKB_HANDLES .lock() .thread_handles .push((name.into(), thread_handle)); - trace!("register thread done {}", name); + trace!("Thread registration completed {}", name); } diff --git a/util/types/src/core/error.rs b/util/types/src/core/error.rs index 9e807ed03d..21274bd137 100644 --- a/util/types/src/core/error.rs +++ b/util/types/src/core/error.rs @@ -125,7 +125,7 @@ pub enum TransactionError { /// Error dues to the the fact that the since rule is not respected. /// /// See also [0017-tx-valid-since](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0017-tx-valid-since/0017-tx-valid-since.md). 
- #[error("InvalidSince(Inputs[{index}]): the field since is invalid")] + #[error("InvalidSince(Inputs[{index}]): the since field is invalid")] InvalidSince { /// The index of input with invalid since field index: usize, @@ -171,7 +171,7 @@ pub enum TransactionError { }, /// The compatible error. - #[error("Compatible: the feature \"{feature}\" is used in current transaction but not enabled in current chain")] + #[error("Compatible: the feature \"{feature}\" is used in current transaction, but not enabled in current chain")] Compatible { /// The feature name. feature: &'static str, @@ -185,7 +185,7 @@ pub enum TransactionError { }, /// The internal error. - #[error("Internal: {description}, this error shouldn't happen, please report this bug to developers.")] + #[error("Internal: {description}, this error shouldn't happen; please report this bug to developers.")] Internal { /// The error description description: String, diff --git a/util/types/src/core/tx_pool.rs b/util/types/src/core/tx_pool.rs index 1254eb179e..2093447ae0 100644 --- a/util/types/src/core/tx_pool.rs +++ b/util/types/src/core/tx_pool.rs @@ -17,11 +17,11 @@ use std::collections::HashMap; #[derive(Error, Debug, Clone)] pub enum Reject { /// Transaction fee lower than config - #[error("The min fee rate is {0}, so the transaction fee should be {1} shannons at least, but only got {2}")] + #[error("The min fee rate is {0}, requiring a transaction fee of at least {1} shannons, but the fee provided is only {2}")] LowFeeRate(FeeRate, u64, u64), /// Transaction exceeded maximum ancestors count limit - #[error("Transaction exceeded maximum ancestors count limit, try send it later")] + #[error("Transaction exceeded maximum ancestors count limit; try later")] ExceededMaximumAncestorsCount, /// Transaction exceeded maximum size limit @@ -29,11 +29,11 @@ pub enum Reject { ExceededTransactionSizeLimit(u64, u64), /// Transaction are replaced because the pool is full - #[error("Transaction are replaced because the 
pool is full, {0}")] + #[error("Transaction is replaced because the pool is full, {0}")] Full(String), /// Transaction already exist in transaction_pool - #[error("Transaction({0}) already exist in transaction_pool")] + #[error("Transaction({0}) already exists in transaction_pool")] Duplicated(Byte32), /// Malformed transaction @@ -45,7 +45,7 @@ pub enum Reject { DeclaredWrongCycles(Cycle, Cycle), /// Resolve failed - #[error("Resolve failed {0}")] + #[error("Resolution failed {0}")] Resolve(OutPointError), /// Verification failed diff --git a/verification/src/error.rs b/verification/src/error.rs index 838c949cc9..d019251002 100644 --- a/verification/src/error.rs +++ b/verification/src/error.rs @@ -275,7 +275,7 @@ pub struct InvalidParentError { #[derive(Error, Debug, PartialEq, Eq, Clone)] pub enum PowError { /// Error occurs during PoW verification. - #[error("InvalidNonce: please set logger.filter to \"info,ckb-pow=debug\" to see detailed PoW verification information in the log")] + #[error("InvalidNonce: please set logger.filter to \"info,ckb-pow=debug\" for detailed PoW verification information")] InvalidNonce, } From 6db1982b80a475de135f2b7e4e6ae181938e27a8 Mon Sep 17 00:00:00 2001 From: EthanYuan Date: Tue, 28 Nov 2023 23:58:46 +0800 Subject: [PATCH 2/3] fix ci. 
--- rpc/README.md | 4 ++-- rpc/src/module/alert.rs | 2 +- rpc/src/tests/error.rs | 8 ++++---- test/src/specs/relay/transaction_relay_low_fee_rate.rs | 2 +- test/src/specs/tx_pool/orphan_tx.rs | 2 +- util/app-config/src/tests/ckb_run_replay.bats | 2 +- util/app-config/src/tests/graceful_shutdown.bats | 10 +++++----- util/app-config/src/tests/init_reset.bats | 6 +++--- util/jsonrpc-types/src/pool.rs | 2 +- util/types/src/core/tx_pool.rs | 4 ++-- 10 files changed, 21 insertions(+), 21 deletions(-) diff --git a/rpc/README.md b/rpc/README.md index ef19194fcd..3dec06f930 100644 --- a/rpc/README.md +++ b/rpc/README.md @@ -263,7 +263,7 @@ Response "error": { "code": -1000, "data": "SigNotEnough", - "message":"AlertFailedToVerifySignatures: The count of sigs less than threshold." + "message":"AlertFailedToVerifySignatures: The count of sigs is less than threshold." }, "jsonrpc": "2.0", "result": null, @@ -6548,7 +6548,7 @@ Different reject types: * `ExceededMaximumAncestorsCount`: Transaction exceeded maximum ancestors count limit * `ExceededTransactionSizeLimit`: Transaction exceeded maximum size limit * `Full`: Transaction are replaced because the pool is full -* `Duplicated`: Transaction already exist in transaction_pool +* `Duplicated`: Transaction already exists in transaction_pool * `Malformed`: Malformed transaction * `DeclaredWrongCycles`: Declared wrong cycles * `Resolve`: Resolve failed diff --git a/rpc/src/module/alert.rs b/rpc/src/module/alert.rs index cce4254e0f..d1c6ce7762 100644 --- a/rpc/src/module/alert.rs +++ b/rpc/src/module/alert.rs @@ -59,7 +59,7 @@ pub trait AlertRpc { /// "error": { /// "code": -1000, /// "data": "SigNotEnough", - /// "message":"AlertFailedToVerifySignatures: The count of sigs less than threshold." + /// "message":"AlertFailedToVerifySignatures: The count of sigs is less than threshold." 
/// }, /// "jsonrpc": "2.0", /// "result": null, diff --git a/rpc/src/tests/error.rs b/rpc/src/tests/error.rs index 39fd18a539..1ebacb93ac 100644 --- a/rpc/src/tests/error.rs +++ b/rpc/src/tests/error.rs @@ -22,13 +22,13 @@ fn test_submit_transaction_error() { let min_fee_rate = FeeRate::from_u64(500); let reject = Reject::LowFeeRate(min_fee_rate, 100, 50); assert_eq!( - "PoolRejectedTransactionByMinFeeRate: The min fee rate is 500 shannons/KW, so the transaction fee should be 100 shannons at least, but only got 50", + "PoolRejectedTransactionByMinFeeRate: The min fee rate is 500 shannons/KW, requiring a transaction fee of at least 100 shannons, but the fee provided is only 50", RPCError::from_submit_transaction_reject(&reject).message ); let reject = Reject::ExceededMaximumAncestorsCount; assert_eq!( - "PoolRejectedTransactionByMaxAncestorsCountLimit: Transaction exceeded maximum ancestors count limit, try send it later", + "PoolRejectedTransactionByMaxAncestorsCountLimit: Transaction exceeded maximum ancestors count limit; try later", RPCError::from_submit_transaction_reject(&reject).message ); @@ -37,13 +37,13 @@ fn test_submit_transaction_error() { FeeRate::from_u64(500) )); assert_eq!( - "PoolIsFull: Transaction are replaced because the pool is full, the fee_rate for this transaction is: 500 shannons/KW", + "PoolIsFull: Transaction is replaced because the pool is full, the fee_rate for this transaction is: 500 shannons/KW", RPCError::from_submit_transaction_reject(&reject).message ); let reject = Reject::Duplicated(Byte32::new([0; 32])); assert_eq!( - "PoolRejectedDuplicatedTransaction: Transaction(Byte32(0x0000000000000000000000000000000000000000000000000000000000000000)) already exist in transaction_pool", + "PoolRejectedDuplicatedTransaction: Transaction(Byte32(0x0000000000000000000000000000000000000000000000000000000000000000)) already exists in transaction_pool", RPCError::from_submit_transaction_reject(&reject).message ); diff --git 
a/test/src/specs/relay/transaction_relay_low_fee_rate.rs b/test/src/specs/relay/transaction_relay_low_fee_rate.rs index dfe086636d..1aad334416 100644 --- a/test/src/specs/relay/transaction_relay_low_fee_rate.rs +++ b/test/src/specs/relay/transaction_relay_low_fee_rate.rs @@ -36,7 +36,7 @@ impl Spec for TransactionRelayLowFeeRate { node1, 0, 10, - "reject tx The min fee rate is 1000 shannons/KW, so the transaction fee should be 242 shannons at least, but only got 0" + "Reject tx The min fee rate is 1000 shannons/KW, requiring a transaction fee of at least 242 shannons, but the fee provided is only 0" ) .is_some()); } diff --git a/test/src/specs/tx_pool/orphan_tx.rs b/test/src/specs/tx_pool/orphan_tx.rs index 64d4545bd9..c79ca37233 100644 --- a/test/src/specs/tx_pool/orphan_tx.rs +++ b/test/src/specs/tx_pool/orphan_tx.rs @@ -285,7 +285,7 @@ impl Spec for TxPoolOrphanUnordered { .err() .unwrap() .to_string() - .contains("already exist in transaction_pool")); + .contains("already exists in transaction_pool")); assert!( run_replay_tx(&net, node0, parent, 3, 1), diff --git a/util/app-config/src/tests/ckb_run_replay.bats b/util/app-config/src/tests/ckb_run_replay.bats index 6603a1aea8..4b73d414a4 100644 --- a/util/app-config/src/tests/ckb_run_replay.bats +++ b/util/app-config/src/tests/ckb_run_replay.bats @@ -24,7 +24,7 @@ function ckb_run { #@test run _ckb_run [ "$status" -eq 0 ] # assert_output --regexp "ckb_chain::chain.*block number:.*, hash:.*, size:.*, cycles:.*" - assert_output --regexp "INFO ckb_bin all tokio tasks and threads have exited, ckb shutdown" + assert_output --regexp "INFO ckb_bin All tokio tasks and threads have exited. 
CKB shutdown" } function ckb_replay { #@test diff --git a/util/app-config/src/tests/graceful_shutdown.bats b/util/app-config/src/tests/graceful_shutdown.bats index b05c006255..17ac2661b3 100644 --- a/util/app-config/src/tests/graceful_shutdown.bats +++ b/util/app-config/src/tests/graceful_shutdown.bats @@ -32,12 +32,12 @@ function ckb_graceful_shutdown { #@test assert_output --regexp "INFO ckb_sync::types::header_map HeaderMap limit_memory received exit signal, exit now" assert_output --regexp "INFO ckb_network::network NetworkService receive exit signal, start shutdown..." assert_output --regexp "INFO ckb_network::network NetworkService shutdown now" - assert_output --regexp "INFO ckb_tx_pool::process TxPool save successfully" + assert_output --regexp "INFO ckb_tx_pool::process TxPool saved successfully" assert_output --regexp "INFO ckb_tx_pool::service TxPool process_service exit now" - assert_output --regexp "INFO ckb_stop_handler::stop_register wait thread ChainService done" - assert_output --regexp "INFO ckb_stop_handler::stop_register wait thread BlockDownload done" - assert_output --regexp "INFO ckb_bin waiting all tokio tasks exit..." - assert_output --regexp "INFO ckb_bin all tokio tasks and threads have exited, ckb shutdown" + assert_output --regexp "INFO ckb_stop_handler::stop_register Waiting thread ChainService done" + assert_output --regexp "INFO ckb_stop_handler::stop_register Waiting thread BlockDownload done" + assert_output --regexp "INFO ckb_bin Waiting for all tokio tasks to exit..." + assert_output --regexp "INFO ckb_bin All tokio tasks and threads have exited. 
CKB shutdown" } teardown_file() { diff --git a/util/app-config/src/tests/init_reset.bats b/util/app-config/src/tests/init_reset.bats index def5b1ef69..35caca9fc3 100644 --- a/util/app-config/src/tests/init_reset.bats +++ b/util/app-config/src/tests/init_reset.bats @@ -14,18 +14,18 @@ _reset_all() { function init { #@test run _init [ "$status" -eq 0 ] - assert_output --regexp "[iI]nitialized CKB directory.*create.*Genesis Hash: 0x92b197aa1fba0f63633922c61c92375c9c074a93e85963554f5499fe1450d0e5" + assert_output --regexp "[iI]nitialized CKB directory.*Create.*Genesis Hash: 0x92b197aa1fba0f63633922c61c92375c9c074a93e85963554f5499fe1450d0e5" } function init_mainnet { #@test run _init_mainnet [ "$status" -eq 0 ] - assert_output --regexp "Reinitialized CKB directory.*create.*Genesis Hash: 0x92b197aa1fba0f63633922c61c92375c9c074a93e85963554f5499fe1450d0e5" + assert_output --regexp "Reinitialized CKB directory.*Create.*Genesis Hash: 0x92b197aa1fba0f63633922c61c92375c9c074a93e85963554f5499fe1450d0e5" } function reset_all { #@test run _reset_all [ "$status" -eq 0 ] - assert_output --regexp "deleting .*data" + assert_output --regexp "Deleting .*data" } setup_file() { diff --git a/util/jsonrpc-types/src/pool.rs b/util/jsonrpc-types/src/pool.rs index 48806c7bc4..7a3a36f6df 100644 --- a/util/jsonrpc-types/src/pool.rs +++ b/util/jsonrpc-types/src/pool.rs @@ -267,7 +267,7 @@ pub enum PoolTransactionReject { /// Transaction are replaced because the pool is full Full(String), - /// Transaction already exist in transaction_pool + /// Transaction already exists in transaction_pool Duplicated(String), /// Malformed transaction diff --git a/util/types/src/core/tx_pool.rs b/util/types/src/core/tx_pool.rs index 2093447ae0..5620c903ea 100644 --- a/util/types/src/core/tx_pool.rs +++ b/util/types/src/core/tx_pool.rs @@ -32,7 +32,7 @@ pub enum Reject { #[error("Transaction is replaced because the pool is full, {0}")] Full(String), - /// Transaction already exist in transaction_pool + /// 
Transaction already exists in transaction_pool #[error("Transaction({0}) already exists in transaction_pool")] Duplicated(Byte32), @@ -45,7 +45,7 @@ pub enum Reject { DeclaredWrongCycles(Cycle, Cycle), /// Resolve failed - #[error("Resolution failed {0}")] + #[error("Resolve failed {0}")] Resolve(OutPointError), /// Verification failed From b8c269889674ac4781e42064449ee2748f2d0fc5 Mon Sep 17 00:00:00 2001 From: EthanYuan Date: Thu, 30 Nov 2023 08:13:21 +0800 Subject: [PATCH 3/3] Update according to review feedback. --- network/src/network.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/src/network.rs b/network/src/network.rs index 59eb78a87e..efd51517dc 100644 --- a/network/src/network.rs +++ b/network/src/network.rs @@ -177,7 +177,7 @@ impl NetworkState { } } else { debug!( - "Report {} failure: not found in peer registry; could be on whitelist", + "Report {} failure: not found in peer registry or it is on the whitelist", session_id ); } @@ -219,7 +219,7 @@ impl NetworkState { } } else { debug!( - "Ban session({}) failed: not found in peer registry or on the whitelist", + "Ban session({}) failed: not found in peer registry or it is on the whitelist", session_id ); }