chore: apply lint fix on latest nightly #15760

Merged · 3 commits · Mar 18, 2024

Changes from all commits
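This PR mechanically applies two classes of fixes reported by clippy on the latest nightly toolchain: `clippy::assigning_clones`, which prefers `clone_from` over assigning a fresh `.clone()`, and `clippy::doc_markdown`, which asks for code-like identifiers in doc comments to be wrapped in backticks. A minimal sketch of the first pattern (the variable names below are illustrative, not taken from the diff):

```rust
// `a.clone_from(&b)` is equivalent to `a = b.clone()`, but for heap-owning
// types like `String` and `Vec` it may reuse `a`'s existing allocation
// instead of allocating a new buffer and dropping the old one.
fn main() {
    let mut config_path = String::from("etc/old_config.toml");
    let new_path = String::from("etc/new_config.toml");

    // Before the lint fix this would be: config_path = new_path.clone();
    config_path.clone_from(&new_path);

    assert_eq!(config_path, "etc/new_config.toml");
}
```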
2 changes: 1 addition & 1 deletion src/batch/src/executor/generic_exchange.rs
@@ -36,7 +36,7 @@ use crate::monitor::BatchMetricsWithTaskLabels;

pub struct GenericExchangeExecutor<CS, C> {
proto_sources: Vec<PbExchangeSource>,
-/// Mock-able CreateSource.
+/// Mock-able `CreateSource`.
source_creators: Vec<CS>,
sequential: bool,
context: C,
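The doc-comment edit above (and the many like it below) addresses `clippy::doc_markdown`: a bare identifier such as `CreateSource` in a `///` comment is flagged because rustdoc would render it as ordinary prose. A tiny sketch of the rule, using a hypothetical struct rather than the real executor:

```rust
/// Mock-able `CreateSource`.
// The backticks make rustdoc render `CreateSource` as inline code and
// silence `clippy::doc_markdown`; without them the lint fires.
pub struct ExchangeSourceHolder<CS> {
    /// Factories used to create mock sources in tests.
    pub source_creators: Vec<CS>,
}
```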
2 changes: 1 addition & 1 deletion src/batch/src/executor/join/hash_join.rs
@@ -56,7 +56,7 @@ pub struct HashJoinExecutor<K> {
original_schema: Schema,
/// Output schema after applying `output_indices`
schema: Schema,
-/// output_indices are the indices of the columns that we needed.
+/// `output_indices` are the indices of the columns that we needed.
output_indices: Vec<usize>,
/// Left child executor
probe_side_source: BoxedExecutor,
2 changes: 1 addition & 1 deletion src/batch/src/executor/join/nested_loop_join.rs
@@ -52,7 +52,7 @@ pub struct NestedLoopJoinExecutor {
/// Actual output schema
schema: Schema,
/// We may only need certain columns.
-/// output_indices are the indices of the columns that we needed.
+/// `output_indices` are the indices of the columns that we needed.
output_indices: Vec<usize>,
/// Left child executor
left_child: BoxedExecutor,
2 changes: 1 addition & 1 deletion src/batch/src/executor/merge_sort_exchange.rs
@@ -45,7 +45,7 @@ pub struct MergeSortExchangeExecutorImpl<CS, C> {
min_heap: MemMonitoredHeap<HeapElem>,
proto_sources: Vec<PbExchangeSource>,
sources: Vec<ExchangeSourceImpl>, // impl
-/// Mock-able CreateSource.
+/// Mock-able `CreateSource`.
source_creators: Vec<CS>,
schema: Schema,
task_id: TaskId,
2 changes: 1 addition & 1 deletion src/batch/src/task/task_execution.rs
@@ -681,7 +681,7 @@ impl<C: BatchTaskContext> BatchTaskExecution<C> {
}

let error = error.map(Arc::new);
-*self.failure.lock() = error.clone();
+self.failure.lock().clone_from(&error);
let err_str = error.as_ref().map(|e| e.to_report_string());
if let Err(e) = sender.close(error).await {
match e {
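The hunk above routes the same lint through an `Option`: the standard library's `Option<T>` overrides `clone_from` to delegate to the inner value's `clone_from` when both sides are `Some`, falling back to a plain clone otherwise. For the `Arc`-wrapped error in this file the rewrite is essentially a spelling change (cloning an `Arc` only bumps a refcount), but with an owned payload it can save an allocation. A sketch with a hypothetical `String` payload:

```rust
// `Some`-to-`Some` assignment reuses the destination's inner buffer.
fn record_failure(slot: &mut Option<String>, error: &Option<String>) {
    slot.clone_from(error); // was: *slot = error.clone();
}

fn main() {
    let mut failure = Some(String::from("previous error message"));
    let new_error = Some(String::from("task aborted"));
    record_failure(&mut failure, &new_error);
    assert_eq!(failure.as_deref(), Some("task aborted"));
}
```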
26 changes: 16 additions & 10 deletions src/cmd_all/src/single_node.rs
@@ -84,14 +84,14 @@ pub struct NodeSpecificOpts {

// ------- Meta Node Options -------
/// The HTTP REST-API address of the Prometheus instance associated to this cluster.
-/// This address is used to serve PromQL queries to Prometheus.
+/// This address is used to serve `PromQL` queries to Prometheus.
/// It is also used by Grafana Dashboard Service to fetch metrics and visualize them.
#[clap(long)]
pub prometheus_endpoint: Option<String>,

/// The additional selector used when querying Prometheus.
///
-/// The format is same as PromQL. Example: `instance="foo",namespace="bar"`
+/// The format is same as `PromQL`. Example: `instance="foo",namespace="bar"`
#[clap(long)]
pub prometheus_selector: Option<String>,

@@ -111,15 +111,21 @@ pub fn map_single_node_opts_to_standalone_opts(opts: SingleNodeOpts) -> ParsedSt

if let Some(prometheus_listener_addr) = &opts.prometheus_listener_addr {
meta_opts.prometheus_listener_addr = Some(prometheus_listener_addr.clone());
-compute_opts.prometheus_listener_addr = prometheus_listener_addr.clone();
-frontend_opts.prometheus_listener_addr = prometheus_listener_addr.clone();
-compactor_opts.prometheus_listener_addr = prometheus_listener_addr.clone();
+compute_opts
+    .prometheus_listener_addr
+    .clone_from(prometheus_listener_addr);
+frontend_opts
+    .prometheus_listener_addr
+    .clone_from(prometheus_listener_addr);
+compactor_opts
+    .prometheus_listener_addr
+    .clone_from(prometheus_listener_addr);
}
if let Some(config_path) = &opts.config_path {
-meta_opts.config_path = config_path.clone();
-compute_opts.config_path = config_path.clone();
-frontend_opts.config_path = config_path.clone();
-compactor_opts.config_path = config_path.clone();
+meta_opts.config_path.clone_from(config_path);
+compute_opts.config_path.clone_from(config_path);
+frontend_opts.config_path.clone_from(config_path);
+compactor_opts.config_path.clone_from(config_path);
}

let store_directory = opts.store_directory.unwrap_or_else(|| {
@@ -160,7 +166,7 @@ pub fn map_single_node_opts_to_standalone_opts(opts: SingleNodeOpts) -> ParsedSt
compute_opts.listen_addr = "0.0.0.0:5688".to_string();
compactor_opts.listen_addr = "0.0.0.0:6660".to_string();
if let Some(frontend_addr) = &opts.node_opts.listen_addr {
-frontend_opts.listen_addr = frontend_addr.clone();
+frontend_opts.listen_addr.clone_from(frontend_addr);
}

// Set Meta addresses for all nodes (force to override)
20 changes: 13 additions & 7 deletions src/cmd_all/src/standalone.rs
@@ -123,27 +123,33 @@ pub fn parse_standalone_opt_args(opts: &StandaloneOpts) -> ParsedStandaloneOpts

if let Some(config_path) = opts.config_path.as_ref() {
if let Some(meta_opts) = meta_opts.as_mut() {
-meta_opts.config_path = config_path.clone();
+meta_opts.config_path.clone_from(config_path);
}
if let Some(compute_opts) = compute_opts.as_mut() {
-compute_opts.config_path = config_path.clone();
+compute_opts.config_path.clone_from(config_path);
}
if let Some(frontend_opts) = frontend_opts.as_mut() {
-frontend_opts.config_path = config_path.clone();
+frontend_opts.config_path.clone_from(config_path);
}
if let Some(compactor_opts) = compactor_opts.as_mut() {
-compactor_opts.config_path = config_path.clone();
+compactor_opts.config_path.clone_from(config_path);
}
}
if let Some(prometheus_listener_addr) = opts.prometheus_listener_addr.as_ref() {
if let Some(compute_opts) = compute_opts.as_mut() {
-compute_opts.prometheus_listener_addr = prometheus_listener_addr.clone();
+compute_opts
+    .prometheus_listener_addr
+    .clone_from(prometheus_listener_addr);
}
if let Some(frontend_opts) = frontend_opts.as_mut() {
-frontend_opts.prometheus_listener_addr = prometheus_listener_addr.clone();
+frontend_opts
+    .prometheus_listener_addr
+    .clone_from(prometheus_listener_addr);
}
if let Some(compactor_opts) = compactor_opts.as_mut() {
-compactor_opts.prometheus_listener_addr = prometheus_listener_addr.clone();
+compactor_opts
+    .prometheus_listener_addr
+    .clone_from(prometheus_listener_addr);
}
if let Some(meta_opts) = meta_opts.as_mut() {
meta_opts.prometheus_listener_addr = Some(prometheus_listener_addr.clone());
2 changes: 1 addition & 1 deletion src/common/src/catalog/external_table.rs
@@ -42,7 +42,7 @@ pub struct CdcTableDesc {

pub value_indices: Vec<usize>,

-/// properties will be passed into the StreamScanNode
+/// properties will be passed into the `StreamScanNode`
pub connect_properties: BTreeMap<String, String>,
}

16 changes: 8 additions & 8 deletions src/common/src/config.rs
@@ -212,9 +212,9 @@ pub struct MetaConfig {
#[serde(default = "default::meta::hummock_version_checkpoint_interval_sec")]
pub hummock_version_checkpoint_interval_sec: u64,

-/// If enabled, SSTable object file and version delta will be retained.
+/// If enabled, `SSTable` object file and version delta will be retained.
///
-/// SSTable object file need to be deleted via full GC.
+/// `SSTable` object file need to be deleted via full GC.
///
/// version delta need to be manually deleted.
#[serde(default = "default::meta::enable_hummock_data_archive")]
@@ -279,11 +279,11 @@ pub struct MetaConfig {
#[serde(default = "default::meta::backend")]
pub backend: MetaBackend,

-/// Schedule space_reclaim compaction for all compaction groups with this interval.
+/// Schedule `space_reclaim` compaction for all compaction groups with this interval.
#[serde(default = "default::meta::periodic_space_reclaim_compaction_interval_sec")]
pub periodic_space_reclaim_compaction_interval_sec: u64,

-/// Schedule ttl_reclaim compaction for all compaction groups with this interval.
+/// Schedule `ttl_reclaim` compaction for all compaction groups with this interval.
#[serde(default = "default::meta::periodic_ttl_reclaim_compaction_interval_sec")]
pub periodic_ttl_reclaim_compaction_interval_sec: u64,

@@ -642,13 +642,13 @@ pub struct StorageConfig {
pub compactor_memory_limit_mb: Option<usize>,

/// Compactor calculates the maximum number of tasks that can be executed on the node based on
-/// worker_num and compactor_max_task_multiplier.
-/// max_pull_task_count = worker_num * compactor_max_task_multiplier
+/// `worker_num` and `compactor_max_task_multiplier`.
+/// `max_pull_task_count` = `worker_num` * `compactor_max_task_multiplier`
#[serde(default = "default::storage::compactor_max_task_multiplier")]
pub compactor_max_task_multiplier: f32,

/// The percentage of memory available when compactor is deployed separately.
-/// non_reserved_memory_bytes = system_memory_available_bytes * compactor_memory_available_proportion
+/// `non_reserved_memory_bytes` = `system_memory_available_bytes` * `compactor_memory_available_proportion`
#[serde(default = "default::storage::compactor_memory_available_proportion")]
pub compactor_memory_available_proportion: f64,

@@ -715,7 +715,7 @@ pub struct StorageConfig {

#[derive(Clone, Debug, Serialize, Deserialize, DefaultFromSerde, ConfigDoc)]
pub struct CacheRefillConfig {
-/// SSTable levels to refill.
+/// `SSTable` levels to refill.
#[serde(default = "default::cache_refill::data_refill_levels")]
pub data_refill_levels: Vec<u32>,

4 changes: 2 additions & 2 deletions src/common/src/session_config/mod.rs
@@ -206,7 +206,7 @@ pub struct ConfigMap {
synchronize_seqscans: bool,

/// Abort query statement that takes more than the specified amount of time in sec. If
-/// log_min_error_statement is set to ERROR or lower, the statement that timed out will also be
+/// `log_min_error_statement` is set to ERROR or lower, the statement that timed out will also be
/// logged. If this value is specified without units, it is taken as milliseconds. A value of
/// zero (the default) disables the timeout.
#[parameter(default = 0u32)]
@@ -235,7 +235,7 @@ pub struct ConfigMap {
streaming_rate_limit: ConfigNonZeroU64,

/// Cache policy for partition cache in streaming over window.
-/// Can be "full", "recent", "recent_first_n" or "recent_last_n".
+/// Can be "full", "recent", "`recent_first_n`" or "`recent_last_n`".
#[parameter(default = OverWindowCachePolicy::default(), rename = "rw_streaming_over_window_cache_policy")]
streaming_over_window_cache_policy: OverWindowCachePolicy,

14 changes: 7 additions & 7 deletions src/common/src/telemetry/mod.rs
@@ -52,19 +52,19 @@ pub enum TelemetryNodeType {

#[derive(Debug, Serialize, Deserialize)]
pub struct TelemetryReportBase {
-/// tracking_id is persistent in etcd
+/// `tracking_id` is persistent in etcd
pub tracking_id: String,
-/// session_id is reset every time node restarts
+/// `session_id` is reset every time node restarts
pub session_id: String,
-/// system_data is hardware and os info
+/// `system_data` is hardware and os info
pub system_data: SystemData,
-/// up_time is how long the node has been running
+/// `up_time` is how long the node has been running
pub up_time: u64,
-/// time_stamp is when the report is created
+/// `time_stamp` is when the report is created
pub time_stamp: u64,
-/// node_type is the node that creates the report
+/// `node_type` is the node that creates the report
pub node_type: TelemetryNodeType,
-/// is_test is whether the report is from a test environment, default to be false
+/// `is_test` is whether the report is from a test environment, default to be false
/// needed in CI for compatible tests with telemetry backend
pub is_test: bool,
}
2 changes: 1 addition & 1 deletion src/common/src/util/sort_util.rs
@@ -312,7 +312,7 @@ pub struct HeapElem {
chunk: DataChunk,
chunk_idx: usize,
elem_idx: usize,
-/// DataChunk can be encoded to accelerate the comparison.
+/// `DataChunk` can be encoded to accelerate the comparison.
/// Use `risingwave_common::util::encoding_for_comparison::encode_chunk`
/// to perform encoding, otherwise the comparison will be performed
/// column by column.
2 changes: 1 addition & 1 deletion src/compute/src/lib.rs
@@ -60,7 +60,7 @@ pub struct ComputeNodeOpts {
/// The address for contacting this instance of the service.
/// This would be synonymous with the service's "public address"
/// or "identifying address".
-/// Optional, we will use listen_addr if not specified.
+/// Optional, we will use `listen_addr` if not specified.
#[clap(long, env = "RW_ADVERTISE_ADDR")]
pub advertise_addr: Option<String>,

10 changes: 5 additions & 5 deletions src/config/docs.md
@@ -30,7 +30,7 @@ This page is automatically generated by `./risedev generate-example-config`
| enable_committed_sst_sanity_check | Enable sanity check when SSTs are committed. | false |
| enable_compaction_deterministic | Whether to enable deterministic compaction scheduling, which will disable all auto scheduling of compaction tasks. Should only be used in e2e tests. | false |
| enable_dropped_column_reclaim | Whether compactor should rewrite row to remove dropped column. | false |
-| enable_hummock_data_archive | If enabled, SSTable object file and version delta will be retained. SSTable object file need to be deleted via full GC. version delta need to be manually deleted. | false |
+| enable_hummock_data_archive | If enabled, `SSTable` object file and version delta will be retained. `SSTable` object file need to be deleted via full GC. version delta need to be manually deleted. | false |
| event_log_channel_max_size | Keeps the latest N events per channel. | 10 |
| event_log_enabled | | true |
| full_gc_interval_sec | Interval of automatic hummock full GC. | 86400 |
@@ -48,10 +48,10 @@ This page is automatically generated by `./risedev generate-example-config`
| parallelism_control_trigger_period_sec | The period of parallelism control trigger. | 10 |
| partition_vnode_count | | 16 |
| periodic_compaction_interval_sec | Schedule compaction for all compaction groups with this interval. | 60 |
-| periodic_space_reclaim_compaction_interval_sec | Schedule space_reclaim compaction for all compaction groups with this interval. | 3600 |
+| periodic_space_reclaim_compaction_interval_sec | Schedule `space_reclaim` compaction for all compaction groups with this interval. | 3600 |
| periodic_split_compact_group_interval_sec | | 10 |
| periodic_tombstone_reclaim_compaction_interval_sec | | 600 |
-| periodic_ttl_reclaim_compaction_interval_sec | Schedule ttl_reclaim compaction for all compaction groups with this interval. | 1800 |
+| periodic_ttl_reclaim_compaction_interval_sec | Schedule `ttl_reclaim` compaction for all compaction groups with this interval. | 1800 |
| split_group_size_limit | | 68719476736 |
| table_write_throughput_threshold | | 16777216 |
| unrecognized | | |
@@ -102,8 +102,8 @@ This page is automatically generated by `./risedev generate-example-config`
| compactor_fast_max_compact_task_size | | 2147483648 |
| compactor_max_sst_key_count | | 2097152 |
| compactor_max_sst_size | | 536870912 |
-| compactor_max_task_multiplier | Compactor calculates the maximum number of tasks that can be executed on the node based on worker_num and compactor_max_task_multiplier. max_pull_task_count = worker_num * compactor_max_task_multiplier | 2.5 |
-| compactor_memory_available_proportion | The percentage of memory available when compactor is deployed separately. non_reserved_memory_bytes = system_memory_available_bytes * compactor_memory_available_proportion | 0.8 |
+| compactor_max_task_multiplier | Compactor calculates the maximum number of tasks that can be executed on the node based on `worker_num` and `compactor_max_task_multiplier`. `max_pull_task_count` = `worker_num` * `compactor_max_task_multiplier` | 2.5 |
+| compactor_memory_available_proportion | The percentage of memory available when compactor is deployed separately. `non_reserved_memory_bytes` = `system_memory_available_bytes` * `compactor_memory_available_proportion` | 0.8 |
| compactor_memory_limit_mb | | |
| data_file_cache | | |
| disable_remote_compactor | | false |
4 changes: 2 additions & 2 deletions src/connector/src/parser/mod.rs
@@ -1055,7 +1055,7 @@ impl SpecificParserConfig {
config.enable_upsert = true;
}
if info.use_schema_registry {
-config.topic = get_kafka_topic(with_properties)?.clone();
+config.topic.clone_from(get_kafka_topic(with_properties)?);
config.client_config = SchemaRegistryAuth::from(&info.format_encode_options);
} else {
config.aws_auth_props = Some(
@@ -1085,7 +1085,7 @@ impl SpecificParserConfig {
config.enable_upsert = true;
}
if info.use_schema_registry {
-config.topic = get_kafka_topic(with_properties)?.clone();
+config.topic.clone_from(get_kafka_topic(with_properties)?);
config.client_config = SchemaRegistryAuth::from(&info.format_encode_options);
} else {
config.aws_auth_props = Some(
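In the two hunks above, `get_kafka_topic` already yields a reference, so `clone_from` consumes it directly and the trailing `.clone()` drops out of the call site. A sketch under assumed types (the stand-in getter below is hypothetical, not the connector's real API):

```rust
use std::collections::HashMap;

// Hypothetical stand-in for `get_kafka_topic`: look up a property and
// return a borrowed `&String` on success.
fn get_topic(props: &HashMap<String, String>) -> Result<&String, String> {
    props.get("topic").ok_or_else(|| "missing `topic` property".into())
}

fn main() -> Result<(), String> {
    let props = HashMap::from([("topic".to_string(), "events".to_string())]);

    let mut topic = String::new();
    // Before: topic = get_topic(&props)?.clone();
    topic.clone_from(get_topic(&props)?);
    assert_eq!(topic, "events");
    Ok(())
}
```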
2 changes: 1 addition & 1 deletion src/connector/src/parser/unified/avro.rs
@@ -36,7 +36,7 @@ use crate::error::ConnectorResult;
pub struct AvroParseOptions<'a> {
pub schema: Option<&'a Schema>,
/// Strict Mode
-/// If strict mode is disabled, an int64 can be parsed from an AvroInt (int32) value.
+/// If strict mode is disabled, an int64 can be parsed from an `AvroInt` (int32) value.
pub relax_numeric: bool,
}

12 changes: 6 additions & 6 deletions src/connector/src/sink/starrocks.rs
@@ -48,25 +48,25 @@ const STARROCK_MYSQL_WAIT_TIMEOUT: usize = 28800;

#[derive(Deserialize, Debug, Clone, WithOptions)]
pub struct StarrocksCommon {
-/// The StarRocks host address.
+/// The `StarRocks` host address.
#[serde(rename = "starrocks.host")]
pub host: String,
-/// The port to the MySQL server of StarRocks FE.
+/// The port to the MySQL server of `StarRocks` FE.
#[serde(rename = "starrocks.mysqlport", alias = "starrocks.query_port")]
pub mysql_port: String,
-/// The port to the HTTP server of StarRocks FE.
+/// The port to the HTTP server of `StarRocks` FE.
#[serde(rename = "starrocks.httpport", alias = "starrocks.http_port")]
pub http_port: String,
-/// The user name used to access the StarRocks database.
+/// The user name used to access the `StarRocks` database.
#[serde(rename = "starrocks.user")]
pub user: String,
/// The password associated with the user.
#[serde(rename = "starrocks.password")]
pub password: String,
-/// The StarRocks database where the target table is located
+/// The `StarRocks` database where the target table is located
#[serde(rename = "starrocks.database")]
pub database: String,
-/// The StarRocks table you want to sink data to.
+/// The `StarRocks` table you want to sink data to.
#[serde(rename = "starrocks.table")]
pub table: String,
#[serde(rename = "starrocks.partial_update")]
2 changes: 1 addition & 1 deletion src/connector/src/source/cdc/enumerator/mod.rs
@@ -35,7 +35,7 @@ pub const DATABASE_SERVERS_KEY: &str = "database.servers";

#[derive(Debug)]
pub struct DebeziumSplitEnumerator<T: CdcSourceTypeTrait> {
-/// The source_id in the catalog
+/// The `source_id` in the catalog
source_id: u32,
worker_node_addrs: Vec<HostAddr>,
_phantom: PhantomData<T>,
2 changes: 1 addition & 1 deletion src/connector/src/source/filesystem/opendal_source/mod.rs
@@ -113,7 +113,7 @@ pub struct OpendalS3Properties {
#[serde(flatten)]
pub s3_properties: S3PropertiesCommon,

-/// The following are only supported by s3_v2 (opendal) source.
+/// The following are only supported by `s3_v2` (opendal) source.
#[serde(rename = "s3.assume_role", default)]
pub assume_role: Option<String>,

3 changes: 2 additions & 1 deletion src/connector/src/source/filesystem/s3_v2/lister.rs
@@ -40,7 +40,8 @@ impl FsListInner for S3SplitEnumerator {
.await
.with_context(|| format!("failed to list objects in bucket `{}`", self.bucket_name))?;
if res.is_truncated().unwrap_or_default() {
-self.next_continuation_token = res.next_continuation_token.clone();
+self.next_continuation_token
+    .clone_from(&res.next_continuation_token);
} else {
has_finished = true;
self.next_continuation_token = None;