diff --git a/Cargo.lock b/Cargo.lock index 98df54329..1e4f5f892 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -82,9 +82,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "const-random", @@ -1567,6 +1567,7 @@ dependencies = [ "helium-proto", "http 0.2.11", "http-serde", + "humantime-serde", "lazy_static", "metrics", "metrics-exporter-prometheus", @@ -2500,6 +2501,7 @@ dependencies = [ "chrono", "config", "helium-crypto", + "humantime-serde", "reqwest", "serde", "serde_json", @@ -2969,6 +2971,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.1.0" @@ -3276,7 +3293,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6fb938100651db317719f46877a3cd82105920be4ea2ff49d55d1d65fa7bec1" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.11", "auto_ops", "either", "float_eq", @@ -3320,7 +3337,7 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.11", ] [[package]] @@ -3328,6 +3345,9 @@ name = "hashbrown" version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" +dependencies = [ + "ahash 0.8.11", +] [[package]] name = "hashlink" @@ -3642,6 +3662,16 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +[[package]] +name = "humantime-serde" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +dependencies = [ + "humantime", + "serde", +] + [[package]] name = "hyper" version = "0.14.28" @@ -3728,6 +3758,19 @@ dependencies = [ "tokio-io-timeout", ] +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper 0.14.28", + "native-tls", + "tokio", + "tokio-native-tls", +] + [[package]] name = "hyper-util" version = "0.1.2" @@ -3854,6 +3897,7 @@ dependencies = [ "helium-crypto", "helium-proto", "http 0.2.11", + "humantime-serde", "metrics", "metrics-exporter-prometheus", "poc-metrics", @@ -3923,6 +3967,7 @@ dependencies = [ "hextree", "http 0.2.11", "http-serde", + "humantime-serde", "libflate", "metrics", "metrics-exporter-prometheus", @@ -3962,6 +4007,7 @@ dependencies = [ "helium-proto", "http 0.2.11", "http-serde", + 
"humantime-serde", "iot-config", "metrics", "poc-metrics", @@ -4001,6 +4047,7 @@ dependencies = [ "helium-crypto", "helium-proto", "http-serde", + "humantime-serde", "iot-config", "itertools", "lazy_static", @@ -4352,15 +4399,6 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" -[[package]] -name = "mach2" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" -dependencies = [ - "libc", -] - [[package]] name = "matchers" version = "0.1.0" @@ -4448,23 +4486,23 @@ dependencies = [ [[package]] name = "metrics" -version = "0.21.1" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" +checksum = "2be3cbd384d4e955b231c895ce10685e3d8260c5ccffae898c96c723b0772835" dependencies = [ - "ahash 0.8.7", - "metrics-macros", + "ahash 0.8.11", "portable-atomic", ] [[package]] name = "metrics-exporter-prometheus" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" +checksum = "83a4c4718a371ddfb7806378f23617876eea8b82e5ff1324516bcd283249d9ea" dependencies = [ "base64 0.21.7", "hyper 0.14.28", + "hyper-tls", "indexmap 1.9.3", "ipnet", "metrics", @@ -4475,26 +4513,15 @@ dependencies = [ "tracing", ] -[[package]] -name = "metrics-macros" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" -dependencies = [ - "proc-macro2 1.0.69", - "quote 1.0.33", - "syn 2.0.38", -] - [[package]] name = "metrics-util" -version = "0.15.1" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" +checksum = "8b07a5eb561b8cbc16be2d216faf7757f9baf3bfb94dbb0fae3df8387a5bb47f" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.13.1", + "hashbrown 0.14.1", "metrics", "num_cpus", "quanta", @@ -4636,6 +4663,7 @@ dependencies = [ "helium-proto", "http 0.2.11", "http-serde", + "humantime-serde", "metrics", "mobile-config", "poc-metrics", @@ -4677,6 +4705,7 @@ dependencies = [ "hextree", "http-serde", "humantime", + "humantime-serde", "lazy_static", "metrics", "metrics-exporter-prometheus", @@ -4745,6 +4774,24 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "nb" version = "0.1.3" @@ -5016,12 +5063,50 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "openssl" +version = "0.10.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" +dependencies = [ + "bitflags 2.5.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.38", +] + [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-sys" +version = "0.9.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "os_str_bytes" version = "6.4.1" @@ -5274,12 +5359,16 @@ dependencies = [ name = "poc-metrics" version = "0.1.0" dependencies = [ + "futures", "metrics", "metrics-exporter-prometheus", + "reqwest", "serde", "thiserror", + "tokio", "tower", "tracing", + "tracing-subscriber", ] [[package]] @@ -5329,6 +5418,7 @@ dependencies = [ "futures-util", "helium-anchor-gen", "helium-proto", + "humantime-serde", "metrics", "metrics-exporter-prometheus", "poc-metrics", @@ -5525,13 +5615,12 @@ dependencies = [ [[package]] name = "quanta" -version = "0.11.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" +checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" dependencies = [ "crossbeam-utils", "libc", - "mach2", "once_cell", "raw-cpuid", "wasi 0.11.0+wasi-snapshot-preview1", @@ -5689,11 +5778,11 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.7.0" +version = "11.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" +checksum = "e29830cbb1290e404f24c73af91c5d8d631ce7e128691e9477556b540cd01ecd" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", ] [[package]] @@ -5892,6 +5981,7 @@ dependencies = [ "futures-util", "helium-crypto", "helium-proto", + "humantime-serde", "lazy_static", "metrics", "metrics-exporter-prometheus", @@ -6758,7 +6848,7 @@ version = "1.16.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "361cc834e5fbbe1a73f1d904fcb8ab052a665e5be6061bd1ba7ab478d7d17c9c" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.11", "blake3", "block-buffer 0.10.4", "bs58 0.4.0", @@ -6860,7 +6950,7 @@ version = "1.16.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4d44a4998ba6d9b37e89399d9ce2812e84489dd4665df619fb23366e1c2ec1b" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.11", "bincode", "bv", "caps", @@ -7882,6 +7972,16 @@ dependencies = [ "syn 2.0.38", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.23.4" @@ -8326,6 +8426,12 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77439c1b53d2303b20d9459b1ade71a83c716e3f9c34f3228c00e6f185d6c002" 
+[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "vec_map" version = "0.8.2" diff --git a/Cargo.toml b/Cargo.toml index 6ad36fdf0..47649d3ef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,7 +80,8 @@ reqwest = { version = "0", default-features = false, features = [ ] } beacon = { git = "https://github.com/helium/proto", branch = "master" } humantime = "2" -metrics = "0.21" +humantime-serde = "1" +metrics = ">=0.22" metrics-exporter-prometheus = "0" tracing = "0" tracing-subscriber = { version = "0", default-features = false, features = [ diff --git a/boost_manager/Cargo.toml b/boost_manager/Cargo.toml index 802beea11..cadf8b971 100644 --- a/boost_manager/Cargo.toml +++ b/boost_manager/Cargo.toml @@ -48,3 +48,4 @@ http = {workspace = true} http-serde = {workspace = true} solana = {path = "../solana"} solana-sdk = {workspace = true} +humantime-serde = { workspace = true } diff --git a/boost_manager/pkg/settings-template.toml b/boost_manager/pkg/settings-template.toml index 46cb7f764..c325a6815 100644 --- a/boost_manager/pkg/settings-template.toml +++ b/boost_manager/pkg/settings-template.toml @@ -3,7 +3,7 @@ log = "boost_manager=info,solana=debug" # Cache location for generated boost manager outputs; Required cache = "/tmp/oracles/boost-manager" -start_after = 1702602001 +start_after = "2023-12-15 01:00:00Z" enable_solana_integration = true diff --git a/boost_manager/src/db.rs b/boost_manager/src/db.rs index 6dfac3ccb..6503b4f49 100644 --- a/boost_manager/src/db.rs +++ b/boost_manager/src/db.rs @@ -1,7 +1,8 @@ use crate::OnChainStatus; -use chrono::{DateTime, Duration, Utc}; +use chrono::{DateTime, Utc}; use file_store::hex_boost::BoostedHexActivation; use sqlx::{postgres::PgRow, FromRow, Pool, Postgres, Row, Transaction}; +use std::time::Duration; const MAX_RETRIES: i32 = 10; const MAX_BATCH_COUNT: i32 = 200; diff --git a/boost_manager/src/main.rs b/boost_manager/src/main.rs index adb022086..f3be11d49 100644 --- a/boost_manager/src/main.rs +++ b/boost_manager/src/main.rs @@ -85,6 +85,8 @@ impl Server { file_upload::FileUpload::from_settings_tm(&settings.output).await?; let store_base_path = path::Path::new(&settings.cache); + let reward_check_interval = settings.reward_check_interval; + // setup the receiver for the rewards manifest files let file_store = FileStore::from_settings(&settings.verifier).await?; let (manifest_receiver, manifest_server) = @@ -92,9 +94,9 @@ impl Server { .state(pool.clone()) .store(file_store) .prefix(FileType::RewardManifest.to_string()) - .lookback(LookbackBehavior::StartAfter(settings.start_after())) - .poll_duration(settings.reward_check_interval()) - .offset(settings.reward_check_interval() * 2) + .lookback(LookbackBehavior::StartAfter(settings.start_after)) + .poll_duration(reward_check_interval) + .offset(reward_check_interval * 2) .create() .await?; @@ -124,12 +126,12 @@ impl Server { let updater = Updater::new( pool.clone(), settings.enable_solana_integration, - settings.activation_check_interval(), + settings.activation_check_interval, settings.txn_batch_size(), solana, )?; - let purger = Purger::new(pool.clone(), settings.retention_period()); + let purger = Purger::new(pool.clone(), settings.retention_period); TaskManager::builder() .add_task(file_upload_server) diff --git a/boost_manager/src/purger.rs b/boost_manager/src/purger.rs index 48d691d95..0e6c41949 100--- 
a/boost_manager/src/purger.rs +++ b/boost_manager/src/purger.rs @@ -1,5 +1,4 @@ use crate::db; -use chrono::Duration as ChronoDuration; use futures::{future::LocalBoxFuture, TryFutureExt}; use sqlx::{Pool, Postgres}; use std::time::Duration; @@ -9,7 +8,7 @@ const PURGE_INTERVAL: Duration = Duration::from_secs(30); pub struct Purger { pool: Pool<Postgres>, - retention_period: ChronoDuration, + retention_period: Duration, } impl ManagedTask for Purger { @@ -27,7 +26,7 @@ } impl Purger { - pub fn new(pool: Pool<Postgres>, retention_period: ChronoDuration) -> Self { + pub fn new(pool: Pool<Postgres>, retention_period: Duration) -> Self { Self { pool, retention_period, } } @@ -50,7 +49,7 @@ } } -pub async fn purge(pool: &Pool<Postgres>, retention_period: ChronoDuration) -> anyhow::Result<()> { +pub async fn purge(pool: &Pool<Postgres>, retention_period: Duration) -> anyhow::Result<()> { let num_records_purged = db::purge_stale_records(pool, retention_period).await?; tracing::info!("purged {} stale records", num_records_purged); Ok(()) diff --git a/boost_manager/src/settings.rs b/boost_manager/src/settings.rs index 6ff34cd5a..2679a781d 100644 --- a/boost_manager/src/settings.rs +++ b/boost_manager/src/settings.rs @@ -1,5 +1,6 @@ -use chrono::{DateTime, Duration as ChronoDuration, TimeZone, Utc}; +use chrono::{DateTime, Utc}; use config::{Config, Environment, File}; +use humantime_serde::re::humantime; use serde::Deserialize; use std::{path::Path, time::Duration}; @@ -11,13 +12,16 @@ pub struct Settings { pub log: String, /// Cache location for generated verified reports pub cache: String, - /// Reward files check interval in seconds. (Default is 900; 15 minutes) - #[serde(default = "default_reward_check_interval")] - pub reward_check_interval: i64, - /// Hex Activation check interval in seconds. (Default is 900; 15 minutes) + /// Reward files check interval. (Default is 15 minutes) + #[serde(with = "humantime_serde", default = "default_reward_check_interval")] + pub reward_check_interval: Duration, + /// Hex Activation check interval. 
(Default is 15 minutes) /// determines how often we will check the DB for queued txns to solana - #[serde(default = "default_activation_check_interval")] - pub activation_check_interval: i64, + #[serde( + with = "humantime_serde", + default = "default_activation_check_interval" + )] + pub activation_check_interval: Duration, pub database: db_store::Settings, pub verifier: file_store::Settings, pub mobile_config_client: mobile_config::ClientSettings, @@ -27,33 +31,33 @@ pub enable_solana_integration: bool, pub solana: Option, #[serde(default = "default_start_after")] - pub start_after: u64, + pub start_after: DateTime<Utc>, // the number of records to fit per solana txn #[serde(default = "default_txn_batch_size")] pub txn_batch_size: u32, // default retention period in seconds - #[serde(default = "default_retention_period")] - pub retention_period: i64, + #[serde(with = "humantime_serde", default = "default_retention_period")] + pub retention_period: Duration, } -fn default_retention_period() -> i64 { - 86400 * 7 // 7 days +fn default_retention_period() -> Duration { + humantime::parse_duration("7 days").unwrap() } fn default_txn_batch_size() -> u32 { 18 } -fn default_reward_check_interval() -> i64 { - 900 +fn default_reward_check_interval() -> Duration { + humantime::parse_duration("15 minutes").unwrap() } -fn default_activation_check_interval() -> i64 { - 900 +fn default_activation_check_interval() -> Duration { + humantime::parse_duration("15 minutes").unwrap() } -pub fn default_start_after() -> u64 { - 0 +pub fn default_start_after() -> DateTime<Utc> { + DateTime::UNIX_EPOCH } pub fn default_log() -> String { @@ -83,25 +87,7 @@ impl Settings { .and_then(|config| config.try_deserialize()) } - pub fn reward_check_interval(&self) -> ChronoDuration { - ChronoDuration::seconds(self.reward_check_interval) - } - - pub fn activation_check_interval(&self) -> Duration { - Duration::from_secs(self.activation_check_interval as u64) - } - - pub fn retention_period(&self) -> ChronoDuration { - ChronoDuration::seconds(self.retention_period) - } - pub fn txn_batch_size(&self) -> usize { self.txn_batch_size as usize } - - pub fn start_after(&self) -> DateTime<Utc> { - Utc.timestamp_opt(self.start_after as i64, 0) - .single() - .unwrap() - } } diff --git a/boost_manager/src/telemetry.rs b/boost_manager/src/telemetry.rs index 7589b68df..4437b9ed1 100644 --- a/boost_manager/src/telemetry.rs +++ b/boost_manager/src/telemetry.rs @@ -16,7 +16,7 @@ pub async fn last_reward_processed_time( db: &Pool<Postgres>, datetime: DateTime<Utc>, ) -> anyhow::Result<()> { - metrics::gauge!(LAST_REWARD_PROCESSED_TIME, datetime.timestamp() as f64); + metrics::gauge!(LAST_REWARD_PROCESSED_TIME).set(datetime.timestamp() as f64); meta::store(db, LAST_REWARD_PROCESSED_TIME, datetime.timestamp()).await?; Ok(()) diff --git a/boost_manager/src/updater.rs b/boost_manager/src/updater.rs index 02b0350a1..22eafda50 100644 --- a/boost_manager/src/updater.rs +++ b/boost_manager/src/updater.rs @@ -143,9 +143,9 @@ where async fn check_failed_activations(&self) -> Result<()> { let num_marked_failed = db::update_failed_activations(&self.pool).await?; - metrics::counter!("failed_activations", num_marked_failed); + metrics::counter!("failed_activations").increment(num_marked_failed); let total_failed_count = db::get_failed_activations_count(&self.pool).await?; - metrics::gauge!("db_failed_row_count", total_failed_count as f64); + metrics::gauge!("db_failed_row_count").set(total_failed_count as f64); if total_failed_count > 0 { tracing::warn!("{} 
failed status activations ", total_failed_count); }; @@ -159,7 +159,7 @@ where summed_activations_count: u64, ) -> Result<()> { tracing::info!("processed batch of {} activations successfully", batch_size); - metrics::counter!("success_activations", summed_activations_count); + metrics::counter!("success_activations").increment(summed_activations_count); db::update_success_batch(&self.pool, ids).await?; Ok(()) } diff --git a/boost_manager/tests/integrations/purger_tests.rs b/boost_manager/tests/integrations/purger_tests.rs index 254be53e9..654875106 100644 --- a/boost_manager/tests/integrations/purger_tests.rs +++ b/boost_manager/tests/integrations/purger_tests.rs @@ -18,7 +18,7 @@ async fn test_purge(pool: PgPool) -> anyhow::Result<()> { assert_eq!(7, count); // do da purge - purger::purge(&pool, Duration::days(7)).await?; + purger::purge(&pool, Duration::days(7).to_std()?).await?; // assert the db contains the expected number of records post purge let count: i64 = sqlx::query_scalar("select count(*) from activated_hexes") diff --git a/db_store/src/meta.rs b/db_store/src/meta.rs index d4dfc73b0..ffaa99aa6 100644 --- a/db_store/src/meta.rs +++ b/db_store/src/meta.rs @@ -6,11 +6,11 @@ macro_rules! query_exec_timed { ( $name:literal, $query:expr, $meth:ident, $exec:expr ) => {{ match poc_metrics::record_duration!(concat!($name, "_duration"), $query.$meth($exec).await) { Ok(x) => { - metrics::increment_counter!(concat!($name, "_count"), "status" => "ok"); + metrics::counter!(concat!($name, "_count"), "status" => "ok").increment(1); Ok(x) } Err(e) => { - metrics::increment_counter!(concat!($name, "_count"), "status" => "error"); + metrics::counter!(concat!($name, "_count"), "status" => "error").increment(1); Err(Error::SqlError(e)) } } diff --git a/db_store/src/metric_tracker.rs b/db_store/src/metric_tracker.rs index 19357c237..b0be3db2c 100644 --- a/db_store/src/metric_tracker.rs +++ b/db_store/src/metric_tracker.rs @@ -14,7 +14,7 @@ async fn run(size_name: String, idle_name: String, pool: sqlx::Pool String { "https://api.github.com/repos/helium/denylist/releases/latest".to_string() } -fn default_trigger_interval() -> u64 { - 21600 +fn default_trigger_interval() -> Duration { + humantime::parse_duration("6 hours").unwrap() } impl Settings { @@ -58,10 +59,6 @@ impl Settings { .map_err(Error::from) } - pub fn trigger_interval(&self) -> Duration { - Duration::from_secs(self.trigger) - } - pub fn sign_keys(&self) -> std::result::Result, helium_crypto::Error> { self.sign_keys .iter() diff --git a/file_store/src/file_info_poller.rs b/file_store/src/file_info_poller.rs index 4815ba878..a7bf6950a 100644 --- a/file_store/src/file_info_poller.rs +++ b/file_store/src/file_info_poller.rs @@ -1,11 +1,11 @@ use crate::{file_store, traits::MsgDecode, Error, FileInfo, FileStore, Result}; use aws_sdk_s3::types::ByteStream; -use chrono::{DateTime, Duration, Utc}; +use chrono::{DateTime, Utc}; use derive_builder::Builder; use futures::{future::LocalBoxFuture, stream::BoxStream, StreamExt}; use futures_util::TryFutureExt; use retainer::Cache; -use std::{collections::VecDeque, marker::PhantomData, sync::Arc}; +use std::{collections::VecDeque, marker::PhantomData, sync::Arc, time::Duration}; use task_manager::ManagedTask; use tokio::sync::mpsc::{Receiver, Sender}; @@ -68,9 +68,8 @@ where let latency = Utc::now() - self.file_info.timestamp; metrics::gauge!( "file-processing-latency", - latency.num_seconds() as f64, "file-type" => self.file_info.prefix.clone(), "process-name" => self.process_name.clone(), - ); + 
).set(latency.num_seconds() as f64); recorder.record(&self.process_name, &self.file_info).await?; Ok(futures::stream::iter(self.data.into_iter()).boxed()) @@ -86,14 +85,14 @@ pub enum LookbackBehavior { #[derive(Debug, Clone, Builder)] #[builder(pattern = "owned")] pub struct FileInfoPollerConfig { - #[builder(default = "Duration::seconds(DEFAULT_POLL_DURATION_SECS)")] + #[builder(default = "DEFAULT_POLL_DURATION")] poll_duration: Duration, state: S, store: FileStore, prefix: String, parser: P, lookback: LookbackBehavior, - #[builder(default = "Duration::minutes(10)")] + #[builder(default = "Duration::from_secs(10 * 60)")] offset: Duration, #[builder(default = "5")] queue_size: usize, @@ -263,10 +262,7 @@ } fn poll_duration(&self) -> std::time::Duration { - self.config - .poll_duration - .to_std() - .unwrap_or(DEFAULT_POLL_DURATION) + self.config.poll_duration } async fn is_already_processed(&self, file_info: &FileInfo) -> Result<bool> { diff --git a/file_store/src/file_sink.rs b/file_store/src/file_sink.rs index 13d512608..0081e53ca 100644 --- a/file_store/src/file_sink.rs +++ b/file_store/src/file_sink.rs @@ -127,7 +127,7 @@ impl FileSinkBuilder { metric: self.metric, }; - metrics::register_counter!(client.metric, vec![OK_LABEL]); + metrics::counter!(client.metric, vec![OK_LABEL]); let mut sink = FileSink { target_path: self.target_path, @@ -172,22 +172,22 @@ impl FileSinkClient { tokio::select! { result = self.sender.send_timeout(Message::Data(on_write_tx, bytes), SEND_TIMEOUT) => match result { Ok(_) => { - metrics::increment_counter!( + metrics::counter!( self.metric, labels .chain(std::iter::once(OK_LABEL)) .collect::<Vec<Label>>() - ); + ).increment(1); tracing::debug!("file_sink write succeeded for {:?}", self.metric); Ok(on_write_rx) } Err(SendTimeoutError::Closed(_)) => { - metrics::increment_counter!( + metrics::counter!( self.metric, labels .chain(std::iter::once(ERROR_LABEL)) .collect::<Vec<Label>>() - ); + ).increment(1); tracing::error!("file_sink write failed for {:?} channel closed", self.metric); Err(Error::channel()) } diff --git a/ingest/Cargo.toml b/ingest/Cargo.toml index f6cbc5d26..b8df84ebe 100644 --- a/ingest/Cargo.toml +++ b/ingest/Cargo.toml @@ -37,6 +37,7 @@ metrics-exporter-prometheus = { workspace = true } task-manager = { path = "../task_manager" } rand = { workspace = true } custom-tracing = { path = "../custom_tracing", features = ["grpc"] } +humantime-serde = { workspace = true } [dev-dependencies] backon = "0" diff --git a/ingest/src/server_iot.rs b/ingest/src/server_iot.rs index 4145944ce..737d3254d 100644 --- a/ingest/src/server_iot.rs +++ b/ingest/src/server_iot.rs @@ -344,8 +344,6 @@ impl poc_lora::PocLora for GrpcServer { } pub async fn grpc_server(settings: &Settings) -> Result<()> { - let grpc_addr = settings.listen_addr()?; - // Initialize uploader let (file_upload, file_upload_server) = file_upload::FileUpload::from_settings_tm(&settings.output).await?; @@ -378,13 +376,14 @@ beacon_report_sink, witness_report_sink, required_network: settings.network, - address: grpc_addr, - session_key_offer_timeout: settings.session_key_offer_timeout(), - session_key_timeout: settings.session_key_timeout(), + address: settings.listen_addr, + session_key_offer_timeout: settings.session_key_offer_timeout, + session_key_timeout: settings.session_key_timeout, }; tracing::info!( - "grpc listening on {grpc_addr} and server mode {:?}", + "grpc listening on {} and server mode {:?}", + settings.listen_addr, settings.mode ); diff 
--git a/ingest/src/server_mobile.rs b/ingest/src/server_mobile.rs index a0116eb47..60ac67b34 100644 --- a/ingest/src/server_mobile.rs +++ b/ingest/src/server_mobile.rs @@ -342,8 +342,6 @@ impl poc_mobile::PocMobile for GrpcServer { } pub async fn grpc_server(settings: &Settings) -> Result<()> { - let grpc_addr = settings.listen_addr()?; - // Initialize uploader let (file_upload, file_upload_server) = file_upload::FileUpload::from_settings_tm(&settings.output).await?; @@ -461,12 +459,13 @@ pub async fn grpc_server(settings: &Settings) -> Result<()> { invalidated_radio_threshold_report_sink, coverage_object_report_sink, required_network: settings.network, - address: grpc_addr, + address: settings.listen_addr, api_token, }; tracing::info!( - "grpc listening on {grpc_addr} and server mode {:?}", + "grpc listening on {} and server mode {:?}", + settings.listen_addr, settings.mode ); diff --git a/ingest/src/settings.rs b/ingest/src/settings.rs index 6dc041a4b..3b0f648f6 100644 --- a/ingest/src/settings.rs +++ b/ingest/src/settings.rs @@ -1,11 +1,8 @@ use config::{Config, Environment, File}; use helium_crypto::Network; +use humantime_serde::re::humantime; use serde::Deserialize; -use std::{ - net::{AddrParseError, SocketAddr}, - path::Path, - str::FromStr, -}; +use std::{net::SocketAddr, path::Path, time::Duration}; #[derive(Debug, Deserialize)] pub struct Settings { @@ -20,17 +17,20 @@ pub struct Settings { pub mode: Mode, /// Listen address. Required. Default is 0.0.0.0:9081 #[serde(default = "default_listen_addr")] - pub listen: String, + pub listen_addr: SocketAddr, /// Local folder for storing intermediate files pub cache: String, /// Network required in all public keys: mainnet | testnet pub network: Network, /// Timeout of session key offer in seconds - #[serde(default = "default_session_key_offer_timeout")] - pub session_key_offer_timeout: u64, + #[serde( + with = "humantime_serde", + default = "default_session_key_offer_timeout" + )] + pub session_key_offer_timeout: Duration, /// Timeout of session key session in seconds - #[serde(default = "default_session_key_timeout")] - pub session_key_timeout: u64, + #[serde(with = "humantime_serde", default = "default_session_key_timeout")] + pub session_key_timeout: Duration, /// Settings for exposed public API /// Target bucket for uploads pub output: file_store::Settings, @@ -41,16 +41,16 @@ pub struct Settings { pub metrics: poc_metrics::Settings, } -pub fn default_session_key_timeout() -> u64 { - 30 * 60 +pub fn default_session_key_timeout() -> Duration { + humantime::parse_duration("30 minutes").unwrap() } -pub fn default_session_key_offer_timeout() -> u64 { - 5 +pub fn default_session_key_offer_timeout() -> Duration { + humantime::parse_duration("5 seconds").unwrap() } -pub fn default_listen_addr() -> String { - "0.0.0.0:9081".to_string() +pub fn default_listen_addr() -> SocketAddr { + "0.0.0.0:9081".parse().unwrap() } pub fn default_log() -> String { @@ -96,16 +96,4 @@ impl Settings { .build() .and_then(|config| config.try_deserialize()) } - - pub fn listen_addr(&self) -> Result { - SocketAddr::from_str(&self.listen) - } - - pub fn session_key_offer_timeout(&self) -> std::time::Duration { - std::time::Duration::from_secs(self.session_key_offer_timeout) - } - - pub fn session_key_timeout(&self) -> std::time::Duration { - std::time::Duration::from_secs(self.session_key_timeout) - } } diff --git a/ingest/tests/iot_ingest.rs b/ingest/tests/iot_ingest.rs index 5d8d700a5..94d19cf87 100644 --- a/ingest/tests/iot_ingest.rs +++ 
b/ingest/tests/iot_ingest.rs @@ -1,4 +1,4 @@ -use std::{net::SocketAddr, str::FromStr}; +use std::net::{SocketAddr, TcpListener}; use backon::{ExponentialBuilder, Retryable}; use file_store::file_sink::{FileSinkClient, Message as SinkMessage}; @@ -12,7 +12,7 @@ use helium_proto::services::poc_lora::{ }; use ingest::server_iot::GrpcServer; use prost::Message; -use rand::{rngs::OsRng, Rng}; +use rand::rngs::OsRng; use task_manager::TaskManager; use tokio::{sync::mpsc::error::TryRecvError, task::LocalSet, time::timeout}; use tokio_stream::{wrappers::ReceiverStream, StreamExt}; @@ -22,12 +22,12 @@ use tonic::{transport::Channel, Streaming}; async fn initialize_session_and_send_beacon_and_witness() { let (beacon_client, mut beacons) = create_file_sink(); let (witness_client, mut witnesses) = create_file_sink(); - let port = get_port(); + let addr = get_socket_addr().expect("socket addr"); LocalSet::new() .run_until(async move { tokio::task::spawn_local(async move { - let server = create_test_server(port, beacon_client, witness_client, None, None); + let server = create_test_server(addr, beacon_client, witness_client, None, None); TaskManager::builder() .add_task(server) .build() @@ -38,7 +38,7 @@ async fn initialize_session_and_send_beacon_and_witness() { let pub_key = generate_keypair(); let session_key = generate_keypair(); - let mut client = connect_and_stream(port).await; + let mut client = connect_and_stream(addr).await; let offer = client.receive_offer().await; client @@ -75,12 +75,12 @@ async fn initialize_session_and_send_beacon_and_witness() { async fn stream_stops_after_incorrectly_signed_init_request() { let (beacon_client, _) = create_file_sink(); let (witness_client, _) = create_file_sink(); - let port = get_port(); + let addr = get_socket_addr().expect("socket addr"); LocalSet::new() .run_until(async move { tokio::task::spawn_local(async move { - let server = create_test_server(port, beacon_client, witness_client, None, None); + let server = create_test_server(addr, beacon_client, witness_client, None, None); TaskManager::builder() .add_task(server) .build() @@ -91,7 +91,7 @@ async fn stream_stops_after_incorrectly_signed_init_request() { let pub_key = generate_keypair(); let session_key = generate_keypair(); - let mut client = connect_and_stream(port).await; + let mut client = connect_and_stream(addr).await; let offer = client.receive_offer().await; client @@ -113,12 +113,12 @@ async fn stream_stops_after_incorrectly_signed_init_request() { async fn stream_stops_after_incorrectly_signed_beacon() { let (beacon_client, beacons) = create_file_sink(); let (witness_client, _) = create_file_sink(); - let port = get_port(); + let addr = get_socket_addr().expect("socket addr"); LocalSet::new() .run_until(async move { tokio::task::spawn_local(async move { - let server = create_test_server(port, beacon_client, witness_client, None, None); + let server = create_test_server(addr, beacon_client, witness_client, None, None); TaskManager::builder() .add_task(server) .build() @@ -129,7 +129,7 @@ async fn stream_stops_after_incorrectly_signed_beacon() { let pub_key = generate_keypair(); let session_key = generate_keypair(); - let mut client = connect_and_stream(port).await; + let mut client = connect_and_stream(addr).await; let offer = client.receive_offer().await; client @@ -154,12 +154,12 @@ async fn stream_stops_after_incorrectly_signed_beacon() { async fn stream_stops_after_incorrect_beacon_pubkey() { let (beacon_client, beacons) = create_file_sink(); let (witness_client, _) = 
create_file_sink(); - let port = get_port(); + let addr = get_socket_addr().expect("socket addr"); LocalSet::new() .run_until(async move { tokio::task::spawn_local(async move { - let server = create_test_server(port, beacon_client, witness_client, None, None); + let server = create_test_server(addr, beacon_client, witness_client, None, None); TaskManager::builder() .add_task(server) .build() @@ -170,7 +170,7 @@ async fn stream_stops_after_incorrect_beacon_pubkey() { let pub_key = generate_keypair(); let session_key = generate_keypair(); - let mut client = connect_and_stream(port).await; + let mut client = connect_and_stream(addr).await; let offer = client.receive_offer().await; client @@ -198,12 +198,12 @@ async fn stream_stops_after_incorrect_beacon_pubkey() { async fn stream_stops_after_incorrectly_signed_witness() { let (beacon_client, _) = create_file_sink(); let (witness_client, witnesses) = create_file_sink(); - let port = get_port(); + let addr = get_socket_addr().expect("socket addr"); LocalSet::new() .run_until(async move { tokio::task::spawn_local(async move { - let server = create_test_server(port, beacon_client, witness_client, None, None); + let server = create_test_server(addr, beacon_client, witness_client, None, None); TaskManager::builder() .add_task(server) .build() @@ -214,7 +214,7 @@ async fn stream_stops_after_incorrectly_signed_witness() { let pub_key = generate_keypair(); let session_key = generate_keypair(); - let mut client = connect_and_stream(port).await; + let mut client = connect_and_stream(addr).await; let offer = client.receive_offer().await; client @@ -239,12 +239,12 @@ async fn stream_stops_after_incorrectly_signed_witness() { async fn stream_stops_after_incorrect_witness_pubkey() { let (beacon_client, _) = create_file_sink(); let (witness_client, witnesses) = create_file_sink(); - let port = get_port(); + let addr = get_socket_addr().expect("socket addr"); LocalSet::new() .run_until(async move { tokio::task::spawn_local(async move { - let server = create_test_server(port, beacon_client, witness_client, None, None); + let server = create_test_server(addr, beacon_client, witness_client, None, None); TaskManager::builder() .add_task(server) .build() @@ -255,7 +255,7 @@ async fn stream_stops_after_incorrect_witness_pubkey() { let pub_key = generate_keypair(); let session_key = generate_keypair(); - let mut client = connect_and_stream(port).await; + let mut client = connect_and_stream(addr).await; let offer = client.receive_offer().await; client @@ -283,12 +283,12 @@ async fn stream_stops_after_incorrect_witness_pubkey() { async fn stream_stop_if_client_attempts_to_initiliaze_2nd_session() { let (beacon_client, mut beacons) = create_file_sink(); let (witness_client, _) = create_file_sink(); - let port = get_port(); + let addr = get_socket_addr().expect("socket addr"); LocalSet::new() .run_until(async move { tokio::task::spawn_local(async move { - let server = create_test_server(port, beacon_client, witness_client, None, None); + let server = create_test_server(addr, beacon_client, witness_client, None, None); TaskManager::builder() .add_task(server) .build() @@ -299,7 +299,7 @@ async fn stream_stop_if_client_attempts_to_initiliaze_2nd_session() { let pub_key = generate_keypair(); let session_key = generate_keypair(); - let mut client = connect_and_stream(port).await; + let mut client = connect_and_stream(addr).await; let offer = client.receive_offer().await; client @@ -337,13 +337,13 @@ async fn stream_stop_if_client_attempts_to_initiliaze_2nd_session() { async 
fn stream_stops_if_init_not_sent_within_timeout() { let (beacon_client, _) = create_file_sink(); let (witness_client, _) = create_file_sink(); - let port = get_port(); + let addr = get_socket_addr().expect("socket addr"); LocalSet::new() .run_until(async move { tokio::task::spawn_local(async move { let server = - create_test_server(port, beacon_client, witness_client, Some(500), None); + create_test_server(addr, beacon_client, witness_client, Some(500), None); TaskManager::builder() .add_task(server) .build() .start() .await }); - let mut client = connect_and_stream(port).await; + let mut client = connect_and_stream(addr).await; let _offer = client.receive_offer().await; client.assert_closed().await; @@ -363,13 +363,13 @@ async fn stream_stops_on_session_timeout() { let (beacon_client, mut beacons) = create_file_sink(); let (witness_client, _) = create_file_sink(); - let port = get_port(); + let addr = get_socket_addr().expect("socket addr"); LocalSet::new() .run_until(async move { tokio::task::spawn_local(async move { let server = - create_test_server(port, beacon_client, witness_client, Some(500), Some(900)); + create_test_server(addr, beacon_client, witness_client, Some(500), Some(900)); TaskManager::builder() .add_task(server) .build() .start() .await }); - let mut client = connect_and_stream(port).await; + let mut client = connect_and_stream(addr).await; let offer = client.receive_offer().await; let pub_key = generate_keypair(); @@ -449,8 +449,8 @@ fn create_file_sink() -> (FileSinkClient, MockFileSinkReceiver) { ) } -async fn connect_and_stream(port: u64) -> TestClient { - let mut client = (|| PocLoraClient::connect(format!("http://127.0.0.1:{port}"))) +async fn connect_and_stream(socket_addr: SocketAddr) -> TestClient { + let mut client = (|| PocLoraClient::connect(format!("http://{socket_addr}"))) .retry(&ExponentialBuilder::default()) .await .expect("client connect"); @@ -572,7 +572,7 @@ impl TestClient { } fn create_test_server( - port: u64, + socket_addr: SocketAddr, beacon_file_sink: FileSinkClient, witness_file_sink: FileSinkClient, offer_timeout: Option<u64>, timeout: Option<u64>, ) -> GrpcServer { @@ -584,7 +584,7 @@ beacon_report_sink: beacon_file_sink, witness_report_sink: witness_file_sink, required_network: Network::MainNet, - address: SocketAddr::from_str(&format!("127.0.0.1:{port}")).expect("socket address"), + address: socket_addr, session_key_offer_timeout: std::time::Duration::from_millis(offer_timeout), session_key_timeout: std::time::Duration::from_millis(timeout), } @@ -598,6 +598,7 @@ fn seconds(s: u64) -> std::time::Duration { std::time::Duration::from_secs(s) } -fn get_port() -> u64 { - rand::thread_rng().gen_range(6000..10000) +fn get_socket_addr() -> anyhow::Result<SocketAddr> { + let listener = TcpListener::bind("127.0.0.1:0")?; + Ok(listener.local_addr()?) 
} diff --git a/iot_config/Cargo.toml b/iot_config/Cargo.toml index b7c724ab0..218dffc0f 100644 --- a/iot_config/Cargo.toml +++ b/iot_config/Cargo.toml @@ -42,6 +42,7 @@ tracing = {workspace = true} tracing-subscriber = {workspace = true} triggered = {workspace = true} task-manager = { path = "../task_manager" } +humantime-serde = { workspace = true } [dev-dependencies] rand = { workspace = true } diff --git a/iot_config/src/db_cleaner.rs b/iot_config/src/db_cleaner.rs index ae089e6fa..7da7b440a 100644 --- a/iot_config/src/db_cleaner.rs +++ b/iot_config/src/db_cleaner.rs @@ -1,6 +1,7 @@ -use chrono::{DateTime, Duration, Utc}; +use chrono::{DateTime, Utc}; use futures::TryFutureExt; use sqlx::{Pool, Postgres, Transaction}; +use std::time::Duration; use task_manager::ManagedTask; const SLEEP_INTERVAL: std::time::Duration = std::time::Duration::from_secs(12 * 60 * 60); diff --git a/iot_config/src/main.rs b/iot_config/src/main.rs index 94d5a0966..04982a68b 100644 --- a/iot_config/src/main.rs +++ b/iot_config/src/main.rs @@ -68,8 +68,6 @@ impl Daemon { // Create on-chain metadata pool let metadata_pool = settings.metadata.connect("iot-config-metadata").await?; - let listen_addr = settings.listen_addr()?; - let (auth_updater, auth_cache) = AuthCache::new(settings.admin_pubkey()?, &pool).await?; let (region_updater, region_map) = RegionMapReader::new(&pool).await?; let (delegate_key_updater, delegate_key_cache) = org::delegate_keys_cache(&pool).await?; @@ -104,6 +102,7 @@ impl Daemon { region_updater, )?; + let listen_addr = settings.listen; let pubkey = settings .signing_keypair() .map(|keypair| keypair.public_key().to_string())?; @@ -118,7 +117,7 @@ impl Daemon { admin_svc, }; - let db_cleaner = DbCleaner::new(pool.clone(), settings.deleted_entry_retention()); + let db_cleaner = DbCleaner::new(pool.clone(), settings.deleted_entry_retention); TaskManager::builder() .add_task(grpc_server) diff --git a/iot_config/src/settings.rs b/iot_config/src/settings.rs index 040ce760a..ebf3c3dbf 100644 --- a/iot_config/src/settings.rs +++ b/iot_config/src/settings.rs @@ -1,11 +1,7 @@ -use chrono::Duration; use config::{Config, Environment, File}; +use humantime_serde::re::humantime; use serde::Deserialize; -use std::{ - net::{AddrParseError, SocketAddr}, - path::Path, - str::FromStr, -}; +use std::{net::SocketAddr, path::Path, str::FromStr, time::Duration}; #[derive(Debug, Deserialize)] pub struct Settings { @@ -15,13 +11,13 @@ pub struct Settings { pub log: String, /// Listen address. Required. 
Default is 0.0.0.0:8080 #[serde(default = "default_listen_addr")] - pub listen: String, + pub listen: SocketAddr, /// File from which to load config server signing keypair pub keypair: String, /// B58 encoded public key of the admin keypair pub admin: String, - #[serde(default = "default_deleted_entry_retention")] - pub deleted_entry_retention: u64, + #[serde(with = "humantime_serde", default = "default_deleted_entry_retention")] + pub deleted_entry_retention: Duration, pub database: db_store::Settings, /// Settings passed to the db_store crate for connecting to /// the database for Solana on-chain data @@ -33,13 +29,12 @@ pub fn default_log() -> String { "iot_config=debug".to_string() } -pub fn default_listen_addr() -> String { - "0.0.0.0:8080".to_string() +pub fn default_listen_addr() -> SocketAddr { + "0.0.0.0:8080".parse().unwrap() } -pub fn default_deleted_entry_retention() -> u64 { - // 48 hours - 48 * 60 * 60 +pub fn default_deleted_entry_retention() -> Duration { + humantime::parse_duration("48 hours").unwrap() } impl Settings { @@ -66,10 +61,6 @@ impl Settings { .and_then(|config| config.try_deserialize()) } - pub fn listen_addr(&self) -> Result { - SocketAddr::from_str(&self.listen) - } - pub fn signing_keypair(&self) -> Result> { let data = std::fs::read(&self.keypair).map_err(helium_crypto::Error::from)?; Ok(helium_crypto::Keypair::try_from(&data[..])?) @@ -78,8 +69,4 @@ impl Settings { pub fn admin_pubkey(&self) -> Result { helium_crypto::PublicKey::from_str(&self.admin) } - - pub fn deleted_entry_retention(&self) -> Duration { - Duration::seconds(self.deleted_entry_retention as i64) - } } diff --git a/iot_config/src/telemetry.rs b/iot_config/src/telemetry.rs index 883c91f73..d55a8adc5 100644 --- a/iot_config/src/telemetry.rs +++ b/iot_config/src/telemetry.rs @@ -14,19 +14,19 @@ const GATEWAY_CHAIN_LOOKUP_DURATION_METRIC: &str = concat!(env!("CARGO_PKG_NAME"), "-", "gateway-info-lookup-duration"); pub fn initialize() { - metrics::gauge!(STREAM_METRIC, 0.0); + metrics::gauge!(STREAM_METRIC).set(0.0); } pub fn count_request(service: &'static str, rpc: &'static str) { - metrics::increment_counter!(RPC_METRIC, "service" => service, "rpc" => rpc); + metrics::counter!(RPC_METRIC, "service" => service, "rpc" => rpc).increment(1); } pub fn count_gateway_info_lookup(result: &'static str) { - metrics::increment_counter!(GATEWAY_CHAIN_LOOKUP_METRIC, "result" => result); + metrics::counter!(GATEWAY_CHAIN_LOOKUP_METRIC, "result" => result).increment(1); } pub fn gauge_hexes(cells: usize) { - metrics::gauge!(REGION_HEX_METRIC, cells as f64); + metrics::gauge!(REGION_HEX_METRIC).set(cells as f64); } pub fn count_region_lookup( @@ -35,37 +35,38 @@ pub fn count_region_lookup( ) { let reported_region = reported_region.map_or_else(|| "LOOKUP_FAILED".to_string(), |region| region.to_string()); - metrics::increment_counter!( + metrics::counter!( REGION_LOOKUP_METRIC, // per metrics docs, &str should be preferred for performance; should the regions be // mapped through a match of region => &'static str of the name? 
"default_region" => default_region.to_string(), "reported_region" => reported_region - ); + ) + .increment(1); } pub fn duration_gateway_info_lookup(start: std::time::Instant) { - metrics::histogram!(GATEWAY_CHAIN_LOOKUP_DURATION_METRIC, start.elapsed()); + metrics::histogram!(GATEWAY_CHAIN_LOOKUP_DURATION_METRIC).record(start.elapsed()); } pub fn count_skf_updates(adds: usize, removes: usize) { - metrics::counter!(SKF_ADD_COUNT_METRIC, adds as u64); - metrics::counter!(SKF_REMOVE_COUNT_METRIC, removes as u64); + metrics::counter!(SKF_ADD_COUNT_METRIC).increment(adds as u64); + metrics::counter!(SKF_REMOVE_COUNT_METRIC).increment(removes as u64); } pub fn count_eui_updates(adds: usize, removes: usize) { - metrics::counter!(EUI_ADD_COUNT_METRIC, adds as u64); - metrics::counter!(EUI_REMOVE_COUNT_METRIC, removes as u64); + metrics::counter!(EUI_ADD_COUNT_METRIC).increment(adds as u64); + metrics::counter!(EUI_REMOVE_COUNT_METRIC).increment(removes as u64); } pub fn count_devaddr_updates(adds: usize, removes: usize) { - metrics::counter!(DEVADDR_ADD_COUNT_METRIC, adds as u64); - metrics::counter!(DEVADDR_REMOVE_COUNT_METRIC, removes as u64); + metrics::counter!(DEVADDR_ADD_COUNT_METRIC).increment(adds as u64); + metrics::counter!(DEVADDR_REMOVE_COUNT_METRIC).increment(removes as u64); } pub fn route_stream_subscribe() { - metrics::increment_gauge!(STREAM_METRIC, 1.0); + metrics::gauge!(STREAM_METRIC).increment(1.0); } pub fn route_stream_unsubscribe() { - metrics::decrement_gauge!(STREAM_METRIC, 1.0); + metrics::gauge!(STREAM_METRIC).decrement(1.0); } diff --git a/iot_config/tests/route_service.rs b/iot_config/tests/route_service.rs index 314e06292..648587e9f 100644 --- a/iot_config/tests/route_service.rs +++ b/iot_config/tests/route_service.rs @@ -1,4 +1,7 @@ -use std::{net::SocketAddr, str::FromStr, sync::Arc}; +use std::{ + net::{SocketAddr, TcpListener}, + sync::Arc, +}; use backon::{ExponentialBuilder, Retryable}; use chrono::Utc; @@ -13,7 +16,7 @@ use iot_config::{ OrgService, RouteService, }; use prost::Message; -use rand::{rngs::OsRng, Rng}; +use rand::rngs::OsRng; use sqlx::{Pool, Postgres}; use tokio::task::JoinHandle; use tonic::{ @@ -27,7 +30,7 @@ async fn stream_sends_all_data_when_since_is_0(pool: Pool) { let admin_keypair = generate_keypair(); let client_keypair = generate_keypair(); - let port = get_port(); + let socket_addr = get_socket_addr().expect("socket addr"); let auth_cache = create_auth_cache( admin_keypair.public_key().clone(), @@ -36,10 +39,10 @@ async fn stream_sends_all_data_when_since_is_0(pool: Pool) { ) .await; - let _handle = start_server(port, signing_keypair, auth_cache, pool.clone()).await; - let mut client = connect_client(port).await; + let _handle = start_server(socket_addr, signing_keypair, auth_cache, pool.clone()).await; + let mut client = connect_client(socket_addr).await; - let org = create_org(port, &admin_keypair).await; + let org = create_org(socket_addr, &admin_keypair).await; let route = create_route(&mut client, &org.org.unwrap(), &admin_keypair).await; create_euis( @@ -129,7 +132,7 @@ async fn stream_only_sends_data_modified_since(pool: Pool) { let admin_keypair = generate_keypair(); let client_keypair = generate_keypair(); - let port = get_port(); + let socket_addr = get_socket_addr().expect("socket addr"); let auth_cache = create_auth_cache( admin_keypair.public_key().clone(), @@ -138,10 +141,10 @@ async fn stream_only_sends_data_modified_since(pool: Pool) { ) .await; - let _handle = start_server(port, signing_keypair, auth_cache, 
pool.clone()).await; - let mut client = connect_client(port).await; + let _handle = start_server(socket_addr, signing_keypair, auth_cache, pool.clone()).await; + let mut client = connect_client(socket_addr).await; - let org_res_v1 = create_org(port, &admin_keypair).await; + let org_res_v1 = create_org(socket_addr, &admin_keypair).await; let proto::OrgResV1 { org: Some(org), .. } = org_res_v1 else { panic!("invalid OrgResV1") @@ -236,7 +239,7 @@ async fn stream_updates_with_deactivate_reactivate(pool: Pool) { let admin_keypair = generate_keypair(); let client_keypair = generate_keypair(); - let port = get_port(); + let socket_addr = get_socket_addr().expect("socket addr"); let auth_cache = create_auth_cache( admin_keypair.public_key().clone(), @@ -245,10 +248,9 @@ async fn stream_updates_with_deactivate_reactivate(pool: Pool) { ) .await; - let _handle = start_server(port, signing_keypair, auth_cache, pool.clone()).await; - let mut client = connect_client(port).await; - - let org_res_v1 = create_org(port, &admin_keypair).await; + let _handle = start_server(socket_addr, signing_keypair, auth_cache, pool.clone()).await; + let mut client = connect_client(socket_addr).await; + let org_res_v1 = create_org(socket_addr, &admin_keypair).await; let proto::OrgResV1 { org: Some(org), .. } = org_res_v1 else { panic!("invalid OrgResV1") @@ -444,15 +446,15 @@ fn route_stream_req_v1(signer: &Keypair, since: u64) -> RouteStreamReqV1 { request } -async fn connect_client(port: u64) -> RouteClient { - (|| RouteClient::connect(format!("http://127.0.0.1:{port}"))) +async fn connect_client(socket_addr: SocketAddr) -> RouteClient { + (|| RouteClient::connect(format!("http://{socket_addr}"))) .retry(&ExponentialBuilder::default()) .await .expect("grpc client") } async fn start_server( - port: u64, + socket_addr: SocketAddr, signing_keypair: Arc, auth_cache: AuthCache, pool: Pool, @@ -477,25 +479,22 @@ async fn start_server( transport::Server::builder() .add_service(proto::OrgServer::new(org_service)) .add_service(proto::RouteServer::new(route_service)) - .serve(socket_addr(port).expect("socket addr")) + .serve(socket_addr) .map_err(anyhow::Error::from), ) } -fn socket_addr(port: u64) -> anyhow::Result { - SocketAddr::from_str(&format!("127.0.0.1:{port}")).map_err(anyhow::Error::from) -} - fn generate_keypair() -> Keypair { Keypair::generate(KeyTag::default(), &mut OsRng) } -fn get_port() -> u64 { - rand::thread_rng().gen_range(6000..10000) +fn get_socket_addr() -> anyhow::Result { + let listener = TcpListener::bind("127.0.0.1:0")?; + Ok(listener.local_addr()?) 
} -async fn create_org(port: u64, admin_keypair: &Keypair) -> proto::OrgResV1 { - let mut client = (|| OrgClient::connect(format!("http://127.0.0.1:{port}"))) +async fn create_org(socket_addr: SocketAddr, admin_keypair: &Keypair) -> proto::OrgResV1 { + let mut client = (|| OrgClient::connect(format!("http://{socket_addr}"))) .retry(&ExponentialBuilder::default()) .await .expect("org client"); diff --git a/iot_packet_verifier/Cargo.toml b/iot_packet_verifier/Cargo.toml index 3acf411b7..6250136d2 100644 --- a/iot_packet_verifier/Cargo.toml +++ b/iot_packet_verifier/Cargo.toml @@ -35,3 +35,4 @@ tracing-subscriber = {workspace = true} triggered = {workspace = true} http = {workspace = true} http-serde = {workspace = true} +humantime-serde = { workspace = true } diff --git a/iot_packet_verifier/src/burner.rs b/iot_packet_verifier/src/burner.rs index 9c8950cdf..750860511 100644 --- a/iot_packet_verifier/src/burner.rs +++ b/iot_packet_verifier/src/burner.rs @@ -49,11 +49,16 @@ pub enum BurnError { } impl<P, S> Burner<P, S> { - pub fn new(pending_tables: P, balances: &BalanceCache<S>, burn_period: u64, solana: S) -> Self { + pub fn new( + pending_tables: P, + balances: &BalanceCache<S>, + burn_period: Duration, + solana: S, + ) -> Self { Self { pending_tables, balances: balances.balances(), - burn_period: Duration::from_secs(60 * burn_period), + burn_period, solana, } } @@ -131,7 +136,7 @@ where payer_account.burned = payer_account.burned.saturating_sub(amount); payer_account.balance = payer_account.balance.saturating_sub(amount); - metrics::counter!("burned", amount, "payer" => payer.to_string()); + metrics::counter!("burned", "payer" => payer.to_string()).increment(amount); Ok(()) } diff --git a/iot_packet_verifier/src/daemon.rs b/iot_packet_verifier/src/daemon.rs index b26d22b3e..0d05e3cec 100644 --- a/iot_packet_verifier/src/daemon.rs +++ b/iot_packet_verifier/src/daemon.rs @@ -18,7 +18,7 @@ use futures_util::TryFutureExt; use iot_config::client::{org_client::Orgs, OrgClient}; use solana::burn::SolanaRpc; use sqlx::{Pool, Postgres}; -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; use task_manager::{ManagedTask, TaskManager}; use tokio::sync::{mpsc::Receiver, Mutex}; @@ -167,7 +167,7 @@ impl Cmd { file_source::continuous_source::() .state(pool.clone()) .store(file_store) - .lookback(LookbackBehavior::StartAfter(settings.start_after())) + .lookback(LookbackBehavior::StartAfter(settings.start_after)) .prefix(FileType::IotPacketReport.to_string()) .create() .await?; @@ -199,7 +199,7 @@ impl Cmd { solana, balance_store, minimum_allowed_balance, - Duration::from_secs(60 * monitor_funds_period), + monitor_funds_period, shutdown, ) .map_err(anyhow::Error::from) diff --git a/iot_packet_verifier/src/settings.rs b/iot_packet_verifier/src/settings.rs index c67aeb886..e7bd45c52 100644 --- a/iot_packet_verifier/src/settings.rs +++ b/iot_packet_verifier/src/settings.rs @@ -1,7 +1,8 @@ -use chrono::{DateTime, TimeZone, Utc}; +use chrono::{DateTime, Utc}; use config::{Config, ConfigError, Environment, File}; +use humantime_serde::re::humantime; use serde::Deserialize; -use std::path::Path; +use std::{path::Path, time::Duration}; #[derive(Debug, Deserialize)] pub struct Settings { /// RUST_LOG compatible settings string #[serde(default = "default_log")] pub log: String, /// Cache location for generated verified reports pub cache: String, /// Data credit burn period. Default is 1 minute. - #[serde(default = "default_burn_period")] - pub burn_period: u64, + #[serde(with = "humantime_serde", default = "default_burn_period")] + pub burn_period: Duration, pub database: db_store::Settings, pub ingest: file_store::Settings, pub iot_config_client: iot_config::client::Settings, @@ -26,19 +27,19 @@ pub minimum_allowed_balance: u64, pub solana: Option, #[serde(default = "default_start_after")] - pub start_after: u64, + pub start_after: DateTime<Utc>, /// How long we should sleep before checking to re-enable /// any disabled orgs. - #[serde(default = "default_monitor_funds_period")] - pub monitor_funds_period: u64, + #[serde(with = "humantime_serde", default = "default_monitor_funds_period")] + pub monitor_funds_period: Duration, } -pub fn default_start_after() -> u64 { - 0 +pub fn default_start_after() -> DateTime<Utc> { + DateTime::UNIX_EPOCH } -pub fn default_burn_period() -> u64 { - 1 +pub fn default_burn_period() -> Duration { + humantime::parse_duration("1 minute").unwrap() } pub fn default_log() -> String { @@ -49,8 +50,8 @@ pub fn default_minimum_allowed_balance() -> u64 { 3_500_000 } -pub fn default_monitor_funds_period() -> u64 { - 30 +pub fn default_monitor_funds_period() -> Duration { + humantime::parse_duration("30 minutes").unwrap() } impl Settings { @@ -75,10 +76,4 @@ .build() .and_then(|config| config.try_deserialize()) } - - pub fn start_after(&self) -> DateTime<Utc> { - Utc.timestamp_opt(self.start_after as i64, 0) - .single() - .unwrap() - } } diff --git a/iot_packet_verifier/tests/integration_tests.rs b/iot_packet_verifier/tests/integration_tests.rs index b20587b36..c3ca415ac 100644 --- a/iot_packet_verifier/tests/integration_tests.rs +++ b/iot_packet_verifier/tests/integration_tests.rs @@ -452,7 +452,7 @@ async fn test_end_to_end() { let mut burner = Burner::new( pending_tables.clone(), &balance_cache, - 0, // Burn period does not matter, we manually burn + Duration::default(), // Burn period does not matter, we manually burn solana_network.clone(), ); diff --git a/iot_verifier/Cargo.toml b/iot_verifier/Cargo.toml index 6bbf783c8..234e1ccc6 100644 --- a/iot_verifier/Cargo.toml +++ b/iot_verifier/Cargo.toml @@ -55,3 +55,4 @@ price = { path = "../price" } tokio-util = { workspace = true } tokio-stream = { workspace = true } task-manager = { path = "../task_manager" } +humantime-serde = { workspace = true } diff --git a/iot_verifier/src/entropy.rs b/iot_verifier/src/entropy.rs index d582cd920..08d2fcbc7 100644 --- a/iot_verifier/src/entropy.rs +++ b/iot_verifier/src/entropy.rs @@ -1,12 +1,13 @@ -use chrono::{DateTime, Duration, Utc}; +use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; +use std::time::Duration; /// measurement in seconds of a piece of entropy /// its lifespan will be valid from entropy.timestamp to entropy.timestamp + ENTROPY_LIFESPAN /// any beacon or witness report received after this period and before the ENTROPY_STALE_PERIOD /// defined in the purger module will be rejected due to being outside of the entropy lifespan /// TODO: determine a sane value here -pub const ENTROPY_LIFESPAN: i64 = 180; +pub const ENTROPY_LIFESPAN: Duration = Duration::from_secs(180); #[derive(sqlx::Type, Serialize, Deserialize, Debug)] #[sqlx(type_name = "report_type", rename_all = "lowercase")] diff --git a/iot_verifier/src/entropy_loader.rs b/iot_verifier/src/entropy_loader.rs index 5f858cf22..3fddb31e2 100644 --- a/iot_verifier/src/entropy_loader.rs +++ b/iot_verifier/src/entropy_loader.rs @@ -63,7 +63,7 @@ impl EntropyLoader { report.version as i32, ) .await?; - 
metrics::increment_counter!("oracles_iot_verifier_loader_entropy"); + metrics::counter!("oracles_iot_verifier_loader_entropy").increment(1); Ok(transaction) }) .await? diff --git a/iot_verifier/src/gateway_cache.rs b/iot_verifier/src/gateway_cache.rs index 4b76018e8..fe2ef2afe 100644 --- a/iot_verifier/src/gateway_cache.rs +++ b/iot_verifier/src/gateway_cache.rs @@ -26,11 +26,11 @@ impl GatewayCache { ) -> Result<GatewayInfo, GatewayCacheError> { match self.gateway_cache_receiver.borrow().get(address) { Some(hit) => { - metrics::increment_counter!("oracles_iot_verifier_gateway_cache_hit"); + metrics::counter!("oracles_iot_verifier_gateway_cache_hit").increment(1); Ok(hit.clone()) } None => { - metrics::increment_counter!("oracles_iot_verifier_gateway_cache_miss"); + metrics::counter!("oracles_iot_verifier_gateway_cache_miss").increment(1); Err(GatewayCacheError::GatewayNotFound(address.clone())) } } diff --git a/iot_verifier/src/gateway_updater.rs b/iot_verifier/src/gateway_updater.rs index 1826775d1..f403a4ae2 100644 --- a/iot_verifier/src/gateway_updater.rs +++ b/iot_verifier/src/gateway_updater.rs @@ -1,8 +1,7 @@ -use chrono::Duration; use futures::{future::LocalBoxFuture, stream::StreamExt, TryFutureExt}; use helium_crypto::PublicKeyBinary; use iot_config::{client::Gateways, gateway_info::GatewayInfo}; -use std::collections::HashMap; +use std::{collections::HashMap, time::Duration}; use task_manager::ManagedTask; use tokio::sync::watch; use tokio::time; @@ -64,11 +63,7 @@ where pub async fn run(mut self, shutdown: triggered::Listener) -> anyhow::Result<()> { tracing::info!("starting gateway_updater"); - let mut trigger_timer = time::interval( - self.refresh_interval - .to_std() - .expect("valid interval in seconds"), - ); + let mut trigger_timer = time::interval(self.refresh_interval); loop { tokio::select!
{ biased; diff --git a/iot_verifier/src/loader.rs b/iot_verifier/src/loader.rs index 0be84f53d..c2c70ded7 100644 --- a/iot_verifier/src/loader.rs +++ b/iot_verifier/src/loader.rs @@ -6,7 +6,7 @@ use crate::{ Settings, }; use chrono::DateTime; -use chrono::{Duration as ChronoDuration, Utc}; +use chrono::Utc; use file_store::{ iot_beacon_report::IotBeaconIngestReport, iot_witness_report::IotWitnessIngestReport, @@ -15,8 +15,9 @@ use file_store::{ }; use futures::{future::LocalBoxFuture, stream, StreamExt}; use helium_crypto::PublicKeyBinary; +use humantime_serde::re::humantime; use sqlx::PgPool; -use std::{hash::Hasher, ops::DerefMut, str::FromStr}; +use std::{hash::Hasher, ops::DerefMut, str::FromStr, time::Duration}; use task_manager::ManagedTask; use tokio::{ sync::Mutex, @@ -31,9 +32,9 @@ pub struct Loader { ingest_store: FileStore, pool: PgPool, poll_time: time::Duration, - window_width: ChronoDuration, - ingestor_rollup_time: ChronoDuration, - max_lookback_age: ChronoDuration, + window_width: Duration, + ingestor_rollup_time: Duration, + max_lookback_age: Duration, gateway_cache: GatewayCache, } @@ -67,17 +68,13 @@ impl Loader { ) -> Result<Self, NewLoaderError> { tracing::info!("from_settings verifier loader"); let ingest_store = FileStore::from_settings(&settings.ingest).await?; - let poll_time = settings.poc_loader_poll_time(); - let window_width = settings.poc_loader_window_width(); - let ingestor_rollup_time = settings.ingestor_rollup_time(); - let max_lookback_age = settings.loader_window_max_lookback_age(); Ok(Self { pool, ingest_store, - poll_time, - window_width, - ingestor_rollup_time, - max_lookback_age, + poll_time: settings.poc_loader_poll_time, + window_width: settings.poc_loader_window_width, + ingestor_rollup_time: settings.ingestor_rollup_time, + max_lookback_age: settings.loader_window_max_lookback_age, gateway_cache, }) } @@ -121,10 +118,11 @@ impl Loader { .max(window_max_lookback); let before_max = after + self.window_width; let before = (now - (self.window_width * 3)).min(before_max); - let cur_window_width = before - after; + let cur_window_width = (before - after).to_std()?; tracing::info!( "sliding window, after: {after}, before: {before}, cur width: {:?}, required width: {:?}", - cur_window_width.num_minutes(), self.window_width.num_minutes() + humantime::format_duration(cur_window_width), + humantime::format_duration(self.window_width) ); // if the current window width is less than our expected width // then do nothing @@ -195,12 +193,13 @@ impl Loader { // to account for the potential of the ingestor write time for // witness reports being out of sync with that of beacon files // for witnesses we do need the filter but not the arc + let two_minutes = Duration::from_secs(120); match self .process_events( FileType::IotWitnessIngestReport, &self.ingest_store, - after - (self.ingestor_rollup_time + ChronoDuration::seconds(120)), - before + (self.ingestor_rollup_time + ChronoDuration::seconds(120)), + after - (self.ingestor_rollup_time + two_minutes), + before + (self.ingestor_rollup_time + two_minutes), None, Some(&filter), ) diff --git a/iot_verifier/src/main.rs b/iot_verifier/src/main.rs index 3fd50ab9a..4efcc15ed 100644 --- a/iot_verifier/src/main.rs +++ b/iot_verifier/src/main.rs @@ -84,11 +84,9 @@ impl Server { // * // setup caches // * - let (gateway_updater_receiver, gateway_updater_server) = GatewayUpdater::new( - settings.gateway_refresh_interval(), - iot_config_client.clone(), - ) - .await?; + let (gateway_updater_receiver, gateway_updater_server) =
GatewayUpdater::new(settings.gateway_refresh_interval, iot_config_client.clone()) + .await?; let gateway_cache = GatewayCache::new(gateway_updater_receiver.clone()); // * @@ -106,7 +104,7 @@ impl Server { // setup the density scaler requirements // * let density_scaler = DensityScaler::new( - settings.loader_window_max_lookback_age(), + settings.loader_window_max_lookback_age, pool.clone(), gateway_updater_receiver, ) @@ -143,17 +141,17 @@ impl Server { pool: pool.clone(), rewards_sink, reward_manifests_sink, - reward_period_hours: settings.rewards, - reward_offset: settings.reward_offset_duration(), + reward_period_hours: settings.reward_period, + reward_offset: settings.reward_period_offset, price_tracker, }; // * // setup entropy requirements // * - let max_lookback_age = settings.loader_window_max_lookback_age(); + let max_lookback_age = settings.loader_window_max_lookback_age; let entropy_store = FileStore::from_settings(&settings.entropy).await?; - let entropy_interval = settings.entropy_interval(); + let entropy_interval = settings.entropy_interval; let (entropy_loader_receiver, entropy_loader_server) = file_source::continuous_source::<EntropyReport>() .state(pool.clone()) @@ -186,7 +184,7 @@ impl Server { .await?; let packet_store = FileStore::from_settings(&settings.packet_ingest).await?; - let packet_interval = settings.packet_interval(); + let packet_interval = settings.packet_interval; let (pk_loader_receiver, pk_loader_server) = file_source::continuous_source::<IotValidPacket>() .state(pool.clone()) @@ -232,15 +230,11 @@ impl Server { .create() .await?; - let base_stale_period = settings.base_stale_period(); - let beacon_stale_period = settings.beacon_stale_period(); - let witness_stale_period = settings.witness_stale_period(); - let entropy_stale_period = settings.entropy_stale_period(); let purger = purger::Purger::new( - base_stale_period, - beacon_stale_period, - witness_stale_period, - entropy_stale_period, + settings.base_stale_period, + settings.beacon_stale_period, + settings.witness_stale_period, + settings.entropy_stale_period, pool.clone(), purger_invalid_beacon_sink, purger_invalid_witness_sink, diff --git a/iot_verifier/src/poc.rs b/iot_verifier/src/poc.rs index 62c498eed..fdc8e1fb7 100644 --- a/iot_verifier/src/poc.rs +++ b/iot_verifier/src/poc.rs @@ -9,7 +9,7 @@ use crate::{ witness_updater::WitnessUpdater, }; use beacon; -use chrono::{DateTime, Duration, DurationRound, Utc}; +use chrono::{DateTime, DurationRound, Utc}; use denylist::denylist::DenyList; use file_store::{ iot_beacon_report::{IotBeaconIngestReport, IotBeaconReport}, @@ -31,7 +31,7 @@ use iot_config::{ use lazy_static::lazy_static; use rust_decimal::Decimal; use sqlx::PgPool; -use std::f64::consts::PI; +use std::{f64::consts::PI, time::Duration}; pub type GenericVerifyResult = Result<(), InvalidResponse>; @@ -55,9 +55,9 @@ lazy_static!
{ /// would disqualify the hotspot from validating further beacons static ref DEFAULT_TX_SCALE: Decimal = Decimal::new(2000, 4); /// max permitted lag between the first witness and all subsequent witnesses - static ref MAX_WITNESS_LAG: Duration = Duration::milliseconds(1500); + static ref MAX_WITNESS_LAG: chrono::Duration = chrono::Duration::milliseconds(1500); /// max permitted lag between the beaconer and a witness - static ref MAX_BEACON_TO_WITNESS_LAG: Duration = Duration::milliseconds(4000); + static ref MAX_BEACON_TO_WITNESS_LAG: chrono::Duration = chrono::Duration::milliseconds(4000); } #[derive(Debug, PartialEq)] pub struct InvalidResponse { @@ -98,7 +98,7 @@ impl Poc { entropy_start: DateTime<Utc>, entropy_version: i32, ) -> Self { - let entropy_end = entropy_start + Duration::seconds(ENTROPY_LIFESPAN); + let entropy_end = entropy_start + ENTROPY_LIFESPAN; Self { pool, beacon_interval, @@ -158,7 +158,7 @@ impl Poc { &self.beacon_report, &beaconer_info, &beaconer_region_info.region_params, - self.beacon_interval, + chrono::Duration::from_std(self.beacon_interval)?, ) { Ok(()) => { let tx_scale = hex_density_map @@ -380,7 +380,7 @@ pub fn do_beacon_verifications( beacon_report: &IotBeaconIngestReport, beaconer_info: &GatewayInfo, beaconer_region_params: &[BlockchainRegionParamV1], - beacon_interval: Duration, + beacon_interval: chrono::Duration, ) -> GenericVerifyResult { tracing::debug!( "verifying beacon from beaconer: {:?}", @@ -489,7 +489,7 @@ pub fn do_witness_verifications( fn verify_beacon_schedule( last_beacon: &Option<LastBeacon>, beacon_received_ts: DateTime<Utc>, - beacon_interval: Duration, + beacon_interval: chrono::Duration, ) -> GenericVerifyResult { match last_beacon { Some(last_beacon) => { diff --git a/iot_verifier/src/poc_report.rs b/iot_verifier/src/poc_report.rs index 9235ee251..450e641f8 100644 --- a/iot_verifier/src/poc_report.rs +++ b/iot_verifier/src/poc_report.rs @@ -1,7 +1,8 @@ use crate::entropy::ENTROPY_LIFESPAN; -use chrono::{DateTime, Duration, Utc}; +use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use sqlx::{Postgres, Transaction}; +use std::time::Duration; const REPORT_INSERT_SQL: &str = "insert into poc_report ( id, @@ -190,7 +191,7 @@ impl Report { where E: sqlx::Executor<'c, Database = sqlx::Postgres>, { - let entropy_min_time = Utc::now() - Duration::seconds(ENTROPY_LIFESPAN); + let entropy_min_time = Utc::now() - ENTROPY_LIFESPAN; Ok(sqlx::query_as::<_, Self>( r#" select poc_report.id, diff --git a/iot_verifier/src/purger.rs b/iot_verifier/src/purger.rs index 6f52ec05b..c9a01d7db 100644 --- a/iot_verifier/src/purger.rs +++ b/iot_verifier/src/purger.rs @@ -1,5 +1,4 @@ use crate::{entropy::Entropy, poc_report::Report, telemetry}; -use chrono::Duration; use file_store::{ file_sink::FileSinkClient, iot_beacon_report::IotBeaconIngestReport, @@ -15,9 +14,9 @@ use futures::{ use helium_proto::services::poc_lora::{ InvalidParticipantSide, InvalidReason, LoraInvalidBeaconReportV1, LoraInvalidWitnessReportV1, }; -use lazy_static::lazy_static; +use humantime_serde::re::humantime; use sqlx::{PgPool, Postgres}; -use std::ops::DerefMut; +use std::{ops::DerefMut, time::Duration}; use task_manager::ManagedTask; use tokio::{ sync::Mutex, @@ -27,15 +26,6 @@ use tokio::{ const DB_POLL_TIME: time::Duration = time::Duration::from_secs(60 * 35); const PURGER_WORKERS: usize = 50; -lazy_static!
{ - /// the period after which a beacon report in the DB will be deemed stale - static ref BEACON_STALE_PERIOD: Duration = Duration::minutes(45); - /// the period after which a witness report in the DB will be deemed stale - static ref WITNESS_STALE_PERIOD: Duration = Duration::minutes(45); - /// the period after which an entropy entry in the DB will be deemed stale - static ref ENTROPY_STALE_PERIOD: Duration = Duration::minutes(60); -} - pub struct Purger { pub pool: PgPool, pub base_stale_period: Duration, @@ -110,7 +100,8 @@ impl Purger { // once the report is safely on s3 we can then proceed to purge from the db let beacon_stale_period = self.base_stale_period + self.beacon_stale_period; tracing::info!( - "starting query get_stale_pending_beacons with stale period: {beacon_stale_period}" + "starting query get_stale_pending_beacons with stale period: {}", + humantime::format_duration(beacon_stale_period) ); let stale_beacons = Report::get_stale_beacons(&self.pool, beacon_stale_period).await?; tracing::info!("completed query get_stale_beacons"); @@ -132,7 +123,8 @@ impl Purger { let witness_stale_period = self.base_stale_period + self.witness_stale_period; tracing::info!( - "starting query get_stale_pending_witnesses with stale period: {witness_stale_period}" + "starting query get_stale_pending_witnesses with stale period: {}", + humantime::format_duration(witness_stale_period) ); let stale_witnesses = Report::get_stale_witnesses(&self.pool, witness_stale_period).await?; tracing::info!("completed query get_stale_witnesses"); diff --git a/iot_verifier/src/region_cache.rs b/iot_verifier/src/region_cache.rs index 127e2be0e..ceab85a2c 100644 --- a/iot_verifier/src/region_cache.rs +++ b/iot_verifier/src/region_cache.rs @@ -49,12 +49,12 @@ where ) -> Result<RegionParamsInfo, RegionCacheError<G::Error>> { match self.cache.get(&region).await { Some(hit) => { - metrics::increment_counter!("oracles_iot_verifier_region_params_cache_hit"); + metrics::counter!("oracles_iot_verifier_region_params_cache_hit").increment(1); Ok(hit.value().clone()) } _ => match self.gateways.clone().resolve_region_params(region).await { Ok(res) => { - metrics::increment_counter!("oracles_iot_verifier_region_params_cache_miss"); + metrics::counter!("oracles_iot_verifier_region_params_cache_miss").increment(1); self.cache .insert(region, res.clone(), self.refresh_interval) .await; diff --git a/iot_verifier/src/rewarder.rs b/iot_verifier/src/rewarder.rs index 1db4be290..2c273fc37 100644 --- a/iot_verifier/src/rewarder.rs +++ b/iot_verifier/src/rewarder.rs @@ -2,7 +2,7 @@ use crate::{ reward_share::{self, GatewayShares}, telemetry, }; -use chrono::{DateTime, Duration, TimeZone, Utc}; +use chrono::{DateTime, TimeZone, Utc}; use db_store::meta; use file_store::{file_sink, traits::TimestampEncode}; use futures::future::LocalBoxFuture; @@ -10,22 +10,23 @@ use helium_proto::services::poc_lora as proto; use helium_proto::services::poc_lora::iot_reward_share::Reward as ProtoReward; use helium_proto::services::poc_lora::{UnallocatedReward, UnallocatedRewardType}; use helium_proto::RewardManifest; +use humantime_serde::re::humantime; use price::PriceTracker; use reward_scheduler::Scheduler; use rust_decimal::prelude::*; use rust_decimal_macros::dec; use sqlx::{PgExecutor, PgPool, Pool, Postgres}; -use std::ops::Range; +use std::{ops::Range, time::Duration}; use task_manager::ManagedTask; use tokio::time::sleep; -const REWARDS_NOT_CURRENT_DELAY_PERIOD: i64 = 5; +const REWARDS_NOT_CURRENT_DELAY_PERIOD: Duration = Duration::from_secs(5 * 60); pub struct Rewarder { pub pool: Pool<Postgres>, pub
rewards_sink: file_sink::FileSinkClient, pub reward_manifests_sink: file_sink::FileSinkClient, - pub reward_period_hours: i64, + pub reward_period_hours: Duration, pub reward_offset: Duration, pub price_tracker: PriceTracker, } @@ -44,7 +45,7 @@ impl Rewarder { pool: PgPool, rewards_sink: file_sink::FileSinkClient, reward_manifests_sink: file_sink::FileSinkClient, - reward_period_hours: i64, + reward_period_hours: Duration, reward_offset: Duration, price_tracker: PriceTracker, ) -> Self { @@ -61,7 +62,7 @@ impl Rewarder { pub async fn run(mut self, shutdown: triggered::Listener) -> anyhow::Result<()> { tracing::info!("Starting rewarder"); - let reward_period_length = Duration::hours(self.reward_period_hours); + let reward_period_length = self.reward_period_hours; loop { let now = Utc::now(); @@ -87,9 +88,10 @@ impl Rewarder { scheduler.sleep_duration(Utc::now())? } else { tracing::info!( - "rewards will be retried in {REWARDS_NOT_CURRENT_DELAY_PERIOD} minutes:" + "rewards will be retried in {}", + humantime::format_duration(REWARDS_NOT_CURRENT_DELAY_PERIOD) ); - Duration::minutes(REWARDS_NOT_CURRENT_DELAY_PERIOD).to_std()? + REWARDS_NOT_CURRENT_DELAY_PERIOD } } else { scheduler.sleep_duration(Utc::now())? diff --git a/iot_verifier/src/runner.rs b/iot_verifier/src/runner.rs index 4e0656ec7..7ef74ffff 100644 --- a/iot_verifier/src/runner.rs +++ b/iot_verifier/src/runner.rs @@ -50,7 +50,7 @@ lazy_static! { pub struct Runner<G> { pub pool: PgPool, - pub beacon_interval: ChronoDuration, + pub beacon_interval: Duration, pub max_witnesses_per_poc: u64, pub beacon_max_retries: u64, pub witness_max_retries: u64, @@ -110,13 +110,13 @@ where hex_density_map: HexDensityMap, witness_updater: WitnessUpdater, ) -> anyhow::Result<Self> { - let beacon_interval = settings.beacon_interval()?; + let beacon_interval = settings.beacon_interval; let max_witnesses_per_poc = settings.max_witnesses_per_poc; let beacon_max_retries = settings.beacon_max_retries; let witness_max_retries = settings.witness_max_retries; let deny_list_latest_url = settings.denylist.denylist_url.clone(); let mut deny_list = DenyList::new(&settings.denylist)?; - let region_cache = RegionCache::new(settings.region_params_refresh_interval(), gateways)?; + let region_cache = RegionCache::new(settings.region_params_refresh_interval, gateways)?; // force update to latest in order to update the tag name // when first run, the denylist will load the local filter // but we dont save the tag name so it defaults to 0 @@ -139,7 +139,7 @@ where beacon_max_retries, witness_max_retries, deny_list_latest_url, - deny_list_trigger_interval: settings.denylist.trigger_interval(), + deny_list_trigger_interval: settings.denylist.trigger_interval, deny_list, invalid_beacon_sink, invalid_witness_sink, diff --git a/iot_verifier/src/settings.rs b/iot_verifier/src/settings.rs index c4f8e74a3..34993eb9c 100644 --- a/iot_verifier/src/settings.rs +++ b/iot_verifier/src/settings.rs @@ -1,9 +1,8 @@ use anyhow::bail; -use chrono::Duration; use config::{Config, Environment, File}; +use humantime_serde::re::humantime; use serde::Deserialize; -use std::path::Path; -use tokio::time; +use std::{path::Path, time::Duration}; #[derive(Debug, Deserialize, Clone)] pub struct Settings { @@ -17,17 +16,17 @@ pub struct Settings { /// if this is set, this value will be added to the entropy and report /// stale periods and is to prevent data being unnecessarily purged /// in the event the verifier is down for an extended period of time - #[serde(default = "default_base_stale_period")] - pub
base_stale_period: i64, + #[serde(with = "humantime_serde", default = "default_base_stale_period")] + pub base_stale_period: Duration, /// the period after which a beacon report in the DB will be deemed stale - #[serde(default = "default_beacon_stale_period")] - pub beacon_stale_period: i64, + #[serde(with = "humantime_serde", default = "default_beacon_stale_period")] + pub beacon_stale_period: Duration, /// the period after which a witness report in the DB will be deemed stale - #[serde(default = "default_witness_stale_period")] - pub witness_stale_period: i64, + #[serde(with = "humantime_serde", default = "default_witness_stale_period")] + pub witness_stale_period: Duration, /// the period after which an entropy report in the DB will be deemed stale - #[serde(default = "default_entropy_stale_period")] - pub entropy_stale_period: i64, + #[serde(with = "humantime_serde", default = "default_entropy_stale_period")] + pub entropy_stale_period: Duration, pub database: db_store::Settings, pub iot_config_client: iot_config::client::Settings, pub ingest: file_store::Settings, @@ -39,50 +38,55 @@ pub struct Settings { pub denylist: denylist::Settings, pub price_tracker: price::price_tracker::Settings, /// Reward period in hours. (Default to 24) - #[serde(default = "default_reward_period")] - pub rewards: i64, + #[serde(with = "humantime_serde", default = "default_reward_period")] + pub reward_period: Duration, /// Reward calculation offset in minutes, rewards will be calculated at the end - /// of the reward period + reward_offset_minutes - #[serde(default = "default_reward_offset_minutes")] - pub reward_offset_minutes: i64, + /// of the reward_period + reward_period_offset + #[serde(with = "humantime_serde", default = "default_reward_period_offset")] + pub reward_period_offset: Duration, #[serde(default = "default_max_witnesses_per_poc")] pub max_witnesses_per_poc: u64, /// The cadence at which hotspots are permitted to beacon (in seconds) /// this should be a factor of 24 so that we can have clear /// beaconing bucket sizes - #[serde(default = "default_beacon_interval")] - pub beacon_interval: u64, + #[serde(with = "humantime_serde", default = "default_beacon_interval")] + pub beacon_interval: Duration, + // FIXME: unused /// Trigger interval for generating a transmit scaling map - #[serde(default = "default_transmit_scale_interval")] - pub transmit_scale_interval: i64, + #[serde(with = "humantime_serde", default = "default_transmit_scale_interval")] + pub transmit_scale_interval: Duration, // roll up time defined in the ingestors ( in seconds ) // ie the time after which they will write out files to s3 // this will be used when padding out the witness // loader window before and after values - #[serde(default = "default_ingestor_rollup_time")] - pub ingestor_rollup_time: i64, + #[serde(with = "humantime_serde", default = "default_ingestor_rollup_time")] + pub ingestor_rollup_time: Duration, /// window width for the poc report loader ( in seconds ) /// each poll the loader will load reports from start time to start time + window width /// NOTE: the window width should be as a minimum equal to the ingestor roll up period /// any less and the verifier will potentially miss incoming reports - #[serde(default = "default_poc_loader_window_width")] - pub poc_loader_window_width: i64, + #[serde(with = "humantime_serde", default = "default_poc_loader_window_width")] + pub poc_loader_window_width: Duration, /// cadence for how often to look for poc reports from s3 buckets - #[serde(default = 
"default_poc_loader_poll_time")] - pub poc_loader_poll_time: u64, + #[serde(with = "humantime_serde", default = "default_poc_loader_poll_time")] + pub poc_loader_poll_time: Duration, + // FIXME: unused /// the lifespan of a piece of entropy - #[serde(default = "default_entropy_lifespan ")] - pub entropy_lifespan: i64, + #[serde(with = "humantime_serde", default = "default_entropy_lifespan ")] + pub entropy_lifespan: Duration, /// max window age for the poc report loader ( in seconds ) /// the starting point of the window will never be older than now - max age - #[serde(default = "default_loader_window_max_lookback_age")] - pub loader_window_max_lookback_age: i64, + #[serde( + with = "humantime_serde", + default = "default_loader_window_max_lookback_age" + )] + pub loader_window_max_lookback_age: Duration, /// File store poll interval for incoming entropy reports, in seconds - #[serde(default = "default_entropy_interval")] - pub entropy_interval: i64, - /// File store poll interval for incoming packets, in seconds. (Default is 900; 15 minutes) - #[serde(default = "default_packet_interval")] - pub packet_interval: i64, + #[serde(with = "humantime_serde", default = "default_entropy_interval")] + pub entropy_interval: Duration, + /// File store poll interval for incoming packets, in seconds. (Default 15 minutes) + #[serde(with = "humantime_serde", default = "default_packet_interval")] + pub packet_interval: Duration, /// the max number of times a beacon report will be retried /// after this the report will be ignored and eventually be purged #[serde(default = "default_beacon_max_retries")] @@ -92,103 +96,97 @@ pub struct Settings { #[serde(default = "default_witness_max_retries")] pub witness_max_retries: u64, /// interval at which gateways are refreshed - #[serde(default = "default_gateway_refresh_interval")] - pub gateway_refresh_interval: i64, + #[serde(with = "humantime_serde", default = "default_gateway_refresh_interval")] + pub gateway_refresh_interval: Duration, /// interval at which region params in the cache are refreshed - #[serde(default = "default_region_params_refresh_interval")] - pub region_params_refresh_interval: u64, + #[serde( + with = "humantime_serde", + default = "default_region_params_refresh_interval" + )] + pub region_params_refresh_interval: Duration, } -// Default: 30 minutes -fn default_gateway_refresh_interval() -> i64 { - 30 * 60 +fn default_gateway_refresh_interval() -> Duration { + humantime::parse_duration("30 minutes").unwrap() } -// Default: 30 minutes -fn default_region_params_refresh_interval() -> u64 { - 30 * 60 +fn default_region_params_refresh_interval() -> Duration { + humantime::parse_duration("30 minutes").unwrap() } -// Default: 60 minutes // this should be at least poc_loader_window_width * 2 -pub fn default_loader_window_max_lookback_age() -> i64 { - 60 * 60 +fn default_loader_window_max_lookback_age() -> Duration { + humantime::parse_duration("60 minutes").unwrap() } -// Default: 5 minutes -fn default_entropy_interval() -> i64 { - 5 * 60 +fn default_entropy_interval() -> Duration { + humantime::parse_duration("5 minutes").unwrap() } -// Default: 5 minutes -pub fn default_entropy_lifespan() -> i64 { - 5 * 60 +fn default_entropy_lifespan() -> Duration { + humantime::parse_duration("5 minutes").unwrap() } -// Default: 5 minutes -pub fn default_poc_loader_window_width() -> i64 { - 5 * 60 +fn default_poc_loader_window_width() -> Duration { + humantime::parse_duration("5 minutes").unwrap() } -// Default: 5 minutes -pub fn default_ingestor_rollup_time() 
-> i64 { - 5 * 60 +fn default_ingestor_rollup_time() -> Duration { + humantime::parse_duration("5 minutes").unwrap() } -// Default: 5 minutes + // in normal operational mode the poll time should be set same as that of the window width // however, if for example we are loading historic data, ie looking back 24hours, we will want // the loader to be catching up as quickly as possible and so we will want to poll more often // in order to iterate quickly over the historic data // the average time it takes to load the data available within with window width needs to be // considered here -pub fn default_poc_loader_poll_time() -> u64 { - 5 * 60 +fn default_poc_loader_poll_time() -> Duration { + humantime::parse_duration("5 minutes").unwrap() } -// Default: 6 hours -pub fn default_beacon_interval() -> u64 { - 6 * 60 * 60 +fn default_beacon_interval() -> Duration { + humantime::parse_duration("6 hours").unwrap() } -// Default: 30 min -pub fn default_transmit_scale_interval() -> i64 { - 1800 +fn default_transmit_scale_interval() -> Duration { + humantime::parse_duration("30 minutes").unwrap() } -pub fn default_log() -> String { +fn default_log() -> String { "iot_verifier=debug,poc_store=info".to_string() } -pub fn default_base_stale_period() -> i64 { - 0 +fn default_base_stale_period() -> Duration { + Duration::default() } -pub fn default_beacon_stale_period() -> i64 { - 60 * 45 +fn default_beacon_stale_period() -> Duration { + humantime::parse_duration("45 minutes").unwrap() } -pub fn default_witness_stale_period() -> i64 { - 60 * 45 +fn default_witness_stale_period() -> Duration { + humantime::parse_duration("45 minutes").unwrap() } -pub fn default_entropy_stale_period() -> i64 { - 60 * 60 +fn default_entropy_stale_period() -> Duration { + humantime::parse_duration("60 minutes").unwrap() } -fn default_reward_period() -> i64 { - 24 +fn default_reward_period() -> Duration { + humantime::parse_duration("24 hours").unwrap() } -fn default_reward_offset_minutes() -> i64 { - 30 +fn default_reward_period_offset() -> Duration { + humantime::parse_duration("30 minutes").unwrap() } -pub fn default_max_witnesses_per_poc() -> u64 { +fn default_max_witnesses_per_poc() -> u64 { 14 } -fn default_packet_interval() -> i64 { - 900 +fn default_packet_interval() -> Duration { + humantime::parse_duration("15 minutes").unwrap() } // runner runs at 30 sec intervals @@ -229,64 +227,13 @@ impl Settings { .and_then(|config| config.try_deserialize()) } - pub fn reward_offset_duration(&self) -> Duration { - Duration::minutes(self.reward_offset_minutes) - } - - pub fn poc_loader_window_width(&self) -> Duration { - Duration::seconds(self.poc_loader_window_width) - } - - pub fn ingestor_rollup_time(&self) -> Duration { - Duration::seconds(self.ingestor_rollup_time) - } - - pub fn poc_loader_poll_time(&self) -> time::Duration { - time::Duration::from_secs(self.poc_loader_poll_time) - } - - pub fn loader_window_max_lookback_age(&self) -> Duration { - Duration::seconds(self.loader_window_max_lookback_age) - } - - pub fn entropy_lifespan(&self) -> Duration { - Duration::seconds(self.entropy_lifespan) - } - - pub fn base_stale_period(&self) -> Duration { - Duration::seconds(self.base_stale_period) - } - - pub fn beacon_stale_period(&self) -> Duration { - Duration::seconds(self.beacon_stale_period) - } - - pub fn witness_stale_period(&self) -> Duration { - Duration::seconds(self.witness_stale_period) - } - - pub fn entropy_stale_period(&self) -> Duration { - Duration::seconds(self.entropy_stale_period) - } - - pub fn 
entropy_interval(&self) -> Duration { - Duration::seconds(self.entropy_interval) - } pub fn packet_interval(&self) -> Duration { - Duration::seconds(self.packet_interval) - } pub fn gateway_refresh_interval(&self) -> Duration { - Duration::seconds(self.gateway_refresh_interval) - } pub fn region_params_refresh_interval(&self) -> time::Duration { - time::Duration::from_secs(self.region_params_refresh_interval) - } pub fn beacon_interval(&self) -> anyhow::Result<Duration> { + // FIXME: // validate the beacon_interval value is a factor of 24, if not bail out - if (24 * 60 * 60) % self.beacon_interval != 0 { + if (24 * 60 * 60) % self.beacon_interval.as_secs() != 0 { bail!("beacon interval is not a factor of 24") } else { - Ok(Duration::seconds(self.beacon_interval as i64)) + Ok(self.beacon_interval) } } } diff --git a/iot_verifier/src/telemetry.rs b/iot_verifier/src/telemetry.rs index 2629a6a7e..8f0a110c7 100644 --- a/iot_verifier/src/telemetry.rs +++ b/iot_verifier/src/telemetry.rs @@ -26,47 +26,47 @@ pub async fn initialize(db: &Pool<Postgres>) -> anyhow::Result<()> { } pub fn count_packets(count: u64) { - metrics::counter!(PACKET_COUNTER, count); + metrics::counter!(PACKET_COUNTER).increment(count); } pub fn count_non_rewardable_packets(count: u64) { - metrics::counter!(NON_REWARDABLE_PACKET_COUNTER, count); + metrics::counter!(NON_REWARDABLE_PACKET_COUNTER).increment(count); } pub fn count_loader_beacons(count: u64) { - metrics::counter!(LOADER_BEACON_COUNTER, count); + metrics::counter!(LOADER_BEACON_COUNTER).increment(count); } pub fn count_loader_witnesses(count: u64) { - metrics::counter!(LOADER_WITNESS_COUNTER, count); + metrics::counter!(LOADER_WITNESS_COUNTER).increment(count); } pub fn count_loader_dropped_beacons(count: u64, labels: &[(&'static str, &'static str)]) { - metrics::counter!(LOADER_DROPPED_BEACON_COUNTER, count, labels); + metrics::counter!(LOADER_DROPPED_BEACON_COUNTER, labels).increment(count); } pub fn count_loader_dropped_witnesses(count: u64, labels: &[(&'static str, &'static str)]) { - metrics::counter!(LOADER_DROPPED_WITNESS_COUNTER, count, labels); + metrics::counter!(LOADER_DROPPED_WITNESS_COUNTER, labels).increment(count); } pub fn num_beacons(count: u64) { - metrics::gauge!(BEACON_GUAGE, count as f64); + metrics::gauge!(BEACON_GUAGE).set(count as f64); } pub fn increment_num_beacons_by(count: u64) { - metrics::increment_gauge!(BEACON_GUAGE, count as f64); + metrics::gauge!(BEACON_GUAGE).increment(count as f64); } pub fn decrement_num_beacons() { - metrics::decrement_gauge!(BEACON_GUAGE, 1.0) + metrics::gauge!(BEACON_GUAGE).decrement(1.0) } pub fn increment_invalid_witnesses(labels: &[(&'static str, &'static str)]) { - metrics::increment_counter!(INVALID_WITNESS_COUNTER, labels); + metrics::counter!(INVALID_WITNESS_COUNTER, labels).increment(1); } pub fn last_rewarded_end_time(datetime: DateTime<Utc>) { - metrics::gauge!(LAST_REWARDED_END_TIME, datetime.timestamp() as f64); + metrics::gauge!(LAST_REWARDED_END_TIME).set(datetime.timestamp() as f64); } #[derive(Default)] diff --git a/iot_verifier/src/tx_scaler.rs b/iot_verifier/src/tx_scaler.rs index 5728c8e49..eaf8c0d21 100644 --- a/iot_verifier/src/tx_scaler.rs +++ b/iot_verifier/src/tx_scaler.rs @@ -3,16 +3,17 @@ use crate::{ hex_density::{compute_hex_density_map, GlobalHexMap, HexDensityMap}, last_beacon_reciprocity::LastBeaconReciprocity, }; -use chrono::{DateTime, Duration, Utc}; +use chrono::{DateTime, Utc}; use futures::future::LocalBoxFuture; use helium_crypto::PublicKeyBinary; use sqlx::PgPool; -use
std::collections::HashMap; +use std::{collections::HashMap, time::Duration}; use task_manager::ManagedTask; -// The number in minutes within which the gateway has registered a beacon -// to the oracle for inclusion in transmit scaling density calculations -const HIP_17_INTERACTIVITY_LIMIT: i64 = 3600; +// The period within which the gateway must have registered a beacon +// to the oracle for inclusion in transmit scaling density calculations +// 60 hours +const HIP_17_INTERACTIVITY_LIMIT: Duration = Duration::from_secs(60 * 60 * 60); pub struct Server { pub hex_density_map: HexDensityMap, @@ -101,7 +102,7 @@ impl Server { &self, now: DateTime<Utc>, ) -> anyhow::Result<HashMap<PublicKeyBinary, DateTime<Utc>>> { - let interactivity_deadline = now - Duration::minutes(HIP_17_INTERACTIVITY_LIMIT); + let interactivity_deadline = now - HIP_17_INTERACTIVITY_LIMIT; Ok( LastBeaconReciprocity::get_all_since(&self.pool, interactivity_deadline) .await? diff --git a/iot_verifier/src/witness_updater.rs b/iot_verifier/src/witness_updater.rs index d1fa912c0..661653d06 100644 --- a/iot_verifier/src/witness_updater.rs +++ b/iot_verifier/src/witness_updater.rs @@ -24,7 +24,7 @@ struct Telemetry { impl Telemetry { fn new() -> Self { - let gauge = metrics::register_gauge!("iot_verifier_witness_updater_queue"); + let gauge = metrics::gauge!("iot_verifier_witness_updater_queue"); gauge.set(0.0); Self { queue_gauge: gauge } } diff --git a/iot_verifier/tests/integrations/common/mod.rs b/iot_verifier/tests/integrations/common/mod.rs index c9f00f01c..0dea4c251 100644 --- a/iot_verifier/tests/integrations/common/mod.rs +++ b/iot_verifier/tests/integrations/common/mod.rs @@ -419,11 +419,10 @@ pub const BEACONER5: &str = "112BwpY6ARmnMsPZE9iBauh6EJVDvH7MimZtvWnd99nXmmGcKeM pub const WITNESS1: &str = "13ABbtvMrRK8jgYrT3h6Y9Zu44nS6829kzsamiQn9Eefeu3VAZs"; pub const WITNESS2: &str = "112e5E4NCpZ88ivqoXeyWwiVCC4mJFv4kMPowycNMXjoDRSP6ZnS"; -#[allow(dead_code)] + pub const UNKNOWN_GATEWAY1: &str = "1YiZUsuCwxE7xyxjke1ogehv5WSuYZ9o7uM2ZKvRpytyqb8Be63"; pub const NO_METADATA_GATEWAY1: &str = "1YpopKVbRDELWGR3nMd1MAU8a5GxP1uQSDj9AeXHEi3fHSsWGRi"; -#[allow(dead_code)] pub const DENIED_PUBKEY1: &str = "112bUGwooPd1dCDd3h3yZwskjxCzBsQNKeaJTuUF4hSgYedcsFa9"; pub const LOCAL_ENTROPY: [u8; 4] = [233, 70, 25, 176]; @@ -436,7 +435,7 @@ pub const POC_DATA: [u8; 51] = [ 203, 122, 146, 49, 241, 156, 148, 74, 246, 68, 17, 8, 212, 48, 6, 152, 58, 221, 158, 186, 101, 37, 59, 135, 126, 18, 72, 244, 65, 174, ]; -#[allow(dead_code)] + pub const ENTROPY_TIMESTAMP: i64 = 1677163710000; const EU868_PARAMS: &[u8] = &[ diff --git a/iot_verifier/tests/integrations/purger_tests.rs b/iot_verifier/tests/integrations/purger_tests.rs index 6daafb641..876917dce 100644 --- a/iot_verifier/tests/integrations/purger_tests.rs +++ b/iot_verifier/tests/integrations/purger_tests.rs @@ -1,5 +1,5 @@ use crate::common; -use chrono::{Duration as ChronoDuration, TimeZone, Utc}; +use chrono::{TimeZone, Utc}; use helium_crypto::PublicKeyBinary; use helium_proto::services::poc_lora::{ InvalidParticipantSide, InvalidReason, LoraBeaconReportReqV1, LoraWitnessReportReqV1, @@ -16,10 +16,10 @@ async fn test_purger(pool: PgPool) -> anyhow::Result<()> { let (invalid_witness_client, mut invalid_witnesses) = common::create_file_sink(); // default stale periods after which the purger will delete reports from the db - let base_stale_period = ChronoDuration::seconds(0); - let beacon_stale_period = ChronoDuration::seconds(3); - let witness_stale_period = ChronoDuration::seconds(3); - let entropy_stale_period = ChronoDuration::seconds(3); + let base_stale_period = Duration::from_secs(0); + let beacon_stale_period = Duration::from_secs(3); + let witness_stale_period =
Duration::from_secs(3); + let entropy_stale_period = Duration::from_secs(3); // create the purger let purger = Purger { @@ -34,7 +34,7 @@ async fn test_purger(pool: PgPool) -> anyhow::Result<()> { // default reports timestamp let entropy_ts = Utc.timestamp_millis_opt(common::ENTROPY_TIMESTAMP).unwrap(); - let report_ts = entropy_ts + ChronoDuration::minutes(1); + let report_ts = entropy_ts + Duration::from_secs(60); // // inject a beacon, witness & entropy report into the db diff --git a/iot_verifier/tests/integrations/runner_tests.rs b/iot_verifier/tests/integrations/runner_tests.rs index 628980600..3c8fd7fec 100644 --- a/iot_verifier/tests/integrations/runner_tests.rs +++ b/iot_verifier/tests/integrations/runner_tests.rs @@ -1,6 +1,6 @@ use crate::common::{self, MockFileSinkReceiver}; use async_trait::async_trait; -use chrono::{DateTime, Duration as ChronoDuration, TimeZone, Utc}; +use chrono::{DateTime, TimeZone, Utc}; use denylist::DenyList; use futures_util::{stream, StreamExt as FuturesStreamExt}; use helium_crypto::PublicKeyBinary; @@ -23,7 +23,9 @@ use sqlx::PgPool; use std::{self, str::FromStr, time::Duration}; lazy_static! { - static ref BEACON_INTERVAL: ChronoDuration = ChronoDuration::seconds(21600); + static ref BEACON_INTERVAL: Duration = Duration::from_secs(21600); + static ref BEACON_INTERVAL_PLUS_TWO_HOURS: Duration = + *BEACON_INTERVAL + Duration::from_secs(2 * 60 * 60); } #[derive(Debug, Clone)] pub struct MockIotConfigClient { @@ -65,7 +67,7 @@ struct TestContext { } impl TestContext { - async fn setup(pool: PgPool, beacon_interval: ChronoDuration) -> anyhow::Result<Self> { + async fn setup(pool: PgPool, beacon_interval: Duration) -> anyhow::Result<Self> { // setup file sinks let (invalid_beacon_client, invalid_beacons) = common::create_file_sink(); let (invalid_witness_client, invalid_witnesses) = common::create_file_sink(); @@ -82,7 +84,7 @@ impl TestContext { let deny_list: DenyList = vec![PublicKeyBinary::from_str(common::DENIED_PUBKEY1).unwrap()] .try_into() .unwrap(); - let refresh_interval = ChronoDuration::seconds(30); + let refresh_interval = Duration::from_secs(30); let (gateway_updater_receiver, _gateway_updater_server) = GatewayUpdater::new(refresh_interval, iot_config_client.clone()).await?; let gateway_cache = GatewayCache::new(gateway_updater_receiver.clone()); @@ -115,7 +117,7 @@ impl TestContext { // and all beacon and witness reports will be created // with a received_ts based on an offset from this ts let entropy_ts = Utc.timestamp_millis_opt(common::ENTROPY_TIMESTAMP).unwrap(); - let report_ts = entropy_ts + ChronoDuration::minutes(1); + let report_ts = entropy_ts + Duration::from_secs(60); // add the entropy to the DB common::inject_entropy_report(pool.clone(), entropy_ts).await?; @@ -151,25 +153,25 @@ async fn valid_beacon_and_witness(pool: PgPool) -> anyhow::Result<()> { common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_beacon( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)),
+ now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; txn.commit().await?; @@ -219,25 +221,25 @@ async fn confirm_valid_reports_unmodified(pool: PgPool) -> anyhow::Result<()> { common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_beacon( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; txn.commit().await?; @@ -280,13 +282,13 @@ async fn confirm_invalid_reports_unmodified(pool: PgPool) -> anyhow::Result<()> common::inject_last_beacon( &mut txn, witness_to_inject.report.pub_key.clone(), - now - ChronoDuration::hours(1), + now - chrono::Duration::hours(1).to_std()?, ) .await?; common::inject_last_witness( &mut txn, witness_to_inject.report.pub_key.clone(), - now - ChronoDuration::hours(1), + now - chrono::Duration::hours(1).to_std()?, ) .await?; txn.commit().await?; @@ -330,25 +332,25 @@ async fn confirm_valid_beacon_invalid_witness_reports_unmodified( common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_beacon( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; txn.commit().await?; @@ -395,25 +397,25 @@ async fn valid_beacon_irregular_schedule_with_witness(pool: PgPool) -> anyhow::R common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_beacon( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; txn.commit().await?; @@ -498,13 +500,13 @@ async fn valid_beacon_irregular_schedule_no_witness(pool: PgPool) -> anyhow::Res common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - 
(*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; txn.commit().await?; @@ -591,25 +593,25 @@ async fn invalid_beacon_irregular_schedule_with_witness(pool: PgPool) -> anyhow: common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_beacon( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; txn.commit().await?; @@ -702,13 +704,13 @@ async fn valid_beacon_gateway_not_found(pool: PgPool) -> anyhow::Result<()> { common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; txn.commit().await?; @@ -765,25 +767,25 @@ async fn invalid_witness_no_metadata(pool: PgPool) -> anyhow::Result<()> { common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_beacon( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; txn.commit().await?; @@ -841,13 +843,13 @@ async fn invalid_beacon_no_gateway_found(pool: PgPool) -> anyhow::Result<()> { common::inject_last_beacon( &mut txn, witness_to_inject.report.pub_key.clone(), - now - ChronoDuration::hours(1), + now - chrono::Duration::hours(1), ) .await?; common::inject_last_witness( &mut txn, witness_to_inject.report.pub_key.clone(), - now - ChronoDuration::hours(1), + now - chrono::Duration::hours(1), ) .await?; txn.commit().await?; @@ -932,7 +934,7 @@ async fn invalid_beacon_bad_payload(pool: PgPool) -> anyhow::Result<()> { ctx.runner.handle_db_tick().await?; tokio::time::sleep(Duration::from_secs(3)).await; let mut txn = pool.begin().await?; - let beacon_report = Report::get_stale_beacons(&mut txn, ChronoDuration::seconds(1)).await?; + let beacon_report = Report::get_stale_beacons(&mut txn, Duration::from_secs(1)).await?; // max attempts is 2, once that is exceeded the report is no longer retried // so even tho we called handle_db_tick 5 times above, the report was only retried twice assert_eq!(2, beacon_report[0].attempts); @@ -963,13 +965,13 @@ async fn valid_beacon_and_witness_no_beacon_reciprocity(pool: PgPool) -> anyhow: 
common::inject_last_beacon( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; txn.commit().await?; @@ -1022,13 +1024,13 @@ async fn valid_beacon_and_witness_no_witness_reciprocity(pool: PgPool) -> anyhow common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (*BEACON_INTERVAL + ChronoDuration::hours(2)), + now - (*BEACON_INTERVAL_PLUS_TWO_HOURS), ) .await?; txn.commit().await?; @@ -1070,7 +1072,7 @@ async fn valid_beacon_and_witness_no_witness_reciprocity(pool: PgPool) -> anyhow #[sqlx::test] async fn valid_new_gateway_witness_first_reciprocity(pool: PgPool) -> anyhow::Result<()> { - let test_beacon_interval = ChronoDuration::seconds(5); + let test_beacon_interval = Duration::from_secs(5); let mut ctx = TestContext::setup(pool.clone(), test_beacon_interval).await?; let now = ctx.entropy_ts; @@ -1099,13 +1101,13 @@ async fn valid_new_gateway_witness_first_reciprocity(pool: PgPool) -> anyhow::Re common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (test_beacon_interval + ChronoDuration::seconds(10)), + now - (test_beacon_interval + Duration::from_secs(10)), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (test_beacon_interval + ChronoDuration::seconds(10)), + now - (test_beacon_interval + Duration::from_secs(10)), ) .await?; txn.commit().await?; @@ -1198,13 +1200,13 @@ async fn valid_new_gateway_witness_first_reciprocity(pool: PgPool) -> anyhow::Re common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (test_beacon_interval + ChronoDuration::seconds(10)), + now - (test_beacon_interval + Duration::from_secs(10)), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (test_beacon_interval + ChronoDuration::seconds(10)), + now - (test_beacon_interval + Duration::from_secs(10)), ) .await?; txn.commit().await?; @@ -1238,7 +1240,7 @@ async fn valid_new_gateway_witness_first_reciprocity(pool: PgPool) -> anyhow::Re #[sqlx::test] async fn valid_new_gateway_beacon_first_reciprocity(pool: PgPool) -> anyhow::Result<()> { - let test_beacon_interval = ChronoDuration::seconds(5); + let test_beacon_interval = Duration::from_secs(5); let mut ctx = TestContext::setup(pool.clone(), test_beacon_interval).await?; let now = ctx.entropy_ts; @@ -1295,13 +1297,13 @@ async fn valid_new_gateway_beacon_first_reciprocity(pool: PgPool) -> anyhow::Res common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (test_beacon_interval + ChronoDuration::seconds(10)), + now - (test_beacon_interval + Duration::from_secs(10)), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (test_beacon_interval + ChronoDuration::seconds(10)), + now - (test_beacon_interval + Duration::from_secs(10)), ) .await?; txn.commit().await?; @@ -1340,7 +1342,7 @@ async fn valid_new_gateway_beacon_first_reciprocity(pool: PgPool) -> anyhow::Res tokio::time::sleep(Duration::from_secs(6)).await; let 
beacon_to_inject = common::create_valid_beacon_report( common::BEACONER5, - ctx.entropy_ts + ChronoDuration::seconds(5), + ctx.entropy_ts + Duration::from_secs(5), ); let witness_to_inject = common::create_valid_witness_report(common::WITNESS2, ctx.entropy_ts); common::inject_beacon_report(pool.clone(), beacon_to_inject.clone()).await?; @@ -1351,13 +1353,13 @@ async fn valid_new_gateway_beacon_first_reciprocity(pool: PgPool) -> anyhow::Res common::inject_last_beacon( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (test_beacon_interval + ChronoDuration::seconds(10)), + now - (test_beacon_interval + Duration::from_secs(10)), ) .await?; common::inject_last_witness( &mut txn, witness_to_inject.report.pub_key.clone(), - now - (test_beacon_interval + ChronoDuration::seconds(10)), + now - (test_beacon_interval + Duration::from_secs(10)), ) .await?; txn.commit().await?; @@ -1392,7 +1394,7 @@ async fn valid_new_gateway_beacon_first_reciprocity(pool: PgPool) -> anyhow::Res #[sqlx::test] async fn valid_lone_wolf_beacon(pool: PgPool) -> anyhow::Result<()> { - let test_beacon_interval = ChronoDuration::seconds(5); + let test_beacon_interval = Duration::from_secs(5); let mut ctx = TestContext::setup(pool.clone(), test_beacon_interval).await?; let now = ctx.entropy_ts; @@ -1446,13 +1448,13 @@ async fn valid_lone_wolf_beacon(pool: PgPool) -> anyhow::Result<()> { common::inject_last_beacon( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (test_beacon_interval + ChronoDuration::seconds(10)), + now - (test_beacon_interval + Duration::from_secs(10)), ) .await?; common::inject_last_witness( &mut txn, beacon_to_inject.report.pub_key.clone(), - now - (test_beacon_interval + ChronoDuration::seconds(10)), + now - (test_beacon_interval + Duration::from_secs(10)), ) .await?; txn.commit().await?; @@ -1494,7 +1496,7 @@ async fn valid_lone_wolf_beacon(pool: PgPool) -> anyhow::Result<()> { #[sqlx::test] async fn valid_two_isolated_gateways_beaconing_and_witnessing(pool: PgPool) -> anyhow::Result<()> { - let test_beacon_interval = ChronoDuration::seconds(5); + let test_beacon_interval = Duration::from_secs(5); let mut ctx = TestContext::setup(pool.clone(), test_beacon_interval).await?; // simulate two gateways with no recent activity coming online and @@ -1576,11 +1578,11 @@ async fn valid_two_isolated_gateways_beaconing_and_witnessing(pool: PgPool) -> a let beacon_to_inject = common::create_valid_beacon_report( common::BEACONER1, - ctx.entropy_ts + ChronoDuration::seconds(5), + ctx.entropy_ts + chrono::Duration::seconds(5), ); let witness_to_inject = common::create_valid_witness_report( common::WITNESS1, - ctx.entropy_ts + ChronoDuration::seconds(5), + ctx.entropy_ts + chrono::Duration::seconds(5), ); common::inject_beacon_report(pool.clone(), beacon_to_inject.clone()).await?; common::inject_witness_report(pool.clone(), witness_to_inject.clone()).await?; diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml index 5b16e567d..8eb9a10c2 100644 --- a/metrics/Cargo.toml +++ b/metrics/Cargo.toml @@ -11,5 +11,11 @@ tower = "0.4" thiserror = { workspace = true } serde = { workspace = true } tracing = { workspace = true } +tracing-subscriber = { workspace = true } metrics = { workspace = true } metrics-exporter-prometheus = { workspace = true } +futures = { workspace = true } + +[dev-dependencies] +tokio = { workspace = true } +reqwest = { workspace = true } diff --git a/metrics/src/client_requests.rs b/metrics/src/client_requests.rs new file mode 100644 index 000000000..afc802a7d --- /dev/null +++ 
b/metrics/src/client_requests.rs @@ -0,0 +1,205 @@ +//! Add a timing span to anything that can be instrumented and returns a Result. +//! +//! Example: +//! ```ignore +//! use poc_metrics::client_requests::ClientMetricTiming; +//! +//! async fn time_function() { +//! let x: Result<u64, ()> = async { Ok(42) } +//! .with_timing("iot_fetch_info") +//! .await; +//! assert_eq!(42, x.unwrap()); +//! } +//! ``` +//! +//! This will result in a prometheus metric +//! >> client_request_duration_seconds{name = "iot_fetch_info", quantile="xxx"} +//! +//! Install the `ApiTimingLayer`. +//! +//! Adding `.with_span_events(FmtSpan::CLOSE)` to a regular format layer will +//! print the timing spans to stdout as well. +//! +//! Example: +//! ```ignore +//! use poc_metrics::client_requests; +//! use tracing_subscriber::fmt::format::FmtSpan; +//! use tracing_subscriber::layer::SubscriberExt; +//! use tracing_subscriber::util::SubscriberInitExt; +//! +//! tracing_subscriber::registry() +//! .with(tracing_subscriber::fmt::layer().with_span_events(FmtSpan::CLOSE)) +//! .with(client_requests::client_request_timing_layer("histogram_name")) +//! .init(); +//! ``` +use futures::{future::Inspect, Future, FutureExt}; +use std::time::Instant; +use tracing::{field::Visit, instrument::Instrumented, span, Instrument, Subscriber}; +use tracing_subscriber::{filter, layer, registry::LookupSpan, Layer}; + +const SPAN_NAME: &str = "metrics::timing"; +const RESULT_FIELD: &str = "result"; +const NAME_FIELD: &str = "name"; +const SUCCESS: &str = "ok"; +const ERROR: &str = "error"; +const UNKNOWN: &str = "unknown"; + +pub fn client_request_timing_layer<S>(histogram_name: &'static str) -> impl layer::Layer<S> +where + S: Subscriber + for<'a> LookupSpan<'a>, +{ + ApiTimingLayer::new(histogram_name).with_filter(filter::filter_fn(|m| m.name() == SPAN_NAME)) +} + +pub trait ClientMetricTiming<T, E>: Sized + Instrument + FutureExt { + fn with_timing( + self, + name: &'static str, + ) -> Instrumented<Inspect<Self, impl FnOnce(&Result<T, E>)>> + where + Self: Future<Output = Result<T, E>> + Sized; +} + +// Impl ClientMetricTiming for all futures that return a Result +impl<T, E, F> ClientMetricTiming<T, E> for F +where + F: Future<Output = Result<T, E>> + Sized, +{ + fn with_timing( + self, + name: &'static str, + ) -> Instrumented<Inspect<Self, impl FnOnce(&Result<T, E>)>> { + // NOTE(mj): `tracing::info_span!(SPAN_NAME, {NAME_FIELD} = name, {RESULT_FIELD} = tracing::field::Empty);` + // + // Results in the error "format must be a string literal". Maybe one day + // this will be fixed in the tracing macro so we can use it like the + // docs say.
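+ // Until then, the literal `name` and `result` field names below must stay + // in sync with the NAME_FIELD and RESULT_FIELD constants, since the Timing + // visitor matches on those strings when recording values.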
+ let span = tracing::info_span!(SPAN_NAME, name, result = tracing::field::Empty); + let inner_span = span.clone(); + self.inspect(move |res| { + inner_span.record(RESULT_FIELD, res.as_ref().ok().map_or(ERROR, |_| SUCCESS)); + }) + .instrument(span) + } +} + +struct Timing { + name: Option<String>, + start: Instant, + // ok | error | unknown + result: String, +} + +impl Timing { + fn new() -> Self { + Self { + name: None, + start: Instant::now(), + result: UNKNOWN.to_string(), + } + } + + fn record(self, histogram_name: &'static str) { + if let Some(name) = self.name { + metrics::histogram!( + histogram_name, + NAME_FIELD => name, + RESULT_FIELD => self.result + ) + .record(self.start.elapsed().as_secs_f64()) + } + } +} + +impl Visit for Timing { + fn record_debug(&mut self, _field: &tracing::field::Field, _value: &dyn std::fmt::Debug) {} + fn record_str(&mut self, field: &tracing::field::Field, value: &str) { + match field.name() { + NAME_FIELD => self.name = Some(value.to_string()), + RESULT_FIELD => self.result = value.to_string(), + _ => (), + } + } +} + +struct ApiTimingLayer { + histogram_name: &'static str, +} + +impl ApiTimingLayer { + fn new(histogram_name: &'static str) -> Self { + Self { histogram_name } + } +} + +impl<S> tracing_subscriber::Layer<S> for ApiTimingLayer +where + S: Subscriber + for<'a> LookupSpan<'a>, +{ + fn on_new_span(&self, attrs: &span::Attributes<'_>, id: &span::Id, ctx: layer::Context<'_, S>) { + let span = ctx.span(id).expect("Span not found, this is a bug"); + + let mut timing = Timing::new(); + attrs.values().record(&mut timing); + span.extensions_mut().insert(timing); + } + + fn on_record(&self, id: &span::Id, values: &span::Record<'_>, ctx: layer::Context<'_, S>) { + let span = ctx.span(id).unwrap(); + + if let Some(timing) = span.extensions_mut().get_mut::<Timing>() { + values.record(timing); + }; + } + + fn on_close(&self, id: tracing::Id, ctx: layer::Context<'_, S>) { + let span = ctx.span(&id).unwrap(); + + if let Some(timing) = span.extensions_mut().remove::<Timing>() { + timing.record(self.histogram_name); + }; + } +} + +#[cfg(test)] +mod tests { + use super::ClientMetricTiming; + use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; + + #[tokio::test] + async fn test_telemetry() -> Result<(), Box<dyn std::error::Error>> { + tracing_subscriber::registry() + // Uncomment to view traces and Spans closing + // .with( + // tracing_subscriber::fmt::layer() + // .with_span_events(tracing_subscriber::fmt::format::FmtSpan::CLOSE), + // ) + .with(super::client_request_timing_layer("histogram_name")) + .init(); + + // Let the OS assign a port + let addr = { + let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + listener.local_addr()? + }; + tracing::info!("listening on {addr}"); + super::super::install(addr)?; + + let success = async { Ok("nothing went wrong") }; + let failure = async { Err("something went wrong") }; + let _: Result<&str, &str> = success.with_timing("success").await; + let _: Result<&str, &str> = failure.with_timing("failing").await; + + // .with_timing() can only be added to futures that return Results.
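+ // The commented-out line below therefore fails to compile, since the + // blanket impl above exists only for futures whose output is a Result: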
+ // let will_not_compile = async { 1 + 2 }.with_timing("not a result"); + + let res = reqwest::get(format!("http://{addr}")).await?; + let body = res.text().await?; + + tracing::info!("response: \n{body}"); + assert!(body.contains(r#"histogram_name_count{name="success",result="ok"} 1"#)); + assert!(body.contains(r#"histogram_name_count{name="failing",result="error"} 1"#)); + + Ok(()) + } +} diff --git a/metrics/src/lib.rs b/metrics/src/lib.rs index 5e4b6b60b..286b25a51 100644 --- a/metrics/src/lib.rs +++ b/metrics/src/lib.rs @@ -12,32 +12,25 @@ use std::{ }; use tower::{Layer, Service}; +pub mod client_requests; mod error; pub mod settings; pub fn start_metrics(settings: &Settings) -> Result { - let socket: SocketAddr = settings.endpoint.parse()?; - PrometheusBuilder::new() - .with_http_listener(socket) - .install()?; - Ok(()) + install(settings.endpoint) } -/// Install the Prometheus export gateway -pub fn install_metrics() { - let endpoint = - std::env::var("METRICS_SCRAPE_ENDPOINT").unwrap_or_else(|_| String::from("0.0.0.0:9000")); - let socket: SocketAddr = endpoint - .parse() - .expect("Invalid METRICS_SCRAPE_ENDPOINT value"); +fn install(socket_addr: SocketAddr) -> Result { if let Err(e) = PrometheusBuilder::new() - .with_http_listener(socket) + .with_http_listener(socket_addr) .install() { tracing::error!(target: "poc", "Failed to install Prometheus scrape endpoint: {e}"); } else { - tracing::info!(target: "poc", "Metrics scrape endpoint listening on {endpoint}"); + tracing::info!(target: "poc", "Metrics scrape endpoint listening on {socket_addr}"); } + + Ok(()) } /// Measure the duration of a block and record it @@ -49,7 +42,7 @@ macro_rules! record_duration { ( $metric_name:expr, $e:expr ) => {{ let timer = std::time::Instant::now(); let res = $e; - ::metrics::histogram!($metric_name, timer.elapsed()); + ::metrics::histogram!($metric_name).record(timer.elapsed()); res }}; } @@ -126,7 +119,7 @@ where let metric_name_time = self.metric_name_time; let timer = std::time::Instant::now(); - metrics::increment_gauge!(metric_name_count, 1.0); + metrics::gauge!(metric_name_count).increment(1.0); let clone = self.inner.clone(); // take the service that was ready @@ -134,11 +127,11 @@ where Box::pin(async move { let res = inner.call(req).await; - metrics::decrement_gauge!(metric_name_count, 1.0); + metrics::gauge!(metric_name_count).decrement(1.0); let elapsed_time = timer.elapsed(); tracing::debug!("request processed in {elapsed_time:?}"); // TODO What units to use? Is f64 seconds appropriate? 
- ::metrics::histogram!(metric_name_time, elapsed_time.as_secs_f64()); + metrics::histogram!(metric_name_time).record(elapsed_time.as_secs_f64()); res }) } diff --git a/metrics/src/settings.rs b/metrics/src/settings.rs index 9a0841286..55cfb2e19 100644 --- a/metrics/src/settings.rs +++ b/metrics/src/settings.rs @@ -1,12 +1,14 @@ +use std::net::SocketAddr; + use serde::Deserialize; #[derive(Debug, Deserialize, Clone)] pub struct Settings { /// Scrape endpoint for metrics #[serde(default = "default_metrics_endpoint")] - pub endpoint: String, + pub endpoint: SocketAddr, } -pub fn default_metrics_endpoint() -> String { - "127.0.0.1:19000".to_string() +fn default_metrics_endpoint() -> SocketAddr { + "127.0.0.1:19000".parse().unwrap() } diff --git a/mobile_config/src/main.rs b/mobile_config/src/main.rs index bfaf9a9b5..60562beb1 100644 --- a/mobile_config/src/main.rs +++ b/mobile_config/src/main.rs @@ -71,8 +71,6 @@ impl Daemon { // Create on-chain metadata pool let metadata_pool = settings.metadata.connect("mobile-config-metadata").await?; - let listen_addr = settings.listen_addr()?; - let (key_cache_updater, key_cache) = KeyCache::from_settings(settings, &pool).await?; let admin_svc = @@ -97,6 +95,7 @@ impl Daemon { settings.signing_keypair()?, ); + let listen_addr = settings.listen; let grpc_server = GrpcServer { listen_addr, admin_svc, diff --git a/mobile_config/src/settings.rs b/mobile_config/src/settings.rs index dfba3af10..1e9bcbedc 100644 --- a/mobile_config/src/settings.rs +++ b/mobile_config/src/settings.rs @@ -1,11 +1,7 @@ use anyhow::Context; use config::{Config, Environment, File}; use serde::Deserialize; -use std::{ - net::{AddrParseError, SocketAddr}, - path::Path, - str::FromStr, -}; +use std::{net::SocketAddr, path::Path, str::FromStr}; #[derive(Debug, Deserialize)] pub struct Settings { @@ -15,7 +11,7 @@ pub struct Settings { pub log: String, /// Listen address. Required. 
Defaults to 0.0.0.0:8080 #[serde(default = "default_listen_addr")] - pub listen: String, + pub listen: SocketAddr, /// File from which to load config server signing keypair pub signing_keypair: String, /// B58 encoded public key of the default admin keypair @@ -33,8 +29,8 @@ pub fn default_log() -> String { "mobile_config=debug".to_string() } -pub fn default_listen_addr() -> String { - "0.0.0.0:8080".to_string() +pub fn default_listen_addr() -> SocketAddr { + "0.0.0.0:8080".parse().unwrap() } impl Settings { @@ -61,10 +57,6 @@ impl Settings { .and_then(|config| config.try_deserialize()) } - pub fn listen_addr(&self) -> Result<SocketAddr, AddrParseError> { - SocketAddr::from_str(&self.listen) - } - pub fn signing_keypair(&self) -> anyhow::Result<helium_crypto::Keypair> { let data = std::fs::read(&self.signing_keypair) .map_err(helium_crypto::Error::from) diff --git a/mobile_config/src/telemetry.rs b/mobile_config/src/telemetry.rs index 3238d8748..550b2df21 100644 --- a/mobile_config/src/telemetry.rs +++ b/mobile_config/src/telemetry.rs @@ -3,9 +3,9 @@ const GATEWAY_CHAIN_LOOKUP_METRIC: &str = concat!(env!("CARGO_PKG_NAME"), "-", "gateway-chain-lookup"); pub fn count_request(service: &'static str, rpc: &'static str) { - metrics::increment_counter!(RPC_METRIC, "service" => service, "rpc" => rpc); + metrics::counter!(RPC_METRIC, "service" => service, "rpc" => rpc).increment(1); } pub fn count_gateway_chain_lookup(result: &'static str) { - metrics::increment_counter!(GATEWAY_CHAIN_LOOKUP_METRIC, "result" => result); + metrics::counter!(GATEWAY_CHAIN_LOOKUP_METRIC, "result" => result).increment(1); } diff --git a/mobile_packet_verifier/Cargo.toml b/mobile_packet_verifier/Cargo.toml index 6e49b156d..820a84280 100644 --- a/mobile_packet_verifier/Cargo.toml +++ b/mobile_packet_verifier/Cargo.toml @@ -35,3 +35,4 @@ triggered = {workspace = true} http = {workspace = true} http-serde = {workspace = true} sha2 = {workspace = true} +humantime-serde = {workspace = true} \ No newline at end of file diff --git a/mobile_packet_verifier/pkg/settings-template.toml b/mobile_packet_verifier/pkg/settings-template.toml index 1e47a6b8e..836071aed 100644 --- a/mobile_packet_verifier/pkg/settings-template.toml +++ b/mobile_packet_verifier/pkg/settings-template.toml @@ -8,11 +8,11 @@ cache = "/var/data/verified-reports" # We will burn data credits from the solana chain every `burn_period`. # Default value is 1 hour. -burn_period = 1 +burn_period = "1 hour" # If a burn fails, we will accelerate and retry every `min_burn_period` instead of `burn_period`. # Default value is 15 minutes. -min_burn_period = 15 +min_burn_period = "15 minutes" # If set to true, enables integration with the Solana network. This includes # checking payer balances and burning data credits.
If this is disabled, all diff --git a/mobile_packet_verifier/src/burner.rs b/mobile_packet_verifier/src/burner.rs index ffb6a0c0f..6cf3c7f41 100644 --- a/mobile_packet_verifier/src/burner.rs +++ b/mobile_packet_verifier/src/burner.rs @@ -96,13 +96,15 @@ where tracing::info!(%total_dcs, %payer, "Burning DC"); if self.burn_data_credits(&payer, total_dcs).await.is_err() { // We have failed to burn data credits: - metrics::counter!("burned", total_dcs, "payer" => payer.to_string(), "success" => "false"); + metrics::counter!("burned", "payer" => payer.to_string(), "success" => "false") + .increment(total_dcs); continue; } // We successfully managed to burn data credits: - metrics::counter!("burned", total_dcs, "payer" => payer.to_string(), "success" => "true"); + metrics::counter!("burned", "payer" => payer.to_string(), "success" => "true") + .increment(total_dcs); // Delete from the data transfer session and write out to S3 diff --git a/mobile_packet_verifier/src/daemon.rs b/mobile_packet_verifier/src/daemon.rs index 6692783f9..767132e1b 100644 --- a/mobile_packet_verifier/src/daemon.rs +++ b/mobile_packet_verifier/src/daemon.rs @@ -46,8 +46,8 @@ impl Daemon { pool, burner, reports, - burn_period: settings.burn_period(), - min_burn_period: settings.min_burn_period(), + burn_period: settings.burn_period, + min_burn_period: settings.min_burn_period, gateway_info_resolver, authorization_verifier, invalid_data_session_report_sink, @@ -169,7 +169,7 @@ impl Cmd { Utc.timestamp_millis_opt(0).unwrap(), )) .prefix(FileType::DataTransferSessionIngestReport.to_string()) - .lookback(LookbackBehavior::StartAfter(settings.start_after())) + .lookback(LookbackBehavior::StartAfter(settings.start_after)) .create() .await?; diff --git a/mobile_packet_verifier/src/event_ids.rs b/mobile_packet_verifier/src/event_ids.rs index caac3f22f..88b6747b8 100644 --- a/mobile_packet_verifier/src/event_ids.rs +++ b/mobile_packet_verifier/src/event_ids.rs @@ -1,5 +1,6 @@ -use chrono::{DateTime, Duration, Utc}; +use chrono::{DateTime, Utc}; use sqlx::{Pool, Postgres, Transaction}; +use std::time::Duration; use task_manager::ManagedTask; use crate::settings::Settings; @@ -37,13 +38,13 @@ impl EventIdPurger { pub fn from_settings(conn: Pool<Postgres>, settings: &Settings) -> Self { Self { conn, - interval: settings.purger_interval(), - max_age: settings.purger_max_age(), + interval: settings.purger_interval, + max_age: settings.purger_max_age, } } pub async fn run(self, mut shutdown: triggered::Listener) -> anyhow::Result<()> { - let mut timer = tokio::time::interval(self.interval.to_std()?); + let mut timer = tokio::time::interval(self.interval); loop { tokio::select! { diff --git a/mobile_packet_verifier/src/settings.rs b/mobile_packet_verifier/src/settings.rs index f4b190d6e..72ef9b586 100644 --- a/mobile_packet_verifier/src/settings.rs +++ b/mobile_packet_verifier/src/settings.rs @@ -1,7 +1,8 @@ -use chrono::{DateTime, Duration, TimeZone, Utc}; +use chrono::{DateTime, Utc}; use config::{Config, ConfigError, Environment, File}; +use humantime_serde::re::humantime; use serde::Deserialize; -use std::path::Path; +use std::{path::Path, time::Duration}; #[derive(Debug, Deserialize)] pub struct Settings { @@ -11,12 +12,12 @@ pub struct Settings { pub log: String, /// Cache location for generated verified reports pub cache: String, - /// Burn period in hours. (Default is 1) - #[serde(default = "default_burn_period")] - pub burn_period: i64, - /// Minimum burn period when error, in minutes.
(Default is 15) - #[serde(default = "default_min_burn_period")] - pub min_burn_period: i64, + /// Burn period. (Default is 1 hour) + #[serde(with = "humantime_serde", default = "default_burn_period")] + pub burn_period: Duration, + /// Minimum burn period when error. (Default is 15 minutes) + #[serde(with = "humantime_serde", default = "default_min_burn_period")] + pub min_burn_period: Duration, pub database: db_store::Settings, pub ingest: file_store::Settings, pub output: file_store::Settings, @@ -26,39 +27,35 @@ pub struct Settings { pub solana: Option, pub config_client: mobile_config::ClientSettings, #[serde(default = "default_start_after")] - pub start_after: u64, - #[serde(default = "default_purger_interval_in_hours")] - pub purger_interval_in_hours: u64, - #[serde(default = "default_purger_max_age_in_hours")] - pub purger_max_age_in_hours: u64, + pub start_after: DateTime<Utc>, + #[serde(with = "humantime_serde", default = "default_purger_interval")] + pub purger_interval: Duration, + #[serde(with = "humantime_serde", default = "default_purger_max_age")] + pub purger_max_age: Duration, } -pub fn default_purger_interval_in_hours() -> u64 { - 1 +fn default_purger_interval() -> Duration { + humantime::parse_duration("1 hour").unwrap() } -pub fn default_purger_max_age_in_hours() -> u64 { - 24 +fn default_purger_max_age() -> Duration { + humantime::parse_duration("24 hours").unwrap() } -pub fn default_start_after() -> u64 { - 0 +fn default_start_after() -> DateTime<Utc> { + DateTime::UNIX_EPOCH } -pub fn default_url() -> http::Uri { - http::Uri::from_static("http://127.0.0.1:8080") -} - -pub fn default_log() -> String { +fn default_log() -> String { "mobile_packet_verifier=debug,poc_store=info".to_string() } -pub fn default_burn_period() -> i64 { - 1 +fn default_burn_period() -> Duration { + humantime::parse_duration("1 hour").unwrap() } -pub fn default_min_burn_period() -> i64 { - 15 +fn default_min_burn_period() -> Duration { + humantime::parse_duration("15 minutes").unwrap() } impl Settings { @@ -83,26 +80,4 @@ impl Settings { .build() .and_then(|config| config.try_deserialize()) } - - pub fn start_after(&self) -> DateTime<Utc> { - Utc.timestamp_opt(self.start_after as i64, 0) - .single() - .unwrap() - } - - pub fn burn_period(&self) -> tokio::time::Duration { - tokio::time::Duration::from_secs(60 * 60 * self.burn_period as u64) - } - - pub fn min_burn_period(&self) -> tokio::time::Duration { - tokio::time::Duration::from_secs(60 * self.min_burn_period as u64) - } - - pub fn purger_interval(&self) -> Duration { - Duration::hours(self.purger_interval_in_hours as i64) - } - - pub fn purger_max_age(&self) -> Duration { - Duration::hours(self.purger_max_age_in_hours as i64) - } } diff --git a/mobile_verifier/Cargo.toml b/mobile_verifier/Cargo.toml index 2bc69762b..b26843bed 100644 --- a/mobile_verifier/Cargo.toml +++ b/mobile_verifier/Cargo.toml @@ -55,6 +55,7 @@ task-manager = {path = "../task_manager"} solana-sdk = {workspace = true} derive_builder = {workspace = true} regex = "1" +humantime-serde = {workspace = true} [dev-dependencies] backon = "0" diff --git a/mobile_verifier/pkg/settings-template.toml b/mobile_verifier/pkg/settings-template.toml index 519525106..57b462eca 100644 --- a/mobile_verifier/pkg/settings-template.toml +++ b/mobile_verifier/pkg/settings-template.toml @@ -7,14 +7,11 @@ cache = "/var/data/verfied-reports" # Reward period. (Default is 24 hours) -# rewards = 24 - -# Verifications per rewards period.
Default is 8 -# verifications = 8 +# rewards = "24 hours" # Verification offset, verification will occur at the end of -# the verification period + verification_offset_minutes; Default = 30 -# verification_offset_minutes = 30 +# the reward period + reward_period_offset; Default = 30 minutes +# reward_period_offset = "30 minutes" [database] diff --git a/mobile_verifier/src/coverage.rs b/mobile_verifier/src/coverage.rs index 0a315279f..eed1463c1 100644 --- a/mobile_verifier/src/coverage.rs +++ b/mobile_verifier/src/coverage.rs @@ -97,7 +97,7 @@ impl CoverageDaemon { file_source::continuous_source::<CoverageObjectIngestReport>() .state(pool.clone()) .store(file_store) - .lookback(LookbackBehavior::StartAfter(settings.start_after())) + .lookback(LookbackBehavior::StartAfter(settings.start_after)) .prefix(FileType::CoverageObjectIngestReport.to_string()) .create() .await?; @@ -136,16 +136,16 @@ impl CoverageDaemon { pub async fn run(mut self, shutdown: triggered::Listener) -> anyhow::Result<()> { loop { - #[rustfmt::skip] tokio::select! { _ = shutdown.clone() => { tracing::info!("CoverageDaemon shutting down"); break; } Some(file) = self.coverage_objs.recv() => { - let start = Instant::now(); - self.process_file(file).await?; - metrics::histogram!("coverage_object_processing_time", start.elapsed()); + let start = Instant::now(); + self.process_file(file).await?; + metrics::histogram!("coverage_object_processing_time") + .record(start.elapsed()); } } } @@ -376,9 +376,11 @@ impl Ord for IndoorCoverageLevel { impl IndoorCoverageLevel { fn coverage_points(&self) -> Decimal { - match self.signal_level { - SignalLevel::High => dec!(400), - SignalLevel::Low => dec!(100), + match (&self.radio_key, self.signal_level) { + (OwnedKeyType::Wifi(_), SignalLevel::High) => dec!(400), + (OwnedKeyType::Wifi(_), SignalLevel::Low) => dec!(100), + (OwnedKeyType::Cbrs(_), SignalLevel::High) => dec!(100), + (OwnedKeyType::Cbrs(_), SignalLevel::Low) => dec!(25), _ => dec!(0), } } @@ -418,11 +420,15 @@ impl Ord for OutdoorCoverageLevel { impl OutdoorCoverageLevel { fn coverage_points(&self) -> Decimal { - match self.signal_level { - SignalLevel::High => dec!(16), - SignalLevel::Medium => dec!(8), - SignalLevel::Low => dec!(4), - SignalLevel::None => dec!(0), + match (&self.radio_key, self.signal_level) { + (OwnedKeyType::Wifi(_), SignalLevel::High) => dec!(16), + (OwnedKeyType::Wifi(_), SignalLevel::Medium) => dec!(8), + (OwnedKeyType::Wifi(_), SignalLevel::Low) => dec!(4), + (OwnedKeyType::Wifi(_), SignalLevel::None) => dec!(0), + (OwnedKeyType::Cbrs(_), SignalLevel::High) => dec!(4), + (OwnedKeyType::Cbrs(_), SignalLevel::Medium) => dec!(2), + (OwnedKeyType::Cbrs(_), SignalLevel::Low) => dec!(1), + (OwnedKeyType::Cbrs(_), SignalLevel::None) => dec!(0), } } } @@ -964,7 +970,7 @@ mod test { radio_key: OwnedKeyType::Cbrs("3".to_string()), hotspot: owner, points: CoverageRewardPoints { - coverage_points: dec!(400), + coverage_points: dec!(100), boost_multiplier: NonZeroU32::new(1).unwrap(), hex_assignments: HexAssignments::test_best(), rank: None @@ -1080,7 +1086,7 @@ mod test { radio_key: OwnedKeyType::Cbrs("10".to_string()), hotspot: owner.clone(), points: CoverageRewardPoints { - coverage_points: dec!(400), + coverage_points: dec!(100), boost_multiplier: NonZeroU32::new(1).unwrap(), hex_assignments: HexAssignments::test_best(), rank: None @@ -1123,7 +1129,7 @@ mod test { radio_key: OwnedKeyType::Cbrs("5".to_string()), hotspot: owner.clone(), points: CoverageRewardPoints { - coverage_points: dec!(16), + coverage_points: dec!(4), rank:
Some(dec!(1.0)), boost_multiplier: NonZeroU32::new(1).unwrap(), hex_assignments: HexAssignments::test_best(), @@ -1137,7 +1143,7 @@ mod test { radio_key: OwnedKeyType::Cbrs("4".to_string()), hotspot: owner.clone(), points: CoverageRewardPoints { - coverage_points: dec!(16), + coverage_points: dec!(4), rank: Some(dec!(0.50)), boost_multiplier: NonZeroU32::new(1).unwrap(), hex_assignments: HexAssignments::test_best(), @@ -1151,7 +1157,7 @@ mod test { radio_key: OwnedKeyType::Cbrs("3".to_string()), hotspot: owner, points: CoverageRewardPoints { - coverage_points: dec!(16), + coverage_points: dec!(4), rank: Some(dec!(0.25)), boost_multiplier: NonZeroU32::new(1).unwrap(), hex_assignments: HexAssignments::test_best(), @@ -1297,7 +1303,7 @@ mod test { // assert outdoor cbrs radios assert_eq!( - dec!(16), + dec!(4), rewards .iter() .find(|r| r.radio_key == OwnedKeyType::Cbrs("oco1-3".to_string())) @@ -1307,7 +1313,7 @@ ); assert_eq!( - dec!(8), + dec!(2), rewards .iter() .find(|r| r.radio_key == OwnedKeyType::Cbrs("oco1-4".to_string())) @@ -1317,7 +1323,7 @@ ); assert_eq!( - dec!(4), + dec!(1), rewards .iter() .find(|r| r.radio_key == OwnedKeyType::Cbrs("oco1-1".to_string())) @@ -1335,7 +1341,7 @@ // assert indoor cbrs radios assert_eq!( - dec!(400), + dec!(100), rewards .iter() .find(|r| r.radio_key == OwnedKeyType::Cbrs("ico1-1".to_string())) diff --git a/mobile_verifier/src/data_session.rs b/mobile_verifier/src/data_session.rs index 9873febc3..b519fed64 100644 --- a/mobile_verifier/src/data_session.rs +++ b/mobile_verifier/src/data_session.rs @@ -49,7 +49,7 @@ impl DataSessionIngestor { file_source::continuous_source::<ValidDataTransferSession>() .state(pool.clone()) .store(data_transfer_ingest) - .lookback(LookbackBehavior::StartAfter(settings.start_after())) + .lookback(LookbackBehavior::StartAfter(settings.start_after)) .prefix(FileType::ValidDataTransferSession.to_string()) .create() .await?; @@ -73,7 +73,6 @@ impl DataSessionIngestor { tracing::info!("starting DataSessionIngestor"); tokio::spawn(async move { loop { - #[rustfmt::skip] tokio::select! { biased; _ = shutdown.clone() => { @@ -81,12 +80,10 @@ impl DataSessionIngestor { break; } Some(file) = self.receiver.recv() => { - let start = Instant::now(); - self.process_file(file).await?; - metrics::histogram!( - "valid_data_transfer_session_processing_time", - start.elapsed() - ); + let start = Instant::now(); + self.process_file(file).await?; + metrics::histogram!("valid_data_transfer_session_processing_time") + .record(start.elapsed()); } } } @@ -115,7 +112,8 @@ impl DataSessionIngestor { .try_fold(transaction, |mut transaction, report| async move { let data_session = HotspotDataSession::from_valid_data_session(report, file_ts); data_session.save(&mut transaction).await?; - metrics::increment_counter!("oracles_mobile_verifier_ingest_hotspot_data_session"); + metrics::counter!("oracles_mobile_verifier_ingest_hotspot_data_session") + .increment(1); Ok(transaction) }) .await?
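Aside on the metrics API migration running through the hunks above and below: the `metrics` crate (0.22 and later) replaced the value-taking macros (`increment_counter!`, `increment_gauge!`, and the two-argument forms of `counter!`/`gauge!`/`histogram!`) with macros that return a handle (`Counter`, `Gauge`, `Histogram`) on which you call `increment`/`set`/`record`. A minimal sketch of the before/after, with hypothetical metric names (`requests`, `in_flight`, `request_duration`), assuming metrics >= 0.22:

```rust
use std::time::Instant;

fn handle_request(payer: String) {
    let start = Instant::now();

    // Before (metrics 0.21): metrics::increment_counter!("requests", "payer" => payer);
    // After: the macro returns a Counter handle; labels keep the same `key => value` syntax.
    metrics::counter!("requests", "payer" => payer).increment(1);

    // Before: metrics::increment_gauge!("in_flight", 1.0);
    metrics::gauge!("in_flight").increment(1.0);

    // ... do the work ...

    metrics::gauge!("in_flight").decrement(1.0);

    // Before: metrics::histogram!("request_duration", start.elapsed().as_secs_f64());
    metrics::histogram!("request_duration").record(start.elapsed().as_secs_f64());
}
```

The handle style is why hunks like the burner's `counter!("burned", ...)` now separate the label set (macro arguments) from the amount (`.increment(total_dcs)`).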
diff --git a/mobile_verifier/src/heartbeats/cbrs.rs b/mobile_verifier/src/heartbeats/cbrs.rs index 91ae48e34..a95ef1a2a 100644 --- a/mobile_verifier/src/heartbeats/cbrs.rs +++ b/mobile_verifier/src/heartbeats/cbrs.rs @@ -56,7 +56,7 @@ where file_source::continuous_source::<CbrsHeartbeatIngestReport>() .state(pool.clone()) .store(file_store) - .lookback(LookbackBehavior::StartAfter(settings.start_after())) + .lookback(LookbackBehavior::StartAfter(settings.start_after)) .prefix(FileType::CbrsHeartbeatIngestReport.to_string()) .queue_size(1) .create() @@ -66,7 +66,7 @@ where pool, gateway_resolver, cbrs_heartbeats, - settings.modeled_coverage_start(), + settings.modeled_coverage_start, settings.max_asserted_distance_deviation, settings.max_distance_from_coverage, valid_heartbeats, @@ -122,7 +122,6 @@ where let location_cache = LocationCache::new(&self.pool); loop { - #[rustfmt::skip] tokio::select! { biased; _ = shutdown.clone() => { @@ -130,15 +129,16 @@ break; } Some(file) = self.heartbeats.recv() => { - let start = Instant::now(); - self.process_file( + let start = Instant::now(); + self.process_file( file, &heartbeat_cache, &coverage_claim_time_cache, &coverage_object_cache, &location_cache, - ).await?; - metrics::histogram!("cbrs_heartbeat_processing_time", start.elapsed()); + ).await?; + metrics::histogram!("cbrs_heartbeat_processing_time") + .record(start.elapsed()); } } } diff --git a/mobile_verifier/src/heartbeats/mod.rs b/mobile_verifier/src/heartbeats/mod.rs index 1ea6d669e..49d74339b 100644 --- a/mobile_verifier/src/heartbeats/mod.rs +++ b/mobile_verifier/src/heartbeats/mod.rs @@ -275,7 +275,7 @@ pub struct HeartbeatReward { pub coverage_object: Uuid, } -const RESTRICTIVE_MAX_DISTANCE: i64 = 30; +const RESTRICTIVE_MAX_DISTANCE: i64 = 50; impl HeartbeatReward { pub fn key(&self) -> KeyType<'_> { diff --git a/mobile_verifier/src/heartbeats/wifi.rs b/mobile_verifier/src/heartbeats/wifi.rs index 667b3c4e7..295a1c8df 100644 --- a/mobile_verifier/src/heartbeats/wifi.rs +++ b/mobile_verifier/src/heartbeats/wifi.rs @@ -55,7 +55,7 @@ where file_source::continuous_source::<WifiHeartbeatIngestReport>() .state(pool.clone()) .store(file_store) - .lookback(LookbackBehavior::StartAfter(settings.start_after())) + .lookback(LookbackBehavior::StartAfter(settings.start_after)) .prefix(FileType::WifiHeartbeatIngestReport.to_string()) .create() .await?; @@ -64,7 +64,7 @@ where pool, gateway_resolver, wifi_heartbeats, - settings.modeled_coverage_start(), + settings.modeled_coverage_start, settings.max_asserted_distance_deviation, settings.max_distance_from_coverage, valid_heartbeats, @@ -119,7 +119,6 @@ where let location_cache = LocationCache::new(&self.pool); loop { - #[rustfmt::skip] tokio::select!
{ biased; _ = shutdown.clone() => { @@ -127,15 +126,16 @@ break; } Some(file) = self.heartbeats.recv() => { - let start = Instant::now(); - self.process_file( + let start = Instant::now(); + self.process_file( file, &heartbeat_cache, &coverage_claim_time_cache, &coverage_object_cache, &location_cache - ).await?; - metrics::histogram!("wifi_heartbeat_processing_time", start.elapsed()); + ).await?; + metrics::histogram!("wifi_heartbeat_processing_time") + .record(start.elapsed()); } } } diff --git a/mobile_verifier/src/radio_threshold.rs b/mobile_verifier/src/radio_threshold.rs index 2821cf78c..fb058ced0 100644 --- a/mobile_verifier/src/radio_threshold.rs +++ b/mobile_verifier/src/radio_threshold.rs @@ -97,7 +97,7 @@ where file_source::continuous_source::<RadioThresholdIngestReport>() .state(pool.clone()) .store(file_store.clone()) - .lookback(LookbackBehavior::StartAfter(settings.start_after())) + .lookback(LookbackBehavior::StartAfter(settings.start_after)) .prefix(FileType::RadioThresholdIngestReport.to_string()) .create() .await?; @@ -107,7 +107,7 @@ where file_source::continuous_source::<InvalidatedRadioThresholdIngestReport>() .state(pool.clone()) .store(file_store.clone()) - .lookback(LookbackBehavior::StartAfter(settings.start_after())) + .lookback(LookbackBehavior::StartAfter(settings.start_after)) .prefix(FileType::InvalidatedRadioThresholdIngestReport.to_string()) .create() .await?; diff --git a/mobile_verifier/src/reward_shares.rs b/mobile_verifier/src/reward_shares.rs index 3f609cabb..b4ddcca62 100644 --- a/mobile_verifier/src/reward_shares.rs +++ b/mobile_verifier/src/reward_shares.rs @@ -1415,31 +1415,31 @@ mod test { *owner_rewards .get(&owner1) .expect("Could not fetch owner1 rewards"), - 364_298_724_954 + 260_213_374_966 ); assert_eq!( *owner_rewards .get(&owner2) .expect("Could not fetch owner2 rewards"), - 1_366_120_218_577 + 975_800_156_122 ); assert_eq!( *owner_rewards .get(&owner3) .expect("Could not fetch owner3 rewards"), - 45_537_340_619 + 32_526_671_870 ); assert_eq!(owner_rewards.get(&owner4), None); let owner5_reward = *owner_rewards .get(&owner5) .expect("Could not fetch owner5 rewards"); - assert_eq!(owner5_reward, 182_149_362_477); + assert_eq!(owner5_reward, 520_426_749_934); let owner6_reward = *owner_rewards .get(&owner6) .expect("Could not fetch owner6 rewards"); - assert_eq!(owner6_reward, 45_537_340_619); + assert_eq!(owner6_reward, 130_106_687_483); // confirm owner 6 reward is 0.25 of owner 5's reward // this is due to owner 6's hotspot not having a validation location timestamp @@ -1449,7 +1449,7 @@ mod test { let owner7_reward = *owner_rewards .get(&owner6) .expect("Could not fetch owner7 rewards"); - assert_eq!(owner7_reward, 45_537_340_619); + assert_eq!(owner7_reward, 130_106_687_483); // confirm owner 7 reward is 0.25 of owner 5's reward // owner 7's hotspot does have a validation location timestamp @@ -1458,7 +1458,7 @@ assert_eq!((owner5_reward as f64 * 0.25) as u64, owner7_reward); // confirm total sum of allocated poc rewards - assert_eq!(allocated_poc_rewards, 2_049_180_327_865); + assert_eq!(allocated_poc_rewards, 2_049_180_327_858); // confirm the unallocated poc reward amounts let unallocated_sp_reward_amount = (total_poc_rewards @@ -1466,7 +1466,7 @@ .round_dp_with_strategy(0, RoundingStrategy::ToZero) .to_u64() .unwrap_or(0); - assert_eq!(unallocated_sp_reward_amount, 3); + assert_eq!(unallocated_sp_reward_amount, 10); } #[tokio::test] @@ -1583,19 +1583,17 @@ } println!("owner rewards {:?}", owner_rewards); - // These were different, now they are the same: -
wifi let owner1_reward = *owner_rewards .get(&owner1) .expect("Could not fetch owner1 rewards"); - assert_eq!(owner1_reward, 1_024_590_163_934); + assert_eq!(owner1_reward, 1_639_344_262_295); // sercomm let owner2_reward = *owner_rewards .get(&owner2) .expect("Could not fetch owner2 rewards"); - assert_eq!(owner2_reward, 1_024_590_163_934); + assert_eq!(owner2_reward, 409_836_065_573); } #[tokio::test] @@ -1728,7 +1726,7 @@ mod test { // owner 1 is a wifi indoor with a distance_to_asserted > max // and so gets the reduced reward scale of 0.1 ( radio reward scale of 0.4 * location scale of 0.25) // owner 2 is a cbrs sercomm indoor which has a reward scale of 1.0 - assert_eq!(owner1_reward, (owner2_reward as f64 * 0.25) as u64); + assert_eq!(owner1_reward, owner2_reward); } #[tokio::test] @@ -1850,13 +1848,13 @@ mod test { let owner1_reward = *owner_rewards .get(&owner1) .expect("Could not fetch owner1 rewards"); - assert_eq!(owner1_reward, 1_024_590_163_934); + assert_eq!(owner1_reward, 1_639_344_262_295); // sercomm let owner2_reward = *owner_rewards .get(&owner2) .expect("Could not fetch owner2 rewards"); - assert_eq!(owner2_reward, 1_024_590_163_934); + assert_eq!(owner2_reward, 409_836_065_573); } /// Test to ensure that rewards that are zeroed are not written out. diff --git a/mobile_verifier/src/rewarder.rs b/mobile_verifier/src/rewarder.rs index 7dfbb7c1e..c9c3792b3 100644 --- a/mobile_verifier/src/rewarder.rs +++ b/mobile_verifier/src/rewarder.rs @@ -9,7 +9,7 @@ use crate::{ subscriber_location, telemetry, Settings, }; use anyhow::bail; -use chrono::{DateTime, Duration, TimeZone, Utc}; +use chrono::{DateTime, TimeZone, Utc}; use db_store::meta; use file_store::{ file_sink::{self, FileSinkClient}, @@ -35,7 +35,7 @@ use reward_scheduler::Scheduler; use rust_decimal::{prelude::*, Decimal}; use rust_decimal_macros::dec; use sqlx::{PgExecutor, Pool, Postgres}; -use std::ops::Range; +use std::{ops::Range, time::Duration}; use task_manager::{ManagedTask, TaskManager}; use tokio::time::sleep; @@ -68,7 +68,6 @@ where ) -> anyhow::Result { let (price_tracker, price_daemon) = PriceTracker::new_tm(&settings.price_tracker).await?; - let reward_period_hours = settings.rewards; let (mobile_rewards, mobile_rewards_server) = file_sink::FileSinkBuilder::new( FileType::MobileRewardShare, settings.store_base_path(), @@ -93,8 +92,8 @@ where pool.clone(), carrier_service_verifier, hex_boosting_info_resolver, - Duration::hours(reward_period_hours), - Duration::minutes(settings.reward_offset_minutes), + settings.reward_period, + settings.reward_period_offset, mobile_rewards, reward_manifests, price_tracker, @@ -150,7 +149,7 @@ where self.reward(&scheduler).await?; continue; } else { - Duration::minutes(REWARDS_NOT_CURRENT_DELAY_PERIOD).to_std()? + chrono::Duration::minutes(REWARDS_NOT_CURRENT_DELAY_PERIOD).to_std()? } } else { scheduler.sleep_duration(now)? 
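Aside on the settings pattern the next several files adopt: integer "period in hours/minutes" fields become `std::time::Duration` fields deserialized with `humantime_serde`, so the TOML files can say `"15 minutes"` or `"24 hours"` instead of bare numbers with implicit units. A minimal sketch with a hypothetical `ExampleSettings` struct (the `toml` crate is used here only to exercise the deserializer), assuming humantime-serde 1.x:

```rust
use serde::Deserialize;
use std::time::Duration;

#[derive(Debug, Deserialize)]
struct ExampleSettings {
    // Accepts humantime strings such as "30 seconds", "15 minutes", "24 hours".
    #[serde(with = "humantime_serde", default = "default_interval")]
    interval: Duration,
}

// Defaults are expressed in the same humantime notation as the config files.
fn default_interval() -> Duration {
    humantime_serde::re::humantime::parse_duration("15 minutes").unwrap()
}

fn main() {
    // A duration string deserializes directly into std::time::Duration.
    let settings: ExampleSettings =
        toml::from_str(r#"interval = "1 hour 30 minutes""#).unwrap();
    assert_eq!(settings.interval, Duration::from_secs(5400));

    // A missing field falls back to the humantime-expressed default.
    let defaulted: ExampleSettings = toml::from_str("").unwrap();
    assert_eq!(defaulted.interval, Duration::from_secs(900));
}
```

A knock-on effect visible below: chrono's `Duration` largely disappears from these settings modules in favor of `std::time::Duration`, which `tokio::time::interval` takes directly.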
diff --git a/mobile_verifier/src/settings.rs b/mobile_verifier/src/settings.rs index 02e1d0958..d909730b9 100644 --- a/mobile_verifier/src/settings.rs +++ b/mobile_verifier/src/settings.rs @@ -1,7 +1,11 @@ -use chrono::{DateTime, TimeZone, Utc}; +use chrono::{DateTime, Utc}; use config::{Config, ConfigError, Environment, File}; +use humantime_serde::re::humantime; use serde::Deserialize; -use std::path::{Path, PathBuf}; +use std::{ path::{Path, PathBuf}, time::Duration, }; #[derive(Debug, Deserialize)] pub struct Settings { @@ -11,11 +15,11 @@ pub struct Settings { pub log: String, /// Cache location for generated verified reports pub cache: String, - /// Reward period in hours. (Default is 24) - #[serde(default = "default_reward_period")] - pub rewards: i64, - #[serde(default = "default_reward_offset_minutes")] - pub reward_offset_minutes: i64, + /// Reward period. (Default is 24 hours) + #[serde(with = "humantime_serde", default = "default_reward_period")] + pub reward_period: Duration, + #[serde(with = "humantime_serde", default = "default_reward_period_offset")] + pub reward_period_offset: Duration, pub database: db_store::Settings, pub ingest: file_store::Settings, pub data_transfer_ingest: file_store::Settings, @@ -27,8 +31,8 @@ pub struct Settings { pub price_tracker: price::price_tracker::Settings, pub config_client: mobile_config::ClientSettings, #[serde(default = "default_start_after")] - pub start_after: u64, - pub modeled_coverage_start: u64, + pub start_after: DateTime<Utc>, + pub modeled_coverage_start: DateTime<Utc>, /// Max distance in meters between the heartbeat and all of the hexes in /// its respective coverage object #[serde(default = "default_max_distance_from_coverage")] @@ -53,29 +57,29 @@ fn default_fencing_resolution() -> u8 { 7 } -pub fn default_max_distance_from_coverage() -> u32 { +fn default_max_distance_from_coverage() -> u32 { // Default is 2 km 2000 } -pub fn default_max_asserted_distance_deviation() -> u32 { +fn default_max_asserted_distance_deviation() -> u32 { 100 } -pub fn default_log() -> String { +fn default_log() -> String { "mobile_verifier=debug,poc_store=info".to_string() } -pub fn default_start_after() -> u64 { - 0 +fn default_start_after() -> DateTime<Utc> { + DateTime::UNIX_EPOCH } -pub fn default_reward_period() -> i64 { - 24 +fn default_reward_period() -> Duration { + humantime::parse_duration("24 hours").unwrap() } -pub fn default_reward_offset_minutes() -> i64 { - 30 +fn default_reward_period_offset() -> Duration { + humantime::parse_duration("30 minutes").unwrap() } impl Settings { @@ -101,18 +105,6 @@ impl Settings { .and_then(|config| config.try_deserialize()) } - pub fn start_after(&self) -> DateTime<Utc> { - Utc.timestamp_opt(self.start_after as i64, 0) - .single() - .unwrap() - } - - pub fn modeled_coverage_start(&self) -> DateTime<Utc> { - Utc.timestamp_opt(self.modeled_coverage_start as i64, 0) - .single() - .unwrap() - } - pub fn usa_region_paths(&self) -> anyhow::Result<Vec<PathBuf>> { let paths = std::fs::read_dir(&self.usa_geofence_regions)?; Ok(paths diff --git a/mobile_verifier/src/speedtests.rs b/mobile_verifier/src/speedtests.rs index 9f6bc3d1d..f6599d930 100644 --- a/mobile_verifier/src/speedtests.rs +++ b/mobile_verifier/src/speedtests.rs @@ -85,7 +85,7 @@ where file_source::continuous_source::<CellSpeedtestIngestReport>() .state(pool.clone()) .store(file_store) - .lookback(LookbackBehavior::StartAfter(settings.start_after())) + .lookback(LookbackBehavior::StartAfter(settings.start_after)) .prefix(FileType::CellSpeedtestIngestReport.to_string()) .create() .await?; @@
-123,7 +123,6 @@ where pub async fn run(mut self, shutdown: triggered::Listener) -> anyhow::Result<()> { loop { - #[rustfmt::skip] tokio::select! { biased; _ = shutdown.clone() => { @@ -131,9 +130,10 @@ break; } Some(file) = self.speedtests.recv() => { - let start = Instant::now(); - self.process_file(file).await?; - metrics::histogram!("speedtest_processing_time", start.elapsed()); + let start = Instant::now(); + self.process_file(file).await?; + metrics::histogram!("speedtest_processing_time") + .record(start.elapsed()); } } } diff --git a/mobile_verifier/src/subscriber_location.rs b/mobile_verifier/src/subscriber_location.rs index e17712a58..930c20983 100644 --- a/mobile_verifier/src/subscriber_location.rs +++ b/mobile_verifier/src/subscriber_location.rs @@ -67,7 +67,7 @@ where file_source::continuous_source::<SubscriberLocationIngestReport>() .state(pool.clone()) .store(file_store.clone()) - .lookback(LookbackBehavior::StartAfter(settings.start_after())) + .lookback(LookbackBehavior::StartAfter(settings.start_after)) .prefix(FileType::SubscriberLocationIngestReport.to_string()) .create() .await?; @@ -105,17 +105,14 @@ where async fn run(mut self, shutdown: triggered::Listener) -> anyhow::Result<()> { loop { - #[rustfmt::skip] tokio::select! { biased; _ = shutdown.clone() => break, Some(file) = self.reports_receiver.recv() => { - let start = Instant::now(); + let start = Instant::now(); self.process_file(file).await?; - metrics::histogram!( - "subscriber_location_processing_time", - start.elapsed() - ); + metrics::histogram!("subscriber_location_processing_time") + .record(start.elapsed()); } } } diff --git a/mobile_verifier/src/telemetry.rs b/mobile_verifier/src/telemetry.rs index 4bfe3be2a..224fc3ab3 100644 --- a/mobile_verifier/src/telemetry.rs +++ b/mobile_verifier/src/telemetry.rs @@ -13,9 +13,9 @@ pub async fn initialize(db: &Pool<Postgres>) -> anyhow::Result<()> { } pub fn last_rewarded_end_time(timestamp: DateTime<Utc>) { - metrics::gauge!(LAST_REWARDED_END_TIME, timestamp.timestamp() as f64); + metrics::gauge!(LAST_REWARDED_END_TIME).set(timestamp.timestamp() as f64); } pub fn data_transfer_rewards_scale(scale: f64) { - metrics::gauge!(DATA_TRANSFER_REWARDS_SCALE, scale); + metrics::gauge!(DATA_TRANSFER_REWARDS_SCALE).set(scale); } diff --git a/mobile_verifier/tests/integrations/boosting_oracles.rs b/mobile_verifier/tests/integrations/boosting_oracles.rs index 880407c37..3f791d2cc 100644 --- a/mobile_verifier/tests/integrations/boosting_oracles.rs +++ b/mobile_verifier/tests/integrations/boosting_oracles.rs @@ -410,43 +410,43 @@ async fn test_footfall_and_urbanization_and_landtype(pool: PgPool) -> anyhow::Re // Hex | Assignment | Points Equation | Sum // ----------------------------------------------- // == yellow - POI ≥ 1 Urbanized - // hex1 | A, A, A | 400 * 1 | 400 - // hex2 | A, B, A | 400 * 1 | 400 - // hex3 | A, C, A | 400 * 1 | 400 + // hex1 | A, A, A | 100 * 1 | 100 + // hex2 | A, B, A | 100 * 1 | 100 + // hex3 | A, C, A | 100 * 1 | 100 // == orange - POI ≥ 1 Not Urbanized - // hex4 | A, A, B | 400 * 1 | 400 - // hex5 | A, B, B | 400 * 1 | 400 - // hex6 | A, C, B | 400 * 1 | 400 + // hex4 | A, A, B | 100 * 1 | 100 + // hex5 | A, B, B | 100 * 1 | 100 + // hex6 | A, C, B | 100 * 1 | 100 // == light green - Point of Interest Urbanized - // hex7 | B, A, A | 400 * 0.70 | 280 - // hex8 | B, B, A | 400 * 0.70 | 280 - // hex9 | B, C, A | 400 * 0.70 | 280 + // hex7 | B, A, A | 100 * 0.70 | 70 + // hex8 | B, B, A | 100 * 0.70 | 70 + // hex9 | B, C, A | 100 * 0.70 | 70 // == dark green - Point of Interest Not Urbanized
- // hex10 | B, A, B | 400 * 0.50 | 200 - // hex11 | B, B, B | 400 * 0.50 | 200 - // hex12 | B, C, B | 400 * 0.50 | 200 + // hex10 | B, A, B | 100 * 0.50 | 50 + // hex11 | B, B, B | 100 * 0.50 | 50 + // hex12 | B, C, B | 100 * 0.50 | 50 // == light blue - No POI Urbanized - // hex13 | C, A, A | 400 * 0.40 | 160 - // hex14 | C, B, A | 400 * 0.30 | 120 - // hex15 | C, C, A | 400 * 0.05 | 20 + // hex13 | C, A, A | 100 * 0.40 | 40 + // hex14 | C, B, A | 100 * 0.30 | 30 + // hex15 | C, C, A | 100 * 0.05 | 5 // == dark blue - No POI Not Urbanized - // hex16 | C, A, B | 400 * 0.20 | 80 - // hex17 | C, B, B | 400 * 0.15 | 60 - // hex18 | C, C, B | 400 * 0.03 | 12 + // hex16 | C, A, B | 100 * 0.20 | 20 + // hex17 | C, B, B | 100 * 0.15 | 15 + // hex18 | C, C, B | 100 * 0.03 | 3 // == gray - Outside of USA - // hex19 | A, A, C | 400 * 0.00 | 0 - // hex20 | A, B, C | 400 * 0.00 | 0 - // hex21 | A, C, C | 400 * 0.00 | 0 - // hex22 | B, A, C | 400 * 0.00 | 0 - // hex23 | B, B, C | 400 * 0.00 | 0 - // hex24 | B, C, C | 400 * 0.00 | 0 - // hex25 | C, A, C | 400 * 0.00 | 0 - // hex26 | C, B, C | 400 * 0.00 | 0 - // hex27 | C, C, C | 400 * 0.00 | 0 + // hex19 | A, A, C | 100 * 0.00 | 0 + // hex20 | A, B, C | 100 * 0.00 | 0 + // hex21 | A, C, C | 100 * 0.00 | 0 + // hex22 | B, A, C | 100 * 0.00 | 0 + // hex23 | B, B, C | 100 * 0.00 | 0 + // hex24 | B, C, C | 100 * 0.00 | 0 + // hex25 | C, A, C | 100 * 0.00 | 0 + // hex26 | C, B, C | 100 * 0.00 | 0 + // hex27 | C, C, C | 100 * 0.00 | 0 // ----------------------------------------------- - // = 4,292 + // = 1,073 - assert_eq!(coverage_points.hotspot_points(&owner), dec!(4292.0)); + assert_eq!(coverage_points.hotspot_points(&owner), dec!(1073.0)); Ok(()) } diff --git a/mobile_verifier/tests/integrations/hex_boosting.rs b/mobile_verifier/tests/integrations/hex_boosting.rs index b438cd19c..6dd082915 100644 --- a/mobile_verifier/tests/integrations/hex_boosting.rs +++ b/mobile_verifier/tests/integrations/hex_boosting.rs @@ -916,9 +916,9 @@ async fn test_poc_with_cbrs_and_multi_coverage_boosted_hexes(pool: PgPool) -> an ); if let Ok((poc_rewards, unallocated_reward)) = rewards { // assert poc reward outputs - let exp_reward_1 = 23_990_403_838_464; - let exp_reward_2 = 23_990_403_838_464; - let exp_reward_3 = 1_199_520_191_923; + let exp_reward_1 = 24_437_429_996_945; + let exp_reward_2 = 24_437_429_996_945; + let exp_reward_3 = 305_467_874_961; assert_eq!(exp_reward_1, poc_rewards[0].poc_reward); assert_eq!( @@ -961,11 +961,11 @@ async fn test_poc_with_cbrs_and_multi_coverage_boosted_hexes(pool: PgPool) -> an poc_rewards[0].boosted_hexes[0].location ); - // hotspot1 should have 20x the reward of hotspot 3 - assert_eq!(poc_rewards[0].poc_reward / poc_rewards[2].poc_reward, 20); - // hotspot1 should have 20x the reward of hotspot 3 + // hotspot1 should have 80x the reward of hotspot 3 + assert_eq!(poc_rewards[0].poc_reward / poc_rewards[2].poc_reward, 80); + // hotspot1 should have 80x the reward of hotspot 3 // due to the 2 boosted hexes each with a 10x multiplier - assert_eq!(poc_rewards[1].poc_reward / poc_rewards[2].poc_reward, 20); + assert_eq!(poc_rewards[1].poc_reward / poc_rewards[2].poc_reward, 80); // confirm the total rewards allocated matches expectations let poc_sum: u64 = poc_rewards.iter().map(|r| r.poc_reward).sum(); diff --git a/mobile_verifier/tests/integrations/modeled_coverage.rs b/mobile_verifier/tests/integrations/modeled_coverage.rs index fee26b159..1069f166f 100644 --- a/mobile_verifier/tests/integrations/modeled_coverage.rs +++ 
b/mobile_verifier/tests/integrations/modeled_coverage.rs @@ -505,7 +505,7 @@ async fn scenario_one(pool: PgPool) -> anyhow::Result<()> { ) .await?; - assert_eq!(coverage_points.hotspot_points(&owner), dec!(1000)); + assert_eq!(coverage_points.hotspot_points(&owner), dec!(250)); Ok(()) } @@ -605,8 +605,8 @@ async fn scenario_two(pool: PgPool) -> anyhow::Result<()> { ) .await?; - assert_eq!(coverage_points.hotspot_points(&owner_1), dec!(450)); - assert_eq!(coverage_points.hotspot_points(&owner_2), dec!(1000)); + assert_eq!(coverage_points.hotspot_points(&owner_1), dec!(112.5)); + assert_eq!(coverage_points.hotspot_points(&owner_2), dec!(250)); Ok(()) } @@ -891,7 +891,7 @@ async fn scenario_three(pool: PgPool) -> anyhow::Result<()> { assert_eq!(coverage_points.hotspot_points(&owner_1), dec!(0)); assert_eq!(coverage_points.hotspot_points(&owner_2), dec!(0)); assert_eq!(coverage_points.hotspot_points(&owner_3), dec!(0)); - assert_eq!(coverage_points.hotspot_points(&owner_4), dec!(1000)); + assert_eq!(coverage_points.hotspot_points(&owner_4), dec!(250)); assert_eq!(coverage_points.hotspot_points(&owner_5), dec!(0)); assert_eq!(coverage_points.hotspot_points(&owner_6), dec!(0)); @@ -960,7 +960,7 @@ async fn scenario_four(pool: PgPool) -> anyhow::Result<()> { ) .await?; - assert_eq!(coverage_points.hotspot_points(&owner), dec!(76)); + assert_eq!(coverage_points.hotspot_points(&owner), dec!(19)); Ok(()) } @@ -1061,9 +1061,9 @@ async fn scenario_five(pool: PgPool) -> anyhow::Result<()> { assert_eq!( coverage_points.hotspot_points(&owner_1), - dec!(76) * dec!(0.5) + dec!(19) * dec!(0.5) ); - assert_eq!(coverage_points.hotspot_points(&owner_2), dec!(32)); + assert_eq!(coverage_points.hotspot_points(&owner_2), dec!(8)); Ok(()) } @@ -1307,7 +1307,7 @@ async fn scenario_six(pool: PgPool) -> anyhow::Result<()> { .await?; assert_eq!(coverage_points.hotspot_points(&owner_1), dec!(0)); - assert_eq!(coverage_points.hotspot_points(&owner_2), dec!(250)); + assert_eq!(coverage_points.hotspot_points(&owner_2), dec!(62.5)); assert_eq!(coverage_points.hotspot_points(&owner_3), dec!(0)); assert_eq!(coverage_points.hotspot_points(&owner_4), dec!(0)); assert_eq!(coverage_points.hotspot_points(&owner_5), dec!(0)); diff --git a/mobile_verifier/tests/integrations/rewarder_poc_dc.rs b/mobile_verifier/tests/integrations/rewarder_poc_dc.rs index f9287ed5e..f3eaf1e4a 100644 --- a/mobile_verifier/tests/integrations/rewarder_poc_dc.rs +++ b/mobile_verifier/tests/integrations/rewarder_poc_dc.rs @@ -59,9 +59,9 @@ async fn test_poc_and_dc_rewards(pool: PgPool) -> anyhow::Result<()> { ); if let Ok((poc_rewards, dc_rewards, unallocated_poc_reward)) = rewards { // assert poc reward outputs - let hotspot_1_reward = 24_108_003_121_986; - let hotspot_2_reward = 24_108_003_121_986; - let hotspot_3_reward = 964_320_124_879; + let hotspot_1_reward = 9_758_001_263_661; + let hotspot_2_reward = 39_032_005_054_644; + let hotspot_3_reward = 390_320_050_546; assert_eq!(hotspot_1_reward, poc_rewards[0].poc_reward); assert_eq!( HOTSPOT_1.to_string(), diff --git a/poc_entropy/src/server.rs b/poc_entropy/src/server.rs index be1843a26..e771afffb 100644 --- a/poc_entropy/src/server.rs +++ b/poc_entropy/src/server.rs @@ -18,7 +18,7 @@ impl PocEntropy for EntropyServer { _request: tonic::Request, ) -> Result, tonic::Status> { let entropy = &*self.entropy_watch.borrow(); - metrics::increment_counter!("entropy_server_get_count"); + metrics::counter!("entropy_server_get_count").increment(1); Ok(tonic::Response::new(entropy.into())) } } diff --git 
a/price/Cargo.toml b/price/Cargo.toml index c98b65c15..0ce8851fc 100644 --- a/price/Cargo.toml +++ b/price/Cargo.toml @@ -32,3 +32,4 @@ triggered = {workspace = true} solana-client = {workspace = true} solana-sdk = {workspace = true} task-manager = {path = "../task_manager"} +humantime-serde = {workspace = true} \ No newline at end of file diff --git a/price/pkg/settings-template.toml b/price/pkg/settings-template.toml index c176b5b1c..dfa2278a7 100644 --- a/price/pkg/settings-template.toml +++ b/price/pkg/settings-template.toml @@ -6,7 +6,7 @@ source = "https://api.devnet.solana.com" # Price tick interval. Default = 60s. Optional. -interval = 60 +interval = "60 seconds" # Cache folder to use. Default below # diff --git a/price/src/metrics.rs b/price/src/metrics.rs index f13813e8a..c39b53996 100644 --- a/price/src/metrics.rs +++ b/price/src/metrics.rs @@ -12,9 +12,9 @@ impl Metrics { } fn increment_counter(counter: String, token_type: BlockchainTokenTypeV1) { - metrics::increment_counter!(counter, "token_type" => token_type.as_str_name()); + metrics::counter!(counter, "token_type" => token_type.as_str_name()).increment(1); } fn set_gauge(token_type: BlockchainTokenTypeV1, value: f64) { - metrics::gauge!(PRICE_GAUGE, value, "token_type" => token_type.as_str_name()); + metrics::gauge!(PRICE_GAUGE, "token_type" => token_type.as_str_name()).set(value); } diff --git a/price/src/price_generator.rs b/price/src/price_generator.rs index 2459c00af..9624a7ab1 100644 --- a/price/src/price_generator.rs +++ b/price/src/price_generator.rs @@ -1,13 +1,13 @@ use crate::{metrics::Metrics, Settings}; use anyhow::{anyhow, bail, Error, Result}; -use chrono::{DateTime, Duration, TimeZone, Utc}; +use chrono::{DateTime, TimeZone, Utc}; use file_store::file_sink; use futures::{future::LocalBoxFuture, TryFutureExt}; use helium_proto::{BlockchainTokenTypeV1, PriceReportV1}; use serde::{Deserialize, Serialize}; use solana_client::nonblocking::rpc_client::RpcClient; use solana_sdk::pubkey::Pubkey as SolPubkey; -use std::{cmp::Ordering, path::PathBuf, str::FromStr}; +use std::{cmp::Ordering, path::PathBuf, str::FromStr, time::Duration}; use task_manager::ManagedTask; use tokio::{fs, time}; @@ -90,12 +90,12 @@ impl PriceGenerator { client, key: settings.price_key(token_type)?, default_price: settings.default_price(token_type)?, - interval_duration: settings.interval().to_std()?, - stale_price_duration: settings.stale_price_duration(), + interval_duration: settings.interval, + stale_price_duration: settings.stale_price_duration, latest_price_file: PathBuf::from_str(&settings.cache)? .join(format!("{token_type:?}.latest")), file_sink: Some(file_sink), - pyth_price_interval: settings.pyth_price_interval().to_std()?, + pyth_price_interval: settings.pyth_price_interval, }) } diff --git a/price/src/settings.rs b/price/src/settings.rs index a4655c40a..1da3adfe9 100644 --- a/price/src/settings.rs +++ b/price/src/settings.rs @@ -1,10 +1,10 @@ use anyhow::{anyhow, Result}; -use chrono::Duration; use config::{Config, Environment, File}; use helium_proto::BlockchainTokenTypeV1; +use humantime_serde::re::humantime; use serde::Deserialize; use solana_sdk::pubkey::Pubkey as SolPubkey; -use std::{path::Path, str::FromStr}; +use std::{path::Path, str::FromStr, time::Duration}; #[derive(Debug, Deserialize, Clone)] pub struct ClusterConfig { @@ -48,41 +48,37 @@ pub struct Settings { /// Metrics settings pub metrics: poc_metrics::Settings, /// Tick interval. Default = 60s.
- #[serde(default = "default_interval")] - pub interval: i64, + #[serde(with = "humantime_serde", default = "default_interval")] + pub interval: Duration, /// Cluster Configuration - #[serde(default = "default_cluster")] + #[serde(default)] pub cluster: ClusterConfig, /// How long to use a stale price - #[serde(default = "default_stale_price_minutes")] - pub stale_price_minutes: u64, + #[serde(with = "humantime_serde", default = "default_stale_price_duration")] + pub stale_price_duration: Duration, /// Interval when retrieving a pyth price from on chain - #[serde(default = "default_pyth_price_interval")] - pub pyth_price_interval_in_seconds: u64, + #[serde(with = "humantime_serde", default = "default_pyth_price_interval")] + pub pyth_price_interval: Duration, } -pub fn default_pyth_price_interval() -> u64 { - 60 * 60 * 2 +fn default_pyth_price_interval() -> Duration { + humantime::parse_duration("2 hours").unwrap() } -pub fn default_source() -> String { +fn default_source() -> String { "https://api.devnet.solana.com".to_string() } -pub fn default_log() -> String { +fn default_log() -> String { "price=debug".to_string() } -pub fn default_interval() -> i64 { - 60 +fn default_interval() -> Duration { + humantime::parse_duration("1 minute").unwrap() } -pub fn default_stale_price_minutes() -> u64 { - 12 * 60 -} - -pub fn default_cluster() -> ClusterConfig { - ClusterConfig::default() +fn default_stale_price_duration() -> Duration { + humantime::parse_duration("12 hours").unwrap() } pub fn default_cache() -> String { @@ -112,18 +108,6 @@ impl Settings { .and_then(|config| config.try_deserialize()) } - pub fn interval(&self) -> Duration { - Duration::seconds(self.interval) - } - - pub fn pyth_price_interval(&self) -> Duration { - Duration::seconds(self.pyth_price_interval_in_seconds as i64) - } - - pub fn stale_price_duration(&self) -> Duration { - Duration::minutes(self.stale_price_minutes as i64) - } - pub fn price_key(&self, token_type: BlockchainTokenTypeV1) -> Result<Option<SolPubkey>> { self.key(token_type)? .as_ref() diff --git a/reward_index/Cargo.toml b/reward_index/Cargo.toml index ff25749d7..29c34841c 100644 --- a/reward_index/Cargo.toml +++ b/reward_index/Cargo.toml @@ -40,3 +40,4 @@ rust_decimal_macros = {workspace = true} tonic = {workspace = true} rand = {workspace = true} async-trait = {workspace = true} +humantime-serde = {workspace = true} \ No newline at end of file diff --git a/reward_index/pkg/settings-template.toml b/reward_index/pkg/settings-template.toml index 7d1c1e51d..0e939a68d 100644 --- a/reward_index/pkg/settings-template.toml +++ b/reward_index/pkg/settings-template.toml @@ -5,7 +5,7 @@ # Interval for checking verifier bucket. Default below (15 minutes) # -# interval = 900 +# interval = "15 minutes" # Mode to operate the indexer in. "iot" or "mobile" mode = "iot" diff --git a/reward_index/src/main.rs b/reward_index/src/main.rs index 9adbc8a2c..600c6764f 100644 --- a/reward_index/src/main.rs +++ b/reward_index/src/main.rs @@ -1,5 +1,4 @@ use anyhow::Result; -use chrono::{TimeZone, Utc}; use clap::Parser; use file_store::{ file_info_poller::LookbackBehavior, file_source, reward_manifest::RewardManifest, FileStore, @@ -76,18 +75,13 @@ impl Server { telemetry::initialize(&pool).await?; let file_store = FileStore::from_settings(&settings.verifier).await?; - let (receiver, server) = file_source::continuous_source::<RewardManifest>() .state(pool.clone()) .store(file_store) .prefix(FileType::RewardManifest.to_string()) - .lookback(LookbackBehavior::StartAfter( - Utc.timestamp_opt(settings.start_after as i64, 0) - .single() - .unwrap(), - )) - .poll_duration(settings.interval()) - .offset(settings.interval() * 2) + .lookback(LookbackBehavior::StartAfter(settings.start_after)) + .poll_duration(settings.interval) + .offset(settings.interval * 2) .create() .await?; let source_join_handle = server.start(shutdown_listener.clone()).await?; diff --git a/reward_index/src/settings.rs b/reward_index/src/settings.rs index 790f1375b..e41231059 100644 --- a/reward_index/src/settings.rs +++ b/reward_index/src/settings.rs @@ -1,7 +1,8 @@ -use chrono::Duration; +use chrono::{DateTime, Utc}; use config::{Config, Environment, File}; +use humantime_serde::re::humantime; use serde::Deserialize; -use std::{fmt, path::Path}; +use std::{fmt, path::Path, time::Duration}; /// Mode to start the indexer in. Each mode uses different files from /// the verifier @@ -28,8 +29,8 @@ pub struct Settings { #[serde(default = "default_log")] pub log: String, /// Check interval. (Default is 15 minutes) - #[serde(default = "default_interval")] - pub interval: i64, + #[serde(with = "humantime_serde", default = "default_interval")] + pub interval: Duration, /// Mode to run the server in (iot or mobile).
Required pub mode: Mode, pub database: db_store::Settings, @@ -38,14 +39,18 @@ pub operation_fund_key: Option<String>, pub unallocated_reward_entity_key: Option<String>, #[serde(default = "default_start_after")] - pub start_after: u64, + pub start_after: DateTime<Utc>, } -pub fn default_start_after() -> u64 { - 0 +fn default_interval() -> Duration { + humantime::parse_duration("15 minutes").unwrap() } -pub fn default_log() -> String { +fn default_start_after() -> DateTime<Utc> { + DateTime::UNIX_EPOCH +} + +fn default_log() -> String { "reward_index=debug,poc_store=info".to_string() } @@ -72,10 +77,6 @@ impl Settings { .and_then(|config| config.try_deserialize()) } - pub fn interval(&self) -> Duration { - Duration::seconds(self.interval) - } - pub fn operation_fund_key(&self) -> Option<String> { self.operation_fund_key.clone() } @@ -84,7 +85,3 @@ impl Settings { self.unallocated_reward_entity_key.clone() } } - -fn default_interval() -> i64 { - 900 -} diff --git a/reward_index/src/telemetry.rs b/reward_index/src/telemetry.rs index 7589b68df..4437b9ed1 100644 --- a/reward_index/src/telemetry.rs +++ b/reward_index/src/telemetry.rs @@ -16,7 +16,7 @@ pub async fn last_reward_processed_time( db: &Pool<Postgres>, datetime: DateTime<Utc>, ) -> anyhow::Result<()> { - metrics::gauge!(LAST_REWARD_PROCESSED_TIME, datetime.timestamp() as f64); + metrics::gauge!(LAST_REWARD_PROCESSED_TIME).set(datetime.timestamp() as f64); meta::store(db, LAST_REWARD_PROCESSED_TIME, datetime.timestamp()).await?; Ok(()) diff --git a/reward_scheduler/src/lib.rs b/reward_scheduler/src/lib.rs index edd8efc78..788f4eb79 100644 --- a/reward_scheduler/src/lib.rs +++ b/reward_scheduler/src/lib.rs @@ -1,5 +1,5 @@ -use chrono::{DateTime, Duration, Utc}; -use std::ops::Range; +use chrono::{DateTime, Utc}; +use std::{ops::Range, time::Duration}; #[derive(Debug)] pub struct Scheduler { @@ -43,7 +43,7 @@ impl Scheduler { let duration = if self.reward_period.end + self.reward_offset > now { self.reward_period.end + self.reward_offset - now } else if next_reward_period.end + self.reward_offset <= now { - Duration::zero() + chrono::Duration::zero() } else { (next_reward_period.end + self.reward_offset) - now }; @@ -63,11 +63,11 @@ mod tests { } fn reward_period_length() -> Duration { - Duration::hours(24) + chrono::Duration::hours(24).to_std().unwrap() } fn standard_duration(minutes: i64) -> Result<Duration, OutOfRangeError> { - Duration::minutes(minutes) + chrono::Duration::minutes(minutes) .to_std() .map_err(|_| OutOfRangeError) } @@ -78,7 +78,7 @@ reward_period_length(), dt(2022, 12, 1, 0, 0, 0), dt(2022, 12, 2, 0, 0, 0), - Duration::minutes(30), + chrono::Duration::minutes(30).to_std().unwrap(), ); let now = dt(2022, 12, 1, 1, 0, 0); @@ -98,7 +98,7 @@ reward_period_length(), dt(2022, 12, 1, 0, 0, 0), dt(2022, 12, 2, 0, 0, 0), - Duration::minutes(30), + chrono::Duration::minutes(30).to_std().unwrap(), ); let now = dt(2022, 12, 2, 0, 30, 0); @@ -122,7 +122,7 @@ reward_period_length(), dt(2022, 12, 1, 0, 0, 0), dt(2022, 12, 2, 0, 0, 0), - Duration::minutes(30), + chrono::Duration::minutes(30).to_std().unwrap(), ); let now = dt(2022, 12, 2, 0, 15, 0); diff --git a/solana/src/burn.rs b/solana/src/burn.rs index d31db7233..3103a4cc2 100644 --- a/solana/src/burn.rs +++ b/solana/src/burn.rs @@ -141,9 +141,9 @@ impl SolanaNetwork for SolanaRpc { if self.payers_to_monitor.contains(payer) { metrics::gauge!( "balance", - account_layout.amount as f64, "payer" => payer.to_string() - ); + ) + .set(account_layout.amount as f64); } Ok(account_layout.amount)
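Aside on the chrono/std duration seam this change leaves behind: settings and schedulers now hold `std::time::Duration`, but `DateTime<Utc>` arithmetic still requires `chrono::Duration`, so code converts at the boundary (as the reward_scheduler tests above now do explicitly). A minimal illustration, assuming chrono 0.4:

```rust
use std::time::Duration;

fn main() {
    // chrono -> std: to_std() is fallible because chrono durations can be negative.
    let offset: Duration = chrono::Duration::minutes(30).to_std().unwrap();
    assert_eq!(offset, Duration::from_secs(1800));

    // std -> chrono: needed again as soon as the value is added to a DateTime.
    let deadline = chrono::Utc::now() + chrono::Duration::from_std(offset).unwrap();
    assert!(deadline > chrono::Utc::now());
}
```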