diff --git a/.env.test b/.env.test new file mode 100644 index 00000000..75cfea32 --- /dev/null +++ b/.env.test @@ -0,0 +1,24 @@ +##### AWS config ##### + +AWS_ACCESS_KEY_ID="AWS_ACCESS_KEY_ID" +AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY" +AWS_S3_BUCKET_NAME="madara-orchestrator-test-bucket" +AWS_S3_BUCKET_REGION="us-east-1" +AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566" + +##### On chain config ##### + +MADARA_RPC_URL="http://localhost:3000" +ETHEREUM_RPC_URL="http://localhost:3001" +MEMORY_PAGES_CONTRACT_ADDRESS="0x000000000000000000000000000000000001dead" +PRIVATE_KEY="0xdead" +ETHEREUM_PRIVATE_KEY="0x000000000000000000000000000000000000000000000000000000000000beef" +STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS="0x000000000000000000000000000000000002dead" + +##### Config URLs ##### + +DA_LAYER="ethereum" +PROVER_SERVICE="sharp" +SETTLEMENT_LAYER="ethereum" +DATA_STORAGE="s3" +MONGODB_CONNECTION_STRING="mongodb://localhost:27017" diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 29f80c5a..a55082e6 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -10,6 +10,21 @@ jobs: # sadly, for now we have to "rebuild" for the coverage runs-on: ubuntu-latest + services: + localstack: + image: localstack/localstack + env: + SERVICES: s3, sqs + DEFAULT_REGION: us-east-1 + AWS_ACCESS_KEY_ID: "AWS_ACCESS_KEY_ID" + AWS_SECRET_ACCESS_KEY: "AWS_SECRET_ACCESS_KEY" + ports: + - 4566:4566 + mongodb: + image: mongo:latest + ports: + - 27017:27017 + steps: - uses: actions/checkout@v3 diff --git a/CHANGELOG.md b/CHANGELOG.md index 40fd1c8d..9a22504e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,9 +9,13 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - Function to calculate the kzg proof of x_0. - Tests for updating the state. - Function to update the state and publish blob on ethereum in state update job. +- Fixtures for testing. +- Tests for database client. 
## Changed +- GitHub's coverage CI yml file for localstack and db testing. + ## Removed - `fetch_from_test` argument diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs index 41ce3824..55761526 100644 --- a/crates/orchestrator/src/config.rs +++ b/crates/orchestrator/src/config.rs @@ -148,7 +148,7 @@ pub async fn config_force_init(config: Config) { } /// Builds the DA client based on the environment variable DA_LAYER -async fn build_da_client() -> Box { +pub async fn build_da_client() -> Box { match get_env_var_or_panic("DA_LAYER").as_str() { "ethereum" => { let config = EthereumDaConfig::new_from_env(); @@ -159,7 +159,7 @@ async fn build_da_client() -> Box { } /// Builds the prover service based on the environment variable PROVER_SERVICE -fn build_prover_service(settings_provider: &impl SettingsProvider) -> Box { +pub fn build_prover_service(settings_provider: &impl SettingsProvider) -> Box { match get_env_var_or_panic("PROVER_SERVICE").as_str() { "sharp" => Box::new(SharpProverService::with_settings(settings_provider)), _ => panic!("Unsupported prover service"), @@ -167,7 +167,9 @@ fn build_prover_service(settings_provider: &impl SettingsProvider) -> Box Box { +pub async fn build_settlement_client( + settings_provider: &impl SettingsProvider, +) -> Box { match get_env_var_or_panic("SETTLEMENT_LAYER").as_str() { "ethereum" => Box::new(EthereumSettlementClient::with_settings(settings_provider)), "starknet" => Box::new(StarknetSettlementClient::with_settings(settings_provider).await), @@ -175,7 +177,7 @@ async fn build_settlement_client(settings_provider: &impl SettingsProvider) -> B } } -async fn build_storage_client() -> Box { +pub async fn build_storage_client() -> Box { match get_env_var_or_panic("DATA_STORAGE").as_str() { "s3" => Box::new(AWSS3::new(AWSS3Config::new_from_env()).await), _ => panic!("Unsupported Storage Client"), diff --git a/crates/orchestrator/src/controllers/jobs_controller.rs 
b/crates/orchestrator/src/controllers/jobs_controller.rs deleted file mode 100644 index 43a67652..00000000 --- a/crates/orchestrator/src/controllers/jobs_controller.rs +++ /dev/null @@ -1,24 +0,0 @@ -use std::collections::HashMap; - -use axum::extract::Json; -use serde::Deserialize; - -use crate::controllers::errors::AppError; -use crate::jobs::types::JobType; - -/// Client request to create a job -#[derive(Debug, Deserialize)] -pub struct CreateJobRequest { - /// Job type - job_type: JobType, - /// Internal id must be a way to identify the job. For example - /// block_no, transaction_hash etc. The (job_type, internal_id) - /// pair must be unique. - internal_id: String, -} - -/// Create a job -pub async fn create_job(Json(payload): Json) -> Result, AppError> { - crate::jobs::create_job(payload.job_type, payload.internal_id, HashMap::new()).await?; - Ok(Json::from(())) -} diff --git a/crates/orchestrator/src/controllers/mod.rs b/crates/orchestrator/src/controllers/mod.rs index aadb38b7..8575ccdc 100644 --- a/crates/orchestrator/src/controllers/mod.rs +++ b/crates/orchestrator/src/controllers/mod.rs @@ -1,5 +1,2 @@ /// Errors mod errors; - -/// Job controllers -pub mod jobs_controller; diff --git a/crates/orchestrator/src/data_storage/aws_s3/config.rs b/crates/orchestrator/src/data_storage/aws_s3/config.rs index 7c41f3c6..06eeaff8 100644 --- a/crates/orchestrator/src/data_storage/aws_s3/config.rs +++ b/crates/orchestrator/src/data_storage/aws_s3/config.rs @@ -12,6 +12,9 @@ pub struct AWSS3Config { pub s3_bucket_name: String, /// S3 Bucket region pub s3_bucket_region: String, + /// Endpoint url + #[cfg(test)] + pub endpoint_url: String, } /// Implementation of `DataStorageConfig` for `AWSS3Config` @@ -23,6 +26,8 @@ impl DataStorageConfig for AWSS3Config { s3_key_secret: get_env_var_or_panic("AWS_SECRET_ACCESS_KEY"), s3_bucket_name: get_env_var_or_panic("AWS_S3_BUCKET_NAME"), s3_bucket_region: get_env_var_or_panic("AWS_S3_BUCKET_REGION"), + #[cfg(test)] + 
endpoint_url: get_env_var_or_panic("AWS_ENDPOINT_URL"), } } } diff --git a/crates/orchestrator/src/data_storage/aws_s3/mod.rs b/crates/orchestrator/src/data_storage/aws_s3/mod.rs index 81751929..50ae6e3e 100644 --- a/crates/orchestrator/src/data_storage/aws_s3/mod.rs +++ b/crates/orchestrator/src/data_storage/aws_s3/mod.rs @@ -22,7 +22,6 @@ pub struct AWSS3 { impl AWSS3 { /// Initializes a new AWS S3 client by passing the config /// and returning it. - #[allow(dead_code)] pub async fn new(config: AWSS3Config) -> Self { // AWS cred building let credentials = Credentials::new( @@ -33,7 +32,15 @@ impl AWSS3 { "loaded_from_custom_env", ); let region = Region::new(config.s3_bucket_region.clone().to_string()); - let conf_builder = Builder::new().region(region).credentials_provider(credentials); + + #[allow(unused_mut)] + let mut conf_builder = Builder::new().region(region).credentials_provider(credentials).force_path_style(true); + + #[cfg(test)] + { + conf_builder = conf_builder.endpoint_url(config.endpoint_url.clone().to_string()); + } + let conf = conf_builder.build(); // Building AWS S3 config @@ -69,4 +76,10 @@ impl DataStorage for AWSS3 { Ok(()) } + + #[cfg(test)] + async fn build_test_bucket(&self, bucket_name: &str) -> Result<()> { + self.client.create_bucket().bucket(bucket_name).send().await?; + Ok(()) + } } diff --git a/crates/orchestrator/src/data_storage/mod.rs b/crates/orchestrator/src/data_storage/mod.rs index f259bf61..b3ff74fd 100644 --- a/crates/orchestrator/src/data_storage/mod.rs +++ b/crates/orchestrator/src/data_storage/mod.rs @@ -17,6 +17,8 @@ use mockall::automock; pub trait DataStorage: Send + Sync { async fn get_data(&self, key: &str) -> Result; async fn put_data(&self, data: Bytes, key: &str) -> Result<()>; + #[cfg(test)] + async fn build_test_bucket(&self, bucket_name: &str) -> Result<()>; } /// **DataStorageConfig** : Trait method to represent the config struct needed for diff --git a/crates/orchestrator/src/database/mongodb/mod.rs 
b/crates/orchestrator/src/database/mongodb/mod.rs index 5be0cf54..aea5a41a 100644 --- a/crates/orchestrator/src/database/mongodb/mod.rs +++ b/crates/orchestrator/src/database/mongodb/mod.rs @@ -40,6 +40,10 @@ impl MongoDb { MongoDb { client } } + pub fn client(&self) -> Client { + self.client.clone() + } + fn get_job_collection(&self) -> Collection { self.client.database("orchestrator").collection("jobs") } diff --git a/crates/orchestrator/src/lib.rs b/crates/orchestrator/src/lib.rs index 7e4367ef..3d02378b 100644 --- a/crates/orchestrator/src/lib.rs +++ b/crates/orchestrator/src/lib.rs @@ -1,6 +1,6 @@ /// Config of the service. Contains configurations for DB, Queues and other services. pub mod config; -mod constants; +pub mod constants; /// Controllers for the routes pub mod controllers; /// Contains the trait that implements the fetching functions @@ -17,6 +17,6 @@ pub mod queue; /// Contains the routes for the service pub mod routes; #[cfg(test)] -mod tests; +pub mod tests; /// Contains workers which act like cron jobs pub mod workers; diff --git a/crates/orchestrator/src/queue/job_queue.rs b/crates/orchestrator/src/queue/job_queue.rs index 9432276f..aebba4bf 100644 --- a/crates/orchestrator/src/queue/job_queue.rs +++ b/crates/orchestrator/src/queue/job_queue.rs @@ -12,8 +12,8 @@ use uuid::Uuid; use crate::config::config; use crate::jobs::{process_job, verify_job}; -const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue"; -const JOB_VERIFICATION_QUEUE: &str = "madara_orchestrator_job_verification_queue"; +pub const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue"; +pub const JOB_VERIFICATION_QUEUE: &str = "madara_orchestrator_job_verification_queue"; #[derive(Debug, Serialize, Deserialize)] pub struct JobQueueMessage { diff --git a/crates/orchestrator/src/routes.rs b/crates/orchestrator/src/routes.rs index 39d8f3d4..877e5b88 100644 --- a/crates/orchestrator/src/routes.rs +++ b/crates/orchestrator/src/routes.rs @@ 
-1,16 +1,10 @@ use axum::http::StatusCode; use axum::response::IntoResponse; -use axum::routing::{get, post}; +use axum::routing::get; use axum::Router; -use crate::controllers::jobs_controller; - pub fn app_router() -> Router { - Router::new() - .route("/health", get(root)) - .nest("/v1/dev", dev_routes()) - .nest("/v1/job", job_routes()) - .fallback(handler_404) + Router::new().route("/health", get(root)).nest("/v1/dev", dev_routes()).fallback(handler_404) } async fn root() -> &'static str { @@ -21,10 +15,6 @@ async fn handler_404() -> impl IntoResponse { (StatusCode::NOT_FOUND, "The requested resource was not found") } -fn job_routes() -> Router { - Router::new().route("/create_job", post(jobs_controller::create_job)) -} - fn dev_routes() -> Router { Router::new() } diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index c8a36696..0a97fa33 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -6,16 +6,21 @@ use std::sync::Arc; use ::uuid::Uuid; use constants::*; use da_client_interface::MockDaClient; +use mongodb::Client; use prover_client_interface::MockProverClient; use rstest::*; use settlement_client_interface::MockSettlementClient; use starknet::providers::jsonrpc::HttpTransport; use starknet::providers::JsonRpcClient; use url::Url; +use utils::env_utils::get_env_var_or_panic; +use utils::settings::default::DefaultSettingsProvider; -use crate::config::Config; +use crate::config::{build_storage_client, config_force_init, Config}; use crate::data_storage::MockDataStorage; -use crate::database::MockDatabase; +use crate::database::mongodb::config::MongoDbConfig; +use crate::database::mongodb::MongoDb; +use crate::database::{DatabaseConfig, MockDatabase}; use crate::jobs::types::JobStatus::Created; use crate::jobs::types::JobType::DataSubmission; use crate::jobs::types::{ExternalId, JobItem}; @@ -74,3 +79,43 @@ pub fn custom_job_item(default_job_item: 
JobItem, #[default(String::from("0"))] job_item } + +/// For implementation of integration tests +#[fixture] +pub async fn build_config() -> color_eyre::Result<()> { + // Getting .env.test variables + dotenvy::from_filename("../.env.test")?; + + // init starknet client + let provider = JsonRpcClient::new(HttpTransport::new( + Url::parse(get_env_var_or_panic("MADARA_RPC_URL").as_str()).expect("Failed to parse URL"), + )); + + // init database + let database = Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await); + + // init the queue + let queue = Box::new(crate::queue::sqs::SqsQueue {}); + + let da_client = crate::config::build_da_client().await; + let settings_provider = DefaultSettingsProvider {}; + let settlement_client = crate::config::build_settlement_client(&settings_provider).await; + let prover_client = crate::config::build_prover_service(&settings_provider); + let storage_client = build_storage_client().await; + + // building a test bucket : + storage_client.build_test_bucket(&get_env_var_or_panic("AWS_S3_BUCKET_NAME")).await?; + + let config = + Config::new(Arc::new(provider), da_client, prover_client, settlement_client, database, queue, storage_client); + config_force_init(config).await; + + Ok(()) +} + +pub async fn drop_database() -> color_eyre::Result<()> { + let db_client: Client = MongoDb::new(MongoDbConfig::new_from_env()).await.client(); + // dropping `jobs` collection. 
+ db_client.database("orchestrator").collection::("jobs").drop(None).await?; + Ok(()) +} diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs new file mode 100644 index 00000000..ef1dd0ec --- /dev/null +++ b/crates/orchestrator/src/tests/data_storage/mod.rs @@ -0,0 +1,36 @@ +use crate::data_storage::aws_s3::config::AWSS3Config; +use crate::data_storage::aws_s3::AWSS3; +use crate::data_storage::{DataStorage, DataStorageConfig}; +use crate::tests::common::build_config; +use bytes::Bytes; +use rstest::rstest; +use serde_json::json; + +#[rstest] +#[tokio::test] +async fn test_put_and_get_data_s3() -> color_eyre::Result<()> { + build_config().await?; + dotenvy::from_filename("../.env.test")?; + + let config = AWSS3Config::new_from_env(); + let s3_client = AWSS3::new(config).await; + + let mock_data = json!( + { + "body" : "hello world. hello world." + } + ); + let json_bytes = serde_json::to_vec(&mock_data)?; + let key = "test_data.txt"; + + // putting test data on key : "test_data.txt" + s3_client.put_data(Bytes::from(json_bytes), key).await.expect("Unable to put data into the bucket."); + + // getting the data from key : "test_data.txt" + let data = s3_client.get_data(key).await.expect("Unable to get the data from the bucket."); + let received_json: serde_json::Value = serde_json::from_slice(&data)?; + + assert_eq!(received_json, mock_data); + + Ok(()) +} diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index c8adec2c..29ce7847 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -1,7 +1,199 @@ +use crate::config::config; +use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; +use crate::tests::common::{build_config, drop_database}; +use color_eyre::eyre::eyre; use rstest::*; +use uuid::Uuid; #[rstest] #[tokio::test] -async fn test_database() { - // TODO: write test case +async 
fn test_database_connection() -> color_eyre::Result<()> { + let init_config_error = build_config().await.is_err(); + if init_config_error { + return Err(eyre!("Not able to init config.")); + } + + Ok(()) +} + +/// Tests for `create_job` operation in database trait. +/// Creates 3 jobs and asserts them. +#[rstest] +#[tokio::test] +async fn test_database_create_job() -> color_eyre::Result<()> { + let init_config = build_config().await.is_ok(); + if !init_config { + return Err(eyre!("Not able to init config.")); + } + + drop_database().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + let job_vec = [ + build_job_item(JobType::ProofCreation, JobStatus::Created, 1), + build_job_item(JobType::ProofCreation, JobStatus::Created, 2), + build_job_item(JobType::ProofCreation, JobStatus::Created, 3), + ]; + + database_client.create_job(job_vec[0].clone()).await.unwrap(); + database_client.create_job(job_vec[1].clone()).await.unwrap(); + database_client.create_job(job_vec[2].clone()).await.unwrap(); + + let get_job_1 = + database_client.get_job_by_internal_id_and_type("1", &JobType::ProofCreation).await.unwrap().unwrap(); + let get_job_2 = + database_client.get_job_by_internal_id_and_type("2", &JobType::ProofCreation).await.unwrap().unwrap(); + let get_job_3 = + database_client.get_job_by_internal_id_and_type("3", &JobType::ProofCreation).await.unwrap().unwrap(); + + assert_eq!(get_job_1, job_vec[0].clone()); + assert_eq!(get_job_2, job_vec[1].clone()); + assert_eq!(get_job_3, job_vec[2].clone()); + + Ok(()) +} + +/// Test for `get_jobs_without_successor` operation in database trait. 
+/// Creates jobs in the following sequence : +/// +/// - Creates 3 snos run jobs with completed status +/// +/// - Creates 2 proof creation jobs with succession of the 2 snos jobs +/// +/// - Should return one snos job without the successor job of proof creation +#[rstest] +#[tokio::test] +async fn test_database_get_jobs_without_successor() -> color_eyre::Result<()> { + let init_config = build_config().await.is_ok(); + if !init_config { + return Err(eyre!("Not able to init config.")); + } + + drop_database().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + let job_vec = [ + build_job_item(JobType::SnosRun, JobStatus::Completed, 1), + build_job_item(JobType::SnosRun, JobStatus::Completed, 2), + build_job_item(JobType::SnosRun, JobStatus::Completed, 3), + build_job_item(JobType::ProofCreation, JobStatus::Created, 1), + build_job_item(JobType::ProofCreation, JobStatus::Created, 3), + ]; + + database_client.create_job(job_vec[0].clone()).await.unwrap(); + database_client.create_job(job_vec[1].clone()).await.unwrap(); + database_client.create_job(job_vec[2].clone()).await.unwrap(); + database_client.create_job(job_vec[3].clone()).await.unwrap(); + database_client.create_job(job_vec[4].clone()).await.unwrap(); + + let jobs_without_successor = database_client + .get_jobs_without_successor(JobType::SnosRun, JobStatus::Completed, JobType::ProofCreation) + .await + .unwrap(); + + assert_eq!(jobs_without_successor.len(), 1, "Expected number of jobs assertion failed."); + assert_eq!(jobs_without_successor[0], job_vec[1], "Expected job assertion failed."); + + Ok(()) +} + +/// Test for `get_last_successful_job_by_type` operation in database trait. +/// Creates the jobs in following sequence : +/// +/// - Creates 3 successful jobs. 
+/// +/// - Should return the last successful job +#[rstest] +#[tokio::test] +async fn test_database_get_last_successful_job_by_type() -> color_eyre::Result<()> { + let init_config = build_config().await.is_ok(); + if !init_config { + return Err(eyre!("Not able to init config.")); + } + + drop_database().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + let job_vec = [ + build_job_item(JobType::SnosRun, JobStatus::Completed, 1), + build_job_item(JobType::SnosRun, JobStatus::Completed, 2), + build_job_item(JobType::SnosRun, JobStatus::Completed, 3), + ]; + + database_client.create_job(job_vec[0].clone()).await.unwrap(); + database_client.create_job(job_vec[1].clone()).await.unwrap(); + database_client.create_job(job_vec[2].clone()).await.unwrap(); + + let last_successful_job = database_client.get_last_successful_job_by_type(JobType::SnosRun).await.unwrap(); + + assert_eq!(last_successful_job.unwrap(), job_vec[2], "Expected job assertion failed"); + + Ok(()) +} + +/// Test for `get_jobs_after_internal_id_by_job_type` operation in database trait. +/// Creates the jobs in following sequence : +/// +/// - Creates 5 successful jobs. 
+/// +/// - Should return the jobs after internal id +#[rstest] +#[tokio::test] +async fn test_database_get_jobs_after_internal_id_by_job_type() -> color_eyre::Result<()> { + let init_config = build_config().await.is_ok(); + if !init_config { + return Err(eyre!("Not able to init config.")); + } + + drop_database().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + let job_vec = [ + build_job_item(JobType::SnosRun, JobStatus::Completed, 1), + build_job_item(JobType::SnosRun, JobStatus::Completed, 2), + build_job_item(JobType::SnosRun, JobStatus::Completed, 3), + build_job_item(JobType::SnosRun, JobStatus::Completed, 4), + build_job_item(JobType::SnosRun, JobStatus::Completed, 5), + ]; + + database_client.create_job(job_vec[0].clone()).await.unwrap(); + database_client.create_job(job_vec[1].clone()).await.unwrap(); + database_client.create_job(job_vec[2].clone()).await.unwrap(); + database_client.create_job(job_vec[3].clone()).await.unwrap(); + database_client.create_job(job_vec[4].clone()).await.unwrap(); + + let jobs_after_internal_id = database_client + .get_jobs_after_internal_id_by_job_type(JobType::SnosRun, JobStatus::Completed, "2".to_string()) + .await + .unwrap(); + + assert_eq!(jobs_after_internal_id.len(), 3, "Number of jobs assertion failed"); + assert_eq!(jobs_after_internal_id[0], job_vec[2]); + assert_eq!(jobs_after_internal_id[1], job_vec[3]); + assert_eq!(jobs_after_internal_id[2], job_vec[4]); + + Ok(()) +} + +// Test Util Functions +// ========================================== + +fn build_job_item(job_type: JobType, job_status: JobStatus, internal_id: u64) -> JobItem { + JobItem { + id: Uuid::new_v4(), + internal_id: internal_id.to_string(), + job_type, + status: job_status, + external_id: ExternalId::Number(0), + metadata: Default::default(), + version: 0, + } } diff --git a/crates/orchestrator/src/tests/mod.rs b/crates/orchestrator/src/tests/mod.rs index b4b53dd3..83dfc04c 100644 --- 
a/crates/orchestrator/src/tests/mod.rs +++ b/crates/orchestrator/src/tests/mod.rs @@ -7,4 +7,5 @@ pub mod server; pub mod queue; pub mod common; +mod data_storage; pub mod workers; diff --git a/crates/orchestrator/src/tests/workers/snos/mod.rs b/crates/orchestrator/src/tests/workers/snos/mod.rs index fa9a4a1e..7799ffb2 100644 --- a/crates/orchestrator/src/tests/workers/snos/mod.rs +++ b/crates/orchestrator/src/tests/workers/snos/mod.rs @@ -1,6 +1,7 @@ use crate::config::config_force_init; use crate::database::MockDatabase; use crate::jobs::types::JobType; +use crate::queue::job_queue::JOB_PROCESSING_QUEUE; use crate::queue::MockQueueProvider; use crate::tests::common::init_config; use crate::tests::workers::utils::get_job_item_mock_by_id; @@ -26,8 +27,6 @@ async fn test_snos_worker(#[case] db_val: bool) -> Result<(), Box> { let start_job_index; let block; - const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue"; - // Mocking db function expectations if !db_val { db.expect_get_last_successful_job_by_type().times(1).with(eq(JobType::SnosRun)).returning(|_| Ok(None)); diff --git a/crates/settlement-clients/ethereum/src/lib.rs b/crates/settlement-clients/ethereum/src/lib.rs index 534edf48..cdf2788b 100644 --- a/crates/settlement-clients/ethereum/src/lib.rs +++ b/crates/settlement-clients/ethereum/src/lib.rs @@ -69,7 +69,7 @@ impl EthereumSettlementClient { ProviderBuilder::new().with_recommended_fillers().wallet(wallet.clone()).on_http(settlement_cfg.rpc_url), ); let core_contract_client = StarknetValidityContractClient::new( - Address::from_slice(settlement_cfg.core_contract_address.as_bytes()).0.into(), + Address::from_str(&settlement_cfg.core_contract_address).unwrap().0.into(), provider.clone(), );