From 726f1ed13e8d55b352e062da80b556cb82ded8a4 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Tue, 23 Jul 2024 20:11:53 +0530 Subject: [PATCH 01/12] feat : added tests for increasing coverage --- crates/orchestrator/src/constants.rs | 1 + .../src/controllers/jobs_controller.rs | 4 +- .../src/data_storage/aws_s3/config.rs | 3 ++ .../src/data_storage/aws_s3/mod.rs | 9 ++-- crates/orchestrator/src/lib.rs | 4 +- .../orchestrator/src/tests/controllers/mod.rs | 54 +++++++++++++++++++ .../src/tests/data_storage/mod.rs | 34 ++++++++++++ crates/orchestrator/src/tests/mod.rs | 2 + .../src/tests/workers/snos/mod.rs | 3 +- 9 files changed, 105 insertions(+), 9 deletions(-) create mode 100644 crates/orchestrator/src/tests/controllers/mod.rs create mode 100644 crates/orchestrator/src/tests/data_storage/mod.rs diff --git a/crates/orchestrator/src/constants.rs b/crates/orchestrator/src/constants.rs index 9361d764..1fd645a9 100644 --- a/crates/orchestrator/src/constants.rs +++ b/crates/orchestrator/src/constants.rs @@ -1,2 +1,3 @@ pub const BLOB_DATA_FILE_NAME: &str = "blob_data.txt"; pub const SNOS_OUTPUT_FILE_NAME: &str = "snos_output.json"; +pub const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue"; diff --git a/crates/orchestrator/src/controllers/jobs_controller.rs b/crates/orchestrator/src/controllers/jobs_controller.rs index 43a67652..e5f0daf8 100644 --- a/crates/orchestrator/src/controllers/jobs_controller.rs +++ b/crates/orchestrator/src/controllers/jobs_controller.rs @@ -10,11 +10,11 @@ use crate::jobs::types::JobType; #[derive(Debug, Deserialize)] pub struct CreateJobRequest { /// Job type - job_type: JobType, + pub job_type: JobType, /// Internal id must be a way to identify the job. For example /// block_no, transaction_hash etc. The (job_type, internal_id) /// pair must be unique. 
- internal_id: String, + pub internal_id: String, } /// Create a job diff --git a/crates/orchestrator/src/data_storage/aws_s3/config.rs b/crates/orchestrator/src/data_storage/aws_s3/config.rs index 7c41f3c6..d4518d85 100644 --- a/crates/orchestrator/src/data_storage/aws_s3/config.rs +++ b/crates/orchestrator/src/data_storage/aws_s3/config.rs @@ -12,6 +12,8 @@ pub struct AWSS3Config { pub s3_bucket_name: String, /// S3 Bucket region pub s3_bucket_region: String, + /// Endpoint url + pub endpoint_url: String, } /// Implementation of `DataStorageConfig` for `AWSS3Config` @@ -23,6 +25,7 @@ impl DataStorageConfig for AWSS3Config { s3_key_secret: get_env_var_or_panic("AWS_SECRET_ACCESS_KEY"), s3_bucket_name: get_env_var_or_panic("AWS_S3_BUCKET_NAME"), s3_bucket_region: get_env_var_or_panic("AWS_S3_BUCKET_REGION"), + endpoint_url: get_env_var_or_panic("AWS_ENDPOINT_URL"), } } } diff --git a/crates/orchestrator/src/data_storage/aws_s3/mod.rs b/crates/orchestrator/src/data_storage/aws_s3/mod.rs index 81751929..4e6e908a 100644 --- a/crates/orchestrator/src/data_storage/aws_s3/mod.rs +++ b/crates/orchestrator/src/data_storage/aws_s3/mod.rs @@ -1,5 +1,5 @@ use crate::data_storage::aws_s3::config::AWSS3Config; -use crate::data_storage::DataStorage; +use crate::data_storage::{DataStorage}; use async_trait::async_trait; use aws_sdk_s3::config::{Builder, Credentials, Region}; use aws_sdk_s3::primitives::ByteStream; @@ -22,7 +22,6 @@ pub struct AWSS3 { impl AWSS3 { /// Initializes a new AWS S3 client by passing the config /// and returning it. 
- #[allow(dead_code)] pub async fn new(config: AWSS3Config) -> Self { // AWS cred building let credentials = Credentials::new( @@ -33,7 +32,11 @@ impl AWSS3 { "loaded_from_custom_env", ); let region = Region::new(config.s3_bucket_region.clone().to_string()); - let conf_builder = Builder::new().region(region).credentials_provider(credentials); + let conf_builder = Builder::new() + .region(region) + .credentials_provider(credentials) + .endpoint_url(config.endpoint_url.clone()) + .force_path_style(true); let conf = conf_builder.build(); // Building AWS S3 config diff --git a/crates/orchestrator/src/lib.rs b/crates/orchestrator/src/lib.rs index 7e4367ef..3d02378b 100644 --- a/crates/orchestrator/src/lib.rs +++ b/crates/orchestrator/src/lib.rs @@ -1,6 +1,6 @@ /// Config of the service. Contains configurations for DB, Queues and other services. pub mod config; -mod constants; +pub mod constants; /// Controllers for the routes pub mod controllers; /// Contains the trait that implements the fetching functions @@ -17,6 +17,6 @@ pub mod queue; /// Contains the routes for the service pub mod routes; #[cfg(test)] -mod tests; +pub mod tests; /// Contains workers which act like cron jobs pub mod workers; diff --git a/crates/orchestrator/src/tests/controllers/mod.rs b/crates/orchestrator/src/tests/controllers/mod.rs new file mode 100644 index 00000000..2aae2349 --- /dev/null +++ b/crates/orchestrator/src/tests/controllers/mod.rs @@ -0,0 +1,54 @@ +use crate::config::config_force_init; +use crate::constants::JOB_PROCESSING_QUEUE; +use crate::controllers::jobs_controller::{create_job, CreateJobRequest}; +use crate::database::MockDatabase; +use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; +use crate::queue::MockQueueProvider; +use crate::tests::common::init_config; +use axum::Json; +use mockall::predicate::eq; +use rstest::rstest; +use uuid::Uuid; + +#[rstest] +#[tokio::test] +async fn test_create_job_jobs_controller() -> color_eyre::Result<()> { + let mut db = 
MockDatabase::new(); + let mut queue = MockQueueProvider::new(); + + // mocking db get function (when creating job it should return no job existing) + db.expect_get_last_successful_job_by_type().times(1).with(eq(JobType::SnosRun)).returning(|_| Ok(None)); + // mocking db get function (when creating job to pre-check if job is not existing : worker module) + db.expect_get_job_by_internal_id_and_type() + .times(1) + .with(eq("1"), eq(JobType::SnosRun)) + .returning(|_, _| Ok(None)); + // mocking creation of the job + db.expect_create_job().times(1).withf(move |item| item.internal_id == "1".to_string()).returning(move |_| { + Ok(JobItem { + id: Uuid::new_v4(), + internal_id: "1".to_string(), + job_type: JobType::SnosRun, + status: JobStatus::Created, + external_id: ExternalId::Number(0), + metadata: Default::default(), + version: 0, + }) + }); + // mocking sending of the job into the queue after the creation + queue + .expect_send_message_to_queue() + .returning(|_, _, _| Ok(())) + .withf(|queue, _payload, _delay| queue == JOB_PROCESSING_QUEUE); + + let config = init_config(None, Some(db), Some(queue), None, None, None, None).await; + config_force_init(config).await; + + let create_job_request = CreateJobRequest { job_type: JobType::SnosRun, internal_id: "1".to_string() }; + + let create_job_call = create_job(Json::from(create_job_request)).await.unwrap(); + // comparing the output (safety check not really necessary) + assert_eq!(create_job_call.0, Json::from(()).0); + + Ok(()) +} diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs new file mode 100644 index 00000000..6322b3a0 --- /dev/null +++ b/crates/orchestrator/src/tests/data_storage/mod.rs @@ -0,0 +1,34 @@ +use bytes::Bytes; +use dotenvy::dotenv; +use rstest::rstest; +use serde_json::json; +use crate::data_storage::aws_s3::AWSS3; +use crate::data_storage::aws_s3::config::AWSS3Config; +use crate::data_storage::{DataStorage, DataStorageConfig}; + 
+#[rstest] +#[tokio::test] +async fn test_put_and_get_data_s3() -> color_eyre::Result<()> { + dotenv().ok(); + let config = AWSS3Config::new_from_env(); + let s3_client = AWSS3::new(config).await; + + let mock_data = json!( + { + "body" : "hello world. hello world." + } + ); + let json_bytes = serde_json::to_vec(&mock_data)?; + let key = "test_data.txt"; + + // putting test data on key : "test_data.txt" + s3_client.put_data(Bytes::from(json_bytes), key).await.expect("Unable to put data into the bucket."); + + // getting the data from key : "test_data.txt" + let data = s3_client.get_data(key).await.expect("Unable to get the data from the bucket."); + let received_json: serde_json::Value = serde_json::from_slice(&data)?; + + assert_eq!(received_json, mock_data); + + Ok(()) +} \ No newline at end of file diff --git a/crates/orchestrator/src/tests/mod.rs b/crates/orchestrator/src/tests/mod.rs index b4b53dd3..142be3cf 100644 --- a/crates/orchestrator/src/tests/mod.rs +++ b/crates/orchestrator/src/tests/mod.rs @@ -7,4 +7,6 @@ pub mod server; pub mod queue; pub mod common; +mod controllers; pub mod workers; +mod data_storage; diff --git a/crates/orchestrator/src/tests/workers/snos/mod.rs b/crates/orchestrator/src/tests/workers/snos/mod.rs index fa9a4a1e..c4e95902 100644 --- a/crates/orchestrator/src/tests/workers/snos/mod.rs +++ b/crates/orchestrator/src/tests/workers/snos/mod.rs @@ -1,4 +1,5 @@ use crate::config::config_force_init; +use crate::constants::JOB_PROCESSING_QUEUE; use crate::database::MockDatabase; use crate::jobs::types::JobType; use crate::queue::MockQueueProvider; @@ -26,8 +27,6 @@ async fn test_snos_worker(#[case] db_val: bool) -> Result<(), Box> { let start_job_index; let block; - const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue"; - // Mocking db function expectations if !db_val { db.expect_get_last_successful_job_by_type().times(1).with(eq(JobType::SnosRun)).returning(|_| Ok(None)); From 
b3a80b04003ab942a27ed3b0d85c3e8592413f80 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Wed, 24 Jul 2024 19:46:28 +0530 Subject: [PATCH 02/12] feat : added mongo db tests and fixtures and updated the ci for tests --- .env.example | 3 + .github/workflows/coverage.yml | 38 ++++++++++ crates/orchestrator/src/config.rs | 10 +-- .../src/data_storage/aws_s3/mod.rs | 2 +- .../orchestrator/src/database/mongodb/mod.rs | 4 ++ crates/orchestrator/src/tests/common/mod.rs | 52 +++++++++++++- .../orchestrator/src/tests/controllers/mod.rs | 11 +-- .../src/tests/data_storage/mod.rs | 8 +-- crates/orchestrator/src/tests/database/mod.rs | 70 ++++++++++++++++++- crates/orchestrator/src/tests/mod.rs | 2 +- crates/settlement-clients/ethereum/src/lib.rs | 2 +- 11 files changed, 179 insertions(+), 23 deletions(-) diff --git a/.env.example b/.env.example index 57635205..37723425 100644 --- a/.env.example +++ b/.env.example @@ -28,3 +28,6 @@ AWS_SECRET_ACCESS_KEY= # S3 AWS_S3_BUCKET_NAME= AWS_S3_BUCKET_REGION= + +# Local Stack +AWS_ENDPOINT_URL= \ No newline at end of file diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 29f80c5a..d4f094eb 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -10,9 +10,29 @@ jobs: # sadly, for now we have to "rebuild" for the coverage runs-on: ubuntu-latest + services: + localstack: + image: localstack/localstack + env: + SERVICES: s3, sqs + DEFAULT_REGION: us-east-1 + AWS_ACCESS_KEY_ID: "AWS_ACCESS_KEY_ID" + AWS_SECRET_ACCESS_KEY: "AWS_SECRET_ACCESS_KEY" + ports: + - 4566:4566 + steps: - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + + - name: Install dependencies + run: | + pip install awscli-local + # selecting a toolchain either by action or manual `rustup` calls should happen # before the plugin, as the cache uses the current rustc version as its cache key - run: rustup show @@ -25,7 +45,25 @@ jobs: cargo llvm-cov 
clean --workspace - name: Run llvm-cov + env: + AWS_ACCESS_KEY_ID: "AWS_ACCESS_KEY_ID" + AWS_SECRET_ACCESS_KEY: "AWS_SECRET_ACCESS_KEY" + AWS_S3_BUCKET_NAME: "madara-orchestrator-test-bucket" + AWS_S3_BUCKET_REGION: "us-east-1" + AWS_ENDPOINT_URL: "http://localhost.localstack.cloud:4566" + MADARA_RPC_URL: "http://localhost:3000" + ETHEREUM_RPC_URL: "http://localhost:3001" + MEMORY_PAGES_CONTRACT_ADDRESS: "0x000000000000000000000000000000000001dead" + PRIVATE_KEY: "0xdead" + ETHEREUM_PRIVATE_KEY: "0x000000000000000000000000000000000000000000000000000000000000beef" + STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS: "0x000000000000000000000000000000000002dead" + DA_LAYER: "ethereum" + PROVER_SERVICE: "sharp" + SETTLEMENT_LAYER: "ethereum" + DATA_STORAGE: "s3" + MONGODB_CONNECTION_STRING: "mongodb://localhost:27017" run: | + aws --endpoint-url=http://localhost:4566 s3api create-bucket --bucket madara-orchestrator-test-bucket cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1 - name: Upload coverage to codecov.io diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs index 41ce3824..55761526 100644 --- a/crates/orchestrator/src/config.rs +++ b/crates/orchestrator/src/config.rs @@ -148,7 +148,7 @@ pub async fn config_force_init(config: Config) { } /// Builds the DA client based on the environment variable DA_LAYER -async fn build_da_client() -> Box { +pub async fn build_da_client() -> Box { match get_env_var_or_panic("DA_LAYER").as_str() { "ethereum" => { let config = EthereumDaConfig::new_from_env(); @@ -159,7 +159,7 @@ async fn build_da_client() -> Box { } /// Builds the prover service based on the environment variable PROVER_SERVICE -fn build_prover_service(settings_provider: &impl SettingsProvider) -> Box { +pub fn build_prover_service(settings_provider: &impl SettingsProvider) -> Box { match get_env_var_or_panic("PROVER_SERVICE").as_str() { "sharp" => Box::new(SharpProverService::with_settings(settings_provider)), _ 
=> panic!("Unsupported prover service"), @@ -167,7 +167,9 @@ fn build_prover_service(settings_provider: &impl SettingsProvider) -> Box Box { +pub async fn build_settlement_client( + settings_provider: &impl SettingsProvider, +) -> Box { match get_env_var_or_panic("SETTLEMENT_LAYER").as_str() { "ethereum" => Box::new(EthereumSettlementClient::with_settings(settings_provider)), "starknet" => Box::new(StarknetSettlementClient::with_settings(settings_provider).await), @@ -175,7 +177,7 @@ async fn build_settlement_client(settings_provider: &impl SettingsProvider) -> B } } -async fn build_storage_client() -> Box { +pub async fn build_storage_client() -> Box { match get_env_var_or_panic("DATA_STORAGE").as_str() { "s3" => Box::new(AWSS3::new(AWSS3Config::new_from_env()).await), _ => panic!("Unsupported Storage Client"), diff --git a/crates/orchestrator/src/data_storage/aws_s3/mod.rs b/crates/orchestrator/src/data_storage/aws_s3/mod.rs index 4e6e908a..da673eba 100644 --- a/crates/orchestrator/src/data_storage/aws_s3/mod.rs +++ b/crates/orchestrator/src/data_storage/aws_s3/mod.rs @@ -1,5 +1,5 @@ use crate::data_storage::aws_s3::config::AWSS3Config; -use crate::data_storage::{DataStorage}; +use crate::data_storage::DataStorage; use async_trait::async_trait; use aws_sdk_s3::config::{Builder, Credentials, Region}; use aws_sdk_s3::primitives::ByteStream; diff --git a/crates/orchestrator/src/database/mongodb/mod.rs b/crates/orchestrator/src/database/mongodb/mod.rs index 5be0cf54..aea5a41a 100644 --- a/crates/orchestrator/src/database/mongodb/mod.rs +++ b/crates/orchestrator/src/database/mongodb/mod.rs @@ -40,6 +40,10 @@ impl MongoDb { MongoDb { client } } + pub fn client(&self) -> Client { + self.client.clone() + } + fn get_job_collection(&self) -> Collection { self.client.database("orchestrator").collection("jobs") } diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index c8a36696..3f57a6f9 100644 --- 
a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -6,16 +6,22 @@ use std::sync::Arc; use ::uuid::Uuid; use constants::*; use da_client_interface::MockDaClient; +use dotenvy::dotenv; +use mongodb::Client; use prover_client_interface::MockProverClient; use rstest::*; use settlement_client_interface::MockSettlementClient; use starknet::providers::jsonrpc::HttpTransport; use starknet::providers::JsonRpcClient; use url::Url; +use utils::env_utils::get_env_var_or_panic; +use utils::settings::default::DefaultSettingsProvider; -use crate::config::Config; +use crate::config::{config_force_init, Config}; use crate::data_storage::MockDataStorage; -use crate::database::MockDatabase; +use crate::database::mongodb::config::MongoDbConfig; +use crate::database::mongodb::MongoDb; +use crate::database::{DatabaseConfig, MockDatabase}; use crate::jobs::types::JobStatus::Created; use crate::jobs::types::JobType::DataSubmission; use crate::jobs::types::{ExternalId, JobItem}; @@ -74,3 +80,45 @@ pub fn custom_job_item(default_job_item: JobItem, #[default(String::from("0"))] job_item } + +/// For implementation of integration tests +#[fixture] +pub async fn build_config() -> color_eyre::Result<()> { + dotenv().ok(); + + // init starknet client + let provider = JsonRpcClient::new(HttpTransport::new( + Url::parse(get_env_var_or_panic("MADARA_RPC_URL").as_str()).expect("Failed to parse URL"), + )); + + // init database + let database = Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await); + + // init the queue + let queue = Box::new(crate::queue::sqs::SqsQueue {}); + + let da_client = crate::config::build_da_client().await; + let settings_provider = DefaultSettingsProvider {}; + let settlement_client = crate::config::build_settlement_client(&settings_provider).await; + let prover_client = crate::config::build_prover_service(&settings_provider); + let storage_client = crate::config::build_storage_client().await; + + let config = + 
Config::new(Arc::new(provider), da_client, prover_client, settlement_client, database, queue, storage_client); + config_force_init(config).await; + + Ok(()) +} + +#[fixture] +pub async fn get_database_client() -> Client { + MongoDb::new(MongoDbConfig::new_from_env()).await.client() +} + +#[fixture] +pub async fn drop_database() -> color_eyre::Result<()> { + let db_client: Client = get_database_client().await; + // dropping `jobs` collection. + db_client.database("orchestrator").collection::("jobs").drop(None).await?; + Ok(()) +} diff --git a/crates/orchestrator/src/tests/controllers/mod.rs b/crates/orchestrator/src/tests/controllers/mod.rs index 2aae2349..b5849a86 100644 --- a/crates/orchestrator/src/tests/controllers/mod.rs +++ b/crates/orchestrator/src/tests/controllers/mod.rs @@ -19,12 +19,9 @@ async fn test_create_job_jobs_controller() -> color_eyre::Result<()> { // mocking db get function (when creating job it should return no job existing) db.expect_get_last_successful_job_by_type().times(1).with(eq(JobType::SnosRun)).returning(|_| Ok(None)); // mocking db get function (when creating job to pre-check if job is not existing : worker module) - db.expect_get_job_by_internal_id_and_type() - .times(1) - .with(eq("1"), eq(JobType::SnosRun)) - .returning(|_, _| Ok(None)); + db.expect_get_job_by_internal_id_and_type().times(1).with(eq("1"), eq(JobType::SnosRun)).returning(|_, _| Ok(None)); // mocking creation of the job - db.expect_create_job().times(1).withf(move |item| item.internal_id == "1".to_string()).returning(move |_| { + db.expect_create_job().times(1).withf(move |item| item.internal_id == *"1").returning(move |_| { Ok(JobItem { id: Uuid::new_v4(), internal_id: "1".to_string(), @@ -46,9 +43,7 @@ async fn test_create_job_jobs_controller() -> color_eyre::Result<()> { let create_job_request = CreateJobRequest { job_type: JobType::SnosRun, internal_id: "1".to_string() }; - let create_job_call = create_job(Json::from(create_job_request)).await.unwrap(); - // 
comparing the output (safety check not really necessary) - assert_eq!(create_job_call.0, Json::from(()).0); + let _ = create_job(Json::from(create_job_request)).await.unwrap(); Ok(()) } diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs index 6322b3a0..8436a808 100644 --- a/crates/orchestrator/src/tests/data_storage/mod.rs +++ b/crates/orchestrator/src/tests/data_storage/mod.rs @@ -1,10 +1,10 @@ +use crate::data_storage::aws_s3::config::AWSS3Config; +use crate::data_storage::aws_s3::AWSS3; +use crate::data_storage::{DataStorage, DataStorageConfig}; use bytes::Bytes; use dotenvy::dotenv; use rstest::rstest; use serde_json::json; -use crate::data_storage::aws_s3::AWSS3; -use crate::data_storage::aws_s3::config::AWSS3Config; -use crate::data_storage::{DataStorage, DataStorageConfig}; #[rstest] #[tokio::test] @@ -31,4 +31,4 @@ async fn test_put_and_get_data_s3() -> color_eyre::Result<()> { assert_eq!(received_json, mock_data); Ok(()) -} \ No newline at end of file +} diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index c8adec2c..ae9db9df 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -1,7 +1,73 @@ +use crate::config::config; +use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; +use crate::tests::common::{build_config, drop_database}; +use color_eyre::eyre::eyre; +use dotenvy::dotenv; use rstest::*; +use uuid::Uuid; #[rstest] #[tokio::test] -async fn test_database() { - // TODO: write test case +async fn test_database_connection() -> color_eyre::Result<()> { + let init_config_error = build_config().await.is_err(); + if init_config_error { + return Err(eyre!("Not able to init config.")); + } + + Ok(()) +} + +/// Tests for `create_job` operation in database trait. +/// Creates 3 jobs and asserts them. 
+#[rstest] +#[tokio::test] +async fn test_database_create_job() -> color_eyre::Result<()> { + dotenv().ok(); + let init_config = build_config().await.is_ok(); + if !init_config { + return Err(eyre!("Not able to init config.")); + } + + drop_database().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + let job_vec = [ + get_random_job_item(JobType::ProofCreation, JobStatus::Created, 1), + get_random_job_item(JobType::ProofCreation, JobStatus::Created, 2), + get_random_job_item(JobType::ProofCreation, JobStatus::Created, 3), + ]; + + database_client.create_job(job_vec[0].clone()).await.unwrap(); + database_client.create_job(job_vec[1].clone()).await.unwrap(); + database_client.create_job(job_vec[2].clone()).await.unwrap(); + + let get_job_1 = + database_client.get_job_by_internal_id_and_type("1", &JobType::ProofCreation).await.unwrap().unwrap(); + let get_job_2 = + database_client.get_job_by_internal_id_and_type("2", &JobType::ProofCreation).await.unwrap().unwrap(); + let get_job_3 = + database_client.get_job_by_internal_id_and_type("3", &JobType::ProofCreation).await.unwrap().unwrap(); + + assert_eq!(get_job_1, job_vec[0].clone()); + assert_eq!(get_job_2, job_vec[1].clone()); + assert_eq!(get_job_3, job_vec[2].clone()); + + Ok(()) +} + +// Test Util Functions +// ========================================== + +fn get_random_job_item(job_type: JobType, job_status: JobStatus, internal_id: u64) -> JobItem { + JobItem { + id: Uuid::new_v4(), + internal_id: internal_id.to_string(), + job_type, + status: job_status, + external_id: ExternalId::Number(0), + metadata: Default::default(), + version: 0, + } } diff --git a/crates/orchestrator/src/tests/mod.rs b/crates/orchestrator/src/tests/mod.rs index 142be3cf..348e5c70 100644 --- a/crates/orchestrator/src/tests/mod.rs +++ b/crates/orchestrator/src/tests/mod.rs @@ -8,5 +8,5 @@ pub mod queue; pub mod common; mod controllers; -pub mod workers; mod data_storage; +pub mod workers; diff 
--git a/crates/settlement-clients/ethereum/src/lib.rs b/crates/settlement-clients/ethereum/src/lib.rs index 534edf48..cdf2788b 100644 --- a/crates/settlement-clients/ethereum/src/lib.rs +++ b/crates/settlement-clients/ethereum/src/lib.rs @@ -69,7 +69,7 @@ impl EthereumSettlementClient { ProviderBuilder::new().with_recommended_fillers().wallet(wallet.clone()).on_http(settlement_cfg.rpc_url), ); let core_contract_client = StarknetValidityContractClient::new( - Address::from_slice(settlement_cfg.core_contract_address.as_bytes()).0.into(), + Address::from_str(&settlement_cfg.core_contract_address).unwrap().0.into(), provider.clone(), ); From 5826d44e685b2028420558d3a596c23b11c440cb Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Wed, 24 Jul 2024 20:04:42 +0530 Subject: [PATCH 03/12] update : removed unwanted fixtures --- .github/workflows/coverage.yml | 3 ++- crates/orchestrator/src/tests/common/mod.rs | 8 +------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index d4f094eb..ba9e6ed0 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -63,7 +63,8 @@ jobs: DATA_STORAGE: "s3" MONGODB_CONNECTION_STRING: "mongodb://localhost:27017" run: | - aws --endpoint-url=http://localhost:4566 s3api create-bucket --bucket madara-orchestrator-test-bucket + awslocal s3api create-bucket --bucket madara-orchestrator-test-bucket + awslocal s3 ls cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1 - name: Upload coverage to codecov.io diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index 3f57a6f9..4265a707 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -110,14 +110,8 @@ pub async fn build_config() -> color_eyre::Result<()> { Ok(()) } -#[fixture] -pub async fn get_database_client() -> Client { - 
MongoDb::new(MongoDbConfig::new_from_env()).await.client() -} - -#[fixture] pub async fn drop_database() -> color_eyre::Result<()> { - let db_client: Client = get_database_client().await; + let db_client: Client = MongoDb::new(MongoDbConfig::new_from_env()).await.client(); // dropping `jobs` collection. db_client.database("orchestrator").collection::("jobs").drop(None).await?; Ok(()) From 0e70fff85ab96d4f248967027006da49039194ba Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Wed, 24 Jul 2024 20:05:39 +0530 Subject: [PATCH 04/12] update : removed unwanted fixtures --- .github/workflows/coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index ba9e6ed0..ef9d662b 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -27,7 +27,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v2 with: - python-version: '3.x' + python-version: "3.x" - name: Install dependencies run: | From 97f1f210242357a68e318fa9388cd3be4223ef2f Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Wed, 24 Jul 2024 20:11:09 +0530 Subject: [PATCH 05/12] update : added mongo db runner in ci --- .github/workflows/coverage.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index ef9d662b..2ce12339 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -20,6 +20,10 @@ jobs: AWS_SECRET_ACCESS_KEY: "AWS_SECRET_ACCESS_KEY" ports: - 4566:4566 + mongodb: + image: mongo:latest + ports: + - 27017:27017 steps: - uses: actions/checkout@v3 @@ -29,9 +33,20 @@ jobs: with: python-version: "3.x" + - name: Set up Node.js + uses: actions/setup-node@v2 + with: + node-version: "14" + - name: Install dependencies run: | pip install awscli-local + npm ci + + - name: Verify MongoDB connection + run: | + sudo apt-get install -y mongodb-clients + mongosh --eval "db.runCommand({ping:1})" # 
selecting a toolchain either by action or manual `rustup` calls should happen # before the plugin, as the cache uses the current rustc version as its cache key From a64613cefeb39bc99020917dc1b9a43131cfc3e0 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Wed, 24 Jul 2024 20:15:06 +0530 Subject: [PATCH 06/12] update : added mongo db runner in ci --- .github/workflows/coverage.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 2ce12339..d0b842ad 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -41,7 +41,6 @@ jobs: - name: Install dependencies run: | pip install awscli-local - npm ci - name: Verify MongoDB connection run: | From db472d538a4cc7f3ff5bf3e5769b1f571f940ad4 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Wed, 24 Jul 2024 20:18:53 +0530 Subject: [PATCH 07/12] update : added mongo db runner in ci --- .github/workflows/coverage.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index d0b842ad..c97218b0 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -33,20 +33,10 @@ jobs: with: python-version: "3.x" - - name: Set up Node.js - uses: actions/setup-node@v2 - with: - node-version: "14" - - name: Install dependencies run: | pip install awscli-local - - name: Verify MongoDB connection - run: | - sudo apt-get install -y mongodb-clients - mongosh --eval "db.runCommand({ping:1})" - # selecting a toolchain either by action or manual `rustup` calls should happen # before the plugin, as the cache uses the current rustc version as its cache key - run: rustup show From 15541ef49dcb53ef579457f7e6bd3498f1635d36 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 26 Jul 2024 01:08:26 +0530 Subject: [PATCH 08/12] update : updated with new changes and ci --- .env.example | 3 -- .env.test.example | 33 +++++++++++++ .github/workflows/coverage.yml | 44 
++++++----------- .gitignore | 1 + CHANGELOG.md | 3 ++ crates/orchestrator/src/constants.rs | 1 - .../src/controllers/jobs_controller.rs | 24 --------- crates/orchestrator/src/controllers/mod.rs | 3 -- .../src/data_storage/aws_s3/config.rs | 2 + .../src/data_storage/aws_s3/mod.rs | 20 ++++++-- crates/orchestrator/src/data_storage/mod.rs | 2 + crates/orchestrator/src/queue/job_queue.rs | 4 +- crates/orchestrator/src/routes.rs | 14 +----- crates/orchestrator/src/tests/common/mod.rs | 11 +++-- .../orchestrator/src/tests/controllers/mod.rs | 49 ------------------- .../src/tests/data_storage/mod.rs | 4 +- crates/orchestrator/src/tests/database/mod.rs | 10 ++-- crates/orchestrator/src/tests/mod.rs | 1 - .../src/tests/workers/snos/mod.rs | 2 +- 19 files changed, 90 insertions(+), 141 deletions(-) create mode 100644 .env.test.example delete mode 100644 crates/orchestrator/src/controllers/jobs_controller.rs delete mode 100644 crates/orchestrator/src/tests/controllers/mod.rs diff --git a/.env.example b/.env.example index 37723425..57635205 100644 --- a/.env.example +++ b/.env.example @@ -28,6 +28,3 @@ AWS_SECRET_ACCESS_KEY= # S3 AWS_S3_BUCKET_NAME= AWS_S3_BUCKET_REGION= - -# Local Stack -AWS_ENDPOINT_URL= \ No newline at end of file diff --git a/.env.test.example b/.env.test.example new file mode 100644 index 00000000..e55b97fc --- /dev/null +++ b/.env.test.example @@ -0,0 +1,33 @@ +HOST= +PORT= +DATABASE_URL= +MADARA_RPC_URL= +DA_LAYER= +SETTLEMENT_LAYER= + +# Ethereum +ETHEREUM_PRIVATE_KEY= +ETHEREUM_RPC_URL= +MEMORY_PAGES_CONTRACT_ADDRESS= +STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS= + + +# Starknet +STARKNET_PUBLIC_KEY= +STARNET_PRIVATE_KEY= +STARKNET_RPC_URL= +STARKNET_CAIRO_CORE_CONTRACT_ADDRESS= + +# MongoDB connection string +MONGODB_CONNECTION_STRING= + +# SQS +AWS_ACCESS_KEY_ID= +AWS_SECRET_ACCESS_KEY= + +# S3 +AWS_S3_BUCKET_NAME= +AWS_S3_BUCKET_REGION= + +# AWS Local Stack URL +AWS_ENDPOINT_URL= diff --git a/.github/workflows/coverage.yml 
b/.github/workflows/coverage.yml index c97218b0..d5d10b8d 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -28,15 +28,6 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: "3.x" - - - name: Install dependencies - run: | - pip install awscli-local - # selecting a toolchain either by action or manual `rustup` calls should happen # before the plugin, as the cache uses the current rustc version as its cache key - run: rustup show @@ -49,26 +40,23 @@ jobs: cargo llvm-cov clean --workspace - name: Run llvm-cov - env: - AWS_ACCESS_KEY_ID: "AWS_ACCESS_KEY_ID" - AWS_SECRET_ACCESS_KEY: "AWS_SECRET_ACCESS_KEY" - AWS_S3_BUCKET_NAME: "madara-orchestrator-test-bucket" - AWS_S3_BUCKET_REGION: "us-east-1" - AWS_ENDPOINT_URL: "http://localhost.localstack.cloud:4566" - MADARA_RPC_URL: "http://localhost:3000" - ETHEREUM_RPC_URL: "http://localhost:3001" - MEMORY_PAGES_CONTRACT_ADDRESS: "0x000000000000000000000000000000000001dead" - PRIVATE_KEY: "0xdead" - ETHEREUM_PRIVATE_KEY: "0x000000000000000000000000000000000000000000000000000000000000beef" - STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS: "0x000000000000000000000000000000000002dead" - DA_LAYER: "ethereum" - PROVER_SERVICE: "sharp" - SETTLEMENT_LAYER: "ethereum" - DATA_STORAGE: "s3" - MONGODB_CONNECTION_STRING: "mongodb://localhost:27017" run: | - awslocal s3api create-bucket --bucket madara-orchestrator-test-bucket - awslocal s3 ls + echo 'AWS_ACCESS_KEY_ID="AWS_ACCESS_KEY_ID"' >> .env.test + echo 'AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY"' >> .env.test + echo 'AWS_S3_BUCKET_NAME="madara-orchestrator-test-bucket"' >> .env.test + echo 'AWS_S3_BUCKET_REGION="us-east-1"' >> .env.test + echo 'AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566"' >> .env.test + echo 'MADARA_RPC_URL="http://localhost:3000"' >> .env.test + echo 'ETHEREUM_RPC_URL="http://localhost:3001"' >> .env.test + echo 
'MEMORY_PAGES_CONTRACT_ADDRESS="0x000000000000000000000000000000000001dead"' >> .env.test + echo 'PRIVATE_KEY="0xdead"' >> .env.test + echo 'ETHEREUM_PRIVATE_KEY="0x000000000000000000000000000000000000000000000000000000000000beef"' >> .env.test + echo 'STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS="0x000000000000000000000000000000000002dead"' >> .env.test + echo 'DA_LAYER="ethereum"' >> .env.test + echo 'PROVER_SERVICE="sharp"' >> .env.test + echo 'SETTLEMENT_LAYER="ethereum"' >> .env.test + echo 'DATA_STORAGE="s3"' >> .env.test + echo 'MONGODB_CONNECTION_STRING="mongodb://localhost:27017"' >> .env.test cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1 - name: Upload coverage to codecov.io diff --git a/.gitignore b/.gitignore index cc424bf3..76cd1131 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ .env .idea .DS_Store +.env.test *.code-workspace .vscode diff --git a/CHANGELOG.md b/CHANGELOG.md index 40fd1c8d..70c41f6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,9 +9,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). - Function to calculate the kzg proof of x_0. - Tests for updating the state. - Function to update the state and publish blob on ethereum in state update job. +- Fixtures for testing. ## Changed +- GitHub's coverage CI yml file for localstack and db testing. 
+ ## Removed - `fetch_from_test` argument diff --git a/crates/orchestrator/src/constants.rs b/crates/orchestrator/src/constants.rs index 1fd645a9..9361d764 100644 --- a/crates/orchestrator/src/constants.rs +++ b/crates/orchestrator/src/constants.rs @@ -1,3 +1,2 @@ pub const BLOB_DATA_FILE_NAME: &str = "blob_data.txt"; pub const SNOS_OUTPUT_FILE_NAME: &str = "snos_output.json"; -pub const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue"; diff --git a/crates/orchestrator/src/controllers/jobs_controller.rs b/crates/orchestrator/src/controllers/jobs_controller.rs deleted file mode 100644 index e5f0daf8..00000000 --- a/crates/orchestrator/src/controllers/jobs_controller.rs +++ /dev/null @@ -1,24 +0,0 @@ -use std::collections::HashMap; - -use axum::extract::Json; -use serde::Deserialize; - -use crate::controllers::errors::AppError; -use crate::jobs::types::JobType; - -/// Client request to create a job -#[derive(Debug, Deserialize)] -pub struct CreateJobRequest { - /// Job type - pub job_type: JobType, - /// Internal id must be a way to identify the job. For example - /// block_no, transaction_hash etc. The (job_type, internal_id) - /// pair must be unique. 
- pub internal_id: String, -} - -/// Create a job -pub async fn create_job(Json(payload): Json) -> Result, AppError> { - crate::jobs::create_job(payload.job_type, payload.internal_id, HashMap::new()).await?; - Ok(Json::from(())) -} diff --git a/crates/orchestrator/src/controllers/mod.rs b/crates/orchestrator/src/controllers/mod.rs index aadb38b7..8575ccdc 100644 --- a/crates/orchestrator/src/controllers/mod.rs +++ b/crates/orchestrator/src/controllers/mod.rs @@ -1,5 +1,2 @@ /// Errors mod errors; - -/// Job controllers -pub mod jobs_controller; diff --git a/crates/orchestrator/src/data_storage/aws_s3/config.rs b/crates/orchestrator/src/data_storage/aws_s3/config.rs index d4518d85..06eeaff8 100644 --- a/crates/orchestrator/src/data_storage/aws_s3/config.rs +++ b/crates/orchestrator/src/data_storage/aws_s3/config.rs @@ -13,6 +13,7 @@ pub struct AWSS3Config { /// S3 Bucket region pub s3_bucket_region: String, /// Endpoint url + #[cfg(test)] pub endpoint_url: String, } @@ -25,6 +26,7 @@ impl DataStorageConfig for AWSS3Config { s3_key_secret: get_env_var_or_panic("AWS_SECRET_ACCESS_KEY"), s3_bucket_name: get_env_var_or_panic("AWS_S3_BUCKET_NAME"), s3_bucket_region: get_env_var_or_panic("AWS_S3_BUCKET_REGION"), + #[cfg(test)] endpoint_url: get_env_var_or_panic("AWS_ENDPOINT_URL"), } } diff --git a/crates/orchestrator/src/data_storage/aws_s3/mod.rs b/crates/orchestrator/src/data_storage/aws_s3/mod.rs index da673eba..50ae6e3e 100644 --- a/crates/orchestrator/src/data_storage/aws_s3/mod.rs +++ b/crates/orchestrator/src/data_storage/aws_s3/mod.rs @@ -32,11 +32,15 @@ impl AWSS3 { "loaded_from_custom_env", ); let region = Region::new(config.s3_bucket_region.clone().to_string()); - let conf_builder = Builder::new() - .region(region) - .credentials_provider(credentials) - .endpoint_url(config.endpoint_url.clone()) - .force_path_style(true); + + #[allow(unused_mut)] + let mut conf_builder = Builder::new().region(region).credentials_provider(credentials).force_path_style(true); + 
+ #[cfg(test)] + { + conf_builder = conf_builder.endpoint_url(config.endpoint_url.clone().to_string()); + } + let conf = conf_builder.build(); // Building AWS S3 config @@ -72,4 +76,10 @@ impl DataStorage for AWSS3 { Ok(()) } + + #[cfg(test)] + async fn build_test_bucket(&self, bucket_name: &str) -> Result<()> { + self.client.create_bucket().bucket(bucket_name).send().await?; + Ok(()) + } } diff --git a/crates/orchestrator/src/data_storage/mod.rs b/crates/orchestrator/src/data_storage/mod.rs index f259bf61..b3ff74fd 100644 --- a/crates/orchestrator/src/data_storage/mod.rs +++ b/crates/orchestrator/src/data_storage/mod.rs @@ -17,6 +17,8 @@ use mockall::automock; pub trait DataStorage: Send + Sync { async fn get_data(&self, key: &str) -> Result; async fn put_data(&self, data: Bytes, key: &str) -> Result<()>; + #[cfg(test)] + async fn build_test_bucket(&self, bucket_name: &str) -> Result<()>; } /// **DataStorageConfig** : Trait method to represent the config struct needed for diff --git a/crates/orchestrator/src/queue/job_queue.rs b/crates/orchestrator/src/queue/job_queue.rs index 9432276f..aebba4bf 100644 --- a/crates/orchestrator/src/queue/job_queue.rs +++ b/crates/orchestrator/src/queue/job_queue.rs @@ -12,8 +12,8 @@ use uuid::Uuid; use crate::config::config; use crate::jobs::{process_job, verify_job}; -const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue"; -const JOB_VERIFICATION_QUEUE: &str = "madara_orchestrator_job_verification_queue"; +pub const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue"; +pub const JOB_VERIFICATION_QUEUE: &str = "madara_orchestrator_job_verification_queue"; #[derive(Debug, Serialize, Deserialize)] pub struct JobQueueMessage { diff --git a/crates/orchestrator/src/routes.rs b/crates/orchestrator/src/routes.rs index 39d8f3d4..877e5b88 100644 --- a/crates/orchestrator/src/routes.rs +++ b/crates/orchestrator/src/routes.rs @@ -1,16 +1,10 @@ use axum::http::StatusCode; use 
axum::response::IntoResponse; -use axum::routing::{get, post}; +use axum::routing::get; use axum::Router; -use crate::controllers::jobs_controller; - pub fn app_router() -> Router { - Router::new() - .route("/health", get(root)) - .nest("/v1/dev", dev_routes()) - .nest("/v1/job", job_routes()) - .fallback(handler_404) + Router::new().route("/health", get(root)).nest("/v1/dev", dev_routes()).fallback(handler_404) } async fn root() -> &'static str { @@ -21,10 +15,6 @@ async fn handler_404() -> impl IntoResponse { (StatusCode::NOT_FOUND, "The requested resource was not found") } -fn job_routes() -> Router { - Router::new().route("/create_job", post(jobs_controller::create_job)) -} - fn dev_routes() -> Router { Router::new() } diff --git a/crates/orchestrator/src/tests/common/mod.rs b/crates/orchestrator/src/tests/common/mod.rs index 4265a707..0a97fa33 100644 --- a/crates/orchestrator/src/tests/common/mod.rs +++ b/crates/orchestrator/src/tests/common/mod.rs @@ -6,7 +6,6 @@ use std::sync::Arc; use ::uuid::Uuid; use constants::*; use da_client_interface::MockDaClient; -use dotenvy::dotenv; use mongodb::Client; use prover_client_interface::MockProverClient; use rstest::*; @@ -17,7 +16,7 @@ use url::Url; use utils::env_utils::get_env_var_or_panic; use utils::settings::default::DefaultSettingsProvider; -use crate::config::{config_force_init, Config}; +use crate::config::{build_storage_client, config_force_init, Config}; use crate::data_storage::MockDataStorage; use crate::database::mongodb::config::MongoDbConfig; use crate::database::mongodb::MongoDb; @@ -84,7 +83,8 @@ pub fn custom_job_item(default_job_item: JobItem, #[default(String::from("0"))] /// For implementation of integration tests #[fixture] pub async fn build_config() -> color_eyre::Result<()> { - dotenv().ok(); + // Getting .env.test variables + dotenvy::from_filename("../.env.test")?; // init starknet client let provider = JsonRpcClient::new(HttpTransport::new( @@ -101,7 +101,10 @@ pub async fn build_config() 
-> color_eyre::Result<()> { let settings_provider = DefaultSettingsProvider {}; let settlement_client = crate::config::build_settlement_client(&settings_provider).await; let prover_client = crate::config::build_prover_service(&settings_provider); - let storage_client = crate::config::build_storage_client().await; + let storage_client = build_storage_client().await; + + // building a test bucket : + storage_client.build_test_bucket(&get_env_var_or_panic("AWS_S3_BUCKET_NAME")).await?; let config = Config::new(Arc::new(provider), da_client, prover_client, settlement_client, database, queue, storage_client); diff --git a/crates/orchestrator/src/tests/controllers/mod.rs b/crates/orchestrator/src/tests/controllers/mod.rs deleted file mode 100644 index b5849a86..00000000 --- a/crates/orchestrator/src/tests/controllers/mod.rs +++ /dev/null @@ -1,49 +0,0 @@ -use crate::config::config_force_init; -use crate::constants::JOB_PROCESSING_QUEUE; -use crate::controllers::jobs_controller::{create_job, CreateJobRequest}; -use crate::database::MockDatabase; -use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; -use crate::queue::MockQueueProvider; -use crate::tests::common::init_config; -use axum::Json; -use mockall::predicate::eq; -use rstest::rstest; -use uuid::Uuid; - -#[rstest] -#[tokio::test] -async fn test_create_job_jobs_controller() -> color_eyre::Result<()> { - let mut db = MockDatabase::new(); - let mut queue = MockQueueProvider::new(); - - // mocking db get function (when creating job it should return no job existing) - db.expect_get_last_successful_job_by_type().times(1).with(eq(JobType::SnosRun)).returning(|_| Ok(None)); - // mocking db get function (when creating job to pre-check if job is not existing : worker module) - db.expect_get_job_by_internal_id_and_type().times(1).with(eq("1"), eq(JobType::SnosRun)).returning(|_, _| Ok(None)); - // mocking creation of the job - db.expect_create_job().times(1).withf(move |item| item.internal_id == 
*"1").returning(move |_| { - Ok(JobItem { - id: Uuid::new_v4(), - internal_id: "1".to_string(), - job_type: JobType::SnosRun, - status: JobStatus::Created, - external_id: ExternalId::Number(0), - metadata: Default::default(), - version: 0, - }) - }); - // mocking sending of the job into the queue after the creation - queue - .expect_send_message_to_queue() - .returning(|_, _, _| Ok(())) - .withf(|queue, _payload, _delay| queue == JOB_PROCESSING_QUEUE); - - let config = init_config(None, Some(db), Some(queue), None, None, None, None).await; - config_force_init(config).await; - - let create_job_request = CreateJobRequest { job_type: JobType::SnosRun, internal_id: "1".to_string() }; - - let _ = create_job(Json::from(create_job_request)).await.unwrap(); - - Ok(()) -} diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs index 8436a808..f007a457 100644 --- a/crates/orchestrator/src/tests/data_storage/mod.rs +++ b/crates/orchestrator/src/tests/data_storage/mod.rs @@ -2,14 +2,14 @@ use crate::data_storage::aws_s3::config::AWSS3Config; use crate::data_storage::aws_s3::AWSS3; use crate::data_storage::{DataStorage, DataStorageConfig}; use bytes::Bytes; -use dotenvy::dotenv; use rstest::rstest; use serde_json::json; #[rstest] #[tokio::test] async fn test_put_and_get_data_s3() -> color_eyre::Result<()> { - dotenv().ok(); + dotenvy::from_filename("../.env.test")?; + let config = AWSS3Config::new_from_env(); let s3_client = AWSS3::new(config).await; diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index ae9db9df..5e8a651c 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -2,7 +2,6 @@ use crate::config::config; use crate::jobs::types::{ExternalId, JobItem, JobStatus, JobType}; use crate::tests::common::{build_config, drop_database}; use color_eyre::eyre::eyre; -use dotenvy::dotenv; use rstest::*; use 
uuid::Uuid; @@ -22,7 +21,6 @@ async fn test_database_connection() -> color_eyre::Result<()> { #[rstest] #[tokio::test] async fn test_database_create_job() -> color_eyre::Result<()> { - dotenv().ok(); let init_config = build_config().await.is_ok(); if !init_config { return Err(eyre!("Not able to init config.")); @@ -34,9 +32,9 @@ async fn test_database_create_job() -> color_eyre::Result<()> { let database_client = config.database(); let job_vec = [ - get_random_job_item(JobType::ProofCreation, JobStatus::Created, 1), - get_random_job_item(JobType::ProofCreation, JobStatus::Created, 2), - get_random_job_item(JobType::ProofCreation, JobStatus::Created, 3), + build_job_item(JobType::ProofCreation, JobStatus::Created, 1), + build_job_item(JobType::ProofCreation, JobStatus::Created, 2), + build_job_item(JobType::ProofCreation, JobStatus::Created, 3), ]; database_client.create_job(job_vec[0].clone()).await.unwrap(); @@ -60,7 +58,7 @@ async fn test_database_create_job() -> color_eyre::Result<()> { // Test Util Functions // ========================================== -fn get_random_job_item(job_type: JobType, job_status: JobStatus, internal_id: u64) -> JobItem { +fn build_job_item(job_type: JobType, job_status: JobStatus, internal_id: u64) -> JobItem { JobItem { id: Uuid::new_v4(), internal_id: internal_id.to_string(), diff --git a/crates/orchestrator/src/tests/mod.rs b/crates/orchestrator/src/tests/mod.rs index 348e5c70..83dfc04c 100644 --- a/crates/orchestrator/src/tests/mod.rs +++ b/crates/orchestrator/src/tests/mod.rs @@ -7,6 +7,5 @@ pub mod server; pub mod queue; pub mod common; -mod controllers; mod data_storage; pub mod workers; diff --git a/crates/orchestrator/src/tests/workers/snos/mod.rs b/crates/orchestrator/src/tests/workers/snos/mod.rs index c4e95902..7799ffb2 100644 --- a/crates/orchestrator/src/tests/workers/snos/mod.rs +++ b/crates/orchestrator/src/tests/workers/snos/mod.rs @@ -1,7 +1,7 @@ use crate::config::config_force_init; -use 
crate::constants::JOB_PROCESSING_QUEUE; use crate::database::MockDatabase; use crate::jobs::types::JobType; +use crate::queue::job_queue::JOB_PROCESSING_QUEUE; use crate::queue::MockQueueProvider; use crate::tests::common::init_config; use crate::tests::workers::utils::get_job_item_mock_by_id; From fc7d4938bc477707fd38d47d083035da43efd157 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 26 Jul 2024 14:53:42 +0530 Subject: [PATCH 09/12] update : updated test cases for s3 client --- crates/orchestrator/src/tests/data_storage/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/orchestrator/src/tests/data_storage/mod.rs b/crates/orchestrator/src/tests/data_storage/mod.rs index f007a457..ef1dd0ec 100644 --- a/crates/orchestrator/src/tests/data_storage/mod.rs +++ b/crates/orchestrator/src/tests/data_storage/mod.rs @@ -1,6 +1,7 @@ use crate::data_storage::aws_s3::config::AWSS3Config; use crate::data_storage::aws_s3::AWSS3; use crate::data_storage::{DataStorage, DataStorageConfig}; +use crate::tests::common::build_config; use bytes::Bytes; use rstest::rstest; use serde_json::json; @@ -8,6 +9,7 @@ use serde_json::json; #[rstest] #[tokio::test] async fn test_put_and_get_data_s3() -> color_eyre::Result<()> { + build_config().await?; dotenvy::from_filename("../.env.test")?; let config = AWSS3Config::new_from_env(); From 9a101ba76614366d76f05baeb71b4b4f749fc193 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 26 Jul 2024 14:56:44 +0530 Subject: [PATCH 10/12] update : added .env.test file in the commit --- .env.test | 24 ++++++++++++++++++++++++ .env.test.example | 33 --------------------------------- .github/workflows/coverage.yml | 16 ---------------- .gitignore | 1 - 4 files changed, 24 insertions(+), 50 deletions(-) create mode 100644 .env.test delete mode 100644 .env.test.example diff --git a/.env.test b/.env.test new file mode 100644 index 00000000..75cfea32 --- /dev/null +++ b/.env.test @@ -0,0 +1,24 @@ +##### AWS config ##### + 
+AWS_ACCESS_KEY_ID="AWS_ACCESS_KEY_ID" +AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY" +AWS_S3_BUCKET_NAME="madara-orchestrator-test-bucket" +AWS_S3_BUCKET_REGION="us-east-1" +AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566" + +##### On chain config ##### + +MADARA_RPC_URL="http://localhost:3000" +ETHEREUM_RPC_URL="http://localhost:3001" +MEMORY_PAGES_CONTRACT_ADDRESS="0x000000000000000000000000000000000001dead" +PRIVATE_KEY="0xdead" +ETHEREUM_PRIVATE_KEY="0x000000000000000000000000000000000000000000000000000000000000beef" +STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS="0x000000000000000000000000000000000002dead" + +##### Config URLs ##### + +DA_LAYER="ethereum" +PROVER_SERVICE="sharp" +SETTLEMENT_LAYER="ethereum" +DATA_STORAGE="s3" +MONGODB_CONNECTION_STRING="mongodb://localhost:27017" diff --git a/.env.test.example b/.env.test.example deleted file mode 100644 index e55b97fc..00000000 --- a/.env.test.example +++ /dev/null @@ -1,33 +0,0 @@ -HOST= -PORT= -DATABASE_URL= -MADARA_RPC_URL= -DA_LAYER= -SETTLEMENT_LAYER= - -# Ethereum -ETHEREUM_PRIVATE_KEY= -ETHEREUM_RPC_URL= -MEMORY_PAGES_CONTRACT_ADDRESS= -STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS= - - -# Starknet -STARKNET_PUBLIC_KEY= -STARNET_PRIVATE_KEY= -STARKNET_RPC_URL= -STARKNET_CAIRO_CORE_CONTRACT_ADDRESS= - -# MongoDB connection string -MONGODB_CONNECTION_STRING= - -# SQS -AWS_ACCESS_KEY_ID= -AWS_SECRET_ACCESS_KEY= - -# S3 -AWS_S3_BUCKET_NAME= -AWS_S3_BUCKET_REGION= - -# AWS Local Stack URL -AWS_ENDPOINT_URL= diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index d5d10b8d..a55082e6 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -41,22 +41,6 @@ jobs: - name: Run llvm-cov run: | - echo 'AWS_ACCESS_KEY_ID="AWS_ACCESS_KEY_ID"' >> .env.test - echo 'AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY"' >> .env.test - echo 'AWS_S3_BUCKET_NAME="madara-orchestrator-test-bucket"' >> .env.test - echo 'AWS_S3_BUCKET_REGION="us-east-1"' >> .env.test - echo 
'AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566"' >> .env.test - echo 'MADARA_RPC_URL="http://localhost:3000"' >> .env.test - echo 'ETHEREUM_RPC_URL="http://localhost:3001"' >> .env.test - echo 'MEMORY_PAGES_CONTRACT_ADDRESS="0x000000000000000000000000000000000001dead"' >> .env.test - echo 'PRIVATE_KEY="0xdead"' >> .env.test - echo 'ETHEREUM_PRIVATE_KEY="0x000000000000000000000000000000000000000000000000000000000000beef"' >> .env.test - echo 'STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS="0x000000000000000000000000000000000002dead"' >> .env.test - echo 'DA_LAYER="ethereum"' >> .env.test - echo 'PROVER_SERVICE="sharp"' >> .env.test - echo 'SETTLEMENT_LAYER="ethereum"' >> .env.test - echo 'DATA_STORAGE="s3"' >> .env.test - echo 'MONGODB_CONNECTION_STRING="mongodb://localhost:27017"' >> .env.test cargo llvm-cov nextest --release --lcov --output-path lcov.info --test-threads=1 - name: Upload coverage to codecov.io diff --git a/.gitignore b/.gitignore index 76cd1131..cc424bf3 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,6 @@ .env .idea .DS_Store -.env.test *.code-workspace .vscode From 906a1eb2efa7706d593d0d065ed42132cf406465 Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 26 Jul 2024 15:57:21 +0530 Subject: [PATCH 11/12] feat : added database necessary tests --- crates/orchestrator/src/tests/database/mod.rs | 128 ++++++++++++++++++ 1 file changed, 128 insertions(+) diff --git a/crates/orchestrator/src/tests/database/mod.rs b/crates/orchestrator/src/tests/database/mod.rs index 5e8a651c..29ce7847 100644 --- a/crates/orchestrator/src/tests/database/mod.rs +++ b/crates/orchestrator/src/tests/database/mod.rs @@ -55,6 +55,134 @@ async fn test_database_create_job() -> color_eyre::Result<()> { Ok(()) } +/// Test for `get_jobs_without_successor` operation in database trait. 
+/// Creates jobs in the following sequence : +/// +/// - Creates 3 snos run jobs with completed status +/// +/// - Creates 2 proof creation jobs with succession of the 2 snos jobs +/// +/// - Should return one snos job without the successor job of proof creation +#[rstest] +#[tokio::test] +async fn test_database_get_jobs_without_successor() -> color_eyre::Result<()> { + let init_config = build_config().await.is_ok(); + if !init_config { + return Err(eyre!("Not able to init config.")); + } + + drop_database().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + let job_vec = [ + build_job_item(JobType::SnosRun, JobStatus::Completed, 1), + build_job_item(JobType::SnosRun, JobStatus::Completed, 2), + build_job_item(JobType::SnosRun, JobStatus::Completed, 3), + build_job_item(JobType::ProofCreation, JobStatus::Created, 1), + build_job_item(JobType::ProofCreation, JobStatus::Created, 3), + ]; + + database_client.create_job(job_vec[0].clone()).await.unwrap(); + database_client.create_job(job_vec[1].clone()).await.unwrap(); + database_client.create_job(job_vec[2].clone()).await.unwrap(); + database_client.create_job(job_vec[3].clone()).await.unwrap(); + database_client.create_job(job_vec[4].clone()).await.unwrap(); + + let jobs_without_successor = database_client + .get_jobs_without_successor(JobType::SnosRun, JobStatus::Completed, JobType::ProofCreation) + .await + .unwrap(); + + assert_eq!(jobs_without_successor.len(), 1, "Expected number of jobs assertion failed."); + assert_eq!(jobs_without_successor[0], job_vec[1], "Expected job assertion failed."); + + Ok(()) +} + +/// Test for `get_last_successful_job_by_type` operation in database trait. +/// Creates the jobs in following sequence : +/// +/// - Creates 3 successful jobs. 
+/// +/// - Should return the last successful job +#[rstest] +#[tokio::test] +async fn test_database_get_last_successful_job_by_type() -> color_eyre::Result<()> { + let init_config = build_config().await.is_ok(); + if !init_config { + return Err(eyre!("Not able to init config.")); + } + + drop_database().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + let job_vec = [ + build_job_item(JobType::SnosRun, JobStatus::Completed, 1), + build_job_item(JobType::SnosRun, JobStatus::Completed, 2), + build_job_item(JobType::SnosRun, JobStatus::Completed, 3), + ]; + + database_client.create_job(job_vec[0].clone()).await.unwrap(); + database_client.create_job(job_vec[1].clone()).await.unwrap(); + database_client.create_job(job_vec[2].clone()).await.unwrap(); + + let last_successful_job = database_client.get_last_successful_job_by_type(JobType::SnosRun).await.unwrap(); + + assert_eq!(last_successful_job.unwrap(), job_vec[2], "Expected job assertion failed"); + + Ok(()) +} + +/// Test for `get_jobs_after_internal_id_by_job_type` operation in database trait. +/// Creates the jobs in following sequence : +/// +/// - Creates 5 successful jobs. 
+/// +/// - Should return the jobs after internal id +#[rstest] +#[tokio::test] +async fn test_database_get_jobs_after_internal_id_by_job_type() -> color_eyre::Result<()> { + let init_config = build_config().await.is_ok(); + if !init_config { + return Err(eyre!("Not able to init config.")); + } + + drop_database().await.unwrap(); + + let config = config().await; + let database_client = config.database(); + + let job_vec = [ + build_job_item(JobType::SnosRun, JobStatus::Completed, 1), + build_job_item(JobType::SnosRun, JobStatus::Completed, 2), + build_job_item(JobType::SnosRun, JobStatus::Completed, 3), + build_job_item(JobType::SnosRun, JobStatus::Completed, 4), + build_job_item(JobType::SnosRun, JobStatus::Completed, 5), + ]; + + database_client.create_job(job_vec[0].clone()).await.unwrap(); + database_client.create_job(job_vec[1].clone()).await.unwrap(); + database_client.create_job(job_vec[2].clone()).await.unwrap(); + database_client.create_job(job_vec[3].clone()).await.unwrap(); + database_client.create_job(job_vec[4].clone()).await.unwrap(); + + let jobs_after_internal_id = database_client + .get_jobs_after_internal_id_by_job_type(JobType::SnosRun, JobStatus::Completed, "2".to_string()) + .await + .unwrap(); + + assert_eq!(jobs_after_internal_id.len(), 3, "Number of jobs assertion failed"); + assert_eq!(jobs_after_internal_id[0], job_vec[2]); + assert_eq!(jobs_after_internal_id[1], job_vec[3]); + assert_eq!(jobs_after_internal_id[2], job_vec[4]); + + Ok(()) +} + // Test Util Functions // ========================================== From 65d66e6a621f05d2151a131f19bb0fc9725aaa0b Mon Sep 17 00:00:00 2001 From: Arun Jangra Date: Fri, 26 Jul 2024 15:58:14 +0530 Subject: [PATCH 12/12] feat : added database necessary tests --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 70c41f6d..9a22504e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ The format is based on [Keep a 
Changelog](https://keepachangelog.com/en/1.1.0/). - Tests for updating the state. - Function to update the state and publish blob on ethereum in state update job. - Fixtures for testing. +- Tests for database client. ## Changed