Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feat: database operation tests #58

Closed
wants to merge 12 commits into from
24 changes: 24 additions & 0 deletions .env.test
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
##### AWS config #####

AWS_ACCESS_KEY_ID="AWS_ACCESS_KEY_ID"
AWS_SECRET_ACCESS_KEY="AWS_SECRET_ACCESS_KEY"
AWS_S3_BUCKET_NAME="madara-orchestrator-test-bucket"
AWS_S3_BUCKET_REGION="us-east-1"
AWS_ENDPOINT_URL="http://localhost.localstack.cloud:4566"

##### On chain config #####

MADARA_RPC_URL="http://localhost:3000"
ETHEREUM_RPC_URL="http://localhost:3001"
MEMORY_PAGES_CONTRACT_ADDRESS="0x000000000000000000000000000000000001dead"
PRIVATE_KEY="0xdead"
ETHEREUM_PRIVATE_KEY="0x000000000000000000000000000000000000000000000000000000000000beef"
STARKNET_SOLIDITY_CORE_CONTRACT_ADDRESS="0x000000000000000000000000000000000002dead"

##### Config URLs #####

DA_LAYER="ethereum"
PROVER_SERVICE="sharp"
SETTLEMENT_LAYER="ethereum"
DATA_STORAGE="s3"
MONGODB_CONNECTION_STRING="mongodb://localhost:27017"
15 changes: 15 additions & 0 deletions .github/workflows/coverage.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,21 @@ jobs:
# sadly, for now we have to "rebuild" for the coverage
runs-on: ubuntu-latest

services:
localstack:
image: localstack/localstack
env:
SERVICES: s3, sqs
DEFAULT_REGION: us-east-1
AWS_ACCESS_KEY_ID: "AWS_ACCESS_KEY_ID"
AWS_SECRET_ACCESS_KEY: "AWS_SECRET_ACCESS_KEY"
ports:
- 4566:4566
mongodb:
image: mongo:latest
ports:
- 27017:27017

steps:
- uses: actions/checkout@v3

Expand Down
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,13 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
- Function to calculate the kzg proof of x_0.
- Tests for updating the state.
- Function to update the state and publish blob on ethereum in state update job.
- Fixtures for testing.
- Tests for database client.

## Changed

- GitHub's coverage CI yml file for localstack and db testing.

## Removed

- `fetch_from_test` argument
Expand Down
10 changes: 6 additions & 4 deletions crates/orchestrator/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ pub async fn config_force_init(config: Config) {
}

/// Builds the DA client based on the environment variable DA_LAYER
async fn build_da_client() -> Box<dyn DaClient + Send + Sync> {
pub async fn build_da_client() -> Box<dyn DaClient + Send + Sync> {
match get_env_var_or_panic("DA_LAYER").as_str() {
"ethereum" => {
let config = EthereumDaConfig::new_from_env();
Expand All @@ -159,23 +159,25 @@ async fn build_da_client() -> Box<dyn DaClient + Send + Sync> {
}

/// Builds the prover service based on the environment variable PROVER_SERVICE
fn build_prover_service(settings_provider: &impl SettingsProvider) -> Box<dyn ProverClient> {
pub fn build_prover_service(settings_provider: &impl SettingsProvider) -> Box<dyn ProverClient> {
match get_env_var_or_panic("PROVER_SERVICE").as_str() {
"sharp" => Box::new(SharpProverService::with_settings(settings_provider)),
_ => panic!("Unsupported prover service"),
}
}

/// Builds the settlement client depending on the env variable SETTLEMENT_LAYER
async fn build_settlement_client(settings_provider: &impl SettingsProvider) -> Box<dyn SettlementClient + Send + Sync> {
pub async fn build_settlement_client(
settings_provider: &impl SettingsProvider,
) -> Box<dyn SettlementClient + Send + Sync> {
match get_env_var_or_panic("SETTLEMENT_LAYER").as_str() {
"ethereum" => Box::new(EthereumSettlementClient::with_settings(settings_provider)),
"starknet" => Box::new(StarknetSettlementClient::with_settings(settings_provider).await),
_ => panic!("Unsupported Settlement layer"),
}
}

async fn build_storage_client() -> Box<dyn DataStorage + Send + Sync> {
pub async fn build_storage_client() -> Box<dyn DataStorage + Send + Sync> {
match get_env_var_or_panic("DATA_STORAGE").as_str() {
"s3" => Box::new(AWSS3::new(AWSS3Config::new_from_env()).await),
_ => panic!("Unsupported Storage Client"),
Expand Down
24 changes: 0 additions & 24 deletions crates/orchestrator/src/controllers/jobs_controller.rs

This file was deleted.

3 changes: 0 additions & 3 deletions crates/orchestrator/src/controllers/mod.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,2 @@
/// Errors
mod errors;

/// Job controllers
pub mod jobs_controller;
5 changes: 5 additions & 0 deletions crates/orchestrator/src/data_storage/aws_s3/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,9 @@ pub struct AWSS3Config {
pub s3_bucket_name: String,
/// S3 Bucket region
pub s3_bucket_region: String,
/// Endpoint url
#[cfg(test)]
pub endpoint_url: String,
}

/// Implementation of `DataStorageConfig` for `AWSS3Config`
Expand All @@ -23,6 +26,8 @@ impl DataStorageConfig for AWSS3Config {
s3_key_secret: get_env_var_or_panic("AWS_SECRET_ACCESS_KEY"),
s3_bucket_name: get_env_var_or_panic("AWS_S3_BUCKET_NAME"),
s3_bucket_region: get_env_var_or_panic("AWS_S3_BUCKET_REGION"),
#[cfg(test)]
endpoint_url: get_env_var_or_panic("AWS_ENDPOINT_URL"),
}
}
}
17 changes: 15 additions & 2 deletions crates/orchestrator/src/data_storage/aws_s3/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@ pub struct AWSS3 {
impl AWSS3 {
/// Initializes a new AWS S3 client by passing the config
/// and returning it.
#[allow(dead_code)]
pub async fn new(config: AWSS3Config) -> Self {
// AWS cred building
let credentials = Credentials::new(
Expand All @@ -33,7 +32,15 @@ impl AWSS3 {
"loaded_from_custom_env",
);
let region = Region::new(config.s3_bucket_region.clone().to_string());
let conf_builder = Builder::new().region(region).credentials_provider(credentials);

#[allow(unused_mut)]
let mut conf_builder = Builder::new().region(region).credentials_provider(credentials).force_path_style(true);

#[cfg(test)]
{
conf_builder = conf_builder.endpoint_url(config.endpoint_url.clone().to_string());
}

let conf = conf_builder.build();

// Building AWS S3 config
Expand Down Expand Up @@ -69,4 +76,10 @@ impl DataStorage for AWSS3 {

Ok(())
}

/// Test-only: creates an S3 bucket named `bucket_name` (e.g. on the
/// localstack endpoint configured for tests), propagating any SDK error.
#[cfg(test)]
async fn build_test_bucket(&self, bucket_name: &str) -> Result<()> {
self.client.create_bucket().bucket(bucket_name).send().await?;
Ok(())
}
}
2 changes: 2 additions & 0 deletions crates/orchestrator/src/data_storage/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@ use mockall::automock;
/// Abstraction over a blob/object store (e.g. AWS S3 — see `build_storage_client`
/// and the `DATA_STORAGE` env var): stores and fetches raw bytes under a string key.
pub trait DataStorage: Send + Sync {
/// Fetches the bytes stored under `key`.
async fn get_data(&self, key: &str) -> Result<Bytes>;
/// Stores `data` under `key`.
async fn put_data(&self, data: Bytes, key: &str) -> Result<()>;
/// Test-only helper: creates the bucket/container named `bucket_name`
/// so integration tests can run against a fresh store.
#[cfg(test)]
async fn build_test_bucket(&self, bucket_name: &str) -> Result<()>;
}

/// **DataStorageConfig** : Trait method to represent the config struct needed for
Expand Down
4 changes: 4 additions & 0 deletions crates/orchestrator/src/database/mongodb/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,10 @@ impl MongoDb {
MongoDb { client }
}

/// Returns a clone of the underlying MongoDB driver `Client`, giving
/// callers (e.g. test utilities) raw access to the database — for instance
/// to drop collections between test runs.
pub fn client(&self) -> Client {
self.client.clone()
}

fn get_job_collection(&self) -> Collection<JobItem> {
self.client.database("orchestrator").collection("jobs")
}
Expand Down
4 changes: 2 additions & 2 deletions crates/orchestrator/src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
/// Config of the service. Contains configurations for DB, Queues and other services.
pub mod config;
mod constants;
pub mod constants;
/// Controllers for the routes
pub mod controllers;
/// Contains the trait that implements the fetching functions
Expand All @@ -17,6 +17,6 @@ pub mod queue;
/// Contains the routes for the service
pub mod routes;
#[cfg(test)]
mod tests;
pub mod tests;
/// Contains workers which act like cron jobs
pub mod workers;
4 changes: 2 additions & 2 deletions crates/orchestrator/src/queue/job_queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@ use uuid::Uuid;
use crate::config::config;
use crate::jobs::{process_job, verify_job};

const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue";
const JOB_VERIFICATION_QUEUE: &str = "madara_orchestrator_job_verification_queue";
pub const JOB_PROCESSING_QUEUE: &str = "madara_orchestrator_job_processing_queue";
pub const JOB_VERIFICATION_QUEUE: &str = "madara_orchestrator_job_verification_queue";

#[derive(Debug, Serialize, Deserialize)]
pub struct JobQueueMessage {
Expand Down
14 changes: 2 additions & 12 deletions crates/orchestrator/src/routes.rs
Original file line number Diff line number Diff line change
@@ -1,16 +1,10 @@
use axum::http::StatusCode;
use axum::response::IntoResponse;
use axum::routing::{get, post};
use axum::routing::get;
use axum::Router;

use crate::controllers::jobs_controller;

pub fn app_router() -> Router {
Router::new()
.route("/health", get(root))
.nest("/v1/dev", dev_routes())
.nest("/v1/job", job_routes())
.fallback(handler_404)
Router::new().route("/health", get(root)).nest("/v1/dev", dev_routes()).fallback(handler_404)
}

async fn root() -> &'static str {
Expand All @@ -21,10 +15,6 @@ async fn handler_404() -> impl IntoResponse {
(StatusCode::NOT_FOUND, "The requested resource was not found")
}

fn job_routes() -> Router {
Router::new().route("/create_job", post(jobs_controller::create_job))
}

fn dev_routes() -> Router {
Router::new()
}
49 changes: 47 additions & 2 deletions crates/orchestrator/src/tests/common/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,16 +6,21 @@ use std::sync::Arc;
use ::uuid::Uuid;
use constants::*;
use da_client_interface::MockDaClient;
use mongodb::Client;
use prover_client_interface::MockProverClient;
use rstest::*;
use settlement_client_interface::MockSettlementClient;
use starknet::providers::jsonrpc::HttpTransport;
use starknet::providers::JsonRpcClient;
use url::Url;
use utils::env_utils::get_env_var_or_panic;
use utils::settings::default::DefaultSettingsProvider;

use crate::config::Config;
use crate::config::{build_storage_client, config_force_init, Config};
use crate::data_storage::MockDataStorage;
use crate::database::MockDatabase;
use crate::database::mongodb::config::MongoDbConfig;
use crate::database::mongodb::MongoDb;
use crate::database::{DatabaseConfig, MockDatabase};
use crate::jobs::types::JobStatus::Created;
use crate::jobs::types::JobType::DataSubmission;
use crate::jobs::types::{ExternalId, JobItem};
Expand Down Expand Up @@ -74,3 +79,43 @@ pub fn custom_job_item(default_job_item: JobItem, #[default(String::from("0"))]

job_item
}

/// Integration-test fixture: loads `.env.test`, builds every orchestrator
/// dependency (Starknet RPC provider, MongoDB, SQS queue, DA / settlement /
/// prover / storage clients), creates the test S3 bucket, and installs the
/// resulting `Config` process-wide via `config_force_init`.
///
/// NOTE(review): the env file must be loaded before any builder runs, since
/// each `*_from_env` / `build_*` call panics on missing variables. Because the
/// config is global, concurrently running tests share it — TODO confirm
/// test isolation assumptions.
#[fixture]
pub async fn build_config() -> color_eyre::Result<()> {
// Getting .env.test variables
dotenvy::from_filename("../.env.test")?;

// init starknet client (MADARA_RPC_URL must be a valid URL)
let provider = JsonRpcClient::new(HttpTransport::new(
Url::parse(get_env_var_or_panic("MADARA_RPC_URL").as_str()).expect("Failed to parse URL"),
));

// init database
let database = Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await);

// init the queue
let queue = Box::new(crate::queue::sqs::SqsQueue {});

let da_client = crate::config::build_da_client().await;
let settings_provider = DefaultSettingsProvider {};
let settlement_client = crate::config::build_settlement_client(&settings_provider).await;
let prover_client = crate::config::build_prover_service(&settings_provider);
let storage_client = build_storage_client().await;

// building a test bucket : named by AWS_S3_BUCKET_NAME (test-only helper)
storage_client.build_test_bucket(&get_env_var_or_panic("AWS_S3_BUCKET_NAME")).await?;

let config =
Config::new(Arc::new(provider), da_client, prover_client, settlement_client, database, queue, storage_client);
config_force_init(config).await;

Ok(())
}

/// Drops the `jobs` collection of the `orchestrator` database so each test
/// starts from a clean slate.
pub async fn drop_database() -> color_eyre::Result<()> {
let mongo_client: Client = MongoDb::new(MongoDbConfig::new_from_env()).await.client();
let jobs_collection = mongo_client.database("orchestrator").collection::<JobItem>("jobs");
jobs_collection.drop(None).await?;
Ok(())
}
36 changes: 36 additions & 0 deletions crates/orchestrator/src/tests/data_storage/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
use crate::data_storage::aws_s3::config::AWSS3Config;
use crate::data_storage::aws_s3::AWSS3;
use crate::data_storage::{DataStorage, DataStorageConfig};
use crate::tests::common::build_config;
use bytes::Bytes;
use rstest::rstest;
use serde_json::json;

#[rstest]
#[tokio::test]
async fn test_put_and_get_data_s3() -> color_eyre::Result<()> {
// Bring up the full test config (this also creates the test bucket), then
// make sure the `.env.test` variables are loaded for `AWSS3Config` below.
build_config().await?;
dotenvy::from_filename("../.env.test")?;

let s3_client = AWSS3::new(AWSS3Config::new_from_env()).await;

// Round-trip payload: a small JSON document serialized to bytes.
let payload = json!({ "body" : "hello world. hello world." });
let serialized = serde_json::to_vec(&payload)?;
let object_key = "test_data.txt";

// Upload the payload under `object_key`...
s3_client.put_data(Bytes::from(serialized), object_key).await.expect("Unable to put data into the bucket.");

// ...then download it again and verify it deserializes to the same value.
let fetched = s3_client.get_data(object_key).await.expect("Unable to get the data from the bucket.");
let round_tripped: serde_json::Value = serde_json::from_slice(&fetched)?;

assert_eq!(round_tripped, payload);

Ok(())
}
Loading
Loading