Skip to content

Commit

Permalink
Adding swagger endpoints and OpenAPI endpoint documentation. The ope…
Browse files Browse the repository at this point in the history
…napi schema should be available under http://localhost:<rest_server_port>/api-doc.json, the swagger ui should be available under http://localhost:<rest_server_port>/swagger-ui. Added a mechanism to transform JSON schemas from schemars so we can use them in OpenAPI. (#167)
  • Loading branch information
zajko authored Aug 9, 2023
1 parent b60a4b9 commit fd08814
Show file tree
Hide file tree
Showing 16 changed files with 1,193 additions and 309 deletions.
820 changes: 558 additions & 262 deletions Cargo.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,4 @@ members = [
"sidecar",
"listener",
"types"
]
]
10 changes: 8 additions & 2 deletions sidecar/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ async-trait = "0.1.56"
bytes = "1.2.0"
casper-event-listener = { path = "../listener", version = "0.1.0" }
casper-event-types = { path = "../types", version = "0.1.0" }
casper-types = { version = "2.0.0", features = ["std"] }
casper-types = { version = "2.0.0", features = ["std", "json-schema"] }
clap = { version = "4.0.32", features = ["derive"] }
derive-new = "0.5.9"
eventsource-stream = "0.2.3"
Expand All @@ -20,12 +20,15 @@ hex = "0.4.3"
hex_fmt = "0.3.0"
http = "0.2.1"
hyper = "0.14.4"
indexmap = "2.0.0"
itertools = "0.10.3"
jsonschema = "0.17.1"
rand = "0.8.3"
regex = "1.6.0"
reqwest = "0.11.11"
schemars = "0.8.5"
sea-query = "0.26.3"
serde = { version = "1.0", features = ["derive"] }
serde = { version = "1.0", features = ["derive", "rc"] }
serde_json = "1.0"
sqlx = { version = "0.6", features = ["runtime-tokio-native-tls", "any", "sqlite"] }
thiserror = "1"
Expand All @@ -35,6 +38,9 @@ toml = "0.5.8"
tower = { version = "0.4.13", features = ["buffer", "limit", "make", "timeout"] }
tracing = "0.1"
tracing-subscriber = "0.3"
#Utoipa reference should be updated and pointed to cargo once 3.4.4 is released
utoipa = { git = "https://github.com/juhaku/utoipa", rev = "cea4c50112c6cc0883767a43ff611db367cd13b5", features = ["rc_schema"]}
utoipa-swagger-ui = { git = "https://github.com/juhaku/utoipa", rev = "cea4c50112c6cc0883767a43ff611db367cd13b5"}
warp = { version = "0.3.3", features = ["compression"] }
wheelbuf = "0.2.0"

Expand Down
3 changes: 1 addition & 2 deletions sidecar/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ mod integration_tests_version_switch;
mod migration_manager;
#[cfg(test)]
mod performance_tests;
mod rest_server;
pub mod rest_server;
mod sql;
mod sqlite_database;
#[cfg(test)]
Expand Down Expand Up @@ -172,7 +172,6 @@ async fn run(config: Config) -> Result<(), Error> {
Ok(())
}
});

// This channel allows SseData to be sent from multiple connected nodes to the single EventStreamServer.
let (outbound_sse_data_sender, mut outbound_sse_data_receiver) =
mpsc_channel(config.outbound_channel_size.unwrap_or(DEFAULT_CHANNEL_SIZE));
Expand Down
7 changes: 4 additions & 3 deletions sidecar/src/rest_server.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
mod errors;
mod filters;
pub mod filters;
mod handlers;
mod openapi;
#[cfg(test)]
mod tests;

Expand All @@ -10,6 +11,7 @@ use std::time::Duration;
use anyhow::Error;
use hyper::Server;
use tower::{buffer::Buffer, make::Shared, ServiceBuilder};
use warp::Filter;

use crate::{
sqlite_database::SqliteDatabase, types::config::RestServerConfig, utils::resolve_address,
Expand All @@ -22,13 +24,12 @@ pub async fn run_server(
sqlite_database: SqliteDatabase,
) -> Result<(), Error> {
let api = filters::combined_filters(sqlite_database);

let address = format!("{}:{}", BIND_ALL_INTERFACES, config.port);
let socket_address = resolve_address(&address)?;

let listener = TcpListener::bind(socket_address)?;

let warp_service = warp::service(api);
let warp_service = warp::service(api.with(warp::cors().allow_any_origin()));
let tower_service = ServiceBuilder::new()
.concurrency_limit(config.max_concurrent_requests as usize)
.rate_limit(
Expand Down
112 changes: 110 additions & 2 deletions sidecar/src/rest_server/filters.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use super::{errors::handle_rejection, handlers};
use super::{errors::handle_rejection, handlers, openapi::build_open_api_filters};
use crate::{
types::database::DatabaseReader,
utils::{root_filter, InvalidPath},
Expand All @@ -20,6 +20,7 @@ pub(super) fn combined_filters<Db: DatabaseReader + Clone + Send + Sync>(
.or(faults_by_public_key(db.clone()))
.or(faults_by_era(db.clone()))
.or(finality_signatures_by_block(db))
.or(build_open_api_filters())
.recover(handle_rejection)
}

Expand Down Expand Up @@ -62,7 +63,14 @@ fn deploy_filters<Db: DatabaseReader + Clone + Send + Sync>(
/// Return: data about the latest block.
/// Path URL: block
/// Example: curl http://127.0.0.1:18888/block
fn latest_block<Db: DatabaseReader + Clone + Send + Sync>(
#[utoipa::path(
get,
path = "/block",
responses(
(status = 200, description = "latest stored block", body = BlockAdded)
)
)]
pub fn latest_block<Db: DatabaseReader + Clone + Send + Sync>(
db: Db,
) -> impl Filter<Extract = (impl warp::Reply,), Error = warp::Rejection> + Clone {
warp::path!("block")
Expand All @@ -76,6 +84,16 @@ fn latest_block<Db: DatabaseReader + Clone + Send + Sync>(
/// Return: data about the block specified.
/// Path URL: block/<block-hash>
/// Example: curl http://127.0.0.1:18888/block/c0292d8408e9d83d1aaceadfbeb25dc38cda36bcb91c3d403a0deb594dc3d63f
#[utoipa::path(
get,
path = "/block/{block_hash}",
params(
("block_hash" = String, Path, description = "Base64 encoded block hash of requested block")
),
responses(
(status = 200, description = "fetch latest stored block", body = BlockAdded)
)
)]
fn block_by_hash<Db: DatabaseReader + Clone + Send + Sync>(
db: Db,
) -> impl Filter<Extract = (impl warp::Reply,), Error = warp::Rejection> + Clone {
Expand All @@ -90,6 +108,16 @@ fn block_by_hash<Db: DatabaseReader + Clone + Send + Sync>(
/// Return: data about the block requested.
/// Path URL: block/<block-height>
/// Example: curl http://127.0.0.1:18888/block/630151
#[utoipa::path(
get,
path = "/block/{height}",
params(
("height" = u32, Path, description = "Height of the requested block")
),
responses(
(status = 200, description = "fetch latest stored block", body = BlockAdded)
)
)]
fn block_by_height<Db: DatabaseReader + Clone + Send + Sync>(
db: Db,
) -> impl Filter<Extract = (impl warp::Reply,), Error = warp::Rejection> + Clone {
Expand All @@ -105,6 +133,16 @@ fn block_by_height<Db: DatabaseReader + Clone + Send + Sync>(
/// Return: data about the deploy specified.
/// Path URL: deploy/<deploy-hash>
/// Example: curl http://127.0.0.1:18888/deploy/f01544d37354c5f9b2c4956826d32f8e44198f94fb6752e87f422fe3071ab58a
#[utoipa::path(
get,
path = "/deploy/{deploy_hash}",
params(
("deploy_hash" = String, Path, description = "Base64 encoded deploy hash of requested deploy")
),
responses(
(status = 200, description = "fetch aggregate data for deploy events", body = DeployAggregate)
)
)]
fn deploy_by_hash<Db: DatabaseReader + Clone + Send + Sync>(
db: Db,
) -> impl Filter<Extract = (impl warp::Reply,), Error = warp::Rejection> + Clone {
Expand All @@ -119,6 +157,16 @@ fn deploy_by_hash<Db: DatabaseReader + Clone + Send + Sync>(
/// Return: data about the accepted deploy.
/// Path URL: deploy/accepted/<deploy-hash>
/// Example: curl http://127.0.0.1:18888/deploy/accepted/f01544d37354c5f9b2c4956826d32f8e44198f94fb6752e87f422fe3071ab58a
#[utoipa::path(
get,
path = "/deploy/accepted/{deploy_hash}",
params(
("deploy_hash" = String, Path, description = "Base64 encoded deploy hash of requested deploy accepted")
),
responses(
(status = 200, description = "fetch stored deploy", body = DeployAccepted)
)
)]
fn deploy_accepted_by_hash<Db: DatabaseReader + Clone + Send + Sync>(
db: Db,
) -> impl Filter<Extract = (impl warp::Reply,), Error = warp::Rejection> + Clone {
Expand All @@ -128,6 +176,16 @@ fn deploy_accepted_by_hash<Db: DatabaseReader + Clone + Send + Sync>(
.and_then(handlers::get_deploy_accepted_by_hash)
}

#[utoipa::path(
get,
path = "/deploy/expired/{deploy_hash}",
params(
("deploy_hash" = String, Path, description = "Base64 encoded deploy hash of requested deploy expired")
),
responses(
(status = 200, description = "fetch stored deploy", body = DeployExpired)
)
)]
/// Return information about a deploy that expired given its deploy hash.
/// Input: the database with data to be filtered.
/// Return: data about the expired deploy.
Expand All @@ -142,6 +200,16 @@ fn deploy_expired_by_hash<Db: DatabaseReader + Clone + Send + Sync>(
.and_then(handlers::get_deploy_expired_by_hash)
}

#[utoipa::path(
get,
path = "/deploy/processed/{deploy_hash}",
params(
("deploy_hash" = String, Path, description = "Base64 encoded deploy hash of requested deploy processed")
),
responses(
(status = 200, description = "fetch stored deploy", body = DeployProcessed)
)
)]
/// Return information about a deploy that was processed given its deploy hash.
/// Input: the database with data to be filtered.
/// Return: data about the processed deploy.
Expand All @@ -156,6 +224,16 @@ fn deploy_processed_by_hash<Db: DatabaseReader + Clone + Send + Sync>(
.and_then(handlers::get_deploy_processed_by_hash)
}

#[utoipa::path(
get,
path = "/faults/{public_key}",
params(
("public_key" = String, Path, description = "Base64 encoded validator's public key")
),
responses(
(status = 200, description = "faults associated with a validator's public key", body = [Fault])
)
)]
/// Return the faults associated with a validator's public key.
/// Input: the database with data to be filtered.
/// Return: faults caused by the validator specified.
Expand All @@ -170,6 +248,16 @@ fn faults_by_public_key<Db: DatabaseReader + Clone + Send + Sync>(
.and_then(handlers::get_faults_by_public_key)
}

#[utoipa::path(
get,
path = "/faults/{era}",
params(
("era" = String, Path, description = "Era identifier")
),
responses(
(status = 200, description = "faults associated with an era ", body = [Fault])
)
)]
/// Return the faults associated with an era given a valid era identifier.
/// Input: the database with data to be filtered.
/// Return: fault information for a given era.
Expand All @@ -184,6 +272,16 @@ fn faults_by_era<Db: DatabaseReader + Clone + Send + Sync>(
.and_then(handlers::get_faults_by_era)
}

#[utoipa::path(
get,
path = "/signatures/{block_hash}",
params(
("block_hash" = String, Path, description = "Base64 encoded block hash of requested block")
),
responses(
(status = 200, description = "finality signatures in a block", body = [FinalitySignature])
)
)]
/// Return the finality signatures in a block given its block hash.
/// Input: the database with data to be filtered.
/// Return: the finality signatures for the block specified.
Expand All @@ -198,6 +296,16 @@ fn finality_signatures_by_block<Db: DatabaseReader + Clone + Send + Sync>(
.and_then(handlers::get_finality_signatures_by_block)
}

#[utoipa::path(
get,
path = "/step/{era_id}",
params(
("era_id" = String, Path, description = "Era id")
),
responses(
(status = 200, description = "step event emitted at the end of an era", body = Step)
)
)]
/// Return the step event emitted at the end of an era, given a valid era identifier.
/// Input: the database with data to be filtered.
/// Return: the step event for a given era.
Expand Down
Loading

0 comments on commit fd08814

Please sign in to comment.