From 2bcbfd31fcd00d1d6a1f4ec821dbf4fb5a6896e6 Mon Sep 17 00:00:00 2001 From: Jesse Hallett Date: Mon, 25 Mar 2024 14:25:48 -0700 Subject: [PATCH] switch configuration types to maps (#12) In some earlier feedback @dmoverton pointed out it would be better to store native queries in maps, as opposed to vecs. I thought the same logic would apply to some of the other configuration fields. It's also been bugging me that the configuration formats didn't match the pattern in ddn and ndc-postgres in that we were using lists of named objects instead of maps. I switched over some types from `Vec` to `BTreeMap`. To smooth things out I also added a `WithName` type that can wrap any struct, and adds a `name` field that appears at the same level as the underlying struct's fields in serialized formats. That lets us deserialize either without a name (for example if we're getting names from map keys in `schema.json` instead of from map value fields), or with a name (like if we split up schema configuration to put info for each collection into its own file, we'll want to read the name from a field in the file.) I used that method to update `read_subdir_configs` in `directory.rs` so that it automatically grabs a name from each file. The changes ended up being more extensive than I'd hoped so let me know if it's too much. But since this changes configuration formats I thought if we want to make this change it's better not to wait. 
https://hasurahq.atlassian.net/browse/MDB-91 --- Cargo.lock | 1 + crates/cli/Cargo.toml | 3 +- crates/cli/src/introspection/sampling.rs | 180 +++-- .../cli/src/introspection/type_unification.rs | 91 ++- .../src/introspection/validation_schema.rs | 65 +- crates/configuration/src/configuration.rs | 80 +-- crates/configuration/src/directory.rs | 36 +- crates/configuration/src/lib.rs | 8 +- crates/configuration/src/native_queries.rs | 11 +- crates/configuration/src/schema/database.rs | 23 +- crates/configuration/src/schema/mod.rs | 34 +- crates/configuration/src/with_name.rs | 81 +++ .../src/interface_types/mongo_config.rs | 4 +- crates/mongodb-agent-common/src/query/mod.rs | 13 +- crates/mongodb-connector/src/mutation.rs | 10 +- crates/mongodb-connector/src/schema.rs | 44 +- .../chinook/native_queries/hello.yaml | 6 +- .../chinook/native_queries/insert_artist.yaml | 6 +- fixtures/connector/chinook/schema.json | 647 +++++++----------- 19 files changed, 699 insertions(+), 644 deletions(-) create mode 100644 crates/configuration/src/with_name.rs diff --git a/Cargo.lock b/Cargo.lock index 8f30611b..934915f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1537,6 +1537,7 @@ dependencies = [ "configuration", "futures-util", "indexmap 1.9.3", + "itertools 0.12.1", "mongodb", "mongodb-agent-common", "mongodb-support", diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index e96b70cc..e3225fd1 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -13,6 +13,7 @@ anyhow = "1.0.80" clap = { version = "4.5.1", features = ["derive", "env"] } futures-util = "0.3.28" indexmap = { version = "1", features = ["serde"] } # must match the version that ndc-client uses +itertools = "^0.12.1" serde = { version = "1.0", features = ["derive"] } serde_json = { version = "1.0.113", features = ["raw_value"] } thiserror = "1.0.57" @@ -20,4 +21,4 @@ tokio = { version = "1.36.0", features = ["full"] } these = "2.0.0" [dev-dependencies] -proptest = "1" \ No newline at end of file 
+proptest = "1" diff --git a/crates/cli/src/introspection/sampling.rs b/crates/cli/src/introspection/sampling.rs index ca2e0e32..1891ba8f 100644 --- a/crates/cli/src/introspection/sampling.rs +++ b/crates/cli/src/introspection/sampling.rs @@ -1,15 +1,20 @@ +use std::collections::BTreeMap; + use super::type_unification::{ unify_object_types, unify_schema, unify_type, TypeUnificationContext, TypeUnificationResult, }; use configuration::{ - schema::{Collection, ObjectField, ObjectType, Type}, - Schema, + schema::{self, Type}, + Schema, WithName, }; use futures_util::TryStreamExt; use mongodb::bson::{doc, Bson, Document}; use mongodb_agent_common::interface_types::MongoConfig; use mongodb_support::BsonScalarType::{self, *}; +type ObjectField = WithName; +type ObjectType = WithName; + /// Sample from all collections in the database and return a Schema. /// Return an error if there are any errors accessing the database /// or if the types derived from the sample documents for a collection @@ -19,8 +24,8 @@ pub async fn sample_schema_from_db( config: &MongoConfig, ) -> anyhow::Result { let mut schema = Schema { - collections: vec![], - object_types: vec![], + collections: BTreeMap::new(), + object_types: BTreeMap::new(), }; let db = config.client.database(&config.database); let mut collections_cursor = db.list_collections(None, None).await?; @@ -54,15 +59,17 @@ async fn sample_schema_from_collection( unify_object_types(collected_object_types, object_types)? 
}; } - let collection_info = Collection { - name: collection_name.to_string(), - description: None, - r#type: collection_name.to_string(), - }; + let collection_info = WithName::named( + collection_name.to_string(), + schema::Collection { + description: None, + r#type: collection_name.to_string(), + }, + ); Ok(Schema { - collections: vec![collection_info], - object_types: collected_object_types, + collections: WithName::into_map([collection_info]), + object_types: WithName::into_map(collected_object_types), }) } @@ -83,11 +90,13 @@ fn make_object_type( (object_type_defs.concat(), object_fields) }; - let object_type = ObjectType { - name: object_type_name.to_string(), - description: None, - fields: object_fields, - }; + let object_type = WithName::named( + object_type_name.to_string(), + schema::ObjectType { + description: None, + fields: WithName::into_map(object_fields), + }, + ); object_type_defs.push(object_type); Ok(object_type_defs) @@ -101,11 +110,13 @@ fn make_object_field( let object_type_name = format!("{type_prefix}{field_name}"); let (collected_otds, field_type) = make_field_type(&object_type_name, field_name, field_value)?; - let object_field = ObjectField { - name: field_name.to_owned(), - description: None, - r#type: field_type, - }; + let object_field = WithName::named( + field_name.to_owned(), + schema::ObjectField { + description: None, + r#type: field_type, + }, + ); Ok((collected_otds, object_field)) } @@ -164,7 +175,12 @@ fn make_field_type( #[cfg(test)] mod tests { - use configuration::schema::{ObjectField, ObjectType, Type}; + use std::collections::BTreeMap; + + use configuration::{ + schema::{ObjectField, ObjectType, Type}, + WithName, + }; use mongodb::bson::doc; use mongodb_support::BsonScalarType; @@ -176,24 +192,30 @@ mod tests { fn simple_doc() -> Result<(), anyhow::Error> { let object_name = "foo"; let doc = doc! 
{"my_int": 1, "my_string": "two"}; - let result = make_object_type(object_name, &doc); + let result = make_object_type(object_name, &doc).map(WithName::into_map::>); - let expected = Ok(vec![ObjectType { - name: object_name.to_owned(), - fields: vec![ - ObjectField { - name: "my_int".to_owned(), - r#type: Type::Scalar(BsonScalarType::Int), - description: None, - }, - ObjectField { - name: "my_string".to_owned(), - r#type: Type::Scalar(BsonScalarType::String), - description: None, - }, - ], - description: None, - }]); + let expected = Ok(BTreeMap::from([( + object_name.to_owned(), + ObjectType { + fields: BTreeMap::from([ + ( + "my_int".to_owned(), + ObjectField { + r#type: Type::Scalar(BsonScalarType::Int), + description: None, + }, + ), + ( + "my_string".to_owned(), + ObjectField { + r#type: Type::Scalar(BsonScalarType::String), + description: None, + }, + ), + ]), + description: None, + }, + )])); assert_eq!(expected, result); @@ -204,40 +226,56 @@ mod tests { fn array_of_objects() -> Result<(), anyhow::Error> { let object_name = "foo"; let doc = doc! 
{"my_array": [{"foo": 42, "bar": ""}, {"bar": "wut", "baz": 3.77}]}; - let result = make_object_type(object_name, &doc); + let result = make_object_type(object_name, &doc).map(WithName::into_map::>); - let expected = Ok(vec![ - ObjectType { - name: "foo_my_array".to_owned(), - fields: vec![ - ObjectField { - name: "foo".to_owned(), - r#type: Type::Nullable(Box::new(Type::Scalar(BsonScalarType::Int))), - description: None, - }, - ObjectField { - name: "bar".to_owned(), - r#type: Type::Scalar(BsonScalarType::String), - description: None, - }, - ObjectField { - name: "baz".to_owned(), - r#type: Type::Nullable(Box::new(Type::Scalar(BsonScalarType::Double))), - description: None, - }, - ], - description: None, - }, - ObjectType { - name: object_name.to_owned(), - fields: vec![ObjectField { - name: "my_array".to_owned(), - r#type: Type::ArrayOf(Box::new(Type::Object("foo_my_array".to_owned()))), + let expected = Ok(BTreeMap::from([ + ( + "foo_my_array".to_owned(), + ObjectType { + fields: BTreeMap::from([ + ( + "foo".to_owned(), + ObjectField { + r#type: Type::Nullable(Box::new(Type::Scalar(BsonScalarType::Int))), + description: None, + }, + ), + ( + "bar".to_owned(), + ObjectField { + r#type: Type::Scalar(BsonScalarType::String), + description: None, + }, + ), + ( + "baz".to_owned(), + ObjectField { + r#type: Type::Nullable(Box::new(Type::Scalar( + BsonScalarType::Double, + ))), + description: None, + }, + ), + ]), description: None, - }], - description: None, - }, - ]); + }, + ), + ( + object_name.to_owned(), + ObjectType { + fields: BTreeMap::from([( + "my_array".to_owned(), + ObjectField { + r#type: Type::ArrayOf(Box::new(Type::Object( + "foo_my_array".to_owned(), + ))), + description: None, + }, + )]), + description: None, + }, + ), + ])); assert_eq!(expected, result); diff --git a/crates/cli/src/introspection/type_unification.rs b/crates/cli/src/introspection/type_unification.rs index b3ac3179..efcb11e1 100644 --- a/crates/cli/src/introspection/type_unification.rs 
+++ b/crates/cli/src/introspection/type_unification.rs @@ -3,10 +3,11 @@ /// It allows the information in the schemas derived from several documents to be combined into one schema. /// use configuration::{ - schema::{ObjectField, ObjectType, Type}, - Schema, + schema::{self, Type}, + Schema, WithName, }; use indexmap::IndexMap; +use itertools::Itertools as _; use mongodb_support::{ align::align_with_result, BsonScalarType::{self, *}, @@ -17,6 +18,9 @@ use std::{ }; use thiserror::Error; +type ObjectField = WithName; +type ObjectType = WithName; + #[derive(Debug, PartialEq, Eq, Clone)] pub struct TypeUnificationContext { object_type_name: String, @@ -153,11 +157,13 @@ fn make_nullable(t: Type) -> Type { } fn make_nullable_field(field: ObjectField) -> Result { - Ok(ObjectField { - name: field.name, - r#type: make_nullable(field.r#type), - description: field.description, - }) + Ok(WithName::named( + field.name, + schema::ObjectField { + r#type: make_nullable(field.value.r#type), + description: field.value.description, + }, + )) } /// Unify two `ObjectType`s. 
@@ -167,13 +173,17 @@ fn unify_object_type( object_type_b: ObjectType, ) -> TypeUnificationResult { let field_map_a: IndexMap = object_type_a + .value .fields .into_iter() + .map_into::() .map(|o| (o.name.to_owned(), o)) .collect(); let field_map_b: IndexMap = object_type_b + .value .fields .into_iter() + .map_into::() .map(|o| (o.name.to_owned(), o)) .collect(); @@ -185,11 +195,19 @@ fn unify_object_type( |field_a, field_b| unify_object_field(&object_type_a.name, field_a, field_b), )?; - Ok(ObjectType { - name: object_type_a.name, - fields: merged_field_map.into_values().collect(), - description: object_type_a.description.or(object_type_b.description), - }) + Ok(WithName::named( + object_type_a.name, + schema::ObjectType { + fields: merged_field_map + .into_values() + .map(WithName::into_name_value_pair) + .collect(), + description: object_type_a + .value + .description + .or(object_type_b.value.description), + }, + )) } /// Unify the types of two `ObjectField`s. @@ -200,11 +218,20 @@ fn unify_object_field( object_field_b: ObjectField, ) -> TypeUnificationResult { let context = TypeUnificationContext::new(object_type_name, &object_field_a.name); - Ok(ObjectField { - name: object_field_a.name, - r#type: unify_type(context, object_field_a.r#type, object_field_b.r#type)?, - description: object_field_a.description.or(object_field_b.description), - }) + Ok(WithName::named( + object_field_a.name, + schema::ObjectField { + r#type: unify_type( + context, + object_field_a.value.r#type, + object_field_b.value.r#type, + )?, + description: object_field_a + .value + .description + .or(object_field_b.value.description), + }, + )) } /// Unify two sets of `ObjectType`s. 
@@ -251,10 +278,12 @@ mod tests { use std::collections::{HashMap, HashSet}; use super::{ - normalize_type, unify_object_type, unify_type, ObjectField, ObjectType, - TypeUnificationContext, TypeUnificationError, + normalize_type, unify_object_type, unify_type, TypeUnificationContext, TypeUnificationError, + }; + use configuration::{ + schema::{self, Type}, + WithName, }; - use configuration::schema::Type; use mongodb_support::BsonScalarType; use proptest::{collection::hash_map, prelude::*}; @@ -409,29 +438,27 @@ mod tests { } let name = "foo"; - let left_object = ObjectType { - name: name.to_owned(), - fields: left_fields.into_iter().map(|(k, v)| ObjectField{name: k, r#type: v, description: None}).collect(), + let left_object = WithName::named(name.to_owned(), schema::ObjectType { + fields: left_fields.into_iter().map(|(k, v)| (k, schema::ObjectField{r#type: v, description: None})).collect(), description: None - }; - let right_object = ObjectType { - name: name.to_owned(), - fields: right_fields.into_iter().map(|(k, v)| ObjectField{name: k, r#type: v, description: None}).collect(), + }); + let right_object = WithName::named(name.to_owned(), schema::ObjectType { + fields: right_fields.into_iter().map(|(k, v)| (k, schema::ObjectField{r#type: v, description: None})).collect(), description: None - }; + }); let result = unify_object_type(left_object, right_object); match result { Err(err) => panic!("Got error result {err}"), Ok(ot) => { - for field in &ot.fields { + for field in ot.value.named_fields() { // Any fields not shared between the two input types should be nullable. - if !shared.contains_key(&field.name) { - assert!(is_nullable(&field.r#type), "Found a non-shared field that is not nullable") + if !shared.contains_key(field.name) { + assert!(is_nullable(&field.value.r#type), "Found a non-shared field that is not nullable") } } // All input fields must appear in the result. 
- let fields: HashSet = ot.fields.into_iter().map(|f| f.name).collect(); + let fields: HashSet = ot.value.fields.into_keys().collect(); assert!(left.into_keys().chain(right.into_keys()).chain(shared.into_keys()).all(|k| fields.contains(&k)), "Missing field in result type") } diff --git a/crates/cli/src/introspection/validation_schema.rs b/crates/cli/src/introspection/validation_schema.rs index 279679fb..7b819288 100644 --- a/crates/cli/src/introspection/validation_schema.rs +++ b/crates/cli/src/introspection/validation_schema.rs @@ -1,6 +1,6 @@ use configuration::{ - schema::{Collection, ObjectField, ObjectType, Type}, - Schema, + schema::{self, Type}, + Schema, WithName, }; use futures_util::{StreamExt, TryStreamExt}; use indexmap::IndexMap; @@ -10,6 +10,10 @@ use mongodb_support::{BsonScalarType, BsonType}; use mongodb_agent_common::interface_types::{MongoAgentError, MongoConfig}; +type Collection = WithName; +type ObjectType = WithName; +type ObjectField = WithName; + pub async fn get_metadata_from_validation_schema( config: &MongoConfig, ) -> Result { @@ -52,8 +56,8 @@ pub async fn get_metadata_from_validation_schema( .await?; Ok(Schema { - collections, - object_types: object_types.concat(), + collections: WithName::into_map(collections), + object_types: WithName::into_map(object_types.concat()), }) } @@ -66,11 +70,13 @@ fn make_collection( let (mut object_type_defs, object_fields) = { let type_prefix = format!("{collection_name}_"); - let id_field = ObjectField { - name: "_id".to_string(), - description: Some("primary key _id".to_string()), - r#type: Type::Scalar(BsonScalarType::ObjectId), - }; + let id_field = WithName::named( + "_id", + schema::ObjectField { + description: Some("primary key _id".to_string()), + r#type: Type::Scalar(BsonScalarType::ObjectId), + }, + ); let (object_type_defs, mut object_fields): (Vec>, Vec) = properties .iter() @@ -84,19 +90,20 @@ fn make_collection( (object_type_defs.concat(), object_fields) }; - let collection_type = 
ObjectType { - name: collection_name.to_string(), - description: Some(format!("Object type for collection {collection_name}")), - fields: object_fields, - }; + let collection_type = WithName::named( + collection_name, + schema::ObjectType { + description: Some(format!("Object type for collection {collection_name}")), + fields: WithName::into_map(object_fields), + }, + ); object_type_defs.push(collection_type); - let collection_info = Collection { - name: collection_name.to_string(), + let collection_info = WithName::named(collection_name, schema::Collection { description: validator_schema.description.clone(), r#type: collection_name.to_string(), - }; + }); (object_type_defs, collection_info) } @@ -111,11 +118,13 @@ fn make_object_field( let object_type_name = format!("{type_prefix}{prop_name}"); let (collected_otds, field_type) = make_field_type(&object_type_name, prop_schema); - let object_field = ObjectField { - name: prop_name.clone(), - description, - r#type: maybe_nullable(field_type, !required_labels.contains(prop_name)), - }; + let object_field = WithName::named( + prop_name.clone(), + schema::ObjectField { + description, + r#type: maybe_nullable(field_type, !required_labels.contains(prop_name)), + }, + ); (collected_otds, object_field) } @@ -147,11 +156,13 @@ fn make_field_type(object_type_name: &str, prop_schema: &Property) -> (Vec, + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub native_queries: BTreeMap, } impl Configuration { - pub fn validate(schema: Schema, native_queries: Vec) -> anyhow::Result { + pub fn validate( + schema: Schema, + native_queries: BTreeMap, + ) -> anyhow::Result { let config = Configuration { schema, native_queries, @@ -29,7 +32,7 @@ impl Configuration { { let duplicate_type_names: Vec<&str> = config .object_types() - .map(|t| t.name.as_ref()) + .map(|(name, _)| name.as_ref()) .duplicates() .collect(); ensure!( @@ -39,21 +42,6 @@ impl Configuration { ); } - { - let duplicate_collection_names: Vec<&str> = 
config - .schema - .collections - .iter() - .map(|c| c.name.as_ref()) - .duplicates() - .collect(); - ensure!( - duplicate_collection_names.is_empty(), - "configuration contains multiple definitions for these collection names: {}", - duplicate_collection_names.join(", ") - ); - } - Ok(config) } @@ -68,11 +56,11 @@ impl Configuration { } /// Returns object types collected from schema and native queries - pub fn object_types(&self) -> impl Iterator { + pub fn object_types(&self) -> impl Iterator { let object_types_from_schema = self.schema.object_types.iter(); let object_types_from_native_queries = self .native_queries - .iter() + .values() .flat_map(|native_query| &native_query.object_types); object_types_from_schema.chain(object_types_from_native_queries) } @@ -89,26 +77,38 @@ mod tests { fn fails_with_duplicate_object_types() { let schema = Schema { collections: Default::default(), - object_types: vec![ObjectType { - name: "Album".to_owned(), - fields: Default::default(), - description: Default::default(), - }], + object_types: [( + "Album".to_owned(), + ObjectType { + fields: Default::default(), + description: Default::default(), + }, + )] + .into_iter() + .collect(), }; - let native_queries = vec![NativeQuery { - name: "hello".to_owned(), - object_types: vec![ObjectType { - name: "Album".to_owned(), - fields: Default::default(), + let native_queries = [( + "hello".to_owned(), + NativeQuery { + object_types: [( + "Album".to_owned(), + ObjectType { + fields: Default::default(), + description: Default::default(), + }, + )] + .into_iter() + .collect(), + result_type: Type::Object("Album".to_owned()), + command: doc! { "command": 1 }, + arguments: Default::default(), + selection_criteria: Default::default(), description: Default::default(), - }], - result_type: Type::Object("Album".to_owned()), - command: doc! 
{ "command": 1 }, - arguments: Default::default(), - selection_criteria: Default::default(), - description: Default::default(), - mode: Default::default(), - }]; + mode: Default::default(), + }, + )] + .into_iter() + .collect(); let result = Configuration::validate(schema, native_queries); let error_msg = result.unwrap_err().to_string(); assert!(error_msg.contains("multiple definitions")); diff --git a/crates/configuration/src/directory.rs b/crates/configuration/src/directory.rs index fd616e71..c1b368fd 100644 --- a/crates/configuration/src/directory.rs +++ b/crates/configuration/src/directory.rs @@ -2,11 +2,14 @@ use anyhow::{anyhow, Context as _}; use futures::stream::TryStreamExt as _; use itertools::Itertools as _; use serde::{Deserialize, Serialize}; -use std::path::{Path, PathBuf}; +use std::{ + collections::BTreeMap, + path::{Path, PathBuf}, +}; use tokio::fs; use tokio_stream::wrappers::ReadDirStream; -use crate::{native_queries::NativeQuery, Configuration}; +use crate::{with_name::WithName, Configuration}; pub const SCHEMA_FILENAME: &str = "schema"; pub const NATIVE_QUERIES_DIRNAME: &str = "native_queries"; @@ -32,7 +35,7 @@ pub async fn read_directory( let schema = parse_json_or_yaml(dir, SCHEMA_FILENAME).await?; - let native_queries: Vec = read_subdir_configs(&dir.join(NATIVE_QUERIES_DIRNAME)) + let native_queries = read_subdir_configs(&dir.join(NATIVE_QUERIES_DIRNAME)) .await? .unwrap_or_default(); @@ -42,7 +45,9 @@ pub async fn read_directory( /// Parse all files in a directory with one of the allowed configuration extensions according to /// the given type argument. For example if `T` is `NativeQuery` this function assumes that all /// json and yaml files in the given directory should be parsed as native query configurations. -async fn read_subdir_configs(subdir: &Path) -> anyhow::Result>> +/// +/// Assumes that every configuration file has a `name` field. 
+async fn read_subdir_configs(subdir: &Path) -> anyhow::Result>> where for<'a> T: Deserialize<'a>, { @@ -51,7 +56,7 @@ where } let dir_stream = ReadDirStream::new(fs::read_dir(subdir).await?); - let configs = dir_stream + let configs: Vec> = dir_stream .map_err(|err| err.into()) .try_filter_map(|dir_entry| async move { // Permits regular files and symlinks, does not filter out symlinks to directories. @@ -73,11 +78,26 @@ where Ok(format_option.map(|format| (path, format))) }) - .and_then(|(path, format)| async move { parse_config_file::(path, format).await }) - .try_collect::>() + .and_then( + |(path, format)| async move { parse_config_file::>(path, format).await }, + ) + .try_collect() .await?; - Ok(Some(configs)) + let duplicate_names = configs + .iter() + .map(|c| c.name.as_ref()) + .duplicates() + .collect::>(); + + if duplicate_names.is_empty() { + Ok(Some(WithName::into_map(configs))) + } else { + Err(anyhow!( + "found duplicate names in configuration: {}", + duplicate_names.join(", ") + )) + } } /// Given a base name, like "connection", looks for files of the form "connection.json", diff --git a/crates/configuration/src/lib.rs b/crates/configuration/src/lib.rs index 8414acc1..91aa4c65 100644 --- a/crates/configuration/src/lib.rs +++ b/crates/configuration/src/lib.rs @@ -1,9 +1,11 @@ mod configuration; -pub mod schema; -pub mod native_queries; mod directory; +pub mod native_queries; +pub mod schema; +mod with_name; pub use crate::configuration::Configuration; -pub use crate::schema::Schema; pub use crate::directory::read_directory; pub use crate::directory::write_directory; +pub use crate::schema::Schema; +pub use crate::with_name::{WithName, WithNameRef}; diff --git a/crates/configuration/src/native_queries.rs b/crates/configuration/src/native_queries.rs index 6153a4bf..d7946a3f 100644 --- a/crates/configuration/src/native_queries.rs +++ b/crates/configuration/src/native_queries.rs @@ -1,3 +1,5 @@ +use std::collections::BTreeMap; + use mongodb::{bson, 
options::SelectionCriteria}; use schemars::JsonSchema; use serde::Deserialize; @@ -9,14 +11,11 @@ use crate::schema::{ObjectField, ObjectType, Type}; #[derive(Clone, Debug, Deserialize, JsonSchema)] #[serde(rename_all = "camelCase")] pub struct NativeQuery { - /// Name that will be used to identify the query in your data graph - pub name: String, - /// You may define object types here to reference in `result_type`. Any types defined here will /// be merged with the definitions in `schema.json`. This allows you to maintain hand-written /// types for native queries without having to edit a generated `schema.json` file. - #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub object_types: Vec, + #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] + pub object_types: BTreeMap, /// Type of data returned by the query. You may reference object types defined in the /// `object_types` list in this definition, or you may reference object types from @@ -25,7 +24,7 @@ pub struct NativeQuery { /// Arguments for per-query customization #[serde(default)] - pub arguments: Vec, + pub arguments: BTreeMap, /// Command to run expressed as a BSON document #[schemars(with = "Object")] diff --git a/crates/configuration/src/schema/database.rs b/crates/configuration/src/schema/database.rs index c82942e5..53a99521 100644 --- a/crates/configuration/src/schema/database.rs +++ b/crates/configuration/src/schema/database.rs @@ -1,12 +1,15 @@ +use std::collections::BTreeMap; + use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use mongodb_support::BsonScalarType; +use crate::{WithName, WithNameRef}; + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "camelCase")] pub struct Collection { - pub name: String, /// The name of a type declared in `objectTypes` that describes the fields of this collection. /// The type name may be the same as the collection name. 
pub r#type: String, @@ -30,17 +33,29 @@ pub enum Type { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "camelCase")] pub struct ObjectType { - pub name: String, - pub fields: Vec, + pub fields: BTreeMap, #[serde(default)] pub description: Option, } +impl ObjectType { + pub fn named_fields(&self) -> impl Iterator> { + self.fields + .iter() + .map(|(name, field)| WithNameRef::named(name, field)) + } + + pub fn into_named_fields(self) -> impl Iterator> { + self.fields + .into_iter() + .map(|(name, field)| WithName::named(name, field)) + } +} + /// Information about an object type field. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "camelCase")] pub struct ObjectField { - pub name: String, pub r#type: Type, #[serde(default)] pub description: Option, diff --git a/crates/configuration/src/schema/mod.rs b/crates/configuration/src/schema/mod.rs index 23cb3e5a..bec4bdf2 100644 --- a/crates/configuration/src/schema/mod.rs +++ b/crates/configuration/src/schema/mod.rs @@ -1,15 +1,45 @@ mod database; +use std::collections::BTreeMap; + use schemars::JsonSchema; use serde::{Deserialize, Serialize}; +use crate::{WithName, WithNameRef}; + pub use self::database::{Collection, ObjectField, ObjectType, Type}; #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema)] #[serde(rename_all = "camelCase")] pub struct Schema { #[serde(default)] - pub collections: Vec, + pub collections: BTreeMap, #[serde(default)] - pub object_types: Vec, + pub object_types: BTreeMap, +} + +impl Schema { + pub fn into_named_collections(self) -> impl Iterator> { + self.collections + .into_iter() + .map(|(name, field)| WithName::named(name, field)) + } + + pub fn into_named_object_types(self) -> impl Iterator> { + self.object_types + .into_iter() + .map(|(name, field)| WithName::named(name, field)) + } + + pub fn named_collections(&self) -> impl Iterator> { + self.collections + .iter() + .map(|(name, 
field)| WithNameRef::named(name, field)) + } + + pub fn named_object_types(&self) -> impl Iterator> { + self.object_types + .iter() + .map(|(name, field)| WithNameRef::named(name, field)) + } } diff --git a/crates/configuration/src/with_name.rs b/crates/configuration/src/with_name.rs new file mode 100644 index 00000000..deeb5eb0 --- /dev/null +++ b/crates/configuration/src/with_name.rs @@ -0,0 +1,81 @@ +use serde::{Deserialize, Serialize}; + +/// Helper for working with serialized formats of named values. This is for cases where we want to +/// deserialize to a map where names are stored as map keys. But in serialized form the name may be +/// an inline field. +#[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Deserialize, Serialize)] +pub struct WithName { + pub name: String, + #[serde(flatten)] + pub value: T, +} + +impl WithName { + pub fn into_map(values: impl IntoIterator>) -> Map + where + Map: FromIterator<(String, T)>, + { + values + .into_iter() + .map(Self::into_name_value_pair) + .collect::() + } + + pub fn into_name_value_pair(self) -> (String, T) { + (self.name, self.value) + } + + pub fn named(name: impl ToString, value: T) -> Self { + WithName { + name: name.to_string(), + value, + } + } + + pub fn as_ref(&self) -> WithNameRef<'_, R> + where + T: AsRef, + { + WithNameRef::named(&self.name, self.value.as_ref()) + } +} + +impl From> for (String, T) { + fn from(value: WithName) -> Self { + value.into_name_value_pair() + } +} + +impl From<(String, T)> for WithName { + fn from((name, value): (String, T)) -> Self { + WithName::named(name, value) + } +} + +#[derive(Debug, Hash, Eq, PartialEq, Ord, PartialOrd)] +pub struct WithNameRef<'a, T> { + pub name: &'a str, + pub value: &'a T, +} + +impl<'a, T> WithNameRef<'a, T> { + pub fn named<'b>(name: &'b str, value: &'b T) -> WithNameRef<'b, T> { + WithNameRef { name, value } + } + + pub fn to_owned(&self) -> WithName + where + T: ToOwned, + { + WithName::named(self.name.to_owned(), 
self.value.to_owned()) + } +} + +impl<'a, T, R> From<&'a WithName> for WithNameRef<'a, R> +where + T: AsRef, +{ + fn from(value: &'a WithName) -> Self { + value.as_ref() + } +} diff --git a/crates/mongodb-agent-common/src/interface_types/mongo_config.rs b/crates/mongodb-agent-common/src/interface_types/mongo_config.rs index e5cdae13..b7323285 100644 --- a/crates/mongodb-agent-common/src/interface_types/mongo_config.rs +++ b/crates/mongodb-agent-common/src/interface_types/mongo_config.rs @@ -1,3 +1,5 @@ +use std::collections::BTreeMap; + use configuration::native_queries::NativeQuery; use mongodb::Client; @@ -8,5 +10,5 @@ pub struct MongoConfig { /// Name of the database to connect to pub database: String, - pub native_queries: Vec, + pub native_queries: BTreeMap, } diff --git a/crates/mongodb-agent-common/src/query/mod.rs b/crates/mongodb-agent-common/src/query/mod.rs index fc98d036..53a4bc92 100644 --- a/crates/mongodb-agent-common/src/query/mod.rs +++ b/crates/mongodb-agent-common/src/query/mod.rs @@ -36,10 +36,15 @@ pub async fn handle_query_request( let database = config.client.database(&config.database); let target = &query_request.target; - if let Some(native_query) = config.native_queries.iter().find(|query| { - let target_name = target.name(); - target_name.len() == 1 && target_name[0] == query.name - }) { + let target_name = { + let name = target.name(); + if name.len() == 1 { + Some(&name[0]) + } else { + None + } + }; + if let Some(native_query) = target_name.and_then(|name| config.native_queries.get(name)) { return handle_native_query_request(native_query.clone(), database).await; } diff --git a/crates/mongodb-connector/src/mutation.rs b/crates/mongodb-connector/src/mutation.rs index 2388d952..dde8a43a 100644 --- a/crates/mongodb-connector/src/mutation.rs +++ b/crates/mongodb-connector/src/mutation.rs @@ -56,13 +56,17 @@ fn look_up_procedures( .into_iter() .map(|operation| match operation { MutationOperation::Procedure { - name, arguments, .. 
+ name: procedure_name, + arguments, + .. } => { let native_query = config .native_queries .iter() - .find(|native_query| native_query.name == name); - native_query.ok_or(name).map(|nq| Job::new(nq, arguments)) + .find(|(native_query_name, _)| *native_query_name == &procedure_name); + native_query + .ok_or(procedure_name) + .map(|(_, nq)| Job::new(nq, arguments)) } }) .partition_result(); diff --git a/crates/mongodb-connector/src/schema.rs b/crates/mongodb-connector/src/schema.rs index d5f265e8..fe00ed7e 100644 --- a/crates/mongodb-connector/src/schema.rs +++ b/crates/mongodb-connector/src/schema.rs @@ -18,14 +18,14 @@ pub async fn get_schema( let functions = config .native_queries .iter() - .filter(|q| q.mode == native_queries::Mode::ReadOnly) + .filter(|(_, q)| q.mode == native_queries::Mode::ReadOnly) .map(native_query_to_function) .collect(); let procedures = config .native_queries .iter() - .filter(|q| q.mode == native_queries::Mode::ReadWrite) + .filter(|(_, q)| q.mode == native_queries::Mode::ReadWrite) .map(native_query_to_procedure) .collect(); @@ -38,9 +38,11 @@ pub async fn get_schema( }) } -fn map_object_type(object_type: &schema::ObjectType) -> (String, models::ObjectType) { +fn map_object_type( + (name, object_type): (&String, &schema::ObjectType), +) -> (String, models::ObjectType) { ( - object_type.name.clone(), + name.clone(), models::ObjectType { fields: map_field_infos(&object_type.fields), description: object_type.description.clone(), @@ -48,15 +50,17 @@ fn map_object_type(object_type: &schema::ObjectType) -> (String, models::ObjectT ) } -fn map_field_infos(fields: &[schema::ObjectField]) -> BTreeMap { +fn map_field_infos( + fields: &BTreeMap, +) -> BTreeMap { fields .iter() - .map(|f| { + .map(|(name, field)| { ( - f.name.clone(), + name.clone(), models::ObjectField { - r#type: map_type(&f.r#type), - description: f.description.clone(), + r#type: map_type(&field.r#type), + description: field.description.clone(), }, ) }) @@ -78,9 +82,9 @@ fn 
map_type(t: &schema::Type) -> models::Type { } } -fn map_collection(collection: &schema::Collection) -> models::CollectionInfo { +fn map_collection((name, collection): (&String, &schema::Collection)) -> models::CollectionInfo { models::CollectionInfo { - name: collection.name.clone(), + name: name.clone(), collection_type: collection.r#type.clone(), description: collection.description.clone(), arguments: Default::default(), @@ -90,13 +94,13 @@ fn map_collection(collection: &schema::Collection) -> models::CollectionInfo { } /// For read-only native queries -fn native_query_to_function(query: &NativeQuery) -> models::FunctionInfo { +fn native_query_to_function((query_name, query): (&String, &NativeQuery)) -> models::FunctionInfo { let arguments = query .arguments .iter() - .map(|field| { + .map(|(name, field)| { ( - field.name.clone(), + name.clone(), models::ArgumentInfo { argument_type: map_type(&field.r#type), description: field.description.clone(), @@ -105,7 +109,7 @@ fn native_query_to_function(query: &NativeQuery) -> models::FunctionInfo { }) .collect(); models::FunctionInfo { - name: query.name.clone(), + name: query_name.clone(), description: query.description.clone(), arguments, result_type: map_type(&query.result_type), @@ -113,13 +117,15 @@ fn native_query_to_function(query: &NativeQuery) -> models::FunctionInfo { } /// For read-write native queries -fn native_query_to_procedure(query: &NativeQuery) -> models::ProcedureInfo { +fn native_query_to_procedure( + (query_name, query): (&String, &NativeQuery), +) -> models::ProcedureInfo { let arguments = query .arguments .iter() - .map(|field| { + .map(|(name, field)| { ( - field.name.clone(), + name.clone(), models::ArgumentInfo { argument_type: map_type(&field.r#type), description: field.description.clone(), @@ -128,7 +134,7 @@ fn native_query_to_procedure(query: &NativeQuery) -> models::ProcedureInfo { }) .collect(); models::ProcedureInfo { - name: query.name.clone(), + name: query_name.clone(), description: 
query.description.clone(), arguments, result_type: map_type(&query.result_type), diff --git a/fixtures/connector/chinook/native_queries/hello.yaml b/fixtures/connector/chinook/native_queries/hello.yaml index e7b7a575..4a027c8f 100644 --- a/fixtures/connector/chinook/native_queries/hello.yaml +++ b/fixtures/connector/chinook/native_queries/hello.yaml @@ -1,11 +1,11 @@ name: hello description: Example of a read-only native query objectTypes: - - name: HelloResult + HelloResult: fields: - - name: ok + ok: type: !scalar int - - name: readOnly + readOnly: type: !scalar bool # There are more fields but you get the idea resultType: !object HelloResult diff --git a/fixtures/connector/chinook/native_queries/insert_artist.yaml b/fixtures/connector/chinook/native_queries/insert_artist.yaml index d6803340..cb04258b 100644 --- a/fixtures/connector/chinook/native_queries/insert_artist.yaml +++ b/fixtures/connector/chinook/native_queries/insert_artist.yaml @@ -1,11 +1,11 @@ name: insertArtist description: Example of a database update using a native query objectTypes: - - name: InsertArtist + InsertArtist: fields: - - name: ok + ok: type: !scalar int - - name: n + n: type: !scalar int resultType: !object InsertArtist # TODO: implement arguments instead of hard-coding inputs diff --git a/fixtures/connector/chinook/schema.json b/fixtures/connector/chinook/schema.json index 4c7ee983..b2c96ec0 100644 --- a/fixtures/connector/chinook/schema.json +++ b/fixtures/connector/chinook/schema.json @@ -1,742 +1,555 @@ { - "collections": [ - { - "name": "Invoice", - "type": "Invoice", - "description": null - }, - { - "name": "Track", - "type": "Track", + "collections": { + "Album": { + "type": "Album", "description": null }, - { - "name": "MediaType", - "type": "MediaType", + "Artist": { + "type": "Artist", "description": null }, - { - "name": "InvoiceLine", - "type": "InvoiceLine", + "Customer": { + "type": "Customer", "description": null }, - { - "name": "Employee", + "Employee": { "type": 
"Employee", "description": null }, - { - "name": "PlaylistTrack", - "type": "PlaylistTrack", + "Genre": { + "type": "Genre", "description": null }, - { - "name": "Album", - "type": "Album", + "Invoice": { + "type": "Invoice", "description": null }, - { - "name": "Genre", - "type": "Genre", + "InvoiceLine": { + "type": "InvoiceLine", "description": null }, - { - "name": "Artist", - "type": "Artist", + "MediaType": { + "type": "MediaType", "description": null }, - { - "name": "Playlist", + "Playlist": { "type": "Playlist", "description": null }, - { - "name": "Customer", - "type": "Customer", + "PlaylistTrack": { + "type": "PlaylistTrack", + "description": null + }, + "Track": { + "type": "Track", "description": null } - ], - "objectTypes": [ - { - "name": "Invoice", - "fields": [ - { - "name": "_id", + }, + "objectTypes": { + "Album": { + "fields": { + "AlbumId": { "type": { - "nullable": { - "scalar": "objectId" - } + "scalar": "int" }, "description": null }, - { - "name": "BillingAddress", + "ArtistId": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "int" }, "description": null }, - { - "name": "BillingCity", + "Title": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - "name": "BillingCountry", + "_id": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "objectId" }, "description": null - }, - { - "name": "BillingPostalCode", + } + }, + "description": null + }, + "Artist": { + "fields": { + "ArtistId": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "int" }, "description": null }, - { - "name": "BillingState", + "Name": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - "name": "CustomerId", + "_id": { "type": { - "scalar": "int" + "scalar": "objectId" + }, + "description": null + } + }, + "description": null + }, + "Customer": { + "fields": { + "Address": { + "type": { + "scalar": "string" }, 
"description": null }, - { - "name": "InvoiceDate", + "City": { "type": { "scalar": "string" }, "description": null }, - { - "name": "InvoiceId", + "Company": { "type": { - "scalar": "int" + "scalar": "string" }, "description": null }, - { - "name": "Total", + "Country": { "type": { - "scalar": "double" + "scalar": "string" }, "description": null - } - ], - "description": "Object type for collection Invoice" - }, - { - "name": "Track", - "fields": [ - { - "name": "_id", + }, + "CustomerId": { "type": { - "nullable": { - "scalar": "objectId" - } + "scalar": "int" }, "description": null }, - { - "name": "AlbumId", + "Email": { "type": { - "nullable": { - "scalar": "int" - } + "scalar": "string" }, "description": null }, - { - "name": "Bytes", + "Fax": { "type": { - "nullable": { - "scalar": "int" - } + "scalar": "string" }, "description": null }, - { - "name": "Composer", + "FirstName": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - "name": "GenreId", + "LastName": { "type": { - "nullable": { - "scalar": "int" - } + "scalar": "string" }, "description": null }, - { - "name": "MediaTypeId", + "Phone": { "type": { - "scalar": "int" + "scalar": "string" }, "description": null }, - { - "name": "Milliseconds", + "PostalCode": { "type": { - "scalar": "int" + "scalar": "string" }, "description": null }, - { - "name": "Name", + "State": { "type": { "scalar": "string" }, "description": null }, - { - "name": "TrackId", + "SupportRepId": { "type": { "scalar": "int" }, "description": null }, - { - "name": "UnitPrice", + "_id": { "type": { - "scalar": "double" + "scalar": "objectId" }, "description": null } - ], - "description": "Object type for collection Track" + }, + "description": null }, - { - "name": "MediaType", - "fields": [ - { - "name": "_id", + "Employee": { + "fields": { + "Address": { "type": { - "nullable": { - "scalar": "objectId" - } + "scalar": "string" }, "description": null }, - { - "name": 
"MediaTypeId", + "BirthDate": { "type": { - "scalar": "int" + "scalar": "string" }, "description": null }, - { - "name": "Name", + "City": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null - } - ], - "description": "Object type for collection MediaType" - }, - { - "name": "InvoiceLine", - "fields": [ - { - "name": "_id", + }, + "Country": { "type": { - "nullable": { - "scalar": "objectId" - } + "scalar": "string" }, "description": null }, - { - "name": "InvoiceId", + "Email": { "type": { - "scalar": "int" + "scalar": "string" }, "description": null }, - { - "name": "InvoiceLineId", + "EmployeeId": { "type": { "scalar": "int" }, "description": null }, - { - "name": "Quantity", + "Fax": { "type": { - "scalar": "int" + "scalar": "string" }, "description": null }, - { - "name": "TrackId", + "FirstName": { "type": { - "scalar": "int" + "scalar": "string" }, "description": null }, - { - "name": "UnitPrice", + "HireDate": { "type": { - "scalar": "double" + "scalar": "string" }, "description": null - } - ], - "description": "Object type for collection InvoiceLine" - }, - { - "name": "Employee", - "fields": [ - { - "name": "_id", + }, + "LastName": { "type": { - "nullable": { - "scalar": "objectId" - } + "scalar": "string" }, "description": null }, - { - "name": "Address", + "Phone": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - "name": "BirthDate", + "PostalCode": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - "name": "City", + "ReportsTo": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - "name": "Country", + "State": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - "name": "Email", + "Title": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - 
"name": "EmployeeId", + "_id": { "type": { - "scalar": "int" + "scalar": "objectId" }, "description": null - }, - { - "name": "Fax", + } + }, + "description": null + }, + "Genre": { + "fields": { + "GenreId": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "int" }, "description": null }, - { - "name": "FirstName", + "Name": { "type": { "scalar": "string" }, "description": null }, - { - "name": "HireDate", + "_id": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "objectId" }, "description": null - }, - { - "name": "LastName", + } + }, + "description": null + }, + "Invoice": { + "fields": { + "BillingAddress": { "type": { "scalar": "string" }, "description": null }, - { - "name": "Phone", + "BillingCity": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - "name": "PostalCode", + "BillingCountry": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - "name": "ReportsTo", + "BillingPostalCode": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - "name": "State", + "BillingState": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - "name": "Title", + "CustomerId": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "int" }, "description": null - } - ], - "description": "Object type for collection Employee" - }, - { - "name": "PlaylistTrack", - "fields": [ - { - "name": "_id", + }, + "InvoiceDate": { "type": { - "nullable": { - "scalar": "objectId" - } + "scalar": "string" }, "description": null }, - { - "name": "PlaylistId", + "InvoiceId": { "type": { "scalar": "int" }, "description": null }, - { - "name": "TrackId", + "Total": { "type": { - "scalar": "int" + "scalar": "double" }, "description": null - } - ], - "description": "Object type for collection PlaylistTrack" - }, - { - "name": "Album", - "fields": [ - { 
- "name": "_id", + }, + "_id": { "type": { - "nullable": { - "scalar": "objectId" - } + "scalar": "objectId" }, "description": null - }, - { - "name": "AlbumId", + } + }, + "description": null + }, + "InvoiceLine": { + "fields": { + "InvoiceId": { "type": { "scalar": "int" }, "description": null }, - { - "name": "ArtistId", + "InvoiceLineId": { "type": { "scalar": "int" }, "description": null }, - { - "name": "Title", + "Quantity": { "type": { - "scalar": "string" + "scalar": "int" }, "description": null - } - ], - "description": "Object type for collection Album" - }, - { - "name": "Genre", - "fields": [ - { - "name": "_id", + }, + "TrackId": { "type": { - "nullable": { - "scalar": "objectId" - } + "scalar": "int" }, "description": null }, - { - "name": "GenreId", + "UnitPrice": { "type": { - "scalar": "int" + "scalar": "double" }, "description": null }, - { - "name": "Name", + "_id": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "objectId" }, "description": null } - ], - "description": "Object type for collection Genre" + }, + "description": null }, - { - "name": "Artist", - "fields": [ - { - "name": "_id", + "MediaType": { + "fields": { + "MediaTypeId": { "type": { - "nullable": { - "scalar": "objectId" - } + "scalar": "int" }, "description": null }, - { - "name": "ArtistId", + "Name": { "type": { - "scalar": "int" + "scalar": "string" }, "description": null }, - { - "name": "Name", + "_id": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "objectId" }, "description": null } - ], - "description": "Object type for collection Artist" + }, + "description": null }, - { - "name": "Playlist", - "fields": [ - { - "name": "_id", + "Playlist": { + "fields": { + "Name": { "type": { - "nullable": { - "scalar": "objectId" - } + "scalar": "string" }, "description": null }, - { - "name": "Name", + "PlaylistId": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "int" }, "description": null }, - { - "name": "PlaylistId", + 
"_id": { "type": { - "scalar": "int" + "scalar": "objectId" }, "description": null } - ], - "description": "Object type for collection Playlist" + }, + "description": null }, - { - "name": "Customer", - "fields": [ - { - "name": "_id", - "type": { - "nullable": { - "scalar": "objectId" - } - }, - "description": null - }, - { - "name": "Address", + "PlaylistTrack": { + "fields": { + "PlaylistId": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "int" }, "description": null }, - { - "name": "City", + "TrackId": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "int" }, "description": null }, - { - "name": "Company", + "_id": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "objectId" }, "description": null - }, - { - "name": "Country", + } + }, + "description": null + }, + "Track": { + "fields": { + "AlbumId": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "int" }, "description": null }, - { - "name": "CustomerId", + "Bytes": { "type": { "scalar": "int" }, "description": null }, - { - "name": "Email", + "Composer": { "type": { "scalar": "string" }, "description": null }, - { - "name": "Fax", + "GenreId": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "int" }, "description": null }, - { - "name": "FirstName", + "MediaTypeId": { "type": { - "scalar": "string" + "scalar": "int" }, "description": null }, - { - "name": "LastName", + "Milliseconds": { "type": { - "scalar": "string" + "scalar": "int" }, "description": null }, - { - "name": "Phone", + "Name": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "string" }, "description": null }, - { - "name": "PostalCode", + "TrackId": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "int" }, "description": null }, - { - "name": "State", + "UnitPrice": { "type": { - "nullable": { - "scalar": "string" - } + "scalar": "double" }, "description": null }, - { - "name": "SupportRepId", + "_id": { "type": { - "nullable": { 
- "scalar": "int" - } + "scalar": "objectId" }, "description": null } - ], - "description": "Object type for collection Customer" + }, + "description": null } - ] + } } \ No newline at end of file