diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml index 9de14f14..b955295c 100644 --- a/.github/workflows/code-quality.yml +++ b/.github/workflows/code-quality.yml @@ -55,7 +55,7 @@ jobs: strategy: fail-fast: false matrix: - crate: [canyon_connection, canyon_crud, canyon_macros, canyon_observer] + crate: [canyon_connection, canyon_crud, canyon_macros, canyon_migrations] steps: - uses: actions/checkout@v3 diff --git a/Cargo.toml b/Cargo.toml index a0bba641..dcfb553f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,8 @@ description.workspace = true members = [ "canyon_connection", "canyon_crud", - "canyon_observer", + "canyon_entities", + "canyon_migrations", "canyon_macros", "tests" @@ -23,7 +24,8 @@ members = [ # Project crates canyon_connection = { workspace = true, path = "canyon_connection" } canyon_crud = { workspace = true, path = "canyon_crud" } -canyon_observer = { workspace = true, path = "canyon_observer" } +canyon_entities = { workspace = true, path = "canyon_entities" } +canyon_migrations = { workspace = true, path = "canyon_migrations", optional = true } canyon_macros = { workspace = true, path = "canyon_macros" } # To be marked as opt deps @@ -33,7 +35,8 @@ tiberius = { workspace = true, optional = true } [workspace.dependencies] canyon_crud = { version = "0.3.1", path = "canyon_crud" } canyon_connection = { version = "0.3.1", path = "canyon_connection" } -canyon_observer = { version = "0.3.1", path = "canyon_observer" } +canyon_entities = { version = "0.3.1", path = "canyon_entities" } +canyon_migrations = { version = "0.3.1", path = "canyon_migrations"} canyon_macros = { version = "0.3.1", path = "canyon_macros" } tokio = { version = "1.27.0", features = ["full"] } @@ -52,6 +55,7 @@ toml = "0.7.3" async-trait = "0.1.68" walkdir = "2.3.3" regex = "1.5" +partialdebug = "0.2.0" quote = "1.0.9" proc-macro2 = "1.0.27" @@ -59,7 +63,7 @@ proc-macro2 = "1.0.27" [workspace.package] version = "0.3.1" edition = "2021" -authors = ["Alex Vergara, Gonzalo Busto"] +authors = ["Alex Vergara, Gonzalo Busto Musi"] documentation = "https://zerodaycode.github.io/canyon-book/" homepage = "https://github.com/zerodaycode/Canyon-SQL" readme = "README.md" @@ -67,5 +71,6 @@ license = "MIT" description = "A Rust ORM and QueryBuilder" [features] -postgres = ["tokio-postgres", "canyon_connection/postgres", "canyon_crud/postgres", "canyon_observer/postgres", "canyon_macros/postgres"] -mssql = ["tiberius", "canyon_connection/mssql", "canyon_crud/mssql", "canyon_observer/mssql", "canyon_macros/mssql"] +postgres = ["tokio-postgres", "canyon_connection/postgres", "canyon_crud/postgres", "canyon_migrations/postgres", "canyon_macros/postgres"] +mssql = ["tiberius", "canyon_connection/mssql", "canyon_crud/mssql", "canyon_migrations/mssql", "canyon_macros/mssql"] +migrations = ["canyon_migrations", "canyon_macros/migrations"] diff --git a/bash_aliases.sh b/bash_aliases.sh index 64e2d931..aee09cd7 100644 --- a/bash_aliases.sh +++ b/bash_aliases.sh @@ -39,7 +39,7 @@ alias SqlServerInitializationLinux='cargo test initialize_sql_server_docker_inst # Publish Canyon-SQL to the registry with its dependencies -alias PublishCanyon='cargo publish -p canyon_connection && cargo publish -p canyon_crud && cargo publish -p canyon_observer && cargo publish -p canyon_macros && cargo publish -p canyon_sql_root' +alias PublishCanyon='cargo publish -p canyon_connection && cargo publish -p canyon_crud && cargo publish -p canyon_migrations && cargo publish -p canyon_macros && 
cargo publish -p canyon_sql_root'
 
 # Collects the code coverage for the project (tests must run before this)
 alias CcEnvVars='export CARGO_INCREMENTAL=0
diff --git a/canyon_entities/Cargo.toml b/canyon_entities/Cargo.toml
new file mode 100644
index 00000000..374e2e98
--- /dev/null
+++ b/canyon_entities/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "canyon_entities"
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+documentation.workspace = true
+homepage.workspace = true
+readme.workspace = true
+license.workspace = true
+description.workspace = true
+
+[dependencies]
+regex = { workspace = true }
+partialdebug = { workspace = true }
+quote = { workspace = true }
+proc-macro2 = { workspace = true }
+syn = { version = "1.0.86", features = ["full", "parsing"] } # TODO Pending to refactor and upgrade
diff --git a/canyon_observer/src/manager/entity.rs b/canyon_entities/src/entity.rs
similarity index 98%
rename from canyon_observer/src/manager/entity.rs
rename to canyon_entities/src/entity.rs
index 7aaeb38e..8604d0e8 100644
--- a/canyon_observer/src/manager/entity.rs
+++ b/canyon_entities/src/entity.rs
@@ -10,7 +10,7 @@ use syn::{
 use super::entity_fields::EntityField;
 
 /// Provides a convenient way of handling the data on any
-/// `CanyonEntity` struct anntotaded with the macro `#[canyon_entity]`
+/// `CanyonEntity` struct annotated with the macro `#[canyon_entity]`
 #[derive(PartialDebug, Clone)]
 pub struct CanyonEntity {
     pub struct_name: Ident,
diff --git a/canyon_observer/src/manager/entity_fields.rs b/canyon_entities/src/entity_fields.rs
similarity index 100%
rename from canyon_observer/src/manager/entity_fields.rs
rename to canyon_entities/src/entity_fields.rs
diff --git a/canyon_observer/src/manager/field_annotation.rs b/canyon_entities/src/field_annotation.rs
similarity index 100%
rename from canyon_observer/src/manager/field_annotation.rs
rename to canyon_entities/src/field_annotation.rs
diff --git a/canyon_entities/src/lib.rs b/canyon_entities/src/lib.rs
new file mode 100644
index 00000000..8b3abd6c
--- /dev/null
+++ b/canyon_entities/src/lib.rs
@@ -0,0 +1,11 @@
+use crate::register_types::CanyonRegisterEntity;
+use std::sync::Mutex;
+
+pub mod entity;
+pub mod entity_fields;
+pub mod field_annotation;
+pub mod manager_builder;
+pub mod register_types;
+
+pub static CANYON_REGISTER_ENTITIES: Mutex<Vec<CanyonRegisterEntity<'static>>> =
+    Mutex::new(Vec::new());
diff --git a/canyon_observer/src/manager/manager_builder.rs b/canyon_entities/src/manager_builder.rs
similarity index 100%
rename from canyon_observer/src/manager/manager_builder.rs
rename to canyon_entities/src/manager_builder.rs
diff --git a/canyon_entities/src/register_types.rs b/canyon_entities/src/register_types.rs
new file mode 100644
index 00000000..45cd1b8d
--- /dev/null
+++ b/canyon_entities/src/register_types.rs
@@ -0,0 +1,45 @@
+/// This file contains the `Rust` types that represent an entry on the `CanyonRegister`,
+/// where `Canyon` tracks the user types that it has to manage
+
+pub const NUMERIC_PK_DATATYPE: [&str; 6] = ["i16", "u16", "i32", "u32", "i64", "u64"];
+
+/// Gets the necessary identifiers of a CanyonEntity to run the comparison
+/// against the database schemas
+#[derive(Debug, Clone, Default)]
+pub struct CanyonRegisterEntity<'a> {
+    pub entity_name: &'a str,
+    pub entity_db_table_name: &'a str,
+    pub user_schema_name: Option<&'a str>,
+    pub entity_fields: Vec<CanyonRegisterEntityField>,
+}
+
+/// Complementary type that represents a struct field mapping
+/// some real database column data
+#[derive(Debug, Clone, Default)]
+pub struct CanyonRegisterEntityField {
+    pub field_name: String,
+    pub field_type: String,
+    pub annotations: Vec<String>,
+}
+
+impl CanyonRegisterEntityField {
+    /// Returns whether the field is autoincremental
+    pub fn is_autoincremental(&self) -> bool {
+        let has_pk_annotation = self
+            .annotations
+            .iter()
+            .find(|a| a.starts_with("Annotation: PrimaryKey"));
+
+        let pk_is_autoincremental = match has_pk_annotation {
+            Some(annotation) => annotation.contains("true"),
+            None => false,
+        };
+
+        NUMERIC_PK_DATATYPE.contains(&self.field_type.as_str()) && pk_is_autoincremental
+    }
+
+    /// Returns the nullability of the field
+    pub fn is_nullable(&self) -> bool {
+        self.field_type.to_uppercase().starts_with("OPTION")
+    }
+}
diff --git a/canyon_macros/Cargo.toml b/canyon_macros/Cargo.toml
index 82d336f5..763fde8d 100755
--- a/canyon_macros/Cargo.toml
+++ b/canyon_macros/Cargo.toml
@@ -19,10 +19,12 @@ proc-macro2 = { workspace = true }
 futures = { workspace = true }
 tokio = { workspace = true }
 
-canyon_observer = { workspace = true }
-canyon_crud = { workspace = true }
 canyon_connection = { workspace = true }
+canyon_crud = { workspace = true }
+canyon_entities = { workspace = true }
+canyon_migrations = { workspace = true, optional = true }
 
 [features]
-postgres = ["canyon_connection/postgres", "canyon_crud/postgres", "canyon_observer/postgres"]
-mssql = ["canyon_connection/mssql", "canyon_crud/mssql", "canyon_observer/mssql"]
+postgres = ["canyon_connection/postgres", "canyon_crud/postgres", "canyon_migrations/postgres"]
+mssql = ["canyon_connection/mssql", "canyon_crud/mssql", "canyon_migrations/mssql"]
+migrations = ["canyon_migrations"]
diff --git a/canyon_macros/src/canyon_macro.rs b/canyon_macros/src/canyon_macro.rs
index 1424de92..48c89fcc 100644
--- a/canyon_macros/src/canyon_macro.rs
+++ b/canyon_macros/src/canyon_macro.rs
@@ -1,112 +1,32 @@ //! 
Provides helpers to build the `#[canyon_macros::canyon]` procedural like attribute macro -use proc_macro::TokenStream as TokenStream1; -use proc_macro2::{Ident, TokenStream}; - +use canyon_connection::CANYON_TOKIO_RUNTIME; +use canyon_migrations::migrations::handler::Migrations; +use canyon_migrations::{CM_QUERIES_TO_EXECUTE, QUERIES_TO_EXECUTE}; +use proc_macro2::TokenStream; use quote::quote; -use canyon_observer::{CM_QUERIES_TO_EXECUTE, QUERIES_TO_EXECUTE}; -use syn::{Lit, NestedMeta}; - -#[derive(Debug)] -/// Utilery struct for wrapping the content and result of parsing the attributes on the `canyon` macro -pub struct CanyonMacroAttributes { - pub allowed_migrations: bool, - pub error: Option, -} - -/// Parses the [`syn::NestedMeta::Meta`] or [`syn::NestedMeta::Lit`] attached to the `canyon` macro -pub fn parse_canyon_macro_attributes(_meta: &Vec) -> CanyonMacroAttributes { - let mut res = CanyonMacroAttributes { - allowed_migrations: false, - error: None, - }; - - for nested_meta in _meta { - match nested_meta { - syn::NestedMeta::Meta(m) => determine_allowed_attributes(m, &mut res), - syn::NestedMeta::Lit(lit) => match lit { - syn::Lit::Str(ref l) => { - res.error = Some(report_literals_not_allowed(&l.value(), lit)) - } - syn::Lit::ByteStr(ref l) => { - res.error = Some(report_literals_not_allowed( - &String::from_utf8_lossy(&l.value()), - lit, - )) - } - syn::Lit::Byte(ref l) => { - res.error = Some(report_literals_not_allowed(&l.value().to_string(), lit)) - } - syn::Lit::Char(ref l) => { - res.error = Some(report_literals_not_allowed(&l.value().to_string(), lit)) - } - syn::Lit::Int(ref l) => { - res.error = Some(report_literals_not_allowed(&l.to_string(), lit)) - } - syn::Lit::Float(ref l) => { - res.error = Some(report_literals_not_allowed(&l.to_string(), lit)) - } - syn::Lit::Bool(ref l) => { - res.error = Some(report_literals_not_allowed(&l.value().to_string(), lit)) - } - syn::Lit::Verbatim(ref l) => { - res.error = Some(report_literals_not_allowed(&l.to_string(), lit)) - } - }, - } - } - - res -} - -/// Determines whenever a [`syn::NestedMeta::Meta`] it's classified as a valid argument of the `canyon` macro -fn determine_allowed_attributes(meta: &syn::Meta, cma: &mut CanyonMacroAttributes) { - const ALLOWED_ATTRS: [&str; 1] = ["enable_migrations"]; - - let attr_ident = meta.path().get_ident().unwrap(); - let attr_ident_str = attr_ident.to_string(); - - if attr_ident_str.as_str() == "enable_migrations" { - cma.allowed_migrations = true; - } else { - let error = syn::Error::new_spanned( - Ident::new(&attr_ident_str, attr_ident.span()), - format!( - "No `{attr_ident_str}` arguments allowed in the `Canyon` macro attributes.\n\ - Allowed ones are: {ALLOWED_ATTRS:?}" - ), - ) - .into_compile_error(); - cma.error = Some( - quote! { - #error - fn main() {} - } - .into(), - ) - } -} - -/// Creates a custom error for report not allowed literals on the attribute -/// args of the `canyon` proc macro -fn report_literals_not_allowed(ident: &str, s: &Lit) -> TokenStream1 { - let error = syn::Error::new_spanned( - Ident::new(ident, s.span()), - "No literals allowed in the `Canyon` macro", - ) - .into_compile_error(); +#[cfg(feature = "migrations")] +pub fn main_with_queries() -> TokenStream { + CANYON_TOKIO_RUNTIME.block_on(async { + canyon_connection::init_connections_cache().await; + Migrations::migrate().await; + }); + // The queries to execute at runtime in the managed state + let mut queries_tokens: Vec = Vec::new(); + wire_queries_to_execute(&mut queries_tokens); quote! 
{ - #error - fn main() {} + { + #(#queries_tokens)* + } } - .into() } /// Creates a TokenScream that is used to load the data generated at compile-time /// by the `CanyonManaged` macros again on the queries register -pub fn wire_queries_to_execute(canyon_manager_tokens: &mut Vec) { +#[cfg(feature = "migrations")] +fn wire_queries_to_execute(canyon_manager_tokens: &mut Vec) { let cm_data = CM_QUERIES_TO_EXECUTE.lock().unwrap(); let data = QUERIES_TO_EXECUTE.lock().unwrap(); diff --git a/canyon_macros/src/lib.rs b/canyon_macros/src/lib.rs index ce03cc58..160f6ece 100755 --- a/canyon_macros/src/lib.rs +++ b/canyon_macros/src/lib.rs @@ -1,17 +1,20 @@ extern crate proc_macro; mod canyon_entity_macro; +#[cfg(feature = "migrations")] mod canyon_macro; mod query_operations; mod utils; -use canyon_connection::CANYON_TOKIO_RUNTIME; use canyon_entity_macro::parse_canyon_entity_proc_macro_attr; use proc_macro::TokenStream as CompilerTokenStream; use proc_macro2::{Ident, TokenStream}; use quote::{quote, ToTokens}; use syn::{DeriveInput, Fields, Type, Visibility}; +#[cfg(feature = "migrations")] +use canyon_macro::main_with_queries; + use query_operations::{ delete::{generate_delete_query_tokens, generate_delete_tokens}, insert::{generate_insert_tokens, generate_multiple_insert_tokens}, @@ -22,22 +25,14 @@ use query_operations::{ }, update::{generate_update_query_tokens, generate_update_tokens}, }; - -use canyon_macro::{parse_canyon_macro_attributes, wire_queries_to_execute}; use utils::{function_parser::FunctionParser, helpers, macro_tokens::MacroTokens}; -use canyon_observer::{ - manager::{ - entity::CanyonEntity, - manager_builder::{ - generate_enum_with_fields, generate_enum_with_fields_values, generate_user_struct, - }, +use canyon_entities::{ + entity::CanyonEntity, + manager_builder::{ + generate_enum_with_fields, generate_enum_with_fields_values, generate_user_struct, }, - migrations::handler::Migrations, -}; - -use canyon_observer::{ - migrations::register_types::{CanyonRegisterEntity, CanyonRegisterEntityField}, + register_types::{CanyonRegisterEntity, CanyonRegisterEntityField}, CANYON_REGISTER_ENTITIES, }; @@ -51,15 +46,6 @@ use canyon_observer::{ /// the necessary operations for the migrations #[proc_macro_attribute] pub fn main(_meta: CompilerTokenStream, input: CompilerTokenStream) -> CompilerTokenStream { - let attrs = syn::parse_macro_input!(_meta as syn::AttributeArgs); - - // Parses the attributes declared in the arguments of this proc macro - let attrs_parse_result = parse_canyon_macro_attributes(&attrs); - if attrs_parse_result.error.is_some() { - return attrs_parse_result.error.unwrap(); - } - - // Parses the function items that this attribute is attached to let func_res = syn::parse::(input); if func_res.is_err() { return quote! { fn main() {} }.into(); @@ -70,46 +56,27 @@ pub fn main(_meta: CompilerTokenStream, input: CompilerTokenStream) -> CompilerT let sign = func.sig; let body = func.block.stmts; - if attrs_parse_result.allowed_migrations { - CANYON_TOKIO_RUNTIME.block_on(async { - canyon_connection::init_connections_cache().await; - Migrations::migrate().await; - }); - - // The queries to execute at runtime in the managed state - let mut queries_tokens: Vec = Vec::new(); - wire_queries_to_execute(&mut queries_tokens); + #[allow(unused_mut, unused_assignments)] + let mut migrations_tokens = quote! {}; + #[cfg(feature = "migrations")] + { + migrations_tokens = main_with_queries(); + } - // The final code wired in main() - quote! 
{ - #sign { - canyon_sql::runtime::CANYON_TOKIO_RUNTIME - .handle() - .block_on( async { - canyon_sql::runtime::init_connections_cache().await; - { - #(#queries_tokens)* - } - #(#body)* - } - ) - } - } - .into() - } else { - quote! { - #sign { - canyon_sql::runtime::CANYON_TOKIO_RUNTIME + // The final code wired in main() + quote! { + #sign { + canyon_sql::runtime::CANYON_TOKIO_RUNTIME .handle() .block_on( async { - canyon_sql::runtime::init_connections_cache().await; - #(#body)* - } - ) - } + canyon_sql::runtime::init_connections_cache().await; + #migrations_tokens + #(#body)* + } + ) } - .into() } + .into() } #[proc_macro_attribute] diff --git a/canyon_macros/src/query_operations/select.rs b/canyon_macros/src/query_operations/select.rs index 0f70ab4d..5a5a4e15 100644 --- a/canyon_macros/src/query_operations/select.rs +++ b/canyon_macros/src/query_operations/select.rs @@ -1,4 +1,4 @@ -use canyon_observer::manager::field_annotation::EntityFieldAnnotation; +use canyon_entities::field_annotation::EntityFieldAnnotation; use proc_macro2::TokenStream; use quote::quote; diff --git a/canyon_macros/src/utils/macro_tokens.rs b/canyon_macros/src/utils/macro_tokens.rs index 370fbeea..29de0467 100644 --- a/canyon_macros/src/utils/macro_tokens.rs +++ b/canyon_macros/src/utils/macro_tokens.rs @@ -1,6 +1,6 @@ use std::convert::TryFrom; -use canyon_observer::manager::field_annotation::EntityFieldAnnotation; +use canyon_entities::field_annotation::EntityFieldAnnotation; use proc_macro2::Ident; use syn::{Attribute, DeriveInput, Fields, Generics, Type, Visibility}; diff --git a/canyon_observer/Cargo.toml b/canyon_migrations/Cargo.toml similarity index 89% rename from canyon_observer/Cargo.toml rename to canyon_migrations/Cargo.toml index 0f939b2c..ba353b76 100644 --- a/canyon_observer/Cargo.toml +++ b/canyon_migrations/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "canyon_observer" +name = "canyon_migrations" version.workspace = true edition.workspace = true authors.workspace = true @@ -12,14 +12,17 @@ description.workspace = true [dependencies] canyon_crud = { workspace = true } canyon_connection = { workspace = true } +canyon_entities = { workspace = true } + tokio = { workspace = true } tokio-postgres = { workspace = true, optional = true } tiberius = { workspace = true, optional = true } + async-trait = { workspace = true } regex = { workspace = true } +partialdebug = { workspace = true } walkdir = { workspace = true } -partialdebug = "0.2.0" proc-macro2 = { workspace = true } quote = { workspace = true } syn = { version = "1.0.86", features = ["full", "parsing"] } # TODO Pending to refactor and upgrade diff --git a/canyon_observer/src/constants.rs b/canyon_migrations/src/constants.rs similarity index 99% rename from canyon_observer/src/constants.rs rename to canyon_migrations/src/constants.rs index 3928da4f..9f025762 100644 --- a/canyon_observer/src/constants.rs +++ b/canyon_migrations/src/constants.rs @@ -1,5 +1,3 @@ -pub const NUMERIC_PK_DATATYPE: [&str; 6] = ["i16", "u16", "i32", "u32", "i64", "u64"]; - #[cfg(feature = "postgres")] pub mod postgresql_queries { pub static CANYON_MEMORY_TABLE: &str = "CREATE TABLE IF NOT EXISTS canyon_memory ( diff --git a/canyon_observer/src/lib.rs b/canyon_migrations/src/lib.rs similarity index 87% rename from canyon_observer/src/lib.rs rename to canyon_migrations/src/lib.rs index 41e0dd42..5743cc8b 100644 --- a/canyon_observer/src/lib.rs +++ b/canyon_migrations/src/lib.rs @@ -13,16 +13,13 @@ pub mod migrations; extern crate canyon_connection; extern crate 
canyon_crud;
+extern crate canyon_entities;
 
 mod constants;
-pub mod manager;
 
-use crate::migrations::register_types::CanyonRegisterEntity;
 use canyon_connection::lazy_static::lazy_static;
 use std::{collections::HashMap, sync::Mutex};
 
-pub static CANYON_REGISTER_ENTITIES: Mutex<Vec<CanyonRegisterEntity<'static>>> =
-    Mutex::new(Vec::new());
 
 lazy_static! {
     pub static ref QUERIES_TO_EXECUTE: Mutex>> = Mutex::new(HashMap::new());
diff --git a/canyon_observer/src/migrations/handler.rs b/canyon_migrations/src/migrations/handler.rs
similarity index 99%
rename from canyon_observer/src/migrations/handler.rs
rename to canyon_migrations/src/migrations/handler.rs
index 9ce3c4e8..24dfb1c4 100644
--- a/canyon_observer/src/migrations/handler.rs
+++ b/canyon_migrations/src/migrations/handler.rs
@@ -1,5 +1,6 @@
 use canyon_connection::{datasources::Migrations as MigrationsStatus, DATASOURCES};
 use canyon_crud::rows::CanyonRows;
+use canyon_entities::CANYON_REGISTER_ENTITIES;
 use partialdebug::placeholder::PartialDebug;
 
 use crate::{
@@ -14,7 +15,6 @@ use crate::{
         memory::CanyonMemory,
         processor::MigrationsProcessor,
     },
-    CANYON_REGISTER_ENTITIES,
 };
 
 #[derive(PartialDebug)]
diff --git a/canyon_observer/src/migrations/information_schema.rs b/canyon_migrations/src/migrations/information_schema.rs
similarity index 100%
rename from canyon_observer/src/migrations/information_schema.rs
rename to canyon_migrations/src/migrations/information_schema.rs
diff --git a/canyon_observer/src/migrations/memory.rs b/canyon_migrations/src/migrations/memory.rs
similarity index 99%
rename from canyon_observer/src/migrations/memory.rs
rename to canyon_migrations/src/migrations/memory.rs
index 18f6eb31..1d822fd1 100644
--- a/canyon_observer/src/migrations/memory.rs
+++ b/canyon_migrations/src/migrations/memory.rs
@@ -5,7 +5,7 @@ use std::collections::HashMap;
 use std::fs;
 use walkdir::WalkDir;
 
-use super::register_types::CanyonRegisterEntity;
+use canyon_entities::register_types::CanyonRegisterEntity;
 
 /// Convenient struct that contains the necessary data and operations to implement
 /// the `Canyon Memory`. 
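After this refactor the entity register lives in `canyon_entities` rather than in the old `canyon_observer` crate: `CANYON_REGISTER_ENTITIES` is a process-wide `Mutex<Vec<CanyonRegisterEntity<'static>>>`, and the migrations handler above now imports it instead of declaring its own static. As an illustration only (the `print_tracked_entities` helper below is hypothetical and not part of this diff), a consumer crate such as `canyon_migrations` might read the shared register along these lines:

    // Illustrative sketch: reading the register that canyon_entities now owns.
    use canyon_entities::CANYON_REGISTER_ENTITIES;

    fn print_tracked_entities() {
        // The entity macros fill this register; lock the Mutex to inspect the tracked types.
        let register = CANYON_REGISTER_ENTITIES.lock().unwrap();
        for entity in register.iter() {
            println!(
                "{} -> table '{}' ({} fields)",
                entity.entity_name,
                entity.entity_db_table_name,
                entity.entity_fields.len()
            );
        }
    }
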
diff --git a/canyon_observer/src/migrations/mod.rs b/canyon_migrations/src/migrations/mod.rs similarity index 76% rename from canyon_observer/src/migrations/mod.rs rename to canyon_migrations/src/migrations/mod.rs index 525cbc10..1b139fdd 100644 --- a/canyon_observer/src/migrations/mod.rs +++ b/canyon_migrations/src/migrations/mod.rs @@ -2,4 +2,4 @@ pub mod handler; pub mod information_schema; pub mod memory; pub mod processor; -pub mod register_types; +pub mod transforms; diff --git a/canyon_observer/src/migrations/processor.rs b/canyon_migrations/src/migrations/processor.rs similarity index 97% rename from canyon_observer/src/migrations/processor.rs rename to canyon_migrations/src/migrations/processor.rs index b096b828..425c1b0d 100644 --- a/canyon_observer/src/migrations/processor.rs +++ b/canyon_migrations/src/migrations/processor.rs @@ -13,7 +13,11 @@ use crate::save_migrations_query_to_execute; use super::information_schema::{ColumnMetadata, TableMetadata}; use super::memory::CanyonMemory; -use super::register_types::{CanyonRegisterEntity, CanyonRegisterEntityField}; +#[cfg(feature = "postgres")] +use crate::migrations::transforms::{to_postgres_alter_syntax, to_postgres_syntax}; +#[cfg(feature = "mssql")] +use crate::migrations::transforms::{to_sqlserver_alter_syntax, to_sqlserver_syntax}; +use canyon_entities::register_types::{CanyonRegisterEntity, CanyonRegisterEntityField}; /// Responsible of generating the queries to sync the database status with the /// Rust source code managed by Canyon, for successfully make the migrations @@ -661,9 +665,7 @@ impl MigrationsHelper { #[cfg(feature = "postgres")] { if db_type == DatabaseType::PostgreSql { - return canyon_register_entity_field - .to_postgres_alter_syntax() - .to_lowercase() + return to_postgres_alter_syntax(canyon_register_entity_field).to_lowercase() == current_column_metadata.datatype; } } @@ -671,9 +673,7 @@ impl MigrationsHelper { { if db_type == DatabaseType::SqlServer { // TODO Search a better way to get the datatype without useless info (like "VARCHAR(MAX)") - return canyon_register_entity_field - .to_sqlserver_alter_syntax() - .to_lowercase() + return to_sqlserver_alter_syntax(canyon_register_entity_field).to_lowercase() == current_column_metadata.datatype; } } @@ -786,7 +786,7 @@ impl DatabaseOperation for TableOperation { .map(|entity_field| format!( "\"{}\" {}", entity_field.field_name, - entity_field.to_postgres_syntax() + to_postgres_syntax(entity_field) )) .collect::>() .join(", ") @@ -801,7 +801,7 @@ impl DatabaseOperation for TableOperation { .map(|entity_field| format!( "{} {}", entity_field.field_name, - entity_field.to_sqlserver_syntax() + to_sqlserver_syntax(entity_field) )) .collect::>() .join(", ") @@ -924,14 +924,14 @@ impl DatabaseOperation for ColumnOperation { "ALTER TABLE \"{}\" ADD COLUMN \"{}\" {};", table_name, entity_field.field_name, - entity_field.to_postgres_syntax() + to_postgres_syntax(entity_field) ), #[cfg(feature = "mssql")] DatabaseType::SqlServer => format!( "ALTER TABLE {} ADD \"{}\" {};", table_name, entity_field.field_name, - entity_field.to_sqlserver_syntax() + to_sqlserver_syntax(entity_field) ) } ColumnOperation::DeleteColumn(table_name, column_name) => { @@ -943,7 +943,7 @@ impl DatabaseOperation for ColumnOperation { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( "ALTER TABLE \"{_table_name}\" ALTER COLUMN \"{}\" TYPE {};", - _entity_field.field_name, _entity_field.to_postgres_alter_syntax() + _entity_field.field_name, to_postgres_alter_syntax(_entity_field) ), 
#[cfg(feature = "mssql")] DatabaseType::SqlServer => todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") @@ -955,7 +955,7 @@ impl DatabaseOperation for ColumnOperation { #[cfg(feature = "mssql")] DatabaseType::SqlServer => format!( "ALTER TABLE \"{table_name}\" ALTER COLUMN {} {} NULL", - entity_field.field_name, entity_field.to_sqlserver_alter_syntax() + entity_field.field_name, to_sqlserver_alter_syntax(entity_field) ) } #[cfg(feature = "mssql")] ColumnOperation::DropNotNullBeforeDropColumn(table_name, column_name, column_datatype) => @@ -981,7 +981,7 @@ impl DatabaseOperation for ColumnOperation { #[cfg(feature = "mssql")] DatabaseType::SqlServer => format!( "ALTER TABLE \"{table_name}\" ALTER COLUMN {} {} NOT NULL", entity_field.field_name, - entity_field.to_sqlserver_alter_syntax() + to_sqlserver_alter_syntax(entity_field) ) } } diff --git a/canyon_migrations/src/migrations/transforms.rs b/canyon_migrations/src/migrations/transforms.rs new file mode 100644 index 00000000..6d14e478 --- /dev/null +++ b/canyon_migrations/src/migrations/transforms.rs @@ -0,0 +1,179 @@ +#[cfg(feature = "postgres")] +use crate::constants::postgresql_type; +#[cfg(feature = "mssql")] +use crate::constants::sqlserver_type; +use crate::constants::{regex_patterns, rust_type}; + +use canyon_entities::register_types::CanyonRegisterEntityField; +use regex::Regex; + +/// Return the postgres datatype and parameters to create a column for a given rust type +#[cfg(feature = "postgres")] +pub fn to_postgres_syntax(field: &CanyonRegisterEntityField) -> String { + let rust_type_clean = field.field_type.replace(' ', ""); + + match rust_type_clean.as_str() { + rust_type::I8 | rust_type::U8 => { + String::from(&format!("{} NOT NULL", postgresql_type::INTEGER)) + } + rust_type::OPT_I8 | rust_type::OPT_U8 => String::from(postgresql_type::INTEGER), + + rust_type::I16 | rust_type::U16 => { + String::from(&format!("{} NOT NULL", postgresql_type::INTEGER)) + } + rust_type::OPT_I16 | rust_type::OPT_U16 => String::from(postgresql_type::INTEGER), + + rust_type::I32 | rust_type::U32 => { + String::from(&format!("{} NOT NULL", postgresql_type::INTEGER)) + } + rust_type::OPT_I32 | rust_type::OPT_U32 => String::from(postgresql_type::INTEGER), + + rust_type::I64 | rust_type::U64 => { + String::from(&format!("{} NOT NULL", postgresql_type::BIGINT)) + } + rust_type::OPT_I64 | rust_type::OPT_U64 => String::from(postgresql_type::BIGINT), + + rust_type::STRING => String::from(&format!("{} NOT NULL", postgresql_type::TEXT)), + rust_type::OPT_STRING => String::from(postgresql_type::TEXT), + + rust_type::BOOL => String::from(&format!("{} NOT NULL", postgresql_type::BOOLEAN)), + rust_type::OPT_BOOL => String::from(postgresql_type::BOOLEAN), + + rust_type::NAIVE_DATE => String::from(&format!("{} NOT NULL", postgresql_type::DATE)), + rust_type::OPT_NAIVE_DATE => String::from(postgresql_type::DATE), + + rust_type::NAIVE_TIME => String::from(&format!("{} NOT NULL", postgresql_type::TIME)), + rust_type::OPT_NAIVE_TIME => String::from(postgresql_type::TIME), + + rust_type::NAIVE_DATE_TIME => { + String::from(&format!("{} NOT NULL", postgresql_type::DATETIME)) + } + rust_type::OPT_NAIVE_DATE_TIME => String::from(postgresql_type::DATETIME), + &_ => todo!("Not supported datatype for this migrations version"), + } +} + +/// Return the postgres datatype and parameters to create a column for a given rust type +/// for Microsoft SQL Server +#[cfg(feature = "mssql")] +pub fn to_sqlserver_syntax(field: &CanyonRegisterEntityField) -> 
String { + let rust_type_clean = field.field_type.replace(' ', ""); + + match rust_type_clean.as_str() { + rust_type::I8 | rust_type::U8 => String::from(&format!("{} NOT NULL", sqlserver_type::INT)), + rust_type::OPT_I8 | rust_type::OPT_U8 => String::from(sqlserver_type::INT), + + rust_type::I16 | rust_type::U16 => { + String::from(&format!("{} NOT NULL", sqlserver_type::INT)) + } + rust_type::OPT_I16 | rust_type::OPT_U16 => String::from(sqlserver_type::INT), + + rust_type::I32 | rust_type::U32 => { + String::from(&format!("{} NOT NULL", sqlserver_type::INT)) + } + rust_type::OPT_I32 | rust_type::OPT_U32 => String::from(sqlserver_type::INT), + + rust_type::I64 | rust_type::U64 => { + String::from(&format!("{} NOT NULL", sqlserver_type::BIGINT)) + } + rust_type::OPT_I64 | rust_type::OPT_U64 => String::from(sqlserver_type::BIGINT), + + rust_type::STRING => { + String::from(&format!("{} NOT NULL DEFAULT ''", sqlserver_type::NVARCHAR)) + } + rust_type::OPT_STRING => String::from(sqlserver_type::NVARCHAR), + + rust_type::BOOL => String::from(&format!("{} NOT NULL", sqlserver_type::BIT)), + rust_type::OPT_BOOL => String::from(sqlserver_type::BIT), + + rust_type::NAIVE_DATE => String::from(&format!("{} NOT NULL", sqlserver_type::DATE)), + rust_type::OPT_NAIVE_DATE => String::from(sqlserver_type::DATE), + + rust_type::NAIVE_TIME => String::from(&format!("{} NOT NULL", sqlserver_type::TIME)), + rust_type::OPT_NAIVE_TIME => String::from(sqlserver_type::TIME), + + rust_type::NAIVE_DATE_TIME => { + String::from(&format!("{} NOT NULL", sqlserver_type::DATETIME)) + } + rust_type::OPT_NAIVE_DATE_TIME => String::from(sqlserver_type::DATETIME), + &_ => todo!("Not supported datatype for this migrations version"), + } +} + +#[cfg(feature = "postgres")] +pub fn to_postgres_alter_syntax(field: &CanyonRegisterEntityField) -> String { + let mut rust_type_clean = field.field_type.replace(' ', ""); + let rs_type_is_optional = field.field_type.to_uppercase().starts_with("OPTION"); + + if rs_type_is_optional { + let type_regex = Regex::new(regex_patterns::EXTRACT_RUST_OPT_REGEX).unwrap(); + let capture_rust_type = type_regex.captures(rust_type_clean.as_str()).unwrap(); + rust_type_clean = capture_rust_type + .name("rust_type") + .unwrap() + .as_str() + .to_string(); + } + + match rust_type_clean.as_str() { + rust_type::I8 | rust_type::U8 | rust_type::OPT_I8 | rust_type::OPT_U8 => { + String::from(postgresql_type::INT_8) + } + rust_type::I16 | rust_type::U16 | rust_type::OPT_I16 | rust_type::OPT_U16 => { + String::from(postgresql_type::SMALL_INT) + } + rust_type::I32 | rust_type::U32 | rust_type::OPT_I32 | rust_type::OPT_U32 => { + String::from(postgresql_type::INTEGER) + } + rust_type::I64 | rust_type::U64 | rust_type::OPT_I64 | rust_type::OPT_U64 => { + String::from(postgresql_type::BIGINT) + } + rust_type::STRING | rust_type::OPT_STRING => String::from(postgresql_type::TEXT), + rust_type::BOOL | rust_type::OPT_BOOL => String::from(postgresql_type::BOOLEAN), + rust_type::NAIVE_DATE | rust_type::OPT_NAIVE_DATE => String::from(postgresql_type::DATE), + rust_type::NAIVE_TIME | rust_type::OPT_NAIVE_TIME => String::from(postgresql_type::TIME), + rust_type::NAIVE_DATE_TIME | rust_type::OPT_NAIVE_DATE_TIME => { + String::from(postgresql_type::DATETIME) + } + &_ => todo!("Not supported datatype for this migrations version"), + } +} + +#[cfg(feature = "mssql")] +pub fn to_sqlserver_alter_syntax(field: &CanyonRegisterEntityField) -> String { + let mut rust_type_clean = field.field_type.replace(' ', ""); + let 
rs_type_is_optional = field.field_type.to_uppercase().starts_with("OPTION"); + + if rs_type_is_optional { + let type_regex = Regex::new(regex_patterns::EXTRACT_RUST_OPT_REGEX).unwrap(); + let capture_rust_type = type_regex.captures(rust_type_clean.as_str()).unwrap(); + rust_type_clean = capture_rust_type + .name("rust_type") + .unwrap() + .as_str() + .to_string(); + } + + match rust_type_clean.as_str() { + rust_type::I8 | rust_type::U8 | rust_type::OPT_I8 | rust_type::OPT_U8 => { + String::from(sqlserver_type::TINY_INT) + } + rust_type::I16 | rust_type::U16 | rust_type::OPT_I16 | rust_type::OPT_U16 => { + String::from(sqlserver_type::SMALL_INT) + } + rust_type::I32 | rust_type::U32 | rust_type::OPT_I32 | rust_type::OPT_U32 => { + String::from(sqlserver_type::INT) + } + rust_type::I64 | rust_type::U64 | rust_type::OPT_I64 | rust_type::OPT_U64 => { + String::from(sqlserver_type::BIGINT) + } + rust_type::STRING | rust_type::OPT_STRING => String::from(sqlserver_type::NVARCHAR), + rust_type::BOOL | rust_type::OPT_BOOL => String::from(sqlserver_type::BIT), + rust_type::NAIVE_DATE | rust_type::OPT_NAIVE_DATE => String::from(sqlserver_type::DATE), + rust_type::NAIVE_TIME | rust_type::OPT_NAIVE_TIME => String::from(sqlserver_type::TIME), + rust_type::NAIVE_DATE_TIME | rust_type::OPT_NAIVE_DATE_TIME => { + String::from(sqlserver_type::DATETIME) + } + &_ => todo!("Not supported datatype for this migrations version"), + } +} diff --git a/canyon_observer/src/manager/mod.rs b/canyon_observer/src/manager/mod.rs deleted file mode 100644 index eca614b8..00000000 --- a/canyon_observer/src/manager/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod entity; -pub mod entity_fields; -pub mod field_annotation; -pub mod manager_builder; diff --git a/canyon_observer/src/migrations/register_types.rs b/canyon_observer/src/migrations/register_types.rs deleted file mode 100644 index 14481c13..00000000 --- a/canyon_observer/src/migrations/register_types.rs +++ /dev/null @@ -1,228 +0,0 @@ -use regex::Regex; - -#[cfg(feature = "postgres")] -use crate::constants::postgresql_type; -#[cfg(feature = "mssql")] -use crate::constants::sqlserver_type; -use crate::constants::{regex_patterns, rust_type, NUMERIC_PK_DATATYPE}; - -/// This file contains `Rust` types that represents an entry on the `CanyonRegister` -/// where `Canyon` tracks the user types that has to manage - -/// Gets the necessary identifiers of a CanyonEntity to make it the comparative -/// against the database schemas -#[derive(Debug, Clone, Default)] -pub struct CanyonRegisterEntity<'a> { - pub entity_name: &'a str, - pub entity_db_table_name: &'a str, - pub user_schema_name: Option<&'a str>, - pub entity_fields: Vec, -} - -/// Complementary type for a field that represents a struct field that maps -/// some real database column data -#[derive(Debug, Clone, Default)] -pub struct CanyonRegisterEntityField { - pub field_name: String, - pub field_type: String, - pub annotations: Vec, -} - -impl CanyonRegisterEntityField { - /// Return the postgres datatype and parameters to create a column for a given rust type - #[cfg(feature = "postgres")] - pub fn to_postgres_syntax(&self) -> String { - let rust_type_clean = self.field_type.replace(' ', ""); - - match rust_type_clean.as_str() { - rust_type::I8 | rust_type::U8 => { - String::from(&format!("{} NOT NULL", postgresql_type::INTEGER)) - } - rust_type::OPT_I8 | rust_type::OPT_U8 => String::from(postgresql_type::INTEGER), - - rust_type::I16 | rust_type::U16 => { - String::from(&format!("{} NOT NULL", 
postgresql_type::INTEGER)) - } - rust_type::OPT_I16 | rust_type::OPT_U16 => String::from(postgresql_type::INTEGER), - - rust_type::I32 | rust_type::U32 => { - String::from(&format!("{} NOT NULL", postgresql_type::INTEGER)) - } - rust_type::OPT_I32 | rust_type::OPT_U32 => String::from(postgresql_type::INTEGER), - - rust_type::I64 | rust_type::U64 => { - String::from(&format!("{} NOT NULL", postgresql_type::BIGINT)) - } - rust_type::OPT_I64 | rust_type::OPT_U64 => String::from(postgresql_type::BIGINT), - - rust_type::STRING => String::from(&format!("{} NOT NULL", postgresql_type::TEXT)), - rust_type::OPT_STRING => String::from(postgresql_type::TEXT), - - rust_type::BOOL => String::from(&format!("{} NOT NULL", postgresql_type::BOOLEAN)), - rust_type::OPT_BOOL => String::from(postgresql_type::BOOLEAN), - - rust_type::NAIVE_DATE => String::from(&format!("{} NOT NULL", postgresql_type::DATE)), - rust_type::OPT_NAIVE_DATE => String::from(postgresql_type::DATE), - - rust_type::NAIVE_TIME => String::from(&format!("{} NOT NULL", postgresql_type::TIME)), - rust_type::OPT_NAIVE_TIME => String::from(postgresql_type::TIME), - - rust_type::NAIVE_DATE_TIME => { - String::from(&format!("{} NOT NULL", postgresql_type::DATETIME)) - } - rust_type::OPT_NAIVE_DATE_TIME => String::from(postgresql_type::DATETIME), - &_ => todo!("Not supported datatype for this migrations version"), - } - } - - /// Return the postgres datatype and parameters to create a column for a given rust type - /// for Microsoft SQL Server - #[cfg(feature = "mssql")] - pub fn to_sqlserver_syntax(&self) -> String { - let rust_type_clean = self.field_type.replace(' ', ""); - - match rust_type_clean.as_str() { - rust_type::I8 | rust_type::U8 => { - String::from(&format!("{} NOT NULL", sqlserver_type::INT)) - } - rust_type::OPT_I8 | rust_type::OPT_U8 => String::from(sqlserver_type::INT), - - rust_type::I16 | rust_type::U16 => { - String::from(&format!("{} NOT NULL", sqlserver_type::INT)) - } - rust_type::OPT_I16 | rust_type::OPT_U16 => String::from(sqlserver_type::INT), - - rust_type::I32 | rust_type::U32 => { - String::from(&format!("{} NOT NULL", sqlserver_type::INT)) - } - rust_type::OPT_I32 | rust_type::OPT_U32 => String::from(sqlserver_type::INT), - - rust_type::I64 | rust_type::U64 => { - String::from(&format!("{} NOT NULL", sqlserver_type::BIGINT)) - } - rust_type::OPT_I64 | rust_type::OPT_U64 => String::from(sqlserver_type::BIGINT), - - rust_type::STRING => { - String::from(&format!("{} NOT NULL DEFAULT ''", sqlserver_type::NVARCHAR)) - } - rust_type::OPT_STRING => String::from(sqlserver_type::NVARCHAR), - - rust_type::BOOL => String::from(&format!("{} NOT NULL", sqlserver_type::BIT)), - rust_type::OPT_BOOL => String::from(sqlserver_type::BIT), - - rust_type::NAIVE_DATE => String::from(&format!("{} NOT NULL", sqlserver_type::DATE)), - rust_type::OPT_NAIVE_DATE => String::from(sqlserver_type::DATE), - - rust_type::NAIVE_TIME => String::from(&format!("{} NOT NULL", sqlserver_type::TIME)), - rust_type::OPT_NAIVE_TIME => String::from(sqlserver_type::TIME), - - rust_type::NAIVE_DATE_TIME => { - String::from(&format!("{} NOT NULL", sqlserver_type::DATETIME)) - } - rust_type::OPT_NAIVE_DATE_TIME => String::from(sqlserver_type::DATETIME), - &_ => todo!("Not supported datatype for this migrations version"), - } - } - - #[cfg(feature = "postgres")] - pub fn to_postgres_alter_syntax(&self) -> String { - let mut rust_type_clean = self.field_type.replace(' ', ""); - let rs_type_is_optional = self.field_type.to_uppercase().starts_with("OPTION"); - - 
if rs_type_is_optional { - let type_regex = Regex::new(regex_patterns::EXTRACT_RUST_OPT_REGEX).unwrap(); - let capture_rust_type = type_regex.captures(rust_type_clean.as_str()).unwrap(); - rust_type_clean = capture_rust_type - .name("rust_type") - .unwrap() - .as_str() - .to_string(); - } - - match rust_type_clean.as_str() { - rust_type::I8 | rust_type::U8 | rust_type::OPT_I8 | rust_type::OPT_U8 => { - String::from(postgresql_type::INT_8) - } - rust_type::I16 | rust_type::U16 | rust_type::OPT_I16 | rust_type::OPT_U16 => { - String::from(postgresql_type::SMALL_INT) - } - rust_type::I32 | rust_type::U32 | rust_type::OPT_I32 | rust_type::OPT_U32 => { - String::from(postgresql_type::INTEGER) - } - rust_type::I64 | rust_type::U64 | rust_type::OPT_I64 | rust_type::OPT_U64 => { - String::from(postgresql_type::BIGINT) - } - rust_type::STRING | rust_type::OPT_STRING => String::from(postgresql_type::TEXT), - rust_type::BOOL | rust_type::OPT_BOOL => String::from(postgresql_type::BOOLEAN), - rust_type::NAIVE_DATE | rust_type::OPT_NAIVE_DATE => { - String::from(postgresql_type::DATE) - } - rust_type::NAIVE_TIME | rust_type::OPT_NAIVE_TIME => { - String::from(postgresql_type::TIME) - } - rust_type::NAIVE_DATE_TIME | rust_type::OPT_NAIVE_DATE_TIME => { - String::from(postgresql_type::DATETIME) - } - &_ => todo!("Not supported datatype for this migrations version"), - } - } - - #[cfg(feature = "mssql")] - pub fn to_sqlserver_alter_syntax(&self) -> String { - let mut rust_type_clean = self.field_type.replace(' ', ""); - let rs_type_is_optional = self.field_type.to_uppercase().starts_with("OPTION"); - - if rs_type_is_optional { - let type_regex = Regex::new(regex_patterns::EXTRACT_RUST_OPT_REGEX).unwrap(); - let capture_rust_type = type_regex.captures(rust_type_clean.as_str()).unwrap(); - rust_type_clean = capture_rust_type - .name("rust_type") - .unwrap() - .as_str() - .to_string(); - } - - match rust_type_clean.as_str() { - rust_type::I8 | rust_type::U8 | rust_type::OPT_I8 | rust_type::OPT_U8 => { - String::from(sqlserver_type::TINY_INT) - } - rust_type::I16 | rust_type::U16 | rust_type::OPT_I16 | rust_type::OPT_U16 => { - String::from(sqlserver_type::SMALL_INT) - } - rust_type::I32 | rust_type::U32 | rust_type::OPT_I32 | rust_type::OPT_U32 => { - String::from(sqlserver_type::INT) - } - rust_type::I64 | rust_type::U64 | rust_type::OPT_I64 | rust_type::OPT_U64 => { - String::from(sqlserver_type::BIGINT) - } - rust_type::STRING | rust_type::OPT_STRING => String::from(sqlserver_type::NVARCHAR), - rust_type::BOOL | rust_type::OPT_BOOL => String::from(sqlserver_type::BIT), - rust_type::NAIVE_DATE | rust_type::OPT_NAIVE_DATE => String::from(sqlserver_type::DATE), - rust_type::NAIVE_TIME | rust_type::OPT_NAIVE_TIME => String::from(sqlserver_type::TIME), - rust_type::NAIVE_DATE_TIME | rust_type::OPT_NAIVE_DATE_TIME => { - String::from(sqlserver_type::DATETIME) - } - &_ => todo!("Not supported datatype for this migrations version"), - } - } - - /// Return if the field is autoincremental - pub fn is_autoincremental(&self) -> bool { - let has_pk_annotation = self - .annotations - .iter() - .find(|a| a.starts_with("Annotation: PrimaryKey")); - - let pk_is_autoincremental = match has_pk_annotation { - Some(annotation) => annotation.contains("true"), - None => false, - }; - - NUMERIC_PK_DATATYPE.contains(&self.field_type.as_str()) && pk_is_autoincremental - } - - /// Return the nullability of a the field - pub fn is_nullable(&self) -> bool { - self.field_type.to_uppercase().starts_with("OPTION") - } -} diff --git 
a/src/lib.rs b/src/lib.rs
index 33a2c82b..1d2e3375 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -6,11 +6,11 @@
 extern crate canyon_connection;
 extern crate canyon_crud;
 extern crate canyon_macros;
-extern crate canyon_observer;
+extern crate canyon_migrations;
 
 /// Reexported elements to the root of the public API
 pub mod migrations {
-    pub use canyon_observer::migrations::{handler, processor};
+    pub use canyon_migrations::migrations::{handler, processor};
 }
 
 /// The top level reexport. Here we define the path to some really important