diff --git a/.github/workflows/code-coverage.yml b/.github/workflows/code-coverage.yml index 144aa42e..cb3ecc98 100644 --- a/.github/workflows/code-coverage.yml +++ b/.github/workflows/code-coverage.yml @@ -27,7 +27,6 @@ jobs: rustup override set nightly - name: Make the USER own the working directory. Installing `gssapi` headers - if: ${{ matrix.os == 'ubuntu-latest' }} run: | sudo chown -R $USER:$USER ${{ github.workspace }} sudo apt -y install gcc libgssapi-krb5-2 libkrb5-dev libsasl2-modules-gssapi-mit diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml index c72c0e5b..9de14f14 100644 --- a/.github/workflows/code-quality.yml +++ b/.github/workflows/code-quality.yml @@ -55,7 +55,7 @@ jobs: strategy: fail-fast: false matrix: - crate: [canyon_connection, canyon_crud, canyon_macros, canyon_observer, canyon_sql] + crate: [canyon_connection, canyon_crud, canyon_macros, canyon_observer] steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 53f77132..3c26ce66 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -53,8 +53,12 @@ jobs: - name: Run only UNIT tests for Windows if: ${{ matrix.os == 'windows-latest' }} - run: cargo test --verbose --workspace --target=x86_64-pc-windows-msvc --exclude tests --all-features --no-fail-fast -- --show-output + run: | + cargo test --verbose --workspace --lib --target=x86_64-pc-windows-msvc --all-features --no-fail-fast -- --show-output + cargo test --verbose --workspace --doc --target=x86_64-pc-windows-msvc --all-features --no-fail-fast -- --show-output - name: Run only UNIT tests for MacOS if: ${{ matrix.os == 'MacOS-latest' }} - run: cargo test --verbose --workspace --exclude tests --all-features --no-fail-fast -- --show-output + run: | + cargo test --verbose --workspace --lib --all-features --no-fail-fast -- --show-output + cargo test --verbose 
--workspace --doc --all-features --no-fail-fast -- --show-output diff --git a/CHANGELOG.md b/CHANGELOG.md index d4ef370f..db434f8a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,31 @@ Year format is defined as: `YYYY-m-d` ## [Unreleased] -- Solved a bug in the canyon_entity proc macro that was wiring the incorrect user table name in the migrations +## [0.2.0] - 2023 - 04 - 13 + +### Feature + +- Enabled conditional compilation for the database dependencies of the project. +This caused a major rework in the codebase, but none of the client APIs has been affected. +Now, Canyon-SQL comes with two features, ["postgres", "mssql"]. +There's no default features enabled for the project. + +## [0.2.0] - 2023 - 04 - 13 + +### Feature [BREAKING CHANGES] + +- The configuration file has been reworked, by providing a whole category dedicated +to the authentication against the database server. +- We removed the database type property, since the database type can be inferred by +the new mandatory auth property +- Included support for the `MSSQL` integrated authentication via the cfg feature `mssql-integrated-auth` + +## [0.1.2] - 2023 - 03 - 28 + +### Update + +- Implemented bool types for QueryParameters<'_>. 
+- Minimal performance improvements ## [0.1.1] - 2023 - 03 - 20 diff --git a/Cargo.toml b/Cargo.toml index 800ad578..6f4da496 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,12 +1,65 @@ -# This is the root Cargo.toml file that serves as manager for the workspace of the project +[package] +name = "canyon_sql" +version.workspace = true +edition.workspace = true [workspace] members = [ - "canyon_sql", + "canyon_connection", + "canyon_crud", "canyon_observer", "canyon_macros", - "canyon_crud", - "canyon_connection", "tests" ] + +[dependencies] +# Project crates +canyon_connection = { workspace = true, path = "canyon_connection" } +canyon_crud = { workspace = true, path = "canyon_crud" } +canyon_observer = { workspace = true, path = "canyon_observer" } +canyon_macros = { workspace = true, path = "canyon_macros" } + +# To be marked as opt deps +tokio-postgres = { workspace = true, optional = true } +tiberius = { workspace = true, optional = true } + +[workspace.dependencies] +canyon_crud = { version = "0.3.0", path = "canyon_crud" } +canyon_connection = { version = "0.3.0", path = "canyon_connection" } +canyon_observer = { version = "0.3.0", path = "canyon_observer" } +canyon_macros = { version = "0.3.0", path = "canyon_macros" } + +tokio = { version = "1.27.0", features = ["full"] } +tokio-util = { version = "0.7.4", features = ["compat"] } +tokio-postgres = { version = "0.7.2", features = ["with-chrono-0_4"] } +tiberius = { version = "0.12.1", features = ["tds73", "chrono", "integrated-auth-gssapi"] } + +chrono = { version = "0.4", features = ["serde"] } # Just from TP better? 
+serde = { version = "1.0.138", features = ["derive"] } + +futures = "0.3.25" +indexmap = "1.9.1" +async-std = "1.12.0" +lazy_static = "1.4.0" +toml = "0.7.3" +async-trait = "0.1.68" +walkdir = "2.3.3" +regex = "1.5" + +quote = "1.0.9" +proc-macro2 = "1.0.27" + +[workspace.package] +version = "0.3.0" +edition = "2021" +authors = ["Alex Vergara, Gonzalo Busto"] +documentation = "https://zerodaycode.github.io/canyon-book/" +homepage = "https://github.com/zerodaycode/Canyon-SQL" +readme = "README.md" +license = "MIT" +description = "A Rust ORM and QueryBuilder" + +[features] +postgres = ["tokio-postgres", "canyon_connection/postgres", "canyon_crud/postgres", "canyon_observer/postgres", "canyon_macros/postgres"] +mssql = ["tiberius", "canyon_connection/mssql", "canyon_crud/mssql", "canyon_observer/mssql", "canyon_macros/mssql"] diff --git a/bash_aliases.sh b/bash_aliases.sh index a67da429..64e2d931 100644 --- a/bash_aliases.sh +++ b/bash_aliases.sh @@ -39,7 +39,7 @@ alias SqlServerInitializationLinux='cargo test initialize_sql_server_docker_inst # Publish Canyon-SQL to the registry with its dependencies -alias PublishCanyon='cargo publish -p canyon_connection && cargo publish -p canyon_crud && cargo publish -p canyon_observer && cargo publish -p canyon_macros && cargo publish -p canyon_sql' +alias PublishCanyon='cargo publish -p canyon_connection && cargo publish -p canyon_crud && cargo publish -p canyon_observer && cargo publish -p canyon_macros && cargo publish -p canyon_sql_root' # Collects the code coverage for the project (tests must run before this) alias CcEnvVars='export CARGO_INCREMENTAL=0 diff --git a/canyon_connection/Cargo.toml b/canyon_connection/Cargo.toml index 99058cf2..fd37fd4e 100644 --- a/canyon_connection/Cargo.toml +++ b/canyon_connection/Cargo.toml @@ -1,29 +1,30 @@ [package] name = "canyon_connection" -version = "0.2.0" -edition = "2021" -documentation = "https://zerodaycode.github.io/canyon-book/" -homepage = 
"https://github.com/zerodaycode/Canyon-SQL" -readme = "../README.md" -license = "MIT" -description = "A Rust ORM and QueryBuilder" - +version.workspace = true +edition.workspace = true +authors.workspace = true +documentation.workspace = true +homepage.workspace = true +readme.workspace = true +license.workspace = true +description.workspace = true [dependencies] -tokio = { version = "1.21.2", features = ["full"] } -tokio-util = { version = "0.7.4", features = ["compat"] } -tokio-postgres = { version = "0.7.2", features = ["with-chrono-0_4"] } -futures = "0.3.25" -indexmap = "1.9.1" +tokio = { workspace = true } +tokio-util = { workspace = true } -tiberius = { version = "0.12.1", features = ["tds73", "chrono", "integrated-auth-gssapi"] } -async-std = { version = "1.12.0" } +tokio-postgres = { workspace = true, optional = true } +tiberius = { workspace = true, optional = true } -lazy_static = "1.4.0" +futures = { workspace = true } +indexmap = { workspace = true } +lazy_static = { workspace = true } +toml = { workspace = true } +serde = { workspace = true } +async-std = { workspace = true, optional = true } +walkdir = { workspace = true } -serde = { version = "1.0.138", features = ["derive"] } -toml = "0.7.3" [features] -mssql-integrated-auth = [] - +postgres = ["tokio-postgres"] +mssql = ["tiberius", "async-std"] diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index 71fd767e..7196e948 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -1,28 +1,34 @@ -use async_std::net::TcpStream; - use serde::Deserialize; + +#[cfg(feature = "mssql")] +use async_std::net::TcpStream; +#[cfg(feature = "mssql")] use tiberius::{AuthMethod, Config}; +#[cfg(feature = "postgres")] use tokio_postgres::{Client, NoTls}; use crate::datasources::DatasourceConfig; /// Represents the current supported databases by Canyon 
-#[derive(Deserialize, Debug, Eq, PartialEq, Clone, Copy, Default)] +#[derive(Deserialize, Debug, Eq, PartialEq, Clone, Copy)] pub enum DatabaseType { - #[default] #[serde(alias = "postgres", alias = "postgresql")] + #[cfg(feature = "postgres")] PostgreSql, #[serde(alias = "sqlserver", alias = "mssql")] + #[cfg(feature = "mssql")] SqlServer, } /// A connection with a `PostgreSQL` database +#[cfg(feature = "postgres")] pub struct PostgreSqlConnection { pub client: Client, // pub connection: Connection, // TODO Hold it, or not to hold it... that's the question! } /// A connection with a `SqlServer` database +#[cfg(feature = "mssql")] pub struct SqlServerConnection { pub client: &'static mut tiberius::Client, } @@ -32,7 +38,9 @@ pub struct SqlServerConnection { /// process them and generates a pool of 1 to 1 database connection for /// every datasource defined. pub enum DatabaseConnection { + #[cfg(feature = "postgres")] Postgres(PostgreSqlConnection), + #[cfg(feature = "mssql")] SqlServer(SqlServerConnection), } @@ -44,6 +52,7 @@ impl DatabaseConnection { datasource: &DatasourceConfig, ) -> Result> { match datasource.get_db_type() { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => { let (username, password) = match &datasource.auth { crate::datasources::Auth::Postgres(postgres_auth) => match postgres_auth { @@ -51,6 +60,7 @@ impl DatabaseConnection { (username.as_str(), password.as_str()) } }, + #[cfg(feature = "mssql")] crate::datasources::Auth::SqlServer(_) => { panic!("Found SqlServer auth configuration for a PostgreSQL datasource") } @@ -79,6 +89,7 @@ impl DatabaseConnection { // connection: new_connection, })) } + #[cfg(feature = "mssql")] DatabaseType::SqlServer => { let mut config = Config::new(); @@ -88,6 +99,7 @@ impl DatabaseConnection { // Using SQL Server authentication. 
config.authentication(match &datasource.auth { + #[cfg(feature = "postgres")] crate::datasources::Auth::Postgres(_) => { panic!("Found PostgreSQL auth configuration for a SqlServer database") } @@ -95,7 +107,6 @@ impl DatabaseConnection { crate::datasources::SqlServerAuth::Basic { username, password } => { AuthMethod::sql_server(username, password) } - #[cfg(feature = "mssql-integrated-auth")] crate::datasources::SqlServerAuth::Integrated => AuthMethod::Integrated, }, }); @@ -128,19 +139,21 @@ impl DatabaseConnection { } } - pub fn postgres_connection(&self) -> Option<&PostgreSqlConnection> { - if let DatabaseConnection::Postgres(conn) = self { - Some(conn) - } else { - None + #[cfg(feature = "postgres")] + pub fn postgres_connection(&self) -> &PostgreSqlConnection { + match self { + DatabaseConnection::Postgres(conn) => conn, + #[cfg(all(feature = "postgres", feature = "mssql"))] + _ => panic!(), } } - pub fn sqlserver_connection(&mut self) -> Option<&mut SqlServerConnection> { - if let DatabaseConnection::SqlServer(conn) = self { - Some(conn) - } else { - None + #[cfg(feature = "mssql")] + pub fn sqlserver_connection(&mut self) -> &mut SqlServerConnection { + match self { + DatabaseConnection::SqlServer(conn) => conn, + #[cfg(all(feature = "postgres", feature = "mssql"))] + _ => panic!(), } } } @@ -150,27 +163,60 @@ mod database_connection_handler { use super::*; use crate::CanyonSqlConfig; - const CONFIG_FILE_MOCK_ALT: &str = r#" - [canyon_sql] - datasources = [ - {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, - {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } - ] - "#; - /// Tests the behaviour of the 
`DatabaseType::from_datasource(...)` #[test] fn check_from_datasource() { - let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT) - .expect("A failure happened retrieving the [canyon_sql] section"); - - assert_eq!( - config.canyon_sql.datasources[0].get_db_type(), - DatabaseType::PostgreSql - ); - assert_eq!( - config.canyon_sql.datasources[1].get_db_type(), - DatabaseType::SqlServer - ); + #[cfg(all(feature = "postgres", feature = "mssql"))] + { + const CONFIG_FILE_MOCK_ALT_ALL: &str = r#" + [canyon_sql] + datasources = [ + {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, + {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } + ] + "#; + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_ALL) + .expect("A failure happened retrieving the [canyon_sql] section"); + assert_eq!( + config.canyon_sql.datasources[0].get_db_type(), + DatabaseType::PostgreSql + ); + assert_eq!( + config.canyon_sql.datasources[1].get_db_type(), + DatabaseType::SqlServer + ); + } + + #[cfg(feature = "postgres")] + { + const CONFIG_FILE_MOCK_ALT_PG: &str = r#" + [canyon_sql] + datasources = [ + {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, + ] + "#; + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_PG) + .expect("A failure happened retrieving the [canyon_sql] section"); + assert_eq!( + config.canyon_sql.datasources[0].get_db_type(), + DatabaseType::PostgreSql + ); + } + + #[cfg(feature = "mssql")] + { + const CONFIG_FILE_MOCK_ALT_MSSQL: &str = r#" + 
[canyon_sql] + datasources = [ + {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } + ] + "#; + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_MSSQL) + .expect("A failure happened retrieving the [canyon_sql] section"); + assert_eq!( + config.canyon_sql.datasources[0].get_db_type(), + DatabaseType::SqlServer + ); + } } } diff --git a/canyon_connection/src/datasources.rs b/canyon_connection/src/datasources.rs index 81c4e611..9571c343 100644 --- a/canyon_connection/src/datasources.rs +++ b/canyon_connection/src/datasources.rs @@ -5,57 +5,72 @@ use crate::canyon_database_connector::DatabaseType; /// ``` #[test] fn load_ds_config_from_array() { - const CONFIG_FILE_MOCK_ALT: &str = r#" - [canyon_sql] - datasources = [ - {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, - {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' }, - {name = 'SqlServerDS', auth = { sqlserver = { integrated = {} } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } - ] - "#; - - let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT) - .expect("A failure happened retrieving the [canyon_sql] section"); - - let ds_0 = &config.canyon_sql.datasources[0]; - let ds_1 = &config.canyon_sql.datasources[1]; - let ds_2 = &config.canyon_sql.datasources[2]; - - assert_eq!(ds_0.name, "PostgresDS"); - assert_eq!(ds_0.get_db_type(), DatabaseType::PostgreSql); - assert_eq!( - ds_0.auth, - 
Auth::Postgres(PostgresAuth::Basic { - username: "postgres".to_string(), - password: "postgres".to_string() - }) - ); - assert_eq!(ds_0.properties.host, "localhost"); - assert_eq!(ds_0.properties.port, None); - assert_eq!(ds_0.properties.db_name, "triforce"); - assert_eq!(ds_0.properties.migrations, Some(Migrations::Enabled)); - - assert_eq!(ds_1.name, "SqlServerDS"); - assert_eq!(ds_1.get_db_type(), DatabaseType::SqlServer); - assert_eq!( - ds_1.auth, - Auth::SqlServer(SqlServerAuth::Basic { - username: "sa".to_string(), - password: "SqlServer-10".to_string() - }) - ); - assert_eq!(ds_1.properties.host, "192.168.0.250.1"); - assert_eq!(ds_1.properties.port, Some(3340)); - assert_eq!(ds_1.properties.db_name, "triforce2"); - assert_eq!(ds_1.properties.migrations, Some(Migrations::Disabled)); - - assert_eq!(ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)) + #[cfg(feature = "postgres")] + { + const CONFIG_FILE_MOCK_ALT_PG: &str = r#" + [canyon_sql] + datasources = [ + {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, + ] + "#; + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_PG) + .expect("A failure happened retrieving the [canyon_sql] section"); + + let ds_0 = &config.canyon_sql.datasources[0]; + + assert_eq!(ds_0.name, "PostgresDS"); + assert_eq!(ds_0.get_db_type(), DatabaseType::PostgreSql); + assert_eq!( + ds_0.auth, + Auth::Postgres(PostgresAuth::Basic { + username: "postgres".to_string(), + password: "postgres".to_string() + }) + ); + assert_eq!(ds_0.properties.host, "localhost"); + assert_eq!(ds_0.properties.port, None); + assert_eq!(ds_0.properties.db_name, "triforce"); + assert_eq!(ds_0.properties.migrations, Some(Migrations::Enabled)); + } + + #[cfg(feature = "mssql")] + { + const CONFIG_FILE_MOCK_ALT_MSSQL: &str = r#" + [canyon_sql] + datasources = [ + {name = 'SqlServerDS', 
auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' }, + {name = 'SqlServerDS', auth = { sqlserver = { integrated = {} } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } + ] + "#; + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_MSSQL) + .expect("A failure happened retrieving the [canyon_sql] section"); + + let ds_1 = &config.canyon_sql.datasources[0]; + let ds_2 = &config.canyon_sql.datasources[1]; + + assert_eq!(ds_1.name, "SqlServerDS"); + assert_eq!(ds_1.get_db_type(), DatabaseType::SqlServer); + assert_eq!( + ds_1.auth, + Auth::SqlServer(SqlServerAuth::Basic { + username: "sa".to_string(), + password: "SqlServer-10".to_string() + }) + ); + assert_eq!(ds_1.properties.host, "192.168.0.250.1"); + assert_eq!(ds_1.properties.port, Some(3340)); + assert_eq!(ds_1.properties.db_name, "triforce2"); + assert_eq!(ds_1.properties.migrations, Some(Migrations::Disabled)); + + assert_eq!(ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)); + } } /// #[derive(Deserialize, Debug, Clone)] pub struct CanyonSqlConfig { pub canyon_sql: Datasources, } + #[derive(Deserialize, Debug, Clone)] pub struct Datasources { pub datasources: Vec, @@ -71,7 +86,9 @@ pub struct DatasourceConfig { impl DatasourceConfig { pub fn get_db_type(&self) -> DatabaseType { match self.auth { + #[cfg(feature = "postgres")] Auth::Postgres(_) => DatabaseType::PostgreSql, + #[cfg(feature = "mssql")] Auth::SqlServer(_) => DatabaseType::SqlServer, } } @@ -79,23 +96,26 @@ impl DatasourceConfig { #[derive(Deserialize, Debug, Clone, PartialEq)] pub enum Auth { - #[serde(alias = "PostgreSQL", alias = "postgresql")] + #[serde(alias = "PostgreSQL", alias = "postgresql", alias = "postgres")] + #[cfg(feature = "postgres")] Postgres(PostgresAuth), #[serde(alias 
= "SqlServer", alias = "sqlserver", alias = "mssql")] + #[cfg(feature = "mssql")] SqlServer(SqlServerAuth), } #[derive(Deserialize, Debug, Clone, PartialEq)] +#[cfg(feature = "postgres")] pub enum PostgresAuth { #[serde(alias = "Basic", alias = "basic")] Basic { username: String, password: String }, } #[derive(Deserialize, Debug, Clone, PartialEq)] +#[cfg(feature = "mssql")] pub enum SqlServerAuth { #[serde(alias = "Basic", alias = "basic")] Basic { username: String, password: String }, - #[cfg(feature = "mssql-integrated-auth")] #[serde(alias = "Integrated", alias = "integrated")] Integrated, } diff --git a/canyon_connection/src/lib.rs b/canyon_connection/src/lib.rs index 1a8f7cab..fed9f31f 100644 --- a/canyon_connection/src/lib.rs +++ b/canyon_connection/src/lib.rs @@ -1,8 +1,11 @@ +#[cfg(feature = "mssql")] pub extern crate async_std; pub extern crate futures; pub extern crate lazy_static; +#[cfg(feature = "mssql")] pub extern crate tiberius; pub extern crate tokio; +#[cfg(feature = "postgres")] pub extern crate tokio_postgres; pub extern crate tokio_util; @@ -10,21 +13,21 @@ pub mod canyon_database_connector; pub mod datasources; use std::fs; +use std::path::PathBuf; use crate::datasources::{CanyonSqlConfig, DatasourceConfig}; use canyon_database_connector::DatabaseConnection; use indexmap::IndexMap; use lazy_static::lazy_static; -use tokio::sync::Mutex; - -const CONFIG_FILE_IDENTIFIER: &str = "canyon.toml"; +use tokio::sync::{Mutex, MutexGuard}; +use walkdir::WalkDir; lazy_static! 
{ pub static ref CANYON_TOKIO_RUNTIME: tokio::runtime::Runtime = tokio::runtime::Runtime::new() // TODO Make the config with the builder .expect("Failed initializing the Canyon-SQL Tokio Runtime"); - static ref RAW_CONFIG_FILE: String = fs::read_to_string(CONFIG_FILE_IDENTIFIER) + static ref RAW_CONFIG_FILE: String = fs::read_to_string(find_canyon_config_file()) .expect("Error opening or reading the Canyon configuration file"); static ref CONFIG_FILE: CanyonSqlConfig = toml::from_str(RAW_CONFIG_FILE.as_str()) .expect("Error generating the configuration for Canyon-SQL"); @@ -36,6 +39,24 @@ lazy_static! { Mutex::new(IndexMap::new()); } +fn find_canyon_config_file() -> PathBuf { + for e in WalkDir::new(".") + .max_depth(2) + .into_iter() + .filter_map(|e| e.ok()) + { + let filename = e.file_name().to_str().unwrap(); + if e.metadata().unwrap().is_file() + && filename.starts_with("canyon") + && filename.ends_with(".toml") + { + return e.path().to_path_buf(); + } + } + + panic!() +} + /// Convenient free function to initialize a kind of connection pool based on the datasources present defined /// in the configuration file. /// @@ -61,3 +82,25 @@ pub async fn init_connections_cache() { ); } } + +/// +pub fn get_database_connection<'a>( + datasource_name: &str, + guarded_cache: &'a mut MutexGuard>, +) -> &'a mut DatabaseConnection { + if datasource_name.is_empty() { + guarded_cache + .get_mut( + DATASOURCES + .get(0) + .expect("We didn't found any valid datasource configuration. Check your `canyon.toml` file") + .name + .as_str() + ).unwrap_or_else(|| panic!("No default datasource found. 
Check your `canyon.toml` file")) + } else { + guarded_cache.get_mut(datasource_name) + .unwrap_or_else(|| + panic!("Canyon couldn't find a datasource in the pool with the argument provided: {datasource_name}") + ) + } +} diff --git a/canyon_crud/Cargo.toml b/canyon_crud/Cargo.toml index 4c30408f..123a44fe 100644 --- a/canyon_crud/Cargo.toml +++ b/canyon_crud/Cargo.toml @@ -1,15 +1,22 @@ [package] name = "canyon_crud" -version = "0.2.0" -edition = "2021" -documentation = "https://zerodaycode.github.io/canyon-book/" -homepage = "https://github.com/zerodaycode/Canyon-SQL" -readme = "../README.md" -license = "MIT" -description = "A Rust ORM and QueryBuilder" +version.workspace = true +edition.workspace = true +authors.workspace = true +documentation.workspace = true +homepage.workspace = true +readme.workspace = true +license.workspace = true +description.workspace = true [dependencies] -chrono = { version = "0.4", features = ["serde"] } -async-trait = { version = "0.1.50" } +tokio-postgres = { workspace = true, optional = true } +tiberius = { workspace = true, optional = true } +chrono = { workspace = true } +async-trait = { workspace = true } -canyon_connection = { version = "0.2.0", path = "../canyon_connection" } +canyon_connection = { workspace = true } + +[features] +postgres = ["tokio-postgres", "canyon_connection/postgres"] +mssql = ["tiberius", "canyon_connection/mssql"] diff --git a/canyon_crud/src/bounds.rs b/canyon_crud/src/bounds.rs index e484fe8c..d46bf863 100644 --- a/canyon_crud/src/bounds.rs +++ b/canyon_crud/src/bounds.rs @@ -1,18 +1,19 @@ -#![allow(clippy::extra_unused_lifetimes)] - use crate::{ crud::{CrudOperations, Transaction}, mapper::RowMapper, }; -use canyon_connection::{ - tiberius::{self, ColumnData, IntoSql}, - tokio_postgres::{self, types::ToSql}, -}; + +#[cfg(feature = "postgres")] +use canyon_connection::tokio_postgres::{self, types::ToSql}; + +#[cfg(feature = "mssql")] +use canyon_connection::tiberius::{self, 
ColumnData, IntoSql}; + use chrono::{DateTime, FixedOffset, NaiveDate, NaiveDateTime, NaiveTime, Utc}; use std::any::Any; /// Created for retrieve the field's name of a field of a struct, giving -/// the Canoyn's autogenerated enum with the variants that maps this +/// the Canyon's autogenerated enum with the variants that maps this /// fields. /// /// ``` @@ -79,26 +80,29 @@ pub trait ForeignKeyable { fn get_fk_column(&self, column: &str) -> Option<&dyn QueryParameter<'_>>; } -/// To define trait objects that helps to relates the necessary bounds in the 'IN` SQL clause -pub trait InClauseValues: ToSql + ToString {} - /// Generic abstraction to represent any of the Row types /// from the client crates pub trait Row { fn as_any(&self) -> &dyn Any; } + +#[cfg(feature = "postgres")] impl Row for tokio_postgres::Row { fn as_any(&self) -> &dyn Any { self } } +#[cfg(feature = "mssql")] impl Row for tiberius::Row { fn as_any(&self) -> &dyn Any { self } } +/// Generic abstraction for hold a Column type that will be one of the Column +/// types present in the dependent crates +// #[derive(Copy, Clone)] pub struct Column<'a> { name: &'a str, type_: ColumnType, @@ -110,116 +114,152 @@ impl<'a> Column<'a> { pub fn column_type(&self) -> &ColumnType { &self.type_ } - pub fn type_(&'a self) -> &'_ dyn Type { - match &self.type_ { - ColumnType::Postgres(v) => v as &'a dyn Type, - ColumnType::SqlServer(v) => v as &'a dyn Type, - } - } + // pub fn type_(&'a self) -> &'_ dyn Type { + // match (*self).type_ { + // #[cfg(feature = "postgres")] ColumnType::Postgres(v) => v as &'a dyn Type, + // #[cfg(feature = "mssql")] ColumnType::SqlServer(v) => v as &'a dyn Type, + // } + // } } pub trait Type { fn as_any(&self) -> &dyn Any; } +#[cfg(feature = "postgres")] impl Type for tokio_postgres::types::Type { fn as_any(&self) -> &dyn Any { self } } +#[cfg(feature = "mssql")] impl Type for tiberius::ColumnType { fn as_any(&self) -> &dyn Any { self } } +/// Wrapper over the dependencies Column's 
types pub enum ColumnType { + #[cfg(feature = "postgres")] Postgres(tokio_postgres::types::Type), + #[cfg(feature = "mssql")] SqlServer(tiberius::ColumnType), } pub trait RowOperations { - /// Abstracts the different forms of use the common `get` row - /// function or method dynamically no matter what are the origin - /// type from any database client provider - fn get<'a, Output>(&'a self, col_name: &str) -> Output + #[cfg(feature = "postgres")] + fn get_postgres<'a, Output>(&'a self, col_name: &'a str) -> Output + where + Output: tokio_postgres::types::FromSql<'a>; + #[cfg(feature = "mssql")] + fn get_mssql<'a, Output>(&'a self, col_name: &'a str) -> Output where - Output: tokio_postgres::types::FromSql<'a> + tiberius::FromSql<'a>; + Output: tiberius::FromSql<'a>; - fn get_opt<'a, Output>(&'a self, col_name: &str) -> Option + #[cfg(feature = "postgres")] + fn get_postgres_opt<'a, Output>(&'a self, col_name: &'a str) -> Option where - Output: tokio_postgres::types::FromSql<'a> + tiberius::FromSql<'a>; + Output: tokio_postgres::types::FromSql<'a>; + #[cfg(feature = "mssql")] + fn get_mssql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option + where + Output: tiberius::FromSql<'a>; fn columns(&self) -> Vec; } impl RowOperations for &dyn Row { - fn get<'a, Output>(&'a self, col_name: &str) -> Output + #[cfg(feature = "postgres")] + fn get_postgres<'a, Output>(&'a self, col_name: &'a str) -> Output where - Output: tokio_postgres::types::FromSql<'a> + tiberius::FromSql<'a>, + Output: tokio_postgres::types::FromSql<'a>, { if let Some(row) = self.as_any().downcast_ref::() { return row.get::<&str, Output>(col_name); }; + panic!() // TODO into result and propagate + } + #[cfg(feature = "mssql")] + fn get_mssql<'a, Output>(&'a self, col_name: &'a str) -> Output + where + Output: tiberius::FromSql<'a>, + { if let Some(row) = self.as_any().downcast_ref::() { return row .get::(col_name) .expect("Failed to obtain a row in the MSSQL migrations"); }; - panic!() + panic!() // 
TODO into result and propagate } - fn columns(&self) -> Vec { - let mut cols = vec![]; - - if self.as_any().is::() { - self.as_any() - .downcast_ref::() - .expect("Not a tokio postgres Row for column") - .columns() - .iter() - .for_each(|c| { - cols.push(Column { - name: c.name(), - type_: ColumnType::Postgres(c.type_().to_owned()), - }) - }) - } else { - self.as_any() - .downcast_ref::() - .expect("Not a Tiberius Row for column") - .columns() - .iter() - .for_each(|c| { - cols.push(Column { - name: c.name(), - type_: ColumnType::SqlServer(c.column_type()), - }) - }) - }; - - cols - } - - fn get_opt<'a, Output>(&'a self, col_name: &str) -> Option + #[cfg(feature = "postgres")] + fn get_postgres_opt<'a, Output>(&'a self, col_name: &'a str) -> Option where - Output: tokio_postgres::types::FromSql<'a> + tiberius::FromSql<'a>, + Output: tokio_postgres::types::FromSql<'a>, { if let Some(row) = self.as_any().downcast_ref::() { return row.get::<&str, Option>(col_name); }; + panic!() // TODO into result and propagate + } + + #[cfg(feature = "mssql")] + fn get_mssql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option + where + Output: tiberius::FromSql<'a>, + { if let Some(row) = self.as_any().downcast_ref::() { - return row - .try_get::(col_name) - .expect("Failed to obtain a row in the MSSQL migrations"); + return row.get::(col_name); }; - panic!() + panic!() // TODO into result and propagate + } + + fn columns(&self) -> Vec { + let mut cols = vec![]; + + #[cfg(feature = "postgres")] + { + if self.as_any().is::() { + self.as_any() + .downcast_ref::() + .expect("Not a tokio postgres Row for column") + .columns() + .iter() + .for_each(|c| { + cols.push(Column { + name: c.name(), + type_: ColumnType::Postgres(c.type_().to_owned()), + }) + }) + } + } + #[cfg(feature = "mssql")] + { + if self.as_any().is::() { + self.as_any() + .downcast_ref::() + .expect("Not a Tiberius Row for column") + .columns() + .iter() + .for_each(|c| { + cols.push(Column { + name: c.name(), + 
type_: ColumnType::SqlServer(c.column_type()), + }) + }) + }; + } + + cols } } /// Defines a trait for represent type bounds against the allowed -/// datatypes supported by Canyon to be used as query parameters. +/// data types supported by Canyon to be used as query parameters. pub trait QueryParameter<'a>: std::fmt::Debug + Sync + Send { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync); + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_>; } @@ -231,6 +271,7 @@ pub trait QueryParameter<'a>: std::fmt::Debug + Sync + Send { /// a collection of [`QueryParameter<'a>`], in order to allow a workflow /// that is not dependent of the specific type of the argument that holds /// the query parameters of the database connectors +#[cfg(feature = "mssql")] impl<'a> IntoSql<'a> for &'a dyn QueryParameter<'a> { fn into_sql(self) -> ColumnData<'a> { self.as_sqlserver_param() @@ -238,118 +279,131 @@ impl<'a> IntoSql<'a> for &'a dyn QueryParameter<'a> { } impl<'a> QueryParameter<'a> for bool { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::Bit(Some(*self)) } } impl<'a> QueryParameter<'a> for i16 { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(*self)) } } impl<'a> QueryParameter<'a> for &i16 { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(*self) } } impl<'a> 
QueryParameter<'a> for Option<&i16> { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for i32 { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(*self)) } } impl<'a> QueryParameter<'a> for &i32 { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(*self) } } impl<'a> QueryParameter<'a> for Option<&i32> { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for f32 { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some(*self)) } } impl<'a> QueryParameter<'a> for &f32 { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(*self) } } impl<'a> QueryParameter<'a> for Option<&f32> { 
+ #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some( *self.expect("Error on an f32 value on QueryParameter<'_>"), @@ -357,37 +411,41 @@ impl<'a> QueryParameter<'a> for Option<&f32> { } } impl<'a> QueryParameter<'a> for f64 { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some(*self)) } } impl<'a> QueryParameter<'a> for &f64 { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(*self) } } impl<'a> QueryParameter<'a> for Option<&f64> { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some( *self.expect("Error on an f64 value on QueryParameter<'_>"), @@ -395,64 +454,71 @@ impl<'a> QueryParameter<'a> for Option<&f64> { } } impl<'a> QueryParameter<'a> for i64 { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(*self)) } } impl<'a> QueryParameter<'a> for &i64 { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { + #[cfg(feature = 
"postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(*self) } } impl<'a> QueryParameter<'a> for Option<&i64> { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for String { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Owned(self.to_owned()))) } } impl<'a> QueryParameter<'a> for &String { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Borrowed(self))) } } impl<'a> QueryParameter<'a> for Option { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { match self { Some(string) => ColumnData::String(Some(std::borrow::Cow::Owned(string.to_owned()))), @@ -461,10 +527,11 @@ impl<'a> QueryParameter<'a> for Option { } } impl<'a> QueryParameter<'a> for Option<&String> { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { match self { Some(string) => ColumnData::String(Some(std::borrow::Cow::Borrowed(string))), @@ -472,20 +539,22 @@ impl<'a> QueryParameter<'a> for Option<&String> { } } } -impl<'a> QueryParameter<'_> for &'_ str { +impl<'a> QueryParameter<'a> for &'_ str { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> 
ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Borrowed(*self))) } } impl<'a> QueryParameter<'a> for Option<&'_ str> { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { match *self { Some(str) => ColumnData::String(Some(std::borrow::Cow::Borrowed(str))), @@ -493,92 +562,102 @@ impl<'a> QueryParameter<'a> for Option<&'_ str> { } } } -impl<'a> QueryParameter<'_> for NaiveDate { +impl<'a> QueryParameter<'a> for NaiveDate { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } -impl<'a> QueryParameter<'_> for NaiveTime { +impl<'a> QueryParameter<'a> for NaiveTime { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } -impl<'a> QueryParameter<'_> for NaiveDateTime { +impl<'a> QueryParameter<'a> for NaiveDateTime { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { 
self.into_sql() } } -impl<'a> QueryParameter<'_> for DateTime { +impl<'a> QueryParameter<'a> for DateTime { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option> { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } -impl<'a> QueryParameter<'_> for DateTime { +impl<'a> QueryParameter<'a> for DateTime { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } -impl<'a> QueryParameter<'_> for Option> { +impl<'a> QueryParameter<'a> for Option> { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index 8f587a02..f5c6d37e 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -2,54 +2,39 @@ use std::fmt::Display; use async_trait::async_trait; use canyon_connection::canyon_database_connector::DatabaseConnection; -use canyon_connection::{CACHED_DATABASE_CONN, DATASOURCES}; +use canyon_connection::{get_database_connection, CACHED_DATABASE_CONN}; use crate::bounds::QueryParameter; use crate::mapper::RowMapper; use crate::query_elements::query_builder::{ DeleteQueryBuilder, SelectQueryBuilder, UpdateQueryBuilder, }; -use crate::result::DatabaseResult; +use crate::rows::CanyonRows; /// This traits defines and implements a query against a database given -/// an statemt `stmt` and the params to pass the to the client. +/// an statement `stmt` and the params to pass the to the client. 
/// -/// It returns a [`DatabaseResult`], which is the core Canyon type to wrap -/// the result of the query and, if the user desires, -/// automatically map it to an struct. +/// Returns [`std::result::Result`] of [`CanyonRows`], which is the core Canyon type to wrap +/// the result of the query provide automatic mappings and deserialization #[async_trait] pub trait Transaction { - /// Performs a query against the targeted database by the selected datasource. - /// - /// No datasource means take the entry zero + /// Performs a query against the targeted database by the selected or + /// the defaulted datasource, wrapping the resultant collection of entities + /// in [`super::rows::CanyonRows`] async fn query<'a, S, Z>( stmt: S, params: Z, datasource_name: &'a str, - ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> + ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> where S: AsRef + Display + Sync + Send + 'a, Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, { let mut guarded_cache = CACHED_DATABASE_CONN.lock().await; + let database_conn = get_database_connection(datasource_name, &mut guarded_cache); - let database_conn = if datasource_name.is_empty() { - guarded_cache - .get_mut( - DATASOURCES - .get(0) - .expect("We didn't found any valid datasource configuration. Check your `canyon.toml` file") - .name - .as_str() - ).unwrap_or_else(|| panic!("No default datasource found. 
Check your `canyon.toml` file")) - } else { - guarded_cache.get_mut(datasource_name) - .unwrap_or_else(|| - panic!("Canyon couldn't find a datasource in the pool with the argument provided: {datasource_name}" - )) - }; - - match database_conn { + match *database_conn { + #[cfg(feature = "postgres")] DatabaseConnection::Postgres(_) => { postgres_query_launcher::launch::( database_conn, @@ -58,6 +43,7 @@ pub trait Transaction { ) .await } + #[cfg(feature = "mssql")] DatabaseConnection::SqlServer(_) => { sqlserver_query_launcher::launch::( database_conn, @@ -84,7 +70,7 @@ pub trait Transaction { /// /// See it's definition and docs to see the implementations. /// Also, you can find the written macro-code that performs the auto-mapping -/// in the *canyon_sql::canyon_macros* crates, on the root of this project. +/// in the *canyon_sql_root::canyon_macros* crates, on the root of this project. #[async_trait] pub trait CrudOperations: Transaction where @@ -119,14 +105,12 @@ where datasource_name: &'a str, ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>>; - async fn insert<'a>( - &mut self, - ) -> Result<(), Box>; + async fn insert<'a>(&mut self) -> Result<(), Box>; async fn insert_datasource<'a>( &mut self, datasource_name: &'a str, - ) -> Result<(), Box>; + ) -> Result<(), Box>; async fn multi_insert<'a>( instances: &'a mut [&'a mut T], @@ -137,69 +121,68 @@ where datasource_name: &'a str, ) -> Result<(), Box<(dyn std::error::Error + Send + Sync + 'static)>>; - async fn update(&self) -> Result<(), Box>; + async fn update(&self) -> Result<(), Box>; async fn update_datasource<'a>( &self, datasource_name: &'a str, - ) -> Result<(), Box>; + ) -> Result<(), Box>; fn update_query<'a>() -> UpdateQueryBuilder<'a, T>; fn update_query_datasource(datasource_name: &str) -> UpdateQueryBuilder<'_, T>; - async fn delete(&self) -> Result<(), Box>; + async fn delete(&self) -> Result<(), Box>; async fn delete_datasource<'a>( &self, datasource_name: &'a str, - ) -> 
Result<(), Box>; + ) -> Result<(), Box>; fn delete_query<'a>() -> DeleteQueryBuilder<'a, T>; fn delete_query_datasource(datasource_name: &str) -> DeleteQueryBuilder<'_, T>; } +#[cfg(feature = "postgres")] mod postgres_query_launcher { use crate::bounds::QueryParameter; - use crate::result::DatabaseResult; + use crate::rows::CanyonRows; use canyon_connection::canyon_database_connector::DatabaseConnection; pub async fn launch<'a, T>( db_conn: &DatabaseConnection, stmt: String, params: &'a [&'_ dyn QueryParameter<'_>], - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> { + ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> { let mut m_params = Vec::new(); for param in params { m_params.push(param.as_postgres_param()); } - Ok(DatabaseResult::new_postgresql( - db_conn - .postgres_connection() - .unwrap() - .client - .query(&stmt, m_params.as_slice()) - .await?, - )) + let r = db_conn + .postgres_connection() + .client + .query(&stmt, m_params.as_slice()) + .await?; + + Ok(CanyonRows::Postgres(r)) } } +#[cfg(feature = "mssql")] mod sqlserver_query_launcher { - use canyon_connection::tiberius::Row; - + use crate::rows::CanyonRows; use crate::{ bounds::QueryParameter, canyon_connection::{canyon_database_connector::DatabaseConnection, tiberius::Query}, - result::DatabaseResult, }; pub async fn launch<'a, T, Z>( db_conn: &mut DatabaseConnection, stmt: &mut String, params: Z, - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> + ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> where Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, { @@ -223,20 +206,14 @@ mod sqlserver_query_launcher { .iter() .for_each(|param| mssql_query.bind(*param)); - let _results: Vec = mssql_query - .query( - db_conn - .sqlserver_connection() - .expect("Error querying the MSSQL database") - .client, - ) + let _results = mssql_query + .query(db_conn.sqlserver_connection().client) .await? .into_results() - .await? 
- .into_iter() - .flatten() - .collect::>(); + .await?; - Ok(DatabaseResult::new_sqlserver(_results)) + Ok(CanyonRows::Tiberius( + _results.into_iter().flatten().collect(), + )) } } diff --git a/canyon_crud/src/lib.rs b/canyon_crud/src/lib.rs index 8a20b48e..cea474cb 100644 --- a/canyon_crud/src/lib.rs +++ b/canyon_crud/src/lib.rs @@ -1,10 +1,11 @@ +pub extern crate async_trait; extern crate canyon_connection; pub mod bounds; pub mod crud; pub mod mapper; pub mod query_elements; -pub mod result; +pub mod rows; pub use query_elements::operators::*; diff --git a/canyon_crud/src/mapper.rs b/canyon_crud/src/mapper.rs index 71303785..66cb91d2 100644 --- a/canyon_crud/src/mapper.rs +++ b/canyon_crud/src/mapper.rs @@ -1,4 +1,7 @@ -use canyon_connection::{tiberius, tokio_postgres}; +#[cfg(feature = "mssql")] +use canyon_connection::tiberius; +#[cfg(feature = "postgres")] +use canyon_connection::tokio_postgres; use crate::crud::Transaction; @@ -6,7 +9,8 @@ use crate::crud::Transaction; /// from some supported database in Canyon-SQL into a user's defined /// type `T` pub trait RowMapper>: Sized { + #[cfg(feature = "postgres")] fn deserialize_postgresql(row: &tokio_postgres::Row) -> T; - + #[cfg(feature = "mssql")] fn deserialize_sqlserver(row: &tiberius::Row) -> T; } diff --git a/canyon_crud/src/query_elements/query_builder.rs b/canyon_crud/src/query_elements/query_builder.rs index f0e68223..92146542 100644 --- a/canyon_crud/src/query_elements/query_builder.rs +++ b/canyon_crud/src/query_elements/query_builder.rs @@ -26,7 +26,7 @@ pub mod ops { /// hierarchy. /// /// For example, the [`super::QueryBuilder`] type holds the data - /// necessary for track the SQL sentece while it's being generated + /// necessary for track the SQL sentence while it's being generated /// thought the fluent builder, and provides the behaviour of /// the common elements defined in this trait. /// @@ -44,7 +44,7 @@ pub mod ops { /// just one type. 
pub trait QueryBuilder<'a, T> where - T: Debug + CrudOperations + Transaction + RowMapper, + T: CrudOperations + Transaction + RowMapper, { /// Returns a read-only reference to the underlying SQL sentence, /// with the same lifetime as self @@ -174,7 +174,7 @@ where self.datasource_name, ) .await? - .get_entities::()) + .into_results::()) } pub fn r#where>(&mut self, r#where: Z, op: impl Operator) { @@ -324,7 +324,7 @@ where } /// Adds a *LEFT JOIN* SQL statement to the underlying - /// [`Query`] holded by the [`QueryBuilder`], where: + /// [`Query`] held by the [`QueryBuilder`], where: /// /// * `join_table` - The table target of the join operation /// * `col1` - The left side of the ON operator for the join @@ -340,7 +340,7 @@ where } /// Adds a *RIGHT JOIN* SQL statement to the underlying - /// [`Query`] holded by the [`QueryBuilder`], where: + /// [`Query`] held by the [`QueryBuilder`], where: /// /// * `join_table` - The table target of the join operation /// * `col1` - The left side of the ON operator for the join @@ -356,7 +356,7 @@ where } /// Adds a *RIGHT JOIN* SQL statement to the underlying - /// [`Query`] holded by the [`QueryBuilder`], where: + /// [`Query`] held by the [`QueryBuilder`], where: /// /// * `join_table` - The table target of the join operation /// * `col1` - The left side of the ON operator for the join @@ -372,7 +372,7 @@ where } /// Adds a *FULL JOIN* SQL statement to the underlying - /// [`Query`] holded by the [`QueryBuilder`], where: + /// [`Query`] held by the [`QueryBuilder`], where: /// /// * `join_table` - The table target of the join operation /// * `col1` - The left side of the ON operator for the join @@ -428,12 +428,6 @@ where self } - #[inline] - fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { - self._inner.or(column, op); - self - } - #[inline] fn or_values_in(&mut self, r#and: Z, values: &'a [Q]) -> &mut Self where @@ -444,6 +438,12 @@ where self } + #[inline] + fn or>(&mut self, column: Z, op: impl 
Operator) -> &mut Self { + self._inner.or(column, op); + self + } + #[inline] fn order_by>(&mut self, order_by: Z, desc: bool) -> &mut Self { self._inner.order_by(order_by, desc); @@ -565,12 +565,6 @@ where self } - #[inline] - fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { - self._inner.or(column, op); - self - } - #[inline] fn or_values_in(&mut self, r#or: Z, values: &'a [Q]) -> &mut Self where @@ -581,6 +575,12 @@ where self } + #[inline] + fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { + self._inner.or(column, op); + self + } + #[inline] fn order_by>(&mut self, order_by: Z, desc: bool) -> &mut Self { self._inner.order_by(order_by, desc); @@ -665,12 +665,6 @@ where self } - #[inline] - fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { - self._inner.or(column, op); - self - } - #[inline] fn or_values_in(&mut self, r#or: Z, values: &'a [Q]) -> &mut Self where @@ -681,6 +675,12 @@ where self } + #[inline] + fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { + self._inner.or(column, op); + self + } + #[inline] fn order_by>(&mut self, order_by: Z, desc: bool) -> &mut Self { self._inner.order_by(order_by, desc); diff --git a/canyon_crud/src/result.rs b/canyon_crud/src/result.rs deleted file mode 100644 index 1a2cae29..00000000 --- a/canyon_crud/src/result.rs +++ /dev/null @@ -1,108 +0,0 @@ -use crate::{bounds::Row, crud::Transaction, mapper::RowMapper}; -use canyon_connection::{canyon_database_connector::DatabaseType, tiberius, tokio_postgres}; -use std::{fmt::Debug, marker::PhantomData}; - -/// Represents a database result after a query, by wrapping the `Vec` types that comes with the -/// results after the query. 
-/// and providing methods to deserialize this result into a **user defined struct** -#[derive(Debug)] -pub struct DatabaseResult { - pub postgres: Vec, - pub sqlserver: Vec, - pub active_ds: DatabaseType, - _phantom_data: std::marker::PhantomData, -} - -impl DatabaseResult { - pub fn new_postgresql(result: Vec) -> Self { - Self { - postgres: result, - sqlserver: Vec::with_capacity(0), - active_ds: DatabaseType::PostgreSql, - _phantom_data: PhantomData, - } - } - - pub fn new_sqlserver(results: Vec) -> Self { - Self { - postgres: Vec::with_capacity(0), - sqlserver: results, - active_ds: DatabaseType::SqlServer, - _phantom_data: PhantomData, - } - } - - /// Returns a [`Vec`] filled with instances of the type T. - /// Z param it's used to constraint the types that can call this method. - /// - /// Also, provides a way to statically call `Z::deserialize_` method, - /// which it's the implementation used by the macros to automatically - /// map database columns into the fields for T. - pub fn get_entities>(&self) -> Vec - where - T: Transaction, - { - match self.active_ds { - DatabaseType::PostgreSql => self.map_from_postgresql::(), - DatabaseType::SqlServer => self.map_from_sql_server::(), - } - } - - fn map_from_postgresql>(&self) -> Vec - where - T: Transaction, - { - let mut results = Vec::new(); - - self.postgres - .iter() - .for_each(|row| results.push(Z::deserialize_postgresql(row))); - - results - } - - fn map_from_sql_server>(&self) -> Vec - where - T: Transaction, - { - let mut results = Vec::new(); - - self.sqlserver - .iter() - .for_each(|row| results.push(Z::deserialize_sqlserver(row))); - - results - } - - pub fn as_canyon_rows(&self) -> Vec<&dyn Row> { - let mut results = Vec::new(); - - match self.active_ds { - DatabaseType::PostgreSql => { - self.postgres - .iter() - .for_each(|row| results.push(row as &dyn Row)); - } - DatabaseType::SqlServer => { - self.sqlserver - .iter() - .for_each(|row| results.push(row as &dyn Row)); - } - }; - - results - } - - 
/// Returns the active datasource - pub fn get_active_ds(&self) -> &DatabaseType { - &self.active_ds - } - - /// Returns how many rows contains the result of the query - pub fn number_of_results(&self) -> usize { - match self.active_ds { - DatabaseType::PostgreSql => self.postgres.len(), - DatabaseType::SqlServer => self.sqlserver.len(), - } - } -} diff --git a/canyon_crud/src/rows.rs b/canyon_crud/src/rows.rs new file mode 100644 index 00000000..d8d35070 --- /dev/null +++ b/canyon_crud/src/rows.rs @@ -0,0 +1,71 @@ +use crate::crud::Transaction; +use crate::mapper::RowMapper; +use std::marker::PhantomData; + +/// Lightweight wrapper over the collection of results of the different crates +/// supported by Canyon-SQL. +/// +/// Even tho the wrapping seems meaningless, this allows us to provide internal +/// operations that are too difficult or to ugly to implement in the macros that +/// will call the query method of Crud. +pub enum CanyonRows { + #[cfg(feature = "postgres")] + Postgres(Vec), + #[cfg(feature = "mssql")] + Tiberius(Vec), + UnusableTypeMarker(PhantomData), +} + +impl CanyonRows { + #[cfg(feature = "postgres")] + pub fn get_postgres_rows(&self) -> &Vec { + match self { + Self::Postgres(v) => v, + _ => panic!("This branch will never ever should be reachable"), + } + } + + #[cfg(feature = "mssql")] + pub fn get_tiberius_rows(&self) -> &Vec { + match self { + Self::Tiberius(v) => v, + _ => panic!("This branch will never ever should be reachable"), + } + } + + /// Consumes `self` and returns the wrapped [`std::vec::Vec`] with the instances of T + pub fn into_results>(self) -> Vec + where + T: Transaction, + { + match self { + #[cfg(feature = "postgres")] + Self::Postgres(v) => v.iter().map(|row| Z::deserialize_postgresql(row)).collect(), + #[cfg(feature = "mssql")] + Self::Tiberius(v) => v.iter().map(|row| Z::deserialize_sqlserver(row)).collect(), + _ => panic!("This branch will never ever should be reachable"), + } + } + + /// Returns the number of 
elements present on the wrapped collection + pub fn len(&self) -> usize { + match self { + #[cfg(feature = "postgres")] + Self::Postgres(v) => v.len(), + #[cfg(feature = "mssql")] + Self::Tiberius(v) => v.len(), + _ => panic!("This branch will never ever should be reachable"), + } + } + + /// Returns true whenever the wrapped collection of Rows does not contain any elements + pub fn is_empty(&self) -> bool { + match self { + #[cfg(feature = "postgres")] + Self::Postgres(v) => v.is_empty(), + #[cfg(feature = "mssql")] + Self::Tiberius(v) => v.is_empty(), + _ => panic!("This branch will never ever should be reachable"), + } + } +} diff --git a/canyon_macros/Cargo.toml b/canyon_macros/Cargo.toml index 93695087..82d336f5 100755 --- a/canyon_macros/Cargo.toml +++ b/canyon_macros/Cargo.toml @@ -1,23 +1,28 @@ [package] name = "canyon_macros" -version = "0.2.0" -edition = "2021" -documentation = "https://zerodaycode.github.io/canyon-book/" -homepage = "https://github.com/zerodaycode/Canyon-SQL" -readme = "../README.md" -license = "MIT" -description = "A Rust ORM and QueryBuilder" +version.workspace = true +edition.workspace = true +authors.workspace = true +documentation.workspace = true +homepage.workspace = true +readme.workspace = true +license.workspace = true +description.workspace = true [lib] proc-macro = true [dependencies] -syn = { version = "1.0.109", features = ["full"] } -quote = "1.0.9" -proc-macro2 = "1.0.27" -futures = "0.3.21" -tokio = { version = "1.9.0", features = ["full"] } +syn = { version = "1.0.109", features = ["full"] } # TODO Pending to upgrade and refactor +quote = { workspace = true } +proc-macro2 = { workspace = true } +futures = { workspace = true } +tokio = { workspace = true } -canyon_observer = { version = "0.2.0", path = "../canyon_observer" } -canyon_crud = { version = "0.2.0", path = "../canyon_crud" } -canyon_connection = { version = "0.2.0", path = "../canyon_connection" } +canyon_observer = { workspace = true } 
+canyon_crud = { workspace = true } +canyon_connection = { workspace = true } + +[features] +postgres = ["canyon_connection/postgres", "canyon_crud/postgres", "canyon_observer/postgres"] +mssql = ["canyon_connection/mssql", "canyon_crud/mssql", "canyon_observer/mssql"] diff --git a/canyon_macros/src/lib.rs b/canyon_macros/src/lib.rs index 34a166e8..ce03cc58 100755 --- a/canyon_macros/src/lib.rs +++ b/canyon_macros/src/lib.rs @@ -322,7 +322,7 @@ fn impl_crud_operations_trait_for_struct( _search_by_revese_fk_tokens.iter().map(|(_, m_impl)| m_impl); // The autogenerated name for the trait that holds the fk and rev fk searches - let fk_trait_ident = proc_macro2::Ident::new( + let fk_trait_ident = Ident::new( &format!("{}FkOperations", &ty.to_string()), proc_macro2::Span::call_site(), ); @@ -486,7 +486,6 @@ pub fn implement_row_mapper_for_type(input: proc_macro::TokenStream) -> proc_mac } }); - // TODO rework this ugly piece of code in the upcoming versions let init_field_values_sqlserver = fields.iter().map(|(_vis, ident, ty)| { let ident_name = ident.to_string(); @@ -568,21 +567,52 @@ pub fn implement_row_mapper_for_type(input: proc_macro::TokenStream) -> proc_mac // The type of the Struct let ty = ast.ident; - let tokens = quote! { - impl canyon_sql::crud::RowMapper for #ty - { - fn deserialize_postgresql(row: &canyon_sql::db_clients::tokio_postgres::Row) -> #ty { - Self { - #(#init_field_values),* + let postgres_enabled = cfg!(feature = "postgres"); + let mssql_enabled = cfg!(feature = "mssql"); + + let tokens = if postgres_enabled && mssql_enabled { + quote! 
{ + impl canyon_sql::crud::RowMapper for #ty { + fn deserialize_postgresql(row: &canyon_sql::db_clients::tokio_postgres::Row) -> #ty { + Self { + #(#init_field_values),* + } + } + fn deserialize_sqlserver(row: &canyon_sql::db_clients::tiberius::Row) -> #ty { + Self { + #(#init_field_values_sqlserver),* + } } } - - fn deserialize_sqlserver(row: &canyon_sql::db_clients::tiberius::Row) -> #ty { - Self { - #(#init_field_values_sqlserver),* + } + } else if postgres_enabled { + quote! { + impl canyon_sql::crud::RowMapper for #ty { + fn deserialize_postgresql(row: &canyon_sql::db_clients::tokio_postgres::Row) -> #ty { + Self { + #(#init_field_values),* + } } } } + } else if mssql_enabled { + quote! { + impl canyon_sql::crud::RowMapper for #ty { + fn deserialize_sqlserver(row: &canyon_sql::db_clients::tiberius::Row) -> #ty { + Self { + #(#init_field_values_sqlserver),* + } + } + } + } + } else { + quote! { + panic!( + "Reached a branch in the implementation of the Row Mapper macro that should never be reached.\ + This is a severe bug of Canyon-SQL. Please, open us an issue at \ + https://github.com/zerodaycode/Canyon-SQL/issues and let us know about that failure." + ) + } }; tokens.into() diff --git a/canyon_macros/src/query_operations/insert.rs b/canyon_macros/src/query_operations/insert.rs index 11890b31..329399f0 100644 --- a/canyon_macros/src/query_operations/insert.rs +++ b/canyon_macros/src/query_operations/insert.rs @@ -34,11 +34,62 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri ._fields_with_types() .into_iter() .find(|(i, _t)| Some(i.to_string()) == primary_key); - let insert_transaction = if let Some(pk_data) = &pk_ident_type { let pk_ident = &pk_data.0; let pk_type = &pk_data.1; + let postgres_enabled = cfg!(feature = "postgres"); + let mssql_enabled = cfg!(feature = "mssql"); + + let match_rows = if postgres_enabled && mssql_enabled { + quote! 
{ + canyon_sql::crud::CanyonRows::Postgres(mut v) => { + self.#pk_ident = v + .get(0) + .ok_or("Failed getting the returned IDs for an insert")? + .get::<&str, #pk_type>(#primary_key); + Ok(()) + } + canyon_sql::crud::CanyonRows::Tiberius(mut v) => { + self.#pk_ident = v + .get(0) + .ok_or("Failed getting the returned IDs for a multi insert")? + .get::<#pk_type, &str>(#primary_key) + .ok_or("SQL Server primary key type failed to be set as value")?; + Ok(()) + } + } + } else if postgres_enabled { + quote! { + canyon_sql::crud::CanyonRows::Postgres(mut v) => { + self.#pk_ident = v + .get(0) + .ok_or("Failed getting the returned IDs for an insert")? + .get::<&str, #pk_type>(#primary_key); + Ok(()) + } + } + } else if mssql_enabled { + quote! { + canyon_sql::crud::CanyonRows::Tiberius(mut v) => { + self.#pk_ident = v + .get(0) + .ok_or("Failed getting the returned IDs for a multi insert")? + .get::<#pk_type, &str>(#primary_key) + .ok_or("SQL Server primary key type failed to be set as value")?; + Ok(()) + } + } + } else { + quote! { + panic!( + "Reached a branch in the implementation of the Row Mapper macro that should never be reached.\ + This is a severe bug of Canyon-SQL. Please, open us an issue at \ + https://github.com/zerodaycode/Canyon-SQL/issues and let us know about that failure." + ) + } + }; + quote! 
{ #remove_pk_value_from_fn_entry; @@ -50,35 +101,15 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri #primary_key ); - let result = <#ty as canyon_sql::crud::Transaction<#ty>>::query( + let rows = <#ty as canyon_sql::crud::Transaction<#ty>>::query( stmt, values, datasource_name - ).await; - - match result { - Ok(res) => { - match res.get_active_ds() { - canyon_sql::crud::DatabaseType::PostgreSql => { - self.#pk_ident = res.postgres.get(0) - .expect("No value found on the returning clause") - .get::<&str, #pk_type>(#primary_key) - .to_owned(); - - Ok(()) - }, - canyon_sql::crud::DatabaseType::SqlServer => { - self.#pk_ident = res.sqlserver.get(0) - .expect("No value found on the returning clause") - .get::<#pk_type, &str>(#primary_key) - .expect("SQL Server primary key type failed to be set as value") - .to_owned(); - - Ok(()) - } - } - }, - Err(e) => Err(e) + ).await?; + + match rows { + #match_rows + _ => panic!("Reached the panic match arm of insert for the DatabaseConnection type") // TODO remove when the generics will be refactored } } } else { @@ -228,6 +259,70 @@ pub fn generate_multiple_insert_tokens( let pk_ident = &pk_data.0; let pk_type = &pk_data.1; + let postgres_enabled = cfg!(feature = "postgres"); + let mssql_enabled = cfg!(feature = "mssql"); + + let match_multi_insert_rows = if postgres_enabled && mssql_enabled { + quote! 
{ + canyon_sql::crud::CanyonRows::Postgres(mut v) => { + for (idx, instance) in instances.iter_mut().enumerate() { + instance.#pk_ident = v + .get(idx) + .expect("Failed getting the returned IDs for a multi insert") + .get::<&str, #pk_type>(#pk); + } + + Ok(()) + } + canyon_sql::crud::CanyonRows::Tiberius(mut v) => { + for (idx, instance) in instances.iter_mut().enumerate() { + instance.#pk_ident = v + .get(idx) + .expect("Failed getting the returned IDs for a multi insert") + .get::<#pk_type, &str>(#pk) + .expect("SQL Server primary key type failed to be set as value"); + } + + Ok(()) + } + } + } else if postgres_enabled { + quote! { + canyon_sql::crud::CanyonRows::Postgres(mut v) => { + for (idx, instance) in instances.iter_mut().enumerate() { + instance.#pk_ident = v + .get(idx) + .expect("Failed getting the returned IDs for a multi insert") + .get::<&str, #pk_type>(#pk); + } + + Ok(()) + } + } + } else if mssql_enabled { + quote! { + canyon_sql::crud::CanyonRows::Tiberius(mut v) => { + for (idx, instance) in instances.iter_mut().enumerate() { + instance.#pk_ident = v + .get(idx) + .expect("Failed getting the returned IDs for a multi insert") + .get::<#pk_type, &str>(#pk) + .expect("SQL Server primary key type failed to be set as value"); + } + + Ok(()) + } + } + } else { + quote! { + panic!( + "Reached a branch in the implementation of the Row Mapper macro that should never be reached.\ + This is a severe bug of Canyon-SQL. Please, open us an issue at \ + https://github.com/zerodaycode/Canyon-SQL/issues and let us know about that failure." + ) + } + }; + quote! 
{ mapped_fields = #column_names .split(", ") @@ -290,41 +385,15 @@ pub fn generate_multiple_insert_tokens( } } - let result = <#ty as canyon_sql::crud::Transaction<#ty>>::query( + let multi_insert_result = <#ty as canyon_sql::crud::Transaction<#ty>>::query( stmt, v_arr, datasource_name - ).await; - - match result { - Ok(res) => { - match res.get_active_ds() { - canyon_sql::crud::DatabaseType::PostgreSql => { - for (idx, instance) in instances.iter_mut().enumerate() { - instance.#pk_ident = res - .postgres - .get(idx) - .expect("Failed getting the returned IDs for a multi insert") - .get::<&str, #pk_type>(#pk); - } - - Ok(()) - }, - canyon_sql::crud::DatabaseType::SqlServer => { - for (idx, instance) in instances.iter_mut().enumerate() { - instance.#pk_ident = res - .sqlserver - .get(idx) - .expect("Failed getting the returned IDs for a multi insert") - .get::<#pk_type, &str>(#pk) - .expect("SQL Server primary key type failed to be set as value"); - } - - Ok(()) - } - } - }, - Err(e) => Err(e) + ).await?; + + match multi_insert_result { + #match_multi_insert_rows + _ => panic!() // TODO remove when the generics will be refactored } } } else { @@ -382,16 +451,13 @@ pub fn generate_multiple_insert_tokens( } } - let result = <#ty as canyon_sql::crud::Transaction<#ty>>::query( + <#ty as canyon_sql::crud::Transaction<#ty>>::query( stmt, v_arr, datasource_name - ).await; + ).await?; - match result { - Ok(res) => Ok(()), - Err(e) => Err(e) - } + Ok(()) } }; diff --git a/canyon_macros/src/query_operations/select.rs b/canyon_macros/src/query_operations/select.rs index 761451c1..0f70ab4d 100644 --- a/canyon_macros/src/query_operations/select.rs +++ b/canyon_macros/src/query_operations/select.rs @@ -16,7 +16,7 @@ pub fn generate_find_all_unchecked_tokens( let stmt = format!("SELECT * FROM {table_schema_data}"); quote! 
{ - /// Performns a `SELECT * FROM table_name`, where `table_name` it's + /// Performs a `SELECT * FROM table_name`, where `table_name` it's /// the name of your entity but converted to the corresponding /// database convention. P.ej. PostgreSQL prefers table names declared /// with snake_case identifiers. @@ -27,7 +27,7 @@ pub fn generate_find_all_unchecked_tokens( "" ).await .unwrap() - .get_entities::<#ty>() + .into_results::<#ty>() } /// Performs a `SELECT * FROM table_name`, where `table_name` it's @@ -45,7 +45,7 @@ pub fn generate_find_all_unchecked_tokens( datasource_name ).await .unwrap() - .get_entities::<#ty>() + .into_results::<#ty>() } } } @@ -60,7 +60,7 @@ pub fn generate_find_all_tokens( let stmt = format!("SELECT * FROM {table_schema_data}"); quote! { - /// Performns a `SELECT * FROM table_name`, where `table_name` it's + /// Performs a `SELECT * FROM table_name`, where `table_name` it's /// the name of your entity but converted to the corresponding /// database convention. P.ej. PostgreSQL prefers table names declared /// with snake_case identifiers. @@ -73,11 +73,11 @@ pub fn generate_find_all_tokens( &[], "" ).await? - .get_entities::<#ty>() + .into_results::<#ty>() ) } - /// Performns a `SELECT * FROM table_name`, where `table_name` it's + /// Performs a `SELECT * FROM table_name`, where `table_name` it's /// the name of your entity but converted to the corresponding /// database convention. P.ej. PostgreSQL prefers table names declared /// with snake_case identifiers. @@ -98,7 +98,7 @@ pub fn generate_find_all_tokens( &[], datasource_name ).await? - .get_entities::<#ty>() + .into_results::<#ty>() ) } } @@ -150,25 +150,46 @@ pub fn generate_count_tokens( let ty_str = &ty.to_string(); let stmt = format!("SELECT COUNT (*) FROM {table_schema_data}"); - let result_handling = quote! 
{ - match count.get_active_ds() { - canyon_sql::crud::DatabaseType::PostgreSql => { - Ok( - count.postgres.get(0) - .expect(&format!("Count operation failed for {:?}", #ty_str)) - .get::<&str, i64>("count") - .to_owned() - ) - }, - canyon_sql::crud::DatabaseType::SqlServer => { - Ok( - count.sqlserver.get(0) - .expect(&format!("Count operation failed for {:?}", #ty_str)) - .get::(0) - .expect(&format!("SQL Server failed to return the count values for {:?}", #ty_str)) - .into() - ) - } + let postgres_enabled = cfg!(feature = "postgres"); + let mssql_enabled = cfg!(feature = "mssql"); + + let result_handling = if postgres_enabled && mssql_enabled { + quote! { + canyon_sql::crud::CanyonRows::Postgres(mut v) => Ok( + v.remove(0).get::<&str, i64>("count") + ), + canyon_sql::crud::CanyonRows::Tiberius(mut v) => + v.remove(0) + .get::(0) + .map(|c| c as i64) + .ok_or(format!("Failure in the COUNT query for MSSQL for: {}", #ty_str).into()) + .into(), + _ => panic!() // TODO remove when the generics will be refactored + } + } else if postgres_enabled { + quote! { + canyon_sql::crud::CanyonRows::Postgres(mut v) => Ok( + v.remove(0).get::<&str, i64>("count") + ), + _ => panic!() // TODO remove when the generics will be refactored + } + } else if mssql_enabled { + quote! { + canyon_sql::crud::CanyonRows::Tiberius(mut v) => + v.remove(0) + .get::(0) + .map(|c| c as i64) + .ok_or(format!("Failure in the COUNT query for MSSQL for: {}", #ty_str).into()) + .into(), + _ => panic!() // TODO remove when the generics will be refactored + } + } else { + quote! { + panic!( + "Reached a branch in the implementation of the Row Mapper macro that should never be reached.\ + This is a severe bug of Canyon-SQL. Please, open us an issue at \ + https://github.com/zerodaycode/Canyon-SQL/issues and let us know about that failure." 
+ ) } }; @@ -182,7 +203,9 @@ pub fn generate_count_tokens( "" ).await?; - #result_handling + match count { + #result_handling + } } /// Performs a COUNT(*) query over some table, returning a [`Result`] rather than panicking, @@ -194,7 +217,9 @@ pub fn generate_count_tokens( datasource_name ).await?; - #result_handling + match count { + #result_handling + } } } } @@ -242,9 +267,9 @@ pub fn generate_find_by_pk_tokens( let result_handling = quote! { match result { - n if n.number_of_results() == 0 => Ok(None), + n if n.len() == 0 => Ok(None), _ => Ok( - Some(result.get_entities::<#ty>().remove(0)) + Some(result.into_results::<#ty>().remove(0)) ) } }; @@ -347,9 +372,9 @@ pub fn generate_find_by_foreign_key_tokens( ); let result_handler = quote! { match result { - n if n.number_of_results() == 0 => Ok(None), + n if n.len() == 0 => Ok(None), _ => Ok(Some( - result.get_entities::<#fk_ty>().remove(0) + result.into_results::<#fk_ty>().remove(0) )) } }; @@ -434,8 +459,8 @@ pub fn generate_find_by_reverse_foreign_key_tokens( #quoted_method_signature { let lookage_value = value.get_fk_column(#column) - .expect(format!( - "Column: {:?} not found in type: {:?}", #column, #table + .expect(format!( + "Column: {:?} not found in type: {:?}", #column, #table ).as_str()); let stmt = format!( @@ -448,8 +473,7 @@ pub fn generate_find_by_reverse_foreign_key_tokens( stmt, &[lookage_value], "" - ).await? - .get_entities::<#ty>()) + ).await?.into_results::<#ty>()) } }, )); @@ -477,8 +501,7 @@ pub fn generate_find_by_reverse_foreign_key_tokens( stmt, &[lookage_value], datasource_name - ).await? 
- .get_entities::<#ty>()) + ).await?.into_results::<#ty>()) } }, )); diff --git a/canyon_observer/Cargo.toml b/canyon_observer/Cargo.toml index cb4bd353..0f939b2c 100644 --- a/canyon_observer/Cargo.toml +++ b/canyon_observer/Cargo.toml @@ -1,27 +1,29 @@ [package] name = "canyon_observer" -version = "0.2.0" -edition = "2021" -documentation = "https://zerodaycode.github.io/canyon-book/" -homepage = "https://github.com/zerodaycode/Canyon-SQL" -readme = "../README.md" -license = "MIT" -description = "A Rust ORM and QueryBuilder" +version.workspace = true +edition.workspace = true +authors.workspace = true +documentation.workspace = true +homepage.workspace = true +readme.workspace = true +license.workspace = true +description.workspace = true [dependencies] -tokio = { version = "1.9.0", features = ["full"] } -tokio-postgres = { version = "0.7.2" , features=["with-chrono-0_4"] } -async-trait = { version = "0.1.50" } -regex = "1.5" -walkdir = "2" +canyon_crud = { workspace = true } +canyon_connection = { workspace = true } +tokio = { workspace = true } +tokio-postgres = { workspace = true, optional = true } +tiberius = { workspace = true, optional = true } +async-trait = { workspace = true } -proc-macro2 = "1.0.27" -syn = { version = "1.0.86", features = ["full", "parsing"] } -quote = "1.0.9" - -# Debug +regex = { workspace = true } +walkdir = { workspace = true } partialdebug = "0.2.0" +proc-macro2 = { workspace = true } +quote = { workspace = true } +syn = { version = "1.0.86", features = ["full", "parsing"] } # TODO Pending to refactor and upgrade -# Internal dependencies -canyon_crud = { version = "0.2.0", path = "../canyon_crud" } -canyon_connection = { version = "0.2.0", path = "../canyon_connection" } +[features] +postgres = ["tokio-postgres", "canyon_connection/postgres", "canyon_crud/postgres"] +mssql = ["tiberius", "canyon_connection/mssql", "canyon_crud/mssql"] diff --git a/canyon_observer/src/constants.rs b/canyon_observer/src/constants.rs 
index c9db74e8..3928da4f 100644 --- a/canyon_observer/src/constants.rs +++ b/canyon_observer/src/constants.rs @@ -1,5 +1,6 @@ pub const NUMERIC_PK_DATATYPE: [&str; 6] = ["i16", "u16", "i32", "u32", "i64", "u64"]; +#[cfg(feature = "postgres")] pub mod postgresql_queries { pub static CANYON_MEMORY_TABLE: &str = "CREATE TABLE IF NOT EXISTS canyon_memory ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, @@ -35,6 +36,7 @@ pub mod postgresql_queries { table_schema = 'public';"; } +#[cfg(feature = "mssql")] pub mod mssql_queries { pub static CANYON_MEMORY_TABLE: &str = "IF OBJECT_ID(N'[dbo].[canyon_memory]', N'U') IS NULL BEGIN @@ -142,7 +144,7 @@ pub mod rust_type { pub const OPT_NAIVE_DATE_TIME: &str = "Option"; } -/// TODO +#[cfg(feature = "postgres")] pub mod postgresql_type { pub const INT_8: &str = "int8"; pub const SMALL_INT: &str = "smallint"; @@ -155,6 +157,7 @@ pub mod postgresql_type { pub const DATETIME: &str = "timestamp without time zone"; } +#[cfg(feature = "mssql")] pub mod sqlserver_type { pub const TINY_INT: &str = "TINY INT"; pub const SMALL_INT: &str = "SMALL INT"; diff --git a/canyon_observer/src/lib.rs b/canyon_observer/src/lib.rs index 1a0766e5..41e0dd42 100644 --- a/canyon_observer/src/lib.rs +++ b/canyon_observer/src/lib.rs @@ -11,6 +11,7 @@ /// in order to perform the migrations pub mod migrations; +extern crate canyon_connection; extern crate canyon_crud; mod constants; diff --git a/canyon_observer/src/manager/entity.rs b/canyon_observer/src/manager/entity.rs index 78e2f157..7aaeb38e 100644 --- a/canyon_observer/src/manager/entity.rs +++ b/canyon_observer/src/manager/entity.rs @@ -71,7 +71,7 @@ impl CanyonEntity { /// Generates an implementation of the match pattern to find whatever variant /// is being requested when the method `.field_name_as_str(self)` it's invoked over some - /// instance that implements the `canyon_sql::crud::bounds::FieldIdentifier` trait + /// instance that implements the 
`canyon_sql_root::crud::bounds::FieldIdentifier` trait pub fn create_match_arm_for_get_variant_as_string( &self, enum_name: &Ident, @@ -91,7 +91,7 @@ impl CanyonEntity { /// Generates an implementation of the match pattern to find whatever variant /// is being requested when the method `.value()` it's invoked over some - /// instance that implements the `canyon_sql::crud::bounds::FieldValueIdentifier` trait + /// instance that implements the `canyon_sql_root::crud::bounds::FieldValueIdentifier` trait pub fn create_match_arm_for_relate_fields_with_values( &self, enum_name: &Ident, diff --git a/canyon_observer/src/migrations/handler.rs b/canyon_observer/src/migrations/handler.rs index d454128a..9ce3c4e8 100644 --- a/canyon_observer/src/migrations/handler.rs +++ b/canyon_observer/src/migrations/handler.rs @@ -1,11 +1,11 @@ use canyon_connection::{datasources::Migrations as MigrationsStatus, DATASOURCES}; +use canyon_crud::rows::CanyonRows; use partialdebug::placeholder::PartialDebug; use crate::{ canyon_crud::{ bounds::{Column, Row, RowOperations}, crud::Transaction, - result::DatabaseResult, DatabaseType, }, constants, @@ -53,7 +53,8 @@ impl Migrations { // Tracked entities that must be migrated whenever Canyon starts let schema_status = Self::fetch_database(&datasource.name, datasource.get_db_type()).await; - let database_tables_schema_info = Self::map_rows(schema_status); + let database_tables_schema_info = + Self::map_rows(schema_status, datasource.get_db_type()); // We filter the tables from the schema that aren't Canyon entities let mut user_database_tables = vec![]; @@ -87,9 +88,11 @@ impl Migrations { async fn fetch_database( datasource_name: &str, db_type: DatabaseType, - ) -> DatabaseResult { + ) -> CanyonRows { let query = match db_type { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => constants::postgresql_queries::FETCH_PUBLIC_SCHEMA, + #[cfg(feature = "mssql")] DatabaseType::SqlServer => constants::mssql_queries::FETCH_PUBLIC_SCHEMA, }; @@ 
-105,34 +108,14 @@ impl Migrations { /// Handler for parse the result of query the information of some database schema, /// and extract the content of the returned rows into custom structures with /// the data well organized for every entity present on that schema - fn map_rows(db_results: DatabaseResult) -> Vec { - let mut schema_info: Vec = Vec::new(); - - for res_row in db_results.as_canyon_rows().into_iter() { - let unique_table = schema_info - .iter_mut() - .find(|table| table.table_name == *res_row.get::<&str>("table_name").to_owned()); - match unique_table { - Some(table) => { - /* If a table entity it's already present on the collection, we add it - the founded columns related to the table */ - Self::get_columns_metadata(res_row, table); - } - None => { - /* If there's no table for a given "table_name" property on the - collection yet, we must create a new instance and attach it - the founded columns data in this iteration */ - let mut new_table = TableMetadata { - table_name: res_row.get::<&str>("table_name").to_owned(), - columns: Vec::new(), - }; - Self::get_columns_metadata(res_row, &mut new_table); - schema_info.push(new_table); - } - }; + fn map_rows(db_results: CanyonRows, db_type: DatabaseType) -> Vec { + match db_results { + #[cfg(feature = "postgres")] + CanyonRows::Postgres(v) => Self::process_tp_rows(v, db_type), + #[cfg(feature = "mssql")] + CanyonRows::Tiberius(v) => Self::process_tib_rows(v, db_type), + _ => panic!(), } - - schema_info } /// Parses all the [`Row`] after query the information of the targeted schema, @@ -218,4 +201,95 @@ impl Migrations { } }; } + + #[cfg(feature = "postgres")] + fn process_tp_rows( + db_results: Vec, + db_type: DatabaseType, + ) -> Vec { + let mut schema_info: Vec = Vec::new(); + for res_row in db_results.iter() { + let unique_table = schema_info + .iter_mut() + .find(|table| check_for_table_name(table, db_type, res_row as &dyn Row)); + match unique_table { + Some(table) => { + /* If a table entity it's 
already present on the collection, we add it + the founded columns related to the table */ + Self::get_columns_metadata(res_row as &dyn Row, table); + } + None => { + /* If there's no table for a given "table_name" property on the + collection yet, we must create a new instance and attach it + the founded columns data in this iteration */ + let mut new_table = TableMetadata { + table_name: get_table_name_from_tp_row(res_row), + columns: Vec::new(), + }; + Self::get_columns_metadata(res_row as &dyn Row, &mut new_table); + schema_info.push(new_table); + } + }; + } + + schema_info + } + + #[cfg(feature = "mssql")] + fn process_tib_rows( + db_results: Vec, + db_type: DatabaseType, + ) -> Vec { + let mut schema_info: Vec = Vec::new(); + for res_row in db_results.iter() { + let unique_table = schema_info + .iter_mut() + .find(|table| check_for_table_name(table, db_type, res_row as &dyn Row)); + match unique_table { + Some(table) => { + /* If a table entity it's already present on the collection, we add it + the founded columns related to the table */ + Self::get_columns_metadata(res_row as &dyn Row, table); + } + None => { + /* If there's no table for a given "table_name" property on the + collection yet, we must create a new instance and attach it + the founded columns data in this iteration */ + let mut new_table = TableMetadata { + table_name: get_table_name_from_tib_row(res_row), + columns: Vec::new(), + }; + Self::get_columns_metadata(res_row as &dyn Row, &mut new_table); + schema_info.push(new_table); + } + }; + } + + schema_info + } +} + +#[cfg(feature = "postgres")] +fn get_table_name_from_tp_row(res_row: &tokio_postgres::Row) -> String { + res_row.get::<&str, String>("table_name") +} +#[cfg(feature = "mssql")] +fn get_table_name_from_tib_row(res_row: &tiberius::Row) -> String { + res_row + .get::<&str, &str>("table_name") + .unwrap_or_default() + .to_string() +} + +fn check_for_table_name( + table: &&mut TableMetadata, + db_type: DatabaseType, + res_row: &dyn 
Row, +) -> bool { + match db_type { + #[cfg(feature = "postgres")] + DatabaseType::PostgreSql => table.table_name == res_row.get_postgres::<&str>("table_name"), + #[cfg(feature = "mssql")] + DatabaseType::SqlServer => table.table_name == res_row.get_mssql::<&str>("table_name"), + } } diff --git a/canyon_observer/src/migrations/information_schema.rs b/canyon_observer/src/migrations/information_schema.rs index bdf9f48e..74709619 100644 --- a/canyon_observer/src/migrations/information_schema.rs +++ b/canyon_observer/src/migrations/information_schema.rs @@ -1,4 +1,7 @@ -use canyon_connection::{tiberius::ColumnType as TIB_TY, tokio_postgres::types::Type as TP_TYP}; +#[cfg(feature = "mssql")] +use canyon_connection::tiberius::ColumnType as TIB_TY; +#[cfg(feature = "postgres")] +use canyon_connection::tokio_postgres::types::Type as TP_TYP; use canyon_crud::bounds::{Column, ColumnType, Row, RowOperations}; /// Model that represents the database entities that belongs to the current schema. @@ -40,21 +43,27 @@ impl ColumnMetadataTypeValue { /// Retrieves the value stored in a [`Column`] for a passed [`Row`] pub fn get_value(row: &dyn Row, col: &Column) -> Self { match col.column_type() { + #[cfg(feature = "postgres")] ColumnType::Postgres(v) => { match *v { - TP_TYP::NAME | TP_TYP::VARCHAR | TP_TYP::TEXT => { - Self::StringValue(row.get_opt::<&str>(col.name()).map(|opt| opt.to_owned())) - } - TP_TYP::INT4 => Self::IntValue(row.get_opt::(col.name())), + TP_TYP::NAME | TP_TYP::VARCHAR | TP_TYP::TEXT => Self::StringValue( + row.get_postgres_opt::<&str>(col.name()) + .map(|opt| opt.to_owned()), + ), + TP_TYP::INT4 => Self::IntValue(row.get_postgres_opt::(col.name())), _ => Self::NoneValue, // TODO watchout this one } } + #[cfg(feature = "mssql")] ColumnType::SqlServer(v) => match v { TIB_TY::NChar | TIB_TY::NVarchar | TIB_TY::BigChar | TIB_TY::BigVarChar => { - Self::StringValue(row.get_opt::<&str>(col.name()).map(|opt| opt.to_owned())) + Self::StringValue( + 
row.get_mssql_opt::<&str>(col.name()) + .map(|opt| opt.to_owned()), + ) } TIB_TY::Int2 | TIB_TY::Int4 | TIB_TY::Int8 | TIB_TY::Intn => { - Self::IntValue(row.get_opt::(col.name())) + Self::IntValue(row.get_mssql_opt::(col.name())) } _ => Self::NoneValue, }, diff --git a/canyon_observer/src/migrations/memory.rs b/canyon_observer/src/migrations/memory.rs index 0a4080c0..18f6eb31 100644 --- a/canyon_observer/src/migrations/memory.rs +++ b/canyon_observer/src/migrations/memory.rs @@ -1,5 +1,5 @@ use crate::constants; -use canyon_crud::{bounds::RowOperations, crud::Transaction, DatabaseType, DatasourceConfig}; +use canyon_crud::{crud::Transaction, DatabaseType, DatasourceConfig}; use regex::Regex; use std::collections::HashMap; use std::fs; @@ -70,21 +70,47 @@ impl CanyonMemory { let res = Self::query("SELECT * FROM canyon_memory", [], &datasource.name) .await .expect("Error querying Canyon Memory"); - let mem_results = res.as_canyon_rows(); // Manually maps the results let mut db_rows = Vec::new(); - for row in mem_results.iter() { - let db_row = CanyonMemoryRow { - id: row.get::("id"), - filepath: row.get::<&str>("filepath"), - struct_name: row.get::<&str>("struct_name"), - declared_table_name: row.get::<&str>("declared_table_name"), - }; - db_rows.push(db_row); + #[cfg(feature = "postgres")] + { + let mem_results: &Vec = res.get_postgres_rows(); + for row in mem_results { + let db_row = CanyonMemoryRow { + id: row.get::<&str, i32>("id"), + filepath: row.get::<&str, String>("filepath"), + struct_name: row.get::<&str, String>("struct_name").to_owned(), + declared_table_name: row.get::<&str, String>("declared_table_name").to_owned(), + }; + db_rows.push(db_row); + } + } + #[cfg(feature = "mssql")] + { + let mem_results: &Vec = res.get_tiberius_rows(); + for row in mem_results { + let db_row = CanyonMemoryRow { + id: row.get::("id").unwrap(), + filepath: row.get::<&str, &str>("filepath").unwrap().to_string(), + struct_name: row.get::<&str, 
&str>("struct_name").unwrap().to_string(), + declared_table_name: row + .get::<&str, &str>("declared_table_name") + .unwrap() + .to_string(), + }; + db_rows.push(db_row); + } } - // Parses the source code files looking for the #[canyon_entity] annotated classes + Self::populate_memory(datasource, canyon_entities, db_rows).await + } + + async fn populate_memory( + datasource: &DatasourceConfig, + canyon_entities: &[CanyonRegisterEntity<'_>], + db_rows: Vec, + ) -> CanyonMemory { let mut mem = Self { memory: Vec::new(), renamed_entities: HashMap::new(), @@ -106,7 +132,7 @@ impl CanyonMemory { && old.struct_name == _struct.struct_name && old.declared_table_name == _struct.declared_table_name) { - updates.push(old.struct_name); + updates.push(&old.struct_name); let stmt = format!( "UPDATE canyon_memory SET filepath = '{}', struct_name = '{}', declared_table_name = '{}' \ WHERE id = {}", @@ -137,12 +163,12 @@ impl CanyonMemory { } // Deletes the records from canyon_memory, because they stopped to be tracked by Canyon - for db_row in db_rows.into_iter() { + for db_row in db_rows.iter() { if !mem .memory .iter() .any(|entity| entity.struct_name == db_row.struct_name) - && !updates.contains(&db_row.struct_name) + && !updates.contains(&&(db_row.struct_name)) { save_canyon_memory_query( format!( @@ -216,12 +242,12 @@ impl CanyonMemory { } /// Generates, if not exists the `canyon_memory` table - #[cfg(not(cargo_check))] async fn create_memory(datasource_name: &str, database_type: &DatabaseType) { - let query = if database_type == &DatabaseType::PostgreSql { - constants::postgresql_queries::CANYON_MEMORY_TABLE - } else { - constants::mssql_queries::CANYON_MEMORY_TABLE + let query = match database_type { + #[cfg(feature = "postgres")] + DatabaseType::PostgreSql => constants::postgresql_queries::CANYON_MEMORY_TABLE, + #[cfg(feature = "mssql")] + DatabaseType::SqlServer => constants::mssql_queries::CANYON_MEMORY_TABLE, }; Self::query(query, [], datasource_name) @@ -250,11 +276,11 
@@ fn save_canyon_memory_query(stmt: String, ds_name: &str) { /// Represents a single row from the `canyon_memory` table #[derive(Debug)] -struct CanyonMemoryRow<'a> { +struct CanyonMemoryRow { id: i32, - filepath: &'a str, - struct_name: &'a str, - declared_table_name: &'a str, + filepath: String, + struct_name: String, + declared_table_name: String, } /// Represents the data that will be serialized in the `canyon_memory` table diff --git a/canyon_observer/src/migrations/processor.rs b/canyon_observer/src/migrations/processor.rs index c3995bbf..b096b828 100644 --- a/canyon_observer/src/migrations/processor.rs +++ b/canyon_observer/src/migrations/processor.rs @@ -169,7 +169,7 @@ impl MigrationsProcessor { entity_name: &'a str, entity_fields: Vec, current_table_metadata: Option<&'a TableMetadata>, - db_type: DatabaseType, + _db_type: DatabaseType, ) { if current_table_metadata.is_none() { return; @@ -188,12 +188,15 @@ impl MigrationsProcessor { .collect(); for column_metadata in columns_name_to_delete { - if db_type == DatabaseType::SqlServer && !column_metadata.is_nullable { - self.drop_column_not_null( - entity_name, - column_metadata.column_name.clone(), - MigrationsHelper::get_datatype_from_column_metadata(column_metadata), - ) + #[cfg(feature = "mssql")] + { + if _db_type == DatabaseType::SqlServer && !column_metadata.is_nullable { + self.drop_column_not_null( + entity_name, + column_metadata.column_name.clone(), + MigrationsHelper::get_datatype_from_column_metadata(column_metadata), + ) + } } self.delete_column(entity_name, column_metadata.column_name.clone()); } @@ -243,6 +246,7 @@ impl MigrationsProcessor { ))); } + #[cfg(feature = "mssql")] fn drop_column_not_null( &mut self, table_name: &str, @@ -314,8 +318,11 @@ impl MigrationsProcessor { if attr.starts_with("Annotation: PrimaryKey") { Self::add_primary_key(self, entity_name, canyon_register_entity_field.clone()); - if canyon_register_entity_field.is_autoincremental() { - Self::add_identity(self, 
entity_name, canyon_register_entity_field.clone()); + #[cfg(feature = "postgres")] + { + if canyon_register_entity_field.is_autoincremental() { + Self::add_identity(self, entity_name, canyon_register_entity_field.clone()); + } } } } @@ -351,6 +358,7 @@ impl MigrationsProcessor { ))); } + #[cfg(feature = "postgres")] fn add_identity(&mut self, entity_name: &str, field: CanyonRegisterEntityField) { self.constraints_operations .push(Box::new(ColumnOperation::AlterColumnAddIdentity( @@ -386,19 +394,24 @@ impl MigrationsProcessor { if field_is_primary_key && current_column_metadata.primary_key_info.is_none() { Self::add_primary_key(self, entity_name, canyon_register_entity_field.clone()); - if canyon_register_entity_field.is_autoincremental() { - Self::add_identity(self, entity_name, canyon_register_entity_field.clone()); + #[cfg(feature = "postgres")] + { + if canyon_register_entity_field.is_autoincremental() { + Self::add_identity(self, entity_name, canyon_register_entity_field.clone()); + } } } // Case when the field contains a primary key annotation, and it's already on the database else if field_is_primary_key && current_column_metadata.primary_key_info.is_some() { - let is_autoincr_rust = canyon_register_entity_field.is_autoincremental(); - let is_autoincr_in_db = current_column_metadata.is_identity; - - if !is_autoincr_rust && is_autoincr_in_db { - Self::drop_identity(self, entity_name, canyon_register_entity_field.clone()) - } else if is_autoincr_rust && !is_autoincr_in_db { - Self::add_identity(self, entity_name, canyon_register_entity_field.clone()) + #[cfg(feature = "postgres")] + { + let is_autoincr_rust = canyon_register_entity_field.is_autoincremental(); + let is_autoincr_in_db = current_column_metadata.is_identity; + if !is_autoincr_rust && is_autoincr_in_db { + Self::drop_identity(self, entity_name, canyon_register_entity_field.clone()) + } else if is_autoincr_rust && !is_autoincr_in_db { + Self::add_identity(self, entity_name, 
canyon_register_entity_field.clone()) + } } } // Case when field doesn't contains a primary key annotation, but there is one in the database column @@ -413,8 +426,11 @@ impl MigrationsProcessor { .to_string(), ); - if current_column_metadata.is_identity { - Self::drop_identity(self, entity_name, canyon_register_entity_field.clone()); + #[cfg(feature = "postgres")] + { + if current_column_metadata.is_identity { + Self::drop_identity(self, entity_name, canyon_register_entity_field.clone()); + } } } @@ -527,6 +543,7 @@ impl MigrationsProcessor { ))); } + #[cfg(feature = "postgres")] fn drop_identity( &mut self, entity_name: &str, @@ -619,6 +636,7 @@ impl MigrationsHelper { } } + #[cfg(feature = "mssql")] fn get_datatype_from_column_metadata(current_column_metadata: &ColumnMetadata) -> String { // TODO Add all SQL Server text datatypes if vec!["nvarchar", "varchar"] @@ -640,20 +658,27 @@ impl MigrationsHelper { canyon_register_entity_field: &CanyonRegisterEntityField, current_column_metadata: &ColumnMetadata, ) -> bool { - if db_type == DatabaseType::PostgreSql { - canyon_register_entity_field - .to_postgres_alter_syntax() - .to_lowercase() - == current_column_metadata.datatype - } else if db_type == DatabaseType::SqlServer { - // TODO Search a better way to get the datatype without useless info (like "VARCHAR(MAX)") - canyon_register_entity_field - .to_sqlserver_alter_syntax() - .to_lowercase() - == current_column_metadata.datatype - } else { - todo!() + #[cfg(feature = "postgres")] + { + if db_type == DatabaseType::PostgreSql { + return canyon_register_entity_field + .to_postgres_alter_syntax() + .to_lowercase() + == current_column_metadata.datatype; + } + } + #[cfg(feature = "mssql")] + { + if db_type == DatabaseType::SqlServer { + // TODO Search a better way to get the datatype without useless info (like "VARCHAR(MAX)") + return canyon_register_entity_field + .to_sqlserver_alter_syntax() + .to_lowercase() + == current_column_metadata.datatype; + } } + + false } fn 
extract_foreign_key_annotation(field_annotations: &[String]) -> (String, String) { @@ -752,112 +777,110 @@ impl DatabaseOperation for TableOperation { let stmt = match self { TableOperation::CreateTable(table_name, table_fields) => { - if db_type == DatabaseType::PostgreSql { - format!( - "CREATE TABLE \"{table_name}\" ({});", - table_fields - .iter() - .map(|entity_field| format!( - "\"{}\" {}", - entity_field.field_name, - entity_field.to_postgres_syntax() - )) - .collect::>() - .join(", ") - ) - } else if db_type == DatabaseType::SqlServer { - format!( - "CREATE TABLE {:?} ({:?});", - table_name, - table_fields - .iter() - .map(|entity_field| format!( - "{} {}", - entity_field.field_name, - entity_field.to_sqlserver_syntax() - )) - .collect::>() - .join(", ") - ) - .replace('"', "") - } else { - todo!("There's no other databases supported in Canyon-SQL right now") + match db_type { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => { + format!( + "CREATE TABLE \"{table_name}\" ({});", + table_fields + .iter() + .map(|entity_field| format!( + "\"{}\" {}", + entity_field.field_name, + entity_field.to_postgres_syntax() + )) + .collect::>() + .join(", ") + ) + } + #[cfg(feature = "mssql")] DatabaseType::SqlServer => { + format!( + "CREATE TABLE {:?} ({:?});", + table_name, + table_fields + .iter() + .map(|entity_field| format!( + "{} {}", + entity_field.field_name, + entity_field.to_sqlserver_syntax() + )) + .collect::>() + .join(", ") + ) + .replace('"', "") + } } } TableOperation::AlterTableName(old_table_name, new_table_name) => { - if db_type == DatabaseType::PostgreSql { - format!("ALTER TABLE {old_table_name} RENAME TO {new_table_name};") - } else if db_type == DatabaseType::SqlServer { - /* - Notes: Brackets around `old_table_name`, p.e. - exec sp_rename ['league'], 'leagues' // NOT VALID! - is only allowed for compound names split by a dot. - exec sp_rename ['random.league'], 'leagues' // OK - - CARE! 
This doesn't mean that we are including the schema. - exec sp_rename ['dbo.random.league'], 'leagues' // OK - exec sp_rename 'dbo.league', 'leagues' // OK - Schema doesn't need brackets - - Due to the automatic mapped name from Rust to DB and vice-versa, this won't - be an allowed behaviour for now, only with the table_name parameter on the - CanyonEntity annotation. - */ - format!("exec sp_rename '{old_table_name}', '{new_table_name}';") - } else { - todo!() + match db_type { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => + format!("ALTER TABLE {old_table_name} RENAME TO {new_table_name};"), + #[cfg(feature = "mssql")] DatabaseType::SqlServer => + /* + Notes: Brackets around `old_table_name`, p.e. + exec sp_rename ['league'], 'leagues' // NOT VALID! + is only allowed for compound names split by a dot. + exec sp_rename ['random.league'], 'leagues' // OK + + CARE! This doesn't mean that we are including the schema. + exec sp_rename ['dbo.random.league'], 'leagues' // OK + exec sp_rename 'dbo.league', 'leagues' // OK - Schema doesn't need brackets + + Due to the automatic mapped name from Rust to DB and vice-versa, this won't + be an allowed behaviour for now, only with the table_name parameter on the + CanyonEntity annotation. 
+ */ + format!("exec sp_rename '{old_table_name}', '{new_table_name}';") } } TableOperation::AddTableForeignKey( - table_name, - foreign_key_name, - column_foreign_key, - table_to_reference, - column_to_reference, + _table_name, + _foreign_key_name, + _column_foreign_key, + _table_to_reference, + _column_to_reference, ) => { - if db_type == DatabaseType::PostgreSql { - format!( - "ALTER TABLE {table_name} ADD CONSTRAINT {foreign_key_name} \ - FOREIGN KEY ({column_foreign_key}) REFERENCES {table_to_reference} ({column_to_reference});" - ) - } else if db_type == DatabaseType::SqlServer { - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") - } else { - todo!() + match db_type { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => + format!( + "ALTER TABLE {_table_name} ADD CONSTRAINT {_foreign_key_name} \ + FOREIGN KEY ({_column_foreign_key}) REFERENCES {_table_to_reference} ({_column_to_reference});" + ), + #[cfg(feature = "mssql")] DatabaseType::SqlServer => + todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } - TableOperation::DeleteTableForeignKey(table_with_foreign_key, constraint_name) => { - if db_type == DatabaseType::PostgreSql { - format!( - "ALTER TABLE {table_with_foreign_key} DROP CONSTRAINT {constraint_name};", - ) - } else if db_type == DatabaseType::SqlServer { - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") - } else { - todo!() + TableOperation::DeleteTableForeignKey(_table_with_foreign_key, _constraint_name) => { + match db_type { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => + format!( + "ALTER TABLE {_table_with_foreign_key} DROP CONSTRAINT {_constraint_name};", + ), + #[cfg(feature = "mssql")] DatabaseType::SqlServer => + todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } - TableOperation::AddTablePrimaryKey(table_name, entity_field) => { - if db_type == DatabaseType::PostgreSql { - format!( - "ALTER TABLE 
\"{table_name}\" ADD PRIMARY KEY (\"{}\");", - entity_field.field_name - ) - } else if db_type == DatabaseType::SqlServer { - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") - } else { - todo!() + TableOperation::AddTablePrimaryKey(_table_name, _entity_field) => { + match db_type { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => + format!( + "ALTER TABLE \"{_table_name}\" ADD PRIMARY KEY (\"{}\");", + _entity_field.field_name + ), + #[cfg(feature = "mssql")] DatabaseType::SqlServer => + todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } TableOperation::DeleteTablePrimaryKey(table_name, primary_key_name) => { - if db_type == DatabaseType::PostgreSql || db_type == DatabaseType::SqlServer { - format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;") - } else { - todo!() + match db_type { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => + format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;"), + #[cfg(feature = "mssql")] DatabaseType::SqlServer => + format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;") } } }; @@ -875,11 +898,14 @@ enum ColumnOperation { // AlterColumnName, AlterColumnType(String, CanyonRegisterEntityField), AlterColumnDropNotNull(String, CanyonRegisterEntityField), + AlterColumnSetNotNull(String, CanyonRegisterEntityField), + + #[cfg(feature = "mssql")] // SQL server specific operation - SQL server can't drop a NOT NULL column DropNotNullBeforeDropColumn(String, String, String), - AlterColumnSetNotNull(String, CanyonRegisterEntityField), - // TODO if implement through annotations, modify for both GENERATED {ALWAYS, BY DEFAULT} + #[cfg(feature = "postgres")] AlterColumnAddIdentity(String, CanyonRegisterEntityField), + #[cfg(feature = "postgres")] AlterColumnDropIdentity(String, CanyonRegisterEntityField), } @@ -892,51 +918,47 @@ impl DatabaseOperation for ColumnOperation { let stmt = match self { 
ColumnOperation::CreateColumn(table_name, entity_field) => - if db_type == DatabaseType::PostgreSql { - format!( - "ALTER TABLE \"{}\" ADD COLUMN \"{}\" {};", - table_name, - entity_field.field_name, - entity_field.to_postgres_syntax()) - } else if db_type == DatabaseType::SqlServer { - format!( - "ALTER TABLE {} ADD \"{}\" {};", - table_name, - entity_field.field_name, - entity_field.to_sqlserver_syntax() - ) - } else { - todo!() - }, + match db_type { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => + format!( + "ALTER TABLE \"{}\" ADD COLUMN \"{}\" {};", + table_name, + entity_field.field_name, + entity_field.to_postgres_syntax() + ), + #[cfg(feature = "mssql")] DatabaseType::SqlServer => + format!( + "ALTER TABLE {} ADD \"{}\" {};", + table_name, + entity_field.field_name, + entity_field.to_sqlserver_syntax() + ) + } ColumnOperation::DeleteColumn(table_name, column_name) => { // TODO Check if operation for SQL server is different format!("ALTER TABLE \"{table_name}\" DROP COLUMN \"{column_name}\";") }, - ColumnOperation::AlterColumnType(table_name, entity_field) => - if db_type == DatabaseType::PostgreSql { - format!( - "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" TYPE {};", - entity_field.field_name, entity_field.to_postgres_alter_syntax() - ) - } else if db_type == DatabaseType::SqlServer { - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") - } else { - todo!() - } - , + ColumnOperation::AlterColumnType(_table_name, _entity_field) => + match db_type { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => + format!( + "ALTER TABLE \"{_table_name}\" ALTER COLUMN \"{}\" TYPE {};", + _entity_field.field_name, _entity_field.to_postgres_alter_syntax() + ), + #[cfg(feature = "mssql")] DatabaseType::SqlServer => + todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") + } ColumnOperation::AlterColumnDropNotNull(table_name, entity_field) => - if db_type == DatabaseType::PostgreSql { - 
format!("ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" DROP NOT NULL;", entity_field.field_name) - } else if db_type == DatabaseType::SqlServer { - format!( - "ALTER TABLE \"{table_name}\" ALTER COLUMN {} {} NULL", - entity_field.field_name, entity_field.to_sqlserver_alter_syntax() - ) - } else { - todo!() - } - - ColumnOperation::DropNotNullBeforeDropColumn(table_name, column_name, column_datatype) => + match db_type { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => + format!("ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" DROP NOT NULL;", entity_field.field_name), + #[cfg(feature = "mssql")] DatabaseType::SqlServer => + format!( + "ALTER TABLE \"{table_name}\" ALTER COLUMN {} {} NULL", + entity_field.field_name, entity_field.to_sqlserver_alter_syntax() + ) + } + #[cfg(feature = "mssql")] ColumnOperation::DropNotNullBeforeDropColumn(table_name, column_name, column_datatype) => format!( "ALTER TABLE {table_name} ALTER COLUMN {column_name} {column_datatype} NULL; DECLARE @tableName VARCHAR(MAX) = '{table_name}' DECLARE @columnName VARCHAR(MAX) = '{column_name}' @@ -951,15 +973,24 @@ impl DatabaseOperation for ColumnOperation { EXEC('ALTER TABLE '+@tableName+' DROP CONSTRAINT ' + @ConstraintName);" ), - ColumnOperation::AlterColumnSetNotNull(table_name, entity_field) => format!( - "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" SET NOT NULL;", entity_field.field_name - ), + ColumnOperation::AlterColumnSetNotNull(table_name, entity_field) => { + match db_type { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( + "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" SET NOT NULL;", entity_field.field_name + ), + #[cfg(feature = "mssql")] DatabaseType::SqlServer => format!( + "ALTER TABLE \"{table_name}\" ALTER COLUMN {} {} NOT NULL", + entity_field.field_name, + entity_field.to_sqlserver_alter_syntax() + ) + } + } - ColumnOperation::AlterColumnAddIdentity(table_name, entity_field) => format!( + #[cfg(feature = "postgres")] 
ColumnOperation::AlterColumnAddIdentity(table_name, entity_field) => format!( "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" ADD GENERATED ALWAYS AS IDENTITY;", entity_field.field_name ), - ColumnOperation::AlterColumnDropIdentity(table_name, entity_field) => format!( + #[cfg(feature = "postgres")] ColumnOperation::AlterColumnDropIdentity(table_name, entity_field) => format!( "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" DROP IDENTITY;", entity_field.field_name ), }; @@ -969,34 +1000,26 @@ impl DatabaseOperation for ColumnOperation { } /// Helper for operations involving sequences +#[cfg(feature = "postgres")] #[derive(Debug)] -#[allow(dead_code)] enum SequenceOperation { ModifySequence(String, CanyonRegisterEntityField), } - +#[cfg(feature = "postgres")] impl Transaction for SequenceOperation {} +#[cfg(feature = "postgres")] #[async_trait] impl DatabaseOperation for SequenceOperation { async fn generate_sql(&self, datasource: &DatasourceConfig) { - let db_type = datasource.get_db_type(); - let stmt = match self { SequenceOperation::ModifySequence(table_name, entity_field) => { - if db_type == DatabaseType::PostgreSql { - format!( + format!( "SELECT setval(pg_get_serial_sequence('\"{table_name}\"', '{}'), max(\"{}\")) from \"{table_name}\";", entity_field.field_name, entity_field.field_name ) - } else if db_type == DatabaseType::SqlServer { - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") - } else { - todo!() - } } }; - save_migrations_query_to_execute(stmt, &datasource.name); } } diff --git a/canyon_observer/src/migrations/register_types.rs b/canyon_observer/src/migrations/register_types.rs index 470944db..14481c13 100644 --- a/canyon_observer/src/migrations/register_types.rs +++ b/canyon_observer/src/migrations/register_types.rs @@ -1,8 +1,10 @@ use regex::Regex; -use crate::constants::{ - postgresql_type, regex_patterns, rust_type, sqlserver_type, NUMERIC_PK_DATATYPE, -}; +#[cfg(feature = "postgres")] +use 
crate::constants::postgresql_type; +#[cfg(feature = "mssql")] +use crate::constants::sqlserver_type; +use crate::constants::{regex_patterns, rust_type, NUMERIC_PK_DATATYPE}; /// This file contains `Rust` types that represents an entry on the `CanyonRegister` /// where `Canyon` tracks the user types that has to manage @@ -28,6 +30,7 @@ pub struct CanyonRegisterEntityField { impl CanyonRegisterEntityField { /// Return the postgres datatype and parameters to create a column for a given rust type + #[cfg(feature = "postgres")] pub fn to_postgres_syntax(&self) -> String { let rust_type_clean = self.field_type.replace(' ', ""); @@ -74,6 +77,7 @@ impl CanyonRegisterEntityField { /// Return the postgres datatype and parameters to create a column for a given rust type /// for Microsoft SQL Server + #[cfg(feature = "mssql")] pub fn to_sqlserver_syntax(&self) -> String { let rust_type_clean = self.field_type.replace(' ', ""); @@ -120,6 +124,7 @@ impl CanyonRegisterEntityField { } } + #[cfg(feature = "postgres")] pub fn to_postgres_alter_syntax(&self) -> String { let mut rust_type_clean = self.field_type.replace(' ', ""); let rs_type_is_optional = self.field_type.to_uppercase().starts_with("OPTION"); @@ -162,6 +167,7 @@ impl CanyonRegisterEntityField { } } + #[cfg(feature = "mssql")] pub fn to_sqlserver_alter_syntax(&self) -> String { let mut rust_type_clean = self.field_type.replace(' ', ""); let rs_type_is_optional = self.field_type.to_uppercase().starts_with("OPTION"); @@ -200,50 +206,6 @@ impl CanyonRegisterEntityField { } } - /// Return the datatype and parameters to create an id column, given the corresponding "CanyonRegisterEntityField" - /// with the correct format for PostgreSQL - fn _to_postgres_id_syntax(&self) -> String { - let has_pk_annotation = self - .annotations - .iter() - .find(|a| a.starts_with("Annotation: PrimaryKey")); - - let pk_is_autoincremental = match has_pk_annotation { - Some(annotation) => annotation.contains("true"), - None => false, - }; - - 
let postgres_datatype_syntax = Self::to_postgres_syntax(self); - - if NUMERIC_PK_DATATYPE.contains(&self.field_type.as_str()) && pk_is_autoincremental { - format!("{postgres_datatype_syntax} PRIMARY KEY GENERATED ALWAYS AS IDENTITY") - } else { - format!("{postgres_datatype_syntax} PRIMARY KEY") - } - } - - /// Return the datatype and parameters to create an id column, given the corresponding "CanyonRegisterEntityField" - /// with the correct format for Microsoft SQL Server - fn _to_sqlserver_id_syntax(&self) -> String { - let has_pk_annotation = self - .annotations - .iter() - .find(|a| a.starts_with("Annotation: PrimaryKey")); - - let pk_is_autoincremental = match has_pk_annotation { - Some(annotation) => annotation.contains("true"), - None => false, - }; - - let sqlserver_datatype_syntax = Self::to_sqlserver_syntax(self); - - if NUMERIC_PK_DATATYPE.contains(&self.field_type.as_str()) && pk_is_autoincremental { - format!("{sqlserver_datatype_syntax} IDENTITY PRIMARY") - } else { - format!("{sqlserver_datatype_syntax} PRIMARY KEY") - } - } - /// Return if the field is autoincremental pub fn is_autoincremental(&self) -> bool { let has_pk_annotation = self diff --git a/canyon_sql/Cargo.toml b/canyon_sql/Cargo.toml deleted file mode 100755 index 0a13a101..00000000 --- a/canyon_sql/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "canyon_sql" -version = "0.2.0" -edition = "2021" -authors = ["Alex Vergara, Gonzalo Busto"] -documentation = "https://zerodaycode.github.io/canyon-book/" -homepage = "https://github.com/zerodaycode/Canyon-SQL" -readme = "../README.md" -license = "MIT" -description = "A Rust ORM and QueryBuilder" - -[dependencies] -async-trait = { version = "0.1.50" } - -# Project crates -canyon_macros = { version = "0.2.0", path = "../canyon_macros" } -canyon_observer = { version = "0.2.0", path = "../canyon_observer" } -canyon_crud = { version = "0.2.0", path = "../canyon_crud" } -canyon_connection = { version = "0.2.0", path = 
"../canyon_connection" } diff --git a/canyon_sql/src/lib.rs b/src/lib.rs old mode 100755 new mode 100644 similarity index 73% rename from canyon_sql/src/lib.rs rename to src/lib.rs index 330b8ed4..33a2c82b --- a/canyon_sql/src/lib.rs +++ b/src/lib.rs @@ -3,6 +3,10 @@ /// Here it's where all the available functionalities and features /// reaches the top most level, grouping them and making them visible /// through this crate, building the *public API* of the library +extern crate canyon_connection; +extern crate canyon_crud; +extern crate canyon_macros; +extern crate canyon_observer; /// Reexported elements to the root of the public API pub mod migrations { @@ -15,17 +19,27 @@ pub use canyon_macros::main; /// Public API for the `Canyon-SQL` proc-macros, and for the external ones pub mod macros { - pub use async_trait::*; + pub use canyon_crud::async_trait::*; pub use canyon_macros::*; } +/// connection module serves to reexport the public elements of the `canyon_connection` crate, +/// exposing them through the public API +pub mod connection { + #[cfg(feature = "postgres")] + pub use canyon_connection::canyon_database_connector::DatabaseConnection::Postgres; + + #[cfg(feature = "mssql")] + pub use canyon_connection::canyon_database_connector::DatabaseConnection::SqlServer; +} + /// Crud module serves to reexport the public elements of the `canyon_crud` crate, /// exposing them through the public API pub mod crud { pub use canyon_crud::bounds; pub use canyon_crud::crud::*; pub use canyon_crud::mapper::*; - pub use canyon_crud::result::*; + pub use canyon_crud::rows::CanyonRows; pub use canyon_crud::DatabaseType; } @@ -37,7 +51,9 @@ pub mod query { /// Reexport the available database clients within Canyon pub mod db_clients { + #[cfg(feature = "mssql")] pub use canyon_connection::tiberius; + #[cfg(feature = "postgres")] pub use canyon_connection::tokio_postgres; } diff --git a/tests/Cargo.toml b/tests/Cargo.toml index f2e83953..da6b0dfc 100644 --- a/tests/Cargo.toml 
+++ b/tests/Cargo.toml @@ -1,12 +1,16 @@ [package] name = "tests" -version = "0.2.0" -edition = "2021" +version.workspace = true +edition.workspace = true publish = false [dev-dependencies] -canyon_sql = { path = "../canyon_sql" } +canyon_sql = { path = ".." } [[test]] name = "canyon_integration_tests" -path = "canyon_integration_tests.rs" \ No newline at end of file +path = "canyon_integration_tests.rs" + +[features] +postgres = ["canyon_sql/postgres"] +mssql = ["canyon_sql/mssql"] diff --git a/tests/canyon_integration_tests.rs b/tests/canyon_integration_tests.rs index 8120ee8f..30687987 100644 --- a/tests/canyon_integration_tests.rs +++ b/tests/canyon_integration_tests.rs @@ -1,3 +1,5 @@ +extern crate canyon_sql; + use std::error::Error; ///! Integration tests for the heart of a Canyon-SQL application, the CRUD operations. diff --git a/tests/constants.rs b/tests/constants.rs index f7804e43..1c9c8044 100644 --- a/tests/constants.rs +++ b/tests/constants.rs @@ -1,7 +1,11 @@ ///! Constant values to share across the integration tests + +#[cfg(feature = "postgres")] pub const PSQL_DS: &str = "postgres_docker"; +#[cfg(feature = "mssql")] pub const SQL_SERVER_DS: &str = "sqlserver_docker"; +#[cfg(feature = "postgres")] pub static FETCH_PUBLIC_SCHEMA: &str = "SELECT gi.table_name, @@ -33,6 +37,7 @@ LEFT JOIN pg_catalog.pg_constraint AS con on WHERE table_schema = 'public';"; +#[cfg(feature = "mssql")] pub const SQL_SERVER_CREATE_TABLES: &str = " IF OBJECT_ID(N'[dbo].[league]', N'U') IS NULL BEGIN @@ -87,6 +92,7 @@ BEGIN END; "; +#[cfg(feature = "mssql")] pub const SQL_SERVER_FILL_TABLE_VALUES: &str = " -- Values for league table -- Values for league table diff --git a/tests/crud/delete_operations.rs b/tests/crud/delete_operations.rs index 46d1bcaf..6420e553 100644 --- a/tests/crud/delete_operations.rs +++ b/tests/crud/delete_operations.rs @@ -2,7 +2,10 @@ ///! 
generates and executes *INSERT* statements use canyon_sql::crud::CrudOperations; -use crate::constants::{PSQL_DS, SQL_SERVER_DS}; +#[cfg(feature = "postgres")] +use crate::constants::PSQL_DS; +#[cfg(feature = "mssql")] +use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; /// Deletes a row from the database that is mapped into some instance of a `T` entity. @@ -14,6 +17,7 @@ use crate::tests_models::league::*; /// /// Attempt of usage the `t.delete(&self)` method on an entity without `#[primary_key]` /// will raise a runtime error. +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_delete_method_operation() { // For test the delete, we will insert a new instance of the database, and then, @@ -58,6 +62,7 @@ fn test_crud_delete_method_operation() { } /// Same as the delete test, but performing the operations with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_delete_datasource_method_operation() { // For test the delete, we will insert a new instance of the database, and then, diff --git a/tests/crud/foreign_key_operations.rs b/tests/crud/foreign_key_operations.rs index b58df802..471dd639 100644 --- a/tests/crud/foreign_key_operations.rs +++ b/tests/crud/foreign_key_operations.rs @@ -10,13 +10,15 @@ ///! 
For more info: TODO -> Link to the docs of the foreign key chapter use canyon_sql::crud::CrudOperations; +#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; use crate::tests_models::tournament::*; /// Given an entity `T` which has some field declaring a foreign key relation -/// with some another entity `U`, for example, performns a search to find +/// with some another entity `U`, for example, performs a search to find /// what is the parent type `U` of `T` +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_search_by_foreign_key() { let some_tournament: Tournament = Tournament::find_by_pk(&1) @@ -38,6 +40,7 @@ fn test_crud_search_by_foreign_key() { } /// Same as the search by foreign key, but with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_search_by_foreign_key_datasource() { let some_tournament: Tournament = Tournament::find_by_pk_datasource(&10, SQL_SERVER_DS) @@ -67,6 +70,7 @@ fn test_crud_search_by_foreign_key_datasource() { /// to `U`. 
/// /// For this to work, `U`, the parent, must have derived the `ForeignKeyable` proc macro +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_search_reverse_side_foreign_key() { let some_league: League = League::find_by_pk(&1) @@ -87,6 +91,7 @@ fn test_crud_search_reverse_side_foreign_key() { /// Same as the search by the reverse side of a foreign key relation /// but with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_search_reverse_side_foreign_key_datasource() { let some_league: League = League::find_by_pk_datasource(&1, SQL_SERVER_DS) diff --git a/tests/crud/init_mssql.rs b/tests/crud/init_mssql.rs new file mode 100644 index 00000000..19b08549 --- /dev/null +++ b/tests/crud/init_mssql.rs @@ -0,0 +1,62 @@ +use crate::constants::SQL_SERVER_CREATE_TABLES; +use crate::constants::SQL_SERVER_DS; +use crate::constants::SQL_SERVER_FILL_TABLE_VALUES; +use crate::tests_models::league::League; + +use canyon_sql::crud::CrudOperations; +use canyon_sql::db_clients::tiberius::{Client, Config}; +use canyon_sql::runtime::tokio::net::TcpStream; +use canyon_sql::runtime::tokio_util::compat::TokioAsyncWriteCompatExt; + +/// In order to initialize data on `SqlServer`. we must manually insert it +/// when the docker starts. SqlServer official docker from Microsoft does +/// not allow you to run `.sql` files against the database (not at least, without) +/// using a workaround. So, we are going to query the `SqlServer` to check if already +/// has some data (other processes, persistence or multi-threading envs), af if not, +/// we are going to retrieve the inserted data on the `postgreSQL` at start-up and +/// inserting into the `SqlServer` instance. 
+/// +/// This will be marked as `#[ignore]`, so we can force to run first the marked as +/// ignored, check the data available, perform the necessary init operations and +/// then *cargo test * the real integration tests +#[canyon_sql::macros::canyon_tokio_test] +#[ignore] +fn initialize_sql_server_docker_instance() { + static CONN_STR: &str = + "server=tcp:localhost,1434;User Id=SA;Password=SqlServer-10;TrustServerCertificate=true"; + + canyon_sql::runtime::futures::executor::block_on(async { + let config = Config::from_ado_string(CONN_STR).unwrap(); + + let tcp = TcpStream::connect(config.get_addr()).await.unwrap(); + let tcp2 = TcpStream::connect(config.get_addr()).await.unwrap(); + tcp.set_nodelay(true).ok(); + + let mut client = Client::connect(config.clone(), tcp.compat_write()) + .await + .unwrap(); + + // Create the tables + let query_result = client.query(SQL_SERVER_CREATE_TABLES, &[]).await; + assert!(query_result.is_ok()); + + let leagues_sql = League::find_all_datasource(SQL_SERVER_DS).await; + println!("LSQL ERR: {leagues_sql:?}"); + assert!(leagues_sql.is_ok()); + + match leagues_sql { + Ok(ref leagues) => { + let leagues_len = leagues.len(); + println!("Leagues already inserted on SQLSERVER: {:?}", &leagues_len); + if leagues.len() < 10 { + let mut client2 = Client::connect(config, tcp2.compat_write()) + .await + .expect("Can't connect to MSSQL"); + let result = client2.query(SQL_SERVER_FILL_TABLE_VALUES, &[]).await; + assert!(result.is_ok()); + } + } + Err(e) => eprintln!("Error retrieving the leagues: {e}"), + } + }); +} diff --git a/tests/crud/insert_operations.rs b/tests/crud/insert_operations.rs index 29c0c9fa..d52fa868 100644 --- a/tests/crud/insert_operations.rs +++ b/tests/crud/insert_operations.rs @@ -2,6 +2,7 @@ ///! 
generates and executes *INSERT* statements use canyon_sql::crud::CrudOperations; +#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; @@ -25,7 +26,8 @@ use crate::tests_models::league::*; /// /// If the type hasn't a `#[primary_key]` annotation, or the annotation contains /// an argument specifying not autoincremental behaviour, all the fields will be -/// inserted on the database and no returning value will be placed in any field. +/// inserted on the database and no returning value will be placed in any field. +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_insert_operation() { let mut new_league: League = League { @@ -54,6 +56,7 @@ fn test_crud_insert_operation() { /// Same as the insert operation above, but targeting the database defined in /// the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_insert_datasource_operation() { let mut new_league: League = League { @@ -93,6 +96,7 @@ fn test_crud_insert_datasource_operation() { /// /// The instances without `#[primary_key]` inserts all the values on the instaqce fields /// on the database. 
+#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_multi_insert_operation() { let mut new_league_mi: League = League { @@ -154,6 +158,7 @@ fn test_crud_multi_insert_operation() { } /// Same as the multi insert above, but with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_multi_insert_datasource_operation() { let mut new_league_mi: League = League { diff --git a/tests/crud/mod.rs b/tests/crud/mod.rs index 7526c8f6..407e727c 100644 --- a/tests/crud/mod.rs +++ b/tests/crud/mod.rs @@ -1,69 +1,10 @@ +#![allow(unused_imports)] + pub mod delete_operations; pub mod foreign_key_operations; +#[cfg(feature = "mssql")] +pub mod init_mssql; pub mod insert_operations; pub mod querybuilder_operations; pub mod select_operations; pub mod update_operations; - -use crate::constants::SQL_SERVER_CREATE_TABLES; -use crate::constants::SQL_SERVER_DS; -use crate::constants::SQL_SERVER_FILL_TABLE_VALUES; -use crate::tests_models::league::League; - -use canyon_sql::crud::CrudOperations; -use canyon_sql::db_clients::tiberius::{Client, Config}; -use canyon_sql::runtime::tokio::net::TcpStream; -use canyon_sql::runtime::tokio_util::compat::TokioAsyncWriteCompatExt; - -/// In order to initialize data on `SqlServer`. we must manually insert it -/// when the docker starts. SqlServer official docker from Microsoft does -/// not allow you to run `.sql` files against the database (not at least, without) -/// using a workaround. So, we are going to query the `SqlServer` to check if already -/// has some data (other processes, persistence or multi-threading envs), af if not, -/// we are going to retrieve the inserted data on the `postgreSQL` at start-up and -/// inserting into the `SqlServer` instance. 
-/// -/// This will be marked as `#[ignore]`, so we can force to run first the marked as -/// ignored, check the data available, perform the necessary init operations and -/// then *cargo test * the real integration tests -#[canyon_sql::macros::canyon_tokio_test] -#[ignore] -fn initialize_sql_server_docker_instance() { - canyon_sql::runtime::futures::executor::block_on(async { - static CONN_STR: &str = - "server=tcp:localhost,1434;User Id=SA;Password=SqlServer-10;TrustServerCertificate=true"; - - let config = Config::from_ado_string(CONN_STR).unwrap(); - - let tcp = TcpStream::connect(config.get_addr()).await.unwrap(); - let tcp2 = TcpStream::connect(config.get_addr()).await.unwrap(); - tcp.set_nodelay(true).ok(); - - let mut client = Client::connect(config.clone(), tcp.compat_write()) - .await - .unwrap(); - - // Create the tables - let query_result = client.query(SQL_SERVER_CREATE_TABLES, &[]).await; - assert!(query_result.is_ok()); - - let leagues_sql = League::find_all_datasource(SQL_SERVER_DS).await; - println!("LSQL ERR: {leagues_sql:?}"); - assert!(leagues_sql.is_ok()); - - match leagues_sql { - Ok(ref leagues) => { - let leagues_len = leagues.len(); - println!("Leagues already inserted on SQLSERVER: {:?}", &leagues_len); - if leagues.len() < 10 { - let mut client2 = Client::connect(config, tcp2.compat_write()) - .await - .expect("Can't connect to MSSQL"); - let result = client2.query(SQL_SERVER_FILL_TABLE_VALUES, &[]).await; - assert!(result.is_ok()); - } - } - Err(e) => eprintln!("Error retrieving the leagues: {e}"), - } - }); -} diff --git a/tests/crud/querybuilder_operations.rs b/tests/crud/querybuilder_operations.rs index 4700f598..1c853161 100644 --- a/tests/crud/querybuilder_operations.rs +++ b/tests/crud/querybuilder_operations.rs @@ -9,8 +9,10 @@ use canyon_sql::{ query::{operators::Comp, ops::QueryBuilder}, }; +#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; +#[cfg(feature = "mssql")] use 
crate::tests_models::player::*; use crate::tests_models::tournament::*; @@ -38,6 +40,7 @@ fn test_generated_sql_by_the_select_querybuilder() { /// Builds a new SQL statement for retrieves entities of the `T` type, filtered /// with the parameters that modifies the base SQL to SELECT * FROM +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_with_querybuilder() { // Find all the leagues with ID less or equals that 7 @@ -57,6 +60,7 @@ fn test_crud_find_with_querybuilder() { } /// Same than the above but with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_with_querybuilder_datasource() { // Find all the players where its ID column value is greater that 50 @@ -70,6 +74,7 @@ fn test_crud_find_with_querybuilder_datasource() { /// Updates the values of the range on entries defined by the constraint parameters /// in the database entity +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_update_with_querybuilder() { // Find all the leagues with ID less or equals that 7 @@ -82,7 +87,7 @@ fn test_crud_update_with_querybuilder() { .r#where(LeagueFieldValue::id(&1), Comp::Gt) .and(LeagueFieldValue::id(&8), Comp::Lt); - /* Family of QueryBuilders are clone, useful in case of need to read the generated SQL + /* NOTE: Family of QueryBuilders are clone, useful in case of need to read the generated SQL let qpr = q.clone(); println!("PSQL: {:?}", qpr.read_sql()); */ @@ -105,6 +110,7 @@ fn test_crud_update_with_querybuilder() { } /// Same as above, but with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_update_with_querybuilder_datasource() { // Find all the leagues with ID less or equals that 7 @@ -139,6 +145,7 @@ fn test_crud_update_with_querybuilder_datasource() { /// Note if the database is persisted (not created and destroyed on every docker or /// GitHub Action wake up), it won't delete 
things that already have been deleted, /// but this isn't an error. They just don't exists. +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_delete_with_querybuilder() { Tournament::delete_query() @@ -152,6 +159,7 @@ fn test_crud_delete_with_querybuilder() { } /// Same as the above delete, but with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_delete_with_querybuilder_datasource() { Player::delete_query_datasource(SQL_SERVER_DS) diff --git a/tests/crud/select_operations.rs b/tests/crud/select_operations.rs index 26e0e5f2..9f9a6f5c 100644 --- a/tests/crud/select_operations.rs +++ b/tests/crud/select_operations.rs @@ -1,5 +1,6 @@ #![allow(clippy::nonminimal_bool)] +#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; ///! Integration tests for the CRUD operations available in `Canyon` that ///! generates and executes *SELECT* statements @@ -12,6 +13,7 @@ use crate::tests_models::player::*; /// Tests the behaviour of a SELECT * FROM {table_name} within Canyon, through the /// `::find_all()` associated function derived with the `CanyonCrud` derive proc-macro /// and using the *default datasource* +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_all() { let find_all_result: Result, Box> = @@ -28,6 +30,7 @@ fn test_crud_find_all() { /// Same as the `find_all()`, but with the unchecked variant, which directly returns `Vec` not /// `Result` wrapped +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_all_unchecked() { let find_all_result: Vec = League::find_all_unchecked().await; @@ -37,6 +40,7 @@ fn test_crud_find_all_unchecked() { /// Tests the behaviour of a SELECT * FROM {table_name} within Canyon, through the /// `::find_all()` associated function derived with the `CanyonCrud` derive proc-macro /// and using the specified datasource +#[cfg(feature = "mssql")] 
#[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_all_datasource() { let find_all_result: Result, Box> = @@ -48,6 +52,7 @@ fn test_crud_find_all_datasource() { /// Same as the `find_all_datasource()`, but with the unchecked variant and the specified dataosource, /// returning directly `Vec` and not `Result, Err>` +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_all_unchecked_datasource() { let find_all_result: Vec = League::find_all_unchecked_datasource(SQL_SERVER_DS).await; @@ -58,6 +63,7 @@ fn test_crud_find_all_unchecked_datasource() { /// defined with the #[primary_key] attribute over some field of the type. /// /// Uses the *default datasource*. +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_by_pk() { let find_by_pk_result: Result, Box> = @@ -80,6 +86,8 @@ fn test_crud_find_by_pk() { /// defined with the #[primary_key] attribute over some field of the type. /// /// Uses the *specified datasource* in the second parameter of the function call. +#[cfg(feature = "postgres")] +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_by_pk_datasource() { let find_by_pk_result: Result, Box> = @@ -99,6 +107,7 @@ fn test_crud_find_by_pk_datasource() { } /// Counts how many rows contains an entity on the target database. +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_count_operation() { assert_eq!( @@ -109,6 +118,7 @@ fn test_crud_count_operation() { /// Counts how many rows contains an entity on the target database using /// the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_count_datasource_operation() { assert_eq!( diff --git a/tests/crud/update_operations.rs b/tests/crud/update_operations.rs index fc7ae733..e4085560 100644 --- a/tests/crud/update_operations.rs +++ b/tests/crud/update_operations.rs @@ -2,6 +2,7 @@ ///! 
generates and executes *UPDATE* statements use canyon_sql::crud::CrudOperations; +#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; @@ -15,6 +16,7 @@ use crate::tests_models::league::*; /// /// Attempt of usage the `t.update(&self)` method on an entity without `#[primary_key]` /// will raise a runtime error. +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_update_method_operation() { // We first retrieve some entity from the database. Note that we must make @@ -55,6 +57,7 @@ fn test_crud_update_method_operation() { } /// Same as the above test, but with the specified datasource. +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_update_datasource_method_operation() { // We first retrieve some entity from the database. Note that we must make diff --git a/tests/migrations/mod.rs b/tests/migrations/mod.rs index 17b19c35..47f82566 100644 --- a/tests/migrations/mod.rs +++ b/tests/migrations/mod.rs @@ -1,16 +1,18 @@ +#![allow(unused_imports)] ///! Integration tests for the migrations feature of `Canyon-SQL` use canyon_sql::{crud::Transaction, migrations::handler::Migrations}; use crate::constants; /// Brings the information of the `PostgreSQL` requested schema +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_migrations_postgresql_status_query() { let results = Migrations::query(constants::FETCH_PUBLIC_SCHEMA, [], constants::PSQL_DS).await; assert!(results.is_ok()); - let public_schema_info = results.ok().unwrap().postgres; - + let res = results.unwrap(); + let public_schema_info = res.get_postgres_rows(); let first_result = public_schema_info.get(0).unwrap(); assert_eq!(first_result.columns().get(0).unwrap().name(), "table_name");