From c8467efdccb0431b67bf5fd5100dc85934315e18 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Thu, 13 Apr 2023 17:10:18 +0200 Subject: [PATCH 01/23] Corrected the code coverage workflow. Updated the CHANGELOG.md --- .github/workflows/code-coverage.yml | 1 - CHANGELOG.md | 17 ++++++++++++++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/.github/workflows/code-coverage.yml b/.github/workflows/code-coverage.yml index 144aa42e..cb3ecc98 100644 --- a/.github/workflows/code-coverage.yml +++ b/.github/workflows/code-coverage.yml @@ -27,7 +27,6 @@ jobs: rustup override set nightly - name: Make the USER own the working directory. Installing `gssapi` headers - if: ${{ matrix.os == 'ubuntu-latest' }} run: | sudo chown -R $USER:$USER ${{ github.workspace }} sudo apt -y install gcc libgssapi-krb5-2 libkrb5-dev libsasl2-modules-gssapi-mit diff --git a/CHANGELOG.md b/CHANGELOG.md index d4ef370f..d79ce967 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,22 @@ Year format is defined as: `YYYY-m-d` ## [Unreleased] -- Solved a bug in the canyon_entity proc macro that was wiring the incorrect user table name in the migrations +## [0.2.0] - 2023 - 04 - 13 + +### Feature [BREAKING CHANGES] + +- The configuration file has been reworked, by providing a whole category dedicated +to the authentication against the database server. +- We removed the database type property, since the database type can be inferred by +the new mandatory auth property +- Included support for the `MSSQL` integrated authentication via the cfg feature `mssql-integrated-auth` + +## [0.1.2] - 2023 - 03 - 28 + +### Update + +- Implemented bool types for QueryParameters<'_>. +- Minimal performance improvements ## [0.1.1] - 2023 - 03 - 20 From aee3e23f91cfbdeca199bdf06860d0a158f431f9 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Fri, 14 Apr 2023 10:45:25 +0200 Subject: [PATCH 02/23] #feature - Created two new cfg properties, for split the code per supported database. 
This means, that the user now will must attach specific features to the canyon-sql dependency, unless the default, which is PostgreSQL --- canyon_connection/Cargo.toml | 3 +++ .../src/canyon_database_connector.rs | 21 +++++++++++++------ canyon_connection/src/datasources.rs | 10 ++++++--- 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/canyon_connection/Cargo.toml b/canyon_connection/Cargo.toml index 99058cf2..234ffee0 100644 --- a/canyon_connection/Cargo.toml +++ b/canyon_connection/Cargo.toml @@ -25,5 +25,8 @@ serde = { version = "1.0.138", features = ["derive"] } toml = "0.7.3" [features] +default = ["postgres"] +postgres = [] +mssql = [] mssql-integrated-auth = [] diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index 71fd767e..563cfd5c 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -1,8 +1,8 @@ -use async_std::net::TcpStream; +#[cfg(feature = "mssql")] use async_std::net::TcpStream; use serde::Deserialize; -use tiberius::{AuthMethod, Config}; -use tokio_postgres::{Client, NoTls}; +#[cfg(feature = "mssql")] use tiberius::{AuthMethod, Config}; +#[cfg(feature = "postgres")] use tokio_postgres::{Client, NoTls}; use crate::datasources::DatasourceConfig; @@ -11,17 +11,21 @@ use crate::datasources::DatasourceConfig; pub enum DatabaseType { #[default] #[serde(alias = "postgres", alias = "postgresql")] + #[cfg(feature = "postgres")] PostgreSql, #[serde(alias = "sqlserver", alias = "mssql")] + #[cfg(feature = "mssql")] SqlServer, } /// A connection with a `PostgreSQL` database +#[cfg(feature = "postgres")] pub struct PostgreSqlConnection { pub client: Client, // pub connection: Connection, // TODO Hold it, or not to hold it... that's the question! 
} +#[cfg(feature = "mssql")] /// A connection with a `SqlServer` database pub struct SqlServerConnection { pub client: &'static mut tiberius::Client, @@ -32,8 +36,8 @@ pub struct SqlServerConnection { /// process them and generates a pool of 1 to 1 database connection for /// every datasource defined. pub enum DatabaseConnection { - Postgres(PostgreSqlConnection), - SqlServer(SqlServerConnection), + #[cfg(feature = "postgres")] Postgres(PostgreSqlConnection), + #[cfg(feature = "mssql")] SqlServer(SqlServerConnection), } unsafe impl Send for DatabaseConnection {} @@ -44,6 +48,7 @@ impl DatabaseConnection { datasource: &DatasourceConfig, ) -> Result> { match datasource.get_db_type() { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => { let (username, password) = match &datasource.auth { crate::datasources::Auth::Postgres(postgres_auth) => match postgres_auth { @@ -51,6 +56,7 @@ impl DatabaseConnection { (username.as_str(), password.as_str()) } }, + #[cfg(feature = "mssql")] crate::datasources::Auth::SqlServer(_) => { panic!("Found SqlServer auth configuration for a PostgreSQL datasource") } @@ -79,6 +85,7 @@ impl DatabaseConnection { // connection: new_connection, })) } + #[cfg(feature = "mssql")] DatabaseType::SqlServer => { let mut config = Config::new(); @@ -88,7 +95,7 @@ impl DatabaseConnection { // Using SQL Server authentication. 
config.authentication(match &datasource.auth { - crate::datasources::Auth::Postgres(_) => { + #[cfg(feature = "postgres")] crate::datasources::Auth::Postgres(_) => { panic!("Found PostgreSQL auth configuration for a SqlServer database") } crate::datasources::Auth::SqlServer(sql_server_auth) => match sql_server_auth { @@ -128,6 +135,7 @@ impl DatabaseConnection { } } + #[cfg(feature = "postgres")] pub fn postgres_connection(&self) -> Option<&PostgreSqlConnection> { if let DatabaseConnection::Postgres(conn) = self { Some(conn) @@ -136,6 +144,7 @@ impl DatabaseConnection { } } + #[cfg(feature = "mssql")] pub fn sqlserver_connection(&mut self) -> Option<&mut SqlServerConnection> { if let DatabaseConnection::SqlServer(conn) = self { Some(conn) diff --git a/canyon_connection/src/datasources.rs b/canyon_connection/src/datasources.rs index 81c4e611..0486686a 100644 --- a/canyon_connection/src/datasources.rs +++ b/canyon_connection/src/datasources.rs @@ -49,7 +49,7 @@ fn load_ds_config_from_array() { assert_eq!(ds_1.properties.db_name, "triforce2"); assert_eq!(ds_1.properties.migrations, Some(Migrations::Disabled)); - assert_eq!(ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)) + #[cfg(feature = "postgres")] assert_eq!(ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)) } /// #[derive(Deserialize, Debug, Clone)] @@ -71,8 +71,8 @@ pub struct DatasourceConfig { impl DatasourceConfig { pub fn get_db_type(&self) -> DatabaseType { match self.auth { - Auth::Postgres(_) => DatabaseType::PostgreSql, - Auth::SqlServer(_) => DatabaseType::SqlServer, + #[cfg(feature = "postgres")] Auth::Postgres(_) => DatabaseType::PostgreSql, + #[cfg(feature = "mssql")] Auth::SqlServer(_) => DatabaseType::SqlServer, } } } @@ -80,18 +80,22 @@ impl DatasourceConfig { #[derive(Deserialize, Debug, Clone, PartialEq)] pub enum Auth { #[serde(alias = "PostgreSQL", alias = "postgresql")] + #[cfg(feature = "postgres")] Postgres(PostgresAuth), #[serde(alias = "SqlServer", alias = "sqlserver", alias = 
"mssql")] + #[cfg(feature = "mssql")] SqlServer(SqlServerAuth), } #[derive(Deserialize, Debug, Clone, PartialEq)] +#[cfg(feature = "postgres")] pub enum PostgresAuth { #[serde(alias = "Basic", alias = "basic")] Basic { username: String, password: String }, } #[derive(Deserialize, Debug, Clone, PartialEq)] +#[cfg(feature = "mssql")] pub enum SqlServerAuth { #[serde(alias = "Basic", alias = "basic")] Basic { username: String, password: String }, From 28129020193220944124c0ed5c23a49111c70099 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Fri, 14 Apr 2023 10:48:58 +0200 Subject: [PATCH 03/23] #fix - Small doc typos and reordering trait impl members --- canyon_connection/src/datasources.rs | 2 +- .../src/query_elements/query_builder.rs | 44 +++++++++---------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/canyon_connection/src/datasources.rs b/canyon_connection/src/datasources.rs index 0486686a..409705dc 100644 --- a/canyon_connection/src/datasources.rs +++ b/canyon_connection/src/datasources.rs @@ -79,7 +79,7 @@ impl DatasourceConfig { #[derive(Deserialize, Debug, Clone, PartialEq)] pub enum Auth { - #[serde(alias = "PostgreSQL", alias = "postgresql")] + #[serde(alias = "PostgreSQL", alias = "postgresql", alias = "postgres")] #[cfg(feature = "postgres")] Postgres(PostgresAuth), #[serde(alias = "SqlServer", alias = "sqlserver", alias = "mssql")] diff --git a/canyon_crud/src/query_elements/query_builder.rs b/canyon_crud/src/query_elements/query_builder.rs index f0e68223..56ddda7c 100644 --- a/canyon_crud/src/query_elements/query_builder.rs +++ b/canyon_crud/src/query_elements/query_builder.rs @@ -324,7 +324,7 @@ where } /// Adds a *LEFT JOIN* SQL statement to the underlying - /// [`Query`] holded by the [`QueryBuilder`], where: + /// [`Query`] held by the [`QueryBuilder`], where: /// /// * `join_table` - The table target of the join operation /// * `col1` - The left side of the ON operator for the join @@ -340,7 +340,7 @@ where } /// Adds a *RIGHT 
JOIN* SQL statement to the underlying - /// [`Query`] holded by the [`QueryBuilder`], where: + /// [`Query`] held by the [`QueryBuilder`], where: /// /// * `join_table` - The table target of the join operation /// * `col1` - The left side of the ON operator for the join @@ -356,7 +356,7 @@ where } /// Adds a *RIGHT JOIN* SQL statement to the underlying - /// [`Query`] holded by the [`QueryBuilder`], where: + /// [`Query`] held by the [`QueryBuilder`], where: /// /// * `join_table` - The table target of the join operation /// * `col1` - The left side of the ON operator for the join @@ -372,7 +372,7 @@ where } /// Adds a *FULL JOIN* SQL statement to the underlying - /// [`Query`] holded by the [`QueryBuilder`], where: + /// [`Query`] held by the [`QueryBuilder`], where: /// /// * `join_table` - The table target of the join operation /// * `col1` - The left side of the ON operator for the join @@ -428,12 +428,6 @@ where self } - #[inline] - fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { - self._inner.or(column, op); - self - } - #[inline] fn or_values_in(&mut self, r#and: Z, values: &'a [Q]) -> &mut Self where @@ -444,6 +438,12 @@ where self } + #[inline] + fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { + self._inner.or(column, op); + self + } + #[inline] fn order_by>(&mut self, order_by: Z, desc: bool) -> &mut Self { self._inner.order_by(order_by, desc); @@ -565,12 +565,6 @@ where self } - #[inline] - fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { - self._inner.or(column, op); - self - } - #[inline] fn or_values_in(&mut self, r#or: Z, values: &'a [Q]) -> &mut Self where @@ -581,6 +575,12 @@ where self } + #[inline] + fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { + self._inner.or(column, op); + self + } + #[inline] fn order_by>(&mut self, order_by: Z, desc: bool) -> &mut Self { self._inner.order_by(order_by, desc); @@ -665,12 +665,6 @@ where self } - #[inline] - fn or>(&mut self, column: Z, op: impl 
Operator) -> &mut Self { - self._inner.or(column, op); - self - } - #[inline] fn or_values_in(&mut self, r#or: Z, values: &'a [Q]) -> &mut Self where @@ -681,6 +675,12 @@ where self } + #[inline] + fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { + self._inner.or(column, op); + self + } + #[inline] fn order_by>(&mut self, order_by: Z, desc: bool) -> &mut Self { self._inner.order_by(order_by, desc); From f49f5d6c7c895457bfd4a80aef2a24df4975e293 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Fri, 14 Apr 2023 14:47:49 +0200 Subject: [PATCH 04/23] #wip - Rewriting with conditinal compilation. Reworking the workspace --- .github/workflows/code-quality.yml | 2 +- canyon_sql/Cargo.toml => Caasdfadsrgo.tomlsda | 16 +- Cargo.toml | 35 ++- bash_aliases.sh | 2 +- canyon_connection/Cargo.toml | 45 ++- .../src/canyon_database_connector.rs | 4 +- canyon_connection/src/datasources.rs | 5 +- canyon_connection/src/lib.rs | 10 +- canyon_crud/Cargo.toml | 28 +- canyon_crud/src/bounds.rs | 257 ++++++++---------- canyon_crud/src/crud.rs | 65 +++-- canyon_crud/src/lib.rs | 1 - canyon_crud/src/mapper.rs | 3 +- .../src/query_elements/query_builder.rs | 3 +- canyon_crud/src/result.rs | 108 -------- canyon_macros/Cargo.toml | 15 +- canyon_macros/src/query_operations/insert.rs | 5 +- canyon_macros/src/query_operations/select.rs | 65 +++-- canyon_observer/Cargo.toml | 15 +- canyon_observer/src/manager/entity.rs | 4 +- canyon_observer/src/migrations/handler.rs | 9 +- canyon_observer/src/migrations/memory.rs | 5 +- {canyon_sql/src => src}/lib.rs | 1 - tests/Cargo.toml | 2 +- 24 files changed, 310 insertions(+), 395 deletions(-) rename canyon_sql/Cargo.toml => Caasdfadsrgo.tomlsda (55%) mode change 100755 => 100644 delete mode 100644 canyon_crud/src/result.rs rename {canyon_sql/src => src}/lib.rs (98%) mode change 100755 => 100644 diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml index c72c0e5b..07ce16a2 100644 --- 
a/.github/workflows/code-quality.yml +++ b/.github/workflows/code-quality.yml @@ -55,7 +55,7 @@ jobs: strategy: fail-fast: false matrix: - crate: [canyon_connection, canyon_crud, canyon_macros, canyon_observer, canyon_sql] + crate: [canyon_connection, canyon_crud, canyon_macros, canyon_observer, canyon_sql_root] steps: - uses: actions/checkout@v3 diff --git a/canyon_sql/Cargo.toml b/Caasdfadsrgo.tomlsda old mode 100755 new mode 100644 similarity index 55% rename from canyon_sql/Cargo.toml rename to Caasdfadsrgo.tomlsda index 0a13a101..3e3c557e --- a/canyon_sql/Cargo.toml +++ b/Caasdfadsrgo.tomlsda @@ -1,13 +1,13 @@ [package] name = "canyon_sql" version = "0.2.0" -edition = "2021" -authors = ["Alex Vergara, Gonzalo Busto"] -documentation = "https://zerodaycode.github.io/canyon-book/" -homepage = "https://github.com/zerodaycode/Canyon-SQL" -readme = "../README.md" -license = "MIT" -description = "A Rust ORM and QueryBuilder" +edition.workspace = true +authors.workspace = true +documentation.workspace = true +homepage.workspace = true +readme.workspace = true +license.workspace = true +description.workspace = true [dependencies] async-trait = { version = "0.1.50" } @@ -16,4 +16,4 @@ async-trait = { version = "0.1.50" } canyon_macros = { version = "0.2.0", path = "../canyon_macros" } canyon_observer = { version = "0.2.0", path = "../canyon_observer" } canyon_crud = { version = "0.2.0", path = "../canyon_crud" } -canyon_connection = { version = "0.2.0", path = "../canyon_connection" } +canyon_connection = { version = "0.2.0", path = "../canyon_connection", features = ["postgres"] } diff --git a/Cargo.toml b/Cargo.toml index 800ad578..70dea99d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,8 +1,10 @@ -# This is the root Cargo.toml file that serves as manager for the workspace of the project +# This is the root Caasdfadsrgo.tomlsda file that serves as manager for the workspace of the project +[package] +name = "canyon_sql" +version = "0.2.0" [workspace] 
members = [ - "canyon_sql", "canyon_observer", "canyon_macros", "canyon_crud", @@ -10,3 +12,32 @@ members = [ "tests" ] + +[workspace.dependencies] +# Project crates +canyon_macros = { version = "0.2.0", path = "canyon_macros" } +canyon_observer = { version = "0.2.0", path = "canyon_observer" } +canyon_crud = { version = "0.2.0", path = "canyon_crud", features = ["postgres", "mssql"] } +canyon_connection = { version = "0.2.0", path = "canyon_connection", features = ["postgres", "mssql"] } + +tokio = { version = "1.21.2", features = ["full"] } +tokio-util = { version = "0.7.4", features = ["compat"] } +tokio-postgres = { version = "0.7.2", features = ["with-chrono-0_4"] } +tiberius = { version = "0.12.1", features = ["tds73", "chrono", "integrated-auth-gssapi"] } + +futures = "0.3.25" +indexmap = "1.9.1" +async-std = "1.12.0" +lazy_static = "1.4.0" +serde = { version = "1.0.138", features = ["derive"] } +toml = "0.7.3" + +[workspace.package] +version = "0.2.0" +edition = "2021" +authors = ["Alex Vergara, Gonzalo Busto"] +documentation = "https://zerodaycode.github.io/canyon-book/" +homepage = "https://github.com/zerodaycode/Canyon-SQL" +readme = "../README.md" +license = "MIT" +description = "A Rust ORM and QueryBuilder" diff --git a/bash_aliases.sh b/bash_aliases.sh index a67da429..64e2d931 100644 --- a/bash_aliases.sh +++ b/bash_aliases.sh @@ -39,7 +39,7 @@ alias SqlServerInitializationLinux='cargo test initialize_sql_server_docker_inst # Publish Canyon-SQL to the registry with its dependencies -alias PublishCanyon='cargo publish -p canyon_connection && cargo publish -p canyon_crud && cargo publish -p canyon_observer && cargo publish -p canyon_macros && cargo publish -p canyon_sql' +alias PublishCanyon='cargo publish -p canyon_connection && cargo publish -p canyon_crud && cargo publish -p canyon_observer && cargo publish -p canyon_macros && cargo publish -p canyon_sql_root' # Collects the code coverage for the project (tests must run before this) 
alias CcEnvVars='export CARGO_INCREMENTAL=0 diff --git a/canyon_connection/Cargo.toml b/canyon_connection/Cargo.toml index 234ffee0..323e91a3 100644 --- a/canyon_connection/Cargo.toml +++ b/canyon_connection/Cargo.toml @@ -1,32 +1,29 @@ [package] name = "canyon_connection" -version = "0.2.0" -edition = "2021" -documentation = "https://zerodaycode.github.io/canyon-book/" -homepage = "https://github.com/zerodaycode/Canyon-SQL" -readme = "../README.md" -license = "MIT" -description = "A Rust ORM and QueryBuilder" - +version.workspace = true +edition.workspace = true +authors.workspace = true +documentation.workspace = true +homepage.workspace = true +readme.workspace = true +license.workspace = true +description.workspace = true [dependencies] -tokio = { version = "1.21.2", features = ["full"] } -tokio-util = { version = "0.7.4", features = ["compat"] } -tokio-postgres = { version = "0.7.2", features = ["with-chrono-0_4"] } -futures = "0.3.25" -indexmap = "1.9.1" - -tiberius = { version = "0.12.1", features = ["tds73", "chrono", "integrated-auth-gssapi"] } -async-std = { version = "1.12.0" } +tokio = { workspace = true, features = ["full"], optional = true } +tokio-util = { workspace = true, features = ["compat"], optional = true } +tokio-postgres = { workspace = true, features = ["with-chrono-0_4"], optional = true } +tiberius = { workspace = true, features = ["tds73", "chrono", "integrated-auth-gssapi"], optional = true } -lazy_static = "1.4.0" - -serde = { version = "1.0.138", features = ["derive"] } -toml = "0.7.3" +futures = { workspace = true } +indexmap = { workspace = true } +async-std = { workspace = true } +lazy_static = { workspace = true } +serde = { workspace = true, features = ["derive"] } +toml = { workspace = true } [features] default = ["postgres"] -postgres = [] -mssql = [] -mssql-integrated-auth = [] - +postgres = ["tokio", "tokio-postgres", "tokio-util"] +mssql = ["tiberius", "tiberius/tds73", "tiberius/chrono"] 
+mssql-integrated-auth = ["mssql", "tiberius/integrated-auth-gssapi"] \ No newline at end of file diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index 563cfd5c..27d59799 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -160,7 +160,7 @@ mod database_connection_handler { use crate::CanyonSqlConfig; const CONFIG_FILE_MOCK_ALT: &str = r#" - [canyon_sql] + [canyon_sql_root] datasources = [ {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } @@ -171,7 +171,7 @@ mod database_connection_handler { #[test] fn check_from_datasource() { let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT) - .expect("A failure happened retrieving the [canyon_sql] section"); + .expect("A failure happened retrieving the [canyon_sql_root] section"); assert_eq!( config.canyon_sql.datasources[0].get_db_type(), diff --git a/canyon_connection/src/datasources.rs b/canyon_connection/src/datasources.rs index 409705dc..4dc76dbb 100644 --- a/canyon_connection/src/datasources.rs +++ b/canyon_connection/src/datasources.rs @@ -6,7 +6,7 @@ use crate::canyon_database_connector::DatabaseType; #[test] fn load_ds_config_from_array() { const CONFIG_FILE_MOCK_ALT: &str = r#" - [canyon_sql] + [canyon_sql_root] datasources = [ {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, {name = 'SqlServerDS', auth = { sqlserver = { 
basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' }, @@ -15,7 +15,7 @@ fn load_ds_config_from_array() { "#; let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT) - .expect("A failure happened retrieving the [canyon_sql] section"); + .expect("A failure happened retrieving the [canyon_sql_root] section"); let ds_0 = &config.canyon_sql.datasources[0]; let ds_1 = &config.canyon_sql.datasources[1]; @@ -56,6 +56,7 @@ fn load_ds_config_from_array() { pub struct CanyonSqlConfig { pub canyon_sql: Datasources, } + #[derive(Deserialize, Debug, Clone)] pub struct Datasources { pub datasources: Vec, diff --git a/canyon_connection/src/lib.rs b/canyon_connection/src/lib.rs index 1a8f7cab..59a9dbba 100644 --- a/canyon_connection/src/lib.rs +++ b/canyon_connection/src/lib.rs @@ -1,10 +1,10 @@ -pub extern crate async_std; +#[cfg(feature = "mssql")] pub extern crate async_std; pub extern crate futures; pub extern crate lazy_static; -pub extern crate tiberius; -pub extern crate tokio; -pub extern crate tokio_postgres; -pub extern crate tokio_util; +#[cfg(feature = "mssql")] pub extern crate tiberius; +#[cfg(feature = "postgres")] pub extern crate tokio; +#[cfg(feature = "postgres")] pub extern crate tokio_postgres; +#[cfg(feature = "postgres")] pub extern crate tokio_util; pub mod canyon_database_connector; pub mod datasources; diff --git a/canyon_crud/Cargo.toml b/canyon_crud/Cargo.toml index 4c30408f..0e4f0854 100644 --- a/canyon_crud/Cargo.toml +++ b/canyon_crud/Cargo.toml @@ -1,15 +1,27 @@ [package] name = "canyon_crud" -version = "0.2.0" -edition = "2021" -documentation = "https://zerodaycode.github.io/canyon-book/" -homepage = "https://github.com/zerodaycode/Canyon-SQL" -readme = "../README.md" -license = "MIT" -description = "A Rust ORM and QueryBuilder" +version.workspace = true +edition.workspace = true 
+authors.workspace = true +documentation.workspace = true +homepage.workspace = true +readme.workspace = true +license.workspace = true +description.workspace = true [dependencies] +tokio = { workspace = true, features = ["full"], optional = true } +tokio-util = { workspace = true, features = ["compat"], optional = true } +tokio-postgres = { workspace = true, features = ["with-chrono-0_4"], optional = true } +tiberius = { workspace = true, features = ["tds73", "chrono", "integrated-auth-gssapi"], optional = true } + chrono = { version = "0.4", features = ["serde"] } async-trait = { version = "0.1.50" } -canyon_connection = { version = "0.2.0", path = "../canyon_connection" } +canyon_connection = { version = "0.2.0", path = "../canyon_connection", features = ["postgres", "mssql"] } + +[features] +default = ["postgres"] +postgres = ["tokio", "tokio-postgres", "tokio-util"] +mssql = ["tiberius", "tiberius/tds73", "tiberius/chrono"] +mssql-integrated-auth = ["mssql", "tiberius/integrated-auth-gssapi"] \ No newline at end of file diff --git a/canyon_crud/src/bounds.rs b/canyon_crud/src/bounds.rs index e484fe8c..813c05dd 100644 --- a/canyon_crud/src/bounds.rs +++ b/canyon_crud/src/bounds.rs @@ -4,15 +4,19 @@ use crate::{ crud::{CrudOperations, Transaction}, mapper::RowMapper, }; -use canyon_connection::{ - tiberius::{self, ColumnData, IntoSql}, - tokio_postgres::{self, types::ToSql}, -}; + +#[cfg(feature = "postgres")] +use canyon_connection::tokio_postgres::{self, types::ToSql}; + +#[cfg(feature = "mssql")] +use canyon_connection::tiberius::{self, ColumnData, IntoSql}; + use chrono::{DateTime, FixedOffset, NaiveDate, NaiveDateTime, NaiveTime, Utc}; use std::any::Any; +use tiberius::FromSql; /// Created for retrieve the field's name of a field of a struct, giving -/// the Canoyn's autogenerated enum with the variants that maps this +/// the Canyon's autogenerated enum with the variants that maps this /// fields. 
/// /// ``` @@ -87,18 +91,20 @@ pub trait InClauseValues: ToSql + ToString {} pub trait Row { fn as_any(&self) -> &dyn Any; } -impl Row for tokio_postgres::Row { + +#[cfg(feature = "postgres")] impl Row for tokio_postgres::Row { fn as_any(&self) -> &dyn Any { self } } - -impl Row for tiberius::Row { +#[cfg(feature = "mssql")] impl Row for tiberius::Row { fn as_any(&self) -> &dyn Any { self } } +/// Generic abstraction for hold a Column type that will be one of the Column +/// types present in the dependent crates pub struct Column<'a> { name: &'a str, type_: ColumnType, @@ -112,8 +118,8 @@ impl<'a> Column<'a> { } pub fn type_(&'a self) -> &'_ dyn Type { match &self.type_ { - ColumnType::Postgres(v) => v as &'a dyn Type, - ColumnType::SqlServer(v) => v as &'a dyn Type, + #[cfg(feature = "postgres")] ColumnType::Postgres(v) => v as &'a dyn Type, + #[cfg(feature = "mssql")] ColumnType::SqlServer(v) => v as &'a dyn Type, } } } @@ -121,20 +127,21 @@ impl<'a> Column<'a> { pub trait Type { fn as_any(&self) -> &dyn Any; } -impl Type for tokio_postgres::types::Type { +#[cfg(feature = "postgres")] impl Type for tokio_postgres::types::Type { fn as_any(&self) -> &dyn Any { self } } -impl Type for tiberius::ColumnType { +#[cfg(feature = "mssql")] impl Type for tiberius::ColumnType { fn as_any(&self) -> &dyn Any { self } } +/// Wrapper over the dependencies Column's types pub enum ColumnType { - Postgres(tokio_postgres::types::Type), - SqlServer(tiberius::ColumnType), + #[cfg(feature = "postgres")] Postgres(tokio_postgres::types::Type), + #[cfg(feature = "mssql")] SqlServer(tiberius::ColumnType), } pub trait RowOperations { @@ -168,6 +175,21 @@ impl RowOperations for &dyn Row { panic!() } + fn get_opt<'a, Output>(&'a self, col_name: &str) -> Option + where + Output: tokio_postgres::types::FromSql<'a> + tiberius::FromSql<'a>, + { + if let Some(row) = self.as_any().downcast_ref::() { + return row.get::<&str, Option>(col_name); + }; + if let Some(row) = 
self.as_any().downcast_ref::() { + return row + .try_get:where + .expect("Failed to obtain a row in the MSSQL migrations"); + }; + panic!() + } + fn columns(&self) -> Vec { let mut cols = vec![]; @@ -199,28 +221,13 @@ impl RowOperations for &dyn Row { cols } - - fn get_opt<'a, Output>(&'a self, col_name: &str) -> Option - where - Output: tokio_postgres::types::FromSql<'a> + tiberius::FromSql<'a>, - { - if let Some(row) = self.as_any().downcast_ref::() { - return row.get::<&str, Option>(col_name); - }; - if let Some(row) = self.as_any().downcast_ref::() { - return row - .try_get::(col_name) - .expect("Failed to obtain a row in the MSSQL migrations"); - }; - panic!() - } } /// Defines a trait for represent type bounds against the allowed -/// datatypes supported by Canyon to be used as query parameters. +/// data types supported by Canyon to be used as query parameters. pub trait QueryParameter<'a>: std::fmt::Debug + Sync + Send { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync); - fn as_sqlserver_param(&self) -> ColumnData<'_>; + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync); + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_>; } /// The implementation of the [`canyon_connection::tiberius`] [`IntoSql`] for the @@ -231,6 +238,7 @@ pub trait QueryParameter<'a>: std::fmt::Debug + Sync + Send { /// a collection of [`QueryParameter<'a>`], in order to allow a workflow /// that is not dependent of the specific type of the argument that holds /// the query parameters of the database connectors +#[cfg(feature = "mssql")] impl<'a> IntoSql<'a> for &'a dyn QueryParameter<'a> { fn into_sql(self) -> ColumnData<'a> { self.as_sqlserver_param() @@ -238,222 +246,198 @@ impl<'a> IntoSql<'a> for &'a dyn QueryParameter<'a> { } impl<'a> QueryParameter<'a> for bool { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn 
as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::Bit(Some(*self)) } } impl<'a> QueryParameter<'a> for i16 { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(*self)) } } impl<'a> QueryParameter<'a> for &i16 { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(*self) } } impl<'a> QueryParameter<'a> for Option<&i16> { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for i32 { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(*self)) } } impl<'a> QueryParameter<'a> for &i32 { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) 
{ + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(*self) } } impl<'a> QueryParameter<'a> for Option<&i32> { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for f32 { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some(*self)) } } impl<'a> QueryParameter<'a> for &f32 { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { 
ColumnData::F32(*self) } } impl<'a> QueryParameter<'a> for Option<&f32> { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some( *self.expect("Error on an f32 value on QueryParameter<'_>"), )) } } impl<'a> QueryParameter<'a> for f64 { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some(*self)) } } impl<'a> QueryParameter<'a> for &f64 { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(*self) } } impl<'a> QueryParameter<'a> for Option<&f64> { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some( *self.expect("Error on an f64 value on QueryParameter<'_>"), )) } } impl<'a> QueryParameter<'a> for i64 { - fn as_postgres_param(&self) -> &(dyn 
ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(*self)) } } impl<'a> QueryParameter<'a> for &i64 { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(*self) } } impl<'a> QueryParameter<'a> for Option<&i64> { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for String { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Owned(self.to_owned()))) } } impl<'a> QueryParameter<'a> for &String { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn 
as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Borrowed(self))) } } impl<'a> QueryParameter<'a> for Option { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { match self { Some(string) => ColumnData::String(Some(std::borrow::Cow::Owned(string.to_owned()))), None => ColumnData::String(None), @@ -461,11 +445,10 @@ impl<'a> QueryParameter<'a> for Option { } } impl<'a> QueryParameter<'a> for Option<&String> { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { match self { Some(string) => ColumnData::String(Some(std::borrow::Cow::Borrowed(string))), None => ColumnData::String(None), @@ -473,20 +456,18 @@ impl<'a> QueryParameter<'a> for Option<&String> { } } impl<'a> QueryParameter<'_> for &'_ str { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Borrowed(*self))) } } impl<'a> QueryParameter<'a> for Option<&'_ str> { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { match *self { Some(str) => ColumnData::String(Some(std::borrow::Cow::Borrowed(str))), None => ColumnData::String(None), @@ -494,92 +475,82 
@@ impl<'a> QueryParameter<'a> for Option<&'_ str> { } } impl<'a> QueryParameter<'_> for NaiveDate { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for NaiveTime { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for NaiveDateTime { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + 
#[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for DateTime { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option> { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for DateTime { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for Option> { - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - - fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index 8f587a02..121efa91 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -9,10 +9,9 @@ use crate::mapper::RowMapper; use crate::query_elements::query_builder::{ DeleteQueryBuilder, SelectQueryBuilder, UpdateQueryBuilder, }; -use crate::result::DatabaseResult; /// This traits defines and implements a query against a database given -/// an statemt `stmt` and the params to pass the to the client. +/// an statement `stmt` and the params to pass the to the client. 
/// /// It returns a [`DatabaseResult`], which is the core Canyon type to wrap /// the result of the query and, if the user desires, @@ -26,10 +25,11 @@ pub trait Transaction { stmt: S, params: Z, datasource_name: &'a str, - ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> + ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> where S: AsRef + Display + Sync + Send + 'a, Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, + T: Transaction + RowMapper { let mut guarded_cache = CACHED_DATABASE_CONN.lock().await; @@ -50,7 +50,7 @@ pub trait Transaction { }; match database_conn { - DatabaseConnection::Postgres(_) => { + #[cfg(feature = "postgres")] DatabaseConnection::Postgres(_) => { postgres_query_launcher::launch::( database_conn, stmt.to_string(), @@ -58,7 +58,7 @@ pub trait Transaction { ) .await } - DatabaseConnection::SqlServer(_) => { + #[cfg(feature = "mssql")] DatabaseConnection::SqlServer(_) => { sqlserver_query_launcher::launch::( database_conn, &mut stmt.to_string(), @@ -84,7 +84,7 @@ pub trait Transaction { /// /// See it's definition and docs to see the implementations. /// Also, you can find the written macro-code that performs the auto-mapping -/// in the *canyon_sql::canyon_macros* crates, on the root of this project. +/// in the *canyon_sql_root::canyon_macros* crates, on the root of this project. 
#[async_trait] pub trait CrudOperations: Transaction where @@ -121,12 +121,12 @@ where async fn insert<'a>( &mut self, - ) -> Result<(), Box>; + ) -> Result<(), Box>; async fn insert_datasource<'a>( &mut self, datasource_name: &'a str, - ) -> Result<(), Box>; + ) -> Result<(), Box>; async fn multi_insert<'a>( instances: &'a mut [&'a mut T], @@ -137,71 +137,79 @@ where datasource_name: &'a str, ) -> Result<(), Box<(dyn std::error::Error + Send + Sync + 'static)>>; - async fn update(&self) -> Result<(), Box>; + async fn update(&self) -> Result<(), Box>; async fn update_datasource<'a>( &self, datasource_name: &'a str, - ) -> Result<(), Box>; + ) -> Result<(), Box>; fn update_query<'a>() -> UpdateQueryBuilder<'a, T>; fn update_query_datasource(datasource_name: &str) -> UpdateQueryBuilder<'_, T>; - async fn delete(&self) -> Result<(), Box>; + async fn delete(&self) -> Result<(), Box>; async fn delete_datasource<'a>( &self, datasource_name: &'a str, - ) -> Result<(), Box>; + ) -> Result<(), Box>; fn delete_query<'a>() -> DeleteQueryBuilder<'a, T>; fn delete_query_datasource(datasource_name: &str) -> DeleteQueryBuilder<'_, T>; } +#[cfg(feature = "postgres")] mod postgres_query_launcher { use crate::bounds::QueryParameter; - use crate::result::DatabaseResult; use canyon_connection::canyon_database_connector::DatabaseConnection; + use crate::crud::Transaction; + use crate::mapper::RowMapper; pub async fn launch<'a, T>( db_conn: &DatabaseConnection, stmt: String, params: &'a [&'_ dyn QueryParameter<'_>], - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> { + ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> + where T: Transaction + RowMapper + { let mut m_params = Vec::new(); for param in params { m_params.push(param.as_postgres_param()); } - Ok(DatabaseResult::new_postgresql( - db_conn - .postgres_connection() - .unwrap() - .client - .query(&stmt, m_params.as_slice()) - .await?, - )) + let r = db_conn + .postgres_connection() + .unwrap() + 
.client + .query(&stmt, m_params.as_slice()) + .await?; + + Ok( + r.iter().map(|row| T::deserialize_postgresql(row)).collect() + ) } } -mod sqlserver_query_launcher { - use canyon_connection::tiberius::Row; +#[cfg(feature = "mssql")] +mod sqlserver_query_launcher { use crate::{ bounds::QueryParameter, canyon_connection::{canyon_database_connector::DatabaseConnection, tiberius::Query}, - result::DatabaseResult, }; + use crate::crud::Transaction; + use crate::mapper::RowMapper; pub async fn launch<'a, T, Z>( db_conn: &mut DatabaseConnection, stmt: &mut String, params: Z, - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> + ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> where Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, + T: Transaction + RowMapper { // Re-generate de insert statement to adequate it to the SQL SERVER syntax to retrieve the PK value(s) after insert if stmt.contains("RETURNING") { @@ -223,7 +231,7 @@ mod sqlserver_query_launcher { .iter() .for_each(|param| mssql_query.bind(*param)); - let _results: Vec = mssql_query + let _results = mssql_query .query( db_conn .sqlserver_connection() @@ -235,8 +243,9 @@ mod sqlserver_query_launcher { .await? 
.into_iter() .flatten() + .map(|row| T::deserialize_sqlserver(&row)) .collect::>(); - Ok(DatabaseResult::new_sqlserver(_results)) + Ok(_results) } } diff --git a/canyon_crud/src/lib.rs b/canyon_crud/src/lib.rs index 8a20b48e..929dbea2 100644 --- a/canyon_crud/src/lib.rs +++ b/canyon_crud/src/lib.rs @@ -4,7 +4,6 @@ pub mod bounds; pub mod crud; pub mod mapper; pub mod query_elements; -pub mod result; pub use query_elements::operators::*; diff --git a/canyon_crud/src/mapper.rs b/canyon_crud/src/mapper.rs index 71303785..0114bd3a 100644 --- a/canyon_crud/src/mapper.rs +++ b/canyon_crud/src/mapper.rs @@ -1,4 +1,5 @@ -use canyon_connection::{tiberius, tokio_postgres}; +#[cfg(feature = "postgres")] use canyon_connection::tokio_postgres; +#[cfg(feature = "mssql")] use canyon_connection::tiberius; use crate::crud::Transaction; diff --git a/canyon_crud/src/query_elements/query_builder.rs b/canyon_crud/src/query_elements/query_builder.rs index 56ddda7c..c26c5642 100644 --- a/canyon_crud/src/query_elements/query_builder.rs +++ b/canyon_crud/src/query_elements/query_builder.rs @@ -173,8 +173,7 @@ where self.query.params.to_vec(), self.datasource_name, ) - .await? - .get_entities::()) + .await?) } pub fn r#where>(&mut self, r#where: Z, op: impl Operator) { diff --git a/canyon_crud/src/result.rs b/canyon_crud/src/result.rs deleted file mode 100644 index 1a2cae29..00000000 --- a/canyon_crud/src/result.rs +++ /dev/null @@ -1,108 +0,0 @@ -use crate::{bounds::Row, crud::Transaction, mapper::RowMapper}; -use canyon_connection::{canyon_database_connector::DatabaseType, tiberius, tokio_postgres}; -use std::{fmt::Debug, marker::PhantomData}; - -/// Represents a database result after a query, by wrapping the `Vec` types that comes with the -/// results after the query. 
-/// and providing methods to deserialize this result into a **user defined struct** -#[derive(Debug)] -pub struct DatabaseResult { - pub postgres: Vec, - pub sqlserver: Vec, - pub active_ds: DatabaseType, - _phantom_data: std::marker::PhantomData, -} - -impl DatabaseResult { - pub fn new_postgresql(result: Vec) -> Self { - Self { - postgres: result, - sqlserver: Vec::with_capacity(0), - active_ds: DatabaseType::PostgreSql, - _phantom_data: PhantomData, - } - } - - pub fn new_sqlserver(results: Vec) -> Self { - Self { - postgres: Vec::with_capacity(0), - sqlserver: results, - active_ds: DatabaseType::SqlServer, - _phantom_data: PhantomData, - } - } - - /// Returns a [`Vec`] filled with instances of the type T. - /// Z param it's used to constraint the types that can call this method. - /// - /// Also, provides a way to statically call `Z::deserialize_` method, - /// which it's the implementation used by the macros to automatically - /// map database columns into the fields for T. - pub fn get_entities>(&self) -> Vec - where - T: Transaction, - { - match self.active_ds { - DatabaseType::PostgreSql => self.map_from_postgresql::(), - DatabaseType::SqlServer => self.map_from_sql_server::(), - } - } - - fn map_from_postgresql>(&self) -> Vec - where - T: Transaction, - { - let mut results = Vec::new(); - - self.postgres - .iter() - .for_each(|row| results.push(Z::deserialize_postgresql(row))); - - results - } - - fn map_from_sql_server>(&self) -> Vec - where - T: Transaction, - { - let mut results = Vec::new(); - - self.sqlserver - .iter() - .for_each(|row| results.push(Z::deserialize_sqlserver(row))); - - results - } - - pub fn as_canyon_rows(&self) -> Vec<&dyn Row> { - let mut results = Vec::new(); - - match self.active_ds { - DatabaseType::PostgreSql => { - self.postgres - .iter() - .for_each(|row| results.push(row as &dyn Row)); - } - DatabaseType::SqlServer => { - self.sqlserver - .iter() - .for_each(|row| results.push(row as &dyn Row)); - } - }; - - results - } - - 
/// Returns the active datasource - pub fn get_active_ds(&self) -> &DatabaseType { - &self.active_ds - } - - /// Returns how many rows contains the result of the query - pub fn number_of_results(&self) -> usize { - match self.active_ds { - DatabaseType::PostgreSql => self.postgres.len(), - DatabaseType::SqlServer => self.sqlserver.len(), - } - } -} diff --git a/canyon_macros/Cargo.toml b/canyon_macros/Cargo.toml index 93695087..a501440d 100755 --- a/canyon_macros/Cargo.toml +++ b/canyon_macros/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "canyon_macros" -version = "0.2.0" -edition = "2021" -documentation = "https://zerodaycode.github.io/canyon-book/" -homepage = "https://github.com/zerodaycode/Canyon-SQL" -readme = "../README.md" -license = "MIT" -description = "A Rust ORM and QueryBuilder" +version.workspace = true +edition.workspace = true +authors.workspace = true +documentation.workspace = true +homepage.workspace = true +readme.workspace = true +license.workspace = true +description.workspace = true [lib] proc-macro = true diff --git a/canyon_macros/src/query_operations/insert.rs b/canyon_macros/src/query_operations/insert.rs index 11890b31..543a5121 100644 --- a/canyon_macros/src/query_operations/insert.rs +++ b/canyon_macros/src/query_operations/insert.rs @@ -56,6 +56,7 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri datasource_name ).await; + // TODO Convertir a canyon rows match result { Ok(res) => { match res.get_active_ds() { @@ -296,7 +297,9 @@ pub fn generate_multiple_insert_tokens( datasource_name ).await; - match result { + match result { // TODO Falta el ds correcto + // TODO Recuperar datasource fuera del código cliente + /* .for_each(|row| results.push(row as &dyn Row)); */ Ok(res) => { match res.get_active_ds() { canyon_sql::crud::DatabaseType::PostgreSql => { diff --git a/canyon_macros/src/query_operations/select.rs b/canyon_macros/src/query_operations/select.rs index 761451c1..c782e8c2 
100644 --- a/canyon_macros/src/query_operations/select.rs +++ b/canyon_macros/src/query_operations/select.rs @@ -16,7 +16,7 @@ pub fn generate_find_all_unchecked_tokens( let stmt = format!("SELECT * FROM {table_schema_data}"); quote! { - /// Performns a `SELECT * FROM table_name`, where `table_name` it's + /// Performs a `SELECT * FROM table_name`, where `table_name` it's /// the name of your entity but converted to the corresponding /// database convention. P.ej. PostgreSQL prefers table names declared /// with snake_case identifiers. @@ -27,7 +27,6 @@ pub fn generate_find_all_unchecked_tokens( "" ).await .unwrap() - .get_entities::<#ty>() } /// Performs a `SELECT * FROM table_name`, where `table_name` it's @@ -45,7 +44,6 @@ pub fn generate_find_all_unchecked_tokens( datasource_name ).await .unwrap() - .get_entities::<#ty>() } } } @@ -60,7 +58,7 @@ pub fn generate_find_all_tokens( let stmt = format!("SELECT * FROM {table_schema_data}"); quote! { - /// Performns a `SELECT * FROM table_name`, where `table_name` it's + /// Performs a `SELECT * FROM table_name`, where `table_name` it's /// the name of your entity but converted to the corresponding /// database convention. P.ej. PostgreSQL prefers table names declared /// with snake_case identifiers. @@ -73,11 +71,10 @@ pub fn generate_find_all_tokens( &[], "" ).await? - .get_entities::<#ty>() ) } - /// Performns a `SELECT * FROM table_name`, where `table_name` it's + /// Performs a `SELECT * FROM table_name`, where `table_name` it's /// the name of your entity but converted to the corresponding /// database convention. P.ej. PostgreSQL prefers table names declared /// with snake_case identifiers. @@ -98,7 +95,6 @@ pub fn generate_find_all_tokens( &[], datasource_name ).await? - .get_entities::<#ty>() ) } } @@ -151,25 +147,26 @@ pub fn generate_count_tokens( let stmt = format!("SELECT COUNT (*) FROM {table_schema_data}"); let result_handling = quote! 
{ - match count.get_active_ds() { - canyon_sql::crud::DatabaseType::PostgreSql => { - Ok( - count.postgres.get(0) - .expect(&format!("Count operation failed for {:?}", #ty_str)) - .get::<&str, i64>("count") - .to_owned() - ) - }, - canyon_sql::crud::DatabaseType::SqlServer => { - Ok( - count.sqlserver.get(0) - .expect(&format!("Count operation failed for {:?}", #ty_str)) - .get::(0) - .expect(&format!("SQL Server failed to return the count values for {:?}", #ty_str)) - .into() - ) - } - } + // match count.get_active_ds() { + // canyon_sql_root::crud::DatabaseType::PostgreSql => { + // Ok( + // count.postgres.get(0) + // .expect(&format!("Count operation failed for {:?}", #ty_str)) + // .get::<&str, i64>("count") + // .to_owned() + // ) + // }, + // canyon_sql_root::crud::DatabaseType::SqlServer => { + // Ok( + // count.sqlserver.get(0) + // .expect(&format!("Count operation failed for {:?}", #ty_str)) + // .get::(0) + // .expect(&format!("SQL Server failed to return the count values for {:?}", #ty_str)) + // .into() + // ) + // } + // } + Ok(0 as i64) // TODO }; quote! { @@ -240,11 +237,12 @@ pub fn generate_find_by_pk_tokens( }; } + // TOODO no tenemos number_OF_results let result_handling = quote! { match result { - n if n.number_of_results() == 0 => Ok(None), + n if n.len() == 0 => Ok(None), _ => Ok( - Some(result.get_entities::<#ty>().remove(0)) + Some(result.remove(0)) ) } }; @@ -347,9 +345,10 @@ pub fn generate_find_by_foreign_key_tokens( ); let result_handler = quote! { match result { - n if n.number_of_results() == 0 => Ok(None), + // TODO Noof + n if n.len() == 0 => Ok(None), _ => Ok(Some( - result.get_entities::<#fk_ty>().remove(0) + result.remove(0) )) } }; @@ -448,8 +447,7 @@ pub fn generate_find_by_reverse_foreign_key_tokens( stmt, &[lookage_value], "" - ).await? - .get_entities::<#ty>()) + ).await?) } }, )); @@ -477,8 +475,7 @@ pub fn generate_find_by_reverse_foreign_key_tokens( stmt, &[lookage_value], datasource_name - ).await? 
- .get_entities::<#ty>()) + ).await?) } }, )); diff --git a/canyon_observer/Cargo.toml b/canyon_observer/Cargo.toml index cb4bd353..67918e37 100644 --- a/canyon_observer/Cargo.toml +++ b/canyon_observer/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "canyon_observer" -version = "0.2.0" -edition = "2021" -documentation = "https://zerodaycode.github.io/canyon-book/" -homepage = "https://github.com/zerodaycode/Canyon-SQL" -readme = "../README.md" -license = "MIT" -description = "A Rust ORM and QueryBuilder" +version.workspace = true +edition.workspace = true +authors.workspace = true +documentation.workspace = true +homepage.workspace = true +readme.workspace = true +license.workspace = true +description.workspace = true [dependencies] tokio = { version = "1.9.0", features = ["full"] } diff --git a/canyon_observer/src/manager/entity.rs b/canyon_observer/src/manager/entity.rs index 78e2f157..7aaeb38e 100644 --- a/canyon_observer/src/manager/entity.rs +++ b/canyon_observer/src/manager/entity.rs @@ -71,7 +71,7 @@ impl CanyonEntity { /// Generates an implementation of the match pattern to find whatever variant /// is being requested when the method `.field_name_as_str(self)` it's invoked over some - /// instance that implements the `canyon_sql::crud::bounds::FieldIdentifier` trait + /// instance that implements the `canyon_sql_root::crud::bounds::FieldIdentifier` trait pub fn create_match_arm_for_get_variant_as_string( &self, enum_name: &Ident, @@ -91,7 +91,7 @@ impl CanyonEntity { /// Generates an implementation of the match pattern to find whatever variant /// is being requested when the method `.value()` it's invoked over some - /// instance that implements the `canyon_sql::crud::bounds::FieldValueIdentifier` trait + /// instance that implements the `canyon_sql_root::crud::bounds::FieldValueIdentifier` trait pub fn create_match_arm_for_relate_fields_with_values( &self, enum_name: &Ident, diff --git a/canyon_observer/src/migrations/handler.rs 
b/canyon_observer/src/migrations/handler.rs index d454128a..739b4cae 100644 --- a/canyon_observer/src/migrations/handler.rs +++ b/canyon_observer/src/migrations/handler.rs @@ -5,7 +5,6 @@ use crate::{ canyon_crud::{ bounds::{Column, Row, RowOperations}, crud::Transaction, - result::DatabaseResult, DatabaseType, }, constants, @@ -87,7 +86,7 @@ impl Migrations { async fn fetch_database( datasource_name: &str, db_type: DatabaseType, - ) -> DatabaseResult { + ) -> Vec { let query = match db_type { DatabaseType::PostgreSql => constants::postgresql_queries::FETCH_PUBLIC_SCHEMA, DatabaseType::SqlServer => constants::mssql_queries::FETCH_PUBLIC_SCHEMA, @@ -105,10 +104,12 @@ impl Migrations { /// Handler for parse the result of query the information of some database schema, /// and extract the content of the returned rows into custom structures with /// the data well organized for every entity present on that schema - fn map_rows(db_results: DatabaseResult) -> Vec { + fn map_rows(db_results: Vec) -> Vec { let mut schema_info: Vec = Vec::new(); - for res_row in db_results.as_canyon_rows().into_iter() { + for res_row in db_results.iter() + .map(|row| &row as &dyn Row) + { let unique_table = schema_info .iter_mut() .find(|table| table.table_name == *res_row.get::<&str>("table_name").to_owned()); diff --git a/canyon_observer/src/migrations/memory.rs b/canyon_observer/src/migrations/memory.rs index 0a4080c0..aac81d3b 100644 --- a/canyon_observer/src/migrations/memory.rs +++ b/canyon_observer/src/migrations/memory.rs @@ -4,6 +4,7 @@ use regex::Regex; use std::collections::HashMap; use std::fs; use walkdir::WalkDir; +use canyon_crud::bounds::Row; use super::register_types::CanyonRegisterEntity; @@ -70,11 +71,11 @@ impl CanyonMemory { let res = Self::query("SELECT * FROM canyon_memory", [], &datasource.name) .await .expect("Error querying Canyon Memory"); - let mem_results = res.as_canyon_rows(); + let mem_results = res.map(|row| &row as &dyn Row); // Manually maps the results let 
mut db_rows = Vec::new(); - for row in mem_results.iter() { + for row in mem_results { let db_row = CanyonMemoryRow { id: row.get::("id"), filepath: row.get::<&str>("filepath"), diff --git a/canyon_sql/src/lib.rs b/src/lib.rs old mode 100755 new mode 100644 similarity index 98% rename from canyon_sql/src/lib.rs rename to src/lib.rs index 330b8ed4..d3bf079c --- a/canyon_sql/src/lib.rs +++ b/src/lib.rs @@ -25,7 +25,6 @@ pub mod crud { pub use canyon_crud::bounds; pub use canyon_crud::crud::*; pub use canyon_crud::mapper::*; - pub use canyon_crud::result::*; pub use canyon_crud::DatabaseType; } diff --git a/tests/Cargo.toml b/tests/Cargo.toml index f2e83953..54047bc4 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dev-dependencies] -canyon_sql = { path = "../canyon_sql" } +canyon_sql = { path = ".." } [[test]] name = "canyon_integration_tests" From 5723243f180d192b4ccd1aa86fad9cfdae334f43 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Sun, 16 Apr 2023 00:25:22 +0200 Subject: [PATCH 05/23] WIP - Introducing CanyonRows --- canyon_connection/src/lib.rs | 25 ++++++- canyon_crud/src/bounds.rs | 58 +++++++++------ canyon_crud/src/crud.rs | 59 ++++++--------- canyon_crud/src/lib.rs | 1 + canyon_crud/src/rows.rs | 76 ++++++++++++++++++++ canyon_macros/src/query_operations/insert.rs | 33 ++------- 6 files changed, 163 insertions(+), 89 deletions(-) create mode 100644 canyon_crud/src/rows.rs diff --git a/canyon_connection/src/lib.rs b/canyon_connection/src/lib.rs index 59a9dbba..535e59fd 100644 --- a/canyon_connection/src/lib.rs +++ b/canyon_connection/src/lib.rs @@ -15,7 +15,7 @@ use crate::datasources::{CanyonSqlConfig, DatasourceConfig}; use canyon_database_connector::DatabaseConnection; use indexmap::IndexMap; use lazy_static::lazy_static; -use tokio::sync::Mutex; +use tokio::sync::{Mutex, MutexGuard}; const CONFIG_FILE_IDENTIFIER: &str = "canyon.toml"; @@ -61,3 +61,26 @@ pub async fn init_connections_cache() { ); } 
} + + +/// +pub fn get_database_connection<'a>( + datasource_name: &str, + guarded_cache: &'a mut MutexGuard> +) -> &'a mut DatabaseConnection { + if datasource_name.is_empty() { + guarded_cache + .get_mut( + DATASOURCES + .get(0) + .expect("We didn't found any valid datasource configuration. Check your `canyon.toml` file") + .name + .as_str() + ).unwrap_or_else(|| panic!("No default datasource found. Check your `canyon.toml` file")) + } else { + guarded_cache.get_mut(datasource_name) + .unwrap_or_else(|| + panic!("Canyon couldn't find a datasource in the pool with the argument provided: {datasource_name}") + ) + } +} diff --git a/canyon_crud/src/bounds.rs b/canyon_crud/src/bounds.rs index 813c05dd..3589cb65 100644 --- a/canyon_crud/src/bounds.rs +++ b/canyon_crud/src/bounds.rs @@ -145,55 +145,71 @@ pub enum ColumnType { } pub trait RowOperations { - /// Abstracts the different forms of use the common `get` row - /// function or method dynamically no matter what are the origin - /// type from any database client provider - fn get<'a, Output>(&'a self, col_name: &str) -> Output - where - Output: tokio_postgres::types::FromSql<'a> + tiberius::FromSql<'a>; + #[cfg(feature = "postgres")] + fn get_postgres<'a, Output>(&'a self, col_name: &str) -> Output + where Output: tokio_postgres::types::FromSql<'a>; + #[cfg(feature = "mssql")] + fn get_mssql<'a, Output>(&self, col_name: &str) -> Output + where Output: tiberius::FromSql<'a>; - fn get_opt<'a, Output>(&'a self, col_name: &str) -> Option - where - Output: tokio_postgres::types::FromSql<'a> + tiberius::FromSql<'a>; + #[cfg(feature = "postgres")] + fn get_postgres_opt<'a, Output>(&'a self, col_name: &str) -> Option + where Output: tokio_postgres::types::FromSql<'a>; + #[cfg(feature = "mssql")] + fn get_mssql_opt<'a, Output>(&'a self, col_name: &str) -> Option + where Output: tokio_postgres::types::FromSql<'a>; fn columns(&self) -> Vec; } impl RowOperations for &dyn Row { - fn get<'a, Output>(&'a self, col_name: &str) -> 
Output - where - Output: tokio_postgres::types::FromSql<'a> + tiberius::FromSql<'a>, + #[cfg(feature = "postgres")] + fn get_postgres<'a, Output>(&'a self, col_name: &str) -> Output + where Output: tokio_postgres::types::FromSql<'a> { if let Some(row) = self.as_any().downcast_ref::() { return row.get::<&str, Output>(col_name); }; + panic!() // TODO into result and propagate + } + #[cfg(feature = "mssql")] + fn get_mssql<'a, Output>(&'a self, col_name: &str) -> Output + where Output: tiberius::FromSql<'a> + { if let Some(row) = self.as_any().downcast_ref::() { return row .get::(col_name) .expect("Failed to obtain a row in the MSSQL migrations"); }; - panic!() + panic!() // TODO into result and propagate } - fn get_opt<'a, Output>(&'a self, col_name: &str) -> Option - where - Output: tokio_postgres::types::FromSql<'a> + tiberius::FromSql<'a>, + #[cfg(feature = "postgres")] + fn get_postgres_opt<'a, Output>(&'a self, col_name: &str) -> Option + where Output: tokio_postgres::types::FromSql<'a> { if let Some(row) = self.as_any().downcast_ref::() { return row.get::<&str, Option>(col_name); }; + panic!() // TODO into result and propagate + } + + #[cfg(feature = "mssql")] + fn get_mssql_opt<'a, Output>(&'a self, col_name: &str) -> Option + where Output: tiberius::FromSql<'a> + { if let Some(row) = self.as_any().downcast_ref::() { return row - .try_get:where - .expect("Failed to obtain a row in the MSSQL migrations"); + .try_get + .expect("Failed to obtain a row for MSSQL"); }; - panic!() + panic!() // TODO into result and propagate } fn columns(&self) -> Vec { let mut cols = vec![]; - if self.as_any().is::() { + /* if self.as_any().is::() { self.as_any() .downcast_ref::() .expect("Not a tokio postgres Row for column") @@ -217,7 +233,7 @@ impl RowOperations for &dyn Row { type_: ColumnType::SqlServer(c.column_type()), }) }) - }; + }; */ cols } diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index 121efa91..59fc5bd3 100644 --- a/canyon_crud/src/crud.rs +++ 
b/canyon_crud/src/crud.rs @@ -2,13 +2,14 @@ use std::fmt::Display; use async_trait::async_trait; use canyon_connection::canyon_database_connector::DatabaseConnection; -use canyon_connection::{CACHED_DATABASE_CONN, DATASOURCES}; +use canyon_connection::{CACHED_DATABASE_CONN, get_database_connection}; use crate::bounds::QueryParameter; use crate::mapper::RowMapper; use crate::query_elements::query_builder::{ DeleteQueryBuilder, SelectQueryBuilder, UpdateQueryBuilder, }; +use crate::rows::CanyonRows; /// This traits defines and implements a query against a database given /// an statement `stmt` and the params to pass the to the client. @@ -18,36 +19,22 @@ use crate::query_elements::query_builder::{ /// automatically map it to an struct. #[async_trait] pub trait Transaction { - /// Performs a query against the targeted database by the selected datasource. - /// - /// No datasource means take the entry zero + /// Performs a query against the targeted database by the selected or + /// the defaulted datasource, wrapping the resultant collection of entities + /// in [`super::rows::Rows`]. This ones provides custom operations that + /// facilitates the macro operations. async fn query<'a, S, Z>( stmt: S, params: Z, datasource_name: &'a str, - ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> - where - S: AsRef + Display + Sync + Send + 'a, - Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, - T: Transaction + RowMapper + ) -> Result> + where + S: AsRef + Display + Sync + Send + 'a, + Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, + T: Transaction + RowMapper { let mut guarded_cache = CACHED_DATABASE_CONN.lock().await; - - let database_conn = if datasource_name.is_empty() { - guarded_cache - .get_mut( - DATASOURCES - .get(0) - .expect("We didn't found any valid datasource configuration. Check your `canyon.toml` file") - .name - .as_str() - ).unwrap_or_else(|| panic!("No default datasource found. 
Check your `canyon.toml` file")) - } else { - guarded_cache.get_mut(datasource_name) - .unwrap_or_else(|| - panic!("Canyon couldn't find a datasource in the pool with the argument provided: {datasource_name}" - )) - }; + let database_conn = get_database_connection(datasource_name, &mut guarded_cache); match database_conn { #[cfg(feature = "postgres")] DatabaseConnection::Postgres(_) => { @@ -56,7 +43,7 @@ pub trait Transaction { stmt.to_string(), params.as_ref(), ) - .await + .await } #[cfg(feature = "mssql")] DatabaseConnection::SqlServer(_) => { sqlserver_query_launcher::launch::( @@ -64,7 +51,7 @@ pub trait Transaction { &mut stmt.to_string(), params, ) - .await + .await } } } @@ -166,12 +153,13 @@ mod postgres_query_launcher { use canyon_connection::canyon_database_connector::DatabaseConnection; use crate::crud::Transaction; use crate::mapper::RowMapper; + use crate::rows::CanyonRows; pub async fn launch<'a, T>( db_conn: &DatabaseConnection, stmt: String, params: &'a [&'_ dyn QueryParameter<'_>], - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> + ) -> Result> where T: Transaction + RowMapper { let mut m_params = Vec::new(); @@ -186,9 +174,7 @@ mod postgres_query_launcher { .query(&stmt, m_params.as_slice()) .await?; - Ok( - r.iter().map(|row| T::deserialize_postgresql(row)).collect() - ) + Ok(CanyonRows::Postgres(r)) } } @@ -201,12 +187,13 @@ mod sqlserver_query_launcher { }; use crate::crud::Transaction; use crate::mapper::RowMapper; + use crate::rows::CanyonRows; pub async fn launch<'a, T, Z>( db_conn: &mut DatabaseConnection, stmt: &mut String, params: Z, - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> + ) -> Result> where Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, T: Transaction + RowMapper @@ -240,12 +227,8 @@ mod sqlserver_query_launcher { ) .await? .into_results() - .await? 
- .into_iter() - .flatten() - .map(|row| T::deserialize_sqlserver(&row)) - .collect::>(); + .await?; - Ok(_results) + Ok(CanyonRows::Tiberius(_results)) } } diff --git a/canyon_crud/src/lib.rs b/canyon_crud/src/lib.rs index 929dbea2..ee856f6c 100644 --- a/canyon_crud/src/lib.rs +++ b/canyon_crud/src/lib.rs @@ -4,6 +4,7 @@ pub mod bounds; pub mod crud; pub mod mapper; pub mod query_elements; +pub mod rows; pub use query_elements::operators::*; diff --git a/canyon_crud/src/rows.rs b/canyon_crud/src/rows.rs new file mode 100644 index 00000000..157fde81 --- /dev/null +++ b/canyon_crud/src/rows.rs @@ -0,0 +1,76 @@ +use tokio_postgres::types::FromSql; +use crate::bounds::{PrimaryKey, QueryParameter}; +use crate::crud::Transaction; +use crate::mapper::RowMapper; + +/// Lightweight wrapper over the collection of results of the different crates +/// supported by Canyon-SQL. +/// +/// Even tho the wrapping seems meaningless, this allows us to provide internal +/// operations that are too difficult or to ugly to implement in the macros that +/// will call the query method of Crud. 
+pub enum CanyonRows { + #[cfg(feature = "postgres")] Postgres(Vec), + #[cfg(feature = "mssql")] Tiberius(Vec>) +} + +impl CanyonRows { + // /// Type constructor, returning the correct variant of Self wrapping the collection of results + // /// by the given database connection + // pub fn new( + // conn: &DatabaseConnection, + // res: Vec + // ) -> Self { + // match conn { + // #[cfg(feature = "postgres")] DatabaseConnection::Postgres(_) => Self::Postgres(res), + // #[cfg(feature = "mssql")] DatabaseConnection::SqlServer(_) => Self::Tiberius(res) + // } + // } + + /// Consumes `self` and returns the wrapped [`std::vec::Vec`] with the instances of T + pub fn into_results(self) -> Vec where T: Transaction + RowMapper { + match self { + #[cfg(feature = "postgres")] Self::Postgres(v) => v + .iter() + .map(|row| T::deserialize_postgresql(row)) + .collect(), + #[cfg(feature = "mssql")] Self::Tiberius(v) => v + .iter() + .flatten() + .map(|row| T::deserialize_sqlserver(&row)) + .collect() + } + } + + /// + pub fn set_primary_key_after_insert<'a, T, PkType: PrimaryKey>(self, pk: &str) -> PkType { + match self { + #[cfg(feature = "postgres")] Self::Postgres(v) => { + v.get(0) + .expect("No value found on the returning clause") + .get::<&str, PkType>(pk) + // .to_owned(); + } + #[cfg(feature = "mssql")] Self::Tiberius(v) => { + v.into_iter() + .flatten() + .collect::>() + .remove(0) + .get::(pk) + .expect("SQL Server primary key type failed to be set as value") + // .to_owned() + } + } + } +} + +// r.iter().map(|row| T::deserialize_postgresql(row)).collect() +// .map(|row| T::deserialize_sqlserver(&row)) + + +// canyon_sql::crud::DatabaseType::SqlServer => { +// self.#pk_ident = res.sqlserver.get(0) +// .expect("No value found on the returning clause") +// .get::<#pk_type, &str>(#primary_key) +// .expect("SQL Server primary key type failed to be set as value") +// .to_owned(); diff --git a/canyon_macros/src/query_operations/insert.rs 
b/canyon_macros/src/query_operations/insert.rs index 543a5121..d33d5086 100644 --- a/canyon_macros/src/query_operations/insert.rs +++ b/canyon_macros/src/query_operations/insert.rs @@ -50,38 +50,13 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri #primary_key ); - let result = <#ty as canyon_sql::crud::Transaction<#ty>>::query( + <#ty as canyon_sql::crud::Transaction<#ty>>::query_for_rows( stmt, values, datasource_name - ).await; - - // TODO Convertir a canyon rows - match result { - Ok(res) => { - match res.get_active_ds() { - canyon_sql::crud::DatabaseType::PostgreSql => { - self.#pk_ident = res.postgres.get(0) - .expect("No value found on the returning clause") - .get::<&str, #pk_type>(#primary_key) - .to_owned(); - - Ok(()) - }, - canyon_sql::crud::DatabaseType::SqlServer => { - self.#pk_ident = res.sqlserver.get(0) - .expect("No value found on the returning clause") - .get::<#pk_type, &str>(#primary_key) - .expect("SQL Server primary key type failed to be set as value") - .to_owned(); - - Ok(()) - } - } - }, - Err(e) => Err(e) - } - } + ).await + .set_primary_key_after_insert(); + } } else { quote! 
{ let stmt = format!( From 32e611a5186cd129850f94cb8784ec788602b907 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Sun, 16 Apr 2023 21:08:57 +0200 Subject: [PATCH 06/23] WIP - Whole rework of the workspace, more cfg's --- Cargo.toml | 30 ++- canyon_connection/Cargo.toml | 15 +- .../src/canyon_database_connector.rs | 49 ++--- canyon_connection/src/datasources.rs | 14 +- canyon_connection/src/lib.rs | 10 +- canyon_crud/Cargo.toml | 23 +- canyon_crud/src/bounds.rs | 208 +++++++++--------- canyon_crud/src/crud.rs | 47 ++-- canyon_crud/src/mapper.rs | 9 +- .../src/query_elements/query_builder.rs | 6 +- canyon_crud/src/rows.rs | 61 ++--- canyon_macros/src/query_operations/insert.rs | 23 +- canyon_observer/Cargo.toml | 19 +- canyon_observer/src/lib.rs | 1 + canyon_observer/src/migrations/handler.rs | 15 +- .../src/migrations/information_schema.rs | 7 +- canyon_observer/src/migrations/memory.rs | 7 +- 17 files changed, 272 insertions(+), 272 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 70dea99d..01e1eafe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,32 +5,44 @@ version = "0.2.0" [workspace] members = [ + "canyon_connection", "canyon_observer", "canyon_macros", "canyon_crud", - "canyon_connection", "tests" ] -[workspace.dependencies] +[dependencies] # Project crates canyon_macros = { version = "0.2.0", path = "canyon_macros" } canyon_observer = { version = "0.2.0", path = "canyon_observer" } -canyon_crud = { version = "0.2.0", path = "canyon_crud", features = ["postgres", "mssql"] } -canyon_connection = { version = "0.2.0", path = "canyon_connection", features = ["postgres", "mssql"] } +canyon_crud = { version = "0.2.0", path = "canyon_crud" } +canyon_connection = { version = "0.2.0", path = "canyon_connection" } -tokio = { version = "1.21.2", features = ["full"] } + +#tokio = { workspace = true } +#tokio-util = { workspace = true } +#tokio-postgres = { workspace = true } +#tiberius = { worskpace = true } + +[workspace.dependencies] +canyon_crud = { version = 
"0.2.0", path = "canyon_crud" } +canyon_connection = { version = "0.2.0", path = "canyon_connection" } + +tokio = { version = "1.27.0", features = ["full"] } tokio-util = { version = "0.7.4", features = ["compat"] } tokio-postgres = { version = "0.7.2", features = ["with-chrono-0_4"] } tiberius = { version = "0.12.1", features = ["tds73", "chrono", "integrated-auth-gssapi"] } +serde = { version = "1.0.138", features = ["derive"] } + futures = "0.3.25" indexmap = "1.9.1" async-std = "1.12.0" lazy_static = "1.4.0" -serde = { version = "1.0.138", features = ["derive"] } toml = "0.7.3" +async-trait = "0.1.68" [workspace.package] version = "0.2.0" @@ -41,3 +53,9 @@ homepage = "https://github.com/zerodaycode/Canyon-SQL" readme = "../README.md" license = "MIT" description = "A Rust ORM and QueryBuilder" + +[features] +default = ["postgres", "canyon_connection/tokio-postgres", "canyon_crud/tokio-postgres", "canyon_observer/tokio-postgres"] +postgres = ["canyon_connection/tokio-postgres", "canyon_crud/tokio-postgres", "canyon_observer/tokio-postgres"] +mssql = ["canyon_connection/tiberius"] +mssql-integrated-auth = ["mssql"] \ No newline at end of file diff --git a/canyon_connection/Cargo.toml b/canyon_connection/Cargo.toml index 323e91a3..36fc8a97 100644 --- a/canyon_connection/Cargo.toml +++ b/canyon_connection/Cargo.toml @@ -10,20 +10,13 @@ license.workspace = true description.workspace = true [dependencies] -tokio = { workspace = true, features = ["full"], optional = true } -tokio-util = { workspace = true, features = ["compat"], optional = true } -tokio-postgres = { workspace = true, features = ["with-chrono-0_4"], optional = true } -tiberius = { workspace = true, features = ["tds73", "chrono", "integrated-auth-gssapi"], optional = true } - +tokio = { workspace = true } +tokio-util = { workspace = true } +tokio-postgres = { workspace = true, optional = true } +tiberius = { workspace = true, optional = true } futures = { workspace = true } indexmap = { 
workspace = true } async-std = { workspace = true } lazy_static = { workspace = true } serde = { workspace = true, features = ["derive"] } toml = { workspace = true } - -[features] -default = ["postgres"] -postgres = ["tokio", "tokio-postgres", "tokio-util"] -mssql = ["tiberius", "tiberius/tds73", "tiberius/chrono"] -mssql-integrated-auth = ["mssql", "tiberius/integrated-auth-gssapi"] \ No newline at end of file diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index 27d59799..5e324cde 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -1,32 +1,31 @@ -#[cfg(feature = "mssql")] use async_std::net::TcpStream; +#[cfg(feature = "tiberius")] use async_std::net::TcpStream; use serde::Deserialize; -#[cfg(feature = "mssql")] use tiberius::{AuthMethod, Config}; -#[cfg(feature = "postgres")] use tokio_postgres::{Client, NoTls}; +#[cfg(feature = "tiberius")] use tiberius::{AuthMethod, Config}; +#[cfg(feature = "tokio-postgres")] use tokio_postgres::{Client, NoTls}; use crate::datasources::DatasourceConfig; /// Represents the current supported databases by Canyon -#[derive(Deserialize, Debug, Eq, PartialEq, Clone, Copy, Default)] +#[derive(Deserialize, Debug, Eq, PartialEq, Clone, Copy)] pub enum DatabaseType { - #[default] #[serde(alias = "postgres", alias = "postgresql")] - #[cfg(feature = "postgres")] + #[cfg(feature = "tokio-postgres")] PostgreSql, #[serde(alias = "sqlserver", alias = "mssql")] - #[cfg(feature = "mssql")] + #[cfg(feature = "tiberius")] SqlServer, } /// A connection with a `PostgreSQL` database -#[cfg(feature = "postgres")] +#[cfg(feature = "tokio-postgres")] pub struct PostgreSqlConnection { pub client: Client, // pub connection: Connection, // TODO Hold it, or not to hold it... that's the question! 
} -#[cfg(feature = "mssql")] /// A connection with a `SqlServer` database +#[cfg(feature = "tiberius")] pub struct SqlServerConnection { pub client: &'static mut tiberius::Client, } @@ -36,8 +35,8 @@ pub struct SqlServerConnection { /// process them and generates a pool of 1 to 1 database connection for /// every datasource defined. pub enum DatabaseConnection { - #[cfg(feature = "postgres")] Postgres(PostgreSqlConnection), - #[cfg(feature = "mssql")] SqlServer(SqlServerConnection), + #[cfg(feature = "tokio-postgres")] Postgres(PostgreSqlConnection), + #[cfg(feature = "tiberius")] SqlServer(SqlServerConnection), } unsafe impl Send for DatabaseConnection {} @@ -48,7 +47,7 @@ impl DatabaseConnection { datasource: &DatasourceConfig, ) -> Result> { match datasource.get_db_type() { - #[cfg(feature = "postgres")] + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => { let (username, password) = match &datasource.auth { crate::datasources::Auth::Postgres(postgres_auth) => match postgres_auth { @@ -56,7 +55,7 @@ impl DatabaseConnection { (username.as_str(), password.as_str()) } }, - #[cfg(feature = "mssql")] + #[cfg(feature = "tiberius")] crate::datasources::Auth::SqlServer(_) => { panic!("Found SqlServer auth configuration for a PostgreSQL datasource") } @@ -85,7 +84,7 @@ impl DatabaseConnection { // connection: new_connection, })) } - #[cfg(feature = "mssql")] + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => { let mut config = Config::new(); @@ -95,14 +94,14 @@ impl DatabaseConnection { // Using SQL Server authentication. 
config.authentication(match &datasource.auth { - #[cfg(feature = "postgres")] crate::datasources::Auth::Postgres(_) => { + #[cfg(feature = "tokio-postgres")] crate::datasources::Auth::Postgres(_) => { panic!("Found PostgreSQL auth configuration for a SqlServer database") } crate::datasources::Auth::SqlServer(sql_server_auth) => match sql_server_auth { crate::datasources::SqlServerAuth::Basic { username, password } => { AuthMethod::sql_server(username, password) } - #[cfg(feature = "mssql-integrated-auth")] + #[cfg(feature = "mssql-integrated-auth")] // TODO pending, or remove the cfg? crate::datasources::SqlServerAuth::Integrated => AuthMethod::Integrated, }, }); @@ -135,21 +134,19 @@ impl DatabaseConnection { } } - #[cfg(feature = "postgres")] + #[cfg(feature = "tokio-postgres")] pub fn postgres_connection(&self) -> Option<&PostgreSqlConnection> { - if let DatabaseConnection::Postgres(conn) = self { - Some(conn) - } else { - None + match self { + DatabaseConnection::Postgres(conn) => Some(conn), + _ => panic!() } } - #[cfg(feature = "mssql")] + #[cfg(feature = "tiberius")] pub fn sqlserver_connection(&mut self) -> Option<&mut SqlServerConnection> { - if let DatabaseConnection::SqlServer(conn) = self { - Some(conn) - } else { - None + match self { + DatabaseConnection::SqlServer(conn) => Some(conn), + _ => panic!() } } } diff --git a/canyon_connection/src/datasources.rs b/canyon_connection/src/datasources.rs index 4dc76dbb..2a553cb3 100644 --- a/canyon_connection/src/datasources.rs +++ b/canyon_connection/src/datasources.rs @@ -49,7 +49,7 @@ fn load_ds_config_from_array() { assert_eq!(ds_1.properties.db_name, "triforce2"); assert_eq!(ds_1.properties.migrations, Some(Migrations::Disabled)); - #[cfg(feature = "postgres")] assert_eq!(ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)) + #[cfg(feature = "tokio-postgres")] assert_eq!(ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)) } /// #[derive(Deserialize, Debug, Clone)] @@ -72,8 +72,8 @@ pub struct 
DatasourceConfig { impl DatasourceConfig { pub fn get_db_type(&self) -> DatabaseType { match self.auth { - #[cfg(feature = "postgres")] Auth::Postgres(_) => DatabaseType::PostgreSql, - #[cfg(feature = "mssql")] Auth::SqlServer(_) => DatabaseType::SqlServer, + #[cfg(feature = "tokio-postgres")] Auth::Postgres(_) => DatabaseType::PostgreSql, + #[cfg(feature = "tiberius")] Auth::SqlServer(_) => DatabaseType::SqlServer, } } } @@ -81,22 +81,22 @@ impl DatasourceConfig { #[derive(Deserialize, Debug, Clone, PartialEq)] pub enum Auth { #[serde(alias = "PostgreSQL", alias = "postgresql", alias = "postgres")] - #[cfg(feature = "postgres")] + #[cfg(feature = "tokio-postgres")] Postgres(PostgresAuth), #[serde(alias = "SqlServer", alias = "sqlserver", alias = "mssql")] - #[cfg(feature = "mssql")] + #[cfg(feature = "tiberius")] SqlServer(SqlServerAuth), } #[derive(Deserialize, Debug, Clone, PartialEq)] -#[cfg(feature = "postgres")] +#[cfg(feature = "tokio-postgres")] pub enum PostgresAuth { #[serde(alias = "Basic", alias = "basic")] Basic { username: String, password: String }, } #[derive(Deserialize, Debug, Clone, PartialEq)] -#[cfg(feature = "mssql")] +#[cfg(feature = "tiberius")] pub enum SqlServerAuth { #[serde(alias = "Basic", alias = "basic")] Basic { username: String, password: String }, diff --git a/canyon_connection/src/lib.rs b/canyon_connection/src/lib.rs index 535e59fd..cc240034 100644 --- a/canyon_connection/src/lib.rs +++ b/canyon_connection/src/lib.rs @@ -1,10 +1,10 @@ -#[cfg(feature = "mssql")] pub extern crate async_std; +#[cfg(feature = "tiberius")] pub extern crate async_std; pub extern crate futures; pub extern crate lazy_static; -#[cfg(feature = "mssql")] pub extern crate tiberius; -#[cfg(feature = "postgres")] pub extern crate tokio; -#[cfg(feature = "postgres")] pub extern crate tokio_postgres; -#[cfg(feature = "postgres")] pub extern crate tokio_util; +#[cfg(feature = "tiberius")] pub extern crate tiberius; +pub extern crate tokio; +#[cfg(feature = 
"tokio-postgres")] pub extern crate tokio_postgres; +#[cfg(feature = "tokio-postgres")] pub extern crate tokio_util; pub mod canyon_database_connector; pub mod datasources; diff --git a/canyon_crud/Cargo.toml b/canyon_crud/Cargo.toml index 0e4f0854..6f6ee233 100644 --- a/canyon_crud/Cargo.toml +++ b/canyon_crud/Cargo.toml @@ -10,18 +10,19 @@ license.workspace = true description.workspace = true [dependencies] -tokio = { workspace = true, features = ["full"], optional = true } -tokio-util = { workspace = true, features = ["compat"], optional = true } -tokio-postgres = { workspace = true, features = ["with-chrono-0_4"], optional = true } -tiberius = { workspace = true, features = ["tds73", "chrono", "integrated-auth-gssapi"], optional = true } - +#tokio = { workspace = true, features = ["full"], optional = true } +#tokio-util = { workspace = true, features = ["compat"], optional = true } +#tokio-postgres = { workspace = true, features = ["with-chrono-0_4"], optional = true } +#tiberius = { workspace = true, features = ["tds73", "chrono", "integrated-auth-gssapi"], optional = true } +tokio-postgres = { workspace = true, optional = true } +tiberius = { workspace = true, optional = true } chrono = { version = "0.4", features = ["serde"] } async-trait = { version = "0.1.50" } -canyon_connection = { version = "0.2.0", path = "../canyon_connection", features = ["postgres", "mssql"] } +canyon_connection = { workspace = true, path = "../canyon_connection" } -[features] -default = ["postgres"] -postgres = ["tokio", "tokio-postgres", "tokio-util"] -mssql = ["tiberius", "tiberius/tds73", "tiberius/chrono"] -mssql-integrated-auth = ["mssql", "tiberius/integrated-auth-gssapi"] \ No newline at end of file +#[features] +#default = ["postgres"] +#postgres = ["tokio", "tokio-postgres", "tokio-util"] +#mssql = ["tiberius", "tiberius/tds73", "tiberius/chrono"] +#mssql-integrated-auth = ["mssql", "tiberius/integrated-auth-gssapi"] \ No newline at end of file diff --git 
a/canyon_crud/src/bounds.rs b/canyon_crud/src/bounds.rs index 3589cb65..218f337b 100644 --- a/canyon_crud/src/bounds.rs +++ b/canyon_crud/src/bounds.rs @@ -1,19 +1,16 @@ -#![allow(clippy::extra_unused_lifetimes)] - use crate::{ crud::{CrudOperations, Transaction}, mapper::RowMapper, }; -#[cfg(feature = "postgres")] +#[cfg(feature = "tokio-postgres")] use canyon_connection::tokio_postgres::{self, types::ToSql}; -#[cfg(feature = "mssql")] -use canyon_connection::tiberius::{self, ColumnData, IntoSql}; +#[cfg(feature = "tiberius")] +use canyon_connection::tiberius::{self, ColumnData, FromSql, IntoSql}; use chrono::{DateTime, FixedOffset, NaiveDate, NaiveDateTime, NaiveTime, Utc}; use std::any::Any; -use tiberius::FromSql; /// Created for retrieve the field's name of a field of a struct, giving /// the Canyon's autogenerated enum with the variants that maps this @@ -83,21 +80,18 @@ pub trait ForeignKeyable { fn get_fk_column(&self, column: &str) -> Option<&dyn QueryParameter<'_>>; } -/// To define trait objects that helps to relates the necessary bounds in the 'IN` SQL clause -pub trait InClauseValues: ToSql + ToString {} - /// Generic abstraction to represent any of the Row types /// from the client crates pub trait Row { fn as_any(&self) -> &dyn Any; } -#[cfg(feature = "postgres")] impl Row for tokio_postgres::Row { +#[cfg(feature = "tokio-postgres")] impl Row for tokio_postgres::Row { fn as_any(&self) -> &dyn Any { self } } -#[cfg(feature = "mssql")] impl Row for tiberius::Row { +#[cfg(feature = "tiberius")] impl Row for tiberius::Row { fn as_any(&self) -> &dyn Any { self } @@ -105,6 +99,7 @@ pub trait Row { /// Generic abstraction for hold a Column type that will be one of the Column /// types present in the dependent crates +// #[derive(Copy, Clone)] pub struct Column<'a> { name: &'a str, type_: ColumnType, @@ -116,46 +111,47 @@ impl<'a> Column<'a> { pub fn column_type(&self) -> &ColumnType { &self.type_ } - pub fn type_(&'a self) -> &'_ dyn Type { - match 
&self.type_ { - #[cfg(feature = "postgres")] ColumnType::Postgres(v) => v as &'a dyn Type, - #[cfg(feature = "mssql")] ColumnType::SqlServer(v) => v as &'a dyn Type, - } - } + // pub fn type_(&'a self) -> &'_ dyn Type { + // match (*self).type_ { + // #[cfg(feature = "tokio-postgres")] ColumnType::Postgres(v) => v as &'a dyn Type, + // #[cfg(feature = "tiberius")] ColumnType::SqlServer(v) => v as &'a dyn Type, + // } + // } } pub trait Type { fn as_any(&self) -> &dyn Any; } -#[cfg(feature = "postgres")] impl Type for tokio_postgres::types::Type { +#[cfg(feature = "tokio-postgres")] impl Type for tokio_postgres::types::Type { fn as_any(&self) -> &dyn Any { self } } -#[cfg(feature = "mssql")] impl Type for tiberius::ColumnType { +#[cfg(feature = "tiberius")] impl Type for tiberius::ColumnType { fn as_any(&self) -> &dyn Any { self } } /// Wrapper over the dependencies Column's types +// #[derive(Copy)] pub enum ColumnType { - #[cfg(feature = "postgres")] Postgres(tokio_postgres::types::Type), - #[cfg(feature = "mssql")] SqlServer(tiberius::ColumnType), + #[cfg(feature = "tokio-postgres")] Postgres(tokio_postgres::types::Type), + #[cfg(feature = "tiberius")] SqlServer(tiberius::ColumnType), } pub trait RowOperations { - #[cfg(feature = "postgres")] + #[cfg(feature = "tokio-postgres")] fn get_postgres<'a, Output>(&'a self, col_name: &str) -> Output where Output: tokio_postgres::types::FromSql<'a>; - #[cfg(feature = "mssql")] + #[cfg(feature = "tiberius")] fn get_mssql<'a, Output>(&self, col_name: &str) -> Output where Output: tiberius::FromSql<'a>; - #[cfg(feature = "postgres")] + #[cfg(feature = "tokio-postgres")] fn get_postgres_opt<'a, Output>(&'a self, col_name: &str) -> Option where Output: tokio_postgres::types::FromSql<'a>; - #[cfg(feature = "mssql")] + #[cfg(feature = "tiberius")] fn get_mssql_opt<'a, Output>(&'a self, col_name: &str) -> Option where Output: tokio_postgres::types::FromSql<'a>; @@ -163,7 +159,7 @@ pub trait RowOperations { } impl RowOperations 
for &dyn Row { - #[cfg(feature = "postgres")] + #[cfg(feature = "tokio-postgres")] fn get_postgres<'a, Output>(&'a self, col_name: &str) -> Output where Output: tokio_postgres::types::FromSql<'a> { @@ -172,7 +168,7 @@ impl RowOperations for &dyn Row { }; panic!() // TODO into result and propagate } - #[cfg(feature = "mssql")] + #[cfg(feature = "tiberius")] fn get_mssql<'a, Output>(&'a self, col_name: &str) -> Output where Output: tiberius::FromSql<'a> { @@ -184,7 +180,7 @@ impl RowOperations for &dyn Row { panic!() // TODO into result and propagate } - #[cfg(feature = "postgres")] + #[cfg(feature = "tokio-postgres")] fn get_postgres_opt<'a, Output>(&'a self, col_name: &str) -> Option where Output: tokio_postgres::types::FromSql<'a> { @@ -194,7 +190,7 @@ impl RowOperations for &dyn Row { panic!() // TODO into result and propagate } - #[cfg(feature = "mssql")] + #[cfg(feature = "tiberius")] fn get_mssql_opt<'a, Output>(&'a self, col_name: &str) -> Option where Output: tiberius::FromSql<'a> { @@ -242,8 +238,8 @@ impl RowOperations for &dyn Row { /// Defines a trait for represent type bounds against the allowed /// data types supported by Canyon to be used as query parameters. 
pub trait QueryParameter<'a>: std::fmt::Debug + Sync + Send { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync); - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_>; + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync); + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_>; } /// The implementation of the [`canyon_connection::tiberius`] [`IntoSql`] for the @@ -254,7 +250,7 @@ pub trait QueryParameter<'a>: std::fmt::Debug + Sync + Send { /// a collection of [`QueryParameter<'a>`], in order to allow a workflow /// that is not dependent of the specific type of the argument that holds /// the query parameters of the database connectors -#[cfg(feature = "mssql")] +#[cfg(feature = "tiberius")] impl<'a> IntoSql<'a> for &'a dyn QueryParameter<'a> { fn into_sql(self) -> ColumnData<'a> { self.as_sqlserver_param() @@ -262,198 +258,198 @@ impl<'a> IntoSql<'a> for &'a dyn QueryParameter<'a> { } impl<'a> QueryParameter<'a> for bool { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::Bit(Some(*self)) } } impl<'a> QueryParameter<'a> for i16 { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(*self)) } } impl<'a> QueryParameter<'a> for &i16 { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn 
as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(*self) } } impl<'a> QueryParameter<'a> for Option<&i16> { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for i32 { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(*self)) } } impl<'a> QueryParameter<'a> for &i32 { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] fn 
as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(*self) } } impl<'a> QueryParameter<'a> for Option<&i32> { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for f32 { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some(*self)) } } impl<'a> QueryParameter<'a> for &f32 { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(*self) } } impl<'a> 
QueryParameter<'a> for Option<&f32> { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some( *self.expect("Error on an f32 value on QueryParameter<'_>"), )) } } impl<'a> QueryParameter<'a> for f64 { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some(*self)) } } impl<'a> QueryParameter<'a> for &f64 { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(*self) } } impl<'a> QueryParameter<'a> for Option<&f64> { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = 
"mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some( *self.expect("Error on an f64 value on QueryParameter<'_>"), )) } } impl<'a> QueryParameter<'a> for i64 { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(*self)) } } impl<'a> QueryParameter<'a> for &i64 { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(*self) } } impl<'a> QueryParameter<'a> for Option<&i64> { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for String { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + 
Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Owned(self.to_owned()))) } } impl<'a> QueryParameter<'a> for &String { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Borrowed(self))) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { match self { Some(string) => ColumnData::String(Some(std::borrow::Cow::Owned(string.to_owned()))), None => ColumnData::String(None), @@ -461,10 +457,10 @@ impl<'a> QueryParameter<'a> for Option { } } impl<'a> QueryParameter<'a> for Option<&String> { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { match self { Some(string) => ColumnData::String(Some(std::borrow::Cow::Borrowed(string))), None => ColumnData::String(None), @@ -472,18 +468,18 @@ impl<'a> QueryParameter<'a> for Option<&String> { } } impl<'a> QueryParameter<'_> for &'_ str { - 
#[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Borrowed(*self))) } } impl<'a> QueryParameter<'a> for Option<&'_ str> { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { match *self { Some(str) => ColumnData::String(Some(std::borrow::Cow::Borrowed(str))), None => ColumnData::String(None), @@ -491,82 +487,82 @@ impl<'a> QueryParameter<'a> for Option<&'_ str> { } } impl<'a> QueryParameter<'_> for NaiveDate { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for NaiveTime { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + 
Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for NaiveDateTime { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for DateTime { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option> { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn 
ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for DateTime { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for Option> { - #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index 59fc5bd3..b3395df5 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -19,25 +19,39 @@ use crate::rows::CanyonRows; /// automatically map it to an struct. 
#[async_trait] pub trait Transaction { + // /// Performs a query against the targeted database by the selected or + // /// the defaulted datasource, returning a collection of instances of *T* + // async fn query<'a, S, Z>( + // stmt: S, + // params: Z, + // datasource_name: &'a str, + // ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> + // where + // S: AsRef + Display + Sync + Send + 'a, + // Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, + // { + // Self::query_for_rows(stmt, params, datasource_name) + // .await + // .map(|res| res.into_results()) + // } + /// Performs a query against the targeted database by the selected or /// the defaulted datasource, wrapping the resultant collection of entities - /// in [`super::rows::Rows`]. This ones provides custom operations that - /// facilitates the macro operations. + /// in [`super::rows::Rows`] async fn query<'a, S, Z>( stmt: S, params: Z, datasource_name: &'a str, - ) -> Result> + ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> where S: AsRef + Display + Sync + Send + 'a, - Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, - T: Transaction + RowMapper + Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a { let mut guarded_cache = CACHED_DATABASE_CONN.lock().await; let database_conn = get_database_connection(datasource_name, &mut guarded_cache); - match database_conn { - #[cfg(feature = "postgres")] DatabaseConnection::Postgres(_) => { + match *database_conn { + #[cfg(feature = "tokio-postgres")] DatabaseConnection::Postgres(_) => { postgres_query_launcher::launch::( database_conn, stmt.to_string(), @@ -45,7 +59,7 @@ pub trait Transaction { ) .await } - #[cfg(feature = "mssql")] DatabaseConnection::SqlServer(_) => { + #[cfg(feature = "tiberius")] DatabaseConnection::SqlServer(_) => { sqlserver_query_launcher::launch::( database_conn, &mut stmt.to_string(), @@ -147,21 +161,17 @@ where fn delete_query_datasource(datasource_name: &str) -> 
DeleteQueryBuilder<'_, T>; } -#[cfg(feature = "postgres")] +#[cfg(feature = "tokio-postgres")] mod postgres_query_launcher { use crate::bounds::QueryParameter; use canyon_connection::canyon_database_connector::DatabaseConnection; - use crate::crud::Transaction; - use crate::mapper::RowMapper; use crate::rows::CanyonRows; pub async fn launch<'a, T>( db_conn: &DatabaseConnection, stmt: String, params: &'a [&'_ dyn QueryParameter<'_>], - ) -> Result> - where T: Transaction + RowMapper - { + ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> { let mut m_params = Vec::new(); for param in params { m_params.push(param.as_postgres_param()); @@ -179,24 +189,21 @@ mod postgres_query_launcher { } -#[cfg(feature = "mssql")] +#[cfg(feature = "tiberius")] mod sqlserver_query_launcher { use crate::{ bounds::QueryParameter, canyon_connection::{canyon_database_connector::DatabaseConnection, tiberius::Query}, }; - use crate::crud::Transaction; - use crate::mapper::RowMapper; use crate::rows::CanyonRows; pub async fn launch<'a, T, Z>( db_conn: &mut DatabaseConnection, stmt: &mut String, params: Z, - ) -> Result> + ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> where - Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, - T: Transaction + RowMapper + Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a { // Re-generate de insert statement to adequate it to the SQL SERVER syntax to retrieve the PK value(s) after insert if stmt.contains("RETURNING") { diff --git a/canyon_crud/src/mapper.rs b/canyon_crud/src/mapper.rs index 0114bd3a..7996c0fc 100644 --- a/canyon_crud/src/mapper.rs +++ b/canyon_crud/src/mapper.rs @@ -1,5 +1,5 @@ -#[cfg(feature = "postgres")] use canyon_connection::tokio_postgres; -#[cfg(feature = "mssql")] use canyon_connection::tiberius; +#[cfg(feature = "tokio-postgres")] use canyon_connection::tokio_postgres; +#[cfg(feature = "tiberius")] use canyon_connection::tiberius; use crate::crud::Transaction; @@ -7,7 +7,6 @@ use 
crate::crud::Transaction; /// from some supported database in Canyon-SQL into a user's defined /// type `T` pub trait RowMapper>: Sized { - fn deserialize_postgresql(row: &tokio_postgres::Row) -> T; - - fn deserialize_sqlserver(row: &tiberius::Row) -> T; + #[cfg(feature = "tokio-postgres")] fn deserialize_postgresql(row: &tokio_postgres::Row) -> T; + #[cfg(feature = "tiberius")] fn deserialize_sqlserver(row: &tiberius::Row) -> T; } diff --git a/canyon_crud/src/query_elements/query_builder.rs b/canyon_crud/src/query_elements/query_builder.rs index c26c5642..9d102f87 100644 --- a/canyon_crud/src/query_elements/query_builder.rs +++ b/canyon_crud/src/query_elements/query_builder.rs @@ -26,7 +26,7 @@ pub mod ops { /// hierarchy. /// /// For example, the [`super::QueryBuilder`] type holds the data - /// necessary for track the SQL sentece while it's being generated + /// necessary for track the SQL sentence while it's being generated /// thought the fluent builder, and provides the behaviour of /// the common elements defined in this trait. /// @@ -44,7 +44,7 @@ pub mod ops { /// just one type. pub trait QueryBuilder<'a, T> where - T: Debug + CrudOperations + Transaction + RowMapper, + T: CrudOperations + Transaction + RowMapper, { /// Returns a read-only reference to the underlying SQL sentence, /// with the same lifetime as self @@ -173,7 +173,7 @@ where self.query.params.to_vec(), self.datasource_name, ) - .await?) 
+ .await?.into_results::()) } pub fn r#where>(&mut self, r#where: Z, op: impl Operator) { diff --git a/canyon_crud/src/rows.rs b/canyon_crud/src/rows.rs index 157fde81..669407ae 100644 --- a/canyon_crud/src/rows.rs +++ b/canyon_crud/src/rows.rs @@ -1,5 +1,4 @@ -use tokio_postgres::types::FromSql; -use crate::bounds::{PrimaryKey, QueryParameter}; +use std::marker::PhantomData; use crate::crud::Transaction; use crate::mapper::RowMapper; @@ -9,12 +8,13 @@ use crate::mapper::RowMapper; /// Even tho the wrapping seems meaningless, this allows us to provide internal /// operations that are too difficult or to ugly to implement in the macros that /// will call the query method of Crud. -pub enum CanyonRows { - #[cfg(feature = "postgres")] Postgres(Vec), - #[cfg(feature = "mssql")] Tiberius(Vec>) +pub enum CanyonRows { + #[cfg(feature = "tokio-postgres")] Postgres(Vec), + #[cfg(feature = "tiberius")] Tiberius(Vec>), + UnusableTypeMarker(PhantomData) } -impl CanyonRows { +impl CanyonRows { // /// Type constructor, returning the correct variant of Self wrapping the collection of results // /// by the given database connection // pub fn new( @@ -22,55 +22,24 @@ impl CanyonRows { // res: Vec // ) -> Self { // match conn { - // #[cfg(feature = "postgres")] DatabaseConnection::Postgres(_) => Self::Postgres(res), - // #[cfg(feature = "mssql")] DatabaseConnection::SqlServer(_) => Self::Tiberius(res) + // #[cfg(feature = "tokio-postgres")] DatabaseConnection::Postgres(_) => Self::Postgres(res), + // #[cfg(feature = "tiberius")] DatabaseConnection::SqlServer(_) => Self::Tiberius(res) // } // } /// Consumes `self` and returns the wrapped [`std::vec::Vec`] with the instances of T - pub fn into_results(self) -> Vec where T: Transaction + RowMapper { + pub fn into_results>(self) -> Vec where T: Transaction { match self { - #[cfg(feature = "postgres")] Self::Postgres(v) => v + #[cfg(feature = "tokio-postgres")] Self::Postgres(v) => v .iter() - .map(|row| T::deserialize_postgresql(row)) + 
.map(|row| Z::deserialize_postgresql(row)) .collect(), - #[cfg(feature = "mssql")] Self::Tiberius(v) => v + #[cfg(feature = "tiberius")] Self::Tiberius(v) => v .iter() .flatten() - .map(|row| T::deserialize_sqlserver(&row)) - .collect() - } - } - - /// - pub fn set_primary_key_after_insert<'a, T, PkType: PrimaryKey>(self, pk: &str) -> PkType { - match self { - #[cfg(feature = "postgres")] Self::Postgres(v) => { - v.get(0) - .expect("No value found on the returning clause") - .get::<&str, PkType>(pk) - // .to_owned(); - } - #[cfg(feature = "mssql")] Self::Tiberius(v) => { - v.into_iter() - .flatten() - .collect::>() - .remove(0) - .get::(pk) - .expect("SQL Server primary key type failed to be set as value") - // .to_owned() - } + .map(|row| Z::deserialize_sqlserver(&row)) + .collect(), + _ => panic!("This branch will never ever should be reachable") } } } - -// r.iter().map(|row| T::deserialize_postgresql(row)).collect() -// .map(|row| T::deserialize_sqlserver(&row)) - - -// canyon_sql::crud::DatabaseType::SqlServer => { -// self.#pk_ident = res.sqlserver.get(0) -// .expect("No value found on the returning clause") -// .get::<#pk_type, &str>(#primary_key) -// .expect("SQL Server primary key type failed to be set as value") -// .to_owned(); diff --git a/canyon_macros/src/query_operations/insert.rs b/canyon_macros/src/query_operations/insert.rs index d33d5086..45ce1187 100644 --- a/canyon_macros/src/query_operations/insert.rs +++ b/canyon_macros/src/query_operations/insert.rs @@ -50,13 +50,28 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri #primary_key ); - <#ty as canyon_sql::crud::Transaction<#ty>>::query_for_rows( + let rows = <#ty as canyon_sql::crud::Transaction<#ty>>::query_for_rows( stmt, values, datasource_name - ).await - .set_primary_key_after_insert(); - } + ).await; + + match rows { + #[cfg(feature = "tokio-postgres")] Self::Postgres(v) => { + v.remove(0) + .expect("No value found on the returning clause for Postgres") + 
.get::<&str, #pk_type>(#primary_key) + } + #[cfg(feature = "tiberius")] Self::Tiberius(v) => { + v.into_iter() + .flatten() + .collect::>() + .remove(0) + .get::<#pk_type, &str>(#primary_key) + .expect("SQL Server primary key type failed to be set as value") + } + } + } } else { quote! { let stmt = format!( diff --git a/canyon_observer/Cargo.toml b/canyon_observer/Cargo.toml index 67918e37..9f59b093 100644 --- a/canyon_observer/Cargo.toml +++ b/canyon_observer/Cargo.toml @@ -10,19 +10,18 @@ license.workspace = true description.workspace = true [dependencies] -tokio = { version = "1.9.0", features = ["full"] } -tokio-postgres = { version = "0.7.2" , features=["with-chrono-0_4"] } -async-trait = { version = "0.1.50" } -regex = "1.5" -walkdir = "2" +canyon_crud = { workspace = true } +canyon_connection = { workspace = true } +tokio = { workspace = true } +tokio-postgres = { workspace = true, optional = true } +tiberius = { workspace = true, optional = true } +async-trait = { workspace = true } +# transform to opts with migrations feature +regex = "1.5" # opt +walkdir = "2" # opt proc-macro2 = "1.0.27" syn = { version = "1.0.86", features = ["full", "parsing"] } quote = "1.0.9" - -# Debug partialdebug = "0.2.0" -# Internal dependencies -canyon_crud = { version = "0.2.0", path = "../canyon_crud" } -canyon_connection = { version = "0.2.0", path = "../canyon_connection" } diff --git a/canyon_observer/src/lib.rs b/canyon_observer/src/lib.rs index 1a0766e5..41e0dd42 100644 --- a/canyon_observer/src/lib.rs +++ b/canyon_observer/src/lib.rs @@ -11,6 +11,7 @@ /// in order to perform the migrations pub mod migrations; +extern crate canyon_connection; extern crate canyon_crud; mod constants; diff --git a/canyon_observer/src/migrations/handler.rs b/canyon_observer/src/migrations/handler.rs index 739b4cae..aafc6fd7 100644 --- a/canyon_observer/src/migrations/handler.rs +++ b/canyon_observer/src/migrations/handler.rs @@ -52,7 +52,7 @@ impl Migrations { // Tracked entities that must 
be migrated whenever Canyon starts let schema_status = Self::fetch_database(&datasource.name, datasource.get_db_type()).await; - let database_tables_schema_info = Self::map_rows(schema_status); + let database_tables_schema_info = Self::map_rows(schema_status, datasource.get_db_type()); // We filter the tables from the schema that aren't Canyon entities let mut user_database_tables = vec![]; @@ -98,21 +98,26 @@ impl Migrations { panic!( "Error querying the schema information for the datasource: {datasource_name}" ) - }) + }).into_results() } /// Handler for parse the result of query the information of some database schema, /// and extract the content of the returned rows into custom structures with /// the data well organized for every entity present on that schema - fn map_rows(db_results: Vec) -> Vec { + fn map_rows(db_results: Vec, db_type: DatabaseType) -> Vec { let mut schema_info: Vec = Vec::new(); + let row_retriever_fn_ptr = match db_type { + DatabaseType::PostgreSql => RowOperations::get_postgres::<&str>, + DatabaseType::SqlServer => RowOperations::get_mssql::<&str>, + }; for res_row in db_results.iter() .map(|row| &row as &dyn Row) { let unique_table = schema_info .iter_mut() - .find(|table| table.table_name == *res_row.get::<&str>("table_name").to_owned()); + // TODO To be able to remove row from our code, use a match statement to get table name + .find(|table| table.table_name == *row_retriever_fn_ptr("table_name").to_owned()); match unique_table { Some(table) => { /* If a table entity it's already present on the collection, we add it @@ -124,7 +129,7 @@ impl Migrations { collection yet, we must create a new instance and attach it the founded columns data in this iteration */ let mut new_table = TableMetadata { - table_name: res_row.get::<&str>("table_name").to_owned(), + table_name: row_retriever_fn_ptr("table_name").to_owned(), columns: Vec::new(), }; Self::get_columns_metadata(res_row, &mut new_table); diff --git 
a/canyon_observer/src/migrations/information_schema.rs b/canyon_observer/src/migrations/information_schema.rs index bdf9f48e..527f4084 100644 --- a/canyon_observer/src/migrations/information_schema.rs +++ b/canyon_observer/src/migrations/information_schema.rs @@ -1,4 +1,5 @@ -use canyon_connection::{tiberius::ColumnType as TIB_TY, tokio_postgres::types::Type as TP_TYP}; +#[cfg(feature = "tokio-postgres")] use canyon_connection::tokio_postgres::types::Type as TP_TYP; +#[cfg(feature = "tiberius")] use canyon_connection::tiberius::ColumnType as TIB_TY; use canyon_crud::bounds::{Column, ColumnType, Row, RowOperations}; /// Model that represents the database entities that belongs to the current schema. @@ -40,7 +41,7 @@ impl ColumnMetadataTypeValue { /// Retrieves the value stored in a [`Column`] for a passed [`Row`] pub fn get_value(row: &dyn Row, col: &Column) -> Self { match col.column_type() { - ColumnType::Postgres(v) => { + #[cfg(feature = "tokio-postgres")] ColumnType::Postgres(v) => { match *v { TP_TYP::NAME | TP_TYP::VARCHAR | TP_TYP::TEXT => { Self::StringValue(row.get_opt::<&str>(col.name()).map(|opt| opt.to_owned())) @@ -49,7 +50,7 @@ impl ColumnMetadataTypeValue { _ => Self::NoneValue, // TODO watchout this one } } - ColumnType::SqlServer(v) => match v { + #[cfg(feature = "tiberius")] ColumnType::SqlServer(v) => match v { TIB_TY::NChar | TIB_TY::NVarchar | TIB_TY::BigChar | TIB_TY::BigVarChar => { Self::StringValue(row.get_opt::<&str>(col.name()).map(|opt| opt.to_owned())) } diff --git a/canyon_observer/src/migrations/memory.rs b/canyon_observer/src/migrations/memory.rs index aac81d3b..d5a1311a 100644 --- a/canyon_observer/src/migrations/memory.rs +++ b/canyon_observer/src/migrations/memory.rs @@ -219,10 +219,9 @@ impl CanyonMemory { /// Generates, if not exists the `canyon_memory` table #[cfg(not(cargo_check))] async fn create_memory(datasource_name: &str, database_type: &DatabaseType) { - let query = if database_type == &DatabaseType::PostgreSql { - 
constants::postgresql_queries::CANYON_MEMORY_TABLE - } else { - constants::mssql_queries::CANYON_MEMORY_TABLE + let query = match database_type { + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => constants::postgresql_queries::CANYON_MEMORY_TABLE, + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => constants::mssql_queries::CANYON_MEMORY_TABLE }; Self::query(query, [], datasource_name) From 41843a4b87f05855d7a9077bac60bff89f5cf15f Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Mon, 17 Apr 2023 14:33:00 +0200 Subject: [PATCH 07/23] #wip - Addressing the issues of the CanyonMemory module to the new source code structure --- Cargo.toml | 4 +- canyon_crud/src/bounds.rs | 10 ++++ canyon_crud/src/rows.rs | 29 +++++++----- canyon_observer/src/migrations/handler.rs | 4 +- canyon_observer/src/migrations/memory.rs | 58 +++++++++++++++-------- 5 files changed, 71 insertions(+), 34 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 01e1eafe..919cdf24 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,7 +55,7 @@ license = "MIT" description = "A Rust ORM and QueryBuilder" [features] -default = ["postgres", "canyon_connection/tokio-postgres", "canyon_crud/tokio-postgres", "canyon_observer/tokio-postgres"] +default = ["postgres"] postgres = ["canyon_connection/tokio-postgres", "canyon_crud/tokio-postgres", "canyon_observer/tokio-postgres"] -mssql = ["canyon_connection/tiberius"] +mssql = ["canyon_connection/tiberius", "canyon_observer/tiberius", "canyon_observer/tiberius"] mssql-integrated-auth = ["mssql"] \ No newline at end of file diff --git a/canyon_crud/src/bounds.rs b/canyon_crud/src/bounds.rs index 218f337b..93b00ed1 100644 --- a/canyon_crud/src/bounds.rs +++ b/canyon_crud/src/bounds.rs @@ -91,11 +91,21 @@ pub trait Row { self } } +#[cfg(feature = "tokio-postgres")] impl Row for &tokio_postgres::Row { + fn as_any(&self) -> &dyn Any { + *self + } +} #[cfg(feature = "tiberius")] impl Row for tiberius::Row { fn as_any(&self) -> &dyn Any { self } } 
+#[cfg(feature = "tiberius")] impl Row for &tiberius::Row { + fn as_any(&self) -> &dyn Any { + self + } +} /// Generic abstraction for hold a Column type that will be one of the Column /// types present in the dependent crates diff --git a/canyon_crud/src/rows.rs b/canyon_crud/src/rows.rs index 669407ae..3760b76d 100644 --- a/canyon_crud/src/rows.rs +++ b/canyon_crud/src/rows.rs @@ -15,17 +15,24 @@ pub enum CanyonRows { } impl CanyonRows { - // /// Type constructor, returning the correct variant of Self wrapping the collection of results - // /// by the given database connection - // pub fn new( - // conn: &DatabaseConnection, - // res: Vec - // ) -> Self { - // match conn { - // #[cfg(feature = "tokio-postgres")] DatabaseConnection::Postgres(_) => Self::Postgres(res), - // #[cfg(feature = "tiberius")] DatabaseConnection::SqlServer(_) => Self::Tiberius(res) - // } - // } + #[cfg(feature = "tokio-postgres")] + pub fn get_postgres_rows(self) -> Vec { + match self { + Self::Postgres(v) => v, + _ => panic!("This branch will never ever should be reachable") + } + } + + #[cfg(feature = "tiberius")] + pub fn get_tiberius_rows(self) -> Vec { + match self { + Self::Tiberius(v) => v + .iter() + .flatten() + .collect(), + _ => panic!("This branch will never ever should be reachable") + } + } /// Consumes `self` and returns the wrapped [`std::vec::Vec`] with the instances of T pub fn into_results>(self) -> Vec where T: Transaction { diff --git a/canyon_observer/src/migrations/handler.rs b/canyon_observer/src/migrations/handler.rs index aafc6fd7..cb9f27ee 100644 --- a/canyon_observer/src/migrations/handler.rs +++ b/canyon_observer/src/migrations/handler.rs @@ -107,8 +107,8 @@ impl Migrations { fn map_rows(db_results: Vec, db_type: DatabaseType) -> Vec { let mut schema_info: Vec = Vec::new(); let row_retriever_fn_ptr = match db_type { - DatabaseType::PostgreSql => RowOperations::get_postgres::<&str>, - DatabaseType::SqlServer => RowOperations::get_mssql::<&str>, + #[cfg(feature 
= "tokio-postgres")] DatabaseType::PostgreSql => RowOperations::get_postgres::<&str>, + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => RowOperations::get_mssql::<&str>, }; for res_row in db_results.iter() diff --git a/canyon_observer/src/migrations/memory.rs b/canyon_observer/src/migrations/memory.rs index d5a1311a..735b8d2c 100644 --- a/canyon_observer/src/migrations/memory.rs +++ b/canyon_observer/src/migrations/memory.rs @@ -1,10 +1,9 @@ use crate::constants; -use canyon_crud::{bounds::RowOperations, crud::Transaction, DatabaseType, DatasourceConfig}; +use canyon_crud::{crud::Transaction, DatabaseType, DatasourceConfig}; use regex::Regex; use std::collections::HashMap; use std::fs; use walkdir::WalkDir; -use canyon_crud::bounds::Row; use super::register_types::CanyonRegisterEntity; @@ -71,21 +70,42 @@ impl CanyonMemory { let res = Self::query("SELECT * FROM canyon_memory", [], &datasource.name) .await .expect("Error querying Canyon Memory"); - let mem_results = res.map(|row| &row as &dyn Row); // Manually maps the results let mut db_rows = Vec::new(); - for row in mem_results { - let db_row = CanyonMemoryRow { - id: row.get::("id"), - filepath: row.get::<&str>("filepath"), - struct_name: row.get::<&str>("struct_name"), - declared_table_name: row.get::<&str>("declared_table_name"), - }; - db_rows.push(db_row); + #[cfg(feature = "tokio-postgres")] { + let mem_results: Vec = res.get_postgres_rows(); + for row in mem_results { + let db_row = CanyonMemoryRow { + id: row.get::<&str, i32>("id"), + filepath: row.get::<&str, String>("filepath"), + struct_name: row.get::<&str, String>("struct_name").to_owned(), + declared_table_name: row.get::<&str, String>("declared_table_name").to_owned(), + }; + db_rows.push(db_row); + } + } + #[cfg(feature = "tiberius")] { + let mem_results: Vec = res.get_tiberius_rows(); + for row in mem_results { + let db_row = CanyonMemoryRow { + id: row.get::("id"), + filepath: row.get::<&str, &str>("filepath"), + struct_name: 
row.get::<&str, &str>("struct_name"), + declared_table_name: row.get::<&str, &str>("declared_table_name"), + }; + db_rows.push(db_row); + } } - // Parses the source code files looking for the #[canyon_entity] annotated classes + Self::populate_memory(datasource, canyon_entities, db_rows).await + } + + async fn populate_memory( + datasource: &DatasourceConfig, + canyon_entities: &[CanyonRegisterEntity<'_>], + db_rows: Vec + ) -> CanyonMemory { let mut mem = Self { memory: Vec::new(), renamed_entities: HashMap::new(), @@ -107,7 +127,7 @@ impl CanyonMemory { && old.struct_name == _struct.struct_name && old.declared_table_name == _struct.declared_table_name) { - updates.push(old.struct_name); + updates.push(&old.struct_name); let stmt = format!( "UPDATE canyon_memory SET filepath = '{}', struct_name = '{}', declared_table_name = '{}' \ WHERE id = {}", @@ -138,12 +158,12 @@ impl CanyonMemory { } // Deletes the records from canyon_memory, because they stopped to be tracked by Canyon - for db_row in db_rows.into_iter() { + for db_row in db_rows.iter() { if !mem .memory .iter() .any(|entity| entity.struct_name == db_row.struct_name) - && !updates.contains(&db_row.struct_name) + && !updates.contains(&&(db_row.struct_name)) { save_canyon_memory_query( format!( @@ -250,11 +270,11 @@ fn save_canyon_memory_query(stmt: String, ds_name: &str) { /// Represents a single row from the `canyon_memory` table #[derive(Debug)] -struct CanyonMemoryRow<'a> { +struct CanyonMemoryRow { id: i32, - filepath: &'a str, - struct_name: &'a str, - declared_table_name: &'a str, + filepath: String, + struct_name: String, + declared_table_name: String, } /// Represents the data that will be serialized in the `canyon_memory` table From ad9e1eab6c098608c184834d5f847acf742bdc60 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Tue, 18 Apr 2023 09:10:15 +0200 Subject: [PATCH 08/23] #wip - Addressing the issues of the Handler module to the new source code structure --- canyon_crud/src/crud.rs | 2 +- 
canyon_crud/src/rows.rs | 38 ++++++++++++++++--- canyon_observer/src/migrations/handler.rs | 26 ++++++------- .../src/migrations/information_schema.rs | 4 +- 4 files changed, 47 insertions(+), 23 deletions(-) diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index b3395df5..895019ca 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -236,6 +236,6 @@ mod sqlserver_query_launcher { .into_results() .await?; - Ok(CanyonRows::Tiberius(_results)) + Ok(CanyonRows::Tiberius(_results.iter().flatten().collect())) } } diff --git a/canyon_crud/src/rows.rs b/canyon_crud/src/rows.rs index 3760b76d..98e00507 100644 --- a/canyon_crud/src/rows.rs +++ b/canyon_crud/src/rows.rs @@ -10,7 +10,7 @@ use crate::mapper::RowMapper; /// will call the query method of Crud. pub enum CanyonRows { #[cfg(feature = "tokio-postgres")] Postgres(Vec), - #[cfg(feature = "tiberius")] Tiberius(Vec>), + #[cfg(feature = "tiberius")] Tiberius(Vec), UnusableTypeMarker(PhantomData) } @@ -26,10 +26,7 @@ impl CanyonRows { #[cfg(feature = "tiberius")] pub fn get_tiberius_rows(self) -> Vec { match self { - Self::Tiberius(v) => v - .iter() - .flatten() - .collect(), + Self::Tiberius(v) => v, _ => panic!("This branch will never ever should be reachable") } } @@ -43,10 +40,39 @@ impl CanyonRows { .collect(), #[cfg(feature = "tiberius")] Self::Tiberius(v) => v .iter() - .flatten() .map(|row| Z::deserialize_sqlserver(&row)) .collect(), _ => panic!("This branch will never ever should be reachable") } } } + +#[cfg(feature = "tokio-postgres")] +impl IntoIterator for CanyonRows { + type Item = tokio_postgres::Row; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + #[cfg(feature = "tokio-postgres")] { + match self { + Self::Postgres(v) => v.into_iter(), + _ => panic!() + } + } + } +} + +#[cfg(feature = "tiberius")] +impl IntoIterator for CanyonRows { + type Item = tiberius::Row; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { 
+ #[cfg(feature = "tokio-postgres")] { + match self { + Self::Postgres(v) => v.into_iter(), + _ => panic!() + } + } + } +} diff --git a/canyon_observer/src/migrations/handler.rs b/canyon_observer/src/migrations/handler.rs index cb9f27ee..79152756 100644 --- a/canyon_observer/src/migrations/handler.rs +++ b/canyon_observer/src/migrations/handler.rs @@ -1,5 +1,6 @@ use canyon_connection::{datasources::Migrations as MigrationsStatus, DATASOURCES}; use partialdebug::placeholder::PartialDebug; +use canyon_crud::rows::CanyonRows; use crate::{ canyon_crud::{ @@ -86,38 +87,35 @@ impl Migrations { async fn fetch_database( datasource_name: &str, db_type: DatabaseType, - ) -> Vec { + ) -> CanyonRows { let query = match db_type { - DatabaseType::PostgreSql => constants::postgresql_queries::FETCH_PUBLIC_SCHEMA, - DatabaseType::SqlServer => constants::mssql_queries::FETCH_PUBLIC_SCHEMA, + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => constants::postgresql_queries::FETCH_PUBLIC_SCHEMA, + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => constants::mssql_queries::FETCH_PUBLIC_SCHEMA, }; - Self::query(query, [], datasource_name) - .await - .unwrap_or_else(|_| { - panic!( - "Error querying the schema information for the datasource: {datasource_name}" - ) - }).into_results() + Self::query(query, [], datasource_name).await + .unwrap_or_else(|_| {panic!( + "Error querying the schema information for the datasource: {datasource_name}" + )}) } /// Handler for parse the result of query the information of some database schema, /// and extract the content of the returned rows into custom structures with /// the data well organized for every entity present on that schema - fn map_rows(db_results: Vec, db_type: DatabaseType) -> Vec { + fn map_rows(db_results: CanyonRows, db_type: DatabaseType) -> Vec { let mut schema_info: Vec = Vec::new(); let row_retriever_fn_ptr = match db_type { #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => 
RowOperations::get_postgres::<&str>, #[cfg(feature = "tiberius")] DatabaseType::SqlServer => RowOperations::get_mssql::<&str>, }; - for res_row in db_results.iter() + for res_row in db_results.into_iter() .map(|row| &row as &dyn Row) { let unique_table = schema_info .iter_mut() // TODO To be able to remove row from our code, use a match statement to get table name - .find(|table| table.table_name == *row_retriever_fn_ptr("table_name").to_owned()); + .find(|table| table.table_name == row_retriever_fn_ptr(&res_row, "table_name")); match unique_table { Some(table) => { /* If a table entity it's already present on the collection, we add it @@ -129,7 +127,7 @@ impl Migrations { collection yet, we must create a new instance and attach it the founded columns data in this iteration */ let mut new_table = TableMetadata { - table_name: row_retriever_fn_ptr("table_name").to_owned(), + table_name: row_retriever_fn_ptr(&res_row, "table_name").to_string(), columns: Vec::new(), }; Self::get_columns_metadata(res_row, &mut new_table); diff --git a/canyon_observer/src/migrations/information_schema.rs b/canyon_observer/src/migrations/information_schema.rs index 527f4084..91bc4db4 100644 --- a/canyon_observer/src/migrations/information_schema.rs +++ b/canyon_observer/src/migrations/information_schema.rs @@ -44,9 +44,9 @@ impl ColumnMetadataTypeValue { #[cfg(feature = "tokio-postgres")] ColumnType::Postgres(v) => { match *v { TP_TYP::NAME | TP_TYP::VARCHAR | TP_TYP::TEXT => { - Self::StringValue(row.get_opt::<&str>(col.name()).map(|opt| opt.to_owned())) + Self::StringValue(row.get_postgres_opt::<&str>(col.name()).map(|opt| opt.to_owned())) } - TP_TYP::INT4 => Self::IntValue(row.get_opt::(col.name())), + TP_TYP::INT4 => Self::IntValue(row.get_postgres_opt::(col.name())), _ => Self::NoneValue, // TODO watchout this one } } From 6d9c66381ba2307b25e78989a41affbbd6b621a3 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Tue, 18 Apr 2023 12:42:26 +0200 Subject: [PATCH 09/23] #wip - Reaching 
the goal of the conditional compilation by database client --- .../src/canyon_database_connector.rs | 1 + canyon_crud/src/bounds.rs | 51 +-- canyon_crud/src/rows.rs | 16 +- canyon_observer/src/constants.rs | 5 +- canyon_observer/src/migrations/handler.rs | 36 +- .../src/migrations/information_schema.rs | 4 +- canyon_observer/src/migrations/processor.rs | 312 +++++++++--------- .../src/migrations/register_types.rs | 58 +--- src/lib.rs | 13 +- 9 files changed, 242 insertions(+), 254 deletions(-) diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index 5e324cde..c58451fb 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -135,6 +135,7 @@ impl DatabaseConnection { } #[cfg(feature = "tokio-postgres")] + #[allow(unreachable_patterns)] pub fn postgres_connection(&self) -> Option<&PostgreSqlConnection> { match self { DatabaseConnection::Postgres(conn) => Some(conn), diff --git a/canyon_crud/src/bounds.rs b/canyon_crud/src/bounds.rs index 93b00ed1..ce0b498b 100644 --- a/canyon_crud/src/bounds.rs +++ b/canyon_crud/src/bounds.rs @@ -215,31 +215,36 @@ impl RowOperations for &dyn Row { fn columns(&self) -> Vec { let mut cols = vec![]; - /* if self.as_any().is::() { - self.as_any() - .downcast_ref::() - .expect("Not a tokio postgres Row for column") - .columns() - .iter() - .for_each(|c| { - cols.push(Column { - name: c.name(), - type_: ColumnType::Postgres(c.type_().to_owned()), + #[cfg(feature = "tokio-postgres")] { + if self.as_any().is::() { + self.as_any() + .downcast_ref::() + .expect("Not a tokio postgres Row for column") + .columns() + .iter() + .for_each(|c| { + cols.push(Column { + name: c.name(), + type_: ColumnType::Postgres(c.type_().to_owned()), + }) }) - }) - } else { - self.as_any() - .downcast_ref::() - .expect("Not a Tiberius Row for column") - .columns() - .iter() - .for_each(|c| { - cols.push(Column { - name: c.name(), - 
type_: ColumnType::SqlServer(c.column_type()), + } + } + #[cfg(feature = "tiberius")] { + if self.as_any().is::() { + self.as_any() + .downcast_ref::() + .expect("Not a Tiberius Row for column") + .columns() + .iter() + .for_each(|c| { + cols.push(Column { + name: c.name(), + type_: ColumnType::SqlServer(c.column_type()), + }) }) - }) - }; */ + }; + } cols } diff --git a/canyon_crud/src/rows.rs b/canyon_crud/src/rows.rs index 98e00507..efddfcb8 100644 --- a/canyon_crud/src/rows.rs +++ b/canyon_crud/src/rows.rs @@ -53,11 +53,9 @@ impl IntoIterator for CanyonRows { type IntoIter = std::vec::IntoIter; fn into_iter(self) -> Self::IntoIter { - #[cfg(feature = "tokio-postgres")] { - match self { - Self::Postgres(v) => v.into_iter(), - _ => panic!() - } + match self { + Self::Postgres(v) => v.into_iter(), + _ => panic!() } } } @@ -68,11 +66,9 @@ impl IntoIterator for CanyonRows { type IntoIter = std::vec::IntoIter; fn into_iter(self) -> Self::IntoIter { - #[cfg(feature = "tokio-postgres")] { - match self { - Self::Postgres(v) => v.into_iter(), - _ => panic!() - } + match self { + Self::Tiberius(v) => v.into_iter(), + _ => panic!() } } } diff --git a/canyon_observer/src/constants.rs b/canyon_observer/src/constants.rs index c9db74e8..ae746e6e 100644 --- a/canyon_observer/src/constants.rs +++ b/canyon_observer/src/constants.rs @@ -1,5 +1,6 @@ pub const NUMERIC_PK_DATATYPE: [&str; 6] = ["i16", "u16", "i32", "u32", "i64", "u64"]; +#[cfg(feature = "tokio-postgres")] pub mod postgresql_queries { pub static CANYON_MEMORY_TABLE: &str = "CREATE TABLE IF NOT EXISTS canyon_memory ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, @@ -35,6 +36,7 @@ pub mod postgresql_queries { table_schema = 'public';"; } +#[cfg(feature = "tiberius")] pub mod mssql_queries { pub static CANYON_MEMORY_TABLE: &str = "IF OBJECT_ID(N'[dbo].[canyon_memory]', N'U') IS NULL BEGIN @@ -142,7 +144,7 @@ pub mod rust_type { pub const OPT_NAIVE_DATE_TIME: &str = "Option"; } -/// TODO +#[cfg(feature = 
"tokio-postgres")] pub mod postgresql_type { pub const INT_8: &str = "int8"; pub const SMALL_INT: &str = "smallint"; @@ -155,6 +157,7 @@ pub mod postgresql_type { pub const DATETIME: &str = "timestamp without time zone"; } +#[cfg(feature = "tiberius")] pub mod sqlserver_type { pub const TINY_INT: &str = "TINY INT"; pub const SMALL_INT: &str = "SMALL INT"; diff --git a/canyon_observer/src/migrations/handler.rs b/canyon_observer/src/migrations/handler.rs index 79152756..884b86f4 100644 --- a/canyon_observer/src/migrations/handler.rs +++ b/canyon_observer/src/migrations/handler.rs @@ -104,33 +104,32 @@ impl Migrations { /// the data well organized for every entity present on that schema fn map_rows(db_results: CanyonRows, db_type: DatabaseType) -> Vec { let mut schema_info: Vec = Vec::new(); - let row_retriever_fn_ptr = match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => RowOperations::get_postgres::<&str>, - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => RowOperations::get_mssql::<&str>, - }; for res_row in db_results.into_iter() - .map(|row| &row as &dyn Row) + // .map(|row| &row as &dyn Row) { let unique_table = schema_info .iter_mut() // TODO To be able to remove row from our code, use a match statement to get table name - .find(|table| table.table_name == row_retriever_fn_ptr(&res_row, "table_name")); + .find(|table| check_for_table_name(table, &res_row as &dyn Row)); match unique_table { Some(table) => { /* If a table entity it's already present on the collection, we add it the founded columns related to the table */ - Self::get_columns_metadata(res_row, table); + Self::get_columns_metadata(&res_row as &dyn Row, table); } None => { /* If there's no table for a given "table_name" property on the collection yet, we must create a new instance and attach it the founded columns data in this iteration */ let mut new_table = TableMetadata { - table_name: row_retriever_fn_ptr(&res_row, "table_name").to_string(), + table_name: match 
db_type { + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => get_table_name_from_tp_row(&res_row), + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => get_table_name_from_tib_row(&res_row), + }, columns: Vec::new(), }; - Self::get_columns_metadata(res_row, &mut new_table); + Self::get_columns_metadata(&res_row as &dyn Row, &mut new_table); schema_info.push(new_table); } }; @@ -223,3 +222,22 @@ impl Migrations { }; } } + + +#[cfg(feature = "tokio-postgres")] +fn get_table_name_from_tp_row(res_row: &tokio_postgres::Row) -> String { + res_row.get::<&str, String>("table_name") +} +#[cfg(feature = "tiberius")] +fn get_table_name_from_tib_row(res_row: &tiberius::Row) -> String { + res_row.get::<&str, &str>("table_name").unwrap_or_default().to_string() +} + +fn check_for_table_name(table: &&mut TableMetadata, res_row: &dyn Row) -> bool { + #[cfg(feature = "tokio-postgres")] { + table.table_name == res_row.get_postgres::<&str>("table_name") + } + #[cfg(feature = "tiberius")] { + table.table_name == row_retriever_fn_ptr(&res_row, "table_name") + } +} diff --git a/canyon_observer/src/migrations/information_schema.rs b/canyon_observer/src/migrations/information_schema.rs index 91bc4db4..d93c7007 100644 --- a/canyon_observer/src/migrations/information_schema.rs +++ b/canyon_observer/src/migrations/information_schema.rs @@ -52,10 +52,10 @@ impl ColumnMetadataTypeValue { } #[cfg(feature = "tiberius")] ColumnType::SqlServer(v) => match v { TIB_TY::NChar | TIB_TY::NVarchar | TIB_TY::BigChar | TIB_TY::BigVarChar => { - Self::StringValue(row.get_opt::<&str>(col.name()).map(|opt| opt.to_owned())) + Self::StringValue(row.get_mssql_opt::<&str>(col.name()).map(|opt| opt.to_owned())) } TIB_TY::Int2 | TIB_TY::Int4 | TIB_TY::Int8 | TIB_TY::Intn => { - Self::IntValue(row.get_opt::(col.name())) + Self::IntValue(row.get_mssql_opt::(col.name())) } _ => Self::NoneValue, }, diff --git a/canyon_observer/src/migrations/processor.rs b/canyon_observer/src/migrations/processor.rs 
index c3995bbf..ff89bdc9 100644 --- a/canyon_observer/src/migrations/processor.rs +++ b/canyon_observer/src/migrations/processor.rs @@ -169,7 +169,7 @@ impl MigrationsProcessor { entity_name: &'a str, entity_fields: Vec, current_table_metadata: Option<&'a TableMetadata>, - db_type: DatabaseType, + _db_type: DatabaseType, ) { if current_table_metadata.is_none() { return; @@ -188,12 +188,15 @@ impl MigrationsProcessor { .collect(); for column_metadata in columns_name_to_delete { - if db_type == DatabaseType::SqlServer && !column_metadata.is_nullable { - self.drop_column_not_null( - entity_name, - column_metadata.column_name.clone(), - MigrationsHelper::get_datatype_from_column_metadata(column_metadata), - ) + #[cfg(feature = "tiberius")] + { + if _db_type == DatabaseType::SqlServer && !column_metadata.is_nullable { + self.drop_column_not_null( + entity_name, + column_metadata.column_name.clone(), + MigrationsHelper::get_datatype_from_column_metadata(column_metadata), + ) + } } self.delete_column(entity_name, column_metadata.column_name.clone()); } @@ -243,7 +246,7 @@ impl MigrationsProcessor { ))); } - fn drop_column_not_null( + #[cfg(feature = "tiberius")] fn drop_column_not_null( &mut self, table_name: &str, column_name: String, @@ -619,6 +622,7 @@ impl MigrationsHelper { } } + #[cfg(feature = "tiberius")] fn get_datatype_from_column_metadata(current_column_metadata: &ColumnMetadata) -> String { // TODO Add all SQL Server text datatypes if vec!["nvarchar", "varchar"] @@ -640,20 +644,25 @@ impl MigrationsHelper { canyon_register_entity_field: &CanyonRegisterEntityField, current_column_metadata: &ColumnMetadata, ) -> bool { - if db_type == DatabaseType::PostgreSql { - canyon_register_entity_field - .to_postgres_alter_syntax() - .to_lowercase() - == current_column_metadata.datatype - } else if db_type == DatabaseType::SqlServer { - // TODO Search a better way to get the datatype without useless info (like "VARCHAR(MAX)") - canyon_register_entity_field - 
.to_sqlserver_alter_syntax() - .to_lowercase() - == current_column_metadata.datatype - } else { - todo!() + #[cfg(feature = "tokio-postgres")] { + if db_type == DatabaseType::PostgreSql { + return canyon_register_entity_field + .to_postgres_alter_syntax() + .to_lowercase() + == current_column_metadata.datatype; + } + } + #[cfg(feature = "tiberius")] { + if db_type == DatabaseType::SqlServer { + // TODO Search a better way to get the datatype without useless info (like "VARCHAR(MAX)") + return canyon_register_entity_field + .to_sqlserver_alter_syntax() + .to_lowercase() + == current_column_metadata.datatype; + } } + + return false; } fn extract_foreign_key_annotation(field_annotations: &[String]) -> (String, String) { @@ -752,60 +761,60 @@ impl DatabaseOperation for TableOperation { let stmt = match self { TableOperation::CreateTable(table_name, table_fields) => { - if db_type == DatabaseType::PostgreSql { - format!( - "CREATE TABLE \"{table_name}\" ({});", - table_fields - .iter() - .map(|entity_field| format!( - "\"{}\" {}", - entity_field.field_name, - entity_field.to_postgres_syntax() - )) - .collect::>() - .join(", ") - ) - } else if db_type == DatabaseType::SqlServer { - format!( - "CREATE TABLE {:?} ({:?});", - table_name, - table_fields - .iter() - .map(|entity_field| format!( - "{} {}", - entity_field.field_name, - entity_field.to_sqlserver_syntax() - )) - .collect::>() - .join(", ") - ) - .replace('"', "") - } else { - todo!("There's no other databases supported in Canyon-SQL right now") + match db_type { + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => { + format!( + "CREATE TABLE \"{table_name}\" ({});", + table_fields + .iter() + .map(|entity_field| format!( + "\"{}\" {}", + entity_field.field_name, + entity_field.to_postgres_syntax() + )) + .collect::>() + .join(", ") + ) + } + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => { + format!( + "CREATE TABLE {:?} ({:?});", + table_name, + table_fields + .iter() + .map(|entity_field| 
format!( + "{} {}", + entity_field.field_name, + entity_field.to_sqlserver_syntax() + )) + .collect::>() + .join(", ") + ) + .replace('"', "") + } } } TableOperation::AlterTableName(old_table_name, new_table_name) => { - if db_type == DatabaseType::PostgreSql { - format!("ALTER TABLE {old_table_name} RENAME TO {new_table_name};") - } else if db_type == DatabaseType::SqlServer { - /* - Notes: Brackets around `old_table_name`, p.e. - exec sp_rename ['league'], 'leagues' // NOT VALID! - is only allowed for compound names split by a dot. - exec sp_rename ['random.league'], 'leagues' // OK - - CARE! This doesn't mean that we are including the schema. - exec sp_rename ['dbo.random.league'], 'leagues' // OK - exec sp_rename 'dbo.league', 'leagues' // OK - Schema doesn't need brackets - - Due to the automatic mapped name from Rust to DB and vice-versa, this won't - be an allowed behaviour for now, only with the table_name parameter on the - CanyonEntity annotation. - */ - format!("exec sp_rename '{old_table_name}', '{new_table_name}';") - } else { - todo!() + match db_type { + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + format!("ALTER TABLE {old_table_name} RENAME TO {new_table_name};"), + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + /* + Notes: Brackets around `old_table_name`, p.e. + exec sp_rename ['league'], 'leagues' // NOT VALID! + is only allowed for compound names split by a dot. + exec sp_rename ['random.league'], 'leagues' // OK + + CARE! This doesn't mean that we are including the schema. + exec sp_rename ['dbo.random.league'], 'leagues' // OK + exec sp_rename 'dbo.league', 'leagues' // OK - Schema doesn't need brackets + + Due to the automatic mapped name from Rust to DB and vice-versa, this won't + be an allowed behaviour for now, only with the table_name parameter on the + CanyonEntity annotation. 
+ */ + format!("exec sp_rename '{old_table_name}', '{new_table_name}';") } } @@ -816,48 +825,46 @@ impl DatabaseOperation for TableOperation { table_to_reference, column_to_reference, ) => { - if db_type == DatabaseType::PostgreSql { - format!( - "ALTER TABLE {table_name} ADD CONSTRAINT {foreign_key_name} \ - FOREIGN KEY ({column_foreign_key}) REFERENCES {table_to_reference} ({column_to_reference});" - ) - } else if db_type == DatabaseType::SqlServer { - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") - } else { - todo!() + match db_type { + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + format!( + "ALTER TABLE {table_name} ADD CONSTRAINT {foreign_key_name} \ + FOREIGN KEY ({column_foreign_key}) REFERENCES {table_to_reference} ({column_to_reference});" + ), + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } TableOperation::DeleteTableForeignKey(table_with_foreign_key, constraint_name) => { - if db_type == DatabaseType::PostgreSql { - format!( - "ALTER TABLE {table_with_foreign_key} DROP CONSTRAINT {constraint_name};", - ) - } else if db_type == DatabaseType::SqlServer { - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") - } else { - todo!() + match db_type { + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + format!( + "ALTER TABLE {table_with_foreign_key} DROP CONSTRAINT {constraint_name};", + ), + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } TableOperation::AddTablePrimaryKey(table_name, entity_field) => { - if db_type == DatabaseType::PostgreSql { - format!( - "ALTER TABLE \"{table_name}\" ADD PRIMARY KEY (\"{}\");", - entity_field.field_name - ) - } else if db_type == DatabaseType::SqlServer { - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") - } else { - 
todo!() + match db_type { + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + format!( + "ALTER TABLE \"{table_name}\" ADD PRIMARY KEY (\"{}\");", + entity_field.field_name + ), + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } TableOperation::DeleteTablePrimaryKey(table_name, primary_key_name) => { - if db_type == DatabaseType::PostgreSql || db_type == DatabaseType::SqlServer { - format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;") - } else { - todo!() + match db_type { + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;"), + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;") } } }; @@ -876,11 +883,11 @@ enum ColumnOperation { AlterColumnType(String, CanyonRegisterEntityField), AlterColumnDropNotNull(String, CanyonRegisterEntityField), // SQL server specific operation - SQL server can't drop a NOT NULL column - DropNotNullBeforeDropColumn(String, String, String), - AlterColumnSetNotNull(String, CanyonRegisterEntityField), + #[cfg(feature = "tiberius")] DropNotNullBeforeDropColumn(String, String, String), + #[cfg(feature = "tokio-postgres")] AlterColumnSetNotNull(String, CanyonRegisterEntityField), // TODO if implement through annotations, modify for both GENERATED {ALWAYS, BY DEFAULT} - AlterColumnAddIdentity(String, CanyonRegisterEntityField), - AlterColumnDropIdentity(String, CanyonRegisterEntityField), + #[cfg(feature = "tokio-postgres")] AlterColumnAddIdentity(String, CanyonRegisterEntityField), + #[cfg(feature = "tokio-postgres")] AlterColumnDropIdentity(String, CanyonRegisterEntityField), } impl Transaction for ColumnOperation {} @@ -892,51 +899,47 @@ impl DatabaseOperation for ColumnOperation { let stmt = match self { 
ColumnOperation::CreateColumn(table_name, entity_field) => - if db_type == DatabaseType::PostgreSql { - format!( - "ALTER TABLE \"{}\" ADD COLUMN \"{}\" {};", - table_name, - entity_field.field_name, - entity_field.to_postgres_syntax()) - } else if db_type == DatabaseType::SqlServer { - format!( - "ALTER TABLE {} ADD \"{}\" {};", - table_name, - entity_field.field_name, - entity_field.to_sqlserver_syntax() - ) - } else { - todo!() - }, + match db_type { + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + format!( + "ALTER TABLE \"{}\" ADD COLUMN \"{}\" {};", + table_name, + entity_field.field_name, + entity_field.to_postgres_syntax() + ), + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + format!( + "ALTER TABLE {} ADD \"{}\" {};", + table_name, + entity_field.field_name, + entity_field.to_sqlserver_syntax() + ) + } ColumnOperation::DeleteColumn(table_name, column_name) => { // TODO Check if operation for SQL server is different format!("ALTER TABLE \"{table_name}\" DROP COLUMN \"{column_name}\";") }, ColumnOperation::AlterColumnType(table_name, entity_field) => - if db_type == DatabaseType::PostgreSql { - format!( - "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" TYPE {};", - entity_field.field_name, entity_field.to_postgres_alter_syntax() - ) - } else if db_type == DatabaseType::SqlServer { - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") - } else { - todo!() - } - , + match db_type { + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + format!( + "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" TYPE {};", + entity_field.field_name, entity_field.to_postgres_alter_syntax() + ), + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") + } ColumnOperation::AlterColumnDropNotNull(table_name, entity_field) => - if db_type == DatabaseType::PostgreSql { - format!("ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" DROP 
NOT NULL;", entity_field.field_name) - } else if db_type == DatabaseType::SqlServer { - format!( - "ALTER TABLE \"{table_name}\" ALTER COLUMN {} {} NULL", - entity_field.field_name, entity_field.to_sqlserver_alter_syntax() - ) - } else { - todo!() - } - - ColumnOperation::DropNotNullBeforeDropColumn(table_name, column_name, column_datatype) => + match db_type { + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + format!("ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" DROP NOT NULL;", entity_field.field_name), + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + format!( + "ALTER TABLE \"{table_name}\" ALTER COLUMN {} {} NULL", + entity_field.field_name, entity_field.to_sqlserver_alter_syntax() + ) + } + #[cfg(feature = "tiberius")] ColumnOperation::DropNotNullBeforeDropColumn(table_name, column_name, column_datatype) => format!( "ALTER TABLE {table_name} ALTER COLUMN {column_name} {column_datatype} NULL; DECLARE @tableName VARCHAR(MAX) = '{table_name}' DECLARE @columnName VARCHAR(MAX) = '{column_name}' @@ -955,11 +958,11 @@ impl DatabaseOperation for ColumnOperation { "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" SET NOT NULL;", entity_field.field_name ), - ColumnOperation::AlterColumnAddIdentity(table_name, entity_field) => format!( + #[cfg(feature = "tokio-postgres")] ColumnOperation::AlterColumnAddIdentity(table_name, entity_field) => format!( "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" ADD GENERATED ALWAYS AS IDENTITY;", entity_field.field_name ), - ColumnOperation::AlterColumnDropIdentity(table_name, entity_field) => format!( + #[cfg(feature = "tokio-postgres")] ColumnOperation::AlterColumnDropIdentity(table_name, entity_field) => format!( "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" DROP IDENTITY;", entity_field.field_name ), }; @@ -984,15 +987,14 @@ impl DatabaseOperation for SequenceOperation { let stmt = match self { SequenceOperation::ModifySequence(table_name, entity_field) => { - if db_type == 
DatabaseType::PostgreSql { - format!( - "SELECT setval(pg_get_serial_sequence('\"{table_name}\"', '{}'), max(\"{}\")) from \"{table_name}\";", - entity_field.field_name, entity_field.field_name - ) - } else if db_type == DatabaseType::SqlServer { - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") - } else { - todo!() + match db_type { + #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + format!( + "SELECT setval(pg_get_serial_sequence('\"{table_name}\"', '{}'), max(\"{}\")) from \"{table_name}\";", + entity_field.field_name, entity_field.field_name + ), + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } }; diff --git a/canyon_observer/src/migrations/register_types.rs b/canyon_observer/src/migrations/register_types.rs index 470944db..57ef6e39 100644 --- a/canyon_observer/src/migrations/register_types.rs +++ b/canyon_observer/src/migrations/register_types.rs @@ -1,8 +1,8 @@ use regex::Regex; -use crate::constants::{ - postgresql_type, regex_patterns, rust_type, sqlserver_type, NUMERIC_PK_DATATYPE, -}; +use crate::constants::{regex_patterns, rust_type, NUMERIC_PK_DATATYPE}; +#[cfg(feature = "tokio-postgres")] use crate::constants::postgresql_type; +#[cfg(feature = "tiberius")] use crate::constants::sqlserver_type; /// This file contains `Rust` types that represents an entry on the `CanyonRegister` /// where `Canyon` tracks the user types that has to manage @@ -28,7 +28,7 @@ pub struct CanyonRegisterEntityField { impl CanyonRegisterEntityField { /// Return the postgres datatype and parameters to create a column for a given rust type - pub fn to_postgres_syntax(&self) -> String { + #[cfg(feature = "tokio-postgres")] pub fn to_postgres_syntax(&self) -> String { let rust_type_clean = self.field_type.replace(' ', ""); match rust_type_clean.as_str() { @@ -74,7 +74,7 @@ impl CanyonRegisterEntityField { /// Return the postgres datatype and 
parameters to create a column for a given rust type /// for Microsoft SQL Server - pub fn to_sqlserver_syntax(&self) -> String { + #[cfg(feature = "tiberius")] pub fn to_sqlserver_syntax(&self) -> String { let rust_type_clean = self.field_type.replace(' ', ""); match rust_type_clean.as_str() { @@ -120,7 +120,7 @@ impl CanyonRegisterEntityField { } } - pub fn to_postgres_alter_syntax(&self) -> String { + #[cfg(feature = "tokio-postgres")] pub fn to_postgres_alter_syntax(&self) -> String { let mut rust_type_clean = self.field_type.replace(' ', ""); let rs_type_is_optional = self.field_type.to_uppercase().starts_with("OPTION"); @@ -162,7 +162,7 @@ impl CanyonRegisterEntityField { } } - pub fn to_sqlserver_alter_syntax(&self) -> String { + #[cfg(feature = "tiberius")] pub fn to_sqlserver_alter_syntax(&self) -> String { let mut rust_type_clean = self.field_type.replace(' ', ""); let rs_type_is_optional = self.field_type.to_uppercase().starts_with("OPTION"); @@ -200,50 +200,6 @@ impl CanyonRegisterEntityField { } } - /// Return the datatype and parameters to create an id column, given the corresponding "CanyonRegisterEntityField" - /// with the correct format for PostgreSQL - fn _to_postgres_id_syntax(&self) -> String { - let has_pk_annotation = self - .annotations - .iter() - .find(|a| a.starts_with("Annotation: PrimaryKey")); - - let pk_is_autoincremental = match has_pk_annotation { - Some(annotation) => annotation.contains("true"), - None => false, - }; - - let postgres_datatype_syntax = Self::to_postgres_syntax(self); - - if NUMERIC_PK_DATATYPE.contains(&self.field_type.as_str()) && pk_is_autoincremental { - format!("{postgres_datatype_syntax} PRIMARY KEY GENERATED ALWAYS AS IDENTITY") - } else { - format!("{postgres_datatype_syntax} PRIMARY KEY") - } - } - - /// Return the datatype and parameters to create an id column, given the corresponding "CanyonRegisterEntityField" - /// with the correct format for Microsoft SQL Server - fn _to_sqlserver_id_syntax(&self) -> 
String { - let has_pk_annotation = self - .annotations - .iter() - .find(|a| a.starts_with("Annotation: PrimaryKey")); - - let pk_is_autoincremental = match has_pk_annotation { - Some(annotation) => annotation.contains("true"), - None => false, - }; - - let sqlserver_datatype_syntax = Self::to_sqlserver_syntax(self); - - if NUMERIC_PK_DATATYPE.contains(&self.field_type.as_str()) && pk_is_autoincremental { - format!("{sqlserver_datatype_syntax} IDENTITY PRIMARY") - } else { - format!("{sqlserver_datatype_syntax} PRIMARY KEY") - } - } - /// Return if the field is autoincremental pub fn is_autoincremental(&self) -> bool { let has_pk_annotation = self diff --git a/src/lib.rs b/src/lib.rs index d3bf079c..cb8be374 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,6 +4,13 @@ /// reaches the top most level, grouping them and making them visible /// through this crate, building the *public API* of the library +extern crate canyon_connection; +extern crate canyon_crud; +extern crate canyon_observer; +extern crate canyon_macros; + +// extern crate async_trait; + /// Reexported elements to the root of the public API pub mod migrations { pub use canyon_observer::migrations::{handler, processor}; @@ -15,7 +22,7 @@ pub use canyon_macros::main; /// Public API for the `Canyon-SQL` proc-macros, and for the external ones pub mod macros { - pub use async_trait::*; + // pub use async_trait::*; pub use canyon_macros::*; } @@ -36,8 +43,8 @@ pub mod query { /// Reexport the available database clients within Canyon pub mod db_clients { - pub use canyon_connection::tiberius; - pub use canyon_connection::tokio_postgres; + #[cfg(feature = "postgres")] pub use canyon_connection::tokio_postgres; + #[cfg(feature = "mssql")] pub use canyon_connection::tiberius; } /// Reexport the needed runtime dependencies From c0b0dd7f66e2eef63d50a18e789ba51f53c53261 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Tue, 18 Apr 2023 14:09:44 +0200 Subject: [PATCH 10/23] First compilable version since the rework 
for the conditional compilation --- canyon_macros/src/query_operations/insert.rs | 51 ++++++++++---------- canyon_macros/src/query_operations/select.rs | 37 +++++--------- 2 files changed, 38 insertions(+), 50 deletions(-) diff --git a/canyon_macros/src/query_operations/insert.rs b/canyon_macros/src/query_operations/insert.rs index 45ce1187..d1b4e7a4 100644 --- a/canyon_macros/src/query_operations/insert.rs +++ b/canyon_macros/src/query_operations/insert.rs @@ -34,7 +34,6 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri ._fields_with_types() .into_iter() .find(|(i, _t)| Some(i.to_string()) == primary_key); - let insert_transaction = if let Some(pk_data) = &pk_ident_type { let pk_ident = &pk_data.0; let pk_type = &pk_data.1; @@ -54,22 +53,25 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri stmt, values, datasource_name - ).await; + ).await?; match rows { - #[cfg(feature = "tokio-postgres")] Self::Postgres(v) => { - v.remove(0) - .expect("No value found on the returning clause for Postgres") - .get::<&str, #pk_type>(#primary_key) - } - #[cfg(feature = "tiberius")] Self::Tiberius(v) => { - v.into_iter() - .flatten() - .collect::>() - .remove(0) + #[cfg(feature = "tokio-postgres")] Self::Postgres(mut v) => { + instance.#pk_ident = v + .get(idx) + .expect("Failed getting the returned IDs for a multi insert") + .get::<&str, #pk_type>(#primary_key); + Ok(()) + }, + #[cfg(feature = "tiberius")] Self::Tiberius(mut v) => { + instance.#pk_ident = v + .get(idx) + .expect("Failed getting the returned IDs for a multi insert") .get::<#pk_type, &str>(#primary_key) - .expect("SQL Server primary key type failed to be set as value") - } + .expect("SQL Server primary key type failed to be set as value"); + Ok(()) + }, + _ => panic!() // TODO remove when the generics will be refactored } } } else { @@ -92,6 +94,7 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri } }; + quote! 
{ /// Inserts into a database entity the current data in `self`, generating a new /// entry (row), returning the `PRIMARY KEY` = `self.` with the specified @@ -287,15 +290,12 @@ pub fn generate_multiple_insert_tokens( datasource_name ).await; - match result { // TODO Falta el ds correcto - // TODO Recuperar datasource fuera del código cliente - /* .for_each(|row| results.push(row as &dyn Row)); */ + match result { Ok(res) => { - match res.get_active_ds() { - canyon_sql::crud::DatabaseType::PostgreSql => { + match res { + #[cfg(feature = "tokio-postgres")] Self::Postgres(mut v) => { for (idx, instance) in instances.iter_mut().enumerate() { - instance.#pk_ident = res - .postgres + instance.#pk_ident = v .get(idx) .expect("Failed getting the returned IDs for a multi insert") .get::<&str, #pk_type>(#pk); @@ -303,18 +303,17 @@ pub fn generate_multiple_insert_tokens( Ok(()) }, - canyon_sql::crud::DatabaseType::SqlServer => { + #[cfg(feature = "tiberius")] Self::Tiberius(mut v) => for (idx, instance) in instances.iter_mut().enumerate() { - instance.#pk_ident = res - .sqlserver + instance.#pk_ident = v .get(idx) .expect("Failed getting the returned IDs for a multi insert") .get::<#pk_type, &str>(#pk) .expect("SQL Server primary key type failed to be set as value"); } - Ok(()) - } + Ok(()), + _ => panic!() // TODO remove when the generics will be refactored } }, Err(e) => Err(e) diff --git a/canyon_macros/src/query_operations/select.rs b/canyon_macros/src/query_operations/select.rs index c782e8c2..c5875f03 100644 --- a/canyon_macros/src/query_operations/select.rs +++ b/canyon_macros/src/query_operations/select.rs @@ -147,39 +147,28 @@ pub fn generate_count_tokens( let stmt = format!("SELECT COUNT (*) FROM {table_schema_data}"); let result_handling = quote! 
{ - // match count.get_active_ds() { - // canyon_sql_root::crud::DatabaseType::PostgreSql => { - // Ok( - // count.postgres.get(0) - // .expect(&format!("Count operation failed for {:?}", #ty_str)) - // .get::<&str, i64>("count") - // .to_owned() - // ) - // }, - // canyon_sql_root::crud::DatabaseType::SqlServer => { - // Ok( - // count.sqlserver.get(0) - // .expect(&format!("Count operation failed for {:?}", #ty_str)) - // .get::(0) - // .expect(&format!("SQL Server failed to return the count values for {:?}", #ty_str)) - // .into() - // ) - // } - // } - Ok(0 as i64) // TODO + match count { + #[cfg(feature = "tokio-postgres")] Self::Postgres(mut v) => Ok( + v.remove(0).get::<&str, i64>("count") + ), + #[cfg(feature = "tiberius")] Self::Tiberius(mut v) => + v.remove(0) + .get::("count") + .ok_or(format!("Failure in the COUNT query for MSSQL for: {}", #ty_str).into()), + _ => panic!() // TODO remove when the generics will be refactored + } }; quote! { /// Performs a COUNT(*) query over some table, returning a [`Result`] rather than panicking, /// wrapping a possible success or error coming from the database async fn count() -> Result> { - let count = <#ty as canyon_sql::crud::Transaction<#ty>>::query( + <#ty as canyon_sql::crud::Transaction<#ty>>::query( #stmt, &[], "" - ).await?; - - #result_handling + ).await + .get_by_idx_and_key(0, "count") } /// Performs a COUNT(*) query over some table, returning a [`Result`] rather than panicking, From 2a7d64f4202c1a0edc41f5a0373cdf329aa61868 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Tue, 18 Apr 2023 15:59:59 +0200 Subject: [PATCH 11/23] Setting up the missing parts for msssql databases --- Cargo.toml | 2 +- .../src/canyon_database_connector.rs | 8 +- canyon_crud/src/bounds.rs | 37 +++--- canyon_crud/src/crud.rs | 6 +- canyon_crud/src/rows.rs | 68 ++++++----- canyon_macros/src/query_operations/insert.rs | 3 +- canyon_observer/src/migrations/handler.rs | 108 ++++++++++++------ canyon_observer/src/migrations/memory.rs 
| 12 +- 8 files changed, 141 insertions(+), 103 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 919cdf24..afe4e1c6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -57,5 +57,5 @@ description = "A Rust ORM and QueryBuilder" [features] default = ["postgres"] postgres = ["canyon_connection/tokio-postgres", "canyon_crud/tokio-postgres", "canyon_observer/tokio-postgres"] -mssql = ["canyon_connection/tiberius", "canyon_observer/tiberius", "canyon_observer/tiberius"] +mssql = ["canyon_connection/tiberius", "canyon_crud/tiberius", "canyon_observer/tiberius"] mssql-integrated-auth = ["mssql"] \ No newline at end of file diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index c58451fb..7c448f0e 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -1,8 +1,8 @@ -#[cfg(feature = "tiberius")] use async_std::net::TcpStream; - use serde::Deserialize; -#[cfg(feature = "tiberius")] use tiberius::{AuthMethod, Config}; + #[cfg(feature = "tokio-postgres")] use tokio_postgres::{Client, NoTls}; +#[cfg(feature = "tiberius")] use tiberius::{AuthMethod, Config}; +#[cfg(feature = "tiberius")] use async_std::net::TcpStream; use crate::datasources::DatasourceConfig; @@ -158,7 +158,7 @@ mod database_connection_handler { use crate::CanyonSqlConfig; const CONFIG_FILE_MOCK_ALT: &str = r#" - [canyon_sql_root] + [canyon_sql] datasources = [ {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } diff --git a/canyon_crud/src/bounds.rs b/canyon_crud/src/bounds.rs index 
ce0b498b..78d679b3 100644 --- a/canyon_crud/src/bounds.rs +++ b/canyon_crud/src/bounds.rs @@ -7,7 +7,7 @@ use crate::{ use canyon_connection::tokio_postgres::{self, types::ToSql}; #[cfg(feature = "tiberius")] -use canyon_connection::tiberius::{self, ColumnData, FromSql, IntoSql}; +use canyon_connection::tiberius::{self, ColumnData, IntoSql}; use chrono::{DateTime, FixedOffset, NaiveDate, NaiveDateTime, NaiveTime, Utc}; use std::any::Any; @@ -91,21 +91,13 @@ pub trait Row { self } } -#[cfg(feature = "tokio-postgres")] impl Row for &tokio_postgres::Row { - fn as_any(&self) -> &dyn Any { - *self - } -} + #[cfg(feature = "tiberius")] impl Row for tiberius::Row { fn as_any(&self) -> &dyn Any { self } } -#[cfg(feature = "tiberius")] impl Row for &tiberius::Row { - fn as_any(&self) -> &dyn Any { - self - } -} + /// Generic abstraction for hold a Column type that will be one of the Column /// types present in the dependent crates @@ -144,7 +136,6 @@ pub trait Type { } /// Wrapper over the dependencies Column's types -// #[derive(Copy)] pub enum ColumnType { #[cfg(feature = "tokio-postgres")] Postgres(tokio_postgres::types::Type), #[cfg(feature = "tiberius")] SqlServer(tiberius::ColumnType), @@ -152,25 +143,25 @@ pub enum ColumnType { pub trait RowOperations { #[cfg(feature = "tokio-postgres")] - fn get_postgres<'a, Output>(&'a self, col_name: &str) -> Output + fn get_postgres<'a, Output>(&'a self, col_name: &'a str) -> Output where Output: tokio_postgres::types::FromSql<'a>; #[cfg(feature = "tiberius")] - fn get_mssql<'a, Output>(&self, col_name: &str) -> Output + fn get_mssql<'a, Output>(&'a self, col_name: &'a str) -> Output where Output: tiberius::FromSql<'a>; #[cfg(feature = "tokio-postgres")] - fn get_postgres_opt<'a, Output>(&'a self, col_name: &str) -> Option + fn get_postgres_opt<'a, Output>(&'a self, col_name: &'a str) -> Option where Output: tokio_postgres::types::FromSql<'a>; #[cfg(feature = "tiberius")] - fn get_mssql_opt<'a, Output>(&'a self, col_name: &str) 
-> Option - where Output: tokio_postgres::types::FromSql<'a>; + fn get_mssql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option + where Output: tiberius::FromSql<'a>; fn columns(&self) -> Vec; } impl RowOperations for &dyn Row { #[cfg(feature = "tokio-postgres")] - fn get_postgres<'a, Output>(&'a self, col_name: &str) -> Output + fn get_postgres<'a, Output>(&'a self, col_name: &'a str) -> Output where Output: tokio_postgres::types::FromSql<'a> { if let Some(row) = self.as_any().downcast_ref::() { @@ -179,7 +170,7 @@ impl RowOperations for &dyn Row { panic!() // TODO into result and propagate } #[cfg(feature = "tiberius")] - fn get_mssql<'a, Output>(&'a self, col_name: &str) -> Output + fn get_mssql<'a, Output>(&'a self, col_name: &'a str) -> Output where Output: tiberius::FromSql<'a> { if let Some(row) = self.as_any().downcast_ref::() { @@ -191,7 +182,7 @@ impl RowOperations for &dyn Row { } #[cfg(feature = "tokio-postgres")] - fn get_postgres_opt<'a, Output>(&'a self, col_name: &str) -> Option + fn get_postgres_opt<'a, Output>(&'a self, col_name: &'a str) -> Option where Output: tokio_postgres::types::FromSql<'a> { if let Some(row) = self.as_any().downcast_ref::() { @@ -201,13 +192,11 @@ impl RowOperations for &dyn Row { } #[cfg(feature = "tiberius")] - fn get_mssql_opt<'a, Output>(&'a self, col_name: &str) -> Option + fn get_mssql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option where Output: tiberius::FromSql<'a> { if let Some(row) = self.as_any().downcast_ref::() { - return row - .try_get - .expect("Failed to obtain a row for MSSQL"); + return row.get::(col_name); }; panic!() // TODO into result and propagate } diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index 895019ca..c025dd97 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -50,7 +50,7 @@ pub trait Transaction { let mut guarded_cache = CACHED_DATABASE_CONN.lock().await; let database_conn = get_database_connection(datasource_name, &mut guarded_cache); - match 
*database_conn { + match database_conn { #[cfg(feature = "tokio-postgres")] DatabaseConnection::Postgres(_) => { postgres_query_launcher::launch::( database_conn, @@ -58,7 +58,7 @@ pub trait Transaction { params.as_ref(), ) .await - } + }, #[cfg(feature = "tiberius")] DatabaseConnection::SqlServer(_) => { sqlserver_query_launcher::launch::( database_conn, @@ -236,6 +236,6 @@ mod sqlserver_query_launcher { .into_results() .await?; - Ok(CanyonRows::Tiberius(_results.iter().flatten().collect())) + Ok(CanyonRows::Tiberius(_results.into_iter().flatten().collect())) } } diff --git a/canyon_crud/src/rows.rs b/canyon_crud/src/rows.rs index efddfcb8..5dfbf5bf 100644 --- a/canyon_crud/src/rows.rs +++ b/canyon_crud/src/rows.rs @@ -16,7 +16,7 @@ pub enum CanyonRows { impl CanyonRows { #[cfg(feature = "tokio-postgres")] - pub fn get_postgres_rows(self) -> Vec { + pub fn get_postgres_rows(&self) -> &Vec { match self { Self::Postgres(v) => v, _ => panic!("This branch will never ever should be reachable") @@ -24,7 +24,7 @@ impl CanyonRows { } #[cfg(feature = "tiberius")] - pub fn get_tiberius_rows(self) -> Vec { + pub fn get_tiberius_rows(&self) -> &Vec { match self { Self::Tiberius(v) => v, _ => panic!("This branch will never ever should be reachable") @@ -47,28 +47,44 @@ impl CanyonRows { } } -#[cfg(feature = "tokio-postgres")] -impl IntoIterator for CanyonRows { - type Item = tokio_postgres::Row; - type IntoIter = std::vec::IntoIter; +// #[cfg(feature = "tokio-postgres")] +// impl IntoIterator for CanyonRows { +// type Item = tokio_postgres::Row; +// type IntoIter = std::vec::IntoIter; +// +// fn into_iter(self) -> Self::IntoIter { +// match self { +// Self::Postgres(v) => v.into_iter(), +// _ => panic!() +// } +// } +// } +// +// #[cfg(feature = "tiberius")] +// impl IntoIterator for CanyonRows { +// type Item = tiberius::Row; +// type IntoIter = std::vec::IntoIter; +// +// fn into_iter(self) -> Self::IntoIter { +// match self { +// Self::Tiberius(v) => v.into_iter(), +// _ => 
panic!() +// } +// } +// } +// +// #[cfg(all(feature = "tokio-postgres", feature = "tiberius"))] +// impl IntoIterator for CanyonRows { +// if cfg!(feature = "tokio-postgres") { +// type Item = tokio_postgres::Row; +// } else { type Item = tiberius::Row; } +// type IntoIter = std::vec::IntoIter; +// +// fn into_iter(self) -> Self::IntoIter { +// match self { +// Self::Tiberius(v) => v.into_iter(), +// _ => panic!() +// } +// } +// } - fn into_iter(self) -> Self::IntoIter { - match self { - Self::Postgres(v) => v.into_iter(), - _ => panic!() - } - } -} - -#[cfg(feature = "tiberius")] -impl IntoIterator for CanyonRows { - type Item = tiberius::Row; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - match self { - Self::Tiberius(v) => v.into_iter(), - _ => panic!() - } - } -} diff --git a/canyon_macros/src/query_operations/insert.rs b/canyon_macros/src/query_operations/insert.rs index d1b4e7a4..64579a55 100644 --- a/canyon_macros/src/query_operations/insert.rs +++ b/canyon_macros/src/query_operations/insert.rs @@ -303,7 +303,7 @@ pub fn generate_multiple_insert_tokens( Ok(()) }, - #[cfg(feature = "tiberius")] Self::Tiberius(mut v) => + #[cfg(feature = "tiberius")] Self::Tiberius(mut v) => { for (idx, instance) in instances.iter_mut().enumerate() { instance.#pk_ident = v .get(idx) @@ -313,6 +313,7 @@ pub fn generate_multiple_insert_tokens( } Ok(()), + }, _ => panic!() // TODO remove when the generics will be refactored } }, diff --git a/canyon_observer/src/migrations/handler.rs b/canyon_observer/src/migrations/handler.rs index 884b86f4..407dc76e 100644 --- a/canyon_observer/src/migrations/handler.rs +++ b/canyon_observer/src/migrations/handler.rs @@ -103,39 +103,11 @@ impl Migrations { /// and extract the content of the returned rows into custom structures with /// the data well organized for every entity present on that schema fn map_rows(db_results: CanyonRows, db_type: DatabaseType) -> Vec { - let mut schema_info: Vec = Vec::new(); - 
- for res_row in db_results.into_iter() - // .map(|row| &row as &dyn Row) - { - let unique_table = schema_info - .iter_mut() - // TODO To be able to remove row from our code, use a match statement to get table name - .find(|table| check_for_table_name(table, &res_row as &dyn Row)); - match unique_table { - Some(table) => { - /* If a table entity it's already present on the collection, we add it - the founded columns related to the table */ - Self::get_columns_metadata(&res_row as &dyn Row, table); - } - None => { - /* If there's no table for a given "table_name" property on the - collection yet, we must create a new instance and attach it - the founded columns data in this iteration */ - let mut new_table = TableMetadata { - table_name: match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => get_table_name_from_tp_row(&res_row), - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => get_table_name_from_tib_row(&res_row), - }, - columns: Vec::new(), - }; - Self::get_columns_metadata(&res_row as &dyn Row, &mut new_table); - schema_info.push(new_table); - } - }; + match db_results { + #[cfg(feature = "tokio-postgres")] CanyonRows::Postgres(v) => Self::process_tp_rows(v, db_type), + #[cfg(feature = "tiberius")] CanyonRows::Tiberius(v) => Self::process_tib_rows(v, db_type), + _ => panic!() } - - schema_info } /// Parses all the [`Row`] after query the information of the targeted schema, @@ -221,6 +193,66 @@ impl Migrations { } }; } + + #[cfg(feature = "tokio-postgres")] + fn process_tp_rows(db_results: Vec, db_type: DatabaseType) -> Vec { + let mut schema_info: Vec = Vec::new(); + for res_row in db_results.iter() { + let unique_table = schema_info + .iter_mut() + .find(|table| check_for_table_name(table, db_type, res_row as &dyn Row)); + match unique_table { + Some(table) => { + /* If a table entity it's already present on the collection, we add it + the founded columns related to the table */ + Self::get_columns_metadata(res_row as &dyn Row, 
table); + } + None => { + /* If there's no table for a given "table_name" property on the + collection yet, we must create a new instance and attach it + the founded columns data in this iteration */ + let mut new_table = TableMetadata { + table_name: get_table_name_from_tp_row(res_row), + columns: Vec::new(), + }; + Self::get_columns_metadata(res_row as &dyn Row, &mut new_table); + schema_info.push(new_table); + } + }; + } + + schema_info + } + + #[cfg(feature = "tiberius")] + fn process_tib_rows(db_results: Vec, db_type: DatabaseType) -> Vec { + let mut schema_info: Vec = Vec::new(); + for res_row in db_results.iter() { + let unique_table = schema_info + .iter_mut() + .find(|table| check_for_table_name(table, db_type, res_row as &dyn Row)); + match unique_table { + Some(table) => { + /* If a table entity it's already present on the collection, we add it + the founded columns related to the table */ + Self::get_columns_metadata(res_row as &dyn Row, table); + } + None => { + /* If there's no table for a given "table_name" property on the + collection yet, we must create a new instance and attach it + the founded columns data in this iteration */ + let mut new_table = TableMetadata { + table_name: get_table_name_from_tib_row(res_row), + columns: Vec::new(), + }; + Self::get_columns_metadata(res_row as &dyn Row, &mut new_table); + schema_info.push(new_table); + } + }; + } + + schema_info + } } @@ -233,11 +265,11 @@ fn get_table_name_from_tib_row(res_row: &tiberius::Row) -> String { res_row.get::<&str, &str>("table_name").unwrap_or_default().to_string() } -fn check_for_table_name(table: &&mut TableMetadata, res_row: &dyn Row) -> bool { - #[cfg(feature = "tokio-postgres")] { - table.table_name == res_row.get_postgres::<&str>("table_name") - } - #[cfg(feature = "tiberius")] { - table.table_name == row_retriever_fn_ptr(&res_row, "table_name") +fn check_for_table_name(table: &&mut TableMetadata, db_type: DatabaseType, res_row: &dyn Row) -> bool { + match db_type { + 
#[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + table.table_name == res_row.get_postgres::<&str>("table_name"), + #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + table.table_name == res_row.get_mssql::<&str>("table_name") } } diff --git a/canyon_observer/src/migrations/memory.rs b/canyon_observer/src/migrations/memory.rs index 735b8d2c..8c8fe8f4 100644 --- a/canyon_observer/src/migrations/memory.rs +++ b/canyon_observer/src/migrations/memory.rs @@ -74,7 +74,7 @@ impl CanyonMemory { // Manually maps the results let mut db_rows = Vec::new(); #[cfg(feature = "tokio-postgres")] { - let mem_results: Vec = res.get_postgres_rows(); + let mem_results: &Vec = res.get_postgres_rows(); for row in mem_results { let db_row = CanyonMemoryRow { id: row.get::<&str, i32>("id"), @@ -86,13 +86,13 @@ impl CanyonMemory { } } #[cfg(feature = "tiberius")] { - let mem_results: Vec = res.get_tiberius_rows(); + let mem_results: &Vec = res.get_tiberius_rows(); for row in mem_results { let db_row = CanyonMemoryRow { - id: row.get::("id"), - filepath: row.get::<&str, &str>("filepath"), - struct_name: row.get::<&str, &str>("struct_name"), - declared_table_name: row.get::<&str, &str>("declared_table_name"), + id: row.get::("id").unwrap(), + filepath: row.get::<&str, &str>("filepath").unwrap().to_string(), + struct_name: row.get::<&str, &str>("struct_name").unwrap().to_string(), + declared_table_name: row.get::<&str, &str>("declared_table_name").unwrap().to_string(), }; db_rows.push(db_row); } From e09557d4b6074ce7ce0545f076ea2a5ed28cc6e0 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Tue, 18 Apr 2023 16:18:15 +0200 Subject: [PATCH 12/23] Adecuating the doc-tests to the conditional compilation --- Cargo.toml | 9 +-------- canyon_connection/Cargo.toml | 2 +- canyon_connection/src/canyon_database_connector.rs | 4 ++-- canyon_connection/src/datasources.rs | 11 +++++------ tests/Cargo.toml | 4 ++-- tests/canyon_integration_tests.rs | 2 ++ 6 files changed, 13 
insertions(+), 19 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index afe4e1c6..085ee7b2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,12 +20,6 @@ canyon_observer = { version = "0.2.0", path = "canyon_observer" } canyon_crud = { version = "0.2.0", path = "canyon_crud" } canyon_connection = { version = "0.2.0", path = "canyon_connection" } - -#tokio = { workspace = true } -#tokio-util = { workspace = true } -#tokio-postgres = { workspace = true } -#tiberius = { worskpace = true } - [workspace.dependencies] canyon_crud = { version = "0.2.0", path = "canyon_crud" } canyon_connection = { version = "0.2.0", path = "canyon_connection" } @@ -50,7 +44,7 @@ edition = "2021" authors = ["Alex Vergara, Gonzalo Busto"] documentation = "https://zerodaycode.github.io/canyon-book/" homepage = "https://github.com/zerodaycode/Canyon-SQL" -readme = "../README.md" +readme = "README.md" license = "MIT" description = "A Rust ORM and QueryBuilder" @@ -58,4 +52,3 @@ description = "A Rust ORM and QueryBuilder" default = ["postgres"] postgres = ["canyon_connection/tokio-postgres", "canyon_crud/tokio-postgres", "canyon_observer/tokio-postgres"] mssql = ["canyon_connection/tiberius", "canyon_crud/tiberius", "canyon_observer/tiberius"] -mssql-integrated-auth = ["mssql"] \ No newline at end of file diff --git a/canyon_connection/Cargo.toml b/canyon_connection/Cargo.toml index 36fc8a97..77fea282 100644 --- a/canyon_connection/Cargo.toml +++ b/canyon_connection/Cargo.toml @@ -13,7 +13,7 @@ description.workspace = true tokio = { workspace = true } tokio-util = { workspace = true } tokio-postgres = { workspace = true, optional = true } -tiberius = { workspace = true, optional = true } +tiberius = { workspace = true, optional = true} futures = { workspace = true } indexmap = { workspace = true } async-std = { workspace = true } diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index 7c448f0e..9ec8612f 100644 
--- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -171,11 +171,11 @@ mod database_connection_handler { let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT) .expect("A failure happened retrieving the [canyon_sql_root] section"); - assert_eq!( + #[cfg(feature = "tokio-postgres")] assert_eq!( config.canyon_sql.datasources[0].get_db_type(), DatabaseType::PostgreSql ); - assert_eq!( + #[cfg(feature = "tiberius")] assert_eq!( config.canyon_sql.datasources[1].get_db_type(), DatabaseType::SqlServer ); diff --git a/canyon_connection/src/datasources.rs b/canyon_connection/src/datasources.rs index 2a553cb3..e50b0521 100644 --- a/canyon_connection/src/datasources.rs +++ b/canyon_connection/src/datasources.rs @@ -19,7 +19,7 @@ fn load_ds_config_from_array() { let ds_0 = &config.canyon_sql.datasources[0]; let ds_1 = &config.canyon_sql.datasources[1]; - let ds_2 = &config.canyon_sql.datasources[2]; + let _ds_2 = &config.canyon_sql.datasources[2]; assert_eq!(ds_0.name, "PostgresDS"); assert_eq!(ds_0.get_db_type(), DatabaseType::PostgreSql); @@ -35,9 +35,9 @@ fn load_ds_config_from_array() { assert_eq!(ds_0.properties.db_name, "triforce"); assert_eq!(ds_0.properties.migrations, Some(Migrations::Enabled)); - assert_eq!(ds_1.name, "SqlServerDS"); - assert_eq!(ds_1.get_db_type(), DatabaseType::SqlServer); - assert_eq!( + #[cfg(feature = "tiberius")] assert_eq!(ds_1.name, "SqlServerDS"); + #[cfg(feature = "tiberius")] assert_eq!(ds_1.get_db_type(), DatabaseType::SqlServer); + #[cfg(feature = "tiberius")] assert_eq!( ds_1.auth, Auth::SqlServer(SqlServerAuth::Basic { username: "sa".to_string(), @@ -49,7 +49,7 @@ fn load_ds_config_from_array() { assert_eq!(ds_1.properties.db_name, "triforce2"); assert_eq!(ds_1.properties.migrations, Some(Migrations::Disabled)); - #[cfg(feature = "tokio-postgres")] assert_eq!(ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)) + #[cfg(feature = "tiberius")] 
assert_eq!(_ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)) } /// #[derive(Deserialize, Debug, Clone)] @@ -100,7 +100,6 @@ pub enum PostgresAuth { pub enum SqlServerAuth { #[serde(alias = "Basic", alias = "basic")] Basic { username: String, password: String }, - #[cfg(feature = "mssql-integrated-auth")] #[serde(alias = "Integrated", alias = "integrated")] Integrated, } diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 54047bc4..212c0505 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "tests" -version = "0.2.0" -edition = "2021" +version.workspace = true +edition.workspace = true publish = false [dev-dependencies] diff --git a/tests/canyon_integration_tests.rs b/tests/canyon_integration_tests.rs index 8120ee8f..30687987 100644 --- a/tests/canyon_integration_tests.rs +++ b/tests/canyon_integration_tests.rs @@ -1,3 +1,5 @@ +extern crate canyon_sql; + use std::error::Error; ///! Integration tests for the heart of a Canyon-SQL application, the CRUD operations. 
From 5d99d499ec370671ce6616ee6e4078879dc835b2 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Tue, 18 Apr 2023 17:31:59 +0200 Subject: [PATCH 13/23] Applying format with rustfmt --- Caasdfadsrgo.tomlsda | 19 -- Cargo.toml | 1 - .../src/canyon_database_connector.rs | 31 +- canyon_connection/src/datasources.rs | 18 +- canyon_connection/src/lib.rs | 15 +- canyon_crud/src/bounds.rs | 278 ++++++++++++------ canyon_crud/src/crud.rs | 35 +-- canyon_crud/src/mapper.rs | 12 +- .../src/query_elements/query_builder.rs | 3 +- canyon_crud/src/rows.rs | 34 +-- canyon_macros/src/query_operations/insert.rs | 1 - canyon_observer/src/migrations/handler.rs | 60 ++-- .../src/migrations/information_schema.rs | 24 +- canyon_observer/src/migrations/memory.rs | 19 +- canyon_observer/src/migrations/processor.rs | 21 +- .../src/migrations/register_types.rs | 18 +- src/lib.rs | 9 +- tests/crud/mod.rs | 6 +- 18 files changed, 374 insertions(+), 230 deletions(-) delete mode 100644 Caasdfadsrgo.tomlsda diff --git a/Caasdfadsrgo.tomlsda b/Caasdfadsrgo.tomlsda deleted file mode 100644 index 3e3c557e..00000000 --- a/Caasdfadsrgo.tomlsda +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "canyon_sql" -version = "0.2.0" -edition.workspace = true -authors.workspace = true -documentation.workspace = true -homepage.workspace = true -readme.workspace = true -license.workspace = true -description.workspace = true - -[dependencies] -async-trait = { version = "0.1.50" } - -# Project crates -canyon_macros = { version = "0.2.0", path = "../canyon_macros" } -canyon_observer = { version = "0.2.0", path = "../canyon_observer" } -canyon_crud = { version = "0.2.0", path = "../canyon_crud" } -canyon_connection = { version = "0.2.0", path = "../canyon_connection", features = ["postgres"] } diff --git a/Cargo.toml b/Cargo.toml index 085ee7b2..501c3fa6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,3 @@ -# This is the root Caasdfadsrgo.tomlsda file that serves as manager for the workspace of the project 
[package] name = "canyon_sql" version = "0.2.0" diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index 9ec8612f..5330e5e4 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -1,8 +1,11 @@ use serde::Deserialize; -#[cfg(feature = "tokio-postgres")] use tokio_postgres::{Client, NoTls}; -#[cfg(feature = "tiberius")] use tiberius::{AuthMethod, Config}; -#[cfg(feature = "tiberius")] use async_std::net::TcpStream; +#[cfg(feature = "tiberius")] +use async_std::net::TcpStream; +#[cfg(feature = "tiberius")] +use tiberius::{AuthMethod, Config}; +#[cfg(feature = "tokio-postgres")] +use tokio_postgres::{Client, NoTls}; use crate::datasources::DatasourceConfig; @@ -35,8 +38,10 @@ pub struct SqlServerConnection { /// process them and generates a pool of 1 to 1 database connection for /// every datasource defined. pub enum DatabaseConnection { - #[cfg(feature = "tokio-postgres")] Postgres(PostgreSqlConnection), - #[cfg(feature = "tiberius")] SqlServer(SqlServerConnection), + #[cfg(feature = "tokio-postgres")] + Postgres(PostgreSqlConnection), + #[cfg(feature = "tiberius")] + SqlServer(SqlServerConnection), } unsafe impl Send for DatabaseConnection {} @@ -94,14 +99,16 @@ impl DatabaseConnection { // Using SQL Server authentication. config.authentication(match &datasource.auth { - #[cfg(feature = "tokio-postgres")] crate::datasources::Auth::Postgres(_) => { + #[cfg(feature = "tokio-postgres")] + crate::datasources::Auth::Postgres(_) => { panic!("Found PostgreSQL auth configuration for a SqlServer database") } crate::datasources::Auth::SqlServer(sql_server_auth) => match sql_server_auth { crate::datasources::SqlServerAuth::Basic { username, password } => { AuthMethod::sql_server(username, password) } - #[cfg(feature = "mssql-integrated-auth")] // TODO pending, or remove the cfg? 
+ #[cfg(feature = "mssql-integrated-auth")] + // TODO pending, or remove the cfg? crate::datasources::SqlServerAuth::Integrated => AuthMethod::Integrated, }, }); @@ -139,7 +146,7 @@ impl DatabaseConnection { pub fn postgres_connection(&self) -> Option<&PostgreSqlConnection> { match self { DatabaseConnection::Postgres(conn) => Some(conn), - _ => panic!() + _ => panic!(), } } @@ -147,7 +154,7 @@ impl DatabaseConnection { pub fn sqlserver_connection(&mut self) -> Option<&mut SqlServerConnection> { match self { DatabaseConnection::SqlServer(conn) => Some(conn), - _ => panic!() + _ => panic!(), } } } @@ -171,11 +178,13 @@ mod database_connection_handler { let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT) .expect("A failure happened retrieving the [canyon_sql_root] section"); - #[cfg(feature = "tokio-postgres")] assert_eq!( + #[cfg(feature = "tokio-postgres")] + assert_eq!( config.canyon_sql.datasources[0].get_db_type(), DatabaseType::PostgreSql ); - #[cfg(feature = "tiberius")] assert_eq!( + #[cfg(feature = "tiberius")] + assert_eq!( config.canyon_sql.datasources[1].get_db_type(), DatabaseType::SqlServer ); diff --git a/canyon_connection/src/datasources.rs b/canyon_connection/src/datasources.rs index e50b0521..c2be5aa5 100644 --- a/canyon_connection/src/datasources.rs +++ b/canyon_connection/src/datasources.rs @@ -35,9 +35,12 @@ fn load_ds_config_from_array() { assert_eq!(ds_0.properties.db_name, "triforce"); assert_eq!(ds_0.properties.migrations, Some(Migrations::Enabled)); - #[cfg(feature = "tiberius")] assert_eq!(ds_1.name, "SqlServerDS"); - #[cfg(feature = "tiberius")] assert_eq!(ds_1.get_db_type(), DatabaseType::SqlServer); - #[cfg(feature = "tiberius")] assert_eq!( + #[cfg(feature = "tiberius")] + assert_eq!(ds_1.name, "SqlServerDS"); + #[cfg(feature = "tiberius")] + assert_eq!(ds_1.get_db_type(), DatabaseType::SqlServer); + #[cfg(feature = "tiberius")] + assert_eq!( ds_1.auth, Auth::SqlServer(SqlServerAuth::Basic { username: "sa".to_string(), @@ 
-49,7 +52,8 @@ fn load_ds_config_from_array() { assert_eq!(ds_1.properties.db_name, "triforce2"); assert_eq!(ds_1.properties.migrations, Some(Migrations::Disabled)); - #[cfg(feature = "tiberius")] assert_eq!(_ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)) + #[cfg(feature = "tiberius")] + assert_eq!(_ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)) } /// #[derive(Deserialize, Debug, Clone)] @@ -72,8 +76,10 @@ pub struct DatasourceConfig { impl DatasourceConfig { pub fn get_db_type(&self) -> DatabaseType { match self.auth { - #[cfg(feature = "tokio-postgres")] Auth::Postgres(_) => DatabaseType::PostgreSql, - #[cfg(feature = "tiberius")] Auth::SqlServer(_) => DatabaseType::SqlServer, + #[cfg(feature = "tokio-postgres")] + Auth::Postgres(_) => DatabaseType::PostgreSql, + #[cfg(feature = "tiberius")] + Auth::SqlServer(_) => DatabaseType::SqlServer, } } } diff --git a/canyon_connection/src/lib.rs b/canyon_connection/src/lib.rs index cc240034..64960537 100644 --- a/canyon_connection/src/lib.rs +++ b/canyon_connection/src/lib.rs @@ -1,10 +1,14 @@ -#[cfg(feature = "tiberius")] pub extern crate async_std; +#[cfg(feature = "tiberius")] +pub extern crate async_std; pub extern crate futures; pub extern crate lazy_static; -#[cfg(feature = "tiberius")] pub extern crate tiberius; +#[cfg(feature = "tiberius")] +pub extern crate tiberius; pub extern crate tokio; -#[cfg(feature = "tokio-postgres")] pub extern crate tokio_postgres; -#[cfg(feature = "tokio-postgres")] pub extern crate tokio_util; +#[cfg(feature = "tokio-postgres")] +pub extern crate tokio_postgres; +#[cfg(feature = "tokio-postgres")] +pub extern crate tokio_util; pub mod canyon_database_connector; pub mod datasources; @@ -62,11 +66,10 @@ pub async fn init_connections_cache() { } } - /// pub fn get_database_connection<'a>( datasource_name: &str, - guarded_cache: &'a mut MutexGuard> + guarded_cache: &'a mut MutexGuard>, ) -> &'a mut DatabaseConnection { if datasource_name.is_empty() { guarded_cache diff 
--git a/canyon_crud/src/bounds.rs b/canyon_crud/src/bounds.rs index 78d679b3..6a6842ba 100644 --- a/canyon_crud/src/bounds.rs +++ b/canyon_crud/src/bounds.rs @@ -86,19 +86,20 @@ pub trait Row { fn as_any(&self) -> &dyn Any; } -#[cfg(feature = "tokio-postgres")] impl Row for tokio_postgres::Row { +#[cfg(feature = "tokio-postgres")] +impl Row for tokio_postgres::Row { fn as_any(&self) -> &dyn Any { self } } -#[cfg(feature = "tiberius")] impl Row for tiberius::Row { +#[cfg(feature = "tiberius")] +impl Row for tiberius::Row { fn as_any(&self) -> &dyn Any { self } } - /// Generic abstraction for hold a Column type that will be one of the Column /// types present in the dependent crates // #[derive(Copy, Clone)] @@ -124,12 +125,14 @@ impl<'a> Column<'a> { pub trait Type { fn as_any(&self) -> &dyn Any; } -#[cfg(feature = "tokio-postgres")] impl Type for tokio_postgres::types::Type { +#[cfg(feature = "tokio-postgres")] +impl Type for tokio_postgres::types::Type { fn as_any(&self) -> &dyn Any { self } } -#[cfg(feature = "tiberius")] impl Type for tiberius::ColumnType { +#[cfg(feature = "tiberius")] +impl Type for tiberius::ColumnType { fn as_any(&self) -> &dyn Any { self } @@ -137,24 +140,30 @@ pub trait Type { /// Wrapper over the dependencies Column's types pub enum ColumnType { - #[cfg(feature = "tokio-postgres")] Postgres(tokio_postgres::types::Type), - #[cfg(feature = "tiberius")] SqlServer(tiberius::ColumnType), + #[cfg(feature = "tokio-postgres")] + Postgres(tokio_postgres::types::Type), + #[cfg(feature = "tiberius")] + SqlServer(tiberius::ColumnType), } pub trait RowOperations { #[cfg(feature = "tokio-postgres")] fn get_postgres<'a, Output>(&'a self, col_name: &'a str) -> Output - where Output: tokio_postgres::types::FromSql<'a>; + where + Output: tokio_postgres::types::FromSql<'a>; #[cfg(feature = "tiberius")] fn get_mssql<'a, Output>(&'a self, col_name: &'a str) -> Output - where Output: tiberius::FromSql<'a>; + where + Output: tiberius::FromSql<'a>; #[cfg(feature 
= "tokio-postgres")] fn get_postgres_opt<'a, Output>(&'a self, col_name: &'a str) -> Option - where Output: tokio_postgres::types::FromSql<'a>; + where + Output: tokio_postgres::types::FromSql<'a>; #[cfg(feature = "tiberius")] fn get_mssql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option - where Output: tiberius::FromSql<'a>; + where + Output: tiberius::FromSql<'a>; fn columns(&self) -> Vec; } @@ -162,7 +171,8 @@ pub trait RowOperations { impl RowOperations for &dyn Row { #[cfg(feature = "tokio-postgres")] fn get_postgres<'a, Output>(&'a self, col_name: &'a str) -> Output - where Output: tokio_postgres::types::FromSql<'a> + where + Output: tokio_postgres::types::FromSql<'a>, { if let Some(row) = self.as_any().downcast_ref::() { return row.get::<&str, Output>(col_name); @@ -171,7 +181,8 @@ impl RowOperations for &dyn Row { } #[cfg(feature = "tiberius")] fn get_mssql<'a, Output>(&'a self, col_name: &'a str) -> Output - where Output: tiberius::FromSql<'a> + where + Output: tiberius::FromSql<'a>, { if let Some(row) = self.as_any().downcast_ref::() { return row @@ -183,7 +194,8 @@ impl RowOperations for &dyn Row { #[cfg(feature = "tokio-postgres")] fn get_postgres_opt<'a, Output>(&'a self, col_name: &'a str) -> Option - where Output: tokio_postgres::types::FromSql<'a> + where + Output: tokio_postgres::types::FromSql<'a>, { if let Some(row) = self.as_any().downcast_ref::() { return row.get::<&str, Option>(col_name); @@ -193,7 +205,8 @@ impl RowOperations for &dyn Row { #[cfg(feature = "tiberius")] fn get_mssql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option - where Output: tiberius::FromSql<'a> + where + Output: tiberius::FromSql<'a>, { if let Some(row) = self.as_any().downcast_ref::() { return row.get::(col_name); @@ -204,7 +217,8 @@ impl RowOperations for &dyn Row { fn columns(&self) -> Vec { let mut cols = vec![]; - #[cfg(feature = "tokio-postgres")] { + #[cfg(feature = "tokio-postgres")] + { if self.as_any().is::() { self.as_any() .downcast_ref::() @@ 
-219,7 +233,8 @@ impl RowOperations for &dyn Row { }) } } - #[cfg(feature = "tiberius")] { + #[cfg(feature = "tiberius")] + { if self.as_any().is::() { self.as_any() .downcast_ref::() @@ -242,8 +257,10 @@ impl RowOperations for &dyn Row { /// Defines a trait for represent type bounds against the allowed /// data types supported by Canyon to be used as query parameters. pub trait QueryParameter<'a>: std::fmt::Debug + Sync + Send { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync); - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_>; + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync); + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_>; } /// The implementation of the [`canyon_connection::tiberius`] [`IntoSql`] for the @@ -262,198 +279,247 @@ impl<'a> IntoSql<'a> for &'a dyn QueryParameter<'a> { } impl<'a> QueryParameter<'a> for bool { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::Bit(Some(*self)) } } impl<'a> QueryParameter<'a> for i16 { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(*self)) } } impl<'a> QueryParameter<'a> for &i16 { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + 
Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(*self) } } impl<'a> QueryParameter<'a> for Option<&i16> { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for i32 { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(*self)) } } impl<'a> QueryParameter<'a> for &i32 { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] fn 
as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(*self) } } impl<'a> QueryParameter<'a> for Option<&i32> { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for f32 { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some(*self)) } } impl<'a> QueryParameter<'a> for &f32 { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> 
ColumnData<'_> { ColumnData::F32(*self) } } impl<'a> QueryParameter<'a> for Option<&f32> { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some( *self.expect("Error on an f32 value on QueryParameter<'_>"), )) } } impl<'a> QueryParameter<'a> for f64 { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some(*self)) } } impl<'a> QueryParameter<'a> for &f64 { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(*self) } } impl<'a> QueryParameter<'a> for Option<&f64> { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + 
#[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some( *self.expect("Error on an f64 value on QueryParameter<'_>"), )) } } impl<'a> QueryParameter<'a> for i64 { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(*self)) } } impl<'a> QueryParameter<'a> for &i64 { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(*self) } } impl<'a> QueryParameter<'a> for Option<&i64> { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> 
ColumnData<'_> { ColumnData::I64(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for String { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Owned(self.to_owned()))) } } impl<'a> QueryParameter<'a> for &String { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Borrowed(self))) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { match self { Some(string) => ColumnData::String(Some(std::borrow::Cow::Owned(string.to_owned()))), None => ColumnData::String(None), @@ -461,10 +527,12 @@ impl<'a> QueryParameter<'a> for Option { } } impl<'a> QueryParameter<'a> for Option<&String> { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { match self { Some(string) => 
ColumnData::String(Some(std::borrow::Cow::Borrowed(string))), None => ColumnData::String(None), @@ -472,18 +540,22 @@ impl<'a> QueryParameter<'a> for Option<&String> { } } impl<'a> QueryParameter<'_> for &'_ str { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Borrowed(*self))) } } impl<'a> QueryParameter<'a> for Option<&'_ str> { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { match *self { Some(str) => ColumnData::String(Some(std::borrow::Cow::Borrowed(str))), None => ColumnData::String(None), @@ -491,82 +563,102 @@ impl<'a> QueryParameter<'a> for Option<&'_ str> { } } impl<'a> QueryParameter<'_> for NaiveDate { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn 
as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for NaiveTime { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for NaiveDateTime { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for DateTime { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn 
as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option> { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for DateTime { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for Option> { - #[cfg(feature = "tokio-postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + #[cfg(feature = "tokio-postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] fn as_sqlserver_param(&self) -> ColumnData<'_> { + #[cfg(feature = "tiberius")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index c025dd97..c38ea10a 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -2,7 +2,7 @@ use std::fmt::Display; use async_trait::async_trait; use canyon_connection::canyon_database_connector::DatabaseConnection; -use canyon_connection::{CACHED_DATABASE_CONN, get_database_connection}; +use canyon_connection::{get_database_connection, CACHED_DATABASE_CONN}; use crate::bounds::QueryParameter; use crate::mapper::RowMapper; @@ -43,29 +43,31 @@ pub trait Transaction { params: Z, datasource_name: &'a str, ) -> Result, Box<(dyn 
std::error::Error + Sync + Send + 'static)>> - where - S: AsRef + Display + Sync + Send + 'a, - Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a + where + S: AsRef + Display + Sync + Send + 'a, + Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, { let mut guarded_cache = CACHED_DATABASE_CONN.lock().await; let database_conn = get_database_connection(datasource_name, &mut guarded_cache); match database_conn { - #[cfg(feature = "tokio-postgres")] DatabaseConnection::Postgres(_) => { + #[cfg(feature = "tokio-postgres")] + DatabaseConnection::Postgres(_) => { postgres_query_launcher::launch::( database_conn, stmt.to_string(), params.as_ref(), ) - .await - }, - #[cfg(feature = "tiberius")] DatabaseConnection::SqlServer(_) => { + .await + } + #[cfg(feature = "tiberius")] + DatabaseConnection::SqlServer(_) => { sqlserver_query_launcher::launch::( database_conn, &mut stmt.to_string(), params, ) - .await + .await } } } @@ -120,9 +122,7 @@ where datasource_name: &'a str, ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>>; - async fn insert<'a>( - &mut self, - ) -> Result<(), Box>; + async fn insert<'a>(&mut self) -> Result<(), Box>; async fn insert_datasource<'a>( &mut self, @@ -164,8 +164,8 @@ where #[cfg(feature = "tokio-postgres")] mod postgres_query_launcher { use crate::bounds::QueryParameter; - use canyon_connection::canyon_database_connector::DatabaseConnection; use crate::rows::CanyonRows; + use canyon_connection::canyon_database_connector::DatabaseConnection; pub async fn launch<'a, T>( db_conn: &DatabaseConnection, @@ -188,14 +188,13 @@ mod postgres_query_launcher { } } - #[cfg(feature = "tiberius")] mod sqlserver_query_launcher { + use crate::rows::CanyonRows; use crate::{ bounds::QueryParameter, canyon_connection::{canyon_database_connector::DatabaseConnection, tiberius::Query}, }; - use crate::rows::CanyonRows; pub async fn launch<'a, T, Z>( db_conn: &mut DatabaseConnection, @@ -203,7 +202,7 @@ mod sqlserver_query_launcher { 
params: Z, ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> where - Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a + Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, { // Re-generate de insert statement to adequate it to the SQL SERVER syntax to retrieve the PK value(s) after insert if stmt.contains("RETURNING") { @@ -236,6 +235,8 @@ mod sqlserver_query_launcher { .into_results() .await?; - Ok(CanyonRows::Tiberius(_results.into_iter().flatten().collect())) + Ok(CanyonRows::Tiberius( + _results.into_iter().flatten().collect(), + )) } } diff --git a/canyon_crud/src/mapper.rs b/canyon_crud/src/mapper.rs index 7996c0fc..cc944f1d 100644 --- a/canyon_crud/src/mapper.rs +++ b/canyon_crud/src/mapper.rs @@ -1,5 +1,7 @@ -#[cfg(feature = "tokio-postgres")] use canyon_connection::tokio_postgres; -#[cfg(feature = "tiberius")] use canyon_connection::tiberius; +#[cfg(feature = "tiberius")] +use canyon_connection::tiberius; +#[cfg(feature = "tokio-postgres")] +use canyon_connection::tokio_postgres; use crate::crud::Transaction; @@ -7,6 +9,8 @@ use crate::crud::Transaction; /// from some supported database in Canyon-SQL into a user's defined /// type `T` pub trait RowMapper>: Sized { - #[cfg(feature = "tokio-postgres")] fn deserialize_postgresql(row: &tokio_postgres::Row) -> T; - #[cfg(feature = "tiberius")] fn deserialize_sqlserver(row: &tiberius::Row) -> T; + #[cfg(feature = "tokio-postgres")] + fn deserialize_postgresql(row: &tokio_postgres::Row) -> T; + #[cfg(feature = "tiberius")] + fn deserialize_sqlserver(row: &tiberius::Row) -> T; } diff --git a/canyon_crud/src/query_elements/query_builder.rs b/canyon_crud/src/query_elements/query_builder.rs index 9d102f87..92146542 100644 --- a/canyon_crud/src/query_elements/query_builder.rs +++ b/canyon_crud/src/query_elements/query_builder.rs @@ -173,7 +173,8 @@ where self.query.params.to_vec(), self.datasource_name, ) - .await?.into_results::()) + .await? 
+ .into_results::()) } pub fn r#where>(&mut self, r#where: Z, op: impl Operator) { diff --git a/canyon_crud/src/rows.rs b/canyon_crud/src/rows.rs index 5dfbf5bf..02322971 100644 --- a/canyon_crud/src/rows.rs +++ b/canyon_crud/src/rows.rs @@ -1,6 +1,6 @@ -use std::marker::PhantomData; use crate::crud::Transaction; use crate::mapper::RowMapper; +use std::marker::PhantomData; /// Lightweight wrapper over the collection of results of the different crates /// supported by Canyon-SQL. @@ -9,9 +9,11 @@ use crate::mapper::RowMapper; /// operations that are too difficult or to ugly to implement in the macros that /// will call the query method of Crud. pub enum CanyonRows { - #[cfg(feature = "tokio-postgres")] Postgres(Vec), - #[cfg(feature = "tiberius")] Tiberius(Vec), - UnusableTypeMarker(PhantomData) + #[cfg(feature = "tokio-postgres")] + Postgres(Vec), + #[cfg(feature = "tiberius")] + Tiberius(Vec), + UnusableTypeMarker(PhantomData), } impl CanyonRows { @@ -19,7 +21,7 @@ impl CanyonRows { pub fn get_postgres_rows(&self) -> &Vec { match self { Self::Postgres(v) => v, - _ => panic!("This branch will never ever should be reachable") + _ => panic!("This branch will never ever should be reachable"), } } @@ -27,22 +29,21 @@ impl CanyonRows { pub fn get_tiberius_rows(&self) -> &Vec { match self { Self::Tiberius(v) => v, - _ => panic!("This branch will never ever should be reachable") + _ => panic!("This branch will never ever should be reachable"), } } /// Consumes `self` and returns the wrapped [`std::vec::Vec`] with the instances of T - pub fn into_results>(self) -> Vec where T: Transaction { + pub fn into_results>(self) -> Vec + where + T: Transaction, + { match self { - #[cfg(feature = "tokio-postgres")] Self::Postgres(v) => v - .iter() - .map(|row| Z::deserialize_postgresql(row)) - .collect(), - #[cfg(feature = "tiberius")] Self::Tiberius(v) => v - .iter() - .map(|row| Z::deserialize_sqlserver(&row)) - .collect(), - _ => panic!("This branch will never ever should be 
reachable") + #[cfg(feature = "tokio-postgres")] + Self::Postgres(v) => v.iter().map(|row| Z::deserialize_postgresql(row)).collect(), + #[cfg(feature = "tiberius")] + Self::Tiberius(v) => v.iter().map(|row| Z::deserialize_sqlserver(&row)).collect(), + _ => panic!("This branch will never ever should be reachable"), } } } @@ -87,4 +88,3 @@ impl CanyonRows { // } // } // } - diff --git a/canyon_macros/src/query_operations/insert.rs b/canyon_macros/src/query_operations/insert.rs index 64579a55..18cf89f8 100644 --- a/canyon_macros/src/query_operations/insert.rs +++ b/canyon_macros/src/query_operations/insert.rs @@ -94,7 +94,6 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri } }; - quote! { /// Inserts into a database entity the current data in `self`, generating a new /// entry (row), returning the `PRIMARY KEY` = `self.` with the specified diff --git a/canyon_observer/src/migrations/handler.rs b/canyon_observer/src/migrations/handler.rs index 407dc76e..87dbd6a1 100644 --- a/canyon_observer/src/migrations/handler.rs +++ b/canyon_observer/src/migrations/handler.rs @@ -1,6 +1,6 @@ use canyon_connection::{datasources::Migrations as MigrationsStatus, DATASOURCES}; -use partialdebug::placeholder::PartialDebug; use canyon_crud::rows::CanyonRows; +use partialdebug::placeholder::PartialDebug; use crate::{ canyon_crud::{ @@ -53,7 +53,8 @@ impl Migrations { // Tracked entities that must be migrated whenever Canyon starts let schema_status = Self::fetch_database(&datasource.name, datasource.get_db_type()).await; - let database_tables_schema_info = Self::map_rows(schema_status, datasource.get_db_type()); + let database_tables_schema_info = + Self::map_rows(schema_status, datasource.get_db_type()); // We filter the tables from the schema that aren't Canyon entities let mut user_database_tables = vec![]; @@ -89,14 +90,19 @@ impl Migrations { db_type: DatabaseType, ) -> CanyonRows { let query = match db_type { - #[cfg(feature = "tokio-postgres")] 
DatabaseType::PostgreSql => constants::postgresql_queries::FETCH_PUBLIC_SCHEMA, - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => constants::mssql_queries::FETCH_PUBLIC_SCHEMA, + #[cfg(feature = "tokio-postgres")] + DatabaseType::PostgreSql => constants::postgresql_queries::FETCH_PUBLIC_SCHEMA, + #[cfg(feature = "tiberius")] + DatabaseType::SqlServer => constants::mssql_queries::FETCH_PUBLIC_SCHEMA, }; - Self::query(query, [], datasource_name).await - .unwrap_or_else(|_| {panic!( - "Error querying the schema information for the datasource: {datasource_name}" - )}) + Self::query(query, [], datasource_name) + .await + .unwrap_or_else(|_| { + panic!( + "Error querying the schema information for the datasource: {datasource_name}" + ) + }) } /// Handler for parse the result of query the information of some database schema, @@ -104,9 +110,11 @@ impl Migrations { /// the data well organized for every entity present on that schema fn map_rows(db_results: CanyonRows, db_type: DatabaseType) -> Vec { match db_results { - #[cfg(feature = "tokio-postgres")] CanyonRows::Postgres(v) => Self::process_tp_rows(v, db_type), - #[cfg(feature = "tiberius")] CanyonRows::Tiberius(v) => Self::process_tib_rows(v, db_type), - _ => panic!() + #[cfg(feature = "tokio-postgres")] + CanyonRows::Postgres(v) => Self::process_tp_rows(v, db_type), + #[cfg(feature = "tiberius")] + CanyonRows::Tiberius(v) => Self::process_tib_rows(v, db_type), + _ => panic!(), } } @@ -195,7 +203,10 @@ impl Migrations { } #[cfg(feature = "tokio-postgres")] - fn process_tp_rows(db_results: Vec, db_type: DatabaseType) -> Vec { + fn process_tp_rows( + db_results: Vec, + db_type: DatabaseType, + ) -> Vec { let mut schema_info: Vec = Vec::new(); for res_row in db_results.iter() { let unique_table = schema_info @@ -225,7 +236,10 @@ impl Migrations { } #[cfg(feature = "tiberius")] - fn process_tib_rows(db_results: Vec, db_type: DatabaseType) -> Vec { + fn process_tib_rows( + db_results: Vec, + db_type: DatabaseType, + 
) -> Vec { let mut schema_info: Vec = Vec::new(); for res_row in db_results.iter() { let unique_table = schema_info @@ -255,21 +269,27 @@ impl Migrations { } } - #[cfg(feature = "tokio-postgres")] fn get_table_name_from_tp_row(res_row: &tokio_postgres::Row) -> String { res_row.get::<&str, String>("table_name") } #[cfg(feature = "tiberius")] fn get_table_name_from_tib_row(res_row: &tiberius::Row) -> String { - res_row.get::<&str, &str>("table_name").unwrap_or_default().to_string() + res_row + .get::<&str, &str>("table_name") + .unwrap_or_default() + .to_string() } -fn check_for_table_name(table: &&mut TableMetadata, db_type: DatabaseType, res_row: &dyn Row) -> bool { +fn check_for_table_name( + table: &&mut TableMetadata, + db_type: DatabaseType, + res_row: &dyn Row, +) -> bool { match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => - table.table_name == res_row.get_postgres::<&str>("table_name"), - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => - table.table_name == res_row.get_mssql::<&str>("table_name") + #[cfg(feature = "tokio-postgres")] + DatabaseType::PostgreSql => table.table_name == res_row.get_postgres::<&str>("table_name"), + #[cfg(feature = "tiberius")] + DatabaseType::SqlServer => table.table_name == res_row.get_mssql::<&str>("table_name"), } } diff --git a/canyon_observer/src/migrations/information_schema.rs b/canyon_observer/src/migrations/information_schema.rs index d93c7007..06eb6a3e 100644 --- a/canyon_observer/src/migrations/information_schema.rs +++ b/canyon_observer/src/migrations/information_schema.rs @@ -1,5 +1,7 @@ -#[cfg(feature = "tokio-postgres")] use canyon_connection::tokio_postgres::types::Type as TP_TYP; -#[cfg(feature = "tiberius")] use canyon_connection::tiberius::ColumnType as TIB_TY; +#[cfg(feature = "tiberius")] +use canyon_connection::tiberius::ColumnType as TIB_TY; +#[cfg(feature = "tokio-postgres")] +use canyon_connection::tokio_postgres::types::Type as TP_TYP; use 
canyon_crud::bounds::{Column, ColumnType, Row, RowOperations}; /// Model that represents the database entities that belongs to the current schema. @@ -41,18 +43,24 @@ impl ColumnMetadataTypeValue { /// Retrieves the value stored in a [`Column`] for a passed [`Row`] pub fn get_value(row: &dyn Row, col: &Column) -> Self { match col.column_type() { - #[cfg(feature = "tokio-postgres")] ColumnType::Postgres(v) => { + #[cfg(feature = "tokio-postgres")] + ColumnType::Postgres(v) => { match *v { - TP_TYP::NAME | TP_TYP::VARCHAR | TP_TYP::TEXT => { - Self::StringValue(row.get_postgres_opt::<&str>(col.name()).map(|opt| opt.to_owned())) - } + TP_TYP::NAME | TP_TYP::VARCHAR | TP_TYP::TEXT => Self::StringValue( + row.get_postgres_opt::<&str>(col.name()) + .map(|opt| opt.to_owned()), + ), TP_TYP::INT4 => Self::IntValue(row.get_postgres_opt::(col.name())), _ => Self::NoneValue, // TODO watchout this one } } - #[cfg(feature = "tiberius")] ColumnType::SqlServer(v) => match v { + #[cfg(feature = "tiberius")] + ColumnType::SqlServer(v) => match v { TIB_TY::NChar | TIB_TY::NVarchar | TIB_TY::BigChar | TIB_TY::BigVarChar => { - Self::StringValue(row.get_mssql_opt::<&str>(col.name()).map(|opt| opt.to_owned())) + Self::StringValue( + row.get_mssql_opt::<&str>(col.name()) + .map(|opt| opt.to_owned()), + ) } TIB_TY::Int2 | TIB_TY::Int4 | TIB_TY::Int8 | TIB_TY::Intn => { Self::IntValue(row.get_mssql_opt::(col.name())) diff --git a/canyon_observer/src/migrations/memory.rs b/canyon_observer/src/migrations/memory.rs index 8c8fe8f4..d22383a0 100644 --- a/canyon_observer/src/migrations/memory.rs +++ b/canyon_observer/src/migrations/memory.rs @@ -73,7 +73,8 @@ impl CanyonMemory { // Manually maps the results let mut db_rows = Vec::new(); - #[cfg(feature = "tokio-postgres")] { + #[cfg(feature = "tokio-postgres")] + { let mem_results: &Vec = res.get_postgres_rows(); for row in mem_results { let db_row = CanyonMemoryRow { @@ -85,14 +86,18 @@ impl CanyonMemory { db_rows.push(db_row); } } - 
#[cfg(feature = "tiberius")] { + #[cfg(feature = "tiberius")] + { let mem_results: &Vec = res.get_tiberius_rows(); for row in mem_results { let db_row = CanyonMemoryRow { id: row.get::("id").unwrap(), filepath: row.get::<&str, &str>("filepath").unwrap().to_string(), struct_name: row.get::<&str, &str>("struct_name").unwrap().to_string(), - declared_table_name: row.get::<&str, &str>("declared_table_name").unwrap().to_string(), + declared_table_name: row + .get::<&str, &str>("declared_table_name") + .unwrap() + .to_string(), }; db_rows.push(db_row); } @@ -104,7 +109,7 @@ impl CanyonMemory { async fn populate_memory( datasource: &DatasourceConfig, canyon_entities: &[CanyonRegisterEntity<'_>], - db_rows: Vec + db_rows: Vec, ) -> CanyonMemory { let mut mem = Self { memory: Vec::new(), @@ -240,8 +245,10 @@ impl CanyonMemory { #[cfg(not(cargo_check))] async fn create_memory(datasource_name: &str, database_type: &DatabaseType) { let query = match database_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => constants::postgresql_queries::CANYON_MEMORY_TABLE, - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => constants::mssql_queries::CANYON_MEMORY_TABLE + #[cfg(feature = "tokio-postgres")] + DatabaseType::PostgreSql => constants::postgresql_queries::CANYON_MEMORY_TABLE, + #[cfg(feature = "tiberius")] + DatabaseType::SqlServer => constants::mssql_queries::CANYON_MEMORY_TABLE, }; Self::query(query, [], datasource_name) diff --git a/canyon_observer/src/migrations/processor.rs b/canyon_observer/src/migrations/processor.rs index ff89bdc9..e068a3d4 100644 --- a/canyon_observer/src/migrations/processor.rs +++ b/canyon_observer/src/migrations/processor.rs @@ -246,7 +246,8 @@ impl MigrationsProcessor { ))); } - #[cfg(feature = "tiberius")] fn drop_column_not_null( + #[cfg(feature = "tiberius")] + fn drop_column_not_null( &mut self, table_name: &str, column_name: String, @@ -644,7 +645,8 @@ impl MigrationsHelper { canyon_register_entity_field: 
&CanyonRegisterEntityField, current_column_metadata: &ColumnMetadata, ) -> bool { - #[cfg(feature = "tokio-postgres")] { + #[cfg(feature = "tokio-postgres")] + { if db_type == DatabaseType::PostgreSql { return canyon_register_entity_field .to_postgres_alter_syntax() @@ -652,7 +654,8 @@ impl MigrationsHelper { == current_column_metadata.datatype; } } - #[cfg(feature = "tiberius")] { + #[cfg(feature = "tiberius")] + { if db_type == DatabaseType::SqlServer { // TODO Search a better way to get the datatype without useless info (like "VARCHAR(MAX)") return canyon_register_entity_field @@ -883,11 +886,15 @@ enum ColumnOperation { AlterColumnType(String, CanyonRegisterEntityField), AlterColumnDropNotNull(String, CanyonRegisterEntityField), // SQL server specific operation - SQL server can't drop a NOT NULL column - #[cfg(feature = "tiberius")] DropNotNullBeforeDropColumn(String, String, String), - #[cfg(feature = "tokio-postgres")] AlterColumnSetNotNull(String, CanyonRegisterEntityField), + #[cfg(feature = "tiberius")] + DropNotNullBeforeDropColumn(String, String, String), + #[cfg(feature = "tokio-postgres")] + AlterColumnSetNotNull(String, CanyonRegisterEntityField), // TODO if implement through annotations, modify for both GENERATED {ALWAYS, BY DEFAULT} - #[cfg(feature = "tokio-postgres")] AlterColumnAddIdentity(String, CanyonRegisterEntityField), - #[cfg(feature = "tokio-postgres")] AlterColumnDropIdentity(String, CanyonRegisterEntityField), + #[cfg(feature = "tokio-postgres")] + AlterColumnAddIdentity(String, CanyonRegisterEntityField), + #[cfg(feature = "tokio-postgres")] + AlterColumnDropIdentity(String, CanyonRegisterEntityField), } impl Transaction for ColumnOperation {} diff --git a/canyon_observer/src/migrations/register_types.rs b/canyon_observer/src/migrations/register_types.rs index 57ef6e39..b0cbf48d 100644 --- a/canyon_observer/src/migrations/register_types.rs +++ b/canyon_observer/src/migrations/register_types.rs @@ -1,8 +1,10 @@ use regex::Regex; 
+#[cfg(feature = "tokio-postgres")] +use crate::constants::postgresql_type; +#[cfg(feature = "tiberius")] +use crate::constants::sqlserver_type; use crate::constants::{regex_patterns, rust_type, NUMERIC_PK_DATATYPE}; -#[cfg(feature = "tokio-postgres")] use crate::constants::postgresql_type; -#[cfg(feature = "tiberius")] use crate::constants::sqlserver_type; /// This file contains `Rust` types that represents an entry on the `CanyonRegister` /// where `Canyon` tracks the user types that has to manage @@ -28,7 +30,8 @@ pub struct CanyonRegisterEntityField { impl CanyonRegisterEntityField { /// Return the postgres datatype and parameters to create a column for a given rust type - #[cfg(feature = "tokio-postgres")] pub fn to_postgres_syntax(&self) -> String { + #[cfg(feature = "tokio-postgres")] + pub fn to_postgres_syntax(&self) -> String { let rust_type_clean = self.field_type.replace(' ', ""); match rust_type_clean.as_str() { @@ -74,7 +77,8 @@ impl CanyonRegisterEntityField { /// Return the postgres datatype and parameters to create a column for a given rust type /// for Microsoft SQL Server - #[cfg(feature = "tiberius")] pub fn to_sqlserver_syntax(&self) -> String { + #[cfg(feature = "tiberius")] + pub fn to_sqlserver_syntax(&self) -> String { let rust_type_clean = self.field_type.replace(' ', ""); match rust_type_clean.as_str() { @@ -120,7 +124,8 @@ impl CanyonRegisterEntityField { } } - #[cfg(feature = "tokio-postgres")] pub fn to_postgres_alter_syntax(&self) -> String { + #[cfg(feature = "tokio-postgres")] + pub fn to_postgres_alter_syntax(&self) -> String { let mut rust_type_clean = self.field_type.replace(' ', ""); let rs_type_is_optional = self.field_type.to_uppercase().starts_with("OPTION"); @@ -162,7 +167,8 @@ impl CanyonRegisterEntityField { } } - #[cfg(feature = "tiberius")] pub fn to_sqlserver_alter_syntax(&self) -> String { + #[cfg(feature = "tiberius")] + pub fn to_sqlserver_alter_syntax(&self) -> String { let mut rust_type_clean = 
self.field_type.replace(' ', ""); let rs_type_is_optional = self.field_type.to_uppercase().starts_with("OPTION"); diff --git a/src/lib.rs b/src/lib.rs index cb8be374..20dac23f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,11 +3,10 @@ /// Here it's where all the available functionalities and features /// reaches the top most level, grouping them and making them visible /// through this crate, building the *public API* of the library - extern crate canyon_connection; extern crate canyon_crud; -extern crate canyon_observer; extern crate canyon_macros; +extern crate canyon_observer; // extern crate async_trait; @@ -43,8 +42,10 @@ pub mod query { /// Reexport the available database clients within Canyon pub mod db_clients { - #[cfg(feature = "postgres")] pub use canyon_connection::tokio_postgres; - #[cfg(feature = "mssql")] pub use canyon_connection::tiberius; + #[cfg(feature = "mssql")] + pub use canyon_connection::tiberius; + #[cfg(feature = "postgres")] + pub use canyon_connection::tokio_postgres; } /// Reexport the needed runtime dependencies diff --git a/tests/crud/mod.rs b/tests/crud/mod.rs index 7526c8f6..c0f6afee 100644 --- a/tests/crud/mod.rs +++ b/tests/crud/mod.rs @@ -29,10 +29,10 @@ use canyon_sql::runtime::tokio_util::compat::TokioAsyncWriteCompatExt; #[canyon_sql::macros::canyon_tokio_test] #[ignore] fn initialize_sql_server_docker_instance() { - canyon_sql::runtime::futures::executor::block_on(async { - static CONN_STR: &str = - "server=tcp:localhost,1434;User Id=SA;Password=SqlServer-10;TrustServerCertificate=true"; + static CONN_STR: &str = + "server=tcp:localhost,1434;User Id=SA;Password=SqlServer-10;TrustServerCertificate=true"; + canyon_sql::runtime::futures::executor::block_on(async { let config = Config::from_ado_string(CONN_STR).unwrap(); let tcp = TcpStream::connect(config.get_addr()).await.unwrap(); From f6c4408b884b10fee463bc51f1b452e837f7f6ac Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Tue, 18 Apr 2023 18:12:12 +0200 Subject: [PATCH 
14/23] Discarded the integrated auth cfg feature flag, it will be included directly with the mssql feature --- canyon_connection/src/canyon_database_connector.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index 5330e5e4..1ef74c6b 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -107,8 +107,6 @@ impl DatabaseConnection { crate::datasources::SqlServerAuth::Basic { username, password } => { AuthMethod::sql_server(username, password) } - #[cfg(feature = "mssql-integrated-auth")] - // TODO pending, or remove the cfg? crate::datasources::SqlServerAuth::Integrated => AuthMethod::Integrated, }, }); From c7893e346db62380987c58d6d256f8bdd5ae15d8 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Wed, 19 Apr 2023 12:39:21 +0200 Subject: [PATCH 15/23] Addressing issues on the macros with the new code structure - [select - count] --- Cargo.toml | 6 ++-- tests/canyon.toml => canyon.toml | 0 canyon_connection/Cargo.toml | 4 +-- canyon_connection/src/datasources.rs | 2 +- canyon_crud/src/crud.rs | 2 +- canyon_crud/src/rows.rs | 11 ++++++ canyon_macros/src/query_operations/insert.rs | 20 +++++++---- canyon_macros/src/query_operations/select.rs | 38 ++++++++++++-------- canyon_observer/src/migrations/memory.rs | 1 - src/lib.rs | 16 ++++++--- tests/migrations/mod.rs | 4 +-- 11 files changed, 69 insertions(+), 35 deletions(-) rename tests/canyon.toml => canyon.toml (100%) diff --git a/Cargo.toml b/Cargo.toml index 501c3fa6..baef5115 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,13 +1,14 @@ [package] name = "canyon_sql" -version = "0.2.0" +version.workspace = true +edition.workspace = true [workspace] members = [ "canyon_connection", + "canyon_crud", "canyon_observer", "canyon_macros", - "canyon_crud", "tests" ] @@ -18,6 +19,7 @@ canyon_macros = { version = "0.2.0", path = "canyon_macros" } 
canyon_observer = { version = "0.2.0", path = "canyon_observer" } canyon_crud = { version = "0.2.0", path = "canyon_crud" } canyon_connection = { version = "0.2.0", path = "canyon_connection" } +async-trait = "0.1.68" [workspace.dependencies] canyon_crud = { version = "0.2.0", path = "canyon_crud" } diff --git a/tests/canyon.toml b/canyon.toml similarity index 100% rename from tests/canyon.toml rename to canyon.toml diff --git a/canyon_connection/Cargo.toml b/canyon_connection/Cargo.toml index 77fea282..15aea6d9 100644 --- a/canyon_connection/Cargo.toml +++ b/canyon_connection/Cargo.toml @@ -12,8 +12,8 @@ description.workspace = true [dependencies] tokio = { workspace = true } tokio-util = { workspace = true } -tokio-postgres = { workspace = true, optional = true } -tiberius = { workspace = true, optional = true} +tokio-postgres = { version = "0.7.2", features = ["with-chrono-0_4"], optional = true } +tiberius = { version = "0.12.1", features = ["tds73", "chrono", "integrated-auth-gssapi"], optional = true } futures = { workspace = true } indexmap = { workspace = true } async-std = { workspace = true } diff --git a/canyon_connection/src/datasources.rs b/canyon_connection/src/datasources.rs index c2be5aa5..feb50e61 100644 --- a/canyon_connection/src/datasources.rs +++ b/canyon_connection/src/datasources.rs @@ -6,7 +6,7 @@ use crate::canyon_database_connector::DatabaseType; #[test] fn load_ds_config_from_array() { const CONFIG_FILE_MOCK_ALT: &str = r#" - [canyon_sql_root] + [canyon_sql] datasources = [ {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' }, diff --git 
a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index c38ea10a..53d4728a 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -50,7 +50,7 @@ pub trait Transaction { let mut guarded_cache = CACHED_DATABASE_CONN.lock().await; let database_conn = get_database_connection(datasource_name, &mut guarded_cache); - match database_conn { + match *database_conn { #[cfg(feature = "tokio-postgres")] DatabaseConnection::Postgres(_) => { postgres_query_launcher::launch::( diff --git a/canyon_crud/src/rows.rs b/canyon_crud/src/rows.rs index 02322971..bbf096b1 100644 --- a/canyon_crud/src/rows.rs +++ b/canyon_crud/src/rows.rs @@ -46,6 +46,17 @@ impl CanyonRows { _ => panic!("This branch will never ever should be reachable"), } } + + /// Returns the number of elements present on the wrapped collection + pub fn len(&self) -> usize { + match self { + #[cfg(feature = "tokio-postgres")] + Self::Postgres(v) => v.len(), + #[cfg(feature = "tiberius")] + Self::Tiberius(v) => v.len(), + _ => panic!("This branch will never ever should be reachable"), + } + } } // #[cfg(feature = "tokio-postgres")] diff --git a/canyon_macros/src/query_operations/insert.rs b/canyon_macros/src/query_operations/insert.rs index 18cf89f8..213315e5 100644 --- a/canyon_macros/src/query_operations/insert.rs +++ b/canyon_macros/src/query_operations/insert.rs @@ -49,21 +49,25 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri #primary_key ); - let rows = <#ty as canyon_sql::crud::Transaction<#ty>>::query_for_rows( + let rows = <#ty as canyon_sql::crud::Transaction<#ty>>::query( stmt, values, datasource_name ).await?; - match rows { - #[cfg(feature = "tokio-postgres")] Self::Postgres(mut v) => { + Ok(()) + + /* match rows { + // #[cfg(feature = "tokio-postgres")] + canyon_sql::connection::Postgres(mut v) => { instance.#pk_ident = v .get(idx) .expect("Failed getting the returned IDs for a multi insert") .get::<&str, #pk_type>(#primary_key); Ok(()) }, - #[cfg(feature = 
"tiberius")] Self::Tiberius(mut v) => { + // #[cfg(feature = "tiberius")] + canyon_sql::connection::Tiberius(mut v) => { instance.#pk_ident = v .get(idx) .expect("Failed getting the returned IDs for a multi insert") @@ -72,7 +76,7 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri Ok(()) }, _ => panic!() // TODO remove when the generics will be refactored - } + } */ } } else { quote! { @@ -441,7 +445,8 @@ pub fn generate_multiple_insert_tokens( let mut mapped_fields: String = String::new(); - #multi_insert_transaction + // #multi_insert_transaction + Ok(()) } /// Inserts multiple instances of some type `T` into its related table with the specified @@ -497,7 +502,8 @@ pub fn generate_multiple_insert_tokens( let mut mapped_fields: String = String::new(); - #multi_insert_transaction + // #multi_insert_transaction + Ok(()) } } } diff --git a/canyon_macros/src/query_operations/select.rs b/canyon_macros/src/query_operations/select.rs index c5875f03..fd136fd4 100644 --- a/canyon_macros/src/query_operations/select.rs +++ b/canyon_macros/src/query_operations/select.rs @@ -27,6 +27,7 @@ pub fn generate_find_all_unchecked_tokens( "" ).await .unwrap() + .into_results::<#ty>() } /// Performs a `SELECT * FROM table_name`, where `table_name` it's @@ -44,6 +45,7 @@ pub fn generate_find_all_unchecked_tokens( datasource_name ).await .unwrap() + .into_results::<#ty>() } } } @@ -71,6 +73,7 @@ pub fn generate_find_all_tokens( &[], "" ).await? + .into_results::<#ty>() ) } @@ -95,6 +98,7 @@ pub fn generate_find_all_tokens( &[], datasource_name ).await? + .into_results::<#ty>() ) } } @@ -148,27 +152,33 @@ pub fn generate_count_tokens( let result_handling = quote! 
{ match count { - #[cfg(feature = "tokio-postgres")] Self::Postgres(mut v) => Ok( + // #[cfg(feature = "tokio-postgres")] + canyon_sql::crud::CanyonRows::Postgres(mut v) => Ok( v.remove(0).get::<&str, i64>("count") ), - #[cfg(feature = "tiberius")] Self::Tiberius(mut v) => + // #[cfg(feature = "tiberius")] + canyon_sql::crud::CanyonRows::Tiberius(mut v) => v.remove(0) - .get::("count") - .ok_or(format!("Failure in the COUNT query for MSSQL for: {}", #ty_str).into()), + .get::(0) + .map(|c| c as i64) + .ok_or(format!("Failure in the COUNT query for MSSQL for: {}", #ty_str).into()) + .into(), _ => panic!() // TODO remove when the generics will be refactored } + // Ok(0 as i64) }; quote! { /// Performs a COUNT(*) query over some table, returning a [`Result`] rather than panicking, /// wrapping a possible success or error coming from the database async fn count() -> Result> { - <#ty as canyon_sql::crud::Transaction<#ty>>::query( + let count = <#ty as canyon_sql::crud::Transaction<#ty>>::query( #stmt, &[], "" - ).await - .get_by_idx_and_key(0, "count") + ).await?; + + #result_handling } /// Performs a COUNT(*) query over some table, returning a [`Result`] rather than panicking, @@ -226,12 +236,11 @@ pub fn generate_find_by_pk_tokens( }; } - // TOODO no tenemos number_OF_results let result_handling = quote! { match result { n if n.len() == 0 => Ok(None), _ => Ok( - Some(result.remove(0)) + Some(result.into_results::<#ty>().remove(0)) ) } }; @@ -334,10 +343,9 @@ pub fn generate_find_by_foreign_key_tokens( ); let result_handler = quote! 
{ match result { - // TODO Noof n if n.len() == 0 => Ok(None), _ => Ok(Some( - result.remove(0) + result.into_results::<#fk_ty>().remove(0) )) } }; @@ -422,8 +430,8 @@ pub fn generate_find_by_reverse_foreign_key_tokens( #quoted_method_signature { let lookage_value = value.get_fk_column(#column) - .expect(format!( - "Column: {:?} not found in type: {:?}", #column, #table + .expect(format!( + "Column: {:?} not found in type: {:?}", #column, #table ).as_str()); let stmt = format!( @@ -436,7 +444,7 @@ pub fn generate_find_by_reverse_foreign_key_tokens( stmt, &[lookage_value], "" - ).await?) + ).await?.into_results::<#ty>()) } }, )); @@ -464,7 +472,7 @@ pub fn generate_find_by_reverse_foreign_key_tokens( stmt, &[lookage_value], datasource_name - ).await?) + ).await?.into_results::<#ty>()) } }, )); diff --git a/canyon_observer/src/migrations/memory.rs b/canyon_observer/src/migrations/memory.rs index d22383a0..912bf6dc 100644 --- a/canyon_observer/src/migrations/memory.rs +++ b/canyon_observer/src/migrations/memory.rs @@ -242,7 +242,6 @@ impl CanyonMemory { } /// Generates, if not exists the `canyon_memory` table - #[cfg(not(cargo_check))] async fn create_memory(datasource_name: &str, database_type: &DatabaseType) { let query = match database_type { #[cfg(feature = "tokio-postgres")] diff --git a/src/lib.rs b/src/lib.rs index 20dac23f..8253cea9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -8,7 +8,7 @@ extern crate canyon_crud; extern crate canyon_macros; extern crate canyon_observer; -// extern crate async_trait; +extern crate async_trait; /// Reexported elements to the root of the public API pub mod migrations { @@ -21,10 +21,17 @@ pub use canyon_macros::main; /// Public API for the `Canyon-SQL` proc-macros, and for the external ones pub mod macros { - // pub use async_trait::*; + pub use async_trait::*; pub use canyon_macros::*; } +/// connection module serves to reexport the public elements of the `canyon_connection` crate, +/// exposing them through the public API +pub 
mod connection { + pub use canyon_connection::canyon_database_connector::DatabaseConnection::Postgres; + pub use canyon_connection::canyon_database_connector::DatabaseConnection::SqlServer; +} + /// Crud module serves to reexport the public elements of the `canyon_crud` crate, /// exposing them through the public API pub mod crud { @@ -32,6 +39,7 @@ pub mod crud { pub use canyon_crud::crud::*; pub use canyon_crud::mapper::*; pub use canyon_crud::DatabaseType; + pub use canyon_crud::rows::CanyonRows; } /// Re-exports the query elements from the `crud`crate @@ -42,10 +50,10 @@ pub mod query { /// Reexport the available database clients within Canyon pub mod db_clients { - #[cfg(feature = "mssql")] - pub use canyon_connection::tiberius; #[cfg(feature = "postgres")] pub use canyon_connection::tokio_postgres; + #[cfg(feature = "mssql")] + pub use canyon_connection::tiberius; } /// Reexport the needed runtime dependencies diff --git a/tests/migrations/mod.rs b/tests/migrations/mod.rs index 17b19c35..12dfa111 100644 --- a/tests/migrations/mod.rs +++ b/tests/migrations/mod.rs @@ -9,8 +9,8 @@ fn test_migrations_postgresql_status_query() { let results = Migrations::query(constants::FETCH_PUBLIC_SCHEMA, [], constants::PSQL_DS).await; assert!(results.is_ok()); - let public_schema_info = results.ok().unwrap().postgres; - + let res = results.unwrap(); + let public_schema_info = res.get_postgres_rows(); let first_result = public_schema_info.get(0).unwrap(); assert_eq!(first_result.columns().get(0).unwrap().name(), "table_name"); From b576757541bdf9cb896454ac7186d37869bbb225 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Wed, 19 Apr 2023 12:58:11 +0200 Subject: [PATCH 16/23] Addressing issues on the macros with the new code structure - [insert - insert(s) + multi_insert(s)] --- canyon_macros/src/query_operations/insert.rs | 45 +++++++++----------- canyon_macros/src/query_operations/select.rs | 1 - src/lib.rs | 4 +- tests/canyon.toml | 24 +++++++++++ 4 files changed, 46 
insertions(+), 28 deletions(-) create mode 100644 tests/canyon.toml diff --git a/canyon_macros/src/query_operations/insert.rs b/canyon_macros/src/query_operations/insert.rs index 213315e5..063df25c 100644 --- a/canyon_macros/src/query_operations/insert.rs +++ b/canyon_macros/src/query_operations/insert.rs @@ -55,28 +55,26 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri datasource_name ).await?; - Ok(()) - - /* match rows { + match rows { // #[cfg(feature = "tokio-postgres")] - canyon_sql::connection::Postgres(mut v) => { - instance.#pk_ident = v - .get(idx) - .expect("Failed getting the returned IDs for a multi insert") + canyon_sql::crud::CanyonRows::Postgres(mut v) => { + self.#pk_ident = v + .get(0) + .expect("Failed getting the returned IDs for an insert") .get::<&str, #pk_type>(#primary_key); Ok(()) }, // #[cfg(feature = "tiberius")] - canyon_sql::connection::Tiberius(mut v) => { - instance.#pk_ident = v - .get(idx) - .expect("Failed getting the returned IDs for a multi insert") + canyon_sql::crud::CanyonRows::Tiberius(mut v) => { + self.#pk_ident = v + .get(0) + .expect("Failed getting the returned IDs for an insert") .get::<#pk_type, &str>(#primary_key) .expect("SQL Server primary key type failed to be set as value"); Ok(()) }, _ => panic!() // TODO remove when the generics will be refactored - } */ + } } } else { quote! 
{ @@ -296,7 +294,8 @@ pub fn generate_multiple_insert_tokens( match result { Ok(res) => { match res { - #[cfg(feature = "tokio-postgres")] Self::Postgres(mut v) => { + // #[cfg(feature = "tokio-postgres")] + canyon_sql::crud::CanyonRows::Postgres(mut v) => { for (idx, instance) in instances.iter_mut().enumerate() { instance.#pk_ident = v .get(idx) @@ -306,7 +305,8 @@ pub fn generate_multiple_insert_tokens( Ok(()) }, - #[cfg(feature = "tiberius")] Self::Tiberius(mut v) => { + // #[cfg(feature = "tiberius")] + canyon_sql::crud::CanyonRows::Tiberius(mut v) => { for (idx, instance) in instances.iter_mut().enumerate() { instance.#pk_ident = v .get(idx) @@ -315,7 +315,7 @@ pub fn generate_multiple_insert_tokens( .expect("SQL Server primary key type failed to be set as value"); } - Ok(()), + Ok(()) }, _ => panic!() // TODO remove when the generics will be refactored } @@ -378,16 +378,13 @@ pub fn generate_multiple_insert_tokens( } } - let result = <#ty as canyon_sql::crud::Transaction<#ty>>::query( + <#ty as canyon_sql::crud::Transaction<#ty>>::query( stmt, v_arr, datasource_name - ).await; + ).await?; - match result { - Ok(res) => Ok(()), - Err(e) => Err(e) - } + Ok(()) } }; @@ -445,8 +442,7 @@ pub fn generate_multiple_insert_tokens( let mut mapped_fields: String = String::new(); - // #multi_insert_transaction - Ok(()) + #multi_insert_transaction } /// Inserts multiple instances of some type `T` into its related table with the specified @@ -502,8 +498,7 @@ pub fn generate_multiple_insert_tokens( let mut mapped_fields: String = String::new(); - // #multi_insert_transaction - Ok(()) + #multi_insert_transaction } } } diff --git a/canyon_macros/src/query_operations/select.rs b/canyon_macros/src/query_operations/select.rs index fd136fd4..3086aea5 100644 --- a/canyon_macros/src/query_operations/select.rs +++ b/canyon_macros/src/query_operations/select.rs @@ -165,7 +165,6 @@ pub fn generate_count_tokens( .into(), _ => panic!() // TODO remove when the generics will be refactored 
} - // Ok(0 as i64) }; quote! { diff --git a/src/lib.rs b/src/lib.rs index 8253cea9..3aaf6ea9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,8 +28,8 @@ pub mod macros { /// connection module serves to reexport the public elements of the `canyon_connection` crate, /// exposing them through the public API pub mod connection { - pub use canyon_connection::canyon_database_connector::DatabaseConnection::Postgres; - pub use canyon_connection::canyon_database_connector::DatabaseConnection::SqlServer; + #[cfg(feature = "postgres")] pub use canyon_connection::canyon_database_connector::DatabaseConnection::Postgres; + #[cfg(feature = "mssql")] pub use canyon_connection::canyon_database_connector::DatabaseConnection::SqlServer; } /// Crud module serves to reexport the public elements of the `canyon_crud` crate, diff --git a/tests/canyon.toml b/tests/canyon.toml new file mode 100644 index 00000000..0b0614a4 --- /dev/null +++ b/tests/canyon.toml @@ -0,0 +1,24 @@ +[canyon_sql] + +[[canyon_sql.datasources]] +name = 'postgres_docker' + +[canyon_sql.datasources.auth] +postgresql = { basic = { username = 'postgres', password = 'postgres'}} + +[canyon_sql.datasources.properties] +host = 'localhost' +port = 5438 +db_name = 'postgres' + + +[[canyon_sql.datasources]] +name = 'sqlserver_docker' + +[canyon_sql.datasources.auth] +sqlserver = { basic = { username = 'sa', password = 'SqlServer-10' } } + +[canyon_sql.datasources.properties] +host = 'localhost' +port = 1434 +db_name = 'master' From 8558c9862edfac66071a99cb30c5baebfa94e8dc Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Wed, 19 Apr 2023 18:08:37 +0200 Subject: [PATCH 17/23] WIP - Reworked the way of how we was thinking about our features. 
Trying to conditionaly compilate client code by feature enabled --- Cargo.toml | 10 +- canyon_connection/Cargo.toml | 5 + .../src/canyon_database_connector.rs | 34 +-- canyon_connection/src/datasources.rs | 20 +- canyon_connection/src/lib.rs | 8 +- canyon_crud/Cargo.toml | 8 +- canyon_crud/src/bounds.rs | 196 +++++++++--------- canyon_crud/src/crud.rs | 8 +- canyon_crud/src/mapper.rs | 8 +- canyon_crud/src/rows.rs | 20 +- canyon_macros/Cargo.toml | 6 +- canyon_macros/src/query_operations/insert.rs | 53 +++-- canyon_macros/src/query_operations/select.rs | 4 +- canyon_observer/Cargo.toml | 5 + canyon_observer/src/constants.rs | 4 +- canyon_observer/src/migrations/handler.rs | 10 +- .../src/migrations/information_schema.rs | 4 +- canyon_observer/src/migrations/memory.rs | 4 +- canyon_observer/src/migrations/processor.rs | 32 +-- .../src/migrations/register_types.rs | 6 +- src/lib.rs | 7 +- tests/Cargo.toml | 7 +- 22 files changed, 246 insertions(+), 213 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index baef5115..e7cf2500 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,15 +15,17 @@ members = [ [dependencies] # Project crates -canyon_macros = { version = "0.2.0", path = "canyon_macros" } -canyon_observer = { version = "0.2.0", path = "canyon_observer" } +canyon_connection = { version = "0.2.0", path = "canyon_connection", optional = true } canyon_crud = { version = "0.2.0", path = "canyon_crud" } -canyon_connection = { version = "0.2.0", path = "canyon_connection" } +canyon_observer = { version = "0.2.0", path = "canyon_observer" } +canyon_macros = { version = "0.2.0", path = "canyon_macros" } async-trait = "0.1.68" [workspace.dependencies] canyon_crud = { version = "0.2.0", path = "canyon_crud" } canyon_connection = { version = "0.2.0", path = "canyon_connection" } +canyon_observer = { version = "0.2.0", path = "canyon_observer" } +canyon_macros = { version = "0.2.0", path = "canyon_macros" } tokio = { version = "1.27.0", features = ["full"] } tokio-util = { 
version = "0.7.4", features = ["compat"] } @@ -51,5 +53,5 @@ description = "A Rust ORM and QueryBuilder" [features] default = ["postgres"] -postgres = ["canyon_connection/tokio-postgres", "canyon_crud/tokio-postgres", "canyon_observer/tokio-postgres"] +postgres = ["canyon_connection/postgres", "canyon_connection/tokio-postgres"] mssql = ["canyon_connection/tiberius", "canyon_crud/tiberius", "canyon_observer/tiberius"] diff --git a/canyon_connection/Cargo.toml b/canyon_connection/Cargo.toml index 15aea6d9..9bdacbc2 100644 --- a/canyon_connection/Cargo.toml +++ b/canyon_connection/Cargo.toml @@ -20,3 +20,8 @@ async-std = { workspace = true } lazy_static = { workspace = true } serde = { workspace = true, features = ["derive"] } toml = { workspace = true } + +[features] +default = ["postgres"] +postgres = ["tokio-postgres"] +mssql = ["tiberius"] diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index 1ef74c6b..bcb07d8e 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -1,10 +1,10 @@ use serde::Deserialize; -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] use async_std::net::TcpStream; -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] use tiberius::{AuthMethod, Config}; -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] use tokio_postgres::{Client, NoTls}; use crate::datasources::DatasourceConfig; @@ -13,22 +13,22 @@ use crate::datasources::DatasourceConfig; #[derive(Deserialize, Debug, Eq, PartialEq, Clone, Copy)] pub enum DatabaseType { #[serde(alias = "postgres", alias = "postgresql")] - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] PostgreSql, #[serde(alias = "sqlserver", alias = "mssql")] - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] SqlServer, } /// A connection with a `PostgreSQL` database -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] pub 
struct PostgreSqlConnection { pub client: Client, // pub connection: Connection, // TODO Hold it, or not to hold it... that's the question! } /// A connection with a `SqlServer` database -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] pub struct SqlServerConnection { pub client: &'static mut tiberius::Client, } @@ -38,9 +38,9 @@ pub struct SqlServerConnection { /// process them and generates a pool of 1 to 1 database connection for /// every datasource defined. pub enum DatabaseConnection { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] Postgres(PostgreSqlConnection), - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] SqlServer(SqlServerConnection), } @@ -52,7 +52,7 @@ impl DatabaseConnection { datasource: &DatasourceConfig, ) -> Result> { match datasource.get_db_type() { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => { let (username, password) = match &datasource.auth { crate::datasources::Auth::Postgres(postgres_auth) => match postgres_auth { @@ -60,7 +60,7 @@ impl DatabaseConnection { (username.as_str(), password.as_str()) } }, - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] crate::datasources::Auth::SqlServer(_) => { panic!("Found SqlServer auth configuration for a PostgreSQL datasource") } @@ -89,7 +89,7 @@ impl DatabaseConnection { // connection: new_connection, })) } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] DatabaseType::SqlServer => { let mut config = Config::new(); @@ -99,7 +99,7 @@ impl DatabaseConnection { // Using SQL Server authentication. 
config.authentication(match &datasource.auth { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] crate::datasources::Auth::Postgres(_) => { panic!("Found PostgreSQL auth configuration for a SqlServer database") } @@ -139,7 +139,7 @@ impl DatabaseConnection { } } - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] #[allow(unreachable_patterns)] pub fn postgres_connection(&self) -> Option<&PostgreSqlConnection> { match self { @@ -148,7 +148,7 @@ impl DatabaseConnection { } } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] pub fn sqlserver_connection(&mut self) -> Option<&mut SqlServerConnection> { match self { DatabaseConnection::SqlServer(conn) => Some(conn), @@ -176,12 +176,12 @@ mod database_connection_handler { let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT) .expect("A failure happened retrieving the [canyon_sql_root] section"); - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] assert_eq!( config.canyon_sql.datasources[0].get_db_type(), DatabaseType::PostgreSql ); - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] assert_eq!( config.canyon_sql.datasources[1].get_db_type(), DatabaseType::SqlServer diff --git a/canyon_connection/src/datasources.rs b/canyon_connection/src/datasources.rs index feb50e61..2dd3913c 100644 --- a/canyon_connection/src/datasources.rs +++ b/canyon_connection/src/datasources.rs @@ -35,11 +35,11 @@ fn load_ds_config_from_array() { assert_eq!(ds_0.properties.db_name, "triforce"); assert_eq!(ds_0.properties.migrations, Some(Migrations::Enabled)); - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] assert_eq!(ds_1.name, "SqlServerDS"); - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] assert_eq!(ds_1.get_db_type(), DatabaseType::SqlServer); - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] assert_eq!( ds_1.auth, Auth::SqlServer(SqlServerAuth::Basic { @@ -52,7 +52,7 @@ fn load_ds_config_from_array() { 
assert_eq!(ds_1.properties.db_name, "triforce2"); assert_eq!(ds_1.properties.migrations, Some(Migrations::Disabled)); - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] assert_eq!(_ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)) } /// @@ -76,9 +76,9 @@ pub struct DatasourceConfig { impl DatasourceConfig { pub fn get_db_type(&self) -> DatabaseType { match self.auth { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] Auth::Postgres(_) => DatabaseType::PostgreSql, - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] Auth::SqlServer(_) => DatabaseType::SqlServer, } } @@ -87,22 +87,22 @@ impl DatasourceConfig { #[derive(Deserialize, Debug, Clone, PartialEq)] pub enum Auth { #[serde(alias = "PostgreSQL", alias = "postgresql", alias = "postgres")] - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] Postgres(PostgresAuth), #[serde(alias = "SqlServer", alias = "sqlserver", alias = "mssql")] - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] SqlServer(SqlServerAuth), } #[derive(Deserialize, Debug, Clone, PartialEq)] -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] pub enum PostgresAuth { #[serde(alias = "Basic", alias = "basic")] Basic { username: String, password: String }, } #[derive(Deserialize, Debug, Clone, PartialEq)] -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] pub enum SqlServerAuth { #[serde(alias = "Basic", alias = "basic")] Basic { username: String, password: String }, diff --git a/canyon_connection/src/lib.rs b/canyon_connection/src/lib.rs index 64960537..699341d6 100644 --- a/canyon_connection/src/lib.rs +++ b/canyon_connection/src/lib.rs @@ -1,13 +1,13 @@ -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] pub extern crate async_std; pub extern crate futures; pub extern crate lazy_static; -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] pub extern crate tiberius; pub extern crate tokio; -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] pub 
extern crate tokio_postgres; -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] pub extern crate tokio_util; pub mod canyon_database_connector; diff --git a/canyon_crud/Cargo.toml b/canyon_crud/Cargo.toml index 6f6ee233..33213847 100644 --- a/canyon_crud/Cargo.toml +++ b/canyon_crud/Cargo.toml @@ -21,8 +21,8 @@ async-trait = { version = "0.1.50" } canyon_connection = { workspace = true, path = "../canyon_connection" } -#[features] -#default = ["postgres"] -#postgres = ["tokio", "tokio-postgres", "tokio-util"] -#mssql = ["tiberius", "tiberius/tds73", "tiberius/chrono"] +[features] +default = ["postgres"] +postgres = ["tokio-postgres"] +mssql = ["tiberius"] #mssql-integrated-auth = ["mssql", "tiberius/integrated-auth-gssapi"] \ No newline at end of file diff --git a/canyon_crud/src/bounds.rs b/canyon_crud/src/bounds.rs index 6a6842ba..7ed83f1b 100644 --- a/canyon_crud/src/bounds.rs +++ b/canyon_crud/src/bounds.rs @@ -3,10 +3,10 @@ use crate::{ mapper::RowMapper, }; -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] use canyon_connection::tokio_postgres::{self, types::ToSql}; -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] use canyon_connection::tiberius::{self, ColumnData, IntoSql}; use chrono::{DateTime, FixedOffset, NaiveDate, NaiveDateTime, NaiveTime, Utc}; @@ -86,14 +86,14 @@ pub trait Row { fn as_any(&self) -> &dyn Any; } -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] impl Row for tokio_postgres::Row { fn as_any(&self) -> &dyn Any { self } } -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] impl Row for tiberius::Row { fn as_any(&self) -> &dyn Any { self @@ -116,8 +116,8 @@ impl<'a> Column<'a> { } // pub fn type_(&'a self) -> &'_ dyn Type { // match (*self).type_ { - // #[cfg(feature = "tokio-postgres")] ColumnType::Postgres(v) => v as &'a dyn Type, - // #[cfg(feature = "tiberius")] ColumnType::SqlServer(v) => v as &'a dyn Type, + // #[cfg(feature = "postgres")] ColumnType::Postgres(v) => v as &'a 
dyn Type, + // #[cfg(feature = "mssql")] ColumnType::SqlServer(v) => v as &'a dyn Type, // } // } } @@ -125,13 +125,13 @@ impl<'a> Column<'a> { pub trait Type { fn as_any(&self) -> &dyn Any; } -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] impl Type for tokio_postgres::types::Type { fn as_any(&self) -> &dyn Any { self } } -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] impl Type for tiberius::ColumnType { fn as_any(&self) -> &dyn Any { self @@ -140,27 +140,27 @@ impl Type for tiberius::ColumnType { /// Wrapper over the dependencies Column's types pub enum ColumnType { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] Postgres(tokio_postgres::types::Type), - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] SqlServer(tiberius::ColumnType), } pub trait RowOperations { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn get_postgres<'a, Output>(&'a self, col_name: &'a str) -> Output where Output: tokio_postgres::types::FromSql<'a>; - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn get_mssql<'a, Output>(&'a self, col_name: &'a str) -> Output where Output: tiberius::FromSql<'a>; - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn get_postgres_opt<'a, Output>(&'a self, col_name: &'a str) -> Option where Output: tokio_postgres::types::FromSql<'a>; - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn get_mssql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option where Output: tiberius::FromSql<'a>; @@ -169,7 +169,7 @@ pub trait RowOperations { } impl RowOperations for &dyn Row { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn get_postgres<'a, Output>(&'a self, col_name: &'a str) -> Output where Output: tokio_postgres::types::FromSql<'a>, @@ -179,7 +179,7 @@ impl RowOperations for &dyn Row { }; panic!() // TODO into result and propagate } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn get_mssql<'a, Output>(&'a self, 
col_name: &'a str) -> Output where Output: tiberius::FromSql<'a>, @@ -192,7 +192,7 @@ impl RowOperations for &dyn Row { panic!() // TODO into result and propagate } - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn get_postgres_opt<'a, Output>(&'a self, col_name: &'a str) -> Option where Output: tokio_postgres::types::FromSql<'a>, @@ -203,7 +203,7 @@ impl RowOperations for &dyn Row { panic!() // TODO into result and propagate } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn get_mssql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option where Output: tiberius::FromSql<'a>, @@ -217,7 +217,7 @@ impl RowOperations for &dyn Row { fn columns(&self) -> Vec { let mut cols = vec![]; - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] { if self.as_any().is::() { self.as_any() @@ -233,7 +233,7 @@ impl RowOperations for &dyn Row { }) } } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] { if self.as_any().is::() { self.as_any() @@ -257,9 +257,9 @@ impl RowOperations for &dyn Row { /// Defines a trait for represent type bounds against the allowed /// data types supported by Canyon to be used as query parameters. 
pub trait QueryParameter<'a>: std::fmt::Debug + Sync + Send { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync); - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_>; } @@ -271,7 +271,7 @@ pub trait QueryParameter<'a>: std::fmt::Debug + Sync + Send { /// a collection of [`QueryParameter<'a>`], in order to allow a workflow /// that is not dependent of the specific type of the argument that holds /// the query parameters of the database connectors -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] impl<'a> IntoSql<'a> for &'a dyn QueryParameter<'a> { fn into_sql(self) -> ColumnData<'a> { self.as_sqlserver_param() @@ -279,131 +279,131 @@ impl<'a> IntoSql<'a> for &'a dyn QueryParameter<'a> { } impl<'a> QueryParameter<'a> for bool { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::Bit(Some(*self)) } } impl<'a> QueryParameter<'a> for i16 { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(*self)) } } impl<'a> QueryParameter<'a> for &i16 { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn 
as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(*self) } } impl<'a> QueryParameter<'a> for Option<&i16> { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I16(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for i32 { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(*self)) } } impl<'a> QueryParameter<'a> for &i32 { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(*self) } } impl<'a> QueryParameter<'a> for Option<&i32> { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I32(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for f32 { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some(*self)) } } impl<'a> QueryParameter<'a> for &f32 { - 
#[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(*self) } } impl<'a> QueryParameter<'a> for Option<&f32> { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F32(Some( *self.expect("Error on an f32 value on QueryParameter<'_>"), @@ -411,42 +411,42 @@ impl<'a> QueryParameter<'a> for Option<&f32> { } } impl<'a> QueryParameter<'a> for f64 { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some(*self)) } } impl<'a> QueryParameter<'a> for &f64 { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(*self) } } impl<'a> 
QueryParameter<'a> for Option<&f64> { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::F64(Some( *self.expect("Error on an f64 value on QueryParameter<'_>"), @@ -454,71 +454,71 @@ impl<'a> QueryParameter<'a> for Option<&f64> { } } impl<'a> QueryParameter<'a> for i64 { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(*self)) } } impl<'a> QueryParameter<'a> for &i64 { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(**self)) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(*self) } } impl<'a> QueryParameter<'a> for Option<&i64> { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::I64(Some(*self.unwrap())) } } impl<'a> QueryParameter<'a> for String { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { 
ColumnData::String(Some(std::borrow::Cow::Owned(self.to_owned()))) } } impl<'a> QueryParameter<'a> for &String { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Borrowed(self))) } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { match self { Some(string) => ColumnData::String(Some(std::borrow::Cow::Owned(string.to_owned()))), @@ -527,11 +527,11 @@ impl<'a> QueryParameter<'a> for Option { } } impl<'a> QueryParameter<'a> for Option<&String> { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { match self { Some(string) => ColumnData::String(Some(std::borrow::Cow::Borrowed(string))), @@ -540,21 +540,21 @@ impl<'a> QueryParameter<'a> for Option<&String> { } } impl<'a> QueryParameter<'_> for &'_ str { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { ColumnData::String(Some(std::borrow::Cow::Borrowed(*self))) } } impl<'a> QueryParameter<'a> for Option<&'_ str> { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { match *self { Some(str) => 
ColumnData::String(Some(std::borrow::Cow::Borrowed(str))), @@ -563,101 +563,101 @@ impl<'a> QueryParameter<'a> for Option<&'_ str> { } } impl<'a> QueryParameter<'_> for NaiveDate { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for NaiveTime { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for NaiveDateTime { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for DateTime { - #[cfg(feature = 
"tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'a> for Option> { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for DateTime { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } } impl<'a> QueryParameter<'_> for Option> { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn as_sqlserver_param(&self) -> ColumnData<'_> { self.into_sql() } diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index 53d4728a..b06edfda 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -51,7 +51,7 @@ pub trait Transaction { let database_conn = get_database_connection(datasource_name, &mut guarded_cache); match *database_conn { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] DatabaseConnection::Postgres(_) => { postgres_query_launcher::launch::( database_conn, @@ -60,7 +60,7 @@ pub trait Transaction { ) .await } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] DatabaseConnection::SqlServer(_) => { sqlserver_query_launcher::launch::( database_conn, @@ -161,7 +161,7 @@ where fn delete_query_datasource(datasource_name: &str) -> DeleteQueryBuilder<'_, T>; } -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] mod postgres_query_launcher { use 
crate::bounds::QueryParameter; use crate::rows::CanyonRows; @@ -188,7 +188,7 @@ mod postgres_query_launcher { } } -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] mod sqlserver_query_launcher { use crate::rows::CanyonRows; use crate::{ diff --git a/canyon_crud/src/mapper.rs b/canyon_crud/src/mapper.rs index cc944f1d..66cb91d2 100644 --- a/canyon_crud/src/mapper.rs +++ b/canyon_crud/src/mapper.rs @@ -1,6 +1,6 @@ -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] use canyon_connection::tiberius; -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] use canyon_connection::tokio_postgres; use crate::crud::Transaction; @@ -9,8 +9,8 @@ use crate::crud::Transaction; /// from some supported database in Canyon-SQL into a user's defined /// type `T` pub trait RowMapper>: Sized { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn deserialize_postgresql(row: &tokio_postgres::Row) -> T; - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn deserialize_sqlserver(row: &tiberius::Row) -> T; } diff --git a/canyon_crud/src/rows.rs b/canyon_crud/src/rows.rs index bbf096b1..056e136a 100644 --- a/canyon_crud/src/rows.rs +++ b/canyon_crud/src/rows.rs @@ -9,15 +9,15 @@ use std::marker::PhantomData; /// operations that are too difficult or to ugly to implement in the macros that /// will call the query method of Crud. 
pub enum CanyonRows { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] Postgres(Vec), - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] Tiberius(Vec), UnusableTypeMarker(PhantomData), } impl CanyonRows { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] pub fn get_postgres_rows(&self) -> &Vec { match self { Self::Postgres(v) => v, @@ -25,7 +25,7 @@ impl CanyonRows { } } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] pub fn get_tiberius_rows(&self) -> &Vec { match self { Self::Tiberius(v) => v, @@ -39,9 +39,9 @@ impl CanyonRows { T: Transaction, { match self { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] Self::Postgres(v) => v.iter().map(|row| Z::deserialize_postgresql(row)).collect(), - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] Self::Tiberius(v) => v.iter().map(|row| Z::deserialize_sqlserver(&row)).collect(), _ => panic!("This branch will never ever should be reachable"), } @@ -50,16 +50,16 @@ impl CanyonRows { /// Returns the number of elements present on the wrapped collection pub fn len(&self) -> usize { match self { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] Self::Postgres(v) => v.len(), - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] Self::Tiberius(v) => v.len(), _ => panic!("This branch will never ever should be reachable"), } } } -// #[cfg(feature = "tokio-postgres")] +// #[cfg(feature = "postgres")] // impl IntoIterator for CanyonRows { // type Item = tokio_postgres::Row; // type IntoIter = std::vec::IntoIter; @@ -72,7 +72,7 @@ impl CanyonRows { // } // } // -// #[cfg(feature = "tiberius")] +// #[cfg(feature = "mssql")] // impl IntoIterator for CanyonRows { // type Item = tiberius::Row; // type IntoIter = std::vec::IntoIter; diff --git a/canyon_macros/Cargo.toml b/canyon_macros/Cargo.toml index a501440d..d095e7ff 100755 --- a/canyon_macros/Cargo.toml +++ b/canyon_macros/Cargo.toml @@ -19,6 +19,6 @@ proc-macro2 = "1.0.27" 
futures = "0.3.21"
 tokio = { version = "1.9.0", features = ["full"] }
 
-canyon_observer = { version = "0.2.0", path = "../canyon_observer" }
-canyon_crud = { version = "0.2.0", path = "../canyon_crud" }
-canyon_connection = { version = "0.2.0", path = "../canyon_connection" }
+canyon_observer = { workspace = true, path = "../canyon_observer" }
+canyon_crud = { workspace = true, path = "../canyon_crud" }
+canyon_connection = { workspace = true, path = "../canyon_connection" }
diff --git a/canyon_macros/src/query_operations/insert.rs b/canyon_macros/src/query_operations/insert.rs
index 063df25c..3093bc4c 100644
--- a/canyon_macros/src/query_operations/insert.rs
+++ b/canyon_macros/src/query_operations/insert.rs
@@ -36,8 +36,36 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri
         .find(|(i, _t)| Some(i.to_string()) == primary_key);
     let insert_transaction = if let Some(pk_data) = &pk_ident_type {
         let pk_ident = &pk_data.0;
+        let pk_ident_str = &pk_data.0.to_string();
         let pk_type = &pk_data.1;
 
+        let postgres_db_conn_match_arm = if cfg!(feature = "postgres") {
+            quote! {
+                canyon_sql::crud::CanyonRows::Postgres(mut v) => {
+                    self.#pk_ident = v
+                        .get(0)
+                        .expect("Failed getting the returned IDs for an insert")
+                        .get::<&str, #pk_type>(#primary_key);
+                    Ok(())
+                }
+            }
+        } else {
+            println!("No feature postgres detected for: {:?}", pk_ident_str);
+            quote! {}
+        };
+
+        let mssql_db_conn_match_arm = if cfg!(feature = "mssql") {
+            quote! {
+                canyon_sql::crud::CanyonRows::Tiberius(mut v) => {
+                    self.#pk_ident = v
+                        .get(0)
+                        .expect("Failed getting the returned IDs for an insert")
+                        .get::<#pk_type, &str>(#primary_key).expect("SQL Server primary key type failed to be set as value");
+                    Ok(())
+                }
+            }
+        } else { quote! {} };
+
         quote!
{ #remove_pk_value_from_fn_entry; @@ -56,24 +84,9 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri ).await?; match rows { - // #[cfg(feature = "tokio-postgres")] - canyon_sql::crud::CanyonRows::Postgres(mut v) => { - self.#pk_ident = v - .get(0) - .expect("Failed getting the returned IDs for an insert") - .get::<&str, #pk_type>(#primary_key); - Ok(()) - }, - // #[cfg(feature = "tiberius")] - canyon_sql::crud::CanyonRows::Tiberius(mut v) => { - self.#pk_ident = v - .get(0) - .expect("Failed getting the returned IDs for an insert") - .get::<#pk_type, &str>(#primary_key) - .expect("SQL Server primary key type failed to be set as value"); - Ok(()) - }, - _ => panic!() // TODO remove when the generics will be refactored + #postgres_db_conn_match_arm + #mssql_db_conn_match_arm + _ => panic!("Reached the panic match arm of insert for the DatabaseConnection type") // TODO remove when the generics will be refactored } } } else { @@ -294,7 +307,7 @@ pub fn generate_multiple_insert_tokens( match result { Ok(res) => { match res { - // #[cfg(feature = "tokio-postgres")] + // #[cfg(feature = "postgres")] canyon_sql::crud::CanyonRows::Postgres(mut v) => { for (idx, instance) in instances.iter_mut().enumerate() { instance.#pk_ident = v @@ -305,7 +318,7 @@ pub fn generate_multiple_insert_tokens( Ok(()) }, - // #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] canyon_sql::crud::CanyonRows::Tiberius(mut v) => { for (idx, instance) in instances.iter_mut().enumerate() { instance.#pk_ident = v diff --git a/canyon_macros/src/query_operations/select.rs b/canyon_macros/src/query_operations/select.rs index 3086aea5..55006e5d 100644 --- a/canyon_macros/src/query_operations/select.rs +++ b/canyon_macros/src/query_operations/select.rs @@ -152,11 +152,11 @@ pub fn generate_count_tokens( let result_handling = quote! 
{ match count { - // #[cfg(feature = "tokio-postgres")] + // #[cfg(feature = "postgres")] canyon_sql::crud::CanyonRows::Postgres(mut v) => Ok( v.remove(0).get::<&str, i64>("count") ), - // #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] canyon_sql::crud::CanyonRows::Tiberius(mut v) => v.remove(0) .get::(0) diff --git a/canyon_observer/Cargo.toml b/canyon_observer/Cargo.toml index 9f59b093..2282f494 100644 --- a/canyon_observer/Cargo.toml +++ b/canyon_observer/Cargo.toml @@ -25,3 +25,8 @@ syn = { version = "1.0.86", features = ["full", "parsing"] } quote = "1.0.9" partialdebug = "0.2.0" +[features] +default = ["postgres"] +postgres = ["tokio-postgres"] +mssql = ["tiberius"] +#mssql-integrated-auth = ["mssql", "tiberius/integrated-auth-gssapi"] diff --git a/canyon_observer/src/constants.rs b/canyon_observer/src/constants.rs index ae746e6e..997a4bb3 100644 --- a/canyon_observer/src/constants.rs +++ b/canyon_observer/src/constants.rs @@ -1,6 +1,6 @@ pub const NUMERIC_PK_DATATYPE: [&str; 6] = ["i16", "u16", "i32", "u32", "i64", "u64"]; -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] pub mod postgresql_queries { pub static CANYON_MEMORY_TABLE: &str = "CREATE TABLE IF NOT EXISTS canyon_memory ( id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY, @@ -144,7 +144,7 @@ pub mod rust_type { pub const OPT_NAIVE_DATE_TIME: &str = "Option"; } -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] pub mod postgresql_type { pub const INT_8: &str = "int8"; pub const SMALL_INT: &str = "smallint"; diff --git a/canyon_observer/src/migrations/handler.rs b/canyon_observer/src/migrations/handler.rs index 87dbd6a1..cd09ee28 100644 --- a/canyon_observer/src/migrations/handler.rs +++ b/canyon_observer/src/migrations/handler.rs @@ -90,7 +90,7 @@ impl Migrations { db_type: DatabaseType, ) -> CanyonRows { let query = match db_type { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => 
constants::postgresql_queries::FETCH_PUBLIC_SCHEMA, #[cfg(feature = "tiberius")] DatabaseType::SqlServer => constants::mssql_queries::FETCH_PUBLIC_SCHEMA, @@ -110,7 +110,7 @@ impl Migrations { /// the data well organized for every entity present on that schema fn map_rows(db_results: CanyonRows, db_type: DatabaseType) -> Vec { match db_results { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] CanyonRows::Postgres(v) => Self::process_tp_rows(v, db_type), #[cfg(feature = "tiberius")] CanyonRows::Tiberius(v) => Self::process_tib_rows(v, db_type), @@ -202,7 +202,7 @@ impl Migrations { }; } - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] fn process_tp_rows( db_results: Vec, db_type: DatabaseType, @@ -269,7 +269,7 @@ impl Migrations { } } -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] fn get_table_name_from_tp_row(res_row: &tokio_postgres::Row) -> String { res_row.get::<&str, String>("table_name") } @@ -287,7 +287,7 @@ fn check_for_table_name( res_row: &dyn Row, ) -> bool { match db_type { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => table.table_name == res_row.get_postgres::<&str>("table_name"), #[cfg(feature = "tiberius")] DatabaseType::SqlServer => table.table_name == res_row.get_mssql::<&str>("table_name"), diff --git a/canyon_observer/src/migrations/information_schema.rs b/canyon_observer/src/migrations/information_schema.rs index 06eb6a3e..98ad01b9 100644 --- a/canyon_observer/src/migrations/information_schema.rs +++ b/canyon_observer/src/migrations/information_schema.rs @@ -1,6 +1,6 @@ #[cfg(feature = "tiberius")] use canyon_connection::tiberius::ColumnType as TIB_TY; -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] use canyon_connection::tokio_postgres::types::Type as TP_TYP; use canyon_crud::bounds::{Column, ColumnType, Row, RowOperations}; @@ -43,7 +43,7 @@ impl ColumnMetadataTypeValue { /// Retrieves the value stored in a [`Column`] 
for a passed [`Row`] pub fn get_value(row: &dyn Row, col: &Column) -> Self { match col.column_type() { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] ColumnType::Postgres(v) => { match *v { TP_TYP::NAME | TP_TYP::VARCHAR | TP_TYP::TEXT => Self::StringValue( diff --git a/canyon_observer/src/migrations/memory.rs b/canyon_observer/src/migrations/memory.rs index 912bf6dc..a07e60d5 100644 --- a/canyon_observer/src/migrations/memory.rs +++ b/canyon_observer/src/migrations/memory.rs @@ -73,7 +73,7 @@ impl CanyonMemory { // Manually maps the results let mut db_rows = Vec::new(); - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] { let mem_results: &Vec = res.get_postgres_rows(); for row in mem_results { @@ -244,7 +244,7 @@ impl CanyonMemory { /// Generates, if not exists the `canyon_memory` table async fn create_memory(datasource_name: &str, database_type: &DatabaseType) { let query = match database_type { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => constants::postgresql_queries::CANYON_MEMORY_TABLE, #[cfg(feature = "tiberius")] DatabaseType::SqlServer => constants::mssql_queries::CANYON_MEMORY_TABLE, diff --git a/canyon_observer/src/migrations/processor.rs b/canyon_observer/src/migrations/processor.rs index e068a3d4..a4627bdc 100644 --- a/canyon_observer/src/migrations/processor.rs +++ b/canyon_observer/src/migrations/processor.rs @@ -645,7 +645,7 @@ impl MigrationsHelper { canyon_register_entity_field: &CanyonRegisterEntityField, current_column_metadata: &ColumnMetadata, ) -> bool { - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] { if db_type == DatabaseType::PostgreSql { return canyon_register_entity_field @@ -765,7 +765,7 @@ impl DatabaseOperation for TableOperation { let stmt = match self { TableOperation::CreateTable(table_name, table_fields) => { match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => { + #[cfg(feature = "postgres")] 
DatabaseType::PostgreSql => { format!( "CREATE TABLE \"{table_name}\" ({});", table_fields @@ -800,7 +800,7 @@ impl DatabaseOperation for TableOperation { TableOperation::AlterTableName(old_table_name, new_table_name) => { match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!("ALTER TABLE {old_table_name} RENAME TO {new_table_name};"), #[cfg(feature = "tiberius")] DatabaseType::SqlServer => /* @@ -829,7 +829,7 @@ impl DatabaseOperation for TableOperation { column_to_reference, ) => { match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( "ALTER TABLE {table_name} ADD CONSTRAINT {foreign_key_name} \ FOREIGN KEY ({column_foreign_key}) REFERENCES {table_to_reference} ({column_to_reference});" @@ -841,7 +841,7 @@ impl DatabaseOperation for TableOperation { TableOperation::DeleteTableForeignKey(table_with_foreign_key, constraint_name) => { match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( "ALTER TABLE {table_with_foreign_key} DROP CONSTRAINT {constraint_name};", ), @@ -852,7 +852,7 @@ impl DatabaseOperation for TableOperation { TableOperation::AddTablePrimaryKey(table_name, entity_field) => { match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( "ALTER TABLE \"{table_name}\" ADD PRIMARY KEY (\"{}\");", entity_field.field_name @@ -864,7 +864,7 @@ impl DatabaseOperation for TableOperation { TableOperation::DeleteTablePrimaryKey(table_name, primary_key_name) => { match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;"), #[cfg(feature = 
"tiberius")] DatabaseType::SqlServer => format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;") @@ -888,12 +888,12 @@ enum ColumnOperation { // SQL server specific operation - SQL server can't drop a NOT NULL column #[cfg(feature = "tiberius")] DropNotNullBeforeDropColumn(String, String, String), - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] AlterColumnSetNotNull(String, CanyonRegisterEntityField), // TODO if implement through annotations, modify for both GENERATED {ALWAYS, BY DEFAULT} - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] AlterColumnAddIdentity(String, CanyonRegisterEntityField), - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] AlterColumnDropIdentity(String, CanyonRegisterEntityField), } @@ -907,7 +907,7 @@ impl DatabaseOperation for ColumnOperation { let stmt = match self { ColumnOperation::CreateColumn(table_name, entity_field) => match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( "ALTER TABLE \"{}\" ADD COLUMN \"{}\" {};", table_name, @@ -928,7 +928,7 @@ impl DatabaseOperation for ColumnOperation { }, ColumnOperation::AlterColumnType(table_name, entity_field) => match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" TYPE {};", entity_field.field_name, entity_field.to_postgres_alter_syntax() @@ -938,7 +938,7 @@ impl DatabaseOperation for ColumnOperation { } ColumnOperation::AlterColumnDropNotNull(table_name, entity_field) => match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!("ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" DROP NOT NULL;", entity_field.field_name), #[cfg(feature = "tiberius")] DatabaseType::SqlServer => format!( @@ 
-965,11 +965,11 @@ impl DatabaseOperation for ColumnOperation { "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" SET NOT NULL;", entity_field.field_name ), - #[cfg(feature = "tokio-postgres")] ColumnOperation::AlterColumnAddIdentity(table_name, entity_field) => format!( + #[cfg(feature = "postgres")] ColumnOperation::AlterColumnAddIdentity(table_name, entity_field) => format!( "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" ADD GENERATED ALWAYS AS IDENTITY;", entity_field.field_name ), - #[cfg(feature = "tokio-postgres")] ColumnOperation::AlterColumnDropIdentity(table_name, entity_field) => format!( + #[cfg(feature = "postgres")] ColumnOperation::AlterColumnDropIdentity(table_name, entity_field) => format!( "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" DROP IDENTITY;", entity_field.field_name ), }; @@ -995,7 +995,7 @@ impl DatabaseOperation for SequenceOperation { let stmt = match self { SequenceOperation::ModifySequence(table_name, entity_field) => { match db_type { - #[cfg(feature = "tokio-postgres")] DatabaseType::PostgreSql => + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( "SELECT setval(pg_get_serial_sequence('\"{table_name}\"', '{}'), max(\"{}\")) from \"{table_name}\";", entity_field.field_name, entity_field.field_name diff --git a/canyon_observer/src/migrations/register_types.rs b/canyon_observer/src/migrations/register_types.rs index b0cbf48d..313be8ea 100644 --- a/canyon_observer/src/migrations/register_types.rs +++ b/canyon_observer/src/migrations/register_types.rs @@ -1,6 +1,6 @@ use regex::Regex; -#[cfg(feature = "tokio-postgres")] +#[cfg(feature = "postgres")] use crate::constants::postgresql_type; #[cfg(feature = "tiberius")] use crate::constants::sqlserver_type; @@ -30,7 +30,7 @@ pub struct CanyonRegisterEntityField { impl CanyonRegisterEntityField { /// Return the postgres datatype and parameters to create a column for a given rust type - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] pub fn 
to_postgres_syntax(&self) -> String { let rust_type_clean = self.field_type.replace(' ', ""); @@ -124,7 +124,7 @@ impl CanyonRegisterEntityField { } } - #[cfg(feature = "tokio-postgres")] + #[cfg(feature = "postgres")] pub fn to_postgres_alter_syntax(&self) -> String { let mut rust_type_clean = self.field_type.replace(' ', ""); let rs_type_is_optional = self.field_type.to_uppercase().starts_with("OPTION"); diff --git a/src/lib.rs b/src/lib.rs index 3aaf6ea9..c1a034bb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,8 +28,11 @@ pub mod macros { /// connection module serves to reexport the public elements of the `canyon_connection` crate, /// exposing them through the public API pub mod connection { - #[cfg(feature = "postgres")] pub use canyon_connection::canyon_database_connector::DatabaseConnection::Postgres; - #[cfg(feature = "mssql")] pub use canyon_connection::canyon_database_connector::DatabaseConnection::SqlServer; + #[cfg(feature = "postgres")] + pub use canyon_connection::canyon_database_connector::DatabaseConnection::Postgres; + + #[cfg(feature = "mssql")] + pub use canyon_connection::canyon_database_connector::DatabaseConnection::SqlServer; } /// Crud module serves to reexport the public elements of the `canyon_crud` crate, diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 212c0505..08f9a557 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -9,4 +9,9 @@ canyon_sql = { path = ".." 
} 

[[test]]
name = "canyon_integration_tests"
-path = "canyon_integration_tests.rs"
\ No newline at end of file
+path = "canyon_integration_tests.rs"
+
+[features]
+default = ["postgres"]
+postgres = ["canyon_sql/postgres"]
+mssql = []

From 51b97d52bffab92820da92303b080190b9c46d6b Mon Sep 17 00:00:00 2001
From: Alex Vergara <pyzyryab@tutanota.com>
Date: Thu, 20 Apr 2023 08:59:24 +0200
Subject: [PATCH 18/23] Moved the IT to initialize MSSQL databases to its own
 crate

---
 tests/crud/init_mssql.rs | 64 ++++++++++++++++++++++++++++++++++++++++
 tests/crud/mod.rs        | 64 +---------------------------------------
 2 files changed, 65 insertions(+), 63 deletions(-)
 create mode 100644 tests/crud/init_mssql.rs

diff --git a/tests/crud/init_mssql.rs b/tests/crud/init_mssql.rs
new file mode 100644
index 00000000..cf8f8071
--- /dev/null
+++ b/tests/crud/init_mssql.rs
@@ -0,0 +1,64 @@
+#![cfg(feature = "mssql")]
+
+use crate::constants::SQL_SERVER_CREATE_TABLES;
+use crate::constants::SQL_SERVER_DS;
+use crate::constants::SQL_SERVER_FILL_TABLE_VALUES;
+use crate::tests_models::league::League;
+
+use canyon_sql::crud::CrudOperations;
+use canyon_sql::db_clients::tiberius::{Client, Config};
+use canyon_sql::runtime::tokio::net::TcpStream;
+use canyon_sql::runtime::tokio_util::compat::TokioAsyncWriteCompatExt;
+
+/// In order to initialize data on `SqlServer`. we must manually insert it
+/// when the docker starts. SqlServer official docker from Microsoft does
+/// not allow you to run `.sql` files against the database (not at least, without)
+/// using a workaround. So, we are going to query the `SqlServer` to check if already
+/// has some data (other processes, persistence or multi-threading envs), af if not,
+/// we are going to retrieve the inserted data on the `postgreSQL` at start-up and
+/// inserting into the `SqlServer` instance.
+/// +/// This will be marked as `#[ignore]`, so we can force to run first the marked as +/// ignored, check the data available, perform the necessary init operations and +/// then *cargo test * the real integration tests +#[canyon_sql::macros::canyon_tokio_test] +#[ignore] +fn initialize_sql_server_docker_instance() { + static CONN_STR: &str = + "server=tcp:localhost,1434;User Id=SA;Password=SqlServer-10;TrustServerCertificate=true"; + + canyon_sql::runtime::futures::executor::block_on(async { + let config = Config::from_ado_string(CONN_STR).unwrap(); + + let tcp = TcpStream::connect(config.get_addr()).await.unwrap(); + let tcp2 = TcpStream::connect(config.get_addr()).await.unwrap(); + tcp.set_nodelay(true).ok(); + + let mut client = Client::connect(config.clone(), tcp.compat_write()) + .await + .unwrap(); + + // Create the tables + let query_result = client.query(SQL_SERVER_CREATE_TABLES, &[]).await; + assert!(query_result.is_ok()); + + let leagues_sql = League::find_all_datasource(SQL_SERVER_DS).await; + println!("LSQL ERR: {leagues_sql:?}"); + assert!(leagues_sql.is_ok()); + + match leagues_sql { + Ok(ref leagues) => { + let leagues_len = leagues.len(); + println!("Leagues already inserted on SQLSERVER: {:?}", &leagues_len); + if leagues.len() < 10 { + let mut client2 = Client::connect(config, tcp2.compat_write()) + .await + .expect("Can't connect to MSSQL"); + let result = client2.query(SQL_SERVER_FILL_TABLE_VALUES, &[]).await; + assert!(result.is_ok()); + } + } + Err(e) => eprintln!("Error retrieving the leagues: {e}"), + } + }); +} diff --git a/tests/crud/mod.rs b/tests/crud/mod.rs index c0f6afee..97bb67bb 100644 --- a/tests/crud/mod.rs +++ b/tests/crud/mod.rs @@ -4,66 +4,4 @@ pub mod insert_operations; pub mod querybuilder_operations; pub mod select_operations; pub mod update_operations; - -use crate::constants::SQL_SERVER_CREATE_TABLES; -use crate::constants::SQL_SERVER_DS; -use crate::constants::SQL_SERVER_FILL_TABLE_VALUES; -use 
crate::tests_models::league::League; - -use canyon_sql::crud::CrudOperations; -use canyon_sql::db_clients::tiberius::{Client, Config}; -use canyon_sql::runtime::tokio::net::TcpStream; -use canyon_sql::runtime::tokio_util::compat::TokioAsyncWriteCompatExt; - -/// In order to initialize data on `SqlServer`. we must manually insert it -/// when the docker starts. SqlServer official docker from Microsoft does -/// not allow you to run `.sql` files against the database (not at least, without) -/// using a workaround. So, we are going to query the `SqlServer` to check if already -/// has some data (other processes, persistence or multi-threading envs), af if not, -/// we are going to retrieve the inserted data on the `postgreSQL` at start-up and -/// inserting into the `SqlServer` instance. -/// -/// This will be marked as `#[ignore]`, so we can force to run first the marked as -/// ignored, check the data available, perform the necessary init operations and -/// then *cargo test * the real integration tests -#[canyon_sql::macros::canyon_tokio_test] -#[ignore] -fn initialize_sql_server_docker_instance() { - static CONN_STR: &str = - "server=tcp:localhost,1434;User Id=SA;Password=SqlServer-10;TrustServerCertificate=true"; - - canyon_sql::runtime::futures::executor::block_on(async { - let config = Config::from_ado_string(CONN_STR).unwrap(); - - let tcp = TcpStream::connect(config.get_addr()).await.unwrap(); - let tcp2 = TcpStream::connect(config.get_addr()).await.unwrap(); - tcp.set_nodelay(true).ok(); - - let mut client = Client::connect(config.clone(), tcp.compat_write()) - .await - .unwrap(); - - // Create the tables - let query_result = client.query(SQL_SERVER_CREATE_TABLES, &[]).await; - assert!(query_result.is_ok()); - - let leagues_sql = League::find_all_datasource(SQL_SERVER_DS).await; - println!("LSQL ERR: {leagues_sql:?}"); - assert!(leagues_sql.is_ok()); - - match leagues_sql { - Ok(ref leagues) => { - let leagues_len = leagues.len(); - println!("Leagues already 
inserted on SQLSERVER: {:?}", &leagues_len); - if leagues.len() < 10 { - let mut client2 = Client::connect(config, tcp2.compat_write()) - .await - .expect("Can't connect to MSSQL"); - let result = client2.query(SQL_SERVER_FILL_TABLE_VALUES, &[]).await; - assert!(result.is_ok()); - } - } - Err(e) => eprintln!("Error retrieving the leagues: {e}"), - } - }); -} +#[cfg(feature = "canyon_sql/mssql")] pub mod init_mssql; From cc9a3ef287139177df506aadf5a3d089e4c6b024 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Thu, 20 Apr 2023 11:52:57 +0200 Subject: [PATCH 19/23] A bunch of major changes to stabilize the new conditional APIs into the new compilation model --- Cargo.toml | 18 ++- canyon_connection/Cargo.toml | 8 +- .../src/canyon_database_connector.rs | 70 +++++--- canyon_connection/src/datasources.rs | 103 ++++++------ canyon_crud/Cargo.toml | 7 +- canyon_macros/Cargo.toml | 5 + canyon_macros/src/lib.rs | 122 ++++++++------ canyon_macros/src/query_operations/insert.rs | 152 ++++++++++++------ canyon_observer/Cargo.toml | 5 +- canyon_observer/src/constants.rs | 4 +- canyon_observer/src/migrations/handler.rs | 10 +- .../src/migrations/information_schema.rs | 4 +- canyon_observer/src/migrations/memory.rs | 4 +- canyon_observer/src/migrations/processor.rs | 32 ++-- .../src/migrations/register_types.rs | 6 +- src/lib.rs | 4 +- tests/Cargo.toml | 4 +- tests/constants.rs | 10 +- tests/crud/delete_operations.rs | 5 +- tests/crud/foreign_key_operations.rs | 8 +- tests/crud/init_mssql.rs | 2 - tests/crud/insert_operations.rs | 8 +- tests/crud/mod.rs | 2 +- tests/crud/querybuilder_operations.rs | 12 +- tests/crud/select_operations.rs | 11 +- tests/crud/update_operations.rs | 4 +- 26 files changed, 390 insertions(+), 230 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e7cf2500..c2e7ffd2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,17 +15,19 @@ members = [ [dependencies] # Project crates -canyon_connection = { version = "0.2.0", path = "canyon_connection", optional = 
true }
-canyon_crud = { version = "0.2.0", path = "canyon_crud" }
-canyon_observer = { version = "0.2.0", path = "canyon_observer" }
-canyon_macros = { version = "0.2.0", path = "canyon_macros" }
-async-trait = "0.1.68"
+canyon_connection = { workspace = true, path = "canyon_connection" }
+canyon_crud = { workspace = true, path = "canyon_crud" }
+canyon_observer = { workspace = true, path = "canyon_observer" }
+canyon_macros = { version = "0.2.0", path = "canyon_macros" }
+
+# To be marked as opt deps
+tokio-postgres = { workspace = true, optional = true }
+tiberius = { workspace = true, optional = true }
 
 [workspace.dependencies]
 canyon_crud = { version = "0.2.0", path = "canyon_crud" }
 canyon_connection = { version = "0.2.0", path = "canyon_connection" }
 canyon_observer = { version = "0.2.0", path = "canyon_observer" }
-canyon_macros = { version = "0.2.0", path = "canyon_macros" }
 
 tokio = { version = "1.27.0", features = ["full"] }
 tokio-util = { version = "0.7.4", features = ["compat"] }
@@ -53,5 +55,5 @@ description = "A Rust ORM and QueryBuilder"
 
 [features]
 default = ["postgres"]
-postgres = ["canyon_connection/postgres", "canyon_connection/tokio-postgres"]
-mssql = ["canyon_connection/tiberius", "canyon_crud/tiberius", "canyon_observer/tiberius"]
+postgres = ["tokio-postgres", "canyon_connection/postgres", "canyon_crud/postgres", "canyon_observer/postgres", "canyon_macros/postgres"]
+mssql = ["tiberius", "canyon_connection/mssql", "canyon_crud/mssql", "canyon_observer/mssql", "canyon_macros/mssql"]
diff --git a/canyon_connection/Cargo.toml b/canyon_connection/Cargo.toml
index 9bdacbc2..4e140655 100644
--- a/canyon_connection/Cargo.toml
+++ b/canyon_connection/Cargo.toml
@@ -12,16 +12,18 @@ description.workspace = true
 [dependencies]
 tokio = { workspace = true }
 tokio-util = { workspace = true }
+
 tokio-postgres = { version = "0.7.2", features = ["with-chrono-0_4"], optional = true }
 tiberius = { version = "0.12.1", features = ["tds73", "chrono",
"integrated-auth-gssapi"], optional = true } + futures = { workspace = true } indexmap = { workspace = true } -async-std = { workspace = true } lazy_static = { workspace = true } -serde = { workspace = true, features = ["derive"] } toml = { workspace = true } +serde = { workspace = true, features = ["derive"] } +async-std = { workspace = true, optional = true } [features] default = ["postgres"] postgres = ["tokio-postgres"] -mssql = ["tiberius"] +mssql = ["tiberius", "async-std"] diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index bcb07d8e..96d88154 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -162,29 +162,57 @@ mod database_connection_handler { use super::*; use crate::CanyonSqlConfig; - const CONFIG_FILE_MOCK_ALT: &str = r#" - [canyon_sql] - datasources = [ - {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, - {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } - ] - "#; - /// Tests the behaviour of the `DatabaseType::from_datasource(...)` #[test] fn check_from_datasource() { - let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT) - .expect("A failure happened retrieving the [canyon_sql_root] section"); - - #[cfg(feature = "postgres")] - assert_eq!( - config.canyon_sql.datasources[0].get_db_type(), - DatabaseType::PostgreSql - ); - #[cfg(feature = "mssql")] - assert_eq!( - config.canyon_sql.datasources[1].get_db_type(), - DatabaseType::SqlServer - ); + #[cfg(all(feature = "postgres", feature = "mssql"))] { + const CONFIG_FILE_MOCK_ALT_ALL: &str = r#" + [canyon_sql] + 
datasources = [ + {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, + {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } + ] + "#; + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_ALL) + .expect("A failure happened retrieving the [canyon_sql] section"); + assert_eq!( + config.canyon_sql.datasources[0].get_db_type(), + DatabaseType::PostgreSql + ); + assert_eq!( + config.canyon_sql.datasources[1].get_db_type(), + DatabaseType::SqlServer + ); + } + + #[cfg(feature = "postgres")] { + const CONFIG_FILE_MOCK_ALT_PG: &str = r#" + [canyon_sql] + datasources = [ + {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, + ] + "#; + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_PG) + .expect("A failure happened retrieving the [canyon_sql] section"); + assert_eq!( + config.canyon_sql.datasources[0].get_db_type(), + DatabaseType::PostgreSql + ); + } + + #[cfg(feature = "mssql")] { + const CONFIG_FILE_MOCK_ALT_MSSQL: &str = r#" + [canyon_sql] + datasources = [ + {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } + ] + "#; + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_MSSQL) + .expect("A failure happened retrieving the [canyon_sql] section"); + assert_eq!( + config.canyon_sql.datasources[0].get_db_type(), + DatabaseType::SqlServer + ); + } } } diff --git 
a/canyon_connection/src/datasources.rs b/canyon_connection/src/datasources.rs index 2dd3913c..82775fd7 100644 --- a/canyon_connection/src/datasources.rs +++ b/canyon_connection/src/datasources.rs @@ -5,55 +5,64 @@ use crate::canyon_database_connector::DatabaseType; /// ``` #[test] fn load_ds_config_from_array() { - const CONFIG_FILE_MOCK_ALT: &str = r#" - [canyon_sql] - datasources = [ - {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, - {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' }, - {name = 'SqlServerDS', auth = { sqlserver = { integrated = {} } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } - ] - "#; - - let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT) - .expect("A failure happened retrieving the [canyon_sql_root] section"); - - let ds_0 = &config.canyon_sql.datasources[0]; - let ds_1 = &config.canyon_sql.datasources[1]; - let _ds_2 = &config.canyon_sql.datasources[2]; - - assert_eq!(ds_0.name, "PostgresDS"); - assert_eq!(ds_0.get_db_type(), DatabaseType::PostgreSql); - assert_eq!( - ds_0.auth, - Auth::Postgres(PostgresAuth::Basic { - username: "postgres".to_string(), - password: "postgres".to_string() - }) - ); - assert_eq!(ds_0.properties.host, "localhost"); - assert_eq!(ds_0.properties.port, None); - assert_eq!(ds_0.properties.db_name, "triforce"); - assert_eq!(ds_0.properties.migrations, Some(Migrations::Enabled)); + #[cfg(feature = "postgres")] { + const CONFIG_FILE_MOCK_ALT_PG: &str = r#" + [canyon_sql] + datasources = [ + {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = 
"postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, + ] + "#; + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_PG) + .expect("A failure happened retrieving the [canyon_sql] section"); - #[cfg(feature = "mssql")] - assert_eq!(ds_1.name, "SqlServerDS"); - #[cfg(feature = "mssql")] - assert_eq!(ds_1.get_db_type(), DatabaseType::SqlServer); - #[cfg(feature = "mssql")] - assert_eq!( - ds_1.auth, - Auth::SqlServer(SqlServerAuth::Basic { - username: "sa".to_string(), - password: "SqlServer-10".to_string() - }) - ); - assert_eq!(ds_1.properties.host, "192.168.0.250.1"); - assert_eq!(ds_1.properties.port, Some(3340)); - assert_eq!(ds_1.properties.db_name, "triforce2"); - assert_eq!(ds_1.properties.migrations, Some(Migrations::Disabled)); + let ds_0 = &config.canyon_sql.datasources[0]; - #[cfg(feature = "mssql")] - assert_eq!(_ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)) + assert_eq!(ds_0.name, "PostgresDS"); + assert_eq!(ds_0.get_db_type(), DatabaseType::PostgreSql); + assert_eq!( + ds_0.auth, + Auth::Postgres(PostgresAuth::Basic { + username: "postgres".to_string(), + password: "postgres".to_string() + }) + ); + assert_eq!(ds_0.properties.host, "localhost"); + assert_eq!(ds_0.properties.port, None); + assert_eq!(ds_0.properties.db_name, "triforce"); + assert_eq!(ds_0.properties.migrations, Some(Migrations::Enabled)); + } + + #[cfg(feature = "mssql")] { + const CONFIG_FILE_MOCK_ALT_MSSQL: &str = r#" + [canyon_sql] + datasources = [ + {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' }, + {name = 'SqlServerDS', auth = { sqlserver = { integrated = {} } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } + ] + "#; + let 
config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_MSSQL) + .expect("A failure happened retrieving the [canyon_sql] section"); + + let ds_1 = &config.canyon_sql.datasources[0]; + let ds_2 = &config.canyon_sql.datasources[1]; + + + assert_eq!(ds_1.name, "SqlServerDS"); + assert_eq!(ds_1.get_db_type(), DatabaseType::SqlServer); + assert_eq!( + ds_1.auth, + Auth::SqlServer(SqlServerAuth::Basic { + username: "sa".to_string(), + password: "SqlServer-10".to_string() + }) + ); + assert_eq!(ds_1.properties.host, "192.168.0.250.1"); + assert_eq!(ds_1.properties.port, Some(3340)); + assert_eq!(ds_1.properties.db_name, "triforce2"); + assert_eq!(ds_1.properties.migrations, Some(Migrations::Disabled)); + + assert_eq!(ds_2.auth, Auth::SqlServer(SqlServerAuth::Integrated)); + } } /// #[derive(Deserialize, Debug, Clone)] diff --git a/canyon_crud/Cargo.toml b/canyon_crud/Cargo.toml index 33213847..cbe44ad9 100644 --- a/canyon_crud/Cargo.toml +++ b/canyon_crud/Cargo.toml @@ -17,12 +17,11 @@ description.workspace = true tokio-postgres = { workspace = true, optional = true } tiberius = { workspace = true, optional = true } chrono = { version = "0.4", features = ["serde"] } -async-trait = { version = "0.1.50" } +async-trait = { workspace = true } canyon_connection = { workspace = true, path = "../canyon_connection" } [features] default = ["postgres"] -postgres = ["tokio-postgres"] -mssql = ["tiberius"] -#mssql-integrated-auth = ["mssql", "tiberius/integrated-auth-gssapi"] \ No newline at end of file +postgres = ["tokio-postgres", "canyon_connection/postgres"] +mssql = ["tiberius", "canyon_connection/mssql"] diff --git a/canyon_macros/Cargo.toml b/canyon_macros/Cargo.toml index d095e7ff..e306dcab 100755 --- a/canyon_macros/Cargo.toml +++ b/canyon_macros/Cargo.toml @@ -22,3 +22,8 @@ tokio = { version = "1.9.0", features = ["full"] } canyon_observer = { workspace = true, path = "../canyon_observer" } canyon_crud = { workspace = true, path = "../canyon_crud" } canyon_connection 
= { workspace = true, path = "../canyon_connection" } + +[features] +default = ["postgres"] +postgres = ["canyon_connection/postgres", "canyon_crud/postgres", "canyon_observer/postgres"] +mssql = ["canyon_connection/mssql", "canyon_crud/mssql", "canyon_observer/mssql"] diff --git a/canyon_macros/src/lib.rs b/canyon_macros/src/lib.rs index 34a166e8..54c4c873 100755 --- a/canyon_macros/src/lib.rs +++ b/canyon_macros/src/lib.rs @@ -486,103 +486,133 @@ pub fn implement_row_mapper_for_type(input: proc_macro::TokenStream) -> proc_mac } }); - // TODO rework this ugly piece of code in the upcoming versions let init_field_values_sqlserver = fields.iter().map(|(_vis, ident, ty)| { - let ident_name = ident.to_string(); + let ident_name = ident.to_string(); - if get_field_type_as_string(ty) == "String" { - quote! { + if get_field_type_as_string(ty) == "String" { + quote! { #ident: row.get::<&str, &str>(#ident_name) .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) .to_string() } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! 
{ #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::<&str, &str>(#ident_name) .map( |x| x.to_owned() ) } - } else if get_field_type_as_string(ty) == "NaiveDate" { - quote! { + } else if get_field_type_as_string(ty) == "NaiveDate" { + quote! { #ident: row.get::(#ident_name) .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty) == "NaiveTime" { - quote! { + } else if get_field_type_as_string(ty) == "NaiveTime" { + quote! { #ident: row.get::(#ident_name) .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty) == "NaiveDateTime" { - quote! { + } else if get_field_type_as_string(ty) == "NaiveDateTime" { + quote! { #ident: row.get::(#ident_name) .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty) == "DateTime" { - quote! { + } else if get_field_type_as_string(ty) == "DateTime" { + quote! { #ident: row.get::(#ident_name) .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! 
{ + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else { - quote! { + } else { + quote! { #ident: row.get::<#ty, &str>(#ident_name) .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) } - } - }); + } + }); // The type of the Struct let ty = ast.ident; - let tokens = quote! { - impl canyon_sql::crud::RowMapper for #ty - { - fn deserialize_postgresql(row: &canyon_sql::db_clients::tokio_postgres::Row) -> #ty { - Self { - #(#init_field_values),* + let postgres_enabled = cfg!(feature = "postgres"); + let mssql_enabled = cfg!(feature = "mssql"); + + let tokens = if postgres_enabled && mssql_enabled { + quote! { + impl canyon_sql::crud::RowMapper for #ty { + fn deserialize_postgresql(row: &canyon_sql::db_clients::tokio_postgres::Row) -> #ty { + Self { + #(#init_field_values),* + } + } + fn deserialize_sqlserver(row: &canyon_sql::db_clients::tiberius::Row) -> #ty { + Self { + #(#init_field_values_sqlserver),* + } } } - - fn deserialize_sqlserver(row: &canyon_sql::db_clients::tiberius::Row) -> #ty { - Self { - #(#init_field_values_sqlserver),* + } + } else if postgres_enabled { + quote! { + impl canyon_sql::crud::RowMapper for #ty { + fn deserialize_postgresql(row: &canyon_sql::db_clients::tokio_postgres::Row) -> #ty { + Self { + #(#init_field_values),* + } + } + } + } + } else if mssql_enabled { + quote! { + impl canyon_sql::crud::RowMapper for #ty { + fn deserialize_sqlserver(row: &canyon_sql::db_clients::tiberius::Row) -> #ty { + Self { + #(#init_field_values_sqlserver),* + } } } } + } else { + quote! { + panic!( + "Reached a branch in the implementation of the Row Mapper macro that should never be reached.\ + This is a severe bug of Canyon-SQL. Please, open us an issue at \ + https://github.com/zerodaycode/Canyon-SQL/issues and let us know about that failure." 
+ ) + } }; tokens.into() diff --git a/canyon_macros/src/query_operations/insert.rs b/canyon_macros/src/query_operations/insert.rs index 3093bc4c..329399f0 100644 --- a/canyon_macros/src/query_operations/insert.rs +++ b/canyon_macros/src/query_operations/insert.rs @@ -36,35 +36,59 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri .find(|(i, _t)| Some(i.to_string()) == primary_key); let insert_transaction = if let Some(pk_data) = &pk_ident_type { let pk_ident = &pk_data.0; - let pk_ident_str = &pk_data.0.to_string(); let pk_type = &pk_data.1; - let postgres_db_conn_match_arm = if cfg!(feature = "canyon_sql/postgres") { + let postgres_enabled = cfg!(feature = "postgres"); + let mssql_enabled = cfg!(feature = "mssql"); + + let match_rows = if postgres_enabled && mssql_enabled { quote! { canyon_sql::crud::CanyonRows::Postgres(mut v) => { self.#pk_ident = v .get(0) - .expect("Failed getting the returned IDs for an insert") + .ok_or("Failed getting the returned IDs for an insert")? .get::<&str, #pk_type>(#primary_key); Ok(()) } + canyon_sql::crud::CanyonRows::Tiberius(mut v) => { + self.#pk_ident = v + .get(0) + .ok_or("Failed getting the returned IDs for a multi insert")? + .get::<#pk_type, &str>(#primary_key) + .ok_or("SQL Server primary key type failed to be set as value")?; + Ok(()) + } } - } else { - println!("No feature postgres detected for: {:?}", pk_ident_str); - quote! {} - }; - - let mssql_db_conn_match_arm = if cfg!(feature = "mssql") { + } else if postgres_enabled { quote! { canyon_sql::crud::CanyonRows::Postgres(mut v) => { self.#pk_ident = v .get(0) - .expect("Failed getting the returned IDs for an insert") + .ok_or("Failed getting the returned IDs for an insert")? .get::<&str, #pk_type>(#primary_key); Ok(()) } } - } else { quote! {} }; + } else if mssql_enabled { + quote! { + canyon_sql::crud::CanyonRows::Tiberius(mut v) => { + self.#pk_ident = v + .get(0) + .ok_or("Failed getting the returned IDs for a multi insert")? 
+ .get::<#pk_type, &str>(#primary_key) + .ok_or("SQL Server primary key type failed to be set as value")?; + Ok(()) + } + } + } else { + quote! { + panic!( + "Reached a branch in the implementation of the Row Mapper macro that should never be reached.\ + This is a severe bug of Canyon-SQL. Please, open us an issue at \ + https://github.com/zerodaycode/Canyon-SQL/issues and let us know about that failure." + ) + } + }; quote! { #remove_pk_value_from_fn_entry; @@ -84,8 +108,7 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri ).await?; match rows { - #postgres_db_conn_match_arm - #mssql_db_conn_match_arm + #match_rows _ => panic!("Reached the panic match arm of insert for the DatabaseConnection type") // TODO remove when the generics will be refactored } } @@ -236,6 +259,70 @@ pub fn generate_multiple_insert_tokens( let pk_ident = &pk_data.0; let pk_type = &pk_data.1; + let postgres_enabled = cfg!(feature = "postgres"); + let mssql_enabled = cfg!(feature = "mssql"); + + let match_multi_insert_rows = if postgres_enabled && mssql_enabled { + quote! { + canyon_sql::crud::CanyonRows::Postgres(mut v) => { + for (idx, instance) in instances.iter_mut().enumerate() { + instance.#pk_ident = v + .get(idx) + .expect("Failed getting the returned IDs for a multi insert") + .get::<&str, #pk_type>(#pk); + } + + Ok(()) + } + canyon_sql::crud::CanyonRows::Tiberius(mut v) => { + for (idx, instance) in instances.iter_mut().enumerate() { + instance.#pk_ident = v + .get(idx) + .expect("Failed getting the returned IDs for a multi insert") + .get::<#pk_type, &str>(#pk) + .expect("SQL Server primary key type failed to be set as value"); + } + + Ok(()) + } + } + } else if postgres_enabled { + quote! 
{ + canyon_sql::crud::CanyonRows::Postgres(mut v) => { + for (idx, instance) in instances.iter_mut().enumerate() { + instance.#pk_ident = v + .get(idx) + .expect("Failed getting the returned IDs for a multi insert") + .get::<&str, #pk_type>(#pk); + } + + Ok(()) + } + } + } else if mssql_enabled { + quote! { + canyon_sql::crud::CanyonRows::Tiberius(mut v) => { + for (idx, instance) in instances.iter_mut().enumerate() { + instance.#pk_ident = v + .get(idx) + .expect("Failed getting the returned IDs for a multi insert") + .get::<#pk_type, &str>(#pk) + .expect("SQL Server primary key type failed to be set as value"); + } + + Ok(()) + } + } + } else { + quote! { + panic!( + "Reached a branch in the implementation of the Row Mapper macro that should never be reached.\ + This is a severe bug of Canyon-SQL. Please, open us an issue at \ + https://github.com/zerodaycode/Canyon-SQL/issues and let us know about that failure." + ) + } + }; + quote! { mapped_fields = #column_names .split(", ") @@ -298,42 +385,15 @@ pub fn generate_multiple_insert_tokens( } } - let result = <#ty as canyon_sql::crud::Transaction<#ty>>::query( + let multi_insert_result = <#ty as canyon_sql::crud::Transaction<#ty>>::query( stmt, v_arr, datasource_name - ).await; - - match result { - Ok(res) => { - match res { - // #[cfg(feature = "postgres")] - canyon_sql::crud::CanyonRows::Postgres(mut v) => { - for (idx, instance) in instances.iter_mut().enumerate() { - instance.#pk_ident = v - .get(idx) - .expect("Failed getting the returned IDs for a multi insert") - .get::<&str, #pk_type>(#pk); - } - - Ok(()) - }, - #[cfg(feature = "mssql")] - canyon_sql::crud::CanyonRows::Tiberius(mut v) => { - for (idx, instance) in instances.iter_mut().enumerate() { - instance.#pk_ident = v - .get(idx) - .expect("Failed getting the returned IDs for a multi insert") - .get::<#pk_type, &str>(#pk) - .expect("SQL Server primary key type failed to be set as value"); - } - - Ok(()) - }, - _ => panic!() // TODO 
remove when the generics will be refactored - } - }, - Err(e) => Err(e) + ).await?; + + match multi_insert_result { + #match_multi_insert_rows + _ => panic!() // TODO remove when the generics will be refactored } } } else { diff --git a/canyon_observer/Cargo.toml b/canyon_observer/Cargo.toml index 2282f494..7a616e0c 100644 --- a/canyon_observer/Cargo.toml +++ b/canyon_observer/Cargo.toml @@ -27,6 +27,5 @@ partialdebug = "0.2.0" [features] default = ["postgres"] -postgres = ["tokio-postgres"] -mssql = ["tiberius"] -#mssql-integrated-auth = ["mssql", "tiberius/integrated-auth-gssapi"] +postgres = ["tokio-postgres", "canyon_connection/postgres", "canyon_crud/postgres"] +mssql = ["tiberius", "canyon_connection/mssql", "canyon_crud/mssql"] diff --git a/canyon_observer/src/constants.rs b/canyon_observer/src/constants.rs index 997a4bb3..3928da4f 100644 --- a/canyon_observer/src/constants.rs +++ b/canyon_observer/src/constants.rs @@ -36,7 +36,7 @@ pub mod postgresql_queries { table_schema = 'public';"; } -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] pub mod mssql_queries { pub static CANYON_MEMORY_TABLE: &str = "IF OBJECT_ID(N'[dbo].[canyon_memory]', N'U') IS NULL BEGIN @@ -157,7 +157,7 @@ pub mod postgresql_type { pub const DATETIME: &str = "timestamp without time zone"; } -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] pub mod sqlserver_type { pub const TINY_INT: &str = "TINY INT"; pub const SMALL_INT: &str = "SMALL INT"; diff --git a/canyon_observer/src/migrations/handler.rs b/canyon_observer/src/migrations/handler.rs index cd09ee28..9ce3c4e8 100644 --- a/canyon_observer/src/migrations/handler.rs +++ b/canyon_observer/src/migrations/handler.rs @@ -92,7 +92,7 @@ impl Migrations { let query = match db_type { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => constants::postgresql_queries::FETCH_PUBLIC_SCHEMA, - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] DatabaseType::SqlServer => constants::mssql_queries::FETCH_PUBLIC_SCHEMA, }; @@ 
-112,7 +112,7 @@ impl Migrations { match db_results { #[cfg(feature = "postgres")] CanyonRows::Postgres(v) => Self::process_tp_rows(v, db_type), - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] CanyonRows::Tiberius(v) => Self::process_tib_rows(v, db_type), _ => panic!(), } @@ -235,7 +235,7 @@ impl Migrations { schema_info } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn process_tib_rows( db_results: Vec, db_type: DatabaseType, @@ -273,7 +273,7 @@ impl Migrations { fn get_table_name_from_tp_row(res_row: &tokio_postgres::Row) -> String { res_row.get::<&str, String>("table_name") } -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] fn get_table_name_from_tib_row(res_row: &tiberius::Row) -> String { res_row .get::<&str, &str>("table_name") @@ -289,7 +289,7 @@ fn check_for_table_name( match db_type { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => table.table_name == res_row.get_postgres::<&str>("table_name"), - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] DatabaseType::SqlServer => table.table_name == res_row.get_mssql::<&str>("table_name"), } } diff --git a/canyon_observer/src/migrations/information_schema.rs b/canyon_observer/src/migrations/information_schema.rs index 98ad01b9..74709619 100644 --- a/canyon_observer/src/migrations/information_schema.rs +++ b/canyon_observer/src/migrations/information_schema.rs @@ -1,4 +1,4 @@ -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] use canyon_connection::tiberius::ColumnType as TIB_TY; #[cfg(feature = "postgres")] use canyon_connection::tokio_postgres::types::Type as TP_TYP; @@ -54,7 +54,7 @@ impl ColumnMetadataTypeValue { _ => Self::NoneValue, // TODO watchout this one } } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] ColumnType::SqlServer(v) => match v { TIB_TY::NChar | TIB_TY::NVarchar | TIB_TY::BigChar | TIB_TY::BigVarChar => { Self::StringValue( diff --git a/canyon_observer/src/migrations/memory.rs b/canyon_observer/src/migrations/memory.rs index 
a07e60d5..18f6eb31 100644 --- a/canyon_observer/src/migrations/memory.rs +++ b/canyon_observer/src/migrations/memory.rs @@ -86,7 +86,7 @@ impl CanyonMemory { db_rows.push(db_row); } } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] { let mem_results: &Vec = res.get_tiberius_rows(); for row in mem_results { @@ -246,7 +246,7 @@ impl CanyonMemory { let query = match database_type { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => constants::postgresql_queries::CANYON_MEMORY_TABLE, - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] DatabaseType::SqlServer => constants::mssql_queries::CANYON_MEMORY_TABLE, }; diff --git a/canyon_observer/src/migrations/processor.rs b/canyon_observer/src/migrations/processor.rs index a4627bdc..e6c23bb4 100644 --- a/canyon_observer/src/migrations/processor.rs +++ b/canyon_observer/src/migrations/processor.rs @@ -188,7 +188,7 @@ impl MigrationsProcessor { .collect(); for column_metadata in columns_name_to_delete { - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] { if _db_type == DatabaseType::SqlServer && !column_metadata.is_nullable { self.drop_column_not_null( @@ -246,7 +246,7 @@ impl MigrationsProcessor { ))); } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn drop_column_not_null( &mut self, table_name: &str, @@ -623,7 +623,7 @@ impl MigrationsHelper { } } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] fn get_datatype_from_column_metadata(current_column_metadata: &ColumnMetadata) -> String { // TODO Add all SQL Server text datatypes if vec!["nvarchar", "varchar"] @@ -654,7 +654,7 @@ impl MigrationsHelper { == current_column_metadata.datatype; } } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] { if db_type == DatabaseType::SqlServer { // TODO Search a better way to get the datatype without useless info (like "VARCHAR(MAX)") @@ -779,7 +779,7 @@ impl DatabaseOperation for TableOperation { .join(", ") ) } - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => { + 
#[cfg(feature = "mssql")] DatabaseType::SqlServer => { format!( "CREATE TABLE {:?} ({:?});", table_name, @@ -802,7 +802,7 @@ impl DatabaseOperation for TableOperation { match db_type { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!("ALTER TABLE {old_table_name} RENAME TO {new_table_name};"), - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + #[cfg(feature = "mssql")] DatabaseType::SqlServer => /* Notes: Brackets around `old_table_name`, p.e. exec sp_rename ['league'], 'leagues' // NOT VALID! @@ -834,7 +834,7 @@ impl DatabaseOperation for TableOperation { "ALTER TABLE {table_name} ADD CONSTRAINT {foreign_key_name} \ FOREIGN KEY ({column_foreign_key}) REFERENCES {table_to_reference} ({column_to_reference});" ), - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + #[cfg(feature = "mssql")] DatabaseType::SqlServer => todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } @@ -845,7 +845,7 @@ impl DatabaseOperation for TableOperation { format!( "ALTER TABLE {table_with_foreign_key} DROP CONSTRAINT {constraint_name};", ), - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + #[cfg(feature = "mssql")] DatabaseType::SqlServer => todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } @@ -857,7 +857,7 @@ impl DatabaseOperation for TableOperation { "ALTER TABLE \"{table_name}\" ADD PRIMARY KEY (\"{}\");", entity_field.field_name ), - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + #[cfg(feature = "mssql")] DatabaseType::SqlServer => todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } @@ -866,7 +866,7 @@ impl DatabaseOperation for TableOperation { match db_type { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;"), - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + #[cfg(feature = "mssql")] DatabaseType::SqlServer => format!("ALTER TABLE {table_name} 
DROP CONSTRAINT {primary_key_name} CASCADE;") } } @@ -886,7 +886,7 @@ enum ColumnOperation { AlterColumnType(String, CanyonRegisterEntityField), AlterColumnDropNotNull(String, CanyonRegisterEntityField), // SQL server specific operation - SQL server can't drop a NOT NULL column - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] DropNotNullBeforeDropColumn(String, String, String), #[cfg(feature = "postgres")] AlterColumnSetNotNull(String, CanyonRegisterEntityField), @@ -914,7 +914,7 @@ impl DatabaseOperation for ColumnOperation { entity_field.field_name, entity_field.to_postgres_syntax() ), - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + #[cfg(feature = "mssql")] DatabaseType::SqlServer => format!( "ALTER TABLE {} ADD \"{}\" {};", table_name, @@ -933,20 +933,20 @@ impl DatabaseOperation for ColumnOperation { "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" TYPE {};", entity_field.field_name, entity_field.to_postgres_alter_syntax() ), - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + #[cfg(feature = "mssql")] DatabaseType::SqlServer => todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } ColumnOperation::AlterColumnDropNotNull(table_name, entity_field) => match db_type { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!("ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" DROP NOT NULL;", entity_field.field_name), - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + #[cfg(feature = "mssql")] DatabaseType::SqlServer => format!( "ALTER TABLE \"{table_name}\" ALTER COLUMN {} {} NULL", entity_field.field_name, entity_field.to_sqlserver_alter_syntax() ) } - #[cfg(feature = "tiberius")] ColumnOperation::DropNotNullBeforeDropColumn(table_name, column_name, column_datatype) => + #[cfg(feature = "mssql")] ColumnOperation::DropNotNullBeforeDropColumn(table_name, column_name, column_datatype) => format!( "ALTER TABLE {table_name} ALTER COLUMN {column_name} {column_datatype} NULL; DECLARE @tableName 
VARCHAR(MAX) = '{table_name}' DECLARE @columnName VARCHAR(MAX) = '{column_name}' @@ -1000,7 +1000,7 @@ impl DatabaseOperation for SequenceOperation { "SELECT setval(pg_get_serial_sequence('\"{table_name}\"', '{}'), max(\"{}\")) from \"{table_name}\";", entity_field.field_name, entity_field.field_name ), - #[cfg(feature = "tiberius")] DatabaseType::SqlServer => + #[cfg(feature = "mssql")] DatabaseType::SqlServer => todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } diff --git a/canyon_observer/src/migrations/register_types.rs b/canyon_observer/src/migrations/register_types.rs index 313be8ea..14481c13 100644 --- a/canyon_observer/src/migrations/register_types.rs +++ b/canyon_observer/src/migrations/register_types.rs @@ -2,7 +2,7 @@ use regex::Regex; #[cfg(feature = "postgres")] use crate::constants::postgresql_type; -#[cfg(feature = "tiberius")] +#[cfg(feature = "mssql")] use crate::constants::sqlserver_type; use crate::constants::{regex_patterns, rust_type, NUMERIC_PK_DATATYPE}; @@ -77,7 +77,7 @@ impl CanyonRegisterEntityField { /// Return the postgres datatype and parameters to create a column for a given rust type /// for Microsoft SQL Server - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] pub fn to_sqlserver_syntax(&self) -> String { let rust_type_clean = self.field_type.replace(' ', ""); @@ -167,7 +167,7 @@ impl CanyonRegisterEntityField { } } - #[cfg(feature = "tiberius")] + #[cfg(feature = "mssql")] pub fn to_sqlserver_alter_syntax(&self) -> String { let mut rust_type_clean = self.field_type.replace(' ', ""); let rs_type_is_optional = self.field_type.to_uppercase().starts_with("OPTION"); diff --git a/src/lib.rs b/src/lib.rs index c1a034bb..e40d9d3b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -8,8 +8,6 @@ extern crate canyon_crud; extern crate canyon_macros; extern crate canyon_observer; -extern crate async_trait; - /// Reexported elements to the root of the public API pub mod migrations { pub use 
canyon_observer::migrations::{handler, processor}; @@ -21,7 +19,7 @@ pub use canyon_macros::main; /// Public API for the `Canyon-SQL` proc-macros, and for the external ones pub mod macros { - pub use async_trait::*; + pub use canyon_crud::async_trait::*; pub use canyon_macros::*; } diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 08f9a557..e606e3c9 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -5,7 +5,7 @@ edition.workspace = true publish = false [dev-dependencies] -canyon_sql = { path = ".." } +canyon_sql = { path = "..", default-features = false, features = ["mssql"] } [[test]] name = "canyon_integration_tests" @@ -13,5 +13,5 @@ path = "canyon_integration_tests.rs" [features] default = ["postgres"] -postgres = ["canyon_sql/postgres"] +postgres = [] mssql = [] diff --git a/tests/constants.rs b/tests/constants.rs index f7804e43..8fb86a44 100644 --- a/tests/constants.rs +++ b/tests/constants.rs @@ -1,8 +1,9 @@ ///! Constant values to share across the integration tests -pub const PSQL_DS: &str = "postgres_docker"; -pub const SQL_SERVER_DS: &str = "sqlserver_docker"; -pub static FETCH_PUBLIC_SCHEMA: &str = +#[cfg(feature = "postgres")] pub const PSQL_DS: &str = "postgres_docker"; +#[cfg(feature = "mssql")] pub const SQL_SERVER_DS: &str = "sqlserver_docker"; + +#[cfg(feature = "postgres")] pub static FETCH_PUBLIC_SCHEMA: &str = "SELECT gi.table_name, gi.column_name, @@ -33,7 +34,7 @@ LEFT JOIN pg_catalog.pg_constraint AS con on WHERE table_schema = 'public';"; -pub const SQL_SERVER_CREATE_TABLES: &str = " +#[cfg(feature = "mssql")] pub const SQL_SERVER_CREATE_TABLES: &str = " IF OBJECT_ID(N'[dbo].[league]', N'U') IS NULL BEGIN CREATE TABLE dbo.league ( @@ -87,6 +88,7 @@ BEGIN END; "; +#[cfg(feature = "mssql")] pub const SQL_SERVER_FILL_TABLE_VALUES: &str = " -- Values for league table -- Values for league table diff --git a/tests/crud/delete_operations.rs b/tests/crud/delete_operations.rs index 46d1bcaf..fb2e07e9 100644 --- 
a/tests/crud/delete_operations.rs +++ b/tests/crud/delete_operations.rs @@ -2,7 +2,8 @@ ///! generates and executes *INSERT* statements use canyon_sql::crud::CrudOperations; -use crate::constants::{PSQL_DS, SQL_SERVER_DS}; +#[cfg(feature = "postgres")] use crate::constants::PSQL_DS; +#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; /// Deletes a row from the database that is mapped into some instance of a `T` entity. @@ -14,6 +15,7 @@ use crate::tests_models::league::*; /// /// Attempt of usage the `t.delete(&self)` method on an entity without `#[primary_key]` /// will raise a runtime error. +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_delete_method_operation() { // For test the delete, we will insert a new instance of the database, and then, @@ -58,6 +60,7 @@ fn test_crud_delete_method_operation() { } /// Same as the delete test, but performing the operations with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_delete_datasource_method_operation() { // For test the delete, we will insert a new instance of the database, and then, diff --git a/tests/crud/foreign_key_operations.rs b/tests/crud/foreign_key_operations.rs index b58df802..b74f6852 100644 --- a/tests/crud/foreign_key_operations.rs +++ b/tests/crud/foreign_key_operations.rs @@ -10,13 +10,14 @@ ///! 
For more info: TODO -> Link to the docs of the foreign key chapter use canyon_sql::crud::CrudOperations; -use crate::constants::SQL_SERVER_DS; +#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; use crate::tests_models::tournament::*; /// Given an entity `T` which has some field declaring a foreign key relation -/// with some another entity `U`, for example, performns a search to find +/// with some another entity `U`, for example, performs a search to find /// what is the parent type `U` of `T` +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_search_by_foreign_key() { let some_tournament: Tournament = Tournament::find_by_pk(&1) @@ -38,6 +39,7 @@ fn test_crud_search_by_foreign_key() { } /// Same as the search by foreign key, but with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_search_by_foreign_key_datasource() { let some_tournament: Tournament = Tournament::find_by_pk_datasource(&10, SQL_SERVER_DS) @@ -67,6 +69,7 @@ fn test_crud_search_by_foreign_key_datasource() { /// to `U`. 
/// /// For this to work, `U`, the parent, must have derived the `ForeignKeyable` proc macro +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_search_reverse_side_foreign_key() { let some_league: League = League::find_by_pk(&1) @@ -87,6 +90,7 @@ fn test_crud_search_reverse_side_foreign_key() { /// Same as the search by the reverse side of a foreign key relation /// but with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_search_reverse_side_foreign_key_datasource() { let some_league: League = League::find_by_pk_datasource(&1, SQL_SERVER_DS) diff --git a/tests/crud/init_mssql.rs b/tests/crud/init_mssql.rs index cf8f8071..19b08549 100644 --- a/tests/crud/init_mssql.rs +++ b/tests/crud/init_mssql.rs @@ -1,5 +1,3 @@ -#![cfg_attr(feature = "canyon_sql/mssql")] - use crate::constants::SQL_SERVER_CREATE_TABLES; use crate::constants::SQL_SERVER_DS; use crate::constants::SQL_SERVER_FILL_TABLE_VALUES; diff --git a/tests/crud/insert_operations.rs b/tests/crud/insert_operations.rs index 29c0c9fa..06ffbcbf 100644 --- a/tests/crud/insert_operations.rs +++ b/tests/crud/insert_operations.rs @@ -2,7 +2,7 @@ ///! generates and executes *INSERT* statements use canyon_sql::crud::CrudOperations; -use crate::constants::SQL_SERVER_DS; +#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; /// Inserts a new record on the database, given an entity that is @@ -25,7 +25,8 @@ use crate::tests_models::league::*; /// /// If the type hasn't a `#[primary_key]` annotation, or the annotation contains /// an argument specifying not autoincremental behaviour, all the fields will be -/// inserted on the database and no returning value will be placed in any field. +/// inserted on the database and no returning value will be placed in any field. 
+#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_insert_operation() { let mut new_league: League = League { @@ -54,6 +55,7 @@ fn test_crud_insert_operation() { /// Same as the insert operation above, but targeting the database defined in /// the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_insert_datasource_operation() { let mut new_league: League = League { @@ -93,6 +95,7 @@ fn test_crud_insert_datasource_operation() { /// /// The instances without `#[primary_key]` inserts all the values on the instaqce fields /// on the database. +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_multi_insert_operation() { let mut new_league_mi: League = League { @@ -154,6 +157,7 @@ fn test_crud_multi_insert_operation() { } /// Same as the multi insert above, but with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_multi_insert_datasource_operation() { let mut new_league_mi: League = League { diff --git a/tests/crud/mod.rs b/tests/crud/mod.rs index 97bb67bb..5b11a7ed 100644 --- a/tests/crud/mod.rs +++ b/tests/crud/mod.rs @@ -4,4 +4,4 @@ pub mod insert_operations; pub mod querybuilder_operations; pub mod select_operations; pub mod update_operations; -#[cfg(feature = "canyon_sql/mssql")] pub mod init_mssql; +#[cfg(feature = "mssql")] pub mod init_mssql; diff --git a/tests/crud/querybuilder_operations.rs b/tests/crud/querybuilder_operations.rs index 4700f598..8f9d1659 100644 --- a/tests/crud/querybuilder_operations.rs +++ b/tests/crud/querybuilder_operations.rs @@ -9,10 +9,10 @@ use canyon_sql::{ query::{operators::Comp, ops::QueryBuilder}, }; -use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; -use crate::tests_models::player::*; use crate::tests_models::tournament::*; +#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; +#[cfg(feature = "mssql")] use 
crate::tests_models::player::*; /// Builds a new SQL statement for retrieves entities of the `T` type, filtered /// with the parameters that modifies the base SQL to SELECT * FROM @@ -38,6 +38,7 @@ fn test_generated_sql_by_the_select_querybuilder() { /// Builds a new SQL statement for retrieves entities of the `T` type, filtered /// with the parameters that modifies the base SQL to SELECT * FROM +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_with_querybuilder() { // Find all the leagues with ID less or equals that 7 @@ -57,6 +58,7 @@ fn test_crud_find_with_querybuilder() { } /// Same than the above but with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_with_querybuilder_datasource() { // Find all the players where its ID column value is greater that 50 @@ -70,6 +72,7 @@ fn test_crud_find_with_querybuilder_datasource() { /// Updates the values of the range on entries defined by the constraint parameters /// in the database entity +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_update_with_querybuilder() { // Find all the leagues with ID less or equals that 7 @@ -82,7 +85,7 @@ fn test_crud_update_with_querybuilder() { .r#where(LeagueFieldValue::id(&1), Comp::Gt) .and(LeagueFieldValue::id(&8), Comp::Lt); - /* Family of QueryBuilders are clone, useful in case of need to read the generated SQL + /* NOTE: Family of QueryBuilders are clone, useful in case of need to read the generated SQL let qpr = q.clone(); println!("PSQL: {:?}", qpr.read_sql()); */ @@ -105,6 +108,7 @@ fn test_crud_update_with_querybuilder() { } /// Same as above, but with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_update_with_querybuilder_datasource() { // Find all the leagues with ID less or equals that 7 @@ -139,6 +143,7 @@ fn test_crud_update_with_querybuilder_datasource() { /// Note if the 
database is persisted (not created and destroyed on every docker or /// GitHub Action wake up), it won't delete things that already have been deleted, /// but this isn't an error. They just don't exists. +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_delete_with_querybuilder() { Tournament::delete_query() @@ -152,6 +157,7 @@ fn test_crud_delete_with_querybuilder() { } /// Same as the above delete, but with the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_delete_with_querybuilder_datasource() { Player::delete_query_datasource(SQL_SERVER_DS) diff --git a/tests/crud/select_operations.rs b/tests/crud/select_operations.rs index 26e0e5f2..5c20e958 100644 --- a/tests/crud/select_operations.rs +++ b/tests/crud/select_operations.rs @@ -1,6 +1,6 @@ #![allow(clippy::nonminimal_bool)] -use crate::constants::SQL_SERVER_DS; +#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; ///! Integration tests for the CRUD operations available in `Canyon` that ///! 
generates and executes *SELECT* statements use crate::Error; @@ -12,6 +12,7 @@ use crate::tests_models::player::*; /// Tests the behaviour of a SELECT * FROM {table_name} within Canyon, through the /// `::find_all()` associated function derived with the `CanyonCrud` derive proc-macro /// and using the *default datasource* +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_all() { let find_all_result: Result, Box> = @@ -28,6 +29,7 @@ fn test_crud_find_all() { /// Same as the `find_all()`, but with the unchecked variant, which directly returns `Vec` not /// `Result` wrapped +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_all_unchecked() { let find_all_result: Vec = League::find_all_unchecked().await; @@ -37,6 +39,7 @@ fn test_crud_find_all_unchecked() { /// Tests the behaviour of a SELECT * FROM {table_name} within Canyon, through the /// `::find_all()` associated function derived with the `CanyonCrud` derive proc-macro /// and using the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_all_datasource() { let find_all_result: Result, Box> = @@ -48,6 +51,7 @@ fn test_crud_find_all_datasource() { /// Same as the `find_all_datasource()`, but with the unchecked variant and the specified dataosource, /// returning directly `Vec` and not `Result, Err>` +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_all_unchecked_datasource() { let find_all_result: Vec = League::find_all_unchecked_datasource(SQL_SERVER_DS).await; @@ -58,6 +62,7 @@ fn test_crud_find_all_unchecked_datasource() { /// defined with the #[primary_key] attribute over some field of the type. /// /// Uses the *default datasource*. 
+#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_by_pk() { let find_by_pk_result: Result, Box> = @@ -80,6 +85,8 @@ fn test_crud_find_by_pk() { /// defined with the #[primary_key] attribute over some field of the type. /// /// Uses the *specified datasource* in the second parameter of the function call. +#[cfg(feature = "postgres")] +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_by_pk_datasource() { let find_by_pk_result: Result, Box> = @@ -99,6 +106,7 @@ fn test_crud_find_by_pk_datasource() { } /// Counts how many rows contains an entity on the target database. +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_count_operation() { assert_eq!( @@ -109,6 +117,7 @@ fn test_crud_count_operation() { /// Counts how many rows contains an entity on the target database using /// the specified datasource +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_count_datasource_operation() { assert_eq!( diff --git a/tests/crud/update_operations.rs b/tests/crud/update_operations.rs index fc7ae733..eee448cc 100644 --- a/tests/crud/update_operations.rs +++ b/tests/crud/update_operations.rs @@ -2,7 +2,7 @@ ///! generates and executes *UPDATE* statements use canyon_sql::crud::CrudOperations; -use crate::constants::SQL_SERVER_DS; +#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; /// Update operation is a *CRUD* method defined for some entity `T`, that works by appliying @@ -15,6 +15,7 @@ use crate::tests_models::league::*; /// /// Attempt of usage the `t.update(&self)` method on an entity without `#[primary_key]` /// will raise a runtime error. +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_update_method_operation() { // We first retrieve some entity from the database. 
Note that we must make @@ -55,6 +56,7 @@ fn test_crud_update_method_operation() { } /// Same as the above test, but with the specified datasource. +#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_update_datasource_method_operation() { // We first retrieve some entity from the database. Note that we must make From f77aa14c19bc4bb8020221a5d7a193c9f211a3f3 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Thu, 20 Apr 2023 13:33:59 +0200 Subject: [PATCH 20/23] All the APIs stabilized for making use of the conditional compilation across all the Canyon-SQL crates --- Cargo.toml | 7 +- canyon_connection/Cargo.toml | 1 - .../src/canyon_database_connector.rs | 13 +-- canyon_connection/src/lib.rs | 3 +- canyon_crud/Cargo.toml | 7 +- canyon_crud/src/crud.rs | 2 - canyon_crud/src/lib.rs | 1 + canyon_macros/Cargo.toml | 7 +- canyon_macros/src/lib.rs | 2 +- canyon_macros/src/query_operations/select.rs | 42 ++++++- canyon_observer/Cargo.toml | 1 - canyon_observer/src/migrations/processor.rs | 109 ++++++++++-------- tests/Cargo.toml | 3 +- tests/crud/mod.rs | 2 + tests/migrations/mod.rs | 2 + 15 files changed, 117 insertions(+), 85 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c2e7ffd2..216ab313 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,7 @@ members = [ canyon_connection = { workspace = true, path = "canyon_connection" } canyon_crud = { workspace = true, path = "canyon_crud" } canyon_observer = { workspace = true, path = "canyon_observer" } -canyon_macros = { woorkspace = true, path = "canyon_macros" } +canyon_macros = { workspace = true, path = "canyon_macros" } # To be marked as opt deps tokio-postgres = { workspace = true, optional = true } @@ -28,12 +28,14 @@ tiberius = { workspace = true, optional = true } canyon_crud = { version = "0.2.0", path = "canyon_crud" } canyon_connection = { version = "0.2.0", path = "canyon_connection" } canyon_observer = { version = "0.2.0", path = "canyon_observer" } +canyon_macros = { version 
= "0.2.0", path = "canyon_macros" } tokio = { version = "1.27.0", features = ["full"] } tokio-util = { version = "0.7.4", features = ["compat"] } tokio-postgres = { version = "0.7.2", features = ["with-chrono-0_4"] } tiberius = { version = "0.12.1", features = ["tds73", "chrono", "integrated-auth-gssapi"] } +chrono = { version = "0.4", features = ["serde"] } # Just from TP better? serde = { version = "1.0.138", features = ["derive"] } futures = "0.3.25" @@ -54,6 +56,5 @@ license = "MIT" description = "A Rust ORM and QueryBuilder" [features] -default = ["postgres"] -postgres = ["tokio-postgres", "canyon_connection/postgres", "canyon_connection/postgres", "canyon_observer/postgres", "canyon_macros/postgres"] +postgres = ["tokio-postgres", "canyon_connection/postgres", "canyon_crud/postgres", "canyon_observer/postgres", "canyon_macros/postgres"] mssql = ["tiberius", "canyon_connection/mssql", "canyon_crud/mssql", "canyon_observer/mssql", "canyon_macros/mssql"] diff --git a/canyon_connection/Cargo.toml b/canyon_connection/Cargo.toml index 4e140655..c736d124 100644 --- a/canyon_connection/Cargo.toml +++ b/canyon_connection/Cargo.toml @@ -24,6 +24,5 @@ serde = { workspace = true, features = ["derive"] } async-std = { workspace = true, optional = true } [features] -default = ["postgres"] postgres = ["tokio-postgres"] mssql = ["tiberius", "async-std"] diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index 96d88154..7042cc71 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -140,19 +140,18 @@ impl DatabaseConnection { } #[cfg(feature = "postgres")] - #[allow(unreachable_patterns)] - pub fn postgres_connection(&self) -> Option<&PostgreSqlConnection> { + pub fn postgres_connection(&self) -> &PostgreSqlConnection { match self { - DatabaseConnection::Postgres(conn) => Some(conn), - _ => panic!(), + DatabaseConnection::Postgres(conn) => 
conn, + #[cfg(all(feature = "postgres", feature = "mssql"))] _ => panic!(), } } #[cfg(feature = "mssql")] - pub fn sqlserver_connection(&mut self) -> Option<&mut SqlServerConnection> { + pub fn sqlserver_connection(&mut self) -> &mut SqlServerConnection { match self { - DatabaseConnection::SqlServer(conn) => Some(conn), - _ => panic!(), + DatabaseConnection::SqlServer(conn) => conn, + #[cfg(all(feature = "postgres", feature = "mssql"))] _ => panic!(), } } } diff --git a/canyon_connection/src/lib.rs b/canyon_connection/src/lib.rs index 699341d6..434433d4 100644 --- a/canyon_connection/src/lib.rs +++ b/canyon_connection/src/lib.rs @@ -5,10 +5,9 @@ pub extern crate lazy_static; #[cfg(feature = "mssql")] pub extern crate tiberius; pub extern crate tokio; +pub extern crate tokio_util; #[cfg(feature = "postgres")] pub extern crate tokio_postgres; -#[cfg(feature = "postgres")] -pub extern crate tokio_util; pub mod canyon_database_connector; pub mod datasources; diff --git a/canyon_crud/Cargo.toml b/canyon_crud/Cargo.toml index cbe44ad9..eaefae18 100644 --- a/canyon_crud/Cargo.toml +++ b/canyon_crud/Cargo.toml @@ -10,18 +10,13 @@ license.workspace = true description.workspace = true [dependencies] -#tokio = { workspace = true, features = ["full"], optional = true } -#tokio-util = { workspace = true, features = ["compat"], optional = true } -#tokio-postgres = { workspace = true, features = ["with-chrono-0_4"], optional = true } -#tiberius = { workspace = true, features = ["tds73", "chrono", "integrated-auth-gssapi"], optional = true } tokio-postgres = { workspace = true, optional = true } tiberius = { workspace = true, optional = true } -chrono = { version = "0.4", features = ["serde"] } +chrono = { workspace = true, features = ["serde"] } async-trait = { workspace = true } canyon_connection = { workspace = true, path = "../canyon_connection" } [features] -default = ["postgres"] postgres = ["tokio-postgres", "canyon_connection/postgres"] mssql = ["tiberius", 
"canyon_connection/mssql"] diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index b06edfda..bf6b5e90 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -179,7 +179,6 @@ mod postgres_query_launcher { let r = db_conn .postgres_connection() - .unwrap() .client .query(&stmt, m_params.as_slice()) .await?; @@ -228,7 +227,6 @@ mod sqlserver_query_launcher { .query( db_conn .sqlserver_connection() - .expect("Error querying the MSSQL database") .client, ) .await? diff --git a/canyon_crud/src/lib.rs b/canyon_crud/src/lib.rs index ee856f6c..cea474cb 100644 --- a/canyon_crud/src/lib.rs +++ b/canyon_crud/src/lib.rs @@ -1,3 +1,4 @@ +pub extern crate async_trait; extern crate canyon_connection; pub mod bounds; diff --git a/canyon_macros/Cargo.toml b/canyon_macros/Cargo.toml index e306dcab..40682671 100755 --- a/canyon_macros/Cargo.toml +++ b/canyon_macros/Cargo.toml @@ -19,11 +19,10 @@ proc-macro2 = "1.0.27" futures = "0.3.21" tokio = { version = "1.9.0", features = ["full"] } -canyon_observer = { workspace = true, path = "../canyon_observer" } -canyon_crud = { workspace = true, path = "../canyon_crud" } -canyon_connection = { workspace = true, path = "../canyon_connection" } +canyon_observer = { workspace = true } +canyon_crud = { workspace = true } +canyon_connection = { workspace = true } [features] -default = ["postgres"] postgres = ["canyon_connection/postgres", "canyon_crud/postgres", "canyon_observer/postgres"] mssql = ["canyon_connection/mssql", "canyon_crud/mssql", "canyon_observer/mssql"] diff --git a/canyon_macros/src/lib.rs b/canyon_macros/src/lib.rs index 54c4c873..d6b68bac 100755 --- a/canyon_macros/src/lib.rs +++ b/canyon_macros/src/lib.rs @@ -322,7 +322,7 @@ fn impl_crud_operations_trait_for_struct( _search_by_revese_fk_tokens.iter().map(|(_, m_impl)| m_impl); // The autogenerated name for the trait that holds the fk and rev fk searches - let fk_trait_ident = proc_macro2::Ident::new( + let fk_trait_ident = Ident::new( 
&format!("{}FkOperations", &ty.to_string()), proc_macro2::Span::call_site(), ); diff --git a/canyon_macros/src/query_operations/select.rs b/canyon_macros/src/query_operations/select.rs index 55006e5d..0f70ab4d 100644 --- a/canyon_macros/src/query_operations/select.rs +++ b/canyon_macros/src/query_operations/select.rs @@ -150,13 +150,14 @@ pub fn generate_count_tokens( let ty_str = &ty.to_string(); let stmt = format!("SELECT COUNT (*) FROM {table_schema_data}"); - let result_handling = quote! { - match count { - // #[cfg(feature = "postgres")] + let postgres_enabled = cfg!(feature = "postgres"); + let mssql_enabled = cfg!(feature = "mssql"); + + let result_handling = if postgres_enabled && mssql_enabled { + quote! { canyon_sql::crud::CanyonRows::Postgres(mut v) => Ok( v.remove(0).get::<&str, i64>("count") ), - #[cfg(feature = "mssql")] canyon_sql::crud::CanyonRows::Tiberius(mut v) => v.remove(0) .get::(0) @@ -165,6 +166,31 @@ pub fn generate_count_tokens( .into(), _ => panic!() // TODO remove when the generics will be refactored } + } else if postgres_enabled { + quote! { + canyon_sql::crud::CanyonRows::Postgres(mut v) => Ok( + v.remove(0).get::<&str, i64>("count") + ), + _ => panic!() // TODO remove when the generics will be refactored + } + } else if mssql_enabled { + quote! { + canyon_sql::crud::CanyonRows::Tiberius(mut v) => + v.remove(0) + .get::(0) + .map(|c| c as i64) + .ok_or(format!("Failure in the COUNT query for MSSQL for: {}", #ty_str).into()) + .into(), + _ => panic!() // TODO remove when the generics will be refactored + } + } else { + quote! { + panic!( + "Reached a branch in the implementation of the Row Mapper macro that should never be reached.\ + This is a severe bug of Canyon-SQL. Please, open us an issue at \ + https://github.com/zerodaycode/Canyon-SQL/issues and let us know about that failure." + ) + } }; quote! 
{ @@ -177,7 +203,9 @@ pub fn generate_count_tokens( "" ).await?; - #result_handling + match count { + #result_handling + } } /// Performs a COUNT(*) query over some table, returning a [`Result`] rather than panicking, @@ -189,7 +217,9 @@ pub fn generate_count_tokens( datasource_name ).await?; - #result_handling + match count { + #result_handling + } } } } diff --git a/canyon_observer/Cargo.toml b/canyon_observer/Cargo.toml index 7a616e0c..41cb3076 100644 --- a/canyon_observer/Cargo.toml +++ b/canyon_observer/Cargo.toml @@ -26,6 +26,5 @@ quote = "1.0.9" partialdebug = "0.2.0" [features] -default = ["postgres"] postgres = ["tokio-postgres", "canyon_connection/postgres", "canyon_crud/postgres"] mssql = ["tiberius", "canyon_connection/mssql", "canyon_crud/mssql"] diff --git a/canyon_observer/src/migrations/processor.rs b/canyon_observer/src/migrations/processor.rs index e6c23bb4..b8e1de59 100644 --- a/canyon_observer/src/migrations/processor.rs +++ b/canyon_observer/src/migrations/processor.rs @@ -318,8 +318,10 @@ impl MigrationsProcessor { if attr.starts_with("Annotation: PrimaryKey") { Self::add_primary_key(self, entity_name, canyon_register_entity_field.clone()); - if canyon_register_entity_field.is_autoincremental() { - Self::add_identity(self, entity_name, canyon_register_entity_field.clone()); + #[cfg(feature = "postgres")] { + if canyon_register_entity_field.is_autoincremental() { + Self::add_identity(self, entity_name, canyon_register_entity_field.clone()); + } } } } @@ -355,6 +357,7 @@ impl MigrationsProcessor { ))); } + #[cfg(feature = "postgres")] fn add_identity(&mut self, entity_name: &str, field: CanyonRegisterEntityField) { self.constraints_operations .push(Box::new(ColumnOperation::AlterColumnAddIdentity( @@ -390,19 +393,22 @@ impl MigrationsProcessor { if field_is_primary_key && current_column_metadata.primary_key_info.is_none() { Self::add_primary_key(self, entity_name, canyon_register_entity_field.clone()); - if 
canyon_register_entity_field.is_autoincremental() { - Self::add_identity(self, entity_name, canyon_register_entity_field.clone()); + #[cfg(feature = "postgres")] { + if canyon_register_entity_field.is_autoincremental() { + Self::add_identity(self, entity_name, canyon_register_entity_field.clone()); + } } } // Case when the field contains a primary key annotation, and it's already on the database else if field_is_primary_key && current_column_metadata.primary_key_info.is_some() { - let is_autoincr_rust = canyon_register_entity_field.is_autoincremental(); - let is_autoincr_in_db = current_column_metadata.is_identity; - - if !is_autoincr_rust && is_autoincr_in_db { - Self::drop_identity(self, entity_name, canyon_register_entity_field.clone()) - } else if is_autoincr_rust && !is_autoincr_in_db { - Self::add_identity(self, entity_name, canyon_register_entity_field.clone()) + #[cfg(feature = "postgres")] { + let is_autoincr_rust = canyon_register_entity_field.is_autoincremental(); + let is_autoincr_in_db = current_column_metadata.is_identity; + if !is_autoincr_rust && is_autoincr_in_db { + Self::drop_identity(self, entity_name, canyon_register_entity_field.clone()) + } else if is_autoincr_rust && !is_autoincr_in_db { + Self::add_identity(self, entity_name, canyon_register_entity_field.clone()) + } } } // Case when field doesn't contains a primary key annotation, but there is one in the database column @@ -417,8 +423,10 @@ impl MigrationsProcessor { .to_string(), ); - if current_column_metadata.is_identity { - Self::drop_identity(self, entity_name, canyon_register_entity_field.clone()); + #[cfg(feature = "postgres")] { + if current_column_metadata.is_identity { + Self::drop_identity(self, entity_name, canyon_register_entity_field.clone()); + } } } @@ -531,6 +539,7 @@ impl MigrationsProcessor { ))); } + #[cfg(feature = "postgres")] fn drop_identity( &mut self, entity_name: &str, @@ -822,40 +831,40 @@ impl DatabaseOperation for TableOperation { } 
TableOperation::AddTableForeignKey( - table_name, - foreign_key_name, - column_foreign_key, - table_to_reference, - column_to_reference, + _table_name, + _foreign_key_name, + _column_foreign_key, + _table_to_reference, + _column_to_reference, ) => { match db_type { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( - "ALTER TABLE {table_name} ADD CONSTRAINT {foreign_key_name} \ - FOREIGN KEY ({column_foreign_key}) REFERENCES {table_to_reference} ({column_to_reference});" + "ALTER TABLE {_table_name} ADD CONSTRAINT {_foreign_key_name} \ + FOREIGN KEY ({_column_foreign_key}) REFERENCES {_table_to_reference} ({_column_to_reference});" ), #[cfg(feature = "mssql")] DatabaseType::SqlServer => todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } - TableOperation::DeleteTableForeignKey(table_with_foreign_key, constraint_name) => { + TableOperation::DeleteTableForeignKey(_table_with_foreign_key, _constraint_name) => { match db_type { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( - "ALTER TABLE {table_with_foreign_key} DROP CONSTRAINT {constraint_name};", + "ALTER TABLE {_table_with_foreign_key} DROP CONSTRAINT {_constraint_name};", ), #[cfg(feature = "mssql")] DatabaseType::SqlServer => todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } } - TableOperation::AddTablePrimaryKey(table_name, entity_field) => { + TableOperation::AddTablePrimaryKey(_table_name, _entity_field) => { match db_type { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( - "ALTER TABLE \"{table_name}\" ADD PRIMARY KEY (\"{}\");", - entity_field.field_name + "ALTER TABLE \"{_table_name}\" ADD PRIMARY KEY (\"{}\");", + _entity_field.field_name ), #[cfg(feature = "mssql")] DatabaseType::SqlServer => todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") @@ -885,12 +894,10 @@ enum ColumnOperation { // AlterColumnName, AlterColumnType(String, CanyonRegisterEntityField), 
AlterColumnDropNotNull(String, CanyonRegisterEntityField), - // SQL server specific operation - SQL server can't drop a NOT NULL column - #[cfg(feature = "mssql")] - DropNotNullBeforeDropColumn(String, String, String), - #[cfg(feature = "postgres")] AlterColumnSetNotNull(String, CanyonRegisterEntityField), - // TODO if implement through annotations, modify for both GENERATED {ALWAYS, BY DEFAULT} + + #[cfg(feature = "mssql")] // SQL server specific operation - SQL server can't drop a NOT NULL column + DropNotNullBeforeDropColumn(String, String, String), #[cfg(feature = "postgres")] AlterColumnAddIdentity(String, CanyonRegisterEntityField), #[cfg(feature = "postgres")] @@ -926,12 +933,12 @@ impl DatabaseOperation for ColumnOperation { // TODO Check if operation for SQL server is different format!("ALTER TABLE \"{table_name}\" DROP COLUMN \"{column_name}\";") }, - ColumnOperation::AlterColumnType(table_name, entity_field) => + ColumnOperation::AlterColumnType(_table_name, _entity_field) => match db_type { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( - "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" TYPE {};", - entity_field.field_name, entity_field.to_postgres_alter_syntax() + "ALTER TABLE \"{_table_name}\" ALTER COLUMN \"{}\" TYPE {};", + _entity_field.field_name, _entity_field.to_postgres_alter_syntax() ), #[cfg(feature = "mssql")] DatabaseType::SqlServer => todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") @@ -961,9 +968,18 @@ impl DatabaseOperation for ColumnOperation { EXEC('ALTER TABLE '+@tableName+' DROP CONSTRAINT ' + @ConstraintName);" ), - ColumnOperation::AlterColumnSetNotNull(table_name, entity_field) => format!( - "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" SET NOT NULL;", entity_field.field_name - ), + ColumnOperation::AlterColumnSetNotNull(table_name, entity_field) => { + match db_type { + #[cfg(feature = "postgres")] DatabaseType::PostgreSql => format!( + "ALTER TABLE \"{table_name}\" ALTER 
COLUMN \"{}\" SET NOT NULL;", entity_field.field_name + ), + #[cfg(feature = "mssql")] DatabaseType::SqlServer => format!( + "ALTER TABLE \"{table_name}\" ALTER COLUMN {} {} NOT NULL", + entity_field.field_name, + entity_field.to_sqlserver_alter_syntax() + ) + } + } #[cfg(feature = "postgres")] ColumnOperation::AlterColumnAddIdentity(table_name, entity_field) => format!( "ALTER TABLE \"{table_name}\" ALTER COLUMN \"{}\" ADD GENERATED ALWAYS AS IDENTITY;", entity_field.field_name @@ -979,33 +995,26 @@ impl DatabaseOperation for ColumnOperation { } /// Helper for operations involving sequences +#[cfg(feature = "postgres")] #[derive(Debug)] -#[allow(dead_code)] enum SequenceOperation { ModifySequence(String, CanyonRegisterEntityField), } - +#[cfg(feature = "postgres")] impl Transaction for SequenceOperation {} +#[cfg(feature = "postgres")] #[async_trait] impl DatabaseOperation for SequenceOperation { async fn generate_sql(&self, datasource: &DatasourceConfig) { - let db_type = datasource.get_db_type(); - let stmt = match self { SequenceOperation::ModifySequence(table_name, entity_field) => { - match db_type { - #[cfg(feature = "postgres")] DatabaseType::PostgreSql => - format!( - "SELECT setval(pg_get_serial_sequence('\"{table_name}\"', '{}'), max(\"{}\")) from \"{table_name}\";", - entity_field.field_name, entity_field.field_name - ), - #[cfg(feature = "mssql")] DatabaseType::SqlServer => - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") - } + format!( + "SELECT setval(pg_get_serial_sequence('\"{table_name}\"', '{}'), max(\"{}\")) from \"{table_name}\";", + entity_field.field_name, entity_field.field_name + ) } }; - save_migrations_query_to_execute(stmt, &datasource.name); } } diff --git a/tests/Cargo.toml b/tests/Cargo.toml index e606e3c9..8d96ac44 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -5,13 +5,12 @@ edition.workspace = true publish = false [dev-dependencies] -canyon_sql = { path = "..", default-features = false, 
features = ["mssql"] } +canyon_sql = { path = "..", features = ["postgres", "mssql"] } [[test]] name = "canyon_integration_tests" path = "canyon_integration_tests.rs" [features] -default = ["postgres"] postgres = [] mssql = [] diff --git a/tests/crud/mod.rs b/tests/crud/mod.rs index 5b11a7ed..82fdfd0b 100644 --- a/tests/crud/mod.rs +++ b/tests/crud/mod.rs @@ -1,3 +1,5 @@ +#![allow(unused_imports)] + pub mod delete_operations; pub mod foreign_key_operations; pub mod insert_operations; diff --git a/tests/migrations/mod.rs b/tests/migrations/mod.rs index 12dfa111..47f82566 100644 --- a/tests/migrations/mod.rs +++ b/tests/migrations/mod.rs @@ -1,9 +1,11 @@ +#![allow(unused_imports)] ///! Integration tests for the migrations feature of `Canyon-SQL` use canyon_sql::{crud::Transaction, migrations::handler::Migrations}; use crate::constants; /// Brings the information of the `PostgreSQL` requested schema +#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_migrations_postgresql_status_query() { let results = Migrations::query(constants::FETCH_PUBLIC_SCHEMA, [], constants::PSQL_DS).await; From 8a3decfd0ed75f0cabab96cc802da4d9e475e065 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Thu, 20 Apr 2023 14:05:17 +0200 Subject: [PATCH 21/23] Setting up a better version for looking for the Canyon configuration file across directories --- Cargo.toml | 1 + canyon.toml | 24 ------- canyon_connection/Cargo.toml | 2 + .../src/canyon_database_connector.rs | 15 ++-- canyon_connection/src/datasources.rs | 7 +- canyon_connection/src/lib.rs | 26 +++++-- canyon_crud/src/bounds.rs | 14 ++-- canyon_crud/src/crud.rs | 6 +- canyon_crud/src/rows.rs | 54 ++++---------- canyon_macros/src/lib.rs | 70 +++++++++---------- canyon_observer/Cargo.toml | 2 +- canyon_observer/src/migrations/processor.rs | 17 +++-- src/lib.rs | 6 +- tests/Cargo.toml | 6 +- tests/canyon.toml | 20 +++--- tests/constants.rs | 12 ++-- tests/crud/delete_operations.rs | 6 +- 
tests/crud/foreign_key_operations.rs | 3 +- tests/crud/insert_operations.rs | 3 +- tests/crud/mod.rs | 3 +- tests/crud/querybuilder_operations.rs | 6 +- tests/crud/select_operations.rs | 3 +- tests/crud/update_operations.rs | 3 +- 23 files changed, 148 insertions(+), 161 deletions(-) delete mode 100644 canyon.toml diff --git a/Cargo.toml b/Cargo.toml index 216ab313..005b8648 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,6 +44,7 @@ async-std = "1.12.0" lazy_static = "1.4.0" toml = "0.7.3" async-trait = "0.1.68" +walkdir = "2.3.3" [workspace.package] version = "0.2.0" diff --git a/canyon.toml b/canyon.toml deleted file mode 100644 index 0b0614a4..00000000 --- a/canyon.toml +++ /dev/null @@ -1,24 +0,0 @@ -[canyon_sql] - -[[canyon_sql.datasources]] -name = 'postgres_docker' - -[canyon_sql.datasources.auth] -postgresql = { basic = { username = 'postgres', password = 'postgres'}} - -[canyon_sql.datasources.properties] -host = 'localhost' -port = 5438 -db_name = 'postgres' - - -[[canyon_sql.datasources]] -name = 'sqlserver_docker' - -[canyon_sql.datasources.auth] -sqlserver = { basic = { username = 'sa', password = 'SqlServer-10' } } - -[canyon_sql.datasources.properties] -host = 'localhost' -port = 1434 -db_name = 'master' diff --git a/canyon_connection/Cargo.toml b/canyon_connection/Cargo.toml index c736d124..886971bb 100644 --- a/canyon_connection/Cargo.toml +++ b/canyon_connection/Cargo.toml @@ -22,6 +22,8 @@ lazy_static = { workspace = true } toml = { workspace = true } serde = { workspace = true, features = ["derive"] } async-std = { workspace = true, optional = true } +walkdir = { workspace = true } + [features] postgres = ["tokio-postgres"] diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index 7042cc71..7196e948 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -143,7 +143,8 @@ impl DatabaseConnection { pub fn 
postgres_connection(&self) -> &PostgreSqlConnection { match self { DatabaseConnection::Postgres(conn) => conn, - #[cfg(all(feature = "postgres", feature = "mssql"))] _ => panic!(), + #[cfg(all(feature = "postgres", feature = "mssql"))] + _ => panic!(), } } @@ -151,7 +152,8 @@ impl DatabaseConnection { pub fn sqlserver_connection(&mut self) -> &mut SqlServerConnection { match self { DatabaseConnection::SqlServer(conn) => conn, - #[cfg(all(feature = "postgres", feature = "mssql"))] _ => panic!(), + #[cfg(all(feature = "postgres", feature = "mssql"))] + _ => panic!(), } } } @@ -164,7 +166,8 @@ mod database_connection_handler { /// Tests the behaviour of the `DatabaseType::from_datasource(...)` #[test] fn check_from_datasource() { - #[cfg(all(feature = "postgres", feature = "mssql"))] { + #[cfg(all(feature = "postgres", feature = "mssql"))] + { const CONFIG_FILE_MOCK_ALT_ALL: &str = r#" [canyon_sql] datasources = [ @@ -184,7 +187,8 @@ mod database_connection_handler { ); } - #[cfg(feature = "postgres")] { + #[cfg(feature = "postgres")] + { const CONFIG_FILE_MOCK_ALT_PG: &str = r#" [canyon_sql] datasources = [ @@ -199,7 +203,8 @@ mod database_connection_handler { ); } - #[cfg(feature = "mssql")] { + #[cfg(feature = "mssql")] + { const CONFIG_FILE_MOCK_ALT_MSSQL: &str = r#" [canyon_sql] datasources = [ diff --git a/canyon_connection/src/datasources.rs b/canyon_connection/src/datasources.rs index 82775fd7..9571c343 100644 --- a/canyon_connection/src/datasources.rs +++ b/canyon_connection/src/datasources.rs @@ -5,7 +5,8 @@ use crate::canyon_database_connector::DatabaseType; /// ``` #[test] fn load_ds_config_from_array() { - #[cfg(feature = "postgres")] { + #[cfg(feature = "postgres")] + { const CONFIG_FILE_MOCK_ALT_PG: &str = r#" [canyon_sql] datasources = [ @@ -32,7 +33,8 @@ fn load_ds_config_from_array() { assert_eq!(ds_0.properties.migrations, Some(Migrations::Enabled)); } - #[cfg(feature = "mssql")] { + #[cfg(feature = "mssql")] + { const CONFIG_FILE_MOCK_ALT_MSSQL: 
&str = r#" [canyon_sql] datasources = [ @@ -46,7 +48,6 @@ fn load_ds_config_from_array() { let ds_1 = &config.canyon_sql.datasources[0]; let ds_2 = &config.canyon_sql.datasources[1]; - assert_eq!(ds_1.name, "SqlServerDS"); assert_eq!(ds_1.get_db_type(), DatabaseType::SqlServer); assert_eq!( diff --git a/canyon_connection/src/lib.rs b/canyon_connection/src/lib.rs index 434433d4..fed9f31f 100644 --- a/canyon_connection/src/lib.rs +++ b/canyon_connection/src/lib.rs @@ -5,29 +5,29 @@ pub extern crate lazy_static; #[cfg(feature = "mssql")] pub extern crate tiberius; pub extern crate tokio; -pub extern crate tokio_util; #[cfg(feature = "postgres")] pub extern crate tokio_postgres; +pub extern crate tokio_util; pub mod canyon_database_connector; pub mod datasources; use std::fs; +use std::path::PathBuf; use crate::datasources::{CanyonSqlConfig, DatasourceConfig}; use canyon_database_connector::DatabaseConnection; use indexmap::IndexMap; use lazy_static::lazy_static; use tokio::sync::{Mutex, MutexGuard}; - -const CONFIG_FILE_IDENTIFIER: &str = "canyon.toml"; +use walkdir::WalkDir; lazy_static! { pub static ref CANYON_TOKIO_RUNTIME: tokio::runtime::Runtime = tokio::runtime::Runtime::new() // TODO Make the config with the builder .expect("Failed initializing the Canyon-SQL Tokio Runtime"); - static ref RAW_CONFIG_FILE: String = fs::read_to_string(CONFIG_FILE_IDENTIFIER) + static ref RAW_CONFIG_FILE: String = fs::read_to_string(find_canyon_config_file()) .expect("Error opening or reading the Canyon configuration file"); static ref CONFIG_FILE: CanyonSqlConfig = toml::from_str(RAW_CONFIG_FILE.as_str()) .expect("Error generating the configuration for Canyon-SQL"); @@ -39,6 +39,24 @@ lazy_static! 
{ Mutex::new(IndexMap::new()); } +fn find_canyon_config_file() -> PathBuf { + for e in WalkDir::new(".") + .max_depth(2) + .into_iter() + .filter_map(|e| e.ok()) + { + let filename = e.file_name().to_str().unwrap(); + if e.metadata().unwrap().is_file() + && filename.starts_with("canyon") + && filename.ends_with(".toml") + { + return e.path().to_path_buf(); + } + } + + panic!() +} + /// Convenient free function to initialize a kind of connection pool based on the datasources present defined /// in the configuration file. /// diff --git a/canyon_crud/src/bounds.rs b/canyon_crud/src/bounds.rs index 7ed83f1b..d46bf863 100644 --- a/canyon_crud/src/bounds.rs +++ b/canyon_crud/src/bounds.rs @@ -539,7 +539,7 @@ impl<'a> QueryParameter<'a> for Option<&String> { } } } -impl<'a> QueryParameter<'_> for &'_ str { +impl<'a> QueryParameter<'a> for &'_ str { #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self @@ -562,7 +562,7 @@ impl<'a> QueryParameter<'a> for Option<&'_ str> { } } } -impl<'a> QueryParameter<'_> for NaiveDate { +impl<'a> QueryParameter<'a> for NaiveDate { #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self @@ -582,7 +582,7 @@ impl<'a> QueryParameter<'a> for Option { self.into_sql() } } -impl<'a> QueryParameter<'_> for NaiveTime { +impl<'a> QueryParameter<'a> for NaiveTime { #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self @@ -602,7 +602,7 @@ impl<'a> QueryParameter<'a> for Option { self.into_sql() } } -impl<'a> QueryParameter<'_> for NaiveDateTime { +impl<'a> QueryParameter<'a> for NaiveDateTime { #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self @@ -622,7 +622,7 @@ impl<'a> QueryParameter<'a> for Option { self.into_sql() } } -impl<'a> QueryParameter<'_> for DateTime { +impl<'a> QueryParameter<'a> for DateTime { #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self @@ -642,7 +642,7 @@ 
impl<'a> QueryParameter<'a> for Option> { self.into_sql() } } -impl<'a> QueryParameter<'_> for DateTime { +impl<'a> QueryParameter<'a> for DateTime { #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self @@ -652,7 +652,7 @@ impl<'a> QueryParameter<'_> for DateTime { self.into_sql() } } -impl<'a> QueryParameter<'_> for Option> { +impl<'a> QueryParameter<'a> for Option> { #[cfg(feature = "postgres")] fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { self diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index bf6b5e90..8509a91e 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -224,11 +224,7 @@ mod sqlserver_query_launcher { .for_each(|param| mssql_query.bind(*param)); let _results = mssql_query - .query( - db_conn - .sqlserver_connection() - .client, - ) + .query(db_conn.sqlserver_connection().client) .await? .into_results() .await?; diff --git a/canyon_crud/src/rows.rs b/canyon_crud/src/rows.rs index 056e136a..d8d35070 100644 --- a/canyon_crud/src/rows.rs +++ b/canyon_crud/src/rows.rs @@ -42,7 +42,7 @@ impl CanyonRows { #[cfg(feature = "postgres")] Self::Postgres(v) => v.iter().map(|row| Z::deserialize_postgresql(row)).collect(), #[cfg(feature = "mssql")] - Self::Tiberius(v) => v.iter().map(|row| Z::deserialize_sqlserver(&row)).collect(), + Self::Tiberius(v) => v.iter().map(|row| Z::deserialize_sqlserver(row)).collect(), _ => panic!("This branch will never ever should be reachable"), } } @@ -57,45 +57,15 @@ impl CanyonRows { _ => panic!("This branch will never ever should be reachable"), } } -} -// #[cfg(feature = "postgres")] -// impl IntoIterator for CanyonRows { -// type Item = tokio_postgres::Row; -// type IntoIter = std::vec::IntoIter; -// -// fn into_iter(self) -> Self::IntoIter { -// match self { -// Self::Postgres(v) => v.into_iter(), -// _ => panic!() -// } -// } -// } -// -// #[cfg(feature = "mssql")] -// impl IntoIterator for CanyonRows { -// type Item = tiberius::Row; -// type IntoIter 
= std::vec::IntoIter; -// -// fn into_iter(self) -> Self::IntoIter { -// match self { -// Self::Tiberius(v) => v.into_iter(), -// _ => panic!() -// } -// } -// } -// -// #[cfg(all(feature = "tokio-postgres", feature = "tiberius"))] -// impl IntoIterator for CanyonRows { -// if cfg!(feature = "tokio-postgres") { -// type Item = tokio_postgres::Row; -// } else { type Item = tiberius::Row; } -// type IntoIter = std::vec::IntoIter; -// -// fn into_iter(self) -> Self::IntoIter { -// match self { -// Self::Tiberius(v) => v.into_iter(), -// _ => panic!() -// } -// } -// } + /// Returns true whenever the wrapped collection of Rows does not contains any elements + pub fn is_empty(&self) -> bool { + match self { + #[cfg(feature = "postgres")] + Self::Postgres(v) => v.is_empty(), + #[cfg(feature = "mssql")] + Self::Tiberius(v) => v.is_empty(), + _ => panic!("This branch will never ever should be reachable"), + } + } +} diff --git a/canyon_macros/src/lib.rs b/canyon_macros/src/lib.rs index d6b68bac..ce03cc58 100755 --- a/canyon_macros/src/lib.rs +++ b/canyon_macros/src/lib.rs @@ -487,82 +487,82 @@ pub fn implement_row_mapper_for_type(input: proc_macro::TokenStream) -> proc_mac }); let init_field_values_sqlserver = fields.iter().map(|(_vis, ident, ty)| { - let ident_name = ident.to_string(); + let ident_name = ident.to_string(); - if get_field_type_as_string(ty) == "String" { - quote! { + if get_field_type_as_string(ty) == "String" { + quote! { #ident: row.get::<&str, &str>(#ident_name) .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) .to_string() } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! 
{ #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::<&str, &str>(#ident_name) .map( |x| x.to_owned() ) } - } else if get_field_type_as_string(ty) == "NaiveDate" { - quote! { + } else if get_field_type_as_string(ty) == "NaiveDate" { + quote! { #ident: row.get::(#ident_name) .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty) == "NaiveTime" { - quote! { + } else if get_field_type_as_string(ty) == "NaiveTime" { + quote! { #ident: row.get::(#ident_name) .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty) == "NaiveDateTime" { - quote! { + } else if get_field_type_as_string(ty) == "NaiveDateTime" { + quote! 
{ #ident: row.get::(#ident_name) .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else if get_field_type_as_string(ty) == "DateTime" { - quote! { + } else if get_field_type_as_string(ty) == "DateTime" { + quote! { #ident: row.get::(#ident_name) .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { + } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { + quote! { #ident: row.get::(#ident_name) } - } else { - quote! { + } else { + quote! { #ident: row.get::<#ty, &str>(#ident_name) .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) } - } - }); + } + }); // The type of the Struct let ty = ast.ident; diff --git a/canyon_observer/Cargo.toml b/canyon_observer/Cargo.toml index 41cb3076..c1b090ab 100644 --- a/canyon_observer/Cargo.toml +++ b/canyon_observer/Cargo.toml @@ -19,7 +19,7 @@ async-trait = { workspace = true } # transform to opts with migrations feature regex = "1.5" # opt -walkdir = "2" # opt +walkdir = { workspace = true } proc-macro2 = "1.0.27" syn = { version = "1.0.86", features = ["full", "parsing"] } quote = "1.0.9" diff --git a/canyon_observer/src/migrations/processor.rs b/canyon_observer/src/migrations/processor.rs index b8e1de59..b096b828 100644 --- a/canyon_observer/src/migrations/processor.rs +++ b/canyon_observer/src/migrations/processor.rs @@ -318,7 +318,8 @@ impl MigrationsProcessor { if attr.starts_with("Annotation: PrimaryKey") { Self::add_primary_key(self, entity_name, canyon_register_entity_field.clone()); - #[cfg(feature = "postgres")] { + #[cfg(feature = "postgres")] + { if canyon_register_entity_field.is_autoincremental() { Self::add_identity(self, entity_name, 
canyon_register_entity_field.clone()); } @@ -393,7 +394,8 @@ impl MigrationsProcessor { if field_is_primary_key && current_column_metadata.primary_key_info.is_none() { Self::add_primary_key(self, entity_name, canyon_register_entity_field.clone()); - #[cfg(feature = "postgres")] { + #[cfg(feature = "postgres")] + { if canyon_register_entity_field.is_autoincremental() { Self::add_identity(self, entity_name, canyon_register_entity_field.clone()); } @@ -401,7 +403,8 @@ impl MigrationsProcessor { } // Case when the field contains a primary key annotation, and it's already on the database else if field_is_primary_key && current_column_metadata.primary_key_info.is_some() { - #[cfg(feature = "postgres")] { + #[cfg(feature = "postgres")] + { let is_autoincr_rust = canyon_register_entity_field.is_autoincremental(); let is_autoincr_in_db = current_column_metadata.is_identity; if !is_autoincr_rust && is_autoincr_in_db { @@ -423,7 +426,8 @@ impl MigrationsProcessor { .to_string(), ); - #[cfg(feature = "postgres")] { + #[cfg(feature = "postgres")] + { if current_column_metadata.is_identity { Self::drop_identity(self, entity_name, canyon_register_entity_field.clone()); } @@ -674,7 +678,7 @@ impl MigrationsHelper { } } - return false; + false } fn extract_foreign_key_annotation(field_annotations: &[String]) -> (String, String) { @@ -896,7 +900,8 @@ enum ColumnOperation { AlterColumnDropNotNull(String, CanyonRegisterEntityField), AlterColumnSetNotNull(String, CanyonRegisterEntityField), - #[cfg(feature = "mssql")] // SQL server specific operation - SQL server can't drop a NOT NULL column + #[cfg(feature = "mssql")] + // SQL server specific operation - SQL server can't drop a NOT NULL column DropNotNullBeforeDropColumn(String, String, String), #[cfg(feature = "postgres")] AlterColumnAddIdentity(String, CanyonRegisterEntityField), diff --git a/src/lib.rs b/src/lib.rs index e40d9d3b..33a2c82b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -39,8 +39,8 @@ pub mod crud { pub use 
canyon_crud::bounds; pub use canyon_crud::crud::*; pub use canyon_crud::mapper::*; - pub use canyon_crud::DatabaseType; pub use canyon_crud::rows::CanyonRows; + pub use canyon_crud::DatabaseType; } /// Re-exports the query elements from the `crud`crate @@ -51,10 +51,10 @@ pub mod query { /// Reexport the available database clients within Canyon pub mod db_clients { - #[cfg(feature = "postgres")] - pub use canyon_connection::tokio_postgres; #[cfg(feature = "mssql")] pub use canyon_connection::tiberius; + #[cfg(feature = "postgres")] + pub use canyon_connection::tokio_postgres; } /// Reexport the needed runtime dependencies diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 8d96ac44..da6b0dfc 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -5,12 +5,12 @@ edition.workspace = true publish = false [dev-dependencies] -canyon_sql = { path = "..", features = ["postgres", "mssql"] } +canyon_sql = { path = ".." } [[test]] name = "canyon_integration_tests" path = "canyon_integration_tests.rs" [features] -postgres = [] -mssql = [] +postgres = ["canyon_sql/postgres"] +mssql = ["canyon_sql/mssql"] diff --git a/tests/canyon.toml b/tests/canyon.toml index 0b0614a4..dfa4a666 100644 --- a/tests/canyon.toml +++ b/tests/canyon.toml @@ -12,13 +12,13 @@ port = 5438 db_name = 'postgres' -[[canyon_sql.datasources]] -name = 'sqlserver_docker' - -[canyon_sql.datasources.auth] -sqlserver = { basic = { username = 'sa', password = 'SqlServer-10' } } - -[canyon_sql.datasources.properties] -host = 'localhost' -port = 1434 -db_name = 'master' +#[[canyon_sql.datasources]] +#name = 'sqlserver_docker' +# +#[canyon_sql.datasources.auth] +#sqlserver = { basic = { username = 'sa', password = 'SqlServer-10' } } +# +#[canyon_sql.datasources.properties] +#host = 'localhost' +#port = 1434 +#db_name = 'master' diff --git a/tests/constants.rs b/tests/constants.rs index 8fb86a44..1c9c8044 100644 --- a/tests/constants.rs +++ b/tests/constants.rs @@ -1,9 +1,12 @@ ///! 
Constant values to share across the integration tests -#[cfg(feature = "postgres")] pub const PSQL_DS: &str = "postgres_docker"; -#[cfg(feature = "mssql")] pub const SQL_SERVER_DS: &str = "sqlserver_docker"; +#[cfg(feature = "postgres")] +pub const PSQL_DS: &str = "postgres_docker"; +#[cfg(feature = "mssql")] +pub const SQL_SERVER_DS: &str = "sqlserver_docker"; -#[cfg(feature = "postgres")] pub static FETCH_PUBLIC_SCHEMA: &str = +#[cfg(feature = "postgres")] +pub static FETCH_PUBLIC_SCHEMA: &str = "SELECT gi.table_name, gi.column_name, @@ -34,7 +37,8 @@ LEFT JOIN pg_catalog.pg_constraint AS con on WHERE table_schema = 'public';"; -#[cfg(feature = "mssql")] pub const SQL_SERVER_CREATE_TABLES: &str = " +#[cfg(feature = "mssql")] +pub const SQL_SERVER_CREATE_TABLES: &str = " IF OBJECT_ID(N'[dbo].[league]', N'U') IS NULL BEGIN CREATE TABLE dbo.league ( diff --git a/tests/crud/delete_operations.rs b/tests/crud/delete_operations.rs index fb2e07e9..6420e553 100644 --- a/tests/crud/delete_operations.rs +++ b/tests/crud/delete_operations.rs @@ -2,8 +2,10 @@ ///! generates and executes *INSERT* statements use canyon_sql::crud::CrudOperations; -#[cfg(feature = "postgres")] use crate::constants::PSQL_DS; -#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; +#[cfg(feature = "postgres")] +use crate::constants::PSQL_DS; +#[cfg(feature = "mssql")] +use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; /// Deletes a row from the database that is mapped into some instance of a `T` entity. diff --git a/tests/crud/foreign_key_operations.rs b/tests/crud/foreign_key_operations.rs index b74f6852..471dd639 100644 --- a/tests/crud/foreign_key_operations.rs +++ b/tests/crud/foreign_key_operations.rs @@ -10,7 +10,8 @@ ///! 
For more info: TODO -> Link to the docs of the foreign key chapter use canyon_sql::crud::CrudOperations; -#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; +#[cfg(feature = "mssql")] +use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; use crate::tests_models::tournament::*; diff --git a/tests/crud/insert_operations.rs b/tests/crud/insert_operations.rs index 06ffbcbf..d52fa868 100644 --- a/tests/crud/insert_operations.rs +++ b/tests/crud/insert_operations.rs @@ -2,7 +2,8 @@ ///! generates and executes *INSERT* statements use canyon_sql::crud::CrudOperations; -#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; +#[cfg(feature = "mssql")] +use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; /// Inserts a new record on the database, given an entity that is diff --git a/tests/crud/mod.rs b/tests/crud/mod.rs index 82fdfd0b..407e727c 100644 --- a/tests/crud/mod.rs +++ b/tests/crud/mod.rs @@ -2,8 +2,9 @@ pub mod delete_operations; pub mod foreign_key_operations; +#[cfg(feature = "mssql")] +pub mod init_mssql; pub mod insert_operations; pub mod querybuilder_operations; pub mod select_operations; pub mod update_operations; -#[cfg(feature = "mssql")] pub mod init_mssql; diff --git a/tests/crud/querybuilder_operations.rs b/tests/crud/querybuilder_operations.rs index 8f9d1659..1c853161 100644 --- a/tests/crud/querybuilder_operations.rs +++ b/tests/crud/querybuilder_operations.rs @@ -9,10 +9,12 @@ use canyon_sql::{ query::{operators::Comp, ops::QueryBuilder}, }; +#[cfg(feature = "mssql")] +use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; +#[cfg(feature = "mssql")] +use crate::tests_models::player::*; use crate::tests_models::tournament::*; -#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; -#[cfg(feature = "mssql")] use crate::tests_models::player::*; /// Builds a new SQL statement for retrieves entities of the `T` type, filtered /// with the parameters that modifies the base 
SQL to SELECT * FROM diff --git a/tests/crud/select_operations.rs b/tests/crud/select_operations.rs index 5c20e958..9f9a6f5c 100644 --- a/tests/crud/select_operations.rs +++ b/tests/crud/select_operations.rs @@ -1,6 +1,7 @@ #![allow(clippy::nonminimal_bool)] -#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; +#[cfg(feature = "mssql")] +use crate::constants::SQL_SERVER_DS; ///! Integration tests for the CRUD operations available in `Canyon` that ///! generates and executes *SELECT* statements use crate::Error; diff --git a/tests/crud/update_operations.rs b/tests/crud/update_operations.rs index eee448cc..e4085560 100644 --- a/tests/crud/update_operations.rs +++ b/tests/crud/update_operations.rs @@ -2,7 +2,8 @@ ///! generates and executes *UPDATE* statements use canyon_sql::crud::CrudOperations; -#[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; +#[cfg(feature = "mssql")] +use crate::constants::SQL_SERVER_DS; use crate::tests_models::league::*; /// Update operation is a *CRUD* method defined for some entity `T`, that works by appliying From a6049a1ab144fcbe227dcda54827b8723bc077f9 Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Thu, 20 Apr 2023 17:13:59 +0200 Subject: [PATCH 22/23] v0.3.0 --- .github/workflows/code-quality.yml | 2 +- CHANGELOG.md | 9 +++++++++ Cargo.toml | 14 +++++++++----- canyon_connection/Cargo.toml | 6 +++--- canyon_crud/Cargo.toml | 4 ++-- canyon_crud/src/crud.rs | 23 +++-------------------- canyon_macros/Cargo.toml | 10 +++++----- canyon_observer/Cargo.toml | 9 ++++----- tests/canyon.toml | 20 ++++++++++---------- 9 files changed, 46 insertions(+), 51 deletions(-) diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml index 07ce16a2..9de14f14 100644 --- a/.github/workflows/code-quality.yml +++ b/.github/workflows/code-quality.yml @@ -55,7 +55,7 @@ jobs: strategy: fail-fast: false matrix: - crate: [canyon_connection, canyon_crud, canyon_macros, canyon_observer, canyon_sql_root] + crate: 
[canyon_connection, canyon_crud, canyon_macros, canyon_observer] steps: - uses: actions/checkout@v3 diff --git a/CHANGELOG.md b/CHANGELOG.md index d79ce967..db434f8a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,15 @@ Year format is defined as: `YYYY-m-d` ## [0.2.0] - 2023 - 04 - 13 +### Feature + +- Enabled conditional compilation for the database dependencies of the project. +This caused a major rework in the codebase, but none of the client APIs has been affected. +Now, Canyon-SQL comes with two features, ["postgres", "mssql"]. +There's no default features enabled for the project. + +## [0.2.0] - 2023 - 04 - 13 + ### Feature [BREAKING CHANGES] - The configuration file has been reworked, by providing a whole category dedicated diff --git a/Cargo.toml b/Cargo.toml index 005b8648..6f4da496 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,10 +25,10 @@ tokio-postgres = { workspace = true, optional = true } tiberius = { workspace = true, optional = true } [workspace.dependencies] -canyon_crud = { version = "0.2.0", path = "canyon_crud" } -canyon_connection = { version = "0.2.0", path = "canyon_connection" } -canyon_observer = { version = "0.2.0", path = "canyon_observer" } -canyon_macros = { version = "0.2.0", path = "canyon_macros" } +canyon_crud = { version = "0.3.0", path = "canyon_crud" } +canyon_connection = { version = "0.3.0", path = "canyon_connection" } +canyon_observer = { version = "0.3.0", path = "canyon_observer" } +canyon_macros = { version = "0.3.0", path = "canyon_macros" } tokio = { version = "1.27.0", features = ["full"] } tokio-util = { version = "0.7.4", features = ["compat"] } @@ -45,9 +45,13 @@ lazy_static = "1.4.0" toml = "0.7.3" async-trait = "0.1.68" walkdir = "2.3.3" +regex = "1.5" + +quote = "1.0.9" +proc-macro2 = "1.0.27" [workspace.package] -version = "0.2.0" +version = "0.3.0" edition = "2021" authors = ["Alex Vergara, Gonzalo Busto"] documentation = "https://zerodaycode.github.io/canyon-book/" diff --git 
a/canyon_connection/Cargo.toml b/canyon_connection/Cargo.toml index 886971bb..fd37fd4e 100644 --- a/canyon_connection/Cargo.toml +++ b/canyon_connection/Cargo.toml @@ -13,14 +13,14 @@ description.workspace = true tokio = { workspace = true } tokio-util = { workspace = true } -tokio-postgres = { version = "0.7.2", features = ["with-chrono-0_4"], optional = true } -tiberius = { version = "0.12.1", features = ["tds73", "chrono", "integrated-auth-gssapi"], optional = true } +tokio-postgres = { workspace = true, optional = true } +tiberius = { workspace = true, optional = true } futures = { workspace = true } indexmap = { workspace = true } lazy_static = { workspace = true } toml = { workspace = true } -serde = { workspace = true, features = ["derive"] } +serde = { workspace = true } async-std = { workspace = true, optional = true } walkdir = { workspace = true } diff --git a/canyon_crud/Cargo.toml b/canyon_crud/Cargo.toml index eaefae18..123a44fe 100644 --- a/canyon_crud/Cargo.toml +++ b/canyon_crud/Cargo.toml @@ -12,10 +12,10 @@ description.workspace = true [dependencies] tokio-postgres = { workspace = true, optional = true } tiberius = { workspace = true, optional = true } -chrono = { workspace = true, features = ["serde"] } +chrono = { workspace = true } async-trait = { workspace = true } -canyon_connection = { workspace = true, path = "../canyon_connection" } +canyon_connection = { workspace = true } [features] postgres = ["tokio-postgres", "canyon_connection/postgres"] diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index 8509a91e..f5c6d37e 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -14,30 +14,13 @@ use crate::rows::CanyonRows; /// This traits defines and implements a query against a database given /// an statement `stmt` and the params to pass the to the client. 
/// -/// It returns a [`DatabaseResult`], which is the core Canyon type to wrap -/// the result of the query and, if the user desires, -/// automatically map it to an struct. +/// Returns [`std::result::Result`] of [`CanyonRows`], which is the core Canyon type to wrap +/// the result of the query and provide automatic mappings and deserialization #[async_trait] pub trait Transaction { - // /// Performs a query against the targeted database by the selected or - // /// the defaulted datasource, returning a collection of instances of *T* - // async fn query<'a, S, Z>( - // stmt: S, - // params: Z, - // datasource_name: &'a str, - // ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> - // where - // S: AsRef + Display + Sync + Send + 'a, - // Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, - // { - // Self::query_for_rows(stmt, params, datasource_name) - // .await - // .map(|res| res.into_results()) - // } - /// Performs a query against the targeted database by the selected or /// the defaulted datasource, wrapping the resultant collection of entities - /// in [`super::rows::Rows`] + /// in [`super::rows::CanyonRows`] async fn query<'a, S, Z>( stmt: S, params: Z, diff --git a/canyon_macros/Cargo.toml b/canyon_macros/Cargo.toml index 40682671..82d336f5 100755 --- a/canyon_macros/Cargo.toml +++ b/canyon_macros/Cargo.toml @@ -13,11 +13,11 @@ description.workspace = true proc-macro = true [dependencies] -syn = { version = "1.0.109", features = ["full"] } -quote = "1.0.9" -proc-macro2 = "1.0.27" -futures = "0.3.21" -tokio = { version = "1.9.0", features = ["full"] } +syn = { version = "1.0.109", features = ["full"] } # TODO Pending to upgrade and refactor +quote = { workspace = true } +proc-macro2 = { workspace = true } +futures = { workspace = true } +tokio = { workspace = true } canyon_observer = { workspace = true } canyon_crud = { workspace = true } diff --git a/canyon_observer/Cargo.toml b/canyon_observer/Cargo.toml index c1b090ab..0f939b2c 
100644 --- a/canyon_observer/Cargo.toml +++ b/canyon_observer/Cargo.toml @@ -17,13 +17,12 @@ tokio-postgres = { workspace = true, optional = true } tiberius = { workspace = true, optional = true } async-trait = { workspace = true } -# transform to opts with migrations feature -regex = "1.5" # opt +regex = { workspace = true } walkdir = { workspace = true } -proc-macro2 = "1.0.27" -syn = { version = "1.0.86", features = ["full", "parsing"] } -quote = "1.0.9" partialdebug = "0.2.0" +proc-macro2 = { workspace = true } +quote = { workspace = true } +syn = { version = "1.0.86", features = ["full", "parsing"] } # TODO Pending to refactor and upgrade [features] postgres = ["tokio-postgres", "canyon_connection/postgres", "canyon_crud/postgres"] diff --git a/tests/canyon.toml b/tests/canyon.toml index dfa4a666..0b0614a4 100644 --- a/tests/canyon.toml +++ b/tests/canyon.toml @@ -12,13 +12,13 @@ port = 5438 db_name = 'postgres' -#[[canyon_sql.datasources]] -#name = 'sqlserver_docker' -# -#[canyon_sql.datasources.auth] -#sqlserver = { basic = { username = 'sa', password = 'SqlServer-10' } } -# -#[canyon_sql.datasources.properties] -#host = 'localhost' -#port = 1434 -#db_name = 'master' +[[canyon_sql.datasources]] +name = 'sqlserver_docker' + +[canyon_sql.datasources.auth] +sqlserver = { basic = { username = 'sa', password = 'SqlServer-10' } } + +[canyon_sql.datasources.properties] +host = 'localhost' +port = 1434 +db_name = 'master' From 9735fd446319f5be9b7b62eb42379a1cb89b362f Mon Sep 17 00:00:00 2001 From: Alex Vergara Date: Thu, 20 Apr 2023 17:37:25 +0200 Subject: [PATCH 23/23] No IT for Windows and MacOS targets --- .github/workflows/continuous-integration.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 53f77132..3c26ce66 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -53,8 +53,12 @@ 
jobs: - name: Run only UNIT tests for Windows if: ${{ matrix.os == 'windows-latest' }} - run: cargo test --verbose --workspace --target=x86_64-pc-windows-msvc --exclude tests --all-features --no-fail-fast -- --show-output + run: | + cargo test --verbose --workspace --lib --target=x86_64-pc-windows-msvc --all-features --no-fail-fast -- --show-output + cargo test --verbose --workspace --doc --target=x86_64-pc-windows-msvc --all-features --no-fail-fast -- --show-output - name: Run only UNIT tests for MacOS if: ${{ matrix.os == 'MacOS-latest' }} - run: cargo test --verbose --workspace --exclude tests --all-features --no-fail-fast -- --show-output + run: | + cargo test --verbose --workspace --lib --all-features --no-fail-fast -- --show-output + cargo test --verbose --workspace --doc --all-features --no-fail-fast -- --show-output