diff --git a/Cargo.toml b/Cargo.toml
index c0ae400b..2a146c91 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,9 +1,5 @@
 [workspace]
-members = [
-    ".",
-    "cli",
-    "generator",
-]
+members = [".", "cli", "generator"]
 
 [package]
 name = "seaography"
@@ -24,7 +20,9 @@ async-graphql = { version = "6.0.7", features = ["decimal", "chrono", "dataloade
 sea-orm = { version = "0.12.0", default-features = false, features = ["seaography"] }
 itertools = { version = "0.11.0" }
 heck = { version = "0.4.1" }
-thiserror = "1.0.44"
+thiserror = { version = "1.0.44" }
+async-trait = { version = "0.1" }
+fnv = { version = "1.0.7" }
 
 [features]
 default = []
@@ -36,4 +34,4 @@ with-decimal = ["sea-orm/with-rust_decimal", "async-graphql/decimal"]
 with-bigdecimal = ["sea-orm/with-bigdecimal", "async-graphql/bigdecimal"]
 # with-postgres-array = ["sea-orm/postgres-array"]
 # with-ipnetwork = ["sea-orm/with-ipnetwork"]
-# with-mac_address = ["sea-orm/with-mac_address"]
\ No newline at end of file
+# with-mac_address = ["sea-orm/with-mac_address"]
diff --git a/examples/mysql/src/lib.rs b/examples/mysql/src/lib.rs
index 770aa614..a292add5 100644
--- a/examples/mysql/src/lib.rs
+++ b/examples/mysql/src/lib.rs
@@ -1,8 +1,2 @@
-use sea_orm::prelude::*;
-
 pub mod entities;
 pub mod query_root;
-
-pub struct OrmDataloader {
-    pub db: DatabaseConnection,
-}
diff --git a/examples/mysql/src/main.rs b/examples/mysql/src/main.rs
index 025319e1..91f1a75e 100644
--- a/examples/mysql/src/main.rs
+++ b/examples/mysql/src/main.rs
@@ -1,13 +1,9 @@
-use async_graphql::{
-    dataloader::DataLoader,
-    http::{playground_source, GraphQLPlaygroundConfig},
-};
+use async_graphql::http::{playground_source, GraphQLPlaygroundConfig};
 use async_graphql_poem::GraphQL;
 use dotenv::dotenv;
 use lazy_static::lazy_static;
 use poem::{get, handler, listener::TcpListener, web::Html, IntoResponse, Route, Server};
 use sea_orm::Database;
-use seaography_mysql_example::*;
 use std::env;
 
 lazy_static! {
@@ -39,15 +35,8 @@ async fn main() {
     let database = Database::connect(&*DATABASE_URL)
         .await
         .expect("Fail to initialize database connection");
-    let orm_dataloader: DataLoader<OrmDataloader> = DataLoader::new(
-        OrmDataloader {
-            db: database.clone(),
-        },
-        tokio::spawn,
-    );
     let schema = seaography_mysql_example::query_root::schema(
         database,
-        orm_dataloader,
         *DEPTH_LIMIT,
         *COMPLEXITY_LIMIT,
     )
diff --git a/examples/mysql/src/query_root.rs b/examples/mysql/src/query_root.rs
index f7c83fb8..03fb2aad 100644
--- a/examples/mysql/src/query_root.rs
+++ b/examples/mysql/src/query_root.rs
@@ -1,5 +1,5 @@
-use crate::{entities::*, OrmDataloader};
-use async_graphql::{dataloader::DataLoader, dynamic::*};
+use crate::entities::*;
+use async_graphql::dynamic::*;
 use sea_orm::DatabaseConnection;
 use seaography::{Builder, BuilderContext};
 
@@ -7,7 +7,6 @@ lazy_static::lazy_static! { static ref CONTEXT : BuilderContext = BuilderContext
 
 pub fn schema(
     database: DatabaseConnection,
-    orm_dataloader: DataLoader<OrmDataloader>,
     depth: Option<usize>,
     complexity: Option<usize>,
 ) -> Result<Schema, SchemaError> {
@@ -45,5 +44,5 @@ pub fn schema(
     } else {
         schema
     };
-    schema.data(database).data(orm_dataloader).finish()
+    schema.data(database).finish()
 }
diff --git a/examples/mysql/tests/mutation_tests.rs b/examples/mysql/tests/mutation_tests.rs
index b0c4e2e3..c3aad593 100644
--- a/examples/mysql/tests/mutation_tests.rs
+++ b/examples/mysql/tests/mutation_tests.rs
@@ -1,19 +1,13 @@
-use async_graphql::{dataloader::DataLoader, dynamic::*, Response};
+use async_graphql::{dynamic::*, Response};
 use sea_orm::Database;
-use seaography_mysql_example::OrmDataloader;
 
 pub async fn get_schema() -> Schema {
     let database = Database::connect("mysql://sea:sea@127.0.0.1/sakila")
         .await
         .unwrap();
-    let orm_dataloader: DataLoader<OrmDataloader> = DataLoader::new(
-        OrmDataloader {
-            db: database.clone(),
-        },
-        tokio::spawn,
-    );
     let schema =
-        seaography_mysql_example::query_root::schema(database, orm_dataloader, None, None).unwrap();
+        seaography_mysql_example::query_root::schema(database, None, None)
+            .unwrap();
 
     schema
 }
diff --git a/examples/mysql/tests/query_tests.rs b/examples/mysql/tests/query_tests.rs
index 6d925f1e..9f3b8642 100644
--- a/examples/mysql/tests/query_tests.rs
+++ b/examples/mysql/tests/query_tests.rs
@@ -1,19 +1,12 @@
-use async_graphql::{dataloader::DataLoader, dynamic::*, Response};
+use async_graphql::{dynamic::*, Response};
 use sea_orm::Database;
-use seaography_mysql_example::OrmDataloader;
 
 pub async fn get_schema() -> Schema {
     let database = Database::connect("mysql://sea:sea@127.0.0.1/sakila")
         .await
         .unwrap();
-    let orm_dataloader: DataLoader<OrmDataloader> = DataLoader::new(
-        OrmDataloader {
-            db: database.clone(),
-        },
-        tokio::spawn,
-    );
     let schema =
-        seaography_mysql_example::query_root::schema(database, orm_dataloader, None, None).unwrap();
+        seaography_mysql_example::query_root::schema(database, None, None).unwrap();
 
     schema
 }
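Note on the example changes above: callers no longer build a `DataLoader<OrmDataloader>` by hand — the builder now owns the connection and registers its own loaders, so the call site shrinks to a plain `schema(database, depth, complexity)` call. A minimal sketch of the new wiring, assuming the generated `seaography_mysql_example` crate and a reachable Sakila database:

    use sea_orm::Database;

    #[tokio::main]
    async fn main() {
        let database = Database::connect("mysql://sea:sea@127.0.0.1/sakila")
            .await
            .expect("Fail to initialize database connection");
        // Depth/complexity limits are optional; None leaves them unbounded.
        let schema = seaography_mysql_example::query_root::schema(database, None, None).unwrap();
        // The dataloaders are already attached as schema data at this point.
        let response = schema.execute("{ __typename }").await;
        assert!(response.errors.is_empty());
    }
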
diff --git a/examples/postgres/src/lib.rs b/examples/postgres/src/lib.rs
index 770aa614..f66ddd9c 100644
--- a/examples/postgres/src/lib.rs
+++ b/examples/postgres/src/lib.rs
@@ -1,8 +1,3 @@
-use sea_orm::prelude::*;
-
 pub mod entities;
 pub mod query_root;
 
-pub struct OrmDataloader {
-    pub db: DatabaseConnection,
-}
diff --git a/examples/postgres/src/main.rs b/examples/postgres/src/main.rs
index 127a9710..f0bcedcc 100644
--- a/examples/postgres/src/main.rs
+++ b/examples/postgres/src/main.rs
@@ -1,13 +1,9 @@
-use async_graphql::{
-    dataloader::DataLoader,
-    http::{playground_source, GraphQLPlaygroundConfig},
-};
+use async_graphql::http::{playground_source, GraphQLPlaygroundConfig};
 use async_graphql_poem::GraphQL;
 use dotenv::dotenv;
 use lazy_static::lazy_static;
 use poem::{get, handler, listener::TcpListener, web::Html, IntoResponse, Route, Server};
 use sea_orm::Database;
-use seaography_postgres_example::*;
 use std::env;
 
 lazy_static! {
@@ -39,15 +35,8 @@ async fn main() {
     let database = Database::connect(&*DATABASE_URL)
         .await
         .expect("Fail to initialize database connection");
-    let orm_dataloader: DataLoader<OrmDataloader> = DataLoader::new(
-        OrmDataloader {
-            db: database.clone(),
-        },
-        tokio::spawn,
-    );
     let schema = seaography_postgres_example::query_root::schema(
         database,
-        orm_dataloader,
         *DEPTH_LIMIT,
         *COMPLEXITY_LIMIT,
     )
diff --git a/examples/postgres/src/query_root.rs b/examples/postgres/src/query_root.rs
index cd4d19d4..6b7fbda0 100644
--- a/examples/postgres/src/query_root.rs
+++ b/examples/postgres/src/query_root.rs
@@ -1,5 +1,5 @@
-use crate::{entities::*, OrmDataloader};
-use async_graphql::{dataloader::DataLoader, dynamic::*};
+use crate::entities::*;
+use async_graphql::dynamic::*;
 use sea_orm::DatabaseConnection;
 use seaography::{Builder, BuilderContext};
 
@@ -7,11 +7,10 @@ lazy_static::lazy_static! { static ref CONTEXT : BuilderContext = BuilderContext
 
 pub fn schema(
     database: DatabaseConnection,
-    orm_dataloader: DataLoader<OrmDataloader>,
     depth: Option<usize>,
     complexity: Option<usize>,
 ) -> Result<Schema, SchemaError> {
-    let mut builder = Builder::new(&CONTEXT);
+    let mut builder = Builder::new(&CONTEXT, database.clone());
     seaography::register_entities!(
         builder,
         [
@@ -44,5 +43,5 @@ pub fn schema(
     } else {
         schema
     };
-    schema.data(database).data(orm_dataloader).finish()
+    schema.data(database).finish()
 }
diff --git a/examples/postgres/tests/mutation_tests.rs b/examples/postgres/tests/mutation_tests.rs
index 97b13a31..87931b28 100644
--- a/examples/postgres/tests/mutation_tests.rs
+++ b/examples/postgres/tests/mutation_tests.rs
@@ -1,19 +1,12 @@
-use async_graphql::{dataloader::DataLoader, dynamic::*, Response};
+use async_graphql::{dynamic::*, Response};
 use sea_orm::Database;
-use seaography_postgres_example::OrmDataloader;
 
 pub async fn get_schema() -> Schema {
     let database = Database::connect("postgres://sea:sea@127.0.0.1/sakila")
         .await
         .unwrap();
-    let orm_dataloader: DataLoader<OrmDataloader> = DataLoader::new(
-        OrmDataloader {
-            db: database.clone(),
-        },
-        tokio::spawn,
-    );
     let schema =
-        seaography_postgres_example::query_root::schema(database, orm_dataloader, None, None)
+        seaography_postgres_example::query_root::schema(database, None, None)
             .unwrap();
 
     schema
diff --git a/examples/postgres/tests/query_tests.rs b/examples/postgres/tests/query_tests.rs
index d8e46818..2b120bd0 100644
--- a/examples/postgres/tests/query_tests.rs
+++ b/examples/postgres/tests/query_tests.rs
@@ -1,19 +1,12 @@
-use async_graphql::{dataloader::DataLoader, dynamic::*, Response};
+use async_graphql::{dynamic::*, Response};
 use sea_orm::Database;
-use seaography_postgres_example::OrmDataloader;
 
 pub async fn get_schema() -> Schema {
     let database = Database::connect("postgres://sea:sea@127.0.0.1/sakila")
         .await
         .unwrap();
-    let orm_dataloader: DataLoader<OrmDataloader> = DataLoader::new(
-        OrmDataloader {
-            db: database.clone(),
-        },
-        tokio::spawn,
-    );
     let schema =
-        seaography_postgres_example::query_root::schema(database, orm_dataloader, None, None)
+        seaography_postgres_example::query_root::schema(database, None, None)
             .unwrap();
 
     schema
@@ -524,104 +517,101 @@ async fn related_queries_filters() {
     schema
         .execute(
             r#"
-                {
-                    customer(
-                        filters: { active: { eq: 0 } }
-                        pagination: { cursor: { limit: 3, cursor: "Int[3]:271" } }
-                    ) {
-                        nodes {
-                            customerId
-                            lastName
-                            email
-                            address {
-                                address
-                            }
-                            payment(filters: { amount: { gt: "8" } }, orderBy: { amount: DESC }) {
-                                nodes {
-                                    paymentId
-                                }
-                            }
-                        }
-                        pageInfo {
-                            hasPreviousPage
-                            hasNextPage
-                            endCursor
-                        }
-                    }
-                }
+                {
+                    customer(
+                        filters: { active: { eq: 0 } }
+                        pagination: { cursor: { limit: 3, cursor: "Int[3]:271" } }
+                        orderBy: { customerId: ASC }
+                    ) {
+                        nodes {
+                            customerId
+                            lastName
+                            email
+                            address {
+                                address
+                            }
+                            payment(
+                                filters: { amount: { gt: "8" } }
+                                orderBy: { amount: DESC }
+                                pagination: { offset: { limit: 1, offset: 0 } }
+                            ) {
+                                nodes {
+                                    paymentId
+                                    amount
+                                }
+                            }
+                        }
+                        pageInfo {
+                            hasPreviousPage
+                            hasNextPage
+                            endCursor
+                        }
+                    }
+                }
             "#,
         )
         .await,
         r#"
-        {
-            "customer": {
-                "nodes": [
-                    {
-                        "customerId": 315,
-                        "lastName": "GOODEN",
-                        "email": "KENNETH.GOODEN@sakilacustomer.org",
-                        "address": {
-                            "address": "1542 Lubumbashi Boulevard"
-                        },
-                        "payment": {
-                            "nodes": [
-                                {
-                                    "paymentId": 8547
-                                },
-                                {
-                                    "paymentId": 8537
-                                }
-                            ]
-                        }
-                    },
-                    {
-                        "customerId": 368,
-                        "lastName": "ARCE",
-                        "email": "HARRY.ARCE@sakilacustomer.org",
-                        "address": {
-                            "address": "1922 Miraj Way"
-                        },
-                        "payment": {
-                            "nodes": [
-                                {
-                                    "paymentId": 9945
-                                },
-                                {
-                                    "paymentId": 9953
-                                },
-                                {
-                                    "paymentId": 9962
-                                },
-                                {
-                                    "paymentId": 9967
-                                }
-                            ]
-                        }
-                    },
-                    {
-                        "customerId": 406,
-                        "lastName": "RUNYON",
-                        "email": "NATHAN.RUNYON@sakilacustomer.org",
-                        "address": {
-                            "address": "264 Bhimavaram Manor"
-                        },
-                        "payment": {
-                            "nodes": [
-                                {
-                                    "paymentId": 10998
-                                }
-                            ]
-                        }
-                    }
-                ],
-                "pageInfo": {
-                    "hasPreviousPage": true,
-                    "hasNextPage": true,
-                    "endCursor": "Int[3]:406"
-                }
-            }
-        }
-        "#,
+        {
+            "customer": {
+                "nodes": [
+                    {
+                        "customerId": 315,
+                        "lastName": "GOODEN",
+                        "email": "KENNETH.GOODEN@sakilacustomer.org",
+                        "address": {
+                            "address": "1542 Lubumbashi Boulevard"
+                        },
+                        "payment": {
+                            "nodes": [
+                                {
+                                    "paymentId": 8547,
+                                    "amount": "9.9900"
+                                }
+                            ]
+                        }
+                    },
+                    {
+                        "customerId": 368,
+                        "lastName": "ARCE",
+                        "email": "HARRY.ARCE@sakilacustomer.org",
+                        "address": {
+                            "address": "1922 Miraj Way"
+                        },
+                        "payment": {
+                            "nodes": [
+                                {
+                                    "paymentId": 9945,
+                                    "amount": "9.9900"
+                                }
+                            ]
+                        }
+                    },
+                    {
+                        "customerId": 406,
+                        "lastName": "RUNYON",
+                        "email": "NATHAN.RUNYON@sakilacustomer.org",
+                        "address": {
+                            "address": "264 Bhimavaram Manor"
+                        },
+                        "payment": {
+                            "nodes": [
+                                {
+                                    "paymentId": 10998,
+                                    "amount": "8.9900"
+                                }
+                            ]
+                        }
+                    }
+                ],
+                "pageInfo": {
+                    "hasPreviousPage": true,
+                    "hasNextPage": true,
+                    "endCursor": "Int[3]:406"
+                }
+            }
+        }
+        "#,
     );
 
     assert_eq(
diff --git a/examples/sqlite/Cargo.toml b/examples/sqlite/Cargo.toml
index 39fbf53a..ceced47d 100644
--- a/examples/sqlite/Cargo.toml
+++ b/examples/sqlite/Cargo.toml
@@ -4,8 +4,8 @@ name = "seaography-sqlite-example"
 version = "0.3.0"
 
 [dependencies]
-actix-web = { version = "4.3.1", default-features = false, features = ["macros"] }
-async-graphql-actix-web = { version = "6.0.7" }
+poem = { version = "1.3.56" }
+async-graphql-poem = { version = "6.0.7" }
 async-graphql = { version = "6.0.7", features = ["decimal", "chrono", "dataloader", "dynamic-schema"] }
 async-trait = { version = "0.1.72" }
 dotenv = "0.15.0"
diff --git a/examples/sqlite/src/lib.rs b/examples/sqlite/src/lib.rs
index 770aa614..a292add5 100644
--- a/examples/sqlite/src/lib.rs
+++ b/examples/sqlite/src/lib.rs
@@ -1,8 +1,2 @@
-use sea_orm::prelude::*;
-
 pub mod entities;
 pub mod query_root;
-
-pub struct OrmDataloader {
-    pub db: DatabaseConnection,
-}
diff --git a/examples/sqlite/src/main.rs b/examples/sqlite/src/main.rs
index 7fcfd733..0b8c045c 100644
--- a/examples/sqlite/src/main.rs
+++ b/examples/sqlite/src/main.rs
@@ -1,13 +1,9 @@
-use async_graphql::{
-    dataloader::DataLoader,
-    http::{playground_source, GraphQLPlaygroundConfig},
-};
+use async_graphql::http::{playground_source, GraphQLPlaygroundConfig};
 use async_graphql_poem::GraphQL;
 use dotenv::dotenv;
 use lazy_static::lazy_static;
 use poem::{get, handler, listener::TcpListener, web::Html, IntoResponse, Route, Server};
 use sea_orm::Database;
-use seaography_sqlite_example::*;
 use std::env;
 
 lazy_static! {
@@ -39,19 +35,9 @@ async fn main() {
     let database = Database::connect(&*DATABASE_URL)
         .await
         .expect("Fail to initialize database connection");
-    let orm_dataloader: DataLoader<OrmDataloader> = DataLoader::new(
-        OrmDataloader {
-            db: database.clone(),
-        },
-        tokio::spawn,
-    );
-    let schema = seaography_sqlite_example::query_root::schema(
-        database,
-        orm_dataloader,
-        *DEPTH_LIMIT,
-        *COMPLEXITY_LIMIT,
-    )
-    .unwrap();
+    let schema =
+        seaography_sqlite_example::query_root::schema(database, *DEPTH_LIMIT, *COMPLEXITY_LIMIT)
+            .unwrap();
     let app = Route::new().at(
         &*ENDPOINT,
         get(graphql_playground).post(GraphQL::new(schema)),
diff --git a/examples/sqlite/src/query_root.rs b/examples/sqlite/src/query_root.rs
index b3b77bf1..2a17dd30 100644
--- a/examples/sqlite/src/query_root.rs
+++ b/examples/sqlite/src/query_root.rs
@@ -1,5 +1,5 @@
-use crate::{entities::*, OrmDataloader};
-use async_graphql::{dataloader::DataLoader, dynamic::*};
+use crate::entities::*;
+use async_graphql::dynamic::*;
 use sea_orm::DatabaseConnection;
 use seaography::{Builder, BuilderContext};
 
@@ -7,30 +7,29 @@ lazy_static::lazy_static! { static ref CONTEXT : BuilderContext = BuilderContext
 
 pub fn schema(
     database: DatabaseConnection,
-    orm_dataloader: DataLoader<OrmDataloader>,
     depth: Option<usize>,
     complexity: Option<usize>,
 ) -> Result<Schema, SchemaError> {
-    let mut builder = Builder::new(&CONTEXT);
+    let mut builder = Builder::new(&CONTEXT, database.clone());
     seaography::register_entities!(
         builder,
         [
-            film_actor,
-            rental,
+            actor,
+            address,
             category,
-            staff,
+            city,
             country,
+            customer,
             film,
-            actor,
-            language,
-            city,
-            inventory,
-            film_text,
+            film_actor,
             film_category,
-            customer,
-            store,
+            film_text,
+            inventory,
+            language,
             payment,
-            address,
+            rental,
+            staff,
+            store,
         ]
     );
     let schema = builder.schema_builder();
@@ -44,5 +43,5 @@ pub fn schema(
     } else {
         schema
     };
-    schema.data(database).data(orm_dataloader).finish()
+    schema.data(database).finish()
 }
diff --git a/examples/sqlite/tests/guard_tests.rs b/examples/sqlite/tests/guard_tests.rs
index 2d897dee..9e9340a6 100644
--- a/examples/sqlite/tests/guard_tests.rs
+++ b/examples/sqlite/tests/guard_tests.rs
@@ -1,12 +1,11 @@
 use std::collections::BTreeMap;
 
-use async_graphql::{dataloader::DataLoader, dynamic::*, Response};
+use async_graphql::{dynamic::*, Response};
 use sea_orm::{Database, DatabaseConnection, RelationTrait};
 use seaography::{
     Builder, BuilderContext, EntityObjectRelationBuilder, EntityObjectViaRelationBuilder, FnGuard,
     GuardsConfig,
 };
-use seaography_sqlite_example::OrmDataloader;
 
 lazy_static::lazy_static! {
     static ref CONTEXT : BuilderContext = {
@@ -31,11 +30,10 @@ lazy_static::lazy_static! {
 
 pub fn schema(
     database: DatabaseConnection,
-    orm_dataloader: DataLoader<OrmDataloader>,
     depth: Option<usize>,
     complexity: Option<usize>,
 ) -> Result<Schema, SchemaError> {
-    let mut builder = Builder::new(&CONTEXT);
+    let mut builder = Builder::new(&CONTEXT, database.clone());
     let entity_object_relation_builder = EntityObjectRelationBuilder { context: &CONTEXT };
     let entity_object_via_relation_builder = EntityObjectViaRelationBuilder { context: &CONTEXT };
     builder.register_entity::<film_actor::Entity>(vec![
@@ -270,18 +268,12 @@ pub fn schema(
     } else {
         schema
     };
-    schema.data(database).data(orm_dataloader).finish()
+    schema.data(database).finish()
 }
 
 pub async fn get_schema() -> Schema {
     let database = Database::connect("sqlite://sakila.db").await.unwrap();
-    let orm_dataloader: DataLoader<OrmDataloader> = DataLoader::new(
-        OrmDataloader {
-            db: database.clone(),
-        },
-        tokio::spawn,
-    );
-    let schema = schema(database, orm_dataloader, None, None).unwrap();
+    let schema = schema(database, None, None).unwrap();
 
     schema
 }
diff --git a/examples/sqlite/tests/mutation_tests.rs b/examples/sqlite/tests/mutation_tests.rs
index 5a24bd7c..4b4dfe0f 100644
--- a/examples/sqlite/tests/mutation_tests.rs
+++ b/examples/sqlite/tests/mutation_tests.rs
@@ -1,18 +1,9 @@
-use async_graphql::{dataloader::DataLoader, dynamic::*, Response};
+use async_graphql::{dynamic::*, Response};
 use sea_orm::Database;
-use seaography_sqlite_example::OrmDataloader;
 
 pub async fn get_schema() -> Schema {
     let database = Database::connect("sqlite://sakila.db").await.unwrap();
-    let orm_dataloader: DataLoader<OrmDataloader> = DataLoader::new(
-        OrmDataloader {
-            db: database.clone(),
-        },
-        tokio::spawn,
-    );
-    let schema =
-        seaography_sqlite_example::query_root::schema(database, orm_dataloader, None, None)
-            .unwrap();
+    let schema = seaography_sqlite_example::query_root::schema(database, None, None).unwrap();
 
     schema
 }
diff --git a/examples/sqlite/tests/query_tests.rs b/examples/sqlite/tests/query_tests.rs
index 88d2de6d..0f04935e 100644
--- a/examples/sqlite/tests/query_tests.rs
+++ b/examples/sqlite/tests/query_tests.rs
@@ -1,18 +1,9 @@
-use async_graphql::{dataloader::DataLoader, dynamic::*, Response};
+use async_graphql::{dynamic::*, Response};
 use sea_orm::Database;
-use seaography_sqlite_example::OrmDataloader;
 
 pub async fn get_schema() -> Schema {
     let database = Database::connect("sqlite://sakila.db").await.unwrap();
-    let orm_dataloader: DataLoader<OrmDataloader> = DataLoader::new(
-        OrmDataloader {
-            db: database.clone(),
-        },
-        tokio::spawn,
-    );
-    let schema =
-        seaography_sqlite_example::query_root::schema(database, orm_dataloader, None, None)
-            .unwrap();
+    let schema = seaography_sqlite_example::query_root::schema(database, None, None).unwrap();
 
     schema
 }
diff --git a/generator/src/templates/actix.rs b/generator/src/templates/actix.rs
index 97d04cbc..d868a0e0 100644
--- a/generator/src/templates/actix.rs
+++ b/generator/src/templates/actix.rs
@@ -12,7 +12,6 @@ pub fn generate_main(crate_name: &str) -> TokenStream {
     quote! {
         use actix_web::{guard, web, web::Data, App, HttpResponse, HttpServer, Result};
         use async_graphql::{
-            dataloader::DataLoader,
             http::{playground_source, GraphQLPlaygroundConfig},
             dynamic::*,
         };
@@ -20,7 +19,6 @@ pub fn generate_main(crate_name: &str) -> TokenStream {
         use dotenv::dotenv;
        use lazy_static::lazy_static;
         use sea_orm::Database;
-        use #crate_name_token::*;
         use std::env;
 
         lazy_static! {
@@ -61,14 +59,7 @@ pub fn generate_main(crate_name: &str) -> TokenStream {
                 .await
                 .expect("Fail to initialize database connection");
 
-            let orm_dataloader: DataLoader<OrmDataloader> = DataLoader::new(
-                OrmDataloader {
-                    db: database.clone(),
-                },
-                tokio::spawn,
-            );
-
-            let schema = #crate_name_token::query_root::schema(database, orm_dataloader, *DEPTH_LIMIT, *COMPLEXITY_LIMIT).unwrap();
+            let schema = #crate_name_token::query_root::schema(database, *DEPTH_LIMIT, *COMPLEXITY_LIMIT).unwrap();
 
             println!("Visit GraphQL Playground at http://{}", *URL);
diff --git a/generator/src/templates/poem.rs b/generator/src/templates/poem.rs
index 3a0e57c3..f0d00bf8 100644
--- a/generator/src/templates/poem.rs
+++ b/generator/src/templates/poem.rs
@@ -10,16 +10,12 @@ pub fn generate_main(crate_name: &str) -> TokenStream {
     let crate_name_token: TokenStream = crate_name.replace('-', "_").parse().unwrap();
 
     quote! {
-        use async_graphql::{
-            dataloader::DataLoader,
-            http::{playground_source, GraphQLPlaygroundConfig}
-        };
+        use async_graphql::http::{playground_source, GraphQLPlaygroundConfig};
         use async_graphql_poem::GraphQL;
         use dotenv::dotenv;
         use lazy_static::lazy_static;
         use poem::{get, handler, listener::TcpListener, web::Html, IntoResponse, Route, Server};
         use sea_orm::Database;
-        use #crate_name_token::*;
         use std::env;
 
         lazy_static! {
@@ -51,14 +47,8 @@ pub fn generate_main(crate_name: &str) -> TokenStream {
             let database = Database::connect(&*DATABASE_URL)
                 .await
                 .expect("Fail to initialize database connection");
-            let orm_dataloader: DataLoader<OrmDataloader> = DataLoader::new(
-                OrmDataloader {
-                    db: database.clone(),
-                },
-                tokio::spawn,
-            );
 
-            let schema = #crate_name_token::query_root::schema(database, orm_dataloader, *DEPTH_LIMIT, *COMPLEXITY_LIMIT).unwrap();
+            let schema = #crate_name_token::query_root::schema(database, *DEPTH_LIMIT, *COMPLEXITY_LIMIT).unwrap();
 
             let app = Route::new().at(
                 &*ENDPOINT,
diff --git a/generator/src/writer.rs b/generator/src/writer.rs
index d6b7a2c6..48a7bc1f 100644
--- a/generator/src/writer.rs
+++ b/generator/src/writer.rs
@@ -82,8 +82,8 @@ pub fn generate_query_root<P: AsRef<Path>>(entities_path: &P) -> TokenStream {
     });
 
     quote! {
-        use crate::{entities::*, OrmDataloader};
-        use async_graphql::{dataloader::DataLoader, dynamic::*};
+        use crate::entities::*;
+        use async_graphql::dynamic::*;
         use sea_orm::DatabaseConnection;
         use seaography::{Builder, BuilderContext};
 
@@ -93,11 +93,10 @@ pub fn generate_query_root<P: AsRef<Path>>(entities_path: &P) -> TokenStream {
 
         pub fn schema(
             database: DatabaseConnection,
-            orm_dataloader: DataLoader<OrmDataloader>,
             depth: Option<usize>,
             complexity: Option<usize>,
         ) -> Result<Schema, SchemaError> {
-            let mut builder = Builder::new(&CONTEXT);
+            let mut builder = Builder::new(&CONTEXT, database.clone());
 
             seaography::register_entities!(
                 builder,
@@ -122,7 +121,7 @@ pub fn generate_query_root<P: AsRef<Path>>(entities_path: &P) -> TokenStream {
                 schema
             };
 
-            schema.data(database).data(orm_dataloader).finish()
+            schema.data(database).finish()
         }
     }
 }
@@ -173,15 +172,8 @@ pub fn write_cargo_toml<P: AsRef<Path>>(
 ///
 pub fn write_lib<P: AsRef<Path>>(path: &P) -> std::io::Result<()> {
     let tokens = quote! {
-        use sea_orm::prelude::*;
-
         pub mod entities;
         pub mod query_root;
-
-        pub struct OrmDataloader {
-            pub db: DatabaseConnection,
-        }
-
     };
 
     let file_name = path.as_ref().join("lib.rs");
diff --git a/src/builder.rs b/src/builder.rs
index dcb81885..af42a602 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -1,4 +1,7 @@
-use async_graphql::dynamic::{Enum, Field, InputObject, Object, Schema, SchemaBuilder};
+use async_graphql::{
+    dataloader::DataLoader,
+    dynamic::{Enum, Field, FieldFuture, InputObject, Object, Schema, SchemaBuilder, TypeRef},
+};
 use sea_orm::{ActiveEnum, ActiveModelTrait, EntityTrait, IntoActiveModel};
 
 use crate::{
@@ -6,14 +9,18 @@ use crate::{
     CursorInputBuilder, EdgeObjectBuilder, EntityCreateBatchMutationBuilder,
     EntityCreateOneMutationBuilder, EntityInputBuilder, EntityObjectBuilder,
     EntityQueryFieldBuilder, EntityUpdateMutationBuilder, FilterInputBuilder, FilterTypesMapHelper,
-    OffsetInputBuilder, OrderByEnumBuilder, OrderInputBuilder, PageInfoObjectBuilder,
-    PageInputBuilder, PaginationInfoObjectBuilder, PaginationInputBuilder,
+    OffsetInputBuilder, OneToManyLoader, OneToOneLoader, OrderByEnumBuilder, OrderInputBuilder,
+    PageInfoObjectBuilder, PageInputBuilder, PaginationInfoObjectBuilder, PaginationInputBuilder,
 };
 
 /// The Builder is used to create the Schema for GraphQL
 ///
 /// You can populate it with the entities, enumerations of your choice
 pub struct Builder {
+    pub query: Object,
+    pub mutation: Object,
+    pub schema: SchemaBuilder,
+
     /// holds all output object types
     pub outputs: Vec<Object>,
 
@@ -29,19 +36,34 @@ pub struct Builder {
     /// holds all entities mutations
     pub mutations: Vec<Field>,
 
+    /// holds a copy to the database connection
+    pub connection: sea_orm::DatabaseConnection,
+
     /// configuration for builder
     pub context: &'static BuilderContext,
 }
 
 impl Builder {
     /// Used to create a new Builder from the given configuration context
-    pub fn new(context: &'static BuilderContext) -> Self {
+    pub fn new(context: &'static BuilderContext, connection: sea_orm::DatabaseConnection) -> Self {
+        let query: Object = Object::new("Query");
+        let mutation = Object::new("Mutation").field(Field::new(
+            "_ping",
+            TypeRef::named(TypeRef::STRING),
+            |_| FieldFuture::new(async move { Ok(Some(async_graphql::Value::from("pong"))) }),
+        ));
+        let schema = Schema::build(query.type_name(), Some(mutation.type_name()), None);
+
         Self {
+            query,
+            mutation,
+            schema,
             outputs: Vec::new(),
             inputs: Vec::new(),
             enumerations: Vec::new(),
             queries: Vec::new(),
             mutations: Vec::new(),
+            connection,
             context,
         }
     }
@@ -113,7 +135,6 @@ impl Builder {
             .extend(vec![entity_insert_input_object, entity_update_input_object]);
 
         // create one mutation
-
         let entity_create_one_mutation_builder = EntityCreateOneMutationBuilder {
             context: self.context,
         };
@@ -136,6 +157,38 @@ impl Builder {
         self.mutations.push(update_mutation);
     }
 
+    pub fn register_entity_dataloader_one_to_one<T, S, R>(mut self, _entity: T, spawner: S) -> Self
+    where
+        T: EntityTrait,
+        <T as EntityTrait>::Model: Sync,
+        S: Fn(async_graphql::futures_util::future::BoxFuture<'static, ()>) -> R
+            + Send
+            + Sync
+            + 'static,
+    {
+        self.schema = self.schema.data(DataLoader::new(
+            OneToOneLoader::<T>::new(self.connection.clone()),
+            spawner,
+        ));
+        self
+    }
+
+    pub fn register_entity_dataloader_one_to_many<T, S, R>(mut self, _entity: T, spawner: S) -> Self
+    where
+        T: EntityTrait,
+        <T as EntityTrait>::Model: Sync,
+        S: Fn(async_graphql::futures_util::future::BoxFuture<'static, ()>) -> R
+            + Send
+            + Sync
+            + 'static,
+    {
+        self.schema = self.schema.data(DataLoader::new(
+            OneToManyLoader::<T>::new(self.connection.clone()),
+            spawner,
+        ));
+        self
+    }
+
     /// used to register a new enumeration to the builder context
     pub fn register_enumeration<A>(&mut self)
     where
@@ -161,19 +214,9 @@ impl Builder {
 
     /// used to consume the builder context and generate a ready to be completed GraphQL schema
     pub fn schema_builder(self) -> SchemaBuilder {
-        let query: Object = Object::new("Query");
-
-        let schema = if !self.mutations.is_empty() {
-            let mutation = Object::new("Mutation");
-            // register mutations
-            let mutation = self
-                .mutations
-                .into_iter()
-                .fold(mutation, |mutation, field| mutation.field(field));
-            Schema::build(query.type_name(), Some(mutation.type_name()), None).register(mutation)
-        } else {
-            Schema::build(query.type_name(), None, None)
-        };
+        let query = self.query;
+        let mutation = self.mutation;
+        let schema = self.schema;
 
         // register queries
         let query = self
@@ -181,7 +224,13 @@ impl Builder {
             .into_iter()
             .fold(query, |query, field| query.field(field));
 
-        // register output types to schema
+        // register mutations
+        let mutation = self
+            .mutations
+            .into_iter()
+            .fold(mutation, |mutation, field| mutation.field(field));
+
+        // register entities to schema
         let schema = self
             .outputs
             .into_iter()
@@ -258,6 +307,7 @@ impl Builder {
                 .to_object(),
         )
         .register(query)
+        .register(mutation)
     }
 }
 
@@ -276,6 +326,10 @@ macro_rules! register_entity {
                 .map(|rel| seaography::RelationBuilder::get_relation(&rel, $builder.context))
                 .collect(),
         );
+        $builder =
+            $builder.register_entity_dataloader_one_to_one($module_path::Entity, tokio::spawn);
+        $builder =
+            $builder.register_entity_dataloader_one_to_many($module_path::Entity, tokio::spawn);
         $builder.register_entity_mutations::<$module_path::Entity, $module_path::ActiveModel>();
     };
 }
diff --git a/src/inputs/cursor_input.rs b/src/inputs/cursor_input.rs
index e0a6c8c5..2d17d798 100644
--- a/src/inputs/cursor_input.rs
+++ b/src/inputs/cursor_input.rs
@@ -3,7 +3,7 @@ use async_graphql::dynamic::{InputObject, InputValue, ObjectAccessor, TypeRef};
 use crate::BuilderContext;
 
 /// used to hold information about cursor pagination
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct CursorInput {
     pub cursor: Option<String>,
     pub limit: u64,
diff --git a/src/inputs/offset_input.rs b/src/inputs/offset_input.rs
index 69f39a82..ffd8255e 100644
--- a/src/inputs/offset_input.rs
+++ b/src/inputs/offset_input.rs
@@ -3,7 +3,7 @@ use async_graphql::dynamic::{InputObject, InputValue, ObjectAccessor, TypeRef};
 use crate::BuilderContext;
 
 /// used to hold information about offset pagination
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct OffsetInput {
     pub offset: u64,
     pub limit: u64,
diff --git a/src/inputs/order_input.rs b/src/inputs/order_input.rs
index 244e0da4..51382d16 100644
--- a/src/inputs/order_input.rs
+++ b/src/inputs/order_input.rs
@@ -1,4 +1,4 @@
-use async_graphql::dynamic::{InputObject, InputValue, TypeRef};
+use async_graphql::dynamic::{InputObject, InputValue, TypeRef, ValueAccessor};
 use sea_orm::{EntityTrait, Iterable};
 
 use crate::{BuilderContext, EntityObjectBuilder};
@@ -50,4 +50,48 @@ impl OrderInputBuilder {
             ))
         })
     }
+
+    pub fn parse_object<T>(
+        &self,
+        value: Option<ValueAccessor<'_>>,
+    ) -> Vec<(T::Column, sea_orm::sea_query::Order)>
+    where
+        T: EntityTrait,
+        <T as EntityTrait>::Model: Sync,
+    {
+        match value {
+            Some(value) => {
+                let mut data = Vec::new();
+
+                let order_by = value.object().unwrap();
+
+                let entity_object = EntityObjectBuilder {
+                    context: self.context,
+                };
+
+                for col in T::Column::iter() {
+                    let column_name = entity_object.column_name::<T>(&col);
+                    let order = order_by.get(&column_name);
+
+                    if let Some(order) = order {
+                        let order = order.enum_name().unwrap();
+
+                        let asc_variant = &self.context.order_by_enum.asc_variant;
+                        let desc_variant = &self.context.order_by_enum.desc_variant;
+
+                        if order.eq(asc_variant) {
+                            data.push((col, sea_orm::Order::Asc));
+                        } else if order.eq(desc_variant) {
+                            data.push((col, sea_orm::Order::Desc));
+                        } else {
+                            panic!("Cannot map enumeration")
+                        }
+                    }
+                }
+
+                data
+            }
+            None => Vec::new(),
+        }
+    }
 }
diff --git a/src/inputs/page_input.rs b/src/inputs/page_input.rs
index 1109ea44..c6a8dee3 100644
--- a/src/inputs/page_input.rs
+++ b/src/inputs/page_input.rs
@@ -3,7 +3,7 @@ use async_graphql::dynamic::{InputObject, InputValue, ObjectAccessor, TypeRef};
 use crate::BuilderContext;
 
 /// used to hold information about page pagination
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct PageInput {
     pub page: u64,
     pub limit: u64,
diff --git a/src/inputs/pagination_input.rs b/src/inputs/pagination_input.rs
index 67f41231..e8f03678 100644
--- a/src/inputs/pagination_input.rs
+++ b/src/inputs/pagination_input.rs
@@ -1,4 +1,4 @@
-use async_graphql::dynamic::{InputObject, InputValue, ObjectAccessor, TypeRef};
+use async_graphql::dynamic::{InputObject, InputValue, TypeRef, ValueAccessor};
 
 use crate::{BuilderContext, CursorInputBuilder, OffsetInputBuilder, PageInputBuilder};
 
@@ -6,7 +6,7 @@ use super::{CursorInput, OffsetInput, PageInput};
 
 /// used to hold information about which pagination
 /// strategy will be applied on the query
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct PaginationInput {
     pub cursor: Option<CursorInput>,
     pub page: Option<PageInput>,
@@ -65,7 +65,18 @@ impl PaginationInputBuilder {
     }
 
     /// used to parse query input to pagination information structure
-    pub fn parse_object(&self, object: &ObjectAccessor) -> PaginationInput {
+    pub fn parse_object(&self, value: Option<ValueAccessor<'_>>) -> PaginationInput {
+        if value.is_none() {
+            return PaginationInput {
+                cursor: None,
+                offset: None,
+                page: None,
+            };
+        }
+
+        let binding = value.unwrap();
+        let object = binding.object().unwrap();
+
         let cursor_input_builder = CursorInputBuilder {
             context: self.context,
         };
diff --git a/src/query/entity_object_relation.rs b/src/query/entity_object_relation.rs
index 25b6b292..a813c1da 100644
--- a/src/query/entity_object_relation.rs
+++ b/src/query/entity_object_relation.rs
@@ -1,16 +1,16 @@
 use async_graphql::{
+    dataloader::DataLoader,
     dynamic::{Field, FieldFuture, FieldValue, InputValue, TypeRef},
     Error,
 };
 use heck::ToSnakeCase;
-use sea_orm::{
-    ColumnTrait, Condition, DatabaseConnection, EntityTrait, Iden, ModelTrait, QueryFilter,
-    RelationDef,
-};
+use sea_orm::{EntityTrait, Iden, ModelTrait, RelationDef};
 
 use crate::{
-    apply_order, apply_pagination, get_filter_conditions, BuilderContext, ConnectionObjectBuilder,
-    EntityObjectBuilder, FilterInputBuilder, GuardAction, OrderInputBuilder,
+    apply_memory_pagination, get_filter_conditions, BuilderContext, Connection,
+    ConnectionObjectBuilder, EntityObjectBuilder, FilterInputBuilder, GuardAction,
+    HashableGroupKey, KeyComplex, OneToManyLoader, OneToOneLoader, OrderInputBuilder,
+    PaginationInputBuilder,
 };
 
 /// This builder produces a GraphQL field for an SeaORM entity relationship
@@ -58,50 +58,56 @@ impl EntityObjectRelationBuilder {
             .unwrap();
 
         let field = match relation_definition.is_owner {
-            false => {
-                Field::new(name, TypeRef::named(&object_name), move |ctx| {
-                    // FIXME: optimize with dataloader
-                    FieldFuture::new(async move {
-                        let guard_flag = if let Some(guard) = guard {
-                            (*guard)(&ctx)
-                        } else {
-                            GuardAction::Allow
-                        };
-
-                        if let GuardAction::Block(reason) = guard_flag {
-                            return match reason {
-                                Some(reason) => {
-                                    Err::<Option<_>, async_graphql::Error>(Error::new(reason))
-                                }
-                                None => Err::<Option<_>, async_graphql::Error>(Error::new(
-                                    "Entity guard triggered.",
-                                )),
-                            };
-                        }
-
-                        let parent: &T::Model = ctx
-                            .parent_value
-                            .try_downcast_ref::<T::Model>()
-                            .expect("Parent should exist");
-
-                        let stmt = R::find();
-
-                        let filter = Condition::all().add(to_col.eq(parent.get(from_col)));
-
-                        let stmt = stmt.filter(filter);
-
-                        let db = ctx.data::<DatabaseConnection>()?;
-
-                        let data = stmt.one(db).await?;
-
-                        if let Some(data) = data {
-                            Ok(Some(FieldValue::owned_any(data)))
-                        } else {
-                            Ok(None)
-                        }
-                    })
-                })
-            }
+            false => Field::new(name, TypeRef::named(&object_name), move |ctx| {
+                FieldFuture::new(async move {
+                    let guard_flag = if let Some(guard) = guard {
+                        (*guard)(&ctx)
+                    } else {
+                        GuardAction::Allow
+                    };
+
+                    if let GuardAction::Block(reason) = guard_flag {
+                        return match reason {
+                            Some(reason) => {
+                                Err::<Option<_>, async_graphql::Error>(Error::new(reason))
+                            }
+                            None => Err::<Option<_>, async_graphql::Error>(Error::new(
+                                "Entity guard triggered.",
+                            )),
+                        };
+                    }
+
+                    let parent: &T::Model = ctx
+                        .parent_value
+                        .try_downcast_ref::<T::Model>()
+                        .expect("Parent should exist");
+
+                    let loader = ctx.data_unchecked::<DataLoader<OneToOneLoader<R>>>();
+
+                    let stmt = R::find();
+                    let filters = ctx.args.get(&context.entity_query_field.filters);
+                    let filters = get_filter_conditions::<R>(context, filters);
+                    let order_by = ctx.args.get(&context.entity_query_field.order_by);
+                    let order_by = OrderInputBuilder { context }.parse_object::<R>(order_by);
+                    let key = KeyComplex::<R> {
+                        key: vec![parent.get(from_col)],
+                        meta: HashableGroupKey::<R> {
+                            stmt,
+                            columns: vec![to_col],
+                            filters: Some(filters),
+                            order_by,
+                        },
+                    };
+
+                    let data = loader.load_one(key).await?;
+
+                    if let Some(data) = data {
+                        Ok(Some(FieldValue::owned_any(data)))
+                    } else {
+                        Ok(None)
+                    }
+                })
+            }),
             true => Field::new(
                 name,
                 TypeRef::named_nn(connection_object_builder.type_name(&object_name)),
@@ -125,30 +131,35 @@ impl EntityObjectRelationBuilder {
                         };
                     }
 
-                    // FIXME: optimize union queries
-                    // NOTE: each has unique query in order to apply pagination...
                     let parent: &T::Model = ctx
                         .parent_value
                         .try_downcast_ref::<T::Model>()
                         .expect("Parent should exist");
 
-                    let stmt = R::find();
-
-                    let condition = Condition::all().add(to_col.eq(parent.get(from_col)));
+                    let loader = ctx.data_unchecked::<DataLoader<OneToManyLoader<R>>>();
 
+                    let stmt = R::find();
                     let filters = ctx.args.get(&context.entity_query_field.filters);
+                    let filters = get_filter_conditions::<R>(context, filters);
                     let order_by = ctx.args.get(&context.entity_query_field.order_by);
-                    let pagination = ctx.args.get(&context.entity_query_field.pagination);
-
-                    let base_condition = get_filter_conditions::<R>(context, filters);
+                    let order_by = OrderInputBuilder { context }.parse_object::<R>(order_by);
+                    let key = KeyComplex::<R> {
+                        key: vec![parent.get(from_col)],
+                        meta: HashableGroupKey::<R> {
+                            stmt,
+                            columns: vec![to_col],
+                            filters: Some(filters),
+                            order_by,
+                        },
+                    };
 
-                    let stmt = stmt.filter(condition.add(base_condition));
-                    let stmt = apply_order(context, stmt, order_by);
+                    let values = loader.load_one(key).await?;
 
-                    let db = ctx.data::<DatabaseConnection>()?;
+                    let pagination = ctx.args.get(&context.entity_query_field.pagination);
+                    let pagination =
+                        PaginationInputBuilder { context }.parse_object(pagination);
 
-                    let connection =
-                        apply_pagination::<R>(context, db, stmt, pagination).await?;
+                    let connection: Connection<R> = apply_memory_pagination(values, pagination);
 
                     Ok(Some(FieldValue::owned_any(connection)))
                 })
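The relation resolvers above fetch through `DataLoader`s stored in the schema data, so every registered entity needs its loaders attached. This is roughly what the updated `seaography::register_entity!` macro (see src/builder.rs above) expands to per entity — a sketch in which `film_actor` is a hypothetical stand-in for any generated entity module:

    let mut builder = Builder::new(&CONTEXT, database.clone());
    builder.register_entity::<film_actor::Entity>(
        <film_actor::RelatedEntity as sea_orm::Iterable>::iter()
            .map(|rel| seaography::RelationBuilder::get_relation(&rel, builder.context))
            .collect(),
    );
    // Each entity gets one loader per relation arity; tokio::spawn drives the batches.
    builder = builder.register_entity_dataloader_one_to_one(film_actor::Entity, tokio::spawn);
    builder = builder.register_entity_dataloader_one_to_many(film_actor::Entity, tokio::spawn);
    builder.register_entity_mutations::<film_actor::Entity, film_actor::ActiveModel>();
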
diff --git a/src/query/entity_object_via_relation.rs b/src/query/entity_object_via_relation.rs
index 9b638d54..db84ad23 100644
--- a/src/query/entity_object_via_relation.rs
+++ b/src/query/entity_object_via_relation.rs
@@ -1,4 +1,5 @@
 use async_graphql::{
+    dataloader::DataLoader,
     dynamic::{Field, FieldFuture, FieldValue, InputValue, TypeRef},
     Error,
 };
@@ -8,8 +9,10 @@ use sea_orm::{
 };
 
 use crate::{
-    apply_order, apply_pagination, get_filter_conditions, BuilderContext, ConnectionObjectBuilder,
-    EntityObjectBuilder, FilterInputBuilder, GuardAction, OrderInputBuilder,
+    apply_memory_pagination, apply_order, apply_pagination, get_filter_conditions, BuilderContext,
+    ConnectionObjectBuilder, EntityObjectBuilder, FilterInputBuilder, GuardAction,
+    HashableGroupKey, KeyComplex, OneToManyLoader, OneToOneLoader, OrderInputBuilder,
+    PaginationInputBuilder,
 };
 
 /// This builder produces a GraphQL field for an SeaORM entity related trait
@@ -64,54 +67,61 @@ impl EntityObjectViaRelationBuilder {
             .unwrap();
 
         let field = match via_relation_definition.is_owner {
-            false => {
-                Field::new(name, TypeRef::named(&object_name), move |ctx| {
-                    // FIXME: optimize by adding dataloader
-                    FieldFuture::new(async move {
-                        let guard_flag = if let Some(guard) = guard {
-                            (*guard)(&ctx)
-                        } else {
-                            GuardAction::Allow
-                        };
-
-                        if let GuardAction::Block(reason) = guard_flag {
-                            return match reason {
-                                Some(reason) => {
-                                    Err::<Option<_>, async_graphql::Error>(Error::new(reason))
-                                }
-                                None => Err::<Option<_>, async_graphql::Error>(Error::new(
-                                    "Entity guard triggered.",
-                                )),
-                            };
-                        }
-
-                        let parent: &T::Model = ctx
-                            .parent_value
-                            .try_downcast_ref::<T::Model>()
-                            .expect("Parent should exist");
-
-                        let stmt = if <T as Related<R>>::via().is_some() {
-                            <T as Related<R>>::find_related()
-                        } else {
-                            R::find()
-                        };
-
-                        let filter = Condition::all().add(to_col.eq(parent.get(from_col)));
-
-                        let stmt = stmt.filter(filter);
-
-                        let db = ctx.data::<DatabaseConnection>()?;
-
-                        let data = stmt.one(db).await?;
-
-                        if let Some(data) = data {
-                            Ok(Some(FieldValue::owned_any(data)))
-                        } else {
-                            Ok(None)
-                        }
-                    })
-                })
-            }
+            false => Field::new(name, TypeRef::named(&object_name), move |ctx| {
+                FieldFuture::new(async move {
+                    let guard_flag = if let Some(guard) = guard {
+                        (*guard)(&ctx)
+                    } else {
+                        GuardAction::Allow
+                    };
+
+                    if let GuardAction::Block(reason) = guard_flag {
+                        return match reason {
+                            Some(reason) => {
+                                Err::<Option<_>, async_graphql::Error>(Error::new(reason))
+                            }
+                            None => Err::<Option<_>, async_graphql::Error>(Error::new(
+                                "Entity guard triggered.",
+                            )),
+                        };
+                    }
+
+                    let parent: &T::Model = ctx
+                        .parent_value
+                        .try_downcast_ref::<T::Model>()
+                        .expect("Parent should exist");
+
+                    let loader = ctx.data_unchecked::<DataLoader<OneToOneLoader<R>>>();
+
+                    let stmt = if <T as Related<R>>::via().is_some() {
+                        <T as Related<R>>::find_related()
+                    } else {
+                        R::find()
+                    };
+
+                    let filters = ctx.args.get(&context.entity_query_field.filters);
+                    let filters = get_filter_conditions::<R>(context, filters);
+                    let order_by = ctx.args.get(&context.entity_query_field.order_by);
+                    let order_by = OrderInputBuilder { context }.parse_object::<R>(order_by);
+                    let key = KeyComplex::<R> {
+                        key: vec![parent.get(from_col)],
+                        meta: HashableGroupKey::<R> {
+                            stmt,
+                            columns: vec![to_col],
+                            filters: Some(filters),
+                            order_by,
+                        },
+                    };
+
+                    let data = loader.load_one(key).await?;
+
+                    if let Some(data) = data {
+                        Ok(Some(FieldValue::owned_any(data)))
+                    } else {
+                        Ok(None)
+                    }
+                })
+            }),
             true => Field::new(
                 name,
                 TypeRef::named_nn(connection_object_builder.type_name(&object_name)),
@@ -148,25 +158,42 @@ impl EntityObjectViaRelationBuilder {
                         R::find()
                     };
 
-                    let condition = if is_via_relation {
-                        Condition::all().add(from_col.eq(parent.get(from_col)))
-                    } else {
-                        Condition::all().add(to_col.eq(parent.get(from_col)))
-                    };
-
                     let filters = ctx.args.get(&context.entity_query_field.filters);
+                    let filters = get_filter_conditions::<R>(context, filters);
                     let order_by = ctx.args.get(&context.entity_query_field.order_by);
+                    let order_by = OrderInputBuilder { context }.parse_object::<R>(order_by);
                     let pagination = ctx.args.get(&context.entity_query_field.pagination);
+                    let pagination =
+                        PaginationInputBuilder { context }.parse_object(pagination);
 
-                    let base_condition = get_filter_conditions::<R>(context, filters);
+                    let db = ctx.data::<DatabaseConnection>()?;
 
-                    let stmt = stmt.filter(condition.add(base_condition));
-                    let stmt = apply_order(context, stmt, order_by);
+                    let connection = if is_via_relation {
+                        // TODO optimize query
+                        let condition = Condition::all().add(from_col.eq(parent.get(from_col)));
 
-                    let db = ctx.data::<DatabaseConnection>()?;
+                        let stmt = stmt.filter(condition.add(filters));
+                        let stmt = apply_order(stmt, order_by);
+                        apply_pagination::<R>(db, stmt, pagination).await?
+                    } else {
+                        let loader = ctx.data_unchecked::<DataLoader<OneToManyLoader<R>>>();
+
+                        let key = KeyComplex::<R> {
+                            key: vec![parent.get(from_col)],
+                            meta: HashableGroupKey::<R> {
+                                stmt,
+                                columns: vec![to_col],
+                                filters: Some(filters),
+                                order_by,
+                            },
+                        };
 
-                    let connection =
-                        apply_pagination::<R>(context, db, stmt, pagination).await?;
+                        let values = loader.load_one(key).await?;
+
+                        apply_memory_pagination(values, pagination)
+                    };
 
                     Ok(Some(FieldValue::owned_any(connection)))
                 })
diff --git a/src/query/entity_query_field.rs b/src/query/entity_query_field.rs
index 936c4921..168b9da7 100644
--- a/src/query/entity_query_field.rs
+++ b/src/query/entity_query_field.rs
@@ -107,16 +107,19 @@ impl EntityQueryFieldBuilder {
                     }
 
                     let filters = ctx.args.get(&context.entity_query_field.filters);
+                    let filters = get_filter_conditions::<T>(context, filters);
                     let order_by = ctx.args.get(&context.entity_query_field.order_by);
+                    let order_by = OrderInputBuilder { context }.parse_object::<T>(order_by);
                     let pagination = ctx.args.get(&context.entity_query_field.pagination);
+                    let pagination = PaginationInputBuilder { context }.parse_object(pagination);
 
                     let stmt = T::find();
-                    let stmt = stmt.filter(get_filter_conditions::<T>(context, filters));
-                    let stmt = apply_order(context, stmt, order_by);
+                    let stmt = stmt.filter(filters);
+                    let stmt = apply_order(stmt, order_by);
 
                     let db = ctx.data::<DatabaseConnection>()?;
 
-                    let connection = apply_pagination::<T>(context, db, stmt, pagination).await?;
+                    let connection = apply_pagination::<T>(db, stmt, pagination).await?;
 
                     Ok(Some(FieldValue::owned_any(connection)))
                 })
diff --git a/src/query/loader.rs b/src/query/loader.rs
new file mode 100644
index 00000000..61ac1ffc
--- /dev/null
+++ b/src/query/loader.rs
@@ -0,0 +1,397 @@
+use sea_orm::{sea_query::ValueTuple, Condition, ModelTrait, QueryFilter};
+use std::{collections::HashMap, hash::Hash, marker::PhantomData, sync::Arc};
+
+use crate::apply_order;
+
+#[derive(Clone, Debug)]
+pub struct KeyComplex<T>
+where
+    T: sea_orm::EntityTrait,
+{
+    /// The key tuple to equal with columns
+    pub key: Vec<sea_orm::Value>,
+    /// Meta Information
+    pub meta: HashableGroupKey<T>,
+}
+
+impl<T> PartialEq for KeyComplex<T>
+where
+    T: sea_orm::EntityTrait,
+{
+    fn eq(&self, other: &Self) -> bool {
+        self.key
+            .iter()
+            .map(map_key)
+            .eq(other.key.iter().map(map_key))
+            && self.meta.eq(&other.meta)
+    }
+}
+
+fn map_key(key: &sea_orm::Value) -> sea_orm::Value {
+    match key {
+        sea_orm::Value::TinyInt(value) => {
+            let value: Option<i64> = value.map(|value| value as i64);
+            sea_orm::Value::BigInt(value)
+        }
+        sea_orm::Value::SmallInt(value) => {
+            let value: Option<i64> = value.map(|value| value as i64);
+            sea_orm::Value::BigInt(value)
+        }
+        sea_orm::Value::Int(value) => {
+            let value: Option<i64> = value.map(|value| value as i64);
+            sea_orm::Value::BigInt(value)
+        }
+        sea_orm::Value::TinyUnsigned(value) => {
+            let value: Option<u64> = value.map(|value| value as u64);
+            sea_orm::Value::BigUnsigned(value)
+        }
+        sea_orm::Value::SmallUnsigned(value) => {
+            let value: Option<u64> = value.map(|value| value as u64);
+            sea_orm::Value::BigUnsigned(value)
+        }
+        sea_orm::Value::Unsigned(value) => {
+            let value: Option<u64> = value.map(|value| value as u64);
+            sea_orm::Value::BigUnsigned(value)
+        }
+        _ => key.clone(),
+    }
+}
+
+impl<T> Eq for KeyComplex<T> where T: sea_orm::EntityTrait {}
+
+impl<T> Hash for KeyComplex<T>
+where
+    T: sea_orm::EntityTrait,
+{
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        for key in self.key.iter() {
+            match key {
+                sea_orm::Value::TinyInt(value) => {
+                    let value: Option<i64> = value.map(|value| value as i64);
+                    value.hash(state);
+                }
+                sea_orm::Value::SmallInt(value) => {
+                    let value: Option<i64> = value.map(|value| value as i64);
+                    value.hash(state);
+                }
+                sea_orm::Value::Int(value) => {
+                    let value: Option<i64> = value.map(|value| value as i64);
+                    value.hash(state);
+                }
+                sea_orm::Value::TinyUnsigned(value) => {
+                    let value: Option<u64> = value.map(|value| value as u64);
+                    value.hash(state);
+                }
+                sea_orm::Value::SmallUnsigned(value) => {
+                    let value: Option<u64> = value.map(|value| value as u64);
+                    value.hash(state);
+                }
+                sea_orm::Value::Unsigned(value) => {
+                    let value: Option<u64> = value.map(|value| value as u64);
+                    value.hash(state);
+                }
+                _ => key.hash(state),
+            }
+        }
+        self.meta.hash(state);
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct HashableGroupKey<T>
+where
+    T: sea_orm::EntityTrait,
+{
+    /// Foundation SQL statement
+    pub stmt: sea_orm::Select<T>,
+    /// Columns tuple
+    pub columns: Vec<T::Column>,
+    /// Extra `WHERE` condition
+    pub filters: Option<sea_orm::Condition>,
+    /// Ordering
+    pub order_by: Vec<(T::Column, sea_orm::sea_query::Order)>,
+}
+
+impl<T> PartialEq for HashableGroupKey<T>
+where
+    T: sea_orm::EntityTrait,
+{
+    fn eq(&self, other: &Self) -> bool {
+        self.filters.eq(&other.filters)
+            && format!("{:?}", self.columns).eq(&format!("{:?}", other.columns))
+            && format!("{:?}", self.order_by).eq(&format!("{:?}", other.order_by))
+    }
+}
+
+impl<T> Eq for HashableGroupKey<T> where T: sea_orm::EntityTrait {}
+
+impl<T> Hash for HashableGroupKey<T>
+where
+    T: sea_orm::EntityTrait,
+{
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        format!("{:?}", self.filters).hash(state);
+        format!("{:?}", self.columns).hash(state);
+        format!("{:?}", self.order_by).hash(state);
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct HashableColumn<T>(T::Column)
+where
+    T: sea_orm::EntityTrait;
+
+impl<T> PartialEq for HashableColumn<T>
+where
+    T: sea_orm::EntityTrait,
+{
+    fn eq(&self, other: &Self) -> bool {
+        format!("{:?}", self.0).eq(&format!("{:?}", other.0))
+    }
+}
+
+impl<T> Eq for HashableColumn<T> where T: sea_orm::EntityTrait {}
+
+impl<T> Hash for HashableColumn<T>
+where
+    T: sea_orm::EntityTrait,
+{
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        format!("{:?}", self.0).hash(state);
+    }
+}
+
+pub struct OneToManyLoader<T>
+where
+    T: sea_orm::EntityTrait,
+{
+    connection: sea_orm::DatabaseConnection,
+    entity: PhantomData<T>,
+}
+
+impl<T> OneToManyLoader<T>
+where
+    T: sea_orm::EntityTrait,
+    T::Model: Sync,
+{
+    pub fn new(connection: sea_orm::DatabaseConnection) -> Self {
+        Self {
+            connection,
+            entity: PhantomData::<T>,
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl<T> async_graphql::dataloader::Loader<KeyComplex<T>> for OneToManyLoader<T>
+where
+    T: sea_orm::EntityTrait,
+    T::Model: Sync,
+{
+    type Value = Vec<T::Model>;
+    type Error = std::sync::Arc<sea_orm::error::DbErr>;
+
+    async fn load(
+        &self,
+        keys: &[KeyComplex<T>],
+    ) -> Result<HashMap<KeyComplex<T>, Self::Value>, Self::Error> {
+        let items: HashMap<HashableGroupKey<T>, Vec<Vec<sea_orm::Value>>> = keys
+            .iter()
+            .cloned()
+            .map(|item: KeyComplex<T>| {
+                (
+                    HashableGroupKey::<T> {
+                        stmt: item.meta.stmt,
+                        columns: item.meta.columns,
+                        filters: item.meta.filters,
+                        order_by: item.meta.order_by,
+                    },
+                    item.key,
+                )
+            })
+            .fold(
+                HashMap::<HashableGroupKey<T>, Vec<Vec<sea_orm::Value>>>::new(),
+                |mut acc: HashMap<HashableGroupKey<T>, Vec<Vec<sea_orm::Value>>>,
+                 cur: (HashableGroupKey<T>, Vec<sea_orm::Value>)| {
+                    match acc.get_mut(&cur.0) {
+                        Some(items) => {
+                            items.push(cur.1);
+                        }
+                        None => {
+                            acc.insert(cur.0, vec![cur.1]);
+                        }
+                    }
+
+                    acc
+                },
+            );
+
+        let promises: HashMap<HashableGroupKey<T>, _> = items
+            .into_iter()
+            .map(
+                |(key, values): (HashableGroupKey<T>, Vec<Vec<sea_orm::Value>>)| {
+                    let cloned_key = key.clone();
+
+                    let stmt = key.stmt;
+
+                    let condition = match key.filters {
+                        Some(condition) => Condition::all().add(condition),
+                        None => Condition::all(),
+                    };
+                    let tuple = sea_orm::sea_query::Expr::tuple(key.columns.iter().map(
+                        |column: &T::Column| sea_orm::sea_query::Expr::col(*column).into(),
+                    ));
+                    let condition =
+                        condition.add(tuple.in_tuples(values.into_iter().map(ValueTuple::Many)));
+                    let stmt = stmt.filter(condition);
+
+                    let stmt = apply_order(stmt, key.order_by);
+
+                    (cloned_key, stmt.all(&self.connection))
+                },
+            )
+            .collect();
+
+        let mut results: HashMap<KeyComplex<T>, Vec<T::Model>> = HashMap::new();
+
+        for (key, promise) in promises.into_iter() {
+            let key = key as HashableGroupKey<T>;
+            let result: Vec<T::Model> = promise.await.map_err(Arc::new)?;
+            for item in result.into_iter() {
+                let key = &KeyComplex::<T> {
+                    key: key
+                        .columns
+                        .iter()
+                        .map(|col: &T::Column| item.get(*col))
+                        .collect(),
+                    meta: key.clone(),
+                };
+                match results.get_mut(key) {
+                    Some(results) => {
+                        results.push(item);
+                    }
+                    None => {
+                        results.insert(key.clone(), vec![item]);
+                    }
+                };
+            }
+        }
+
+        Ok(results)
+    }
+}
+
+pub struct OneToOneLoader<T>
+where
+    T: sea_orm::EntityTrait,
+{
+    connection: sea_orm::DatabaseConnection,
+    entity: PhantomData<T>,
+}
+
+impl<T> OneToOneLoader<T>
+where
+    T: sea_orm::EntityTrait,
+    T::Model: Sync,
+{
+    pub fn new(connection: sea_orm::DatabaseConnection) -> Self {
+        Self {
+            connection,
+            entity: PhantomData::<T>,
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl<T> async_graphql::dataloader::Loader<KeyComplex<T>> for OneToOneLoader<T>
+where
+    T: sea_orm::EntityTrait,
+    T::Model: Sync,
+{
+    type Value = T::Model;
+    type Error = std::sync::Arc<sea_orm::error::DbErr>;
+
+    async fn load(
+        &self,
+        keys: &[KeyComplex<T>],
+    ) -> Result<HashMap<KeyComplex<T>, Self::Value>, Self::Error> {
+        let items: HashMap<HashableGroupKey<T>, Vec<Vec<sea_orm::Value>>> = keys
+            .iter()
+            .cloned()
+            .map(|item: KeyComplex<T>| {
+                (
+                    HashableGroupKey::<T> {
+                        stmt: item.meta.stmt,
+                        columns: item.meta.columns,
+                        filters: item.meta.filters,
+                        order_by: item.meta.order_by,
+                    },
+                    item.key,
+                )
+            })
+            .fold(
+                HashMap::<HashableGroupKey<T>, Vec<Vec<sea_orm::Value>>>::new(),
+                |mut acc: HashMap<HashableGroupKey<T>, Vec<Vec<sea_orm::Value>>>,
+                 cur: (HashableGroupKey<T>, Vec<sea_orm::Value>)| {
+                    match acc.get_mut(&cur.0) {
+                        Some(items) => {
+                            items.push(cur.1);
+                        }
+                        None => {
+                            acc.insert(cur.0, vec![cur.1]);
+                        }
+                    }
+
+                    acc
+                },
+            );
+
+        let promises: HashMap<HashableGroupKey<T>, _> = items
+            .into_iter()
+            .map(
+                |(key, values): (HashableGroupKey<T>, Vec<Vec<sea_orm::Value>>)| {
+                    let cloned_key = key.clone();
+
+                    let stmt = key.stmt;
+
+                    let condition = match key.filters {
+                        Some(condition) => Condition::all().add(condition),
+                        None => Condition::all(),
+                    };
+                    let tuple = sea_orm::sea_query::Expr::tuple(key.columns.iter().map(
+                        |column: &T::Column| sea_orm::sea_query::Expr::col(*column).into(),
+                    ));
+                    let condition =
+                        condition.add(tuple.in_tuples(values.into_iter().map(ValueTuple::Many)));
+                    let stmt = stmt.filter(condition);
+
+                    let stmt = apply_order(stmt, key.order_by);
+
+                    (cloned_key, stmt.all(&self.connection))
+                },
+            )
+            .collect();
+
+        let mut results: HashMap<KeyComplex<T>, T::Model> = HashMap::new();
+
+        for (key, promise) in promises.into_iter() {
+            let key = key as HashableGroupKey<T>;
+            let result: Vec<T::Model> = promise.await.map_err(Arc::new)?;
+            for item in result.into_iter() {
+                let key = &KeyComplex::<T> {
+                    key: key
+                        .columns
+                        .iter()
+                        .map(|col: &T::Column| item.get(*col))
+                        .collect(),
+                    meta: key.clone(),
+                };
+                results.insert(key.clone(), item);
+            }
+        }
+
+        Ok(results)
+    }
+}
diff --git a/src/query/mod.rs b/src/query/mod.rs
index 49969615..4c9f9f15 100644
--- a/src/query/mod.rs
+++ b/src/query/mod.rs
@@ -1,3 +1,6 @@
+pub mod loader;
+pub use loader::*;
+
 pub mod entity_query_field;
 pub use entity_query_field::*;
 
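How the new loaders batch: each pending relation lookup is a `KeyComplex` whose `meta` (base statement, join columns, extra filters, ordering) hashes into a `HashableGroupKey`; keys with equal metadata are grouped and resolved with a single tuple-`IN` query, and `map_key` widens integer key types so that, e.g., `Int` and `BigInt` parent keys compare alike. A sketch of two lookups collapsing into one query, using a hypothetical `payment` entity module like the one in the Sakila examples:

    use sea_orm::EntityTrait;
    use seaography::{HashableGroupKey, KeyComplex};

    // Two sibling `customer.payment` fields resolved in the same batch window:
    let meta = HashableGroupKey::<payment::Entity> {
        stmt: payment::Entity::find(),
        columns: vec![payment::Column::CustomerId],
        filters: None,
        order_by: vec![],
    };
    let key_a = KeyComplex::<payment::Entity> {
        key: vec![sea_orm::Value::Int(Some(315))],
        meta: meta.clone(),
    };
    let key_b = KeyComplex::<payment::Entity> {
        key: vec![sea_orm::Value::Int(Some(368))],
        meta,
    };
    // key_a and key_b share one HashableGroupKey, so OneToManyLoader::load
    // issues roughly: SELECT ... FROM payment WHERE (customer_id) IN ((315), (368))
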
diff --git a/src/query/ordering.rs b/src/query/ordering.rs
index 2bc383e3..28da4d79 100644
--- a/src/query/ordering.rs
+++ b/src/query/ordering.rs
@@ -1,46 +1,15 @@
-use async_graphql::dynamic::ValueAccessor;
-use sea_orm::{EntityTrait, Iterable, QueryOrder, Select};
-
-use crate::{BuilderContext, EntityObjectBuilder};
+use sea_orm::{EntityTrait, QueryOrder, Select};
 
 /// used to parse order input object and apply it to statement
 pub fn apply_order<T>(
-    context: &'static BuilderContext,
     stmt: Select<T>,
-    order_by: Option<ValueAccessor>,
+    order_by: Vec<(T::Column, sea_orm::sea_query::Order)>,
 ) -> Select<T>
 where
     T: EntityTrait,
     <T as EntityTrait>::Model: Sync,
 {
-    if let Some(order_by) = order_by {
-        let order_by = order_by.object().unwrap();
-
-        let entity_object = EntityObjectBuilder { context };
-
-        T::Column::iter().fold(stmt, |stmt, column: T::Column| {
-            let column_name = entity_object.column_name::<T>(&column);
-
-            let order = order_by.get(&column_name);
-
-            if let Some(order) = order {
-                let order = order.enum_name().unwrap();
-
-                let asc_variant = &context.order_by_enum.asc_variant;
-                let desc_variant = &context.order_by_enum.desc_variant;
-
-                if order.eq(asc_variant) {
-                    stmt.order_by(column, sea_orm::Order::Asc)
-                } else if order.eq(desc_variant) {
-                    stmt.order_by(column, sea_orm::Order::Desc)
-                } else {
-                    panic!("Cannot map enumeration")
-                }
-            } else {
-                stmt
-            }
-        })
-    } else {
-        stmt
-    }
+    order_by
+        .into_iter()
+        .fold(stmt, |stmt, (col, ord)| stmt.order_by(col, ord))
 }
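With ordering parsed up front, resolvers share one pipeline: parse the GraphQL `orderBy` argument into `Vec<(Column, Order)>` once, then apply it to either a SQL statement or a loader key. A sketch of the split, under the same hypothetical `payment` entity as above and assuming a `BuilderContext` and an argument `value: Option<ValueAccessor>` in scope:

    use seaography::{apply_order, OrderInputBuilder};

    // Parse the GraphQL `orderBy` argument once...
    let order_by: Vec<(payment::Column, sea_orm::sea_query::Order)> =
        OrderInputBuilder { context }.parse_object::<payment::Entity>(value);
    // ...then either apply it to a statement directly:
    let stmt = apply_order(payment::Entity::find(), order_by.clone());
    // ...or embed the same Vec in a HashableGroupKey for the dataloader path.
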
cursors with size greater than 3") - } + if let Some(cursor_object) = pagination.cursor { + let next_stmt = stmt.clone(); + let previous_stmt = stmt.clone(); + + fn apply_stmt_cursor_by( + stmt: sea_orm::entity::prelude::Select, + ) -> sea_orm::Cursor> + where + T: EntityTrait, + ::Model: Sync, + { + let size = T::PrimaryKey::iter().fold(0, |acc, _| acc + 1); + if size == 1 { + let column = T::PrimaryKey::iter() + .map(|variant| variant.into_column()) + .collect::>()[0]; + stmt.cursor_by(column) + } else if size == 2 { + let columns = T::PrimaryKey::iter() + .map(|variant| variant.into_column()) + .collect_tuple::<(T::Column, T::Column)>() + .unwrap(); + stmt.cursor_by(columns) + } else if size == 3 { + let columns = T::PrimaryKey::iter() + .map(|variant| variant.into_column()) + .collect_tuple::<(T::Column, T::Column, T::Column)>() + .unwrap(); + stmt.cursor_by(columns) + } else { + panic!("seaography does not support cursors with size greater than 3") } + } + + let mut stmt = apply_stmt_cursor_by(stmt); + + if let Some(cursor) = cursor_object.cursor { + let values = decode_cursor(&cursor)?; + + let cursor_values: sea_orm::sea_query::value::ValueTuple = map_cursor_values(values); + + stmt.after(cursor_values); + } + + let data = stmt.first(cursor_object.limit).all(db).await.unwrap(); - let mut stmt = apply_stmt_cursor_by(stmt); + let has_next_page: bool = { + let mut next_stmt = apply_stmt_cursor_by(next_stmt); - if let Some(cursor) = cursor_object.cursor { - let values = decode_cursor(&cursor)?; + let last_node = data.last(); - let cursor_values: sea_orm::sea_query::value::ValueTuple = - map_cursor_values(values); + if let Some(node) = last_node { + let values: Vec = T::PrimaryKey::iter() + .map(|variant| node.get(variant.into_column())) + .collect(); + + let values = map_cursor_values(values); - stmt.after(cursor_values); + let next_data = next_stmt.first(1).after(values).all(db).await.unwrap(); + + !next_data.is_empty() + } else { + false } + }; - let data = stmt.first(cursor_object.limit).all(db).await.unwrap(); + let has_previous_page: bool = { + let mut previous_stmt = apply_stmt_cursor_by(previous_stmt); - let has_next_page: bool = { - let mut next_stmt = apply_stmt_cursor_by(next_stmt); + let first_node = data.first(); - let last_node = data.last(); + if let Some(node) = first_node { + let values: Vec = T::PrimaryKey::iter() + .map(|variant| node.get(variant.into_column())) + .collect(); - if let Some(node) = last_node { - let values: Vec = T::PrimaryKey::iter() - .map(|variant| node.get(variant.into_column())) - .collect(); + let values = map_cursor_values(values); - let values = map_cursor_values(values); + let previous_data = previous_stmt.first(1).before(values).all(db).await.unwrap(); - let next_data = next_stmt.first(1).after(values).all(db).await.unwrap(); + !previous_data.is_empty() + } else { + false + } + }; - !next_data.is_empty() - } else { - false - } - }; + let edges: Vec> = data + .into_iter() + .map(|node| { + let values: Vec = T::PrimaryKey::iter() + .map(|variant| node.get(variant.into_column())) + .collect(); - let has_previous_page: bool = { - let mut previous_stmt = apply_stmt_cursor_by(previous_stmt); + let cursor: String = encode_cursor(values); - let first_node = data.first(); + Edge { cursor, node } + }) + .collect(); - if let Some(node) = first_node { - let values: Vec = T::PrimaryKey::iter() - .map(|variant| node.get(variant.into_column())) - .collect(); + let start_cursor = edges.first().map(|edge| edge.cursor.clone()); + let end_cursor = 
-                    let values = map_cursor_values(values);
+
+        Ok(Connection {
+            edges,
+            page_info: PageInfo {
+                has_previous_page,
+                has_next_page,
+                start_cursor,
+                end_cursor,
+            },
+            pagination_info: None,
+        })
+    } else if let Some(page_object) = pagination.page {
+        let paginator = stmt.paginate(db, page_object.limit);
-                    let previous_data =
-                        previous_stmt.first(1).before(values).all(db).await.unwrap();
+        let paginator_info = paginator.num_items_and_pages().await?;
-                    !previous_data.is_empty()
-                } else {
-                    false
-                }
-            };
+        let data = paginator.fetch_page(page_object.page).await?;
-            let edges: Vec<Edge<T>> = data
-                .into_iter()
-                .map(|node| {
-                    let values: Vec<sea_orm::Value> = T::PrimaryKey::iter()
-                        .map(|variant| node.get(variant.into_column()))
-                        .collect();
+        let edges: Vec<Edge<T>> = data
+            .into_iter()
+            .map(|node| {
+                let values: Vec<sea_orm::Value> = T::PrimaryKey::iter()
+                    .map(|variant| node.get(variant.into_column()))
+                    .collect();
-                    let cursor: String = encode_cursor(values);
+                let cursor: String = encode_cursor(values);
-                    Edge { cursor, node }
-                })
-                .collect();
+                Edge { cursor, node }
+            })
+            .collect();
+
+        let start_cursor = edges.first().map(|edge| edge.cursor.clone());
+        let end_cursor = edges.last().map(|edge| edge.cursor.clone());
+
+        Ok(Connection {
+            edges,
+            page_info: PageInfo {
+                has_previous_page: page_object.page != 0,
+                has_next_page: page_object.page + 1 != paginator_info.number_of_pages,
+                start_cursor,
+                end_cursor,
+            },
+            pagination_info: Some(PaginationInfo {
+                pages: paginator_info.number_of_pages,
+                current: page_object.page,
+                offset: page_object.page * page_object.limit,
+                total: paginator_info.number_of_items,
+            }),
+        })
+    } else if let Some(offset_object) = pagination.offset {
+        let offset = offset_object.offset;
+        let limit = offset_object.limit;
+
+        let count_stmt = stmt.clone().as_query().to_owned();
+
+        let data = stmt.offset(offset).limit(limit).all(db).await?;
+
+        let edges: Vec<Edge<T>> = data
+            .into_iter()
+            .map(|node| {
+                let values: Vec<sea_orm::Value> = T::PrimaryKey::iter()
+                    .map(|variant| node.get(variant.into_column()))
+                    .collect();
-            let start_cursor = edges.first().map(|edge| edge.cursor.clone());
-            let end_cursor = edges.last().map(|edge| edge.cursor.clone());
-
-            Ok(Connection {
-                edges,
-                page_info: PageInfo {
-                    has_previous_page,
-                    has_next_page,
-                    start_cursor,
-                    end_cursor,
-                },
-                pagination_info: None,
+                let cursor: String = encode_cursor(values);
+
+                Edge { cursor, node }
             })
-        } else if let Some(page_object) = pagination.page {
-            let paginator = stmt.paginate(db, page_object.limit);
+            .collect();
-            let pages = paginator.num_pages().await?;
+        let start_cursor = edges.first().map(|edge| edge.cursor.clone());
+        let end_cursor = edges.last().map(|edge| edge.cursor.clone());
-            let data = paginator.fetch_page(page_object.page).await?;
+        let count_stmt = db.get_database_backend().build(
+            sea_orm::sea_query::SelectStatement::new()
+                .expr(sea_orm::sea_query::Expr::cust("COUNT(*) AS num_items"))
+                .from_subquery(count_stmt, sea_orm::sea_query::Alias::new("sub_query")),
+        );
-            let edges: Vec<Edge<T>> = data
-                .into_iter()
-                .map(|node| {
-                    let values: Vec<sea_orm::Value> = T::PrimaryKey::iter()
-                        .map(|variant| node.get(variant.into_column()))
-                        .collect();
+        let total = match db.query_one(count_stmt).await? {
+            Some(res) => match db.get_database_backend() {
+                sea_orm::DbBackend::Postgres => res.try_get::<i64>("", "num_items")? as u64,
+                _ => res.try_get::<i32>("", "num_items")? as u64,
+            },
+            None => 0,
+        };
-                    let cursor: String = encode_cursor(values);
+        Ok(Connection {
+            edges,
+            page_info: PageInfo {
+                has_previous_page: offset != 0,
+                has_next_page: offset + limit < total,
+                start_cursor,
+                end_cursor,
+            },
+            pagination_info: Some(PaginationInfo {
+                current: f64::ceil(offset as f64 / limit as f64) as u64,
+                pages: f64::ceil(total as f64 / limit as f64) as u64,
+                total,
+                offset,
+            }),
+        })
+    } else {
+        let data = stmt.all(db).await?;
-                    Edge { cursor, node }
-                })
-                .collect();
+        let edges: Vec<Edge<T>> = data
+            .into_iter()
+            .map(|node| {
+                let values: Vec<sea_orm::Value> = T::PrimaryKey::iter()
+                    .map(|variant| node.get(variant.into_column()))
+                    .collect();
-            let start_cursor = edges.first().map(|edge| edge.cursor.clone());
-            let end_cursor = edges.last().map(|edge| edge.cursor.clone());
+                let cursor: String = encode_cursor(values);
+
+                Edge { cursor, node }
+            })
+            .collect();
+
+        let start_cursor = edges.first().map(|edge| edge.cursor.clone());
+        let end_cursor = edges.last().map(|edge| edge.cursor.clone());
+
+        let total = edges.len() as u64;
+
+        Ok(Connection {
+            edges,
+            page_info: PageInfo {
+                has_previous_page: false,
+                has_next_page: false,
+                start_cursor,
+                end_cursor,
+            },
+            pagination_info: Some(PaginationInfo {
+                pages: 1,
+                current: 1,
+                offset: 0,
+                total,
+            }),
+        })
+    }
+}
+
+pub fn apply_memory_pagination<T>(
+    values: Option<Vec<T::Model>>,
+    pagination: PaginationInput,
+) -> Connection<T>
+where
+    T: EntityTrait,
+    T::Model: Sync,
+{
+    let edges: Vec<Edge<T>> = match values {
+        Some(data) => {
         let edges: Vec<Edge<T>> = data
             .into_iter()
             .map(|node| {
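Between these two hunks: the new `apply_memory_pagination` paginates rows that are already loaded (e.g. a dataloader batch for a related entity) without another round trip. A minimal sketch of driving it — the `film_actor` models and the `PageInput { page, limit }` struct name are assumptions; the field shape is taken from this diff:

    use seaography::{apply_memory_pagination, PageInput, PaginationInput};

    // `models: Vec<film_actor::Model>` already fetched in memory.
    // Page 0, 25 nodes per page; yields a Connection with page_info
    // and pagination_info computed from the in-memory slice.
    let connection = apply_memory_pagination::<film_actor::Entity>(
        Some(models),
        PaginationInput {
            cursor: None,
            page: Some(PageInput { page: 0, limit: 25 }),
            offset: None,
        },
    );
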
@@ -201,66 +285,116 @@ where
                 Edge { cursor, node }
             })
             .collect();
+            edges
+        }
+        None => Vec::new(),
+    };
-            let start_cursor = edges.first().map(|edge| edge.cursor.clone());
-            let end_cursor = edges.last().map(|edge| edge.cursor.clone());
-
-            let count_stmt = db.get_database_backend().build(
-                sea_orm::sea_query::SelectStatement::new()
-                    .expr(sea_orm::sea_query::Expr::cust("COUNT(*) AS num_items"))
-                    .from_subquery(count_stmt, sea_orm::sea_query::Alias::new("sub_query")),
-            );
-
-            let total = match db.query_one(count_stmt).await? {
-                Some(res) => match db.get_database_backend() {
-                    sea_orm::DbBackend::Postgres => res.try_get::<i64>("", "num_items")? as u64,
-                    _ => res.try_get::<i32>("", "num_items")? as u64,
-                },
-                None => 0,
-            };
-
-            Ok(Connection {
-                edges,
-                page_info: PageInfo {
-                    has_previous_page: offset != 0,
-                    has_next_page: offset * limit < total,
-                    start_cursor,
-                    end_cursor,
-                },
-                pagination_info: Some(PaginationInfo {
-                    current: f64::ceil(offset as f64 / limit as f64) as u64,
-                    pages: f64::ceil(total as f64 / limit as f64) as u64,
-                    total,
-                    offset,
-                }),
-            })
+    if let Some(cursor_object) = pagination.cursor {
+        let total: u64 = edges.len() as u64;
+        let pages = f64::ceil(total as f64 / cursor_object.limit as f64) as u64;
+
+        let first_cursor = edges.first().map(|edge| edge.cursor.clone());
+        let last_cursor = edges.last().map(|edge| edge.cursor.clone());
+
+        let edges: Vec<Edge<T>> = if let Some(cursor) = cursor_object.cursor {
+            edges
+                .into_iter()
+                .filter(|edge: &Edge<T>| edge.cursor.gt(&cursor))
+                .collect()
         } else {
-            Err(DbErr::Type(
-                "Something is wrong with the pagination input".into(),
-            ))
+            edges
+        };
+
+        let current = f64::ceil(total as f64 / edges.len() as f64 * pages as f64) as u64;
+
+        let edges: Vec<Edge<T>> = edges
+            .into_iter()
+            .take(cursor_object.limit as usize)
+            .collect();
+
+        let start_cursor = edges.first().map(|edge| edge.cursor.clone());
+        let end_cursor = edges.last().map(|edge| edge.cursor.clone());
+
+        Connection {
+            edges,
+            page_info: PageInfo {
+                has_previous_page: !first_cursor.eq(&start_cursor),
+                has_next_page: !last_cursor.eq(&end_cursor),
+                start_cursor,
+                end_cursor,
+            },
+            pagination_info: Some(PaginationInfo {
+                pages,
+                current,
+                offset: current * cursor_object.limit,
+                total,
+            }),
         }
-        } else {
-            let data = stmt.all(db).await?;
+    } else if let Some(page_object) = pagination.page {
+        let total = edges.len() as u64;
+        let pages = f64::ceil(total as f64 / page_object.limit as f64) as u64;
-            let edges: Vec<Edge<T>> = data
+        let edges: Vec<Edge<T>> = edges
             .into_iter()
-                .map(|node| {
-                    let values: Vec<sea_orm::Value> = T::PrimaryKey::iter()
-                        .map(|variant| node.get(variant.into_column()))
-                        .collect();
+            .skip((page_object.page * page_object.limit).try_into().unwrap())
+            .take(page_object.limit.try_into().unwrap())
+            .collect();
-                    let cursor: String = encode_cursor(values);
+        let start_cursor = edges.first().map(|edge| edge.cursor.clone());
+        let end_cursor = edges.last().map(|edge| edge.cursor.clone());
-                    Edge { cursor, node }
-                })
+        Connection {
+            edges,
+            page_info: PageInfo {
+                has_previous_page: page_object.page != 0,
+                has_next_page: page_object.page + 1 < pages,
+                start_cursor,
+                end_cursor,
+            },
+            pagination_info: Some(PaginationInfo {
+                pages,
+                current: page_object.page,
+                offset: page_object.page * page_object.limit,
+                total,
+            }),
+        }
+    } else if let Some(offset_object) = pagination.offset {
+        let total = edges.len() as u64;
+        let pages = f64::ceil(total as f64 / offset_object.limit as f64) as u64;
+        let current = f64::ceil(offset_object.offset as f64 / offset_object.limit as f64) as u64;
+
+        let edges: Vec<Edge<T>> = edges
+            .into_iter()
+            .skip((offset_object.offset).try_into().unwrap())
+            .take(offset_object.limit.try_into().unwrap())
             .collect();
 
         let start_cursor = edges.first().map(|edge| edge.cursor.clone());
         let end_cursor = edges.last().map(|edge| edge.cursor.clone());
 
+        Connection {
+            edges,
+            page_info: PageInfo {
+                has_previous_page: offset_object.offset != 0,
+                has_next_page: offset_object.offset + offset_object.limit < total,
+                start_cursor,
+                end_cursor,
+            },
+            pagination_info: Some(PaginationInfo {
+                pages,
+                current,
+                offset: offset_object.offset,
+                total,
+            }),
+        }
+    } else {
+        let start_cursor = edges.first().map(|edge| edge.cursor.clone());
+        let end_cursor = edges.last().map(|edge| edge.cursor.clone());
+        let total = edges.len() as u64;
-        Ok(Connection {
+        Connection {
             edges,
             page_info: PageInfo {
                 has_previous_page: false,
@@ -274,6 +408,6 @@ where
                 offset: 0,
                 total,
             }),
-        })
+        }
     }
 }
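And the reworked `apply_pagination` end to end — again a sketch, with a hypothetical `film` entity, an open `DatabaseConnection`, and an assumed `OffsetInput { offset, limit }` struct name (field shape from this diff). The input is now a parsed `PaginationInput` rather than a raw GraphQL value, so the function is usable outside the dynamic schema:

    use seaography::{apply_pagination, OffsetInput, PaginationInput};

    // Inside an async resolver returning a Result. Fetches rows 40..60
    // of the filtered/ordered statement; the offset branch issues a
    // second `SELECT COUNT(*) FROM (...) AS sub_query` to fill in
    // PaginationInfo.total in a backend-agnostic way.
    let connection = apply_pagination(
        &db,
        film::Entity::find(),
        PaginationInput {
            cursor: None,
            page: None,
            offset: Some(OffsetInput { offset: 40, limit: 20 }),
        },
    )
    .await?;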