diff --git a/integrations/tide/src/lib.rs b/integrations/tide/src/lib.rs index b233d44a..faa7bd96 100644 --- a/integrations/tide/src/lib.rs +++ b/integrations/tide/src/lib.rs @@ -9,7 +9,7 @@ #![forbid(unsafe_code)] use async_graphql::http::MultipartOptions; -use async_graphql::{resolver_utils::ObjectType, Schema, SubscriptionType}; +use async_graphql::{resolver_utils::ObjectType, ParseRequestError, Schema, SubscriptionType}; use async_trait::async_trait; use tide::{ http::{ @@ -19,27 +19,43 @@ use tide::{ Body, Request, Response, StatusCode, }; -/// Create a new GraphQL endpoint with the schema and default multipart options. +/// Create a new GraphQL endpoint with the schema. +/// +/// Default multipart options are used and batch operations are supported. pub fn endpoint( schema: Schema, ) -> Endpoint { - endpoint_options(schema, MultipartOptions::default()) -} - -/// Create a new GraphQL endpoint with the schema and custom multipart options. -pub fn endpoint_options( - schema: Schema, - opts: MultipartOptions, -) -> Endpoint { - Endpoint { schema, opts } + Endpoint { + schema, + opts: MultipartOptions::default(), + batch: true, + } } /// A GraphQL endpoint. +/// +/// This is created with the [`endpoint`](fn.endpoint.html) function. +#[non_exhaustive] pub struct Endpoint { /// The schema of the endpoint. pub schema: Schema, /// The multipart options of the endpoint. pub opts: MultipartOptions, + /// Whether to support batch requests in the endpoint. + pub batch: bool, +} + +impl Endpoint { + /// Set the multipart options of the endpoint. + #[must_use] + pub fn multipart_opts(self, opts: MultipartOptions) -> Self { + Self { opts, ..self } + } + /// Set whether batch requests are supported in the endpoint. 
+ #[must_use] + pub fn batch(self, batch: bool) -> Self { + Self { batch, ..self } + } } // Manual impl to remove bounds on generics @@ -48,6 +64,7 @@ impl Clone for Endpoint) -> tide::Result { respond( self.schema - .execute(receive_request_opts(request, self.opts).await?) + .execute_batch(if self.batch { + receive_batch_request_opts(request, self.opts).await + } else { + receive_request_opts(request, self.opts) + .await + .map(Into::into) + }?) .await, ) } @@ -79,9 +102,27 @@ pub async fn receive_request( /// Convert a Tide request to a GraphQL request with options on how to receive multipart. pub async fn receive_request_opts( - mut request: Request, + request: Request, opts: MultipartOptions, ) -> tide::Result { + receive_batch_request_opts(request, opts) + .await? + .into_single() + .map_err(|e| tide::Error::new(StatusCode::BadRequest, e)) +} + +/// Convert a Tide request to a GraphQL batch request. +pub async fn receive_batch_request( + request: Request, +) -> tide::Result { + receive_batch_request_opts(request, Default::default()).await +} + +/// Convert a Tide request to a GraphQL batch request with options on how to receive multipart. +pub async fn receive_batch_request_opts( + mut request: Request, + opts: MultipartOptions, +) -> tide::Result { if request.method() == Method::Get { request.query() } else { @@ -91,17 +132,27 @@ pub async fn receive_request_opts( .and_then(|values| values.get(0)) .map(HeaderValue::as_str); - async_graphql::http::receive_body(content_type, body, opts) + async_graphql::http::receive_batch_body(content_type, body, opts) .await - .map_err(|e| tide::Error::new(StatusCode::BadRequest, e)) + .map_err(|e| { + tide::Error::new( + match &e { + ParseRequestError::PayloadTooLarge => StatusCode::PayloadTooLarge, + _ => StatusCode::BadRequest, + }, + e, + ) + }) } } /// Convert a GraphQL response to a Tide response. 
-pub fn respond(gql: async_graphql::Response) -> tide::Result<Response> {
+pub fn respond(gql: impl Into<async_graphql::BatchResponse>) -> tide::Result<Response> {
+    let gql = gql.into();
+
     let mut response = Response::new(StatusCode::Ok);
     if gql.is_ok() {
-        if let Some(cache_control) = gql.cache_control.value() {
+        if let Some(cache_control) = gql.cache_control().value() {
             response.insert_header(headers::CACHE_CONTROL, cache_control);
         }
     }