From e7c8f461e498253468454fa913271189cd8c6e1f Mon Sep 17 00:00:00 2001
From: Laurence Isla
Date: Tue, 28 Mar 2023 05:36:46 -0500
Subject: [PATCH] Add basic support for bulk delete

---
 CHANGELOG.md                          |  1 +
 src/PostgREST/ApiRequest.hs           |  1 +
 src/PostgREST/ApiRequest/Types.hs     |  1 +
 src/PostgREST/Error.hs                | 53 ++++++++++++++++-----------
 src/PostgREST/Plan.hs                 |  4 +-
 src/PostgREST/Plan/MutatePlan.hs      |  4 ++
 src/PostgREST/Query/QueryBuilder.hs   | 23 ++++++++----
 src/PostgREST/Query/SqlFragment.hs    |  7 ++--
 test/spec/Feature/Query/DeleteSpec.hs | 37 ++++++++++++++-----
 test/spec/fixtures/data.sql           |  6 +++
 test/spec/fixtures/schema.sql         | 23 ++++++++++++
 11 files changed, 117 insertions(+), 43 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f68f8d2f52c..608b6873642 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,7 @@ This project adheres to [Semantic Versioning](http://semver.org/).
 - #2694, Make `db-root-spec` stable. - @steve-chavez
   + This can be used to override the OpenAPI spec with a custom database function
 - #1567, On bulk inserts with `?columns`, undefined json keys can get columns' DEFAULT values by using the `Prefer: undefined-keys=apply-defaults` header - @steve-chavez
+- #2314, Allow bulk delete by using DELETE with an array body - @laurenceisla
 
 ### Fixed
diff --git a/src/PostgREST/ApiRequest.hs b/src/PostgREST/ApiRequest.hs
index 5da127cc854..13be2778788 100644
--- a/src/PostgREST/ApiRequest.hs
+++ b/src/PostgREST/ApiRequest.hs
@@ -289,6 +289,7 @@ getPayload reqBody contentMediaType QueryParams{qsColumns} action PathInfo{pathI
       (ActionInvoke InvPost, _)              -> True
       (ActionMutate MutationSingleUpsert, _) -> True
       (ActionMutate MutationUpdate, _)       -> True
+      (ActionMutate MutationDelete, _)       -> not (LBS.null reqBody)
       _                                      -> False
 
     columns = case action of
diff --git a/src/PostgREST/ApiRequest/Types.hs b/src/PostgREST/ApiRequest/Types.hs
index df373dacc11..4ea0df07a76 100644
--- a/src/PostgREST/ApiRequest/Types.hs
+++ b/src/PostgREST/ApiRequest/Types.hs
@@ -66,6 +66,7 @@ data ApiRequestError
   = AmbiguousRelBetween Text Text [Relationship]
   | AmbiguousRpc [ProcDescription]
   | BinaryFieldError MediaType
+  | BulkLimitNotAllowedError
   | MediaTypeError [ByteString]
   | InvalidBody ByteString
   | InvalidFilters
diff --git a/src/PostgREST/Error.hs b/src/PostgREST/Error.hs
index 6682f8980b7..3d218870468 100644
--- a/src/PostgREST/Error.hs
+++ b/src/PostgREST/Error.hs
@@ -59,28 +59,29 @@ class (JSON.ToJSON a) => PgrstError a where
   errorResponseFor err = responseLBS (status err) (headers err) $ errorPayload err
 
 instance PgrstError ApiRequestError where
-  status AmbiguousRelBetween{}   = HTTP.status300
-  status AmbiguousRpc{}          = HTTP.status300
-  status BinaryFieldError{}      = HTTP.status406
-  status MediaTypeError{}        = HTTP.status415
-  status InvalidBody{}           = HTTP.status400
-  status InvalidFilters          = HTTP.status405
-  status InvalidRpcMethod{}      = HTTP.status405
-  status InvalidRange{}          = HTTP.status416
-  status NotFound                = HTTP.status404
-
-  status NoRelBetween{}          = HTTP.status400
-  status NoRpc{}                 = HTTP.status404
-  status NotEmbedded{}           = HTTP.status400
-  status PutRangeNotAllowedError = HTTP.status400
-  status QueryParamError{}       = HTTP.status400
-  status RelatedOrderNotToOne{}  = HTTP.status400
-  status SpreadNotToOne{}        = HTTP.status400
-  status UnacceptableFilter{}    = HTTP.status400
-  status UnacceptableSchema{}    = HTTP.status406
-  status UnsupportedMethod{}     = HTTP.status405
-  status LimitNoOrderError       = HTTP.status400
-  status ColumnNotFound{}        = HTTP.status400
+  status AmbiguousRelBetween{}     = HTTP.status300
+  status AmbiguousRpc{}            = HTTP.status300
+  status BinaryFieldError{}        = HTTP.status406
+  status BulkLimitNotAllowedError  = HTTP.status400
+  status MediaTypeError{}          = HTTP.status415
+  status InvalidBody{}             = HTTP.status400
+  status InvalidFilters            = HTTP.status405
+  status InvalidRpcMethod{}        = HTTP.status405
+  status InvalidRange{}            = HTTP.status416
+  status NotFound                  = HTTP.status404
+
+  status NoRelBetween{}            = HTTP.status400
+  status NoRpc{}                   = HTTP.status404
+  status NotEmbedded{}             = HTTP.status400
+  status PutRangeNotAllowedError   = HTTP.status400
+  status QueryParamError{}         = HTTP.status400
+  status RelatedOrderNotToOne{}    = HTTP.status400
+  status SpreadNotToOne{}          = HTTP.status400
+  status UnacceptableFilter{}      = HTTP.status400
+  status UnacceptableSchema{}      = HTTP.status406
+  status UnsupportedMethod{}       = HTTP.status405
+  status LimitNoOrderError         = HTTP.status400
+  status ColumnNotFound{}          = HTTP.status400
 
   headers _ = [MediaType.toContentType MTApplicationJSON]
 
@@ -172,6 +173,12 @@ instance JSON.ToJSON ApiRequestError where
     "details" .= ("Only is null or not is null filters are allowed on embedded resources" :: Text),
     "hint" .= JSON.Null]
 
+  toJSON BulkLimitNotAllowedError = JSON.object [
+    "code" .= ApiRequestErrorCode21,
+    "message" .= ("Range header and limit/offset querystring parameters are not allowed for DELETE with Prefer: params=multiple-objects" :: Text),
+    "details" .= JSON.Null,
+    "hint" .= JSON.Null]
+
   toJSON (NoRelBetween parent child embedHint schema allRels) = JSON.object [
     "code" .= SchemaCacheErrorCode00,
     "message" .= ("Could not find a relationship between '" <> parent <> "' and '" <> child <> "' in the schema cache" :: Text),
@@ -598,6 +605,7 @@ data ErrorCode
   | ApiRequestErrorCode18
   | ApiRequestErrorCode19
   | ApiRequestErrorCode20
+  | ApiRequestErrorCode21
   -- Schema Cache errors
   | SchemaCacheErrorCode00
   | SchemaCacheErrorCode01
@@ -644,6 +652,7 @@ buildErrorCode code = "PGRST" <> case code of
   ApiRequestErrorCode18 -> "118"
   ApiRequestErrorCode19 -> "119"
   ApiRequestErrorCode20 -> "120"
+  ApiRequestErrorCode21 -> "121"
 
   SchemaCacheErrorCode00 -> "200"
   SchemaCacheErrorCode01 -> "201"
diff --git a/src/PostgREST/Plan.hs b/src/PostgREST/Plan.hs
index e52fc72a596..67833d3a547 100644
--- a/src/PostgREST/Plan.hs
+++ b/src/PostgREST/Plan.hs
@@ -518,7 +518,8 @@ mutatePlan mutation qi ApiRequest{iPreferences=preferences, ..} sCache readReq =
         then mapRight (\typedColumns -> Insert qi typedColumns body (Just (MergeDuplicates, pkCols)) combinedLogic returnings mempty False) typedColumnsOrError
         else Left InvalidFilters
-    MutationDelete -> Right $ Delete qi combinedLogic iTopLevelRange rootOrder returnings
+    MutationDelete ->
+      mapRight (\typedColumns -> Delete qi typedColumns body combinedLogic iTopLevelRange rootOrder returnings pkCols isBulk) typedColumnsOrError
   where
     confCols = fromMaybe pkCols qsOnConflict
     QueryParams.QueryParams{..} = iQueryParams
@@ -534,6 +535,7 @@ mutatePlan mutation qi ApiRequest{iPreferences=preferences, ..} sCache readReq =
     tbl = HM.lookup qi $ dbTables sCache
     typedColumnsOrError = resolveOrError tbl `traverse` S.toList iColumns
     applyDefaults = preferences.preferUndefinedKeys == Just ApplyDefaults
+    isBulk = preferences.preferParameters == Just MultipleObjects
 
 resolveOrError :: Maybe Table -> FieldName -> Either ApiRequestError TypedField
 resolveOrError Nothing _ = Left NotFound
diff --git a/src/PostgREST/Plan/MutatePlan.hs b/src/PostgREST/Plan/MutatePlan.hs
index ffb0b8e53f0..9d4c26fa2b0 100644
--- a/src/PostgREST/Plan/MutatePlan.hs
+++ b/src/PostgREST/Plan/MutatePlan.hs
@@ -38,8 +38,12 @@ data MutatePlan
     }
   | Delete
     { in_        :: QualifiedIdentifier
+    , delCols    :: [TypedField]
+    , delBody    :: Maybe LBS.ByteString
     , where_     :: [LogicTree]
     , mutRange   :: NonnegRange
     , mutOrder   :: [OrderTerm]
     , returning  :: [FieldName]
+    , delPkFlts  :: [FieldName]
+    , isBulk     :: Bool
     }
diff --git a/src/PostgREST/Query/QueryBuilder.hs b/src/PostgREST/Query/QueryBuilder.hs
index 88304c9a50b..adbc3b4979c 100644
--- a/src/PostgREST/Query/QueryBuilder.hs
+++ b/src/PostgREST/Query/QueryBuilder.hs
@@ -83,7 +83,7 @@ getSelectsJoins rr@(Node ReadPlan{select, relName, relToParent=Just rel, relAggA
 mutatePlanToQuery :: MutatePlan -> SQL.Snippet
 mutatePlanToQuery (Insert mainQi iCols body onConflct putConditions returnings _ applyDefaults) =
   "INSERT INTO " <> SQL.sql (fromQi mainQi) <> SQL.sql (if null iCols then " " else "(" <> cols <> ") ") <>
-  fromJsonBodyF body iCols True False applyDefaults <>
+  fromJsonBodyF body iCols True False False applyDefaults <>
   -- Only used for PUT
   (if null putConditions then mempty else "WHERE " <> intercalateSnippet " AND " (pgFmtLogicTree (QualifiedIdentifier mempty "pgrst_body") <$> putConditions)) <>
   SQL.sql (BS.unwords [
@@ -114,13 +114,13 @@ mutatePlanToQuery (Update mainQi uCols body logicForest range ordts returnings a
   | range == allRange =
     "UPDATE " <> mainTbl <> " SET " <> SQL.sql nonRangeCols <> " " <>
-    fromJsonBodyF body uCols False False applyDefaults <>
+    fromJsonBodyF body uCols False False False applyDefaults <>
     whereLogic <> " " <>
     SQL.sql (returningF mainQi returnings)
 
   | otherwise =
     "WITH " <>
-    "pgrst_update_body AS (" <> fromJsonBodyF body uCols True True applyDefaults <> "), " <>
+    "pgrst_update_body AS (" <> fromJsonBodyF body uCols True False True applyDefaults <> "), " <>
     "pgrst_affected_rows AS (" <>
     "SELECT " <> SQL.sql rangeIdF <> " FROM " <> mainTbl <>
     whereLogic <> " " <>
@@ -140,10 +140,12 @@ mutatePlanToQuery (Update mainQi uCols body logicForest range ordts returnings a
     rangeCols = BS.intercalate ", " ((\col -> pgFmtIdent (tfName col) <> " = (SELECT " <> pgFmtIdent (tfName col) <> " FROM pgrst_update_body) ") <$> uCols)
     (whereRangeIdF, rangeIdF) = mutRangeF mainQi (fst . otTerm <$> ordts)
 
-mutatePlanToQuery (Delete mainQi logicForest range ordts returnings)
+mutatePlanToQuery (Delete mainQi dCols body logicForest range ordts returnings pkFlts isBulk)
   | range == allRange =
     "DELETE FROM " <> SQL.sql (fromQi mainQi) <> " " <>
-    whereLogic <> " " <>
+    (if isBulk
+      then fromJsonBodyF body dCols False True False False <> whereLogicBulk
+      else whereLogic) <> " " <>
     SQL.sql (returningF mainQi returnings)
 
   | otherwise =
@@ -160,8 +162,15 @@ mutatePlanToQuery (Delete mainQi logicForest range ordts returnings)
     SQL.sql (returningF mainQi returnings)
 
   where
-    whereLogic = if null logicForest then mempty else " WHERE " <> intercalateSnippet " AND " (pgFmtLogicTree mainQi <$> logicForest)
+    whereLogic = pgFmtWhereF (null logicForest) logicForestF
+    whereLogicBulk = pgFmtWhereF (null logicForest && null pkFlts) (logicForestF <> pgrstDeleteBodyF)
+    pgFmtWhereF hasEmptyLogic flts = if hasEmptyLogic then mempty else " WHERE " <> intercalateSnippet " AND " flts
+-- whereLogic = if null logicForest then mempty else " WHERE " <> intercalateSnippet " AND " (pgFmtLogicTree mainQi <$> logicForest)
     (whereRangeIdF, rangeIdF) = mutRangeF mainQi (fst . otTerm <$> ordts)
+    logicForestF = pgFmtLogicTree mainQi <$> logicForest
+    pgrstDeleteBodyF = pgFmtBodyFilter mainQi (QualifiedIdentifier mempty "pgrst_body") <$> pkFlts
+    pgFmtBodyFilter table cte f = SQL.sql (pgFmtColumn table f <> " = " <> pgFmtColumn cte f)
+-- pgrstDeleteBodyF = SQL.sql (BS.intercalate " AND " $ (\x -> pgFmtColumn mainQi x <> " = " <> pgFmtColumn (QualifiedIdentifier mempty "pgrst_delete_body") x) <$> S.toList dCols)
 
 callPlanToQuery :: CallPlan -> SQL.Snippet
 callPlanToQuery (FunctionCall qi params args returnsScalar multipleCall returnings) =
@@ -171,7 +180,7 @@ callPlanToQuery (FunctionCall qi params args returnsScalar multipleCall returnin
   fromCall = case params of
     OnePosParam prm -> "FROM " <> callIt (singleParameter args $ encodeUtf8 $ ppType prm)
     KeyParams []    -> "FROM " <> callIt mempty
-    KeyParams prms  -> fromJsonBodyF args ((\p -> TypedField (ppName p) (ppType p) Nothing) <$> prms) False (not multipleCall) False <> ", " <>
+    KeyParams prms  -> fromJsonBodyF args ((\p -> TypedField (ppName p) (ppType p) Nothing) <$> prms) False False (not multipleCall) False <> ", " <>
                        "LATERAL " <> callIt (fmtParams prms)
 
 callIt :: SQL.Snippet -> SQL.Snippet
diff --git a/src/PostgREST/Query/SqlFragment.hs b/src/PostgREST/Query/SqlFragment.hs
index 87c8b02e5c8..ce1f70fc30a 100644
--- a/src/PostgREST/Query/SqlFragment.hs
+++ b/src/PostgREST/Query/SqlFragment.hs
@@ -231,11 +231,12 @@ pgFmtSelectItem table (f@(fName, jp), Nothing, alias) = pgFmtField table f <> SQ
 pgFmtSelectItem table (f@(fName, jp), Just cast, alias) = "CAST (" <> pgFmtField table f <> " AS " <> SQL.sql (encodeUtf8 cast) <> " )" <> SQL.sql (pgFmtAs fName jp alias)
 
 -- TODO: At this stage there shouldn't be a Maybe since ApiRequest should ensure that an INSERT/UPDATE has a body
-fromJsonBodyF :: Maybe LBS.ByteString -> [TypedField] -> Bool -> Bool -> Bool -> SQL.Snippet
-fromJsonBodyF body fields includeSelect includeLimitOne includeDefaults =
+fromJsonBodyF :: Maybe LBS.ByteString -> [TypedField] -> Bool -> Bool -> Bool -> Bool -> SQL.Snippet
+fromJsonBodyF body fields includeSelect includeUsing includeLimitOne includeDefaults =
   SQL.sql (if includeSelect then "SELECT " <> parsedCols <> " " else mempty) <>
-  "FROM (SELECT " <> jsonPlaceHolder <> " AS json_data) pgrst_payload, " <>
+  (if includeUsing then "USING " else "FROM ") <>
+  "(SELECT " <> jsonPlaceHolder <> " AS json_data) pgrst_payload, " <>
   -- convert a json object into a json array, this way we can use json_to_recordset for all json payloads
   -- Otherwise we'd have to use json_to_record for json objects and json_to_recordset for json arrays
   -- We do this in SQL to avoid processing the JSON in application code
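The two changes above work together: the new `includeUsing` flag makes `fromJsonBodyF` open the payload with `USING` instead of `FROM`, and `pgFmtBodyFilter` equates each primary-key column of the target table with the matching column of the `pgrst_body` recordset. For orientation only, here is a hand-written sketch of the kind of statement the bulk branch should end up producing for a table whose primary key is `id`; the payload is inlined in place of the parameter placeholder, the `pgrst_uniform_json` step is assumed from the surrounding fragments, and the exact aliases and casts may differ:

  -- Sketch of the expected generated SQL for DELETE /bulk_delete_items
  -- with body [{"id": 1}, {"id": 3}] and Prefer: params=multiple-objects.
  DELETE FROM "test"."bulk_delete_items"
  USING (SELECT '[{"id": 1}, {"id": 3}]'::json AS json_data) pgrst_payload,
        LATERAL (SELECT CASE WHEN json_typeof(pgrst_payload.json_data) = 'array'
                             THEN pgrst_payload.json_data
                             ELSE json_build_array(pgrst_payload.json_data)
                        END AS val) pgrst_uniform_json,
        LATERAL (SELECT * FROM json_to_recordset(pgrst_uniform_json.val) AS _("id" int)) pgrst_body
  WHERE "test"."bulk_delete_items"."id" = pgrst_body."id";
  -- returningF appends a RETURNING list when a representation is requested.
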
context "limited delete" $ do it "works with the limit and offset query params" $ - baseTable "limited_delete_items" "id" tblDataBefore + baseTable "limited_delete_items" "id" tblDataBeforeLimit `mutatesWith` requestMutation methodDelete "/limited_delete_items?order=id&limit=1&offset=1" mempty `shouldMutateInto` @@ -162,7 +168,7 @@ spec = ]|] it "works with the limit query param plus a filter" $ - baseTable "limited_delete_items" "id" tblDataBefore + baseTable "limited_delete_items" "id" tblDataBeforeLimit `mutatesWith` requestMutation methodDelete "/limited_delete_items?order=id&limit=1&id=gt.1" mempty `shouldMutateInto` @@ -198,7 +204,7 @@ spec = { matchStatus = 400 } it "works with views with an explicit order by unique col" $ - baseTable "limited_delete_items_view" "id" tblDataBefore + baseTable "limited_delete_items_view" "id" tblDataBeforeLimit `mutatesWith` requestMutation methodDelete "/limited_delete_items_view?order=id&limit=1&offset=1" mempty `shouldMutateInto` @@ -208,7 +214,7 @@ spec = ]|] it "works with views with an explicit order by composite pk" $ - baseTable "limited_delete_items_cpk_view" "id" tblDataBefore + baseTable "limited_delete_items_cpk_view" "id" tblDataBeforeLimit `mutatesWith` requestMutation methodDelete "/limited_delete_items_cpk_view?order=id,name&limit=1&offset=1" mempty `shouldMutateInto` @@ -218,7 +224,7 @@ spec = ]|] it "works on a table without a pk by ordering by 'ctid'" $ - baseTable "limited_delete_items_no_pk" "id" tblDataBefore + baseTable "limited_delete_items_no_pk" "id" tblDataBeforeLimit `mutatesWith` requestMutation methodDelete "/limited_delete_items_no_pk?order=ctid&limit=1&offset=1" mempty `shouldMutateInto` @@ -226,3 +232,14 @@ spec = { "id": 1, "name": "item-1" } , { "id": 3, "name": "item-3" } ]|] + +-- context "bulk deletes" $ do +-- it "can delete tables with simple pk" $ +-- baseTable "bulk_delete_items" "id" tblDataBeforeBulk +-- `mutatesWith` +-- requestMutation methodDelete "/bulk_delete_items" mempty +-- `shouldMutateInto` +-- [json|[ +-- { "id": 1, "name": "any name 1" } +-- , { "id": 3, "name": "any name 3" } +-- ]|] diff --git a/test/spec/fixtures/data.sql b/test/spec/fixtures/data.sql index cb2e219b9c8..1e169c2e7ba 100644 --- a/test/spec/fixtures/data.sql +++ b/test/spec/fixtures/data.sql @@ -838,3 +838,9 @@ INSERT INTO posters(id,name) VALUES (1,'Mark'), (2,'Elon'), (3,'Bill'), (4,'Jeff TRUNCATE TABLE subscriptions CASCADE; INSERT INTO subscriptions(subscriber,subscribed) VALUES (3,1), (4,1), (1,2); + +TRUNCATE TABLE test.body_delete_items CASCADE; +INSERT INTO test.body_delete_items (id, name, observation) VALUES (1, 'item-1', NULL), (2, 'item-2', NULL), (3, 'item-3', NULL); + +TRUNCATE TABLE test.bulk_delete_items CASCADE; +INSERT INTO test.bulk_delete_items (id, name, observation) VALUES (1, 'item-1', NULL), (2, 'item-2', NULL), (3, 'item-3', NULL); diff --git a/test/spec/fixtures/schema.sql b/test/spec/fixtures/schema.sql index 3dffb555cd5..a71d84066f6 100644 --- a/test/spec/fixtures/schema.sql +++ b/test/spec/fixtures/schema.sql @@ -3110,3 +3110,26 @@ create table test.tbl_w_json( id int, data json ); + +-- Table to test deletes with body in the payload + +CREATE TABLE test.body_delete_items ( + id INT PRIMARY KEY , + name TEXT, + observation TEXT +); + +-- Tables to test bulk deletes + +CREATE TABLE test.bulk_delete_items ( + id INT PRIMARY KEY, + name TEXT, + observation TEXT +); + +CREATE TABLE test.bulk_delete_items_cpk ( + id INT, + name TEXT, + observation TEXT, + PRIMARY KEY (id, name) +);