diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 7cfedb0b2b..96e6364a6c 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -67,7 +67,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- pgVersion: [12, 13, 14, 15, 16, 17]
+ pgVersion: [13, 14, 15, 16, 17]
name: PG ${{ matrix.pgVersion }}
runs-on: ubuntu-24.04
defaults:
diff --git a/default.nix b/default.nix
index af73cce9aa..6254421c1e 100644
--- a/default.nix
+++ b/default.nix
@@ -1,6 +1,6 @@
{ system ? builtins.currentSystem
-, compiler ? "ghc948"
+, compiler ? "ghc984"
, # Commit of the Nixpkgs repository that we want to use.
nixpkgsVersion ? import nix/nixpkgs-version.nix
@@ -35,7 +35,6 @@ let
allOverlays.build-toolbox
allOverlays.checked-shell-script
allOverlays.gitignore
- allOverlays.postgresql-libpq
(allOverlays.haskell-packages { inherit compiler; })
allOverlays.slocat
];
@@ -51,7 +50,6 @@ let
{ name = "postgresql-15"; postgresql = pkgs.postgresql_15.withPackages (p: [ p.postgis p.pg_safeupdate ]); }
{ name = "postgresql-14"; postgresql = pkgs.postgresql_14.withPackages (p: [ p.postgis p.pg_safeupdate ]); }
{ name = "postgresql-13"; postgresql = pkgs.postgresql_13.withPackages (p: [ p.postgis p.pg_safeupdate ]); }
- { name = "postgresql-12"; postgresql = pkgs.postgresql_12.withPackages (p: [ p.postgis p.pg_safeupdate ]); }
];
# Dynamic derivation for PostgREST
@@ -97,7 +95,8 @@ rec {
# Tooling for analyzing Haskell imports and exports.
hsie =
pkgs.callPackage nix/hsie {
- inherit (pkgs.haskell.packages."${compiler}") ghcWithPackages;
+ # TODO: Fix hsie with newer GHC
+ inherit (pkgs.haskell.packages.ghc948) ghcWithPackages;
};
### Tools
diff --git a/nix/README.md b/nix/README.md
index 601fa4f555..6159168cfa 100644
--- a/nix/README.md
+++ b/nix/README.md
@@ -75,12 +75,12 @@ The PostgREST utilities available in `nix-shell` all have names that begin with
postgrest-build postgrest-test-spec
postgrest-check postgrest-watch
postgrest-clean postgrest-with-all
-postgrest-coverage postgrest-with-postgresql-12
-postgrest-lint postgrest-with-postgresql-13
-postgrest-run postgrest-with-postgresql-14
-postgrest-style postgrest-with-postgresql-15
-postgrest-style-check postgrest-with-postgresql-16
-postgrest-test-io postgrest-with-postgresql-17
+postgrest-coverage postgrest-with-postgresql-13
+postgrest-lint postgrest-with-postgresql-14
+postgrest-run postgrest-with-postgresql-15
+postgrest-style postgrest-with-postgresql-16
+postgrest-style-check postgrest-with-postgresql-17
+postgrest-test-io
...
[nix-shell]$
@@ -99,12 +99,12 @@ $ nix-shell --arg memory true
postgrest-build postgrest-test-spec
postgrest-check postgrest-watch
postgrest-clean postgrest-with-all
-postgrest-coverage postgrest-with-postgresql-12
-postgrest-lint postgrest-with-postgresql-13
-postgrest-run postgrest-with-postgresql-14
-postgrest-style postgrest-with-postgresql-15
-postgrest-style-check postgrest-with-postgresql-16
-postgrest-test-io postgrest-with-postgresql-17
+postgrest-coverage postgrest-with-postgresql-13
+postgrest-lint postgrest-with-postgresql-14
+postgrest-run postgrest-with-postgresql-15
+postgrest-style postgrest-with-postgresql-16
+postgrest-style-check postgrest-with-postgresql-17
+postgrest-test-io
postgrest-test-memory
...
diff --git a/nix/libpq.nix b/nix/libpq.nix
deleted file mode 100644
index 1500294621..0000000000
--- a/nix/libpq.nix
+++ /dev/null
@@ -1,61 +0,0 @@
-# Creating a separate libpq package is is discussed in
-# https://github.com/NixOS/nixpkgs/issues/61580, but nixpkgs has not moved
-# forward, yet.
-# This package is passed to postgresql-libpq (haskell) which needs to be
-# cross-compiled to the static build and possibly other architectures as
-# as well. To reduce the number of dependencies that need to be built with
-# it, this derivation focuses on building the client libraries only. No
-# server, no tests.
-{ stdenv
-, lib
-, openssl
-, zlib
-, postgresql
-, pkg-config
-, tzdata
-}:
-
-stdenv.mkDerivation {
- pname = "libpq";
- inherit (postgresql) src version patches;
-
- __structuredAttrs = true;
- env.CFLAGS = "-fdata-sections -ffunction-sections"
- + (if stdenv.cc.isClang then " -flto" else " -fmerge-constants -Wl,--gc-sections");
-
- configureFlags = [
- "--without-gssapi"
- "--without-icu"
- "--without-readline"
- "--with-openssl"
- "--with-system-tzdata=${tzdata}/share/zoneinfo"
- "--sysconfdir=/etc/postgresql"
- ];
-
- nativeBuildInputs = [ pkg-config tzdata ];
- buildInputs = [ openssl zlib ];
-
- buildFlags = [ "submake-libpq" "submake-libpgport" ];
-
- installPhase = ''
- runHook preInstall
-
- make -C src/bin/pg_config install
- make -C src/common install
- make -C src/include install
- make -C src/interfaces/libpq install
- make -C src/port install
-
- rm -rfv $out/share
-
- runHook postInstall
- '';
-
- outputs = [ "out" ];
-
- meta = with lib; {
- homepage = "https://www.postgresql.org";
- description = "Client API library for PostgreSQL";
- license = licenses.postgresql;
- };
-}
diff --git a/nix/nixpkgs-version.nix b/nix/nixpkgs-version.nix
index 52dcfe645d..5be16d8ed5 100644
--- a/nix/nixpkgs-version.nix
+++ b/nix/nixpkgs-version.nix
@@ -2,8 +2,8 @@
{
owner = "NixOS";
repo = "nixpkgs";
- ref = "refs/heads/nixpkgs-unstable-darwin";
- date = "2024-11-09";
- rev = "a90280100f41a10914edfe729a4053e60c92b8e3";
- tarballHash = "1vwr665b6l6gma24w45q5hic86vbd8alc01mziwwr621hwlca88f";
+ ref = "refs/heads/nixpkgs-unstable";
+ date = "2025-01-11";
+ rev = "32af3611f6f05655ca166a0b1f47b57c762b5192";
+ tarballHash = "0shknvd56nfqh4awklgsxwaavpfixgh766m428qdlxihjmmqvhbl";
}
diff --git a/nix/overlays/checked-shell-script/checked-shell-script.nix b/nix/overlays/checked-shell-script/checked-shell-script.nix
index 64297d19c4..df6f03d491 100644
--- a/nix/overlays/checked-shell-script/checked-shell-script.nix
+++ b/nix/overlays/checked-shell-script/checked-shell-script.nix
@@ -6,6 +6,7 @@
, coreutils
, git
, lib
+, moreutils
, runCommand
, shellcheck
, stdenv
@@ -56,7 +57,7 @@ let
# Example: This way `postgrest-watch -h` will return the help output for watch, while
# `postgrest-watch postgrest-test-spec -h` will return the help output for test-spec.
# Taken from: https://github.com/matejak/argbash/issues/114#issuecomment-557108274
- sed '/_positionals_count + 1/a\\t\t\t\tset -- "''${@:1:1}" "--" "''${@:2}"' -i $out
+ sed '/_positionals_count + 1/a\\t\t\t\tset -- "''${@:1:1}" "--" "''${@:2}"' $out | ${moreutils}/bin/sponge $out
'';
bash-completion =
@@ -66,7 +67,7 @@ let
''
+ lib.optionalString (positionalCompletion != "") ''
- sed 's#COMPREPLY.*compgen -o bashdefault .*$#${escape positionalCompletion}#' -i $out
+ sed 's#COMPREPLY.*compgen -o bashdefault .*$#${escape positionalCompletion}#' $out | ${moreutils}/bin/sponge $out
''
);
diff --git a/nix/overlays/default.nix b/nix/overlays/default.nix
index 18900f0a15..4a5f88643d 100644
--- a/nix/overlays/default.nix
+++ b/nix/overlays/default.nix
@@ -3,6 +3,5 @@
checked-shell-script = import ./checked-shell-script;
gitignore = import ./gitignore.nix;
haskell-packages = import ./haskell-packages.nix;
- postgresql-libpq = import ./postgresql-libpq.nix;
slocat = import ./slocat.nix;
}
diff --git a/nix/overlays/haskell-packages.nix b/nix/overlays/haskell-packages.nix
index 917ab2478a..65192b78f5 100644
--- a/nix/overlays/haskell-packages.nix
+++ b/nix/overlays/haskell-packages.nix
@@ -50,25 +50,36 @@ let
# jailbreak, because hspec limit for tests
fuzzyset = prev.fuzzyset_0_2_4;
- hasql-pool = lib.dontCheck (prev.callHackageDirect
+ postgresql-binary = prev.postgresql-binary_0_14;
+ hasql = prev.hasql_1_8_1_1;
+ hasql-dynamic-statements = prev.hasql-dynamic-statements_0_3_1_7;
+ hasql-implicits = prev.hasql-implicits_0_2;
+ hasql-pool = prev.hasql-pool_1_2_0_2;
+ hasql-transaction = prev.hasql-transaction_1_1_1_2;
+
+ hasql-notifications = lib.dontCheck (prev.callHackageDirect
{
- pkg = "hasql-pool";
- ver = "1.0.1";
- sha256 = "sha256-Hf1f7lX0LWkjrb25SDBovCYPRdmUP1H6pAxzi7kT4Gg=";
+ pkg = "hasql-notifications";
+ ver = "0.2.3.1";
+ sha256 = "sha256-vLLUBreUXLPACqzKun8a+Irew895/VydI1lKrnY/M1w=";
}
- { });
+ { }
+ );
jose-jwt = prev.jose-jwt_0_10_0;
- postgresql-libpq = lib.dontCheck (prev.callHackageDirect
+ postgresql-libpq = lib.overrideCabal (lib.dontCheck (prev.callHackageDirect
{
pkg = "postgresql-libpq";
ver = "0.10.1.0";
sha256 = "sha256-tXOMqCO8opMilI9rx0D+njqjIjbZsH168Bzb8Aq8Ff4=";
}
- {
- postgresql = super.libpq;
- });
+ { }
+ )) (drv: {
+ configureFlags = [ "-fuse-pkg-config" ];
+ libraryPkgconfigDepends = [ super.postgresql_16 ];
+ librarySystemDepends = [ ];
+ });
};
in
{
diff --git a/nix/overlays/postgresql-libpq.nix b/nix/overlays/postgresql-libpq.nix
deleted file mode 100644
index 2d1841e608..0000000000
--- a/nix/overlays/postgresql-libpq.nix
+++ /dev/null
@@ -1,6 +0,0 @@
-_: super:
-{
- libpq = super.callPackage ../libpq.nix {
- postgresql = super.postgresql_16;
- };
-}
diff --git a/nix/static.nix b/nix/static.nix
index 0e626d1410..b99dac9463 100644
--- a/nix/static.nix
+++ b/nix/static.nix
@@ -8,64 +8,22 @@ let
inherit (pkgs) pkgsStatic;
inherit (pkgsStatic.haskell) lib;
- packagesStatic =
- pkgsStatic.haskell.packages."${compiler}".override (old: {
- ghc = pkgsStatic.pkgsBuildHost.haskell.compiler."${compiler}".override {
- # Using the bundled libffi generally works better for cross-compiling
- libffi = null;
- # Building sphinx fails on some platforms
- enableDocs = false;
- # Cross compiling with native bignum works better than with gmp
- enableNativeBignum = true;
- };
-
- overrides = pkgs.lib.composeExtensions old.overrides (_: prev: {
- postgresql-libpq = (lib.overrideCabal prev.postgresql-libpq {
- # TODO: This section can be simplified when this PR has made it's way to us:
- # https://github.com/NixOS/nixpkgs/pull/286370
- # Additionally, we need to use the default version in nixpkgs, otherwise the
- # override will not be active as well.
- # Using use-pkg-config flag, because pg_config won't work when cross-compiling
- configureFlags = [ "-fuse-pkg-config" ];
- # postgresql doesn't build in the fully static overlay - but the default
- # derivation is built with static libraries anyway.
- libraryPkgconfigDepends = [ pkgsStatic.libpq ];
- librarySystemDepends = [ ];
- }).overrideAttrs (_: prevAttrs: {
- buildInputs = prevAttrs.buildInputs ++ [ pkgsStatic.openssl ];
- });
- });
- });
+ packagesStatic = pkgsStatic.haskell.packages.native-bignum."${compiler}";
makeExecutableStatic = drv: pkgs.lib.pipe drv [
lib.compose.justStaticExecutables
# To successfully compile a redistributable, fully static executable we need to:
- # 1. make executable really statically linked.
- # 2. avoid any references to /nix/store to prevent blowing up the closure size.
- # 3. be able to run the executable.
- # When checking for references, we ignore the following:
- # - eeee... are removed references which don't actually exist
- # - openssl-etc references are purposely designed to be very small
- (lib.compose.overrideCabal (drv: {
- postFixup = drv.postFixup + ''
- exe="$out/bin/postgrest"
-
- if ! (file "$exe" | grep 'statically linked') then
- echo "not a static executable, ldd output:"
- ldd "$exe"
- exit 1
- fi
-
- echo "Checking for references to /nix/store..."
- (${pkgsStatic.binutils}/bin/strings "$exe" \
- | grep -v /nix/store/eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee \
- | grep -v -etc/etc/ssl \
- | grep /nix/store || exit 0 && exit 1)
- echo "No references to /nix/store found"
-
- "$exe" --help
- '';
+ # 1. avoid any references to /nix/store to prevent blowing up the closure size.
+ # 2. be able to run the executable.
+ (drv: drv.overrideAttrs (finalAttrs: {
+ allowedReferences = [
+ pkgsStatic.openssl.etc
+ ];
+
+ passthru.tests.version = pkgsStatic.testers.testVersion {
+ package = finalAttrs.finalPackage;
+ };
}))
];
diff --git a/postgrest.cabal b/postgrest.cabal
index 092a98de82..90eaf172d6 100644
--- a/postgrest.cabal
+++ b/postgrest.cabal
@@ -85,7 +85,6 @@ library
PostgREST.ApiRequest.QueryParams
PostgREST.ApiRequest.Types
PostgREST.Response
- PostgREST.Response.OpenAPI
PostgREST.Response.GucHeader
PostgREST.Response.Performance
PostgREST.Version
@@ -109,12 +108,11 @@ library
, extra >= 1.7.0 && < 2.0
, fuzzyset >= 0.2.4 && < 0.3
, gitrev >= 1.2 && < 1.4
- , hasql >= 1.6.1.1 && < 1.7
+ , hasql >= 1.7 && < 1.9
, hasql-dynamic-statements >= 0.3.1 && < 0.4
, hasql-notifications >= 0.2.2.0 && < 0.3
- , hasql-pool >= 1.0.1 && < 1.1
- , hasql-transaction >= 1.0.1 && < 1.1
- , heredoc >= 0.2 && < 0.3
+ , hasql-pool >= 1.1 && < 1.3
+ , hasql-transaction >= 1.0.1 && < 1.2
, http-types >= 0.12.2 && < 0.13
, insert-ordered-containers >= 0.2.2 && < 0.3
, iproute >= 1.7.0 && < 1.8
@@ -122,7 +120,6 @@ library
, lens >= 4.14 && < 5.3
, lens-aeson >= 1.0.1 && < 1.3
, mtl >= 2.2.2 && < 2.4
- , neat-interpolation >= 0.5 && < 0.6
, network >= 2.6 && < 3.2
, network-uri >= 2.6.1 && < 2.8
, optparse-applicative >= 0.13 && < 0.19
@@ -134,7 +131,6 @@ library
, retry >= 0.7.4 && < 0.10
, scientific >= 0.3.4 && < 0.4
, streaming-commons >= 0.1.1 && < 0.3
- , swagger2 >= 2.4 && < 2.9
, text >= 1.2.2 && < 2.2
, time >= 1.6 && < 1.13
, timeit >= 2.0 && < 2.1
diff --git a/src/PostgREST/App.hs b/src/PostgREST/App.hs
index 99febebaca..38239beb4b 100644
--- a/src/PostgREST/App.hs
+++ b/src/PostgREST/App.hs
@@ -44,7 +44,6 @@ import PostgREST.ApiRequest (ApiRequest (..))
import PostgREST.AppState (AppState)
import PostgREST.Auth (AuthResult (..))
import PostgREST.Config (AppConfig (..), LogLevel (..))
-import PostgREST.Config.PgVersion (PgVersion (..))
import PostgREST.Error (Error)
import PostgREST.Network (resolveHost)
import PostgREST.Observation (Observation (..))
@@ -107,12 +106,11 @@ postgrest logLevel appState connWorker =
Right authResult -> do
appConf <- AppState.getConfig appState -- the config must be read again because it can reload
maybeSchemaCache <- AppState.getSchemaCache appState
- pgVer <- AppState.getPgVersion appState
let
eitherResponse :: IO (Either Error Wai.Response)
eitherResponse =
- runExceptT $ postgrestResponse appState appConf maybeSchemaCache pgVer authResult req
+ runExceptT $ postgrestResponse appState appConf maybeSchemaCache authResult req
response <- either Error.errorResponseFor identity <$> eitherResponse
-- Launch the connWorker when the connection is down. The postgrest
@@ -128,11 +126,10 @@ postgrestResponse
:: AppState.AppState
-> AppConfig
-> Maybe SchemaCache
- -> PgVersion
-> AuthResult
-> Wai.Request
-> Handler IO Wai.Response
-postgrestResponse appState conf@AppConfig{..} maybeSchemaCache pgVer authResult@AuthResult{..} req = do
+postgrestResponse appState conf@AppConfig{..} maybeSchemaCache authResult@AuthResult{..} req = do
sCache <-
case maybeSchemaCache of
Just sCache ->
@@ -146,7 +143,7 @@ postgrestResponse appState conf@AppConfig{..} maybeSchemaCache pgVer authResult@
(parseTime, apiReq@ApiRequest{..}) <- withTiming $ liftEither . mapLeft Error.ApiRequestError $ ApiRequest.userApiRequest conf req body sCache
(planTime, plan) <- withTiming $ liftEither $ Plan.actionPlan iAction conf apiReq sCache
- (queryTime, queryResult) <- withTiming $ Query.runQuery appState conf authResult apiReq plan sCache pgVer (Just authRole /= configDbAnonRole)
+ (queryTime, queryResult) <- withTiming $ Query.runQuery appState conf authResult apiReq plan sCache (Just authRole /= configDbAnonRole)
(respTime, resp) <- withTiming $ liftEither $ Response.actionResponse queryResult apiReq (T.decodeUtf8 prettyVersion, docsVersion) conf sCache iSchema iNegotiatedByProfile
return $ toWaiResponse (ServerTiming jwtTime parseTime planTime queryTime respTime) resp
diff --git a/src/PostgREST/AppState.hs b/src/PostgREST/AppState.hs
index c52c63d2a9..905c823b56 100644
--- a/src/PostgREST/AppState.hs
+++ b/src/PostgREST/AppState.hs
@@ -221,21 +221,30 @@ initPool AppConfig{..} observer = do
-- | Run an action with a database connection.
usePool :: AppState -> SQL.Session a -> IO (Either SQL.UsageError a)
usePool AppState{stateObserver=observer, stateMainThreadId=mainThreadId, ..} sess = do
- observer PoolRequest
+ observer PoolRequest
- res <- SQL.use statePool sess
+ res <- SQL.use statePool sess
- observer PoolRequestFullfilled
+ observer PoolRequestFullfilled
- whenLeft res (\case
- SQL.AcquisitionTimeoutUsageError ->
- observer $ PoolAcqTimeoutObs SQL.AcquisitionTimeoutUsageError
- err@(SQL.ConnectionUsageError e) ->
- let failureMessage = BS.unpack $ fromMaybe mempty e in
- when (("FATAL: password authentication failed" `isInfixOf` failureMessage) || ("no password supplied" `isInfixOf` failureMessage)) $ do
- observer $ ExitDBFatalError ServerAuthError err
- killThread mainThreadId
- err@(SQL.SessionUsageError (SQL.QueryError tpl _ (SQL.ResultError resultErr))) -> do
+ whenLeft res (\case
+ SQL.AcquisitionTimeoutUsageError ->
+ observer $ PoolAcqTimeoutObs SQL.AcquisitionTimeoutUsageError
+ err@(SQL.ConnectionUsageError e) ->
+ let failureMessage = BS.unpack $ fromMaybe mempty e in
+ when (("FATAL: password authentication failed" `isInfixOf` failureMessage) || ("no password supplied" `isInfixOf` failureMessage)) $ do
+ observer $ ExitDBFatalError ServerAuthError err
+ killThread mainThreadId
+ err@(SQL.SessionUsageError (SQL.QueryError tpl _ (SQL.ResultError resultErr))) -> handleResultError err tpl resultErr
+ -- Passing the empty template will not work for schema cache queries, see TODO further below.
+ err@(SQL.SessionUsageError (SQL.PipelineError (SQL.ResultError resultErr))) -> handleResultError err mempty resultErr
+ SQL.SessionUsageError (SQL.QueryError _ _ (SQL.ClientError _)) -> pure ()
+ SQL.SessionUsageError (SQL.PipelineError (SQL.ClientError _)) -> pure ()
+ )
+
+ return res
+ where
+ handleResultError err tpl resultErr = do
case resultErr of
SQL.UnexpectedResult{} -> do
observer $ ExitDBFatalError ServerPgrstBug err
@@ -268,11 +277,6 @@ usePool AppState{stateObserver=observer, stateMainThreadId=mainThreadId, ..} ses
SQL.ServerError{} ->
when (Error.status (Error.PgError False err) >= HTTP.status500) $
observer $ QueryErrorCodeHighObs err
- SQL.SessionUsageError (SQL.QueryError _ _ (SQL.ClientError _)) ->
- pure ()
- )
-
- return res
-- | Flush the connection pool so that any future use of the pool will
-- use connections freshly established after this call.
diff --git a/src/PostgREST/CLI.hs b/src/PostgREST/CLI.hs
index 0ba71144b8..252032bb79 100644
--- a/src/PostgREST/CLI.hs
+++ b/src/PostgREST/CLI.hs
@@ -1,5 +1,4 @@
{-# LANGUAGE NamedFieldPuns #-}
-{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE RecordWildCards #-}
module PostgREST.CLI
( main
@@ -11,11 +10,10 @@ module PostgREST.CLI
import qualified Data.Aeson as JSON
import qualified Data.ByteString.Char8 as BS
import qualified Data.ByteString.Lazy as LBS
+import qualified Data.String as S
import qualified Hasql.Transaction.Sessions as SQL
import qualified Options.Applicative as O
-import Text.Heredoc (str)
-
import PostgREST.AppState (AppState)
import PostgREST.Config (AppConfig (..))
import PostgREST.Observation (Observation (..))
@@ -124,112 +122,112 @@ readCLIShowHelp =
<> O.help "Dump loaded schema as JSON and exit (for debugging, output structure is unstable)"
exampleConfigFile :: [Char]
-exampleConfigFile =
- [str|## Admin server used for checks. It's disabled by default unless a port is specified.
- |# admin-server-port = 3001
- |
- |## The database role to use when no client authentication is provided
- |# db-anon-role = "anon"
- |
- |## Notification channel for reloading the schema cache
- |db-channel = "pgrst"
- |
- |## Enable or disable the notification channel
- |db-channel-enabled = true
- |
- |## Enable in-database configuration
- |db-config = true
- |
- |## Function for in-database configuration
- |## db-pre-config = "postgrest.pre_config"
- |
- |## Extra schemas to add to the search_path of every request
- |db-extra-search-path = "public"
- |
- |## Limit rows in response
- |# db-max-rows = 1000
- |
- |## Allow getting the EXPLAIN plan through the `Accept: application/vnd.pgrst.plan` header
- |# db-plan-enabled = false
- |
- |## Number of open connections in the pool
- |db-pool = 10
- |
- |## Time in seconds to wait to acquire a slot from the connection pool
- |# db-pool-acquisition-timeout = 10
- |
- |## Time in seconds after which to recycle pool connections
- |# db-pool-max-lifetime = 1800
- |
- |## Time in seconds after which to recycle unused pool connections
- |# db-pool-max-idletime = 30
- |
- |## Allow automatic database connection retrying
- |# db-pool-automatic-recovery = true
- |
- |## Stored proc to exec immediately after auth
- |# db-pre-request = "stored_proc_name"
- |
- |## Enable or disable prepared statements. disabling is only necessary when behind a connection pooler.
- |## When disabled, statements will be parametrized but won't be prepared.
- |db-prepared-statements = true
- |
- |## The name of which database schema to expose to REST clients
- |db-schemas = "public"
- |
- |## How to terminate database transactions
- |## Possible values are:
- |## commit (default)
- |## Transaction is always committed, this can not be overriden
- |## commit-allow-override
- |## Transaction is committed, but can be overriden with Prefer tx=rollback header
- |## rollback
- |## Transaction is always rolled back, this can not be overriden
- |## rollback-allow-override
- |## Transaction is rolled back, but can be overriden with Prefer tx=commit header
- |db-tx-end = "commit"
- |
- |## The standard connection URI format, documented at
- |## https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
- |db-uri = "postgresql://"
- |
- |# jwt-aud = "your_audience_claim"
- |
- |## Jspath to the role claim key
- |jwt-role-claim-key = ".role"
- |
- |## Choose a secret, JSON Web Key (or set) to enable JWT auth
- |## (use "@filename" to load from separate file)
- |# jwt-secret = "secret_with_at_least_32_characters"
- |jwt-secret-is-base64 = false
- |
- |## Enables and set JWT Cache max lifetime, disables caching with 0
- |# jwt-cache-max-lifetime = 0
- |
- |## Logging level, the admitted values are: crit, error, warn, info and debug.
- |log-level = "error"
- |
- |## Determine if the OpenAPI output should follow or ignore role privileges or be disabled entirely.
- |## Admitted values: follow-privileges, ignore-privileges, disabled
- |openapi-mode = "follow-privileges"
- |
- |## Base url for the OpenAPI output
- |openapi-server-proxy-uri = ""
- |
- |## Configurable CORS origins
- |# server-cors-allowed-origins = ""
- |
- |server-host = "!4"
- |server-port = 3000
- |
- |## Allow getting the request-response timing information through the `Server-Timing` header
- |server-timing-enabled = false
- |
- |## Unix socket location
- |## if specified it takes precedence over server-port
- |# server-unix-socket = "/tmp/pgrst.sock"
- |
- |## Unix socket file mode
- |## When none is provided, 660 is applied by default
- |# server-unix-socket-mode = "660"
- |]
+exampleConfigFile = S.unlines
+ [ "## Admin server used for checks. It's disabled by default unless a port is specified."
+ , "# admin-server-port = 3001"
+ , ""
+ , "## The database role to use when no client authentication is provided"
+ , "# db-anon-role = \"anon\""
+ , ""
+ , "## Notification channel for reloading the schema cache"
+ , "db-channel = \"pgrst\""
+ , ""
+ , "## Enable or disable the notification channel"
+ , "db-channel-enabled = true"
+ , ""
+ , "## Enable in-database configuration"
+ , "db-config = true"
+ , ""
+ , "## Function for in-database configuration"
+ , "## db-pre-config = \"postgrest.pre_config\""
+ , ""
+ , "## Extra schemas to add to the search_path of every request"
+ , "db-extra-search-path = \"public\""
+ , ""
+ , "## Limit rows in response"
+ , "# db-max-rows = 1000"
+ , ""
+ , "## Allow getting the EXPLAIN plan through the `Accept: application/vnd.pgrst.plan` header"
+ , "# db-plan-enabled = false"
+ , ""
+ , "## Number of open connections in the pool"
+ , "db-pool = 10"
+ , ""
+ , "## Time in seconds to wait to acquire a slot from the connection pool"
+ , "# db-pool-acquisition-timeout = 10"
+ , ""
+ , "## Time in seconds after which to recycle pool connections"
+ , "# db-pool-max-lifetime = 1800"
+ , ""
+ , "## Time in seconds after which to recycle unused pool connections"
+ , "# db-pool-max-idletime = 30"
+ , ""
+ , "## Allow automatic database connection retrying"
+ , "# db-pool-automatic-recovery = true"
+ , ""
+ , "## Stored proc to exec immediately after auth"
+ , "# db-pre-request = \"stored_proc_name\""
+ , ""
+ , "## Enable or disable prepared statements. disabling is only necessary when behind a connection pooler."
+ , "## When disabled, statements will be parametrized but won't be prepared."
+ , "db-prepared-statements = true"
+ , ""
+ , "## The name of which database schema to expose to REST clients"
+ , "db-schemas = \"public\""
+ , ""
+ , "## How to terminate database transactions"
+ , "## Possible values are:"
+ , "## commit (default)"
+ , "## Transaction is always committed, this can not be overriden"
+ , "## commit-allow-override"
+ , "## Transaction is committed, but can be overriden with Prefer tx=rollback header"
+ , "## rollback"
+ , "## Transaction is always rolled back, this can not be overriden"
+ , "## rollback-allow-override"
+ , "## Transaction is rolled back, but can be overriden with Prefer tx=commit header"
+ , "db-tx-end = \"commit\""
+ , ""
+ , "## The standard connection URI format, documented at"
+ , "## https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING"
+ , "db-uri = \"postgresql://\""
+ , ""
+ , "# jwt-aud = \"your_audience_claim\""
+ , ""
+ , "## Jspath to the role claim key"
+ , "jwt-role-claim-key = \".role\""
+ , ""
+ , "## Choose a secret, JSON Web Key (or set) to enable JWT auth"
+ , "## (use \"@filename\" to load from separate file)"
+ , "# jwt-secret = \"secret_with_at_least_32_characters\""
+ , "jwt-secret-is-base64 = false"
+ , ""
+ , "## Enables and set JWT Cache max lifetime, disables caching with 0"
+ , "# jwt-cache-max-lifetime = 0"
+ , ""
+ , "## Logging level, the admitted values are: crit, error, warn, info and debug."
+ , "log-level = \"error\""
+ , ""
+ , "## Determine if the OpenAPI output should follow or ignore role privileges or be disabled entirely."
+ , "## Admitted values: follow-privileges, ignore-privileges, disabled"
+ , "openapi-mode = \"follow-privileges\""
+ , ""
+ , "## Base url for the OpenAPI output"
+ , "openapi-server-proxy-uri = \"\""
+ , ""
+ , "## Configurable CORS origins"
+ , "# server-cors-allowed-origins = \"\""
+ , ""
+ , "server-host = \"!4\""
+ , "server-port = 3000"
+ , ""
+ , "## Allow getting the request-response timing information through the `Server-Timing` header"
+ , "server-timing-enabled = false"
+ , ""
+ , "## Unix socket location"
+ , "## if specified it takes precedence over server-port"
+ , "# server-unix-socket = \"/tmp/pgrst.sock\""
+ , ""
+ , "## Unix socket file mode"
+ , "## When none is provided, 660 is applied by default"
+ , "# server-unix-socket-mode = \"660\""
+ ]
diff --git a/src/PostgREST/Config/Database.hs b/src/PostgREST/Config/Database.hs
index aff4b5b8a3..55f5fe7687 100644
--- a/src/PostgREST/Config/Database.hs
+++ b/src/PostgREST/Config/Database.hs
@@ -1,5 +1,3 @@
-{-# LANGUAGE QuasiQuotes #-}
-
module PostgREST.Config.Database
( pgVersionStatement
, queryDbSettings
@@ -24,8 +22,6 @@ import qualified Hasql.Statement as SQL
import qualified Hasql.Transaction as SQL
import qualified Hasql.Transaction.Sessions as SQL
-import NeatInterpolation (trimming)
-
import Protolude
type RoleSettings = (HM.HashMap ByteString (HM.HashMap ByteString ByteString))
@@ -95,40 +91,40 @@ queryDbSettings preConfFunc prepared =
let transaction = if prepared then SQL.transaction else SQL.unpreparedTransaction in
transaction SQL.ReadCommitted SQL.Read $ SQL.statement dbSettingsNames $ SQL.Statement sql (arrayParam HE.text) decodeSettings prepared
where
- sql = encodeUtf8 [trimming|
- WITH
- role_setting AS (
- SELECT setdatabase as database,
- unnest(setconfig) as setting
- FROM pg_catalog.pg_db_role_setting
- WHERE setrole = CURRENT_USER::regrole::oid
- AND setdatabase IN (0, (SELECT oid FROM pg_catalog.pg_database WHERE datname = CURRENT_CATALOG))
- ),
- kv_settings AS (
- SELECT database,
- substr(setting, 1, strpos(setting, '=') - 1) as k,
- substr(setting, strpos(setting, '=') + 1) as v
- FROM role_setting
- ${preConfigF}
- )
- SELECT DISTINCT ON (key)
- replace(k, '${prefix}', '') AS key,
- v AS value
- FROM kv_settings
- WHERE k = ANY($$1) AND v IS NOT NULL
- ORDER BY key, database DESC NULLS LAST;
- |]
+ sql = encodeUtf8 $ unlines
+ [ "WITH"
+ , "role_setting AS ("
+ , " SELECT setdatabase as database,"
+ , " unnest(setconfig) as setting"
+ , " FROM pg_catalog.pg_db_role_setting"
+ , " WHERE setrole = CURRENT_USER::regrole::oid"
+ , " AND setdatabase IN (0, (SELECT oid FROM pg_catalog.pg_database WHERE datname = CURRENT_CATALOG))"
+ , "),"
+ , "kv_settings AS ("
+ , " SELECT database,"
+ , " substr(setting, 1, strpos(setting, '=') - 1) as k,"
+ , " substr(setting, strpos(setting, '=') + 1) as v"
+ , " FROM role_setting"
+ , preConfigF
+ , ")"
+ , "SELECT DISTINCT ON (key)"
+ , " replace(k, '" <> prefix <> "', '') AS key,"
+ , " v AS value"
+ , "FROM kv_settings"
+ , "WHERE k = ANY($1) AND v IS NOT NULL"
+ , "ORDER BY key, database DESC NULLS LAST;"
+ ]
preConfigF = case preConfFunc of
Nothing -> mempty
- Just func -> [trimming|
- UNION
- SELECT
- null as database,
- x as k,
- current_setting(x, true) as v
- FROM unnest($$1) x
- JOIN ${func}() _ ON TRUE
- |]::Text
+ Just func -> unlines
+ [ "UNION"
+ , "SELECT"
+ , " null as database,"
+ , " x as k,"
+ , " current_setting(x, true) as v"
+ , "FROM unnest($1) x"
+ , "JOIN " <> func <> "() _ ON TRUE"
+ ]
decodeSettings = HD.rowList $ (,) <$> column HD.text <*> column HD.text
queryRoleSettings :: PgVersion -> Bool -> Session (RoleSettings, RoleIsolationLvl)
@@ -136,35 +132,35 @@ queryRoleSettings pgVer prepared =
let transaction = if prepared then SQL.transaction else SQL.unpreparedTransaction in
transaction SQL.ReadCommitted SQL.Read $ SQL.statement mempty $ SQL.Statement sql HE.noParams (processRows <$> rows) prepared
where
- sql = encodeUtf8 [trimming|
- with
- role_setting as (
- select r.rolname, unnest(r.rolconfig) as setting
- from pg_auth_members m
- join pg_roles r on r.oid = m.roleid
- where member = current_user::regrole::oid
- ),
- kv_settings AS (
- SELECT
- rolname,
- substr(setting, 1, strpos(setting, '=') - 1) as key,
- lower(substr(setting, strpos(setting, '=') + 1)) as value
- FROM role_setting
- ),
- iso_setting AS (
- SELECT rolname, value
- FROM kv_settings
- WHERE key = 'default_transaction_isolation'
- )
- select
- kv.rolname,
- i.value as iso_lvl,
- coalesce(array_agg(row(kv.key, kv.value)) filter (where key <> 'default_transaction_isolation'), '{}') as role_settings
- from kv_settings kv
- join pg_settings ps on ps.name = kv.key and (ps.context = 'user' ${hasParameterPrivilege})
- left join iso_setting i on i.rolname = kv.rolname
- group by kv.rolname, i.value;
- |]
+ sql = encodeUtf8 $ unlines
+ [ "with"
+ , "role_setting as ("
+ , " select r.rolname, unnest(r.rolconfig) as setting"
+ , " from pg_auth_members m"
+ , " join pg_roles r on r.oid = m.roleid"
+ , " where member = current_user::regrole::oid"
+ , "),"
+ , "kv_settings AS ("
+ , " SELECT"
+ , " rolname,"
+ , " substr(setting, 1, strpos(setting, '=') - 1) as key,"
+ , " lower(substr(setting, strpos(setting, '=') + 1)) as value"
+ , " FROM role_setting"
+ , "),"
+ , "iso_setting AS ("
+ , " SELECT rolname, value"
+ , " FROM kv_settings"
+ , " WHERE key = 'default_transaction_isolation'"
+ , ")"
+ , "select"
+ , " kv.rolname,"
+ , " i.value as iso_lvl,"
+ , " coalesce(array_agg(row(kv.key, kv.value)) filter (where key <> 'default_transaction_isolation'), '{}') as role_settings"
+ , "from kv_settings kv"
+ , "join pg_settings ps on ps.name = kv.key and (ps.context = 'user' " <> hasParameterPrivilege <> ")"
+ , "left join iso_setting i on i.rolname = kv.rolname"
+ , "group by kv.rolname, i.value;"
+ ]
hasParameterPrivilege
| pgVer >= pgVersion150 = "or has_parameter_privilege(current_user::regrole::oid, ps.name, 'set')"
diff --git a/src/PostgREST/Config/PgVersion.hs b/src/PostgREST/Config/PgVersion.hs
index 273d629419..6db42e90b1 100644
--- a/src/PostgREST/Config/PgVersion.hs
+++ b/src/PostgREST/Config/PgVersion.hs
@@ -3,7 +3,6 @@
module PostgREST.Config.PgVersion
( PgVersion(..)
, minimumPgVersion
- , pgVersion130
, pgVersion140
, pgVersion150
, pgVersion170
@@ -26,10 +25,7 @@ instance Ord PgVersion where
-- | Tells the minimum PostgreSQL version required by this version of PostgREST
minimumPgVersion :: PgVersion
-minimumPgVersion = pgVersion121
-
-pgVersion121 :: PgVersion
-pgVersion121 = PgVersion 120001 "12.1" "12.1"
+minimumPgVersion = pgVersion130
pgVersion130 :: PgVersion
pgVersion130 = PgVersion 130000 "13.0" "13.0"
diff --git a/src/PostgREST/Error.hs b/src/PostgREST/Error.hs
index 8f386a8000..4b74c01c9f 100644
--- a/src/PostgREST/Error.hs
+++ b/src/PostgREST/Error.hs
@@ -434,7 +434,8 @@ instance JSON.ToJSON SQL.UsageError where
toJSON SQL.AcquisitionTimeoutUsageError = toJsonPgrstError
ConnectionErrorCode03 "Timed out acquiring connection from connection pool." Nothing Nothing
-instance JSON.ToJSON SQL.QueryError where
+instance JSON.ToJSON SQL.SessionError where
+ toJSON (SQL.PipelineError e) = JSON.toJSON e
toJSON (SQL.QueryError _ _ e) = JSON.toJSON e
instance JSON.ToJSON SQL.CommandError where
@@ -465,8 +466,13 @@ instance JSON.ToJSON SQL.CommandError where
pgErrorStatus :: Bool -> SQL.UsageError -> HTTP.Status
pgErrorStatus _ (SQL.ConnectionUsageError _) = HTTP.status503
pgErrorStatus _ SQL.AcquisitionTimeoutUsageError = HTTP.status504
+pgErrorStatus _ (SQL.SessionUsageError (SQL.PipelineError (SQL.ClientError _))) = HTTP.status503
pgErrorStatus _ (SQL.SessionUsageError (SQL.QueryError _ _ (SQL.ClientError _))) = HTTP.status503
-pgErrorStatus authed (SQL.SessionUsageError (SQL.QueryError _ _ (SQL.ResultError rError))) =
+pgErrorStatus authed (SQL.SessionUsageError (SQL.PipelineError (SQL.ResultError rError))) = mapSQLtoHTTP authed rError
+pgErrorStatus authed (SQL.SessionUsageError (SQL.QueryError _ _ (SQL.ResultError rError))) = mapSQLtoHTTP authed rError
+
+mapSQLtoHTTP :: Bool -> SQL.ResultError -> HTTP.Status
+mapSQLtoHTTP authed rError =
case rError of
(SQL.ServerError c m d _ _) ->
case BS.unpack c of
diff --git a/src/PostgREST/Metrics.hs b/src/PostgREST/Metrics.hs
index 3999e43d83..3bd4131267 100644
--- a/src/PostgREST/Metrics.hs
+++ b/src/PostgREST/Metrics.hs
@@ -38,7 +38,7 @@ observationMetrics (MetricsState poolTimeouts poolAvailable poolWaiting _ schema
(PoolAcqTimeoutObs _) -> do
incCounter poolTimeouts
(HasqlPoolObs (SQL.ConnectionObservation _ status)) -> case status of
- SQL.ReadyForUseConnectionStatus -> do
+ SQL.ReadyForUseConnectionStatus _ -> do
incGauge poolAvailable
SQL.InUseConnectionStatus -> do
decGauge poolAvailable
diff --git a/src/PostgREST/Observation.hs b/src/PostgREST/Observation.hs
index 18fbf558d7..e7e39c62a2 100644
--- a/src/PostgREST/Observation.hs
+++ b/src/PostgREST/Observation.hs
@@ -130,13 +130,17 @@ observationMessage = \case
"Connection " <> show uuid <> (
case status of
SQL.ConnectingConnectionStatus -> " is being established"
- SQL.ReadyForUseConnectionStatus -> " is available"
+ SQL.ReadyForUseConnectionStatus reason -> " is available due to " <> case reason of
+ SQL.EstablishedConnectionReadyForUseReason -> "connection establishment"
+ SQL.SessionFailedConnectionReadyForUseReason _ -> "session failure"
+ SQL.SessionSucceededConnectionReadyForUseReason -> "session success"
SQL.InUseConnectionStatus -> " is used"
SQL.TerminatedConnectionStatus reason -> " is terminated due to " <> case reason of
SQL.AgingConnectionTerminationReason -> "max lifetime"
SQL.IdlenessConnectionTerminationReason -> "max idletime"
SQL.ReleaseConnectionTerminationReason -> "release"
SQL.NetworkErrorConnectionTerminationReason _ -> "network error" -- usage error is already logged, no need to repeat the same message.
+ SQL.InitializationErrorTerminationReason _ -> "init failure"
)
_ -> mempty
where
diff --git a/src/PostgREST/Plan.hs b/src/PostgREST/Plan.hs
index 3a31d72a75..4b6ce496f9 100644
--- a/src/PostgREST/Plan.hs
+++ b/src/PostgREST/Plan.hs
@@ -66,7 +66,6 @@ import PostgREST.SchemaCache.Routine (MediaHandler (..),
Routine (..),
RoutineMap,
RoutineParam (..),
- funcReturnsCompositeAlias,
funcReturnsScalar,
funcReturnsSetOfScalar)
import PostgREST.SchemaCache.Table (Column (..), Table (..),
@@ -974,7 +973,6 @@ callPlan proc ApiRequest{} paramKeys args readReq = FunctionCall {
, funCArgs = args
, funCScalar = funcReturnsScalar proc
, funCSetOfScalar = funcReturnsSetOfScalar proc
-, funCRetCompositeAlias = funcReturnsCompositeAlias proc
, funCReturning = inferColsEmbedNeeds readReq []
}
where
diff --git a/src/PostgREST/Plan/CallPlan.hs b/src/PostgREST/Plan/CallPlan.hs
index 32ef34c359..472deefd75 100644
--- a/src/PostgREST/Plan/CallPlan.hs
+++ b/src/PostgREST/Plan/CallPlan.hs
@@ -19,13 +19,12 @@ import PostgREST.SchemaCache.Routine (Routine (..),
import Protolude
data CallPlan = FunctionCall
- { funCQi :: QualifiedIdentifier
- , funCParams :: CallParams
- , funCArgs :: CallArgs
- , funCScalar :: Bool
- , funCSetOfScalar :: Bool
- , funCRetCompositeAlias :: Bool
- , funCReturning :: [FieldName]
+ { funCQi :: QualifiedIdentifier
+ , funCParams :: CallParams
+ , funCArgs :: CallArgs
+ , funCScalar :: Bool
+ , funCSetOfScalar :: Bool
+ , funCReturning :: [FieldName]
}
data CallParams
diff --git a/src/PostgREST/Query.hs b/src/PostgREST/Query.hs
index 17c5854708..ea09bae6e8 100644
--- a/src/PostgREST/Query.hs
+++ b/src/PostgREST/Query.hs
@@ -40,7 +40,6 @@ import PostgREST.ApiRequest.Preferences (PreferCount (..),
import PostgREST.Auth (AuthResult (..))
import PostgREST.Config (AppConfig (..),
OpenAPIMode (..))
-import PostgREST.Config.PgVersion (PgVersion (..))
import PostgREST.Error (Error)
import PostgREST.MediaType (MediaType (..))
import PostgREST.Plan (ActionPlan (..),
@@ -74,9 +73,9 @@ data QueryResult
| NoDbResult InfoPlan
-- TODO This function needs to be free from IO, only App.hs should do IO
-runQuery :: AppState.AppState -> AppConfig -> AuthResult -> ApiRequest -> ActionPlan -> SchemaCache -> PgVersion -> Bool -> ExceptT Error IO QueryResult
-runQuery _ _ _ _ (NoDb x) _ _ _ = pure $ NoDbResult x
-runQuery appState config AuthResult{..} apiReq (Db plan) sCache pgVer authenticated = do
+runQuery :: AppState.AppState -> AppConfig -> AuthResult -> ApiRequest -> ActionPlan -> SchemaCache -> Bool -> ExceptT Error IO QueryResult
+runQuery _ _ _ _ (NoDb x) _ _ = pure $ NoDbResult x
+runQuery appState config AuthResult{..} apiReq (Db plan) sCache authenticated = do
dbResp <- lift $ do
let transaction = if prepared then SQL.transaction else SQL.unpreparedTransaction
AppState.usePool appState (transaction isoLvl txMode $ runExceptT dbHandler)
@@ -93,7 +92,7 @@ runQuery appState config AuthResult{..} apiReq (Db plan) sCache pgVer authentica
dbHandler = do
setPgLocals plan config authClaims authRole apiReq
runPreReq config
- actionQuery plan config apiReq pgVer sCache
+ actionQuery plan config apiReq sCache
planTxMode :: DbActionPlan -> SQL.Mode
planTxMode (DbCrud x) = pTxMode x
@@ -107,9 +106,9 @@ planIsoLvl AppConfig{configRoleIsoLvl} role actPlan = case actPlan of
where
roleIsoLvl = HM.findWithDefault SQL.ReadCommitted role configRoleIsoLvl
-actionQuery :: DbActionPlan -> AppConfig -> ApiRequest -> PgVersion -> SchemaCache -> DbHandler QueryResult
+actionQuery :: DbActionPlan -> AppConfig -> ApiRequest -> SchemaCache -> DbHandler QueryResult
-actionQuery (DbCrud plan@WrappedReadPlan{..}) conf@AppConfig{..} apiReq@ApiRequest{iPreferences=Preferences{..}} _ _ = do
+actionQuery (DbCrud plan@WrappedReadPlan{..}) conf@AppConfig{..} apiReq@ApiRequest{iPreferences=Preferences{..}} _ = do
let countQuery = QueryBuilder.readPlanToCountQuery wrReadPlan
resultSet <-
lift . SQL.statement mempty $
@@ -129,13 +128,13 @@ actionQuery (DbCrud plan@WrappedReadPlan{..}) conf@AppConfig{..} apiReq@ApiReque
optionalRollback conf apiReq
DbCrudResult plan <$> resultSetWTotal conf apiReq resultSet countQuery
-actionQuery (DbCrud plan@MutateReadPlan{mrMutation=MutationCreate, ..}) conf apiReq _ _ = do
+actionQuery (DbCrud plan@MutateReadPlan{mrMutation=MutationCreate, ..}) conf apiReq _ = do
resultSet <- writeQuery mrReadPlan mrMutatePlan mrMedia mrHandler apiReq conf
failNotSingular mrMedia resultSet
optionalRollback conf apiReq
pure $ DbCrudResult plan resultSet
-actionQuery (DbCrud plan@MutateReadPlan{mrMutation=MutationUpdate, ..}) conf apiReq@ApiRequest{iPreferences=Preferences{..}, ..} _ _ = do
+actionQuery (DbCrud plan@MutateReadPlan{mrMutation=MutationUpdate, ..}) conf apiReq@ApiRequest{iPreferences=Preferences{..}, ..} _ = do
resultSet <- writeQuery mrReadPlan mrMutatePlan mrMedia mrHandler apiReq conf
failNotSingular mrMedia resultSet
failExceedsMaxAffectedPref (preferMaxAffected,preferHandling) resultSet
@@ -143,13 +142,13 @@ actionQuery (DbCrud plan@MutateReadPlan{mrMutation=MutationUpdate, ..}) conf api
optionalRollback conf apiReq
pure $ DbCrudResult plan resultSet
-actionQuery (DbCrud plan@MutateReadPlan{mrMutation=MutationSingleUpsert, ..}) conf apiReq _ _ = do
+actionQuery (DbCrud plan@MutateReadPlan{mrMutation=MutationSingleUpsert, ..}) conf apiReq _ = do
resultSet <- writeQuery mrReadPlan mrMutatePlan mrMedia mrHandler apiReq conf
failPut resultSet
optionalRollback conf apiReq
pure $ DbCrudResult plan resultSet
-actionQuery (DbCrud plan@MutateReadPlan{mrMutation=MutationDelete, ..}) conf apiReq@ApiRequest{iPreferences=Preferences{..}, ..} _ _ = do
+actionQuery (DbCrud plan@MutateReadPlan{mrMutation=MutationDelete, ..}) conf apiReq@ApiRequest{iPreferences=Preferences{..}, ..} _ = do
resultSet <- writeQuery mrReadPlan mrMutatePlan mrMedia mrHandler apiReq conf
failNotSingular mrMedia resultSet
failExceedsMaxAffectedPref (preferMaxAffected,preferHandling) resultSet
@@ -157,12 +156,12 @@ actionQuery (DbCrud plan@MutateReadPlan{mrMutation=MutationDelete, ..}) conf api
optionalRollback conf apiReq
pure $ DbCrudResult plan resultSet
-actionQuery (DbCall plan@CallReadPlan{..}) conf@AppConfig{..} apiReq@ApiRequest{iPreferences=Preferences{..}} pgVer _ = do
+actionQuery (DbCall plan@CallReadPlan{..}) conf@AppConfig{..} apiReq@ApiRequest{iPreferences=Preferences{..}} _ = do
resultSet <-
lift . SQL.statement mempty $
Statements.prepareCall
crProc
- (QueryBuilder.callPlanToQuery crCallPlan pgVer)
+ (QueryBuilder.callPlanToQuery crCallPlan)
(QueryBuilder.readPlanToQuery crReadPlan)
(QueryBuilder.readPlanToCountQuery crReadPlan)
(shouldCount preferCount)
@@ -175,7 +174,7 @@ actionQuery (DbCall plan@CallReadPlan{..}) conf@AppConfig{..} apiReq@ApiRequest{
failExceedsMaxAffectedPref (preferMaxAffected,preferHandling) resultSet
pure $ DbCallResult plan resultSet
-actionQuery (MaybeDb plan@InspectPlan{ipSchema=tSchema}) AppConfig{..} _ _ sCache =
+actionQuery (MaybeDb plan@InspectPlan{ipSchema=tSchema}) AppConfig{..} _ sCache =
lift $ case configOpenApiMode of
OAFollowPriv -> do
tableAccess <- SQL.statement [tSchema] (SchemaCache.accessibleTables configDbPreparedStatements)
diff --git a/src/PostgREST/Query/QueryBuilder.hs b/src/PostgREST/Query/QueryBuilder.hs
index 602ae27ef1..bb8ae0723c 100644
--- a/src/PostgREST/Query/QueryBuilder.hs
+++ b/src/PostgREST/Query/QueryBuilder.hs
@@ -27,7 +27,6 @@ import Data.Maybe (fromJust)
import Data.Tree (Tree (..))
import PostgREST.ApiRequest.Preferences (PreferResolution (..))
-import PostgREST.Config.PgVersion (PgVersion, pgVersion130)
import PostgREST.SchemaCache.Identifiers (QualifiedIdentifier (..))
import PostgREST.SchemaCache.Relationship (Cardinality (..),
Junction (..),
@@ -193,8 +192,8 @@ mutatePlanToQuery (Delete mainQi logicForest range ordts returnings)
whereLogic = if null logicForest then mempty else " WHERE " <> intercalateSnippet " AND " (pgFmtLogicTree mainQi <$> logicForest)
(whereRangeIdF, rangeIdF) = mutRangeF mainQi (cfName . coField <$> ordts)
-callPlanToQuery :: CallPlan -> PgVersion -> SQL.Snippet
-callPlanToQuery (FunctionCall qi params arguments returnsScalar returnsSetOfScalar returnsCompositeAlias returnings) pgVer =
+callPlanToQuery :: CallPlan -> SQL.Snippet
+callPlanToQuery (FunctionCall qi params arguments returnsScalar returnsSetOfScalar returnings) =
"SELECT " <> (if returnsScalar || returnsSetOfScalar then "pgrst_call.pgrst_scalar" else returnedColumns) <> " " <>
fromCall
where
@@ -210,9 +209,8 @@ callPlanToQuery (FunctionCall qi params arguments returnsScalar returnsSetOfScal
"LATERAL " <> callIt (fmtParams prms)
callIt :: SQL.Snippet -> SQL.Snippet
- callIt argument | pgVer < pgVersion130 && returnsCompositeAlias = "(SELECT (" <> fromQi qi <> "(" <> argument <> ")).*) pgrst_call"
- | returnsScalar || returnsSetOfScalar = "(SELECT " <> fromQi qi <> "(" <> argument <> ") pgrst_scalar) pgrst_call"
- | otherwise = fromQi qi <> "(" <> argument <> ") pgrst_call"
+ callIt argument | returnsScalar || returnsSetOfScalar = "(SELECT " <> fromQi qi <> "(" <> argument <> ") pgrst_scalar) pgrst_call"
+ | otherwise = fromQi qi <> "(" <> argument <> ") pgrst_call"
fmtParams :: [RoutineParam] -> SQL.Snippet
fmtParams prms = intercalateSnippet ", "
diff --git a/src/PostgREST/Query/SqlFragment.hs b/src/PostgREST/Query/SqlFragment.hs
index fa79a564b8..db5d241360 100644
--- a/src/PostgREST/Query/SqlFragment.hs
+++ b/src/PostgREST/Query/SqlFragment.hs
@@ -1,6 +1,5 @@
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NamedFieldPuns #-}
-{-# LANGUAGE QuasiQuotes #-}
{-|
Module : PostgREST.Query.SqlFragment
Description : Helper functions for PostgREST.QueryBuilder.
@@ -54,7 +53,6 @@ import qualified Hasql.Encoders as HE
import Control.Arrow ((***))
import Data.Foldable (foldr1)
-import NeatInterpolation (trimming)
import PostgREST.ApiRequest.Types (AggregateFunction (..),
Alias, Cast,
@@ -229,12 +227,14 @@ customFuncF _ funcQi RelAnyElement = fromQi funcQi <> "(_postgrest_t)
customFuncF _ funcQi (RelId target) = fromQi funcQi <> "(_postgrest_t::" <> fromQi target <> ")"
locationF :: [Text] -> SQL.Snippet
-locationF pKeys = SQL.sql $ encodeUtf8 [trimming|(
- WITH data AS (SELECT row_to_json(_) AS row FROM ${sourceCTEName} AS _ LIMIT 1)
- SELECT array_agg(json_data.key || '=' || coalesce('eq.' || json_data.value, 'is.null'))
- FROM data CROSS JOIN json_each_text(data.row) AS json_data
- WHERE json_data.key IN ('${fmtPKeys}')
-)|]
+locationF pKeys = SQL.sql $ encodeUtf8 $ unlines
+ [ "("
+ , " WITH data AS (SELECT row_to_json(_) AS row FROM " <> sourceCTEName <> " AS _ LIMIT 1)"
+ , " SELECT array_agg(json_data.key || '=' || coalesce('eq.' || json_data.value, 'is.null'))"
+ , " FROM data CROSS JOIN json_each_text(data.row) AS json_data"
+ , " WHERE json_data.key IN ('" <> fmtPKeys <> "')"
+ , ")"
+ ]
where
fmtPKeys = T.intercalate "','" pKeys
diff --git a/src/PostgREST/Response.hs b/src/PostgREST/Response.hs
index 68a68c4b8e..f04c09f326 100644
--- a/src/PostgREST/Response.hs
+++ b/src/PostgREST/Response.hs
@@ -22,7 +22,6 @@ import qualified Network.HTTP.Types.URI as HTTP
import qualified PostgREST.Error as Error
import qualified PostgREST.MediaType as MediaType
import qualified PostgREST.RangeQuery as RangeQuery
-import qualified PostgREST.Response.OpenAPI as OpenAPI
import PostgREST.ApiRequest (ApiRequest (..),
InvokeMethod (..),
@@ -224,10 +223,10 @@ actionResponse (DbCallResult CallReadPlan{crMedia, crInvMthd=invMethod, crProc=p
RSPlan plan ->
Right $ PgrstResponse HTTP.status200 (contentTypeHeaders crMedia ctxApiRequest) $ LBS.fromStrict plan
-actionResponse (MaybeDbResult InspectPlan{ipHdrsOnly=headersOnly} body) _ versions conf sCache schema negotiatedByProfile =
+actionResponse (MaybeDbResult InspectPlan{} _) _ _ _ _ schema negotiatedByProfile =
Right $ PgrstResponse HTTP.status200
(MediaType.toContentType MTOpenAPI : maybeToList (profileHeader schema negotiatedByProfile))
- (maybe mempty (\(x, y, z) -> if headersOnly then mempty else OpenAPI.encode versions conf sCache x y z) body)
+ mempty
actionResponse (NoDbResult (RelInfoPlan identifier)) _ _ _ sCache _ _ =
case HM.lookup identifier (dbTables sCache) of
diff --git a/src/PostgREST/Response/OpenAPI.hs b/src/PostgREST/Response/OpenAPI.hs
deleted file mode 100644
index fe40e3c0f2..0000000000
--- a/src/PostgREST/Response/OpenAPI.hs
+++ /dev/null
@@ -1,454 +0,0 @@
-{-|
-Module : PostgREST.OpenAPI
-Description : Generates the OpenAPI output
--}
-{-# LANGUAGE LambdaCase #-}
-{-# LANGUAGE RecordWildCards #-}
-module PostgREST.Response.OpenAPI (encode) where
-
-import qualified Data.Aeson as JSON
-import qualified Data.ByteString.Char8 as BS
-import qualified Data.ByteString.Lazy as LBS
-import qualified Data.HashMap.Strict as HM
-import qualified Data.HashSet.InsOrd as Set
-import qualified Data.Text as T
-
-import Control.Arrow ((&&&))
-import Data.HashMap.Strict.InsOrd (InsOrdHashMap, fromList)
-import Data.Maybe (fromJust)
-import Data.String (IsString (..))
-import Network.URI (URI (..), URIAuth (..))
-
-import Control.Lens (at, (.~), (?~))
-
-import Data.Swagger
-
-import PostgREST.Config (AppConfig (..), Proxy (..),
- isMalformedProxyUri, toURI)
-import PostgREST.SchemaCache (SchemaCache (..))
-import PostgREST.SchemaCache.Identifiers (QualifiedIdentifier (..))
-import PostgREST.SchemaCache.Relationship (Cardinality (..),
- Relationship (..),
- RelationshipsMap)
-import PostgREST.SchemaCache.Routine (Routine (..),
- RoutineParam (..))
-import PostgREST.SchemaCache.Table (Column (..), Table (..),
- TablesMap,
- tableColumnsList)
-
-import PostgREST.MediaType
-
-import Protolude hiding (Proxy, get)
-
-encode :: (Text, Text) -> AppConfig -> SchemaCache -> TablesMap -> HM.HashMap k [Routine] -> Maybe Text -> LBS.ByteString
-encode versions conf sCache tables procs schemaDescription =
- JSON.encode $
- postgrestSpec
- versions
- (dbRelationships sCache)
- (concat $ HM.elems procs)
- (snd <$> HM.toList tables)
- (proxyUri conf)
- schemaDescription
- (configOpenApiSecurityActive conf)
-
-makeMimeList :: [MediaType] -> MimeList
-makeMimeList cs = MimeList $ fmap (fromString . BS.unpack . toMime) cs
-
-toSwaggerType :: Text -> Maybe (SwaggerType t)
-toSwaggerType "character varying" = Just SwaggerString
-toSwaggerType "character" = Just SwaggerString
-toSwaggerType "text" = Just SwaggerString
-toSwaggerType "boolean" = Just SwaggerBoolean
-toSwaggerType "smallint" = Just SwaggerInteger
-toSwaggerType "integer" = Just SwaggerInteger
-toSwaggerType "bigint" = Just SwaggerInteger
-toSwaggerType "numeric" = Just SwaggerNumber
-toSwaggerType "real" = Just SwaggerNumber
-toSwaggerType "double precision" = Just SwaggerNumber
-toSwaggerType "json" = Nothing
-toSwaggerType "jsonb" = Nothing
-toSwaggerType colType = case T.takeEnd 2 colType of
- "[]" -> Just SwaggerArray
- _ -> Just SwaggerString
-
-typeFromArray :: Text -> Text
-typeFromArray = T.dropEnd 2
-
-toSwaggerTypeFromArray :: Text -> Maybe (SwaggerType t)
-toSwaggerTypeFromArray arrType = toSwaggerType $ typeFromArray arrType
-
-makePropertyItems :: Text -> Maybe (Referenced Schema)
-makePropertyItems arrType = case toSwaggerType arrType of
- Just SwaggerArray -> Just $ Inline (mempty & type_ .~ toSwaggerTypeFromArray arrType)
- _ -> Nothing
-
-parseDefault :: Text -> Text -> Text
-parseDefault colType colDefault =
- case toSwaggerType colType of
- Just SwaggerString -> wrapInQuotations $ case T.stripSuffix ("::" <> colType) colDefault of
- Just def -> T.dropAround (=='\'') def
- Nothing -> colDefault
- _ -> colDefault
- where
- wrapInQuotations text = "\"" <> text <> "\""
-
-makeTableDef :: RelationshipsMap -> Table -> (Text, Schema)
-makeTableDef rels t =
- let tn = tableName t in
- (tn, (mempty :: Schema)
- & description .~ tableDescription t
- & type_ ?~ SwaggerObject
- & properties .~ fromList (makeProperty t rels <$> tableColumnsList t)
- & required .~ fmap colName (filter (not . colNullable) $ tableColumnsList t))
-
-makeProperty :: Table -> RelationshipsMap -> Column -> (Text, Referenced Schema)
-makeProperty tbl rels col = (colName col, Inline s)
- where
- e = if null $ colEnum col then Nothing else JSON.decode $ JSON.encode $ colEnum col
- fk :: Maybe Text
- fk =
- let
- searchedRels = fromMaybe mempty $ HM.lookup (QualifiedIdentifier (tableSchema tbl) (tableName tbl), tableSchema tbl) rels
- -- Sorts the relationship list to get tables first
- relsSortedByIsView = sortOn relFTableIsView [ r | r@Relationship{} <- searchedRels]
- -- Finds the relationship that has a single column foreign key
- rel = find (\case
- Relationship{relCardinality=(M2O _ relColumns)} -> [colName col] == (fst <$> relColumns)
- Relationship{relCardinality=(O2O _ relColumns False)} -> [colName col] == (fst <$> relColumns)
- _ -> False
- ) relsSortedByIsView
- fCol = (headMay . (\r -> snd <$> relColumns (relCardinality r)) =<< rel)
- fTbl = qiName . relForeignTable <$> rel
- fTblCol = (,) <$> fTbl <*> fCol
- in
- (\(a, b) -> T.intercalate "" ["This is a Foreign Key to `", a, ".", b, "`."]) <$> fTblCol
- pk :: Bool
- pk = colName col `elem` tablePKCols tbl
- n = catMaybes
- [ Just "Note:"
- , if pk then Just "This is a Primary Key." else Nothing
- , fk
- ]
- d =
- if length n > 1 then
- Just $ T.append (maybe "" (`T.append` "\n\n") $ colDescription col) (T.intercalate "\n" n)
- else
- colDescription col
- s =
- (mempty :: Schema)
- & default_ .~ (JSON.decode . toUtf8Lazy . parseDefault (colType col) =<< colDefault col)
- & description .~ d
- & enum_ .~ e
- & format ?~ colType col
- & maxLength .~ (fromIntegral <$> colMaxLen col)
- & type_ .~ toSwaggerType (colType col)
- & items .~ (SwaggerItemsObject <$> makePropertyItems (colType col))
-
-makeProcSchema :: Routine -> Schema
-makeProcSchema pd =
- (mempty :: Schema)
- & description .~ pdDescription pd
- & type_ ?~ SwaggerObject
- & properties .~ fromList (fmap makeProcProperty (pdParams pd))
- & required .~ fmap ppName (filter ppReq (pdParams pd))
-
-makeProcProperty :: RoutineParam -> (Text, Referenced Schema)
-makeProcProperty (RoutineParam n t _ _ _) = (n, Inline s)
- where
- s = (mempty :: Schema)
- & type_ .~ toSwaggerType t
- & items .~ (SwaggerItemsObject <$> makePropertyItems t)
- & format ?~ t
-
-makePreferParam :: [Text] -> Param
-makePreferParam ts =
- (mempty :: Param)
- & name .~ "Prefer"
- & description ?~ "Preference"
- & required ?~ False
- & schema .~ ParamOther ((mempty :: ParamOtherSchema)
- & in_ .~ ParamHeader
- & type_ ?~ SwaggerString
- & enum_ .~ JSON.decode (JSON.encode $ foldl (<>) [] (val <$> ts)))
- where
- val :: Text -> [Text]
- val = \case
- "count" -> ["count=none"]
- "return" -> ["return=representation", "return=minimal", "return=none"]
- "resolution" -> ["resolution=ignore-duplicates", "resolution=merge-duplicates"]
- _ -> []
-
-makeProcGetParam :: RoutineParam -> Referenced Param
-makeProcGetParam (RoutineParam n t _ r v) =
- Inline $ (mempty :: Param)
- & name .~ n
- & required ?~ r
- & schema .~ ParamOther fullSchema
- where
- fullSchema = if v then schemaMulti else schemaNotMulti
- baseSchema = (mempty :: ParamOtherSchema)
- & in_ .~ ParamQuery
- schemaNotMulti = baseSchema
- & format ?~ t
- & type_ ?~ toParamType (toSwaggerType t)
- schemaMulti = baseSchema
- & type_ ?~ fromMaybe SwaggerString (toSwaggerType t)
- & items ?~ SwaggerItemsPrimitive (Just CollectionMulti)
- ((mempty :: ParamSchema x)
- & type_ .~ toSwaggerTypeFromArray t
- & format ?~ typeFromArray t)
- toParamType paramType = case paramType of
- -- Array uses {} in query params
- Just SwaggerArray -> SwaggerString
- -- Type must be specified in query params
- Nothing -> SwaggerString
- _ -> fromJust paramType
-
-makeProcGetParams :: [RoutineParam] -> [Referenced Param]
-makeProcGetParams = fmap makeProcGetParam
-
-makeProcPostParams :: Routine -> [Referenced Param]
-makeProcPostParams pd =
- [ Inline $ (mempty :: Param)
- & name .~ "args"
- & required ?~ True
- & schema .~ ParamBody (Inline $ makeProcSchema pd)
- , Ref $ Reference "preferParams"
- ]
-
-makeParamDefs :: [Table] -> [(Text, Param)]
-makeParamDefs ti =
- -- TODO: create Prefer for each method (GET, PATCH, etc.)
- [ ("preferParams", makePreferParam ["params"])
- , ("preferReturn", makePreferParam ["return"])
- , ("preferCount", makePreferParam ["count"])
- , ("preferPost", makePreferParam ["return", "resolution"])
- , ("select", (mempty :: Param)
- & name .~ "select"
- & description ?~ "Filtering Columns"
- & required ?~ False
- & schema .~ ParamOther ((mempty :: ParamOtherSchema)
- & in_ .~ ParamQuery
- & type_ ?~ SwaggerString))
- , ("on_conflict", (mempty :: Param)
- & name .~ "on_conflict"
- & description ?~ "On Conflict"
- & required ?~ False
- & schema .~ ParamOther ((mempty :: ParamOtherSchema)
- & in_ .~ ParamQuery
- & type_ ?~ SwaggerString))
- , ("order", (mempty :: Param)
- & name .~ "order"
- & description ?~ "Ordering"
- & required ?~ False
- & schema .~ ParamOther ((mempty :: ParamOtherSchema)
- & in_ .~ ParamQuery
- & type_ ?~ SwaggerString))
- , ("range", (mempty :: Param)
- & name .~ "Range"
- & description ?~ "Limiting and Pagination"
- & required ?~ False
- & schema .~ ParamOther ((mempty :: ParamOtherSchema)
- & in_ .~ ParamHeader
- & type_ ?~ SwaggerString))
- , ("rangeUnit", (mempty :: Param)
- & name .~ "Range-Unit"
- & description ?~ "Limiting and Pagination"
- & required ?~ False
- & schema .~ ParamOther ((mempty :: ParamOtherSchema)
- & in_ .~ ParamHeader
- & type_ ?~ SwaggerString
- & default_ .~ JSON.decode "\"items\""))
- , ("offset", (mempty :: Param)
- & name .~ "offset"
- & description ?~ "Limiting and Pagination"
- & required ?~ False
- & schema .~ ParamOther ((mempty :: ParamOtherSchema)
- & in_ .~ ParamQuery
- & type_ ?~ SwaggerString))
- , ("limit", (mempty :: Param)
- & name .~ "limit"
- & description ?~ "Limiting and Pagination"
- & required ?~ False
- & schema .~ ParamOther ((mempty :: ParamOtherSchema)
- & in_ .~ ParamQuery
- & type_ ?~ SwaggerString))
- ]
- <> concat [ makeObjectBody (tableName t) : makeRowFilters (tableName t) (tableColumnsList t)
- | t <- ti
- ]
-
-makeObjectBody :: Text -> (Text, Param)
-makeObjectBody tn =
- ("body." <> tn, (mempty :: Param)
- & name .~ tn
- & description ?~ tn
- & required ?~ False
- & schema .~ ParamBody (Ref (Reference tn)))
-
-makeRowFilter :: Text -> Column -> (Text, Param)
-makeRowFilter tn c =
- (T.intercalate "." ["rowFilter", tn, colName c], (mempty :: Param)
- & name .~ colName c
- & description .~ colDescription c
- & required ?~ False
- & schema .~ ParamOther ((mempty :: ParamOtherSchema)
- & in_ .~ ParamQuery
- & type_ ?~ SwaggerString))
-
-makeRowFilters :: Text -> [Column] -> [(Text, Param)]
-makeRowFilters tn = fmap (makeRowFilter tn)
-
-makePathItem :: Table -> (FilePath, PathItem)
-makePathItem t = ("/" ++ T.unpack tn, p $ tableInsertable t || tableUpdatable t || tableDeletable t)
- where
- -- Use first line of table description as summary; rest as description (if present)
- -- We strip leading newlines from description so that users can include a blank line between summary and description
- (tSum, tDesc) = fmap fst &&& fmap (T.dropWhile (=='\n') . snd) $
- T.breakOn "\n" <$> tableDescription t
- tOp = (mempty :: Operation)
- & tags .~ Set.fromList [tn]
- & summary .~ tSum
- & description .~ mfilter (/="") tDesc
- getOp = tOp
- & parameters .~ fmap ref (rs <> ["select", "order", "range", "rangeUnit", "offset", "limit", "preferCount"])
- & at 206 ?~ "Partial Content"
- & at 200 ?~ Inline ((mempty :: Response)
- & description .~ "OK"
- & schema ?~ Inline (mempty
- & type_ ?~ SwaggerArray
- & items ?~ SwaggerItemsObject (Ref $ Reference $ tableName t)
- )
- )
- postOp = tOp
- & parameters .~ fmap ref ["body." <> tn, "select", "preferPost"]
- & at 201 ?~ "Created"
- patchOp = tOp
- & parameters .~ fmap ref (rs <> ["body." <> tn, "preferReturn"])
- & at 204 ?~ "No Content"
- deletOp = tOp
- & parameters .~ fmap ref (rs <> ["preferReturn"])
- & at 204 ?~ "No Content"
- pr = (mempty :: PathItem) & get ?~ getOp
- pw = pr & post ?~ postOp & patch ?~ patchOp & delete ?~ deletOp
- p False = pr
- p True = pw
- tn = tableName t
- rs = [ T.intercalate "." ["rowFilter", tn, colName c ] | c <- tableColumnsList t ]
- ref = Ref . Reference
-
-makeProcPathItem :: Routine -> (FilePath, PathItem)
-makeProcPathItem pd = ("/rpc/" ++ toS (pdName pd), pe)
- where
- -- Use first line of proc description as summary; rest as description (if present)
- -- We strip leading newlines from description so that users can include a blank line between summary and description
- (pSum, pDesc) = fmap fst &&& fmap (T.dropWhile (=='\n') . snd) $
- T.breakOn "\n" <$> pdDescription pd
- procOp = (mempty :: Operation)
- & summary .~ pSum
- & description .~ mfilter (/="") pDesc
- & tags .~ Set.fromList ["(rpc) " <> pdName pd]
- & produces ?~ makeMimeList [MTApplicationJSON, MTVndSingularJSON True, MTVndSingularJSON False]
- & at 200 ?~ "OK"
- getOp = procOp
- & parameters .~ makeProcGetParams (pdParams pd)
- postOp = procOp
- & parameters .~ makeProcPostParams pd
- pe = (mempty :: PathItem)
- & get ?~ getOp
- & post ?~ postOp
-
-makeRootPathItem :: (FilePath, PathItem)
-makeRootPathItem = ("/", p)
- where
- getOp = (mempty :: Operation)
- & tags .~ Set.fromList ["Introspection"]
- & summary ?~ "OpenAPI description (this document)"
- & produces ?~ makeMimeList [MTOpenAPI, MTApplicationJSON]
- & at 200 ?~ "OK"
- pr = (mempty :: PathItem) & get ?~ getOp
- p = pr
-
-makePathItems :: [Routine] -> [Table] -> InsOrdHashMap FilePath PathItem
-makePathItems pds ti = fromList $ makeRootPathItem :
- fmap makePathItem ti ++ fmap makeProcPathItem pds
-
-makeSecurityDefinitions :: Text -> Bool -> SecurityDefinitions
-makeSecurityDefinitions secName allow
- | allow = SecurityDefinitions (fromList [(secName, SecurityScheme secSchType secSchDescription)])
- | otherwise = mempty
- where
- secSchType = SecuritySchemeApiKey (ApiKeyParams "Authorization" ApiKeyHeader)
- secSchDescription = Just "Add the token prepending \"Bearer \" (without quotes) to it"
-
-escapeHostName :: Text -> Text
-escapeHostName "*" = "0.0.0.0"
-escapeHostName "*4" = "0.0.0.0"
-escapeHostName "!4" = "0.0.0.0"
-escapeHostName "*6" = "0.0.0.0"
-escapeHostName "!6" = "0.0.0.0"
-escapeHostName h = h
-
-postgrestSpec :: (Text, Text) -> RelationshipsMap -> [Routine] -> [Table] -> (Text, Text, Integer, Text) -> Maybe Text -> Bool -> Swagger
-postgrestSpec (prettyVersion, docsVersion) rels pds ti (s, h, p, b) sd allowSecurityDef = (mempty :: Swagger)
- & basePath ?~ T.unpack b
- & schemes ?~ [s']
- & info .~ ((mempty :: Info)
- & version .~ prettyVersion
- & title .~ fromMaybe "PostgREST API" dTitle
- & description ?~ fromMaybe "This is a dynamic API generated by PostgREST" dDesc)
- & externalDocs ?~ ((mempty :: ExternalDocs)
- & description ?~ "PostgREST Documentation"
- & url .~ URL ("https://postgrest.org/en/" <> docsVersion <> "/references/api.html"))
- & host .~ h'
- & definitions .~ fromList (makeTableDef rels <$> ti)
- & parameters .~ fromList (makeParamDefs ti)
- & paths .~ makePathItems pds ti
- & produces .~ makeMimeList [MTApplicationJSON, MTVndSingularJSON True, MTVndSingularJSON False, MTTextCSV]
- & consumes .~ makeMimeList [MTApplicationJSON, MTVndSingularJSON True, MTVndSingularJSON False, MTTextCSV]
- & securityDefinitions .~ makeSecurityDefinitions securityDefName allowSecurityDef
- & security .~ [SecurityRequirement (fromList [(securityDefName, [])]) | allowSecurityDef]
- where
- s' = if s == "http" then Http else Https
- h' = Just $ Host (T.unpack $ escapeHostName h) (Just (fromInteger p))
- securityDefName = "JWT"
- (dTitle, dDesc) = fmap fst &&& fmap (T.dropWhile (=='\n') . snd) $
- T.breakOn "\n" <$> sd
-
-pickProxy :: Maybe Text -> Maybe Proxy
-pickProxy proxy
- | isNothing proxy = Nothing
- -- should never happen
- -- since the request would have been rejected by the middleware if proxy uri
- -- is malformed
- | isMalformedProxyUri $ fromMaybe mempty proxy = Nothing
- | otherwise = Just Proxy {
- proxyScheme = scheme
- , proxyHost = host'
- , proxyPort = port''
- , proxyPath = path'
- }
- where
- uri = toURI $ fromJust proxy
- scheme = T.init $ T.toLower $ T.pack $ uriScheme uri
- path URI {uriPath = ""} = "/"
- path URI {uriPath = p} = p
- path' = T.pack $ path uri
- authority = fromJust $ uriAuthority uri
- host' = T.pack $ uriRegName authority
- port' = uriPort authority
- readPort = fromMaybe 80 . readMaybe
- port'' :: Integer
- port'' = case (port', scheme) of
- ("", "http") -> 80
- ("", "https") -> 443
- _ -> readPort $ T.unpack $ T.tail $ T.pack port'
-
-proxyUri :: AppConfig -> (Text, Text, Integer, Text)
-proxyUri AppConfig{..} =
- case pickProxy $ toS <$> configOpenApiServerProxyUri of
- Just Proxy{..} ->
- (proxyScheme, proxyHost, proxyPort, proxyPath)
- Nothing ->
- ("http", configServerHost, toInteger configServerPort, "/")
diff --git a/src/PostgREST/SchemaCache.hs b/src/PostgREST/SchemaCache.hs
index 5c17f64426..ced3ed6889 100644
--- a/src/PostgREST/SchemaCache.hs
+++ b/src/PostgREST/SchemaCache.hs
@@ -13,7 +13,6 @@ These queries are executed once at startup or when PostgREST is reloaded.
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE NamedFieldPuns #-}
-{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeSynonymInstances #-}
@@ -41,7 +40,6 @@ import qualified Hasql.Statement as SQL
import qualified Hasql.Transaction as SQL
import Data.Functor.Contravariant ((>$<))
-import NeatInterpolation (trimming)
import PostgREST.Config (AppConfig (..))
import PostgREST.Config.Database (TimezoneNames,
@@ -341,24 +339,24 @@ decodeRepresentations =
dataRepresentations :: Bool -> SQL.Statement AppConfig RepresentationsMap
dataRepresentations = SQL.Statement sql mempty decodeRepresentations
where
- sql = encodeUtf8 [trimming|
- SELECT
- c.castsource::regtype::text,
- c.casttarget::regtype::text,
- c.castfunc::regproc::text
- FROM
- pg_catalog.pg_cast c
- JOIN pg_catalog.pg_type src_t
- ON c.castsource::oid = src_t.oid
- JOIN pg_catalog.pg_type dst_t
- ON c.casttarget::oid = dst_t.oid
- WHERE
- c.castcontext = 'i'
- AND c.castmethod = 'f'
- AND has_function_privilege(c.castfunc, 'execute')
- AND ((src_t.typtype = 'd' AND c.casttarget IN ('json'::regtype::oid , 'text'::regtype::oid))
- OR (dst_t.typtype = 'd' AND c.castsource IN ('json'::regtype::oid , 'text'::regtype::oid)))
- |]
+ sql = encodeUtf8 $ unlines
+ [ "SELECT"
+ , " c.castsource::regtype::text,"
+ , " c.casttarget::regtype::text,"
+ , " c.castfunc::regproc::text"
+ , "FROM"
+ , " pg_catalog.pg_cast c"
+ , "JOIN pg_catalog.pg_type src_t"
+ , " ON c.castsource::oid = src_t.oid"
+ , "JOIN pg_catalog.pg_type dst_t"
+ , " ON c.casttarget::oid = dst_t.oid"
+ , "WHERE"
+ , " c.castcontext = 'i'"
+ , " AND c.castmethod = 'f'"
+ , " AND has_function_privilege(c.castfunc, 'execute')"
+ , " AND ((src_t.typtype = 'd' AND c.casttarget IN ('json'::regtype::oid , 'text'::regtype::oid))"
+ , " OR (dst_t.typtype = 'd' AND c.castsource IN ('json'::regtype::oid , 'text'::regtype::oid)))"
+ ]
allFunctions :: Bool -> SQL.Statement AppConfig RoutineMap
allFunctions = SQL.Statement funcsSqlQuery params decodeFuncs
@@ -376,96 +374,97 @@ accessibleFuncs = SQL.Statement sql params decodeFuncs
sql = funcsSqlQuery <> " AND has_function_privilege(p.oid, 'execute')"
funcsSqlQuery :: SqlQuery
-funcsSqlQuery = encodeUtf8 [trimming|
- -- Recursively get the base types of domains
- WITH
- base_types AS (
- WITH RECURSIVE
- recurse AS (
- SELECT
- oid,
- typbasetype,
- COALESCE(NULLIF(typbasetype, 0), oid) AS base
- FROM pg_type
- UNION
- SELECT
- t.oid,
- b.typbasetype,
- COALESCE(NULLIF(b.typbasetype, 0), b.oid) AS base
- FROM recurse t
- JOIN pg_type b ON t.typbasetype = b.oid
- )
- SELECT
- oid,
- base
- FROM recurse
- WHERE typbasetype = 0
- ),
- arguments AS (
- SELECT
- oid,
- array_agg((
- COALESCE(name, ''), -- name
- type::regtype::text, -- type
- CASE type
- WHEN 'bit'::regtype THEN 'bit varying'
- WHEN 'bit[]'::regtype THEN 'bit varying[]'
- WHEN 'character'::regtype THEN 'character varying'
- WHEN 'character[]'::regtype THEN 'character varying[]'
- ELSE type::regtype::text
- END, -- convert types that ignore the length and accept any value till maximum size
- idx <= (pronargs - pronargdefaults), -- is_required
- COALESCE(mode = 'v', FALSE) -- is_variadic
- ) ORDER BY idx) AS args,
- CASE COUNT(*) - COUNT(name) -- number of unnamed arguments
- WHEN 0 THEN true
- WHEN 1 THEN (array_agg(type))[1] IN ('bytea'::regtype, 'json'::regtype, 'jsonb'::regtype, 'text'::regtype, 'xml'::regtype)
- ELSE false
- END AS callable
- FROM pg_proc,
- unnest(proargnames, proargtypes, proargmodes)
- WITH ORDINALITY AS _ (name, type, mode, idx)
- WHERE type IS NOT NULL -- only input arguments
- GROUP BY oid
- )
- SELECT
- pn.nspname AS proc_schema,
- p.proname AS proc_name,
- d.description AS proc_description,
- COALESCE(a.args, '{}') AS args,
- tn.nspname AS schema,
- COALESCE(comp.relname, t.typname) AS name,
- p.proretset AS rettype_is_setof,
- (t.typtype = 'c'
- -- if any TABLE, INOUT or OUT arguments present, treat as composite
- or COALESCE(proargmodes::text[] && '{t,b,o}', false)
- ) AS rettype_is_composite,
- bt.oid <> bt.base as rettype_is_composite_alias,
- p.provolatile,
- p.provariadic > 0 as hasvariadic,
- lower((regexp_split_to_array((regexp_split_to_array(iso_config, '='))[2], ','))[1]) AS transaction_isolation_level,
- coalesce(func_settings.kvs, '{}') as kvs
- FROM pg_proc p
- LEFT JOIN arguments a ON a.oid = p.oid
- JOIN pg_namespace pn ON pn.oid = p.pronamespace
- JOIN base_types bt ON bt.oid = p.prorettype
- JOIN pg_type t ON t.oid = bt.base
- JOIN pg_namespace tn ON tn.oid = t.typnamespace
- LEFT JOIN pg_class comp ON comp.oid = t.typrelid
- LEFT JOIN pg_description as d ON d.objoid = p.oid
- LEFT JOIN LATERAL unnest(proconfig) iso_config ON iso_config LIKE 'default_transaction_isolation%'
- LEFT JOIN LATERAL (
- SELECT
- array_agg(row(
- substr(setting, 1, strpos(setting, '=') - 1),
- substr(setting, strpos(setting, '=') + 1)
- )) as kvs
- FROM unnest(proconfig) setting
- WHERE setting ~ ANY($$2)
- ) func_settings ON TRUE
- WHERE t.oid <> 'trigger'::regtype AND COALESCE(a.callable, true)
- AND prokind = 'f'
- AND p.pronamespace = ANY($$1::regnamespace[]) |]
+funcsSqlQuery = encodeUtf8 $ unlines
+ [ "-- Recursively get the base types of domains"
+ , "WITH"
+ , "base_types AS ("
+ , " WITH RECURSIVE"
+ , " recurse AS ("
+ , " SELECT"
+ , " oid,"
+ , " typbasetype,"
+ , " COALESCE(NULLIF(typbasetype, 0), oid) AS base"
+ , " FROM pg_type"
+ , " UNION"
+ , " SELECT"
+ , " t.oid,"
+ , " b.typbasetype,"
+ , " COALESCE(NULLIF(b.typbasetype, 0), b.oid) AS base"
+ , " FROM recurse t"
+ , " JOIN pg_type b ON t.typbasetype = b.oid"
+ , " )"
+ , " SELECT"
+ , " oid,"
+ , " base"
+ , " FROM recurse"
+ , " WHERE typbasetype = 0"
+ , "),"
+ , "arguments AS ("
+ , " SELECT"
+ , " oid,"
+ , " array_agg(("
+ , " COALESCE(name, ''), -- name"
+ , " type::regtype::text, -- type"
+ , " CASE type"
+ , " WHEN 'bit'::regtype THEN 'bit varying'"
+ , " WHEN 'bit[]'::regtype THEN 'bit varying[]'"
+ , " WHEN 'character'::regtype THEN 'character varying'"
+ , " WHEN 'character[]'::regtype THEN 'character varying[]'"
+ , " ELSE type::regtype::text"
+ , " END, -- convert types that ignore the length and accept any value till maximum size"
+ , " idx <= (pronargs - pronargdefaults), -- is_required"
+ , " COALESCE(mode = 'v', FALSE) -- is_variadic"
+ , " ) ORDER BY idx) AS args,"
+ , " CASE COUNT(*) - COUNT(name) -- number of unnamed arguments"
+ , " WHEN 0 THEN true"
+ , " WHEN 1 THEN (array_agg(type))[1] IN ('bytea'::regtype, 'json'::regtype, 'jsonb'::regtype, 'text'::regtype, 'xml'::regtype)"
+ , " ELSE false"
+ , " END AS callable"
+ , " FROM pg_proc,"
+ , " unnest(proargnames, proargtypes, proargmodes)"
+ , " WITH ORDINALITY AS _ (name, type, mode, idx)"
+ , " WHERE type IS NOT NULL -- only input arguments"
+ , " GROUP BY oid"
+ , ")"
+ , "SELECT"
+ , " pn.nspname AS proc_schema,"
+ , " p.proname AS proc_name,"
+ , " d.description AS proc_description,"
+ , " COALESCE(a.args, '{}') AS args,"
+ , " tn.nspname AS schema,"
+ , " COALESCE(comp.relname, t.typname) AS name,"
+ , " p.proretset AS rettype_is_setof,"
+ , " (t.typtype = 'c'"
+ , " -- if any TABLE, INOUT or OUT arguments present, treat as composite"
+ , " or COALESCE(proargmodes::text[] && '{t,b,o}', false)"
+ , " ) AS rettype_is_composite,"
+ , " bt.oid <> bt.base as rettype_is_composite_alias,"
+ , " p.provolatile,"
+ , " p.provariadic > 0 as hasvariadic,"
+ , " lower((regexp_split_to_array((regexp_split_to_array(iso_config, '='))[2], ','))[1]) AS transaction_isolation_level,"
+ , " coalesce(func_settings.kvs, '{}') as kvs"
+ , "FROM pg_proc p"
+ , "LEFT JOIN arguments a ON a.oid = p.oid"
+ , "JOIN pg_namespace pn ON pn.oid = p.pronamespace"
+ , "JOIN base_types bt ON bt.oid = p.prorettype"
+ , "JOIN pg_type t ON t.oid = bt.base"
+ , "JOIN pg_namespace tn ON tn.oid = t.typnamespace"
+ , "LEFT JOIN pg_class comp ON comp.oid = t.typrelid"
+ , "LEFT JOIN pg_description as d ON d.objoid = p.oid"
+ , "LEFT JOIN LATERAL unnest(proconfig) iso_config ON iso_config LIKE 'default_transaction_isolation%'"
+ , "LEFT JOIN LATERAL ("
+ , " SELECT"
+ , " array_agg(row("
+ , " substr(setting, 1, strpos(setting, '=') - 1),"
+ , " substr(setting, strpos(setting, '=') + 1)"
+ , " )) as kvs"
+ , " FROM unnest(proconfig) setting"
+ , " WHERE setting ~ ANY($2)"
+ , ") func_settings ON TRUE"
+ , "WHERE t.oid <> 'trigger'::regtype AND COALESCE(a.callable, true)"
+ , "AND prokind = 'f'"
+ , "AND p.pronamespace = ANY($1::regnamespace[])"
+ ]
schemaDescription :: Bool -> SQL.Statement Schema (Maybe Text)
schemaDescription =
@@ -478,21 +477,22 @@ accessibleTables =
SQL.Statement sql params decodeAccessibleIdentifiers
where
params = map escapeIdent >$< arrayParam HE.text
- sql = encodeUtf8 [trimming|
- SELECT
- n.nspname AS table_schema,
- c.relname AS table_name
- FROM pg_class c
- JOIN pg_namespace n ON n.oid = c.relnamespace
- WHERE c.relkind IN ('v','r','m','f','p')
- AND c.relnamespace = ANY($$1::regnamespace[])
- AND (
- pg_has_role(c.relowner, 'USAGE')
- or has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER')
- or has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES')
- )
- AND not c.relispartition
- ORDER BY table_schema, table_name|]
+ sql = encodeUtf8 $ unlines
+ [ "SELECT"
+ , " n.nspname AS table_schema,"
+ , " c.relname AS table_name"
+ , "FROM pg_class c"
+ , "JOIN pg_namespace n ON n.oid = c.relnamespace"
+ , "WHERE c.relkind IN ('v','r','m','f','p')"
+ , "AND c.relnamespace = ANY($1::regnamespace[])"
+ , "AND ("
+ , " pg_has_role(c.relowner, 'USAGE')"
+ , " or has_table_privilege(c.oid, 'SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER')"
+ , " or has_any_column_privilege(c.oid, 'SELECT, INSERT, UPDATE, REFERENCES')"
+ , ")"
+ , "AND not c.relispartition"
+ , "ORDER BY table_schema, table_name"
+ ]
{-
Adds M2O and O2O relationships for views to tables, tables to views, and views to views. The example below is taken from the test fixtures, but the views names/colnames were modified.
@@ -613,138 +613,139 @@ tablesSqlQuery =
-- (pg_has_role(ss.relowner, 'USAGE'::text) OR has_column_privilege(ss.roid, a.attnum, 'SELECT, INSERT, UPDATE, REFERENCES'::text));
-- on the "columns" CTE, left joining on pg_depend and pg_class is used to obtain the sequence name as a column default in case there are GENERATED .. AS IDENTITY,
-- generated columns are only available from pg >= 10 but the query is agnostic to versions. dep.deptype = 'i' is done because there are other 'a' dependencies on PKs
- encodeUtf8 [trimming|
- WITH
- columns AS (
- SELECT
- c.oid AS relid,
- a.attname::name AS column_name,
- d.description AS description,
- -- typbasetype and typdefaultbin handles `CREATE DOMAIN .. DEFAULT val`, attidentity/attgenerated handles generated columns, pg_get_expr gets the default of a column
- CASE
- WHEN (t.typbasetype != 0) AND (ad.adbin IS NULL) THEN pg_get_expr(t.typdefaultbin, 0)
- WHEN a.attidentity = 'd' THEN format('nextval(%L)', seq.objid::regclass)
- WHEN a.attgenerated = 's' THEN null
- ELSE pg_get_expr(ad.adbin, ad.adrelid)::text
- END AS column_default,
- not (a.attnotnull OR t.typtype = 'd' AND t.typnotnull) AS is_nullable,
- CASE
- WHEN t.typtype = 'd' THEN
- CASE
- WHEN bt.typnamespace = 'pg_catalog'::regnamespace THEN format_type(t.typbasetype, NULL::integer)
- ELSE format_type(a.atttypid, a.atttypmod)
- END
- ELSE
- CASE
- WHEN t.typnamespace = 'pg_catalog'::regnamespace THEN format_type(a.atttypid, NULL::integer)
- ELSE format_type(a.atttypid, a.atttypmod)
- END
- END::text AS data_type,
- format_type(a.atttypid, a.atttypmod)::text AS nominal_data_type,
- information_schema._pg_char_max_length(
- information_schema._pg_truetypid(a.*, t.*),
- information_schema._pg_truetypmod(a.*, t.*)
- )::integer AS character_maximum_length,
- COALESCE(bt.oid, t.oid) AS base_type,
- a.attnum::integer AS position
- FROM pg_attribute a
- LEFT JOIN pg_description AS d
- ON d.objoid = a.attrelid and d.objsubid = a.attnum
- LEFT JOIN pg_attrdef ad
- ON a.attrelid = ad.adrelid AND a.attnum = ad.adnum
- JOIN pg_class c
- ON a.attrelid = c.oid
- JOIN pg_type t
- ON a.atttypid = t.oid
- LEFT JOIN pg_type bt
- ON t.typtype = 'd' AND t.typbasetype = bt.oid
- LEFT JOIN pg_depend seq
- ON seq.refobjid = a.attrelid and seq.refobjsubid = a.attnum and seq.deptype = 'i'
- WHERE
- NOT pg_is_other_temp_schema(c.relnamespace)
- AND a.attnum > 0
- AND NOT a.attisdropped
- AND c.relkind in ('r', 'v', 'f', 'm', 'p')
- AND c.relnamespace = ANY($$1::regnamespace[])
- ),
- columns_agg AS (
- SELECT
- relid,
- array_agg(row(
- column_name,
- description,
- is_nullable::boolean,
- data_type,
- nominal_data_type,
- character_maximum_length,
- column_default,
- coalesce(
- (SELECT array_agg(enumlabel ORDER BY enumsortorder) FROM pg_enum WHERE enumtypid = base_type),
- '{}'
- )
- ) order by position) as columns
- FROM columns
- GROUP BY relid
- ),
- tbl_pk_cols AS (
- SELECT
- r.oid AS relid,
- array_agg(a.attname ORDER BY a.attname) AS pk_cols
- FROM pg_class r
- JOIN pg_constraint c
- ON r.oid = c.conrelid
- JOIN pg_attribute a
- ON a.attrelid = r.oid AND a.attnum = ANY (c.conkey)
- WHERE
- c.contype in ('p')
- AND r.relkind IN ('r', 'p')
- AND r.relnamespace NOT IN ('pg_catalog'::regnamespace, 'information_schema'::regnamespace)
- AND NOT pg_is_other_temp_schema(r.relnamespace)
- AND NOT a.attisdropped
- GROUP BY r.oid
- )
- SELECT
- n.nspname AS table_schema,
- c.relname AS table_name,
- d.description AS table_description,
- c.relkind IN ('v','m') as is_view,
- (
- c.relkind IN ('r','p')
- OR (
- c.relkind in ('v','f')
- -- The function `pg_relation_is_updateable` returns a bitmask where 8
- -- corresponds to `1 << CMD_INSERT` in the PostgreSQL source code, i.e.
- -- it's possible to insert into the relation.
- AND (pg_relation_is_updatable(c.oid::regclass, TRUE) & 8) = 8
- )
- ) AS insertable,
- (
- c.relkind IN ('r','p')
- OR (
- c.relkind in ('v','f')
- -- CMD_UPDATE
- AND (pg_relation_is_updatable(c.oid::regclass, TRUE) & 4) = 4
- )
- ) AS updatable,
- (
- c.relkind IN ('r','p')
- OR (
- c.relkind in ('v','f')
- -- CMD_DELETE
- AND (pg_relation_is_updatable(c.oid::regclass, TRUE) & 16) = 16
- )
- ) AS deletable,
- coalesce(tpks.pk_cols, '{}') as pk_cols,
- coalesce(cols_agg.columns, '{}') as columns
- FROM pg_class c
- JOIN pg_namespace n ON n.oid = c.relnamespace
- LEFT JOIN pg_description d on d.objoid = c.oid and d.objsubid = 0
- LEFT JOIN tbl_pk_cols tpks ON c.oid = tpks.relid
- LEFT JOIN columns_agg cols_agg ON c.oid = cols_agg.relid
- WHERE c.relkind IN ('v','r','m','f','p')
- AND c.relnamespace NOT IN ('pg_catalog'::regnamespace, 'information_schema'::regnamespace)
- AND not c.relispartition
- ORDER BY table_schema, table_name|]
+ encodeUtf8 $ unlines
+ [ "WITH"
+ , "columns AS ("
+ , " SELECT"
+ , " c.oid AS relid,"
+ , " a.attname::name AS column_name,"
+ , " d.description AS description,"
+ , " -- typbasetype and typdefaultbin handles `CREATE DOMAIN .. DEFAULT val`, attidentity/attgenerated handles generated columns, pg_get_expr gets the default of a column"
+ , " CASE"
+ , " WHEN (t.typbasetype != 0) AND (ad.adbin IS NULL) THEN pg_get_expr(t.typdefaultbin, 0)"
+ , " WHEN a.attidentity = 'd' THEN format('nextval(%L)', seq.objid::regclass)"
+ , " WHEN a.attgenerated = 's' THEN null"
+ , " ELSE pg_get_expr(ad.adbin, ad.adrelid)::text"
+ , " END AS column_default,"
+ , " not (a.attnotnull OR t.typtype = 'd' AND t.typnotnull) AS is_nullable,"
+ , " CASE"
+ , " WHEN t.typtype = 'd' THEN"
+ , " CASE"
+ , " WHEN bt.typnamespace = 'pg_catalog'::regnamespace THEN format_type(t.typbasetype, NULL::integer)"
+ , " ELSE format_type(a.atttypid, a.atttypmod)"
+ , " END"
+ , " ELSE"
+ , " CASE"
+ , " WHEN t.typnamespace = 'pg_catalog'::regnamespace THEN format_type(a.atttypid, NULL::integer)"
+ , " ELSE format_type(a.atttypid, a.atttypmod)"
+ , " END"
+ , " END::text AS data_type,"
+ , " format_type(a.atttypid, a.atttypmod)::text AS nominal_data_type,"
+ , " information_schema._pg_char_max_length("
+ , " information_schema._pg_truetypid(a.*, t.*),"
+ , " information_schema._pg_truetypmod(a.*, t.*)"
+ , " )::integer AS character_maximum_length,"
+ , " COALESCE(bt.oid, t.oid) AS base_type,"
+ , " a.attnum::integer AS position"
+ , " FROM pg_attribute a"
+ , " LEFT JOIN pg_description AS d"
+ , " ON d.objoid = a.attrelid and d.objsubid = a.attnum"
+ , " LEFT JOIN pg_attrdef ad"
+ , " ON a.attrelid = ad.adrelid AND a.attnum = ad.adnum"
+ , " JOIN pg_class c"
+ , " ON a.attrelid = c.oid"
+ , " JOIN pg_type t"
+ , " ON a.atttypid = t.oid"
+ , " LEFT JOIN pg_type bt"
+ , " ON t.typtype = 'd' AND t.typbasetype = bt.oid"
+ , " LEFT JOIN pg_depend seq"
+ , " ON seq.refobjid = a.attrelid and seq.refobjsubid = a.attnum and seq.deptype = 'i'"
+ , " WHERE"
+ , " NOT pg_is_other_temp_schema(c.relnamespace)"
+ , " AND a.attnum > 0"
+ , " AND NOT a.attisdropped"
+ , " AND c.relkind in ('r', 'v', 'f', 'm', 'p')"
+ , " AND c.relnamespace = ANY($1::regnamespace[])"
+ , "),"
+ , "columns_agg AS ("
+ , " SELECT"
+ , " relid,"
+ , " array_agg(row("
+ , " column_name,"
+ , " description,"
+ , " is_nullable::boolean,"
+ , " data_type,"
+ , " nominal_data_type,"
+ , " character_maximum_length,"
+ , " column_default,"
+ , " coalesce("
+ , " (SELECT array_agg(enumlabel ORDER BY enumsortorder) FROM pg_enum WHERE enumtypid = base_type),"
+ , " '{}'"
+ , " )"
+ , " ) order by position) as columns"
+ , " FROM columns"
+ , " GROUP BY relid"
+ , "),"
+ , "tbl_pk_cols AS ("
+ , " SELECT"
+ , " r.oid AS relid,"
+ , " array_agg(a.attname ORDER BY a.attname) AS pk_cols"
+ , " FROM pg_class r"
+ , " JOIN pg_constraint c"
+ , " ON r.oid = c.conrelid"
+ , " JOIN pg_attribute a"
+ , " ON a.attrelid = r.oid AND a.attnum = ANY (c.conkey)"
+ , " WHERE"
+ , " c.contype in ('p')"
+ , " AND r.relkind IN ('r', 'p')"
+ , " AND r.relnamespace NOT IN ('pg_catalog'::regnamespace, 'information_schema'::regnamespace)"
+ , " AND NOT pg_is_other_temp_schema(r.relnamespace)"
+ , " AND NOT a.attisdropped"
+ , " GROUP BY r.oid"
+ , ")"
+ , "SELECT"
+ , " n.nspname AS table_schema,"
+ , " c.relname AS table_name,"
+ , " d.description AS table_description,"
+ , " c.relkind IN ('v','m') as is_view,"
+ , " ("
+ , " c.relkind IN ('r','p')"
+ , " OR ("
+ , " c.relkind in ('v','f')"
+ , " -- The function `pg_relation_is_updateable` returns a bitmask where 8"
+ , " -- corresponds to `1 << CMD_INSERT` in the PostgreSQL source code, i.e."
+ , " -- it's possible to insert into the relation."
+ , " AND (pg_relation_is_updatable(c.oid::regclass, TRUE) & 8) = 8"
+ , " )"
+ , " ) AS insertable,"
+ , " ("
+ , " c.relkind IN ('r','p')"
+ , " OR ("
+ , " c.relkind in ('v','f')"
+ , " -- CMD_UPDATE"
+ , " AND (pg_relation_is_updatable(c.oid::regclass, TRUE) & 4) = 4"
+ , " )"
+ , " ) AS updatable,"
+ , " ("
+ , " c.relkind IN ('r','p')"
+ , " OR ("
+ , " c.relkind in ('v','f')"
+ , " -- CMD_DELETE"
+ , " AND (pg_relation_is_updatable(c.oid::regclass, TRUE) & 16) = 16"
+ , " )"
+ , " ) AS deletable,"
+ , " coalesce(tpks.pk_cols, '{}') as pk_cols,"
+ , " coalesce(cols_agg.columns, '{}') as columns"
+ , "FROM pg_class c"
+ , "JOIN pg_namespace n ON n.oid = c.relnamespace"
+ , "LEFT JOIN pg_description d on d.objoid = c.oid and d.objsubid = 0"
+ , "LEFT JOIN tbl_pk_cols tpks ON c.oid = tpks.relid"
+ , "LEFT JOIN columns_agg cols_agg ON c.oid = cols_agg.relid"
+ , "WHERE c.relkind IN ('v','r','m','f','p')"
+ , "AND c.relnamespace NOT IN ('pg_catalog'::regnamespace, 'information_schema'::regnamespace)"
+ , "AND not c.relispartition"
+ , "ORDER BY table_schema, table_name"
+ ]
-- | Gets many-to-one relationships and one-to-one(O2O) relationships, which are a refinement of the many-to-one's
allM2OandO2ORels :: Bool -> SQL.Statement () [Relationship]
@@ -752,80 +753,81 @@ allM2OandO2ORels =
SQL.Statement sql HE.noParams decodeRels
where
-- We use jsonb_agg for comparing the uniques/pks instead of array_agg to avoid the ERROR: cannot accumulate arrays of different dimensionality
- sql = encodeUtf8 [trimming|
- WITH
- pks_uniques_cols AS (
- SELECT
- conrelid,
- array_agg(key order by key) as cols
- FROM pg_constraint,
- LATERAL unnest(conkey) AS _(key)
- WHERE
- contype IN ('p', 'u')
- AND connamespace <> 'pg_catalog'::regnamespace
- GROUP BY oid, conrelid
- )
- SELECT
- ns1.nspname AS table_schema,
- tab.relname AS table_name,
- ns2.nspname AS foreign_table_schema,
- other.relname AS foreign_table_name,
- traint.conrelid = traint.confrelid AS is_self,
- traint.conname AS constraint_name,
- column_info.cols_and_fcols,
- (column_info.cols IN (SELECT cols FROM pks_uniques_cols WHERE conrelid = traint.conrelid)) AS one_to_one
- FROM pg_constraint traint
- JOIN LATERAL (
- SELECT
- array_agg(row(cols.attname, refs.attname) order by ord) AS cols_and_fcols,
- array_agg(cols.attnum order by cols.attnum) AS cols
- FROM unnest(traint.conkey, traint.confkey) WITH ORDINALITY AS _(col, ref, ord)
- JOIN pg_attribute cols ON cols.attrelid = traint.conrelid AND cols.attnum = col
- JOIN pg_attribute refs ON refs.attrelid = traint.confrelid AND refs.attnum = ref
- ) AS column_info ON TRUE
- JOIN pg_namespace ns1 ON ns1.oid = traint.connamespace
- JOIN pg_class tab ON tab.oid = traint.conrelid
- JOIN pg_class other ON other.oid = traint.confrelid
- JOIN pg_namespace ns2 ON ns2.oid = other.relnamespace
- WHERE traint.contype = 'f'
- AND traint.conparentid = 0
- ORDER BY traint.conrelid, traint.conname|]
+ sql = encodeUtf8 $ unlines
+ [ "WITH"
+ , "pks_uniques_cols AS ("
+ , " SELECT"
+ , " conrelid,"
+ , " array_agg(key order by key) as cols"
+ , " FROM pg_constraint,"
+ , " LATERAL unnest(conkey) AS _(key)"
+ , " WHERE"
+ , " contype IN ('p', 'u')"
+ , " AND connamespace <> 'pg_catalog'::regnamespace"
+ , " GROUP BY oid, conrelid"
+ , ")"
+ , "SELECT"
+ , " ns1.nspname AS table_schema,"
+ , " tab.relname AS table_name,"
+ , " ns2.nspname AS foreign_table_schema,"
+ , " other.relname AS foreign_table_name,"
+ , " traint.conrelid = traint.confrelid AS is_self,"
+ , " traint.conname AS constraint_name,"
+ , " column_info.cols_and_fcols,"
+ , " (column_info.cols IN (SELECT cols FROM pks_uniques_cols WHERE conrelid = traint.conrelid)) AS one_to_one"
+ , "FROM pg_constraint traint"
+ , "JOIN LATERAL ("
+ , " SELECT"
+ , " array_agg(row(cols.attname, refs.attname) order by ord) AS cols_and_fcols,"
+ , " array_agg(cols.attnum order by cols.attnum) AS cols"
+ , " FROM unnest(traint.conkey, traint.confkey) WITH ORDINALITY AS _(col, ref, ord)"
+ , " JOIN pg_attribute cols ON cols.attrelid = traint.conrelid AND cols.attnum = col"
+ , " JOIN pg_attribute refs ON refs.attrelid = traint.confrelid AND refs.attnum = ref"
+ , ") AS column_info ON TRUE"
+ , "JOIN pg_namespace ns1 ON ns1.oid = traint.connamespace"
+ , "JOIN pg_class tab ON tab.oid = traint.conrelid"
+ , "JOIN pg_class other ON other.oid = traint.confrelid"
+ , "JOIN pg_namespace ns2 ON ns2.oid = other.relnamespace"
+ , "WHERE traint.contype = 'f'"
+ , "AND traint.conparentid = 0"
+ , "ORDER BY traint.conrelid, traint.conname"
+ ]
allComputedRels :: Bool -> SQL.Statement () [Relationship]
allComputedRels =
SQL.Statement sql HE.noParams (HD.rowList cRelRow)
where
- sql = encodeUtf8 [trimming|
- with
- all_relations as (
- select reltype
- from pg_class
- where relkind in ('v','r','m','f','p')
- ),
- computed_rels as (
- select
- (parse_ident(p.pronamespace::regnamespace::text))[1] as schema,
- p.proname::text as name,
- arg_schema.nspname::text as rel_table_schema,
- arg_name.typname::text as rel_table_name,
- ret_schema.nspname::text as rel_ftable_schema,
- ret_name.typname::text as rel_ftable_name,
- not p.proretset or p.prorows = 1 as single_row
- from pg_proc p
- join pg_type arg_name on arg_name.oid = p.proargtypes[0]
- join pg_namespace arg_schema on arg_schema.oid = arg_name.typnamespace
- join pg_type ret_name on ret_name.oid = p.prorettype
- join pg_namespace ret_schema on ret_schema.oid = ret_name.typnamespace
- where
- p.pronargs = 1
- and p.proargtypes[0] in (select reltype from all_relations)
- and p.prorettype in (select reltype from all_relations)
- )
- select
- *,
- row(rel_table_schema, rel_table_name) = row(rel_ftable_schema, rel_ftable_name) as is_self
- from computed_rels;
- |]
+ sql = encodeUtf8 $ unlines
+ [ "with"
+ , "all_relations as ("
+ , " select reltype"
+ , " from pg_class"
+ , " where relkind in ('v','r','m','f','p')"
+ , "),"
+ , "computed_rels as ("
+ , " select"
+ , " (parse_ident(p.pronamespace::regnamespace::text))[1] as schema,"
+ , " p.proname::text as name,"
+ , " arg_schema.nspname::text as rel_table_schema,"
+ , " arg_name.typname::text as rel_table_name,"
+ , " ret_schema.nspname::text as rel_ftable_schema,"
+ , " ret_name.typname::text as rel_ftable_name,"
+ , " not p.proretset or p.prorows = 1 as single_row"
+ , " from pg_proc p"
+ , " join pg_type arg_name on arg_name.oid = p.proargtypes[0]"
+ , " join pg_namespace arg_schema on arg_schema.oid = arg_name.typnamespace"
+ , " join pg_type ret_name on ret_name.oid = p.prorettype"
+ , " join pg_namespace ret_schema on ret_schema.oid = ret_name.typnamespace"
+ , " where"
+ , " p.pronargs = 1"
+ , " and p.proargtypes[0] in (select reltype from all_relations)"
+ , " and p.prorettype in (select reltype from all_relations)"
+ , ")"
+ , "select"
+ , " *,"
+ , " row(rel_table_schema, rel_table_name) = row(rel_ftable_schema, rel_ftable_name) as is_self"
+ , "from computed_rels;"
+ ]
cRelRow =
ComputedRelationship <$>
@@ -847,197 +849,197 @@ allViewsKeyDependencies =
params =
(map escapeIdent . toList . configDbSchemas >$< arrayParam HE.text) <>
(map escapeIdent . toList . configDbExtraSearchPath >$< arrayParam HE.text)
- sql = encodeUtf8 [trimming|
- with recursive
- pks_fks as (
- -- pk + fk referencing col
- select
- contype::text as contype,
- conname,
- array_length(conkey, 1) as ncol,
- conrelid as resorigtbl,
- col as resorigcol,
- ord
- from pg_constraint
- left join lateral unnest(conkey) with ordinality as _(col, ord) on true
- where contype IN ('p', 'f')
- union
- -- fk referenced col
- select
- concat(contype, '_ref') as contype,
- conname,
- array_length(confkey, 1) as ncol,
- confrelid,
- col,
- ord
- from pg_constraint
- left join lateral unnest(confkey) with ordinality as _(col, ord) on true
- where contype='f'
- ),
- views as (
- select
- c.oid as view_id,
- c.relnamespace as view_schema_id,
- n.nspname as view_schema,
- c.relname as view_name,
- r.ev_action as view_definition
- from pg_class c
- join pg_namespace n on n.oid = c.relnamespace
- join pg_rewrite r on r.ev_class = c.oid
- where c.relkind in ('v', 'm') and c.relnamespace = ANY($$1::regnamespace[] || $$2::regnamespace[])
- ),
- transform_json as (
- select
- view_id, view_schema_id, view_schema, view_name,
- -- the following formatting is without indentation on purpose
- -- to allow simple diffs, with less whitespace noise
- replace(
- replace(
- replace(
- replace(
- replace(
- replace(
- replace(
- regexp_replace(
- replace(
- replace(
- replace(
- replace(
- replace(
- replace(
- replace(
- replace(
- replace(
- replace(
- replace(
- view_definition::text,
- -- This conversion to json is heavily optimized for performance.
- -- The general idea is to use as few regexp_replace() calls as possible.
- -- Simple replace() is a lot faster, so we jump through some hoops
- -- to be able to use regexp_replace() only once.
- -- This has been tested against a huge schema with 250+ different views.
- -- The unit tests do NOT reflect all possible inputs. Be careful when changing this!
- -- -----------------------------------------------
- -- pattern | replacement | flags
- -- -----------------------------------------------
- -- `<>` in pg_node_tree is the same as `null` in JSON, but due to very poor performance of json_typeof
- -- we need to make this an empty array here to prevent json_array_elements from throwing an error
- -- when the targetList is null.
- -- We'll need to put it first, to make the node protection below work for node lists that start with
- -- null: `(<> ...`, too. This is the case for coldefexprs, when the first column does not have a default value.
- '<>' , '()'
- -- `,` is not part of the pg_node_tree format, but used in the regex.
- -- This removes all `,` that might be part of column names.
- ), ',' , ''
- -- The same applies for `{` and `}`, although those are used a lot in pg_node_tree.
- -- We remove the escaped ones, which might be part of column names again.
- ), E'\\{' , ''
- ), E'\\}' , ''
- -- The fields we need are formatted as json manually to protect them from the regex.
- ), ' :targetList ' , ',"targetList":'
- ), ' :resno ' , ',"resno":'
- ), ' :resorigtbl ' , ',"resorigtbl":'
- ), ' :resorigcol ' , ',"resorigcol":'
- -- Make the regex also match the node type, e.g. `{QUERY ...`, to remove it in one pass.
- ), '{' , '{ :'
- -- Protect node lists, which start with `({` or `((` from the greedy regex.
- -- The extra `{` is removed again later.
- ), '((' , '{(('
- ), '({' , '{({'
- -- This regex removes all unused fields to avoid the need to format all of them correctly.
- -- This leads to a smaller json result as well.
- -- Removal stops at `,` for used fields (see above) and `}` for the end of the current node.
- -- Nesting can't be parsed correctly with a regex, so we stop at `{` as well and
- -- add an empty key for the followig node.
- ), ' :[^}{,]+' , ',"":' , 'g'
- -- For performance, the regex also added those empty keys when hitting a `,` or `}`.
- -- Those are removed next.
- ), ',"":}' , '}'
- ), ',"":,' , ','
- -- This reverses the "node list protection" from above.
- ), '{(' , '('
- -- Every key above has been added with a `,` so far. The first key in an object doesn't need it.
- ), '{,' , '{'
- -- pg_node_tree has `()` around lists, but JSON uses `[]`
- ), '(' , '['
- ), ')' , ']'
- -- pg_node_tree has ` ` between list items, but JSON uses `,`
- ), ' ' , ','
- )::json as view_definition
- from views
- ),
- target_entries as(
- select
- view_id, view_schema_id, view_schema, view_name,
- json_array_elements(view_definition->0->'targetList') as entry
- from transform_json
- ),
- results as(
- select
- view_id, view_schema_id, view_schema, view_name,
- (entry->>'resno')::int as view_column,
- (entry->>'resorigtbl')::oid as resorigtbl,
- (entry->>'resorigcol')::int as resorigcol
- from target_entries
- ),
- -- CYCLE detection according to PG docs: https://www.postgresql.org/docs/current/queries-with.html#QUERIES-WITH-CYCLE
- -- Can be replaced with CYCLE clause once PG v13 is EOL.
- recursion(view_id, view_schema_id, view_schema, view_name, view_column, resorigtbl, resorigcol, is_cycle, path) as(
- select
- r.*,
- false,
- ARRAY[resorigtbl]
- from results r
- where view_schema_id = ANY ($$1::regnamespace[])
- union all
- select
- view.view_id,
- view.view_schema_id,
- view.view_schema,
- view.view_name,
- view.view_column,
- tab.resorigtbl,
- tab.resorigcol,
- tab.resorigtbl = ANY(path),
- path || tab.resorigtbl
- from recursion view
- join results tab on view.resorigtbl=tab.view_id and view.resorigcol=tab.view_column
- where not is_cycle
- ),
- repeated_references as(
- select
- view_id,
- view_schema,
- view_name,
- resorigtbl,
- resorigcol,
- array_agg(attname) as view_columns
- from recursion
- join pg_attribute vcol on vcol.attrelid = view_id and vcol.attnum = view_column
- group by
- view_id,
- view_schema,
- view_name,
- resorigtbl,
- resorigcol
- )
- select
- sch.nspname as table_schema,
- tbl.relname as table_name,
- rep.view_schema,
- rep.view_name,
- pks_fks.conname as constraint_name,
- pks_fks.contype as constraint_type,
- array_agg(row(col.attname, view_columns) order by pks_fks.ord) as column_dependencies
- from repeated_references rep
- join pks_fks using (resorigtbl, resorigcol)
- join pg_class tbl on tbl.oid = rep.resorigtbl
- join pg_attribute col on col.attrelid = tbl.oid and col.attnum = rep.resorigcol
- join pg_namespace sch on sch.oid = tbl.relnamespace
- group by sch.nspname, tbl.relname, rep.view_schema, rep.view_name, pks_fks.conname, pks_fks.contype, pks_fks.ncol
- -- make sure we only return key for which all columns are referenced in the view - no partial PKs or FKs
- having ncol = array_length(array_agg(row(col.attname, view_columns) order by pks_fks.ord), 1)
- |]
+ sql = encodeUtf8 $ unlines
+ [ "with recursive"
+ , "pks_fks as ("
+ , " -- pk + fk referencing col"
+ , " select"
+ , " contype::text as contype,"
+ , " conname,"
+ , " array_length(conkey, 1) as ncol,"
+ , " conrelid as resorigtbl,"
+ , " col as resorigcol,"
+ , " ord"
+ , " from pg_constraint"
+ , " left join lateral unnest(conkey) with ordinality as _(col, ord) on true"
+ , " where contype IN ('p', 'f')"
+ , " union"
+ , " -- fk referenced col"
+ , " select"
+ , " concat(contype, '_ref') as contype,"
+ , " conname,"
+ , " array_length(confkey, 1) as ncol,"
+ , " confrelid,"
+ , " col,"
+ , " ord"
+ , " from pg_constraint"
+ , " left join lateral unnest(confkey) with ordinality as _(col, ord) on true"
+ , " where contype='f'"
+ , "),"
+ , "views as ("
+ , " select"
+ , " c.oid as view_id,"
+ , " c.relnamespace as view_schema_id,"
+ , " n.nspname as view_schema,"
+ , " c.relname as view_name,"
+ , " r.ev_action as view_definition"
+ , " from pg_class c"
+ , " join pg_namespace n on n.oid = c.relnamespace"
+ , " join pg_rewrite r on r.ev_class = c.oid"
+ , " where c.relkind in ('v', 'm') and c.relnamespace = ANY($1::regnamespace[] || $2::regnamespace[])"
+ , "),"
+ , "transform_json as ("
+ , " select"
+ , " view_id, view_schema_id, view_schema, view_name,"
+ , " -- the following formatting is without indentation on purpose"
+ , " -- to allow simple diffs, with less whitespace noise"
+ , " replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " regexp_replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " replace("
+ , " view_definition::text,"
+ , " -- This conversion to json is heavily optimized for performance."
+ , " -- The general idea is to use as few regexp_replace() calls as possible."
+ , " -- Simple replace() is a lot faster, so we jump through some hoops"
+ , " -- to be able to use regexp_replace() only once."
+ , " -- This has been tested against a huge schema with 250+ different views."
+ , " -- The unit tests do NOT reflect all possible inputs. Be careful when changing this!"
+ , " -- -----------------------------------------------"
+ , " -- pattern | replacement | flags"
+ , " -- -----------------------------------------------"
+ , " -- `<>` in pg_node_tree is the same as `null` in JSON, but due to very poor performance of json_typeof"
+ , " -- we need to make this an empty array here to prevent json_array_elements from throwing an error"
+ , " -- when the targetList is null."
+ , " -- We'll need to put it first, to make the node protection below work for node lists that start with"
+ , " -- null: `(<> ...`, too. This is the case for coldefexprs, when the first column does not have a default value."
+ , " '<>' , '()'"
+ , " -- `,` is not part of the pg_node_tree format, but used in the regex."
+ , " -- This removes all `,` that might be part of column names."
+ , " ), ',' , ''"
+ , " -- The same applies for `{` and `}`, although those are used a lot in pg_node_tree."
+ , " -- We remove the escaped ones, which might be part of column names again."
+ , " ), E'\\\\{' , ''"
+ , " ), E'\\\\}' , ''"
+ , " -- The fields we need are formatted as json manually to protect them from the regex."
+ , " ), ' :targetList ' , ',\"targetList\":'"
+ , " ), ' :resno ' , ',\"resno\":'"
+ , " ), ' :resorigtbl ' , ',\"resorigtbl\":'"
+ , " ), ' :resorigcol ' , ',\"resorigcol\":'"
+ , " -- Make the regex also match the node type, e.g. `{QUERY ...`, to remove it in one pass."
+ , " ), '{' , '{ :'"
+ , " -- Protect node lists, which start with `({` or `((` from the greedy regex."
+ , " -- The extra `{` is removed again later."
+ , " ), '((' , '{(('"
+ , " ), '({' , '{({'"
+ , " -- This regex removes all unused fields to avoid the need to format all of them correctly."
+ , " -- This leads to a smaller json result as well."
+ , " -- Removal stops at `,` for used fields (see above) and `}` for the end of the current node."
+ , " -- Nesting can't be parsed correctly with a regex, so we stop at `{` as well and"
+ , "                          -- add an empty key for the following node."
+ , " ), ' :[^}{,]+' , ',\"\":' , 'g'"
+ , " -- For performance, the regex also added those empty keys when hitting a `,` or `}`."
+ , " -- Those are removed next."
+ , " ), ',\"\":}' , '}'"
+ , " ), ',\"\":,' , ','"
+ , " -- This reverses the 'node list protection' from above."
+ , " ), '{(' , '('"
+ , " -- Every key above has been added with a `,` so far. The first key in an object doesn't need it."
+ , " ), '{,' , '{'"
+ , " -- pg_node_tree has `()` around lists, but JSON uses `[]`"
+ , " ), '(' , '['"
+ , " ), ')' , ']'"
+ , " -- pg_node_tree has ` ` between list items, but JSON uses `,`"
+ , " ), ' ' , ','"
+ , " )::json as view_definition"
+ , " from views"
+ , "),"
+ , "target_entries as("
+ , " select"
+ , " view_id, view_schema_id, view_schema, view_name,"
+ , " json_array_elements(view_definition->0->'targetList') as entry"
+ , " from transform_json"
+ , "),"
+ , "results as("
+ , " select"
+ , " view_id, view_schema_id, view_schema, view_name,"
+ , " (entry->>'resno')::int as view_column,"
+ , " (entry->>'resorigtbl')::oid as resorigtbl,"
+ , " (entry->>'resorigcol')::int as resorigcol"
+ , " from target_entries"
+ , "),"
+ , "-- CYCLE detection according to PG docs: https://www.postgresql.org/docs/current/queries-with.html#QUERIES-WITH-CYCLE"
+ , "-- Can be replaced with CYCLE clause once PG v13 is EOL."
+ , "recursion(view_id, view_schema_id, view_schema, view_name, view_column, resorigtbl, resorigcol, is_cycle, path) as("
+ , " select"
+ , " r.*,"
+ , " false,"
+ , " ARRAY[resorigtbl]"
+ , " from results r"
+ , " where view_schema_id = ANY ($1::regnamespace[])"
+ , " union all"
+ , " select"
+ , " view.view_id,"
+ , " view.view_schema_id,"
+ , " view.view_schema,"
+ , " view.view_name,"
+ , " view.view_column,"
+ , " tab.resorigtbl,"
+ , " tab.resorigcol,"
+ , " tab.resorigtbl = ANY(path),"
+ , " path || tab.resorigtbl"
+ , " from recursion view"
+ , " join results tab on view.resorigtbl=tab.view_id and view.resorigcol=tab.view_column"
+ , " where not is_cycle"
+ , "),"
+ , "repeated_references as("
+ , " select"
+ , " view_id,"
+ , " view_schema,"
+ , " view_name,"
+ , " resorigtbl,"
+ , " resorigcol,"
+ , " array_agg(attname) as view_columns"
+ , " from recursion"
+ , " join pg_attribute vcol on vcol.attrelid = view_id and vcol.attnum = view_column"
+ , " group by"
+ , " view_id,"
+ , " view_schema,"
+ , " view_name,"
+ , " resorigtbl,"
+ , " resorigcol"
+ , ")"
+ , "select"
+ , " sch.nspname as table_schema,"
+ , " tbl.relname as table_name,"
+ , " rep.view_schema,"
+ , " rep.view_name,"
+ , " pks_fks.conname as constraint_name,"
+ , " pks_fks.contype as constraint_type,"
+ , " array_agg(row(col.attname, view_columns) order by pks_fks.ord) as column_dependencies"
+ , "from repeated_references rep"
+ , "join pks_fks using (resorigtbl, resorigcol)"
+ , "join pg_class tbl on tbl.oid = rep.resorigtbl"
+ , "join pg_attribute col on col.attrelid = tbl.oid and col.attnum = rep.resorigcol"
+ , "join pg_namespace sch on sch.oid = tbl.relnamespace"
+ , "group by sch.nspname, tbl.relname, rep.view_schema, rep.view_name, pks_fks.conname, pks_fks.contype, pks_fks.ncol"
+ , "-- make sure we only return keys for which all columns are referenced in the view - no partial PKs or FKs"
+ , "having ncol = array_length(array_agg(row(col.attname, view_columns) order by pks_fks.ord), 1)"
+ ]
initialMediaHandlers :: MediaHandlerMap
initialMediaHandlers =
@@ -1052,64 +1054,65 @@ mediaHandlers =
SQL.Statement sql params decodeMediaHandlers
where
params = map escapeIdent . toList . configDbSchemas >$< arrayParam HE.text
- sql = encodeUtf8 [trimming|
- with
- all_relations as (
- select reltype
- from pg_class
- where relkind in ('v','r','m','f','p')
- union
- select oid
- from pg_type
- where typname = 'anyelement'
- ),
- media_types as (
- SELECT
- t.oid,
- lower(t.typname) as typname,
- t.typnamespace,
- case t.typname
- when '*/*' then 'application/octet-stream'
- else t.typname
- end as resolved_media_type
- FROM pg_type t
- JOIN pg_type b ON t.typbasetype = b.oid
- WHERE
- t.typbasetype <> 0 and
- (t.typname ~* '^[A-Za-z0-9.-]+/[A-Za-z0-9.\+-]+$$' or t.typname = '*/*')
- )
- select
- proc_schema.nspname as handler_schema,
- proc.proname as handler_name,
- arg_schema.nspname::text as target_schema,
- arg_name.typname::text as target_name,
- media_types.typname as media_type,
- media_types.resolved_media_type
- from media_types
- join pg_proc proc on proc.prorettype = media_types.oid
- join pg_namespace proc_schema on proc_schema.oid = proc.pronamespace
- join pg_aggregate agg on agg.aggfnoid = proc.oid
- join pg_type arg_name on arg_name.oid = proc.proargtypes[0]
- join pg_namespace arg_schema on arg_schema.oid = arg_name.typnamespace
- where
- proc.pronamespace = ANY($$1::regnamespace[]) and
- proc.pronargs = 1 and
- arg_name.oid in (select reltype from all_relations)
- union
- select
- typ_sch.nspname as handler_schema,
- mtype.typname as handler_name,
- pro_sch.nspname as target_schema,
- proname as target_name,
- mtype.typname as media_type,
- mtype.resolved_media_type
- from pg_proc proc
- join pg_namespace pro_sch on pro_sch.oid = proc.pronamespace
- join media_types mtype on proc.prorettype = mtype.oid
- join pg_namespace typ_sch on typ_sch.oid = mtype.typnamespace
- where
- proc.pronamespace = ANY($$1::regnamespace[]) and NOT proretset
- and prokind = 'f'|]
+ sql = encodeUtf8 $ unlines
+ [ "with"
+ , "all_relations as ("
+ , " select reltype"
+ , " from pg_class"
+ , " where relkind in ('v','r','m','f','p')"
+ , " union"
+ , " select oid"
+ , " from pg_type"
+ , " where typname = 'anyelement'"
+ , "),"
+ , "media_types as ("
+ , " SELECT"
+ , " t.oid,"
+ , " lower(t.typname) as typname,"
+ , " t.typnamespace,"
+ , " case t.typname"
+ , " when '*/*' then 'application/octet-stream'"
+ , " else t.typname"
+ , " end as resolved_media_type"
+ , " FROM pg_type t"
+ , " JOIN pg_type b ON t.typbasetype = b.oid"
+ , " WHERE"
+ , " t.typbasetype <> 0 and"
+ , " (t.typname ~* '^[A-Za-z0-9.-]+/[A-Za-z0-9.\\+-]+$' or t.typname = '*/*')"
+ , ")"
+ , "select"
+ , " proc_schema.nspname as handler_schema,"
+ , " proc.proname as handler_name,"
+ , " arg_schema.nspname::text as target_schema,"
+ , " arg_name.typname::text as target_name,"
+ , " media_types.typname as media_type,"
+ , " media_types.resolved_media_type"
+ , "from media_types"
+ , " join pg_proc proc on proc.prorettype = media_types.oid"
+ , " join pg_namespace proc_schema on proc_schema.oid = proc.pronamespace"
+ , " join pg_aggregate agg on agg.aggfnoid = proc.oid"
+ , " join pg_type arg_name on arg_name.oid = proc.proargtypes[0]"
+ , " join pg_namespace arg_schema on arg_schema.oid = arg_name.typnamespace"
+ , "where"
+ , " proc.pronamespace = ANY($1::regnamespace[]) and"
+ , " proc.pronargs = 1 and"
+ , " arg_name.oid in (select reltype from all_relations)"
+ , "union"
+ , "select"
+ , " typ_sch.nspname as handler_schema,"
+ , " mtype.typname as handler_name,"
+ , " pro_sch.nspname as target_schema,"
+ , " proname as target_name,"
+ , " mtype.typname as media_type,"
+ , " mtype.resolved_media_type"
+ , "from pg_proc proc"
+ , " join pg_namespace pro_sch on pro_sch.oid = proc.pronamespace"
+ , " join media_types mtype on proc.prorettype = mtype.oid"
+ , " join pg_namespace typ_sch on typ_sch.oid = mtype.typnamespace"
+ , "where"
+ , " proc.pronamespace = ANY($1::regnamespace[]) and NOT proretset"
+ , " and prokind = 'f'"
+ ]
decodeMediaHandlers :: HD.Result MediaHandlerMap
decodeMediaHandlers =
diff --git a/src/PostgREST/SchemaCache/Routine.hs b/src/PostgREST/SchemaCache/Routine.hs
index 248fb5682f..c0517d3808 100644
--- a/src/PostgREST/SchemaCache/Routine.hs
+++ b/src/PostgREST/SchemaCache/Routine.hs
@@ -14,7 +14,6 @@ module PostgREST.SchemaCache.Routine
, funcReturnsSingleComposite
, funcReturnsVoid
, funcTableName
- , funcReturnsCompositeAlias
, funcReturnsSingle
, MediaHandlerMap
, ResolvedHandler
@@ -127,12 +126,6 @@ funcReturnsSetOfScalar proc = case proc of
Function{pdReturnType = SetOf (Scalar{})} -> True
_ -> False
-funcReturnsCompositeAlias :: Routine -> Bool
-funcReturnsCompositeAlias proc = case proc of
- Function{pdReturnType = Single (Composite _ True)} -> True
- Function{pdReturnType = SetOf (Composite _ True)} -> True
- _ -> False
-
funcReturnsSingleComposite :: Routine -> Bool
funcReturnsSingleComposite proc = case proc of
Function{pdReturnType = Single (Composite _ _)} -> True
diff --git a/test/spec/Feature/Query/InsertSpec.hs b/test/spec/Feature/Query/InsertSpec.hs
index 0f6a1b6e7a..f6bbfd0645 100644
--- a/test/spec/Feature/Query/InsertSpec.hs
+++ b/test/spec/Feature/Query/InsertSpec.hs
@@ -11,8 +11,7 @@ import Test.Hspec.Wai
import Test.Hspec.Wai.JSON
import Text.Heredoc
-import PostgREST.Config.PgVersion (PgVersion, pgVersion130,
- pgVersion140)
+import PostgREST.Config.PgVersion (PgVersion, pgVersion140)
import Protolude hiding (get)
import SpecHelper
@@ -205,11 +204,7 @@ spec actualPgVersion = do
it "fails with 400 and error" $
post "/simple_pk" [json| { "extra":"foo"} |]
`shouldRespondWith`
- (if actualPgVersion >= pgVersion130 then
- [json|{"hint":null,"details":"Failing row contains (null, foo).","code":"23502","message":"null value in column \"k\" of relation \"simple_pk\" violates not-null constraint"}|]
- else
- [json|{"hint":null,"details":"Failing row contains (null, foo).","code":"23502","message":"null value in column \"k\" violates not-null constraint"}|]
- )
+ [json|{"hint":null,"details":"Failing row contains (null, foo).","code":"23502","message":"null value in column \"k\" of relation \"simple_pk\" violates not-null constraint"}|]
{ matchStatus = 400
, matchHeaders = [matchContentTypeJson]
}
diff --git a/test/spec/Feature/Query/PlanSpec.hs b/test/spec/Feature/Query/PlanSpec.hs
index 837332b21e..b478eb8bb8 100644
--- a/test/spec/Feature/Query/PlanSpec.hs
+++ b/test/spec/Feature/Query/PlanSpec.hs
@@ -15,8 +15,7 @@ import Test.Hspec hiding (pendingWith)
import Test.Hspec.Wai
import Test.Hspec.Wai.JSON
-import PostgREST.Config.PgVersion (PgVersion, pgVersion130,
- pgVersion170)
+import PostgREST.Config.PgVersion (PgVersion, pgVersion170)
import Protolude hiding (get)
import SpecHelper
@@ -49,27 +48,15 @@ spec actualPgVersion = do
resStatus `shouldBe` Status { statusCode = 200, statusMessage="OK" }
totalCost `shouldBe` 24.28
- it "outputs blocks info when using the buffers option" $
- if actualPgVersion >= pgVersion130
- then do
- r <- request methodGet "/projects" (acceptHdrs "application/vnd.pgrst.plan+json; options=buffers") ""
+ it "outputs blocks info when using the buffers option" $ do
+ r <- request methodGet "/projects" (acceptHdrs "application/vnd.pgrst.plan+json; options=buffers") ""
- let resBody = simpleBody r
- resHeaders = simpleHeaders r
-
- liftIO $ do
- resHeaders `shouldSatisfy` elem ("Content-Type", "application/vnd.pgrst.plan+json; for=\"application/json\"; options=buffers; charset=utf-8")
- resBody `shouldSatisfy` (\t -> T.isInfixOf "Shared Hit Blocks" (decodeUtf8 $ LBS.toStrict t))
- else do
- -- analyze is required for buffers on pg < 13
- r <- request methodGet "/projects" (acceptHdrs "application/vnd.pgrst.plan+json; options=analyze|buffers") ""
-
- let blocks = simpleBody r ^? nth 0 . key "Plan" . key "Shared Hit Blocks"
- resHeaders = simpleHeaders r
+ let resBody = simpleBody r
+ resHeaders = simpleHeaders r
- liftIO $ do
- resHeaders `shouldSatisfy` elem ("Content-Type", "application/vnd.pgrst.plan+json; for=\"application/json\"; options=analyze|buffers; charset=utf-8")
- blocks `shouldBe` Just [aesonQQ| 1.0 |]
+ liftIO $ do
+ resHeaders `shouldSatisfy` elem ("Content-Type", "application/vnd.pgrst.plan+json; for=\"application/json\"; options=buffers; charset=utf-8")
+ resBody `shouldSatisfy` (\t -> T.isInfixOf "Shared Hit Blocks" (decodeUtf8 $ LBS.toStrict t))
it "outputs the search path when using the settings option" $ do
r <- request methodGet "/projects" (acceptHdrs "application/vnd.pgrst.plan+json; options=settings") ""
@@ -86,16 +73,15 @@ spec actualPgVersion = do
}
|]
- when (actualPgVersion >= pgVersion130) $
- it "outputs WAL info when using the wal option" $ do
- r <- request methodGet "/projects" (acceptHdrs "application/vnd.pgrst.plan+json; options=analyze|wal") ""
+ it "outputs WAL info when using the wal option" $ do
+ r <- request methodGet "/projects" (acceptHdrs "application/vnd.pgrst.plan+json; options=analyze|wal") ""
- let walRecords = simpleBody r ^? nth 0 . key "Plan" . key "WAL Records"
- resHeaders = simpleHeaders r
+ let walRecords = simpleBody r ^? nth 0 . key "Plan" . key "WAL Records"
+ resHeaders = simpleHeaders r
- liftIO $ do
- resHeaders `shouldSatisfy` elem ("Content-Type", "application/vnd.pgrst.plan+json; for=\"application/json\"; options=analyze|wal; charset=utf-8")
- walRecords `shouldBe` Just [aesonQQ|0|]
+ liftIO $ do
+ resHeaders `shouldSatisfy` elem ("Content-Type", "application/vnd.pgrst.plan+json; for=\"application/json\"; options=analyze|wal; charset=utf-8")
+ walRecords `shouldBe` Just [aesonQQ|0|]
it "outputs columns info when using the verbose option" $ do
r <- request methodGet "/projects" (acceptHdrs "application/vnd.pgrst.plan+json; options=verbose") ""