diff --git a/.appveyor/appveyor_download_java.ps1 b/.appveyor/appveyor_download_java.ps1 new file mode 100644 index 00000000..18eb2272 --- /dev/null +++ b/.appveyor/appveyor_download_java.ps1 @@ -0,0 +1,20 @@ +Import-Module BitsTransfer +Install-Package -Force 7Zip4Powershell -ProviderName PowerShellGet +if ($Env:JDK -eq 9) +{ + $url = "https://download.java.net/java/GA/jdk9/9.0.4/binaries/openjdk-9.0.4_windows-x64_bin.tar.gz" + $out = "C:\Program Files\Java\jdk9" + Start-BitsTransfer -Source $url -Destination "$out.tar.gz" + Expand-7Zip "$out.tar.gz" "$out" + Expand-7Zip "$out\jdk9.tar" "$out" + Move-Item "$out\jdk-9.0.4\*" "$out" +} +if ($Env:JDK -eq 10) +{ + $url = "https://download.java.net/java/GA/jdk10/10/binaries/openjdk-10_windows-x64_bin.tar.gz" + $out = "C:\Program Files\Java\jdk10" + Start-BitsTransfer -Source $url -Destination "$out.tar.gz" + Expand-7Zip "$out.tar.gz" "$out" + Expand-7Zip "$out\jdk10.tar" "$out" + Move-Item "$out\jdk-10\*" "$out" +} \ No newline at end of file diff --git a/.appveyor/appveyor_mingw.sh b/.appveyor/appveyor_mingw.sh new file mode 100644 index 00000000..47099a14 --- /dev/null +++ b/.appveyor/appveyor_mingw.sh @@ -0,0 +1,14 @@ +set -e +JAVA_HOME="C:\Program Files\Java\jdk$1" +libjvm="$JAVA_HOME\bin\server\jvm.dll" +PATH=$JAVA_HOME/bin:$PATH +javac -version +pacman -S mingw-w64-x86_64-postgresql --noconfirm +pgConfig='C:\msys64\mingw64\bin\pg_config' +"$pgConfig" +cd /c/projects/pljava +mvn clean install \ + -Dpgsql.pgconfig="$pgConfig" \ + -Dpljava.libjvmdefault="$libjvm" \ + -Psaxon-examples -Ppgjdbc-ng --batch-mode \ + -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn diff --git a/.appveyor/appveyor_mingw_or_msvc.bat b/.appveyor/appveyor_mingw_or_msvc.bat new file mode 100644 index 00000000..bb332e13 --- /dev/null +++ b/.appveyor/appveyor_mingw_or_msvc.bat @@ -0,0 +1,22 @@ +REM a bat file because PowerShell makes a mess of stderr output, and a multiline +REM command intended for CMD in appveyor.yml gets broken up. 
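+REM (Assumed usage note: the script expects SYS, JDK, PG and JAVA_HOME in the
+REM environment; appveyor.yml supplies them via its matrix and before_build.)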
+ +IF %SYS%==MINGW ( + set pgConfig=C:\msys64\mingw64\bin\pg_config +) ELSE ( + set pgConfig=%ProgramFiles%\PostgreSQL\%PG%\bin\pg_config + set libjvm=%JAVA_HOME%/bin/server/jvm.dll +) + +IF %SYS%==MINGW ( + C:\msys64\usr\bin\env MSYSTEM=MINGW64 ^ + C:\msys64\usr\bin\bash -l ^ + -c "/c/projects/pljava/.appveyor/appveyor_mingw.sh %JDK%" +) ELSE ( + "%pgConfig%" + mvn clean install ^ + -Dpgsql.pgconfig="%pgConfig%" ^ + -Dpljava.libjvmdefault="%libjvm%" ^ + -Psaxon-examples -Ppgjdbc-ng --batch-mode ^ + -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn +) diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..2a8504ea --- /dev/null +++ b/.editorconfig @@ -0,0 +1,15 @@ +root = true + +[*.{c,h,java,xml,ddr,project}] +indent_style = tab +indent_size = tab +tab_width = 4 + +[*.css] +indent_style = space +indent_size = 2 + +# Imported from another project with different indentation +[JarX.java] +indent_style = space +indent_size = 2 diff --git a/.github/workflows/cloudberry-ci.yml b/.github/workflows/cloudberry-ci.yml new file mode 100644 index 00000000..9c7d1233 --- /dev/null +++ b/.github/workflows/cloudberry-ci.yml @@ -0,0 +1,130 @@ +name: Cloudberry PL/Java CI + +on: + pull_request: + branches: [ main ] + types: [opened, synchronize, reopened, edited] + push: + branches: [ main ] + workflow_dispatch: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + CLOUDBERRY_REPO: apache/cloudberry + CLOUDBERRY_REF: main + COMPOSE_FILE: pljava/concourse/docker/ubuntu22.04/docker-compose.yml + CONTAINER_NAME: cbdb-pljava + +jobs: + ci: + name: Build + Smoke (Docker) + runs-on: ubuntu-latest + timeout-minutes: 180 + steps: + - name: Free disk space + run: | + sudo rm -rf /usr/share/dotnet || true + sudo rm -rf /opt/ghc || true + sudo rm -rf /usr/local/share/boost || true + sudo rm -rf /usr/local/lib/android || true + sudo rm -rf /opt/hostedtoolcache || true + sudo docker system prune -af || true + df -h + + - name: Checkout PL/Java (this repo) + uses: actions/checkout@v4 + with: + fetch-depth: 1 + path: pljava + + - name: Checkout Cloudberry source + uses: actions/checkout@v4 + with: + repository: ${{ env.CLOUDBERRY_REPO }} + ref: ${{ env.CLOUDBERRY_REF }} + path: cloudberry + submodules: true + + - name: Build and start dev container + run: | + docker compose -f ${{ env.COMPOSE_FILE }} down -v || true + docker compose -f ${{ env.COMPOSE_FILE }} up -d + docker exec ${{ env.CONTAINER_NAME }} sudo find /home/gpadmin/workspace/cloudberry -path /home/gpadmin/workspace/cloudberry/.git -prune -o -exec chown gpadmin:gpadmin {} + + docker exec ${{ env.CONTAINER_NAME }} sudo find /home/gpadmin/workspace/pljava -path /home/gpadmin/workspace/pljava/.git -prune -o -exec chown gpadmin:gpadmin {} + + + - name: Build Cloudberry + PL/Java and run tests (installcheck) + run: | + docker exec ${{ env.CONTAINER_NAME }} bash -lc "bash /home/gpadmin/workspace/pljava/concourse/docker/ubuntu22.04/scripts/entrypoint.sh" + + - name: Collect logs and results + if: always() + run: | + mkdir -p artifacts + docker logs ${{ env.CONTAINER_NAME }} > artifacts/cbdb-pljava.log 2>&1 || true + docker exec ${{ env.CONTAINER_NAME }} bash -lc "source /usr/local/cloudberry-db/cloudberry-env.sh && source /home/gpadmin/workspace/cloudberry/gpAux/gpdemo/gpdemo-env.sh && gpstate -s" > artifacts/gpstate.txt 2>&1 || true + docker exec ${{ env.CONTAINER_NAME }} bash -lc "source 
/usr/local/cloudberry-db/cloudberry-env.sh && source /home/gpadmin/workspace/cloudberry/gpAux/gpdemo/gpdemo-env.sh && psql -d template1 -c 'select version()'" > artifacts/version.txt 2>&1 || true + + # Regression outputs (if present) + if [ -d pljava/gpdb/tests/results ]; then + tar czf artifacts/pljava-regress-results.tar.gz -C pljava gpdb/tests/results || true + fi + ls -la pljava/gpdb/tests/regression.* 2>/dev/null || true + cp -v pljava/gpdb/tests/regression.* artifacts/ 2>/dev/null || true + + # Cloudberry demo cluster logs (if present) + if [ -d cloudberry/gpAux/gpdemo ]; then + find cloudberry/gpAux/gpdemo -maxdepth 4 -type d \( -name pg_log -o -name log \) -print 2>/dev/null || true + tar czf artifacts/cloudberry-demo-logs.tar.gz -C cloudberry gpAux/gpdemo 2>/dev/null || true + fi + + - name: Summarize installcheck + if: always() + run: | + LOG="pljava/gpdb/tests/results/installcheck.log" + { + echo "## PL/Java installcheck" + if [ -f "$LOG" ]; then + echo "" + echo "- Log: \`$LOG\`" + echo "- Test results dir: \`pljava/gpdb/tests/results/\`" + echo "" + TOTAL=$(grep -E "^[[:space:]]*test .* \\.\\.\\." -c "$LOG" || true) + OK=$(grep -E "^[[:space:]]*test .* \\.\\.\\. ok" -c "$LOG" || true) + FAILED=$(grep -E "^[[:space:]]*test .* \\.\\.\\. FAILED" -c "$LOG" || true) + echo "- Total: $TOTAL" + echo "- ok: $OK" + echo "- FAILED: $FAILED" + echo "" + echo "Last 50 lines:" + echo "" + echo '```' + tail -n 50 "$LOG" || true + echo '```' + else + echo "" + echo "- No \`installcheck.log\` found (build may have failed early)." + fi + } >> "$GITHUB_STEP_SUMMARY" + + - name: Upload artifacts + if: always() + uses: actions/upload-artifact@v4 + with: + name: logs-and-results + path: | + artifacts/** + pljava/gpdb/tests/results/** + pljava/gpdb/tests/regression.* + if-no-files-found: ignore + retention-days: 7 + + - name: Cleanup + if: always() + run: | + docker compose -f ${{ env.COMPOSE_FILE }} down -v || true diff --git a/.gitignore b/.gitignore index aabfe1fb..9c1f645e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ /.classpath /.project /.settings +.idea/ target/site target/staging target/javadoc-bundle-options @@ -10,4 +11,9 @@ pljava-so/target/ target/ *.so *.zip -concourse/secrets/ \ No newline at end of file +concourse/secrets/ +nar-maven-plugin/ +nar-maven-plugin.tar.gz +gpdb/tests/results/ +gpdb/tests/regression.* +artifacts/ diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..9af39d68 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,156 @@ +if: false +language: minimal +os: + - linux +arch: + - amd64 + - ppc64le +dist: bionic +env: + - POSTGRESQL_VERSION: 13 + JAVA_VERSION: 15 + JVM_IMPL: hotspot + MVN_VERSION: 3.5.2 + - POSTGRESQL_VERSION: 12 + JAVA_VERSION: 14 + JVM_IMPL: hotspot + MVN_VERSION: 3.5.2 + - POSTGRESQL_VERSION: 12 + JAVA_VERSION: 14 + JVM_IMPL: openj9 + MVN_VERSION: 3.6.3 + - POSTGRESQL_VERSION: 12 + JAVA_VERSION: 11 + JVM_IMPL: hotspot + MVN_VERSION: 3.6.3 + - POSTGRESQL_VERSION: 12 + JAVA_VERSION: 9 + JVM_IMPL: hotspot + MVN_VERSION: 3.6.3 + - POSTGRESQL_VERSION: 10 + JAVA_VERSION: 14 + JVM_IMPL: hotspot + MVN_VERSION: 3.6.3 + - POSTGRESQL_VERSION: 9.5 + JAVA_VERSION: 14 + JVM_IMPL: hotspot + MVN_VERSION: 3.6.3 + +jobs: + exclude: + - arch: ppc64le + env: + POSTGRESQL_VERSION: 12 + JAVA_VERSION: 9 + JVM_IMPL: hotspot + MVN_VERSION: 3.6.3 + include: + - os: osx + osx_image: xcode11 + arch: amd64 + env: + - POSTGRESQL_VERSION: 11 + JAVA_VERSION: 14 + JVM_IMPL: hotspot + MVN_VERSION: 3.6.3 + - os: osx + osx_image: xcode11 + arch: amd64 + env: 
+ - POSTGRESQL_VERSION: 10 + JAVA_VERSION: 14 + JVM_IMPL: hotspot + MVN_VERSION: 3.6.3 + - os: osx + osx_image: xcode11 + arch: amd64 + env: + - POSTGRESQL_VERSION: 9.5 + JAVA_VERSION: 14 + JVM_IMPL: hotspot + MVN_VERSION: 3.6.3 + +cache: + directories: + - $HOME/.m2 + +before_install: | + javaUrl=https://api.adoptopenjdk.net/v3/binary/latest + javaUrl="$javaUrl/$JAVA_VERSION/ga/${TRAVIS_OS_NAME//osx/mac}" + javaUrl="$javaUrl/${TRAVIS_CPU_ARCH//amd64/x64}/jdk" + javaUrl="$javaUrl/$JVM_IMPL/normal/adoptopenjdk" + + installJdk=$(false && which install-jdk.sh) || { + wget https://raw.githubusercontent.com/sormuras/bach/8c457fd6e46bd9f3f575867dd0c9af1d7edfd5b4/install-jdk.sh + installJdk=./install-jdk.sh + + printf '%s\n%s\n%s\n%s\n%s\n' \ + '--- install-jdk.sh' \ + '+++ install-jdk.sh' \ + '@@ -257 +257 @@' \ + '- target="${workspace}"/$(tar --list ${tar_options} | head -2 | tail -1 | cut -f 2 -d '"'/' -)/Contents/Home" \ + '+ target="${workspace}"/$(tar --list ${tar_options} | sed -n '"'/\/bin\/javac/s///p')" \ + | patch "$installJdk" + } + + [[ $JAVA_VERSION == 9 ]] && certs=--cacerts || unset certs + + . "$installJdk" --url "$javaUrl" ${certs+"$certs"} + + mvnUrl=https://archive.apache.org/dist/maven/maven-3 + mvnUrl="$mvnUrl/$MVN_VERSION/binaries/apache-maven-$MVN_VERSION-bin.tar.gz" + + wget --no-verbose "$mvnUrl" && tar xzf "apache-maven-$MVN_VERSION-bin.tar.gz" + mvn="./apache-maven-$MVN_VERSION/bin/mvn" + "$mvn" --version + + . .travis/travis_install_postgresql.sh + +install: | + "$pgConfig" + + if [ "$TRAVIS_OS_NAME" = "osx" ]; then + libjvm_name="libjli.dylib" + else + libjvm_name="libjvm.so" + fi + libjvm=$(find "$JAVA_HOME" -mindepth 2 -name $libjvm_name | head -n 1) + + "$mvn" clean install --batch-mode \ + -Dpgsql.pgconfig="$pgConfig" \ + -Dpljava.libjvmdefault="$libjvm" \ + -Psaxon-examples -Ppgjdbc \ + -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn + +script: | + packageJar=$(find pljava-packaging -name pljava-pg*.jar -print) + + mavenRepo="$HOME/.m2/repository" + + saxonVer=$( + find "$mavenRepo/net/sf/saxon/Saxon-HE" -name 'Saxon-HE-*.jar' -print | + sort | + tail -n 1 + ) + saxonVer=${saxonVer%/*} + saxonVer=${saxonVer##*/} + + jdbcJar=$( + find "$mavenRepo/org/postgresql/postgresql" \ + -name 'postgresql-*.jar' -print | + sort | + tail -n 1 + ) + + sudo "$JAVA_HOME"/bin/java -Dpgconfig="$pgConfig" -jar "$packageJar" + + "$JAVA_HOME"/bin/jshell \ + -execution local \ + "-J--class-path=$packageJar:$jdbcJar" \ + "--class-path=$packageJar" \ + "-J--add-modules=java.sql.rowset" \ + "-J-Dpgconfig=$pgConfig" \ + "-J-DmavenRepo=$mavenRepo" \ + "-J-DsaxonVer=$saxonVer" \ + CI/integration + : travis wanted something here at the end once diff --git a/.travis/travis_install_postgresql.sh b/.travis/travis_install_postgresql.sh new file mode 100755 index 00000000..42b4442f --- /dev/null +++ b/.travis/travis_install_postgresql.sh @@ -0,0 +1,43 @@ +if [ "$TRAVIS_OS_NAME" = "osx" ]; then + HOMEBREW_NO_AUTO_UPDATE=1 + export HOMEBREW_NO_AUTO_UPDATE + + brew uninstall postgis postgresql + + if [ "$POSTGRESQL_VERSION" = "12" ]; then + unset POSTGRESQL_VERSION + else + POSTGRESQL_VERSION="@$POSTGRESQL_VERSION" + fi + brew install "postgresql${POSTGRESQL_VERSION}" + + pgConfig="/usr/local/opt/postgresql${POSTGRESQL_VERSION}/bin/pg_config" +else + sudo sh -c 'service postgresql stop || true' + sudo apt-get -qq remove postgresql libpq-dev libpq5 postgresql-client-common postgresql-common --purge + if [ "$POSTGRESQL_VERSION" = "SOURCE" ]; then + sudo 
apt-get -qq install build-essential libreadline-dev zlib1g-dev flex bison libxml2-dev libxslt-dev libssl-dev libxml2-utils xsltproc + + git clone git://git.postgresql.org/git/postgresql.git ../postgresql + cd ../postgresql + git checkout REL_12_STABLE + + ./configure --with-libxml --enable-cassert --enable-debug CFLAGS='-ggdb -Og -g3 -fno-omit-frame-pointer' --quiet + make --silent && sudo make install + + cd contrib + make --silent && sudo make install + + pgConfig="/usr/local/pgsql/bin/pg_config" + + cd ../../pljava + else + . /etc/lsb-release + echo "deb http://apt.postgresql.org/pub/repos/apt/ $DISTRIB_CODENAME-pgdg main ${POSTGRESQL_VERSION}" > ../pgdg.list + sudo mv ../pgdg.list /etc/apt/sources.list.d/ + wget --quiet -O - https://apt.postgresql.org/pub/repos/apt/ACCC4CF8.asc | sudo apt-key add - + sudo apt-get -qq update + sudo apt-get -qq install "postgresql-${POSTGRESQL_VERSION}" "postgresql-server-dev-${POSTGRESQL_VERSION}" libecpg-dev libkrb5-dev + pgConfig=pg_config + fi +fi diff --git a/.travis/travis_test_pljava.sh b/.travis/travis_test_pljava.sh new file mode 100755 index 00000000..ec69ee8c --- /dev/null +++ b/.travis/travis_test_pljava.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +saxon_jar_name=$(find "${HOME}/.m2/repository/net/sf/saxon/Saxon-HE/" -name "Saxon-HE-*.jar" | head -n 1) +saxon_jar="file:${saxon_jar_name}" +examples_jar_name=$(find pljava-examples -name "pljava-examples*.jar") +examples_jar="file:${PWD}/${examples_jar_name}" + +if [ "$TRAVIS_OS_NAME" = "osx" ]; then + printf '%s\n' "SELECT sqlj.install_jar(:'path','saxon',true);" | psql -v path="$saxon_jar" -U postgres + psql -c "SELECT sqlj.set_classpath('public', 'saxon');" -U postgres + printf '%s\n' "SELECT sqlj.install_jar(:'path','examples',true);" | psql -v path="$examples_jar" -U postgres + psql -c "SELECT sqlj.get_classpath('javatest');" -U postgres + psql -c "SELECT sqlj.set_classpath('javatest', 'examples');" -U postgres + psql -c "SELECT javatest.java_addone(3);" -U postgres +elif [ "$POSTGRESQL_VERSION" = "SOURCE" ]; then + sudo setfacl -m u:postgres:rwx /home/travis/.m2/ + printf '%s\n' "SELECT sqlj.install_jar(:'path','saxon',true);" | sudo -u postgres /usr/local/pgsql/bin/psql -v path="$saxon_jar" -U postgres + sudo -u postgres /usr/local/pgsql/bin/psql -c "SELECT sqlj.set_classpath('public', 'saxon');" -U postgres + printf '%s\n' "SELECT sqlj.install_jar(:'path','examples',true);" | sudo -u postgres /usr/local/pgsql/bin/psql -v path="$examples_jar" -U postgres + sudo -u postgres /usr/local/pgsql/bin/psql -c "SELECT sqlj.get_classpath('javatest');" -U postgres + sudo -u postgres /usr/local/pgsql/bin/psql -c "SELECT sqlj.set_classpath('javatest', 'examples');" -U postgres + sudo -u postgres /usr/local/pgsql/bin/psql -c "SELECT javatest.java_addone(3);" -U postgres +else + sudo setfacl -m u:postgres:rwx /home/travis/.m2/ + printf '%s\n' "SELECT sqlj.install_jar(:'path','saxon',true);" | sudo -u postgres psql -v path="$saxon_jar" -U postgres + sudo -u postgres psql -c "SELECT sqlj.set_classpath('public', 'saxon');" -U postgres + printf '%s\n' "SELECT sqlj.install_jar(:'path','examples',true);" | sudo -u postgres psql -v path="$examples_jar" -U postgres 2> test.log + grep -w "WARNING" test.log + grep -w "ERROR" test.log + sudo -u postgres psql -c "SELECT sqlj.get_classpath('javatest');" -U postgres + sudo -u postgres psql -c "SELECT sqlj.set_classpath('javatest', 'examples');" -U postgres + sudo -u postgres psql -c "SELECT javatest.java_addone(3);" -U postgres +fi diff --git a/CI/integration 
b/CI/integration
new file mode 100644
index 00000000..fd8da767
--- /dev/null
+++ b/CI/integration
@@ -0,0 +1,614 @@
+/*
+ * Copyright (c) 2020-2025 Tada AB and other contributors, as listed below.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the The BSD 3-Clause License
+ * which accompanies this distribution, and is available at
+ * http://opensource.org/licenses/BSD-3-Clause
+ *
+ * Contributors:
+ *   Chapman Flack
+ *   Kartik Ohri
+ *
+ * This jshell script performs basic integration tests for PL/Java's CI.
+ *
+ * It must be executed with the built PL/Java packaged jar (produced by the
+ * pljava-packaging subproject) on the classpath, as well as a PGJDBC or
+ * pgjdbc-ng full jar. The PL/Java packaged jar includes a Node.class
+ * exporting functions not unlike the Perl module once called PostgresNode
+ * (and now called PostgreSQL::Test::Cluster) in the PostgreSQL distribution.
+ * The javadocs for Node.class explain the available functions.
+ *
+ * When jshell runs this script with -execution local, it needs both a
+ * --class-path and a -J--class-path argument. The former need only contain
+ * the PL/Java jar itself, so the contents are visible to jshell. The -J version
+ * passed to the underlying JVM needs both that jar and the PGJDBC or pgjdbc-ng
+ * driver jar. The driver classes need not be visible to jshell, but the JVM
+ * must be able to find them.
+ *
+ * Tests included in this script require
+ *   -J--add-modules=java.sql.rowset,jdk.httpserver
+ * on the jshell command line.
+ *
+ * These Java properties must be set (as with -J-Dpgconfig=...) on the jshell
+ * command line:
+ *
+ * pgconfig
+ *   the path to the pg_config executable that will be used to locate
+ *   the PostgreSQL installation to be used in the tests
+ * mavenRepo
+ *   the topmost directory of the local Maven repository. The Saxon jar
+ *   downloaded as a dependency (when -Psaxon-examples was used on the mvn
+ *   command line for building) will be found in this repository
+ * saxonVer
+ *   the version of the Saxon library to use (appears in the library jar
+ *   file name and as the name of its containing directory in the repository)
+ *
+ * These properties are optional (their absence is equivalent to a setting
+ * of false):
+ *
+ * redirectError
+ *   if true, the standard error stream from the tests will be merged into
+ *   the standard output stream. This can be desirable if this script is
+ *   invoked from Windows PowerShell, which believes a standard error stream
+ *   should only carry Error Records and makes an awful mess of anything else.
+ * extractFiles
+ *   if true, begin by extracting and installing the PL/Java files from the jar
+ *   into the proper locations indicated by the pg_config executable. If false,
+ *   extraction will be skipped, assumed to have been done in a separate step
+ *   simply running java -jar on the PL/Java packaged jar. Doing the extraction
+ *   here can be useful, if this script is run with the needed permissions to
+ *   write in the PostgreSQL install locations, when combined with redirectError
+ *   if running under PowerShell, which would otherwise mess up the output.
+ *
+ * The script does not (yet) produce output in any standardized format such as
+ * TAP. The output will include numerous info, warning, error, or ng
+ * elements. If it runs to completion there will be a line with counts
+ * for info, warning, error, and ng. The count of ng results includes errors
+ * and certain warnings.
+ * The tests that are run from the deployment descriptor of the pljava-examples
+ * jar report test failures as warnings (to avoid cutting short the test as an
+ * error would), so those warnings are counted in ng.
+ *
+ * jshell will exit with a nonzero status if ng > 0 or anything else was seen
+ * to go wrong or the script did not run to completion.
+ */
+boolean succeeding = false; // begin pessimistic
+
+boolean redirectError = Boolean.getBoolean("redirectError");
+
+if ( redirectError )
+	System.setErr(System.out); // PowerShell makes a mess of stderr output
+
+UnaryOperator<ProcessBuilder> tweaks =
+	redirectError ? p -> p.redirectErrorStream(true) : UnaryOperator.identity();
+
+import static java.nio.file.Files.createTempFile;
+import static java.nio.file.Files.write;
+import java.nio.file.Path;
+import static java.nio.file.Paths.get;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import org.postgresql.pljava.packaging.Node;
+import static org.postgresql.pljava.packaging.Node.q;
+import static org.postgresql.pljava.packaging.Node.stateMachine;
+import static org.postgresql.pljava.packaging.Node.isVoidResultSet;
+import static org.postgresql.pljava.packaging.Node.s_isWindows;
+import static
+	org.postgresql.pljava.packaging.Node.NOTHING_OR_PGJDBC_ZERO_COUNT;
+/*
+ * Imports that will be needed to serve a jar file over http
+ * when the time comes for testing that.
+ */
+import static java.nio.charset.StandardCharsets.UTF_8;
+import java.util.jar.Attributes;
+import java.util.jar.Manifest;
+import java.util.jar.JarOutputStream;
+import java.util.zip.ZipEntry;
+import com.sun.net.httpserver.BasicAuthenticator;
+import com.sun.net.httpserver.HttpContext;
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import com.sun.net.httpserver.HttpServer;
+
+if ( Boolean.getBoolean("extractFiles") )
+	Node.main(new String[0]); // extract the files
+
+String javaHome = System.getProperty("java.home");
+
+Path javaLibDir = get(javaHome, s_isWindows ? "bin" : "lib");
+
+Path libjvm = (
+	"Mac OS X".equals(System.getProperty("os.name"))
+	? Stream.of("libjli.dylib", "jli/libjli.dylib")
+		.map(s -> javaLibDir.resolve(s))
+		.filter(Files::exists).findFirst().get()
+	: javaLibDir.resolve(s_isWindows ? "server\\jvm.dll" : "server/libjvm.so")
+);
+
+// Use deprecated major() here because feature() first appears in Java 10
+int jFeatureVersion = Runtime.version().major();
+
+String vmopts = "-enableassertions:org.postgresql.pljava... -Xcheck:jni";
+
+vmopts += " --limit-modules=org.postgresql.pljava.internal";
+
+if ( 24 <= jFeatureVersion ) {
+	vmopts += " -Djava.security.manager=disallow"; // JEP 486
+} else if ( 18 <= jFeatureVersion )
+	vmopts += " -Djava.security.manager=allow"; // JEP 411
+
+if ( 23 <= jFeatureVersion )
+	vmopts += " --sun-misc-unsafe-memory-access=deny"; // JEP 471
+
+if ( 24 <= jFeatureVersion )
+	vmopts += " --illegal-native-access=deny"; // JEP 472
+
+Map<String,String> serverOptions = new HashMap<>(Map.of(
+	"client_min_messages", "info",
+	"pljava.vmoptions", vmopts,
+	"pljava.libjvm_location", libjvm.toString()
+));
+if ( 24 <= jFeatureVersion ) {
+	serverOptions.put("pljava.allow_unenforced", "java,java_tzset");
+	serverOptions.put("pljava.allow_unenforced_udt", "on");
+}
+
+Node n1 = Node.get_new_node("TestNode1");
+
+if ( s_isWindows )
+	n1.use_pg_ctl(true);
+
+/*
+ * Keep a tally of the three types of diagnostic notices that may be
+ * received, and, independently, how many represent no-good test results
+ * (error always, but also warning if seen from the tests in the
+ * examples.jar deployment descriptor).
+ */
+Map<String,Integer> results =
+	Stream.of("info", "warning", "error", "ng").collect(
+		LinkedHashMap::new,
+		(m,k) -> m.put(k, 0), (r,s) -> {});
+
+boolean isDiagnostic(Object o, Set<String> whatIsNG)
+{
+	if ( ! ( o instanceof Throwable ) )
+		return false;
+	String[] parts = Node.classify((Throwable)o);
+	String type = parts[0];
+	String message = parts[2];
+	results.compute(type, (k,v) -> 1 + v);
+	if ( whatIsNG.contains(type) )
+		if ( ! "warning".equals(type) || ! message.startsWith("[JEP 411]") )
+			results.compute("ng", (k,v) -> 1 + v);
+	return true;
+}
+
+/*
+ * Write a trial policy into a temporary file in n's data_dir,
+ * and set pljava.vmoptions accordingly over connection c.
+ * Returns the 'succeeding' flag from the state machine looking
+ * at the command results.
+ */
+boolean useTrialPolicy(Node n, Connection c, List<String> contents)
+throws Exception
+{
+	Path trialPolicy =
+		createTempFile(n.data_dir().getParent(), "trial", "policy");
+
+	write(trialPolicy, contents);
+
+	PreparedStatement setVmOpts = c.prepareStatement(
+		"SELECT null::pg_catalog.void" +
+		" FROM pg_catalog.set_config('pljava.vmoptions', ?, false)"
+	);
+
+	setVmOpts.setString(1, vmopts +
+		" -Dorg.postgresql.pljava.policy.trial=" + trialPolicy.toUri());
+
+	return stateMachine(
+		"change pljava.vmoptions",
+		null,
+
+		q(setVmOpts, setVmOpts::execute)
+		.flatMap(Node::semiFlattenDiagnostics)
+		.peek(Node::peek),
+
+		(o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2,
+		(o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false,
+		(o,p,q) -> null == o
+	);
+}
+
+try (
+	AutoCloseable t1 = n1.initialized_cluster(tweaks);
+	AutoCloseable t2 = n1.started_server(serverOptions, tweaks);
+)
+{
+	int pgMajorVersion;
+
+	try ( Connection c = n1.connect() )
+	{
+		pgMajorVersion = c.getMetaData().getDatabaseMajorVersion();
+
+		succeeding = true; // become optimistic, will be using &= below
+
+		succeeding &= stateMachine(
+			"create extension no result",
+			null,
+
+			q(c, "CREATE EXTENSION pljava")
+			.flatMap(Node::semiFlattenDiagnostics)
+			.peek(Node::peek),
+
+			// state 1: consume any diagnostics, or to state 2 with same item
+			(o,p,q) -> isDiagnostic(o, Set.of("error")) ?
1 : -2, + + NOTHING_OR_PGJDBC_ZERO_COUNT, // state 2 + + // state 3: must be end of input + (o,p,q) -> null == o + ); + } + + /* + * Get a new connection; 'create extension' always sets a near-silent + * logging level, and PL/Java only checks once at VM start time, so in + * the same session where 'create extension' was done, logging is + * somewhat suppressed. + */ + try ( Connection c = n1.connect() ) + { + succeeding &= stateMachine( + "saxon path examples path", + null, + + Node.installSaxonAndExamplesAndPath(c, + System.getProperty("mavenRepo"), + System.getProperty("saxonVer"), + true) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + // states 1,2: diagnostics* then a void result set (saxon install) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + + // states 3,4: diagnostics* then a void result set (set classpath) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 3 : -4, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 5 : false, + + // states 5,6: diagnostics* then void result set (example install) + (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 5 : -6, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 7 : false, + + // states 7,8: diagnostics* then a void result set (set classpath) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 7 : -8, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 9 : false, + + // state 9: must be end of input + (o,p,q) -> null == o + ); + + /* + * Exercise TrialPolicy some. Need another connection to change + * vmoptions. Uses some example functions, so insert here before the + * test of undeploying the examples. + */ + try ( Connection c2 = n1.connect() ) + { + succeeding &= useTrialPolicy(n1, c2, List.of( + "grant {", + " permission", + " org.postgresql.pljava.policy.TrialPolicy$Permission;", + "};" + )); + + PreparedStatement tryForbiddenRead = c2.prepareStatement( + "SELECT" + + " CASE WHEN javatest.java_getsystemproperty('java.home')" + + " OPERATOR(pg_catalog.=) ?" + + " THEN javatest.logmessage('INFO', 'trial policy test ok')" + + " ELSE javatest.logmessage('WARNING', 'trial policy test ng')" + + " END" + ); + + tryForbiddenRead.setString(1, javaHome); + + succeeding &= stateMachine( + "try to read a forbidden property", + null, + + q(tryForbiddenRead, tryForbiddenRead::execute) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + (o,p,q) -> null == o + ); + // done with connection c2 + } + + /* + * Spin up an http server with a little jar file to serve, and test + * that install_jar works with an http: url. + * + * First make a little jar empty but for a deployment descriptor. + */ + String ddrName = "foo.ddr"; + Attributes a = new Attributes(); + a.putValue("SQLJDeploymentDescriptor", "TRUE"); + Manifest m = new Manifest(); + m.getEntries().put(ddrName, a); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + JarOutputStream jos = new JarOutputStream(baos, m); + jos.putNextEntry(new ZipEntry(ddrName)); + jos.write( + ( + "SQLActions[]={\n\"BEGIN INSTALL\n" + + "SELECT javatest.logmessage('INFO'," + + " 'jar installed from http');\n" + + "END INSTALL\",\n\"BEGIN REMOVE\n" + + "BEGIN dummy\n" + + "END dummy;\n" + + "END REMOVE\"\n}\n" + ).getBytes(UTF_8) + ); + jos.closeEntry(); + jos.close(); + byte[] jar = baos.toByteArray(); + + /* + * Now an http server. 
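+	 * It serves the little jar built above behind HTTP basic auth, so the
+	 * install_jar call below exercises an http: URL carrying credentials.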
+	 */
+	HttpServer hs =
+		HttpServer.create(new InetSocketAddress("localhost", 0), 0);
+
+	try (
+		Connection c2 = n1.connect();
+		AutoCloseable t = ((Supplier<AutoCloseable>)() ->
+			{
+				hs.start();
+				return () -> hs.stop(0);
+			}
+		).get()
+	)
+	{
+		InetSocketAddress addr = hs.getAddress();
+
+		String id = "bar", pw = "baz";
+
+		URL u = new URI(
+			"http", id+':'+pw, addr.getHostString(), addr.getPort(),
+			"/foo.jar", null, null
+		).toURL();
+
+		HttpContext hc = hs.createContext(
+			u.getPath(),
+			new HttpHandler()
+			{
+				@Override
+				public void handle(HttpExchange t) throws IOException
+				{
+					try ( InputStream is = t.getRequestBody() ) {
+						is.readAllBytes();
+					}
+					t.getResponseHeaders().add(
+						"Content-Type", "application/java-archive");
+					t.sendResponseHeaders(200, jar.length);
+					try ( OutputStream os = t.getResponseBody() ) {
+						os.write(jar);
+					}
+				}
+			}
+		);
+
+		hc.setAuthenticator(
+			new BasicAuthenticator("CI realm")
+			// ("CI realm", UTF_8) only available in Java 14 or later
+			{
+				@Override
+				public boolean checkCredentials(String c_id, String c_pw)
+				{
+					return id.equals(c_id) && pw.equals(c_pw);
+				}
+			}
+		);
+
+		succeeding &= useTrialPolicy(n1, c2, List.of(
+			"grant codebase \"${org.postgresql.pljava.codesource}\" {",
+			"  permission",
+			"    java.net.URLPermission \"http:*\", \"GET:Accept\";",
+			"};"
+		));
+
+		succeeding &= stateMachine(
+			"install a jar over http",
+			null,
+
+			Node.installJar(c2, u.toString(), "foo", true)
+			.flatMap(Node::semiFlattenDiagnostics)
+			.peek(Node::peek),
+
+			(o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 1 : -2,
+			(o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false,
+			(o,p,q) -> null == o
+		);
+
+		// done with connection c2 again, and the http server
+	}
+
+	/*
+	 * Also confirm that the generated undeploy actions work.
+	 */
+	succeeding &= stateMachine(
+		"remove jar void result",
+		null,
+
+		q(c, "SELECT sqlj.remove_jar('examples', true)")
+		.flatMap(Node::semiFlattenDiagnostics)
+		.peek(Node::peek),
+
+		(o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2,
+		(o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false,
+		(o,p,q) -> null == o
+	);
+
+	/*
+	 * Get another new connection and make sure the extension can be
+	 * loaded in a non-superuser session.
+	 */
+	try ( Connection c2 = n1.connect() )
+	{
+		succeeding &= stateMachine(
+			"become non-superuser",
+			null,
+
+			q(c2,
+				"CREATE ROLE alice;" +
+				"GRANT USAGE ON SCHEMA sqlj TO alice;" +
+				"SET SESSION AUTHORIZATION alice")
+			.flatMap(Node::semiFlattenDiagnostics)
+			.peek(Node::peek),
+
+			(o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2,
+			NOTHING_OR_PGJDBC_ZERO_COUNT,
+			NOTHING_OR_PGJDBC_ZERO_COUNT,
+			NOTHING_OR_PGJDBC_ZERO_COUNT,
+			(o,p,q) -> null == o
+		);
+
+		succeeding &= stateMachine(
+			"load as non-superuser",
+			null,
+
+			q(c2, "SELECT null::pg_catalog.void" +
+				" FROM sqlj.get_classpath('public')")
+			.flatMap(Node::semiFlattenDiagnostics)
+			.peek(Node::peek),
+
+			(o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2,
+			(o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false,
+			(o,p,q) -> null == o
+		);
+		// done with connection c2 again
+	}
+
+	/*
+	 * Make sure the extension drops cleanly and nothing
+	 * is left in sqlj.
+	 */
+	succeeding &= stateMachine(
+		"drop extension and schema no result",
+		null,
+
+		q(c, "DROP EXTENSION pljava;DROP SCHEMA sqlj")
+		.flatMap(Node::semiFlattenDiagnostics)
+		.peek(Node::peek),
+
+		(o,p,q) -> isDiagnostic(o, Set.of("error")) ?
1 : -2, + NOTHING_OR_PGJDBC_ZERO_COUNT, + NOTHING_OR_PGJDBC_ZERO_COUNT, + (o,p,q) -> null == o + ); + } + + /* + * Get another new connection and confirm that the old, pre-extension, + * LOAD method of installing PL/Java works. It is largely obsolete in + * the era of extensions, but still covers the use case of installing + * PL/Java without admin access on the server filesystem to where + * CREATE EXTENSION requires the files to be; they can still be + * installed in some other writable location the server can read, and + * pljava.module_path set to the right locations of the jars, and the + * correct shared-object path given to LOAD. + * + * Also test the after-the-fact packaging up with CREATE EXTENSION + * FROM unpackaged. That officially goes away in PG 13, where the + * equivalent sequence + * CREATE EXTENSION pljava VERSION unpackaged + * \c + * ALTER EXTENSION pljava UPDATE + * should be tested instead. + */ + try ( Connection c = n1.connect() ) + { + succeeding &= stateMachine( + "load as non-extension", + null, + + Node.loadPLJava(c) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + NOTHING_OR_PGJDBC_ZERO_COUNT, + (o,p,q) -> null == o + ); + + if ( 13 <= pgMajorVersion ) + { + succeeding &= stateMachine( + "create unpackaged (PG >= 13)", + null, + + q(c, "CREATE EXTENSION pljava VERSION unpackaged") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + NOTHING_OR_PGJDBC_ZERO_COUNT, + (o,p,q) -> null == o + ); + } + } + + /* + * CREATE EXTENSION FROM unpackaged (or the second half of the + * PG >= 13 CREATE EXTENSION VERSION unpackaged;ALTER EXTENSION UPDATE + * sequence) has to happen over a new connection. + */ + try ( Connection c = n1.connect() ) + { + succeeding &= stateMachine( + "package after loading", + null, + + q(c, 13 > pgMajorVersion + ? "CREATE EXTENSION pljava FROM unpackaged" + : "ALTER EXTENSION pljava UPDATE") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + NOTHING_OR_PGJDBC_ZERO_COUNT, + (o,p,q) -> null == o + ); + + /* + * Again make sure extension drops cleanly with nothing left behind. + */ + succeeding &= stateMachine( + "drop extension and schema no result", + null, + + q(c, "DROP EXTENSION pljava;DROP SCHEMA sqlj") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + NOTHING_OR_PGJDBC_ZERO_COUNT, + NOTHING_OR_PGJDBC_ZERO_COUNT, + (o,p,q) -> null == o + ); + } +} catch ( Throwable t ) +{ + succeeding = false; + throw t; +} + +System.out.println(results); +succeeding &= (0 == results.get("ng")); +System.exit(succeeding ? 
0 : 1); diff --git a/Makefile b/Makefile index 271eefdf..7d03e0ef 100755 --- a/Makefile +++ b/Makefile @@ -31,6 +31,7 @@ JAVA_HOME := $(PLJAVA_HOME) PLJAVADATA = $(DESTDIR)$(datadir)/pljava PLJAVALIB = $(DESTDIR)$(pkglibdir)/java PLJAVAEXT = $(DESTDIR)$(datadir)/extension +PLJAVASYSCONF = $(shell pg_config --sysconfdir) REGRESS_OPTS = --dbname=pljava_test --create-role=pljava_test REGRESS = pljava_ext_init pljava_functions pljava_test pljava_ext_cleanup pljava_init pljava_functions pljava_test pljava_uninstall @@ -41,15 +42,17 @@ REGRESS_DIR = $(top_builddir) .PHONY: build installdirs install uninstall test localconfig targetconfig installcheck targetcheck release install-nar-snapshot: - curl -o nar-maven-plugin.tar.gz https://codeload.github.com/maven-nar/nar-maven-plugin/tar.gz/refs/tags/nar-maven-plugin-3.5.2 + rm -rf nar-maven-plugin nar-maven-plugin-nar-maven-plugin-3.5.2 nar-maven-plugin.tar.gz + curl -fsSL -o nar-maven-plugin.tar.gz https://codeload.github.com/maven-nar/nar-maven-plugin/tar.gz/refs/tags/nar-maven-plugin-3.5.2 tar xzf nar-maven-plugin.tar.gz mv nar-maven-plugin-nar-maven-plugin-3.5.2 nar-maven-plugin (cd nar-maven-plugin ; mvn) build: install-nar-snapshot mvn clean install - find $(PROJDIR)/pljava-so/target/nar/ -name "libpljava-so-$(PLJAVA_OSS_VERSION).so" -exec cp {} $(PROJDIR)/$(MODULE_big).so \; + cp $(PROJDIR)/pljava-so/target/pljava-pgxs/libpljava-so-$(PLJAVA_OSS_VERSION).so $(PROJDIR)/$(MODULE_big).so cp $(PROJDIR)/pljava/target/pljava-$(PLJAVA_OSS_VERSION).jar $(PROJDIR)/target/pljava.jar + cp $(PROJDIR)/pljava-api/target/pljava-api-$(PLJAVA_OSS_VERSION).jar $(PROJDIR)/target/pljava-api.jar cp $(PROJDIR)/pljava-examples/target/pljava-examples-$(PLJAVA_OSS_VERSION).jar $(PROJDIR)/target/examples.jar installdirs: @@ -57,19 +60,26 @@ installdirs: $(MKDIR_P) '$(PLJAVADATA)' $(MKDIR_P) '$(PLJAVADATA)/docs' $(MKDIR_P) '$(PLJAVAEXT)' + $(MKDIR_P) '$(PLJAVASYSCONF)' install: installdirs install-lib + $(INSTALL_PROGRAM) '$(PROJDIR)/pljava-so/target/pljava-pgxs/libpljava-so-$(PLJAVA_OSS_VERSION).so' '$(pkglibdir)/libpljava-so-$(PLJAVA_OSS_VERSION).so' + $(INSTALL_DATA) '$(PROJDIR)/pljava/target/pljava-$(PLJAVA_OSS_VERSION).jar' '$(PLJAVADATA)/pljava-$(PLJAVA_OSS_VERSION).jar' + $(INSTALL_DATA) '$(PROJDIR)/pljava-api/target/pljava-api-$(PLJAVA_OSS_VERSION).jar' '$(PLJAVADATA)/pljava-api-$(PLJAVA_OSS_VERSION).jar' + $(INSTALL_DATA) '$(PROJDIR)/pljava-examples/target/pljava-examples-$(PLJAVA_OSS_VERSION).jar' '$(PLJAVADATA)/pljava-examples-$(PLJAVA_OSS_VERSION).jar' $(INSTALL_DATA) '$(PROJDIR)/pljava/target/pljava-$(PLJAVA_OSS_VERSION).jar' '$(PLJAVALIB)/pljava.jar' + $(INSTALL_DATA) '$(PROJDIR)/pljava-api/target/pljava-api-$(PLJAVA_OSS_VERSION).jar' '$(PLJAVALIB)/pljava-api.jar' $(INSTALL_DATA) '$(PROJDIR)/pljava-examples/target/pljava-examples-$(PLJAVA_OSS_VERSION).jar' '$(PLJAVALIB)/examples.jar' $(INSTALL_DATA) '$(PROJDIR)/gpdb/installation/install.sql' '$(PLJAVADATA)' $(INSTALL_DATA) '$(PROJDIR)/gpdb/installation/uninstall.sql' '$(PLJAVADATA)' $(INSTALL_DATA) '$(PROJDIR)/gpdb/installation/install_pljavat.sql' '$(PLJAVADATA)' $(INSTALL_DATA) '$(PROJDIR)/gpdb/installation/uninstall_pljavat.sql' '$(PLJAVADATA)' $(INSTALL_DATA) '$(PROJDIR)/gpdb/installation/examples.sql' '$(PLJAVADATA)' - $(INSTALL_DATA) '$(PROJDIR)/gpdb/installation/pljava--1.5.0.sql' '$(PLJAVAEXT)' + $(INSTALL_DATA) '$(PROJDIR)/gpdb/installation/pljava--$(PLJAVA_OSS_VERSION).sql' '$(PLJAVADATA)' $(INSTALL_DATA) '$(PROJDIR)/gpdb/installation/pljava.control' '$(PLJAVAEXT)' $(INSTALL_DATA) 
'$(PROJDIR)/gpdb/installation/pljavat--1.5.0.sql' '$(PLJAVAEXT)' $(INSTALL_DATA) '$(PROJDIR)/gpdb/installation/pljavat.control' '$(PLJAVAEXT)' + $(INSTALL_DATA) '$(PROJDIR)/pljava-packaging/target/classes/pljava.policy' '$(PLJAVASYSCONF)/pljava.policy' find $(PROJDIR)/docs -name "*.html" -exec $(INSTALL_DATA) {} '$(PLJAVADATA)/docs' \; uninstall: uninstall-lib @@ -81,7 +91,7 @@ test: echo 'host all pljava_test 0.0.0.0/0 trust # PLJAVA' >> $(MASTER_DATA_DIRECTORY)/pg_hba.conf echo 'local all pljava_test trust # PLJAVA' >> $(MASTER_DATA_DIRECTORY)/pg_hba.conf gpstop -u - cd $(PROJDIR)/gpdb/tests && $(REGRESS_DIR)/src/test/regress/pg_regress --bindir=$(bindir) $(REGRESS_OPTS) $(REGRESS) + cd $(PROJDIR)/gpdb/tests && $(REGRESS_DIR)/src/test/regress/pg_regress --bindir=$(bindir) $(REGRESS_OPTS) --prehook=pljava_examples $(REGRESS) localconfig: gpconfig -c pljava_classpath -v $(PROJDIR)/target/ diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 00000000..acdea607 --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,83 @@ +# These only_commits and branches settings ought to pretty much suppress +# Appveyor, whose runs have all been failing lately because of Maven repository +# connection resets that don't seem reproducible locally. This can be revisited +# later to see if things might be working again. +only_commits: + message: /appveyor/ +branches: + only: + - appveyor +image: Visual Studio 2019 +environment: + APPVEYOR_RDP_PASSWORD: MrRobot@2020 + VCVARSALL: C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat + matrix: + - SYS: MINGW + JDK: 11 + PG: pacman + - SYS: MINGW + JDK: 17 + PG: pacman + - SYS: MINGW + JDK: 19 + PG: pacman + - SYS: MINGW + JDK: 21 + PG: pacman + - SYS: MSVC + JDK: 21 + PG: 15 + - SYS: MSVC + JDK: 21 + PG: 14 + - SYS: MSVC + JDK: 21 + PG: 13 + - SYS: MSVC + JDK: 21 + PG: 12 + - SYS: MSVC + JDK: 11 + PG: 9.6 +before_build: + - ps: .appveyor/appveyor_download_java.ps1 + - set JAVA_HOME=%ProgramFiles%\Java\jdk%JDK% + - path %JAVA_HOME%\bin;%PATH% + - '"%VCVARSALL%" x86' + - '"%VCVARSALL%" amd64' + - ps: $Env:JAVA_HOME = "C:\Program Files\Java\jdk$Env:JDK" + - ps: $Env:Path = "$Env:JAVA_HOME\bin;" + $Env:Path +build_script: + - .appveyor\appveyor_mingw_or_msvc.bat +# - ps: $blockRdp = $true; iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1')) +test_script: + - ps: | + $pgConfig = $Env:pgConfig + + $packageJar = ('pljava-packaging' | + Get-ChildItem -Recurse -Filter pljava-pg*.jar + ).FullName + + $mavenRepo = "$env:UserProfile\.m2\repository" + + $saxonVer = (Join-Path $mavenRepo "net\sf\saxon\Saxon-HE" | + Get-ChildItem -Recurse -Filter Saxon-HE-*.jar | + Select-Object -Last 1 + ).Directory.Name + + $jdbcJar = (Join-Path $mavenRepo "org\postgresql\postgresql" | + Get-ChildItem -Recurse -Filter postgresql-*.jar | + Select-Object -Last 1 + ).FullName + + jshell ` + -execution local ` + "-J--class-path=$packageJar;$jdbcJar" ` + "--class-path=$packageJar" ` + "-J--add-modules=java.sql.rowset,jdk.httpserver" ` + "-J-Dpgconfig=$pgConfig" ` + "-J-DmavenRepo=$mavenRepo" ` + "-J-DsaxonVer=$saxonVer" ` + "-J-DredirectError=true" ` + "-J-DextractFiles=true" ` + CI\integration diff --git a/concourse/docker/ubuntu22.04/README.md b/concourse/docker/ubuntu22.04/README.md new file mode 100644 index 00000000..8722ba88 --- /dev/null +++ b/concourse/docker/ubuntu22.04/README.md @@ -0,0 +1,34 @@ +# Cloudberry PL/Java Docker (Ubuntu 22.04 / pxf-style) + +This directory uses the upstream image 
`apache/incubator-cloudberry:cbdb-build-ubuntu22.04-latest`: + +- **Sources are prepared externally** (CI checkout / local sibling directory mount), no `git clone` inside the container +- The container runs scripts to **build Cloudberry from source and create a demo cluster (with standby)** +- Then builds PL/Java and runs the built-in regression tests (`cbdb/tests`) + +## Directory layout + +``` + +/ + cloudberry/ + pljava/ + +``` + +## Run locally + +From `/pljava`: + +```sh +docker compose -f concourse/docker/ubuntu22.04/docker-compose.yml down -v || true +docker compose -f concourse/docker/ubuntu22.04/docker-compose.yml up -d + +docker exec cbdb-pljava bash -lc "bash /home/gpadmin/workspace/pljava/concourse/docker/ubuntu22.04/scripts/entrypoint.sh" +``` + +Run PL/Java regression only (assumes Cloudberry + cluster + PL/Java are ready): + +```sh +docker exec cbdb-pljava bash -lc "source /home/gpadmin/workspace/pljava/concourse/docker/ubuntu22.04/scripts/entrypoint.sh && run_pljava_test_only" +``` diff --git a/concourse/docker/ubuntu22.04/docker-compose.yml b/concourse/docker/ubuntu22.04/docker-compose.yml new file mode 100644 index 00000000..06c5172d --- /dev/null +++ b/concourse/docker/ubuntu22.04/docker-compose.yml @@ -0,0 +1,15 @@ +services: + cbdb-pljava: + platform: linux/amd64 + image: apache/incubator-cloudberry:cbdb-build-ubuntu22.04-latest + container_name: cbdb-pljava + hostname: cdw + tty: true + stdin_open: true + volumes: + - ../../..:/home/gpadmin/workspace/pljava:rw + - ../../../../cloudberry:/home/gpadmin/workspace/cloudberry:rw + ports: + - "15432:7000" + - "12222:22" + command: ["tail", "-f", "/dev/null"] diff --git a/concourse/docker/ubuntu22.04/scripts/build_cloudberrry.sh b/concourse/docker/ubuntu22.04/scripts/build_cloudberrry.sh new file mode 100644 index 00000000..ba9894f1 --- /dev/null +++ b/concourse/docker/ubuntu22.04/scripts/build_cloudberrry.sh @@ -0,0 +1,109 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Ubuntu 22.04 Cloudberry build script (pxf-style). +# - assumes sources are already mounted at ~/workspace/cloudberry +# - builds + installs into /usr/local/cloudberry-db +# - creates a demo cluster (with standby) for pg_regress health checks + +log() { echo "[build_cloudberrry][$(date '+%F %T')] $*"; } +die() { log "ERROR: $*"; exit 1; } + +WORKSPACE="${WORKSPACE:-/home/gpadmin/workspace}" +CLOUDBERRY_SRC="${CLOUDBERRY_SRC:-${WORKSPACE}/cloudberry}" +INSTALL_PREFIX="${INSTALL_PREFIX:-/usr/local/cloudberry-db}" + +if [ ! 
-d "${CLOUDBERRY_SRC}" ]; then + die "Cloudberry source not found at ${CLOUDBERRY_SRC} (did you mount/checkout cloudberry?)" +fi + +if [ "${FORCE_CLOUDBERRY_BUILD:-}" != "true" ] && \ + [ -f "${INSTALL_PREFIX}/cloudberry-env.sh" ] && \ + [ -f "${CLOUDBERRY_SRC}/gpAux/gpdemo/gpdemo-env.sh" ]; then + log "Cloudberry already installed and demo cluster exists; skipping build (set FORCE_CLOUDBERRY_BUILD=true to rebuild)" + exit 0 +fi + +log "install base packages" +sudo apt-get update +sudo apt-get install -y sudo git locales openssh-server iproute2 \ + bison bzip2 cmake curl flex gcc g++ make pkg-config rsync wget tar \ + libapr1-dev libbz2-dev libcurl4-gnutls-dev libevent-dev libkrb5-dev libipc-run-perl \ + libldap2-dev libpam0g-dev libprotobuf-dev libreadline-dev libssl-dev libuv1-dev \ + liblz4-dev libxerces-c-dev libxml2-dev libyaml-dev libzstd-dev libperl-dev \ + protobuf-compiler python3-dev python3-pip python3-setuptools libsnappy-dev + +sudo locale-gen en_US.UTF-8 +sudo update-locale LANG=en_US.UTF-8 + +log "setup ssh keys for gpadmin" +mkdir -p /home/gpadmin/.ssh +if [ ! -f /home/gpadmin/.ssh/id_rsa ]; then + ssh-keygen -t rsa -b 2048 -C 'apache-cloudberry-dev' -f /home/gpadmin/.ssh/id_rsa -N "" +fi +cat /home/gpadmin/.ssh/id_rsa.pub >> /home/gpadmin/.ssh/authorized_keys +chmod 700 /home/gpadmin/.ssh +chmod 600 /home/gpadmin/.ssh/authorized_keys +chmod 644 /home/gpadmin/.ssh/id_rsa.pub + +log "configure resource limits" +sudo tee /etc/security/limits.d/90-db-limits.conf >/dev/null <<'EOF' +gpadmin soft core unlimited +gpadmin hard core unlimited +gpadmin soft nofile 524288 +gpadmin hard nofile 524288 +gpadmin soft nproc 131072 +gpadmin hard nproc 131072 +EOF + +log "prepare install prefix ${INSTALL_PREFIX}" +sudo rm -rf "${INSTALL_PREFIX}" +sudo mkdir -p "${INSTALL_PREFIX}" +sudo chown -R gpadmin:gpadmin "${INSTALL_PREFIX}" + +log "configure Cloudberry" +cd "${CLOUDBERRY_SRC}" +./configure --prefix="${INSTALL_PREFIX}" \ + --disable-external-fts \ + --enable-debug \ + --enable-cassert \ + --enable-debug-extensions \ + --enable-gpcloud \ + --enable-ic-proxy \ + --enable-mapreduce \ + --enable-orafce \ + --enable-orca \ + --disable-pax \ + --enable-pxf \ + --enable-tap-tests \ + --with-gssapi \ + --with-ldap \ + --with-libxml \ + --with-lz4 \ + --with-pam \ + --with-perl \ + --with-pgport=5432 \ + --with-python \ + --with-pythonsrc-ext \ + --with-ssl=openssl \ + --with-uuid=e2fs \ + --with-includes=/usr/include/xercesc + +log "build + install Cloudberry" +make -j"$(nproc)" -C "${CLOUDBERRY_SRC}" +make -j"$(nproc)" -C "${CLOUDBERRY_SRC}/contrib" +make install -C "${CLOUDBERRY_SRC}" +make install -C "${CLOUDBERRY_SRC}/contrib" + +log "create demo cluster" +# shellcheck disable=SC1091 +source "${INSTALL_PREFIX}/cloudberry-env.sh" +make create-demo-cluster -C "${CLOUDBERRY_SRC}" +# shellcheck disable=SC1091 +source "${CLOUDBERRY_SRC}/gpAux/gpdemo/gpdemo-env.sh" + +psql -P pager=off template1 -c "select version()" +psql -P pager=off template1 -c "select * from gp_segment_configuration order by dbid" + +log "Cloudberry demo cluster ready (PGPORT=${PGPORT:-})" + diff --git a/concourse/docker/ubuntu22.04/scripts/entrypoint.sh b/concourse/docker/ubuntu22.04/scripts/entrypoint.sh new file mode 100644 index 00000000..d1c70a8b --- /dev/null +++ b/concourse/docker/ubuntu22.04/scripts/entrypoint.sh @@ -0,0 +1,197 @@ +#!/usr/bin/env bash +set -euo pipefail + +log() { echo "[entrypoint][$(date '+%F %T')] $*"; } +die() { log "ERROR: $*"; exit 1; } + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && 
pwd)"
+
+WORKSPACE="${WORKSPACE:-/home/gpadmin/workspace}"
+PLJAVA_SRC="${PLJAVA_SRC:-${WORKSPACE}/pljava}"
+CLOUDBERRY_SRC="${CLOUDBERRY_SRC:-${WORKSPACE}/cloudberry}"
+INSTALL_PREFIX="${INSTALL_PREFIX:-/usr/local/cloudberry-db}"
+
+export LANG="${LANG:-en_US.UTF-8}"
+export LC_ALL="${LC_ALL:-en_US.UTF-8}"
+
+setup_ssh() {
+  log "start sshd"
+  sudo mkdir -p /var/run/sshd
+  sudo ssh-keygen -A >/dev/null 2>&1 || true
+  sudo /usr/sbin/sshd || true
+  mkdir -p /home/gpadmin/.ssh
+  touch /home/gpadmin/.ssh/known_hosts
+  ssh-keyscan -t rsa "$(hostname)" 2>/dev/null >> /home/gpadmin/.ssh/known_hosts || true
+  chmod 600 /home/gpadmin/.ssh/known_hosts || true
+}
+
+build_cloudberry() {
+  bash "${SCRIPT_DIR}/build_cloudberrry.sh"
+}
+
+source_cbdb_env() {
+  if [ -f "${INSTALL_PREFIX}/cloudberry-env.sh" ]; then
+    # shellcheck disable=SC1091
+    source "${INSTALL_PREFIX}/cloudberry-env.sh"
+  elif [ -f "${INSTALL_PREFIX}/greenplum_path.sh" ]; then
+    # shellcheck disable=SC1091
+    source "${INSTALL_PREFIX}/greenplum_path.sh"
+  fi
+
+  if [ -f "${CLOUDBERRY_SRC}/gpAux/gpdemo/gpdemo-env.sh" ]; then
+    # shellcheck disable=SC1091
+    source "${CLOUDBERRY_SRC}/gpAux/gpdemo/gpdemo-env.sh"
+  fi
+}
+
+wait_for_cbdb() {
+  log "wait for Cloudberry to accept connections (PGPORT=${PGPORT:-unset})"
+  for _ in $(seq 1 120); do
+    if psql -d template1 -v ON_ERROR_STOP=1 -c "select 1" >/dev/null 2>&1; then
+      return 0
+    fi
+    sleep 5
+  done
+  die "Cloudberry did not become ready in time"
+}
+
+install_pljava_build_deps() {
+  log "install PL/Java build deps"
+  sudo apt-get update
+  sudo apt-get install -y curl wget tar gcc g++ make libkrb5-dev openssl libssl-dev
+
+  # PL/Java 1.6.x requires Java 9+, use the Cloudberry-aligned JDK 11 LTS.
+  sudo apt-get install -y openjdk-11-jdk
+}
+
+setup_java() {
+  local java_major="${JAVA_MAJOR:-11}"
+  local java_home=""
+  for candidate in /usr/lib/jvm/java-${java_major}-openjdk-*; do
+    if [ -d "${candidate}" ]; then
+      java_home="${candidate}"
+      break
+    fi
+  done
+  if [ -z "${java_home}" ]; then
+    die "could not find Java ${java_major} under /usr/lib/jvm (is openjdk-${java_major}-jdk installed?)"
+  fi
+
+  export JAVA_HOME="${JAVA_HOME:-${java_home}}"
+  export PATH="${JAVA_HOME}/bin:${PATH}"
+  java -version
+}
+
+setup_maven() {
+  local maven_version="3.9.6"
+  if [ ! -x /usr/local/apache-maven/bin/mvn ]; then
+    wget -nv "https://archive.apache.org/dist/maven/maven-3/${maven_version}/binaries/apache-maven-${maven_version}-bin.tar.gz" \
+      -O "/tmp/apache-maven-${maven_version}-bin.tar.gz"
+    tar xzf "/tmp/apache-maven-${maven_version}-bin.tar.gz" -C /tmp
+    sudo rm -rf /usr/local/apache-maven
+    sudo mv "/tmp/apache-maven-${maven_version}" /usr/local/apache-maven
+  fi
+  export PATH="/usr/local/apache-maven/bin:${PATH}"
+
+  # Configure a Maven mirror/proxy to avoid network flakiness when reaching
+  # Maven Central from some environments.
+  local mirror_url="${MAVEN_MIRROR_URL:-https://maven.aliyun.com/repository/public}"
+  mkdir -p /home/gpadmin/.m2
+  cat > /home/gpadmin/.m2/settings.xml <<EOF
+<settings>
+  <mirrors>
+    <mirror>
+      <id>${MAVEN_MIRROR_ID:-mirror}</id>
+      <mirrorOf>central</mirrorOf>
+      <url>${mirror_url}</url>
+    </mirror>
+  </mirrors>
+</settings>
+EOF
+  log "configured Maven mirror: ${mirror_url}"
+
+  mvn -version
+}
+
+configure_pljava_runtime() {
+  # Point Cloudberry at libjvm.so for runtime.
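+  # For illustration only (the find below is what actually decides): with the
+  # openjdk-11-jdk package installed above, on Ubuntu 22.04 amd64 this
+  # typically resolves to
+  #   /usr/lib/jvm/java-11-openjdk-amd64/lib/server/libjvm.so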
+ local libjvm + libjvm="$(find "${JAVA_HOME}" -type f -name libjvm.so -path '*server*' | head -n 1 || true)" + [ -n "${libjvm}" ] || die "Could not locate libjvm.so under JAVA_HOME=${JAVA_HOME}" + + local jvm_server_dir jvm_lib_dir jvm_jli_dir + jvm_server_dir="$(dirname "${libjvm}")" + jvm_lib_dir="$(dirname "${jvm_server_dir}")" + jvm_jli_dir="${jvm_lib_dir}/jli" + export LD_LIBRARY_PATH="${jvm_server_dir}:${jvm_lib_dir}:${jvm_jli_dir}${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" + + log "configure PL/Java runtime GUCs" + gpconfig --skipvalidation -c pljava.libjvm_location -v "'${libjvm}'" + local module_path="${PLJAVA_SRC}/target/pljava.jar:${PLJAVA_SRC}/target/pljava-api.jar" + if [ -f "${PLJAVA_SRC}/target/examples.jar" ]; then + module_path="${module_path}:${PLJAVA_SRC}/target/examples.jar" + fi + gpconfig --skipvalidation -c pljava.module_path -v "'${module_path}'" + + gpstop -arf +} + +test_pljava() { + log "run PL/Java built-in regression tests (make installcheck)" + cd "${PLJAVA_SRC}" + mkdir -p "${PLJAVA_SRC}/gpdb/tests/results" + local installcheck_log="${PLJAVA_SRC}/gpdb/tests/results/installcheck.log" + log "installcheck log: ${installcheck_log}" + make installcheck REGRESS_DIR="${CLOUDBERRY_SRC}" 2>&1 | tee "${installcheck_log}" +} + +build_pljava() { + log "build + install PL/Java" + cd "${PLJAVA_SRC}" + make clean + make + make install +} + +build_and_test_pljava() { + build_pljava + configure_pljava_runtime + test_pljava +} + +run_pljava_test_only() { + source_cbdb_env + wait_for_cbdb + install_pljava_build_deps + setup_java + setup_maven + configure_pljava_runtime + test_pljava +} + +run_pljava_build_only() { + source_cbdb_env + wait_for_cbdb + install_pljava_build_deps + setup_java + setup_maven + build_pljava +} + +main() { + setup_ssh + build_cloudberry + source_cbdb_env + wait_for_cbdb + install_pljava_build_deps + setup_java + setup_maven + build_and_test_pljava + log "done" +} + +if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then + main "$@" +fi diff --git a/gpdb/installation/pljava--1.6.10.sql b/gpdb/installation/pljava--1.6.10.sql new file mode 100644 index 00000000..9d3daff1 --- /dev/null +++ b/gpdb/installation/pljava--1.6.10.sql @@ -0,0 +1,101 @@ +\echo Use "CREATE EXTENSION pljava" to load this file. \quit + +/* + Note: most of the work of setting up PL/Java is done within PL/Java itself, + touched off by the LOAD command, making possible a decent installation + experience even on pre-9.1, pre-extension PostgreSQL versions. This script + simply wraps that. + + However, in this case, the native library has no easy way to find the + pathname it has just been loaded from (it looks for the path given to its + LOAD command, but finds the CREATE EXTENSION command instead). So, temporarily + save the path in a table. + + The table's existence also helps PL/Java distinguish the case where it is + being loaded as an extension itself (via this script), and the case where + it is simply being awakened during the creation of some other extension + (CREATE EXTENSION foo where foo is something implemented using PL/Java). + + The name of the table resembles an error message, because the user will see it + in the case that no installation happened because PL/Java was already loaded, + detected below by a table collision. 
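+
+  (If that happens, the collision typically surfaces as an error along the
+  lines of: relation "see doc: do CREATE EXTENSION PLJAVA in new session"
+  already exists.)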
+ */ + +DROP TABLE IF EXISTS +@extschema@."see doc: do CREATE EXTENSION PLJAVA in new session"; +CREATE TABLE +@extschema@."see doc: do CREATE EXTENSION PLJAVA in new session" +(path, exnihilo) AS +SELECT CAST('libpljava-so-1.6.10' AS text), true; +LOAD 'libpljava-so-1.6.10'; +GRANT USAGE ON LANGUAGE java TO public; +GRANT USAGE ON SCHEMA sqlj TO public; +GRANT SELECT ON ALL TABLES IN SCHEMA sqlj TO public; + +DO $pljava$ +BEGIN + PERFORM pg_catalog.pg_stat_file( + '/home/gpadmin/workspace/pljava/target/examples.jar' + ); + + IF EXISTS ( + SELECT 1 + FROM sqlj.jar_repository + WHERE jarName = 'examples' + ) THEN + PERFORM sqlj.replace_jar( + 'file:///home/gpadmin/workspace/pljava/target/examples.jar', + 'examples', + false + ); + ELSE + PERFORM sqlj.install_jar( + 'file:///home/gpadmin/workspace/pljava/target/examples.jar', + 'examples', + false + ); + END IF; + + PERFORM sqlj.set_classpath('public', 'examples'); +EXCEPTION + WHEN undefined_file THEN + NULL; +END +$pljava$; + +/* + Ok, the LOAD succeeded, so everything happened ... unless ... the same + PL/Java library had already been loaded earlier in this same session. + That would be an unusual case, but confusing if it happened, because + PostgreSQL turns LOAD into a (successful) no-op in that case, meaning + CREATE EXTENSION might appear to succeed without really completing. + To fail fast in that case, expect that the LOAD actions should have + dropped the 'loadpath' table already, and just re-create and re-drop it here, + to incur a (cryptic, but dependable) error if it is still around because the + work didn't happen. The error message will include the table name, which is + why the table name is phrased as an error message. + + The solution to a problem detected here is simply to close the session, + and be sure to execute 'CREATE EXTENSION pljava' in a new session (new + at least in the sense that Java hasn't been used in it yet). + */ + +CREATE TABLE +@extschema@."see doc: do CREATE EXTENSION PLJAVA in new session"(); +DROP TABLE +@extschema@."see doc: do CREATE EXTENSION PLJAVA in new session"; + +/* + All of these tables in sqlj are created empty by PL/Java itself, and + the contents are things later loaded by the user, so configure them to + be dumped. XXX Future work: loaded jars could be extensions themselves, + so these tables should be extended to record when that's the case, and the + config_dump calls should have WHERE clauses to avoid dumping rows that + would be supplied naturally by recreating those extensions. 
+ */ + +SELECT pg_catalog.pg_extension_config_dump('@extschema@.jar_repository', ''); +SELECT pg_catalog.pg_extension_config_dump('@extschema@.jar_entry', ''); +SELECT pg_catalog.pg_extension_config_dump('@extschema@.jar_descriptor', ''); +SELECT pg_catalog.pg_extension_config_dump('@extschema@.classpath_entry', ''); +SELECT pg_catalog.pg_extension_config_dump('@extschema@.typemap_entry', ''); diff --git a/gpdb/installation/pljava.control b/gpdb/installation/pljava.control index b078793c..1dbe59d7 100644 --- a/gpdb/installation/pljava.control +++ b/gpdb/installation/pljava.control @@ -1,6 +1,5 @@ -# pljava extension comment = 'PL/Java procedural language (https://tada.github.io/pljava/)' -default_version = '1.5.0' -module_pathname = '$libdir/pljava' +default_version = '1.6.10' encoding = UTF8 -relocatable = true +directory = 'pljava' +schema = sqlj diff --git a/gpdb/tests/expected/pljava_test_optimizer.out b/gpdb/tests/expected/pljava_test_optimizer.out index 78d04c46..ba505cd6 100644 --- a/gpdb/tests/expected/pljava_test_optimizer.out +++ b/gpdb/tests/expected/pljava_test_optimizer.out @@ -649,8 +649,8 @@ SELECT id,name, salary FROM employees2 order by id; -- s/\(SOMEFILE\:SOMEFUNC\)// -- end_matchsubs SELECT javatest.transferPeople(1) FROM javatest.test; -- should error -ERROR: query plan with multiple segworker groups is not supported -HINT: likely caused by a function that reads or modifies data in a distributed table +ERROR: function cannot execute on a QE slice because it accesses relation "javatest.employees1" +CONTEXT: SQL statement "SELECT id, name, salary FROM employees1 WHERE salary > $1" select javatest.maxFromSetReturnExample(2,10); maxfromsetreturnexample ------------------------- diff --git a/gpdb/tests/pljava_examples b/gpdb/tests/pljava_examples new file mode 100644 index 00000000..e69de29b diff --git a/gpdb/tests/sql/hooks/pljava_examples.sql b/gpdb/tests/sql/hooks/pljava_examples.sql new file mode 100644 index 00000000..cfb494dd --- /dev/null +++ b/gpdb/tests/sql/hooks/pljava_examples.sql @@ -0,0 +1,35 @@ +-- start_ignore +-- +-- HOOK NAME: pljava_examples +-- HOOK TYPE: prehook +-- HOOK DESCRIPTION: +-- Install the PL/Java examples jar and set the public classpath when sqlj +-- exists (after CREATE EXTENSION pljava). 
+-- +-- end_ignore + +DO $pljava$ +BEGIN + IF EXISTS (SELECT 1 FROM pg_namespace WHERE nspname = 'sqlj') THEN + IF EXISTS ( + SELECT 1 + FROM sqlj.jar_repository + WHERE jarName = 'examples' + ) THEN + PERFORM sqlj.replace_jar( + 'file:///home/gpadmin/workspace/pljava/target/examples.jar', + 'examples', + false + ); + ELSE + PERFORM sqlj.install_jar( + 'file:///home/gpadmin/workspace/pljava/target/examples.jar', + 'examples', + false + ); + END IF; + + PERFORM sqlj.set_classpath('public', 'examples'); + END IF; +END +$pljava$; diff --git a/pljava-ant/pom.xml b/pljava-ant/pom.xml index 2dfc8f72..33b3bda5 100644 --- a/pljava-ant/pom.xml +++ b/pljava-ant/pom.xml @@ -4,7 +4,7 @@ org.postgresql pljava.app - 1.5.0 + 1.6.10 pljava-ant PL/Java Ant tasks diff --git a/pljava-api/pom.xml b/pljava-api/pom.xml index 06c6f781..effcf7a8 100644 --- a/pljava-api/pom.xml +++ b/pljava-api/pom.xml @@ -4,37 +4,14 @@ org.postgresql pljava.app - 1.5.0 + 1.6.10 pljava-api PL/Java API The API for Java stored procedures in PostgreSQL using PL/Java + - - org.apache.maven.plugins - maven-jar-plugin - - - - - org/postgresql/pljava/ - - - ${project.name} - - - ${project.version} - - - ${project.organization.name} - - - - - - - maven-resources-plugin 2.5 @@ -59,4 +36,143 @@ + + + + org.postgresql + pljava-pgxs + ${pljava.pgxs.version} + + + + scripted-report + + + + + + + + + diff --git a/pljava-api/src/main/java/module-info.java b/pljava-api/src/main/java/module-info.java new file mode 100644 index 00000000..fbfcb8bd --- /dev/null +++ b/pljava-api/src/main/java/module-info.java @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ + +/** + * Defines the API for PL/Java. + */ +@SuppressWarnings("module") // don't warn that o.p.p.internal's not visible yet +module org.postgresql.pljava +{ + requires java.base; + requires transitive java.sql; + requires transitive java.compiler; + + exports org.postgresql.pljava; + exports org.postgresql.pljava.annotation; + exports org.postgresql.pljava.sqlgen; + + exports org.postgresql.pljava.annotation.processing + to org.postgresql.pljava.internal; + + uses org.postgresql.pljava.Session; + + provides javax.annotation.processing.Processor + with org.postgresql.pljava.annotation.processing.DDRProcessor; +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/Adjusting.java b/pljava-api/src/main/java/org/postgresql/pljava/Adjusting.java new file mode 100644 index 00000000..341348ca --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/Adjusting.java @@ -0,0 +1,757 @@ +/* + * Copyright (c) 2019-2024 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import java.io.Reader; +import java.sql.SQLException; +import java.sql.SQLXML; +import java.util.List; +import static java.util.Objects.requireNonNull; +import java.util.function.Consumer; +import javax.xml.stream.XMLInputFactory; // for javadoc +import javax.xml.stream.XMLResolver; // for javadoc +import javax.xml.stream.XMLStreamReader; +import javax.xml.validation.Schema; +import org.xml.sax.EntityResolver; +import org.xml.sax.InputSource; +import org.xml.sax.XMLReader; + +/** + * Convenience class whose member classes will provide APIs that in some way + * adjust aspects of PL/Java's behavior. + *

+ * The intention is that a single + *

+ * import org.postgresql.pljava.Adjusting;
+ *
+ * will make various adjusting API classes available with easily readable + * references like {@code Adjusting.XML.SAXSource}. + */ +public final class Adjusting +{ + private Adjusting() { } // no instances + + /** + * Class that collects adjustment APIs for affecting the behavior of + * PL/Java's XML support. + *

<h2>XML parser behavior adjustments</h2>

+ *

+ * Retrieving or verifying the XML content in a JDBC {@code SQLXML} object + * can involve applying an XML parser. The full XML specification includes + * features that can require an XML parser to retrieve external resources or + * consume unexpected amounts of memory. The full feature support may be an + * asset in an environment where the XML content will always be from a + * known, trusted source, or a liability if less is known about the XML + * content being processed. + *

+ * The Open Web Application Security Project (OWASP) advocates for the + * default use of settings that strictly limit the related features of Java + * XML parsers, as outlined in a + * "cheat sheet" the organization publishes. The strict default settings + * can then be selectively relaxed in applications where the features are + * needed and the content is sufficiently trusted. + *

+ * However, the recommended defaults really are severely restrictive (for + * example, disabling document-type declarations by default will cause + * PL/Java's {@code SQLXML} implementation to reject all XML values that + * contain DTDs). Therefore, there must be a simple and clear way for code + * to selectively adjust the settings, or adopting the strictest settings by + * default would pose an unacceptable burden to developers. + *

+ * The usual way that Java XML parsers expose their settings for adjustment + * is through {@code setFeature} or {@code setProperty} methods that must be + * passed particular URIs that identify adjustable features, and objects of + * appropriate types (often boolean) as the values for those properties. The + * supported properties and the URIs that identify them can be different + * from one parser implementation to another or one version to another. That + * is not the "simple and clear" adjustment mechanism needed here. + * Furthermore, the JDBC {@code SQLXML} API conceals much of the complexity + * of configuring any underlying XML parser behind a simple + * {@code getSource} method whose result can be used directly with other + * Java APIs expecting some flavor of {@code Source} object, and for some of + * those flavors, the returned object does not even expose the methods one + * would need to call to adjust the underlying parser, if any. + *

+ * Hence this adjustment API. JDBC already provides for extensibility of the + * {@code SQLXML.getSource} method; it is passed the class object for a + * desired subtype of {@code Source} and, if the implementation supports it, + * returns an object of that type. The subtypes that every conformant + * implementation must support are {@code StreamSource}, {@code SAXSource}, + * {@code StAXSource}, and {@code DOMSource}. If {@code null} is passed, the + * implementation will choose which flavor to return, often based on + * internal implementation details making one most natural or efficient. + *

+ * The types {@link SAXSource}, {@link StAXSource}, and {@link DOMSource} + * are used the same way, by passing the corresponding class literal to + * {@code SQLXML}'s {@code getSource} method, which will return an object + * providing the chainable adjustment methods of {@link Source}, with the + * chain ending in a {@link Source#get get} method that returns the + * corresponding Java {@code Source} object, configured as adjusted. + *

+ * Example: + *

+	 *SAXSource src1 = sqx1.getSource(SAXSource.class);
+	 *SAXSource src2 = sqx2.getSource(Adjusting.XML.SAXSource.class)
+	 *                     .allowDTD(true).get();
+	 *
+ * {@code src1} would be assigned a {@code SAXSource} object configured with + * the OWASP-recommended defaults, which will not allow the content to have + * a DTD, among other restrictions, while {@code src2} would be assigned a + * {@code SAXSource} object configured with the other default restrictions + * (as if the {@code allowDTD(true)} is preceded by an implied + * {@link Source#defaults defaults()}), but with DTD parsing enabled. + *
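// A minimal sketch, not part of the patch above: the same chaining pattern
// with more than one adjustment, and the configured Source then handed to an
// ordinary JAXP identity Transformer. The two SQLXML values are assumed to be
// supplied by the caller; lax(true), described further below, discards any
// exceptions recorded for adjustments that could not be applied.
import java.sql.SQLException;
import java.sql.SQLXML;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.sax.SAXSource;
import javax.xml.transform.stream.StreamResult;
import org.postgresql.pljava.Adjusting;

public class AdjustedCopy
{
	public static void copyAllowingDTD(SQLXML in, SQLXML out)
	throws SQLException, TransformerException
	{
		SAXSource src = in.getSource(Adjusting.XML.SAXSource.class)
			.allowDTD(true)   // relax just this one restriction
			.lax(true)        // discard exceptions from unapplied adjustments
			.get();           // ends the chain
		StreamResult dst = out.setResult(StreamResult.class);
		TransformerFactory.newInstance().newTransformer() // identity transform
			.transform(src, dst);
	}
}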

+ * No {@code Adjusting.XML.StreamSource} is needed or provided, as any + * application code that requests a {@code StreamSource} will have to + * provide and configure its own parser anyway. + *

+ * Like passing {@code null} to {@code getSource}, passing the parent + * interface {@code Adjusting.XML.Source.class} will allow the + * implementation to choose which subtype of {@code Adjusting.XML.Source} to + * return. The object returned by {@link Source#get get} can then be passed + * directly to Java APIs like {@code Transformer} that accept several + * flavors of {@code Source}, or examined to see of what class it is. + */ + public static final class XML + { + private XML() { } // no instances + + /** + * Attempts a given action (typically to set something) using a given + * value, trying one or more supplied keys in order until the action + * succeeds with no exception. + *

+ * This logic is common to the + * {@link Parsing#setFirstSupportedFeature setFirstSupportedFeature} + * and + * {@link Parsing#setFirstSupportedProperty setFirstSupportedProperty} + * methods, and is exposed here because it may be useful for other + * tasks in Java's XML APIs, such as configuring {@code Transformer}s. + *
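// A sketch, not part of the patch above, of the other use suggested here:
// trying a TransformerFactory attribute with setFirstSupported. The attribute
// name used is the standard javax.xml.XMLConstants one; further names could
// be appended if the same setting had gone by other URIs in older
// implementations. TransformerFactory.setAttribute reports an unrecognized
// name with IllegalArgumentException, the 'expected' case to retry past.
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.xml.XMLConstants;
import javax.xml.transform.TransformerFactory;
import org.postgresql.pljava.Adjusting;

public class FactorySetup
{
	public static TransformerFactory restrictedFactory()
	{
		TransformerFactory tf = TransformerFactory.newInstance();
		Exception e = Adjusting.XML.setFirstSupported(
			tf::setAttribute, "",                    // "" denies external access
			List.of(IllegalArgumentException.class), // expected if unrecognized
			null,                                    // no onUnexpected handler
			XMLConstants.ACCESS_EXTERNAL_DTD);
		if ( null != e )
			Logger.getLogger("example")
				.log(Level.WARNING, "factory not fully restricted", e);
		return tf;
	}
}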

+ * If any attempt succeeds, null is returned. If no attempt + * succeeds, the first exception caught is returned, with any + * exceptions from the subsequent attempts retrievable from it with + * {@link Exception#getSuppressed getSuppressed}. The return is + * immediate, without any remaining names being tried, if an exception + * is caught that is not assignable to a class in the + * expected list. Such an exception is returned (or added to + * the suppressed list of an exception already to be returned) only if + * the onUnexpected handler is null; otherwise, it is passed + * to the handler and does not affect the method's return. + *

+ * For some purposes, a single call of this method may not suffice: if + * alternate means to establish a desired configuration have existed and + * are not simply alternate property names that will accept the same + * value. For such a case, this method may be called more than once. The + * caller abandons the sequence of calls after the first call that + * returns null (indicating that it either succeeded, or incurred an + * unexpected exception and passed it to the onUnexpected + * handler. Otherwise, the exception returned by the first call can be + * passed as caught to the next call, instead of passing the + * usual null. (When a non-null caught is passed, it will be + * returned on failure, even if an unexpected exception has been caught; + * therefore, should it ever be necessary to chain more than two of + * these calls, the caller should abandon the sequence as soon as a call + * returns null or returns its caught argument with + * no growth of its suppressed list.) + * @param setter typically a method reference for a method that + * takes a string key and some value. + * @param value the value to pass to the setter + * @param expected a list of exception classes that can be foreseen + * to indicate that a key was not recognized, and the operation + * should be retried with the next possible key. + * @param caught null, or an exception returned by a preceding call if + * an operation cannot be implemented with one call of this method + * @param onUnexpected invoked, if non-null, on an {@code Exception} + * that is caught and matches nothing in the expected list, instead + * of returning it. If this parameter is null, such an exception is + * returned (or added to the suppressed list of the exception to be + * returned), just as for expected exceptions, but the return is + * immediate, without trying remaining names, if any. + * @param names one or more String keys to be tried in order until + * the action succeeds. + * @return null if any attempt succeeded, or if the first exception + * caught was passed to the onUnexpected handler; otherwise the first + * exception caught (if the caller supplied a non-null + * caught, then that exception), which may have further + * exceptions in its suppressed list. + */ + public static Exception setFirstSupported( + SetMethod setter, V value, + List> expected, + Exception caught, + Consumer onUnexpected, String... names) + { + requireNonNull(expected); + for ( String name : names ) + { + try + { + setter.set(name, value); + return null; + } + catch ( Exception e ) + { + boolean benign = + expected.stream().anyMatch(c -> c.isInstance(e)); + + if ( benign || null == onUnexpected ) + { + if ( null == caught ) + caught = e; + else + caught.addSuppressed(e); + } + else + onUnexpected.accept(e); + + if ( ! benign ) + break; + } + } + return caught; + } + + /** + * Calls the six-argument overload passing null for caught. + */ + public static Exception setFirstSupported( + SetMethod setter, V value, + List> expected, + Consumer onUnexpected, String... names) + { + return setFirstSupported( + setter, value, expected, null, onUnexpected, names); + } + + /** + * A functional interface fitting various {@code setFeature} or + * {@code setProperty} methods in Java XML APIs. + *

+ * The XML APIs have a number of methods on various interfaces that can + * be used to set some property or feature, and can generally be + * assigned to this functional interface by bound method reference, and + * used with {@link #setFirstSupported setFirstSupported}. + */ + @FunctionalInterface + public interface SetMethod + { + void set(String key, T value) throws Exception; + } + + /** + * Interface with methods to adjust the restrictions on XML parsing + * that are commonly considered when XML content might be from untrusted + * sources. + *

+ * The adjusting methods are best-effort; not all of + * the adjustments are available for all flavors of {@code Source} or + * {@code Result} or for all parser implementations or versions the Java + * runtime may supply. Cases where a requested adjustment has not been + * made are handled as follows: + *

+ * Any sequence of adjustment calls will ultimately be followed by a + * {@code get}. During the sequence of adjustments, exceptions caught + * are added to a signaling list or to a quiet list, where "added to" + * means that if either list has a first exception, any caught later are + * attached to that exception with + * {@link Exception#addSuppressed addSuppressed}. + *

+ * For each adjustment (and depending on the type of underlying + * {@code Source} or {@code Result}), one or more exception types will + * be 'expected' as indications that an identifying key or value for + * that adjustment was not recognized. This implementation may continue + * trying to apply the adjustment, using other keys that have at times + * been used to identify it. Expected exceptions caught during these + * attempts form a temporary list (a first exception and those attached + * to it by {@code addSuppressed}). Once any such attempt succeeds, the + * adjustment is considered made, and any temporary expected exceptions + * list from the adjustment is discarded. If no attempt succeeded, the + * temporary list is retained, by adding its head exception to the quiet + * list. + *

+ * Any exceptions caught that are not instances of any of the 'expected' + * types are added to the signaling list. + *

+ * When {@code get} is called, the head exception on the signaling list, + * if any, is thrown. Otherwise, the head exception on the quiet list, + * if any, is logged at {@code WARNING} level. + *

+ * During a chain of adjustments, {@link #lax lax()} can be called to + * tailor the handling of the quiet list. A {@code lax()} call applies + * to whatever exceptions have been added to the quiet list up to that + * point. To discard them, call {@code lax(true)}; to move them to the + * signaling list, call {@code lax(false)}. + */ + public interface Parsing> + { + /** Whether to allow a DTD at all. */ + T allowDTD(boolean v); + + /** + * Specifies that any DTD should be ignored (neither processed nor + * rejected as an error). + *

+ * This treatment is available in Java 22 and later. + * In earlier Java versions, this will not succeed. Where it is + * supported, the most recent call of this method or of + * {@link #allowDTD allowDTD} will be honored. + */ + T ignoreDTD(); + + /** + * Whether to retrieve external "general" entities (those + * that can be used in the document body) declared in the DTD. + */ + T externalGeneralEntities(boolean v); + + /** + * Whether to retrieve external "parameter" entities (those + * declared with a {@code %} and usable only within the DTD) + * declared in the DTD. + */ + T externalParameterEntities(boolean v); + + /** + * Whether to retrieve any external DTD subset declared in the DTD. + */ + T loadExternalDTD(boolean v); + + /** + * Whether to honor XInclude syntax in the document. + */ + T xIncludeAware(boolean v); + + /** + * Whether to expand entity references in the document to their + * declared replacement content. + */ + T expandEntityReferences(boolean v); + + /** + * For a feature that may have been identified by more than one URI + * in different parsers or versions, tries passing the supplied + * value with each URI from names in order until + * one is not rejected by the underlying parser. + */ + T setFirstSupportedFeature(boolean value, String... names); + + /** + * Makes a best effort to apply the recommended, restrictive + * defaults from the OWASP cheat sheet, to the extent they are + * supported by the underlying parser, runtime, and version. + *

+ * Equivalent to: + *

+			 * allowDTD(false).externalGeneralEntities(false)
+			 * .externalParameterEntities(false).loadExternalDTD(false)
+			 * .xIncludeAware(false).expandEntityReferences(false)
+			 *
+ */ + T defaults(); + + /** + * For a parser property (in DOM parlance, attribute) that may have + * been identified by more than one URI in different parsers or + * versions, tries passing the supplied value with each URI + * from names in order until one is not rejected by the + * underlying parser. + *

+ * A property differs from a feature in taking a value of some + * specified type, rather than being simply enabled/disabled with + * a boolean. + */ + T setFirstSupportedProperty(Object value, String... names); + + /** + * Maximum number of attributes on an element, with a negative or + * zero value indicating no limit. + */ + T elementAttributeLimit(int limit); + + /** + * Maximum number of entity expansions, with a negative or + * zero value indicating no limit. + */ + T entityExpansionLimit(int limit); + + /** + * Limit on total number of nodes in all entity referenced, + * with a negative or zero value indicating no limit. + */ + T entityReplacementLimit(int limit); + + /** + * Maximum element depth, + * with a negative or zero value indicating no limit. + */ + T maxElementDepth(int depth); + + /** + * Maximum size of any general entities, + * with a negative or zero value indicating no limit. + */ + T maxGeneralEntitySizeLimit(int limit); + + /** + * Maximum size of any parameter entities (including the result + * of nesting parameter entities), + * with a negative or zero value indicating no limit. + */ + T maxParameterEntitySizeLimit(int limit); + + /** + * Maximum size of XML names (including element and attribute names, + * namespace prefix, and namespace URI even though that isn't an + * XML name), + * with a negative or zero value indicating no limit. + */ + T maxXMLNameLimit(int limit); + + /** + * Limit on total size of all entities, general or parameter, + * with a negative or zero value indicating no limit. + */ + T totalEntitySizeLimit(int limit); + + /** + * Protocol schemes allowed in the URL of an external DTD to be + * fetched. + * @param protocols Empty string to deny all external DTD access, + * the string "all" to allow fetching by any protocol, or a + * comma-separated, case insensitive list of protocols to allow. + * A protocol name prefixed with "jar:" is also a protocol name. + */ + T accessExternalDTD(String protocols); + + /** + * Protocol schemes allowed in the URL of an external schema to be + * fetched. + * @param protocols Empty string to deny all external DTD access, + * the string "all" to allow fetching by any protocol, or a + * comma-separated, case insensitive list of protocols to allow. + * A protocol name prefixed with "jar:" is also a protocol name. + */ + T accessExternalSchema(String protocols); + + /** + * Sets an {@link EntityResolver} of the type used by SAX and DOM + * (optional operation). + *

+ * This method only succeeds for a {@code SAXSource} or + * {@code DOMSource} (or a {@code StreamResult}, where the resolver + * is set on the parser that will verify the content written). + * Unlike the best-effort behavior of most methods in this + * interface, this one will report failure with an exception. + *

+ * If the StAX API is wanted, a StAX {@link XMLResolver} should be + * set instead, using {@code setFirstSupportedProperty} with the + * property name {@link XMLInputFactory#RESOLVER}. + * @param resolver an instance of org.xml.sax.EntityResolver + * @throws UnsupportedOperationException if not supported by the + * underlying flavor of source or result. + */ + T entityResolver(EntityResolver resolver); + + /** + * Sets a {@link Schema} to be applied during SAX or DOM parsing + *(optional operation). + *

+ * This method only succeeds for a {@code SAXSource} or + * {@code DOMSource} (or a {@code StreamResult}, where the schema + * is set on the parser that will verify the content written). + * Unlike the best-effort behavior of most methods in this + * interface, this one will report failure with an exception. + *
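// A sketch, not part of the patch above, of supplying a Schema while
// retrieving a value; the schema document is passed in here as a String.
// schema(...) leads the chain, as the SAX case described just below requires.
import java.io.StringReader;
import java.sql.SQLException;
import java.sql.SQLXML;
import javax.xml.XMLConstants;
import javax.xml.transform.sax.SAXSource;
import javax.xml.transform.stream.StreamSource;
import javax.xml.validation.Schema;
import javax.xml.validation.SchemaFactory;
import org.postgresql.pljava.Adjusting;
import org.xml.sax.SAXException;

public class ValidatedRead
{
	public static SAXSource validated(SQLXML sqlxml, String schemaText)
	throws SQLException, SAXException
	{
		Schema schema = SchemaFactory
			.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI)
			.newSchema(new StreamSource(new StringReader(schemaText)));
		return sqlxml.getSource(Adjusting.XML.SAXSource.class)
			.schema(schema)   // must lead the chain in the SAX case
			.allowDTD(false)
			.get();
	}
}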

+ * In the SAX case, this must be called before other + * methods of this interface. + * @param schema an instance of javax.xml.validation.Schema + * @throws UnsupportedOperationException if not supported by the + * underlying flavor of source or result. + * @throws IllegalStateException if the underlying implementation is + * SAX-based and another method from this interface has been called + * already. + */ + T schema(Schema schema); + + /** + * Tailors the treatment of 'quiet' exceptions during a chain of + * best-effort adjustments. + *

+ * See {@link Parsing the class description} for an explanation of + * the signaling and quiet lists. + *

+ * This method applies to whatever exceptions may have been added to + * the quiet list by best-effort adjustments made up to that point. + * They can be moved to the signaling list with {@code lax(false)}, + * or simply discarded with {@code lax(true)}. In either case, the + * quiet list is left empty when {@code lax} returns. + *

+ * At the time a {@code get} method is later called, any exception + * at the head of the signaling list will be thrown (possibly + * wrapped in an exception permitted by {@code get}'s {@code throws} + * clause), with any later exceptions on that list retrievable from + * the head exception with + * {@link Exception#getSuppressed getSuppressed}. Otherwise, any + * exception at the head of the quiet list (again with any later + * ones attached as its suppressed list) will be logged at + * {@code WARNING} level. + */ + T lax(boolean discard); + } + + /** + * Adjusting version of {@code javax.xml.transform.Source}, allowing + * various parser features to be configured before calling + * {@link #get get()} to obtain the usable {@code Source} object. + *

+ * Passing this class itself to an {@code SQLXML} object's + * {@code getSource} method, as in + *

+		 * Source src = sqx.getSource(Adjusting.XML.Source.class);
+		 *
+ * will allow the implementation to choose the particular subtype of + * {@code Source} it will return. To obtain a {@code Source} of a + * particular desired type, pass the class literal of one of the + * subtypes {@link SAXSource}, {@link StAXSource}, or {@link DOMSource}. + *

+ * The {@link #get get()} method can only be called once. The adjusting + * methods inherited from {@link Parsing} can only be called before + * {@code get()}. + *

+ * Although this extends {@code javax.xml.transform.Source}, + * implementing classes will likely throw exceptions from the + * {@code Source}-specific methods for getting and setting system IDs. + * Those methods, if needed, should be called on the {@code Source} + * object obtained from {@code get()}. + */ + public interface Source + extends Parsing>, javax.xml.transform.Source + { + /** + * Returns an object of the expected {@code Source} subtype + * reflecting any adjustments made with the other methods. + *

+ * Refer to {@link Parsing the {@code Parsing} class description} + * and the {@link Parsing#lax lax()} method for how any exceptions + * caught while applying best-effort adjustments are handled. + * @return an implementing object of the expected Source subtype + * @throws SQLException for any reason that {@code getSource} might + * have thrown when supplying the corresponding non-Adjusting + * subtype of Source, or for reasons saved while applying + * adjustments. + */ + T get() throws SQLException; + } + + /** + * Adjusting version of a {@code SAXSource}. + */ + public interface SAXSource + extends Source + { + } + + /** + * Adjusting version of a {@code StAXSource}. + */ + public interface StAXSource + extends Source + { + } + + /** + * Adjusting version of a {@code DOMSource}. + */ + public interface DOMSource + extends Source + { + } + + /** + * Adjusting version of {@code javax.xml.transform.Result}, offering + * the adjustment methods of {@link Parsing}, chiefly so that + * there is a way to apply those adjustments to any implicitly-created + * parser used to verify the content that will be written to the + * {@code Result}. + */ + public interface Result + extends Parsing>, javax.xml.transform.Result + { + /** + * Returns an object of the expected {@code Result} subtype + * reflecting any adjustments made with the other methods. + * Refer to {@link Parsing the {@code Parsing} class description} + * and the {@link Parsing#lax lax()} method for how any exceptions + * caught while applying best-effort adjustments are handled. + * @return an implementing object of the expected Result subtype + * @throws SQLException for any reason that {@code getResult} might + * have thrown when supplying the corresponding non-Adjusting + * subtype of Result, or for reasons saved while applying + * adjustments. + */ + T get() throws SQLException; + } + + /** + * Specialized {@code Result} type for setting a new PL/Java + * {@code SQLXML} instance's content from an arbitrary {@code Source} + * object of any of the types JDBC requires the {@code SQLXML} type + * to support. + *

+ * The {@link #set set} method must be called before any of the + * inherited adjustment methods, and the {@link #getSQLXML getSQLXML} + * method only after any adjustments. + *
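// A sketch, not part of the patch above, of the explicit use described here,
// observing that ordering: set(...) first, adjustments next, getSQLXML() last.
// The Connection and the source SQLXML are assumed to be supplied by the
// caller, and the implementation is assumed to honor a request for
// Adjusting.XML.SourceResult, as this interface's description implies.
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.SQLXML;
import javax.xml.transform.Source;
import org.postgresql.pljava.Adjusting;

public class RelaxedCopy
{
	public static SQLXML copyAllowingDTD(Connection conn, SQLXML sx)
	throws SQLException
	{
		SQLXML rx = conn.createSQLXML();
		Adjusting.XML.SourceResult sr =
			rx.setResult(Adjusting.XML.SourceResult.class);
		Source s = sx.getSource(null); // let the source choose the flavor
		sr.set(s)                      // set() before any adjustment
			.allowDTD(true);           // then adjust
		return sr.getSQLXML();         // getSQLXML() last
	}
}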

+ * This is used transparently when another JDBC driver's {@code SQLXML} + * instance is returned from a PL/Java function, or passed to a + * {@code ResultSet} or {@code PreparedStatement}, to produce the + * PL/Java instance that is ultimately needed. In that case, the source + * {@code SQLXML} instance's {@code getSource} method is passed a null + * {@code sourceClass} argument, allowing the source instance to return + * whichever flavor of {@code Source} it efficiently implements, and + * that will be passed to this interface's {@code set} method. + *

+ * Through explicit use of this interface, code can adjust the parser + * restrictions that may be applied in the process, in case the defaults + * are too restrictive. + */ + public interface SourceResult extends Result + { + /** + * Supplies the {@code Source} instance that is the source of the + * content. + *

+ * This method must be called before any of the inherited adjustment + * methods. The argument may be a {@code StreamSource}, + * {@code SAXSource}, {@code StAXSource}, or {@code DOMSource}. If + * it is an instance of {@link Source}, its {@code get} method will + * be called, and must return one of those four supported types. + */ + SourceResult set(javax.xml.transform.Source source) + throws SQLException; + + /** + * Specialization of {@link #set(javax.xml.transform.Source) set} + * for an argument of type {@code StreamSource}. + *

+ * It may encapsulate either an {@code InputStream} or a {@code + * Reader}. In either case (even for a {@code Reader}), the start + * of the stream will be checked for an encoding declaration and + * compared to PostgreSQL's server encoding. If the encoding + * matches, a direct copy is done. If the encoding does not match + * but the source character set is contained in the server character + * set, a transcoding via Unicode is done. In either case, an XML + * parser is used to verify that the copied content is XML, and the + * parser's restrictions can be adjusted by the methods on this + * interface. + *

+ * If the source character set is neither the same as nor contained + * in the server's, the content will be parsed to SAX events and + * reserialized into the server encoding, and this parser's + * restrictions can be adjusted by the methods on this interface. + */ + SourceResult set(javax.xml.transform.stream.StreamSource source) + throws SQLException; + + /** + * Specialization of {@link #set(javax.xml.transform.Source) set} + * for an argument of type {@code SAXSource}. + *

+ * Because the content will be received in an already-parsed form, + * the parser-adjusting methods will have no effect. + */ + SourceResult set(javax.xml.transform.sax.SAXSource source) + throws SQLException; + + /** + * Specialization of {@link #set(javax.xml.transform.Source) set} + * for an argument of type {@code StAXSource}. + *

+ * Because the content will be received in an already-parsed form, + * the parser-adjusting methods will have no effect. + */ + SourceResult set(javax.xml.transform.stax.StAXSource source) + throws SQLException; + + /** + * Provides the content to be copied in the form of a + * {@code String}. + *

+ * An exception from the pattern of {@code Source}-typed arguments, + * this method simplifies retrofitting adjustments into code that + * was using {@code SQLXML}'s {@code setString}. Has the same effect + * as {@link #set(javax.xml.transform.stream.StreamSource) set} with + * a {@code StreamSource} wrapping a {@code StringReader} over the + * {@code String}. + */ + SourceResult set(String source) + throws SQLException; + + /** + * Specialization of {@link #set(javax.xml.transform.Source) set} + * for an argument of type {@code DOMSource}. + *

+ * Because the content will be received in an already-parsed form, + * the parser-adjusting methods will have no effect. + */ + SourceResult set(javax.xml.transform.dom.DOMSource source) + throws SQLException; + + /** + * Returns the result {@code SQLXML} instance ready for handing off + * to PostgreSQL. + *

+ * The handling/logging of exceptions normally handled in a + * {@code get} method happens here for a {@code SourceResult}. + *

+ * Any necessary calls of the inherited adjustment methods must be + * made before this method is called. + */ + SQLXML getSQLXML() throws SQLException; + } + + /** + * Adjusting version of a {@code StreamResult}. + *

+ * In addition to the adjusting methods inherited from + * {@link Result} (which will apply to any XML parser the implementation + * constructs to verify the content written, otherwise having no + * effect), this interface supplies two methods to influence whether the + * constructed {@code StreamResult} will expect a binary stream or a + * character stream. + */ + public interface StreamResult + extends Result + { + StreamResult preferBinaryStream(); + StreamResult preferCharacterStream(); + } + + /** + * Adjusting version of a {@code SAXResult}. + *

+ * The adjusting methods inherited from + * {@link Result} will apply to any XML parser the implementation + * constructs to verify the content written, otherwise having no + * effect. + */ + public interface SAXResult + extends Result + { + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/BasePrincipal.java b/pljava-api/src/main/java/org/postgresql/pljava/BasePrincipal.java new file mode 100644 index 00000000..ed20de77 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/BasePrincipal.java @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2020-2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import java.io.InvalidObjectException; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectStreamException; +import java.io.Serializable; + +import static java.lang.reflect.Modifier.isFinal; + +import static java.util.Objects.requireNonNull; + +import java.security.Principal; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Abstract base class for {@link Principal}s named by SQL simple identifiers. + *

+ * Subclasses are expected to be either {@code abstract} or {@code final}. + */ +abstract class BasePrincipal implements Principal, Serializable +{ + private static final long serialVersionUID = -3577164744804938351L; + + BasePrincipal(String name) + { + this(Simple.fromJava(name)); + } + + BasePrincipal(Simple name) + { + m_name = requireNonNull(name); + assert isFinal(getClass().getModifiers()) + : "instantiating a non-final BasePrincipal subclass"; + } + + private void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException + { + in.defaultReadObject(); + if ( null == m_name ) + throw new InvalidObjectException( + "deserializing a BasePrincipal with null name"); + } + + protected final Simple m_name; + + @Override + public boolean equals(Object other) + { + if ( this == other ) + return true; + if ( getClass().isInstance(other) ) + return m_name.equals(((BasePrincipal)other).m_name); + return false; + } + + @Override + public String toString() + { + Class c = getClass(); + return c.getCanonicalName() + .substring(1+c.getPackageName().length()) + ": " + getName(); + } + + @Override + public int hashCode() + { + return m_name.hashCode(); + } + + @Override + public String getName() + { + return m_name.toString(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/ObjectPool.java b/pljava-api/src/main/java/org/postgresql/pljava/ObjectPool.java index fb04ba0f..6a3cb21a 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/ObjectPool.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/ObjectPool.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -21,7 +21,7 @@ * for the class to be pooled, which must implement {@link PooledObject}. * @author Thomas Hallgren */ -public interface ObjectPool +public interface ObjectPool { /** * Obtain a pooled object, calling its {@link PooledObject#activate()} @@ -30,7 +30,7 @@ public interface ObjectPool * * @return A new object or an object found in the pool. */ - PooledObject activateInstance() + T activateInstance() throws SQLException; /** @@ -38,13 +38,13 @@ PooledObject activateInstance() * to the pool. * @param instance The instance to passivate. */ - void passivateInstance(PooledObject instance) + void passivateInstance(T instance) throws SQLException; /** * Call the {@link PooledObject#remove()} method and evict the object * from the pool. */ - void removeInstance(PooledObject instance) + void removeInstance(T instance) throws SQLException; } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/PLPrincipal.java b/pljava-api/src/main/java/org/postgresql/pljava/PLPrincipal.java new file mode 100644 index 00000000..cd003cae --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/PLPrincipal.java @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import java.io.InvalidObjectException; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectStreamException; +import java.io.Serializable; + +import org.postgresql.pljava.annotation.Function.Trust; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Java {@code Principal} representing a PostgreSQL {@code PROCEDURAL LANGUAGE}, + * which has a name (a simple identifier, not schema-qualified) and is either + * {@code Sandboxed} (declared with SQL {@code CREATE TRUSTED LANGUAGE} or + * {@code Unsandboxed}. + *

+ * Only the subclasses, {@code Sandboxed} or {@code Unsandboxed} can be + * instantiated, or granted permissions in policy. + */ +public abstract class PLPrincipal extends BasePrincipal +{ + private static final long serialVersionUID = 4876111394761861189L; + + PLPrincipal(String name) + { + super(name); + } + + PLPrincipal(Simple name) + { + super(name); + } + + private void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException + { + in.defaultReadObject(); + Class c = getClass(); + if ( c != Sandboxed.class && c != Unsandboxed.class ) + throw new InvalidObjectException( + "deserializing unknown PLPrincipal subclass: " + + c.getName()); + } + + /** + * Returns either {@link Trust#SANDBOXED SANDBOXED} or + * {@link Trust#UNSANDBOXED UNSANDBOXED} + * according to PostgreSQL's catalog entry for the language. + */ + public abstract Trust trust(); + + /** + * Java {@code Principal} representing a PostgreSQL + * {@code PROCEDURAL LANGUAGE} that was declared with the {@code TRUSTED} + * keyword and can be used to declare new functions by any role that has + * been granted {@code USAGE} permission on it. + *

+ * A Java security policy can grant permissions to this {@code Principal} + * by class and wildcard name, or by class and the specific name given in + * SQL to the language. + */ + public static final class Sandboxed extends PLPrincipal + { + private static final long serialVersionUID = 55704990613451177L; + + /** + * Construct an instance given its name in {@code String} form. + *

+ * The name will be parsed as described for + * {@link Simple#fromJava Identifier.Simple.fromJava}. + */ + public Sandboxed(String name) + { + super(name); + } + + /** + * Construct an instance given its name already as an + * {@code Identifier.Simple}. + */ + public Sandboxed(Simple name) + { + super(name); + } + + /** + * Returns {@code SANDBOXED}. + */ + @Override + public Trust trust() + { + return Trust.SANDBOXED; + } + } + + /** + * Java {@code Principal} representing a PostgreSQL + * {@code PROCEDURAL LANGUAGE} that was declared without the + * {@code TRUSTED} keyword, and can be used to declare new functions only + * by a PostgreSQL superuser. + *

+ * A Java security policy can grant permissions to this {@code Principal} + * by class and wildcard name, or by class and the specific name given in + * SQL to the language. + */ + public static final class Unsandboxed extends PLPrincipal + { + private static final long serialVersionUID = 7487230786813048525L; + + /** + * Construct an instance given its name in {@code String} form. + *

+ * The name will be parsed as described for + * {@link Simple#fromJava Identifier.Simple.fromJava}. + */ + public Unsandboxed(String name) + { + super(name); + } + + /** + * Construct an instance given its name already as an + * {@code Identifier.Simple}. + */ + public Unsandboxed(Simple name) + { + super(name); + } + + /** + * Returns {@code UNSANDBOXED}. + */ + @Override + public Trust trust() + { + return Trust.UNSANDBOXED; + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/ResultSetHandle.java b/pljava-api/src/main/java/org/postgresql/pljava/ResultSetHandle.java index 89b2f935..8b4a6642 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/ResultSetHandle.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/ResultSetHandle.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2018 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -21,11 +21,24 @@ * of a {@link java.sql.ResultSet}. The primary motivation for this interface is * that an implementation that returns a ResultSet must be able to close the * connection and statement when no more rows are requested. - * + *

* A function returning a SET OF a complex type generated on the * fly (rather than obtained from a query) would return * {@link ResultSetProvider} instead. One returning a SET OF a * simple type should simply return an {@link java.util.Iterator}. + *

+ * In the case of a function declared to return {@code SETOF RECORD} rather than + * a complex type known in advance, SQL requires any query using the function + * to include a column definition list. If the number of those columns does not + * match the number in the {@code ResultSet} returned here, only as many as the + * caller expects (in index order starting with 1) will be used; an exception is + * thrown if this result set has too few columns. If the types expected by the + * caller differ, values are converted as if retrieved one by one from this + * {@code ResultSet} and stored into the caller's result set with + * {@link ResultSet#updateObject(int, Object) updateObject}. + *
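// A sketch, not part of the patch above: a ResultSetHandle backed by a query
// over PL/Java's default connection. The class, method, and query here are
// illustrative only; the SQL function would be declared SETOF of a matching
// complex type, or SETOF RECORD with a column definition list as described
// above.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import org.postgresql.pljava.ResultSetHandle;

public class ListLanguages implements ResultSetHandle
{
	private Statement m_stmt;

	public static ResultSetHandle listLanguages()
	{
		return new ListLanguages();
	}

	@Override
	public ResultSet getResultSet() throws SQLException
	{
		Connection c =
			DriverManager.getConnection("jdbc:default:connection");
		m_stmt = c.createStatement();
		return m_stmt.executeQuery(
			"SELECT lanname, lanpltrusted FROM pg_catalog.pg_language");
	}

	@Override
	public void close() throws SQLException
	{
		m_stmt.close();
	}
}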

+ * A function that needs to know the names or types of the caller's expected + * columns should implement {@link ResultSetProvider}. * @author Thomas Hallgren */ public interface ResultSetHandle diff --git a/pljava-api/src/main/java/org/postgresql/pljava/ResultSetProvider.java b/pljava-api/src/main/java/org/postgresql/pljava/ResultSetProvider.java index 3e5d2bed..294b8133 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/ResultSetProvider.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/ResultSetProvider.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -14,6 +14,7 @@ import java.sql.ResultSet; import java.sql.SQLException; +import java.sql.SQLNonTransientException; /** * An implementation of this interface is returned from functions and procedures @@ -23,6 +24,11 @@ * it should just return {@link ResultSetHandle} instead. Functions that * return SET OF a simple type should simply return an * {@link java.util.Iterator Iterator}. + *

+ * For a function declared to return {@code SETOF RECORD} rather than a specific + * complex type known in advance, the {@code receiver} argument to + * {@link #assignRowValues(ResultSet,int) assignRowValues} can be queried to + * learn the number, names, and types of columns expected by the caller. * @author Thomas Hallgren */ public interface ResultSetProvider @@ -31,13 +37,42 @@ public interface ResultSetProvider * This method is called once for each row that should be returned from * a procedure that returns a set of rows. The receiver * is a {@code SingleRowWriter} - * writer instance that is used for capturing the data for the row. + * instance that is used to capture the data for the row. + *

+ * If the return type is {@code RECORD} rather than a specific complex type, + * SQL requires a column definition list to follow any use of the function + * in a query. The {@link ResultSet#getMetaData() ResultSetMetaData} + * of {@code receiver} can be used here to learn the number, names, + * and types of the columns expected by the caller. (It can also be used in + * the case of a specific complex type, but in that case the names and types + * are probably already known.) + *
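// A sketch, not part of the patch above: a provider suitable for a function
// declared RETURNS SETOF RECORD, filling however many columns the caller's
// column definition list declares with the current row number (assuming the
// declared columns are of numeric types). Names here are illustrative only.
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import org.postgresql.pljava.ResultSetProvider;

public class RowNumbers implements ResultSetProvider.Large
{
	private final long m_rows;

	private RowNumbers(long rows)
	{
		m_rows = rows;
	}

	public static ResultSetProvider countTo(long rows)
	{
		return new RowNumbers(rows);
	}

	@Override
	public boolean assignRowValues(ResultSet receiver, long currentRow)
	throws SQLException
	{
		if ( currentRow >= m_rows )
			return false;                              // end of data
		ResultSetMetaData md = receiver.getMetaData(); // caller's columns
		for ( int i = 1; i <= md.getColumnCount(); ++ i )
			receiver.updateObject(i, currentRow);
		return true;
	}

	@Override
	public void close()
	{
	}
}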

+ * This default implementation calls + * {@link #assignRowValues(ResultSet,int)}, or throws an + * {@code SQLException} with SQLState 54000 (program limit exceeded) if + * the row number exceeds {@code Integer.MAX_VALUE}. * @param receiver Receiver of values for the given row. - * @param currentRow Row number. First call will have row number 0. - * @return true if a new row was provided, false + * @param currentRow Row number, zero on the first call, incremented by one + * on each subsequent call. + * @return {@code true} if a new row was provided, {@code false} * if not (end of data). * @throws SQLException */ + default boolean assignRowValues(ResultSet receiver, long currentRow) + throws SQLException + { + if ( currentRow <= Integer.MAX_VALUE ) + return assignRowValues(receiver, (int)currentRow); + throw new SQLNonTransientException( + getClass().getCanonicalName() + + " implements only the assignRowValues method limited to" + + " Integer.MAX_VALUE rows; this result set is too large", "54000"); + } + + /** + * Older version where currentRow is limited to the range + * of {@code int}. + */ boolean assignRowValues(ResultSet receiver, int currentRow) throws SQLException; @@ -47,4 +82,42 @@ boolean assignRowValues(ResultSet receiver, int currentRow) */ void close() throws SQLException; + + /** + * Version of {@code ResultSetProvider} where the {@code assignRowValues} + * method accepting a {@code long} row count must be implemented, and the + * {@code int} version defaults to using it. + */ + interface Large extends ResultSetProvider + { + /** + * This method is called once for each row that should be returned + * from a procedure that returns a set of rows. The receiver is a + * {@code SingleRowWriter} instance that is used to capture the data + * for the row. + *

+ * If the return type is {@code RECORD} rather than a specific complex + * type, SQL requires a column definition list to follow any use of the + * function in a query. + * The {@link ResultSet#getMetaData() ResultSetMetaData} + * of {@code receiver} can be used here to learn the number, names, and + * types of the columns expected by the caller. (It can also be used in + * the case of a specific complex type, but in that case the names and + * types are probably already known.) + * @param receiver Receiver of values for the given row. + * @param currentRow Row number, zero on the first call, incremented by one + * on each subsequent call. + * @return {@code true} if a new row was provided, {@code false} + * if not (end of data). + * @throws SQLException + */ + boolean assignRowValues(ResultSet receiver, long currentRow) + throws SQLException; + + default boolean assignRowValues(ResultSet receiver, int currentRow) + throws SQLException + { + return assignRowValues(receiver, (long)currentRow); + } + } } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/SavepointListener.java b/pljava-api/src/main/java/org/postgresql/pljava/SavepointListener.java index 8f1e2460..967ff307 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/SavepointListener.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/SavepointListener.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -9,6 +9,7 @@ * Contributors: * Tada AB * Purdue University + * Chapman Flack */ package org.postgresql.pljava; @@ -16,11 +17,17 @@ import java.sql.Savepoint; /** - * Interface for a listener to be notified of the start and commit or abort of - * savepoints. To receive such notifications, implement this interface, with - * the three methods that will be called in those three cases, and pass an - * instance to {@link Session#addSavepointListener}. - * + * Interface for a listener to be notified of the start and pre-commit, commit, + * or abort of savepoints. To receive such notifications, implement this + * interface, with the methods that will be called in the cases of interest, + * and pass an instance to {@link Session#addSavepointListener}. The default + * implementations of these methods do nothing. + *

+ * It is possible for a listener method to be called with savepoint + * null, or parent null, or both; that can happen if the application + * code has not kept a strong reference to the {@code Savepoint} object in + * question. + *
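// A sketch, not part of this patch: obtaining the Session and registering a
// savepoint listener. Only APIs shown elsewhere in this patch are used; the
// callback overrides are omitted because, as noted above, the default
// implementations do nothing, so a listener overrides only what it needs.
import java.sql.SQLException;
import org.postgresql.pljava.SavepointListener;
import org.postgresql.pljava.Session;
import org.postgresql.pljava.SessionManager;

public class ListenerSetup
{
	public static void register() throws SQLException
	{
		Session session = SessionManager.current();
		session.addSavepointListener(new SavepointListener()
		{
			// override the start / commit / abort callbacks of interest
		});
	}
}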

* SavepointListener exposes a * listener to the list of listeners that will - * receive savepoint events. This method does nothing if the listener - * was already added. + * Returns an unmodifiable defensive copy of the Java + * {@link System#getProperties() system properties} taken early in PL/Java + * startup before user code has an opportunity to write them. + *

+ * When PL/Java is running without security policy enforcement, as on stock + * Java 24 and later, using the frozen properties can simplify defensive + * coding against the possibility of arbitrary property modifications. + * + * @return a {@link Properties} object that departs from the API spec by + * throwing {@link UnsupportedOperationException} from any method if the + * properties would otherwise be modified. + */ + Properties frozenSystemProperties(); + + /** + * Adds the specified {@code listener} to the list of listeners that will + * receive savepoint events. An {@link AccessControlContext} saved by this + * method will be used when the listener is invoked. If the listener was + * already registered, it remains registered just once, though the + * {@code AccessControlContext} is updated and its order of invocation + * relative to other listeners may change. * @param listener The listener to be added. */ void addSavepointListener(SavepointListener listener); /** - * Adds the specified listener to the list of listeners that will - * receive transactional events. This method does nothing if the listener - * was already added. + * Adds the specified {@code listener} to the list of listeners that will + * receive transaction events. An {@link AccessControlContext} saved by this + * method will be used when the listener is invoked. If the listener was + * already registered, it remains registered just once, though the + * {@code AccessControlContext} is updated and its order of invocation + * relative to other listeners may change. * @param listener The listener to be added. */ void addTransactionListener(TransactionListener listener); /** * Obtain an attribute from the current session. + * + * @deprecated {@code Session}'s attribute store once had a special, and + * possibly useful, transactional behavior, but since PL/Java 1.2.0 it has + * lacked that, and offers nothing you don't get with an ordinary + * {@code Map} (that forbids nulls). If some kind of store with + * transactional behavior is needed, it should be implemented in straight + * Java and kept in sync by using a {@link TransactionListener}. * @param attributeName The name of the attribute * @return The value of the attribute */ + @Deprecated(since="1.5.3", forRemoval=true) Object getAttribute(String attributeName); /** @@ -52,7 +96,7 @@ public interface Session * constructor for one argument of type ObjectPool. * @return An object pool that pools object of the given class. */ - ObjectPool getObjectPool(Class cls); + ObjectPool getObjectPool(Class cls); /** * Return the current effective database user name. @@ -91,10 +135,10 @@ public interface Session * unconditionally, which is incorrect for any PostgreSQL version newer * than 8.0, because it was unaware of {@code SET ROLE} introduced in * 8.1. Any actual use case for a method that ignores roles and reports - * only the session ID should be - * reported as an issue. + * only the session ID should be reported as an issue. */ - @Deprecated + @Deprecated(since="1.5.0", forRemoval=true) String getSessionUserName(); /** @@ -120,40 +164,56 @@ void executeAsOuterUser(Connection conn, String statement) * which is incorrect for any PostgreSQL version newer than 8.0, because * it was unaware of {@code SET ROLE} introduced in 8.1. Any actual use * case for a method that ignores roles and uses only the session ID - * should be reported as an - * issue. + * should be reported as an issue. 
*/ - @Deprecated + @Deprecated(since="1.5.0", forRemoval=true) void executeAsSessionUser(Connection conn, String statement) throws SQLException; /** * Remove an attribute previously stored in the session. If * no attribute is found, nothing happens. + * + * @deprecated {@code Session}'s attribute store once had a special, and + * possibly useful, transactional behavior, but since PL/Java 1.2.0 it has + * lacked that, and offers nothing you don't get with an ordinary + * {@code Map} (that forbids nulls). If some kind of store with + * transactional behavior is needed, it should be implemented in straight + * Java and kept in sync by using a {@link TransactionListener}. * @param attributeName The name of the attribute. */ + @Deprecated(since="1.5.3", forRemoval=true) void removeAttribute(String attributeName); /** - * Removes the specified listener from the list of listeners that will - * receive savepoint events. This method does nothing unless the listener is - * found. + * Removes the specified {@code listener} from the list of listeners that + * will receive savepoint events. This method does nothing unless + * the listener is found. * @param listener The listener to be removed. */ void removeSavepointListener(SavepointListener listener); /** - * Removes the specified listener from the list of listeners that will - * receive transactional events. This method does nothing unless the listener is - * found. + * Removes the specified {@code listener} from the list of listeners that + * will receive transaction events. This method does nothing unless + * the listener is found. * @param listener The listener to be removed. */ void removeTransactionListener(TransactionListener listener); /** * Set an attribute to a value in the current session. + * + * @deprecated {@code Session}'s attribute store once had a special, and + * possibly useful, transactional behavior, but since PL/Java 1.2.0 it has + * lacked that, and offers nothing you don't get with an ordinary + * {@code Map} (that forbids nulls). If some kind of store with + * transactional behavior is needed, it should be implemented in straight + * Java and kept in sync by using a {@link TransactionListener}. * @param attributeName * @param value */ + @Deprecated(since="1.5.3", forRemoval=true) void setAttribute(String attributeName, Object value); } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/SessionManager.java b/pljava-api/src/main/java/org/postgresql/pljava/SessionManager.java index 251bc890..3211e4db 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/SessionManager.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/SessionManager.java @@ -1,15 +1,21 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; import java.sql.SQLException; +import static java.util.ServiceLoader.load; + /** * The SessionManager makes the current {@link Session} available to the * caller. @@ -17,8 +23,6 @@ */ public class SessionManager { - private static Method s_getSession; - /** * Returns the current session. */ @@ -27,32 +31,33 @@ public static Session current() { try { - if(s_getSession == null) - { - String sp = System.getProperty( - "org.postgresql.pljava.sessionprovider", - "org.postgresql.pljava.internal.Backend"); - Class spc = Class.forName(sp); - s_getSession = spc.getMethod("getSession", null); - } - return (Session)s_getSession.invoke(null, null); + return Holder.s_session; } - catch (RuntimeException e) + catch ( ExceptionInInitializerError e ) { + Throwable c = e.getCause(); + if ( c instanceof SQLException ) + throw (SQLException)c; throw e; } - catch (InvocationTargetException e) - { - Throwable t = e.getTargetException(); - if(t instanceof SQLException) - throw (SQLException)t; - if(t instanceof RuntimeException) - throw (RuntimeException)t; - throw new SQLException(t.getMessage()); - } - catch (Exception e) - { - throw new SQLException(e.getMessage()); + } + + private static class Holder + { + private static final Session s_session; + + static { + try + { + s_session = load( + Session.class.getModule().getLayer(), Session.class) + .findFirst().orElseThrow(() -> new SQLException( + "could not obtain PL/Java Session object")); + } + catch ( SQLException e ) + { + throw new ExceptionInInitializerError(e); + } } } } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/TransactionListener.java b/pljava-api/src/main/java/org/postgresql/pljava/TransactionListener.java index d8598708..65e18edc 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/TransactionListener.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/TransactionListener.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -9,16 +9,19 @@ * Contributors: * Tada AB * Purdue University + * Chapman Flack */ package org.postgresql.pljava; import java.sql.SQLException; /** - * Interface for a listener to be notified of prepare, and commit or abort, of - * distributed transactions. To receive such notifications, implement this - * interface, with the three methods that will be called in those three cases, - * and pass an instance to {@link Session#addTransactionListener}. + * Interface for a listener to be notified of prepare, and commit, abort, + * or other phase transitions, of distributed transactions. To receive + * such notifications, implement this interface, with the methods that + * will be called in the cases of interest, and pass an instance to + * {@link Session#addTransactionListener}. The default implementations of these + * methods do nothing. * * TransactionListener exposes a * The returned set will be updateable and positioned on a * valid row. 
When the trigger call returns, the trigger manager will see * the changes that has been made to this row and construct a new tuple @@ -44,7 +44,7 @@ public interface TriggerData /** * Returns the ResultSet that represents the old row. This ResultSet will - * be null for insert triggers and for triggers that was fired for + * be null for insert triggers and for triggers that were fired for * statement.
The returned set will be read-only and positioned on a * valid row. * @@ -152,4 +152,12 @@ public interface TriggerData * if the contained native buffer has gone stale. */ boolean isFiredByUpdate() throws SQLException; + + /** + * Advise PostgreSQL to silently suppress the operation on this row. + * + * @throws SQLException + * if called in an {@code AFTER} or a {@code STATEMENT} trigger + */ + void suppress() throws SQLException; } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Aggregate.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Aggregate.java new file mode 100644 index 00000000..7479c17b --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Aggregate.java @@ -0,0 +1,442 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.annotation; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Repeatable; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Declares a PostgreSQL aggregate. + *

+ * An aggregate function in PostgreSQL is defined by using + * {@code CREATE AGGREGATE} to specify its name and argument types, along with + * at least one "plan" for evaluating it, where the plan specifies at least: + * a data type to use for the accumulating state, and a function (here called + * "accumulate") called for each row to update the state. If the plan includes + * a function "finish", its return type is the return type of the aggregate; + * with no "finish" function, the state type is also the aggregate's return + * type. + *

+ * Optionally, a plan can include a "combine" function, which is passed two + * instances of the state type and combines them, to allow aggregate evaluation + * to be parallelized. The names "accumulate", "combine", and "finish" are not + * exactly as used in the PostgreSQL command (those are unpronounceable + * abbreviations), but follow the usage in {@code java.util.stream.Collector}, + * which should make them natural to Java programmers. PL/Java will generate the + * SQL with the unpronounceable names. + *
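For illustration, a declaration following the plan shape just described might look like the sketch below. The schema, the SQL names, and the assumption that a Java double[] state maps to float8[] are invented for the example, not taken from the PL/Java sources.

    import org.postgresql.pljava.annotation.Aggregate;
    import org.postgresql.pljava.annotation.Function;

    // A hypothetical average over float8 values: the state is a two-element
    // float8[] holding {count, sum}; "accumulate" folds one row into it and
    // "finish" turns the final state into the result.
    @Aggregate(
        name = "javatest.java_avg",
        arguments = { "val float8" },
        plan = @Aggregate.Plan(
            stateType = "float8[]",
            initialState = "{0,0}",
            accumulate = "javatest.avg_accum",
            finish = "javatest.avg_finish"
        )
    )
    public class AverageExample
    {
        @Function(schema = "javatest", name = "avg_accum")
        public static double[] accumulate(double[] state, double val)
        {
            return new double[] { state[0] + 1, state[1] + val };
        }

        @Function(schema = "javatest", name = "avg_finish")
        public static Double finish(double[] state)
        {
            return 0 == state[0] ? null : state[1] / state[0];
        }
    }

Here "accumulate" receives the state followed by the aggregated argument, and "finish" receives only the state, matching the argument rules spelled out in the paragraphs that follow.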

+ * If an aggregate function might be used in a window with a moving frame start, + * it can be declared with a second plan ({@code movingPlan}) that includes a + * "remove" function that may be called, passing values that were earlier + * accumulated into the state, to remove them again as the frame start advances + * past them. (Java's {@code Collector} has no equivalent of a "remove" + * function.) A "remove" function may only be specified (and must be specified) + * in a plan given as {@code movingPlan}. + *
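Continuing that invented sketch, a moving-window-friendly variant adds a movingPlan naming a "remove" function that undoes one earlier accumulation; everything here reuses the hypothetical names introduced above.

    import org.postgresql.pljava.annotation.Aggregate;
    import org.postgresql.pljava.annotation.Function;

    // Hypothetical: same plan as before, plus a movingPlan with "remove" so
    // the frame start can advance without recomputing the whole state.
    @Aggregate(
        name = "javatest.java_avg_moving",
        arguments = { "val float8" },
        plan = @Aggregate.Plan(
            stateType = "float8[]",
            initialState = "{0,0}",
            accumulate = "javatest.avg_accum",
            finish = "javatest.avg_finish"
        ),
        movingPlan = @Aggregate.Plan(
            stateType = "float8[]",
            initialState = "{0,0}",
            accumulate = "javatest.avg_accum",
            remove = "javatest.avg_remove",
            finish = "javatest.avg_finish"
        )
    )
    public class MovingAverageExample
    {
        @Function(schema = "javatest", name = "avg_remove")
        public static double[] remove(double[] state, double val)
        {
            return new double[] { state[0] - 1, state[1] - val };
        }
    }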

+ * Any function referred to in a plan is specified by its name, optionally + * schema-qualified. Its argument types are not specified; they are implied by + * those declared for the aggregate itself. An "accumulate" function gets one + * argument of the state type, followed by all those given as {@code arguments}. + * The same is true of a "remove" function. A "combine" function is passed + * two arguments of the state type. + *

+ * A "finish" function has a first argument of the state type. If the aggregate + * is declared with any {@code directArguments}, those follow the state type. + * (Declaring {@code directArguments} makes the aggregate an "ordered-set + * aggregate", which could additionally have {@code hypothetical=true} to make + * it a "hypothetical-set aggregate", for which the PostgreSQL documentation + * covers the details.) If {@code polymorphic=true}, the "finish" function's + * argument list will end with {@code arguments.length} additional arguments; + * they will all be passed as {@code NULL} when the finisher is called, but will + * have the right run-time types, which may be necessary to resolve the + * finisher's return type, if polymorphic types are involved. + *

+ * If any of the functions or types mentioned in this declaration are also being + * generated into the same deployment descriptor, the {@code CREATE AGGREGATE} + * generated from this annotation will follow them. Other ordering dependencies, + * if necessary, can be explicitly arranged with {@code provides} and + * {@code requires}. + *

+ * While this annotation can generate {@code CREATE AGGREGATE} deployment + * commands with the features available in PostgreSQL, + * at present there are limits to which aggregate features can be implemented + * purely in PL/Java. In particular, PL/Java functions currently have no access + * to the PostgreSQL data structures needed for an ordered-set or + * hypothetical-set aggregate. Such an aggregate could be implemented by writing + * some of its support functions in another procedural language; this annotation + * could still be used to automatically generate the declaration. + * @author Chapman Flack + */ +@Documented +@Target({ElementType.TYPE,ElementType.METHOD}) +@Repeatable(Aggregate.Container.class) +@Retention(RetentionPolicy.CLASS) +public @interface Aggregate +{ + /** + * Declares the effect of the {@code finish} function in a {@code Plan}. + *

+ * If {@code READ_ONLY}, PostgreSQL can continue updating the same state + * with additional rows, and call the finisher again for updated results. + *

+ * If {@code SHAREABLE}, the state cannot be further updated after + * a finisher call, but finishers for other aggregates that use the same + * state representation (and are also {@code SHAREABLE}) can be called to + * produce the results for those aggregates. An example could be the several + * linear-regression-related aggregates, all of which can work from a state + * that contains the count of values, sum of values, and sum of squares. + *

+ * If {@code READ_WRITE}, no further use can be made of the state after + * the finisher has run. + */ + enum FinishEffect { READ_ONLY, SHAREABLE, READ_WRITE }; + + /** + * Specifies one "plan" for evaluating the aggregate; one must always be + * specified (as {@code plan}), and a second may be specified (as + * {@code movingPlan}). + *

+ * A plan must specify a data type {@code stateType} to hold the + * accumulating state, optionally an estimate of its expected size in bytes, + * and optionally its initial contents. The plan also specifies up to four + * functions {@code accumulate}, {@code combine}, {@code finish}, and + * {@code remove}. Only {@code accumulate} is always required; + * {@code remove} is required in a {@code movingPlan}, and otherwise not + * allowed. + *

+ * Each of the four functions may be specified as a single string "name", + * which will be leniently parsed as an optionally schema-qualified name, + * or as two strings {@code {"schema","local"}} with the schema made + * explicit. The two-string form with {@code ""} as the schema represents + * an explicitly non-schema-qualified name. + */ + @Documented + @Target({}) + @Retention(RetentionPolicy.CLASS) + @interface Plan + { + /** + * The data type to be used to hold the accumulating state. + *

+ * This will be the first argument type for all of the support functions + * except {@code deserialize} (both argument types for {@code combine}) + * and also, if there is no {@code finish} function, the result type + * of the aggregate. + */ + String stateType() default ""; + + /** + * An optional estimate of the size in bytes that the state may grow + * to occupy. + */ + int stateSize() default 0; + + /** + * An optional initial value for the state (which will otherwise be + * initially null). + *

+ * Must be a string the state type's text-input conversion would accept. + *

+ * Omitting the initial value only works if the {@code accumulate} + * function is {@code onNullInput=CALLED}, or if the aggregate's first + * argument type is the same as the state type. + */ + String initialState() default ""; + + /** + * Name of the function that will be called for each row being + * aggregated. + *

+ * The function named here must have an argument list that starts with + * one argument of the state type, followed by all of this aggregate's + * {@code arguments}. It does not receive the {@code directArguments}, + * if any. + */ + String[] accumulate() default {}; + + /** + * Name of an optional function to combine two instances of the state + * type. + *

+ * The function named here should be one that has two arguments, both + * of the state type, and returns the state type. + */ + String[] combine() default {}; + + /** + * Name of an optional function to produce the aggregate's result from + * the final value of the state; without this function, the aggregate's + * result type is the state type, and the result is simply the final + * value of the state. + *

+ * When this function is specified, its result type determines the + * result type of the aggregate. Its argument list signature is a single + * argument of the state type, followed by all the + * {@code directArguments} if any, followed (only if {@code polymorphic} + * is true) by {@code arguments.length} additional arguments for which + * nulls will be passed at runtime but with their resolved runtime + * types. + */ + String[] finish() default {}; + + /** + * Name of an optional function that can reverse the effect on the state + * of a row previously passed to {@code accumulate}. + *

+ * The function named here should have the same argument list signature + * as the {@code accumulate} function. + *

+ * Required in a {@code movingPlan}; not allowed otherwise. + */ + String[] remove() default {}; + + /** + * Whether the argument list for {@code finish} should be extended with + * slots corresponding to the aggregated {@code arguments}, all nulls at + * runtime but with their resolved runtime types. + */ + boolean polymorphic() default false; + + /** + * The effect of the {@code finish} function in this {@code Plan}. + *

+ * If {@code READ_ONLY}, PostgreSQL can continue updating the same + * state with additional rows, and call the finisher again for updated + * results. + *

+ * If {@code SHAREABLE}, the state cannot be further updated after a + * finisher call, but finishers for other aggregates that use the same + * state representation (and are also {@code SHAREABLE}) can be called + * to produce the results for those aggregates. An example could be the + * several linear-regression-related aggregates, all of which can work + * from a state that contains the count of values, sum of values, and + * sum of squares. + *

+ * If {@code READ_WRITE}, no further use can be made of the state after + * the finisher has run. + *

+ * Leaving this to default is not exactly equivalent to specifying the + * default value shown here. If left to default, it will be left + * unspecified in the generated {@code CREATE AGGREGATE}, and PostgreSQL + * will apply its default, which is {@code READ_ONLY} in the case of an + * ordinary aggregate, but {@code READ_WRITE} for an ordered-set or + * hypothetical-set aggregate. + */ + FinishEffect finishEffect() default FinishEffect.READ_ONLY; + + /** + * Name of a serializing function ({@code internal} to {@code bytea}), + * usable only if a {@link #combine() combine} function is specified and + * the aggregate's state type is {@code internal}. + *

+ * Not allowed in a {@code movingPlan}. Not allowed without + * {@code deserialize}. + */ + String[] serialize() default {}; + + /** + * Name of a deserializing function (({@code bytea}, {@code internal}) + * to {@code internal}), usable only if a {@code serialize} function is + * also specified. + *

+ * Not allowed in a {@code movingPlan}. + */ + String[] deserialize() default {}; + } + + /** + * Name for this aggregate. + *

+ * May be specified in explicit {@code {"schema","localname"}} form, or as + * a single string that will be leniently parsed as an optionally + * schema-qualified name. In the explicit form, {@code ""} as the schema + * will make the name explicitly unqualified (in case the local name might + * contain a dot and be misread as a qualified name). + *

+ * When this annotation is not placed on a method, there is no default, and + * a name must be supplied. When the annotation is on a method (which can be + * either the {@code accumulate} or the {@code finish} function for the + * aggregate), the default name will be the same as the SQL name given for + * the function. That is typically possible because the parameter signature + * for the aggregate function will not be the same as either the + * {@code accumulate} or the {@code finish} function. The exception is if + * the annotation is on the {@code finish} function and the aggregate has + * exactly one parameter of the same type as the state; in that case another + * name must be given here. + */ + String[] name() default {}; + + /** + * Names and types of the arguments to be aggregated: the ones passed to the + * {@code accumulate} function for each aggregated row. + *

+ * Each element is a name and a type specification, separated by whitespace. + * An element that begins with whitespace declares a parameter with no + * name, only a type. The name is an ordinary SQL identifier; if it would + * be quoted in SQL, naturally each double-quote must be represented as + * {@code \"} in Java. + *

+ * When this annotation does not appear on a method, there is no default, + * and arguments must be declared here. If the annotation appears on a + * method supplying the {@code accumulate} function, this element can be + * omitted, and the arguments will be those of the function (excepting the + * first one, which corresponds to the state). + */ + String[] arguments() default {}; + + /** + * Names and types of the "direct arguments" to an ordered-set or + * hypothetical-set aggregate (specifying this element is what makes an + * ordered-set aggregate, which will be a hypothetical-set aggregate if + * {@code hypothetical=true} is also supplied). + *

+ * Specified as for {@code arguments}. The direct arguments are not passed + * to the {@code accumulate} function for each aggregated row; they are only + * passed to the {@code finish} function when producing the result. + */ + String[] directArguments() default {}; + + /** + * Specify {@code true} in an ordered-set aggregate (one with + * {@code directArguments} specified) to make it a hypothetical-set + * aggregate. + *

+ * When {@code true}, the {@code directArguments} list must be at least as + * long as {@code arguments}, and its last {@code arguments.length} types + * must match {@code arguments} one-to-one. When the {@code finish} function + * is called, those last direct arguments will carry the caller-supplied + * values for the "hypothetical" row. + */ + boolean hypothetical() default false; + + /** + * Whether the aggregate has a variadic last argument. + *

+ * Specify as a single boolean, {@code variadic=true}, to declare an + * ordinary aggregate variadic. The last type of its declared + * {@code arguments} must then be either an array type, or + * {@code pg_catalog."any"}. + *

+ * The form {@code variadic={boolean,boolean}} is for an ordered-set + * aggregate, which has both a list of {@code directArguments} (the first + * boolean) and its aggregated {@code arguments} (the second). For an + * ordered-set aggregate, {@code "any"} is the only allowed type for a + * variadic argument. + *

+ * When also {@code hypothetical} is true, the requirement that the + * {@code directArguments} have a tail matching the {@code arguments} + * implies that the two lists must both or neither be variadic. + */ + boolean[] variadic() default {}; + + /** + * The {@link Plan Plan} normally to be used for evaluating this aggregate, + * except possibly in a moving-window context if {@code movingPlan} is also + * supplied. + *

+ * Though declared as an array, only one plan is allowed here. It may not + * name a {@code remove} function; only a {@code movingPlan} can do that. + * This plan can be omitted only if the {@code @Aggregate} annotation + * appears on a Java method intended as the {@code accumulate} function and + * the rest of the plan is all to be inferred or defaulted. + */ + Plan[] plan() default {}; + + + /** + * An optional {@link Plan Plan} that may be more efficient for evaluating + * this aggregate in a moving-window context. + *

+ * Though declared as an array, only one plan is allowed here. It must + * name a {@code remove} function. + *

+ * A {@code movingPlan} may not have {@code serialize}/{@code deserialize} + * functions; only {@code plan} can have those. + */ + Plan[] movingPlan() default {}; + + /** + * Parallel-safety declaration for this aggregate; PostgreSQL's planner + * will consult this only, not the declarations on the individual supporting + * functions. + *

+ * See {@link Function#parallel() Function.parallel} for the implications. + * In PL/Java, any setting other than {@code UNSAFE} should be considered + * experimental. + */ + Function.Parallel parallel() default Function.Parallel.UNSAFE; + + /** + * Name of an operator (declared as either the less-than or greater-than + * strategy member of a {@code btree} operator class) such that the result + * of this aggregate is the same as the first result from {@code ORDER BY} + * over the aggregated values, using this operator. + *

+ * May be specified in explicit {@code {"schema","localname"}} form, or as + * a single string that will be leniently parsed as an optionally + * schema-qualified name. In the explicit form, {@code ""} as the schema + * will make the name explicitly unqualified. The operator will be assumed + * to have two operands of the same type as the argument to the aggregate + * (which must have exactly one aggregated argument, and no direct + * arguments). The operator's membership in a {@code btree} operator class + * is not (currently) checked at compile time, but if it does not hold at + * run time, the optimization will not be used. + */ + String[] sortOperator() default {}; + + /** + * One or more arbitrary labels that will be considered 'provided' by the + * object carrying this annotation. The deployment descriptor will be + * generated in such an order that other objects that 'require' labels + * 'provided' by this come later in the output for install actions, and + * earlier for remove actions. + */ + String[] provides() default {}; + + /** + * One or more arbitrary labels that will be considered 'required' by the + * object carrying this annotation. The deployment descriptor will be + * generated in such an order that other objects that 'provide' labels + * 'required' by this come earlier in the output for install actions, and + * later for remove actions. + */ + String[] requires() default {}; + + /** + * The {@code } to be used around SQL code generated + * for this aggregate. Defaults to {@code PostgreSQL}. Set explicitly to + * {@code ""} to emit code not wrapped in an {@code }. + */ + String implementor() default ""; + + /** + * A comment to be associated with the aggregate. The default is no comment + * if the annotation does not appear on a method, or the first sentence of + * the method's Javadoc comment, if any, if it does. + */ + String comment() default ""; + + /** + * @hidden container type allowing Cast to be repeatable. + */ + @Documented + @Target(ElementType.TYPE) + @Retention(RetentionPolicy.CLASS) + @interface Container + { + Aggregate[] value(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/BaseUDT.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/BaseUDT.java index 7f65883a..3798c0a3 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/BaseUDT.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/BaseUDT.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015- Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2022 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -11,6 +11,7 @@ */ package org.postgresql.pljava.annotation; +import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -55,16 +56,18 @@ *

* Other static methods in the class may be exported as SQL functions by * marking them with {@code @Function} in the usual way, and will not have any - * special treatment on account of being in a UDT class. If those function - * declarations will depend on the existence of this type, or the type must + * special treatment on account of being in a UDT class. Those function + * declarations will be correctly ordered before or after this type's, in common + * cases such as when this type appears in their signatures, or the type must * refer to the functions (as it must for * {@link #typeModifierInput typeModifierInput} or - * {@link #typeModifierOutput typeModifierOutput} functions, for example), - * appropriate {@link #provides provides}/{@link #requires requires} labels must - * be used in their {@code @Function} annotations and this annotation, to make + * {@link #typeModifierOutput typeModifierOutput} functions, for example). + * In a case that the automatic ordering does not handle correctly, + * appropriate {@link #provides provides}/{@link #requires requires} labels can + * be used in the {@code @Function} annotations and this annotation, to make * the order come out right. */ -@Target(ElementType.TYPE) @Retention(RetentionPolicy.CLASS) +@Target(ElementType.TYPE) @Retention(RetentionPolicy.CLASS) @Documented public @interface BaseUDT { /** @@ -79,7 +82,120 @@ enum Alignment { CHAR, INT2, INT4, DOUBLE } >TOAST strategies for the type's stored representation. * Only {@code PLAIN} is applicable to fixed-length types. */ - enum Storage { PLAIN, EXTERNAL, EXTENDED, MAIN } + enum Storage + { + /** + * Never compressed or stored out-of-line. + */ + PLAIN, + + /** + * Can be moved out-of-line but not compressed. + */ + EXTERNAL, + + /** + * Can be compressed, and moved out-of-line if still too big. + */ + EXTENDED, + + /** + * Can be compressed, but moved out-of-line only if there is no other + * way to make the containing tuple fit a page. + */ + MAIN + } + + /** + * The type categories that are predefined in PostgreSQL. + *

+ * This enumeration is not used as the type of the + * {@link #category category} element below, because PostgreSQL allows use + * of other single-ASCII-character category codes for custom purposes. + * Therefore, the value given for that element can be any such character. PostgreSQL reserves + * all of the upper-case ASCII letters to represent current or future + * predefined categories, and this enumeration allows mapping between those + * and their more readable names. + *

+ * When one of the predefined categories is wanted for the + * {@link #category category} element, the corresponding character constant + * in {@link Code PredefinedCategory.Code} can be used in the annotation as + * a more readable alternative to the one-character code. + */ + enum PredefinedCategory + { + ARRAY (Code.ARRAY), + BOOLEAN (Code.BOOLEAN), + COMPOSITE (Code.COMPOSITE), + DATETIME (Code.DATETIME), + ENUM (Code.ENUM), + GEOMETRIC (Code.GEOMETRIC), + NETWORK (Code.NETWORK), + NUMERIC (Code.NUMERIC), + PSEUDOTYPE (Code.PSEUDOTYPE), + RANGE (Code.RANGE), + STRING (Code.STRING), + TIMESPAN (Code.TIMESPAN), + USER (Code.USER), + BITSTRING (Code.BITSTRING), + UNKNOWN (Code.UNKNOWN), + INTERNAL (Code.INTERNAL); + + private final char code; + + PredefinedCategory(char code) + { + this.code = code; + } + + /** + * Return this category's single-character code. + */ + public char code() + { + return code; + } + + /** + * Return the {@code PredefinedCategory} corresponding to a + * single-character code as found in the system catalogs, or null + * if the character represents a custom category (or a predefined one in + * a PostgreSQL version newer than this class). + */ + public static PredefinedCategory valueOf(char code) + { + for ( PredefinedCategory c : values() ) + if ( c.code == code ) + return c; + return null; + } + + /** + * Character constants corresponding to the predefined categories, + * for use in the {@link #category} annotation element. + */ + public interface Code + { + char ARRAY = 'A'; + char BOOLEAN = 'B'; + char COMPOSITE = 'C'; + char DATETIME = 'D'; + char ENUM = 'E'; + char GEOMETRIC = 'G'; + char NETWORK = 'I'; + char NUMERIC = 'N'; + char PSEUDOTYPE = 'P'; + char RANGE = 'R'; + char STRING = 'S'; + char TIMESPAN = 'T'; + char USER = 'U'; + char BITSTRING = 'V'; + char UNKNOWN = 'X'; + char INTERNAL = 'Z'; + } + } /** * Name of the new type in SQL, if it is not to be the simple name of @@ -140,9 +256,8 @@ enum Storage { PLAIN, EXTERNAL, EXTENDED, MAIN } *

* Even if the method is defined on the UDT class marked by this annotation, * it is not automatically found or used. It will need its own - * {@link Function} annotation giving it a name and a {@code provides} - * label, and this annotation must refer to it by that name and include the - * label in {@code requires} to ensure the SQL is generated in the right + * {@link Function} annotation giving it a name, and this annotation must + * refer to it by that name to ensure the SQL is generated in the right * order. */ String typeModifierInput() default ""; @@ -160,9 +275,8 @@ enum Storage { PLAIN, EXTERNAL, EXTENDED, MAIN } *

* Even if the method is defined on the UDT class marked by this annotation, * it is not automatically found or used. It will need its own - * {@link Function} annotation giving it a name and a {@code provides} - * label, and this annotation must refer to it by that name and include the - * label in {@code requires} to ensure the SQL is generated in the right + * {@link Function} annotation giving it a name, and this annotation must + * refer to it by that name to ensure the SQL is generated in the right * order. */ String typeModifierOutput() default ""; @@ -174,6 +288,12 @@ enum Storage { PLAIN, EXTERNAL, EXTENDED, MAIN } * The details of the necessary API are in {@code vacuum.h}. + *

+ * Even if the method is defined on the UDT class marked by this annotation, + * it is not automatically found or used. It will need its own + * {@link Function} annotation giving it a name, and this annotation must + * refer to it by that name to ensure the SQL is generated in the right + * order. */ String analyze() default ""; @@ -221,8 +341,13 @@ enum Storage { PLAIN, EXTERNAL, EXTENDED, MAIN } * This must be a single character, which PostgreSQL calls "simple ASCII" * and really forces to be in {@code [ -~]}, that is, space to tilde, * inclusive. + *

+ * The upper-case ASCII letters are reserved for PostgreSQL's predefined + * categories, which can be found in the + * {@link PredefinedCategory PredefinedCategory} enumeration. The default is + * {@link PredefinedCategory.Code#USER PredefinedCategory.Code.USER}. */ - char category() default 'U'; + char category() default PredefinedCategory.Code.USER; /** * Whether this type is to be "preferred" in its {@link #category}, diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Cast.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Cast.java new file mode 100644 index 00000000..140993f8 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Cast.java @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.annotation; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Repeatable; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Declares a PostgreSQL {@code CAST}. + *

+ * May annotate a Java method (which should also carry a + * {@link Function @Function} annotation, making it a PostgreSQL function), + * or a class or interface (just to have a place to put it when not directly + * associated with a method). + *

+ * If not applied to a method, must supply {@code path=} specifying + * {@code BINARY} or {@code INOUT}. + *

+ * The source and target types must be specified with {@code from} and + * {@code to}, unless the annotation appears on a method, in which case these + * default to the first parameter and return types of the function, + * respectively. + *
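A hypothetical sketch of that defaulting (none of these names come from the PL/Java sources): a function-backed assignment cast declared directly on the method, letting from and to be taken from its parameter and return types.

    import org.postgresql.pljava.annotation.Cast;
    import org.postgresql.pljava.annotation.Function;

    public class CastExample
    {
        // "from" defaults to the SQL type of the String parameter and "to"
        // to the SQL boolean return type; application=ASSIGNMENT lets the
        // cast fire in assignment context as well as when written explicitly.
        @Function(schema = "javatest")
        @Cast(application = Cast.Application.ASSIGNMENT)
        public static boolean looseBoolean(String s)
        {
            String t = s.trim().toLowerCase();
            return t.equals("t") || t.equals("true")
                || t.equals("yes") || t.equals("on") || t.equals("1");
        }
    }

A cast not tied to any method (a BINARY or INOUT cast between existing types) would instead be placed on a class or interface and would spell out from, to, and path explicitly.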

+ * The cast will, by default, have to be applied explicitly, unless + * {@code application=ASSIGNMENT} or {@code application=IMPLICIT} is given. + * + * @author Chapman Flack + */ +@Documented +@Target({ElementType.METHOD, ElementType.TYPE}) +@Repeatable(Cast.Container.class) +@Retention(RetentionPolicy.CLASS) +public @interface Cast +{ + /** + * When this cast can be applied: only in explicit form, when used in + * assignment context, or implicitly whenever needed. + */ + enum Application { EXPLICIT, ASSIGNMENT, IMPLICIT }; + + /** + * A known conversion path when a dedicated function is not supplied: + * {@code BINARY} for two types that are known to have the same internal + * representation, or {@code INOUT} to invoke the first type's text-output + * function followed by the second type's text-input function. + */ + enum Path { BINARY, INOUT }; + + /** + * The source type to be cast. Will default to the first parameter type of + * the associated function, if known. + *

+ * PostgreSQL will allow this type and the function's first parameter type + * to differ, if there is an existing binary cast between them. That cannot + * be checked at compile time, so a cast with a different type given here + * might successfully compile but fail to deploy in PostgreSQL. + */ + String from() default ""; + + /** + * The target type to cast to. Will default to the return type of + * the associated function, if known. + *

+ * PostgreSQL will allow this type and the function's return type + * to differ, if there is an existing binary cast between them. That cannot + * be checked at compile time, so a cast with a different type given here + * might successfully compile but fail to deploy in PostgreSQL. + */ + String to() default ""; + + /** + * A stock conversion path when a dedicated function is not supplied: + * {@code BINARY} for two types that are known to have the same internal + * representation, or {@code INOUT} to invoke the first type's text-output + * function followed by the second type's text-input function. + *

+ * To declare an {@code INOUT} cast, {@code with=INOUT} must appear + * explicitly; the default value is treated as unspecified. + */ + Path path() default Path.INOUT; + + /** + * When this cast can be applied: only in explicit form, when used in + * assignment context, or implicitly whenever needed. + */ + Application application() default Application.EXPLICIT; + + /** + * One or more arbitrary labels that will be considered 'provided' by the + * object carrying this annotation. The deployment descriptor will be + * generated in such an order that other objects that 'require' labels + * 'provided' by this come later in the output for install actions, and + * earlier for remove actions. + */ + String[] provides() default {}; + + /** + * One or more arbitrary labels that will be considered 'required' by the + * object carrying this annotation. The deployment descriptor will be + * generated in such an order that other objects that 'provide' labels + * 'required' by this come earlier in the output for install actions, and + * later for remove actions. + */ + String[] requires() default {}; + + /** + * The {@code } to be used around SQL code generated + * for this cast. Defaults to {@code PostgreSQL}. Set explicitly to + * {@code ""} to emit code not wrapped in an {@code }. + */ + String implementor() default ""; + + /** + * A comment to be associated with the cast. If left to default, and the + * annotated Java construct has a doc comment, its first sentence will be + * used. If an empty string is explicitly given, no comment will be set. + */ + String comment() default ""; + + /** + * @hidden container type allowing Cast to be repeatable. + */ + @Documented + @Target({ElementType.METHOD, ElementType.TYPE}) + @Retention(RetentionPolicy.CLASS) + @interface Container + { + Cast[] value(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Function.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Function.java index 28768c83..705d53e6 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Function.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Function.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -65,14 +65,53 @@ enum Effects { IMMUTABLE, STABLE, VOLATILE }; */ enum Trust { SANDBOXED, UNSANDBOXED }; + /** + * Whether the function is unsafe to use in any parallel query plan at all, + * or avoids certain operations and can appear in such a plan but must be + * executed only in the parallel group leader, or avoids an even larger + * set of operations and is safe to execute anywhere in a parallel plan. + */ + enum Parallel { UNSAFE, RESTRICTED, SAFE }; + /** * The element type in case the annotated function returns a * {@link org.postgresql.pljava.ResultSetProvider ResultSetProvider}, * or can be used to specify the return type of any function if the * compiler hasn't inferred it correctly. + *

+ * Only one of {@code type} or {@code out} may appear, except as described + * for {@code out} below. */ String type() default ""; + /** + * The result column names and types of a composite-returning function. + *

+ * This is for a function defining its own one-off composite type + * (declared with {@code OUT} parameters). If the function returns some + * composite type known to the catalog, simply use {@code type} and the name + * of that type. + *

+ * Each element is a name and a type specification, separated by whitespace. + * An element that begins with whitespace declares an output column with no + * name, only a type. The name is an ordinary SQL identifier; if it would + * be quoted in SQL, naturally each double-quote must be represented as + * {@code \"} in Java. + *
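A hypothetical example of such a declaration, with invented names: a function returning a two-column composite, written in the style this class documents for composite results, a boolean method that fills in a writable ResultSet passed as the final parameter.

    import java.sql.ResultSet;
    import java.sql.SQLException;

    import org.postgresql.pljava.annotation.Function;

    public class OutExample
    {
        // Declares javatest.divmod(int4, int4) with OUT columns
        // (quotient int4, remainder int4); the trailing ResultSet is the
        // writable row to be returned, and returning true means "a row".
        @Function(schema = "javatest",
                  out = { "quotient int4", "remainder int4" })
        public static boolean divmod(int num, int den, ResultSet out)
        throws SQLException
        {
            out.updateInt(1, num / den);
            out.updateInt(2, num % den);
            return true;
        }
    }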

+ * If there is exactly one {@code OUT} parameter declared, PostgreSQL treats + * the function as returning that parameter's type, rather than + * a one-element composite; therefore, the Java method must have the + * corresponding form (returning the result type directly, or an + * {@code Iterator} of that type, rather than expecting a {@code ResultSet} + * final parameter). + *

+ * If a one-element composite type is wanted, PL/Java will allow + * {@code type="pg_catalog.RECORD"} along with a one-element {@code out}, + * and will generate the corresponding declaration in SQL. As of + * this writing, however, no version of PostgreSQL will accept it. + */ + String[] out() default {}; + /** * The name of the function. This is optional. The default is * to use the name of the annotated method. @@ -84,16 +123,24 @@ enum Trust { SANDBOXED, UNSANDBOXED }; */ String schema() default ""; + /** + * Whether PostgreSQL should gather trailing arguments into an array that + * will be bound to the last (non-output) Java parameter (which must have an + * array type). + * Appeared in PostgreSQL 8.4. + */ + boolean variadic() default false; + /** * Estimated cost in units of cpu_operator_cost. - * + *

* If left unspecified (-1) the backend's default will apply. */ int cost() default -1; /** * Estimated number of rows returned (only for functions returning set). - * + *

* If left unspecified (-1) the backend's default will apply. */ int rows() default -1; @@ -121,7 +168,7 @@ enum Trust { SANDBOXED, UNSANDBOXED }; /** * What the query optimizer is allowed to assume about this function. - * + *

* IMMUTABLE describes a pure function whose return will always be the same * for the same parameter values, with no side effects and no dependency on * anything in the environment. STABLE describes a function that has no @@ -138,6 +185,42 @@ enum Trust { SANDBOXED, UNSANDBOXED }; * in the "untrusted" language instance. */ Trust trust() default Trust.SANDBOXED; + + /** + * The name of the PostgreSQL procedural language to which this function + * should be declared, as an alternative to specifying {@link #trust trust}. + *

+ * Ordinarily, PL/Java installs two procedural languages, {@code java} and + * {@code javau}, and a function is declared in one or the other by giving + * {@code trust} the value {@code SANDBOXED} or {@code UNSANDBOXED}, + * respectively. It is possible to declare other named procedural languages + * sharing PL/Java's handler functions, and assign customized permissions + * to them in {@code pljava.policy}. A function can be declared in the + * specific language named with this element. + *

+ * It is an error to specify both {@code language} and {@code trust} in + * the same annotation. + */ + String language() default ""; + + /** + * Whether the function is UNSAFE to use in any + * parallel query plan at all + * (the default), or avoids all disqualifying operations and so is SAFE to + * execute anywhere in a parallel plan, or, by avoiding some such + * operations, may appear in parallel plans but RESTRICTED to execute only + * on the parallel group leader. The operations that must be considered are + * set out in Parallel Labeling for Functions and Aggregates in the PostgreSQL docs. + *

+ * For much more on the practicalities of parallel query and PL/Java, + * please see the users' guide. + *

+ * Appeared in PostgreSQL 9.6. + */ + Parallel parallel() default Parallel.UNSAFE; /** * Whether the function can be safely pushed inside the evaluation of views @@ -161,7 +244,7 @@ enum Trust { SANDBOXED, UNSANDBOXED }; * configuration_parameter FROM CURRENT. The latter will ensure that the * function executes with the same setting for configuration_parameter that * was in effect when the function was created. - * + *

* Appeared in PostgreSQL 8.3. */ String[] settings() default {}; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/MappedUDT.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/MappedUDT.java index 69563f84..fcd168a5 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/MappedUDT.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/MappedUDT.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015- Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -11,6 +11,7 @@ */ package org.postgresql.pljava.annotation; +import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -42,7 +43,7 @@ * of the class being annotated, and found in {@link #schema schema} if * specified, or by following the search path) to the annotated class. */ -@Target(ElementType.TYPE) @Retention(RetentionPolicy.CLASS) +@Target(ElementType.TYPE) @Retention(RetentionPolicy.CLASS) @Documented public @interface MappedUDT { /** diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Operator.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Operator.java new file mode 100644 index 00000000..0b60808b --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Operator.java @@ -0,0 +1,347 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.annotation; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Repeatable; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Declares a PostgreSQL {@code OPERATOR}. + *

+ * May annotate a Java method (which should also carry a + * {@link Function @Function} annotation, making it a PostgreSQL function), + * or a class or interface (just to have a place to put it when not directly + * annotating a method). + * + * @author Chapman Flack + */ +@Documented +@Target({ElementType.METHOD, ElementType.TYPE}) +@Repeatable(Operator.Container.class) +@Retention(RetentionPolicy.CLASS) +public @interface Operator +{ + /** + * Distinguished value usable for {@link #commutator commutator=} to + * indicate that an operator is its own commutator without having to + * repeat its schema and name. + *

+ * This value strictly declares that the operator is its own commutator, and + * therefore is only allowed for an operator with two operands of the same + * type. If the types are different, a commutator with the same + * name would in fact be a different operator with the operand + * types exchanged; use {@link #TWIN TWIN} for that. + */ + String SELF = ".self."; + + /** + * Distinguished value usable for {@link #commutator commutator=} to + * indicate that an operator's commutator is another operator with the same + * schema and name, without having to repeat them. + *

+ * This value strictly declares that the commutator is a different + * operator, and therefore is only allowed for an operator with two + * operands of different types. As commutators, this operator and its twin + * will have those operand types in opposite orders, so PostgreSQL + * overloading will allow them to have the same name. + *

+ * This value may also be used with {@link #synthetic synthetic=} to give + * the synthesized function the same schema and name as the one it is based + * on; this also is possible only for a function synthesized by commutation + * where the commuted parameter types differ. + */ + String TWIN = ".twin."; + + /** + * Name for this operator. + *

+ * May be specified in explicit {@code {"schema","operatorname"}} form, or + * as a single string that will be leniently parsed as an optionally + * schema-qualified name. In the explicit form, {@code ""} as the schema + * will make the name explicitly unqualified. + */ + String[] name(); + + /** + * The type of the operator's left operand, if any. + * Will default to the first parameter type of an associated two-parameter + * function, or none for an associated one-parameter function. + */ + String left() default ""; + + /** + * The type of the operator's right operand, if any. + * Will default to the second parameter type of an associated two-parameter + * function, or the parameter type for an associated one-parameter function. + */ + String right() default ""; + + /** + * Name of the function backing the operator; may be omitted if this + * annotation appears on a method. + *

+ * The function named here must take one parameter of the matching type if + * only one of {@code left} or {@code right} is specified, or the + * {@code left} and {@code right} types in that order if both are present. + */ + String[] function() default {}; + + /** + * Name of a function to be synthesized by PL/Java based on the method this + * annotation appears on and this operator's {@code commutator} or + * {@code negator} relationship to another operator declared on the same + * method. + *

+ * Only allowed in an annotation on a Java method, and where + * {@code function} is not specified. + *

+ * The special value {@link #TWIN TWIN} is allowed, to avoid repeating the + * schema and name when the desired name for the synthesized function is the + * same as the one it is derived from (which is only possible if the + * derivation involves commuting the arguments and their types are + * different, so the two functions can be distinguished by overloading). A + * typical case would be the twin of a cross-type function like {@code add} + * that is commutative, so using the same name makes sense. + *

+ * When derived by commutation, the synthetic function simply calls the + * base function with the parameters swapped. For negation, the base + * function must return {@code boolean} or {@code Boolean}, and the + * synthetic function returns true for false, false for true, and null + * for null. This will give familiar SQL behavior in many cases. For a base + * function with {@code onNullInput=CALLED}, if it can return non-null + * boolean results on some null inputs, it may be necessary to code + * a negator or commutator by hand if the synthetic one would not have + * the intended semantics. + */ + String[] synthetic() default {}; + + /** + * Name of an operator that is the commutator of this one. + *

+ * Specified in the same ways as {@code name}. The value + * {@link #SELF SELF} can be used to avoid repeating the schema and name + * for the common case of an operator that is its own commutator. The value + * {@link #TWIN TWIN} can likewise declare that the commutator is the + * different operator with the same name and schema but the operand types + * (which must be different) reversed. A typical case would be the twin of a + * cross-type operator like {@code +} that is commutative, so using the same + * name makes sense. + */ + String[] commutator() default {}; + + /** + * Name of an operator that is the negator of this one. + *
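For a concrete (and entirely invented) sketch: a case-insensitive equality operator whose commutator is itself, declared right on the backing method.

    import org.postgresql.pljava.annotation.Function;
    import org.postgresql.pljava.annotation.Operator;

    public class OperatorExample
    {
        // Both operand types default from the two parameters; SELF says the
        // operator commutes with itself, and the negator is only named here
        // (its own declaration would be made elsewhere).
        @Function(schema = "javatest")
        @Operator(name = {"javatest", "==="},
                  commutator = Operator.SELF,
                  negator = {"javatest", "!=="})
        public static boolean ciEquals(String a, String b)
        {
            return a == null ? b == null : a.equalsIgnoreCase(b);
        }
    }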

+ * Specified in the same ways as {@code name}. + */ + String[] negator() default {}; + + /** + * Whether this operator can be used in computing a hash join. + *

+ * Only sensible for a boolean-valued binary operator, which must have a + * commutator in the same hash index operator family, with the underlying + * functions marked {@link Function.Effects#IMMUTABLE} or + * {@link Function.Effects#STABLE}. + */ + boolean hashes() default false; + + /** + * Whether this operator can be used in computing a merge join. + *

+ * Only sensible for a boolean-valued binary operator, which must have a + * commutator also appearing as an equality member in the same btree index + * operator family, with the underlying functions marked + * {@link Function.Effects#IMMUTABLE} or {@link Function.Effects#STABLE}. + */ + boolean merges() default false; + + /** + * Name of a function that can estimate the selectivity of this operator + * when used in a {@code WHERE} clause. + *

+ * Specified in the same ways as {@code function}. + *

+ * A custom estimator is a complex undertaking (and, at present, requires + * a language other than Java), but several predefined ones can be found in + * {@link SelectivityEstimators}. + */ + String[] restrict() default {}; + + /** + * Name of a function that can estimate the selectivity of this operator + * when used in a join. + *

+ * Specified in the same ways as {@code function}. + *

+ * A custom estimator is a complex undertaking (and, at present, requires + * a language other than Java), but several predefined ones can be found in + * {@link SelectivityEstimators}. + */ + String[] join() default {}; + + /** + * One or more arbitrary labels that will be considered 'provided' by the + * object carrying this annotation. The deployment descriptor will be + * generated in such an order that other objects that 'require' labels + * 'provided' by this come later in the output for install actions, and + * earlier for remove actions. + */ + String[] provides() default {}; + + /** + * One or more arbitrary labels that will be considered 'required' by the + * object carrying this annotation. The deployment descriptor will be + * generated in such an order that other objects that 'provide' labels + * 'required' by this come earlier in the output for install actions, and + * later for remove actions. + */ + String[] requires() default {}; + + /** + * The {@code } to be used around SQL code generated + * for this operator. Defaults to {@code PostgreSQL}. Set explicitly to + * {@code ""} to emit code not wrapped in an {@code }. + */ + String implementor() default ""; + + /** + * A comment to be associated with the operator. If left to default, and the + * annotated Java construct has a doc comment, its first sentence will be + * used. If an empty string is explicitly given, no comment will be set. + */ + String comment() default ""; + + /** + * Names of several functions predefined in PostgreSQL for estimating the + * selectivity of operators in restriction clauses or joins. + */ + interface SelectivityEstimators + { + /** + * A restriction-selectivity estimator suitable for an operator + * with rather high selectivity typical of an operator like {@code =}. + */ + String EQSEL = "pg_catalog.eqsel"; + + /** + * A restriction-selectivity estimator suitable for an operator + * somewhat less strict than a typical {@code =} operator. + */ + String MATCHINGSEL = "pg_catalog.matchingsel"; + + /** + * A restriction-selectivity estimator suitable for an operator + * with rather low selectivity typical of an operator like {@code <>}. + */ + String NEQSEL = "pg_catalog.neqsel"; + + /** + * A restriction-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code <}. + */ + String SCALARLTSEL = "pg_catalog.scalarltsel"; + + /** + * A restriction-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code <=}. + */ + String SCALARLESEL = "pg_catalog.scalarlesel"; + + /** + * A restriction-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code >}. + */ + String SCALARGTSEL = "pg_catalog.scalargtsel"; + + /** + * A restriction-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code >=}. + */ + String SCALARGESEL = "pg_catalog.scalargesel"; + + /** + * A join-selectivity estimator suitable for an operator + * with rather high selectivity typical of an operator like {@code =}. + */ + String EQJOINSEL = "pg_catalog.eqjoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * somewhat less strict than a typical {@code =} operator. + */ + String MATCHINGJOINSEL = "pg_catalog.matchingjoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * with rather low selectivity typical of an operator like {@code <>}. 
+ */ + String NEQJOINSEL = "pg_catalog.neqjoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code <}. + */ + String SCALARLTJOINSEL = "pg_catalog.scalarltjoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code <=}. + */ + String SCALARLEJOINSEL = "pg_catalog.scalarlejoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code >}. + */ + String SCALARGTJOINSEL = "pg_catalog.scalargtjoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * with selectivity typical of an operator like {@code >=}. + */ + String SCALARGEJOINSEL = "pg_catalog.scalargejoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * doing 2-D area-based comparisons. + */ + String AREAJOINSEL = "pg_catalog.areajoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * doing 2-D position-based comparisons. + */ + String POSITIONJOINSEL = "pg_catalog.positionjoinsel"; + + /** + * A join-selectivity estimator suitable for an operator + * doing 2-D containment-based comparisons. + */ + String CONTJOINSEL = "pg_catalog.contjoinsel"; + } + + /** + * @hidden container type allowing Operator to be repeatable. + */ + @Documented + @Target({ElementType.METHOD, ElementType.TYPE}) + @Retention(RetentionPolicy.CLASS) + @interface Container + { + Operator[] value(); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLAction.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLAction.java index edbf63fe..a1ff4737 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLAction.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLAction.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -14,6 +14,7 @@ import java.lang.annotation.Documented; import java.lang.annotation.ElementType; +import java.lang.annotation.Repeatable; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; @@ -21,20 +22,40 @@ /** * Annotation that supplies verbatim commands to be copied into the * deployment descriptor. - * - * Strings supplied within a single SQLAction annotation will be copied - * in the order supplied. Strings from different SQLAction annotations, and - * generated code for functions, will be assembled in an order that can be - * influenced by 'provides' and 'requires' labels. No snippet X will be - * emitted ahead of any snippets that provide what X requires. The "remove" - * actions will be assembled in the reverse of that order. - * + *

+ * Strings supplied to {@link #install install} or {@link #remove remove} within + * a single {@code SQLAction} annotation become code snippets emitted into the + * deployment descriptor's {@code INSTALL} or {@code REMOVE} section, + * respectively, in the order supplied. + *

+ * Snippets from different {@code SQLAction} annotations, + * and snippets generated by annotations on functions, types, and such, will be + * assembled in an order that can be influenced by {@link #provides provides} + * and {@link #requires requires} labels. No snippet X will be emitted as an + * {@code INSTALL} action ahead of any snippets that provide what X requires. + * The sense of that dependency is reversed when ordering {@code REMOVE} + * snippets. + *
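(Illustrative aside, not part of this change: with the repeatable form added here, two {@code SQLAction} annotations whose ordering is driven by a label might look like the following; the schema, label, and SQL text are invented.)

    import org.postgresql.pljava.annotation.SQLAction;

    // Hypothetical sketch only. Because the second annotation 'requires' the
    // label the first one 'provides', its install action is emitted later,
    // and its remove action earlier, than the first one's.
    @SQLAction(provides = "audit schema",
        install = "CREATE SCHEMA audit",
        remove  = "DROP SCHEMA audit")
    @SQLAction(requires = "audit schema",
        install = "CREATE TABLE audit.log (msg text)",
        remove  = "DROP TABLE audit.log")
    public class AuditSetup
    {
    }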

+ *<h2>Conditional execution</h2>

+ *

+ * An {@code SQLAction} may supply an {@code install} snippet that tests some + * condition at the time of deployment and adjusts the + * {@code pljava.implementors} setting to include or not include a specific + * {@code <implementor name>}, controlling whether actions later in + * the deployment descriptor that are annotated with that + * {@code <implementor name>} will be executed. The {@code SQLAction} that + * controls whether an {@code <implementor name>} will be recognized should use + * {@link #provides provides} with exactly that name, which is implicitly + * 'required' by statements that use that name as + * {@link #implementor implementor}. For details on this usage, which involves + * a different ordering rule, see "conditional execution" in + * {@link org.postgresql.pljava.annotation the package documentation}. * @author Thomas Hallgren - pre-Java6 version * @author Chapman Flack (Purdue Mathematics) - updated to Java6, * added SQLAction */ @Documented @Target({ElementType.PACKAGE,ElementType.TYPE}) +@Repeatable(SQLActions.class) @Retention(RetentionPolicy.CLASS) public @interface SQLAction { @@ -56,6 +77,10 @@ * generated in such an order that other objects that 'require' labels * 'provided' by this come later in the output for install actions, and * earlier for remove actions. + *

+ * For use of this element on an {@code SQLAction} that tests a condition + * to control conditional execution, see "conditional execution" in + * {@link SQLAction the class description}. */ String[] provides() default {}; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLActions.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLActions.java index bc618b4a..753e1df2 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLActions.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLActions.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -21,7 +21,11 @@ /** * Container for multiple {@link SQLAction} annotations (in case it is * convenient to hang more than one on a given program element). - * + *

+ * This container annotation is documented for historical reasons (it existed + * in PL/Java versions targeting earlier Java versions than 8). In new code, it + * would be more natural to simply hang more than one {@code SQLAction} + * annotation directly on a program element. * @author Thomas Hallgren - pre-Java6 version * @author Chapman Flack (Purdue Mathematics) - updated to Java6, * added SQLActions diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLType.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLType.java index ce6608f0..45805ac5 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLType.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/SQLType.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2016 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2021 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -41,17 +41,58 @@ String value() default ""; /** - * Default value for the parameter. Parameters of array type can have + * Default value for the parameter. Parameters of row or array type can have * defaults too, so this element accepts an array. For a scalar type, * just supply one value. Values given here go into the descriptor file * as properly-escaped string literals explicitly cast to the parameter * type, which covers the typical case of defaults that are simple * literals or can be computed as Java String-typed constant expressions * (e.g. ""+Math.PI) and ensures the parsability of the descriptor file. + *

+ * For a row type of unknown structure (PostgreSQL type {@code RECORD}), the + * only default that can be specified is {@code {}}, which can be useful for + * functions that use a {@code RECORD} parameter to accept an arbitrary + * sequence of named, typed parameters from the caller. For a named row type + * (not {@code RECORD}), an array of nonzero length will be accepted. It + * needs to match the number and order of components of the row type (which + * cannot be checked at compile time, but will cause the deployment + * descriptor code to fail at jar install time if it does not). + *

+ * A Java annotation value cannot be null. If null is what the default value + * should be, use {@code optional=true}. */ String[] defaultValue() default {}; + + /** + * What {@code optional=true} means is just what {@code defaultValue=null} + * would mean, if Java permitted null values in annotations. + *

+ * There is no difference between {@code optional=false} and simply having + * no {@code optional} or {@code defaultValue} element at all. + *

+ * Only one of {@code optional} or {@code defaultValue} may be present + * in one annotation. + *
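(Illustrative aside, not part of this change: a minimal sketch of {@code defaultValue} and {@code optional} at a use site; the class, method, and parameter names are invented.)

    import org.postgresql.pljava.annotation.Function;
    import org.postgresql.pljava.annotation.SQLType;

    public class Greetings
    {
        // Hypothetical example: 'whom' gets the SQL default 'world', while
        // 'salutation' defaults to SQL NULL because optional=true.
        @Function
        public static String hello(
            @SQLType(defaultValue="world") String whom,
            @SQLType(optional=true) String salutation)
        {
            return (null == salutation ? "Hello" : salutation) + ", " + whom + "!";
        }
    }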

+ * If {@code optional=true}, the function must not be annotated with + * {@code onNullInput=RETURNS_NULL}. + */ + boolean optional() default false; // Is it worth having a defaultRaw() for rare cases wanting some // arbitrary SQL expression for the default? // String[] defaultRaw() default {}; + + /** + * SQL name for the parameter, to allow calling the function using named + * parameter notation, in preference to the parameter's Java name. + * By default, the SQL name is taken from the Java name, but in some cases + * the SQL name expected by callers may be a reserved word in Java, or the + * Java name may be reserved in SQL. The name specified here can simply be + * different, or it can be given in quoted-identifier form to work around + * the reservedness in SQL of the unquoted name. Callers, in that case, have + * to quote the name also, but that may be acceptable for clarity's sake if + * there is a particular name that is used in a standard or is otherwise the + * most natural choice. + */ + String name() default ""; } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Trigger.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Trigger.java index bae7e838..5c629389 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/Trigger.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/Trigger.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -9,9 +9,11 @@ * Contributors: * Tada AB * Purdue University + * Chapman Flack */ package org.postgresql.pljava.annotation; +import java.lang.annotation.Documented; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; @@ -19,9 +21,28 @@ /** * Annotation, only used in {@link Function#triggers @Function(triggers=...)}, * to specify what trigger(s) the function will be called for. + *

+ * Transition tables ({@link #tableOld} and {@link #tableNew}) appear in + * PostgreSQL 10. If a trigger is declared with + * {@code tableOld="oo", tableNew="nn"}, then the trigger function can query + * {@code oo} and {@code nn} as if they are actual tables with the same + * columns as the table responsible for the trigger, and containing the affected + * rows before and after the changes. Only an AFTER trigger can have transition + * tables. An UPDATE will populate both tables. INSERT will not populate the + * old table, and DELETE will not populate the new table. It is an error to + * specify either table if {@code events} does not include at least one event + * that could populate that table. As long as at least one such event is + * included, the table can be specified, and will simply have no rows if the + * trigger is invoked for an event that does not populate it. + *
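(Illustrative aside, not part of this change: assuming PostgreSQL 10 or later and the annotation's existing {@code table} element, a statement-level AFTER UPDATE trigger with transition tables might be declared roughly as below; the table and function names are invented.)

    import java.sql.SQLException;

    import org.postgresql.pljava.TriggerData;
    import org.postgresql.pljava.annotation.Function;
    import org.postgresql.pljava.annotation.Trigger;
    import static org.postgresql.pljava.annotation.Trigger.Called.AFTER;
    import static org.postgresql.pljava.annotation.Trigger.Event.UPDATE;
    import static org.postgresql.pljava.annotation.Trigger.Scope.STATEMENT;

    public class AccountsAudit
    {
        // Hypothetical sketch: inside the function, oo and nn can be queried
        // through JDBC as if they were ordinary tables holding the rows
        // before and after the UPDATE.
        @Function(triggers = @Trigger(
            called = AFTER, scope = STATEMENT, events = { UPDATE },
            table = "accounts", tableOld = "oo", tableNew = "nn"))
        public static void auditAccounts(TriggerData td) throws SQLException
        {
            // e.g. SELECT count(*) FROM oo and FROM nn could be issued here
        }
    }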

+ * In an after-statement trigger, the transition tables include all rows + * affected by the statement. In an after-row trigger, the same is true: + * after-row triggers are all queued until the statement completes, and then + * the function will be invoked for each row that was affected, but will see + * the complete transition tables on each invocation. * @author Thomas Hallgren */ -@Target({}) @Retention(RetentionPolicy.CLASS) +@Target({}) @Retention(RetentionPolicy.CLASS) @Documented public @interface Trigger { /** @@ -29,6 +50,14 @@ */ enum Called { BEFORE, AFTER, INSTEAD_OF }; + /** + * Deferrability (only applies to constraint triggers). + * {@code NOT_DEFERRABLE} if the constraint trigger is not deferrable + * at all; otherwise, the trigger is deferrable and this value indicates + * whether initially deferred or not. + */ + enum Constraint { NOT_DEFERRABLE, INITIALLY_IMMEDIATE, INITIALLY_DEFERRED }; + /** * Types of event that can occasion a trigger. */ @@ -45,11 +74,40 @@ enum Scope { STATEMENT, ROW }; */ String[] arguments() default {}; + /** + * Only for a constraint trigger, whether it is deferrable and, if so, + * initially immediate or deferred. To create a constraint trigger that is + * not deferrable, this attribute must be explicitly given with the value + * {@code NOT_DEFERRABLE}; leaving it to default is not the same. When this + * attribute is not specified, a normal trigger, not a constraint trigger, + * is created. + *

+ * A constraint trigger must have {@code called=AFTER} and + * {@code scope=ROW}. + */ + Constraint constraint() default Constraint.NOT_DEFERRABLE; + /** * The event(s) that will trigger the call. */ Event[] events(); + /** + * The name of another table referenced by the constraint. + * This option is used for foreign-key constraints and is not recommended + * for general use. This can only be specified for constraint triggers. + * If the name should be schema-qualified, use + * {@link #fromSchema() fromSchema} to specify the schema. + */ + String from() default ""; + + /** + * The schema containing another table referenced by the constraint. + * This can only be specified for constraint triggers, and only to name the + * schema for a table named with {@link #from() from}. + */ + String fromSchema() default ""; + /** * Name of the trigger. If not set, the name will * be generated. @@ -92,6 +150,26 @@ enum Scope { STATEMENT, ROW }; */ String[] columns() default {}; + /** + * Name to refer to "before" table of affected rows. Only usable in an AFTER + * trigger whose {@code events} include UPDATE or DELETE. The trigger + * function can issue queries as if a table by this name exists and contains + * all rows affected by the event, in their prior state. (If the trigger is + * called for an event other than UPDATE or DELETE, the function can still + * query a table by this name, which will appear to be empty.) + */ + String tableOld() default ""; + + /** + * Name to refer to "after" table of affected rows. Only usable in an AFTER + * trigger whose {@code events} include UPDATE or INSERT. The trigger + * function can issue queries as if a table by this name exists and contains + * all rows affected by the event, in their new state. (If the trigger is + * called for an event other than UPDATE or INSERT, the function can still + * query a table by this name, which will appear to be empty.) + */ + String tableNew() default ""; + /** * A comment to be associated with the trigger. If left to default, * and the Java function has a doc comment, its first sentence will be used. diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/package-info.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/package-info.java index 32c6984e..932113bd 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/annotation/package-info.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/package-info.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016 Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -13,6 +13,7 @@ /** * Annotations for use in Java code to generate the SQLJ Deployment Descriptor * automatically. + *

+ *<h2>Eliminating error-prone hand-maintained SQL scripts</h2>

*

* To define functions or types in PL/Java requires more than one step. The * Java code must be written, compiled to a jar, and made available to the @@ -22,61 +23,157 @@ * version that undoes it when uninstalling the jar) can be written in a * prescribed form and stored inside the jar itself as an "SQLJ Deployment * Descriptor", and processed automatically when the jar is installed in or - * removed from the backend. + * removed from the DBMS. *

* To write the deployment descriptor by hand can be tedious and error-prone, * as it must largely duplicate the method and type declarations in the * Java code, but using SQL's syntax and types in place of Java's. Instead, * when the annotations in this package are used in the Java code, the Java - * compiler itself will generate a deployment descriptor file, ready to include - * with the compiled classes to make a complete SQLJ jar. + * compiler itself will generate a deployment descriptor (DDR) file, ready to + * include with the compiled classes to make a complete SQLJ jar. *

* Automatic descriptor generation requires attention to a few things. *

    - *
  • A Java 6 or later Java compiler is required, and at least the - * pljava-api jar must be on its class path. (The full - * pljava.jar would also work, but only pljava-api - * is required.) The jar must be on the class path in any case in order to - * compile PL/Java code. + *
  • The {@code pljava-api} jar must be on the Java compiler's class path. + * (All but the simplest PL/Java functions probably refer to some class in + * PL/Java's API anyway, in which case the jar would already have to be on + * the class path.) + *
  • Java compilers older than Java 23 will automatically find and use + * PL/Java's DDR processor as long as the {@code pljava-api} jar is on the class + * path. Starting in Java 23, the compiler will not do so automatically, and a + * {@code -processor org.postgresql.pljava.annotation.processing.DDRProcessor} + * option is also needed on the {@code javac} command line. (Warnings about this + * are issued starting in Java 21, though the processor is still used + * automatically, with the warnings, until Java 23.) *
  • When recompiling after changing only a few sources, it is possible the * Java compiler will only process a subset of the source files containing * annotations. If so, it may generate an incomplete deployment descriptor, * and a clean build may be required to ensure the complete descriptor is * written. - *
  • Additional options are available when invoking the Java compiler, and - * can be specified with -Aoption=value on the command line: + *
+ *

+ *<h2>New compiler options when generating the deployment descriptor</h2>

+ *

Additional options are available when invoking the Java compiler, and + * can be specified with {@code -Aoption=value} on the command line: *

- *
ddr.output + *
{@code ddr.output} *
The file name to be used for the generated deployment descriptor. * If not specified, the file will be named pljava.ddr and found * in the top directory of the tree where the compiled class files are written. - *
ddr.name.trusted + *
{@code ddr.name.trusted} *
The language name that will be used to declare methods that are * annotated to have {@link org.postgresql.pljava.annotation.Function.Trust#SANDBOXED} behavior. If not - * specified, the name java will be used. It must match the name + * specified, the name {@code java} will be used. It must match the name * used for the "trusted" language declaration when PL/Java was installed. - *
ddr.name.untrusted + *
{@code ddr.name.untrusted} *
The language name that will be used to declare methods that are * annotated to have {@link org.postgresql.pljava.annotation.Function.Trust#UNSANDBOXED} behavior. If not - * specified, the name javaU will be used. It must match the name + * specified, the name {@code javaU} will be used. It must match the name * used for the "untrusted" language declaration when PL/Java was installed. - *
ddr.implementor + *
{@code ddr.implementor} *
The identifier (defaulting to {@code PostgreSQL} if not specified here) * that will be used in the {@code <implementor block>}s wrapping any SQL * generated from elements that do not specify their own. If this is set to a * single hyphen (-), elements that specify no implementor will produce plain * {@code <SQL statement>}s not wrapped in {@code <implementor block>}s. + *
{@code ddr.reproducible} + *
When {@code true} (the default), SQL statements are written to the + * deployment descriptor in an order meant to be consistent across successive + * compilations of the same sources. This option is further discussed below. *
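(Illustrative aside, not part of this change: putting the options together, a compiler invocation might look something like the line below; the jar path, output path, and source file are placeholders, and the {@code -processor} option is only needed on Java 23 or later, as noted above.)

    javac -classpath pljava-api.jar \
        -processor org.postgresql.pljava.annotation.processing.DDRProcessor \
        -Addr.output=build/pljava.ddr -Addr.implementor=PostgreSQL \
        org/example/Greetings.java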
- *
  • The deployment descriptor may contain statements that cannot succeed if + *

+ *<h2>Controlling order of statements in the deployment descriptor</h2>

    + *

    The deployment descriptor may contain statements that cannot succeed if * placed in the wrong order, and to keep a manually-edited script in a workable * order while adding and modifying code can be difficult. Most of the - * annotations in this package accept arbitrary requires and - * provides strings, which can be used to control the order of + * annotations in this package accept arbitrary {@code requires} and + * {@code provides} strings, which can be used to control the order of * statements in the generated descriptor. The strings given for - * requires and provides have no meaning to the + * {@code requires} and {@code provides} have no meaning to the * compiler, except that it will make sure not to write anything that - * requires some string X into the generated script - * before whatever provides it. + * {@code requires} some string X into the generated script + * before whatever {@code provides} it. + *

+ *<h3>Effect of {@code ddr.reproducible}</h3>

    + *

    There can be multiple ways to order the statements in the deployment + * descriptor to satisfy the given {@code provides} and {@code requires} + * relationships. While the compiler will always write the descriptor in an + * order that satisfies those relationships, when the {@code ddr.reproducible} + * option is {@code false}, the precise order may differ between successive + * compilations of the same sources, which should not affect successful + * loading and unloading of the jar with {@code install_jar} and + * {@code remove_jar}. In testing, this can help to confirm that all of the + * needed {@code provides} and {@code requires} relationships have been + * declared. When the {@code ddr.reproducible} option is {@code true}, the order + * of statements in the deployment descriptor will be one of the possible + * orders, chosen arbitrarily but consistently between multiple compilations as + * long as the sources are unchanged. This can be helpful in software + * distribution when reproducible output is wanted. + *

+ *<h2>Conditional execution in the deployment descriptor</h2>

    + *

The deployment-descriptor syntax fixed by the ISO SQL/JRT standard has + * a rudimentary conditional-inclusion feature based on + * {@code <implementor block>}s. + * SQL statements wrapped in {@code BEGIN}/{@code END} with an + * {@code <implementor name>} are executed only if that name is recognized + * by the DBMS when installing or removing the jar. Statements in the deployment + * descriptor that are not wrapped in an {@code <implementor block>} are + * executed unconditionally. + *

PL/Java's descriptor generator normally emits statements + * as {@code <implementor block>}s, using the name {@code PostgreSQL} + * (or the value of the {@code ddr.implementor} option if present on + * the compiler command line) by default, or a specific name supplied + * with {@code implementor=} to one of the annotations in this package. + *

    When loading or unloading a jar file and processing its deployment + * descriptor, PL/Java 'recognizes' any implementor name listed in the runtime + * setting {@code pljava.implementors}, which contains only {@code PostgreSQL} + * by default. + *

    The {@code pljava.implementors} setting can be changed, even by SQL + * statements within a deployment descriptor, to affect which subsequent + * statements will be executed. An SQL statement may test some condition and + * set {@code pljava.implementors} accordingly. In PL/Java's supplied examples, + * ConditionalDDR illustrates this approach to conditional execution. + *

Naturally, this scheme requires the SQL generator to emit the statement + * that tests the condition earlier in the deployment descriptor than + * the statements relying on the {@code <implementor name>} being set. + * Building on the existing ability to control the order of statements + * using {@code provides} and {@code requires} elements, an {@code implementor} + * element specified in the annotation for a statement is treated also as + * an implicit {@code requires} for that name, so the programmer only needs + * to place an explicit {@code provides} element on whatever + * {@link SQLAction SQLAction} tests the condition and determines if the name + * will be recognized. + *
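(Illustrative aside, not part of this change: a rough sketch of that pattern, loosely modeled on the ConditionalDDR example mentioned above; the version test, the implementor name, and the exact SQL are only illustrative. Note that the {@code provides} string and the {@code implementor} string must match exactly.)

    import org.postgresql.pljava.annotation.Function;
    import org.postgresql.pljava.annotation.SQLAction;

    // Hypothetical sketch: the SQLAction 'provides' the implementor name it
    // may add to pljava.implementors; the function's 'implementor' element
    // implicitly 'requires' it, so the condition test is emitted first.
    @SQLAction(provides = "postgresql_ge_120000", install =
        "SELECT CASE WHEN" +
        "  120000 <= CAST(current_setting('server_version_num') AS integer)" +
        " THEN set_config('pljava.implementors', 'postgresql_ge_120000,' ||" +
        "  current_setting('pljava.implementors'), true)" +
        " END"
    )
    public class VersionGated
    {
        @Function(implementor = "postgresql_ge_120000")
        public static String onlyOnNewerServers()
        {
            return "declared only when the condition held at deployment time";
        }
    }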

    The {@code provides}/{@code requires} relationship so created differs + * in three ways from other {@code provides}/{@code requires} relationships: + *

      + *
    • It does not reverse for generating {@code remove} actions. + * Normal dependencies must be reversed for that case, so dependent objects + * are removed before those they depend on. By contrast, a condition determining + * the setting of an implementor name must be evaluated before the name + * is needed, whether the jar is being installed or removed. + *
    • If it does not have an explicit {@code remove} action (the usual case), + * its {@code install} action (the condition test and setting of the name) + * is used both when installing and removing. + *
    • It is weak. The SQL generator does not flag an error if the implicit + * {@code requires} for an implementor name is not satisfied by any annotation's + * {@code provides} in the visible Java sources. It is possible the name may be + * set some other way in the DBMS environment where the jar is to be deployed. + * Faced with statements that require such 'unprovided' implementor names, + * the SQL generator just falls back to emitting them as late in the deployment + * descriptor as possible, after all other statements that do not depend + * on them. *
    + *

+ *<h3>Matching {@code implementor} and {@code provides}</h3>

    + *

Given the 'weak' nature of the {@code implementor}/{@code provides} + * relationship, an error will not be reported if a spelling or upper/lower case + * difference prevents identifying an {@code <implementor name>} with the + * {@code provides} string of an annotated statement intended to match it. + * The resulting deployment descriptor may have a workable order + * as a result of the fallback ordering rules, or may have a mysteriously + * unworkable order, particularly of the {@code remove} actions. + *

    According to the ISO SQL/JRT standard, an {@code } is + * an SQL identifier, having a case-insensitive matching behavior unless quoted. + * PL/Java, however, treats a {@code provides} value as an arbitrary Java string + * that can only match exactly, and so PL/Java's SQL generator will successfully + * match up {@code implementor} and {@code provides} strings only when + * they are identical in spelling and case. */ package org.postgresql.pljava.annotation; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Commentable.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Commentable.java new file mode 100644 index 00000000..d0320386 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Commentable.java @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2016-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import javax.lang.model.element.Element; + +interface Commentable +{ + public String comment(); + public void setComment( Object o, boolean explicit, Element e); + public String derivedComment( Element e); +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DBType.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DBType.java new file mode 100644 index 00000000..ac9d4968 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DBType.java @@ -0,0 +1,638 @@ +/* + * Copyright (c) 2020-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.AbstractMap; +import java.util.Map; +import static java.util.Objects.requireNonNull; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import static java.util.regex.Pattern.compile; + +import javax.annotation.processing.Messager; + +import static org.postgresql.pljava.sqlgen.Lexicals + .ISO_AND_PG_IDENTIFIER_CAPTURING; +import static org.postgresql.pljava.sqlgen.Lexicals.ISO_REGULAR_IDENTIFIER_PART; +import static org.postgresql.pljava.sqlgen.Lexicals.PG_REGULAR_IDENTIFIER_PART; +import static org.postgresql.pljava.sqlgen.Lexicals.SEPARATOR; +import static org.postgresql.pljava.sqlgen.Lexicals.identifierFrom; +import static org.postgresql.pljava.sqlgen.Lexicals.separator; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import static org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple.pgFold; + +/** + * Abstraction of a database type, which is usually specified by an + * {@code Identifier.Qualified}, but sometimes by reserved SQL syntax. 
+ */ +abstract class DBType +{ + DBType withModifier(String modifier) + { + return new Modified(this, modifier); + } + + DBType asArray(String notated) + { + return new Array(this, notated); + } + + DBType withDefault(String suffix) + { + return new Defaulting(this, suffix); + } + + String toString(boolean withDefault) + { + return toString(); + } + + abstract DependTag dependTag(); + + /** + * Return the original underlying (leaf) type, either a {@code Named} or + * a {@code Reserved}. + *

    + * Override in non-leaf classes (except {@code Array}). + */ + DBType leaf() + { + return this; + } + + boolean isArray() + { + return false; + } + + @Override + public final boolean equals(Object o) + { + return equals(o, null); + } + + /** + * True if the underlying (leaf) types compare equal (overridden for + * {@code Array}). + *

    + * The assumption is that equality checking will be done for function + * signature equivalence, for which defaults and typmods don't matter + * (but arrayness does). + */ + public final boolean equals(Object o, Messager msgr) + { + if ( this == o ) + return true; + if ( ! (o instanceof DBType) ) + return false; + DBType dt1 = this.leaf(); + DBType dt2 = ((DBType)o).leaf(); + if ( dt1.getClass() != dt2.getClass() ) + return false; + if ( dt1 instanceof Array ) + { + dt1 = ((Array)dt1).m_component.leaf(); + dt2 = ((Array)dt2).m_component.leaf(); + if ( dt1.getClass() != dt2.getClass() ) + return false; + } + if ( dt1 instanceof Named ) + return ((Named)dt1).m_ident.equals(((Named)dt2).m_ident, msgr); + return pgFold(((Reserved)dt1).m_reservedName) + .equals(pgFold(((Reserved)dt2).m_reservedName)); + } + + /** + * Pattern to match type names that are special in SQL, if they appear as + * regular (unquoted) identifiers and without a schema qualification. + *

    + * This list does not include {@code DOUBLE} or {@code NATIONAL}, as the + * reserved SQL form for each includes a following keyword + * ({@code PRECISION} or {@code CHARACTER}/{@code CHAR}, respectively). + * There is a catch-all test in {@code fromSQLTypeAnnotation} that will fall + * back to 'reserved' treatment if the name is followed by anything that + * isn't a parenthesized type modifier, so the fallback will naturally catch + * these two cases. + */ + static final Pattern s_reservedTypeFirstWords = compile( + "(?i:" + + "INT|INTEGER|SMALLINT|BIGINT|REAL|FLOAT|DECIMAL|DEC|NUMERIC|" + + "BOOLEAN|BIT|CHARACTER|CHAR|VARCHAR|TIMESTAMP|TIME|INTERVAL" + + ")" + ); + + /** + * Parse a string, representing an optional parameter/column name followed + * by a type, into an {@code Identifier.Simple}, possibly null, and a + * {@code DBType}. + *

    + * Whitespace (or, strictly, separator; comments would be accepted) must + * separate the name from the type, if the name is not quoted. To omit a + * name and supply only the type, the string must begin with whitespace + * (ahem, separator). + */ + static Map.Entry fromNameAndType(String nandt) + { + Identifier.Simple name = null; + Matcher m = ISO_AND_PG_IDENTIFIER_CAPTURING.matcher(nandt); + if ( m.lookingAt() ) + { + nandt = nandt.substring(m.end()); + name = identifierFrom(m); + } + return + new AbstractMap.SimpleImmutableEntry<>( + name, fromSQLTypeAnnotation(nandt)); + } + + /** + * Make a {@code DBType} from whatever might appear in an {@code SQLType} + * annotation. + *

    + * The possibilities are numerous, as that text used to be dumped rather + * blindly into the descriptor and thus could be whatever PostgreSQL would + * make sense of. The result could be a {@code DBType.Named} if the start of + * the text parses as a (possibly schema-qualified) identifier, or a + * {@code DBType.Reserved} if it doesn't (or it parses as a non-schema- + * qualified regular identifier and matches one of SQL's grammatically + * reserved type names). It could be either of those wrapped in a + * {@code DBType.Modified} if a type modifier was parsed out. It could be + * any of those wrapped in a {@code DBType.Array} if the text ended with any + * of the recognized forms of array dimension notation. The one thing it + * can't be (as a result from this method) is a {@code DBType.Defaulting}; + * that wrapping can be applied to the result later, to carry a default + * value that has been specified at a particular site of use. + *

    + * The parsing strategy is a bit heuristic. An attempt is made to parse a + * (possibly schema-qualified) identifier at the start of the string. + * An attempt is made to find a match for array-dimension notation that runs + * to the end of the string. Whatever lies between gets to be a typmod if it + * looks enough like one, or gets rolled with the front of the string into a + * {@code DBType.Reserved}, which is not otherwise scrutinized; the + * {@code Reserved} case is still more or less a catch-all that will be + * dumped blindly into the descriptor in the hope that PostgreSQL will make + * sense of it. + *

    + * This strategy is used because compared to what can appear in a typmod + * (which could require arbitrary constant expression parsing), the array + * grammar depends on much less. + */ + static DBType fromSQLTypeAnnotation(String value) + { + Identifier.Qualified qname = null; + + Matcher m = SEPARATOR.matcher(value); + separator(m, false); + int postSeparator = m.regionStart(); + + if ( m.usePattern(ISO_AND_PG_IDENTIFIER_CAPTURING).lookingAt() ) + { + Identifier.Simple id1 = identifierFrom(m); + m.region(m.end(), m.regionEnd()); + + separator(m, false); + if ( value.startsWith(".", m.regionStart()) ) + { + m.region(m.regionStart() + 1, m.regionEnd()); + separator(m, false); + if ( m.usePattern(ISO_AND_PG_IDENTIFIER_CAPTURING).lookingAt() ) + { + Identifier.Simple id2 = identifierFrom(m); + qname = id2.withQualifier(id1); + m.region(m.end(), m.regionEnd()); + separator(m, false); + } + } + else + qname = id1.withQualifier(null); + } + + /* + * At this point, qname may have a local name and qualifier, or it may + * have a local name and null qualifier (if a single identifier was + * successfully matched but not followed by a dot). It is also possible + * for qname to be null, either because the start of the string didn't + * look like an identifier at all, or because it did, but was followed + * by a dot, and what followed the dot could not be parsed as another + * identifier. Probably both of those cases are erroneous, but they can + * also be handled by simply treating the content as Reserved and hoping + * PostgreSQL can make sense of it. + * + * Search from here to the end of the string for possible array notation + * that can be stripped off the end, leaving just the middle (if any) to + * be dealt with. + */ + + String arrayNotation = arrayNotationIfPresent(m, value); + + /* + * If arrayNotation is not null, m's region end has been adjusted to + * exclude the array notation. + */ + + boolean reserved; + + if ( null == qname ) + reserved = true; + else if ( null != qname.qualifier() ) + reserved = false; + else + { + Identifier.Simple local = qname.local(); + if ( ! local.folds() ) + reserved = false; + else + { + Matcher m1 = + s_reservedTypeFirstWords.matcher(local.nonFolded()); + reserved = m1.matches(); + } + } + + /* + * If this is a reserved type, just wrap up everything from its start to + * the array notation (if any) as a Reserved; there is no need to try to + * tease out a typmod separately. (The reserved syntax can be quite + * unlike the generic typename(typmod) pattern; there could be what + * looks like a (typmod) between TIME and WITH TIME ZONE, or the moral + * equivalent of a typmod could look like HOUR TO MINUTE, and so on.) + * + * If we think this is a non-reserved type, and there is anything left + * in the matching region (preceding the array notation, if any), then + * it had better be a typmod in the generic form starting with a (. We + * will capture whatever is there and call it a typmod as long as it + * does start that way. (More elaborate checking, such as balancing the + * parens, would require ability to parse an expr_list.) This can allow + * malformed syntax to be uncaught until deployment time when PostgreSQL + * sees it, but that's unchanged from when the entire SQLType string was + * passed along verbatim. The 'threat' model here is just that the + * legitimate developer may get an error later when earlier would be + * more helpful, not a malicious adversary bent on injection. 
+ * + * On the other hand, if what's left doesn't start with a ( then we + * somehow don't know what we're looking at, so fall back and treat it + * as reserved. This will naturally catch the two-token reserved names + * DOUBLE PRECISION, NATIONAL CHARACTER or NATIONAL CHAR, which were + * therefore left out of the s_reservedTypeFirstWords pattern. + */ + + if ( ! reserved && m.regionStart() < m.regionEnd() ) + if ( ! value.startsWith("(", m.regionStart()) ) + reserved = true; + + DBType result; + + if ( reserved ) + result = new DBType.Reserved( + value.substring(postSeparator, m.regionEnd())); + else + { + result = new DBType.Named(qname); + if ( m.regionStart() < m.regionEnd() ) + result = result.withModifier( + value.substring(m.regionStart(), m.regionEnd())); + } + + if ( null != arrayNotation ) + result = result.asArray(arrayNotation); + + return result; + } + + private static final Pattern s_arrayDimStart = compile(String.format( + "(?i:(? + * If a non-null string is returned, the matcher's region-end has been + * adjusted to exclude it. + *

    + * The matcher's associated pattern may have been changed, and the region + * transiently changed, but on return the region will either be the same as + * on entry (if no array notation was found), or have only the region end + * adjusted to exclude the notation. + *

    + * The returned string can include a {@code separator} that followed the + * array notation. + */ + private static String arrayNotationIfPresent(Matcher m, String s) + { + int originalRegionStart = m.regionStart(); + int notationStart; + int dims; + boolean atMostOneDimAllowed; // true after ARRAY keyword + +restart:for ( ;; ) + { + notationStart = -1; + dims = 0; + atMostOneDimAllowed = false; + + m.usePattern(s_arrayDimStart); + if ( ! m.find() ) + break restart; // notationStart is -1 indicating not found + + notationStart = m.start(); + if ( ! "[".equals(m.group()) ) // saw ARRAY + { + atMostOneDimAllowed = true; + m.region(m.end(), m.regionEnd()); + separator(m, false); + if ( ! s.startsWith("[", m.regionStart()) ) + { + if ( m.regionStart() == m.regionEnd() ) + { + dims = 1; // ARRAY separator $ --ok (means 1 dim) + break restart; + } + /* + * ARRAY separator something-other-than-[ + * This is not the match we're looking for. The regionStart + * already points here, so restart the loop to look for + * another potential array notation start beyond this point. + */ + continue restart; + } + m.region(m.regionStart() + 1, m.regionEnd()); + } + + /* + * Invariant: have seen [ and regionStart still points to it. + * Accept optional digits, then ] + * Repeat if followed by a [ + */ + for ( ;; ) + { + m.region(m.regionStart() + 1, m.regionEnd()); + separator(m, false); + + if ( m.usePattern(s_digits).lookingAt() ) + { + m.region(m.end(), m.regionEnd()); + separator(m, false); + } + + if ( ! s.startsWith("]", m.regionStart()) ) + continue restart; + + ++ dims; // have seen a complete [ (\d+)? ] + m.region(m.regionStart() + 1, m.regionEnd()); + separator(m, false); + if ( s.startsWith("[", m.regionStart()) ) + continue; + if ( m.regionStart() == m.regionEnd() ) + if ( ! 
atMostOneDimAllowed || 1 == dims ) + break restart; + continue restart; // not at end, not at [ --start over + } + } + + if ( -1 == notationStart ) + { + m.region(originalRegionStart, m.regionEnd()); + return null; + } + + m.region(originalRegionStart, notationStart); + return s.substring(notationStart); + } + + static final class Reserved extends DBType + { + private final String m_reservedName; + + Reserved(String name) + { + m_reservedName = name; + } + + @Override + public String toString() + { + return m_reservedName; + } + + @Override + DependTag dependTag() + { + return null; + } + + @Override + public int hashCode() + { + return pgFold(m_reservedName).hashCode(); + } + } + + static final class Named extends DBType + { + private final Identifier.Qualified m_ident; + + Named(Identifier.Qualified ident) + { + m_ident = ident; + } + + @Override + public String toString() + { + return m_ident.toString(); + } + + @Override + DependTag dependTag() + { + return new DependTag.Type(m_ident); + } + + @Override + public int hashCode() + { + return m_ident.hashCode(); + } + } + + static final class Modified extends DBType + { + private final DBType m_raw; + private final String m_modifier; + + Modified(DBType raw, String modifier) + { + m_raw = raw; + m_modifier = modifier; + } + + @Override + public String toString() + { + return m_raw.toString() + m_modifier; + } + + @Override + DBType withModifier(String modifier) + { + throw new UnsupportedOperationException( + "withModifier on a Modified"); + } + + @Override + DependTag dependTag() + { + return m_raw.dependTag(); + } + + @Override + public int hashCode() + { + return m_raw.hashCode(); + } + + @Override + DBType leaf() + { + return m_raw.leaf(); + } + } + + static final class Array extends DBType + { + private final DBType m_component; + private final int m_dims; + private final String m_notated; + + Array(DBType component, String notated) + { + assert component instanceof Named + || component instanceof Reserved + || component instanceof Modified; + int dims = 0; + for ( int pos = 0; -1 != (pos = notated.indexOf('[', pos)); ++ pos ) + ++ dims; + m_dims = 0 == dims ? 1 : dims; // "ARRAY" with no [ has dimension 1 + m_notated = notated; + m_component = requireNonNull(component); + } + + @Override + Array asArray(String notated) + { + /* Implementable in principle, but may never be needed */ + throw new UnsupportedOperationException("asArray on an Array"); + } + + @Override + public String toString() + { + return m_component.toString() + m_notated; + } + + @Override + DependTag dependTag() + { + return m_component.dependTag(); + } + + @Override + boolean isArray() + { + return true; + } + + @Override + public int hashCode() + { + return m_component.hashCode(); + } + } + + static final class Defaulting extends DBType + { + private final DBType m_raw; + private final String m_suffix; + + Defaulting(DBType raw, String suffix) + { + assert ! 
(raw instanceof Defaulting); + m_raw = requireNonNull(raw); + m_suffix = suffix; + } + + @Override + Modified withModifier(String notated) + { + throw new UnsupportedOperationException( + "withModifier on a Defaulting"); + } + + @Override + Array asArray(String notated) + { + throw new UnsupportedOperationException("asArray on a Defaulting"); + } + + @Override + Array withDefault(String suffix) + { + /* Implementable in principle, but may never be needed */ + throw new UnsupportedOperationException( + "withDefault on a Defaulting"); + } + + @Override + public String toString() + { + return m_raw.toString() + " " + m_suffix; + } + + @Override + String toString(boolean withDefault) + { + return withDefault ? toString() : m_raw.toString(); + } + + @Override + DependTag dependTag() + { + return m_raw.dependTag(); + } + + @Override + boolean isArray() + { + return m_raw.isArray(); + } + + @Override + public int hashCode() + { + return m_raw.hashCode(); + } + + @Override + DBType leaf() + { + return m_raw.leaf(); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessor.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessor.java new file mode 100644 index 00000000..e59c63c1 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessor.java @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.lang.annotation.Annotation; + +import java.util.Set; + +import javax.annotation.processing.AbstractProcessor; +import javax.annotation.processing.ProcessingEnvironment; +import javax.annotation.processing.RoundEnvironment; +import javax.annotation.processing.SupportedAnnotationTypes; +import javax.annotation.processing.SupportedOptions; + +import javax.lang.model.SourceVersion; + +import javax.lang.model.element.TypeElement; + +/** + * Annotation processor invoked by the annotations framework in javac for + * annotations of type org.postgresql.pljava.annotation.*. + * + * Simply forwards to a DDRProcessorImpl instance that is not constructed + * until the framework calls init (since there is nothing useful for the + * constructor to do until then). + * + * @author Thomas Hallgren - pre-Java6 version + * @author Chapman Flack (Purdue Mathematics) - update to Java6, + * add SQLType/SQLAction, polishing + */ +@SupportedAnnotationTypes({"org.postgresql.pljava.annotation.*"}) +@SupportedOptions +({ + "ddr.reproducible", // default true + "ddr.name.trusted", // default "java" + "ddr.name.untrusted", // default "javaU" + "ddr.implementor", // implementor when not annotated, default "PostgreSQL" + "ddr.output" // name of ddr file to write +}) +public class DDRProcessor extends AbstractProcessor +{ + private DDRProcessorImpl impl; + + @Override + public SourceVersion getSupportedSourceVersion() + { + /* + * Because this must compile on Java versions back to 9, it must not + * mention by name any SourceVersion constant later than RELEASE_9. 
+ * + * Update latest_tested to be the latest Java release on which this + * annotation processor has been tested without problems. + */ + int latest_tested = 25; + int ordinal_9 = SourceVersion.RELEASE_9.ordinal(); + int ordinal_latest = latest_tested - 9 + ordinal_9; + + SourceVersion latestSupported = SourceVersion.latestSupported(); + + if ( latestSupported.ordinal() <= ordinal_latest ) + return latestSupported; + + return SourceVersion.values()[ordinal_latest]; + } + + @Override + public void init( ProcessingEnvironment processingEnv) + { + super.init( processingEnv); + impl = new DDRProcessorImpl( processingEnv); + } + + @Override + public boolean process( Set tes, RoundEnvironment re) + { + if ( null == impl ) + throw new IllegalStateException( + "The annotation processing framework has called process() " + + "before init()"); + return impl.process( tes, re); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessorImpl.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessorImpl.java new file mode 100644 index 00000000..d5b6aa4b --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRProcessorImpl.java @@ -0,0 +1,5973 @@ +/* + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.io.IOException; + +import java.lang.annotation.Annotation; + +import java.lang.reflect.Array; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; + +import java.math.BigDecimal; +import java.math.BigInteger; + +import java.sql.ResultSet; +import java.sql.SQLData; +import java.sql.SQLInput; +import java.sql.SQLOutput; +import java.sql.Time; +import java.sql.Timestamp; + +import java.text.BreakIterator; + +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.OffsetTime; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; + +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import static java.util.Collections.unmodifiableSet; +import java.util.Comparator; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.ListIterator; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import static java.util.Objects.requireNonNull; +import java.util.PriorityQueue; +import java.util.Queue; +import java.util.Set; + +import java.util.function.BiConsumer; +import java.util.function.Supplier; +import static java.util.function.UnaryOperator.identity; + +import java.util.stream.Stream; +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.joining; +import static java.util.stream.Collectors.mapping; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toSet; + +import javax.annotation.processing.Filer; +import javax.annotation.processing.Messager; +import 
javax.annotation.processing.ProcessingEnvironment; +import javax.annotation.processing.RoundEnvironment; + +import javax.lang.model.SourceVersion; + +import javax.lang.model.element.AnnotationMirror; +import javax.lang.model.element.AnnotationValue; +import javax.lang.model.element.Element; +import javax.lang.model.element.ElementKind; +import javax.lang.model.element.ExecutableElement; +import javax.lang.model.element.Modifier; +import javax.lang.model.element.ModuleElement; +import javax.lang.model.element.NestingKind; +import javax.lang.model.element.TypeElement; +import javax.lang.model.element.VariableElement; + +import javax.lang.model.type.ArrayType; +import javax.lang.model.type.DeclaredType; +import javax.lang.model.type.ExecutableType; +import javax.lang.model.type.NoType; +import javax.lang.model.type.PrimitiveType; +import javax.lang.model.type.TypeKind; +import javax.lang.model.type.TypeMirror; + +import javax.lang.model.util.Elements; +import javax.lang.model.util.Types; + +import static javax.lang.model.util.ElementFilter.constructorsIn; +import static javax.lang.model.util.ElementFilter.methodsIn; + +import static javax.tools.Diagnostic.Kind; + +import org.postgresql.pljava.ResultSetHandle; +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.TriggerData; + +import org.postgresql.pljava.annotation.Aggregate; +import org.postgresql.pljava.annotation.Cast; +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.Operator; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLActions; +import org.postgresql.pljava.annotation.SQLType; +import org.postgresql.pljava.annotation.Trigger; +import org.postgresql.pljava.annotation.BaseUDT; +import org.postgresql.pljava.annotation.MappedUDT; + +import org.postgresql.pljava.sqlgen.Lexicals; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +/** + * Where the work happens. + */ +class DDRProcessorImpl +{ + // Things supplied by the calling framework in ProcessingEnvironment, + // used enough that it makes sense to break them out here with + // short names that all nested classes below will inherit. + // + final Elements elmu; + final Filer filr; + final Locale loca; + final Messager msgr; + final Map opts; + final SourceVersion srcv; + final Types typu; + + // Similarly, the TypeMapper should be easily available to code below. 
+ // + final TypeMapper tmpr; + final SnippetTiebreaker snippetTiebreaker; + + // Options obtained from the invocation + // + final Identifier.Simple nameTrusted; + final Identifier.Simple nameUntrusted; + final String output; + final Identifier.Simple defaultImplementor; + final boolean reproducible; + + // Certain known types that need to be recognized in the processed code + // + final DeclaredType TY_ITERATOR; + final DeclaredType TY_OBJECT; + final DeclaredType TY_RESULTSET; + final DeclaredType TY_RESULTSETPROVIDER; + final DeclaredType TY_RESULTSETHANDLE; + final DeclaredType TY_SQLDATA; + final DeclaredType TY_SQLINPUT; + final DeclaredType TY_SQLOUTPUT; + final DeclaredType TY_STRING; + final DeclaredType TY_TRIGGERDATA; + final NoType TY_VOID; + + // Our own annotations + // + final TypeElement AN_FUNCTION; + final TypeElement AN_SQLTYPE; + final TypeElement AN_TRIGGER; + final TypeElement AN_BASEUDT; + final TypeElement AN_MAPPEDUDT; + final TypeElement AN_SQLACTION; + final TypeElement AN_SQLACTIONS; + final TypeElement AN_CAST; + final TypeElement AN_CASTS; + final TypeElement AN_AGGREGATE; + final TypeElement AN_AGGREGATES; + final TypeElement AN_OPERATOR; + final TypeElement AN_OPERATORS; + + // Certain familiar DBTypes (capitalized as this file historically has) + // + final DBType DT_BOOLEAN = new DBType.Reserved("boolean"); + final DBType DT_INTEGER = new DBType.Reserved("integer"); + final DBType DT_RECORD = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.RECORD")); + final DBType DT_TRIGGER = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.trigger")); + final DBType DT_VOID = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.void")); + final DBType DT_ANY = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.\"any\"")); + final DBType DT_BYTEA = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.bytea")); + final DBType DT_INTERNAL = new DBType.Named( + Identifier.Qualified.nameFromJava("pg_catalog.internal")); + + // Function signatures for certain known functions + // + final DBType[] SIG_TYPMODIN = + { DBType.fromSQLTypeAnnotation("pg_catalog.cstring[]") }; + final DBType[] SIG_TYPMODOUT = { DT_INTEGER }; + final DBType[] SIG_ANALYZE = { DT_INTERNAL }; + + DDRProcessorImpl( ProcessingEnvironment processingEnv) + { + elmu = processingEnv.getElementUtils(); + filr = processingEnv.getFiler(); + loca = processingEnv.getLocale(); + msgr = processingEnv.getMessager(); + opts = processingEnv.getOptions(); + srcv = processingEnv.getSourceVersion(); + typu = processingEnv.getTypeUtils(); + + tmpr = new TypeMapper(); + + String optv; + + optv = opts.get( "ddr.name.trusted"); + if ( null != optv ) + nameTrusted = Identifier.Simple.fromJava(optv); + else + nameTrusted = Identifier.Simple.fromJava("java"); + + optv = opts.get( "ddr.name.untrusted"); + if ( null != optv ) + nameUntrusted = Identifier.Simple.fromJava(optv); + else + nameUntrusted = Identifier.Simple.fromJava("javaU"); + + optv = opts.get( "ddr.implementor"); + if ( null != optv ) + defaultImplementor = "-".equals( optv) ? null : + Identifier.Simple.fromJava(optv); + else + defaultImplementor = Identifier.Simple.fromJava("PostgreSQL"); + + optv = opts.get( "ddr.output"); + if ( null != optv ) + output = optv; + else + output = "pljava.ddr"; + + optv = opts.get( "ddr.reproducible"); + if ( null != optv ) + reproducible = Boolean.parseBoolean( optv); + else + reproducible = true; + + snippetTiebreaker = reproducible ? 
new SnippetTiebreaker() : null; + + TY_ITERATOR = declaredTypeForClass(java.util.Iterator.class); + TY_OBJECT = declaredTypeForClass(Object.class); + TY_RESULTSET = declaredTypeForClass(java.sql.ResultSet.class); + TY_RESULTSETPROVIDER = declaredTypeForClass(ResultSetProvider.class); + TY_RESULTSETHANDLE = declaredTypeForClass(ResultSetHandle.class); + TY_SQLDATA = declaredTypeForClass(SQLData.class); + TY_SQLINPUT = declaredTypeForClass(SQLInput.class); + TY_SQLOUTPUT = declaredTypeForClass(SQLOutput.class); + TY_STRING = declaredTypeForClass(String.class); + TY_TRIGGERDATA = declaredTypeForClass(TriggerData.class); + TY_VOID = typu.getNoType(TypeKind.VOID); + + AN_FUNCTION = elmu.getTypeElement( Function.class.getName()); + AN_SQLTYPE = elmu.getTypeElement( SQLType.class.getName()); + AN_TRIGGER = elmu.getTypeElement( Trigger.class.getName()); + AN_BASEUDT = elmu.getTypeElement( BaseUDT.class.getName()); + AN_MAPPEDUDT = elmu.getTypeElement( MappedUDT.class.getName()); + + // Repeatable annotations and their containers. + // + AN_SQLACTION = elmu.getTypeElement( SQLAction.class.getName()); + AN_SQLACTIONS = elmu.getTypeElement( SQLActions.class.getName()); + AN_CAST = elmu.getTypeElement( Cast.class.getName()); + AN_CASTS = elmu.getTypeElement( + Cast.Container.class.getCanonicalName()); + AN_AGGREGATE = elmu.getTypeElement( Aggregate.class.getName()); + AN_AGGREGATES = elmu.getTypeElement( + Aggregate.Container.class.getCanonicalName()); + AN_OPERATOR = elmu.getTypeElement( Operator.class.getName()); + AN_OPERATORS = elmu.getTypeElement( + Operator.Container.class.getCanonicalName()); + } + + void msg( Kind kind, String fmt, Object... args) + { + msgr.printMessage( kind, String.format( fmt, args)); + } + + void msg( Kind kind, Element e, String fmt, Object... args) + { + msgr.printMessage( kind, String.format( fmt, args), e); + } + + void msg( Kind kind, Element e, AnnotationMirror a, + String fmt, Object... args) + { + msgr.printMessage( kind, String.format( fmt, args), e, a); + } + + void msg( Kind kind, Element e, AnnotationMirror a, AnnotationValue v, + String fmt, Object... args) + { + msgr.printMessage( kind, String.format( fmt, args), e, a, v); + } + + /** + * Map a {@code Class} to a {@code TypeElement} and from there to a + * {@code DeclaredType}. + *

    + * This needs to work around some weird breakage in javac 10 and 11 when + * given a {@code --release} option naming an earlier release, as described + * in commit c763cee. The version of {@code getTypeElement} with a module + * parameter is needed then, because the other version will go bonkers and + * think it found the class in every module that transitively requires + * its actual module and then return null because the result wasn't + * unique. That got fixed in Java 12, but because 11 is the LTS release and + * there won't be another for a while yet, it is better to work around the + * issue here. + *
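+ * For instance, resolving {@code java.sql.ResultSet} with the workaround
+ * amounts to something like
+ * <pre>{@code
+ *  ModuleElement m = elmu.getModuleElement("java.sql");
+ *  TypeElement  e = elmu.getTypeElement(m, "java.sql.ResultSet");
+ *  DeclaredType t = typu.getDeclaredType(e);
+ * }</pre>
+ * which is what the method below does whenever the class's module is named.
+ *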

    + * If not supporting Java 10 or 11, this could be simplified to + * {@code typu.getDeclaredType(elmu.getTypeElement(className))}. + */ + private DeclaredType declaredTypeForClass(Class clazz) + { + String className = clazz.getName(); + String moduleName = clazz.getModule().getName(); + + TypeElement e; + + if ( null == moduleName ) + e = elmu.getTypeElement(className); + else + { + ModuleElement m = elmu.getModuleElement(moduleName); + if ( null == m ) + e = elmu.getTypeElement(className); + else + e = elmu.getTypeElement(m, className); + } + + requireNonNull(e, + () -> "unexpected failure to resolve TypeElement " + className); + + DeclaredType t = typu.getDeclaredType(e); + + requireNonNull(t, + () -> "unexpected failure to resolve DeclaredType " + e); + + return t; + } + + /** + * Key usable in a mapping from (Object, Snippet-subtype) to Snippet. + * Because there's no telling in which order a Map implementation will + * compare two keys, the class matches if either one is assignable to + * the other. That's ok as long as the Snippet-subtype is never Snippet + * itself, no Object ever has two Snippets hung on it where one extends + * the other, and getSnippet is always called for the widest of any of + * the types it may retrieve. + */ + static final class SnippetsKey + { + final Object o; + final Class c; + SnippetsKey(Object o, Class c) + { + assert Snippet.class != c : "Snippet key must be a subtype"; + this.o = o; + this.c = c; + } + public boolean equals(Object oth) + { + if ( ! (oth instanceof SnippetsKey) ) + return false; + SnippetsKey osk = (SnippetsKey)oth; + return o.equals( osk.o) + && ( c.isAssignableFrom( osk.c) || osk.c.isAssignableFrom( c) ); + } + public int hashCode() + { + return o.hashCode(); // must not depend on c (subtypes will match) + } + } + + /** + * Collection of code snippets being accumulated (possibly over more than + * one round), keyed by the object for which each snippet has been + * generated. + */ + /* + * This is a LinkedHashMap so that the order of handling annotation types + * in process() below will be preserved in calling their characterize() + * methods at end-of-round, and so, for example, characterize() on a Cast + * can use values set by characterize() on an associated Function. + */ + Map snippets = new LinkedHashMap<>(); + + S getSnippet(Object o, Class c, Supplier ctor) + { + return + c.cast(snippets + .computeIfAbsent(new SnippetsKey( o, c), k -> ctor.get())); + } + + void putSnippet( Object o, Snippet s) + { + snippets.put( new SnippetsKey( o, s.getClass()), s); + } + + /** + * Queue on which snippets are entered in preparation for topological + * ordering. Has to be an instance field because populating the queue + * (which involves invoking the snippets' characterize methods) cannot + * be left to generateDescriptor, which runs in the final round. This is + * (AFAICT) another workaround for javac 7's behavior of throwing away + * symbol tables between rounds; when characterize was invoked in + * generateDescriptor, any errors reported were being shown with no source + * location info, because it had been thrown away. + */ + List> snippetVPairs = new ArrayList<>(); + + /** + * Map from each arbitrary provides/requires label to the snippet + * that 'provides' it (snippets, in some cases). Has to be out here as an + * instance field for the same reason {@code snippetVPairs} does. + *
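+ * For illustration (tag and SQL text invented), two snippets could be tied
+ * together by an explicit tag with annotations such as
+ * <pre>{@code
+ *  @SQLAction(provides="foo schema", install="CREATE SCHEMA foo")
+ *  @SQLAction(requires="foo schema", install="CREATE TABLE foo.t (i int)")
+ * }</pre>
+ * where this map records the first snippet as a provider of the
+ * "foo schema" label required by the second.
+ *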

    + * Originally limited each tag to have only one provider; that is still + * enforced for implicitly-generated tags, but relaxed for explicit ones + * supplied in annotations, hence the list. + */ + Map>> provider = new HashMap<>(); + + /** + * Find the elements in each round that carry any of the annotations of + * interest and generate code snippets accordingly. On the last round, with + * all processing complete, generate the deployment descriptor file. + */ + boolean process( Set tes, RoundEnvironment re) + { + boolean functionPresent = false; + boolean sqlActionPresent = false; + boolean baseUDTPresent = false; + boolean mappedUDTPresent = false; + boolean castPresent = false; + boolean aggregatePresent = false; + boolean operatorPresent = false; + + boolean willClaim = true; + + for ( TypeElement te : tes ) + { + if ( AN_FUNCTION.equals( te) ) + functionPresent = true; + else if ( AN_BASEUDT.equals( te) ) + baseUDTPresent = true; + else if ( AN_MAPPEDUDT.equals( te) ) + mappedUDTPresent = true; + else if ( AN_SQLTYPE.equals( te) ) + ; // these are handled within FunctionImpl + else if ( AN_SQLACTION.equals( te) || AN_SQLACTIONS.equals( te) ) + sqlActionPresent = true; + else if ( AN_CAST.equals( te) || AN_CASTS.equals( te) ) + castPresent = true; + else if ( AN_AGGREGATE.equals( te) || AN_AGGREGATES.equals( te) ) + aggregatePresent = true; + else if ( AN_OPERATOR.equals( te) || AN_OPERATORS.equals( te) ) + operatorPresent = true; + else + { + msg( Kind.WARNING, te, + "PL/Java annotation processor version may be older than " + + "this annotation:\n%s", te.toString()); + willClaim = false; + } + } + + if ( baseUDTPresent ) + for ( Element e : re.getElementsAnnotatedWith( AN_BASEUDT) ) + processUDT( e, UDTKind.BASE); + + if ( mappedUDTPresent ) + for ( Element e : re.getElementsAnnotatedWith( AN_MAPPEDUDT) ) + processUDT( e, UDTKind.MAPPED); + + if ( functionPresent ) + for ( Element e : re.getElementsAnnotatedWith( AN_FUNCTION) ) + processFunction( e); + + if ( sqlActionPresent ) + for ( Element e + : re.getElementsAnnotatedWithAny( AN_SQLACTION, AN_SQLACTIONS) ) + processRepeatable( + e, AN_SQLACTION, AN_SQLACTIONS, SQLActionImpl.class, null); + + if ( castPresent ) + for ( Element e + : re.getElementsAnnotatedWithAny( AN_CAST, AN_CASTS) ) + processRepeatable( + e, AN_CAST, AN_CASTS, CastImpl.class, null); + + if ( operatorPresent ) + for ( Element e + : re.getElementsAnnotatedWithAny( AN_OPERATOR, AN_OPERATORS) ) + processRepeatable( + e, AN_OPERATOR, AN_OPERATORS, OperatorImpl.class, + this::operatorPreSynthesize); + + if ( aggregatePresent ) + for ( Element e + : re.getElementsAnnotatedWithAny( AN_AGGREGATE, AN_AGGREGATES) ) + processRepeatable( + e, AN_AGGREGATE, AN_AGGREGATES, AggregateImpl.class, null); + + tmpr.workAroundJava7Breakage(); // perhaps to be fixed in Java 9? nope. + + if ( ! re.processingOver() ) + defensiveEarlyCharacterize(); + else if ( ! re.errorRaised() ) + generateDescriptor(); + + return willClaim; + } + + /** + * Iterate over collected snippets, characterize them, and enter them + * (if no error) in the data structures for topological ordering. Was + * originally the first part of {@code generateDescriptor}, but that is + * run in the final round, which is too late for javac 7 anyway, which + * throws symbol tables away between rounds. Any errors reported from + * characterize were being shown without source locations, because the + * information was gone. This may now be run more than once, so the + * {@code snippets} map is cleared before returning. 
+ */ + void defensiveEarlyCharacterize() + { + for ( Snippet snip : snippets.values() ) + { + Set ready = snip.characterize(); + for ( Snippet readySnip : ready ) + { + VertexPair v = new VertexPair<>( readySnip); + snippetVPairs.add( v); + for ( DependTag t : readySnip.provideTags() ) + { + List> ps = + provider.computeIfAbsent(t, k -> new ArrayList<>()); + /* + * Explicit tags are allowed more than one provider. + */ + if ( t instanceof DependTag.Explicit || ps.isEmpty() ) + ps.add(v); + else + msg(Kind.ERROR, "tag %s has more than one provider", t); + } + } + } + snippets.clear(); + } + + /** + * Arrange the collected snippets into a workable sequence (nothing with + * requires="X" can come before whatever has provides="X"), then create + * a deployment descriptor file in proper form. + */ + void generateDescriptor() + { + boolean errorRaised = false; + Set fwdConsumers = new HashSet<>(); + Set revConsumers = new HashSet<>(); + + for ( VertexPair v : snippetVPairs ) + { + List> ps; + + /* + * First handle the implicit requires(implementor()). This is unlike + * the typical provides/requires relationship, in that it does not + * reverse when generating the 'remove' actions. Conditions that + * determined what got installed must also be evaluated early and + * determine what gets removed. + */ + Identifier.Simple impName = v.payload().implementorName(); + DependTag imp = v.payload().implementorTag(); + if ( null != imp ) + { + ps = provider.get( imp); + if ( null != ps ) + { + fwdConsumers.add( imp); + revConsumers.add( imp); + + ps.forEach(p -> + { + p.fwd.precede( v.fwd); + p.rev.precede( v.rev); + + /* + * A snippet providing an implementor tag probably has + * no undeployStrings, because its deployStrings should + * be used on both occasions; if so, replace it with a + * proxy that returns deployStrings for undeployStrings. + */ + if ( 0 == p.rev.payload.undeployStrings().length ) + p.rev.payload = new ImpProvider( p.rev.payload); + }); + } + else if ( ! defaultImplementor.equals( impName, msgr) ) + { + /* + * Don't insist that every implementor tag have a provider + * somewhere in the code. Perhaps the environment will + * provide it at load time. If this is not the default + * implementor, bump the relying vertices' indegree anyway + * so the snippet won't be emitted until the cycle-breaker + * code (see below) sets it free after any others that + * can be handled first. 
+ */ + ++ v.fwd.indegree; + ++ v.rev.indegree; + } + } + for ( DependTag s : v.payload().requireTags() ) + { + ps = provider.get( s); + if ( null != ps ) + { + fwdConsumers.add( s); + revConsumers.add( s); + ps.forEach(p -> + { + p.fwd.precede( v.fwd); + v.rev.precede( p.rev); // these relationships do reverse + }); + } + else if ( s instanceof DependTag.Explicit ) + { + msg( Kind.ERROR, + "tag \"%s\" is required but nowhere provided", s); + errorRaised = true; + } + } + } + + if ( errorRaised ) + return; + + Queue> fwdBlocked = new LinkedList<>(); + Queue> revBlocked = new LinkedList<>(); + + Queue> fwdReady; + Queue> revReady; + if ( reproducible ) + { + fwdReady = new PriorityQueue<>( 11, snippetTiebreaker); + revReady = new PriorityQueue<>( 11, snippetTiebreaker); + } + else + { + fwdReady = new LinkedList<>(); + revReady = new LinkedList<>(); + } + + for ( VertexPair vp : snippetVPairs ) + { + Vertex v = vp.fwd; + if ( 0 == v.indegree ) + fwdReady.add( v); + else + fwdBlocked.add( v); + v = vp.rev; + if ( 0 == v.indegree ) + revReady.add( v); + else + revBlocked.add( v); + } + + Snippet[] fwdSnips = order( fwdReady, fwdBlocked, fwdConsumers, true); + Snippet[] revSnips = order( revReady, revBlocked, revConsumers, false); + + if ( null == fwdSnips || null == revSnips ) + return; // error already reported + + try + { + DDRWriter.emit( fwdSnips, revSnips, this); + } + catch ( IOException ioe ) + { + msg( Kind.ERROR, "while writing %s: %s", output, ioe.getMessage()); + } + } + + /** + * Given a Snippet DAG, either the forward or reverse one, return the + * snippets in a workable order. + * @return Array of snippets in order, or null if no suitable order could + * be found. + */ + Snippet[] order( + Queue> ready, Queue> blocked, + Set consumer, boolean deploying) + { + ArrayList snips = new ArrayList<>(ready.size()+blocked.size()); + Vertex cycleBreaker = null; + +queuerunning: + for ( ; ; ) + { + while ( ! ready.isEmpty() ) + { + Vertex v = ready.remove(); + snips.add(v.payload); + v.use(ready, blocked); + for ( DependTag p : v.payload.provideTags() ) + consumer.remove(p); + } + if ( blocked.isEmpty() ) + break; // all done + + /* + * There are snippets remaining to output but they all have + * indegree > 0, normally a 'cycle' error. But some may have + * breakCycle methods that can help. Add any vertices they return + * onto the ready queue (all at once, so that for reproducible + * builds, the ready queue's ordering constraints will take effect). + */ + boolean cycleBroken = false; + for ( Iterator> it = blocked.iterator(); + it.hasNext(); ) + { + Vertex v = it.next(); + cycleBreaker = v.payload.breakCycle(v, deploying); + if ( null == cycleBreaker ) + continue; + /* + * If v supplied another vertex to go on the ready queue, leave + * v on the blocked queue; it should become ready in due course. + * If v nominated itself as cycle breaker, remove from blocked. + */ + if ( cycleBreaker == v ) + it.remove(); + ready.add(cycleBreaker); + cycleBroken = true; + } + if ( cycleBroken ) + continue; + + /* + * A cycle was detected and no snippet's breakCycle method broke it, + * but there may yet be a way. Somewhere there may be a vertex + * with indegree exactly 1 and an implicit requirement of its + * own implementor tag, with no snippet on record to provide it. + * That's allowed (maybe the installing/removing environment will + * be "providing" that tag anyway), so set one such snippet free + * and see how much farther we get. 
+ */ + for ( Iterator> it = blocked.iterator(); + it.hasNext(); ) + { + Vertex v = it.next(); + if ( 1 < v.indegree ) + continue; + Identifier.Simple impName = v.payload.implementorName(); + if ( null == impName + || defaultImplementor.equals( impName, msgr) ) + continue; + if ( provider.containsKey( v.payload.implementorTag()) ) + continue; + if ( reproducible ) + { + if (null == cycleBreaker || + 0 < snippetTiebreaker.compare(cycleBreaker, v)) + cycleBreaker = v; + } + else + { + -- v.indegree; + it.remove(); + ready.add( v); + continue queuerunning; + } + } + if ( null != cycleBreaker ) + { + blocked.remove( cycleBreaker); + -- cycleBreaker.indegree; + ready.add( cycleBreaker); + cycleBreaker = null; + continue; + } + /* + * Got here? It's a real cycle ... nothing to be done. + */ + for ( DependTag s : consumer ) + msg( Kind.ERROR, "requirement in a cycle: %s", s); + return null; + } + return snips.toArray(new Snippet[snips.size()]); + } + + void putRepeatableSnippet(Element e, T snip) + { + if ( null != snip ) + putSnippet( snip, (Snippet)snip); + } + + /** + * Process an element carrying a repeatable annotation, the container + * of that repeatable annotation, or both. + *

    + * Snippets corresponding to repeatable annotations might not be entered in the + * {@code snippets} map keyed by the target element, as that might not be + * unique. Each populated snippet is passed to putter along with + * the element it annotates, and putter determines what to do with + * it. If putter is null, the default enters the snippet with a key + * made from its class and itself, as typical repeatable snippets are + * not expected to be looked up, only processed when all of the map entries + * are enumerated. + *
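+ * For illustration, a putter that merely collects the populated snippets
+ * (into a hypothetical list named {@code collected}) might look like
+ * <pre>{@code
+ *  BiConsumer<Element,CastImpl> putter = (elem, snip) ->
+ *  {
+ *      if ( null != snip )
+ *          collected.add( snip);
+ *  };
+ * }</pre>
+ *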

    + * After all snippets of the desired class have been processed for a given + * element, a final call to putter is made passing the element and + * null for the snippet. + */ + void processRepeatable( + Element e, TypeElement annot, TypeElement container, Class clazz, + BiConsumer putter) + { + if ( null == putter ) + putter = this::putRepeatableSnippet; + + for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) + { + Element asElement = am.getAnnotationType().asElement(); + if ( asElement.equals( annot) ) + { + T snip; + try + { + snip = clazz.getDeclaredConstructor( DDRProcessorImpl.class, + Element.class, AnnotationMirror.class) + .newInstance( DDRProcessorImpl.this, e, am); + } + catch ( ReflectiveOperationException re ) + { + throw new RuntimeException( + "Incorrect implementation of annotation processor", re); + } + populateAnnotationImpl( snip, e, am); + putter.accept( e, snip); + } + else if ( asElement.equals( container) ) + { + Container c = new Container<>(clazz); + populateAnnotationImpl( c, e, am); + for ( T snip : c.value() ) + putter.accept( e, snip); + } + } + + putter.accept( e, null); + } + + static enum UDTKind { BASE, MAPPED } + + /** + * Process a single element annotated with @BaseUDT or @MappedUDT, as + * indicated by the UDTKind k. + */ + void processUDT( Element e, UDTKind k) + { + /* + * The allowed target type for the UDT annotations is TYPE, which can + * be a class, interface (including annotation type) or enum, of which + * only CLASS is valid here. If it is anything else, just return, as + * that can only mean a source error prevented the compiler making sense + * of it, and the compiler will have its own messages about that. + */ + switch ( e.getKind() ) + { + case CLASS: + break; + case ANNOTATION_TYPE: + case ENUM: + case INTERFACE: + msg( Kind.ERROR, e, "A PL/Java UDT must be a class"); + default: + return; + } + Set mods = e.getModifiers(); + if ( ! mods.contains( Modifier.PUBLIC) ) + { + msg( Kind.ERROR, e, "A PL/Java UDT must be public"); + } + if ( mods.contains( Modifier.ABSTRACT) ) + { + msg( Kind.ERROR, e, "A PL/Java UDT must not be abstract"); + } + if ( ! ((TypeElement)e).getNestingKind().equals( + NestingKind.TOP_LEVEL) ) + { + if ( ! mods.contains( Modifier.STATIC) ) + { + msg( Kind.ERROR, e, + "When nested, a PL/Java UDT must be static (not inner)"); + } + for ( Element ee = e; null != ( ee = ee.getEnclosingElement() ); ) + { + if ( ! ee.getModifiers().contains( Modifier.PUBLIC) ) + msg( Kind.ERROR, ee, + "A PL/Java UDT must not have a non-public " + + "enclosing class"); + if ( ((TypeElement)ee).getNestingKind().equals( + NestingKind.TOP_LEVEL) ) + break; + } + } + + switch ( k ) + { + case BASE: + BaseUDTImpl bu = getSnippet( e, BaseUDTImpl.class, () -> + new BaseUDTImpl( (TypeElement)e)); + for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) + { + if ( am.getAnnotationType().asElement().equals( AN_BASEUDT) ) + populateAnnotationImpl( bu, e, am); + } + bu.registerFunctions(); + break; + + case MAPPED: + MappedUDTImpl mu = getSnippet( e, MappedUDTImpl.class, () -> + new MappedUDTImpl( (TypeElement)e)); + for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) + { + if ( am.getAnnotationType().asElement().equals( AN_MAPPEDUDT) ) + populateAnnotationImpl( mu, e, am); + } + mu.registerMapping(); + break; + } + } + + ExecutableElement huntFor(List ees, String name, + boolean isStatic, TypeMirror retType, TypeMirror... 
paramTypes) + { + ExecutableElement quarry = null; +hunt: for ( ExecutableElement ee : ees ) + { + if ( null != name && ! ee.getSimpleName().contentEquals( name) ) + continue; + if ( ee.isVarArgs() ) + continue; + if ( null != retType + && ! typu.isSameType( ee.getReturnType(), retType) ) + continue; + List pts = + ((ExecutableType)ee.asType()).getParameterTypes(); + if ( pts.size() != paramTypes.length ) + continue; + for ( int i = 0; i < paramTypes.length; ++i ) + if ( ! typu.isSameType( pts.get( i), paramTypes[i]) ) + continue hunt; + Set mods = ee.getModifiers(); + if ( ! mods.contains( Modifier.PUBLIC) ) + continue; + if ( isStatic && ! mods.contains( Modifier.STATIC) ) + continue; + if ( null == quarry ) + quarry = ee; + else + { + msg( Kind.ERROR, ee, + "Found more than one candidate " + + (null == name ? "constructor" : (name + " method"))); + } + } + return quarry; + } + + /** + * Process a single element annotated with @Function. After checking that + * it has the right modifiers to be called via PL/Java, analyze its type + * information and annotations and register an appropriate SQL code snippet. + */ + void processFunction( Element e) + { + /* + * METHOD is the only target type allowed for the Function annotation, + * so the only way for e to be anything else is if some source error has + * prevented the compiler making sense of it. In that case just return + * silently on the assumption that the compiler will have its own + * message about the true problem. + */ + if ( ! ElementKind.METHOD.equals( e.getKind()) ) + return; + + Set mods = e.getModifiers(); + if ( ! mods.contains( Modifier.PUBLIC) ) + { + msg( Kind.ERROR, e, "A PL/Java function must be public"); + } + + for ( Element ee = e; null != ( ee = ee.getEnclosingElement() ); ) + { + ElementKind ek = ee.getKind(); + switch ( ek ) + { + case CLASS: + case INTERFACE: + break; + default: + msg( Kind.ERROR, ee, + "A PL/Java function must not have an enclosing " + ek); + return; + } + + // It's a class or interface, represented by TypeElement + TypeElement te = (TypeElement)ee; + mods = ee.getModifiers(); + + if ( ! mods.contains( Modifier.PUBLIC) ) + msg( Kind.ERROR, ee, + "A PL/Java function must not have a non-public " + + "enclosing class"); + + if ( ! te.getNestingKind().isNested() ) + break; // no need to look above top-level class + } + + FunctionImpl f = getSnippet( e, FunctionImpl.class, () -> + new FunctionImpl( (ExecutableElement)e)); + for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) + { + if ( am.getAnnotationType().asElement().equals( AN_FUNCTION) ) + populateAnnotationImpl( f, e, am); + } + } + + /** + * Populate an array of specified type from an annotation value + * representing an array. + * + * AnnotationValue's getValue() method returns Object, where the + * object is known to be an instance of one of a small set of classes. + * Populating an array when that value represents one is a common + * operation, so it is factored out here. 
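+ *
+ * For example, as used elsewhere in this file:
+ * <pre>{@code
+ *  _defaultValue = avToArray( o, String.class);
+ *  AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class);
+ * }</pre>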
+ */ + static T[] avToArray( Object o, Class k) + { + boolean isEnum = k.isEnum(); + + @SuppressWarnings({"unchecked"}) + List vs = (List)o; + + @SuppressWarnings({"unchecked"}) + T[] a = (T[])Array.newInstance( k, vs.size()); + + int i = 0; + for ( AnnotationValue av : vs ) + { + Object v = getValue( av); + if ( isEnum ) + { + @SuppressWarnings({"unchecked"}) + T t = (T)Enum.valueOf( k.asSubclass( Enum.class), + ((VariableElement)v).getSimpleName().toString()); + a[i++] = t; + } + else + a[i++] = k.cast( v); + } + return a; + } + + /** + * Abstract superclass for synthetic implementations of annotation + * interfaces; these can be populated with element-value pairs from + * an AnnotationMirror and then used in the natural way for access to + * the values. Each subclass of this should implement the intended + * annotation interface, and should also have a + * setFoo(Object,boolean,Element) method for each foo() method in the + * interface. Rather than longwindedly using the type system to enforce + * that the needed setter methods are all there, they will be looked + * up using reflection. + */ + class AbstractAnnotationImpl implements Annotation + { + private Set m_provideTags = new HashSet<>(); + private Set m_requireTags = new HashSet<>(); + + @Override + public Class annotationType() + { + throw new UnsupportedOperationException(); + } + + /** + * Supply the required implementor() method for those subclasses + * that will implement {@link Snippet}. + */ + public String implementor() + { + return null == _implementor ? null : _implementor.pgFolded(); + } + + /** + * Supply the required implementor() method for those subclasses + * that will implement {@link Snippet}. + */ + public Identifier.Simple implementorName() + { + return _implementor; + } + + Identifier.Simple _implementor = defaultImplementor; + String _comment; + boolean commentDerived; + + public void setImplementor( Object o, boolean explicit, Element e) + { + if ( explicit ) + _implementor = "".equals( o) ? null : + Identifier.Simple.fromJava((String)o, msgr); + } + + @Override + public String toString() + { + return String.format( + "(%s)%s", getClass().getSimpleName(), _comment); + } + + public String comment() { return _comment; } + + public void setComment( Object o, boolean explicit, Element e) + { + if ( explicit ) + { + _comment = (String)o; + if ( "".equals( _comment) ) + _comment = null; + } + else + { + _comment = ((Commentable)this).derivedComment( e); + commentDerived = true; + } + } + + protected void replaceCommentIfDerived( String comment) + { + if ( ! commentDerived ) + return; + commentDerived = false; + _comment = comment; + } + + public String derivedComment( Element e) + { + String dc = elmu.getDocComment( e); + if ( null == dc ) + return null; + return firstSentence( dc); + } + + public String firstSentence( String s) + { + BreakIterator bi = BreakIterator.getSentenceInstance( loca); + bi.setText( s); + int start = bi.first(); + int end = bi.next(); + if ( BreakIterator.DONE == end ) + return null; + return s.substring( start, end).trim(); + } + + /** + * Called by a snippet's {@code characterize} method to install its + * explicit, annotation-supplied 'provides' / 'requires' strings, if + * any, into the {@code provideTags} and {@code requireTags} sets, then + * making those sets immutable. 
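+ *
+ * The snippet implementations below typically invoke this from their
+ * {@code characterize} methods as
+ * {@code recordExplicitTags(_provides, _requires)}.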
+ */ + protected void recordExplicitTags(String[] provides, String[] requires) + { + if ( null != provides ) + for ( String s : provides ) + m_provideTags.add(new DependTag.Explicit(s)); + if ( null != requires ) + for ( String s : requires ) + m_requireTags.add(new DependTag.Explicit(s)); + m_provideTags = unmodifiableSet(m_provideTags); + m_requireTags = unmodifiableSet(m_requireTags); + } + + /** + * Return the set of 'provide' tags, mutable before + * {@code recordExplicitTags} has been called, immutable thereafter. + */ + public Set provideTags() + { + return m_provideTags; + } + + /** + * Return the set of 'require' tags, mutable before + * {@code recordExplicitTags} has been called, immutable thereafter. + */ + public Set requireTags() + { + return m_requireTags; + } + } + + class Repeatable extends AbstractAnnotationImpl + { + final Element m_targetElement; + final AnnotationMirror m_origin; + + Repeatable(Element e, AnnotationMirror am) + { + m_targetElement = e; + m_origin = am; + } + } + + /** + * Populate an AbstractAnnotationImpl-derived Annotation implementation + * from the element-value pairs in an AnnotationMirror. For each element + * foo in the annotation interface, the implementation is assumed to have + * a method setFoo(Object o, boolean explicit, element e) where o is the + * element's value as obtained from AnnotationValue.getValue(), explicit + * indicates whether the element was explicitly present in the annotation + * or filled in from a default value, and e is the element carrying the + * annotation (chiefly for use as a location hint in diagnostic messages). + * + * Some of the annotation implementations below will leave certain elements + * null if they were not given explicit values, in order to have a clear + * indication that they were defaulted, even though that is not the way + * normal annotation objects behave. + * + * If a setFoo(Object o, boolean explicit, element e) method is not found + * but there is an accessible field _foo it will be set directly, but only + * if the value was explicitly present in the annotation or the field value + * is null. By this convention, an implementation can declare a field + * initially null and let its default value be filled in from what the + * annotation declares, or initially some non-null value distinct from + * possible annotation values, and be able to tell whether it was explicitly + * set. Note that a field of primitive type will never be seen as null. + */ + void populateAnnotationImpl( + AbstractAnnotationImpl inst, Element e, AnnotationMirror am) + { + Map explicit = + am.getElementValues(); + Map defaulted = + elmu.getElementValuesWithDefaults( am); + + // Astonishingly, even though JLS3 9.7 clearly says "annotations must + // contain an element-value pair for every element of the corresponding + // annotation type, except for those elements with default values, or a + // compile-time error occurs" - in Sun 1.6.0_39 javac never flags + // the promised error, and instead allows us to NPE on something that + // ought to be guaranteed to be there! >:[ + // + // If you want something done right, you have to do it yourself.... + // + + Element anne = am.getAnnotationType().asElement(); + List keys = methodsIn( anne.getEnclosedElements()); + for ( ExecutableElement k : keys ) + if ( ! 
defaulted.containsKey( k) ) + msg( Kind.ERROR, e, am, + "annotation missing required element \"%s\"", + k.getSimpleName()); + + for ( + Map.Entry me + : defaulted.entrySet() + ) + { + ExecutableElement k = me.getKey(); + AnnotationValue av = me.getValue(); + boolean isExplicit = explicit.containsKey( k); + String name = k.getSimpleName().toString(); + Class kl = inst.getClass(); + try + { + Object v = getValue( av); + kl.getMethod( // let setter for foo() be setFoo() + "set"+name.substring( 0, 1).toUpperCase() + + name.substring( 1), + Object.class, boolean.class, Element.class) + .invoke(inst, v, isExplicit, e); + } + catch (AnnotationValueException ave) + { + msg( Kind.ERROR, e, am, + "unresolved value for annotation member \"%s\"" + + " (check for missing/misspelled import, etc.)", + name); + } + catch (NoSuchMethodException nsme) + { + Object v = getValue( av); + try + { + Field f = kl.getField( "_"+name); + Class fkl = f.getType(); + if ( ! isExplicit && null != f.get( inst) ) + continue; + if ( fkl.isArray() ) + { + try { + f.set( inst, avToArray( v, fkl.getComponentType())); + } + catch (AnnotationValueException ave) + { + msg( Kind.ERROR, e, am, + "unresolved value for an element of annotation" + + " member \"%s\" (check for missing/misspelled" + + " import, etc.)", + name); + } + } + else if ( fkl.isEnum() ) + { + @SuppressWarnings("unchecked") + Object t = Enum.valueOf( fkl.asSubclass( Enum.class), + ((VariableElement)v).getSimpleName().toString()); + f.set( inst, t); + } + else + f.set( inst, v); + nsme = null; + } + catch (NoSuchFieldException | IllegalAccessException ex) { } + if ( null != nsme ) + throw new RuntimeException( + "Incomplete implementation in annotation processor", + nsme); + } + catch (IllegalAccessException iae) + { + throw new RuntimeException( + "Incorrect implementation of annotation processor", iae); + } + catch (InvocationTargetException ite) + { + String msg = ite.getCause().getMessage(); + msg( Kind.ERROR, e, am, av, "%s", msg); + } + } + } + + // It could be nice to have another annotation-driven tool that could just + // generate these implementations of some annotation types.... + + class SQLTypeImpl extends AbstractAnnotationImpl implements SQLType + { + public String value() { return _value; } + public String[] defaultValue() { return _defaultValue; } + public boolean optional() { return Boolean.TRUE.equals(_optional); } + public String name() { return _name; } + + String _value; + String[] _defaultValue; + String _name; + Boolean _optional; // boxed so it can be null if not explicit + + public void setValue( Object o, boolean explicit, Element e) + { + if ( explicit ) + _value = (String)o; + } + + public void setDefaultValue( Object o, boolean explicit, Element e) + { + if ( explicit ) + _defaultValue = avToArray( o, String.class); + } + + public void setOptional( Object o, boolean explicit, Element e) + { + if ( explicit ) + _optional = (Boolean)o; + } + + public void setName( Object o, boolean explicit, Element e) + { + if ( ! explicit ) + return; + + _name = (String)o; + if ( _name.startsWith( "\"") + && ! 
Lexicals.ISO_DELIMITED_IDENTIFIER.matcher( _name).matches() + ) + msg( Kind.WARNING, e, "malformed parameter name: %s", _name); + } + } + + class Container + extends AbstractAnnotationImpl + { + public T[] value() { return _value; } + + T[] _value; + final Class _clazz; + + Container(Class clazz) + { + _clazz = clazz; + } + + public void setValue( Object o, boolean explicit, Element e) + { + AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); + + @SuppressWarnings("unchecked") + T[] t = (T[])Array.newInstance( _clazz, ams.length); + _value = t; + + int i = 0; + for ( AnnotationMirror am : ams ) + { + try + { + T a = _clazz.getDeclaredConstructor(DDRProcessorImpl.class, + Element.class, AnnotationMirror.class) + .newInstance(DDRProcessorImpl.this, e, am); + populateAnnotationImpl( a, e, am); + _value [ i++ ] = a; + } + catch ( ReflectiveOperationException re ) + { + throw new RuntimeException( + "Incorrect implementation of annotation processor", re); + } + } + } + } + + class SQLActionImpl + extends Repeatable + implements SQLAction, Snippet + { + SQLActionImpl(Element e, AnnotationMirror am) + { + super(e, am); + } + + public String[] install() { return _install; } + public String[] remove() { return _remove; } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + + public String[] deployStrings() { return _install; } + public String[] undeployStrings() { return _remove; } + + public String[] _install; + public String[] _remove; + public String[] _provides; + public String[] _requires; + + public Set characterize() + { + recordExplicitTags(_provides, _requires); + return Set.of(this); + } + } + + class TriggerImpl + extends AbstractAnnotationImpl + implements Trigger, Snippet, Commentable + { + public String[] arguments() { return _arguments; } + public Constraint constraint() { return _constraint; } + public Event[] events() { return _events; } + public String fromSchema() { return _fromSchema; } + public String from() { return _from; } + public String name() { return _name; } + public String schema() { return _schema; } + public String table() { return _table; } + public Scope scope() { return _scope; } + public Called called() { return _called; } + public String when() { return _when; } + public String[] columns() { return _columns; } + public String tableOld() { return _tableOld; } + public String tableNew() { return _tableNew; } + + public String[] provides() { return new String[0]; } + public String[] requires() { return new String[0]; } + /* Trigger is a Snippet but doesn't directly participate in tsort */ + + public String[] _arguments; + public Constraint _constraint; + public Event[] _events; + public String _fromSchema; + public String _from; + public String _name; + public String _schema; + public String _table; + public Scope _scope; + public Called _called; + public String _when; + public String[] _columns; + public String _tableOld; + public String _tableNew; + + FunctionImpl func; + AnnotationMirror origin; + + boolean refOld; + boolean refNew; + boolean isConstraint = false; + + /* The only values of the Constraint enum are those applicable to + * constraint triggers. To determine whether this IS a constraint + * trigger or not, use the 'explicit' parameter to distinguish whether + * the 'constraint' attribute was or wasn't seen in the annotation. 
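+ *
+ * Illustration (attribute values invented): a trigger annotated like
+ *   @Trigger(constraint=Constraint.INITIALLY_DEFERRED, called=Called.AFTER,
+ *            scope=Scope.ROW, table="t", events={Event.INSERT})
+ * is treated as a constraint trigger, while the same annotation without
+ * the 'constraint' attribute is not.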
+ */ + public void setConstraint( Object o, boolean explicit, Element e) + { + if ( explicit ) + { + isConstraint = true; + _constraint = Constraint.valueOf( + ((VariableElement)o).getSimpleName().toString()); + } + } + + TriggerImpl( FunctionImpl f, AnnotationMirror am) + { + func = f; + origin = am; + } + + public Set characterize() + { + if ( Scope.ROW.equals( _scope) ) + { + for ( Event e : _events ) + if ( Event.TRUNCATE.equals( e) ) + msg( Kind.ERROR, func.func, origin, + "TRUNCATE trigger cannot be FOR EACH ROW"); + } + else if ( Called.INSTEAD_OF.equals( _called) ) + msg( Kind.ERROR, func.func, origin, + "INSTEAD OF trigger cannot be FOR EACH STATEMENT"); + + if ( ! "".equals( _when) && Called.INSTEAD_OF.equals( _called) ) + msg( Kind.ERROR, func.func, origin, + "INSTEAD OF triggers do not support WHEN conditions"); + + if ( 0 < _columns.length ) + { + if ( Called.INSTEAD_OF.equals( _called) ) + msg( Kind.ERROR, func.func, origin, + "INSTEAD OF triggers do not support lists of columns"); + boolean seen = false; + for ( Event e : _events ) + if ( Event.UPDATE.equals( e) ) + seen = true; + if ( ! seen ) + msg( Kind.ERROR, func.func, origin, + "Column list is meaningless unless UPDATE is a trigger event"); + } + + refOld = ! "".equals( _tableOld); + refNew = ! "".equals( _tableNew); + + if ( refOld || refNew ) + { + if ( ! Called.AFTER.equals( _called) ) + msg( Kind.ERROR, func.func, origin, + "Only AFTER triggers can reference OLD TABLE or NEW TABLE"); + boolean badOld = refOld; + boolean badNew = refNew; + for ( Event e : _events ) + { + switch ( e ) + { + case INSERT: badNew = false; break; + case UPDATE: badOld = badNew = false; break; + case DELETE: badOld = false; break; + } + } + if ( badOld ) + msg( Kind.ERROR, func.func, origin, + "Trigger must be callable on UPDATE or DELETE to reference OLD TABLE"); + if ( badNew ) + msg( Kind.ERROR, func.func, origin, + "Trigger must be callable on UPDATE or INSERT to reference NEW TABLE"); + } + + if ( isConstraint ) + { + if ( ! Called.AFTER.equals( _called) ) + msg( Kind.ERROR, func.func, origin, + "A constraint trigger must be an AFTER trigger"); + if ( ! Scope.ROW.equals( _scope) ) + msg( Kind.ERROR, func.func, origin, + "A constraint trigger must be FOR EACH ROW"); + if ( "".equals( _from) && ! "".equals( _fromSchema) ) + msg( Kind.ERROR, func.func, origin, + "To use fromSchema, specify a table name with from"); + } + else + { + if ( ! "".equals( _from) ) + msg( Kind.ERROR, func.func, origin, + "Only a constraint trigger can use 'from'"); + if ( ! 
"".equals( _fromSchema) ) + msg( Kind.ERROR, func.func, origin, + "Only a constraint trigger can use 'fromSchema'"); + } + + if ( "".equals( _name) ) + _name = TriggerNamer.synthesizeName( this); + return Set.of(); + } + + public String[] deployStrings() + { + StringBuilder sb = new StringBuilder(); + sb.append("CREATE "); + if ( isConstraint ) + { + sb.append("CONSTRAINT "); + } + sb.append("TRIGGER ").append(name()).append("\n\t"); + switch ( called() ) + { + case BEFORE: sb.append( "BEFORE " ); break; + case AFTER: sb.append( "AFTER " ); break; + case INSTEAD_OF: sb.append( "INSTEAD OF "); break; + } + int s = _events.length; + for ( Event e : _events ) + { + sb.append( e.toString()); + if ( Event.UPDATE.equals( e) && 0 < _columns.length ) + { + sb.append( " OF "); + int cs = _columns.length; + for ( String c : _columns ) + { + sb.append( c); + if ( 0 < -- cs ) + sb.append( ", "); + } + } + if ( 0 < -- s ) + sb.append( " OR "); + } + sb.append( "\n\tON "); + sb.append(qnameFrom(table(), schema())); + if ( ! "".equals( from()) ) + { + sb.append("\n\tFROM "); + sb.append(qnameFrom(from(), fromSchema())); + } + if ( isConstraint ) { + sb.append("\n\t"); + switch ( _constraint ) + { + case NOT_DEFERRABLE: + sb.append("NOT DEFERRABLE"); + break; + case INITIALLY_IMMEDIATE: + sb.append("DEFERRABLE INITIALLY IMMEDIATE"); + break; + case INITIALLY_DEFERRED: + sb.append("DEFERRABLE INITIALLY DEFERRED"); + break; + } + } + if ( refOld || refNew ) + { + sb.append( "\n\tREFERENCING"); + if ( refOld ) + sb.append( " OLD TABLE AS ").append( _tableOld); + if ( refNew ) + sb.append( " NEW TABLE AS ").append( _tableNew); + } + sb.append( "\n\tFOR EACH "); + sb.append( scope().toString()); + if ( ! "".equals( _when) ) + sb.append( "\n\tWHEN ").append( _when); + sb.append( "\n\tEXECUTE PROCEDURE "); + func.appendNameAndParams( sb, true, false, false); + sb.setLength( sb.length() - 1); // drop closing ) + s = _arguments.length; + for ( String a : _arguments ) + { + sb.append( "\n\t").append( DDRWriter.eQuote( a)); + if ( 0 < -- s ) + sb.append( ','); + } + sb.append( ')'); + + String comm = comment(); + if ( null == comm ) + return new String[] { sb.toString() }; + + return new String[] { + sb.toString(), + "COMMENT ON TRIGGER " + name() + " ON " + + qnameFrom(table(), schema()) + + "\nIS " + + DDRWriter.eQuote( comm) + }; + } + + public String[] undeployStrings() + { + StringBuilder sb = new StringBuilder(); + sb.append( "DROP TRIGGER ").append( name()).append( "\n\tON "); + sb.append(qnameFrom(table(), schema())); + return new String[] { sb.toString() }; + } + } + + /** + * Enumeration of different method "shapes" and the treatment of + * {@code type=} and {@code out=} annotation elements they need. + *

    + * Each member has a {@code setComposite} method that will be invoked + * by {@code checkOutType} if the method is judged to have a composite + * return type according to the annotations present. + *
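+ * Roughly, the Java method shapes involved (parameter lists elided) are
+ * <pre>{@code
+ *  boolean foo(..., ResultSet receiver)  // MAYBECOMPOSITE
+ *  Iterator<T> foo(...)                  // ITERATOR
+ *  ResultSetProvider foo(...)            // PROVIDER (or ResultSetHandle)
+ *  void foo(TriggerData td)              // OTHER (a trigger, for example)
+ * }</pre>
+ *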

    + * There is one case (no {@code out} and a {@code type} other than + * {@code RECORD}) where {@code checkOutType} will resolve the + * ambiguity by assuming composite, and will have set + * {@code assumedComposite} accordingly. The {@code MAYBECOMPOSITE} + * shape checks that assumption against the presence of a countervailing + * {@code SQLType} annotation, the {@code ITERATOR} shape clears it and + * behaves as noncomposite as always, and the {@code PROVIDER} shape + * clears it because that shape is unambiguously composite. + */ + enum MethodShape + { + /** + * Method has the shape {@code boolean foo(..., ResultSet)}, which + * could be an ordinary method with an incoming record parameter and + * boolean return, or a composite-returning method whose last + * parameter is a writable ResultSet supplied by PL/Java for the return value. + */ + MAYBECOMPOSITE((f,msgr) -> + { + boolean sqlTyped = null != + f.paramTypeAnnotations[f.paramTypeAnnotations.length - 1]; + if ( ! sqlTyped ) + f.complexViaInOut = true; + else if ( f.assumedComposite ) + f.assumedComposite = false; // SQLType cancels assumption + else + msgr.printMessage(Kind.ERROR, + "no @SQLType annotation may appear on " + + "the return-value ResultSet parameter", f.func); + }), + + /** + * Method has the shape {@code Iterator foo(...)} and represents + * a set-returning function with a non-composite return type. + *

    + * If the shape has been merely assumed composite, clear + * that flag and proceed as if it is not. Otherwise, issue an error + * that it can't be composite. + */ + ITERATOR((f,msgr) -> + { + if ( f.assumedComposite ) + f.assumedComposite = false; + else + msgr.printMessage(Kind.ERROR, + "the iterator style cannot return a row-typed result", + f.func); + }), + + /** + * Method has the shape {@code ResultSetProvider foo(...)} or + * {@code ResultSetHandle foo(...)} and represents + * a set-returning function with a non-composite return type. + *

    + * If the shape has been merely assumed composite, clear + * that flag; for this shape that assumption is not tentative. + */ + PROVIDER((f,msgr) -> f.assumedComposite = false), + + /** + * Method is something else (trigger, for example) for which no + * {@code type} or {@code out} is allowed. + *

    + * The {@code setComposite} method for this shape will never + * be called. + */ + OTHER(null); + + private final BiConsumer compositeSetter; + + MethodShape(BiConsumer setter) + { + compositeSetter = setter; + } + + void setComposite(FunctionImpl f, Messager msgr) + { + compositeSetter.accept(f, msgr); + } + } + + class FunctionImpl + extends AbstractAnnotationImpl + implements Function, Snippet, Commentable + { + public String type() { return _type; } + public String[] out() { return _out; } + public String name() { return _name; } + public String schema() { return _schema; } + public boolean variadic() { return _variadic; } + public OnNullInput onNullInput() { return _onNullInput; } + public Security security() { return _security; } + public Effects effects() { return _effects; } + public Trust trust() { return _trust; } + public Parallel parallel() { return _parallel; } + public boolean leakproof() { return _leakproof; } + public int cost() { return _cost; } + public int rows() { return _rows; } + public String[] settings() { return _settings; } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + public Trigger[] triggers() { return _triggers; } + public String language() + { + return _languageIdent.toString(); + } + + ExecutableElement func; + + public String _type; + public String[] _out; + public String _name; + public String _schema; + public boolean _variadic; + public OnNullInput _onNullInput; + public Security _security; + public Effects _effects; + public Trust _trust; + public Parallel _parallel; + public Boolean _leakproof; + int _cost; + int _rows; + public String[] _settings; + public String[] _provides; + public String[] _requires; + Trigger[] _triggers; + + public Identifier.Simple _languageIdent; + + boolean complexViaInOut = false; + boolean setof = false; + TypeMirror setofComponent = null; + boolean trigger = false; + TypeMirror returnTypeMapKey = null; + SQLType[] paramTypeAnnotations; + + DBType returnType; + DBType[] parameterTypes; + List> outParameters; + boolean assumedComposite = false; + boolean forceResultRecord = false; + + boolean subsumed = false; + + FunctionImpl(ExecutableElement e) + { + func = e; + } + + public void setType( Object o, boolean explicit, Element e) + { + if ( explicit ) + _type = (String)o; + } + + public void setOut( Object o, boolean explicit, Element e) + { + if ( explicit ) + _out = avToArray( o, String.class); + } + + public void setTrust( Object o, boolean explicit, Element e) + { + if ( explicit ) + _trust = Trust.valueOf( + ((VariableElement)o).getSimpleName().toString()); + } + + public void setLanguage( Object o, boolean explicit, Element e) + { + if ( explicit ) + _languageIdent = Identifier.Simple.fromJava((String)o); + } + + public void setCost( Object o, boolean explicit, Element e) + { + _cost = ((Integer)o).intValue(); + if ( _cost < 0 && explicit ) + throw new IllegalArgumentException( "cost must be nonnegative"); + } + + public void setRows( Object o, boolean explicit, Element e) + { + _rows = ((Integer)o).intValue(); + if ( _rows < 0 && explicit ) + throw new IllegalArgumentException( "rows must be nonnegative"); + } + + public void setTriggers( Object o, boolean explicit, Element e) + { + AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); + _triggers = new Trigger [ ams.length ]; + int i = 0; + for ( AnnotationMirror am : ams ) + { + TriggerImpl ti = new TriggerImpl( this, am); + populateAnnotationImpl( ti, e, am); + _triggers [ i++ ] = ti; + } + } 
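+
+ /*
+  * Illustration only (names and values invented): a method such as
+  *
+  *   @Function(schema="javatest", effects=Effects.IMMUTABLE,
+  *             provides="hello function")
+  *   public static String hello(String name) { return "Hello, " + name; }
+  *
+  * reaches this class with its annotation element values delivered through
+  * the setters above; the remaining analysis happens in characterize() below.
+  */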
+ + public Set characterize() + { + if ( "".equals( _name) ) + _name = func.getSimpleName().toString(); + + resolveLanguage(); + + Set mods = func.getModifiers(); + if ( ! mods.contains( Modifier.STATIC) ) + { + msg( Kind.ERROR, func, "A PL/Java function must be static"); + } + + TypeMirror ret = func.getReturnType(); + if ( ret.getKind().equals( TypeKind.ERROR) ) + { + msg( Kind.ERROR, func, + "Unable to resolve return type of function"); + return Set.of(); + } + + ExecutableType et = (ExecutableType)func.asType(); + List ptms = et.getParameterTypes(); + List typeArgs; + int arity = ptms.size(); + + /* + * Collect the parameter type annotations now, in case needed below + * in checkOutType(MAYBECOMPOSITE) to disambiguate. + */ + + collectParameterTypeAnnotations(); + + /* + * If a type= annotation is present, provisionally set returnType + * accordingly. Otherwise, leave it null, to be filled in by + * resolveParameterAndReturnTypes below. + */ + + if ( null != _type ) + returnType = DBType.fromSQLTypeAnnotation(_type); + + /* + * Take a first look according to the method's Java return type. + */ + if ( ret.getKind().equals( TypeKind.BOOLEAN) ) + { + if ( 0 < arity ) + { + TypeMirror tm = ptms.get( arity - 1); + if ( ! tm.getKind().equals( TypeKind.ERROR) + // unresolved things seem assignable to anything + && typu.isSameType( tm, TY_RESULTSET) ) + { + checkOutType(MethodShape.MAYBECOMPOSITE); + } + } + } + else if ( null != (typeArgs = specialization( ret, TY_ITERATOR)) ) + { + setof = true; + if ( 1 != typeArgs.size() ) + { + msg( Kind.ERROR, func, + "Need one type argument for Iterator return type"); + return Set.of(); + } + setofComponent = typeArgs.get( 0); + if ( null == setofComponent ) + { + msg( Kind.ERROR, func, + "Failed to find setof component type"); + return Set.of(); + } + checkOutType(MethodShape.ITERATOR); + } + else if ( typu.isAssignable( ret, TY_RESULTSETPROVIDER) + || typu.isAssignable( ret, TY_RESULTSETHANDLE) ) + { + setof = true; + checkOutType(MethodShape.PROVIDER); + } + else if ( ret.getKind().equals( TypeKind.VOID) && 1 == arity ) + { + TypeMirror tm = ptms.get( 0); + if ( ! tm.getKind().equals( TypeKind.ERROR) + // unresolved things seem assignable to anything + && typu.isSameType( tm, TY_TRIGGERDATA) ) + { + trigger = true; + checkOutType(MethodShape.OTHER); + } + } + + returnTypeMapKey = ret; + + if ( ! setof && -1 != rows() ) + msg( Kind.ERROR, func, + "ROWS specified on a function not returning SETOF"); + + if ( ! trigger && 0 != _triggers.length ) + msg( Kind.ERROR, func, + "a function with triggers needs void return and " + + "one TriggerData parameter"); + + /* + * Report any unmappable types now that could appear in + * deployStrings (return type or parameter types) ... so that the + * error messages won't be missing the source location, as they can + * with javac 7 throwing away symbol tables between rounds. + */ + resolveParameterAndReturnTypes(); + + if ( _variadic ) + { + int last = parameterTypes.length - 1; + if ( 0 > last || ! 
parameterTypes[last].isArray() ) + msg( Kind.ERROR, func, + "VARIADIC function must have a last, non-output " + + "parameter that is an array"); + } + + recordImplicitTags(); + + recordExplicitTags(_provides, _requires); + + for ( Trigger t : triggers() ) + ((TriggerImpl)t).characterize(); + return Set.of(this); + } + + void resolveLanguage() + { + if ( null != _trust && null != _languageIdent ) + msg( Kind.ERROR, func, "A PL/Java function may specify " + + "only one of trust, language"); + if ( null == _languageIdent ) + { + if ( null == _trust || Trust.SANDBOXED == _trust ) + _languageIdent = nameTrusted; + else + _languageIdent = nameUntrusted; + } + } + + /* + * Factored out of characterize() so it could be called if needed by + * BaseUDTFunctionImpl.characterize(), which does not need anything else + * from its super.characterize(). But for now it doesn't need this + * either; it knows what parameters the base UDT functions take, and it + * takes no heed of @SQLType annotations. Perhaps it should warn if such + * annotations are used, but that's for another day. + */ + void collectParameterTypeAnnotations() + { + List ves = func.getParameters(); + paramTypeAnnotations = new SQLType [ ves.size() ]; + int i = 0; + boolean anyOptional = false; + for ( VariableElement ve : ves ) + { + for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( ve) ) + { + if ( am.getAnnotationType().asElement().equals(AN_SQLTYPE) ) + { + SQLTypeImpl sti = new SQLTypeImpl(); + populateAnnotationImpl( sti, ve, am); + paramTypeAnnotations[i] = sti; + + if (null != sti._optional && null != sti._defaultValue) + msg(Kind.ERROR, ve, "Only one of optional= or " + + "defaultValue= may be given"); + + anyOptional |= sti.optional(); + } + } + ++ i; + } + + if ( anyOptional && OnNullInput.RETURNS_NULL.equals(_onNullInput) ) + msg(Kind.ERROR, func, "A PL/Java function with " + + "onNullInput=RETURNS_NULL may not have parameters with " + + "optional=true"); + } + + private static final int NOOUT = 0; + private static final int ONEOUT = 4; + private static final int MOREOUT = 8; + + private static final int NOTYPE = 0; + private static final int RECORDTYPE = 1; + private static final int OTHERTYPE = 2; + + /** + * Reads the tea leaves of the {@code type=} and {@code out=} + * annotation elements to decide whether the method has a composite + * or noncomposite return. + *

    + * This is complicated by the PostgreSQL behavior of treating a function + * declared with one {@code OUT} parameter, or as + * a one-element {@code TABLE} function, as not + * returning a row type. + *

    + * This method avoids rejecting the case of a one-element {@code out=} + * with an explicit {@code type=RECORD}, to provide a way to explicitly + * request composite behavior for that case, on the chance that some + * future PostgreSQL version may accept it, though as of this writing + * no current version does. + *
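+ * For illustration, the cases this method distinguishes include
+ * <pre>{@code
+ *  out={"x integer"}                  // returns integer, not a row type
+ *  out={"x integer","y text"}         // returns a row (RECORD) type
+ *  out={"x integer"}, type="RECORD"   // composite explicitly requested
+ * }</pre>
+ *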

    + * If the {@code MAYBECOMPOSITE} shape is used with a single {@code out} + * parameter, it is likely a mistake (what are the odds the developer + * wanted a function with a row-typed input parameter and a named out + * parameter of boolean type?), and will be rejected unless the + * {@code ResultSet} final parameter has been given an {@code SQLType} + * annotation. + */ + void checkOutType(MethodShape shape) + { + int out = + null == _out ? NOOUT : 1 == _out.length ? ONEOUT : MOREOUT; + + /* + * The caller will have set returnType from _type if present, + * or left it null otherwise. We know RECORD is a composite type; + * we don't presume here to know whether any other type is or not. + */ + int type = + null == returnType ? NOTYPE : + DT_RECORD.equals(returnType) ? RECORDTYPE : OTHERTYPE; + + if ( MethodShape.OTHER == shape && 0 != (out | type) ) + { + msg( Kind.ERROR, func, + "no type= or out= element may be applied to this method"); + return; + } + + switch ( out | type ) + { + case NOOUT | OTHERTYPE: + assumedComposite = true; // annotations not definitive; assume + shape.setComposite(this, msgr); + return; + case NOOUT | RECORDTYPE: + case MOREOUT | NOTYPE: + shape.setComposite(this, msgr); + return; + case ONEOUT | RECORDTYPE: // in case PostgreSQL one day allows this + forceResultRecord = true; + shape.setComposite(this, msgr); + return; + case ONEOUT | NOTYPE: + /* + * No special action needed here except for the MAYBECOMPOSITE + * or PROVIDER shapes, to check for likely mistakes. + */ + if ( MethodShape.MAYBECOMPOSITE == shape + && null == + paramTypeAnnotations[paramTypeAnnotations.length - 1] ) + { + msg(Kind.ERROR, func, + "a function with one declared OUT parameter returns " + + "it normally, not through an extra ResultSet " + + "parameter. If the trailing ResultSet parameter is " + + "intended as an input, it can be marked with an " + + "@SQLType annotation"); + } + else if ( MethodShape.PROVIDER == shape ) + { + msg(Kind.ERROR, func, + "a set-returning function with one declared OUT " + + "parameter must return an Iterator, not a " + + "ResultSetProvider or ResultSetHandle"); + } + return; + case NOOUT | NOTYPE: + /* + * No special action; MAYBECOMPOSITE will treat as noncomposite, + * ITERATOR and PROVIDER will behave as they always do. + */ + return; + case ONEOUT | OTHERTYPE: + msg( Kind.ERROR, func, + "no type= allowed here (the out parameter " + + "declares its own type)"); + return; + case MOREOUT | RECORDTYPE: + case MOREOUT | OTHERTYPE: + msg( Kind.ERROR, func, + "type= and out= may not be combined here"); + return; + default: + throw new AssertionError("unhandled case"); + } + } + + /** + * Return a stream of {@code ParameterInfo} 'records' for the function's + * parameters in order. + *

    + * If {@code paramTypeAnnotations} has not been set, every element in + * the stream will have null for {@code st}. + *

    + * If {@code parameterTypes} has not been set, every element in + * the stream will have null for {@code dt}. + */ + Stream parameterInfo() + { + if ( trigger ) + return Stream.empty(); + + ExecutableType et = (ExecutableType)func.asType(); + List tms = et.getParameterTypes(); + if ( complexViaInOut ) + tms = tms.subList( 0, tms.size() - 1); + + Iterator ves = + func.getParameters().iterator(); + + Supplier sts = + null == paramTypeAnnotations + ? () -> null + : Arrays.asList(paramTypeAnnotations).iterator()::next; + + Supplier dts = + null == parameterTypes + ? () -> null + : Arrays.asList(parameterTypes).iterator()::next; + + return tms.stream().map(tm -> + new ParameterInfo(tm, ves.next(), sts.get(), dts.get())); + } + + /** + * Create the {@code DBType}s to populate {@code returnType} and + * {@code parameterTypes}. + */ + void resolveParameterAndReturnTypes() + { + if ( null != returnType ) + /* it was already set from a type= attribute */; + else if ( null != setofComponent ) + returnType = tmpr.getSQLType( setofComponent, func); + else if ( setof ) + returnType = DT_RECORD; + else + returnType = tmpr.getSQLType( returnTypeMapKey, func); + + parameterTypes = parameterInfo() + .map(i -> tmpr.getSQLType(i.tm, i.ve, i.st, true, true)) + .toArray(DBType[]::new); + + if ( null != _out ) + { + outParameters = Arrays.stream(_out) + .map(DBType::fromNameAndType) + .collect(toList()); + if ( 1 < _out.length || forceResultRecord ) + returnType = DT_RECORD; + else + returnType = outParameters.get(0).getValue(); + } + } + + /** + * Record that this function provides itself, and requires its + * parameter and return types. + *

    + * Must be called before {@code recordExplicitTags}, which makes the + * provides and requires sets immutable. + */ + void recordImplicitTags() + { + Set provides = provideTags(); + Set requires = requireTags(); + + provides.add(new DependTag.Function( + qnameFrom(_name, _schema), parameterTypes)); + + DependTag t = returnType.dependTag(); + if ( null != t ) + requires.add(t); + + for ( DBType dbt : parameterTypes ) + { + t = dbt.dependTag(); + if ( null != t ) + requires.add(t); + } + + if ( null != outParameters ) + outParameters.stream() + .map(m -> m.getValue().dependTag()) + .filter(Objects::nonNull) + .forEach(requires::add); + } + + @Override + public void subsume() + { + subsumed = true; + } + + /** + * Append SQL syntax for the function's name (schema-qualified if + * appropriate) and parameters, either with any defaults indicated + * (for use in CREATE FUNCTION) or without (for use in DROP FUNCTION). + * + * @param sb StringBuilder in which to generate the SQL. + * @param names Whether to include the parameter names. + * @param outs Whether to include out parameters. + * @param dflts Whether to include the defaults, if any. + */ + void appendNameAndParams( + StringBuilder sb, boolean names, boolean outs, boolean dflts) + { + appendNameAndParams(sb, names, outs, dflts, + qnameFrom(name(), schema()), parameterInfo().collect(toList())); + } + + /** + * Internal version taking name and parameter stream as extra arguments + * so they can be overridden from {@link Transformed}. + */ + void appendNameAndParams( + StringBuilder sb, boolean names, boolean outs, boolean dflts, + Identifier.Qualified qname, + Iterable params) + { + sb.append(qname).append( '('); + appendParams( sb, names, outs, dflts, params); + // TriggerImpl relies on ) being the very last character + sb.append( ')'); + } + + /** + * Takes the parameter stream as an extra argument + * so it can be overridden from {@link Transformed}. + */ + void appendParams( + StringBuilder sb, boolean names, boolean outs, boolean dflts, + Iterable params) + { + int lengthOnEntry = sb.length(); + + Iterator iter = params.iterator(); + ParameterInfo i; + while ( iter.hasNext() ) + { + i = iter.next(); + + String name = i.name(); + + sb.append("\n\t"); + + if ( _variadic && ! iter.hasNext() ) + sb.append("VARIADIC "); + + if ( names ) + sb.append(name).append(' '); + + sb.append(i.dt.toString(dflts)); + + sb.append(','); + } + + if ( outs && null != outParameters ) + { + outParameters.forEach(e -> { + sb.append("\n\tOUT "); + if ( null != e.getKey() ) + sb.append(e.getKey()).append(' '); + sb.append(e.getValue().toString(false)).append(','); + }); + } + + if ( lengthOnEntry < sb.length() ) + sb.setLength(sb.length() - 1); // that last pesky comma + } + + String makeAS() + { + StringBuilder sb = new StringBuilder(); + if ( ! ( complexViaInOut || setof || trigger ) ) + sb.append( typu.erasure( func.getReturnType())).append( '='); + Element e = func.getEnclosingElement(); + // e was earlier checked and ensured to be a class or interface + sb.append( elmu.getBinaryName((TypeElement)e)).append( '.'); + sb.append( trigger ? func.getSimpleName() : func.toString()); + return sb.toString(); + } + + public String[] deployStrings() + { + return deployStrings( + qnameFrom(name(), schema()), parameterInfo().collect(toList()), + makeAS(), comment()); + } + + /** + * Internal version taking the function name, parameter stream, + * AS string, and comment (if any) as extra arguments so they can be + * overridden from {@link Transformed}. 
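+ *
+ * The statement assembled here has, roughly, this shape (all placeholders):
+ * <pre>{@code
+ *  CREATE OR REPLACE FUNCTION qname(params)
+ *      RETURNS rettype
+ *      LANGUAGE lang effects
+ *      AS 'javaReturnType=pkg.Class.method(javaParamTypes)'
+ * }</pre>
+ * with the optional clauses (LEAKPROOF, RETURNS NULL ON NULL INPUT, SECURITY,
+ * PARALLEL, COST, ROWS, SET) added as the annotation calls for them.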
+ */ + String[] deployStrings( + Identifier.Qualified qname, + Iterable params, String as, String comment) + { + ArrayList al = new ArrayList<>(); + StringBuilder sb = new StringBuilder(); + if ( assumedComposite ) + sb.append("/*\n * PL/Java generated this declaration assuming" + + "\n * a composite-returning function was intended." + + "\n * If a boolean function with a row-typed parameter" + + "\n * was intended, add any @SQLType annotation on the" + + "\n * ResultSet final parameter to make the intent clear." + + "\n */\n"); + if ( forceResultRecord ) + sb.append("/*\n * PL/Java generated this declaration for a" + + "\n * function with one OUT parameter that was annotated" + + "\n * to explicitly request treatment as a function that" + + "\n * returns RECORD. A given version of PostgreSQL might" + + "\n * not accept such a declaration. More at" + + "\n * https://www.postgresql.org/message-id/" + + "619BBE78.7040009%40anastigmatix.net" + + "\n */\n"); + sb.append( "CREATE OR REPLACE FUNCTION "); + appendNameAndParams( sb, true, true, true, qname, params); + sb.append( "\n\tRETURNS "); + if ( trigger ) + sb.append( DT_TRIGGER.toString()); + else + { + if ( setof ) + sb.append( "SETOF "); + sb.append( returnType); + } + sb.append( "\n\tLANGUAGE "); + sb.append( _languageIdent.toString()); + sb.append( ' ').append( effects()); + if ( leakproof() ) + sb.append( " LEAKPROOF"); + sb.append( '\n'); + if ( OnNullInput.RETURNS_NULL.equals( onNullInput()) ) + sb.append( "\tRETURNS NULL ON NULL INPUT\n"); + if ( Security.DEFINER.equals( security()) ) + sb.append( "\tSECURITY DEFINER\n"); + if ( ! Parallel.UNSAFE.equals( parallel()) ) + sb.append( "\tPARALLEL ").append( parallel()).append( '\n'); + if ( -1 != cost() ) + sb.append( "\tCOST ").append( cost()).append( '\n'); + if ( -1 != rows() ) + sb.append( "\tROWS ").append( rows()).append( '\n'); + for ( String s : settings() ) + sb.append( "\tSET ").append( s).append( '\n'); + sb.append( "\tAS ").append( DDRWriter.eQuote( as)); + al.add( sb.toString()); + + if ( null != comment ) + { + sb.setLength( 0); + sb.append( "COMMENT ON FUNCTION "); + appendNameAndParams( sb, true, false, false, qname, params); + sb.append( "\nIS "); + sb.append( DDRWriter.eQuote( comment)); + al.add( sb.toString()); + } + + for ( Trigger t : triggers() ) + for ( String s : ((TriggerImpl)t).deployStrings() ) + al.add( s); + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return undeployStrings( + qnameFrom(name(), schema()), parameterInfo().collect(toList())); + } + + String[] undeployStrings( + Identifier.Qualified qname, + Iterable params) + { + if ( subsumed ) + return new String[0]; + + String[] rslt = new String [ 1 + triggers().length ]; + int i = rslt.length - 1; + for ( Trigger t : triggers() ) + for ( String s : ((TriggerImpl)t).undeployStrings() ) + rslt [ --i ] = s; + + StringBuilder sb = new StringBuilder(); + sb.append( "DROP FUNCTION "); + appendNameAndParams( sb, true, false, false, qname, params); + rslt [ rslt.length - 1 ] = sb.toString(); + return rslt; + } + + /** + * Test whether the type {@code tm} is, directly or indirectly, + * a specialization of generic type {@code dt}. + * @param tm a type to be checked + * @param dt known generic type to check for + * @return null if {@code tm} does not extend {@code dt}, otherwise the + * list of type arguments with which it specializes {@code dt} + */ + List specialization( + TypeMirror tm, DeclaredType dt) + { + if ( ! 
typu.isAssignable( typu.erasure( tm), dt) ) + return null; + + List pending = new LinkedList<>(); + pending.add( tm); + while ( ! pending.isEmpty() ) + { + tm = pending.remove( 0); + if ( typu.isSameType( typu.erasure( tm), dt) ) + return ((DeclaredType)tm).getTypeArguments(); + pending.addAll( typu.directSupertypes( tm)); + } + /* + * This is a can't-happen: tm is assignable to dt but has no + * supertype that's dt? Could throw an AssertionError, but returning + * an empty list will lead the caller to report an error, and that + * will give more information about the location in the source being + * compiled. + */ + return Collections.emptyList(); + } + + private Map m_variants= new HashMap<>(); + + /** + * Return an instance representing a transformation of this function, + * or null on second and subsequent requests for the same + * transformation (so the caller will not register the variant more + * than once). + */ + Transformed transformed( + Identifier.Qualified qname, + boolean commute, boolean negate) + { + Transformed prospect = new Transformed(qname, commute, negate); + DependTag.Function tag = + (DependTag.Function)prospect.provideTags().iterator().next(); + Transformed found = m_variants.putIfAbsent(tag, prospect); + if ( null == found ) + return prospect; + return null; + } + + class Transformed implements Snippet + { + final Identifier.Qualified m_qname; + final boolean m_commute; + final boolean m_negate; + final String m_comment; + + Transformed( + Identifier.Qualified qname, + boolean commute, boolean negate) + { + EnumSet how = + EnumSet.noneOf(OperatorPath.Transform.class); + if ( commute ) + how.add(OperatorPath.Transform.COMMUTATION); + if ( negate ) + how.add(OperatorPath.Transform.NEGATION); + assert ! how.isEmpty() : "no transformation to apply"; + m_qname = requireNonNull(qname); + m_commute = commute; + m_negate = negate; + m_comment = "Function automatically derived by " + how + + " from " + qnameFrom( + FunctionImpl.this.name(), FunctionImpl.this.schema()); + } + + List parameterInfo() + { + List params = + FunctionImpl.this.parameterInfo().collect(toList()); + if ( ! m_commute ) + return params; + assert 2 == params.size() : "commute with arity != 2"; + Collections.reverse(params); + return params; + } + + @Override + public Set characterize() + { + return Set.of(); + } + + @Override + public Identifier.Simple implementorName() + { + return FunctionImpl.this.implementorName(); + } + + @Override + public Set requireTags() + { + return FunctionImpl.this.requireTags(); + } + + @Override + public Set provideTags() + { + DBType[] sig = + parameterInfo().stream() + .map(p -> p.dt) + .toArray(DBType[]::new); + return Set.of(new DependTag.Function(m_qname, sig)); + } + + @Override + public String[] deployStrings() + { + String as = Stream.of( + m_commute ? "commute" : (String)null, + m_negate ? 
"negate" : (String)null) + .filter(Objects::nonNull) + .collect(joining(",", "[", "]")) + + FunctionImpl.this.makeAS(); + + return FunctionImpl.this.deployStrings( + m_qname, parameterInfo(), as, m_comment); + } + + @Override + public String[] undeployStrings() + { + return FunctionImpl.this.undeployStrings( + m_qname, parameterInfo()); + } + } + } + + static enum BaseUDTFunctionID + { + INPUT("in", null, "pg_catalog.cstring", "pg_catalog.oid", "integer"), + OUTPUT("out", "pg_catalog.cstring", (String[])null), + RECEIVE("recv", null, "pg_catalog.internal","pg_catalog.oid","integer"), + SEND("send", "pg_catalog.bytea", (String[])null); + BaseUDTFunctionID( String suffix, String ret, String... param) + { + this.suffix = suffix; + this.param = null == param ? null : + Arrays.stream(param) + .map(DBType::fromSQLTypeAnnotation) + .toArray(DBType[]::new); + this.ret = null == ret ? null : + new DBType.Named(Identifier.Qualified.nameFromJava(ret)); + } + private String suffix; + private DBType[] param; + private DBType ret; + String getSuffix() { return suffix; } + DBType[] getParam( BaseUDTImpl u) + { + if ( null != param ) + return param; + return new DBType[] { u.qname }; + } + DBType getRet( BaseUDTImpl u) + { + if ( null != ret ) + return ret; + return u.qname; + } + } + + class BaseUDTFunctionImpl extends FunctionImpl + { + BaseUDTFunctionImpl( + BaseUDTImpl ui, TypeElement te, BaseUDTFunctionID id) + { + super( null); + this.ui = ui; + this.te = te; + this.id = id; + + returnType = id.getRet( ui); + parameterTypes = id.getParam( ui); + + _type = returnType.toString(); + _name = Identifier.Simple.fromJava(ui.name()) + .concat("_", id.getSuffix()).toString(); + _schema = ui.schema(); + _variadic = false; + _cost = -1; + _rows = -1; + _onNullInput = OnNullInput.CALLED; + _security = Security.INVOKER; + _effects = Effects.VOLATILE; + _parallel = Parallel.UNSAFE; + _leakproof = false; + _settings = new String[0]; + _triggers = new Trigger[0]; + _provides = _settings; + _requires = _settings; + } + + BaseUDTImpl ui; + TypeElement te; + BaseUDTFunctionID id; + + @Override + public String[] deployStrings() + { + return deployStrings( + qnameFrom(name(), schema()), + null, // parameter iterable unused in appendParams below + "UDT[" + elmu.getBinaryName(te) + "] " + id.name(), + comment()); + } + + @Override + public String[] undeployStrings() + { + return undeployStrings( + qnameFrom(name(), schema()), + null); // parameter iterable unused in appendParams below + } + + @Override + void appendParams( + StringBuilder sb, boolean names, boolean outs, boolean dflts, + Iterable params) + { + sb.append( + Arrays.stream(id.getParam( ui)) + .map(Object::toString) + .collect(joining(", ")) + ); + } + + StringBuilder appendTypeOp( StringBuilder sb) + { + sb.append( id.name()).append( " = "); + return sb.append(qnameFrom(name(), schema())); + } + + @Override + public Set characterize() + { + resolveLanguage(); + recordImplicitTags(); + recordExplicitTags(_provides, _requires); + return Set.of(this); + } + + public void setType( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "The type of a UDT function may not be changed"); + } + + public void setOut( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "The type of a UDT function may not be changed"); + } + + public void setVariadic( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, "A UDT function is never variadic"); + } + + public void setRows( Object o, 
boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "The rows attribute of a UDT function may not be set"); + } + + public void setProvides( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "A UDT function does not have its own provides/requires"); + } + + public void setRequires( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "A UDT function does not have its own provides/requires"); + } + + public void setTriggers( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "A UDT function may not have associated triggers"); + } + + public void setImplementor( Object o, boolean explicit, Element e) + { + if ( explicit ) + msg( Kind.ERROR, e, + "A UDT function does not have its own implementor"); + } + + public String implementor() + { + return ui.implementor(); + } + + public String derivedComment( Element e) + { + String comm = super.derivedComment( e); + if ( null != comm ) + return comm; + return id.name() + " method for type " + ui.qname; + } + } + + abstract class AbstractUDTImpl + extends AbstractAnnotationImpl + implements Snippet, Commentable + { + public String name() { return _name; } + public String schema() { return _schema; } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + + public String[] _provides; + public String[] _requires; + public String _name; + public String _schema; + + TypeElement tclass; + + DBType qname; + + AbstractUDTImpl(TypeElement e) + { + tclass = e; + + if ( ! typu.isAssignable( e.asType(), TY_SQLDATA) ) + { + msg( Kind.ERROR, e, "A PL/Java UDT must implement %s", + TY_SQLDATA); + } + + ExecutableElement niladicCtor = huntFor( + constructorsIn( tclass.getEnclosedElements()), null, false, + null); + + if ( null == niladicCtor ) + { + msg( Kind.ERROR, tclass, + "A PL/Java UDT must have a public no-arg constructor"); + } + } + + protected void setQname() + { + if ( "".equals( _name) ) + _name = tclass.getSimpleName().toString(); + + qname = new DBType.Named(qnameFrom(_name, _schema)); + + if ( ! tmpr.mappingsFrozen() ) + tmpr.addMap( tclass.asType(), qname); + } + + protected void addComment( ArrayList al) + { + String comm = comment(); + if ( null == comm ) + return; + al.add( "COMMENT ON TYPE " + qname + "\nIS " + + DDRWriter.eQuote( comm)); + } + } + + class MappedUDTImpl + extends AbstractUDTImpl + implements MappedUDT + { + public String[] structure() { return _structure; } + + String[] _structure; + + public void setStructure( Object o, boolean explicit, Element e) + { + if ( explicit ) + _structure = avToArray( o, String.class); + } + + MappedUDTImpl(TypeElement e) + { + super( e); + } + + public void registerMapping() + { + setQname(); + } + + public Set characterize() + { + if ( null != structure() ) + { + DependTag t = qname.dependTag(); + if ( null != t ) + provideTags().add(t); + } + recordExplicitTags(_provides, _requires); + return Set.of(this); + } + + public String[] deployStrings() + { + ArrayList al = new ArrayList<>(); + if ( null != structure() ) + { + StringBuilder sb = new StringBuilder(); + sb.append( "CREATE TYPE ").append( qname).append( " AS ("); + int i = structure().length; + for ( String s : structure() ) + sb.append( "\n\t").append( s).append( + ( 0 < -- i ) ? 
',' : '\n'); + sb.append( ')'); + al.add( sb.toString()); + } + al.add( "SELECT sqlj.add_type_mapping(" + + DDRWriter.eQuote( qname.toString()) + ", " + + DDRWriter.eQuote( elmu.getBinaryName(tclass)) + ')'); + addComment( al); + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + ArrayList al = new ArrayList<>(); + al.add( "SELECT sqlj.drop_type_mapping(" + + DDRWriter.eQuote( qname.toString()) + ')'); + if ( null != structure() ) + al.add( "DROP TYPE " + qname); + return al.toArray( new String [ al.size() ]); + } + } + + class BaseUDTImpl + extends AbstractUDTImpl + implements BaseUDT + { + class Shell implements Snippet + { + @Override + public Identifier.Simple implementorName() + { + return BaseUDTImpl.this.implementorName(); + } + + @Override + public String[] deployStrings() + { + return new String[] { "CREATE TYPE " + qname }; + } + + @Override + public String[] undeployStrings() + { + return new String[0]; + } + + @Override + public Set provideTags() + { + return Set.of(); + } + + @Override + public Set requireTags() + { + return Set.of(); + } + + @Override + public Set characterize() + { + return Set.of(); + } + } + + public String typeModifierInput() { return _typeModifierInput; } + public String typeModifierOutput() { return _typeModifierOutput; } + public String analyze() { return _analyze; } + public int internalLength() { return _internalLength; } + public boolean passedByValue() { return _passedByValue; } + public Alignment alignment() { return _alignment; } + public Storage storage() { return _storage; } + public String like() { return _like; } + public char category() { return _category; } + public boolean preferred() { return _preferred; } + public String defaultValue() { return _defaultValue; } + public String element() { return _element; } + public char delimiter() { return _delimiter; } + public boolean collatable() { return _collatable; } + + BaseUDTFunctionImpl in, out, recv, send; + + public String _typeModifierInput; + public String _typeModifierOutput; + public String _analyze; + int _internalLength; + public Boolean _passedByValue; + Alignment _alignment; + Storage _storage; + public String _like; + char _category; + public Boolean _preferred; + String _defaultValue; + public String _element; + char _delimiter; + public Boolean _collatable; + + boolean lengthExplicit; + boolean alignmentExplicit; + boolean storageExplicit; + boolean categoryExplicit; + boolean delimiterExplicit; + + public void setInternalLength( Object o, boolean explicit, Element e) + { + _internalLength = (Integer)o; + lengthExplicit = explicit; + } + + public void setAlignment( Object o, boolean explicit, Element e) + { + _alignment = Alignment.valueOf( + ((VariableElement)o).getSimpleName().toString()); + alignmentExplicit = explicit; + } + + public void setStorage( Object o, boolean explicit, Element e) + { + _storage = Storage.valueOf( + ((VariableElement)o).getSimpleName().toString()); + storageExplicit = explicit; + } + + public void setDefaultValue( Object o, boolean explicit, Element e) + { + if ( explicit ) + _defaultValue = (String)o; // "" could be a real default value + } + + public void setCategory( Object o, boolean explicit, Element e) + { + _category = (Character)o; + categoryExplicit = explicit; + } + + public void setDelimiter( Object o, boolean explicit, Element e) + { + _delimiter = (Character)o; + delimiterExplicit = explicit; + } + + BaseUDTImpl(TypeElement e) + { + super( e); + } + + void registerFunctions() + { + setQname(); + 
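/*
 * Illustrative sketch of a mapped UDT that MappedUDTImpl above processes.
 * Hypothetical user code, assuming the
 * org.postgresql.pljava.annotation.MappedUDT annotation; the class and
 * column names are invented.
 *
 *   import java.sql.SQLData;
 *   import java.sql.SQLException;
 *   import java.sql.SQLInput;
 *   import java.sql.SQLOutput;
 *   import org.postgresql.pljava.annotation.MappedUDT;
 *
 *   @MappedUDT(schema = "javatest", name = "complex",
 *       structure = {"re float8", "im float8"})
 *   public class Complex implements SQLData
 *   {
 *       private double re;
 *       private double im;
 *       private String typeName = "javatest.complex";
 *
 *       public Complex() { } // the required public no-arg constructor
 *
 *       @Override
 *       public String getSQLTypeName() { return typeName; }
 *
 *       @Override
 *       public void readSQL(SQLInput in, String typeName) throws SQLException
 *       {
 *           this.typeName = typeName;
 *           re = in.readDouble();
 *           im = in.readDouble();
 *       }
 *
 *       @Override
 *       public void writeSQL(SQLOutput out) throws SQLException
 *       {
 *           out.writeDouble(re);
 *           out.writeDouble(im);
 *       }
 *   }
 *
 * deployStrings() above then emits roughly CREATE TYPE javatest.complex AS
 * (re float8, im float8) followed by SELECT sqlj.add_type_mapping(...)
 * binding the SQL type to the class's binary name, and undeployStrings()
 * reverses both.
 */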
+ ExecutableElement instanceReadSQL = huntFor( + methodsIn( tclass.getEnclosedElements()), "readSQL", false, + TY_VOID, TY_SQLINPUT, TY_STRING); + + ExecutableElement instanceWriteSQL = huntFor( + methodsIn( tclass.getEnclosedElements()), "writeSQL", false, + TY_VOID, TY_SQLOUTPUT); + + ExecutableElement instanceToString = huntFor( + methodsIn( tclass.getEnclosedElements()), "toString", false, + TY_STRING); + + ExecutableElement staticParse = huntFor( + methodsIn( tclass.getEnclosedElements()), "parse", true, + tclass.asType(), TY_STRING, TY_STRING); + + if ( null == staticParse ) + { + msg( Kind.ERROR, tclass, + "A PL/Java UDT must have a public static " + + "parse(String,String) method that returns the UDT"); + } + else + { + in = new BaseUDTFunctionImpl( + this, tclass, BaseUDTFunctionID.INPUT); + putSnippet( staticParse, in); + } + + out = new BaseUDTFunctionImpl( + this, tclass, BaseUDTFunctionID.OUTPUT); + putSnippet( null != instanceToString ? instanceToString : out, out); + + recv = new BaseUDTFunctionImpl( + this, tclass, BaseUDTFunctionID.RECEIVE); + putSnippet( null != instanceReadSQL ? instanceReadSQL : recv, recv); + + send = new BaseUDTFunctionImpl( + this, tclass, BaseUDTFunctionID.SEND); + putSnippet( null != instanceWriteSQL ? instanceWriteSQL : send, + send); + } + + public Set characterize() + { + if ( "".equals( typeModifierInput()) + && ! "".equals( typeModifierOutput()) ) + msg( Kind.ERROR, tclass, + "UDT typeModifierOutput useless without typeModifierInput"); + + if ( 1 > internalLength() && -1 != internalLength() ) + msg( Kind.ERROR, tclass, + "UDT internalLength must be positive, or -1 for varying"); + + if ( passedByValue() && + ( 8 < internalLength() || -1 == internalLength() ) ) + msg( Kind.ERROR, tclass, + "Only a UDT of fixed length <= 8 can be passed by value"); + + if ( -1 == internalLength() && + -1 == alignment().compareTo( Alignment.INT4) ) + msg( Kind.ERROR, tclass, + "A variable-length UDT must have alignment at least INT4"); + + if ( -1 != internalLength() && Storage.PLAIN != storage() ) + msg( Kind.ERROR, tclass, + "Storage for a fixed-length UDT must be PLAIN"); + + // see PostgreSQL backend/commands/typecmds.c "must be simple ASCII" + if ( 32 > category() || category() > 126 ) + msg( Kind.ERROR, tclass, + "UDT category must be a printable ASCII character"); + + if ( categoryExplicit && Character.isUpperCase(category()) ) + if ( null == PredefinedCategory.valueOf(category()) ) + msg( Kind.WARNING, tclass, + "upper-case letters are reserved for PostgreSQL's " + + "predefined UDT categories, but '%c' is not recognized", + category()); + + recordImplicitTags(); + recordExplicitTags(_provides, _requires); + + return Set.of(this); + } + + void recordImplicitTags() + { + Set provides = provideTags(); + Set requires = requireTags(); + + provides.add(qname.dependTag()); + + for ( BaseUDTFunctionImpl f : List.of(in, out, recv, send) ) + requires.add(new DependTag.Function( + qnameFrom(f._name, f._schema), f.parameterTypes)); + + String s = typeModifierInput(); + if ( ! s.isEmpty() ) + requires.add(new DependTag.Function( + qnameFrom(s), SIG_TYPMODIN)); + + s = typeModifierOutput(); + if ( ! s.isEmpty() ) + requires.add(new DependTag.Function( + qnameFrom(s), SIG_TYPMODOUT)); + + s = analyze(); + if ( ! 
s.isEmpty() ) + requires.add(new DependTag.Function(qnameFrom(s), SIG_ANALYZE)); + } + + public String[] deployStrings() + { + ArrayList al = new ArrayList<>(); + + StringBuilder sb = new StringBuilder(); + sb.append( "CREATE TYPE ").append( qname).append( " (\n\t"); + in.appendTypeOp( sb).append( ",\n\t"); + out.appendTypeOp( sb).append( ",\n\t"); + recv.appendTypeOp( sb).append( ",\n\t"); + send.appendTypeOp( sb); + + if ( ! "".equals( typeModifierInput()) ) + sb.append( ",\n\tTYPMOD_IN = ").append( typeModifierInput()); + + if ( ! "".equals( typeModifierOutput()) ) + sb.append( ",\n\tTYPMOD_OUT = ").append( typeModifierOutput()); + + if ( ! "".equals( analyze()) ) + sb.append( ",\n\tANALYZE = ").append( analyze()); + + if ( lengthExplicit || "".equals( like()) ) + sb.append( ",\n\tINTERNALLENGTH = ").append( + -1 == internalLength() ? "VARIABLE" + : String.valueOf( internalLength())); + + if ( passedByValue() ) + sb.append( ",\n\tPASSEDBYVALUE"); + + if ( alignmentExplicit || "".equals( like()) ) + sb.append( ",\n\tALIGNMENT = ").append( alignment().name()); + + if ( storageExplicit || "".equals( like()) ) + sb.append( ",\n\tSTORAGE = ").append( storage().name()); + + if ( ! "".equals( like()) ) + sb.append( ",\n\tLIKE = ").append( like()); + + if ( categoryExplicit ) + sb.append( ",\n\tCATEGORY = ").append( + DDRWriter.eQuote( String.valueOf( category()))); + + if ( preferred() ) + sb.append( ",\n\tPREFERRED = true"); + + if ( null != defaultValue() ) + sb.append( ",\n\tDEFAULT = ").append( + DDRWriter.eQuote( defaultValue())); + + if ( ! "".equals( element()) ) + sb.append( ",\n\tELEMENT = ").append( element()); + + if ( delimiterExplicit ) + sb.append( ",\n\tDELIMITER = ").append( + DDRWriter.eQuote( String.valueOf( delimiter()))); + + if ( collatable() ) + sb.append( ",\n\tCOLLATABLE = true"); + + al.add( sb.append( "\n)").toString()); + addComment( al); + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return new String[] + { + "DROP TYPE " + qname + " CASCADE" + }; + } + + @Override + public Vertex breakCycle(Vertex v, boolean deploy) + { + assert this == v.payload; + + /* + * Find the entries in my adjacency list that are implicated in the + * cycle (that is, that precede, perhaps transitively, me). + */ + Vertex[] vs = v.precedesTransitively(v); + + assert null != vs && 0 < vs.length : "breakCycle not in a cycle"; + + if ( vs.length < v.indegree ) + return null; // other non-cyclic edges not satisfied yet + + if ( deploy ) + { + Vertex breaker = new Vertex<>(new Shell()); + v.transferSuccessorsTo(breaker, vs); + return breaker; + } + + for ( Vertex subsumed : vs ) + subsumed.payload.subsume(); + + /* + * Set indegree now to zero, so that when the subsumed snippets are + * themselves emitted, they will not decrement it to zero and cause + * this to be scheduled again. 
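/*
 * Illustrative sketch of the base-UDT contract registerFunctions() above
 * hunts for, and that deployStrings() turns into the CREATE TYPE just
 * built. Hypothetical user code, assuming the
 * org.postgresql.pljava.annotation.BaseUDT annotation; class and type
 * names are invented.
 *
 *   import java.sql.SQLData;
 *   import java.sql.SQLException;
 *   import java.sql.SQLInput;
 *   import java.sql.SQLOutput;
 *   import org.postgresql.pljava.annotation.BaseUDT;
 *
 *   @BaseUDT(schema = "javatest", name = "shout")
 *   public class Shout implements SQLData
 *   {
 *       private String value = "";
 *       private String typeName = "javatest.shout";
 *
 *       public Shout() { }
 *
 *       // becomes the type's INPUT function, javatest.shout_in
 *       public static Shout parse(String input, String typeName)
 *       throws SQLException
 *       {
 *           Shout s = new Shout();
 *           s.value = input.toUpperCase();
 *           s.typeName = typeName;
 *           return s;
 *       }
 *
 *       // becomes the OUTPUT function, javatest.shout_out
 *       @Override
 *       public String toString() { return value; }
 *
 *       // becomes the RECEIVE function, javatest.shout_recv
 *       @Override
 *       public void readSQL(SQLInput in, String typeName) throws SQLException
 *       {
 *           this.typeName = typeName;
 *           value = in.readString();
 *       }
 *
 *       // becomes the SEND function, javatest.shout_send
 *       @Override
 *       public void writeSQL(SQLOutput out) throws SQLException
 *       {
 *           out.writeString(value);
 *       }
 *
 *       @Override
 *       public String getSQLTypeName() { return typeName; }
 *   }
 *
 * The four generated declarations are what INPUT/OUTPUT/RECEIVE/SEND above
 * refer to; the static parse(String,String) method is the one member
 * registerFunctions() insists on.
 */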
+ */ + v.indegree = 0; + + return v; // use this vertex itself in the undeploy case + } + } + + class CastImpl + extends Repeatable + implements Cast, Snippet, Commentable + { + CastImpl(Element e, AnnotationMirror am) + { + super(e, am); + } + + public String from() { return _from; } + public String to() { return _to; } + public Cast.Path path() { return _path; } + public Cast.Application application() { return _application; } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + + public String _from; + public String _to; + public Cast.Path _path; + public Cast.Application _application; + public String[] _provides; + public String[] _requires; + + FunctionImpl func; + DBType fromType; + DBType toType; + + public void setPath( Object o, boolean explicit, Element e) + { + if ( explicit ) + _path = Path.valueOf( + ((VariableElement)o).getSimpleName().toString()); + } + + public Set characterize() + { + boolean ok = true; + + if ( ElementKind.METHOD.equals(m_targetElement.getKind()) ) + { + func = getSnippet(m_targetElement, FunctionImpl.class, + () -> (FunctionImpl)null); + if ( null == func ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A method annotated with @Cast must also have @Function" + ); + ok = false; + } + } + + if ( null == func && "".equals(_from) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Cast not annotating a method must specify from=" + ); + ok = false; + } + + if ( null == func && "".equals(_to) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Cast not annotating a method must specify to=" + ); + ok = false; + } + + if ( null == func && null == _path ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Cast not annotating a method, and without path=, " + + "is not yet supported" + ); + ok = false; + } + + if ( ok ) + { + fromType = ("".equals(_from)) + ? func.parameterTypes[0] + : DBType.fromSQLTypeAnnotation(_from); + + toType = ("".equals(_to)) + ? func.returnType + : DBType.fromSQLTypeAnnotation(_to); + } + + if ( null != _path ) + { + if ( ok && Path.BINARY == _path && fromType.equals(toType) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A cast with from and to types the same can only " + + "apply a type modifier; path=BINARY will have " + + "no effect"); + ok = false; + } + } + else if ( null != func ) + { + int nparams = func.parameterTypes.length; + + if ( ok && 2 > nparams && fromType.equals(toType) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A cast with from and to types the same can only " + + "apply a type modifier, therefore must have at least " + + "two parameters"); + ok = false; + } + + if ( 1 > nparams || nparams > 3 ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A cast function must have 1, 2, or 3 parameters"); + ok = false; + } + + if (1 < nparams && ! DT_INTEGER.equals(func.parameterTypes[1])) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Parameter 2 of a cast function must have integer type" + ); + ok = false; + } + + if (3 == nparams && ! DT_BOOLEAN.equals(func.parameterTypes[2])) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Parameter 3 of a cast function must have boolean type" + ); + ok = false; + } + } + + if ( ! 
ok ) + return Set.of(); + + recordImplicitTags(); + recordExplicitTags(_provides, _requires); + return Set.of(this); + } + + void recordImplicitTags() + { + Set requires = requireTags(); + + DependTag dt = fromType.dependTag(); + if ( null != dt ) + requires.add(dt); + + dt = toType.dependTag(); + if ( null != dt ) + requires.add(dt); + + if ( null == _path ) + { + dt = func.provideTags().stream() + .filter(DependTag.Function.class::isInstance) + .findAny().get(); + requires.add(dt); + } + } + + public String[] deployStrings() + { + List al = new ArrayList<>(); + + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE CAST (") + .append(fromType).append(" AS ").append(toType).append(")\n\t"); + + if ( Path.BINARY == _path ) + sb.append("WITHOUT FUNCTION"); + else if ( Path.INOUT == _path ) + sb.append("WITH INOUT"); + else + { + sb.append("WITH FUNCTION "); + func.appendNameAndParams(sb, false, false, false); + } + + switch ( _application ) + { + case ASSIGNMENT: sb.append("\n\tAS ASSIGNMENT"); break; + case EXPLICIT: break; + case IMPLICIT: sb.append("\n\tAS IMPLICIT"); + } + + al.add(sb.toString()); + + if ( null != comment() ) + al.add( + "COMMENT ON CAST (" + + fromType + " AS " + toType + ") IS " + + DDRWriter.eQuote(comment())); + + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return new String[] + { + "DROP CAST (" + fromType + " AS " + toType + ")" + }; + } + } + + /* + * Called by processRepeatable for each @Operator processed. + * This happens before characterize, but after populating, so the + * operator's name and commutator/negator/synthetic elements can be + * inspected. All operators annotating a given element e are processed + * consecutively, and followed by a call with the same e and null snip. + * + * This will accumulate the snippets onto two lists, for non-synthetic and + * synthetic ones and, on the final call, process the lists to find possible + * paths from non-synthetic to synthetic ones via commutation and/or + * negation. The possible paths will be recorded on each synthetic operator. + * They will have to be confirmed during characterize after things like + * operand types and arity have been resolved. + */ + void operatorPreSynthesize( Element e, OperatorImpl snip) + { + if ( ! ElementKind.METHOD.equals(e.getKind()) ) + { + if ( null != snip ) + putSnippet( snip, (Snippet)snip); + return; + } + + if ( null != snip ) + { + if ( snip.selfCommutator || snip.twinCommutator ) + snip.commutator = snip.qname; + + (snip.isSynthetic ? m_synthetic : m_nonSynthetic).add(snip); + return; + } + + /* + * Initially: + * processed: is empty + * ready: contains all non-synthetic snippets + * pending: contains all synthetic snippets + * Step: + * A snippet s is removed from ready and added to processed. + * If s.commutator or s.negator matches a synthetic snippet in pending, + * a corresponding path is recorded on that snippet. If it is + * the first path recorded on that snippet, the snippet is moved + * to ready. 
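/*
 * Illustrative sketch of the two @Cast forms handled by CastImpl above.
 * Hypothetical user code, assuming the org.postgresql.pljava.annotation.Cast
 * annotation with the from=/to=/path=/application= elements read above; the
 * method is invented, and whether such a cast is useful on a real server is
 * beside the point.
 *
 *   // Function-backed cast: from= and to= default to the method's first
 *   // parameter type and its return type, so this leads to roughly
 *   // CREATE CAST (boolean AS integer) WITH FUNCTION ... AS ASSIGNMENT
 *   @Cast(application = Cast.Application.ASSIGNMENT)
 *   @Function(schema = "javatest", effects = Function.Effects.IMMUTABLE)
 *   public static int asInteger(boolean b)
 *   {
 *       return b ? 1 : 0;
 *   }
 *
 * A @Cast that does not annotate a method must spell out from=, to= and
 * path=, where path = Cast.Path.BINARY emits CREATE CAST ... WITHOUT
 * FUNCTION and path = Cast.Path.INOUT emits CREATE CAST ... WITH INOUT.
 */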
+ */ + + List processed = + new ArrayList<>(m_nonSynthetic.size() + m_synthetic.size()); + Queue ready = new LinkedList<>(m_nonSynthetic); + LinkedList pending = new LinkedList<>(m_synthetic); + m_nonSynthetic.clear(); + m_synthetic.clear(); + + while ( null != (snip = ready.poll()) ) + { + processed.add(snip); + if ( null != snip.commutator ) + { + ListIterator it = pending.listIterator(); + while ( it.hasNext() ) + { + OperatorImpl other = it.next(); + if ( maybeAddPath(snip, other, + OperatorPath.Transform.COMMUTATION) ) + { + it.remove(); + ready.add(other); + } + } + } + if ( null != snip.negator ) + { + ListIterator it = pending.listIterator(); + while ( it.hasNext() ) + { + OperatorImpl other = it.next(); + if ( maybeAddPath(snip, other, + OperatorPath.Transform.NEGATION) ) + { + it.remove(); + ready.add(other); + } + } + } + } + + if ( ! pending.isEmpty() ) + msg(Kind.ERROR, e, "Cannot synthesize operator(s) (%s)", + pending.stream() + .map(o -> o.qname.toString()) + .collect(joining(" "))); + + for ( OperatorImpl s : processed ) + putSnippet( s, (Snippet)s); + } + + boolean maybeAddPath( + OperatorImpl from, OperatorImpl to, OperatorPath.Transform how) + { + if ( ! to.isSynthetic ) + return false; // don't add paths to a non-synthetic operator + + /* + * setSynthetic will have left synthetic null in the synthetic=TWIN + * case. That case imposes more constraints on what paths can be added: + * an acceptable path must involve commutation (and only commutation) + * from another operator that will have a function name (so, either + * a non-synthetic one, or a synthetic one given an actual name, other + * than TWIN). In the latter case, copy the name here (for the former, + * it will be copied from the function's name, in characterize()). + */ + boolean syntheticTwin = null == to.synthetic; + + switch ( how ) + { + case COMMUTATION: + if ( ! from.commutator.equals(to.qname) ) + return false; // this is not the operator you're looking for + if ( null != to.commutator && ! to.commutator.equals(from.qname) ) + return false; // you're not the one it's looking for + break; + case NEGATION: + if ( ! from.negator.equals(to.qname) ) + return false; // move along + if ( null != to.negator && ! to.negator.equals(from.qname) ) + return false; // move along + if ( syntheticTwin ) + return false; + break; + } + + if ( syntheticTwin ) + { + /* + * We will apply commutation to 'from' (the negation case + * would have been rejected above). Either 'from' is nonsynthetic + * and its function name will be copied in characterize(), or it is + * synthetic and must have a name or we reject it here. If not + * rejected, copy the name. + */ + if ( from.isSynthetic ) + { + if ( null == from.synthetic ) + return false; + to.synthetic = from.synthetic; + } + } + + if ( null == to.paths ) + to.paths = new ArrayList<>(); + + if ( ! from.isSynthetic ) + to.paths.add(new OperatorPath(from, from, null, EnumSet.of(how))); + else + { + for ( OperatorPath path : from.paths ) + { + to.paths.add(new OperatorPath( + path.base, from, path.fromBase, EnumSet.of(how))); + } + } + + return true; + } + + /** + * Why has {@code Set} or at least {@code EnumSet} not got this? 
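/*
 * The symmetric difference is what OperatorPath (below) uses to compose
 * transform sets: applying COMMUTATION or NEGATION twice cancels out.
 * A small worked example using the Transform enum defined below:
 *
 *   EnumSet<OperatorPath.Transform> baseToProximate =
 *       EnumSet.of(OperatorPath.Transform.COMMUTATION);
 *   EnumSet<OperatorPath.Transform> proximateToNew =
 *       EnumSet.of(OperatorPath.Transform.COMMUTATION,
 *           OperatorPath.Transform.NEGATION);
 *
 *   // COMMUTATION cancels, NEGATION survives:
 *   // symmetricDifference(baseToProximate, proximateToNew)
 *   //     .equals(EnumSet.of(OperatorPath.Transform.NEGATION))
 */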
+ */ + static > EnumSet symmetricDifference( + EnumSet a, EnumSet b) + { + EnumSet result = a.clone(); + result.removeAll(b); + b = b.clone(); + b.removeAll(a); + result.addAll(b); + return result; + } + + List m_nonSynthetic = new ArrayList<>(); + List m_synthetic = new ArrayList<>(); + + static class OperatorPath + { + OperatorImpl base; + OperatorImpl proximate; + EnumSet fromBase; + EnumSet fromProximate; + + enum Transform { NEGATION, COMMUTATION } + + OperatorPath( + OperatorImpl base, OperatorImpl proximate, + EnumSet baseToProximate, + EnumSet proximateToNew) + { + this.base = base; + this.proximate = proximate; + fromProximate = proximateToNew.clone(); + + if ( base == proximate ) + fromBase = fromProximate; + else + fromBase = symmetricDifference(baseToProximate, proximateToNew); + } + + public String toString() + { + return + base.commentDropForm() + " " + fromBase + + (base == proximate + ? "" + : " (... " + proximate.commentDropForm() + + " " + fromProximate); + } + } + + class OperatorImpl + extends Repeatable + implements Operator, Snippet, Commentable + { + OperatorImpl(Element e, AnnotationMirror am) + { + super(e, am); + } + + public String[] name() { return qstrings(qname); } + public String left() { return operand(0); } + public String right() { return operand(1); } + public String[] function() { return qstrings(funcName); } + public String[] synthetic() { return qstrings(synthetic); } + public String[] commutator() { return qstrings(commutator); } + public String[] negator() { return qstrings(negator); } + public boolean hashes() { return _hashes; } + public boolean merges() { return _merges; } + public String[] restrict() { return qstrings(restrict); } + public String[] join() { return qstrings(join); } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + + public String[] _provides; + public String[] _requires; + public boolean _hashes; + public boolean _merges; + + Identifier.Qualified qname; + DBType[] operands = { null, null }; + FunctionImpl func; + Identifier.Qualified funcName; + Identifier.Qualified commutator; + Identifier.Qualified negator; + Identifier.Qualified restrict; + Identifier.Qualified join; + Identifier.Qualified synthetic; + boolean isSynthetic; + boolean selfCommutator; + boolean twinCommutator; + List paths; + + private String operand(int i) + { + return null == operands[i] ? null : operands[i].toString(); + } + + public void setName( Object o, boolean explicit, Element e) + { + qname = operatorNameFrom(avToArray( o, String.class)); + } + + public void setLeft( Object o, boolean explicit, Element e) + { + if ( explicit ) + operands[0] = DBType.fromSQLTypeAnnotation((String)o); + } + + public void setRight( Object o, boolean explicit, Element e) + { + if ( explicit ) + operands[1] = DBType.fromSQLTypeAnnotation((String)o); + } + + public void setFunction( Object o, boolean explicit, Element e) + { + if ( explicit ) + funcName = qnameFrom(avToArray( o, String.class)); + } + + public void setSynthetic( Object o, boolean explicit, Element e) + { + if ( ! explicit ) + return; + + /* + * Use isSynthetic to indicate that synthetic= has been used at all. + * Set synthetic to the supplied qname only if it is a qname, and + * not the distinguished value TWIN. + * + * Most of the processing below only needs to look at isSynthetic. + * The TWIN case, recognized by isSynthetic && null == synthetic, + * will be handled late in the game by copying the base function's + * qname. 
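/*
 * Illustrative sketch of the TWIN case just described. Hypothetical user
 * code, assuming the org.postgresql.pljava.annotation.Operator annotation
 * and its SELF/TWIN string constants; operator, schema, and method names
 * are invented. Two operators of the same name ride on one method whose
 * operand types differ; the second has no Java method of its own and is
 * synthesized by commutation, its backing function reusing the base
 * function's name with the parameter types swapped:
 *
 *   @Operator(name = "~#~", commutator = Operator.TWIN)
 *   @Operator(name = "~#~", synthetic = Operator.TWIN)
 *   @Function(schema = "javatest", effects = Function.Effects.IMMUTABLE)
 *   public static boolean lengthIs(int len, String s)
 *   {
 *       return s.length() == len;
 *   }
 *
 * The first annotation declares the (integer, text) operator; the twin
 * covering (text, integer) is produced by a COMMUTATION path recorded in
 * operatorPreSynthesize() above, and each names the other as commutator.
 */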
+ */ + + isSynthetic = true; + String[] ss = avToArray( o, String.class); + if ( 1 != ss.length || ! TWIN.equals(ss[0]) ) + synthetic = qnameFrom(ss); + } + + public void setCommutator( Object o, boolean explicit, Element e) + { + if ( ! explicit ) + return; + + String[] ss = avToArray( o, String.class); + if ( 1 == ss.length ) + { + if ( SELF.equals(ss[0]) ) + { + selfCommutator = true; + return; + } + if ( TWIN.equals(ss[0]) ) + { + twinCommutator = true; + return; + } + } + commutator = operatorNameFrom(ss); + } + + public void setNegator( Object o, boolean explicit, Element e) + { + if ( explicit ) + negator = operatorNameFrom(avToArray( o, String.class)); + } + + public void setRestrict( + Object o, boolean explicit, Element e) + { + if ( explicit ) + restrict = qnameFrom(avToArray( o, String.class)); + } + + public void setJoin( + Object o, boolean explicit, Element e) + { + if ( explicit ) + join = qnameFrom(avToArray( o, String.class)); + } + + public Set characterize() + { + boolean ok = true; + Snippet syntheticFunction = null; + + if ( ElementKind.METHOD.equals(m_targetElement.getKind()) ) + { + func = getSnippet(m_targetElement, FunctionImpl.class, + () -> (FunctionImpl)null); + } + + if ( isSynthetic ) + { + if ( null != funcName ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator may not specify both function= and " + + "synthetic=" + ); + ok = false; + } + funcName = synthetic; // can be null (the TWIN case) + } + + if ( null == func && null == funcName && ! isSynthetic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator not annotating a method must specify function=" + ); + ok = false; + } + + if ( null == func ) + { + if ( null == operands[0] && null == operands[1] ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator not annotating a method must specify " + + "left= or right= or both" + ); + ok = false; + } + } + else + { + Identifier.Qualified fn = + qnameFrom(func.name(), func.schema()); + + if ( null == funcName ) + funcName = fn; + else if ( ! funcName.equals(fn) && ! isSynthetic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator annotates a method but function= gives a " + + "different name" + ); + ok = false; + } + + long explicit = + Arrays.stream(operands).filter(Objects::nonNull).count(); + + if ( 0 != explicit && isSynthetic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator with synthetic= must not specify " + + "operand types" + ); + ok = false; + } + + if ( 0 == explicit ) + { + int nparams = func.parameterTypes.length; + if ( 1 > nparams || nparams > 2 ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "method annotated with @Operator must take one " + + "or two parameters" + ); + ok = false; + } + if ( 1 == nparams ) + operands[1] = func.parameterTypes[0]; + else + System.arraycopy(func.parameterTypes,0, operands,0,2); + } + else if ( explicit != func.parameterTypes.length ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator annotates a method but specifies " + + "a different number of operands" + ); + ok = false; + } + else if ( 2 == explicit + && ! Arrays.equals(operands, func.parameterTypes) + || 1 == explicit + && ! Arrays.asList(operands) + .contains(func.parameterTypes[0]) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator annotates a method but specifies " + + "different operand types" + ); + ok = false; + } + } + + /* + * At this point, ok ==> there is a non-null funcName ... 
UNLESS + * isSynthetic is true, synthetic=TWIN was given, and we are not + * annotating a method (that last condition is currently not + * supported, so we could in fact rely on having a funcName here, + * but that condition may be worth supporting in the future, so + * better to keep the exception in mind). + */ + + if ( ! ok ) + return Set.of(); + + long arity = + Arrays.stream(operands).filter(Objects::nonNull).count(); + + if ( 1 == arity && null == operands[1] ) + { + msg(Kind.WARNING, m_targetElement, m_origin, + "Right unary (postfix) operators are deprecated and will " + + "be removed in PostgreSQL version 14." + ); + } + + if ( null != commutator ) + { + if ( 2 != arity ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "unary @Operator cannot have a commutator" + ); + ok = false; + } + else if ( selfCommutator && ! operands[0].equals(operands[1]) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator with different left and right operand " + + "types cannot have commutator=SELF" + ); + ok = false; + } + else if ( twinCommutator && operands[0].equals(operands[1]) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator with matching left and right operand " + + "types cannot have commutator=TWIN" + ); + ok = false; + } + } + + boolean knownNotBoolean = + null != func && ! DT_BOOLEAN.equals(func.returnType); + + if ( null != negator ) + { + if ( knownNotBoolean ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "negator= only belongs on a boolean @Operator" + ); + ok = false; + } + else if ( negator.equals(qname) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Operator can never be its own negator" + ); + ok = false; + } + } + + boolean knownNotBinaryBoolean = 2 != arity || knownNotBoolean; + boolean knownVolatile = + null != func && Function.Effects.VOLATILE == func.effects(); + boolean operandTypesDiffer = + 2 == arity && ! operands[0].equals(operands[1]); + boolean selfCommutates = + null != commutator && commutator.equals(qname); + + ok &= Stream.of( + _hashes ? "hashes" : null, + _merges ? "merges" : null) + .filter(Objects::nonNull) + .map(s -> + { + boolean inner_ok = true; + if ( knownNotBinaryBoolean ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "%s= only belongs on a boolean " + + "binary @Operator", s + ); + inner_ok = false; + } + if ( null == commutator ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "%s= requires that the @Operator " + + "have a commutator", s + ); + inner_ok = false; + } + else if ( ! (operandTypesDiffer || selfCommutates) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "%s= requires the @Operator to be its own" + + "commutator as its operand types are the same", s + ); + inner_ok = false; + } + if ( knownVolatile ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "%s= requires an underlying function " + + "declared IMMUTABLE or STABLE", s + ); + inner_ok = false; + } + return inner_ok; + }) + .allMatch(t -> t); + + if ( null != restrict && knownNotBinaryBoolean ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "restrict= only belongs on a boolean binary @Operator" + ); + ok = false; + } + + if ( null != join && knownNotBinaryBoolean ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "join= only belongs on a boolean binary @Operator" + ); + ok = false; + } + + if ( ! ok ) + return Set.of(); + + if ( isSynthetic ) + { + if ( null == func ) + { + /* + * It could be possible to relax this requirement if there + * is a need, but this way is easier. 
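/*
 * Illustrative sketch of an explicitly paired operator family passing the
 * checks above, and what deployStrings() below builds from it. Hypothetical
 * user code, assuming org.postgresql.pljava.annotation.Operator; operator
 * and method names are invented, while eqsel/eqjoinsel are the stock
 * PostgreSQL selectivity estimators.
 *
 *   @Operator(name = "===", commutator = Operator.SELF, negator = "!==",
 *       hashes = true, restrict = "eqsel", join = "eqjoinsel")
 *   @Operator(name = "!==", synthetic = "javatest.notsamelength")
 *   @Function(schema = "javatest", effects = Function.Effects.IMMUTABLE)
 *   public static boolean samelength(String a, String b)
 *   {
 *       return a.length() == b.length();
 *   }
 *
 * The first operator leads to roughly CREATE OPERATOR === (PROCEDURE =
 * javatest.samelength, LEFTARG/RIGHTARG = the mapped String type,
 * COMMUTATOR = ===, NEGATOR = !==, RESTRICT = eqsel, JOIN = eqjoinsel,
 * HASHES). The second is synthesized by a NEGATION path; its negator is
 * filled in from the candidate set computed below, and
 * javatest.notsamelength is emitted as a derived function.
 */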
+ */ + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator annotation must appear " + + "on the method to be used as the base"); + ok = false; + } + + if ( paths.isEmpty() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s has no derivation path " + + "involving negation or commutation from another " + + "operator", qnameUnwrapped()); + /* + * If no paths at all, return empty from here; no point in + * further checks. + */ + return Set.of(); + } + + /* + * Check for conditions where deriving by commutation wouldn't + * make sense. Any of these three conditions will trigger the + * test of available paths. The conditions are rechecked but the + * third one is changed, so either of the first two will always + * preclude commutation, but ! operandTypesDiffer only does if + * the synthetic function's name will be the same as the base's. + * (If the types were different, PostgreSQL overloading would + * allow the functions to share a name, but that's not possible + * if the types are the same.) In those cases, any commutation + * paths are filtered out; if no path remains, that's an error. + */ + if ( 2 != arity || selfCommutator || ! operandTypesDiffer ) + { + List filtered = + paths.stream() + .filter( + p -> ! p.fromBase.contains( + OperatorPath.Transform.COMMUTATION)) + .collect(toList()); + if ( 2 != arity || selfCommutator + || null == synthetic || + synthetic.equals(qnameFrom(func.name(), func.schema()))) + { + if ( filtered.isEmpty() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s cannot be another " + + "operator's commutator, but found only " + + "path(s) involving commutation: %s", + qnameUnwrapped(), paths.toString()); + ok = false; + } + else + paths = filtered; + } + } + + ok &= paths.stream().collect( + groupingBy(p -> p.base, + mapping(p -> p.fromBase, toSet()))) + .entrySet().stream() + .filter(e -> 1 < e.getValue().size()) + .map(e -> + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s found paths with " + + "different transforms %s from same base %s", + qnameUnwrapped(), + e.getValue(), e.getKey().qnameUnwrapped()); + return false; + }) + .allMatch(t -> t); + + ok &= paths.stream().collect( + groupingBy(p -> p.proximate, + mapping(p -> p.fromProximate, toSet()))) + .entrySet().stream() + .filter(e -> 1 < e.getValue().size()) + .map(e -> + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s found paths with " + + "different transforms %s from %s", + qnameUnwrapped(), + e.getValue(), e.getKey().qnameUnwrapped()); + return false; + }) + .allMatch(t -> t); + + Set> + commutatorCandidates = + paths.stream() + .filter( + p -> p.fromProximate.contains( + OperatorPath.Transform.COMMUTATION)) + .map(p -> p.proximate.qname) + .collect(toSet()); + if ( null == commutator && 0 < commutatorCandidates.size() ) + { + if ( 1 == commutatorCandidates.size() ) + commutator = commutatorCandidates.iterator().next(); + else + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s has muliple commutator " + + "candidates %s", + qnameUnwrapped(), commutatorCandidates); + ok = false; + } + } + + Set> + negatorCandidates = + paths.stream() + .filter( + p -> p.fromProximate.contains( + OperatorPath.Transform.NEGATION)) + .map(p -> p.proximate.qname) + .collect(toSet()); + if ( null == negator && 0 < negatorCandidates.size() ) + { + if ( 1 == negatorCandidates.size() ) + negator = negatorCandidates.iterator().next(); + else + { + msg(Kind.ERROR, m_targetElement, m_origin, + 
"Synthetic operator %s has muliple negator " + + "candidates %s", + qnameUnwrapped(), negatorCandidates); + ok = false; + } + } + + /* + * Filter paths to only those based on an operator that is built + * over this method. (That's currently guaranteed by the way + * operatorPreSynthesize generates paths, but may as well check + * here to ensure sanity during future maintenance.) + * + * For synthetic=TWIN (represented here by null==synthetic), + * also filter out paths that don't involve commutation (without + * it, the synthetic function would collide with the base one). + */ + + boolean nonCommutedOK = null != synthetic; + + paths = paths.stream() + .filter( + p -> p.base.func == func + && (nonCommutedOK || p.fromBase.contains( + OperatorPath.Transform.COMMUTATION)) + ).collect(toList()); + + if ( 0 == paths.size() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Synthetic operator %s has no derivation path " + + "from an operator that is based on this method%s", + qnameUnwrapped(), + nonCommutedOK ? "" : " and involves commutation"); + ok = false; + } + + if ( ! ok ) + return Set.of(); + + /* + * Select a base. Could there be more than one? As the checks + * for transform inconsistencies above found none, we will + * assume any should be ok, and choose one semi-arbitrarily. + */ + + OperatorPath selected = + paths.stream() + .sorted( + Comparator.comparingInt( + p -> p.fromBase.size()) + .thenComparingInt( + p -> p.fromBase.stream() + .mapToInt(Enum::ordinal) + .max().getAsInt()) + .thenComparing(p -> p.base.qnameUnwrapped())) + .findFirst().get(); + + /* + * At last, the possibly null funcName (synthetic=TWIN case) + * can be fixed up. + */ + if ( null == synthetic ) + { + FunctionImpl f = selected.base.func; + funcName = synthetic = qnameFrom(f.name(), f.schema()); + } + + replaceCommentIfDerived("Operator " + qnameUnwrapped() + + " automatically derived by " + + selected.fromBase + " from " + + selected.base.qnameUnwrapped()); + + boolean commute = selected.fromBase + .contains(OperatorPath.Transform.COMMUTATION); + boolean negate = selected.fromBase + .contains(OperatorPath.Transform.NEGATION); + + if ( operandTypesDiffer && commute ) + { + DBType t = operands[0]; + operands[0] = operands[1]; + operands[1] = t; + } + + syntheticFunction = + func.transformed(synthetic, commute, negate); + } + + recordImplicitTags(); + recordExplicitTags(_provides, _requires); + return null == syntheticFunction + ? Set.of(this) : Set.of(syntheticFunction, this); + } + + void recordImplicitTags() + { + Set provides = provideTags(); + Set requires = requireTags(); + + provides.add(new DependTag.Operator(qname, operands)); + + /* + * Commutator and negator often involve cycles. PostgreSQL already + * has its own means of breaking them, so it is not necessary here + * even to declare dependencies based on them. + * + * There is also, for now, no point in declaring dependencies on + * selectivity estimators; they can't be written in Java, so they + * won't be products of this compilation. + * + * So, just require the operand types and the function. 
+ */ + + Arrays.stream(operands) + .filter(Objects::nonNull) + .map(DBType::dependTag) + .filter(Objects::nonNull) + .forEach(requires::add); + + if ( null != func && null == synthetic ) + { + func.provideTags().stream() + .filter(DependTag.Function.class::isInstance) + .forEach(requires::add); + } + else + { + requires.add(new DependTag.Function(funcName, + Arrays.stream(operands) + .filter(Objects::nonNull) + .toArray(DBType[]::new))); + } + } + + /** + * Just to keep things interesting, a schema-qualified operator name is + * wrapped in OPERATOR(...) pretty much everywhere, except as the guest + * of honor in a CREATE OPERATOR or DROP OPERATOR, where the unwrapped + * form is needed. + */ + private String qnameUnwrapped() + { + String local = qname.local().toString(); + Identifier.Simple qualifier = qname.qualifier(); + return null == qualifier ? local : qualifier + "." + local; + } + + /** + * An operator is identified this way in a COMMENT or DROP. + */ + private String commentDropForm() + { + return qnameUnwrapped() + " (" + + (null == operands[0] ? "NONE" : operands[0]) + ", " + + (null == operands[1] ? "NONE" : operands[1]) + ")"; + } + + public String[] deployStrings() + { + List al = new ArrayList<>(); + + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE OPERATOR ").append(qnameUnwrapped()); + sb.append(" (\n\tPROCEDURE = ").append(funcName); + + if ( null != operands[0] ) + sb.append(",\n\tLEFTARG = ").append(operands[0]); + + if ( null != operands[1] ) + sb.append(",\n\tRIGHTARG = ").append(operands[1]); + + if ( null != commutator ) + sb.append(",\n\tCOMMUTATOR = ").append(commutator); + + if ( null != negator ) + sb.append(",\n\tNEGATOR = ").append(negator); + + if ( null != restrict ) + sb.append(",\n\tRESTRICT = ").append(restrict); + + if ( null != join ) + sb.append(",\n\tJOIN = ").append(join); + + if ( _hashes ) + sb.append(",\n\tHASHES"); + + if ( _merges ) + sb.append(",\n\tMERGES"); + + sb.append(')'); + + al.add(sb.toString()); + if ( null != comment() ) + al.add( + "COMMENT ON OPERATOR " + commentDropForm() + " IS " + + DDRWriter.eQuote(comment())); + + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return new String[] + { + "DROP OPERATOR " + commentDropForm() + }; + } + } + + class AggregateImpl + extends Repeatable + implements Aggregate, Snippet, Commentable + { + AggregateImpl(Element e, AnnotationMirror am) + { + super(e, am); + } + + public String[] name() { return qstrings(qname); } + public String[] arguments() { return argsOut(aggregateArgs); } + public String[] directArguments() { return argsOut(directArgs); } + public boolean hypothetical() { return _hypothetical; } + public boolean[] variadic() { return _variadic; } + public Plan[] plan() { return new Plan[]{_plan}; } + public Plan[] movingPlan() { return _movingPlan; } + public Function.Parallel parallel() { return _parallel; } + public String[] sortOperator() { return qstrings(sortop); } + public String[] provides() { return _provides; } + public String[] requires() { return _requires; } + + public boolean _hypothetical; + public boolean[] _variadic = {false, false}; + public Plan _plan; + public Plan[] _movingPlan; + public Function.Parallel _parallel; + public String[] _provides; + public String[] _requires; + + FunctionImpl func; + Identifier.Qualified qname; + List> aggregateArgs; + List> directArgs; + Identifier.Qualified sortop; + static final int DIRECT_ARGS = 0; // index into _variadic[] + static final int AGG_ARGS = 1; // likewise + 
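/*
 * Illustrative sketch of the simplest @Aggregate form this class handles.
 * Hypothetical user code, assuming the org.postgresql.pljava.annotation
 * .Aggregate annotation with a nested Plan annotation carrying the
 * initialState=/accumulate=/finish= elements read here; names are invented.
 * The annotation rides on the accumulate function, so the state type, the
 * aggregated argument list, and the plan's accumulate entry are all
 * inferred from the method signature:
 *
 *   @Aggregate(name = "longest",
 *       plan = @Aggregate.Plan(initialState = "0"))
 *   @Function(schema = "javatest", effects = Function.Effects.IMMUTABLE,
 *       onNullInput = Function.OnNullInput.RETURNS_NULL)
 *   public static int longerof(int state, String value)
 *   {
 *       return Math.max(state, value.length());
 *   }
 *
 * The first parameter supplies stateType, the remaining parameter becomes
 * the one aggregated argument, and initialState= is needed here because
 * the accumulator RETURNS NULL ON NULL INPUT and its first aggregated
 * argument's type differs from the state type (the check applied further
 * below).
 */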
boolean directVariadicExplicit; + + private List> + argsIn(String[] names) + { + return Arrays.stream(names) + .map(DBType::fromNameAndType) + .collect(toList()); + } + + private String[] + argsOut(List> names) + { + return names.stream() + .map(e -> e.getKey() + " " + e.getValue()) + .toArray(String[]::new); + } + + @Override + public String derivedComment( Element e) + { + /* + * When this annotation targets a TYPE, just as a + * place to hang it, there's no particular reason to believe a + * doc comment on the type is a good choice for this aggregate. + * When the annotation is on a method, the chances are better. + */ + if ( ElementKind.METHOD.equals(e.getKind()) ) + return super.derivedComment(e); + return null; + } + + public void setName( Object o, boolean explicit, Element e) + { + if ( explicit ) + qname = qnameFrom(avToArray( o, String.class)); + } + + public void setArguments( Object o, boolean explicit, Element e) + { + if ( explicit ) + aggregateArgs = argsIn( avToArray( o, String.class)); + } + + public void setDirectArguments( Object o, boolean explicit, Element e) + { + if ( explicit ) + directArgs = argsIn( avToArray( o, String.class)); + } + + public void setSortOperator( Object o, boolean explicit, Element e) + { + if ( explicit ) + sortop = operatorNameFrom(avToArray( o, String.class)); + } + + public void setVariadic( Object o, boolean explicit, Element e) + { + if ( ! explicit ) + return; + + Boolean[] a = avToArray( o, Boolean.class); + + if ( 1 > a.length || a.length > 2 ) + throw new IllegalArgumentException( + "supply only boolean or {boolean,boolean} for variadic"); + + if ( ! Arrays.asList(a).contains(true) ) + throw new IllegalArgumentException( + "supply variadic= only if aggregated arguments, direct " + + "arguments, or both, are variadic"); + + _variadic[AGG_ARGS] = a[a.length - 1]; + if ( 2 == a.length ) + { + directVariadicExplicit = true; + _variadic[DIRECT_ARGS] = a[0]; + } + } + + public void setPlan( Object o, boolean explicit, Element e) + { + _plan = new Plan(); // always a plan, even if members uninitialized + + if ( explicit ) + _plan = planFrom( _plan, o, e, "plan"); + } + + public void setMovingPlan( Object o, boolean explicit, Element e) + { + if ( ! explicit ) + return; + + _movingPlan = new Plan[1]; + _movingPlan [ 0 ] = planFrom( new Moving(), o, e, "movingPlan"); + } + + Plan planFrom( Plan p, Object o, Element e, String which) + { + AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); + + if ( 1 != ams.length ) + throw new IllegalArgumentException( + which + " must be given exactly one @Plan"); + + populateAnnotationImpl( p, e, ams[0]); + return p; + } + + public Set characterize() + { + boolean ok = true; + boolean orderedSet = null != directArgs; + boolean moving = null != _movingPlan; + boolean checkAccumulatorSig = false; + boolean checkFinisherSig = false; + boolean unary = false; + + if ( ElementKind.METHOD.equals(m_targetElement.getKind()) ) + { + func = getSnippet(m_targetElement, FunctionImpl.class, + () -> (FunctionImpl)null); + if ( null == func ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "A method annotated with @Aggregate must " + + "also have @Function" + ); + ok = false; + } + } + + if ( null != func ) + { + Identifier.Qualified funcName = + qnameFrom(func.name(), func.schema()); + boolean inferAccumulator = + null == _plan.accumulate || null == aggregateArgs; + boolean inferFinisher = + null == _plan.finish && ! 
inferAccumulator; + boolean stateTypeExplicit = false; + + if ( null == qname ) + { + + if ( inferFinisher && 1 == aggregateArgs.size() + && 1 == func.parameterTypes.length + && func.parameterTypes[0] == + aggregateArgs.get(0).getValue() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Default name %s for this aggregate would " + + "collide with finish function; use name= to " + + "specify a name", funcName + ); + ok = false; + } + else + qname = funcName; + } + + if ( 1 > func.parameterTypes.length ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Function with no arguments cannot be @Aggregate " + + "accumulate or finish function" + ); + ok = false; + } + else if ( null == _plan.stateType ) + { + _plan.stateType = func.parameterTypes[0]; + if (null != _movingPlan + && null == _movingPlan[0].stateType) + _movingPlan[0].stateType = func.parameterTypes[0]; + } + else + stateTypeExplicit = true; + + if ( inferAccumulator || inferFinisher ) + { + if ( ok ) + { + if ( inferAccumulator ) + { + if ( null == aggregateArgs ) + { + aggregateArgs = + func.parameterInfo() + .skip(1) // skip the state argument + .map(pi -> + (Map.Entry) + new AbstractMap.SimpleImmutableEntry<>( + Identifier.Simple.fromJava( + pi.name() + ), + pi.dt + ) + ) + .collect(toList()); + } + else + checkAccumulatorSig = true; + _plan.accumulate = funcName; + if ( null != _movingPlan + && null == _movingPlan[0].accumulate ) + _movingPlan[0].accumulate = funcName; + } + else // inferFinisher + { + _plan.finish = funcName; + if ( null != _movingPlan + && null == _movingPlan[0].finish ) + _movingPlan[0].finish = funcName; + } + } + + if ( stateTypeExplicit + && ! _plan.stateType.equals(func.parameterTypes[0]) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "First function argument does not match " + + "stateType specified with @Aggregate" + ); + ok = false; + } + } + else if ( funcName.equals(_plan.accumulate) ) + checkAccumulatorSig = true; + else if ( funcName.equals(_plan.finish) ) + checkFinisherSig = true; + else + { + msg(Kind.WARNING, m_targetElement, m_origin, + "@Aggregate annotation on a method not recognized " + + "as either the accumulate or the finish function " + + "for the aggregate"); + } + + // If the method is the accumulator and is RETURNS_NULL, ensure + // there is either an initialState or a first aggregate arg that + // matches the stateType. + if ( ok && ( inferAccumulator || checkAccumulatorSig ) ) + { + if ( Function.OnNullInput.RETURNS_NULL == func.onNullInput() + && ( 0 == aggregateArgs.size() + || ! 
_plan.stateType.equals( + aggregateArgs.get(0).getValue()) ) + && null == _plan._initialState ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate without initialState= must have " + + "either a first argument matching the stateType " + + "or an accumulate method with onNullInput=CALLED."); + ok = false; + } + } + } + + if ( null == qname ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate missing name="); + ok = false; + } + + if ( null == aggregateArgs ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate missing arguments="); + ok = false; + } + else + unary = 1 == aggregateArgs.size(); + + if ( null == _plan.stateType ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate missing stateType="); + ok = false; + } + + if ( null == _plan.accumulate ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate plan missing accumulate="); + ok = false; + } + + // Could check argument count against FUNC_MAX_ARGS, but that would + // hardcode an assumed value for PostgreSQL's FUNC_MAX_ARGS. + + // Check that, if a stateType is polymorphic, there are compatible + // polymorphic arg types? Not today. + + // If a plan has no initialState, then either the accumulate + // function must NOT be RETURNS NULL ON NULL INPUT, or the first + // aggregated argument type must be the same as the state type. + // The type check is easy, but the returnsNull check on the + // accumulate function would require looking up the function (and + // still we wouldn't know, if it's not seen in this compilation). + // For another day. + + // Allow hypothetical only for ordered-set aggregate. + if ( _hypothetical && ! orderedSet ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "hypothetical=true is only allowed for an ordered-set " + + "aggregate (one with directArguments specified, " + + "even if only {})"); + ok = false; + } + + // Allow two-element variadic= only for ordered-set aggregate. + if ( directVariadicExplicit && ! orderedSet ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Two values for variadic= are only allowed for an " + + "ordered-set aggregate (one with directArguments " + + "specified, even if only {})"); + ok = false; + } + + // Require a movingPlan to have a remove function. + if ( moving && null == _movingPlan[0].remove ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "a movingPlan must include a remove function"); + ok = false; + } + + // Checks if the aggregated argument list is declared variadic. + // The last element must be an array type or "any"; an ordered-set + // aggregate allows only one argument and it must be "any". + if ( _variadic[AGG_ARGS] ) + { + if ( 1 > aggregateArgs.size() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "To declare the aggregated argument list variadic, " + + "there must be at least one argument."); + ok = false; + } + else + { + DBType t = + aggregateArgs.get(aggregateArgs.size() - 1).getValue(); + boolean isAny = // allow omission of pg_catalog namespace + DT_ANY.equals(t) || "\"any\"".equals(t.toString()); + if ( orderedSet && (! isAny || 1 != aggregateArgs.size()) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "If variadic, an ordered-set aggregate's " + + "aggregated argument list must be only one " + + "argument and of type \"any\"."); + ok = false; + } + else if ( ! isAny && ! 
t.isArray() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "If variadic, the last aggregated argument must " + + "be an array type (or \"any\")."); + ok = false; + } + } + } + + // Checks specific to ordered-set aggregates. + if ( orderedSet ) + { + if ( 0 == aggregateArgs.size() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "An ordered-set aggregate needs at least one " + + "aggregated argument"); + ok = false; + } + + // Checks specific to hypothetical-set aggregates. + // The aggregated argument types must match the trailing direct + // arguments, and the two variadic declarations must match. + if ( _hypothetical ) + { + if ( _variadic[DIRECT_ARGS] != _variadic[AGG_ARGS] ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "For a hypothetical-set aggregate, neither or " + + "both the direct and aggregated argument lists " + + "must be declared variadic."); + ok = false; + } + if ( directArgs.size() < aggregateArgs.size() + || + ! directArgs.subList( + directArgs.size() - aggregateArgs.size(), + directArgs.size()) + .equals(aggregateArgs) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The last direct arguments of a hypothetical-set " + + "aggregate must match the types of the " + + "aggregated arguments"); + ok = false; + } + } + } + + // It is allowed to omit a finisher function, but some things + // make no sense without one. + if ( orderedSet && null == _plan.finish && 0 < directArgs.size() ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Direct arguments serve no purpose without a finisher"); + ok = false; + } + + if ( null == _plan.finish && _plan._polymorphic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The polymorphic flag is meaningless with no finisher"); + ok = false; + } + + // The same finisher checks for a movingPlan, if present. + if ( moving ) + { + if ( orderedSet + && null == _movingPlan[0].finish + && directArgs.size() > 0 ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Direct arguments serve no purpose without a finisher"); + ok = false; + } + + if ( null == _movingPlan[0].finish + && _movingPlan[0]._polymorphic ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The polymorphic flag is meaningless with no finisher"); + ok = false; + } + } + + // Checks involving sortOperator + if ( null != sortop ) + { + if ( orderedSet ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The sortOperator optimization is not available for " + + "an ordered-set aggregate (one with directArguments)"); + ok = false; + } + + if ( ! unary || _variadic[AGG_ARGS] ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "The sortOperator optimization is only available for " + + "a one-argument (and non-variadic) aggregate"); + ok = false; + } + } + + // Checks involving serialize / deserialize + if ( null != _plan.serialize || null != _plan.deserialize ) + { + if ( null == _plan.combine ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "An aggregate plan without combine= may not have " + + "serialize= or deserialize="); + ok = false; + } + + if ( null == _plan.serialize || null == _plan.deserialize ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "An aggregate plan must have both " + + "serialize= and deserialize= or neither"); + ok = false; + } + + if ( ! DT_INTERNAL.equals(_plan.stateType) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "Only an aggregate plan with stateType " + + "pg_catalog.internal may have serialize=/deserialize="); + ok = false; + } + } + + if ( ! 
ok ) + return Set.of(); + + Set requires = requireTags(); + + DBType[] accumulatorSig = + Stream.of( + Stream.of(_plan.stateType), + aggregateArgs.stream().map(Map.Entry::getValue)) + .flatMap(identity()).toArray(DBType[]::new); + + DBType[] combinerSig = { _plan.stateType, _plan.stateType }; + + DBType[] finisherSig = + Stream.of( + Stream.of(_plan.stateType), + orderedSet + ? directArgs.stream().map(Map.Entry::getValue) + : Stream.of(), + _plan._polymorphic + ? aggregateArgs.stream().map(Map.Entry::getValue) + : Stream.of() + ) + .flatMap(identity()) + .toArray(DBType[]::new); + + if ( checkAccumulatorSig + && ! Arrays.equals(accumulatorSig, func.parameterTypes) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate annotation on a method that matches the name " + + "but not argument types expected for the aggregate's " + + "accumulate function"); + ok = false; + } + + if ( checkFinisherSig + && ! Arrays.equals(finisherSig, func.parameterTypes) ) + { + msg(Kind.ERROR, m_targetElement, m_origin, + "@Aggregate annotation on a method that matches the name " + + "but not argument types expected for the aggregate's " + + "finish function"); + ok = false; + } + + requires.add( + new DependTag.Function(_plan.accumulate, accumulatorSig)); + + if ( null != _plan.combine ) + { + DBType[] serialSig = { DT_INTERNAL }; + DBType[] deserialSig = { DT_BYTEA, DT_INTERNAL }; + + requires.add( + new DependTag.Function(_plan.combine, combinerSig)); + + if ( null != _plan.serialize ) + { + requires.add( + new DependTag.Function(_plan.serialize, serialSig)); + requires.add( + new DependTag.Function(_plan.deserialize, deserialSig)); + } + } + + if ( null != _plan.finish ) + requires.add( + new DependTag.Function(_plan.finish, finisherSig)); + + if ( moving ) + { + accumulatorSig[0] = _movingPlan[0].stateType; + Arrays.fill(combinerSig, _movingPlan[0].stateType); + finisherSig[0] = _movingPlan[0].stateType; + + requires.add(new DependTag.Function( + _movingPlan[0].accumulate, accumulatorSig)); + + requires.add(new DependTag.Function( + _movingPlan[0].remove, accumulatorSig)); + + if ( null != _movingPlan[0].combine ) + requires.add(new DependTag.Function( + _movingPlan[0].combine, combinerSig)); + + if ( null != _movingPlan[0].finish ) + requires.add(new DependTag.Function( + _movingPlan[0].finish, finisherSig)); + } + + if ( null != sortop ) + { + DBType arg = aggregateArgs.get(0).getValue(); + DBType[] opSig = { arg, arg }; + requires.add(new DependTag.Operator(sortop, opSig)); + } + + /* + * That establishes dependency on the various support functions, + * which should, transitively, depend on all of the types. But it is + * possible we do not have a whole-program view (perhaps some + * support functions are implemented in other languages, and there + * are @SQLActions setting them up?). Therefore also, redundantly as + * it may be, declare dependency on the types. + */ + + Stream.of( + aggregateArgs.stream().map(Map.Entry::getValue), + orderedSet + ? directArgs.stream().map(Map.Entry::getValue) + : Stream.of(), + Stream.of(_plan.stateType), + moving + ? 
Stream.of(_movingPlan[0].stateType) + : Stream.of() + ) + .flatMap(identity()) + .map(DBType::dependTag) + .filter(Objects::nonNull) + .forEach(requires::add); + + recordExplicitTags(_provides, _requires); + return Set.of(this); + } + + public String[] deployStrings() + { + List al = new ArrayList<>(); + + StringBuilder sb = new StringBuilder("CREATE AGGREGATE "); + appendNameAndArguments(sb); + sb.append(" ("); + + String[] planStrings = _plan.deployStrings(); + int n = planStrings.length; + for ( String s : planStrings ) + { + sb.append("\n\t").append(s); + if ( 0 < -- n ) + sb.append(','); + } + + if ( null != _movingPlan ) + { + planStrings = _movingPlan[0].deployStrings(); + for ( String s : planStrings ) + sb.append(",\n\tM").append(s); + } + + if ( null != sortop ) + sb.append(",\n\tSORTOP = ").append(sortop); + + if ( Function.Parallel.UNSAFE != _parallel ) + sb.append(",\n\tPARALLEL = ").append(_parallel); + + if ( _hypothetical ) + sb.append(",\n\tHYPOTHETICAL"); + + sb.append(')'); + + al.add(sb.toString()); + + if ( null != comment() ) + { + sb = new StringBuilder("COMMENT ON AGGREGATE "); + appendNameAndArguments(sb); + sb.append(" IS ").append(DDRWriter.eQuote(comment())); + al.add(sb.toString()); + } + + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + StringBuilder sb = new StringBuilder("DROP AGGREGATE "); + appendNameAndArguments(sb); + return new String[] { sb.toString() }; + } + + private void appendNameAndArguments(StringBuilder sb) + { + ListIterator> iter; + Map.Entry entry; + + sb.append(qname).append('('); + if ( null != directArgs ) + { + iter = directArgs.listIterator(); + while ( iter.hasNext() ) + { + entry = iter.next(); + sb.append("\n\t"); + if ( _variadic[DIRECT_ARGS] && ! iter.hasNext() ) + sb.append("VARIADIC "); + if ( null != entry.getKey() ) + sb.append(entry.getKey()).append(' '); + sb.append(entry.getValue()); + if ( iter.hasNext() ) + sb.append(','); + else + sb.append("\n\t"); + } + sb.append("ORDER BY"); + } + else if ( 0 == aggregateArgs.size() ) + sb.append('*'); + + iter = aggregateArgs.listIterator(); + while ( iter.hasNext() ) + { + entry = iter.next(); + sb.append("\n\t"); + if ( _variadic[AGG_ARGS] && ! 
iter.hasNext() ) + sb.append("VARIADIC "); + if ( null != entry.getKey() ) + sb.append(entry.getKey()).append(' '); + sb.append(entry.getValue()); + if ( iter.hasNext() ) + sb.append(','); + } + sb.append(')'); + } + + class Plan extends AbstractAnnotationImpl implements Aggregate.Plan + { + public String stateType() { return stateType.toString(); } + public int stateSize() { return _stateSize; } + public String initialState() { return _initialState; } + public String[] accumulate() { return qstrings(accumulate); } + public String[] combine() { return qstrings(combine); } + public String[] finish() { return qstrings(finish); } + public String[] remove() { return qstrings(remove); } + public String[] serialize() { return qstrings(serialize); } + public String[] deserialize() { return qstrings(deserialize); } + public boolean polymorphic() { return _polymorphic; } + public FinishEffect finishEffect() { return _finishEffect; } + + public int _stateSize; + public String _initialState; + public boolean _polymorphic; + public FinishEffect _finishEffect; + + DBType stateType; + Identifier.Qualified accumulate; + Identifier.Qualified combine; + Identifier.Qualified finish; + Identifier.Qualified remove; + Identifier.Qualified serialize; + Identifier.Qualified deserialize; + + public void setStateType(Object o, boolean explicit, Element e) + { + if ( explicit ) + stateType = DBType.fromSQLTypeAnnotation((String)o); + } + + public void setStateSize(Object o, boolean explicit, Element e) + { + _stateSize = (Integer)o; + if ( explicit && 0 >= _stateSize ) + throw new IllegalArgumentException( + "An explicit stateSize must be positive"); + } + + public void setInitialState(Object o, boolean explicit, Element e) + { + if ( explicit ) + _initialState = (String)o; + } + + public void setAccumulate(Object o, boolean explicit, Element e) + { + if ( explicit ) + accumulate = qnameFrom(avToArray( o, String.class)); + } + + public void setCombine(Object o, boolean explicit, Element e) + { + if ( explicit ) + combine = qnameFrom(avToArray( o, String.class)); + } + + public void setFinish(Object o, boolean explicit, Element e) + { + if ( explicit ) + finish = qnameFrom(avToArray( o, String.class)); + } + + public void setRemove(Object o, boolean explicit, Element e) + { + if ( explicit ) + throw new IllegalArgumentException( + "Only a movingPlan may have a remove function"); + } + + public void setSerialize(Object o, boolean explicit, Element e) + { + if ( explicit ) + serialize = qnameFrom(avToArray( o, String.class)); + } + + public void setDeserialize(Object o, boolean explicit, Element e) + { + if ( explicit ) + deserialize = qnameFrom(avToArray( o, String.class)); + } + + public void setFinishEffect( Object o, boolean explicit, Element e) + { + if ( explicit ) + _finishEffect = FinishEffect.valueOf( + ((VariableElement)o).getSimpleName().toString()); + } + + public Set characterize() + { + return Set.of(); + } + + /** + * Returns one string per plan element (not per SQL statement). + *
<p>
    + * This method has to be here anyway because the class extends + * {@code AbstractAnnotationImpl}, but it will never be processed as + * an actual SQL snippet. This will be called by the containing + * {@code AggregateImpl} and return the individual plan elements + * that it will build into its own deploy strings. + *
<p>
    + * When this class represents a moving plan, the caller will prefix + * each of these strings with {@code M}. + */ + public String[] deployStrings() + { + List al = new ArrayList<>(); + + al.add("STYPE = " + stateType); + + if ( 0 != _stateSize ) + al.add("SSPACE = " + _stateSize); + + if ( null != _initialState ) + al.add("INITCOND = " + DDRWriter.eQuote(_initialState)); + + al.add("SFUNC = " + accumulate); + + if ( null != remove ) + al.add("INVFUNC = " + remove); + + if ( null != finish ) + al.add("FINALFUNC = " + finish); + + if ( _polymorphic ) + al.add("FINALFUNC_EXTRA"); + + if ( null != _finishEffect ) + al.add("FINALFUNC_MODIFY = " + _finishEffect); + + if ( null != combine ) + al.add("COMBINEFUNC = " + combine); + + if ( null != serialize ) + al.add("SERIALFUNC = " + serialize); + + if ( null != deserialize ) + al.add("DESERIALFUNC = " + deserialize); + + return al.toArray( new String [ al.size() ]); + } + + public String[] undeployStrings() + { + return null; + } + } + + class Moving extends Plan + { + public void setRemove(Object o, boolean explicit, Element e) + { + if ( explicit ) + remove = qnameFrom(avToArray( o, String.class)); + } + + public void setSerialize(Object o, boolean explicit, Element e) + { + if ( explicit ) + throw new IllegalArgumentException( + "Only a (non-moving) plan may have a " + + "serialize function"); + } + + public void setDeserialize(Object o, boolean explicit, Element e) + { + if ( explicit ) + throw new IllegalArgumentException( + "Only a (non-moving) plan may have a " + + "deserialize function"); + } + } + } + + /** + * Provides the default mappings from Java types to SQL types. + */ + class TypeMapper + { + ArrayList> protoMappings; + ArrayList> finalMappings; + + TypeMapper() + { + protoMappings = new ArrayList<>(); + + // Primitives (these need not, indeed cannot, be schema-qualified) + // + this.addMap(boolean.class, DT_BOOLEAN); + this.addMap(Boolean.class, DT_BOOLEAN); + this.addMap(byte.class, "smallint"); + this.addMap(Byte.class, "smallint"); + this.addMap(char.class, "smallint"); + this.addMap(Character.class, "smallint"); + this.addMap(double.class, "double precision"); + this.addMap(Double.class, "double precision"); + this.addMap(float.class, "real"); + this.addMap(Float.class, "real"); + this.addMap(int.class, DT_INTEGER); + this.addMap(Integer.class, DT_INTEGER); + this.addMap(long.class, "bigint"); + this.addMap(Long.class, "bigint"); + this.addMap(short.class, "smallint"); + this.addMap(Short.class, "smallint"); + + // Known common mappings + // + this.addMap(Number.class, "pg_catalog", "numeric"); + this.addMap(String.class, "pg_catalog", "varchar"); + this.addMap(java.util.Date.class, "pg_catalog", "timestamp"); + this.addMap(Timestamp.class, "pg_catalog", "timestamp"); + this.addMap(Time.class, "pg_catalog", "time"); + this.addMap(java.sql.Date.class, "pg_catalog", "date"); + this.addMap(java.sql.SQLXML.class, "pg_catalog", "xml"); + this.addMap(BigInteger.class, "pg_catalog", "numeric"); + this.addMap(BigDecimal.class, "pg_catalog", "numeric"); + this.addMap(ResultSet.class, DT_RECORD); + this.addMap(Object.class, DT_ANY); + + this.addMap(byte[].class, DT_BYTEA); + + this.addMap(LocalDate.class, "pg_catalog", "date"); + this.addMap(LocalTime.class, "pg_catalog", "time"); + this.addMap(OffsetTime.class, "pg_catalog", "timetz"); + this.addMap(LocalDateTime.class, "pg_catalog", "timestamp"); + this.addMap(OffsetDateTime.class, "pg_catalog", "timestamptz"); + } + + private boolean mappingsFrozen() + { + return null != 
finalMappings; + } + + /* + * What worked in Java 6 was to keep a list of Class -> sqltype + * mappings, and get TypeMirrors from the Classes at the time of trying + * to identify types (in the final, after-all-sources-processed round). + * Starting in Java 7, you get different TypeMirror instances in + * different rounds for the same types, so you can't match something + * seen in round 1 to something looked up in the final round. (However, + * you can match things seen in round 1 to things looked up prior to + * the first round, when init() is called and constructs the processor.) + * + * So, this method needs to be called at the end of round 1 (or at the + * end of every round, it just won't do anything but once), and at that + * point it will compute the list order and freeze a list of TypeMirrors + * to avoid looking up the Classes later and getting different + * mirrors. + * + * This should work as long as all the sources containg PL/Java + * annotations will be found in round 1. That would only not be the case + * if some other annotation processor is in use that could generate new + * sources with pljava annotations in them, requiring additional rounds. + * In the present state of things, that simply won't work. Java bug + * http://bugs.java.com/bugdatabase/view_bug.do?bug_id=8038455 might + * cover this, and promises a fix in Java 9, but who knows? + */ + private void workAroundJava7Breakage() + { + if ( mappingsFrozen() ) + return; // after the first round, it's too late! + + // Need to check more specific types before those they are + // assignable to by widening reference conversions, so a + // topological sort is in order. + // + List>> vs = new ArrayList<>( + protoMappings.size()); + + for ( Map.Entry me : protoMappings ) + vs.add( new Vertex<>( me)); + + for ( int i = vs.size(); i --> 1; ) + { + Vertex> vi = vs.get( i); + TypeMirror ci = vi.payload.getKey(); + for ( int j = i; j --> 0; ) + { + Vertex> vj = vs.get( j); + TypeMirror cj = vj.payload.getKey(); + boolean oij = typu.isAssignable( ci, cj); + boolean oji = typu.isAssignable( cj, ci); + if ( oji == oij ) + continue; // no precedence constraint between these two + if ( oij ) + vi.precede( vj); + else + vj.precede( vi); + } + } + + Queue>> q; + if ( reproducible ) + { + q = new PriorityQueue<>( 11, new TypeTiebreaker()); + } + else + { + q = new LinkedList<>(); + } + + for ( Vertex> v : vs ) + if ( 0 == v.indegree ) + q.add( v); + + protoMappings.clear(); + finalMappings = protoMappings; + protoMappings = null; + + while ( ! q.isEmpty() ) + { + Vertex> v = q.remove(); + v.use( q); + finalMappings.add( v.payload); + } + } + + private TypeMirror typeMirrorFromClass( Class k) + { + if ( k.isArray() ) + { + TypeMirror ctm = typeMirrorFromClass( k.getComponentType()); + return typu.getArrayType( ctm); + } + + if ( k.isPrimitive() ) + { + TypeKind tk = TypeKind.valueOf( k.getName().toUpperCase()); + return typu.getPrimitiveType( tk); + } + + String cname = k.getCanonicalName(); + if ( null == cname ) + { + msg( Kind.WARNING, + "Cannot register type mapping for class %s" + + "that lacks a canonical name", k.getName()); + return null; + } + + return declaredTypeForClass(k); + } + + /** + * Add a custom mapping from a Java class to an SQL type identified + * by SQL-standard reserved syntax. 
+ * + * @param k Class representing the Java type + * @param v String representing the SQL (language-reserved) type + * to be used + */ + void addMap(Class k, String v) + { + addMap( typeMirrorFromClass( k), new DBType.Reserved(v)); + } + + /** + * Add a custom mapping from a Java class to an SQL type identified + * by an SQL qualified identifier. + * + * @param k Class representing the Java type + * @param schema String representing the qualifier of the type name + * (may be null) + * @param local String representing the SQL (language-reserved) type + * to be used + */ + void addMap(Class k, String schema, String local) + { + addMap( typeMirrorFromClass( k), + new DBType.Named(qnameFrom(local, schema))); + } + + /** + * Add a custom mapping from a Java class to an SQL type + * already in the form of a {@code DBType}. + * + * @param k Class representing the Java type + * @param type DBType representing the SQL type to be used + */ + void addMap(Class k, DBType type) + { + addMap( typeMirrorFromClass( k), type); + } + + /** + * Add a custom mapping from a Java class to an SQL type, if a class + * with the given name exists. + * + * @param k Canonical class name representing the Java type + * @param v String representing the SQL type to be used + */ + void addMapIfExists(String k, String v) + { + TypeElement te = elmu.getTypeElement( k); + if ( null != te ) + addMap( te.asType(), new DBType.Reserved(v)); + } + + /** + * Add a custom mapping from a Java class (represented as a TypeMirror) + * to an SQL type. + * + * @param tm TypeMirror representing the Java type + * @param v String representing the SQL type to be used + */ + void addMap(TypeMirror tm, DBType v) + { + if ( mappingsFrozen() ) + { + msg( Kind.ERROR, + "addMap(%s, %s)\n" + + "called after workAroundJava7Breakage", tm.toString(), v); + return; + } + protoMappings.add( new AbstractMap.SimpleImmutableEntry<>( tm, v)); + } + + /** + * Return the SQL type for the Java type represented by a TypeMirror, + * from an explicit annotation if present, otherwise by applying the + * default mappings. No default-value information is included in the + * string returned. It is assumed that a function return is being typed + * rather than a function parameter. + * + * @param tm Represents the type whose corresponding SQL type is wanted. + * @param e Annotated element (chiefly for use as a location hint in + * diagnostic messages). + */ + DBType getSQLType(TypeMirror tm, Element e) + { + return getSQLType( tm, e, null, false, false); + } + + + /** + * Return the SQL type for the Java type represented by a TypeMirror, + * from an explicit annotation if present, otherwise by applying the + * default mappings. + * + * @param tm Represents the type whose corresponding SQL type is wanted. + * @param e Annotated element (chiefly for use as a location hint in + * diagnostic messages). + * @param st {@code SQLType} annotation, or null if none, explicitly + * given for the element. + * @param contravariant Indicates that the element whose type is wanted + * is a function parameter and should be given the widest type that can + * be assigned to it. If false, find the narrowest type that a function + * return can be assigned to. + * @param withDefault Indicates whether any specified default value + * information should also be included in the "type" string returned. 
+ */ + DBType getSQLType(TypeMirror tm, Element e, SQLType st, + boolean contravariant, boolean withDefault) + { + boolean array = false; + boolean row = false; + DBType rslt = null; + + String[] defaults = null; + boolean optional = false; + + if ( null != st ) + { + String s = st.value(); + if ( null != s ) + rslt = DBType.fromSQLTypeAnnotation(s); + defaults = st.defaultValue(); + optional = st.optional(); + } + + if ( tm.getKind().equals( TypeKind.ARRAY) ) + { + ArrayType at = ((ArrayType)tm); + if ( ! at.getComponentType().getKind().equals( TypeKind.BYTE) ) + { + array = true; + tm = at.getComponentType(); + // only for bytea[] should this ever still be an array + } + } + + if ( ! array && typu.isSameType( tm, TY_RESULTSET) ) + row = true; + + if ( null != rslt ) + return typeWithDefault( + e, rslt, array, row, defaults, optional, withDefault); + + if ( tm.getKind().equals( TypeKind.VOID) ) + return DT_VOID; // return type only; no defaults apply + + if ( tm.getKind().equals( TypeKind.ERROR) ) + { + msg ( Kind.ERROR, e, + "Cannot determine mapping to SQL type for unresolved type"); + rslt = new DBType.Reserved(tm.toString()); + } + else + { + ArrayList> ms = finalMappings; + if ( contravariant ) + ms = reversed(ms); + for ( Map.Entry me : ms ) + { + TypeMirror ktm = me.getKey(); + if ( ktm instanceof PrimitiveType ) + { + if ( typu.isSameType( tm, ktm) ) + { + rslt = me.getValue(); + break; + } + } + else + { + boolean accept; + if ( contravariant ) + accept = typu.isAssignable( ktm, tm); + else + accept = typu.isAssignable( tm, ktm); + if ( accept ) + { + // don't compute a type of Object/"any" for + // a function return (just admit defeat instead) + if ( contravariant + || ! typu.isSameType( ktm, TY_OBJECT) ) + rslt = me.getValue(); + break; + } + } + } + } + + if ( null == rslt ) + { + msg( Kind.ERROR, e, + "No known mapping to an SQL type"); + rslt = new DBType.Reserved(tm.toString()); + } + + if ( array ) + rslt = rslt.asArray("[]"); + + return typeWithDefault( + e, rslt, array, row, defaults, optional, withDefault); + } + + /** + * Given the matching SQL type already determined, return it with or + * without default-value information appended, as the caller desires. + * To ensure that the generated descriptor will be in proper form, the + * default values are emitted as properly-escaped string literals and + * then cast to the appropriate type. This approach will not work for + * defaults given as arbitrary SQL expressions, but covers the typical + * cases of simple literals and even anything that can be computed as + * a Java String constant expression (e.g. ""+Math.PI). + * + * @param e Annotated element (chiefly for use as a location hint in + * diagnostic messages). + * @param rslt The bare SQL type string already determined + * @param array Whether the Java type was determined to be an array + * @param row Whether the Java type was ResultSet, indicating an SQL + * record or row type. + * @param defaults Array (null if not present) of default value strings + * @param withDefault Whether to append the default information to the + * type. + */ + DBType typeWithDefault( + Element e, DBType rslt, boolean array, boolean row, + String[] defaults, boolean optional, boolean withDefault) + { + if ( ! withDefault || null == defaults && ! optional ) + return rslt; + + if ( optional ) + return rslt.withDefault("DEFAULT NULL"); + + int n = defaults.length; + if ( row ) + { + assert ! 
array; + if ( n > 0 && rslt.toString().equalsIgnoreCase("record") ) + msg( Kind.ERROR, e, + "Only supported default for unknown RECORD type is {}"); + } + else if ( n != 1 ) + array = true; + else if ( ! array ) + array = rslt.isArray(); + + StringBuilder sb = new StringBuilder(); + sb.append( " DEFAULT "); + sb.append( row ? "ROW(" : "CAST("); + if ( array ) + sb.append( "ARRAY["); + if ( n > 1 ) + sb.append( "\n\t"); + for ( String s : defaults ) + { + sb.append( DDRWriter.eQuote( s)); + if ( 0 < -- n ) + sb.append( ",\n\t"); + } + if ( array ) + sb.append( ']'); + if ( ! row ) + sb.append( " AS ").append( rslt); + sb.append( ')'); + return rslt.withDefault(sb.toString()); + } + } + + /** + * Work around bizarre javac behavior that silently supplies an Error + * class in place of an attribute value for glaringly obvious source errors, + * instead of reporting them. + * @param av AnnotationValue to extract the value from + * @return The result of getValue unless {@code av} is an error placeholder + */ + static Object getValue( AnnotationValue av) + { + if ( "com.sun.tools.javac.code.Attribute.Error".equals( + av.getClass().getCanonicalName()) ) + throw new AnnotationValueException(); + return av.getValue(); + } + + /** + * Return a reversed copy of an ArrayList. + */ + static > T reversed(T orig) + { + @SuppressWarnings("unchecked") + T list = (T)orig.clone(); + Collections.reverse(list); + return list; + } + + /** + * Return an {@code Identifier.Qualified} from discrete Java strings + * representing the local name and schema, with a zero-length schema string + * producing a qualified name with null qualifier. + */ + Identifier.Qualified qnameFrom( + String name, String schema) + { + Identifier.Simple qualifier = + "".equals(schema) ? null : Identifier.Simple.fromJava(schema, msgr); + Identifier.Simple local = Identifier.Simple.fromJava(name, msgr); + return local.withQualifier(qualifier); + } + + /** + * Return an {@code Identifier.Qualified} from a single Java string + * representing the local name and possibly a schema. + */ + Identifier.Qualified qnameFrom(String name) + { + return Identifier.Qualified.nameFromJava(name, msgr); + } + + /** + * Return an {@code Identifier.Qualified} from an array of Java strings + * representing schema and local name separately if of length two, or as by + * {@link #qnameFrom(String)} if of length one; invalid if of any other + * length. + *
<p>
    + * The first of two elements may be explicitly {@code ""} to produce a + * qualified name with null qualifier. + */ + Identifier.Qualified qnameFrom(String[] names) + { + switch ( names.length ) + { + case 2: return qnameFrom(names[1], names[0]); + case 1: return qnameFrom(names[0]); + default: + throw new IllegalArgumentException( + "Only a one- or two-element String array is accepted"); + } + } + + /** + * Like {@link #qnameFrom(String[])} but for an operator name. + */ + Identifier.Qualified operatorNameFrom(String[] names) + { + switch ( names.length ) + { + case 2: + Identifier.Simple qualifier = null; + if ( ! names[0].isEmpty() ) + qualifier = Identifier.Simple.fromJava(names[0], msgr); + return Identifier.Operator.from(names[1], msgr) + .withQualifier(qualifier); + case 1: + return Identifier.Qualified.operatorFromJava(names[0], msgr); + default: + throw new IllegalArgumentException( + "Only a one- or two-element String array is accepted"); + } + } + + String[] qstrings(Identifier.Qualified qname) + { + if ( null == qname ) + return null; + Identifier.Simple q = qname.qualifier(); + String local = qname.local().toString(); + return new String[] { null == q ? null : q.toString(), local }; + } +} + +/** + * Exception thrown when an expected annotation value is a compiler-internal + * Error class instead, which happens in some javac versions when the annotation + * value wasn't resolved because of a source error the compiler really should + * have reported. + */ +class AnnotationValueException extends RuntimeException { } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/DDRWriter.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRWriter.java similarity index 82% rename from pljava-api/src/main/java/org/postgresql/pljava/sqlgen/DDRWriter.java rename to pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRWriter.java index 02adf67d..c196f9ce 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/DDRWriter.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DDRWriter.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -10,7 +10,7 @@ * Tada AB * Purdue University */ -package org.postgresql.pljava.sqlgen; +package org.postgresql.pljava.annotation.processing; import java.io.IOException; import java.io.Writer; @@ -21,6 +21,8 @@ import static javax.tools.Diagnostic.Kind.ERROR; import static javax.tools.StandardLocation.CLASS_OUTPUT; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + import static org.postgresql.pljava.sqlgen.Lexicals.ISO_PG_JAVA_IDENTIFIER; /** @@ -36,16 +38,25 @@ public class DDRWriter { /** * Generate the deployment descriptor file. + *
<p>
    + * Important: it is assumed that {@code fwdSnips} and + * {@code revSnips} contain exactly the same snippets and differ only in + * their ordering. * - * @param snips Code snippets to include in the file, in a workable order - * for the install actions group. The remove actions group will be generated - * by processing this array in reverse order. + * @param fwdSnips Code snippets to include in the file, in a workable order + * for the install actions group. + * @param revSnips The same snippets in a workable order for the remove + * actions group. Not necessarily simply fwdSnips back to front, as the + * implied dependencies on implementor tags have the same sense for both + * install and remove: the tag conditions have to be evaluated before the + * snippets that depend on them. * @param p Reference to the calling object, used to obtain the Filer * object and desired output file name, and for diagnostic messages. */ - static void emit( Snippet[] snips, DDRProcessorImpl p) throws IOException + static void emit( Snippet[] fwdSnips, Snippet[] revSnips, + DDRProcessorImpl p) throws IOException { - if ( ! ensureLexable( snips, p) ) + if ( ! ensureLexable( fwdSnips, p) ) // assume same members as revSnips! return; Writer w = @@ -53,15 +64,15 @@ static void emit( Snippet[] snips, DDRProcessorImpl p) throws IOException w.write( "SQLActions[]={\n\"BEGIN INSTALL\n"); - for ( Snippet snip : snips ) + for ( Snippet snip : fwdSnips ) for ( String s : snip.deployStrings() ) - writeCommand( w, s, snip.implementor()); + writeCommand( w, s, snip.implementorName()); w.write( "END INSTALL\",\n\"BEGIN REMOVE\n"); - for ( int i = snips.length; i --> 0; ) - for ( String s : snips[i].undeployStrings() ) - writeCommand( w, s, snips[i].implementor()); + for ( Snippet snip : revSnips ) + for ( String s : snip.undeployStrings() ) + writeCommand( w, s, snip.implementorName()); w.write( "END REMOVE\"\n}\n"); @@ -82,13 +93,13 @@ static void emit( Snippet[] snips, DDRProcessorImpl p) throws IOException * is implementor-nonspecific. PostgreSQL is the string to use for * PostgreSQL-specific commands. */ - static void writeCommand( Writer w, String s, String implementor) + static void writeCommand( Writer w, String s, Identifier.Simple implementor) throws IOException { if ( null != implementor ) { w.write( "BEGIN "); - w.write( implementor); + w.write( implementor.toString()); w.write( '\n'); } @@ -97,7 +108,7 @@ static void writeCommand( Writer w, String s, String implementor) if ( null != implementor ) { w.write( "\nEND "); - w.write( implementor); + w.write( implementor.toString()); } w.write( ";\n"); @@ -155,7 +166,7 @@ static boolean ensureLexable( Snippet[] snips, DDRProcessorImpl p) for ( Snippet snip : snips ) { - String implementor = snip.implementor(); + String implementor = snip.implementorName().nonFolded(); if ( null != implementor ) { i.reset( implementor); diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DependTag.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DependTag.java new file mode 100644 index 00000000..a9aeec05 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/DependTag.java @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2020-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.Arrays; +import static java.util.Objects.hash; +import static java.util.Objects.requireNonNull; + +import javax.annotation.processing.Messager; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +/** + * Abstraction of a dependency tag, encompassing {@code Explicit} ones declared + * in annotations and distinguished by {@code String}s, and others added + * implicitly such as {@code Type}s known by {@code Identifier.Qualified}. + */ +abstract class DependTag +{ + protected final T m_value; + + protected DependTag(T value) + { + m_value = value; + } + + @Override + public int hashCode() + { + return hash(getClass(), m_value); + } + + @Override + public final boolean equals(Object o) + { + return equals(o, null); + } + + public boolean equals(Object o, Messager msgr) + { + if ( this == o ) + return true; + if ( null == o ) + return false; + return + getClass() == o.getClass() + && m_value.equals(((DependTag)o).m_value); + } + + @Override + public String toString() + { + return '(' + getClass().getSimpleName() + ')' + m_value.toString(); + } + + static final class Explicit extends DependTag + { + Explicit(String value) + { + super(requireNonNull(value)); + } + } + + static abstract class Named extends DependTag + { + Named(T value) + { + super(value); + } + + @Override + public boolean equals(Object o, Messager msgr) + { + if ( this == o ) + return true; + if ( null == o ) + return false; + return + getClass() == o.getClass() + && m_value.equals(((DependTag)o).m_value, msgr); + } + } + + static final class Type + extends Named> + { + Type(Identifier.Qualified value) + { + super(requireNonNull(value)); + } + } + + static final class Function + extends Named> + { + private DBType[] m_signature; + + Function( + Identifier.Qualified value, DBType[] signature) + { + super(requireNonNull(value)); + m_signature = signature.clone(); + } + + @Override + public boolean equals(Object o, Messager msgr) + { + if ( ! super.equals(o, msgr) ) + return false; + Function f = (Function)o; + if ( m_signature.length != f.m_signature.length ) + return false; + for ( int i = 0; i < m_signature.length; ++ i ) + { + if ( null == m_signature[i] || null == f.m_signature[i] ) + { + if ( m_signature[i] != f.m_signature[i] ) + return false; + continue; + } + if ( ! m_signature[i].equals(f.m_signature[i], msgr) ) + return false; + } + return true; + } + + @Override + public String toString() + { + return super.toString() + Arrays.toString(m_signature); + } + } + + static final class Operator + extends Named> + { + private DBType[] m_signature; + + Operator( + Identifier.Qualified value, DBType[] signature) + { + super(requireNonNull(value)); + assert 2 == signature.length : "invalid Operator signature length"; + m_signature = signature.clone(); + } + + @Override + public boolean equals(Object o, Messager msgr) + { + if ( ! 
super.equals(o, msgr) ) + return false; + Operator op = (Operator)o; + if ( m_signature.length != op.m_signature.length ) + return false; + for ( int i = 0; i < m_signature.length; ++ i ) + { + if ( null == m_signature[i] || null == op.m_signature[i] ) + { + if ( m_signature[i] != op.m_signature[i] ) + return false; + continue; + } + if ( ! m_signature[i].equals(op.m_signature[i], msgr) ) + return false; + } + return true; + } + + @Override + public String toString() + { + return super.toString() + Arrays.toString(m_signature); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/ImpProvider.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/ImpProvider.java new file mode 100644 index 00000000..ed51f4bf --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/ImpProvider.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.Set; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +/** + * Proxy a snippet that 'provides' an implementor tag and has no + * undeployStrings, returning its deployStrings in their place. + */ +class ImpProvider implements Snippet +{ + Snippet s; + + ImpProvider( Snippet s) { this.s = s; } + + @Override public Identifier.Simple implementorName() + { + return s.implementorName(); + } + @Override public String[] deployStrings() { return s.deployStrings(); } + @Override public String[] undeployStrings() { return s.deployStrings(); } + @Override public Set provideTags() { return s.provideTags(); } + @Override public Set requireTags() { return s.requireTags(); } + @Override public Set characterize() { return s.characterize(); } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/ParameterInfo.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/ParameterInfo.java new file mode 100644 index 00000000..a419c413 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/ParameterInfo.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2020-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import javax.lang.model.element.VariableElement; + +import javax.lang.model.type.TypeMirror; + +import org.postgresql.pljava.annotation.SQLType; + +/** + * Tiny 'record' used in factoring duplicative operations on function parameter + * lists into operations on streams of these. + */ +class ParameterInfo +{ + final TypeMirror tm; + final VariableElement ve; + final SQLType st; + final DBType dt; + + String name() + { + String name = null == st ? 
null : st.name(); + if ( null == name ) + name = ve.getSimpleName().toString(); + return name; + } + + ParameterInfo(TypeMirror m, VariableElement e, SQLType t, DBType d) + { + tm = m; + ve = e; + st = t; + dt = d; + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Snippet.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Snippet.java new file mode 100644 index 00000000..bd84faf7 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Snippet.java @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.Set; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +/** + * A code snippet. May contain zero, one, or more complete SQL commands for + * each of deploying and undeploying. The commands contained in one Snippet + * will always be emitted in a fixed order. A collection of Snippets will be + * output in an order constrained by their provides and requires methods. + */ +interface Snippet +{ + /** + * An {@code } that will be used to wrap each command + * from this Snippet as an {@code }. If null, the + * commands will be emitted as plain {@code }s. + */ + public Identifier.Simple implementorName(); + /** + * A {@code DependTag} to represent this snippet's dependence on whatever + * determines whether the implementor name is to be recognized. + *
<p>
    + * Represented for now as a {@code DependTag.Explicit} even though the + * dependency is implicitly created; an {@code SQLAction} snippet may have + * an explicit {@code provides=} that has to be matched. + */ + default DependTag implementorTag() + { + return new DependTag.Explicit(implementorName().nonFolded()); + } + /** + * Return an array of SQL commands (one complete command to a string) to + * be executed in order during deployment. + */ + public String[] deployStrings(); + /** + * Return an array of SQL commands (one complete command to a string) to + * be executed in order during undeployment. + */ + public String[] undeployStrings(); + /** + * Return an array of arbitrary labels considered "provided" by this + * Snippet. In generating the final order of the deployment descriptor file, + * this Snippet will come before any whose requires method returns any of + * the same labels. + */ + public Set provideTags(); + /** + * Return an array of arbitrary labels considered "required" by this + * Snippet. In generating the final order of the deployment descriptor file, + * this Snippet will come after those whose provides method returns any of + * the same labels. + */ + public Set requireTags(); + /** + * Method to be called after all annotations' + * element/value pairs have been filled in, to compute any additional + * information derived from those values before deployStrings() or + * undeployStrings() can be called. May also check for and report semantic + * errors that are not easily checked earlier while populating the + * element/value pairs. + * @return A set of snippets that are now prepared and should be added to + * the graph to be scheduled and emitted according to provides/requires. + * Typically Set.of(this) if all went well, or Set.of() in case of an error + * or when the snippet will be emitted by something else. In some cases a + * characterize method can return additional snippets that are ready to be + * scheduled. + */ + public Set characterize(); + + /** + * If it is possible to break an ordering cycle at this snippet, return a + * vertex wrapping a snippet (possibly this one, or another) that can be + * considered ready, otherwise return null. + *
<p>
    + * The default implementation returns null unconditionally. + * @param v Vertex that wraps this Snippet + * @param deploy true when generating an ordering for the deploy strings + * @return a Vertex wrapping a Snippet that can be considered ready + */ + default Vertex breakCycle(Vertex v, boolean deploy) + { + return null; + } + + /** + * Called when undeploy ordering breaks a cycle by using + * {@code DROP ... CASCADE} or equivalent on another object, with effects + * that would duplicate or interfere with this snippet's undeploy actions. + *
<p>
    + * A snippet for which this can matter should note that this method has been + * called, and later generate its undeploy strings with any necessary + * adjustments. + *
<p>
    + * The default implementation does nothing. + */ + default void subsume() + { + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/SnippetTiebreaker.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/SnippetTiebreaker.java new file mode 100644 index 00000000..d21e59e8 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/SnippetTiebreaker.java @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.Arrays; +import java.util.Comparator; +import static java.util.Comparator.comparing; +import static java.util.Comparator.naturalOrder; +import static java.util.Comparator.nullsFirst; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; + +/** + * Resolve ties in {@code Snippet} ordering in an arbitrary but deterministic + * way, for use when {@code ddr.reproducible} is set. + */ +class SnippetTiebreaker implements Comparator> +{ + private static final Comparator> VCMP; + + static + { + Comparator scmp = + comparing(Snippet::implementorName, + nullsFirst(comparing(Simple::pgFolded, naturalOrder())) + ) + .thenComparing(Snippet::deployStrings, Arrays::compare) + .thenComparing(Snippet::undeployStrings, Arrays::compare); + + VCMP = comparing(v -> v.payload, scmp); + } + + @Override + public int compare(Vertex o1, Vertex o2) + { + return VCMP.compare(o1, o2); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/TriggerNamer.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/TriggerNamer.java similarity index 82% rename from pljava-api/src/main/java/org/postgresql/pljava/sqlgen/TriggerNamer.java rename to pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/TriggerNamer.java index 9134be93..a166f60b 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/TriggerNamer.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/TriggerNamer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -10,14 +10,10 @@ * Tada AB * Purdue University */ -package org.postgresql.pljava.sqlgen; +package org.postgresql.pljava.annotation.processing; import org.postgresql.pljava.annotation.Trigger; -import static org.postgresql.pljava.annotation.Trigger.Event.DELETE; -import static org.postgresql.pljava.annotation.Trigger.Event.INSERT; -import static org.postgresql.pljava.annotation.Trigger.Event.TRUNCATE; - /** * @author Thomas Hallgren - pre-Java6 version * @author Chapman Flack (Purdue Mathematics) - update to Java6 diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/TypeTiebreaker.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/TypeTiebreaker.java new file mode 100644 index 00000000..04fa37f8 --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/TypeTiebreaker.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.Comparator; +import static java.util.Comparator.comparing; +import java.util.Map; + +import javax.lang.model.type.TypeMirror; + +/** + * Resolve ties in type-mapping resolution in an arbitrary but deterministic + * way, for use when {@code ddr.reproducible} is set. + */ +class TypeTiebreaker +implements Comparator>> +{ + private static final Comparator>> VCMP; + + static + { + Comparator> ecmp = + comparing( + (Map.Entry e) -> e.getValue().toString()) + .thenComparing(e -> e.getKey().toString()); + + VCMP = comparing(v -> v.payload, ecmp); + } + + @Override + public int compare( + Vertex> o1, + Vertex> o2) + { + return VCMP.compare(o1, o2); + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Vertex.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Vertex.java new file mode 100644 index 00000000..3175e91c --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/Vertex.java @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.IdentityHashMap; +import java.util.LinkedList; +import java.util.List; +import static java.util.Objects.requireNonNull; +import java.util.Queue; + +/** + * Vertex in a DAG, as used to put things in workable topological order + */ +class Vertex
<P>
    +{ + P payload; + int indegree; + List> adj; + + /** + * Construct a new vertex with the supplied payload, indegree zero, and an + * empty out-adjacency list. + * @param payload Object to be associated with this vertex. + */ + Vertex( P payload) + { + this.payload = payload; + indegree = 0; + adj = new ArrayList<>(); + } + + /** + * Record that this vertex must precede the specified vertex. + * @param v a Vertex that this Vertex must precede. + */ + void precede( Vertex
<P>
    v) + { + ++ v.indegree; + adj.add( v); + } + + /** + * Record that this vertex has been 'used'. Decrement the indegree of any + * in its adjacency list, and add to the supplied queue any of those whose + * indegree becomes zero. + * @param q A queue of vertices that are ready (have indegree zero). + */ + void use( Collection> q) + { + for ( Vertex
<P>
    v : adj ) + if ( 0 == -- v.indegree ) + q.add( v); + } + + /** + * Record that this vertex has been 'used'. Decrement the indegree of any + * in its adjacency list; any of those whose indegree becomes zero should be + * both added to the ready queue {@code q} and removed from the collection + * {@code vs}. + * @param q A queue of vertices that are ready (have indegree zero). + * @param vs A collection of vertices not yet ready. + */ + void use( Collection> q, Collection> vs) + { + for ( Vertex
<P>
    v : adj ) + if ( 0 == -- v.indegree ) + { + vs.remove( v); + q.add( v); + } + } + + /** + * Whether a vertex is known to transitively precede, or not so precede, a + * target vertex, or cannot yet be so classified. + */ + enum MemoState { YES, NO, PENDING } + + /** + * Return the memoized state of this vertex or, if none, enqueue the vertex + * for further exploration, memoize its state as {@code PENDING}, and return + * that. + */ + MemoState classifyOrEnqueue( + Queue<Vertex<P>> queue, IdentityHashMap<Vertex<P>,MemoState> memos) + { + MemoState state = memos.putIfAbsent(this, MemoState.PENDING); + if ( null == state ) + { + queue.add(this); + return MemoState.PENDING; + } + return state; + } + + /** + * Execute one step of {@code precedesTransitively} determination. + *<p>
    + * On entry, this vertex has been removed from the queue. Its immediate + * adjacency successors will be evaluated. + *<p>
    + * If any immediate successor is a {@code YES}, this vertex + * is a {@code YES}. + *<p>
    + * If any immediate successor is {@code PENDING}, this vertex remains + * {@code PENDING} and is replaced on the queue, to be encountered again + * after all currently pending vertices. + *<p>
    + * Otherwise, this vertex is a {@code NO}. + */ + MemoState stepOfPrecedes( + Queue<Vertex<P>> queue, IdentityHashMap<Vertex<P>,MemoState> memos) + { + boolean anyPendingSuccessors = false; + for ( Vertex<P>
    v : adj ) + { + switch ( v.classifyOrEnqueue(queue, memos) ) + { + case YES: + memos.replace(this, MemoState.YES); + return MemoState.YES; + case PENDING: + anyPendingSuccessors = true; + break; + case NO: + break; + } + } + + if ( anyPendingSuccessors ) + { + queue.add(this); + return MemoState.PENDING; + } + + memos.replace(this, MemoState.NO); + return MemoState.NO; + } + + /** + * Determine whether this vertex (transitively) precedes other, + * returning, if so, that subset of its immediate adjacency successors + * through which other is reachable. + * @param other vertex to which reachability is to be tested + * @return array of immediate adjacencies through which other is reachable, + * or null if it is not + */ + Vertex
<P>
    [] precedesTransitively(Vertex<P>
    other) + { + Queue<Vertex<P>> queue = new LinkedList<>(); + IdentityHashMap<Vertex<P>,MemoState> memos = new IdentityHashMap<>(); + boolean anyYeses = false; + + /* + * Initially: the 'other' vertex itself is known to be a YES. + * Nothing is yet known to be a NO. + */ + memos.put(requireNonNull(other), MemoState.YES); + + /* + * classifyOrEnqueue my immediate successors. Any that is not 'other' + * itself will be enqueued in PENDING status. + */ + for ( Vertex<P>
    v : adj ) + if ( MemoState.YES == v.classifyOrEnqueue(queue, memos) ) + anyYeses = true; + + /* + * After running stepOfPrecedes on every enqueued vertex until the queue + * is empty, every vertex seen will be in memos as a YES or a NO. + */ + while ( ! queue.isEmpty() ) + if ( MemoState.YES == queue.remove().stepOfPrecedes(queue, memos) ) + anyYeses = true; + + if ( ! anyYeses ) + return null; + + @SuppressWarnings("unchecked") // can't quite say Vertex
<P>
    []::new + Vertex<P>
    [] result = adj.stream() + .filter(v -> MemoState.YES == memos.get(v)) + .toArray(Vertex[]::new); + + return result; + } + + /** + * Remove successors from the adjacency list of this vertex, and + * add them to the adjacency list of other. + *
<p>
    + * No successor's indegree is changed. + */ + void transferSuccessorsTo(Vertex<P>
    other, Vertex<P>
    [] successors) + { + for ( Vertex<P>
    v : successors ) + { + boolean removed = adj.remove(v); + assert removed : "transferSuccessorsTo passed a non-successor"; + other.adj.add(v); + } + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/VertexPair.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/VertexPair.java new file mode 100644 index 00000000..b381563c --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/VertexPair.java @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Purdue University + * Chapman Flack + */ +package org.postgresql.pljava.annotation.processing; + +/** + * A pair of Vertex instances for the same payload, for use when two directions + * of topological ordering must be computed. + */ +class VertexPair
<P>
    +{ + Vertex<P>
    fwd; + Vertex<P>
    rev; + + VertexPair( P payload) + { + fwd = new Vertex<>( payload); + rev = new Vertex<>( payload); + } + + P payload() + { + return rev.payload; + } +} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/package-info.java b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/package-info.java new file mode 100644 index 00000000..3fce0b0d --- /dev/null +++ b/pljava-api/src/main/java/org/postgresql/pljava/annotation/processing/package-info.java @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2015-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Purdue University + */ +/** + *
<p>
    Not strictly part of the API, this package contains the compiler extension + * itself that recognizes + * {@linkplain org.postgresql.pljava.annotation PL/Java annotations} and + * generates the deployment descriptor. It is part of this module so that the + * pljava-api jar will be all that is needed on the class path + * when compiling PL/Java code, even with annotations. + + *<p>
    Limitation note: A Java bug introduced in Java 7 + required a workaround that was added here in + * pull #42. The workaround + has a limitation: if you are compiling Java sources that also use other + annotations and other annotation processors, and if those other processors + can write new Java files and cause more than one round of compilation, they + must not include org.postgresql.pljava.annotation annotations + in those files. This code needs to find all such annotations in round 1. + + *<p>
    If Oracle fixes the underlying bug, the limitation can be removed. + * Oracle's bug site suggests that won't happen until Java 9, if then. + */ +package org.postgresql.pljava.annotation.processing; diff --git a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/DDRProcessor.java b/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/DDRProcessor.java deleted file mode 100644 index 96c3d035..00000000 --- a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/DDRProcessor.java +++ /dev/null @@ -1,2505 +0,0 @@ -/* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the The BSD 3-Clause License - * which accompanies this distribution, and is available at - * http://opensource.org/licenses/BSD-3-Clause - * - * Contributors: - * Tada AB - * Purdue University - */ -package org.postgresql.pljava.sqlgen; - -import java.io.IOException; - -import java.lang.annotation.Annotation; - -import java.lang.reflect.Array; -import java.lang.reflect.Field; -import java.lang.reflect.InvocationTargetException; - -import java.math.BigDecimal; -import java.math.BigInteger; - -import java.sql.ResultSet; -import java.sql.SQLData; -import java.sql.SQLInput; -import java.sql.SQLOutput; -import java.sql.Time; -import java.sql.Timestamp; - -import java.text.BreakIterator; - -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Queue; -import java.util.Set; - -import java.util.regex.Pattern; - -import javax.annotation.processing.*; - -import javax.lang.model.SourceVersion; - -import javax.lang.model.element.AnnotationMirror; -import javax.lang.model.element.AnnotationValue; -import javax.lang.model.element.Element; -import javax.lang.model.element.ElementKind; -import javax.lang.model.element.ExecutableElement; -import javax.lang.model.element.Modifier; -import javax.lang.model.element.NestingKind; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import javax.lang.model.element.VariableElement; - -import javax.lang.model.type.ArrayType; -import javax.lang.model.type.DeclaredType; -import javax.lang.model.type.ExecutableType; -import javax.lang.model.type.NoType; -import javax.lang.model.type.PrimitiveType; -import javax.lang.model.type.TypeKind; -import javax.lang.model.type.TypeMirror; - -import javax.lang.model.util.Elements; -import javax.lang.model.util.Types; - -import static javax.lang.model.util.ElementFilter.constructorsIn; -import static javax.lang.model.util.ElementFilter.methodsIn; - -import static javax.tools.Diagnostic.Kind; - -import org.postgresql.pljava.ResultSetHandle; -import org.postgresql.pljava.ResultSetProvider; -import org.postgresql.pljava.TriggerData; - -import org.postgresql.pljava.annotation.Function; -import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; -import org.postgresql.pljava.annotation.SQLType; -import org.postgresql.pljava.annotation.Trigger; -import org.postgresql.pljava.annotation.BaseUDT; -import org.postgresql.pljava.annotation.MappedUDT; - -/** - * Annotation processor invoked by the 
annotations framework in javac for - * annotations of type org.postgresql.pljava.annotation.*. - * - * Simply forwards to a DDRProcessorImpl instance that is not constructed - * until the framework calls init (since there is nothing useful for the - * constructor to do until then). - * - * @author Thomas Hallgren - pre-Java6 version - * @author Chapman Flack (Purdue Mathematics) - update to Java6, - * add SQLType/SQLAction, polishing - */ -@SupportedAnnotationTypes({"org.postgresql.pljava.annotation.*"}) -@SupportedOptions -({ - "ddr.name.trusted", // default "java" - "ddr.name.untrusted", // default "javaU" - "ddr.implementor", // implementor when not annotated, default "PostgreSQL" - "ddr.output" // name of ddr file to write -}) -@SupportedSourceVersion(SourceVersion.RELEASE_6) -public class DDRProcessor extends AbstractProcessor -{ - private DDRProcessorImpl impl; - - @Override - public void init( ProcessingEnvironment processingEnv) - { - super.init( processingEnv); - impl = new DDRProcessorImpl( processingEnv); - } - - @Override - public boolean process( Set tes, RoundEnvironment re) - { - if ( null == impl ) - throw new IllegalStateException( - "The annotation processing framework has called process() " + - "before init()"); - return impl.process( tes, re); - } -} - -/** - * Where the work happens. - */ -class DDRProcessorImpl -{ - // Things supplied by the calling framework in ProcessingEnvironment, - // used enough that it makes sense to break them out here with - // short names that all nested classes below will inherit. - // - final Elements elmu; - final Filer filr; - final Locale loca; - final Messager msgr; - final Map opts; - final SourceVersion srcv; - final Types typu; - - // Similarly, the TypeMapper should be easily available to code below. - // - final TypeMapper tmpr; - - // Options obtained from the invocation - // - final String nameTrusted; - final String nameUntrusted; - final String output; - final String defaultImplementor; - - // Certain known types that need to be recognized in the processed code - // - final DeclaredType TY_ITERATOR; - final DeclaredType TY_OBJECT; - final DeclaredType TY_RESULTSET; - final DeclaredType TY_RESULTSETPROVIDER; - final DeclaredType TY_RESULTSETHANDLE; - final DeclaredType TY_SQLDATA; - final DeclaredType TY_SQLINPUT; - final DeclaredType TY_SQLOUTPUT; - final DeclaredType TY_STRING; - final DeclaredType TY_TRIGGERDATA; - final NoType TY_VOID; - - // Our own annotations - // - final TypeElement AN_FUNCTION; - final TypeElement AN_SQLACTION; - final TypeElement AN_SQLACTIONS; - final TypeElement AN_SQLTYPE; - final TypeElement AN_TRIGGER; - final TypeElement AN_BASEUDT; - final TypeElement AN_MAPPEDUDT; - - DDRProcessorImpl( ProcessingEnvironment processingEnv) - { - elmu = processingEnv.getElementUtils(); - filr = processingEnv.getFiler(); - loca = processingEnv.getLocale(); - msgr = processingEnv.getMessager(); - opts = processingEnv.getOptions(); - srcv = processingEnv.getSourceVersion(); - typu = processingEnv.getTypeUtils(); - - tmpr = new TypeMapper(); - - String optv; - - optv = opts.get( "ddr.name.trusted"); - if ( null != optv ) - nameTrusted = optv; - else - nameTrusted = "java"; - - optv = opts.get( "ddr.name.untrusted"); - if ( null != optv ) - nameUntrusted = optv; - else - nameUntrusted = "javaU"; - - optv = opts.get( "ddr.implementor"); - if ( null != optv ) - defaultImplementor = "-".equals( optv) ? 
null : optv; - else - defaultImplementor = "PostgreSQL"; - - optv = opts.get( "ddr.output"); - if ( null != optv ) - output = optv; - else - output = "pljava.ddr"; - - TY_ITERATOR = typu.getDeclaredType( - elmu.getTypeElement( java.util.Iterator.class.getName())); - TY_OBJECT = typu.getDeclaredType( - elmu.getTypeElement( Object.class.getName())); - TY_RESULTSET = typu.getDeclaredType( - elmu.getTypeElement( java.sql.ResultSet.class.getName())); - TY_RESULTSETPROVIDER = typu.getDeclaredType( - elmu.getTypeElement( ResultSetProvider.class.getName())); - TY_RESULTSETHANDLE = typu.getDeclaredType( - elmu.getTypeElement( ResultSetHandle.class.getName())); - TY_SQLDATA = typu.getDeclaredType( - elmu.getTypeElement( SQLData.class.getName())); - TY_SQLINPUT = typu.getDeclaredType( - elmu.getTypeElement( SQLInput.class.getName())); - TY_SQLOUTPUT = typu.getDeclaredType( - elmu.getTypeElement( SQLOutput.class.getName())); - TY_STRING = typu.getDeclaredType( - elmu.getTypeElement( String.class.getName())); - TY_TRIGGERDATA = typu.getDeclaredType( - elmu.getTypeElement( TriggerData.class.getName())); - TY_VOID = typu.getNoType( TypeKind.VOID); - - AN_FUNCTION = elmu.getTypeElement( Function.class.getName()); - AN_SQLACTION = elmu.getTypeElement( SQLAction.class.getName()); - AN_SQLACTIONS = elmu.getTypeElement( SQLActions.class.getName()); - AN_SQLTYPE = elmu.getTypeElement( SQLType.class.getName()); - AN_TRIGGER = elmu.getTypeElement( Trigger.class.getName()); - AN_BASEUDT = elmu.getTypeElement( BaseUDT.class.getName()); - AN_MAPPEDUDT = elmu.getTypeElement( MappedUDT.class.getName()); - } - - void msg( Kind kind, String fmt, Object... args) - { - msgr.printMessage( kind, String.format( fmt, args)); - } - - void msg( Kind kind, Element e, String fmt, Object... args) - { - msgr.printMessage( kind, String.format( fmt, args), e); - } - - void msg( Kind kind, Element e, AnnotationMirror a, - String fmt, Object... args) - { - msgr.printMessage( kind, String.format( fmt, args), e, a); - } - - void msg( Kind kind, Element e, AnnotationMirror a, AnnotationValue v, - String fmt, Object... args) - { - msgr.printMessage( kind, String.format( fmt, args), e, a, v); - } - - /** - * Key usable in a mapping from (Object, Snippet-subtype) to Snippet. - * Because there's no telling in which order a Map implementation will - * compare two keys, the class matches if either one is assignable to - * the other. That's ok as long as the Snippet-subtype is never Snippet - * itself, no Object ever has two Snippets hung on it where one extends - * the other, and getSnippet is always called for the widest of any of - * the types it may retrieve. - */ - static final class SnippetsKey - { - final Object o; - final Class c; - SnippetsKey(Object o, Class c) - { - assert Snippet.class != c : "Snippet key must be a subtype"; - this.o = o; - this.c = c; - } - public boolean equals(Object oth) - { - if ( ! (oth instanceof SnippetsKey) ) - return false; - SnippetsKey osk = (SnippetsKey)oth; - return o.equals( osk.o) - && ( c.isAssignableFrom( osk.c) || osk.c.isAssignableFrom( c) ); - } - public int hashCode() - { - return o.hashCode(); // must not depend on c (subtypes will match) - } - } - - /** - * Collection of code snippets being accumulated (possibly over more than - * one round), keyed by the object for which each snippet has been - * generated. 
- */ - Map snippets = new HashMap(); - - S getSnippet(Object o, Class c) - { - return (S)snippets.get( new SnippetsKey( o, c)); - } - - void putSnippet( Object o, Snippet s) - { - snippets.put( new SnippetsKey( o, s.getClass()), s); - } - - /** - * Queue on which snippets are entered in preparation for topological - * ordering. Has to be an instance field because populating the queue - * (which involves invoking the snippets' characterize methods) cannot - * be left to generateDescriptor, which runs in the final round. This is - * (AFAICT) another workaround for javac 7's behavior of throwing away - * symbol tables between rounds; when characterize was invoked in - * generateDescriptor, any errors reported were being shown with no source - * location info, because it had been thrown away. - */ - Queue> snippetQueue = new LinkedList>(); - - /** - * Map from each arbitrary provides/requires label to the snippet - * that 'provides' it. Has to be out here as an instance field for the - * same reason {@code snippetQueue} does. - */ - Map> provider = - new HashMap>(); - - /** - * Set of provides/requires labels for which at least one consumer has - * been seen. An instance field for the same reason as {@code provider}. - */ - Set consumer = new HashSet(); - - /** - * Find the elements in each round that carry any of the annotations of - * interest and generate code snippets accordingly. On the last round, with - * all processing complete, generate the deployment descriptor file. - */ - boolean process( Set tes, RoundEnvironment re) - { - boolean functionPresent = false; - boolean sqlActionPresent = false; - boolean sqlActionsPresent = false; - boolean baseUDTPresent = false; - boolean mappedUDTPresent = false; - - boolean willClaim = true; - - for ( TypeElement te : tes ) - { - if ( AN_FUNCTION.equals( te) ) - functionPresent = true; - else if ( AN_SQLACTION.equals( te) ) - sqlActionPresent = true; - else if ( AN_SQLACTIONS.equals( te) ) - sqlActionsPresent = true; - else if ( AN_BASEUDT.equals( te) ) - baseUDTPresent = true; - else if ( AN_MAPPEDUDT.equals( te) ) - mappedUDTPresent = true; - else if ( AN_SQLTYPE.equals( te) ) - ; // these are handled within FunctionImpl - else - { - msg( Kind.WARNING, te, - "pljava annotation processor version may be older than " + - "this annotation:\n%s", te.toString()); - willClaim = false; - } - } - - if ( baseUDTPresent ) - for ( Element e : re.getElementsAnnotatedWith( AN_BASEUDT) ) - processUDT( e, UDTKind.BASE); - - if ( mappedUDTPresent ) - for ( Element e : re.getElementsAnnotatedWith( AN_MAPPEDUDT) ) - processUDT( e, UDTKind.MAPPED); - - if ( functionPresent ) - for ( Element e : re.getElementsAnnotatedWith( AN_FUNCTION) ) - processFunction( e); - - if ( sqlActionPresent ) - for ( Element e : re.getElementsAnnotatedWith( AN_SQLACTION) ) - processSQLAction( e); - - if ( sqlActionsPresent ) - for ( Element e : re.getElementsAnnotatedWith( AN_SQLACTIONS) ) - processSQLActions( e); - - tmpr.workAroundJava7Breakage(); // perhaps it will be fixed in Java 9? - - if ( ! re.processingOver() ) - defensiveEarlyCharacterize(); - else if ( ! re.errorRaised() ) - generateDescriptor(); - - return willClaim; - } - - /** - * Iterate over collected snippets, characterize them, and enter them - * (if no error) in the data structures for topological ordering. Was - * originally the first part of {@code generateDescriptor}, but that is - * run in the final round, which is too late for javac 7 anyway, which - * throws symbol tables away between rounds. 
Any errors reported from - * characterize were being shown without source locations, because the - * information was gone. This may now be run more than once, so the - * {@code snippets} map is cleared before returning. - */ - void defensiveEarlyCharacterize() - { - for ( Snippet snip : snippets.values() ) - { - if ( ! snip.characterize() ) - continue; - Vertex v = new Vertex( snip); - snippetQueue.add( v); - for ( String s : snip.provides() ) - if ( null != provider.put( s, v) ) - msg( Kind.ERROR, "tag %s has more than one provider", s); - for ( String s : snip.requires() ) - consumer.add( s); - } - snippets.clear(); - } - - /** - * Arrange the collected snippets into a workable sequence (nothing with - * requires="X" can come before whatever has provides="X"), then create - * a deployment descriptor file in proper form. - */ - void generateDescriptor() - { - boolean errorRaised = false; - - for ( Vertex v : snippetQueue ) - for ( String s : v.payload.requires() ) - { - Vertex p = provider.get( s); - if ( null != p ) - p.precede( v); - else if ( s == v.payload.implementor() ) // yes == if from impl - { - /* - * It's the implicit requires(implementor()). Bump the - * indegree anyway so the snippet won't be emitted until - * the cycle breaker code (see below) sets it free after - * any others that can be handled first. - */ - if ( ! defaultImplementor.equals( s) ) - ++ v.indegree; - } - else - { - msg( Kind.ERROR, - "tag \"%s\" is required but nowhere provided", s); - errorRaised = true; - } - } - - if ( errorRaised ) - return; - - Snippet[] snips = new Snippet [ snippetQueue.size() ]; - - Queue> q = new LinkedList>(); - for ( Iterator> it = snippetQueue.iterator() ; - it.hasNext() ; ) - { - Vertex v = it.next(); - if ( 0 == v.indegree ) - { - q.add( v); - it.remove(); - } - } - -queuerunning: for ( int i = 0 ; ; ) - { - while ( ! q.isEmpty() ) - { - Vertex v = q.remove(); - snips[i++] = v.payload; - v.use( q, snippetQueue); - for ( String p : v.payload.provides() ) - consumer.remove(p); - } - if ( snippetQueue.isEmpty() ) - break; // all done - /* - * There are snippets remaining to output but they all have - * indegree > 0, normally a 'cycle' error. But somewhere there may - * be one with indegree exactly 1 and an implicit requirement of its - * own implementor tag, with no snippet on record to provide it. - * That's allowed (maybe the installing/removing environment will - * be "providing" that tag anyway), so set one such snippet free - * and see how much farther we get. - */ - for ( Iterator> it = snippetQueue.iterator(); - it.hasNext(); ) - { - Vertex v = it.next(); - if ( 1 < v.indegree || null == v.payload.implementor() ) - continue; - if ( provider.containsKey( v.payload.implementor()) ) - continue; - -- v.indegree; - it.remove(); - q.add( v); - continue queuerunning; - } - /* - * Got here? It's a real cycle ... nothing to be done. - */ - for ( String s : consumer ) - msg( Kind.ERROR, "requirement in a cycle: %s", s); - return; - } - - try - { - DDRWriter.emit( snips, this); - } - catch ( IOException ioe ) - { - msg( Kind.ERROR, "while writing %s: %s", output, ioe.getMessage()); - } - } - - /** - * Process a single element annotated with @SQLAction. 
- */ - void processSQLAction( Element e) - { - SQLActionImpl sa = getSnippet( e, SQLActionImpl.class); - if ( null == sa ) - { - sa = new SQLActionImpl(); - putSnippet( e, sa); - } - for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) - { - if ( am.getAnnotationType().asElement().equals( AN_SQLACTION) ) - populateAnnotationImpl( sa, e, am); - } - } - - /** - * Process a single element annotated with @SQLActions (which simply takes - * an array of @SQLAction as a way to associate more than one SQLAction with - * a single program element).. - */ - void processSQLActions( Element e) - { - for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) - { - if ( am.getAnnotationType().asElement().equals( AN_SQLACTIONS) ) - { - SQLActionsImpl sas = new SQLActionsImpl(); - populateAnnotationImpl( sas, e, am); - for ( SQLAction sa : sas.value() ) - putSnippet( sa, (Snippet)sa); - } - } - } - - static enum UDTKind { BASE, MAPPED } - - /** - * Process a single element annotated with @BaseUDT or @MappedUDT, as - * indicated by the UDTKind k. - */ - void processUDT( Element e, UDTKind k) - { - /* - * The allowed target type for the UDT annotations is TYPE, which can - * be a class, interface (including annotation type) or enum, of which - * only CLASS is valid here. If it is anything else, just return, as - * that can only mean a source error prevented the compiler making sense - * of it, and the compiler will have its own messages about that. - */ - switch ( e.getKind() ) - { - case CLASS: - break; - case ANNOTATION_TYPE: - case ENUM: - case INTERFACE: - msg( Kind.ERROR, e, "A pljava UDT must be a class"); - default: - return; - } - Set mods = e.getModifiers(); - if ( ! mods.contains( Modifier.PUBLIC) ) - { - msg( Kind.ERROR, e, "A pljava UDT must be public"); - } - if ( mods.contains( Modifier.ABSTRACT) ) - { - msg( Kind.ERROR, e, "A pljava UDT must not be abstract"); - } - if ( ! ((TypeElement)e).getNestingKind().equals( - NestingKind.TOP_LEVEL) ) - { - if ( ! mods.contains( Modifier.STATIC) ) - { - msg( Kind.ERROR, e, - "When nested, a pljava UDT must be static (not inner)"); - } - for ( Element ee = e; null != ( ee = ee.getEnclosingElement() ); ) - { - if ( ! ee.getModifiers().contains( Modifier.PUBLIC) ) - msg( Kind.ERROR, ee, - "A pljava UDT must not have a non-public " + - "enclosing class"); - if ( ((TypeElement)ee).getNestingKind().equals( - NestingKind.TOP_LEVEL) ) - break; - } - } - - switch ( k ) - { - case BASE: - BaseUDTImpl bu = getSnippet( e, BaseUDTImpl.class); - if ( null == bu ) - { - bu = new BaseUDTImpl( (TypeElement)e); - putSnippet( e, bu); - } - for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) - { - if ( am.getAnnotationType().asElement().equals( AN_BASEUDT) ) - populateAnnotationImpl( bu, e, am); - } - bu.registerFunctions(); - break; - - case MAPPED: - MappedUDTImpl mu = getSnippet( e, MappedUDTImpl.class); - if ( null == mu ) - { - mu = new MappedUDTImpl( (TypeElement)e); - putSnippet( e, mu); - } - for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) - { - if ( am.getAnnotationType().asElement().equals( AN_MAPPEDUDT) ) - populateAnnotationImpl( mu, e, am); - } - break; - } - } - - ExecutableElement huntFor(List ees, String name, - boolean isStatic, TypeMirror retType, TypeMirror... paramTypes) - { - ExecutableElement quarry = null; -hunt: for ( ExecutableElement ee : ees ) - { - if ( null != name && ! ee.getSimpleName().contentEquals( name) ) - continue; - if ( ee.isVarArgs() ) - continue; - if ( null != retType - && ! 
typu.isSameType( ee.getReturnType(), retType) ) - continue; - List pts = - ((ExecutableType)ee.asType()).getParameterTypes(); - if ( pts.size() != paramTypes.length ) - continue; - for ( int i = 0; i < paramTypes.length; ++i ) - if ( ! typu.isSameType( pts.get( i), paramTypes[i]) ) - continue hunt; - Set mods = ee.getModifiers(); - if ( ! mods.contains( Modifier.PUBLIC) ) - continue; - if ( isStatic && ! mods.contains( Modifier.STATIC) ) - continue; - if ( null == quarry ) - quarry = ee; - else - { - msg( Kind.ERROR, ee, - "Found more than one candidate " + - (null == name ? "constructor" : (name + " method"))); - } - } - return quarry; - } - - /** - * Process a single element annotated with @Function. After checking that - * it has the right modifiers to be called via pljava, analyze its type - * information and annotations and register an appropriate SQL code snippet. - */ - void processFunction( Element e) - { - /* - * METHOD is the only target type allowed for the Function annotation, - * so the only way for e to be anything else is if some source error has - * prevented the compiler making sense of it. In that case just return - * silently on the assumption that the compiler will have its own - * message about the true problem. - */ - if ( ! ElementKind.METHOD.equals( e.getKind()) ) - return; - - Set mods = e.getModifiers(); - if ( ! mods.contains( Modifier.PUBLIC) ) - { - msg( Kind.ERROR, e, "A pljava function must be public"); - } - - for ( Element ee = e; null != ( ee = ee.getEnclosingElement() ); ) - { - if ( ElementKind.CLASS.equals( ee.getKind()) ) - { - if ( ! ee.getModifiers().contains( Modifier.PUBLIC) ) - msg( Kind.ERROR, ee, - "A pljava function must not have a non-public " + - "enclosing class"); - if ( ((TypeElement)ee).getNestingKind().equals( - NestingKind.TOP_LEVEL) ) - break; - } - } - - FunctionImpl f = getSnippet( e, FunctionImpl.class); - if ( null == f ) - { - f = new FunctionImpl( (ExecutableElement)e); - putSnippet( e, f); - } - for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) - { - if ( am.getAnnotationType().asElement().equals( AN_FUNCTION) ) - populateAnnotationImpl( f, e, am); - } - } - - /** - * Populate an array of specified type from an annotation value - * representing an array. - * - * AnnotationValue's getValue() method returns Object, where the - * object is known to be an instance of one of a small set of classes. - * Populating an array when that value represents one is a common - * operation, so it is factored out here. - */ - static T[] avToArray( Object o, Class k) - { - boolean isEnum = k.isEnum(); - - @SuppressWarnings({"unchecked"}) - List vs = (List)o; - - @SuppressWarnings({"unchecked"}) - T[] a = (T[])Array.newInstance( k, vs.size()); - - int i = 0; - for ( AnnotationValue av : vs ) - { - Object v = getValue( av); - if ( isEnum ) - v = Enum.valueOf( k.asSubclass( Enum.class), - ((VariableElement)v).getSimpleName().toString()); - a[i++] = k.cast( v); - } - return a; - } - - /** - * Abstract superclass for synthetic implementations of annotation - * interfaces; these can be populated with element-value pairs from - * an AnnotationMirror and then used in the natural way for access to - * the values. Each subclass of this should implement the intended - * annotation interface, and should also have a - * setFoo(Object,boolean,Element) method for each foo() method in the - * interface. 
Rather than longwindedly using the type system to enforce - * that the needed setter methods are all there, they will be looked - * up using reflection. - */ - class AbstractAnnotationImpl implements Annotation - { - public Class annotationType() - { - throw new UnsupportedOperationException(); - } - - /** - * Supply the required implementor() method for those subclasses - * that will implement {@link Snippet}. - */ - public String implementor() { return _implementor; } - - String _implementor = defaultImplementor; - String _comment; - - public void setImplementor( Object o, boolean explicit, Element e) - { - if ( explicit ) - _implementor = "".equals( o) ? null : (String)o; - } - - /** - * Use from characterize() in any subclass implementing Snippet. - */ - protected String[] augmentRequires( String req[], String imp) - { - if ( null == imp ) - return req; - String[] newreq = new String [ 1 + req.length ]; - System.arraycopy( req, 0, newreq, 0, req.length); - newreq[req.length] = imp; - return newreq; - } - - public String comment() { return _comment; } - - public void setComment( Object o, boolean explicit, Element e) - { - if ( explicit ) - { - _comment = (String)o; - if ( "".equals( _comment) ) - _comment = null; - } - else - _comment = ((Commentable)this).derivedComment( e); - } - - public String derivedComment( Element e) - { - String dc = elmu.getDocComment( e); - if ( null == dc ) - return null; - return firstSentence( dc); - } - - public String firstSentence( String s) - { - BreakIterator bi = BreakIterator.getSentenceInstance( loca); - bi.setText( s); - int start = bi.first(); - int end = bi.next(); - if ( BreakIterator.DONE == end ) - return null; - return s.substring( start, end).trim(); - } - } - - /** - * Populate an AbstractAnnotationImpl-derived Annotation implementation - * from the element-value pairs in an AnnotationMirror. For each element - * foo in the annotation interface, the implementation is assumed to have - * a method setFoo(Object o, boolean explicit, element e) where o is the - * element's value as obtained from AnnotationValue.getValue(), explicit - * indicates whether the element was explicitly present in the annotation - * or filled in from a default value, and e is the element carrying the - * annotation (chiefly for use as a location hint in diagnostic messages). - * - * Some of the annotation implementations below will leave certain elements - * null if they were not given explicit values, in order to have a clear - * indication that they were defaulted, even though that is not the way - * normal annotation objects behave. - * - * If a setFoo(Object o, boolean explicit, element e) method is not found - * but there is an accessible field _foo it will be set directly, but only - * if the value was explicitly present in the annotation or the field value - * is null. By this convention, an implementation can declare a field - * initially null and let its default value be filled in from what the - * annotation declares, or initially some non-null value distinct from - * possible annotation values, and be able to tell whether it was explicitly - * set. Note that a field of primitive type will never be seen as null. 
- */ - void populateAnnotationImpl( - AbstractAnnotationImpl inst, Element e, AnnotationMirror am) - { - Map explicit = - am.getElementValues(); - Map defaulted = - elmu.getElementValuesWithDefaults( am); - - // Astonishingly, even though JLS3 9.7 clearly says "annotations must - // contain an element-value pair for every element of the corresponding - // annotation type, except for those elements with default values, or a - // compile-time error occurs" - in Sun 1.6.0_39 javac never flags - // the promised error, and instead allows us to NPE on something that - // ought to be guaranteed to be there! >:[ - // - // If you want something done right, you have to do it yourself.... - // - - Element anne = am.getAnnotationType().asElement(); - List keys = methodsIn( anne.getEnclosedElements()); - for ( ExecutableElement k : keys ) - if ( ! defaulted.containsKey( k) ) - msg( Kind.ERROR, e, am, - "annotation missing required element \"%s\"", - k.getSimpleName()); - - for ( - Map.Entry me - : defaulted.entrySet() - ) - { - ExecutableElement k = me.getKey(); - AnnotationValue av = me.getValue(); - boolean isExplicit = explicit.containsKey( k); - String name = k.getSimpleName().toString(); - Class kl = inst.getClass(); - try - { - Object v = getValue( av); - kl.getMethod( // let setter for foo() be setFoo() - "set"+name.substring( 0, 1).toUpperCase() + - name.substring( 1), - Object.class, boolean.class, Element.class) - .invoke(inst, v, isExplicit, e); - } - catch (AnnotationValueException ave) - { - msg( Kind.ERROR, e, am, - "unresolved value for annotation member \"%s\"" + - " (check for missing/misspelled import, etc.)", - name); - } - catch (NoSuchMethodException nsme) - { - Object v = getValue( av); - try - { - Field f = kl.getField( "_"+name); - Class fkl = f.getType(); - if ( ! isExplicit && null != f.get( inst) ) - continue; - if ( fkl.isArray() ) - { - try { - f.set( inst, avToArray( v, fkl.getComponentType())); - } - catch (AnnotationValueException ave) - { - msg( Kind.ERROR, e, am, - "unresolved value for an element of annotation" + - " member \"%s\" (check for missing/misspelled" + - " import, etc.)", - name); - } - } - else if ( fkl.isEnum() ) - f.set( inst, Enum.valueOf( fkl.asSubclass( Enum.class), - ((VariableElement)v).getSimpleName().toString())); - else - f.set( inst, v); - nsme = null; - } - catch (NoSuchFieldException nsfe) { } - catch (IllegalAccessException iae) { } - if ( null != nsme ) - throw new RuntimeException( - "Incomplete implementation in annotation processor", - nsme); - } - catch (IllegalAccessException iae) - { - throw new RuntimeException( - "Incorrect implementation of annotation processor", iae); - } - catch (InvocationTargetException ite) - { - String msg = ite.getCause().getMessage(); - msg( Kind.ERROR, e, am, av, "%s", msg); - } - } - } - - // It could be nice to have another annotation-driven tool that could just - // generate these implementations of some annotation types.... 
- - class SQLTypeImpl extends AbstractAnnotationImpl implements SQLType - { - public String value() { return _value; } - public String[] defaultValue() { return _defaultValue; } - - String _value; - String[] _defaultValue; - - public void setValue( Object o, boolean explicit, Element e) - { - if ( explicit ) - _value = (String)o; - } - - public void setDefaultValue( Object o, boolean explicit, Element e) - { - if ( explicit ) - _defaultValue = avToArray( o, String.class); - } - } - - class SQLActionsImpl extends AbstractAnnotationImpl implements SQLActions - { - public SQLAction[] value() { return _value; } - - SQLAction[] _value; - - public void setValue( Object o, boolean explicit, Element e) - { - AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); - _value = new SQLAction [ ams.length ]; - int i = 0; - for ( AnnotationMirror am : ams ) - { - SQLActionImpl a = new SQLActionImpl(); - populateAnnotationImpl( a, e, am); - _value [ i++ ] = a; - } - } - } - - class SQLActionImpl - extends AbstractAnnotationImpl - implements SQLAction, Snippet - { - public String[] install() { return _install; } - public String[] remove() { return _remove; } - public String[] provides() { return _provides; } - public String[] requires() { return _requires; } - - public String[] deployStrings() { return _install; } - public String[] undeployStrings() { return _remove; } - - public String[] _install; - public String[] _remove; - public String[] _provides; - public String[] _requires; - - public boolean characterize() - { - _requires = augmentRequires( _requires, implementor()); - return true; - } - } - - class TriggerImpl - extends AbstractAnnotationImpl - implements Trigger, Snippet, Commentable - { - public String[] arguments() { return _arguments; } - public Event[] events() { return _events; } - public String name() { return _name; } - public String schema() { return _schema; } - public String table() { return _table; } - public Scope scope() { return _scope; } - public Called called() { return _called; } - public String when() { return _when; } - public String[] columns() { return _columns; } - - public String[] provides() { return new String[0]; } - public String[] requires() { return new String[0]; } - /* Trigger is a Snippet but doesn't directly participate in tsort */ - - public String[] _arguments; - public Event[] _events; - public String _name; - public String _schema; - public String _table; - public Scope _scope; - public Called _called; - public String _when; - public String[] _columns; - - FunctionImpl func; - AnnotationMirror origin; - - TriggerImpl( FunctionImpl f, AnnotationMirror am) - { - func = f; - origin = am; - } - - public boolean characterize() - { - if ( Scope.ROW.equals( _scope) ) - { - for ( Event e : _events ) - if ( Event.TRUNCATE.equals( e) ) - msg( Kind.ERROR, func.func, origin, - "TRUNCATE trigger cannot be FOR EACH ROW"); - } - else if ( Called.INSTEAD_OF.equals( _called) ) - msg( Kind.ERROR, func.func, origin, - "INSTEAD OF trigger cannot be FOR EACH STATEMENT"); - - if ( ! "".equals( _when) && Called.INSTEAD_OF.equals( _called) ) - msg( Kind.ERROR, func.func, origin, - "INSTEAD OF triggers do not support WHEN conditions"); - - if ( 0 < _columns.length ) - { - if ( Called.INSTEAD_OF.equals( _called) ) - msg( Kind.ERROR, func.func, origin, - "INSTEAD OF triggers do not support lists of columns"); - boolean seen = false; - for ( Event e : _events ) - if ( Event.UPDATE.equals( e) ) - seen = true; - if ( ! 
seen ) - msg( Kind.ERROR, func.func, origin, - "Column list is meaningless unless UPDATE is a trigger event"); - } - - if ( "".equals( _name) ) - _name = TriggerNamer.synthesizeName( this); - return false; - } - - public String[] deployStrings() - { - StringBuilder sb = new StringBuilder(); - sb.append( "CREATE TRIGGER ").append( name()).append( "\n\t"); - switch ( called() ) - { - case BEFORE: sb.append( "BEFORE " ); break; - case AFTER: sb.append( "AFTER " ); break; - case INSTEAD_OF: sb.append( "INSTEAD OF "); break; - } - int s = _events.length; - for ( Event e : _events ) - { - sb.append( e.toString()); - if ( Event.UPDATE.equals( e) && 0 < _columns.length ) - { - sb.append( " OF "); - int cs = _columns.length; - for ( String c : _columns ) - { - sb.append( c); - if ( 0 < -- cs ) - sb.append( ", "); - } - } - if ( 0 < -- s ) - sb.append( " OR "); - } - sb.append( "\n\tON "); - if ( ! "".equals( schema()) ) - sb.append( schema()).append( '.'); - sb.append( table()).append( "\n\tFOR EACH "); - sb.append( scope().toString()); - if ( ! "".equals( _when) ) - sb.append( "\n\tWHEN ").append( _when); - sb.append( "\n\tEXECUTE PROCEDURE "); - func.appendNameAndParams( sb, false); - sb.setLength( sb.length() - 1); // drop closing ) - s = _arguments.length; - for ( String a : _arguments ) - { - sb.append( "\n\t").append( DDRWriter.eQuote( a)); - if ( 0 < -- s ) - sb.append( ','); - } - sb.append( ')'); - - String comm = comment(); - if ( null == comm ) - return new String[] { sb.toString() }; - - return new String[] { - sb.toString(), - "COMMENT ON TRIGGER " + name() + " ON " + - ( "".equals( schema()) ? "" : ( schema() + '.' ) ) + table() + - "\nIS " + - DDRWriter.eQuote( comm) - }; - } - - public String[] undeployStrings() - { - StringBuilder sb = new StringBuilder(); - sb.append( "DROP TRIGGER ").append( name()).append( "\n\tON "); - if ( ! 
"".equals( schema()) ) - sb.append( schema()).append( '.'); - sb.append( table()); - return new String[] { sb.toString() }; - } - } - - class FunctionImpl - extends AbstractAnnotationImpl - implements Function, Snippet, Commentable - { - public String type() { return _type; } - public String name() { return _name; } - public String schema() { return _schema; } - public OnNullInput onNullInput() { return _onNullInput; } - public Security security() { return _security; } - public Effects effects() { return _effects; } - public Trust trust() { return _trust; } - public boolean leakproof() { return _leakproof; } - public int cost() { return _cost; } - public int rows() { return _rows; } - public String[] settings() { return _settings; } - public String[] provides() { return _provides; } - public String[] requires() { return _requires; } - public Trigger[] triggers() { return _triggers; } - - ExecutableElement func; - - public String _type; - public String _name; - public String _schema; - public OnNullInput _onNullInput; - public Security _security; - public Effects _effects; - public Trust _trust; - public Boolean _leakproof; - int _cost; - int _rows; - public String[] _settings; - public String[] _provides; - public String[] _requires; - Trigger[] _triggers; - - boolean complexViaInOut = false; - boolean setof = false; - TypeMirror setofComponent = null; - boolean trigger = false; - - FunctionImpl(ExecutableElement e) - { - func = e; - } - - public void setCost( Object o, boolean explicit, Element e) - { - _cost = ((Integer)o).intValue(); - if ( _cost < 0 && explicit ) - throw new IllegalArgumentException( "cost must be nonnegative"); - } - - public void setRows( Object o, boolean explicit, Element e) - { - _rows = ((Integer)o).intValue(); - if ( _rows < 0 && explicit ) - throw new IllegalArgumentException( "rows must be nonnegative"); - } - - public void setTriggers( Object o, boolean explicit, Element e) - { - AnnotationMirror[] ams = avToArray( o, AnnotationMirror.class); - _triggers = new Trigger [ ams.length ]; - int i = 0; - for ( AnnotationMirror am : ams ) - { - TriggerImpl ti = new TriggerImpl( this, am); - populateAnnotationImpl( ti, e, am); - _triggers [ i++ ] = ti; - } - } - - public boolean characterize() - { - if ( "".equals( _name) ) - _name = func.getSimpleName().toString(); - - Set mods = func.getModifiers(); - if ( ! mods.contains( Modifier.STATIC) ) - { - msg( Kind.ERROR, func, "A pljava function must be static"); - } - - TypeMirror ret = func.getReturnType(); - if ( ret.getKind().equals( TypeKind.ERROR) ) - { - msg( Kind.ERROR, func, - "Unable to resolve return type of function"); - return false; - } - - ExecutableType et = (ExecutableType)func.asType(); - List ptms = et.getParameterTypes(); - int arity = ptms.size(); - - if ( ! "".equals( type()) - && ret.getKind().equals( TypeKind.BOOLEAN) ) - { - complexViaInOut = true; - TypeMirror tm = ptms.get( arity - 1); - if ( tm.getKind().equals( TypeKind.ERROR) - // unresolved things seem assignable to anything - || ! typu.isSameType( tm, TY_RESULTSET) ) - { - msg( Kind.ERROR, func.getParameters().get( arity - 1), - "Last parameter of complex-type-returning function " + - "must be ResultSet"); - return false; - } - } - else if ( typu.isAssignable( typu.erasure( ret), TY_ITERATOR) ) - { - setof = true; - List pending = new LinkedList(); - pending.add( ret); - while ( ! 
pending.isEmpty() ) - { - TypeMirror tm = pending.remove( 0); - if ( typu.isSameType( typu.erasure( tm), TY_ITERATOR) ) - { - DeclaredType dt = (DeclaredType)tm; - List typeArgs = - dt.getTypeArguments(); - if ( 1 != typeArgs.size() ) - { - msg( Kind.ERROR, func, - "Need one type argument for Iterator " + - "return type"); - return false; - } - setofComponent = typeArgs.get( 0); - break; - } - else - { - pending.addAll( typu.directSupertypes( tm)); - } - } - if ( null == setofComponent ) - { - msg( Kind.ERROR, func, - "Failed to find setof component type"); - return false; - } - } - else if ( typu.isAssignable( ret, TY_RESULTSETPROVIDER) - || typu.isAssignable( ret, TY_RESULTSETHANDLE) ) - { - setof = true; - } - else if ( ret.getKind().equals( TypeKind.VOID) && 1 == arity ) - { - TypeMirror tm = ptms.get( 0); - if ( ! tm.getKind().equals( TypeKind.ERROR) - // unresolved things seem assignable to anything - && typu.isSameType( tm, TY_TRIGGERDATA) ) - { - trigger = true; - } - } - - if ( ! setof && -1 != rows() ) - msg( Kind.ERROR, func, - "ROWS specified on a function not returning SETOF"); - - if ( ! trigger && 0 != _triggers.length ) - msg( Kind.ERROR, func, - "a function with triggers needs void return and " + - "one TriggerData parameter"); - - /* - * Report any unmappable types now that could appear in - * deployStrings (return type or parameter types) ... so that the - * error messages won't be missing the source location, as they can - * with javac 7 throwing away symbol tables between rounds. - * Because the logic in deployStrings determining what to call - * getSQLType on is a bit fiddly, the simplest way to make all those - * calls here is just ... call deployStrings. - */ - deployStrings(); - - _requires = augmentRequires( _requires, implementor()); - - for ( Trigger t : triggers() ) - ((TriggerImpl)t).characterize(); - return true; - } - - /** - * Append SQL syntax for the function's name (schema-qualified if - * appropriate) and parameters, either with any defaults indicated - * (for use in CREATE FUNCTION) or without (for use in DROP FUNCTION). - * - * @param dflts Whether to include the defaults, if any. - */ - void appendNameAndParams( StringBuilder sb, boolean dflts) - { - if ( ! "".equals( schema()) ) - sb.append( schema()).append( '.'); - sb.append( name()).append( '('); - appendParams( sb, dflts); - // TriggerImpl relies on ) being the very last character - sb.append( ')'); - } - - void appendParams( StringBuilder sb, boolean dflts) - { - if ( ! trigger ) - { - ExecutableType et = (ExecutableType)func.asType(); - List tms = et.getParameterTypes(); - Iterator ves = - func.getParameters().iterator(); - if ( complexViaInOut ) - tms = tms.subList( 0, tms.size() - 1); - int s = tms.size(); - for ( TypeMirror tm : tms ) - { - VariableElement ve = ves.next(); - sb.append( "\n\t").append( ve.getSimpleName().toString()); - sb.append( ' '); - sb.append( tmpr.getSQLType( tm, ve, true, dflts)); - if ( 0 < -- s ) - sb.append( ','); - } - } - } - - void appendAS( StringBuilder sb) - { - Element e = func.getEnclosingElement(); - if ( ! e.getKind().equals( ElementKind.CLASS) ) - msg( Kind.ERROR, func, - "Somehow this method got enclosed by something other " + - "than a class"); - sb.append( e.toString()).append( '.'); - sb.append( trigger ? 
func.getSimpleName() : func.toString()); - } - - public String[] deployStrings() - { - ArrayList al = new ArrayList(); - StringBuilder sb = new StringBuilder(); - sb.append( "CREATE OR REPLACE FUNCTION "); - appendNameAndParams( sb, true); - sb.append( "\n\tRETURNS "); - if ( trigger ) - sb.append( "trigger"); - else - { - if ( setof ) - sb.append( "SETOF "); - if ( ! "".equals( type()) ) - sb.append( type()); - else if ( null != setofComponent ) - sb.append( tmpr.getSQLType( setofComponent, func)); - else if ( setof ) - sb.append( "RECORD"); - else - sb.append( tmpr.getSQLType( func.getReturnType(), func)); - } - sb.append( "\n\tLANGUAGE "); - if ( Trust.SANDBOXED.equals( trust()) ) - sb.append( nameTrusted); - else - sb.append( nameUntrusted); - sb.append( ' ').append( effects()); - if ( leakproof() ) - sb.append( " LEAKPROOF"); - sb.append( '\n'); - if ( OnNullInput.RETURNS_NULL.equals( onNullInput()) ) - sb.append( "\tRETURNS NULL ON NULL INPUT\n"); - if ( Security.DEFINER.equals( security()) ) - sb.append( "\tSECURITY DEFINER\n"); - if ( -1 != cost() ) - sb.append( "\tCOST ").append( cost()).append( '\n'); - if ( -1 != rows() ) - sb.append( "\tROWS ").append( rows()).append( '\n'); - for ( String s : settings() ) - sb.append( "\tSET ").append( s).append( '\n'); - sb.append( "\tAS '"); - appendAS( sb); - sb.append( '\''); - al.add( sb.toString()); - - String comm = comment(); - if ( null != comm ) - { - sb.setLength( 0); - sb.append( "COMMENT ON FUNCTION "); - appendNameAndParams( sb, false); - sb.append( "\nIS "); - sb.append( DDRWriter.eQuote( comm)); - al.add( sb.toString()); - } - - for ( Trigger t : triggers() ) - for ( String s : ((TriggerImpl)t).deployStrings() ) - al.add( s); - return al.toArray( new String [ al.size() ]); - } - - public String[] undeployStrings() - { - String[] rslt = new String [ 1 + triggers().length ]; - int i = rslt.length - 1; - for ( Trigger t : triggers() ) - for ( String s : ((TriggerImpl)t).undeployStrings() ) - rslt [ --i ] = s; - - StringBuilder sb = new StringBuilder(); - sb.append( "DROP FUNCTION "); - appendNameAndParams( sb, false); - rslt [ rslt.length - 1 ] = sb.toString(); - return rslt; - } - } - - static enum BaseUDTFunctionID - { - INPUT( "in", "cstring, oid, integer", null), - OUTPUT( "out", null, "cstring"), - RECEIVE( "recv", "internal, oid, integer", null), - SEND( "send", null, "bytea"); - BaseUDTFunctionID( String suffix, String param, String ret) - { - this.suffix = suffix; - this.param = param; - this.ret = ret; - } - private String suffix; - private String param; - private String ret; - String getSuffix() { return suffix; } - String getParam( BaseUDTImpl u) - { - if ( null != param ) - return param; - return u.qname; - } - String getRet( BaseUDTImpl u) - { - if ( null != ret ) - return ret; - return u.qname; - } - } - - class BaseUDTFunctionImpl extends FunctionImpl - { - BaseUDTFunctionImpl( - BaseUDTImpl ui, TypeElement te, BaseUDTFunctionID id) - { - super( null); - this.ui = ui; - this.te = te; - this.id = id; - - _type = id.getRet( ui); - _name = ui.name() + '_' + id.getSuffix(); - _schema = ui.schema(); - _cost = -1; - _rows = -1; - _onNullInput = OnNullInput.CALLED; - _security = Security.INVOKER; - _effects = Effects.VOLATILE; - _trust = Trust.SANDBOXED; - _leakproof = false; - _settings = new String[0]; - _triggers = new Trigger[0]; - _provides = _settings; - _requires = _settings; - } - - BaseUDTImpl ui; - TypeElement te; - BaseUDTFunctionID id; - - @Override - void appendParams( StringBuilder sb, boolean dflts) - { - 
sb.append( id.getParam( ui)); - } - - @Override - void appendAS( StringBuilder sb) - { - sb.append( "UDT[").append( te.toString()).append( "] "); - sb.append( id.name()); - } - - StringBuilder appendTypeOp( StringBuilder sb) - { - sb.append( id.name()).append( " = "); - if ( ! "".equals( schema()) ) - sb.append( schema()).append( '.'); - return sb.append( name()); - } - - @Override - public boolean characterize() - { - return false; - } - - public void setType( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "The type of a UDT function may not be changed"); - } - - public void setRows( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "The rows attribute of a UDT function may not be set"); - } - - public void setProvides( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "A UDT function does not have its own provides/requires"); - } - - public void setRequires( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "A UDT function does not have its own provides/requires"); - } - - public void setTriggers( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "A UDT function may not have associated triggers"); - } - - public void setImplementor( Object o, boolean explicit, Element e) - { - if ( explicit ) - msg( Kind.ERROR, e, - "A UDT function does not have its own implementor"); - } - - public String implementor() - { - return ui.implementor(); - } - - public String derivedComment( Element e) - { - String comm = super.derivedComment( e); - if ( null != comm ) - return comm; - return id.name() + " method for type " + ui.qname; - } - } - - abstract class AbstractUDTImpl - extends AbstractAnnotationImpl - implements Snippet, Commentable - { - public String name() { return _name; } - public String schema() { return _schema; } - public String[] provides() { return _provides; } - public String[] requires() { return _requires; } - - public String[] _provides; - public String[] _requires; - public String _name; - public String _schema; - - TypeElement tclass; - - String qname; - - AbstractUDTImpl(TypeElement e) - { - tclass = e; - - if ( ! typu.isAssignable( e.asType(), TY_SQLDATA) ) - { - msg( Kind.ERROR, e, "A pljava UDT must implement %s", - TY_SQLDATA); - } - - ExecutableElement niladicCtor = huntFor( - constructorsIn( tclass.getEnclosedElements()), null, false, - null); - - if ( null == niladicCtor ) - { - msg( Kind.ERROR, tclass, - "A pljava UDT must have a public no-arg constructor"); - } - } - - protected void setQname() - { - if ( "".equals( _name) ) - _name = tclass.getSimpleName().toString(); - - if ( "".equals( _schema) ) - qname = _name; - else - qname = _schema + "." 
+ _name; - } - - protected void addComment( ArrayList al) - { - String comm = comment(); - if ( null == comm ) - return; - al.add( "COMMENT ON TYPE " + qname + "\nIS " + - DDRWriter.eQuote( comm)); - } - } - - class MappedUDTImpl - extends AbstractUDTImpl - implements MappedUDT - { - public String[] structure() { return _structure; } - - String[] _structure; - - public void setStructure( Object o, boolean explicit, Element e) - { - if ( explicit ) - _structure = avToArray( o, String.class); - } - - MappedUDTImpl(TypeElement e) - { - super( e); - } - - public boolean characterize() - { - setQname(); - - _requires = augmentRequires( _requires, implementor()); - - return true; - } - - public String[] deployStrings() - { - ArrayList al = new ArrayList(); - if ( null != structure() ) - { - StringBuilder sb = new StringBuilder(); - sb.append( "CREATE TYPE ").append( qname).append( " AS ("); - int i = structure().length; - for ( String s : structure() ) - sb.append( "\n\t").append( s).append( - ( 0 < -- i ) ? ',' : '\n'); - sb.append( ')'); - al.add( sb.toString()); - } - al.add( "SELECT sqlj.add_type_mapping(" + - DDRWriter.eQuote( qname) + ", " + - DDRWriter.eQuote( tclass.toString()) + ')'); - addComment( al); - return al.toArray( new String [ al.size() ]); - } - - public String[] undeployStrings() - { - ArrayList al = new ArrayList(); - al.add( "SELECT sqlj.drop_type_mapping(" + - DDRWriter.eQuote( qname) + ')'); - if ( null != structure() ) - al.add( "DROP TYPE " + qname); - return al.toArray( new String [ al.size() ]); - } - } - - class BaseUDTImpl - extends AbstractUDTImpl - implements BaseUDT - { - public String typeModifierInput() { return _typeModifierInput; } - public String typeModifierOutput() { return _typeModifierOutput; } - public String analyze() { return _analyze; } - public int internalLength() { return _internalLength; } - public boolean passedByValue() { return _passedByValue; } - public Alignment alignment() { return _alignment; } - public Storage storage() { return _storage; } - public String like() { return _like; } - public char category() { return _category; } - public boolean preferred() { return _preferred; } - public String defaultValue() { return _defaultValue; } - public String element() { return _element; } - public char delimiter() { return _delimiter; } - public boolean collatable() { return _collatable; } - - BaseUDTFunctionImpl in, out, recv, send; - - public String _typeModifierInput; - public String _typeModifierOutput; - public String _analyze; - int _internalLength; - public Boolean _passedByValue; - Alignment _alignment; - Storage _storage; - public String _like; - char _category; - public Boolean _preferred; - String _defaultValue; - public String _element; - char _delimiter; - public Boolean _collatable; - - boolean lengthExplicit; - boolean alignmentExplicit; - boolean storageExplicit; - boolean categoryExplicit; - boolean delimiterExplicit; - - public void setInternalLength( Object o, boolean explicit, Element e) - { - _internalLength = (Integer)o; - lengthExplicit = explicit; - } - - public void setAlignment( Object o, boolean explicit, Element e) - { - _alignment = Alignment.valueOf( - ((VariableElement)o).getSimpleName().toString()); - alignmentExplicit = explicit; - } - - public void setStorage( Object o, boolean explicit, Element e) - { - _storage = Storage.valueOf( - ((VariableElement)o).getSimpleName().toString()); - categoryExplicit = explicit; - } - - public void setDefaultValue( Object o, boolean explicit, Element e) - { - if ( explicit ) - 
_defaultValue = (String)o; // "" could be a real default value - } - - public void setCategory( Object o, boolean explicit, Element e) - { - _category = (Character)o; - categoryExplicit = explicit; - } - - public void setDelimiter( Object o, boolean explicit, Element e) - { - _delimiter = (Character)o; - delimiterExplicit = explicit; - } - - BaseUDTImpl(TypeElement e) - { - super( e); - } - - void registerFunctions() - { - setQname(); - - ExecutableElement instanceReadSQL = huntFor( - methodsIn( tclass.getEnclosedElements()), "readSQL", false, - TY_VOID, TY_SQLINPUT, TY_STRING); - - ExecutableElement instanceWriteSQL = huntFor( - methodsIn( tclass.getEnclosedElements()), "writeSQL", false, - TY_VOID, TY_SQLOUTPUT); - - ExecutableElement instanceToString = huntFor( - methodsIn( tclass.getEnclosedElements()), "toString", false, - TY_STRING); - - ExecutableElement staticParse = huntFor( - methodsIn( tclass.getEnclosedElements()), "parse", true, - tclass.asType(), TY_STRING, TY_STRING); - - if ( null == staticParse ) - { - msg( Kind.ERROR, tclass, - "A pljava UDT must have a public static " + - "parse(String,String) method that returns the UDT"); - } - else - { - in = new BaseUDTFunctionImpl( - this, tclass, BaseUDTFunctionID.INPUT); - putSnippet( staticParse, in); - } - - out = new BaseUDTFunctionImpl( - this, tclass, BaseUDTFunctionID.OUTPUT); - putSnippet( null != instanceToString ? instanceToString : out, out); - - recv = new BaseUDTFunctionImpl( - this, tclass, BaseUDTFunctionID.RECEIVE); - putSnippet( null != instanceReadSQL ? instanceReadSQL : recv, recv); - - send = new BaseUDTFunctionImpl( - this, tclass, BaseUDTFunctionID.SEND); - putSnippet( null != instanceWriteSQL ? instanceWriteSQL : send, - send); - } - - public boolean characterize() - { - if ( "".equals( typeModifierInput()) - && ! "".equals( typeModifierOutput()) ) - msg( Kind.ERROR, tclass, - "UDT typeModifierOutput useless without typeModifierInput"); - - if ( 1 > internalLength() && -1 != internalLength() ) - msg( Kind.ERROR, tclass, - "UDT internalLength must be positive, or -1 for varying"); - - if ( passedByValue() && - ( 8 < internalLength() || -1 == internalLength() ) ) - msg( Kind.ERROR, tclass, - "Only a UDT of fixed length <= 8 can be passed by value"); - - if ( -1 == internalLength() && - -1 == alignment().compareTo( Alignment.INT4) ) - msg( Kind.ERROR, tclass, - "A variable-length UDT must have alignment at least INT4"); - - if ( -1 != internalLength() && Storage.PLAIN != storage() ) - msg( Kind.ERROR, tclass, - "Storage for a fixed-length UDT must be PLAIN"); - - // see PostgreSQL backend/commands/typecmds.c "must be simple ASCII" - if ( 32 > category() || category() > 126 ) - msg( Kind.ERROR, tclass, - "UDT category must be a printable ASCII character"); - - _requires = augmentRequires( _requires, implementor()); - - return true; - } - - public String[] deployStrings() - { - ArrayList al = new ArrayList(); - al.add( "CREATE TYPE " + qname); - - al.addAll( Arrays.asList( in.deployStrings())); - al.addAll( Arrays.asList( out.deployStrings())); - al.addAll( Arrays.asList( recv.deployStrings())); - al.addAll( Arrays.asList( send.deployStrings())); - - StringBuilder sb = new StringBuilder(); - sb.append( "CREATE TYPE ").append( qname).append( " (\n\t"); - in.appendTypeOp( sb).append( ",\n\t"); - out.appendTypeOp( sb).append( ",\n\t"); - recv.appendTypeOp( sb).append( ",\n\t"); - send.appendTypeOp( sb); - - if ( ! 
"".equals( typeModifierInput()) ) - sb.append( ",\n\tTYPMOD_IN = ").append( typeModifierInput()); - - if ( ! "".equals( typeModifierOutput()) ) - sb.append( ",\n\tTYPMOD_OUT = ").append( typeModifierOutput()); - - if ( ! "".equals( analyze()) ) - sb.append( ",\n\tANALYZE = ").append( typeModifierOutput()); - - if ( lengthExplicit || "".equals( like()) ) - sb.append( ",\n\tINTERNALLENGTH = ").append( - -1 == internalLength() ? "VARIABLE" - : String.valueOf( internalLength())); - - if ( passedByValue() ) - sb.append( ",\n\tPASSEDBYVALUE"); - - if ( alignmentExplicit || "".equals( like()) ) - sb.append( ",\n\tALIGNMENT = ").append( alignment().name()); - - if ( storageExplicit || "".equals( like()) ) - sb.append( ",\n\tSTORAGE = ").append( storage().name()); - - if ( ! "".equals( like()) ) - sb.append( ",\n\tLIKE = ").append( like()); - - if ( categoryExplicit ) - sb.append( ",\n\tCATEGORY = '").append( - DDRWriter.eQuote( String.valueOf( category()))); - - if ( preferred() ) - sb.append( ",\n\tPREFERRED = true"); - - if ( null != defaultValue() ) - sb.append( ",\n\tDEFAULT = ").append( - DDRWriter.eQuote( defaultValue())); - - if ( ! "".equals( element()) ) - sb.append( ",\n\tELEMENT = ").append( element()); - - if ( delimiterExplicit ) - sb.append( ",\n\tDELIMITER = '").append( - DDRWriter.eQuote( String.valueOf( delimiter()))); - - if ( collatable() ) - sb.append( ",\n\tCOLLATABLE = true"); - - al.add( sb.append( "\n)").toString()); - addComment( al); - return al.toArray( new String [ al.size() ]); - } - - public String[] undeployStrings() - { - return new String[] - { - "DROP TYPE " + qname + " CASCADE" - }; - } - } - - /** - * Provides the default mappings from Java types to SQL types. - */ - class TypeMapper - { - ArrayList, String>> protoMappings; - ArrayList> finalMappings; - - TypeMapper() - { - protoMappings = new ArrayList, String>>(); - - // Primitives - // - this.addMap(boolean.class, "boolean"); - this.addMap(Boolean.class, "boolean"); - this.addMap(byte.class, "smallint"); - this.addMap(Byte.class, "smallint"); - this.addMap(char.class, "smallint"); - this.addMap(Character.class, "smallint"); - this.addMap(double.class, "double precision"); - this.addMap(Double.class, "double precision"); - this.addMap(float.class, "real"); - this.addMap(Float.class, "real"); - this.addMap(int.class, "integer"); - this.addMap(Integer.class, "integer"); - this.addMap(long.class, "bigint"); - this.addMap(Long.class, "bigint"); - this.addMap(short.class, "smallint"); - this.addMap(Short.class, "smallint"); - - // Known common mappings - // - this.addMap(Number.class, "numeric"); - this.addMap(String.class, "varchar"); - this.addMap(java.util.Date.class, "timestamp"); - this.addMap(Timestamp.class, "timestamp"); - this.addMap(Time.class, "time"); - this.addMap(java.sql.Date.class, "date"); - this.addMap(BigInteger.class, "numeric"); - this.addMap(BigDecimal.class, "numeric"); - this.addMap(ResultSet.class, "record"); - this.addMap(Object.class, "\"any\""); - - this.addMap(byte[].class, "bytea"); - } - - /* - * What worked in Java 6 was to keep a list of Class -> sqltype - * mappings, and get TypeMirrors from the Classes at the time of trying - * to identify types (in the final, after-all-sources-processed round). - * Starting in Java 7, you get different TypeMirror instances in - * different rounds for the same types, so you can't match something - * seen in round 1 to something looked up in the final round. 
(However, - * you can match things seen in round 1 to things looked up prior to - * the first round, when init() is called and constructs the processor.) - * - * So, this method needs to be called at the end of round 1 (or at the - * end of every round, it just won't do anything but once), and at that - * point it will compute the list order and freeze a list of TypeMirrors - * to avoid looking up the Classes later and getting different - * mirrors. - * - * This should work as long as all the sources containg pljava - * annotations will be found in round 1. That would only not be the case - * if some other annotation processor is in use that could generate new - * sources with pljava annotations in them, requiring additional rounds. - * In the present state of things, that simply won't work. Java bug - * http://bugs.java.com/bugdatabase/view_bug.do?bug_id=8038455 might - * cover this, and promises a fix in Java 9, but who knows? - */ - private void workAroundJava7Breakage() - { - if ( null != finalMappings ) - return; // after the first round, it's too late! - - // Need to check more specific types before those they are - // assignable to by widening reference conversions, so a - // topological sort is in order. - // - List, String>>> vs = - new ArrayList, String>>>( - protoMappings.size()); - - for ( Map.Entry, String> me : protoMappings ) - vs.add( new Vertex, String>>( me)); - - for ( int i = vs.size(); i --> 1; ) - { - Vertex, String>> vi = vs.get( i); - Class ci = vi.payload.getKey(); - for ( int j = i; j --> 0; ) - { - Vertex, String>> vj = vs.get( j); - Class cj = vj.payload.getKey(); - boolean oij = ci.isAssignableFrom( cj); - boolean oji = cj.isAssignableFrom( ci); - if ( oji == oij ) - continue; // no precedence constraint between these two - if ( oij ) - vj.precede( vi); - else - vi.precede( vj); - } - } - - Queue, String>>> q = - new LinkedList, String>>>(); - for ( Vertex, String>> v : vs ) - if ( 0 == v.indegree ) - q.add( v); - - finalMappings = new ArrayList>( - protoMappings.size()); - protoMappings.clear(); - - while ( ! q.isEmpty() ) - { - Vertex, String>> v = q.remove(); - v.use( q); - Class k = v.payload.getKey(); - TypeMirror ktm; - if ( k.isPrimitive() ) - { - TypeKind tk = - TypeKind.valueOf( k.getName().toUpperCase()); - ktm = typu.getPrimitiveType( tk); - } - else - { - TypeElement te = - elmu.getTypeElement( k.getName()); - if ( null == te ) // can't find it -> not used in code? - { - msg( Kind.WARNING, - "Found no TypeElement for %s", k.getName()); - continue; // hope it wasn't one we'll need! - } - ktm = te.asType(); - } - finalMappings.add( - new AbstractMap.SimpleImmutableEntry( - ktm, v.payload.getValue())); - } - } - - /** - * Add a custom mapping from a Java class to an SQL type. - * - * @param k Class representing the Java type - * @param v String representing the SQL type to be used - */ - void addMap(Class k, String v) - { - if ( null != finalMappings ) - { - msg( Kind.ERROR, - "addMap(%s, %s)\n" + - "called after workAroundJava7Breakage", k.getName(), v); - return; - } - protoMappings.add( - new AbstractMap.SimpleImmutableEntry, String>( k, v)); - } - - /** - * Return the SQL type for the Java type represented by a TypeMirror, - * from an explicit annotation if present, otherwise by applying the - * default mappings. No default-value information is included in the - * string returned. It is assumed that a function return is being typed - * rather than a function parameter. - * - * @param tm Represents the type whose corresponding SQL type is wanted. 
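The ordering concern described in the comment above can be seen in a minimal, standalone sketch (not part of this patch; the class name and the two-entry list are invented for illustration): a more specific class must be consulted before any class it is assignable to, or the more general mapping wins.

import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class MappingOrderSketch
{
    public static void main(String[] args)
    {
        // Deliberately unsorted: Object precedes String.
        List<Map.Entry<Class<?>, String>> mappings = new ArrayList<>();
        mappings.add(new AbstractMap.SimpleImmutableEntry<Class<?>, String>(
            Object.class, "\"any\""));
        mappings.add(new AbstractMap.SimpleImmutableEntry<Class<?>, String>(
            String.class, "varchar"));

        // First assignable match wins, so a String would be typed "any" here;
        // after a topological sort, String is tested before Object and the
        // expected varchar mapping is chosen instead.
        for ( Map.Entry<Class<?>, String> e : mappings )
        {
            if ( e.getKey().isAssignableFrom(String.class) )
            {
                System.out.println(e.getValue()); // prints "any"
                break;
            }
        }
    }
}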
- * @param e Annotated element (chiefly for use as a location hint in - * diagnostic messages). - */ - String getSQLType(TypeMirror tm, Element e) - { - return getSQLType( tm, e, false, false); - } - - - /** - * Return the SQL type for the Java type represented by a TypeMirror, - * from an explicit annotation if present, otherwise by applying the - * default mappings. - * - * @param tm Represents the type whose corresponding SQL type is wanted. - * @param e Annotated element (chiefly for use as a location hint in - * diagnostic messages). - * @param contravariant Indicates that the element whose type is wanted - * is a function parameter and should be given the widest type that can - * be assigned to it. If false, find the narrowest type that a function - * return can be assigned to. - * @param withDefault Indicates whether any specified default value - * information should also be included in the "type" string returned. - */ - String getSQLType(TypeMirror tm, Element e, - boolean contravariant, boolean withDefault) - { - boolean array = false; - String rslt = null; - - String[] defaults = null; - - for ( AnnotationMirror am : elmu.getAllAnnotationMirrors( e) ) - { - if ( am.getAnnotationType().asElement().equals( AN_SQLTYPE) ) - { - SQLTypeImpl sti = new SQLTypeImpl(); - populateAnnotationImpl( sti, e, am); - rslt = sti.value(); - defaults = sti.defaultValue(); - } - } - - if ( tm.getKind().equals( TypeKind.ARRAY) ) - { - ArrayType at = ((ArrayType)tm); - if ( ! at.getComponentType().getKind().equals( TypeKind.BYTE) ) - { - array = true; - tm = at.getComponentType(); - // only for bytea[] should this ever still be an array - } - } - - if ( null != rslt ) - return typeWithDefault( rslt, array, defaults, withDefault); - - if ( tm.getKind().equals( TypeKind.VOID) ) - return "void"; // can't be a parameter type so no defaults apply - - if ( tm.getKind().equals( TypeKind.ERROR) ) - { - msg ( Kind.ERROR, e, - "Cannot determine mapping to SQL type for unresolved type"); - rslt = tm.toString(); - } - else - { - ArrayList> ms = finalMappings; - if ( contravariant ) - { - ms = (ArrayList>)ms.clone(); - Collections.reverse( ms); - } - for ( Map.Entry me : ms ) - { - TypeMirror ktm = me.getKey(); - if ( ktm instanceof PrimitiveType ) - { - if ( typu.isSameType( tm, ktm) ) - { - rslt = me.getValue(); - break; - } - } - else - { - boolean accept; - if ( contravariant ) - accept = typu.isAssignable( ktm, tm); - else - accept = typu.isAssignable( tm, ktm); - if ( accept ) - { - // don't compute a type of Object/"any" for - // a function return (just admit defeat instead) - if ( contravariant - || ! typu.isSameType( ktm, TY_OBJECT) ) - rslt = me.getValue(); - break; - } - } - } - } - - if ( null == rslt ) - { - msg( Kind.ERROR, e, - "No known mapping to an SQL type"); - rslt = tm.toString(); - } - - if ( array ) - rslt += "[]"; - - return typeWithDefault( rslt, array, defaults, withDefault); - } - - /** - * Given the matching SQL type already determined, return it with or - * without default-value information appended, as the caller desires. - * To ensure that the generated descriptor will be in proper form, the - * default values are emitted as properly-escaped string literals and - * then cast to the appropriate type. This approach will not work for - * defaults given as arbitrary SQL expressions, but covers the typical - * cases of simple literals and even anything that can be computed as - * a Java String constant expression (e.g. ""+Math.PI). 
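Under the scheme just described, a parameter default supplied through the PL/Java @SQLType annotation (whose value() and defaultValue() elements are read by this processor) is rendered as an escaped string literal cast back to the type, not as a bare SQL expression. A hypothetical sketch with invented class and method names:

import org.postgresql.pljava.annotation.Function;
import org.postgresql.pljava.annotation.SQLType;

public class DefaultsSketch
{
    /*
     * The generated parameter type string would be expected to take the form
     *   numeric DEFAULT CAST('3.141592653589793' AS numeric)
     */
    @Function
    public static double twice(
        @SQLType(value="numeric", defaultValue=""+Math.PI) double x)
    {
        return 2 * x;
    }
}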
- * - * @param rslt The bare SQL type string already determined - * @param array Whether the Java type was determined to be an array - * @param defaults Array (null if not present) of default value strings - * @param withDefault Whether to append the default information to the - * type. - */ - String typeWithDefault( - String rslt, boolean array, String[] defaults, boolean withDefault) - { - if ( null == defaults || ! withDefault ) - return rslt; - - int n = defaults.length; - if ( n != 1 ) - array = true; - else if ( ! array ) - array = arrayish.matcher( rslt).matches(); - - StringBuilder sb = new StringBuilder( rslt); - sb.append( " DEFAULT CAST("); - if ( array ) - sb.append( "ARRAY["); - if ( n != 1 ) - sb.append( "\n\t"); - for ( String s : defaults ) - { - sb.append( DDRWriter.eQuote( s)); - if ( 0 < -- n ) - sb.append( ",\n\t"); - } - if ( array ) - sb.append( ']'); - sb.append( " AS ").append( rslt).append( ')'); - return sb.toString(); - } - } - - // expression intended to match SQL types that are arrays - static final Pattern arrayish = - Pattern.compile( "(?si:(?:\\[\\s*\\d*\\s*\\]|ARRAY)\\s*)$"); - - /** - * Work around bizarre javac behavior that silently supplies an Error - * class in place of an attribute value for glaringly obvious source errors, - * instead of reporting them. - * @param av AnnotationValue to extract the value from - * @return The result of getValue unless {@code av} is an error placeholder - */ - static Object getValue( AnnotationValue av) - { - if ( "com.sun.tools.javac.code.Attribute.Error".equals( - av.getClass().getCanonicalName()) ) - throw new AnnotationValueException(); - return av.getValue(); - } -} - -/** - * Exception thrown when an expected annotation value is a compiler-internal - * Error class instead, which happens in some javac versions when the annotation - * value wasn't resolved because of a source error the compiler really should - * have reported. - */ -class AnnotationValueException extends RuntimeException { } - -/** - * A code snippet. May contain zero, one, or more complete SQL commands for - * each of deploying and undeploying. The commands contained in one Snippet - * will always be emitted in a fixed order. A collection of Snippets will be - * output in an order constrained by their provides and requires methods. - */ -interface Snippet -{ - /** - * An {@code } that will be used to wrap each command - * from this Snippet as an {@code }. If null, the - * commands will be emitted as plain {@code }s. - */ - public String implementor(); - /** - * Return an array of SQL commands (one complete command to a string) to - * be executed in order during deployment. - */ - public String[] deployStrings(); - /** - * Return an array of SQL commands (one complete command to a string) to - * be executed in order during undeployment. - */ - public String[] undeployStrings(); - /** - * Return an array of arbitrary labels considered "provided" by this - * Snippet. In generating the final order of the deployment descriptor file, - * this Snippet will come before any whose requires method returns any of - * the same labels. - */ - public String[] provides(); - /** - * Return an array of arbitrary labels considered "required" by this - * Snippet. In generating the final order of the deployment descriptor file, - * this Snippet will come after those whose provides method returns any of - * the same labels. 
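At the source level, these labels come from the provides/requires elements of the PL/Java annotations. A rough sketch (assuming the @SQLAction and @Function annotations of the PL/Java annotation API; the labels, table, and SQL commands are invented):

import org.postgresql.pljava.annotation.Function;
import org.postgresql.pljava.annotation.SQLAction;

/*
 * The SQLAction requires the label the function provides, so in the generated
 * deployment descriptor the CREATE FUNCTION command is emitted before the
 * INSERT, and the corresponding remove actions in the reverse order.
 */
@SQLAction(requires = "answer function",
    install = "INSERT INTO javatest.notes VALUES ('answer installed')",
    remove = "DELETE FROM javatest.notes WHERE note = 'answer installed'")
public class OrderingSketch
{
    @Function(provides = "answer function")
    public static int answer()
    {
        return 42;
    }
}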
- */ - public String[] requires(); - /** - * Method to be called on the final round, after all annotations' - * element/value pairs have been filled in, to compute any additional - * information derived from those values before deployStrings() or - * undeployStrings() can be called. - * @return true if this Snippet is standalone and should be scheduled and - * emitted based on provides/requires; false if something else will emit it. - */ - public boolean characterize(); -} - -interface Commentable -{ - public String comment(); - public void setComment( Object o, boolean explicit, Element e); - public String derivedComment( Element e); -} - -/** - * Vertex in a DAG, as used to put things in workable topological order - */ -class Vertex

-{
-	P payload;
-	int indegree;
-	List<Vertex<P>> adj;
-
-	Vertex( P payload)
-	{
-		this.payload = payload;
-		indegree = 0;
-		adj = new ArrayList<Vertex<P>>();
-	}
-
-	void precede( Vertex<P> v)
-	{
-		++ v.indegree;
-		adj.add( v);
-	}
-
-	void use( Collection<Vertex<P>> q)
-	{
-		for ( Vertex<P> v : adj )
-			if ( 0 == -- v.indegree )
-				q.add( v);
-	}
-
-	void use( Collection<Vertex<P>> q, Collection<Vertex<P>> vs)
-	{
-		for ( Vertex<P>
    v : adj ) - if ( 0 == -- v.indegree ) - { - vs.remove( v); - q.add( v); - } - } -} diff --git a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/Lexicals.java b/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/Lexicals.java index 10b36ae9..1b6e014a 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/Lexicals.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/Lexicals.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015- Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -11,7 +11,23 @@ */ package org.postgresql.pljava.sqlgen; +import java.io.InvalidObjectException; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectStreamException; +import java.io.Serializable; + +import java.nio.charset.Charset; +import static java.nio.charset.StandardCharsets.UTF_8; + +import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.InputMismatchException; + +import static java.util.Objects.requireNonNull; + +import javax.annotation.processing.Messager; +import javax.tools.Diagnostic.Kind; /** * A few useful SQL lexical definitions supplied as {@link Pattern} objects. @@ -19,60 +35,167 @@ * The idea is not to go overboard and reimplement an SQL lexer, but to * capture in one place the rules for those bits of SQL snippets that are * likely to be human-supplied in annotations and need to be checked for - * correctness when emitted into deployment descriptors. For starters, that - * means regular (not quoted, not Unicode escaped) identifiers. + * correctness when emitted into deployment descriptors. Identifiers, for a + * start. * * Supplied in the API module so they are available to {@code javac} to * compile and generate DDR when the rest of PL/Java is not necessarily * present. Of course backend code such as {@code SQLDeploymentDescriptor} * can also refer to these. */ -public interface Lexicals { +public abstract class Lexicals +{ + private Lexicals() { } // do not instantiate + + static + { + /* + * Reject a Java version affected by JDK-8309515 bug. + */ + Boolean hasBug = null; + Pattern p1 = Pattern.compile("(?.)(?.)"); + Pattern p2 = Pattern.compile("(?.)(?.)"); + Matcher m = p1.matcher("xy"); + + if ( m.matches() && 0 == m.start("a") ) + { + m.usePattern(p2); + if ( m.matches() ) + { + switch ( m.start("a") ) + { + case 0: + hasBug = true; + break; + case 1: + hasBug = false; + break; + } + } + } + + if ( null == hasBug ) + throw new ExceptionInInitializerError( + "Unexpected result while testing for bug JDK-8309515"); + + if ( hasBug ) + throw new ExceptionInInitializerError( + "Java bug JDK-8309515 affects this version of Java. PL/Java " + + "requires a Java version earlier than 20 (when the bug first " + + "appears) or recent enough to have had the bug fixed."); + } /** Allowed as the first character of a regular identifier by ISO. */ - Pattern ISO_REGULAR_IDENTIFIER_START = Pattern.compile( + public static final Pattern ISO_REGULAR_IDENTIFIER_START = Pattern.compile( "[\\p{Lu}\\p{Ll}\\p{Lt}\\p{Lm}\\p{Lo}\\p{Nl}]" ); /** Allowed as any non-first character of a regular identifier by ISO. 
*/ - Pattern ISO_REGULAR_IDENTIFIER_PART = Pattern.compile(String.format( + public static final Pattern ISO_REGULAR_IDENTIFIER_PART = + Pattern.compile(String.format( "[\\xb7\\p{Mn}\\p{Mc}\\p{Nd}\\p{Pc}\\p{Cf}%1$s]", ISO_REGULAR_IDENTIFIER_START.pattern() )); /** A complete regular identifier as allowed by ISO. */ - Pattern ISO_REGULAR_IDENTIFIER = Pattern.compile(String.format( - "%1$s%2$s*+", + public static final Pattern ISO_REGULAR_IDENTIFIER = + Pattern.compile(String.format( + "%1$s%2$s{0,127}+", ISO_REGULAR_IDENTIFIER_START.pattern(), ISO_REGULAR_IDENTIFIER_PART.pattern() )); /** A complete ISO regular identifier in a single capturing group. */ - Pattern ISO_REGULAR_IDENTIFIER_CAPTURING = Pattern.compile(String.format( + public static final Pattern ISO_REGULAR_IDENTIFIER_CAPTURING = + Pattern.compile(String.format( "(%1$s)", ISO_REGULAR_IDENTIFIER.pattern() )); + /** A complete delimited identifier as allowed by ISO. As it happens, this + * is also the form PostgreSQL uses for elements of a LIST_QUOTE-typed GUC. + */ + public static final Pattern ISO_DELIMITED_IDENTIFIER = Pattern.compile( + "\"(?:[^\"]|\"\"){1,128}+\"" + ); + + /** An ISO delimited identifier with a single capturing group that captures + * the content (which still needs to have "" replaced with " throughout). + * The capturing group is named {@code xd}. + */ + public static final Pattern ISO_DELIMITED_IDENTIFIER_CAPTURING = + Pattern.compile(String.format( + "\"(?(?:[^\"]|\"\"){1,128}+)\"" + )); + + /** The escape-specifier part of a Unicode delimited identifier or string. + * The escape character itself is in the capturing group named {@code uec}. + * The group can be absent, in which case \ should be used as the uec. + *

    + * What makes this implementable as a regular expression is that what + * precedes/follows {@code UESCAPE} is restricted to simple white space, + * not the more general {@code separator} (which can include nesting + * comments and therefore isn't a regular language). PostgreSQL enforces + * the same restriction, and a bit of language lawyering does confirm + * it's what ISO entails. ISO says "any {@code } may be followed by + * a {@code }", and enumerates the expansions of {@code }. + * While an entire {@code } or + * {@code } is a {@code }, the + * constituent pieces of one, like {@code UESCAPE} here, are not. + */ + public static final Pattern ISO_UNICODE_ESCAPE_SPECIFIER = + Pattern.compile( + "(?:\\p{IsWhite_Space}*+[Uu][Ee][Ss][Cc][Aa][Pp][Ee]"+ + "\\p{IsWhite_Space}*+'(?[^0-9A-Fa-f+'\"\\p{IsWhite_Space}])')?+" + ); + + /** A Unicode delimited identifier. The body is in capturing group + * {@code xui} and the escape character in group {@code uec}. The body + * still needs to have "" replaced with ", and {@code Unicode escape value}s + * decoded and replaced, and then it has to be verified to be no longer + * than 128 codepoints. + */ + public static final Pattern ISO_UNICODE_IDENTIFIER = + Pattern.compile(String.format( + "[Uu]&\"(?(?:[^\"]|\"\")++)\"%1$s", + ISO_UNICODE_ESCAPE_SPECIFIER.pattern() + )); + + /** A compilable pattern to match a {@code Unicode escape value}. + * A match should have one of three named capturing groups. If {@code cev}, + * substitute the {@code uec} itself. If {@code u4d} or {@code u6d}, + * substitute the codepoint represented by the hex digits. A match with none + * of those capturing groups indicates an ill-formed string. + *
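End to end, the decoding these patterns support looks like this (a sketch using the identifierFrom method added later in this file; the spelling U&"d\0061t\+000061" is the example used in the PostgreSQL documentation and denotes data):

import java.util.regex.Matcher;

import org.postgresql.pljava.sqlgen.Lexicals;
import org.postgresql.pljava.sqlgen.Lexicals.Identifier;

public class UnicodeIdentifierSketch
{
    public static void main(String[] args)
    {
        String s = "U&\"d\\0061t\\+000061\"";
        Matcher m = Lexicals.ISO_AND_PG_IDENTIFIER_CAPTURING.matcher(s);
        if ( m.matches() )
        {
            Identifier.Simple id = Lexicals.identifierFrom(m);
            System.out.println(id.nonFolded()); // prints: data
        }
    }
}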

    + * Maka a Pattern from this by supplying the right {@code uec}, so: + * {@code Pattern.compile(String.format(ISO_UNICODE_REPLACER, + * Pattern.quote(uec)));} + */ + public static final String ISO_UNICODE_REPLACER = + "%1$s(?:(?%1$s)|(?[0-9A-Fa-f]{4})|\\+(?[0-9A-Fa-f]{6}))"; + /** Allowed as the first character of a regular identifier by PostgreSQL * (PG 7.4 -). */ - Pattern PG_REGULAR_IDENTIFIER_START = Pattern.compile( + public static final Pattern PG_REGULAR_IDENTIFIER_START = Pattern.compile( "[A-Za-z\\P{ASCII}_]" // hasn't seen a change since PG 7.4 ); /** Allowed as any non-first character of a regular identifier by PostgreSQL * (PG 7.4 -). */ - Pattern PG_REGULAR_IDENTIFIER_PART = Pattern.compile(String.format( + public static final Pattern PG_REGULAR_IDENTIFIER_PART = + Pattern.compile(String.format( "[0-9$%1$s]", PG_REGULAR_IDENTIFIER_START.pattern() )); /** A complete regular identifier as allowed by PostgreSQL (PG 7.4 -). */ - Pattern PG_REGULAR_IDENTIFIER = Pattern.compile(String.format( + public static final Pattern PG_REGULAR_IDENTIFIER = + Pattern.compile(String.format( "%1$s%2$s*+", PG_REGULAR_IDENTIFIER_START.pattern(), PG_REGULAR_IDENTIFIER_PART.pattern() @@ -80,13 +203,15 @@ public interface Lexicals { /** A complete PostgreSQL regular identifier in a single capturing group. */ - Pattern PG_REGULAR_IDENTIFIER_CAPTURING = Pattern.compile(String.format( + public static final Pattern PG_REGULAR_IDENTIFIER_CAPTURING = + Pattern.compile(String.format( "(%1$s)", PG_REGULAR_IDENTIFIER.pattern() )); /** A regular identifier that satisfies both ISO and PostgreSQL rules. */ - Pattern ISO_AND_PG_REGULAR_IDENTIFIER = Pattern.compile(String.format( + public static final Pattern ISO_AND_PG_REGULAR_IDENTIFIER = + Pattern.compile(String.format( "(?:(?=%1$s)%2$s)(?:(?=%3$s)%4$s)*+", ISO_REGULAR_IDENTIFIER_START.pattern(), PG_REGULAR_IDENTIFIER_START.pattern(), @@ -95,12 +220,28 @@ public interface Lexicals { )); /** A regular identifier that satisfies both ISO and PostgreSQL rules, - * in a single capturing group. + * in a single capturing group named {@code i}. */ - Pattern ISO_AND_PG_REGULAR_IDENTIFIER_CAPTURING = Pattern.compile( - String.format( "(%1$s)", ISO_AND_PG_REGULAR_IDENTIFIER.pattern()) + public static final Pattern ISO_AND_PG_REGULAR_IDENTIFIER_CAPTURING = + Pattern.compile( + String.format( "(?%1$s)", ISO_AND_PG_REGULAR_IDENTIFIER.pattern()) ); + /** Pattern that matches any identifier valid by both ISO and PG rules, + * with the presence of named capturing groups indicating which kind it is: + * {@code i} for a regular identifier, {@code xd} for a delimited identifier + * (still needing "" replaced with "), or {@code xui} (with or without an + * explicit {@code uec} for a Unicode identifier (still needing "" to " and + * decoding of {@code Unicode escape value}s). + */ + public static final Pattern ISO_AND_PG_IDENTIFIER_CAPTURING = + Pattern.compile(String.format( + "%1$s|(?:%2$s)|(?:%3$s)", + ISO_AND_PG_REGULAR_IDENTIFIER_CAPTURING.pattern(), + ISO_DELIMITED_IDENTIFIER_CAPTURING.pattern(), + ISO_UNICODE_IDENTIFIER.pattern() + )); + /** An identifier by ISO SQL, PostgreSQL, and Java (not SQL at all) * rules. (Not called {@code REGULAR} because Java allows no other form of * identifier.) This restrictive form is the safest for identifiers being @@ -108,7 +249,8 @@ public interface Lexicals { * PL/Java might load, because through 1.4.3 PL/Java used the Java * identifier rules to recognize identifiers in deployment descriptors. 
*/ - Pattern ISO_PG_JAVA_IDENTIFIER = Pattern.compile(String.format( + public static final Pattern ISO_PG_JAVA_IDENTIFIER = + Pattern.compile(String.format( "(?:(?=%1$s)(?=\\p{%5$sStart})%2$s)(?:(?=%3$s)(?=\\p{%5$sPart})%4$s)*+", ISO_REGULAR_IDENTIFIER_START.pattern(), PG_REGULAR_IDENTIFIER_START.pattern(), @@ -116,4 +258,1330 @@ public interface Lexicals { PG_REGULAR_IDENTIFIER_PART.pattern(), "javaJavaIdentifier" )); + + /** An operator by PostgreSQL rules. The length limit ({@code NAMELEN - 1}) + * is not applied here. The match will not include a {@code -} followed by + * {@code -} or a {@code /} followed by {@code *}, and a multicharacter + * match will not end with {@code +} or {@code -} unless it also contains + * one of {@code ~ ! @ # % ^ & | ` ?}. + */ + public static final Pattern PG_OPERATOR = + Pattern.compile( + "(?:(?!--|/\\*)(?![-+][+]*+(?:$|[^-+*/<>=~!@#%^&|`?]))[-+*/<>=])++" + + "(?:[~!@#%^&|`?](?:(?!--|/\\*)[-+*/<>=~!@#%^&|`?])*+)?+" + + "|" + + "[~!@#%^&|`?](?:(?!--|/\\*)[-+*/<>=~!@#%^&|`?])*+" + + "|" + + "(?!--)[-+]" + ); + + /** A newline, in any of the various forms recognized by the Java regex + * engine, letting it handle the details. + */ + public static final Pattern NEWLINE = Pattern.compile( + "(?ms:$.{1,2}?(?:^|\\z))" // fewest of 1,2 chars between $ and ^ (or \z) + ); + + /** White space except newline, for any Java-recognized newline. + */ + public static final Pattern WHITESPACE_NO_NEWLINE = Pattern.compile( + "(?-s:(?=\\s).)" + ); + + /** The kind of comment that extends from -- to the end of the line. + * This pattern does not eat the newline (though the ISO production does). + */ + public static final Pattern SIMPLE_COMMENT = Pattern.compile("(?-s:--.*+)"); + + /** Most of the inside of a bracketed comment, defined in an odd way. + * It expects both characters of the /* introducer to have been consumed + * already. This pattern will then eat the whole comment including both + * closing characters if it encounters no nested comment; + * otherwise it will consume everything including the / of the nested + * introducer, but leaving the *, and the {@code } capturing group + * will be present in the result. That signals the caller to increment the + * nesting level, consume one * and invoke this pattern again. If the nested + * match succeeds (without again setting the {@code } group), the + * caller should then decrement the nest level and match this pattern again + * to consume the rest of the comment at the original level. + *

    + * This pattern leaves the * unconsumed upon finding a nested comment + * introducer as a way to end the repetition in the SEPARATOR pattern, as + * nothing the SEPARATOR pattern can match can begin with a *. + */ + public static final Pattern BRACKETED_COMMENT_INSIDE = Pattern.compile( + "(?:(?:[^*/]++|/(?!\\*)|\\*(?!/))*+(?:\\*/|(?/(?=\\*))))" + ); + + /** SQL's SEPARATOR, which can include any amount of whitespace, simple + * comments, or bracketed comments. This pattern will consume as much of all + * that as it can in one match. There are two capturing groups that might be + * set in a match result: {@code } if there was at least one newline + * matched among the whitespace (which needs to be known to get the + * continuation of string literals right), and {@code } if the + * start of a bracketed comment was encountered. + *

    + * In the {@code } case, the / of the comment introducer will have + * been consumed but the * will remain to consume (as described above + * for BRACKETED_COMMENT_INSIDE); the caller will need to increment a nest + * level, consume the *, and match BRACKETED_COMMENT_INSIDE to handle the + * nesting comment. Assuming that completes without another {@code } + * found, the level should be decremented and BRACKETED_COMMENT_INSIDE + * matched again to match the rest of the outer comment. When that completes + * (without a {@code }) at the outermost level, this pattern should be + * matched again to mop up any remaining SEPARATOR content. + */ + public static final Pattern SEPARATOR = + Pattern.compile(String.format( + "(?:(?:%1$s++|(?%2$s))++|%3$s|(?/(?=\\*)))++", + WHITESPACE_NO_NEWLINE.pattern(), + NEWLINE.pattern(), + SIMPLE_COMMENT.pattern() + )); + + /** + * Consume any SQL SEPARATOR at the beginning of {@code Matcher} + * m's current region. + *

    + * The region start is advanced to the character following any separator + * (or not at all, if no separator is found). + *
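The intended calling pattern, as a sketch (the input string and class name are invented; it assumes the Lexicals members declared in this patch):

import java.util.regex.Matcher;

import org.postgresql.pljava.sqlgen.Lexicals;
import org.postgresql.pljava.sqlgen.Lexicals.Identifier;

public class SeparatorSketch
{
    public static void main(String[] args)
    {
        String sql = "  /* leading comment */  \"My Table\"";
        Matcher m = Lexicals.ISO_AND_PG_IDENTIFIER_CAPTURING.matcher(sql);

        // Consume any leading whitespace/comments; the region start is
        // advanced past whatever was matched.
        Lexicals.separator(m, true);

        // separator() may have changed the matcher's pattern; set it back.
        m.usePattern(Lexicals.ISO_AND_PG_IDENTIFIER_CAPTURING);

        if ( m.lookingAt() )
        {
            Identifier.Simple id = Lexicals.identifierFrom(m);
            System.out.println(id.nonFolded()); // prints: My Table
        }
    }
}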

    + * The meaning of the return value is altered by the significant + * parameter: when significant is true (meaning the very presence + * or absence of a separator is significant at that point in the grammar), + * the result will be true if any separator was found, false otherwise. + * When significant is false, the result does not reveal whether + * any separator was found, but will be true only if a separator was found + * that includes at least one newline. That information is needed for the + * grammar of string and binary-string literals. + * @param m a {@code Matcher} whose current region should have any separator + * at the beginning consumed. The region start is advanced past any + * separator found. The {@code Pattern} associated with the {@code Matcher} + * may be changed. + * @param significant when true, the result should report whether any + * separator was found or not; when false, the result should report only + * whether a separator containing at least one newline was found, or not. + * @return whether any separator was found, or whether any separator + * containing a newline was found, as selected by significant. + * @throws InputMismatchException if an unclosed /*-style comment is found. + */ + public static boolean separator(Matcher m, boolean significant) + { + int state = 0; + int level = 0; + boolean result = false; + + loop: + for ( ;; ) + { + switch ( state ) + { + case 0: + m.usePattern(SEPARATOR); + if ( ! m.lookingAt() ) + return result; // leave matcher region alone + if ( significant || -1 != m.start("nl") ) + result = true; + if ( -1 != m.start("nest") ) + { + m.region(m.end(0) + 1, m.regionEnd()); // + 1 to eat the * + m.usePattern(BRACKETED_COMMENT_INSIDE); + ++ level; + state = 1; + continue; + } + state = 2; // advance matcher region, then break loop + break; + case 1: + if ( ! m.lookingAt() ) + throw new InputMismatchException("unclosed comment"); + if ( -1 != m.start("nest") ) + { + m.region(m.end(0) + 1, m.regionEnd()); // + 1 to eat the * + ++ level; + continue; + } + else if ( 0 == -- level ) + state = 0; + break; + case 2: + break loop; + } + m.region(m.end(0), m.regionEnd()); // advance past matched portion + } + return result; + } + + /** + * Return an Identifier.Simple, given a {@code Matcher} that has matched an + * ISO_AND_PG_IDENTIFIER_CAPTURING. Will determine from the matching named + * groups which type of identifier it was, process the matched sequence + * appropriately, and return it. + * @param m A {@code Matcher} known to have matched an identifier. + * @return Identifier.Simple made from the recovered string. + */ + public static Identifier.Simple identifierFrom(Matcher m) + { + String s = m.group("i"); + if ( null != s ) + return Identifier.Simple.from(s, false); + s = m.group("xd"); + if ( null != s ) + return Identifier.Simple.from(s.replace("\"\"", "\""), true); + s = m.group("xui"); + if ( null == s ) + return null; // XXX? 
+ s = s.replace("\"\"", "\""); + String uec = m.group("uec"); + if ( null == uec ) + uec = "\\"; + int uecp = uec.codePointAt(0); + Matcher replacer = + Pattern.compile( + String.format(ISO_UNICODE_REPLACER, Pattern.quote(uec))) + .matcher(s); + StringBuffer sb = new StringBuffer(); + while ( replacer.find() ) + { + replacer.appendReplacement(sb, ""); + int cp; + String uev = replacer.group("u4d"); + if ( null == uev ) + uev = replacer.group("u6d"); + if ( null != uev ) + cp = Integer.parseInt(uev, 16); + else + cp = uecp; + // XXX check validity + sb.appendCodePoint(cp); + } + return Identifier.Simple.from(replacer.appendTail(sb).toString(), true); + } + + /** + * Class representing a SQL identifier. These have wild and wooly behavior + * depending on whether they were represented in the source in quoted form + * or not. Quoted ones are case-sensitive, + * and {@link #equals(Object) equals} will only recognize exact matches. + * Non-quoted ones match case-insensitively; just to make this interesting, + * ISO SQL has one set of case-folding rules, while PostgreSQL has another. + * Also, a non-quoted identifier can match a quoted one, if the quoted one's + * exact spelling matches the non-quoted one's case-folded form. + *
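A few concrete consequences of those rules, as a sketch (the identifier spellings are arbitrary):

import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple;

public class FoldingSketch
{
    public static void main(String[] args)
    {
        Simple regular = Simple.from("Foo", false);      // non-quoted: folds
        Simple quotedLower = Simple.from("foo", true);   // quoted
        Simple quotedUpper = Simple.from("FOO", true);   // quoted
        Simple quotedExact = Simple.from("Foo", true);   // quoted

        // A non-quoted identifier can match a quoted one whose spelling equals
        // one of its folded forms (PostgreSQL folding here, ISO folding there).
        System.out.println(regular.equals(quotedLower)); // true
        System.out.println(regular.equals(quotedUpper)); // true

        // Two quoted identifiers match only exactly.
        System.out.println(quotedExact.equals(quotedLower)); // false

        // Deparsing: a folding identifier needs no quotes; a quoted one does.
        System.out.println(regular);     // Foo
        System.out.println(quotedLower); // "foo"
    }
}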

    + * For even more fun, the PostgreSQL rules depend on the server encoding. + * For any multibyte encoding, only the 26 ASCII uppercase letters + * are folded to lower, leaving all other characters alone. In single-byte + * encodings, more letters can be touched. But this code has to run in a + * javac annotation processor without knowledge of any particular database's + * server encoding. The recommended encoding, UTF-8, is multibyte, so the + * PostgreSQL rule will be taken to be: only the 26 ASCII letters, always. + */ + public static abstract class Identifier implements Serializable + { + private static final long serialVersionUID = 1L; + + Identifier() { } // not API + + /** + * This Identifier represented as it would be in SQL source. + *

    + * The passed {@code Charset} indicates the character encoding + * in which the deparsed result will be stored; the method should verify + * that the characters can be encoded there, or use the Unicode + * delimited identifier form and escape the ones that cannot. + * @return The identifier, quoted, unless it is folding. + */ + public abstract String deparse(Charset cs); + + /** + * Equality test with the case-sensitivity rules of SQL. + * @param other Object to compare to + * @return true if two quoted Identifiers match exactly, or two + * non-quoted ones match in either the PostgreSQL or ISO SQL folded + * form, or a quoted one exactly matches either folded form of a + * non-quoted one. + */ + @Override + public boolean equals(Object other) + { + return equals(other, null); + } + + /** + * For use in an annotation processor, a version of {@code equals} that + * can take a {@link Messager} and use it to emit warnings. It will + * emit a warning whenever it compares two Identifiers that are equal + * by one or the other of PostgreSQL's or ISO SQL's rules but not both. + * @param other Object to compare to + * @param msgr a Messager to use for warnings; if {@code null}, no + * warnings will be generated. + * @return true if two quoted Identifiers match exactly, or two + * non-quoted ones match in either the PostgreSQL or ISO SQL folded + * form, or a quoted one exactly matches either folded form of a + * non-quoted one. + */ + public abstract boolean equals(Object other, Messager msgr); + + /** + * Convert to {@code String} as by {@code deparse} passing a character + * set of {@code UTF_8}. + */ + @Override + public String toString() + { + return deparse(UTF_8); + } + + /** + * Ensure deserialization doesn't produce any unknown {@code Identifier} + * subclass. + *

    + * The natural hierarchy means not everything can be made {@code final}. + */ + private void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException + { + in.defaultReadObject(); + Class c = getClass(); + if ( c != Simple.class && c != Foldable.class && c != Folding.class + && c != Pseudo.class && c != Operator.class + && c != Qualified.class ) + throw new InvalidObjectException( + "deserializing unknown Identifier subclass: " + + c.getName()); + } + + /** + * Class representing a non-schema-qualified identifier, either the + * {@link Simple Simple} form used for naming most things, or the + * {@link Operator Operator} form specific to PostgreSQL operators. + */ + public static abstract class Unqualified> + extends Identifier + { + private static final long serialVersionUID = -6580227110716782079L; + + Unqualified() { } // not API + + /** + * Produce the deparsed form of a qualified identifier with the + * given qualifier and this as the local part. + * @throws NullPointerException if qualifier is null + */ + public abstract String deparse(Simple qualifier, Charset cs); + + /** + * Form an {@code Identifier.Qualified} with this as the local part. + */ + public abstract Qualified withQualifier(Simple qualifier); + } + + /** + * Class representing an unqualified identifier in the form of a name + * (whether a case-insensitive "regular identifier" without quotes, + * or a delimited form). + */ + public static class Simple extends Unqualified + { + private static final long serialVersionUID = 8571819710429273206L; + + protected final String m_nonFolded; + + /** + * Create an {@code Identifier.Simple} given its original, + * non-folded spelling, and whether it represents a quoted + * identifier. + * @param s The exact, internal, non-folded spelling of the + * identifier (unwrapped from any quoting in its external form). + * @param quoted Pass {@code true} if this was parsed from any + * quoted external form, false if non-quoted. + * @return A corresponding Identifier.Simple + * @throws IllegalArgumentException if {@code quoted} is + * {@code false} but {@code s} cannot be a non-quoted identifier, + * or {@code s} is empty or longer than the ISO SQL maximum 128 + * codepoints. + */ + public static Simple from(String s, boolean quoted) + { + boolean foldable = + ISO_AND_PG_REGULAR_IDENTIFIER.matcher(s).matches(); + if ( ! quoted ) + { + if ( ! foldable ) + throw new IllegalArgumentException(String.format( + "impossible for \"%1$s\" to be" + + " a non-quoted identifier", s)); + return new Folding(s); + } + if ( foldable ) + return new Foldable(s); + return new Simple(s); + } + + /** + * Concatenates one or more strings or identifiers to the end of + * this identifier. + *

    + * The arguments may be instances of {@code Simple} or of + * {@code CharSequence}, in any combination. + *
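For example (a sketch; the identifier spellings are arbitrary):

import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple;

public class ConcatSketch
{
    public static void main(String[] args)
    {
        Simple folded = Simple.fromJava("point").concat("_", Simple.fromJava("in"));
        System.out.println(folded); // point_in  (still folds, needs no quotes)

        Simple quoted = Simple.fromJava("\"Point\"").concat("_in");
        System.out.println(quoted); // "Point_in"  (does not fold, so quoted)
    }
}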

    + * The resulting identifier folds if this identifier and all + * identifier arguments fold and the concatenation (with all + * {@code Simple} and {@code CharSequence} components included) + * still matches the {@code ISO_AND_PG_REGULAR_IDENTIFIER} pattern. + */ + public Simple concat(Object... more) + { + boolean foldable = folds(); + StringBuilder s = new StringBuilder(nonFolded()); + + for ( Object o : more ) + { + if ( o instanceof Simple ) + { + Simple si = (Simple)o; + foldable = foldable && si.folds(); + s.append(si.nonFolded()); + } + else if ( o instanceof CharSequence ) + { + CharSequence cs = (CharSequence)o; + s.append(cs); + } + else + throw new IllegalArgumentException( + "arguments to Identifier.Simple.concat() must be " + + "Identifier.Simple or CharSequence"); + } + + if ( foldable ) + foldable=ISO_AND_PG_REGULAR_IDENTIFIER.matcher(s).matches(); + + return from(s.toString(), ! foldable); + } + + /** + * Create an {@code Identifier.Simple} from a name string found in + * a PostgreSQL system catalog. + *

    + * There is not an explicit indication in the catalog of whether the + * name was originally quoted. It must have been, however, if it + * does not have the form of a regular identifier, or if it has that + * form but does not match its pgFold-ed form (without quotes, PG + * would have folded it in that case). + * @param s name of the simple identifier, as found in a system + * catalog. + * @return an Identifier.Simple or subclass appropriate to the form + * of the name. + */ + public static Simple fromCatalog(String s) + { + if ( PG_REGULAR_IDENTIFIER.matcher(s).matches() ) + { + if ( s.equals(Folding.pgFold(s)) ) + return new Folding(s); + /* + * Having just determined it does not match its pgFolded + * form, there is no point returning it as a Foldable; there + * is no chance PG will see it as a match to a folded one. + */ + } + return new Simple(s); + } + + /** + * Create an {@code Identifier.Simple} from a name string supplied + * in Java source, such as an annotation value. + *

    + * Equivalent to {@code fromJava(s, null)}. + */ + public static Simple fromJava(String s) + { + return fromJava(s, null); + } + + /** + * Create an {@code Identifier.Simple} from a name string supplied + * in Java source, such as an annotation value. + *

    + * Historically, PL/Java has treated these identifiers as regular + * ones, requiring delimited ones to be represented by adding quotes + * explicitly at start and end, and doubling internal quotes, all + * escaped for Java, naturally. This method accepts either of those + * forms, and will also accept a string that neither qualifies as a + * regular identifier nor starts and ends with quotes. Such a string + * will be treated as if it were a delimited identifier with the + * start/end quotes already stripped and internal ones already + * undoubled. + *
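Concretely, the three accepted forms behave as follows (a sketch; the spellings are arbitrary):

import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple;

public class FromJavaSketch
{
    public static void main(String[] args)
    {
        // A regular identifier: folds, deparses without quotes.
        System.out.println(Simple.fromJava("my_table"));      // my_table

        // Explicit quotes in the Java string: a delimited identifier.
        System.out.println(Simple.fromJava("\"My Table\""));  // "My Table"

        // Neither form: treated as an already-stripped delimited identifier.
        System.out.println(Simple.fromJava("My Table"));      // "My Table"
    }
}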

    + * The SQL Unicode escape syntax is not accepted here. Java already + * has its own Unicode escape syntax, which is what should be used. + * @param s name of the simple identifier, as found in Java source. + * @param msgr a Messager for reporting diagnostics at compile time, + * or null if not in a compilation context. + * @return an Identifier.Simple or subclass appropriate to the form + * of the name. + */ + public static Simple fromJava(String s, Messager msgr) + { + Matcher m = ISO_DELIMITED_IDENTIFIER_CAPTURING.matcher(s); + boolean warn = false; + + if ( m.find() ) + { + if ( 0 == m.start() && s.length() == m.end() ) + s = m.group("xd").replace("\"\"", "\""); + else + warn = true; + } + else if ( m.usePattern(PG_REGULAR_IDENTIFIER).matches() ) + return new Folding(s); + + Simple rslt = from(s, true); + + if ( warn && null != msgr ) + msgr.printMessage(Kind.WARNING, + "identifier input as [" + s + + "] interpreted as [" + rslt + ']'); + + return rslt; + } + + @Override + public Qualified withQualifier(Simple qualifier) + { + return new Qualified<>(qualifier, this); + } + + @Override + public String deparse(Charset cs) + { + if ( ! cs.contains(UTF_8) + && ! cs.newEncoder().canEncode(m_nonFolded) ) + throw noUnicodeQuotingYet(m_nonFolded); + return '"' + m_nonFolded.replace("\"", "\"\"") + '"'; + } + + @Override + public String deparse(Simple qualifier, Charset cs) + { + return qualifier.deparse(cs) + "." + deparse(cs); + } + + /** + * Whether this Identifier case-folds. + * @return true if this Identifier was non-quoted in the source, + * false if it was quoted. + */ + public boolean folds() + { + return false; + } + + /** + * This Identifier's original spelling. + * @return The spelling as seen in the source, with no case folding. + */ + public String nonFolded() + { + return m_nonFolded; + } + + /** + * This Identifier as PostgreSQL would case-fold it (or the same as + * nonFolded if this was quoted and does not fold). + * @return The spelling with ASCII letters (only) folded to + * lowercase, if this Identifier folds. + */ + public String pgFolded() + { + return m_nonFolded; + } + + /** + * This Identifier as ISO SQL would case-fold it (or the same as + * nonFolded if this was quoted and does not fold). + * @return The spelling with lowercase and titlecase letters folded + * to (possibly length-changing) uppercase equivalents, if this + * Identifier folds. + */ + public String isoFolded() + { + return m_nonFolded; + } + + /** + * For a quoted identifier that could not match any non-quoted one, + * the hash code of its non-folded spelling is good enough. In other + * cases, the code must be derived more carefully. + */ + @Override + public int hashCode() + { + return m_nonFolded.hashCode(); + } + + @Override + public boolean equals(Object other, Messager msgr) + { + if ( this == other ) + return true; + if ( other instanceof Pseudo ) + return false; + if ( ! (other instanceof Simple) ) + return false; + Simple oi = (Simple)other; + if ( oi.folds() ) + return oi.equals(this); + return m_nonFolded.equals(oi.nonFolded()); + } + + /** + * Case-fold a string by the PostgreSQL rules (assuming a + * multibyte server encoding, where only the 26 uppercase ASCII + * letters fold to lowercase). + * @param s The non-folded value. + * @return The folded value. 
+ */ + public static String pgFold(String s) + { + Matcher m = s_pgFolded.matcher(s); + StringBuffer sb = new StringBuffer(); + while ( m.find() ) + m.appendReplacement(sb, m.group().toLowerCase()); + return m.appendTail(sb).toString(); + } + + /** + * The characters that PostgreSQL rules will fold: only the 26 + * uppercase ASCII letters. + */ + private static final Pattern s_pgFolded = Pattern.compile("[A-Z]"); + + private Simple(String nonFolded) + { + String diag = checkLength(nonFolded); + if ( null != diag ) + throw new IllegalArgumentException(diag); + m_nonFolded = nonFolded; + } + + private static String checkLength(String s) + { + int cpc = s.codePointCount(0, s.length()); + if ( 0 < cpc && cpc <= 128 ) + return null; /* check has passed */ + return String.format( + "identifier empty or longer than 128 codepoints: \"%s\"", + s); + } + + private void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException + { + in.defaultReadObject(); + String diag = checkLength(m_nonFolded); + if ( null != diag ) + throw new InvalidObjectException(diag); + if ( ! ( this instanceof Foldable ) + && ! ( this instanceof Pseudo ) + && ISO_AND_PG_REGULAR_IDENTIFIER.matcher(m_nonFolded) + .matches() ) + throw new InvalidObjectException( + "foldable identifier deserialized as not foldable: " + + m_nonFolded); + } + } + + /** + * Class representing an Identifier that was quoted, therefore does + * not case-fold, but satisfies {@code ISO_AND_PG_REGULAR_IDENTIFIER} + * and so could conceivably be matched by a non-quoted identifier. + */ + static class Foldable extends Simple + { + private static final long serialVersionUID = 108336518899180185L; + + private transient /*otherwise final*/ int m_hashCode; + + private Foldable(String nonFolded) + { + this(nonFolded, isoFold(nonFolded)); + } + + private Foldable(String nonFolded, String isoFolded) + { + super(nonFolded); + m_hashCode = isoFolded.hashCode(); + } + + private void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException + { + in.defaultReadObject(); + if ( ! PG_REGULAR_IDENTIFIER.matcher(m_nonFolded).matches() ) + throw new InvalidObjectException( + "cannot be an SQL regular identifier: " + m_nonFolded); + m_hashCode = isoFold(m_nonFolded).hashCode(); + } + + /** + * For any identifier that case-folds, or even could be matched by + * another identifier that case-folds, the hash code is tricky. + * Hash codes are required to be equal for any instances that are + * equal (but not required to be different for instances that are + * unequal). In this case, the hash codes need to be equal whenever + * the PostgreSQL or ISO SQL folded forms match. + *
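The practical consequence, sketched with arbitrary spellings: identifiers that compare equal under either folding rule hash alike, because both hash codes are derived from the ISO-folded spelling.

import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple;

public class HashSketch
{
    public static void main(String[] args)
    {
        Simple folding = Simple.from("foo", false); // non-quoted, folds
        Simple quoted  = Simple.from("FOO", true);  // quoted, could be matched

        System.out.println(folding.equals(quoted));                  // true
        System.out.println(folding.hashCode() == quoted.hashCode()); // true
    }
}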

    + * This hash code will be derived from the ISO-folded spelling of + * the identifier. As long as the PostgreSQL rules only affect the + * 26 ASCII letters, all of which are also folded (albeit in the + * other direction) by the ISO rules, hash codes will also match for + * identifiers equal under PostgreSQL rules. + */ + @Override + public int hashCode() + { + return m_hashCode; + } + + /** + * The characters that ISO SQL rules will fold: anything that is + * lowercase or titlecase. + */ + private static final Pattern s_isoFolded = + Pattern.compile("[\\p{javaLowerCase}\\p{javaTitleCase}]"); + + /** + * Case-fold a string by the ISO SQL rules, where any lowercase or + * titlecase character gets replaced by its uppercase form (the + * generalized, possibly length-changing one, requiring + * {@link String#toUpperCase} and not + * {@link Character#toUpperCase}. + * @param s The non-folded value. + * @return The folded value. + */ + protected static String isoFold(String s) + { + Matcher m = s_isoFolded.matcher(s); + StringBuffer sb = new StringBuffer(); + while ( m.find() ) + m.appendReplacement(sb, m.group().toUpperCase()); + return m.appendTail(sb).toString(); + } + } + + /** + * Class representing an Identifier that was not quoted, and therefore + * has case-folded forms. + */ + static class Folding extends Foldable + { + private static final long serialVersionUID = -1222773531891296743L; + + private transient /*otherwise final*/ String m_pgFolded; + private transient /*otherwise final*/ String m_isoFolded; + + private Folding(String nonFolded) + { + this(nonFolded, isoFold(nonFolded)); + } + + private Folding(String nonFolded, String isoFolded) + { + super(nonFolded, isoFolded); + m_pgFolded = pgFold(nonFolded); + m_isoFolded = isoFolded; + } + + private void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException + { + in.defaultReadObject(); + m_pgFolded = pgFold(m_nonFolded); + m_isoFolded = isoFold(m_nonFolded); + } + + @Override + public String pgFolded() + { + return m_pgFolded; + } + + @Override + public String isoFolded() + { + return m_isoFolded; + } + + @Override + public boolean folds() + { + return true; + } + + @Override + public String deparse(Charset cs) + { + if ( ! cs.contains(UTF_8) + && ! cs.newEncoder().canEncode(m_nonFolded) ) + throw noUnicodeQuotingYet(m_nonFolded); + return m_nonFolded; + } + + @Override + public boolean equals(Object other, Messager msgr) + { + if ( this == other ) + return true; + if ( other instanceof Pseudo ) + return false; + if ( ! (other instanceof Simple) ) + return false; + Simple oi = (Simple)other; + boolean eqPG = m_pgFolded.equals(oi.pgFolded()); + boolean eqISO = m_isoFolded.equals(oi.isoFolded()); + if ( eqPG != eqISO && oi.folds() && null != msgr ) + { + msgr.printMessage(Kind.WARNING, String.format( + "identifiers \"%1$s\" and \"%2$s\" are equal by ISO " + + "or PostgreSQL case-insensitivity rules but not both", + m_nonFolded, oi.nonFolded())); + } + return eqPG || eqISO; + } + } + + /** + * Displays/deparses like a {@code Simple} identifier, but no singleton + * of this class matches anything but itself, to represent + * pseudo-identifiers like {@code PUBLIC} as a privilege grantee. + */ + public static final class Pseudo extends Simple + { + private static final long serialVersionUID = 4760344682650087583L; + + /** + * Instance intended to represent {@code PUBLIC} when used as a + * privilege grantee. + *

    + * It would not be correct to use this instance for other special + * things that happen to be named {@code PUBLIC}, such as the + * {@code PUBLIC} schema. That is a real catalog object that has + * the actual name {@code PUBLIC}, and should be represented as a + * {@code Simple} with that name. + *

    + * Note: through PG 14 at least, the database itself does not treat + * the public grantee in the way anticipated here; it is, instead, + * treated as an ordinary folding name "public" and forbidden as the + * name of any role. Therefore, a model of grantee roles would not + * need this symbol after all, but the definition will remain here + * illustrating the concept. + */ + public static final Pseudo PUBLIC = new Pseudo("PUBLIC"); + + /** + * A {@code Pseudo} identifier instance is only equal + * to itself. + */ + @Override + public boolean equals(Object other) + { + return this == other; + } + + private Pseudo(String name) + { + super(name); + } + + private Object readResolve() throws ObjectStreamException + { + switch ( m_nonFolded ) + { + case "PUBLIC": return PUBLIC; + default: + throw new InvalidObjectException( + "not a known Pseudo-identifier: " + m_nonFolded); + } + } + } + + /** + * Class representing an Identifier that names a PostgreSQL operator. + */ + public static class Operator extends Unqualified + { + private static final long serialVersionUID = -7230613628520513783L; + + private final String m_name; + + private Operator(String name) + { + m_name = name; + } + + /** + * Create an {@code Identifier.Operator} from a name string. + *

    + * Equivalent to {@code from(s, null)}. + */ + public static Operator from(String name) + { + return from(name, null); + } + + /** + * Create an {@code Identifier.Operator} from a name string. + *

    + * There are not different ways to represent an operator in Java + * source and in the PostgreSQL catalogs, so there do not need to be + * {@code fromCatalog} and {@code fromJava} flavors of this method. + *
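A quick illustration (not part of the patch; the operator spellings are only examples taken from the test cases in this same change set, and Lexicals.Identifier is assumed to be imported) of how the single from method behaves under the validity check implemented just below:

    // Valid operator names, per the PG_OPERATOR pattern this class checks against:
    Identifier.Operator plus  = Identifier.Operator.from("+");
    Identifier.Operator fancy = Identifier.Operator.from("!@#%*");

    // "--" begins an SQL comment and is not a valid operator name; with no
    // Messager supplied, from() reports that by throwing IllegalArgumentException:
    // Identifier.Operator.from("--");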

    + * @param name The operator name. + * @param msgr a Messager for reporting diagnostics at compile time, + * or null if not in a compilation context. + */ + public static Operator from(String name, Messager msgr) + { + requireNonNull(name); + String diag = checkMatch(name); + + if ( null != diag ) + { + if ( null == msgr ) + throw new IllegalArgumentException(diag); + msgr.printMessage(Kind.ERROR, diag); + } + + /* + * It would be considerate to check the length here, but that + * would require knowing the server encoding, because the length + * limit in PostgreSQL is NAMELEN - 1 encoded octets (or + * possibly fewer, if the character that overflows encodes to + * more than one octet). In the SQL generator, that would + * require an argument to supply the assumed PG server encoding + * being compiled for, and passing it here. Too much work. + */ + return new Operator(name); + } + + private static String checkMatch(String name) + { + if ( PG_OPERATOR.matcher(name).matches() ) + return null; /* the check has passed */ + return String.format( + "not a valid PostgreSQL operator name: %s", name); + } + + private void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException + { + in.defaultReadObject(); + String diag = checkMatch(m_name); + if ( null != diag ) + throw new InvalidObjectException(diag); + } + + @Override + public Qualified withQualifier(Simple qualifier) + { + return new Qualified<>(qualifier, this); + } + + /** + * Returns a hash code value for the object. + */ + @Override + public int hashCode() + { + return m_name.hashCode(); + } + + @Override + public boolean equals(Object other, Messager msgr) + { + if ( this == other ) + return true; + if ( ! (other instanceof Operator) ) + return false; + return m_name.equals(((Operator)other).m_name); + } + + @Override + public String deparse(Charset cs) + { + /* + * Operator characters are limited to ASCII. Don't bother + * checking that cs can encode m_name. + */ + return m_name; + } + + @Override + public String deparse(Simple qualifier, Charset cs) + { + return "OPERATOR(" + + qualifier.deparse(cs) + "." + deparse(cs) + ")"; + } + } + + /** + * Class representing a schema-qualified identifier. + * This is distinct from an Identifier.Unqualified even when it has no + * qualifier (and would therefore deparse the same way). + */ + public static class Qualified> + extends Identifier + { + private static final long serialVersionUID = 4834510180698247396L; + + private final Simple m_qualifier; + private final T m_local; + + /** + * Create an {@code Identifier.Qualified} from name strings found in + * PostgreSQL system catalogs. + *

    + * There is not an explicit indication in the catalog of whether a + * name was originally quoted. It must have been, however, if it + * does not have the form of a regular identifier, or if it has that + * form but does not match its pgFold-ed form (without quotes, PG + * would have folded it in that case). + * @param qualifier string with the name of a schema, as found in + * the pg_namespace system catalog. + * @param local string with the local name of an object in that + * schema. + * @return an Identifier.Qualified + * @throws NullPointerException if the local name is null. + */ + public static Qualified nameFromCatalog( + String qualifier, String local) + { + Simple localId = Simple.fromCatalog(local); + Simple qualId = ( null == qualifier ) ? + null : Simple.fromCatalog(qualifier); + return localId.withQualifier(qualId); + } + + /** + * Create an {@code Identifier.Qualified} representing an operator + * from name strings found in PostgreSQL system catalogs. + * @param qualifier string with the name of a schema, as found in + * the pg_namespace system catalog. + * @param local string with the local name of an object in that + * schema. + * @return an Identifier.Qualified + * @throws NullPointerException if the local name is null. + */ + public static Qualified operatorFromCatalog( + String qualifier, String local) + { + Operator localId = Operator.from(local); + Simple qualId = ( null == qualifier ) ? + null : Simple.fromCatalog(qualifier); + return localId.withQualifier(qualId); + } + + + /** + * Create an {@code Identifier.Qualified} from a name + * string supplied in Java source, such as an annotation value. + *

    + * Equivalent to {@code nameFromJava(s, null)}. + */ + public static Qualified nameFromJava(String s) + { + return nameFromJava(s, null); + } + + /** + * Create an {@code Identifier.Qualified} from a name + * string supplied in Java source, such as an annotation value. + *

    + * Explicit delimited-identifier syntax is recognized if it spans + * the entire string (producing a local name and null qualifier), + * or from the beginning to a dot (representing the qualifier), or + * from a dot to the end (representing the local name). If both the + * qualifier and local name are given in the delimited syntax, they + * define the result. + *

    + * Otherwise, if either component is given in the delimited syntax + * as above, the other component is taken from the rest of the + * string on the other side of the dot, as a folding regular + * identifier if it is one, otherwise as an implicitly quoted one. + *

    + * Any subsequence that resembles delimited syntax but does not + * appear where it is recognized as above will be treated as literal + * content (so, its quotes will be doubled when deparsed, etc.), and + * produce a compiler warning if called in a compilation context. + *

    + * If neither component is given in delimited syntax, the string + * must contain at most one dot. If it contains none, it is a local + * name with null qualifier, again treated as a regular identifier + * if it is one, or an implicitly quoted one. If there is one + * dot, the substrings that precede and follow it are the qualifier + * and the local name, treated the same way. It is an error if there + * is more than one dot. + *
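A sketch of the parsing rules just described (not part of the patch; the schema and table spellings are invented for illustration, and Lexicals.Identifier is assumed to be imported):

    // No delimited syntax and a single dot: split at the dot; both parts
    // are regular identifiers and therefore fold.
    Identifier.Qualified<Identifier.Simple> q1 =
        Identifier.Qualified.nameFromJava("myschema.MyTable");

    // Delimited syntax from the start to the dot: the qualifier is taken
    // verbatim (non-folding), and the rest is the local name.
    Identifier.Qualified<Identifier.Simple> q2 =
        Identifier.Qualified.nameFromJava("\"My Schema\".MyTable");

    // No dot at all: a local name with a null qualifier.
    Identifier.Qualified<Identifier.Simple> q3 =
        Identifier.Qualified.nameFromJava("payroll");

    // More than one dot with no delimited syntax is ambiguous; outside a
    // compilation context this throws IllegalArgumentException.
    // Identifier.Qualified.nameFromJava("a.b.c");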

    + * The SQL Unicode escape syntax is not accepted here. Java already + * has its own Unicode escape syntax, which is what should be used. + * @param s the qualified identifier, as found in Java source. + * @param msgr a Messager for reporting diagnostics at compile time, + * or null if not in a compilation context. + * @return the Identifier.Qualified<Simple> + */ + @SuppressWarnings("fallthrough") + public static Qualified nameFromJava( + String s, Messager msgr) + { + String qualifier = null; + String localName = null; + + /* + * Find out if delimited-identifier-resembling syntax appears + * anywhere in s. Save the first (and last, if more than one). + */ + Matcher m = ISO_DELIMITED_IDENTIFIER.matcher(s); + int startFirst = -1, endFirst = -1; + int startLast = -1, endLast = -1; + int matched = 0; + + if ( m.find() ) + { + matched = 1; + startFirst = m.start(); + endFirst = m.end(); + while ( m.find() ) + { + matched = 2; + startLast = m.start(); + endLast = m.end(); + } + } + + switch ( matched ) + { + case 2: + if ( s.length() == endLast && '.' == s.charAt(startLast-1) ) + { + localName = s.substring(startLast); + if ( 0 == startFirst && 2 + endFirst == startLast ) + { + qualifier = s.substring(startFirst, endFirst); + break; + } + qualifier = s.substring(0, startLast - 1); + break; + } + /* FALLTHROUGH */ + case 1: + if ( 0 == startFirst ) + { + if ( s.length() == endFirst ) + { + localName = s; + break; + } + if ( '.' == s.charAt(endFirst) ) + { + qualifier = s.substring(0, endFirst); + localName = s.substring(endFirst + 1); + break; + } + } + else if ( '.' == s.charAt(startFirst - 1) + && s.length() == endFirst ) + { + qualifier = s.substring(0, startFirst - 1); + localName = s.substring(startFirst); + break; + } + /* FALLTHROUGH */ + default: + endFirst = s.indexOf('.'); + if ( -1 != endFirst ) + { + if ( -1 != s.indexOf('.', 1 + endFirst) ) + { + String diag = + "ambiguous qualified identifier: \"" + s + '"'; + if ( null == msgr ) + throw new IllegalArgumentException(diag); + msgr.printMessage(Kind.ERROR, diag); + } + qualifier = s.substring(0, endFirst); + localName = s.substring(endFirst + 1); + break; + } + localName = s; + } + + Qualified q = + Simple.fromJava(localName).withQualifier( + null == qualifier ? null : Simple.fromJava(qualifier)); + + return q; + } + + /** + * Create an {@code Identifier.Qualified} from a + * name string supplied in Java source, such as an annotation value. + *

    + * Equivalent to {@code operatorFromJava(s, null)}. + */ + public static Qualified operatorFromJava(String s) + { + return operatorFromJava(s, null); + } + + /** + * Create an {@code Identifier.Qualified} from a + * name string supplied in Java source, such as an annotation value. + *

    + * The string must end in a valid operator name. That is either the + * entire string (representing a local name and null qualifier), or + * follows a dot. Whatever precedes the dot becomes the qualifier, + * treated as a folding regular identifier if it is one, or as a + * delimited identifier if it has that form, or as an implicitly + * quoted one. + *
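A short sketch of the rule just stated (not part of the patch; "myschema" is an invented name, and Lexicals.Identifier is assumed to be imported):

    // The whole string is a valid operator: local name, null qualifier.
    Identifier.Qualified<Identifier.Operator> q1 =
        Identifier.Qualified.operatorFromJava("+");

    // Whatever precedes the final dot becomes the (folding) qualifier, and the
    // qualified form deparses with the OPERATOR() syntax, e.g. OPERATOR(myschema.+).
    Identifier.Qualified<Identifier.Operator> q2 =
        Identifier.Qualified.operatorFromJava("myschema.+");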

    + * The SQL Unicode escape syntax is not accepted here. Java already + * has its own Unicode escape syntax, which is what should be used. + * @param s the qualified identifier, as found in Java source. + * @param msgr a Messager for reporting diagnostics at compile time, + * or null if not in a compilation context. + * @return the Identifier.Qualified<Operator> + */ + public static Qualified operatorFromJava( + String s, Messager msgr) + { + String qualifier = null; + String localName = null; + boolean error = false; + + /* + * This string had better end with a match of PG_OPERATOR. + * Find the last such, in case of a nutty schema name. + */ + int opStart = -1, opEnd = -1; + Matcher m = PG_OPERATOR.matcher(s); + while ( m.find() ) + { + opStart = m.start(); + opEnd = m.end(); + } + + if ( s.length() == opEnd ) + { + localName = s.substring(opStart); + if ( 1 < opStart && '.' == s.charAt(opStart - 1) ) + qualifier = s.substring(0, opStart - 1); + else if ( 0 != opStart ) + { + error = true; + /* + * This is compilation time; the ERROR above will + * ultimately fail the compilation, but for now return a + * value, however bogus, so the compiler can proceed. + */ + qualifier = s.substring(0, opStart); + } + } + else + { + error = true; + /* Again, make something bogus to return. */ + qualifier = s; + localName = "???"; + } + + if ( error ) + { + String diag = + "cannot parse qualified operator: \"" + s + '"'; + if ( null == msgr ) + throw new IllegalArgumentException(diag); + msgr.printMessage(Kind.ERROR, diag); + } + + return new Operator(localName).withQualifier( + null == qualifier ? null : Simple.fromJava(qualifier)); + } + + private Qualified(Simple qualifier, T local) + { + m_qualifier = qualifier; + m_local = requireNonNull(local); + } + + private void readObject(ObjectInputStream in) + throws IOException, ClassNotFoundException + { + in.defaultReadObject(); + if ( null == m_local ) + throw new InvalidObjectException( + "Identifier.Qualified deserialized with " + + "null local part"); + } + + @Override + public String deparse(Charset cs) + { + if ( null == m_qualifier ) + return m_local.deparse(cs); + return m_local.deparse(m_qualifier, cs); + } + + /** + * Combines the hash codes of the qualifier and local part. + *

    + * Equal to the local part's hash if the qualifier is null, though a + * {@code Qualified} with null qualifier is still not considered + * "equal" to an {@code Unqualified} with the same name. + */ + @Override + public int hashCode() + { + return (null == m_qualifier? 0 : 31 * m_qualifier.hashCode()) + + m_local.hashCode(); + } + + @Override + public boolean equals(Object other, Messager msgr) + { + if ( ! (other instanceof Qualified) ) + return false; + Qualified oi = (Qualified)other; + + return (null == m_qualifier + ? null == oi.m_qualifier + : m_qualifier.equals(oi.m_qualifier, msgr)) + && m_local.equals(oi.m_local, msgr); + } + + /** + * Returns the qualifier, possibly null, as a {@code Simple}. + */ + public Simple qualifier() + { + return m_qualifier; + } + + /** + * Returns the local part, a {@code Simple} or an {@code Operator}, + * as the case may be. + */ + public T local() + { + return m_local; + } + } + + private static RuntimeException noUnicodeQuotingYet(String n) + { + return new UnsupportedOperationException( + "cannot yet Unicode-escape identifier \"" + n + '"'); + } + } } diff --git a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/package-info.java b/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/package-info.java index d19c0777..db36fc4c 100644 --- a/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/package-info.java +++ b/pljava-api/src/main/java/org/postgresql/pljava/sqlgen/package-info.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -11,23 +11,8 @@ * Purdue University */ /** - *

    Not strictly part of the API, this package contains the compiler extension - * itself that recognizes - * {@linkplain org.postgresql.pljava.annotation PL/Java annotations} and - * generates the deployment descriptor. It is part of this module so that the - * pljava-api jar will be all that is needed on the class path - * when compiling PL/Java code, even with annotations. - * - *

Limitation note: A Java bug introduced in Java 7 - * required a workaround that was added here in - * pull #42. The workaround - * has a limitation: if you are compiling Java sources that also use other - * annotations and other annotation processors, and if those other processors - * can write new Java files and cause more than one round of compilation, they - * must not include org.postgresql.pljava.annotation annotations - * in those files. This code needs to find all such annotations in round 1. - * - *

    If Oracle fixes the underlying bug, the limitation can be removed. - * Oracle's bug site suggests that won't happen until Java 9, if then. + *

    API for manipulating common lexical elements of SQL such as identifiers, + * mostly for use by PL/Java's SQL deployment-descriptor-generating code, but + * exported here for other possible uses. */ package org.postgresql.pljava.sqlgen; diff --git a/pljava-api/src/main/late-added-resources/META-INF/services/javax.annotation.processing.Processor b/pljava-api/src/main/late-added-resources/META-INF/services/javax.annotation.processing.Processor index 48e97011..30c7b7f2 100644 --- a/pljava-api/src/main/late-added-resources/META-INF/services/javax.annotation.processing.Processor +++ b/pljava-api/src/main/late-added-resources/META-INF/services/javax.annotation.processing.Processor @@ -1 +1 @@ -org.postgresql.pljava.sqlgen.DDRProcessor +org.postgresql.pljava.annotation.processing.DDRProcessor diff --git a/pljava-api/src/site/markdown/index.md b/pljava-api/src/site/markdown/index.md index b720afbb..edbba65e 100644 --- a/pljava-api/src/site/markdown/index.md +++ b/pljava-api/src/site/markdown/index.md @@ -7,4 +7,4 @@ If you arrived here from a search for PL/Java API, you probably want [the user guide][ug], or [the API documentation][tad]. [ug]: ../use/use.html -[tad]: apidocs/index.html +[tad]: apidocs/org.postgresql.pljava/module-summary.html diff --git a/pljava-api/src/test/java/LexicalsTest.java b/pljava-api/src/test/java/LexicalsTest.java new file mode 100644 index 00000000..89b94d07 --- /dev/null +++ b/pljava-api/src/test/java/LexicalsTest.java @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2016-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import java.util.InputMismatchException; + +import junit.framework.TestCase; + +import static org.junit.Assert.*; +import static org.hamcrest.CoreMatchers.*; +import static org.hamcrest.MatcherAssert.assertThat; + +import static + org.postgresql.pljava.sqlgen.Lexicals.ISO_AND_PG_IDENTIFIER_CAPTURING; +import static + org.postgresql.pljava.sqlgen.Lexicals.NEWLINE; +import static + org.postgresql.pljava.sqlgen.Lexicals.SEPARATOR; +import static + org.postgresql.pljava.sqlgen.Lexicals.PG_OPERATOR; +import static org.postgresql.pljava.sqlgen.Lexicals.identifierFrom; +import static org.postgresql.pljava.sqlgen.Lexicals.separator; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Operator; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier.Qualified; + +public class LexicalsTest extends TestCase +{ + public LexicalsTest(String name) { super(name); } + + public void testNewline() throws Exception + { + Matcher m = NEWLINE.matcher("abcd\nefgh"); + m.region(4, 9); + assertTrue("newline 0", m.lookingAt()); + assertTrue("newline 1", m.lookingAt()); + + m.reset("abcd\r\nefgh").region(4, 10); + assertTrue("newline 2", m.lookingAt()); + assertEquals("\r\n", m.group()); + + m.reset("abcd\n\refgh").region(4, 10); + assertTrue("newline 3", m.lookingAt()); + assertEquals("\n", m.group()); + } + 
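A compact restatement (not itself part of the patch; the variable names are illustrative) of the folding behavior that testIdentifierEquivalence below exercises: the hash code is derived from the ISO-folded spelling, while equality accepts either the ISO or the PostgreSQL folding.

    // "baß" ISO-folds to "BASS" (String.toUpperCase lengthens it), whereas
    // PostgreSQL folding affects only the 26 ASCII letters; the two unquoted
    // identifiers below are equal under the ISO rule and share one hash code.
    Identifier a = Simple.from("baß", false);
    Identifier b = Simple.from("BASS", false);
    assert a.hashCode() == b.hashCode();
    assert a.equals(b);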
+ public void testSeparator() throws Exception + { + Pattern allTheRest = Pattern.compile(".*", Pattern.DOTALL); + + Matcher m = SEPARATOR.matcher("no starting separator"); + assertFalse("separator 0", separator(m, true)); + m.usePattern(allTheRest).matches(); + assertEquals("no starting separator", m.group(0)); + + m.reset(); + assertFalse("separator 1", separator(m, false)); + m.usePattern(allTheRest).matches(); + assertEquals("no starting separator", m.group(0)); + + m.reset(" simple separator"); + assertTrue("separator 2", separator(m, true)); + m.usePattern(allTheRest).matches(); + assertEquals("simple separator", m.group(0)); + + m.reset(); + assertFalse("separator 3", separator(m, false)); + m.usePattern(allTheRest).matches(); + assertEquals("simple separator", m.group(0)); + + m.reset(" \n simple separator"); + assertTrue("separator 4", separator(m, true)); + m.usePattern(allTheRest).matches(); + assertEquals("simple separator", m.group(0)); + + m.reset(); + assertTrue("separator 5", separator(m, false)); + m.usePattern(allTheRest).matches(); + assertEquals("simple separator", m.group(0)); + + m.reset(" -- a simple comment\nsimple comment"); + assertTrue("separator 6", separator(m, true)); + m.usePattern(allTheRest).matches(); + assertEquals("simple comment", m.group(0)); + + m.reset(); + assertTrue("separator 7", separator(m, false)); + m.usePattern(allTheRest).matches(); + assertEquals("simple comment", m.group(0)); + + m.reset("/* a bracketed comment\n */ bracketed comment"); + assertTrue("separator 8", separator(m, true)); + m.usePattern(allTheRest).matches(); + assertEquals("bracketed comment", m.group(0)); + + m.reset(); + assertFalse("separator 9", separator(m, false)); + m.usePattern(allTheRest).matches(); + assertEquals("bracketed comment", m.group(0)); + + m.reset("/* a /* nested */ comment\n */ nested comment"); + assertTrue("separator 10", separator(m, true)); + m.usePattern(allTheRest).matches(); + assertEquals("nested comment", m.group(0)); + + m.reset(); + assertFalse("separator 11", separator(m, false)); + m.usePattern(allTheRest).matches(); + assertEquals("nested comment", m.group(0)); + + m.reset("/* an /* unclosed */ comment\n * / unclosed comment"); + try + { + separator(m, true); + fail("unclosed comment not detected"); + } + catch ( Exception ex ) + { + assertTrue("separator 12", ex instanceof InputMismatchException); + } + + m.reset("/* -- tricky \n */ nested comment"); + assertTrue("separator 13", separator(m, true)); + m.usePattern(allTheRest).matches(); + assertEquals("nested comment", m.group(0)); + + m.reset(); + assertFalse("separator 14", separator(m, false)); + m.usePattern(allTheRest).matches(); + assertEquals("nested comment", m.group(0)); + + m.reset("-- /* tricky \n */ nested comment"); + assertTrue("separator 15", separator(m, true)); + m.usePattern(allTheRest).matches(); + assertEquals("*/ nested comment", m.group(0)); + + m.reset(); + assertTrue("separator 16", separator(m, false)); + m.usePattern(allTheRest).matches(); + assertEquals("*/ nested comment", m.group(0)); + } + + public void testIdentifierFrom() throws Exception + { + Matcher m = ISO_AND_PG_IDENTIFIER_CAPTURING.matcher("anIdentifier"); + assertTrue("i", m.matches()); + assertEquals("anIdentifier", identifierFrom(m).nonFolded()); + + m.reset("\"an\"\"Identifier\"\"\""); + assertTrue("xd", m.matches()); + assertEquals("an\"Identifier\"", identifierFrom(m).nonFolded()); + + m.reset("u&\"an\\0049dent\"\"if\\+000069er\""); + assertTrue("xui2", m.matches()); + 
assertEquals("anIdent\"ifier", identifierFrom(m).nonFolded()); + + m.reset("u&\"an@@\"\"@0049dent@+000069fier\"\"\" uescape '@'"); + assertTrue("xui3", m.matches()); + assertEquals("an@\"Identifier\"", identifierFrom(m).nonFolded()); + } + + public void testIdentifierEquivalence() throws Exception + { + Identifier baß = Simple.from("baß", false); + Identifier Baß = Simple.from("Baß", false); + Identifier bass = Simple.from("bass", false); + Identifier BASS = Simple.from("BASS", false); + + Identifier qbaß = Simple.from("baß", true); + Identifier qBaß = Simple.from("Baß", true); + Identifier qbass = Simple.from("bass", true); + Identifier qBASS = Simple.from("BASS", true); + + Identifier sab = Simple.from("sopran alt baß", true); + Identifier SAB = Simple.from("Sopran Alt Baß", true); + + /* DESERET SMALL LETTER OW */ + Identifier ow = Simple.from("\uD801\uDC35", false); + /* DESERET CAPITAL LETTER OW */ + Identifier OW = Simple.from("\uD801\uDC0D", false); + + assertEquals("hash1", baß.hashCode(), Baß.hashCode()); + assertEquals("hash2", baß.hashCode(), bass.hashCode()); + assertEquals("hash3", baß.hashCode(), BASS.hashCode()); + + assertEquals("hash4", baß.hashCode(), qbaß.hashCode()); + assertEquals("hash5", baß.hashCode(), qBaß.hashCode()); + assertEquals("hash6", baß.hashCode(), qbass.hashCode()); + assertEquals("hash7", baß.hashCode(), qBASS.hashCode()); + + assertEquals("hash8", ow.hashCode(), OW.hashCode()); + + assertEquals("eq1", baß, Baß); + assertEquals("eq2", baß, bass); + assertEquals("eq3", baß, BASS); + + assertEquals("eq4", Baß, qbaß); + assertEquals("eq5", Baß, qBASS); + + assertEquals("eq6", ow, OW); + + assertThat("ne1", Baß, not(equalTo(qBaß))); + assertThat("ne2", Baß, not(equalTo(qbass))); + + assertThat("ne3", sab, not(equalTo(SAB))); + } + + public void testIdentifierSimpleFrom() throws Exception + { + Identifier.Simple s1 = Simple.fromJava("aB"); + Identifier.Simple s2 = Simple.fromJava("\"ab\""); + Identifier.Simple s3 = Simple.fromJava("\"A\"\"b\""); + Identifier.Simple s4 = Simple.fromJava("A\"b"); + + Identifier.Simple s5 = Simple.fromCatalog("ab"); + Identifier.Simple s6 = Simple.fromCatalog("AB"); + Identifier.Simple s7 = Simple.fromCatalog("A\"b"); + + assertEquals("eq1", s1, s2); + assertEquals("eq2", s3, s4); + assertEquals("eq3", s1, s5); + assertEquals("eq4", s2, s5); + assertEquals("eq5", s1, s6); + assertEquals("eq6", s5, s6); + assertEquals("eq7", s3, s7); + + assertThat("ne1", s2, not(equalTo(s6))); + + assertEquals("deparse1", s1.toString(), "aB"); + assertEquals("deparse2", s2.toString(), "\"ab\""); + assertEquals("deparse3", s3.toString(), "\"A\"\"b\""); + assertEquals("deparse4", s4.toString(), "\"A\"\"b\""); + assertEquals("deparse5", s5.toString(), "ab"); + assertEquals("deparse6", s6.toString(), "\"AB\""); + assertEquals("deparse7", s7.toString(), "\"A\"\"b\""); + } + + public void testOperatorPattern() throws Exception + { + Matcher m = PG_OPERATOR.matcher("+"); + assertTrue("+", m.matches()); + assertTrue("-", m.reset("-").matches()); + assertFalse("--", m.reset("--").matches()); + assertFalse("/-", m.reset("/-").matches()); + assertTrue("@-", m.reset("@-").matches()); + assertTrue("@_--", m.reset("@--").lookingAt()); + assertEquals("eq1", m.group(), "@"); + assertTrue("@/", m.reset("@/").lookingAt()); + assertEquals("eq2", m.group(), "@/"); + assertTrue("@_/*", m.reset("@/*").lookingAt()); + assertEquals("eq3", m.group(), "@"); + assertTrue("+_-", m.reset("+-").lookingAt()); + assertEquals("eq4", m.group(), "+"); + assertTrue("-_+", 
m.reset("-+").lookingAt()); + assertEquals("eq5", m.group(), "-"); + assertFalse("--+", m.reset("--+").lookingAt()); + assertTrue("-_++", m.reset("-++").lookingAt()); + assertEquals("eq6", m.group(), "-"); + assertTrue("**_-++", m.reset("**-++").lookingAt()); + assertEquals("eq7", m.group(), "**"); + assertTrue("*!*-++", m.reset("*!*-++").lookingAt()); + assertEquals("eq8", m.group(), "*!*-++"); + } + + public void testIdentifierOperatorFrom() throws Exception + { + Operator o1 = Operator.from("!@#%*"); + Operator o2 = Operator.from("!@#%*"); + assertEquals("eq1", o1, o2); + assertEquals("eq2", o1.toString(), "!@#%*"); + Simple s1 = Simple.from("foo", false); + Qualified q1 = o1.withQualifier(null); + assertEquals("eq3", q1.toString(), o1.toString()); + Qualified q2 = o1.withQualifier(s1); + assertEquals("eq4", q2.toString(), "OPERATOR(foo.!@#%*)"); + Simple s2 = Simple.from("foo", true); + Qualified q3 = o1.withQualifier(s2); + assertEquals("eq5", q3.toString(), "OPERATOR(\"foo\".!@#%*)"); + } + + public void testIdentifierSerialization() throws Exception + { + Identifier[] orig = { + Simple.from("Foo", false), + Simple.from("foo", true), + Simple.from("I do not fold", true), + + Identifier.Pseudo.PUBLIC, + + Operator.from("!@#%*"), + + null, + null + }; + + orig[5] = (( Simple )orig[2]).withQualifier((Simple)orig[1]); + orig[6] = ((Operator)orig[4]).withQualifier((Simple)orig[1]); + + Identifier[] got; + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + try (ObjectOutputStream oos = new ObjectOutputStream(bos)) + { + oos.writeObject(orig); + } + + bos.close(); + + try ( + ByteArrayInputStream bis = + new ByteArrayInputStream(bos.toByteArray()); + ObjectInputStream ois = new ObjectInputStream(bis) + ) + { + got = (Identifier[])ois.readObject(); + } + + assertArrayEquals("identifier serialization", orig, got); + } +} diff --git a/pljava-deploy/.classpath b/pljava-deploy/.classpath deleted file mode 100644 index fd7ad7fb..00000000 --- a/pljava-deploy/.classpath +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/pljava-deploy/.gitignore b/pljava-deploy/.gitignore deleted file mode 100644 index ea8c4bf7..00000000 --- a/pljava-deploy/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/target diff --git a/pljava-deploy/.project b/pljava-deploy/.project deleted file mode 100644 index d88c05f0..00000000 --- a/pljava-deploy/.project +++ /dev/null @@ -1,23 +0,0 @@ - - - pljava-deploy - - - - - - org.eclipse.jdt.core.javabuilder - - - - - org.eclipse.m2e.core.maven2Builder - - - - - - org.eclipse.jdt.core.javanature - org.eclipse.m2e.core.maven2Nature - - diff --git a/pljava-deploy/.settings/org.eclipse.core.resources.prefs b/pljava-deploy/.settings/org.eclipse.core.resources.prefs deleted file mode 100644 index e9441bb1..00000000 --- a/pljava-deploy/.settings/org.eclipse.core.resources.prefs +++ /dev/null @@ -1,3 +0,0 @@ -eclipse.preferences.version=1 -encoding//src/main/java=UTF-8 -encoding/=UTF-8 diff --git a/pljava-deploy/.settings/org.eclipse.jdt.core.prefs b/pljava-deploy/.settings/org.eclipse.jdt.core.prefs deleted file mode 100644 index 60105c1b..00000000 --- a/pljava-deploy/.settings/org.eclipse.jdt.core.prefs +++ /dev/null @@ -1,5 +0,0 @@ -eclipse.preferences.version=1 -org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6 -org.eclipse.jdt.core.compiler.compliance=1.6 -org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning -org.eclipse.jdt.core.compiler.source=1.6 diff --git a/pljava-deploy/.settings/org.eclipse.m2e.core.prefs 
b/pljava-deploy/.settings/org.eclipse.m2e.core.prefs deleted file mode 100644 index f897a7f1..00000000 --- a/pljava-deploy/.settings/org.eclipse.m2e.core.prefs +++ /dev/null @@ -1,4 +0,0 @@ -activeProfiles= -eclipse.preferences.version=1 -resolveWorkspaceProjects=true -version=1 diff --git a/pljava-deploy/pom.xml b/pljava-deploy/pom.xml deleted file mode 100644 index 5e77f2c0..00000000 --- a/pljava-deploy/pom.xml +++ /dev/null @@ -1,34 +0,0 @@ - - 4.0.0 - - org.postgresql - pljava.app - 1.5.0 - - pljava-deploy - PL/Java Deploy - - A Java standalone utility to complete the SQL steps of deploying - PL/Java into a PostgreSQL database, over a JDBC connection. - As of PL/Java 1.5.0, this utility is unnecessary and obsolescent. - For current installation instructions that do not involve the Deployer, - see "Installing into PostgreSQL" at the project web site. - - - - - org.apache.maven.plugins - maven-jar-plugin - 2.2 - - - - org.postgresql.pljava.deploy.Deployer - - - - - - - diff --git a/pljava-deploy/src/main/java/org/postgresql/pljava/deploy/Deployer.java b/pljava-deploy/src/main/java/org/postgresql/pljava/deploy/Deployer.java deleted file mode 100644 index 356b5f36..00000000 --- a/pljava-deploy/src/main/java/org/postgresql/pljava/deploy/Deployer.java +++ /dev/null @@ -1,507 +0,0 @@ -/* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the The BSD 3-Clause License - * which accompanies this distribution, and is available at - * http://opensource.org/licenses/BSD-3-Clause - * - * Contributors: - * Tada AB - * Purdue University - */ -package org.postgresql.pljava.deploy; - -import java.io.PrintStream; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Savepoint; -import java.sql.Statement; -import java.sql.ResultSet; -import java.util.ArrayList; - -/** - * When running the deployer, you must use a classpath that can see the - * deploy.jar found in the Pl/Java distribution and the postgresql.jar from the - * PostgreSQL distribution. The former contains the code for the deployer - * command and the second includes the PostgreSQL JDBC driver. You then run the - * deployer with the command: - *

    - *

    - * java -cp <your classpath> org.postgresql.pljava.deploy.Deployer [ options ] - *
    - *

- * It's recommended that you create a shell script or a .bat script that does this - * for you so that you don't have to do this over and over again. - *

    - *

    Deployer options

    - *
    - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - *
Options for Deployer

  Option                  Description
  -install                Installs the Java language along with the sqlj procedures. The deployer
                          will fail if the language is installed already.
  -reinstall              Reinstalls the Java language and the sqlj procedures. This will
                          effectively drop all jar files that have been loaded.
  -remove                 Drops the Java language and the sqlj procedures and loaded jars.
  -user <user name>       Name of the user that connects to the database. Default is the current user.
  -password <password>    Password of the user that connects to the database. Default is no password.
  -database <database>    The name of the database to connect to. Default is the current user's name.
  -host <hostname>        Name of the host. Default is "localhost".
  -windows                Use this option if the host runs on a Windows platform. Affects the
                          name used for the PL/Java dynamic library.
    - * @deprecated See - * Installing PL/Java - * for the current installation instructions that do not require this code. - * This subproject will eventually be outdated and removed. - * @author Thomas Hallgren - */ -public class Deployer -{ - private static final int CMD_AMBIGUOUS = -2; - private static final int CMD_UNKNOWN = -1; - private static final int CMD_UNINSTALL = 0; - private static final int CMD_INSTALL = 1; - private static final int CMD_REINSTALL = 2; - private static final int CMD_USER = 3; - private static final int CMD_PASSWORD = 4; - private static final int CMD_DATABASE = 5; - private static final int CMD_HOSTNAME = 6; - private static final int CMD_PORT = 7; - - private final Connection m_connection; - - private static final ArrayList s_commands = new ArrayList(); - - static - { - s_commands.add(CMD_UNINSTALL, "uninstall"); - s_commands.add(CMD_INSTALL, "install"); - s_commands.add(CMD_REINSTALL, "reinstall"); - s_commands.add(CMD_USER, "user"); - s_commands.add(CMD_PASSWORD, "password"); - s_commands.add(CMD_DATABASE, "database"); - s_commands.add(CMD_HOSTNAME, "host"); - s_commands.add(CMD_PORT, "port"); - } - - private static final int getCommand(String arg) - { - int top = s_commands.size(); - int candidateCmd = CMD_UNKNOWN; - for(int idx = 0; idx < top; ++idx) - { - if(((String)s_commands.get(idx)).startsWith(arg)) - { - if(candidateCmd != CMD_UNKNOWN) - return CMD_AMBIGUOUS; - candidateCmd = idx; - } - } - return candidateCmd; - } - public static void printUsage() - { - PrintStream out = System.err; - out.println("usage: java org.postgresql.pljava.deploy.Deployer"); - out.println(" {-install | -uninstall | -reinstall}"); - out.println(" [ -host ] # default is localhost"); - out.println(" [ -port ] # default is blank"); - out.println(" [ -database ] # default is name of current user"); - out.println(" [ -user ] # default is name of current user"); - out.println(" [ -password ] # default is no password"); - } - - public static void main(String[] argv) - { - String driverClass = "org.postgresql.Driver"; - String hostName = "localhost"; - String userName = System.getProperty("user.name", "postgres"); - String database = userName; - String subsystem = "postgresql"; - String password = null; - String portNumber = null; - int cmd = CMD_UNKNOWN; - - int top = argv.length; - for(int idx = 0; idx < top; ++idx) - { - String arg = argv[idx]; - if(arg.length() < 2) - { - printUsage(); - return; - } - - if(arg.charAt(0) == '-') - { - int optCmd = getCommand(arg.substring(1)); - switch(optCmd) - { - case CMD_INSTALL: - case CMD_UNINSTALL: - case CMD_REINSTALL: - if(cmd != CMD_UNKNOWN) - { - printUsage(); - return; - } - cmd = optCmd; - break; - - case CMD_USER: - if(++idx < top) - { - userName = argv[idx]; - if(userName.length() > 0 - && userName.charAt(0) != '-') - break; - } - printUsage(); - return; - - case CMD_PASSWORD: - if(++idx < top) - { - password = argv[idx]; - if(password.length() > 0 - && password.charAt(0) != '-') - break; - } - printUsage(); - return; - - case CMD_DATABASE: - if(++idx < top) - { - database = argv[idx]; - if(database.length() > 0 - && database.charAt(0) != '-') - break; - } - printUsage(); - return; - - case CMD_HOSTNAME: - if(++idx < top) - { - hostName = argv[idx]; - if(hostName.length() > 0 - && hostName.charAt(0) != '-') - break; - } - printUsage(); - return; - - case CMD_PORT: - if(++idx < top) - { - portNumber = argv[idx]; - if(portNumber.length() > 0 - && portNumber.charAt(0) != '-') - break; - } - printUsage(); - return; - - default: - 
printUsage(); - return; - } - } - } - if(cmd == CMD_UNKNOWN) - { - printUsage(); - return; - } - - try - { - Class.forName(driverClass); - - StringBuffer cc = new StringBuffer(); - cc.append("jdbc:"); - cc.append(subsystem); - cc.append("://"); - cc.append(hostName); - if(portNumber != null) - { - cc.append(':'); - cc.append(portNumber); - } - cc.append('/'); - cc.append(database); - Connection c = DriverManager.getConnection( - cc.toString(), - userName, - password); - - checkIfConnectedAsSuperuser(c); - c.setAutoCommit(false); - Deployer deployer = new Deployer(c); - - if(cmd == CMD_UNINSTALL || cmd == CMD_REINSTALL) - { - deployer.dropSQLJSchema(); - } - - if(cmd == CMD_INSTALL || cmd == CMD_REINSTALL) - { - deployer.createSQLJSchema(); - deployer.initJavaHandlers(); - deployer.initializeSQLJSchema(); - } - c.commit(); - c.close(); - } - catch(Exception e) - { - e.printStackTrace(); - } - } - - public Deployer(Connection c) - { - m_connection = c; - } - - public static void checkIfConnectedAsSuperuser(Connection conn) - throws SQLException - { - Statement stmt = conn.createStatement(); - ResultSet rs = stmt.executeQuery("SHOW IS_SUPERUSER"); - - try - { - if(rs.next() && rs.getString(1).equals("on")) - return; - } - finally - { - rs.close(); - stmt.close(); - } - - throw new SQLException( - "You must be a superuser to deploy/undeploy pl/Java."); - } - - public void dropSQLJSchema() - throws SQLException - { - Statement stmt = m_connection.createStatement(); - Savepoint p = null; - - try - { - if (m_connection.getMetaData().supportsSavepoints()) - p = m_connection.setSavepoint(); - stmt.execute("DROP LANGUAGE java CASCADE"); - stmt.execute("DROP LANGUAGE javaU CASCADE"); - } - catch(SQLException e) - { - /* roll back to savepoint (if available) - * or restart the transaction (if no savepoint is available) - * & ignore the exception */ - - if (p != null) - m_connection.rollback(p); - else - /* Assuming that the dropSQLJSchema is the - * first method called in a transaction, - * we can afford to restart the transaction. 
- * - * This solution is designed for PostgreSQL < 8 (no savepoints available) - */ - m_connection.rollback(); - } - finally - { - if (p != null) - m_connection.releaseSavepoint(p); - } - - stmt.execute("DROP SCHEMA sqlj CASCADE"); - stmt.close(); - } - - public void createSQLJSchema() - throws SQLException - { - Statement stmt = m_connection.createStatement(); - stmt.execute("CREATE SCHEMA sqlj"); - stmt.execute("GRANT USAGE ON SCHEMA sqlj TO public"); - stmt.close(); - } - - public void initializeSQLJSchema() - throws SQLException - { - Statement stmt = m_connection.createStatement(); - - stmt.execute( - "CREATE TABLE sqlj.jar_repository(" + - " jarId SERIAL PRIMARY KEY," + - " jarName VARCHAR(100) UNIQUE NOT NULL," + - " jarOrigin VARCHAR(500) NOT NULL," + - " jarOwner NAME NOT NULL," + - " jarManifest TEXT" + - ")"); - stmt.execute("GRANT SELECT ON sqlj.jar_repository TO public"); - - stmt.execute( - "CREATE TABLE sqlj.jar_entry(" + - " entryId SERIAL PRIMARY KEY," + - " entryName VARCHAR(200) NOT NULL," + - " jarId INT NOT NULL REFERENCES sqlj.jar_repository ON DELETE CASCADE," + - " entryImage BYTEA NOT NULL," + - " UNIQUE(jarId, entryName)" + - ")"); - - stmt.execute("GRANT SELECT ON sqlj.jar_entry TO public"); - - stmt.execute( - "CREATE TABLE sqlj.jar_descriptor(" + - " jarId INT REFERENCES sqlj.jar_repository ON DELETE CASCADE," + - " ordinal INT2," + - " PRIMARY KEY (jarId, ordinal)," + - " entryId INT NOT NULL REFERENCES sqlj.jar_entry ON DELETE CASCADE" + - ")"); - - stmt.execute("GRANT SELECT ON sqlj.jar_descriptor TO public"); - - // Create the table maintaining the class path. - // - stmt.execute( - "CREATE TABLE sqlj.classpath_entry(" + - " schemaName VARCHAR(30) NOT NULL," + - " ordinal INT2 NOT NULL," + // Ordinal in class path - " jarId INT NOT NULL REFERENCES sqlj.jar_repository ON DELETE CASCADE," + - " PRIMARY KEY(schemaName, ordinal)" + - ")"); - stmt.execute("GRANT SELECT ON sqlj.classpath_entry TO public"); - - // Create the table maintaining the SQL to Java type mappings - // - stmt.execute( - "CREATE TABLE sqlj.typemap_entry(" + - " mapId SERIAL PRIMARY KEY," + - " javaName VARCHAR(200) NOT NULL," + - " sqlName NAME NOT NULL" + - ")"); - stmt.execute("GRANT SELECT ON sqlj.typemap_entry TO public"); - - // These are the proposed SQL standard methods. - // - stmt.execute( - "CREATE FUNCTION sqlj.install_jar(VARCHAR, VARCHAR, BOOLEAN) RETURNS void" + - " AS 'org.postgresql.pljava.management.Commands.installJar'" + - " LANGUAGE java SECURITY DEFINER"); - - stmt.execute( - "CREATE FUNCTION sqlj.replace_jar(VARCHAR, VARCHAR, BOOLEAN) RETURNS void" + - " AS 'org.postgresql.pljava.management.Commands.replaceJar'" + - " LANGUAGE java SECURITY DEFINER"); - - stmt.execute( - "CREATE FUNCTION sqlj.remove_jar(VARCHAR, BOOLEAN) RETURNS void" + - " AS 'org.postgresql.pljava.management.Commands.removeJar'" + - " LANGUAGE java SECURITY DEFINER"); - - // Not proposed, but very useful if you want to send the image over - // your JDBC connection. - // - stmt.execute( - "CREATE FUNCTION sqlj.install_jar(BYTEA, VARCHAR, BOOLEAN) RETURNS void" + - " AS 'org.postgresql.pljava.management.Commands.installJar'" + - " LANGUAGE java SECURITY DEFINER"); - - stmt.execute( - "CREATE FUNCTION sqlj.replace_jar(BYTEA, VARCHAR, BOOLEAN) RETURNS void" + - " AS 'org.postgresql.pljava.management.Commands.replaceJar'" + - " LANGUAGE java SECURITY DEFINER"); - - // This function is not as proposed. It's more Java'ish. 
The proposal - // using sqlj.alter_jar_path is in my opinion bloated and will not be - // well received in the Java community. Luckily, the support is suggested - // to be optional. - // - stmt.execute( - "CREATE FUNCTION sqlj.set_classpath(VARCHAR, VARCHAR) RETURNS void" + - " AS 'org.postgresql.pljava.management.Commands.setClassPath'" + - " LANGUAGE java SECURITY DEFINER"); - - stmt.execute( - "CREATE FUNCTION sqlj.get_classpath(VARCHAR) RETURNS VARCHAR" + - " AS 'org.postgresql.pljava.management.Commands.getClassPath'" + - " LANGUAGE java STABLE SECURITY DEFINER"); - - // The following functions are not included in the standard. Type mapping - // is radically different in SQL 2003 and requires a lot of additions to - // the PostgreSQL dialect. - // - stmt.execute( - "CREATE FUNCTION sqlj.add_type_mapping(VARCHAR, VARCHAR) RETURNS void" + - " AS 'org.postgresql.pljava.management.Commands.addTypeMapping'" + - " LANGUAGE java SECURITY DEFINER"); - - stmt.execute( - "CREATE FUNCTION sqlj.drop_type_mapping(VARCHAR) RETURNS void" + - " AS 'org.postgresql.pljava.management.Commands.dropTypeMapping'" + - " LANGUAGE java SECURITY DEFINER"); - - stmt.close(); - } - - public void initJavaHandlers() - throws SQLException - { - Statement stmt = m_connection.createStatement(); - stmt.execute( - "CREATE FUNCTION sqlj.java_call_handler()" + - " RETURNS language_handler" + - " AS 'pljava'" + - " LANGUAGE C"); - - stmt.execute("CREATE TRUSTED LANGUAGE java HANDLER sqlj.java_call_handler"); - - stmt.execute( - "CREATE FUNCTION sqlj.javau_call_handler()" + - " RETURNS language_handler" + - " AS 'pljava'" + - " LANGUAGE C"); - - stmt.execute("CREATE LANGUAGE javaU HANDLER sqlj.javau_call_handler"); - stmt.close(); - } -} diff --git a/pljava-deploy/src/main/java/org/postgresql/pljava/deploy/package-info.java b/pljava-deploy/src/main/java/org/postgresql/pljava/deploy/package-info.java deleted file mode 100644 index a9a390e0..00000000 --- a/pljava-deploy/src/main/java/org/postgresql/pljava/deploy/package-info.java +++ /dev/null @@ -1,13 +0,0 @@ -/** - *

    Note: this package is obsolescent as of PL/Java 1.5.0. - * See Installing PL/Java - * for the current installation instructions that do not require this code. - * This subproject will eventually be outdated and removed. - *

    - * One approach to completing the SQL steps of PL/Java installation; will build - * a normal, standalone Java app to connect to the database over a vanilla - * JDBC connection and install, remove, or reinstall the necessary objects. - * See {@link org.postgresql.pljava.deploy.Deployer Deployer} for usage. - * @author Thomas Hallgren - */ -package org.postgresql.pljava.deploy; diff --git a/pljava-examples/pom.xml b/pljava-examples/pom.xml index 194ad830..8747d392 100644 --- a/pljava-examples/pom.xml +++ b/pljava-examples/pom.xml @@ -4,11 +4,38 @@ org.postgresql pljava.app - 1.5.0 + 1.6.10 pljava-examples PL/Java examples Examples of Java stored procedures using PL/Java + + + + saxon-examples + + + net.sf.saxon + Saxon-HE + [10.0,11) + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + + org/postgresql/pljava/example/saxon/*.java + + + + + + + + org.postgresql @@ -16,8 +43,28 @@ ${project.version} + + + org.apache.maven.plugins + maven-compiler-plugin + + + org/postgresql/pljava/example/*.java + org/postgresql/pljava/example/annotation/*.java + + + --processor-module-path + ${basedir}/../pljava-api/target/pljava-api-${project.version}.jar + + + + org.postgresql.pljava.annotation.processing.DDRProcessor + + + + org.apache.maven.plugins maven-jar-plugin @@ -30,4 +77,189 @@ + + + + + org.postgresql + pljava-pgxs + ${pljava.pgxs.version} + + + + scripted-report + + + + + + + + + + diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/AnyTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/AnyTest.java index f8551f2d..8eade522 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/AnyTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/AnyTest.java @@ -1,39 +1,21 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the The BSD 3-Clause License - * which accompanies this distribution, and is available at - * http://opensource.org/licenses/BSD-3-Clause - * - * Contributors: - * Tada AB + * Compatibility shim for legacy examples package names and outputs. */ package org.postgresql.pljava.example; import java.lang.reflect.Array; -import java.sql.SQLException; -import java.util.logging.Logger; -/** - * Provides example methods to illustrate the polymorphic types {@code any}, - * {@code anyarray}, and {@code anyelement}. - */ public class AnyTest { - private static Logger s_logger = Logger.getAnonymousLogger(); - - public static void logAny(Object param) throws SQLException { - s_logger.config("logAny received an object of class " + param.getClass()); + public static void logAny(Object param) { + // Intentionally no logging to keep regression output stable. 
} - public static Object logAnyElement(Object param) throws SQLException { - s_logger.config("logAnyElement received an object of class " - + param.getClass()); + public static Object logAnyElement(Object param) { return param; } public static Object[] makeArray(Object param) { - Object[] result = (Object[]) Array.newInstance(param.getClass(), 1); + Object[] result = (Object[])Array.newInstance(param.getClass(), 1); result[0] = param; return result; } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataBooleans.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataBooleans.java index 071649af..141764b2 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataBooleans.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataBooleans.java @@ -64,8 +64,8 @@ public int compare(Method a, Method b) { Class returntype; Object[] args = new Object[0]; Boolean result = null; - ArrayList mn = new ArrayList(); - ArrayList mr = new ArrayList(); + ArrayList mn = new ArrayList<>(); + ArrayList mr = new ArrayList<>(); for (int i = 0; i < m.length; i++) { prototype = m[i].getParameterTypes(); diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataInts.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataInts.java index a2f54ab5..671d535d 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataInts.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataInts.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -9,6 +9,7 @@ * Contributors: * Tada AB * Filip Hrbek + * Chapman Flack */ package org.postgresql.pljava.example; @@ -65,8 +66,8 @@ public int compare(Method a, Method b) { Class returntype; Object[] args = new Object[0]; Integer result = null; - ArrayList mn = new ArrayList(); - ArrayList mr = new ArrayList(); + ArrayList mn = new ArrayList<>(); + ArrayList mr = new ArrayList<>(); for (int i = 0; i < m.length; i++) { prototype = m[i].getParameterTypes(); @@ -82,10 +83,10 @@ public int compare(Method a, Method b) { } catch (InvocationTargetException e) { log.config("Method: " + m[i].getName() + " => " + e.getTargetException().getMessage()); - result = new Integer(-1); + result = -1; } catch (Exception e) { - log.config("Method: " + m[i].getName() + " => " + e.getMessage()); - result = new Integer(-1); + log.info("Method: " + m[i].getName() + " => " + e.getMessage()); + result = -1; } mn.add(m[i].getName()); diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataStrings.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataStrings.java index 360d41a2..ddf71187 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataStrings.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataStrings.java @@ -64,8 +64,8 @@ public int compare(Method a, Method b) { Class returntype; Object[] args = new Object[0]; String result = null; - ArrayList mn = new ArrayList(); - ArrayList mr = new ArrayList(); + ArrayList mn = new ArrayList<>(); + ArrayList mr = new ArrayList<>(); for (int i = 0; i < m.length; i++) { prototype = m[i].getParameterTypes(); diff --git 
a/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataTest.java index 227d5bc2..1ac15be6 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/MetaDataTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -9,6 +9,7 @@ * Contributors: * Tada AB * Filip Hrbek + * Chapman Flack */ package org.postgresql.pljava.example; @@ -49,7 +50,7 @@ public MetaDataTest(String methodCall) throws SQLException { .getConnection("jdbc:default:connection"); DatabaseMetaData md = conn.getMetaData(); ResultSet rs; - m_results = new ArrayList(); + m_results = new ArrayList<>(); StringBuffer result; parseMethodCall(methodCall); @@ -136,8 +137,8 @@ private void parseMethodCall(String methodCall) throws SQLException { String paramString; String auxParamString; String param; - ArrayList objects = new ArrayList(); - ArrayList> types = new ArrayList>(); + ArrayList objects = new ArrayList<>(); + ArrayList> types = new ArrayList<>(); if (m.matches()) { m_methodName = m.group(1); @@ -182,7 +183,7 @@ private void parseMethodCall(String methodCall) throws SQLException { .compile("^\\s*\"((?:[^\\\\\"]|\\\\.)*)\"\\s*(?:,|$)(.*)$"); Matcher marr; String auxParamArr = param.trim(); - ArrayList strList = new ArrayList(); + ArrayList strList = new ArrayList<>(); while (!auxParamArr.equals("")) { marr = parr.matcher(auxParamArr); @@ -201,7 +202,7 @@ private void parseMethodCall(String methodCall) throws SQLException { // a // boolean { - objects.add(new Boolean(param)); + objects.add(Boolean.valueOf(param)); types.add(Boolean.TYPE); } else if (param.startsWith("(")) // it is String, String[] // or int[] null @@ -238,7 +239,7 @@ private void parseMethodCall(String methodCall) throws SQLException { .compile("^\\s*(\\d+)\\s*(?:,|$)(.*)$"); Matcher marr; String auxParamArr = param.trim(); - ArrayList intList = new ArrayList(); + ArrayList intList = new ArrayList<>(); while (!auxParamArr.equals("")) { marr = parr.matcher(auxParamArr); @@ -247,14 +248,14 @@ private void parseMethodCall(String methodCall) throws SQLException { + param); } - intList.add(new Integer(marr.group(1))); + intList.add(Integer.valueOf(marr.group(1))); auxParamArr = marr.group(2).trim(); } objects.add(intList.toArray(new Integer[0])); types.add(int[].class); } else // it is an int { - objects.add(new Integer(param)); + objects.add(Integer.valueOf(param)); types.add(Integer.TYPE); } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/Parameters.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/Parameters.java index 7c7f7055..cdf562bd 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/Parameters.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/Parameters.java @@ -1,13 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. - * - * All rights reserved. 
This program and the accompanying materials - * are made available under the terms of the The BSD 3-Clause License - * which accompanies this distribution, and is available at - * http://opensource.org/licenses/BSD-3-Clause - * - * Contributors: - * Tada AB + * Compatibility shim for legacy examples package names and signatures. */ package org.postgresql.pljava.example; @@ -17,259 +9,123 @@ import java.sql.SQLException; import java.sql.Time; import java.sql.Timestamp; -import java.text.DateFormat; import java.text.SimpleDateFormat; +import java.util.Locale; import java.util.TimeZone; -import java.util.logging.Logger; -/** - * Some methods used for testing parameter and return value coersion and - * resolution of overloaded methods. - * - * @author Thomas Hallgren - */ public class Parameters { - public static double addNumbers(short a, int b, long c, BigDecimal d, - BigDecimal e, float f, double g) { - return d.doubleValue() + e.doubleValue() + a + b + c + f + g; - } + private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); - public static int addOne(int value) { - return value + 1; + public static Timestamp getTimestamp() { + return new Timestamp(System.currentTimeMillis()); } - public static int addOne(Integer value) { - return value.intValue() + 1; + public static String print(Date value) { + SimpleDateFormat fmt = + new SimpleDateFormat("EEEE, MMMM d, yyyy", Locale.US); + fmt.setTimeZone(UTC); + return fmt.format(value); } - public static int addOneLong(long value) { - return (int) value + 1; + public static String print(Time value) { + SimpleDateFormat fmt = + new SimpleDateFormat("HH:mm:ss z Z", Locale.US); + fmt.setTimeZone(UTC); + return fmt.format(value); } - public static int countNulls(Integer[] intArray) throws SQLException { - int nullCount = 0; - int top = intArray.length; - for (int idx = 0; idx < top; ++idx) { - if (intArray[idx] == null) - nullCount++; - } - return nullCount; + public static String print(Timestamp value) { + SimpleDateFormat fmt = + new SimpleDateFormat("EEEE, MMMM d, yyyy h:mm:ss a z", Locale.US); + fmt.setTimeZone(UTC); + return fmt.format(value); } - public static int countNulls(ResultSet input) throws SQLException { - int nullCount = 0; - int top = input.getMetaData().getColumnCount(); - for (int idx = 1; idx <= top; ++idx) { - input.getObject(idx); - if (input.wasNull()) - nullCount++; - } - return nullCount; + public static String print(String value) { + return value; } - public static Date getDate() { - return new Date(System.currentTimeMillis()); + public static byte[] print(byte[] value) { + return value; } - public static Time getTime() { - return new Time(System.currentTimeMillis()); + public static short print(short value) { + return value; } - public static Timestamp getTimestamp() { - return new Timestamp(System.currentTimeMillis()); + public static short[] print(short[] value) { + return value; } - static void log(String msg) { - // GCJ has a somewhat serious bug (reported) - // - if ("GNU libgcj".equals(System.getProperty("java.vm.name"))) { - System.out.print("INFO: "); - System.out.println(msg); - } else - Logger.getAnonymousLogger().config(msg); + public static int print(int value) { + return value; } - public static Integer nullOnEven(int value) { - return (value % 2) == 0 ? 
null : new Integer(value); + public static int[] print(int[] value) { + return value; } - public static byte print(byte value) { - log("byte " + value); + public static long print(long value) { return value; } - public static byte[] print(byte[] byteArray) { - StringBuffer buf = new StringBuffer(); - int top = byteArray.length; - buf.append("byte[] of size " + top); - if (top > 0) { - buf.append(" {"); - buf.append(byteArray[0]); - for (int idx = 1; idx < top; ++idx) { - buf.append(','); - buf.append(byteArray[idx]); - } - buf.append('}'); - } - log(buf.toString()); - return byteArray; + public static long[] print(long[] value) { + return value; } - public static String print(String value) - { - log("string " + value); + public static float print(float value) { return value; } - public static String print(Date time) { - TimeZone.setDefault(TimeZone.getTimeZone("UTC")); - DateFormat p = DateFormat.getDateInstance(DateFormat.FULL); - log("Local Date is " + p.format(time)); - log("TZ = " + TimeZone.getDefault().getDisplayName()); - return p.format(time); + public static float[] print(float[] value) { + return value; } public static double print(double value) { - log("double " + value); return value; } - public static double[] print(double[] doubleArray) { - StringBuffer buf = new StringBuffer(); - int top = doubleArray.length; - buf.append("double[] of size " + top); - if (top > 0) { - buf.append(" {"); - buf.append(doubleArray[0]); - for (int idx = 1; idx < top; ++idx) { - buf.append(','); - buf.append(doubleArray[idx]); - } - buf.append('}'); - } - log(buf.toString()); - return doubleArray; - } - - public static float print(float value) { - log("float " + value); + public static double[] print(double[] value) { return value; } - public static float[] print(float[] floatArray) { - StringBuffer buf = new StringBuffer(); - int top = floatArray.length; - buf.append("float[] of size " + top); - if (top > 0) { - buf.append(" {"); - buf.append(floatArray[0]); - for (int idx = 1; idx < top; ++idx) { - buf.append(','); - buf.append(floatArray[idx]); - } - buf.append('}'); - } - log(buf.toString()); - return floatArray; - } - - public static int print(int value) { - log("int " + value); + public static Integer[] print(Integer[] value) { return value; } - public static int[] print(int[] intArray) { - StringBuffer buf = new StringBuffer(); - int top = intArray.length; - buf.append("int[] of size " + top); - if (top > 0) { - buf.append(" {"); - buf.append(intArray[0]); - for (int idx = 1; idx < top; ++idx) { - buf.append(','); - buf.append(intArray[idx]); - } - buf.append('}'); - } - log(buf.toString()); - return intArray; + public static int addOne(Integer value) { + return value.intValue() + 1; } - public static Integer[] print(Integer[] intArray) { - StringBuffer buf = new StringBuffer(); - int top = intArray.length; - buf.append("Integer[] of size " + top); - if (top > 0) { - buf.append(" {"); - buf.append(intArray[0]); - for (int idx = 1; idx < top; ++idx) { - buf.append(','); - buf.append(intArray[idx]); - } - buf.append('}'); - } - log(buf.toString()); - return intArray; + public static Integer nullOnEven(int value) { + return (value % 2) == 0 ? 
null : value; } - public static long print(long value) { - log("long " + value); - return value; + public static double addNumbers(short a, int b, long c, BigDecimal d, + BigDecimal e, float f, double g) { + return d.doubleValue() + e.doubleValue() + a + b + c + f + g; } - public static long[] print(long[] longArray) { - StringBuffer buf = new StringBuffer(); - int top = longArray.length; - buf.append("long[] of size " + top); - if (top > 0) { - buf.append(" {"); - buf.append(longArray[0]); - for (int idx = 1; idx < top; ++idx) { - buf.append(','); - buf.append(longArray[idx]); + public static int countNulls(Integer[] intArray) throws SQLException { + int nullCount = 0; + int top = intArray.length; + for (int idx = 0; idx < top; ++idx) { + if (intArray[idx] == null) { + nullCount++; } - buf.append('}'); } - log(buf.toString()); - return longArray; - } - - public static short print(short value) { - log("short " + value); - return value; + return nullCount; } - public static short[] print(short[] shortArray) { - StringBuffer buf = new StringBuffer(); - int top = shortArray.length; - buf.append("short[] of size " + top); - if (top > 0) { - buf.append(" {"); - buf.append(shortArray[0]); - for (int idx = 1; idx < top; ++idx) { - buf.append(','); - buf.append(shortArray[idx]); + public static int countNulls(ResultSet input) throws SQLException { + int nullCount = 0; + int top = input.getMetaData().getColumnCount(); + for (int idx = 1; idx <= top; ++idx) { + input.getObject(idx); + if (input.wasNull()) { + nullCount++; } - buf.append('}'); } - log(buf.toString()); - return shortArray; - } - - public static String print(Time time) { - TimeZone.setDefault(TimeZone.getTimeZone("UTC")); - DateFormat p = new SimpleDateFormat("HH:mm:ss z Z"); - log("Local Time is " + p.format(time)); - log("TZ = " + TimeZone.getDefault().getDisplayName()); - return p.format(time); - } - - public static String print(Timestamp time) { - TimeZone.setDefault(TimeZone.getTimeZone("UTC")); - DateFormat p = DateFormat.getDateTimeInstance(DateFormat.FULL, - DateFormat.FULL); - log("Local Timestamp is " + p.format(time)); - log("TZ = " + TimeZone.getDefault().getDisplayName()); - return p.format(time); + return nullCount; } } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/ResultSetTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/ResultSetTest.java index 833b4da2..9e70aaaa 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/ResultSetTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/ResultSetTest.java @@ -42,7 +42,7 @@ public static Iterator executeSelect(String selectSQL) throws SQLExcepti public ResultSetTest(String selectSQL) throws SQLException { Connection conn = DriverManager .getConnection("jdbc:default:connection"); - m_results = new ArrayList(); + m_results = new ArrayList<>(); StringBuffer result; Statement stmt = conn.createStatement(); diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/SPIActions.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/SPIActions.java index e25ee3ee..2d771841 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/SPIActions.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/SPIActions.java @@ -1,329 +1,8 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. - * - * All rights reserved. 
This program and the accompanying materials - * are made available under the terms of the The BSD 3-Clause License - * which accompanies this distribution, and is available at - * http://opensource.org/licenses/BSD-3-Clause - * - * Contributors: - * Tada AB + * Compatibility shim for legacy examples package names. */ package org.postgresql.pljava.example; -import java.sql.Connection; -import java.sql.Date; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Savepoint; -import java.sql.Statement; -import java.sql.Time; -import java.util.logging.Logger; - -import org.postgresql.pljava.SavepointListener; -import org.postgresql.pljava.Session; -import org.postgresql.pljava.SessionManager; - -/** - * Some methods used for testing the SPI JDBC driver. - * - * @author Thomas Hallgren - */ -public class SPIActions { - private static final String SP_CHECKSTATE = "sp.checkState"; - - private static final SavepointListener spListener = new SavepointListener() { - @Override - public void onAbort(Session session, Savepoint savepoint, - Savepoint parent) throws SQLException { - log("Abort of savepoint " + savepoint.getSavepointId()); - nextState(session, 3, 4); - } - - @Override - public void onCommit(Session session, Savepoint savepoint, - Savepoint parent) throws SQLException { - log("Commit of savepoint " + savepoint.getSavepointId()); - nextState(session, 3, 4); - } - - @Override - public void onStart(Session session, Savepoint savepoint, - Savepoint parent) throws SQLException { - log("Start of savepoint " + savepoint.getSavepointId()); - nextState(session, 0, 1); - } - }; - - public static String getDateAsString() throws SQLException { - ResultSet rs = null; - Statement stmt = null; - Connection conn = DriverManager - .getConnection("jdbc:default:connection"); - try { - stmt = conn.createStatement(); - rs = stmt.executeQuery("SELECT CURRENT_DATE"); - if (rs.next()) - return rs.getDate(1).toString(); - return "Date could not be retrieved"; - } finally { - if (rs != null) - rs.close(); - if (stmt != null) - stmt.close(); - conn.close(); - } - } - - public static String getTimeAsString() throws SQLException { - ResultSet rs = null; - Statement stmt = null; - Connection conn = DriverManager - .getConnection("jdbc:default:connection"); - try { - stmt = conn.createStatement(); - rs = stmt.executeQuery("SELECT CURRENT_TIME"); - if (rs.next()) - return rs.getTime(1).toString(); - return "Time could not be retrieved"; - } finally { - if (rs != null) - rs.close(); - if (stmt != null) - stmt.close(); - conn.close(); - } - } - - static void log(String msg) { - // GCJ has a somewhat serious bug (reported) - // - if ("GNU libgcj".equals(System.getProperty("java.vm.name"))) { - System.out.print("INFO: "); - System.out.println(msg); - } else - Logger.getAnonymousLogger().config(msg); - } - - public static int maxFromSetReturnExample(int base, int increment) - throws SQLException { - int max = Integer.MIN_VALUE; - Connection conn = DriverManager - .getConnection("jdbc:default:connection"); - PreparedStatement stmt = null; - ResultSet rs = null; - - try { - stmt = conn - .prepareStatement("SELECT base FROM setReturnExample(?, ?)"); - stmt.setInt(1, base); - stmt.setInt(2, increment); - rs = stmt.executeQuery(); - while (rs.next()) { - base = rs.getInt(1); - if (base > max) - max = base; - } - return base; - } finally { - if (rs != null) - rs.close(); - if (stmt != null) - stmt.close(); - conn.close(); - } - } - - /** - * 
Test of bug #1556 - * - */ - public static void nestedStatements(int innerCount) throws SQLException { - Connection connection = DriverManager - .getConnection("jdbc:default:connection"); - Statement statement = connection.createStatement(); - - // Create a set of ID's so that we can do somthing semi-useful during - // the long loop. - // - statement.execute("DELETE FROM javatest.employees1"); - statement.execute("INSERT INTO javatest.employees1 VALUES(" - + "1, 'Calvin Forrester', 10000)"); - statement.execute("INSERT INTO javatest.employees1 VALUES(" - + "2, 'Edwin Archer', 20000)"); - statement.execute("INSERT INTO javatest.employees1 VALUES(" - + "3, 'Rebecka Shawn', 30000)"); - statement.execute("INSERT INTO javatest.employees1 VALUES(" - + "4, 'Priscilla Johnson', 25000)"); - - int idx = 1; - ResultSet results = statement - .executeQuery("SELECT * FROM javatest.hugeResult(" + innerCount - + ")"); - while (results.next()) { - Statement innerStatement = connection.createStatement(); - innerStatement - .executeUpdate("UPDATE javatest.employees1 SET salary = salary + 1 WHERE id=" - + idx); - innerStatement.close(); - if (++idx == 5) - idx = 0; - } - results.close(); - statement.close(); - connection.close(); - } - - private static void nextState(Session session, int expected, int next) - throws SQLException { - Integer state = (Integer) session.getAttribute(SP_CHECKSTATE); - if (state == null || state.intValue() != expected) - throw new SQLException(SP_CHECKSTATE + ": Expected " + expected - + ", got " + state); - session.setAttribute(SP_CHECKSTATE, new Integer(next)); - } - - public static int testSavepointSanity() throws SQLException { - Connection conn = DriverManager - .getConnection("jdbc:default:connection"); - - // Create an anonymous savepoint. - // - log("Attempting to set an anonymous savepoint"); - Session currentSession = SessionManager.current(); - currentSession.setAttribute(SP_CHECKSTATE, new Integer(0)); - currentSession.addSavepointListener(spListener); - - Savepoint sp = conn.setSavepoint(); - nextState(currentSession, 1, 2); - try { - Statement stmt = conn.createStatement(); - log("Attempting to set a SAVEPOINT using SQL (should fail)"); - stmt.execute("SAVEPOINT foo"); - } catch (SQLException e) { - log("It failed allright. Everything OK then"); - log("Rolling back to anonymous savepoint"); - - nextState(currentSession, 2, 3); - conn.rollback(sp); - nextState(currentSession, 4, 5); - return 1; - } finally { - currentSession.removeSavepointListener(spListener); - } - throw new SQLException( - "SAVEPOINT through SQL succeeded. That's bad news!"); - } - - public static int testTransactionRecovery() throws SQLException { - Connection conn = DriverManager - .getConnection("jdbc:default:connection"); - - // Create an anonymous savepoint. - // - log("Attempting to set an anonymous savepoint"); - Session currentSession = SessionManager.current(); - currentSession.setAttribute(SP_CHECKSTATE, new Integer(0)); - currentSession.addSavepointListener(spListener); - - Statement stmt = conn.createStatement(); - Savepoint sp = conn.setSavepoint(); - nextState(currentSession, 1, 2); - - try { - log("Attempting to execute a statement with a syntax error"); - stmt.execute("THIS MUST BE A SYNTAX ERROR"); - } catch (SQLException e) { - log("It failed. 
Let's try to recover " - + "by rolling back to anonymous savepoint"); - nextState(currentSession, 2, 3); - conn.rollback(sp); - nextState(currentSession, 4, 5); - log("Rolled back."); - log("Now let's try to execute a correct statement."); - - currentSession.setAttribute(SP_CHECKSTATE, new Integer(0)); - sp = conn.setSavepoint(); - nextState(currentSession, 1, 2); - ResultSet rs = stmt.executeQuery("SELECT 'OK'"); - while (rs.next()) { - log("Expected: OK; Retrieved: " + rs.getString(1)); - } - rs.close(); - stmt.close(); - nextState(currentSession, 2, 3); - conn.releaseSavepoint(sp); - nextState(currentSession, 4, 5); - return 1; - } finally { - currentSession.removeSavepointListener(spListener); - } - - // Should never get here - return -1; - } - - public static int transferPeopleWithSalary(int salary) throws SQLException { - Connection conn = DriverManager - .getConnection("jdbc:default:connection"); - PreparedStatement select = null; - PreparedStatement insert = null; - PreparedStatement delete = null; - ResultSet rs = null; - - String stmt; - try { - stmt = "SELECT id, name, salary FROM employees1 WHERE salary > ?"; - log(stmt); - select = conn.prepareStatement(stmt); - - stmt = "INSERT INTO employees2(id, name, salary, transferDay, transferTime) VALUES (?, ?, ?, ?, ?)"; - log(stmt); - insert = conn.prepareStatement(stmt); - - stmt = "DELETE FROM employees1 WHERE id = ?"; - log(stmt); - delete = conn.prepareStatement(stmt); - - log("assigning parameter value " + salary); - select.setInt(1, salary); - log("Executing query"); - rs = select.executeQuery(); - int rowNo = 0; - log("Doing next"); - while (rs.next()) { - log("Processing row " + ++rowNo); - int id = rs.getInt(1); - String name = rs.getString(2); - int empSal = rs.getInt(3); - - insert.setInt(1, id); - insert.setString(2, name); - insert.setInt(3, empSal); - long now = System.currentTimeMillis(); - insert.setDate(4, new Date(now)); - insert.setTime(5, new Time(now)); - int nRows = insert.executeUpdate(); - log("Insert processed " + nRows + " rows"); - - delete.setInt(1, id); - nRows = delete.executeUpdate(); - log("Delete processed " + nRows + " rows"); - log("Doing next"); - } - if (rowNo == 0) - log("No row found"); - return rowNo; - } finally { - if (select != null) - select.close(); - if (insert != null) - insert.close(); - if (delete != null) - delete.close(); - conn.close(); - } - } +public class SPIActions + extends org.postgresql.pljava.example.annotation.SPIActions { } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/SetOfRecordTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/SetOfRecordTest.java index be64c5dd..e43344a3 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/SetOfRecordTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/SetOfRecordTest.java @@ -1,50 +1,17 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the The BSD 3-Clause License - * which accompanies this distribution, and is available at - * http://opensource.org/licenses/BSD-3-Clause - * - * Contributors: - * Tada AB + * Compatibility shim for legacy examples package names. 
*/ package org.postgresql.pljava.example; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; import java.sql.SQLException; import org.postgresql.pljava.ResultSetHandle; -/** - * Example implementing the {@code ResultSetHandle} interface, to return - * the {@link ResultSet} from any SQL {@code SELECT} query passed as a string - * to the {@link #executeSelect executeSelect} function. - */ -public class SetOfRecordTest implements ResultSetHandle { +public class SetOfRecordTest { public static ResultSetHandle executeSelect(String selectSQL) - throws SQLException { - return new SetOfRecordTest(selectSQL); - } - - private final PreparedStatement m_statement; - - public SetOfRecordTest(String selectSQL) throws SQLException { - Connection conn = DriverManager - .getConnection("jdbc:default:connection"); - m_statement = conn.prepareStatement(selectSQL); - } - - @Override - public void close() throws SQLException { - m_statement.close(); - } - - @Override - public ResultSet getResultSet() throws SQLException { - return m_statement.executeQuery(); + throws SQLException + { + return org.postgresql.pljava.example.annotation.SetOfRecordTest + .executeSelect(selectSQL); } } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/UsingProperties.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/UsingProperties.java index c4d9c1d4..4676f813 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/UsingProperties.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/UsingProperties.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -8,6 +8,7 @@ * * Contributors: * Tada AB + * Chapman Flack */ package org.postgresql.pljava.example; @@ -31,22 +32,23 @@ * of PL/Java's {@code ObjectPool} facility. 
* @author Thomas Hallgren */ -public class UsingProperties implements ResultSetProvider, PooledObject { +public class UsingProperties implements ResultSetProvider.Large, PooledObject { private static Logger s_logger = Logger.getAnonymousLogger(); public static ResultSetProvider getProperties() throws SQLException { - ObjectPool pool = SessionManager.current().getObjectPool( - UsingProperties.class); + ObjectPool pool = + SessionManager.current().getObjectPool(UsingProperties.class); return (ResultSetProvider) pool.activateInstance(); } private final Properties m_properties; - private final ObjectPool m_pool; + private final ObjectPool m_pool; private Enumeration m_propertyIterator; - public UsingProperties(ObjectPool pool) throws IOException { + public UsingProperties(ObjectPool pool) throws IOException + { m_pool = pool; m_properties = new Properties(); @@ -83,7 +85,7 @@ public void activate() { } @Override - public boolean assignRowValues(ResultSet receiver, int currentRow) + public boolean assignRowValues(ResultSet receiver, long currentRow) throws SQLException { if (m_propertyIterator == null || !m_propertyIterator.hasMoreElements()) { s_logger.fine("no more rows, returning false"); diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/UsingPropertiesAsScalarSet.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/UsingPropertiesAsScalarSet.java index 8670e7db..d486c2a6 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/UsingPropertiesAsScalarSet.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/UsingPropertiesAsScalarSet.java @@ -30,7 +30,7 @@ public class UsingPropertiesAsScalarSet { public static Iterator getProperties() throws SQLException { StringBuffer bld = new StringBuffer(); - ArrayList list = new ArrayList(); + ArrayList list = new ArrayList<>(); Connection conn = DriverManager .getConnection("jdbc:default:connection"); Statement stmt = conn.createStatement(); diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Aggregates.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Aggregates.java new file mode 100644 index 00000000..500d18bd --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Aggregates.java @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import static java.lang.Math.fma; + +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.postgresql.pljava.annotation.Aggregate; +import org.postgresql.pljava.annotation.Function; +import static + org.postgresql.pljava.annotation.Function.OnNullInput.RETURNS_NULL; +import static org.postgresql.pljava.annotation.Function.Effects.IMMUTABLE; +import org.postgresql.pljava.annotation.SQLAction; + +/** + * A class demonstrating several aggregate functions. + *

    + * They are (some of) the same two-variable statistical aggregates already + * offered in core PostgreSQL, just because they make clear examples. For + * numerical reasons, they might not produce results identical to PG's built-in + * ones. These closely follow the "schoolbook" formulas in the HP-11C calculator + * owner's handbook, while the ones built into PostgreSQL use a more clever + * algorithm instead to reduce rounding error in the finishers. + *

    + * All these aggregates can be computed by different finishers that share a + * state that accumulates the count of rows, sum of x, sum of xx, sum of y, sum + * of yy, and sum of xy. That is easy with finishers that don't need to modify + * the state, so the default {@code FinishEffect=READ_ONLY} is appropriate. + *

    + * Everything here takes the y parameter first, then x, like the SQL ones. + */ +@SQLAction(requires = { "avgx", "avgy", "slope", "intercept" }, install = { + "WITH" + + " data (y, x) AS (VALUES" + + " (1.761 ::float8, 5.552::float8)," + + " (1.775, 5.963)," + + " (1.792, 6.135)," + + " (1.884, 6.313)," + + " (1.946, 6.713)" + + " )," + + " expected (avgx, avgy, slope, intercept) AS (" + + " SELECT 6.1352, 1.8316, 0.1718, 0.7773" + + " )," + + " got AS (" + + " SELECT" + + " round( avgx(y,x)::numeric, 4) AS avgx," + + " round( avgy(y,x)::numeric, 4) AS avgy," + + " round( slope(y,x)::numeric, 4) AS slope," + + " round(intercept(y,x)::numeric, 4) AS intercept" + + " FROM" + + " data" + + " )" + + "SELECT" + + " CASE WHEN expected IS NOT DISTINCT FROM got" + + " THEN javatest.logmessage('INFO', 'aggregate examples ok')" + + " ELSE javatest.logmessage('WARNING', 'aggregate examples ng')" + + " END" + + " FROM" + + " expected, got" +}) +@Aggregate(provides = "avgx", + name = { "javatest", "avgx" }, + arguments = { "y double precision", "x double precision" }, + plan = @Aggregate.Plan( + stateType = "double precision[]", + /* + * State size is merely a hint to PostgreSQL's planner and can + * be omitted. Perhaps it is worth hinting, as the state type + * "double precision[]" does not tell PostgreSQL how large the array + * might be. Anyway, this is an example and should show how to do it. + * For this aggregate, the state never grows; the size of the initial + * value is the size forever. + * + * To get a quick sense of the size, one can assign the initial state + * as the default for a table column, then consult the pg_node_tree for + * the attribute default entry: + * + * CREATE TEMPORARY TABLE + * foo (bar DOUBLE PRECISION[] DEFAULT '{0,0,0,0,0,0}'); + * + * SELECT + * xpath('/CONST/member[@name="constvalue"]/@length', + * javatest.pgNodeTreeAsXML(adbin) ) + * FROM pg_attrdef + * WHERE adrelid = 'foo'::regclass; + * + * In this case the 72 that comes back represents 48 bytes for six + * float8s, plus 24 for varlena and array overhead, with no null bitmap + * because no element is null. 
+ */ + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" }, + finish = { "javatest", "finishAvgX" } + ) +) +@Aggregate(provides = "avgy", + name = { "javatest", "avgy" }, + arguments = { "y double precision", "x double precision" }, + plan = @Aggregate.Plan( + stateType = "double precision[]", + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" }, + finish = { "javatest", "finishAvgY" } + ) +) +@Aggregate(provides = "slope", + name = { "javatest", "slope" }, + arguments = { "y double precision", "x double precision" }, + plan = @Aggregate.Plan( + stateType = "double precision[]", + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" }, + finish = { "javatest", "finishSlope" } + ) +) +@Aggregate(provides = "intercept", + name = { "javatest", "intercept" }, + arguments = { "y double precision", "x double precision" }, + plan = @Aggregate.Plan( + stateType = "double precision[]", + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" }, + finish = { "javatest", "finishIntercept" } + ) +) +@Aggregate( + name = "javatest.regression", + arguments = { "y double precision", "x double precision" }, + plan = @Aggregate.Plan( + stateType = "double precision[]", + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" }, + finish = { "javatest", "finishRegr" } + ), + /* + * There is no special reason for this aggregate and not the others to have + * a movingPlan; one example is enough, that's all. + */ + movingPlan = @Aggregate.Plan( + stateType = "double precision[]", + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" }, + remove = { "javatest", "removeXY" }, + finish = { "javatest", "finishRegr" } + ) +) +public class Aggregates +{ + private Aggregates() { } // do not instantiate + + private static final int N = 0; + private static final int SX = 1; + private static final int SXX = 2; + private static final int SY = 3; + private static final int SYY = 4; + private static final int SXY = 5; + + /** + * A common accumulator for two-variable statistical aggregates that + * depend on n, Sx, Sxx, Sy, Syy, and Sxy. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static double[] accumulateXY(double[] state, double y, double x) + { + state[N ] += 1.; + state[SX ] += x; + state[SXX] = fma(x, x, state[2]); + state[SY ] += y; + state[SYY] = fma(y, y, state[4]); + state[SXY] = fma(x, y, state[5]); + return state; + } + + /** + * 'Removes' from the state a row previously accumulated, for possible use + * in a window with a moving frame start. + *

    + * This can be a numerically poor idea for exactly the reasons covered in + * the PostgreSQL docs involving loss of significance in long sums, but it + * does demonstrate the idea. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static double[] removeXY(double[] state, double y, double x) + { + state[N ] -= 1.; + state[SX ] -= x; + state[SXX] = fma(x, -x, state[2]); + state[SY ] -= y; + state[SYY] = fma(y, -y, state[4]); + state[SXY] = fma(x, -y, state[5]); + return state; + } + + /** + * Finisher that returns the count of non-null rows accumulated. + *

    + * As an alternative to collecting all {@code @Aggregate} annotations up at + * the top of the class and specifying everything explicitly, an + * {@code @Aggregate} annotation can be placed on a method, either + * the accumulator or the finisher, in which case less needs to be + * specified. The state type can always be determined from the annotated + * method (whether it is the accumulator or the finisher), and its SQL name + * will be the default name for the aggregate also. When the method is the + * accumulator, the aggregate's arguments are also determined. + *

    + * This being a finisher method, the {@code @Aggregate} annotation placed + * here does need to specify the arguments, initial state, and accumulator. + */ + @Aggregate( + arguments = { "y double precision", "x double precision" }, + plan = @Aggregate.Plan( + stateSize = 72, + initialState = "{0,0,0,0,0,0}", + accumulate = { "javatest", "accumulateXY" } + ) + ) + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static long count(double[] state) + { + return (long)state[N]; + } + + /** + * Finisher that returns the mean of the accumulated x values. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static Double finishAvgX(double[] state) + { + if ( 0. == state[N] ) + return null; + return state[SX] / state[N]; + } + + /** + * Finisher that returns the mean of the accumulated y values. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static Double finishAvgY(double[] state) + { + if ( 0. == state[N] ) + return null; + return state[SY] / state[N]; + } + + /** + * Finisher that returns the slope of a regression line. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static Double finishSlope(double[] state) + { + if ( 2. > state[N] ) + return null; + + double numer = fma(state[SX], -state[SY], state[N] * state[SXY]); + double denom = fma(state[SX], -state[SX], state[N] * state[SXX]); + return 0. == denom ? null : numer / denom; + } + + /** + * Finisher that returns the intercept of a regression line. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static Double finishIntercept(double[] state) + { + if ( 2 > state[N] ) + return null; + + double numer = fma(state[SY], state[SXX], -state[SX] * state[SXY]); + double denom = fma(state[SX], -state[SX], state[N] * state[SXX]); + return 0. == denom ? null : numer / denom; + } + + /** + * A finisher that returns the slope and intercept together. + *

    + * An aggregate can be built over this finisher and will return a record + * result, but at present (PG 13) access to that record by field doesn't + * work, as its tuple descriptor gets lost along the way. Unclear so far + * whether it might be feasible to fix that. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL, + out = { "slope double precision", "intercept double precision" } + ) + public static boolean finishRegr(double[] state, ResultSet out) + throws SQLException + { + out.updateObject(1, finishSlope(state)); + out.updateObject(2, finishIntercept(state)); + return true; + } + + /** + * An example aggregate that sums its input. + *

    + * The simplest kind of aggregate, having only an accumulate function, + * default initial state, and no finisher (the state value is the return) + * can be declared very concisely by annotating the accumulate method. + */ + @Aggregate + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static double sum(double state, double x) + { + return state + x; + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/AnyTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/AnyTest.java new file mode 100644 index 00000000..f1f7515b --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/AnyTest.java @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.lang.reflect.Array; +import java.sql.SQLException; +import java.util.logging.Logger; + +import org.postgresql.pljava.annotation.Function; +import static org.postgresql.pljava.annotation.Function.Effects.IMMUTABLE; +import static + org.postgresql.pljava.annotation.Function.OnNullInput.RETURNS_NULL; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLType; + +/** + * Provides example methods to illustrate the polymorphic types {@code any}, + * {@code anyarray}, and {@code anyelement}. + */ +public class AnyTest { + private static Logger s_logger = Logger.getAnonymousLogger(); + + /** + * Log (at INFO level) the Java class received for the passed argument. + */ + @Function(schema="javatest", effects=IMMUTABLE, onNullInput=RETURNS_NULL) + public static void logAny(@SQLType("pg_catalog.any") Object param) + throws SQLException + { + s_logger.info("logAny received an object of class " + param.getClass()); + } + + /** + * Log (at INFO level) the Java class received for the passed argument, and + * return the same value. + */ + @Function(schema="javatest", effects=IMMUTABLE, onNullInput=RETURNS_NULL, + type="pg_catalog.anyelement") + public static Object logAnyElement( + @SQLType("pg_catalog.anyelement") Object param) + throws SQLException + { + s_logger.info("logAnyElement received an object of class " + + param.getClass()); + return param; + } + + /** + * Return the Java object received for the passed argument, wrapped in a + * one-element array with the object's class as its element type. 
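The aggregates and polymorphic functions declared above can be exercised from SQL once the examples jar is installed and deployed. A sketch (the sample values reuse the ones from the install action above; the window frame is chosen only for illustration; javatest.regression is the aggregate given a movingPlan, so PostgreSQL can maintain its state incrementally with accumulateXY/removeXY over a moving frame):

-- windowed use of the moving-plan aggregate, plus the simple sum aggregate
SELECT javatest.regression(y, x)
         OVER (ORDER BY x ROWS BETWEEN 2 PRECEDING AND CURRENT ROW),
       javatest.sum(x) OVER (ORDER BY x)
  FROM (VALUES (1.761::float8, 5.552::float8),
               (1.775, 5.963),
               (1.792, 6.135)) AS data(y, x);

-- the polymorphic examples: logAny accepts an argument of any type, and
-- makeArray wraps its argument in a one-element array of the same type
SELECT javatest.logAny(42);
SELECT javatest.makeArray('hello'::text);   -- returns text[]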
+ */ + @Function(schema="javatest", effects=IMMUTABLE, onNullInput=RETURNS_NULL, + type="pg_catalog.anyarray") + public static Object[] makeArray( + @SQLType("pg_catalog.anyelement") Object param) + { + Object[] result = (Object[]) Array.newInstance(param.getClass(), 1); + result[0] = param; + return result; + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ComplexScalar.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ComplexScalar.java index d2a8a45e..32e65830 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ComplexScalar.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ComplexScalar.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2021 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -15,15 +15,22 @@ import java.io.IOException; import java.io.StreamTokenizer; import java.io.StringReader; + +import static java.lang.Math.hypot; + import java.sql.SQLData; import java.sql.SQLException; import java.sql.SQLInput; import java.sql.SQLOutput; + import java.util.logging.Logger; +import org.postgresql.pljava.annotation.Aggregate; import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.Operator; +import static org.postgresql.pljava.annotation.Operator.SELF; +import static org.postgresql.pljava.annotation.Operator.TWIN; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLType; import org.postgresql.pljava.annotation.BaseUDT; import static org.postgresql.pljava.annotation.Function.Effects.IMMUTABLE; @@ -32,30 +39,79 @@ /** * Complex (re and im parts are doubles) implemented in Java as a scalar UDT. + *

+ * The {@code SQLAction} here demonstrates a {@code requires} tag + * ("complex relationals") that has multiple providers, something not allowed + * prior to PL/Java 1.6.1. It is more succinct to require one tag and have each + * of the relational operators 'provide' it than to have to define and require + * several different tags to accomplish the same thing. + *

    + * The operator class created here is not actively used for anything (the + * examples will not break if it is removed), but the {@code minMagnitude} + * example aggregate does specify a {@code sortOperator}, which PostgreSQL will + * not exploit in query optimization without finding it as a member of + * a {@code btree} operator class. + *

    + * Note that {@code CREATE OPERATOR CLASS} implicitly creates an operator family + * as well (unless one is explicitly specified), so the correct {@code remove} + * action to clean everything up is {@code DROP OPERATOR FAMILY} (which takes + * care of dropping the class). */ -@SQLAction(requires={ - "scalar complex type", "complex assertHasValues", "complexscalar boot fn" - }, install={ - "SELECT javatest.complexscalar()", +@SQLAction(requires = { "complex assertHasValues", "complex relationals" }, + install = { + "CREATE OPERATOR CLASS javatest.complex_ops" + + " DEFAULT FOR TYPE javatest.complex USING btree" + + " AS" + + " OPERATOR 1 javatest.< ," + + " OPERATOR 2 javatest.<= ," + + " OPERATOR 3 javatest.= ," + + " OPERATOR 4 javatest.>= ," + + " OPERATOR 5 javatest.> ," + + " FUNCTION 1 javatest.cmpMagnitude(javatest.complex,javatest.complex)", + + "SELECT javatest.assertHasValues(" + + " CAST('(1,2)' AS javatest.complex), 1, 2)", + "SELECT javatest.assertHasValues(" + - " CAST('(1,2)' AS javatest.complex), 1, 2)" + " 2.0 + CAST('(1,2)' AS javatest.complex) + 3.0, 6, 2)", + + "SELECT" + + " CASE WHEN" + + " '(1,2)'::javatest.complex < '(2,2)'::javatest.complex" + + " AND" + + " '(2,2)'::javatest.complex > '(1,2)'::javatest.complex" + + " AND" + + " '(1,2)'::javatest.complex <= '(2,2)'::javatest.complex" + + " THEN javatest.logmessage('INFO', 'ComplexScalar operators ok')" + + " ELSE javatest.logmessage('WARNING', 'ComplexScalar operators ng')" + + " END" + }, + + remove = { + "DROP OPERATOR FAMILY javatest.complex_ops USING btree" } ) -@BaseUDT(schema="javatest", name="complex", provides="scalar complex type", +@BaseUDT(schema="javatest", name="complex", internalLength=16, alignment=BaseUDT.Alignment.DOUBLE) public class ComplexScalar implements SQLData { private static Logger s_logger = Logger.getAnonymousLogger(); /** * Return the same 'complex' passed in, logging its contents at level INFO. + *

    + * Also create an unnecessary {@code <<} operator for this, with an equally + * unnecessary explicit operand type, simply as a regression test + * of issue #330. * @param cpl any instance of this UDT * @return the same instance passed in */ - @Function(requires="scalar complex type", type="javatest.complex", + @Operator( + name = "javatest.<<", right = "javatest.complex" + ) + @Function( schema="javatest", name="logcomplex", effects=IMMUTABLE, onNullInput=RETURNS_NULL) - public static ComplexScalar logAndReturn( - @SQLType("javatest.complex") ComplexScalar cpl) { + public static ComplexScalar logAndReturn(ComplexScalar cpl) { s_logger.info(cpl.getSQLTypeName() + cpl); return cpl; } @@ -69,10 +125,9 @@ public static ComplexScalar logAndReturn( * @throws SQLException if the values do not match */ @Function(schema="javatest", - requires="scalar complex type", provides="complex assertHasValues", + provides="complex assertHasValues", effects=IMMUTABLE, onNullInput=RETURNS_NULL) - public static void assertHasValues( - @SQLType("javatest.complex") ComplexScalar cpl, double re, double im) + public static void assertHasValues(ComplexScalar cpl, double re, double im) throws SQLException { if ( cpl.m_x != re || cpl.m_y != im ) @@ -91,7 +146,7 @@ public static ComplexScalar parse(String input, String typeName) && tz.nextToken() == StreamTokenizer.TT_NUMBER) { double y = tz.nval; if (tz.nextToken() == ')') { - s_logger.info(typeName + " from string"); + s_logger.fine(typeName + " from string"); return new ComplexScalar(x, y, typeName); } } @@ -112,6 +167,157 @@ public static ComplexScalar parse(String input, String typeName) public ComplexScalar() { } + /** + * Add two instances of {@code ComplexScalar}. + */ + @Operator(name = {"javatest","+"}, commutator = SELF) + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static ComplexScalar add(ComplexScalar a, ComplexScalar b) + { + return new ComplexScalar( + a.m_x + b.m_x, a.m_y + b.m_y, a.m_typeName); + } + + /** + * Add a {@code ComplexScalar} and a real (supplied as a {@code double}). + */ + @Operator(name = {"javatest","+"}, commutator = TWIN) + @Operator(name = {"javatest","+"}, synthetic = TWIN) + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static ComplexScalar add(ComplexScalar a, double b) + { + return new ComplexScalar(a.m_x + b, a.m_y, a.m_typeName); + } + + /** + * True if the left argument is smaller than the right in magnitude + * (Euclidean distance from the origin). + */ + @Operator( + name = "javatest.<", + commutator = "javatest.>", negator = "javatest.>=", + provides = "complex relationals" + ) + @Operator( + name = "javatest.<=", synthetic = "javatest.magnitudeLE", + provides = "complex relationals" + ) + @Operator( + name = "javatest.>=", synthetic = "javatest.magnitudeGE", + commutator = "javatest.<=", provides = "complex relationals" + ) + @Operator( + name = "javatest.>", synthetic = "javatest.magnitudeGT", + negator = "javatest.<=", provides = "complex relationals" + ) + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static boolean magnitudeLT(ComplexScalar a, ComplexScalar b) + { + return hypot(a.m_x, a.m_y) < hypot(b.m_x, b.m_y); + } + + /** + * True if the left argument and the right are componentwise equal. 
+ */ + @Operator( + name = "javatest.=", + commutator = SELF, negator = "javatest.<>", + provides = "complex relationals" + ) + @Operator( + name = "javatest.<>", synthetic = "javatest.componentsNE", + commutator = SELF, provides = "complex relationals" + ) + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static boolean componentsEQ(ComplexScalar a, ComplexScalar b) + { + return a.m_x == b.m_x && a.m_y == b.m_y; + } + + /** + * True if the complex argument is real-valued and equal to the real + * argument. + *

    + * From one equality method on (complex,double) can be synthesized all four + * cross-type operators, {@code =} and {@code <>} for that pair of types and + * their {@code TWIN} commutators. One of the {@code <>} twins does need to + * specify what its synthetic function should be named. + */ + @Operator( + name = "javatest.=", + commutator = TWIN, negator = "javatest.<>", + provides = "complex:double relationals" + ) + @Operator( + name = "javatest.=", + synthetic = TWIN, negator = "javatest.<>", + provides = "complex:double relationals" + ) + @Operator( + name = "javatest.<>", synthetic = "javatest.neToReal", + commutator = TWIN, provides = "complex:double relationals" + ) + @Operator( + name = "javatest.<>", synthetic = TWIN, + provides = "complex:double relationals" + ) + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static boolean eqToReal(ComplexScalar a, double b) + { + return a.m_x == b && 0. == a.m_y; + } + + /** + * As an ordinary function, returns the lesser in magnitude of two + * arguments; as a simple aggregate, returns the least in magnitude over its + * aggregated arguments. + *

    + * As an aggregate, this is a simple example where this method serves as the + * {@code accumulate} function, the state (a here) has the same + * type as the argument (here b), there is no {@code finish} + * function, and the final value of the state is the result. + *

    + * An optimization is available in case there is an index on the aggregated + * values based on the {@code <} operator above; in that case, the first + * value found in a scan of that index is the aggregate result. That is + * indicated here by naming the {@code <} operator as {@code sortOperator}. + */ + @Aggregate(sortOperator = "javatest.<") + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL + ) + public static ComplexScalar minMagnitude(ComplexScalar a, ComplexScalar b) + { + return magnitudeLT(a, b) ? a : b; + } + + /** + * An integer-returning comparison function by complex magnitude, usable to + * complete an example {@code btree} operator class. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL, + provides = "complex relationals" + ) + public static int cmpMagnitude(ComplexScalar a, ComplexScalar b) + { + if ( magnitudeLT(a, b) ) + return -1; + if ( magnitudeLT(b, a) ) + return 1; + return 0; + } + public ComplexScalar(double x, double y, String typeName) { m_x = x; m_y = y; @@ -126,7 +332,7 @@ public String getSQLTypeName() { @Function(effects=IMMUTABLE, onNullInput=RETURNS_NULL) @Override public void readSQL(SQLInput stream, String typeName) throws SQLException { - s_logger.info(typeName + " from SQLInput"); + s_logger.fine(typeName + " from SQLInput"); m_x = stream.readDouble(); m_y = stream.readDouble(); m_typeName = typeName; @@ -135,7 +341,7 @@ public void readSQL(SQLInput stream, String typeName) throws SQLException { @Function(effects=IMMUTABLE, onNullInput=RETURNS_NULL) @Override public String toString() { - s_logger.info(m_typeName + " toString"); + s_logger.fine(m_typeName + " toString"); StringBuffer sb = new StringBuffer(); sb.append('('); sb.append(m_x); @@ -148,27 +354,8 @@ public String toString() { @Function(effects=IMMUTABLE, onNullInput=RETURNS_NULL) @Override public void writeSQL(SQLOutput stream) throws SQLException { - s_logger.info(m_typeName + " to SQLOutput"); + s_logger.fine(m_typeName + " to SQLOutput"); stream.writeDouble(m_x); stream.writeDouble(m_y); } - - /** - * A no-op function that forces the ComplexScalar class to be loaded. - * This is only necessary because the deployment-descriptor install - * actions contain a query making use of this type, and PostgreSQL does - * not expect type in/out/send/recv functions to need an updated - * snapshot, so it will try to find this class in the snapshot from - * before the jar was installed, and fail. By providing this function, - * which defaults to volatile so it gets an updated snapshot, and - * calling it first, the class will be found and loaded; once it is - * loaded, the user-defined type operations are able to find it. - *

    - * Again, this is only an issue when trying to make use of the newly - * loaded UDT from right within the deployment descriptor for the jar. - */ - @Function(schema="javatest", provides="complexscalar boot fn") - public static void ComplexScalar() - { - } } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ComplexTuple.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ComplexTuple.java index 54a04197..459956e8 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ComplexTuple.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ComplexTuple.java @@ -12,6 +12,10 @@ */ package org.postgresql.pljava.example.annotation; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; import java.sql.SQLData; import java.sql.SQLException; import java.sql.SQLInput; @@ -21,7 +25,6 @@ import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.MappedUDT; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLType; import static org.postgresql.pljava.annotation.Function.Effects.IMMUTABLE; import static @@ -31,11 +34,13 @@ * Complex (re and im parts are doubles) implemented in Java as a mapped UDT. */ @SQLAction(requires={ - "complextuple type", "complextuple assertHasValues"}, install= + "complextuple assertHasValues","complextuple setParameter"}, install={ "SELECT javatest.assertHasValues(" + - " CAST('(1,2)' AS javatest.complextuple), 1, 2)" + " CAST('(1,2)' AS javatest.complextuple), 1, 2)", + "SELECT javatest.setParameter()" + } ) -@MappedUDT(schema="javatest", name="complextuple", provides="complextuple type", +@MappedUDT(schema="javatest", name="complextuple", structure={ "x float8", "y float8" @@ -50,10 +55,8 @@ public class ComplexTuple implements SQLData { * @return the same instance passed in */ @Function(schema="javatest", name="logcomplex", - effects=IMMUTABLE, onNullInput=RETURNS_NULL, - type="javatest.complextuple", requires="complextuple type") - public static ComplexTuple logAndReturn( - @SQLType("javatest.complextuple") ComplexTuple cpl) { + effects=IMMUTABLE, onNullInput=RETURNS_NULL) + public static ComplexTuple logAndReturn(ComplexTuple cpl) { s_logger.info(cpl.getSQLTypeName() + "(" + cpl.m_x + ", " + cpl.m_y + ")"); return cpl; @@ -67,18 +70,38 @@ public static ComplexTuple logAndReturn( * @param im the 'imaginary' value it should have * @throws SQLException if the values do not match */ - @Function(schema="javatest", - requires="complextuple type", provides="complextuple assertHasValues", + @Function(schema="javatest", provides="complextuple assertHasValues", effects=IMMUTABLE, onNullInput=RETURNS_NULL) - public static void assertHasValues( - @SQLType("javatest.complextuple") ComplexTuple cpl, - double re, double im) + public static void assertHasValues(ComplexTuple cpl, double re, double im) throws SQLException { if ( cpl.m_x != re || cpl.m_y != im ) throw new SQLException("assertHasValues fails"); } + /** + * Pass a 'complextuple' UDT as a parameter to a PreparedStatement + * that returns it, and verify that it makes the trip intact. 
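A short SQL sketch of the ComplexScalar operators and aggregate defined above, again assuming the examples jar is installed and deployed (the literals are invented; the sortOperator optimization would apply when aggregating an indexed column, not a VALUES list as here):

-- least-magnitude aggregate over some complex values
SELECT javatest.minMagnitude(c)
  FROM (VALUES ('(3,4)'::javatest.complex), ('(1,1)'), ('(0,2)')) AS t(c);

-- one of the cross-type operators synthesized from eqToReal; true, because
-- (2,0) is real-valued and equal to 2.0
SELECT '(2,0)'::javatest.complex OPERATOR(javatest.=) 2.0::float8;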
+ */ + @Function(schema="javatest", provides="complextuple setParameter", + effects=IMMUTABLE, onNullInput=RETURNS_NULL) + public static void setParameter() throws SQLException + { + Connection c = DriverManager.getConnection("jdbc:default:connection"); + PreparedStatement ps = + c.prepareStatement("SELECT CAST(? AS javatest.complextuple)"); + ComplexTuple ct = new ComplexTuple(); + ct.m_x = 1.5; + ct.m_y = 2.5; + ct.m_typeName = "javatest.complextuple"; + ps.setObject(1, ct); + ResultSet rs = ps.executeQuery(); + rs.next(); + ct = (ComplexTuple)rs.getObject(1); + ps.close(); + assertHasValues(ct, 1.5, 2.5); + } + private double m_x; private double m_y; diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ConditionalDDR.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ConditionalDDR.java index 0d877259..d982be0b 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ConditionalDDR.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ConditionalDDR.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015- Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -11,8 +11,8 @@ */ package org.postgresql.pljava.example.annotation; +import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; /** * Test of a very simple form of conditional execution in the deployment @@ -25,50 +25,110 @@ * that are not tagged with an implementor name). The default setting of * {@code pljava.implementors} is simply {@code postgresql}. *

    - * In this example, an SQLAction (with the default implementor name PostgreSQL - * so it should always execute) tests some condition and, based on the result, - * adds {@code LifeIsGood} to the list of recognized implementor names. + * In this example, an {@code SQLAction} (with the default implementor name + * {@code PostgreSQL} so it should always execute) tests some condition and, + * based on the result, adds {@code LifeIsGood} to the list of recognized + * implementor names. *

    - * Later SQLActions with that implementor name should also be executed, while - * those with a different, unrecognized implementor should not. + * Later {@code SQLAction}s with that implementor name should also be executed, + * while those with a different, unrecognized implementor should not. *

    * That is what happens at deployment (or undeployment) time, when the * jar has been loaded into the target database and the deployment descriptor is * being processed. *

    - * The {@code provides} and {@code requires} attributes matter at + * The {@code provides} attributes matter at * compile time: they are hints to the DDR generator so it will be sure - * to write the SQLAction that tests the condition ahead of the ones that - * depend on the condition having been tested. The example illustrates that an - * SQLAction's {@code implementor} is treated as an implicit {@code requires}. - * Unlike an explicit one, it is weak: if there is nothing declared that - * {@code provides} it, that's not an error; affected SQLActions will just be + * to write the {@code SQLAction} that tests the condition ahead of whatever + * depends on the condition having been tested. The example illustrates that + * {@code implementor} is treated also as an implicit {@code requires}. + *

+ * Note: while ISO SQL/JRT specifies that an {@code <implementor name>} is an + * SQL identifier, which would match case-insensitively unless quoted, PL/Java + * treats {@code provides} elements as arbitrary strings that can only be + * matched with identical spelling and case. Therefore, the matching of the + * implicit {@code requires} of an {@code <implementor block>} and the explicit + * {@code provides} on an {@code SQLAction} depends on the {@code implementor} + * and {@code provides} values being supplied with identical spelling and case. + *

    + * The dependency created when matching {@code implementor} to {@code provides} + * differs in three ways from an explicit dependency between {@code requires} + * and {@code provides}: + *

+ *<ul>
+ *<li>It is weak: if there is nothing declared that {@code provides} it, + * that's not an error; affected {@code <implementor block>}s will just be * placed as late in the generated DDR as other dependencies allow, in case - * something in the preceding actions will be setting those implementor tags. + * something in the preceding actions will be setting those implementor names.
+ *<li>It does not have its sense reversed when generating + * the {@code REMOVE} actions of the deployment descriptor. Ordinary + * requirements do, so the dependent objects get dropped before the things they + * depend on. + * But the code for setting a conditional implementor name has to be placed + * ahead of the uses of the name, whether deploying or undeploying.
+ *<li>An {@code SQLAction} setting an implementor name does not need to have + * any {@code remove=} actions. If it does not (the usual case), its + * {@code install=} actions will be used in both sections of the deployment + * descriptor.
+ *</ul>

    * This example adds {@code LifeIsGood} ahead of the prior content of * {@code pljava.implementors}. Simply replacing the value would stop the * default implementor PostgreSQL being recognized, probably not what's wanted. * The final {@code true} argument to {@code set_config} makes the setting * local, so it is reverted when the transaction completes. + *

    + * In addition to the goodness-of-life examples, this file also generates + * one or more statements setting PostgreSQL-version-based implementor names + * that are relied on by various other examples in this directory. */ -@SQLActions({ - @SQLAction(provides={"LifeIsGood","LifeIsNotGood"}, install= - "SELECT CASE 42 WHEN 42 THEN " + - " set_config('pljava.implementors', 'LifeIsGood,' || " + - " current_setting('pljava.implementors'), true) " + - "ELSE " + - " set_config('pljava.implementors', 'LifeIsNotGood,' || " + - " current_setting('pljava.implementors'), true) " + - "END" - ), +@SQLAction(provides={"LifeIsGood","LifeIsNotGood"}, install= + "SELECT CASE 42 WHEN 42 THEN " + + " set_config('pljava.implementors', 'LifeIsGood,' || " + + " current_setting('pljava.implementors'), true) " + + "ELSE " + + " set_config('pljava.implementors', 'LifeIsNotGood,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) + +@SQLAction(implementor="LifeIsGood", install= + "SELECT javatest.logmessage('INFO', 'ConditionalDDR looking good!')" +) + +@SQLAction(implementor="LifeIsNotGood", install= + "SELECT javatest.logmessage('WARNING', " + + " 'ConditionalDDR: This should not be executed')" +) - @SQLAction(implementor="LifeIsGood", install= - "SELECT javatest.logmessage('INFO', 'Looking good!')" - ), +@SQLAction(provides="postgresql_ge_100000", install= + "SELECT CASE WHEN" + + " 100000 <= CAST(current_setting('server_version_num') AS integer)" + + " THEN set_config('pljava.implementors', 'postgresql_ge_100000,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) +public class ConditionalDDR +{ + private ConditionalDDR() { } // do not instantiate - @SQLAction(implementor="LifeIsNotGood", install= - "SELECT javatest.logmessage('WARNING', 'This should not be executed')" - ) -}) -public class ConditionalDDR { } + /** + * Tests class names in the supplied order, returning false as soon as any + * cannot be found by the class loader(s) available to the examples jar, or + * true if all can be found. + */ + @Function(variadic = true, provides = "presentOnClassPath") + public static boolean presentOnClassPath(String[] className) + { + try + { + ClassLoader myLoader = ConditionalDDR.class.getClassLoader(); + for ( String cn : className ) + Class.forName(cn, false, myLoader); + return true; + } + catch ( ClassNotFoundException e ) + { + return false; + } + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Enumeration.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Enumeration.java index 17d1fa4c..359bb287 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Enumeration.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Enumeration.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015- Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -15,7 +15,6 @@ import java.util.Arrays; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; import org.postgresql.pljava.annotation.SQLType; import org.postgresql.pljava.annotation.Function; @@ -23,21 +22,19 @@ * Confirms the mapping of PG enum and Java String, and arrays of each, as * parameter and return types. 
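The variadic presentOnClassPath function above lends itself to the same conditional-implementor trick as the LifeIsGood example: test for an optional class and, if it is present, prepend an implementor name that later SQLActions can be tagged with. A sketch, with an invented class name and implementor name, assuming the function is on the search path:

SELECT CASE
  WHEN presentOnClassPath('com.example.OptionalDependency')
  THEN set_config('pljava.implementors',
         'optionalDependencyPresent,' || current_setting('pljava.implementors'),
         true)
  END;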
*/ -@SQLActions({ - @SQLAction(provides="mood type", - install="CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy')", - remove="DROP TYPE mood" - ), - @SQLAction( - requires={"textToMood", "moodToText", "textsToMoods", "moodsToTexts"}, - install={ - "SELECT textToMood('happy')", - "SELECT moodToText('happy'::mood)", - "SELECT textsToMoods(array['happy','happy','sad','ok'])", - "SELECT moodsToTexts(array['happy','happy','sad','ok']::mood[])" - } - ) -}) +@SQLAction(provides="mood type", + install="CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy')", + remove="DROP TYPE mood" +) +@SQLAction( + requires={"textToMood", "moodToText", "textsToMoods", "moodsToTexts"}, + install={ + "SELECT textToMood('happy')", + "SELECT moodToText('happy'::mood)", + "SELECT textsToMoods(array['happy','happy','sad','ok'])", + "SELECT moodsToTexts(array['happy','happy','sad','ok']::mood[])" + } +) public class Enumeration { @Function(requires="mood type", provides="textToMood", type="mood") diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Holdability.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Holdability.java new file mode 100644 index 00000000..709b27df --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Holdability.java @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2018- Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.postgresql.pljava.ResultSetHandle; + +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; + +/** + * Demonstrate holdability of ResultSets (test for issue 168). + *

+ * The {@code stashResultSet} method will execute a query and save its + * {@code ResultSet} (wrapped in a {@code ResultSetHandle}) in a static + * for later retrieval. The {@code unstashResultSet} method, called later + * in the same transaction, retrieves and returns the result set. A call after + * the transaction has ended will fail. + *
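+ * As an illustration only (assuming the examples jar has been installed and
+ * deployed, so the javatest schema exists), a session might exercise the
+ * pair like this:
+ *
+ *   BEGIN;
+ *   SELECT javatest.stashResultSet();
+ *   SELECT count(*) FROM javatest.unstashResultSet();
+ *   COMMIT;
+ *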

    + * The query selects all rows from {@code pg_description}, a table that should + * always exist, with more rows than the default connection {@code fetchSize}, + * to ensure the stashed {@code ResultSet} has work to do. + */ +@SQLAction(requires={"Holdability.stash", "Holdability.unstash"}, install={ + + "SELECT javatest.stashResultSet()", + + "SELECT " + + " CASE" + + " WHEN 1000 < count(*) THEN javatest.logmessage('INFO', 'Holdability OK')"+ + " ELSE javatest.logmessage('WARNING', 'Holdability suspicious')" + + " END" + + " FROM javatest.unstashResultSet()" +}) +public class Holdability implements ResultSetHandle +{ + private static Holdability s_stash; + + private ResultSet m_resultSet; + private Statement m_stmt; + + private Holdability(Statement s, ResultSet rs) + { + m_stmt = s; + m_resultSet = rs; + } + + /** + * Query all rows from {@code pg_description}, but stash the + * {@code ResultSet} for retrieval later in the same transaction by + * {@code unstashResultSet}. + *

    + * This must be called in an open, multiple-statement (non-auto) transaction + * to have any useful effect. + */ + @Function(schema="javatest", provides="Holdability.stash") + public static void stashResultSet() throws SQLException + { + Connection c = DriverManager.getConnection("jdbc:default:connection"); + PreparedStatement s = c.prepareStatement( + "SELECT * FROM pg_catalog.pg_description"); + ResultSet rs = s.executeQuery(); + s_stash = new Holdability(s, rs); + } + + /** + * Return the results stashed earlier in the same transaction by + * {@code stashResultSet}. + */ + @Function( + schema="javatest", + type="pg_catalog.pg_description", + provides="Holdability.unstash" + ) + public static ResultSetHandle unstashResultSet() throws SQLException + { + return s_stash; + } + + /* + * Necessary methods to implement ResultSetHandle follow. + */ + + @Override + public ResultSet getResultSet() throws SQLException + { + return m_resultSet; + } + + @Override + public void close() throws SQLException + { + Connection c = m_stmt.getConnection(); + m_stmt.close(); + c.close(); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/IntWithMod.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/IntWithMod.java index ae02eb8d..a31928a6 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/IntWithMod.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/IntWithMod.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2016 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -12,12 +12,16 @@ */ package org.postgresql.pljava.example.annotation; +import java.sql.Connection; +import static java.sql.DriverManager.getConnection; import java.sql.SQLData; import java.sql.SQLDataException; import java.sql.SQLException; import java.sql.SQLInput; import java.sql.SQLOutput; +import java.sql.Statement; +import org.postgresql.pljava.annotation.Cast; import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.SQLAction; import org.postgresql.pljava.annotation.SQLType; @@ -48,23 +52,13 @@ *

    * Of course this example more or less duplicates what you could do in two lines * with CREATE DOMAIN. But it is enough to illustrate the process. - *

    - * Certainly, it would be less tedious with some more annotation support and - * autogeneration of the ordering dependencies that are now added by hand here. */ -@SQLAction(requires={"IntWithMod type", "IntWithMod modApply"}, - remove="DROP CAST (javatest.IntWithMod AS javatest.IntWithMod)", +@SQLAction(requires="IntWithMod modCast", install={ - "CREATE CAST (javatest.IntWithMod AS javatest.IntWithMod)" + - " WITH FUNCTION javatest.intwithmod_typmodapply(" + - " javatest.IntWithMod, integer, boolean)", - - "COMMENT ON CAST (javatest.IntWithMod AS javatest.IntWithMod) IS '" + - "Cast that applies/verifies the type modifier on an IntWithMod.'" + "SELECT CAST('42' AS javatest.IntWithMod(even))" } ) @BaseUDT(schema="javatest", provides="IntWithMod type", - requires={"IntWithMod modIn", "IntWithMod modOut"}, typeModifierInput="javatest.intwithmod_typmodin", typeModifierOutput="javatest.intwithmod_typmodout", like="pg_catalog.int4") @@ -102,6 +96,19 @@ public String getSQLTypeName() { public void readSQL(SQLInput stream, String typeName) throws SQLException { m_value = stream.readInt(); m_typeName = typeName; + + /* + * This bit here is completely extraneous to the IntWithMod example, but + * simply included to verify that PL/Java works right if a UDT's readSQL + * method ends up invoking some other PL/Java function. + */ + try ( + Connection c = getConnection("jdbc:default:connection"); + Statement s = c.createStatement(); + ) + { + s.execute("SELECT javatest.java_addone(42)"); + } } @Function(effects=IMMUTABLE, onNullInput=RETURNS_NULL) @@ -121,9 +128,8 @@ public void writeSQL(SQLOutput stream) throws SQLException { * "even" or "odd". The modifier value is 0 for even or 1 for odd. */ @Function(schema="javatest", name="intwithmod_typmodin", - provides="IntWithMod modIn", effects=IMMUTABLE, onNullInput=RETURNS_NULL) - public static int modIn(@SQLType("cstring[]") String[] toks) + public static int modIn(@SQLType("pg_catalog.cstring[]") String[] toks) throws SQLException { if ( 1 != toks.length ) throw new SQLDataException( @@ -140,7 +146,7 @@ public static int modIn(@SQLType("cstring[]") String[] toks) * Type modifier output function for IntWithMod type. */ @Function(schema="javatest", name="intwithmod_typmodout", - provides="IntWithMod modOut", type="cstring", + type="pg_catalog.cstring", effects=IMMUTABLE, onNullInput=RETURNS_NULL) public static String modOut(int mod) throws SQLException { switch ( mod ) { @@ -155,12 +161,13 @@ public static String modOut(int mod) throws SQLException { * Function backing the type-modifier application cast for IntWithMod type. 
*/ @Function(schema="javatest", name="intwithmod_typmodapply", - requires="IntWithMod type", provides="IntWithMod modApply", - type="javatest.IntWithMod", effects=IMMUTABLE, onNullInput=RETURNS_NULL) - public static IntWithMod modApply( - @SQLType("javatest.IntWithMod") IntWithMod iwm, - int mod, boolean explicit) throws SQLException { - + effects=IMMUTABLE, onNullInput=RETURNS_NULL) + @Cast(comment= + "Cast that applies/verifies the type modifier on an IntWithMod.", + provides="IntWithMod modCast") + public static IntWithMod modApply(IntWithMod iwm, int mod, boolean explicit) + throws SQLException + { if ( -1 == mod ) return iwm; if ( (iwm.m_value & 1) != mod ) diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/JDBC42_21.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/JDBC42_21.java new file mode 100644 index 00000000..dde2eaab --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/JDBC42_21.java @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.SQLException; + +import org.postgresql.pljava.SessionManager; + +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; + +/** + * Exercise new mappings between date/time types and java.time classes + * (JDBC 4.2 change 21). + *
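+ * For a quick manual check of just one mapping (assuming the examples jar is
+ * installed and deployed, which also supplies the javatest.roundtrip function
+ * from the TypeRoundTripper example), something like this can be run by hand:
+ *
+ *   SELECT orig, roundtripped
+ *   FROM (VALUES (date '2017-08-21')) AS p(orig),
+ *     javatest.roundtrip(p, 'java.time.LocalDate') AS r(roundtripped date);
+ *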

    + * Defines a method {@link #javaSpecificationGE javaSpecificationGE} that may be + * of use for other examples. + */ +@SQLAction( + requires="TypeRoundTripper.roundTrip", + install={ + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'java.time.LocalDate passes')" + + " ELSE javatest.logmessage('WARNING', 'java.time.LocalDate fails')" + + " END" + + " FROM" + + " (VALUES" + + " (date 'infinity')," + + " (date '2017-08-21')," + + " (date '1970-03-07')," + + " (date '1919-05-29')," + + " (date '-infinity')" + + " ) AS p(orig)," + + " javatest.roundtrip(p, 'java.time.LocalDate')" + + " AS r(roundtripped date)", + + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'java.time.LocalTime passes')" + + " ELSE javatest.logmessage('WARNING', 'java.time.LocalTime fails')" + + " END" + + " FROM" + + " (VALUES" + + " (current_time::time)," + + " ('00:00:00')," + + " ('24:00:00')" + + " ) AS p(orig)," + + " javatest.roundtrip(p, 'java.time.LocalTime')" + + " AS r(roundtripped time)", + + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'java.time.OffsetTime passes')" + + " ELSE javatest.logmessage('WARNING', 'java.time.OffsetTime fails')" + + " END" + + " FROM" + + " (VALUES" + + " (current_time::timetz)," + + " ('00:00:00')," + + " ('24:00:00')" + + " ) AS p(orig)," + + " javatest.roundtrip(p, 'java.time.OffsetTime')" + + " AS r(roundtripped timetz)", + + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'java.time.LocalDateTime passes')" + + " ELSE javatest.logmessage('WARNING','java.time.LocalDateTime fails')"+ + " END" + + " FROM" + + " (SELECT 'on' = current_setting('integer_datetimes')) AS ck(idt)," + + " LATERAL (" + + " SELECT" + + " value" + + " FROM" + + " (VALUES" + + " (true, timestamp '2017-08-21 18:25:29.900005')," + + " (true, timestamp '1970-03-07 17:37:49.300009')," + + " (true, timestamp '1919-05-29 13:08:33.600001')," + + " (idt, timestamp 'infinity')," + + " (idt, timestamp '-infinity')" + + " ) AS vs(cond, value)" + + " WHERE cond" + + " ) AS p(orig)," + + " javatest.roundtrip(p, 'java.time.LocalDateTime')" + + " AS r(roundtripped timestamp)", + + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'java.time.OffsetDateTime passes')"+ + " ELSE javatest.logmessage(" + + " 'WARNING','java.time.OffsetDateTime fails')"+ + " END" + + " FROM" + + " (SELECT 'on' = current_setting('integer_datetimes')) AS ck(idt)," + + " LATERAL (" + + " SELECT" + + " value" + + " FROM" + + " (VALUES" + + " (true, timestamptz '2017-08-21 18:25:29.900005Z')," + + " (true, timestamptz '1970-03-07 17:37:49.300009Z')," + + " (true, timestamptz '1919-05-29 13:08:33.600001Z')," + + " (idt, timestamptz 'infinity')," + + " (idt, timestamptz '-infinity')" + + " ) AS vs(cond, value)" + + " WHERE cond" + + " ) AS p(orig)," + + " javatest.roundtrip(p, 'java.time.OffsetDateTime')" + + " AS r(roundtripped timestamptz)", + + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'OffsetTime as stmt param passes')"+ + " ELSE javatest.logmessage(" + + " 'WARNING','java.time.OffsetTime as stmt param fails')"+ + " END" + + " FROM" + + " (SELECT current_time::timetz) AS p(orig)," + + " javatest.roundtrip(p, 'java.time.OffsetTime', true)" + + " AS r(roundtripped timetz)" +}) +public class JDBC42_21 +{ + /** + * Return true if running under a Java specification version at 
least as + * recent as the argument ('1.6', '1.7', '1.8', '9', '10', '11', ...). + */ + @Function(schema="javatest", provides="javaSpecificationGE") + public static boolean javaSpecificationGE(String want) throws SQLException + { + String got = SessionManager.current().frozenSystemProperties() + .getProperty("java.specification.version"); + if ( want.startsWith("1.") ) + want = want.substring(2); + if ( got.startsWith("1.") ) + got = got.substring(2); + return 0 <= Integer.valueOf(got).compareTo(Integer.valueOf(want)); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/MishandledExceptions.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/MishandledExceptions.java new file mode 100644 index 00000000..95073d3f --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/MishandledExceptions.java @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2025 + Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.Connection; +import static java.sql.DriverManager.getConnection; +import java.sql.SQLException; +import java.sql.Statement; + +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLType; + +/** + * Illustrates how not to handle an exception thrown by a call into PostgreSQL. + *
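+ * For a hands-on look at the misbehavior (assuming the examples jar is
+ * installed and deployed), the function declared below can be called in a
+ * transaction of its own, which is then simply rolled back:
+ *
+ *   BEGIN;
+ *   SELECT javatest.mishandle(false);
+ *   ROLLBACK;
+ *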

    + * Such an exception must either be rethrown (or result in some higher-level + * exception being rethrown) or cleared by rolling back the transaction or + * a previously-established savepoint. If it is simply caught and not propagated + * and the error condition is not cleared, no further calls into PostgreSQL + * functionality can be made within the containing transaction. + * + * @see Catching PostgreSQL exceptions + * in Java + */ +public interface MishandledExceptions +{ + /** + * Executes an SQL statement that produces an error (twice, if requested), + * catching the resulting exception but not propagating it or rolling back + * a savepoint; then throws an unrelated exception if succeed is false. + */ + @Function(schema = "javatest") + static String mishandle( + boolean twice, @SQLType(defaultValue="true")boolean succeed) + throws SQLException + { + String rslt = null; + do + { + try + ( + Connection c = getConnection("jdbc:default:connection"); + Statement s = c.createStatement(); + ) + { + s.execute("DO LANGUAGE \"no such language\" 'no such thing'"); + } + catch ( SQLException e ) + { + rslt = e.toString(); + /* nothing rethrown, nothing rolled back <- BAD PRACTICE */ + } + } + while ( ! (twice ^= true) ); + + if ( succeed ) + return rslt; + + throw new SQLException("unrelated"); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Modules.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Modules.java new file mode 100644 index 00000000..b2e9826a --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Modules.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.lang.module.ModuleDescriptor; + +import java.sql.ResultSet; +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.Objects; + +import java.util.stream.Stream; + +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.annotation.Function; +import static org.postgresql.pljava.annotation.Function.Effects.STABLE; + +/** + * Example code to support querying for the modules in Java's boot layer. + */ +public class Modules implements ResultSetProvider.Large { + /** + * Returns information on the named modules in Java's boot module layer. + */ + @Function( + effects = STABLE, + out = { + "name pg_catalog.text", + "any_unqualified_exports boolean", + "any_unqualified_opens boolean" + } + ) + public static ResultSetProvider java_modules() + { + return new Modules( + ModuleLayer.boot().modules().stream().map(Module::getDescriptor) + .filter(Objects::nonNull)); + } + + private final Iterator iterator; + private final Runnable closer; + + private Modules(Stream s) + { + iterator = s.iterator(); + closer = s::close; + } + + @Override + public boolean assignRowValues(ResultSet receiver, long currentRow) + throws SQLException + { + if ( ! iterator.hasNext() ) + return false; + + ModuleDescriptor md = iterator.next(); + + receiver.updateString(1, md.name()); + + receiver.updateBoolean(2, + md.exports().stream().anyMatch(e -> ! 
e.isQualified())); + + receiver.updateBoolean(3, + md.isOpen() || + md.opens().stream().anyMatch(o -> ! o.isQualified())); + + return true; + } + + @Override + public void close() + { + closer.run(); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/OnInterface.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/OnInterface.java new file mode 100644 index 00000000..e98a8cfb --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/OnInterface.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import org.postgresql.pljava.annotation.Function; + +/** + * Illustrates PL/Java functions on an interface instead of a class. + *
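+ * With the examples jar installed and deployed, all four of the functions
+ * declared below should be callable, and all should agree on the answer:
+ *
+ *   SELECT javatest.answer(), javatest.nestedAnswer(),
+ *     javatest.nestedClassAnswer(), javatest.moreNestedAnswer();
+ *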

    + * The SQL/JRT standard has always just said "class", but there is no technical + * obstacle to permitting a PL/Java function to be a static interface method, so + * that earlier restriction has been relaxed. + */ +public interface OnInterface +{ + /** + * Returns the answer. + */ + @Function(schema = "javatest") + static int answer() + { + return 42; + } + + interface A + { + /** + * Again the answer. + */ + @Function(schema = "javatest") + static int nestedAnswer() + { + return 42; + } + } + + class B + { + /** + * Still the answer. + */ + @Function(schema = "javatest") + public static int nestedClassAnswer() + { + return 42; + } + + public static class C + { + /** + * That answer again. + */ + @Function(schema = "javatest") + public static int moreNestedAnswer() + { + return 42; + } + } + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PGF1010962.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PGF1010962.java index b30c8ea7..747f2ef6 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PGF1010962.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PGF1010962.java @@ -14,12 +14,15 @@ @SQLAction(requires="1010962 func", install={ "CREATE TYPE javatest.B1010962 AS ( b1_val float8, b2_val int)", + "CREATE TYPE javatest.C1010962 AS ( c1_val float8, c2_val float8)", + "CREATE TYPE javatest.A1010962 as (" + " b B1010962," + " c C1010962," + " a_val int" + ")", + "SELECT javatest.complexParam(array_agg(" + " CAST(" + " (" + diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Parameters.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Parameters.java new file mode 100644 index 00000000..5f97236d --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Parameters.java @@ -0,0 +1,328 @@ +/* + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.math.BigDecimal; +import java.sql.Date; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Time; +import java.sql.Timestamp; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.TimeZone; +import java.util.logging.Logger; + +import org.postgresql.pljava.annotation.Function; +import static org.postgresql.pljava.annotation.Function.Effects.IMMUTABLE; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLType; + +/** + * Some methods used for testing parameter and return value coersion and + * resolution of overloaded methods. + *
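+ * A couple of the simpler ones can be tried directly once the examples jar
+ * is installed and deployed, for example:
+ *
+ *   SELECT javatest.java_addOne(41), javatest.nullOnEven(42);
+ *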

    + * About the {@code @SQLAction} here: the original, hand-crafted deployment + * descriptor declared two SQL functions both implemented by the same + * {@link #getTimestamp() getTimestamp} method here. Only one declaration can be + * automatically generated from a {@code @Function} annotation on the method + * itself. This {@code @SQLAction} takes care of the other declaration. + * Of course, there is now a burden on the author to get this declaration right + * and to keep it up to date if the method evolves, but at least it is here in + * the same file, rather than in a separate hand-maintained DDR file. + * @author Thomas Hallgren + */ +@SQLAction(install = { + "CREATE OR REPLACE FUNCTION javatest.java_getTimestamptz()" + + " RETURNS timestamptz" + + " AS 'org.postgresql.pljava.example.annotation.Parameters.getTimestamp'" + + " LANGUAGE java" + }, + remove = "DROP FUNCTION javatest.java_getTimestamptz()" +) +public class Parameters { + public static double addNumbers(short a, int b, long c, BigDecimal d, + BigDecimal e, float f, double g) { + return d.doubleValue() + e.doubleValue() + a + b + c + f + g; + } + + public static int addOne(int value) { + return value + 1; + } + + @Function(schema = "javatest", name = "java_addOne", effects = IMMUTABLE) + public static int addOne(Integer value) { + return value.intValue() + 1; + } + + public static int addOneLong(long value) { + return (int) value + 1; + } + + @Function(schema = "javatest") + public static int countNulls(Integer[] intArray) throws SQLException { + int nullCount = 0; + int top = intArray.length; + for (int idx = 0; idx < top; ++idx) { + if (intArray[idx] == null) + nullCount++; + } + return nullCount; + } + + @Function(schema = "javatest") + public static int countNulls(ResultSet input) throws SQLException { + int nullCount = 0; + int top = input.getMetaData().getColumnCount(); + for (int idx = 1; idx <= top; ++idx) { + input.getObject(idx); + if (input.wasNull()) + nullCount++; + } + return nullCount; + } + + public static Date getDate() { + return new Date(System.currentTimeMillis()); + } + + public static Time getTime() { + return new Time(System.currentTimeMillis()); + } + + @Function(schema = "javatest", name = "java_getTimestamp") + public static Timestamp getTimestamp() { + return new Timestamp(System.currentTimeMillis()); + } + + static void log(String msg) { + Logger.getAnonymousLogger().info(msg); + } + + @Function(schema = "javatest", effects = IMMUTABLE) + public static Integer nullOnEven(int value) { + return (value % 2) == 0 ? null : value; + } + + /* + * Declare parameter and return type as the PostgreSQL-specific "char" + * (the quoted one, not SQL CHAR) type ... that's how it was declared + * in the original hand-generated deployment descriptor. PL/Java's SQL + * generator would otherwise have emitted smallint by default for the + * Java byte type. + * + * Note that the SQL rules for quoted vs. regular identifiers are complex, + * and PL/Java has not yet precisely specified how the identifiers given in + * annotations are to be treated. A future release may lay down more precise + * rules, which may affect code supplying quoted identifiers like this. 
+ */ + @Function(schema = "javatest", type = "\"char\"") + public static byte print(@SQLType("\"char\"") byte value) { + log("byte " + value); + return value; + } + + @Function(schema = "javatest") + public static byte[] print(byte[] byteArray) { + StringBuffer buf = new StringBuffer(); + int top = byteArray.length; + buf.append("byte[] of size " + top); + if (top > 0) { + buf.append(" {"); + buf.append(byteArray[0]); + for (int idx = 1; idx < top; ++idx) { + buf.append(','); + buf.append(byteArray[idx]); + } + buf.append('}'); + } + log(buf.toString()); + return byteArray; + } + + @Function(schema = "javatest") + public static void print(Date value) { + DateFormat p = DateFormat.getDateInstance(DateFormat.FULL); + log("Local Date is " + p.format(value)); + p.setTimeZone(TimeZone.getTimeZone("UTC")); + log("UTC Date is " + p.format(value)); + log("TZ = " + TimeZone.getDefault().getDisplayName()); + } + + @Function(schema = "javatest") + public static double print(double value) { + log("double " + value); + return value; + } + + @Function(schema = "javatest") + public static double[] print(double[] doubleArray) { + StringBuffer buf = new StringBuffer(); + int top = doubleArray.length; + buf.append("double[] of size " + top); + if (top > 0) { + buf.append(" {"); + buf.append(doubleArray[0]); + for (int idx = 1; idx < top; ++idx) { + buf.append(','); + buf.append(doubleArray[idx]); + } + buf.append('}'); + } + log(buf.toString()); + return doubleArray; + } + + @Function(schema = "javatest") + public static float print(float value) { + log("float " + value); + return value; + } + + @Function(schema = "javatest") + public static float[] print(float[] floatArray) { + StringBuffer buf = new StringBuffer(); + int top = floatArray.length; + buf.append("float[] of size " + top); + if (top > 0) { + buf.append(" {"); + buf.append(floatArray[0]); + for (int idx = 1; idx < top; ++idx) { + buf.append(','); + buf.append(floatArray[idx]); + } + buf.append('}'); + } + log(buf.toString()); + return floatArray; + } + + @Function(schema = "javatest") + public static int print(int value) { + log("int " + value); + return value; + } + + @Function(schema = "javatest") + public static int[] print(int[] intArray) { + StringBuffer buf = new StringBuffer(); + int top = intArray.length; + buf.append("int[] of size " + top); + if (top > 0) { + buf.append(" {"); + buf.append(intArray[0]); + for (int idx = 1; idx < top; ++idx) { + buf.append(','); + buf.append(intArray[idx]); + } + buf.append('}'); + } + log(buf.toString()); + return intArray; + } + + @Function(schema = "javatest", name = "printObj") + public static Integer[] print(Integer[] intArray) { + StringBuffer buf = new StringBuffer(); + int top = intArray.length; + buf.append("Integer[] of size " + top); + if (top > 0) { + buf.append(" {"); + buf.append(intArray[0]); + for (int idx = 1; idx < top; ++idx) { + buf.append(','); + buf.append(intArray[idx]); + } + buf.append('}'); + } + log(buf.toString()); + return intArray; + } + + @Function(schema = "javatest") + public static long print(long value) { + log("long " + value); + return value; + } + + @Function(schema = "javatest") + public static long[] print(long[] longArray) { + StringBuffer buf = new StringBuffer(); + int top = longArray.length; + buf.append("long[] of size " + top); + if (top > 0) { + buf.append(" {"); + buf.append(longArray[0]); + for (int idx = 1; idx < top; ++idx) { + buf.append(','); + buf.append(longArray[idx]); + } + buf.append('}'); + } + log(buf.toString()); + return longArray; + } + + 
@Function(schema = "javatest") + public static short print(short value) { + log("short " + value); + return value; + } + + @Function(schema = "javatest") + public static short[] print(short[] shortArray) { + StringBuffer buf = new StringBuffer(); + int top = shortArray.length; + buf.append("short[] of size " + top); + if (top > 0) { + buf.append(" {"); + buf.append(shortArray[0]); + for (int idx = 1; idx < top; ++idx) { + buf.append(','); + buf.append(shortArray[idx]); + } + buf.append('}'); + } + log(buf.toString()); + return shortArray; + } + + /* + * Declare the parameter type to be timetz in SQL, to match what the + * original hand-crafted deployment descriptor did. The SQL generator + * would otherwise assume time (without time zone). + */ + @Function(schema = "javatest") + public static void print(@SQLType("timetz") Time value) { + DateFormat p = new SimpleDateFormat("HH:mm:ss z Z"); + log("Local Time is " + p.format(value)); + p.setTimeZone(TimeZone.getTimeZone("UTC")); + log("UTC Time is " + p.format(value)); + log("TZ = " + TimeZone.getDefault().getDisplayName()); + } + + /* + * Declare the parameter type to be timestamptz in SQL, to match what the + * original hand-crafted deployment descriptor did. The SQL generator + * would otherwise assume timestamp (without time zone). + */ + @Function(schema = "javatest") + public static void print(@SQLType("timestamptz") Timestamp value) { + DateFormat p = DateFormat.getDateTimeInstance(DateFormat.FULL, + DateFormat.FULL); + log("Local Timestamp is " + p.format(value)); + p.setTimeZone(TimeZone.getTimeZone("UTC")); + log("UTC Timestamp is " + p.format(value)); + log("TZ = " + TimeZone.getDefault().getDisplayName()); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java new file mode 100644 index 00000000..d6dd14bf --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java @@ -0,0 +1,1771 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLData; +import java.sql.SQLInput; +import java.sql.SQLOutput; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Types; + +import java.sql.SQLDataException; +import java.sql.SQLException; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.Reader; +import java.io.StringReader; +import java.io.StringWriter; +import java.io.Writer; + +import java.io.IOException; + +import java.util.List; +import java.util.Map; +import java.util.HashMap; + +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; + +import static javax.xml.transform.OutputKeys.ENCODING; +import javax.xml.transform.Result; +import javax.xml.transform.Source; +import javax.xml.transform.Templates; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerFactory; + +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerConfigurationException; + +import javax.xml.transform.stream.StreamResult; +import javax.xml.transform.stream.StreamSource; +import javax.xml.transform.dom.DOMResult; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.sax.SAXResult; +import javax.xml.transform.sax.SAXSource; +import javax.xml.transform.stax.StAXResult; +import javax.xml.transform.stax.StAXSource; + +import javax.xml.validation.Schema; +import javax.xml.validation.SchemaFactory; + +import org.postgresql.pljava.Adjusting; +import static org.postgresql.pljava.Adjusting.XML.setFirstSupported; +import org.postgresql.pljava.SessionManager; +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.MappedUDT; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLType; + +import static org.postgresql.pljava.example.LoggerTest.logMessage; + +/* Imports needed just for the SAX flavor of "low-level XML echo" below */ +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.parsers.SAXParserFactory; +import org.xml.sax.XMLReader; +import org.xml.sax.ContentHandler; +import org.xml.sax.DTDHandler; +import org.xml.sax.ext.LexicalHandler; + +/* Imports needed just for the StAX flavor of "low-level XML echo" below */ +import javax.xml.stream.XMLEventReader; +import javax.xml.stream.XMLEventWriter; +import javax.xml.stream.XMLInputFactory; +import javax.xml.stream.XMLOutputFactory; +import javax.xml.stream.XMLStreamException; +import org.xml.sax.SAXException; + +/* Imports needed just for xmlTextNode below (serializing via SAX, StAX, DOM) */ +import org.xml.sax.helpers.AttributesImpl; +import org.w3c.dom.Document; +import org.w3c.dom.DocumentFragment; +import org.w3c.dom.bootstrap.DOMImplementationRegistry; + + +/** + * Class illustrating use of {@link SQLXML} to operate on XML data. + *

    + * This class also serves as the mapping class for a composite type + * {@code javatest.onexml}, the better to verify that {@link SQLData} + * input/output works too. That's why it has to implement SQLData. + *
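+ * On a server built with XML support (and with the examples jar installed
+ * and deployed), the two simplest functions below give a quick sanity check:
+ *
+ *   SELECT javatest.inXMLoutString('<doc>hi</doc>'::xml);
+ *   SELECT javatest.inStringoutXML('<doc>hi</doc>');
+ *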

    + * Everything mentioning the type XML here needs a conditional implementor tag + * in case of being loaded into a PostgreSQL instance built without that type. + */ +@SQLAction(provides="postgresql_xml", install= + "SELECT CASE (SELECT 1 FROM pg_type WHERE typname = 'xml') WHEN 1" + + " THEN set_config('pljava.implementors', 'postgresql_xml,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) + +@SQLAction(implementor="postgresql_xml", requires="echoXMLParameter", + install= + "WITH" + + " s(how) AS (SELECT generate_series(1, 7))," + + " t(x) AS (" + + " SELECT table_to_xml('pg_catalog.pg_operator', true, false, '')" + + " )," + + " r(howin, howout, isdoc) AS (" + + " SELECT" + + " i.how, o.how," + + " javatest.echoxmlparameter(x, i.how, o.how) IS DOCUMENT" + + " FROM" + + " t, s AS i, s AS o" + + " WHERE" + + " NOT (i.how = 6 and o.how = 7)" + // 6->7 unreliable in some JREs + " ) " + + "SELECT" + + " CASE WHEN every(isdoc)" + + " THEN javatest.logmessage('INFO', 'SQLXML echos succeeded')" + + " ELSE javatest.logmessage('WARNING', 'SQLXML echos had problems')" + + " END " + + "FROM" + + " r" +) + +@SQLAction(implementor="postgresql_xml", requires="proxiedXMLEcho", + install= + "WITH" + + " s(how) AS (SELECT unnest('{1,2,4,5,6,7}'::int[]))," + + " t(x) AS (" + + " SELECT table_to_xml('pg_catalog.pg_operator', true, false, '')" + + " )," + + " r(how, isdoc) AS (" + + " SELECT" + + " how," + + " javatest.proxiedxmlecho(x, how) IS DOCUMENT" + + " FROM" + + " t, s" + + " )" + + "SELECT" + + " CASE WHEN every(isdoc)" + + " THEN javatest.logmessage('INFO', 'proxied SQLXML echos succeeded')" + + " ELSE javatest.logmessage('WARNING'," + + " 'proxied SQLXML echos had problems')" + + " END " + + "FROM" + + " r" +) + +@SQLAction(implementor="postgresql_xml", requires="lowLevelXMLEcho", + install={ + "SELECT" + + " preparexmlschema('schematest', $$" + + "" + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + "" + + "$$, 'http://www.w3.org/2001/XMLSchema', 5)", + + "WITH" + + " s(how) AS (SELECT unnest('{4,5,7}'::int[]))," + + " r(isdoc) AS (" + + " SELECT" + + " javatest.lowlevelxmlecho(" + + " query_to_xml(" + + " 'SELECT ''hi'' AS textcol, 1 AS intcol', true, true, 'urn:testme'"+ + " ), how, params) IS DOCUMENT" + + " FROM" + + " s," + + " (SELECT 'schematest' AS schema) AS params" + + " )" + + "SELECT" + + " CASE WHEN every(isdoc)" + + " THEN javatest.logmessage('INFO', 'XML Schema tests succeeded')" + + " ELSE javatest.logmessage('WARNING'," + + " 'XML Schema tests had problems')" + + " END " + + "FROM" + + " r" + } +) + +@SQLAction(implementor="postgresql_xml", + requires={"prepareXMLTransform", "transformXML"}, + install={ + "REVOKE EXECUTE ON FUNCTION javatest.prepareXMLTransformWithJava" + + " (pg_catalog.varchar, pg_catalog.xml, integer, boolean, boolean," + + " pg_catalog.RECORD)" + + " FROM PUBLIC", + + "SELECT" + + " javatest.prepareXMLTransform('distinctElementNames'," + + "'" + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + " " + + "', how => 5, enableExtensionFunctions => true)", + + "SELECT" + + " javatest.prepareXMLTransformWithJava('getPLJavaVersion'," + + "'" + + " " + + " " + + " " + + "', enableExtensionFunctions => true)", + + "SELECT" + + " CASE WHEN" + + " javatest.transformXML('distinctElementNames'," + + " '', 5, 5)::text" + + " =" + + " 'abcde'"+ + " THEN javatest.logmessage('INFO', 'XSLT 1.0 test succeeded')" + + " ELSE javatest.logmessage('WARNING', 'XSLT 1.0 test failed')" + + " END", + + 
"SELECT" + + " CASE WHEN" + + " javatest.transformXML('getPLJavaVersion', '')::text" + + " OPERATOR(pg_catalog.=) extversion" + + " THEN javatest.logmessage('INFO', 'XSLT 1.0 with Java succeeded')" + + " ELSE javatest.logmessage('WARNING', 'XSLT 1.0 with Java failed')" + + " END" + + " FROM pg_catalog.pg_extension" + + " WHERE extname = 'pljava'" + } +) + +@SQLAction(implementor="postgresql_xml", + provides="xml_java_ge_22", requires="javaSpecificationGE", install= + "SELECT CASE WHEN" + + " javatest.javaSpecificationGE('22')" + + " THEN set_config('pljava.implementors', 'xml_java_ge_22,' || " + + " current_setting('pljava.implementors'), true) " + + "END" +) + +@SQLAction(implementor="xml_java_ge_22", requires="lowLevelXMLEcho", install= + "WITH" + + " s(how) AS (SELECT unnest('{5,6,7}'::int[]))," + + " r(isdoc) AS (" + + " SELECT" + + " javatest.lowlevelxmlecho(" + + /* + * A truly minimal DTD, , cannot be ignored by Java 22's SAX/DOM + * parser (though it can be, when using the StAX API). NullPointerException + * calling getActiveGrammar().isImmutable() is the result. Bug: JDK-8329295 + * Including either an externalID or an internal subset (like the empty [] + * here) avoids the issue. + */ + " ''::xml, how, params) IS DOCUMENT" + + " FROM" + + " s," + + " (SELECT null::void AS ignoreDTD) AS params" + + " )" + + "SELECT" + + " CASE WHEN every(isdoc)" + + " THEN javatest.logmessage('INFO', 'jdk.xml.dtd.support=ignore OK')" + + " ELSE javatest.logmessage('WARNING', 'jdk.xml.dtd.support=ignore NG')" + + " END " + + "FROM" + + " r" +) + +@MappedUDT(schema="javatest", name="onexml", structure="c1 xml", + implementor="postgresql_xml", + comment="A composite type mapped by the PassXML example class") +public class PassXML implements SQLData +{ + static SQLXML s_sx; + + static TransformerFactory s_tf = TransformerFactory.newDefaultInstance(); + + static Map s_tpls = new HashMap<>(); + + static Map s_schemas = new HashMap<>(); + + @Function(schema="javatest", implementor="postgresql_xml") + public static String inXMLoutString(SQLXML in) throws SQLException + { + return in.getString(); + } + + @Function(schema="javatest", implementor="postgresql_xml") + public static SQLXML inStringoutXML(String in) throws SQLException + { + Connection c = DriverManager.getConnection("jdbc:default:connection"); + SQLXML result = c.createSQLXML(); + result.setString(in); + return result; + } + + /** + * Echo an XML parameter back, exercising seven different ways + * (howin => 1-7) of reading an SQLXML object, and seven + * (howout => 1-7) of returning one. + *

    + * If howin => 0, the XML parameter is simply saved in a static. It can + * be read in a subsequent call with sx => null, but only in the same + * transaction. + *
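+ * For example, one echo through a single pair of the seven APIs (assuming
+ * the examples jar is installed and deployed) might look like:
+ *
+ *   SELECT javatest.echoXMLParameter('<doc>hi</doc>'::xml, 5, 5);
+ *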

+ * The "echoing" is done (in the {@code echoSQLXML} method below) using a + * {@code Transformer}, that is, the "TrAX" Transformation API for XML + * supplied in Java. It illustrates how an identity {@code Transformer} can + * be used to get the XML content from the source to the result for any of + * the APIs selectable by howin and howout. + *

    + * It also illustrates something else. When using StAX (6 for howin + * or howout) and XML of the {@code CONTENT} flavor (multiple top-level + * elements, characters outside the top element, etc.), it is easy to + * construct examples that fail. The fault is not really with the StAX API, + * nor with TrAX proper, but with the small handful of bridge classes that + * were added to the JRE with StAX's first appearance, to make it + * interoperate with TrAX. It is not that those classes completely overlook + * the {@code CONTENT} case: they make some efforts to handle it. Just not + * the right ones, and given the Java developers' usual reluctance to change + * such longstanding behavior, that's probably not getting fixed. + *

    + * Moral: StAX is a nice API, have no fear to use it directly in + * freshly-developed code, but: when using TrAX, make every effort to supply + * a {@code Transformer} with {@code Source} and {@code Result} objects of + * any kind other than StAX. + */ + @Function(schema="javatest", implementor="postgresql_xml", + provides="echoXMLParameter") + public static SQLXML echoXMLParameter(SQLXML sx, int howin, int howout) + throws SQLException + { + if ( null == sx ) + sx = s_sx; + if ( 0 == howin ) + { + s_sx = sx; + return null; + } + return echoSQLXML(sx, howin, howout); + } + + /** + * Echo an XML parameter back, but with parameter and return types of + * PostgreSQL {@code text}. + *

    + * The other version of this method needs a conditional implementor tag + * because it cannot be declared in a PostgreSQL instance that was built + * without {@code libxml} support and the PostgreSQL {@code XML} type. + * But this version can, simply by mapping the {@code SQLXML} parameter + * and return types to the SQL {@code text} type. The Java code is no + * different. + *

    + * Note that it's possible for both declarations to coexist in PostgreSQL + * (because as far as it is concerned, their signatures are different), but + * these two Java methods cannot have the same name (because they differ + * only in annotations, not in the declared Java types). So, this one needs + * a slightly tweaked name, and a {@code name} attribute in the annotation + * so PostgreSQL sees the right name. + */ + @Function(schema="javatest", name="echoXMLParameter", type="text") + public static SQLXML echoXMLParameter_( + @SQLType("text") SQLXML sx, int howin, int howout) + throws SQLException + { + return echoXMLParameter(sx, howin, howout); + } + + /** + * "Echo" an XML parameter not by creating a new writable {@code SQLXML} + * object at all, but simply returning the passed-in readable one untouched. + */ + @Function(schema="javatest", implementor="postgresql_xml") + public static SQLXML bounceXMLParameter(SQLXML sx) throws SQLException + { + return sx; + } + + /** + * Just like {@link bounceXMLParameter} but with parameter and return typed + * as {@code text}, and so usable on a PostgreSQL instance lacking the XML + * type. + */ + @Function(schema="javatest", type="text", name="bounceXMLParameter") + public static SQLXML bounceXMLParameter_(@SQLType("text") SQLXML sx) + throws SQLException + { + return sx; + } + + /** + * Just like {@link bounceXMLParameter} but with the parameter typed as + * {@code text} and the return type left as XML, so functions as a cast. + *
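+ * For example (given the examples jar installed and deployed on a server
+ * with the XML type):
+ *
+ *   SELECT javatest.castTextXML(text '<doc>hi</doc>');
+ *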

    + * Slower than the other cases, because it must verify that the input really + * is XML before blindly calling it a PostgreSQL XML type. But the speed + * compares respectably to PostgreSQL's own CAST(text AS xml), at least for + * larger values; I am seeing Java pull ahead right around 32kB of XML data + * and beat PG by a factor of 2 or better at sizes of 1 or 2 MB. + * Unsurprisingly, PG has the clear advantage when values are very short. + */ + @Function(schema="javatest", implementor="postgresql_xml") + public static SQLXML castTextXML(@SQLType("text") SQLXML sx) + throws SQLException + { + return sx; + } + + /** + * Precompile an XSL transform {@code source} and save it (for the + * current session) as {@code name}. + *

    + * Each value of {@code how}, 1-7, selects a different way of presenting + * the {@code SQLXML} object to the XSL processor. + *

    + * Passing {@code true} for {@code enableExtensionFunctions} allows the + * transform to use extensions that the Java XSLT implementation supports, + * such as functions from EXSLT. Those are disabled by default. + *

    + * Passing {@code false} for {@code builtin} will allow a + * {@code TransformerFactory} other than Java's built-in one to be found + * using the usual search order and the context class loader (normally + * the PL/Java class path for the schema where this function is declared). + * The default of {@code true} ensures that the built-in Java XSLT 1.0 + * implementation is used. A transformer implementation other than Xalan + * may not recognize the feature controlled by + * {@code enableExtensionFunctions}, so failure to configure that feature + * will be logged as a warning if {@code builtin} is {@code false}, instead + * of thrown as an exception. + *

    + * Out of the box, Java's transformers only support XSLT 1.0. See the S9 + * example for more capabilities (at the cost of downloading the Saxon jar). + */ + @Function(schema="javatest", implementor="postgresql_xml", + provides="prepareXMLTransform") + public static void prepareXMLTransform(String name, SQLXML source, + @SQLType(defaultValue="0") int how, + @SQLType(defaultValue="false") boolean enableExtensionFunctions, + @SQLType(defaultValue="true") boolean builtin, + @SQLType(defaultValue={}) ResultSet adjust) + throws SQLException + { + prepareXMLTransform( + name, source, how, enableExtensionFunctions, adjust, builtin, + /* withJava */ false); + } + + /** + * Precompile an XSL transform {@code source} and save it (for the + * current session) as {@code name}, where the transform may call Java + * methods. + *

    + * Otherwise identical to {@code prepareXMLTransform}, this version sets the + * {@code TransformerFactory}'s {@code extensionClassLoader} (to the context + * class loader, normally the PL/Java class path for the schema where this + * function is declared), so the transform will be able to use + * xalan's Java call syntax to call any public Java methods that would be + * accessible to this class. (That can make a big difference in usefulness + * for the otherwise rather limited XSLT 1.0.) + *

    + * As with {@code enableExtensionFunctions}, failure by the transformer + * implementation to recognize or allow the {@code extensionClassLoader} + * property will be logged as a warning if {@code builtin} is {@code false}, + * rather than thrown as an exception. + *

    + * This example function will be installed with {@code EXECUTE} permission + * revoked from {@code PUBLIC}, as it essentially confers the ability to + * create arbitrary new Java functions, so should only be granted to roles + * you would be willing to grant {@code USAGE ON LANGUAGE java}. + *

    + * Because this function only prepares the transform, and + * {@link #transformXML transformXML} applies it, there is some division of + * labor in determining what limits apply to its behavior. The use of this + * method instead of {@code prepareXMLTransform} determines whether the + * transform is allowed to see external Java methods at all; it will be + * the policy permissions granted to {@code transformXML} that control what + * those methods can do when the transform is applied. For now, that method + * is defined in the trusted/sandboxed {@code java} language, so this + * function could reasonably be granted to any role with {@code USAGE} on + * {@code java}. If, by contrast, {@code transformXML} were declared in the + * 'untrusted' {@code javaU}, it would be prudent to allow only superusers + * access to this function, just as only they can {@code CREATE FUNCTION} in + * an untrusted language. + */ + @Function(schema="javatest", implementor="postgresql_xml", + provides="prepareXMLTransform") + public static void prepareXMLTransformWithJava(String name, SQLXML source, + @SQLType(defaultValue="0") int how, + @SQLType(defaultValue="false") boolean enableExtensionFunctions, + @SQLType(defaultValue="true") boolean builtin, + @SQLType(defaultValue={}) ResultSet adjust) + throws SQLException + { + prepareXMLTransform( + name, source, how, enableExtensionFunctions, adjust, builtin, + /* withJava */ true); + } + + private static void prepareXMLTransform(String name, SQLXML source, int how, + boolean enableExtensionFunctions, ResultSet adjust, boolean builtin, + boolean withJava) + throws SQLException + { + TransformerFactory tf = + builtin + ? TransformerFactory.newDefaultInstance() + : TransformerFactory.newInstance(); + + String legacy_pfx = "http://www.oracle.com/xml/jaxp/properties/"; + String java17_pfx = "jdk.xml."; + String exf_sfx = "enableExtensionFunctions"; + + String ecl_legacy = "jdk.xml.transform.extensionClassLoader"; + String ecl_java17 = "jdk.xml.extensionClassLoader"; + + Source src = sxToSource(source, how, adjust); + + try + { + Exception e; + + e = setFirstSupported(tf::setFeature, enableExtensionFunctions, + List.of(TransformerConfigurationException.class), null, + java17_pfx + exf_sfx, legacy_pfx + exf_sfx); + + if ( null != e ) + { + if ( builtin ) + throw new SQLException( + "Configuring XML transformation: " + e.getMessage(), e); + else + logMessage("WARNING", + "non-builtin transformer: ignoring " + e.getMessage()); + } + + if ( withJava ) + { + e = setFirstSupported(tf::setAttribute, + Thread.currentThread().getContextClassLoader(), + List.of(IllegalArgumentException.class), null, + ecl_java17, ecl_legacy); + + if ( null != e ) + { + if ( builtin ) + throw new SQLException( + "Configuring XML transformation: " + + e.getMessage(), e); + else + logMessage("WARNING", + "non-builtin transformer: ignoring " + + e.getMessage()); + } + } + + s_tpls.put(name, tf.newTemplates(src)); + } + catch ( TransformerException te ) + { + throw new SQLException( + "Preparing XML transformation: " + te.getMessage(), te); + } + } + + /** + * Transform some XML according to a named transform prepared with + * {@code prepareXMLTransform}. + *
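+ * For example, the {@code distinctElementNames} transform prepared by this
+ * file's install actions (assuming the examples jar is installed and
+ * deployed) can be applied by hand to any small document, such as this
+ * made-up one:
+ *
+ *   SELECT javatest.transformXML('distinctElementNames',
+ *     '<a><b/><c/><b/></a>', 5, 5);
+ *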

    + * Pass null for {@code transformName} to get a plain identity transform + * (not such an interesting thing to do, unless you also specify indenting). + */ + @Function(schema="javatest", implementor="postgresql_xml", + provides="transformXML") + public static SQLXML transformXML( + String transformName, SQLXML source, + @SQLType(defaultValue="0") int howin, + @SQLType(defaultValue="0") int howout, + @SQLType(defaultValue={}) ResultSet adjust, + @SQLType(optional=true) Boolean indent, + @SQLType(optional=true) Integer indentWidth) + throws SQLException + { + Templates tpl = null == transformName? null: s_tpls.get(transformName); + Source src = sxToSource(source, howin, adjust); + + if ( Boolean.TRUE.equals(indent) && 0 == howout ) + howout = 4; // transformer only indents if writing a StreamResult + + Connection c = DriverManager.getConnection("jdbc:default:connection"); + SQLXML result = c.createSQLXML(); + Result rlt = sxToResult(result, howout, adjust); + + try + { + Transformer t = + null == tpl ? s_tf.newTransformer() : tpl.newTransformer(); + /* + * For the non-SAX/StAX/DOM flavors of output, you're responsible + * for setting the Transformer to use the server encoding. + */ + if ( rlt instanceof StreamResult ) + t.setOutputProperty(ENCODING, + SessionManager.current().frozenSystemProperties() + .getProperty("org.postgresql.server.encoding")); + else if ( Boolean.TRUE.equals(indent) ) + logMessage("WARNING", + "indent requested, but howout specifies a non-stream " + + "Result type; no indenting will happen"); + + if ( null != indent ) + t.setOutputProperty("indent", indent ? "yes" : "no"); + if ( null != indentWidth ) + t.setOutputProperty( + "{http://xml.apache.org/xalan}indent-amount", + "" + indentWidth); + + t.transform(src, rlt); + } + catch ( TransformerException te ) + { + throw new SQLException("Transforming XML: " + te.getMessage(), te); + } + + return ensureClosed(rlt, result, howout); + } + + /** + * Precompile a schema {@code source} in schema language {@code lang} + * and save it (for the current session) as {@code name}. + *

    + * Each value of {@code how}, 1-7, selects a different way of presenting + * the {@code SQLXML} object to the schema parser. + *
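+ * For illustration (assuming the examples jar is installed and deployed), a
+ * trivial, empty W3C XML Schema could be prepared under a made-up name:
+ *
+ *   SELECT javatest.prepareXMLSchema('emptySchema',
+ *     '<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"/>'::xml,
+ *     'http://www.w3.org/2001/XMLSchema', 5);
+ *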

    + * The {@code lang} parameter is a URI that identifies a known schema + * language. The only language a Java runtime is required to support is + * W3C XML Schema 1.0, with URI {@code http://www.w3.org/2001/XMLSchema}. + */ + @Function(schema="javatest", implementor="postgresql_xml") + public static void prepareXMLSchema( + String name, SQLXML source, String lang, int how) + throws SQLException + { + try + { + s_schemas.put(name, + SchemaFactory.newInstance(lang) + .newSchema(sxToSource(source, how))); + } + catch ( SAXException e ) + { + throw new SQLException( + "failed to prepare schema: " + e.getMessage(), e); + } + } + + private static SQLXML echoSQLXML(SQLXML sx, int howin, int howout) + throws SQLException + { + Connection c = DriverManager.getConnection("jdbc:default:connection"); + SQLXML rx = c.createSQLXML(); + Source src = sxToSource(sx, howin); + Result rlt = sxToResult(rx, howout); + + try + { + Transformer t = s_tf.newTransformer(); + /* + * For the non-SAX/StAX/DOM flavors of output, you're responsible + * for setting the Transformer to use the server encoding. + */ + if ( howout < 5 ) + t.setOutputProperty(ENCODING, + SessionManager.current().frozenSystemProperties() + .getProperty("org.postgresql.server.encoding")); + t.transform(src, rlt); + } + catch ( TransformerException te ) + { + throw new SQLException("XML transformation failed", te); + } + + return ensureClosed(rlt, rx, howout); + } + + /** + * Echo the XML parameter back, using lower-level manipulations than + * {@code echoXMLParameter}. + *

    + * This illustrates how the simple use of {@code t.transform(src,rlt)} + * in {@code echoSQLXML} substitutes for a lot of fiddly case-by-case code, + * but when coding for a specific case, all the generality of {@code + * transform} may not be needed. It can be interesting to compare memory use + * when XML values are large. + *

    + * This method has been revised to demonstrate, even for low-level + * manipulations, how much fiddliness can now be avoided through use of the + * {@link Adjusting.XML.SourceResult} class, and how to make adjustments to + * parsing restrictions by passing the optional row-typed parameter + * adjust, which defaults to an empty row. For example, passing + *

+	 * adjust => (select a from
+	 *            (select true as allowdtd, true as expandentityreferences) as a)
    +	 *
    + * would allow a document that contains an internal DTD subset and uses + * entities defined there. + *

    + * The older, pre-{@code SourceResult} code for doing low-level XML echo + * has been moved to the {@code oldSchoolLowLevelEcho} method below. It can + * still be exercised by calling this method, explicitly passing + * {@code adjust => NULL}. + */ + @Function(schema="javatest", implementor="postgresql_xml", + provides="lowLevelXMLEcho") + public static SQLXML lowLevelXMLEcho( + SQLXML sx, int how, @SQLType(defaultValue={}) ResultSet adjust) + throws SQLException + { + Connection c = DriverManager.getConnection("jdbc:default:connection"); + SQLXML rx = c.createSQLXML(); + + if ( null == adjust ) + return oldSchoolLowLevelEcho(rx, sx, how); + + Adjusting.XML.SourceResult axsr = + rx.setResult(Adjusting.XML.SourceResult.class); + + switch ( how ) + { + /* + * The first four cases all present the content as unparsed bytes or + * characters, so there is nothing to adjust on the source side. + */ + case 1: + axsr.set(new StreamSource(sx.getBinaryStream())); + break; + case 2: + axsr.set(new StreamSource(sx.getCharacterStream())); + break; + case 3: + axsr.set(sx.getString()); + break; + case 4: + axsr.set(sx.getSource(StreamSource.class)); + break; + /* + * The remaining cases present the content in parsed form, and therefore + * may involve parsers that can be adjusted according to the supplied + * preferences. + */ + case 5: + axsr.set(applyAdjustments(adjust, + sx.getSource(Adjusting.XML.SAXSource.class))); + break; + case 6: + axsr.set(applyAdjustments(adjust, + sx.getSource(Adjusting.XML.StAXSource.class))); + break; + case 7: + axsr.set(applyAdjustments(adjust, + sx.getSource(Adjusting.XML.DOMSource.class))); + break; + default: + throw new SQLDataException( + "how must be 1-7 for lowLevelXMLEcho", "22003"); + } + + /* + * Adjustments can also be applied to the SourceResult itself, where + * they will affect any implicitly-created parser used to verify or + * re-encode the content, if it was supplied in unparsed form. + */ + return applyAdjustments(adjust, axsr).get().getSQLXML(); + } + + /** + * Apply adjustments (supplied as a row type with a named column for each + * desired adjustment and its value) to an instance of + * {@link Adjusting.XML.Parsing}. + *
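+ * For example, an adjust row built as
+ *
+ *   (select a from (select true as allowdtd, 64 as maxelementdepth) as a)
+ *
+ * would result in calls to {@code allowDTD(true)} and
+ * {@code maxElementDepth(64)} on the instance being adjusted.
+ *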

    + * Column names in the adjust row are case-insensitive versions of + * the method names in {@link Adjusting.XML.Parsing}, and the value of each + * column should be of the appropriate type (if the method has a parameter). + * @param adjust A row type as described above, possibly of no columns if no + * adjustments are wanted + * @param axp An instance of Adjusting.XML.Parsing + * @return axp, after applying any adjustments + */ + public static > + T applyAdjustments(ResultSet adjust, T axp) + throws SQLException + { + ResultSetMetaData rsmd = adjust.getMetaData(); + int n = rsmd.getColumnCount(); + + for ( int i = 1; i <= n; ++i ) + { + String k = rsmd.getColumnLabel(i); + if ( "lax".equalsIgnoreCase(k) ) + axp.lax(adjust.getBoolean(i)); + else if ( "allowDTD".equalsIgnoreCase(k) ) + axp.allowDTD(adjust.getBoolean(i)); + else if ( "ignoreDTD".equalsIgnoreCase(k) ) + axp.ignoreDTD(); + else if ( "externalGeneralEntities".equalsIgnoreCase(k) ) + axp.externalGeneralEntities(adjust.getBoolean(i)); + else if ( "externalParameterEntities".equalsIgnoreCase(k) ) + axp.externalParameterEntities(adjust.getBoolean(i)); + else if ( "loadExternalDTD".equalsIgnoreCase(k) ) + axp.loadExternalDTD(adjust.getBoolean(i)); + else if ( "xIncludeAware".equalsIgnoreCase(k) ) + axp.xIncludeAware(adjust.getBoolean(i)); + else if ( "expandEntityReferences".equalsIgnoreCase(k) ) + axp.expandEntityReferences(adjust.getBoolean(i)); + else if ( "elementAttributeLimit".equalsIgnoreCase(k) ) + axp.elementAttributeLimit(adjust.getInt(i)); + else if ( "entityExpansionLimit".equalsIgnoreCase(k) ) + axp.entityExpansionLimit(adjust.getInt(i)); + else if ( "entityReplacementLimit".equalsIgnoreCase(k) ) + axp.entityReplacementLimit(adjust.getInt(i)); + else if ( "maxElementDepth".equalsIgnoreCase(k) ) + axp.maxElementDepth(adjust.getInt(i)); + else if ( "maxGeneralEntitySizeLimit".equalsIgnoreCase(k) ) + axp.maxGeneralEntitySizeLimit(adjust.getInt(i)); + else if ( "maxParameterEntitySizeLimit".equalsIgnoreCase(k) ) + axp.maxParameterEntitySizeLimit(adjust.getInt(i)); + else if ( "maxXMLNameLimit".equalsIgnoreCase(k) ) + axp.maxXMLNameLimit(adjust.getInt(i)); + else if ( "totalEntitySizeLimit".equalsIgnoreCase(k) ) + axp.totalEntitySizeLimit(adjust.getInt(i)); + else if ( "accessExternalDTD".equalsIgnoreCase(k) ) + axp.accessExternalDTD(adjust.getString(i)); + else if ( "accessExternalSchema".equalsIgnoreCase(k) ) + axp.accessExternalSchema(adjust.getString(i)); + else if ( "schema".equalsIgnoreCase(k) ) + { + try + { + axp.schema(s_schemas.get(adjust.getString(i))); + } + catch (UnsupportedOperationException e) + { + } + } + else + throw new SQLDataException( + "unrecognized name \"" + k + "\" for parser adjustment", + "22000"); + } + return axp; + } + + /** + * An obsolescent example, showing what was required to copy from one + * {@code SQLXML} object to another, using the various supported APIs, + * without using {@link Adjusting.XML.SourceResult}, or at least without + * using it much. It is still used in case 4 to be sure of getting a + * {@code StreamResult} that matches the byte-or-character-ness of the + * {@code StreamSource}. How to handle that case without + * {@code SourceResult} is left as an exercise. 
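The "schema" adjustment handled above ties back to prepareXMLSchema: a schema registered under a name can be requested by that name in the adjust row. A hedged client-side sketch follows, assuming a server built with XML support, the examples jar installed, and an open Connection; the schema name "greeting" and both documents are purely illustrative.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class SchemaAdjustmentSketch
{
	static String validatedEcho(Connection c) throws SQLException
	{
		try (Statement s = c.createStatement())
		{
			// Register a named Schema; the name is arbitrary.
			s.execute(
				"SELECT javatest.prepareXMLSchema('greeting', CAST(" +
				"'<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">" +
				"<xs:element name=\"greeting\" type=\"xs:string\"/>" +
				"</xs:schema>' AS xml)," +
				" 'http://www.w3.org/2001/XMLSchema', 5)");

			// Ask for the registered schema by name in the adjust row; the
			// echoed document should then be validated while it is parsed.
			try (ResultSet rs = s.executeQuery(
				"SELECT javatest.lowlevelxmlecho(" +
				" CAST('<greeting>hello</greeting>' AS xml), 5," +
				" adjust => (SELECT a FROM" +
				"  (SELECT CAST('greeting' AS text) AS schema) AS a))"))
			{
				rs.next();
				return rs.getString(1);
			}
		}
	}
}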
+ */ + private static SQLXML oldSchoolLowLevelEcho(SQLXML rx, SQLXML sx, int how) + throws SQLException + { + try + { + switch ( how ) + { + case 1: + InputStream is = sx.getBinaryStream(); + OutputStream os = rx.setBinaryStream(); + shovelBytes(is, os); + break; + case 2: + Reader r = sx.getCharacterStream(); + Writer w = rx.setCharacterStream(); + shovelChars(r, w); + break; + case 3: + rx.setString(sx.getString()); + break; + case 4: + StreamSource ss = sx.getSource(StreamSource.class); + Adjusting.XML.StreamResult sr = + rx.setResult(Adjusting.XML.StreamResult.class); + is = ss.getInputStream(); + r = ss.getReader(); + if ( null != is ) + { + os = sr.preferBinaryStream().get().getOutputStream(); + shovelBytes(is, os); + break; + } + if ( null != r ) + { + w = sr.preferCharacterStream().get().getWriter(); + shovelChars(r, w); + break; + } + throw new SQLDataException( + "StreamSource contained neither InputStream nor Reader"); + case 5: + SAXSource sxs = sx.getSource(SAXSource.class); + SAXResult sxr = rx.setResult(SAXResult.class); + XMLReader xr = sxs.getXMLReader(); + if ( null == xr ) + { + SAXParserFactory spf = SAXParserFactory.newInstance(); + spf.setNamespaceAware(true); + xr = spf.newSAXParser().getXMLReader(); + /* + * Important: before copying this example code for another + * use, consider whether the input XML might be untrusted. + * If so, the new XMLReader created here should have several + * features given safe default settings as outlined in the + * OWASP guidelines. (This branch is not reached when sx is + * a PL/Java native SQLXML instance, as xr will be non-null + * and already configured.) + */ + } + ContentHandler ch = sxr.getHandler(); + xr.setContentHandler(ch); + if ( ch instanceof DTDHandler ) + xr.setDTDHandler((DTDHandler)ch); + LexicalHandler lh = sxr.getLexicalHandler(); + if ( null == lh && ch instanceof LexicalHandler ) + lh = (LexicalHandler)ch; + if ( null != lh ) + xr.setProperty( + "http://xml.org/sax/properties/lexical-handler", lh); + xr.parse(sxs.getInputSource()); + break; + case 6: + StAXSource sts = sx.getSource(StAXSource.class); + StAXResult str = rx.setResult(StAXResult.class); + XMLOutputFactory xof = XMLOutputFactory.newInstance(); + /* + * The Source has either an event reader or a stream reader. Use + * the event reader directly, or create one around the stream + * reader. + */ + XMLEventReader xer = sts.getXMLEventReader(); + if ( null == xer ) + { + XMLInputFactory xif = XMLInputFactory .newInstance(); + xif.setProperty(xif.IS_NAMESPACE_AWARE, true); + /* + * Important: before copying this example code for another + * use, consider whether the input XML might be untrusted. + * If so, the new XMLInputFactory created here might want + * several properties given safe default settings as + * outlined in the OWASP guidelines. (When sx is a PL/Java + * native SQLXML instance, the XMLStreamReader obtained + * below will already have been so configured.) + */ + xer = xif.createXMLEventReader(sts.getXMLStreamReader()); + } + /* + * Were you thinking the above could be simply + * createXMLEventReader(sts) by analogy with the writer below? + * Good thought, but the XMLInputFactory implementation that's + * included in OpenJDK doesn't implement the case where the + * Source argument is a StAXSource! Two lines would do it. 
+ */ + /* + * Because of a regression in Java 9 and later, the line below, + * while working in Java 8 and earlier, will produce a + * ClassCastException in Java 9 through (for sure) 12, (almost + * certainly) 13, and on until some future version fixes the + * regression, if ever, if 'str' wraps any XMLStreamWriter + * implementation other than the inaccessible one from the guts + * of the JDK itself. The bug has been reported but (as of this + * writing) is still in the maddening limbo phase of the Java + * bug reporting cycle, where no bug number can refer to it. See + * lowLevelXMLEcho() above for code to do this copy successfully + * using an Adjusting.XML.SourceResult. + */ + XMLEventWriter xew = xof.createXMLEventWriter(str); + xew.add(xer); + xew.close(); + xer.close(); + break; + case 7: + DOMSource ds = sx.getSource(DOMSource.class); + DOMResult dr = rx.setResult(DOMResult.class); + dr.setNode(ds.getNode()); + break; + default: + throw new SQLDataException( + "how must be 1-7 for lowLevelXMLEcho", "22003"); + } + } + catch ( IOException e ) + { + throw new SQLException( + "IOException in lowLevelXMLEcho", "58030", e); + } + catch ( + ParserConfigurationException | SAXException | XMLStreamException e ) + { + throw new SQLException( + "XML exception in lowLevelXMLEcho", "22000", e); + } + return rx; + } + + /** + * Proxy a PL/Java SQLXML source object as if it were of a non-PL/Java + * implementing class, to confirm that it can still be returned successfully + * to PostgreSQL. + * @param sx readable {@code SQLXML} object to proxy + * @param how 1,2,4,5,6,7 determines what subclass of {@code Source} will be + * returned by {@code getSource}. + */ + @Function(schema="javatest", implementor="postgresql_xml", + provides="proxiedXMLEcho") + public static SQLXML proxiedXMLEcho(SQLXML sx, int how) + throws SQLException + { + return new SQLXMLProxy(sx, how); + } + + /** + * Supply a sequence of bytes to be the exact (encoded) content of an XML + * value, which will be returned; if the encoding is not UTF-8, the value + * should begin with an XML Decl that names the encoding. + *
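The OWASP-style hardening that the comments in oldSchoolLowLevelEcho above allude to, for a SAX parser created outside PL/Java, typically looks something like the sketch below; the first feature URI is specific to Xerces-derived parsers such as the one built into the JDK, while the other two are standard SAX feature names.

import javax.xml.parsers.SAXParserFactory;
import org.xml.sax.XMLReader;

public class HardenedReaderSketch
{
	public static XMLReader newHardenedReader() throws Exception
	{
		SAXParserFactory spf = SAXParserFactory.newInstance();
		spf.setNamespaceAware(true);
		// Refuse any DOCTYPE outright (Xerces/JDK-specific feature).
		spf.setFeature(
			"http://apache.org/xml/features/disallow-doctype-decl", true);
		// Standard SAX features: no external entity resolution.
		spf.setFeature(
			"http://xml.org/sax/features/external-general-entities", false);
		spf.setFeature(
			"http://xml.org/sax/features/external-parameter-entities", false);
		return spf.newSAXParser().getXMLReader();
	}
}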

    + * Constructs an {@code SQLXML} instance that will return the supplied + * content as a {@code StreamSource} wrapping an {@code InputStream}, or via + * {@code getBinaryStream}, but fail if asked for any other form. + */ + @Function(schema="javatest", implementor="postgresql_xml", + provides="mockedXMLEchoB") + public static SQLXML mockedXMLEcho(byte[] bytes) + throws SQLException + { + return new SQLXMLMock(bytes); + } + + /** + * Supply a sequence of characters to be the exact (Unicode) content of an + * XML value, which will be returned; if the value begins with an XML Decl + * that names an encoding, the content will be assumed to contain only + * characters representable in that encoding. + *
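A hedged sketch of exercising the byte-oriented mock above from a JDBC client: the bytes carry an XML Decl naming their encoding, as the javadoc requires for anything other than UTF-8. It assumes a server built with XML support, the examples jar installed, and the usual java.sql imports; the method name is illustrative.

static String mockedRoundTrip(Connection c) throws SQLException
{
	byte[] content =
		("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>" +
		 "<doc>s\u00ed</doc>")
			.getBytes(java.nio.charset.StandardCharsets.ISO_8859_1);
	try (PreparedStatement ps = c.prepareStatement(
		"SELECT javatest.mockedxmlecho(CAST(? AS bytea))"))
	{
		ps.setBytes(1, content);
		try (ResultSet rs = ps.executeQuery())
		{
			rs.next();
			return rs.getString(1); // re-encoded by the server as xml text
		}
	}
}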

    + * Constructs an {@code SQLXML} instance that will return the supplied + * content as a {@code StreamSource} wrapping a {@code Reader}, or via + * {@code getCharacterStream}, but fail if asked for any other form. + */ + @Function(schema="javatest", implementor="postgresql_xml", + provides="mockedXMLEchoC") + public static SQLXML mockedXMLEcho(String chars) + throws SQLException + { + return new SQLXMLMock(chars); + } + + /** + * Text-typed variant of lowLevelXMLEcho (does not require XML type). + */ + @Function(schema="javatest", name="lowLevelXMLEcho", + type="text") + public static SQLXML lowLevelXMLEcho_(@SQLType("text") SQLXML sx, int how, + @SQLType(defaultValue={}) ResultSet adjust) + throws SQLException + { + return lowLevelXMLEcho(sx, how, adjust); + } + + /** + * Low-level XML echo where the Java parameter and return type are String. + */ + @Function(schema="javatest", implementor="postgresql_xml", type="xml") + public static String lowLevelXMLEcho(@SQLType("xml") String x) + throws SQLException + { + return x; + } + + /** + * Create some XML, pass it to a {@code SELECT ?} prepared statement, + * retrieve it from the result set, and return it via the out-parameter + * result set of this {@code RECORD}-returning function. + */ + @Function(schema="javatest", type="RECORD") + public static boolean xmlInStmtAndRS(ResultSet out) throws SQLException + { + Connection c = DriverManager.getConnection("jdbc:default:connection"); + SQLXML x = c.createSQLXML(); + x.setString(""); + PreparedStatement ps = c.prepareStatement("SELECT ?"); + ps.setObject(1, x, Types.SQLXML); + ResultSet rs = ps.executeQuery(); + rs.next(); + if ( Types.SQLXML != rs.getMetaData().getColumnType(1) ) + logMessage("WARNING", + "ResultSetMetaData.getColumnType() misreports SQLXML"); + x = rs.getSQLXML(1); + ps.close(); + out.updateObject(1, x); + return true; + } + + /** + * Test serialization into the PostgreSQL server encoding by returning + * a text node, optionally wrapped in an element, containing the supplied + * stuff. + *

    + * The stuff is supplied as a {@code bytea} and a named encoding, + * so it is easy to supply stuff that isn't in the server encoding and see + * what the serializer does with it. + *

    + * As of this writing, if the stuff, decoded according to + * encoding, contains characters that are not representable in the + * server encoding, the serializers supplied in the JRE will: + *

    + * @param stuff Content to be used in the text node + * @param encoding Name of an encoding; stuff will be decoded to Unicode + * according to this encoding, and then serialized into the server encoding, + * where possible. + * @param how Integer specifying which XML API to test, like every other how + * in this class; here the only valid choices are 5 (SAX), 6 (StAX), or + * 7 (DOM). + * @param inElement True if the text node should be wrapped in an element. + * @return The resulting XML content. + */ + @Function(schema="javatest", implementor="postgresql_xml") + public static SQLXML xmlTextNode( + byte[] stuff, String encoding, int how, boolean inElement) + throws Exception + { + if ( 5 > how || how > 7 ) + throw new SQLDataException( + "how must be 5-7 for xmlTextNode", "22003"); + + String stuffString = new String(stuff, encoding); + Connection c = DriverManager.getConnection("jdbc:default:connection"); + SQLXML rx = c.createSQLXML(); + + switch ( how ) + { + case 5: + SAXResult sxr = rx.setResult(SAXResult.class); + sxr.getHandler().startDocument(); + if ( inElement ) + sxr.getHandler().startElement("", "sax", "sax", + new AttributesImpl()); + sxr.getHandler().characters( + stuffString.toCharArray(), 0, stuffString.length()); + if ( inElement ) + sxr.getHandler().endElement("", "sax", "sax"); + sxr.getHandler().endDocument(); + break; + case 6: + StAXResult stxr = rx.setResult(StAXResult.class); + stxr.getXMLStreamWriter().writeStartDocument(); + if ( inElement ) + stxr.getXMLStreamWriter().writeStartElement("", "stax", ""); + stxr.getXMLStreamWriter().writeCharacters(stuffString); + if ( inElement ) + stxr.getXMLStreamWriter().writeEndElement(); + stxr.getXMLStreamWriter().writeEndDocument(); + break; + case 7: + DOMResult dr = rx.setResult(DOMResult.class); + /* + * Why request features XML and Traversal? + * If the only features requested are from the set + * {Core, XML, LS} and maybe XPath, you get a brain-damaged + * DOMImplementation that violates the org.w3c.dom.DOMImplementation + * contract, as createDocument still tries to make a document + * element even when passed null,null,null when, according to the + * contract, it should not. To get the real implementation that + * works, ask for some feature it supports outside of that core set. + * I don't really need Traversal, but by asking for it, I get what + * I do need. + */ + Document d = DOMImplementationRegistry.newInstance() + .getDOMImplementation("XML Traversal") + .createDocument(null, null, null); + DocumentFragment df = d.createDocumentFragment(); + ( inElement ? df.appendChild(d.createElement("dom")) : df ) + .appendChild(d.createTextNode(stuffString)); + dr.setNode(df); + break; + } + return rx; + } + + /** + * Create and leave some number of SQLXML objects unclosed, unused, and + * unreferenced, as a test of reclamation. + * @param howmany Number of SQLXML instances to create. + * @param how If nonzero, the flavor of writing to request on the object + * before abandoning it; if zero, it is left in its initial, writable state. + */ + @Function(schema="javatest") + public static void unclosedSQLXML(int howmany, int how) throws SQLException + { + Connection c = DriverManager.getConnection("jdbc:default:connection"); + while ( howmany --> 0 ) + { + SQLXML sx = c.createSQLXML(); + if ( 0 < how ) + sxToResult(sx, how); + } + } + + + /** + * Return some instance of {@code Source} for reading an {@code SQLXML} + * object, depending on the parameter {@code how}. + *
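A hedged sketch of calling xmlTextNode above from a JDBC client, supplying UTF-8 bytes that contain a character unlikely to exist in a non-UTF-8 server encoding; it assumes a server built with XML support, the examples jar installed, and the usual java.sql imports.

static String textNodeProbe(Connection c) throws SQLException
{
	// 5 selects the SAX path; true wraps the text node in an element.
	byte[] stuff = "snowman \u2603".getBytes(
		java.nio.charset.StandardCharsets.UTF_8);
	try (PreparedStatement ps = c.prepareStatement(
		"SELECT javatest.xmltextnode(CAST(? AS bytea), 'UTF-8', 5, true)"))
	{
		ps.setBytes(1, stuff);
		try (ResultSet rs = ps.executeQuery())
		{
			rs.next();
			return rs.getString(1);
		}
	}
}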

    + * Note that this method always returns a {@code Source}, even for cases + * 1 and 2 (obtaining readable streams directly from the {@code SQLXML} + * object; this method wraps them in {@code Source}), and case 3 + * ({@code getString}; this method creates a {@code StringReader} and + * returns it wrapped in a {@code Source}. + */ + private static Source sxToSource(SQLXML sx, int how) throws SQLException + { + switch ( how ) + { + case 1: return new StreamSource(sx.getBinaryStream()); + case 2: return new StreamSource(sx.getCharacterStream()); + case 3: return new StreamSource(new StringReader(sx.getString())); + case 4: return sx.getSource(StreamSource.class); + case 5: return sx.getSource(SAXSource.class); + case 6: return sx.getSource(StAXSource.class); + case 7: return sx.getSource(DOMSource.class); + default: throw new SQLDataException("how should be 1-7", "22003"); + } + } + + /** + * Return some instance of {@code Result} for writing an {@code SQLXML} + * object, depending on the parameter {@code how}. + *

    + * Note that this method always returns a {@code Result}, even for cases + * 1 and 2 (obtaining writable streams directly from the {@code SQLXML} + * object; this method wraps them in {@code Result}), and case 3 + * ({@code setString}; this method creates a {@code StringWriter} and + * returns it wrapped in a {@code Result}. + *

    + * In case 3, it will be necessary, after writing, to get the {@code String} + * from the {@code StringWriter}, and call {@code setString} with it. + */ + private static Result sxToResult(SQLXML sx, int how) throws SQLException + { + switch ( how ) + { + case 1: return new StreamResult(sx.setBinaryStream()); + case 2: return new StreamResult(sx.setCharacterStream()); + case 3: return new StreamResult(new StringWriter()); + case 4: return sx.setResult(StreamResult.class); + case 5: return sx.setResult(SAXResult.class); + case 6: return sx.setResult(StAXResult.class); + case 7: + DOMResult r = sx.setResult(DOMResult.class); + allowFragment(r); // else it'll accept only DOCUMENT form + return r; + default: throw new SQLDataException("how should be 1-7", "22003"); + } + } + + /** + * Return some instance of {@code Source} for reading an {@code SQLXML} + * object, depending on the parameter {@code how}, applying any adjustments + * in {@code adjust}. + *
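The note about case 3 above translates to a small pattern worth spelling out; this is a sketch assuming a Transformer t, a Source src, and a writable SQLXML sx are already in hand, with the same imports as the example class.

// Case 3: no stream on the SQLXML object is written directly; collect the
// serialization in a StringWriter, then hand the whole string to setString.
StringWriter sw = new StringWriter();
t.transform(src, new StreamResult(sw));
sx.setString(sw.toString());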

    + * Allows {@code how} to be zero, meaning to let the implementation choose + * what kind of {@code Source} to present. Otherwise identical to the other + * {@code sxToSource}. + */ + private static Source sxToSource(SQLXML sx, int how, ResultSet adjust) + throws SQLException + { + Source s; + switch ( how ) + { + case 0: s = sx.getSource(Adjusting.XML.Source.class); break; + case 1: + case 2: + case 3: + case 4: + return sxToSource(sx, how); // no adjustments on a StreamSource + case 5: s = sx.getSource(Adjusting.XML.SAXSource.class); break; + case 6: s = sx.getSource(Adjusting.XML.StAXSource.class); break; + case 7: s = sx.getSource(Adjusting.XML.DOMSource.class); break; + default: throw new SQLDataException("how should be 0-7", "22003"); + } + + if ( s instanceof Adjusting.XML.Source ) + return applyAdjustments(adjust, (Adjusting.XML.Source)s).get(); + return s; + } + + /** + * Return some instance of {@code Result} for writing an {@code SQLXML} + * object, depending on the parameter {@code how} applying any adjustments + * in {@code adjust}. + *

    + * Allows {@code how} to be zero, meaning to let the implementation choose + * what kind of {@code Result} to present. Otherwise identical to the other + * {@code sxToResult}. + */ + private static Result sxToResult(SQLXML sx, int how, ResultSet adjust) + throws SQLException + { + Result r; + switch ( how ) + { + case 1: // you might wish you could adjust a raw BinaryStream + case 2: // or CharacterStream + case 3: // or String, but you can't. Ask for a StreamResult. + case 5: // SAXResult needs no adjustment + case 6: // StAXResult needs no adjustment + case 7: // DOMResult needs no adjustment + return sxToResult(sx, how); + case 4: r = sx.setResult(Adjusting.XML.StreamResult.class); break; + case 0: r = sx.setResult(Adjusting.XML.Result.class); break; + default: throw new SQLDataException("how should be 0-7", "22003"); + } + + if ( r instanceof Adjusting.XML.Result ) + return applyAdjustments(adjust, (Adjusting.XML.Result)r).get(); + return r; + } + + /** + * Ensure the closing of whatever method was used to add content to + * an {@code SQLXML} object. + *

    + * Before a {@code SQLXML} object that has been written to can be used by + * PostgreSQL (returned as a function result, plugged in as a prepared + * statement parameter or into a {@code ResultSet}, etc.), the method used + * for writing it must be "closed" to ensure the writing is complete. + *

    + * If it is set with {@link SQLXML#setString setString}, nothing more is + * needed; {@code setString} obviously sets the whole value at once. Any + * {@code OutputStream} or {@code Writer} obtained from + * {@link SQLXML#setBinaryStream setBinaryStream} or + * {@link SQLXML#setCharacterStream setCharacterStream}, or from + * {@link SQLXML#setResult setResult}{@code (StreamResult.class)}, has to be + * explicitly closed (a {@link Transformer} does not close its + * {@link Result} when the transformation is complete!). + * Those are cases 1, 2, and 4 here. + *
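A sketch of the closing chore for cases 1, 2, and 4, since a Transformer leaves it to the caller; it is not part of the example class, and assumes a Transformer and Source as in echoSQLXML above.

// a sketch, not part of the example class
static SQLXML writeAndClose(SQLXML sx, Transformer t, Source src)
throws SQLException
{
	OutputStream os = sx.setBinaryStream();
	try
	{
		t.transform(src, new StreamResult(os));
	}
	catch ( TransformerException e )
	{
		throw new SQLException("XML transformation failed", e);
	}
	finally
	{
		try { os.close(); } catch ( IOException e ) { /* log and continue */ }
	}
	return sx; // only now complete and safe to return or bind as a parameter
}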

    + * Cases 5 ({@code SAXResult}) and 6 ({@code StAXResult}) need no special + * attention; though the {@code Transformer} does not close them, the ones + * returned by this {@code SQLXML} implementation are set up to close + * themselves when the {@code endDocument} event is written. + *

+ * Case 3 (test of {@code setString}) is handled specially here. As this + * class allows testing of all techniques for writing the {@code SQLXML} + * object, and most of those involve a {@code Result}, case 3 is handled + * by also constructing a {@code Result} over a {@link StringWriter} and + * having the content written into that; this method then extracts the + * content from the {@code StringWriter} and passes it to {@code setString}. + * For cases 1 and 2, likewise, the stream obtained with + * {@code setBinaryStream} or {@code setCharacterStream} has been wrapped in + * a {@code Result} for generality in this example. + *

    + * A typical application will not need the generality seen here; it + * will usually know which technique it is using to write the {@code SQLXML} + * object, and only needs to know how to close that if it needs closing. + * @param r The {@code Result} onto which writing was done. + * @param sx The {@code SQLXML} object being written. + * @param how The integer used in this example class to select which method + * of writing the {@code SQLXML} object was to be tested. + * @return The {@code SQLXML} object {@code sx}, because why not? + */ + public static SQLXML ensureClosed(Result r, SQLXML sx, int how) + throws SQLException + { + switch ( how ) + { + case 1: + case 2: + case 4: + StreamResult sr = (StreamResult)r; + OutputStream os = sr.getOutputStream(); + Writer w = sr.getWriter(); + try + { + if ( null != os ) + os.close(); + if ( null != w ) + w.close(); + } + catch ( IOException ioe ) + { + throw new SQLException( + "Failure closing SQLXML result", "XX000"); + } + break; + case 3: + StringWriter sw = (StringWriter)((StreamResult)r).getWriter(); + String s = sw.toString(); + sx.setString(s); + break; + } + return sx; + } + + /** + * Configure a {@code DOMResult} to accept {@code CONTENT} (a/k/a + * document fragment), not only the more restrictive {@code DOCUMENT}. + *

    + * The other forms of {@code Result} that can be requested will happily + * accept {@code XML(CONTENT)} and not just {@code XML(DOCUMENT)}. + * The {@code DOMResult} is pickier, however: if you first call + * {@link DOMResult#setNode setNode} with a {@code DocumentFragment}, it + * will accept either form, but if you leave the node unset when passing the + * {@code DOMResult} to a transformer, the transformer will default to + * putting a {@code Document} node there, and then it will not accept a + * fragment. + *

    + * If you need to handle fragments, this method illustrates how to pre-load + * the {@code DOMResult} with an empty {@code DocumentFragment}. Note that + * if you use some XML processing package that supplies its own classes + * implementing DOM nodes, you may need to use a {@code DocumentFragment} + * instance obtained from that package. + */ + public static void allowFragment(DOMResult r) throws SQLException + { + try + { + r.setNode(DocumentBuilderFactory.newInstance() + .newDocumentBuilder().newDocument() + .createDocumentFragment()); + } + catch ( ParserConfigurationException pce ) + { + throw new SQLException("Failed initializing DOMResult", pce); + } + } + + private static void shovelBytes(InputStream is, OutputStream os) + throws IOException + { + byte[] b = new byte[8192]; + int got; + while ( -1 != (got = is.read(b)) ) + os.write(b, 0, got); + is.close(); + os.close(); + } + + private static void shovelChars(Reader r, Writer w) + throws IOException + { + char[] b = new char[8192]; + int got; + while ( -1 != (got = r.read(b)) ) + w.write(b, 0, got); + r.close(); + w.close(); + } + + /** + * Test the MappedUDT (in one direction anyway). + *
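Using allowFragment above is just one extra line before handing the DOMResult to a transformer; a sketch, assuming a writable SQLXML rx, a Transformer t, and a Source src holding XML(CONTENT):

DOMResult dr = rx.setResult(DOMResult.class);
allowFragment(dr); // pre-load an empty DocumentFragment so CONTENT is accepted
t.transform(src, dr);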

    + * Creates a {@code PassXML} object, the Java class that maps the + * {@code javatest.onexml} composite type, which has one member, of XML + * type. Stores a {@code SQLXML} value in that field of the {@code PassXML} + * object, and passes that to an SQL query that expects and returns + * {@code javatest.onexml}. Retrieves the XML from the value field of the + * {@code PassXML} object created to map the result of the query. + * @return The original XML value, if all goes well. + */ + @Function(schema="javatest", implementor="postgresql_xml") + public static SQLXML xmlFromComposite() throws SQLException + { + Connection c = DriverManager.getConnection("jdbc:default:connection"); + PreparedStatement ps = + c.prepareStatement("SELECT CAST(? AS javatest.onexml)"); + SQLXML x = c.createSQLXML(); + x.setString(""); + PassXML obj = new PassXML(); + obj.m_value = x; + obj.m_typeName = "javatest.onexml"; + ps.setObject(1, obj); + ResultSet r = ps.executeQuery(); + r.next(); + obj = (PassXML)r.getObject(1); + ps.close(); + return obj.m_value; + } + + /* + * Required to serve as a MappedUDT: + */ + /** + * No-arg constructor required of objects that will implement + * {@link SQLData}. + */ + public PassXML() { } + + private String m_typeName; + private SQLXML m_value; + + @Override + public String getSQLTypeName() { return m_typeName; } + + @Override + public void readSQL(SQLInput stream, String typeName) throws SQLException + { + m_typeName = typeName; + m_value = (SQLXML) stream.readObject(); + } + + @Override + public void writeSQL(SQLOutput stream) throws SQLException + { + stream.writeSQLXML(m_value); + } + + /** + * Class that will proxy methods to another {@code SQLXML} class. + *

+ * Used for testing that PL/Java can accept input for PostgreSQL from an + * {@code SQLXML} object not of its own implementation (for example, one + * obtained from a different JDBC driver for some other database). + *

    + * Only the {@code getSource} method is specially treated, to allow + * exercising the various flavors of source. + */ + public static class SQLXMLProxy implements SQLXML + { + private SQLXML m_sx; + private int m_how; + + public SQLXMLProxy(SQLXML sx, int how) + { + if ( null == sx ) + throw new NullPointerException("Null SQLXMLProxy target"); + if ( 1 > how || how > 7 || how == 3 ) + throw new IllegalArgumentException( + "\"how\" must be 1, 2, 4, 5, 6, or 7"); + m_sx = sx; + m_how = how; + } + + @Override + public void free() throws SQLException { m_sx.free(); } + + @Override + public InputStream getBinaryStream() throws SQLException + { + return m_sx.getBinaryStream(); + } + + @Override + public OutputStream setBinaryStream() throws SQLException + { + return m_sx.setBinaryStream(); + } + + @Override + public Reader getCharacterStream() throws SQLException + { + return m_sx.getCharacterStream(); + } + + @Override + public Writer setCharacterStream() throws SQLException + { + return m_sx.setCharacterStream(); + } + + @Override + public String getString() throws SQLException + { + return m_sx.getString(); + } + + @Override + public void setString(String value) throws SQLException + { + m_sx.setString(value); + } + + @Override + @SuppressWarnings("unchecked") // all the fun's when sourceClass is null + public T getSource(Class sourceClass) + throws SQLException + { + if ( null == sourceClass ) + { + switch ( m_how ) + { + case 1: + return (T)new StreamSource(m_sx.getBinaryStream()); + case 2: + return (T)new StreamSource(m_sx.getCharacterStream()); + case 4: + sourceClass = (Class)StreamSource.class; + break; + case 5: + sourceClass = (Class)SAXSource.class; + break; + case 6: + sourceClass = (Class)StAXSource.class; + break; + case 7: + sourceClass = (Class)DOMSource.class; + break; + } + } + return m_sx.getSource(sourceClass); + } + + @Override + public T setResult(Class resultClass) + throws SQLException + { + return m_sx.setResult(resultClass); + } + } + + /** + * Class that will mock an {@code SQLXML} instance, returning only binary or + * character stream data from a byte array or string supplied at + * construction. 
+ */ + public static class SQLXMLMock implements SQLXML + { + private String m_chars; + private byte[] m_bytes; + + public SQLXMLMock(String content) + { + if ( null == content ) + throw new NullPointerException("Null SQLXMLMock content"); + m_chars = content; + } + + public SQLXMLMock(byte[] content) + { + if ( null == content ) + throw new NullPointerException("Null SQLXMLMock content"); + m_bytes = content; + } + + @Override + public void free() throws SQLException { } + + @Override + public InputStream getBinaryStream() throws SQLException + { + if ( null != m_bytes ) + return new ByteArrayInputStream(m_bytes); + throw new UnsupportedOperationException( + "SQLXMLMock.getBinaryStream"); + } + + @Override + public OutputStream setBinaryStream() throws SQLException + { + throw new UnsupportedOperationException( + "SQLXMLMock.setBinaryStream"); + } + + @Override + public Reader getCharacterStream() throws SQLException + { + if ( null != m_chars ) + return new StringReader(m_chars); + throw new UnsupportedOperationException( + "SQLXMLMock.getCharacterStream"); + } + + @Override + public Writer setCharacterStream() throws SQLException + { + throw new UnsupportedOperationException( + "SQLXMLMock.setCharacterStream"); + } + + @Override + public String getString() throws SQLException + { + if ( null != m_chars ) + return m_chars; + throw new UnsupportedOperationException( + "SQLXMLMock.getString"); + } + + @Override + public void setString(String value) throws SQLException + { + throw new UnsupportedOperationException( + "SQLXMLMock.setString"); + } + + @Override + @SuppressWarnings("unchecked") // sourceClass==StreamSource is verified + public T getSource(Class sourceClass) + throws SQLException + { + if ( null != sourceClass && StreamSource.class != sourceClass ) + throw new UnsupportedOperationException( + "SQLXMLMock.getSource(" + sourceClass.getName() + ")"); + if ( null != m_chars ) + return (T) new StreamSource(new StringReader(m_chars)); + return (T) new StreamSource(new ByteArrayInputStream(m_bytes)); + } + + @Override + public T setResult(Class resultClass) + throws SQLException + { + throw new UnsupportedOperationException( + "SQLXMLMock.setResult"); + } + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Point.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Point.java index bc6d9372..6ef06f27 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Point.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Point.java @@ -21,7 +21,6 @@ import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.MappedUDT; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLType; import static org.postgresql.pljava.annotation.Function.Effects.IMMUTABLE; import static @@ -46,9 +45,9 @@ public class Point implements SQLData { * @param pt any instance of the type this UDT mirrors * @return the same instance passed in */ - @Function(schema="javatest", type="point", requires="point mirror type", + @Function(schema="javatest", requires="point mirror type", effects=IMMUTABLE, onNullInput=RETURNS_NULL) - public static Point logAndReturn(@SQLType("point") Point pt) { + public static Point logAndReturn(Point pt) { s_logger.info(pt.getSQLTypeName() + pt); return pt; } @@ -64,9 +63,7 @@ public static Point logAndReturn(@SQLType("point") Point pt) { @Function(schema="javatest", requires="point mirror type", 
provides="point assertHasValues", effects=IMMUTABLE, onNullInput=RETURNS_NULL) - public static void assertHasValues( - @SQLType("point") Point pt, - double x, double y) + public static void assertHasValues(Point pt, double x, double y) throws SQLException { if ( pt.m_x != x || pt.m_y != y ) diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PreJSR310.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PreJSR310.java new file mode 100644 index 00000000..726d46a5 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PreJSR310.java @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2018-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.Connection; +import java.sql.Date; +import java.sql.DriverManager; +import java.sql.Statement; +import java.sql.ResultSet; +import java.sql.Savepoint; +import java.sql.SQLException; + +import static java.util.logging.Logger.getAnonymousLogger; +import java.util.TimeZone; + +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; + +/** + * Some tests of pre-JSR 310 date/time/timestamp conversions. + *

    + * For now, just {@code java.sql.Date}, thanks to issue #199. + */ +@SQLAction(provides="language java_tzset", install={ + "SELECT sqlj.alias_java_language('java_tzset', true)" +}, remove={ + "DROP LANGUAGE java_tzset" +}) + +@SQLAction( + requires="issue199", install={ + "SELECT javatest.issue199()" +}) +public class PreJSR310 +{ + private static final String TZPRAGUE = "Europe/Prague"; + + static + { + TimeZone oldZone = TimeZone.getDefault(); + TimeZone tzPrague = TimeZone.getTimeZone(TZPRAGUE); + + try + { + TimeZone.setDefault(tzPrague); + } + finally + { + TimeZone.setDefault(oldZone); + } + } + + /** + * Test for a regression in PG date to/from java.sql.Date conversion + * identified in issue #199. + *

    + * Checks that two months of consecutive dates in October/November 2018 + * are converted correctly in the Europe/Prague timezone. The actual issue + * was by no means limited to that timezone, but this test reproducibly + * detects it. + *

    + * This function is defined in the 'alias' language {@code java_tzset}, for + * which there is an entry in the default {@code pljava.policy} granting + * permission to adjust the time zone, which is temporarily done here. + */ + @Function( + schema="javatest", language="java_tzset", + requires="language java_tzset", provides="issue199" + ) + public static void issue199() throws SQLException + { + TimeZone oldZone = TimeZone.getDefault(); + TimeZone tzPrague = TimeZone.getTimeZone(TZPRAGUE); + Connection c = DriverManager.getConnection("jdbc:default:connection"); + Statement s = c.createStatement(); + Savepoint svpt = c.setSavepoint(); + boolean ok = true; + try + { + TimeZone.setDefault(tzPrague); + s.execute("SET LOCAL TIME ZONE '" + TZPRAGUE + "'"); + + ResultSet rs = s.executeQuery( + "SELECT" + + " d, to_char(d, 'YYYY-MM-DD')" + + " FROM" + + " generate_series(0, 60) AS s(i)," + + " LATERAL (SELECT date '2018-10-01' + i) AS t(d)"); + while ( rs.next() ) + { + Date dd = rs.getDate(1); + String ds = rs.getString(2); + if ( ! ds.equals(dd.toString()) ) + ok = false; + } + } + finally + { + TimeZone.setDefault(oldZone); + c.rollback(svpt); // restore prior PG timezone + s.close(); + c.close(); + } + + if ( ok ) + getAnonymousLogger().info("issue 199 test ok"); + else + getAnonymousLogger().warning("issue 199 test not ok"); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/RecordParameterDefaults.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/RecordParameterDefaults.java new file mode 100644 index 00000000..291eb990 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/RecordParameterDefaults.java @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2018-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; + +import static java.util.Arrays.fill; + +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLType; + +/** + * Example demonstrating the use of a {@code RECORD} parameter as a way to + * supply an arbitrary sequence of named, typed parameters to a PL/Java + * function. + *

    + * Also tests the proper DDR generation of defaults for such parameters. + */ +public class RecordParameterDefaults implements ResultSetProvider +{ + /** + * Return the names, types, and values of parameters supplied as a single + * anonymous RECORD type; the parameter is given an empty-record default, + * allowing it to be omitted in calls, or used with the named-parameter + * call syntax. + *

    + * For example, this function could be called as: + *

    +	 * SELECT (paramDefaultsRecord()).*;
    +	 *
    + * or as: + *
    +	 * SELECT (paramDefaultsRecord(params => s)).*
    +	 * FROM (SELECT 42 AS a, '42' AS b, 42.0 AS c) AS s;
    +	 *
    + */ + @Function( + schema = "javatest", + out = { + "name text", "pgtypename text", "javaclass text", "tostring text" + } + ) + public static ResultSetProvider paramDefaultsRecord( + @SQLType(defaultValue={})ResultSet params) + throws SQLException + { + return new RecordParameterDefaults(params); + } + + /** + * Like paramDefaultsRecord but illustrating the use of a named row type + * with known structure, and supplying a default for the function + * parameter. + *

    + *

    +	 * SELECT paramDefaultsNamedRow();
    +	 *
    +	 * SELECT paramDefaultsNamedRow(userWithNum => ('fred', 3.14));
    +	 *
    + */ + @Function( + requires = "foobar tables", // created in Triggers.java + schema = "javatest" + ) + public static String paramDefaultsNamedRow( + @SQLType(value="javatest.foobar_2", defaultValue={"bob", "42"}) + ResultSet userWithNum) + throws SQLException + { + return String.format("username is %s and value is %s", + userWithNum.getObject("username"), userWithNum.getObject("value")); + } + + + + private final ResultSetMetaData m_paramrsmd; + private final Object[] m_values; + + RecordParameterDefaults(ResultSet paramrs) throws SQLException + { + m_paramrsmd = paramrs.getMetaData(); + /* + * Grab the values from the parameter SingleRowResultSet now; it isn't + * guaranteed to stay valid for the life of the set-returning function. + */ + m_values = new Object [ m_paramrsmd.getColumnCount() ]; + for ( int i = 0; i < m_values.length; ++ i ) + m_values[i] = paramrs.getObject( 1 + i); + } + + @Override + public boolean assignRowValues(ResultSet receiver, int currentRow) + throws SQLException + { + int col = 1 + currentRow; + if ( col > m_paramrsmd.getColumnCount() ) + return false; + receiver.updateString("name", m_paramrsmd.getColumnLabel(col)); + receiver.updateString("pgtypename", m_paramrsmd.getColumnTypeName(col)); + Object o = m_values[col - 1]; + receiver.updateString("javaclass", o.getClass().getName()); + receiver.updateString("tostring", o.toString()); + return true; + } + + @Override + public void close() throws SQLException + { + fill(m_values, null); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ReturnComposite.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ReturnComposite.java new file mode 100644 index 00000000..fb9965d0 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/ReturnComposite.java @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2020-2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; + +import java.util.Iterator; +import java.util.List; + +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLType; + +/** + * Demonstrates {@code @Function(out={...})} for a function that returns a + * non-predeclared composite type. 
+ */ +@SQLAction(requires = { "helloOutParams", "helloTable" }, install = { + "SELECT" + + " CASE WHEN want IS NOT DISTINCT FROM helloOutParams()" + + " THEN javatest.logmessage('INFO', 'composite return passes')" + + " ELSE javatest.logmessage('WARNING', 'composite return fails')" + + " END" + + " FROM" + + " (SELECT 'Hello' ::text, 'world' ::text) AS want", + + "WITH" + + " expected AS (VALUES" + + " ('Hello' ::text, 'twelve' ::text)," + + " ('Hello', 'thirteen')," + + " ('Hello', 'love')" + + " )" + + "SELECT" + + " CASE WHEN every(want IS NOT DISTINCT FROM got)" + + " THEN javatest.logmessage('INFO', 'set of composite return passes')" + + " ELSE javatest.logmessage('WARNING', 'set of composite return fails')" + + " END" + + " FROM" + + " (SELECT row_number() OVER (), * FROM expected) AS want" + + " LEFT JOIN (SELECT row_number() OVER (), * FROM hellotable()) AS got" + + " USING (row_number)" +}) +public class ReturnComposite implements ResultSetProvider.Large +{ + /** + * Returns a two-column composite result that does not have to be + * a predeclared composite type, or require the calling SQL query to + * follow the function call with a result column definition list, as is + * needed for a bare {@code RECORD} return type. + */ + @Function( + schema = "javatest", out = { "greeting text", "addressee text" }, + provides = "helloOutParams" + ) + public static boolean helloOutParams(ResultSet out) throws SQLException + { + out.updateString(1, "Hello"); + out.updateString(2, "world"); + return true; + } + + /** + * A function that does not return a composite type, despite having + * a similar Java form. + *

    + * Without the {@code type=} element, this would not be mistaken for + * composite. With the {@code type=} element (a contrived example, will cast + * the method's boolean result to text), PL/Java would normally match the + * method to the composite pattern (other than {@code pg_catalog.RECORD}, + * PL/Java does not pretend to know at compile time which types might be + * composite). The explicit {@code SQLType} annotation on the trailing + * {@code ResultSet} parameter forces it to be seen as an input, and the + * method to be seen as an ordinary method that happens to return boolean. + */ + @Function( + schema = "javatest", type = "text" + ) + public static boolean + notOutParams(@SQLType("pg_catalog.record") ResultSet in) + throws SQLException + { + return true; + } + + /** + * Returns a two-column table result that does not have to be + * a predeclared composite type, or require the calling SQL query to + * follow the function call with a result column definition list, as is + * needed for a bare {@code RECORD} return type. + */ + @Function( + schema = "javatest", out = { "greeting text", "addressee text" }, + provides = "helloTable" + ) + public static ResultSetProvider helloTable() + throws SQLException + { + return new ReturnComposite(); + } + + Iterator addressees = + List.of("twelve", "thirteen", "love").iterator(); + + @Override + public boolean assignRowValues(ResultSet out, long currentRow) + throws SQLException + { + if ( ! addressees.hasNext() ) + return false; + + out.updateString(1, "Hello"); + out.updateString(2, addressees.next()); + return true; + } + + @Override + public void close() + { + } + + /** + * Returns a result described by one {@code out} parameter. + *

    + * Such a method is written in the style of any method that returns + * a scalar value, rather than receiving a writable {@code ResultSet} + * as a parameter. + */ + @Function( + schema = "javatest", out = { "greeting text" } + ) + public static String helloOneOut() throws SQLException + { + return "Hello"; + } + + /** + * Has a boolean result described by one {@code out} parameter. + *

    + * Because this method returns boolean and has a trailing row-typed + * input parameter, that parameter must have an {@code SQLType} + * annotation so that the method will not look like the more-than-one-OUT + * composite form, which would be rejected as a likely mistake. + */ + @Function( + schema = "javatest", out = { "exquisite boolean" } + ) + public static boolean boolOneOut(@SQLType("pg_catalog.record") ResultSet in) + throws SQLException + { + return true; + } + + /** + * Returns a table result described by one {@code out} parameter. + *

    + * Such a method is written in the style of any method that returns a set + * of some scalar value, using an {@code Iterator} rather than a + * {@code ResultSetProvider} or {@code ResultSetHandle}. + */ + @Function( + schema = "javatest", out = { "addressee text" } + ) + public static Iterator helloOneOutTable() throws SQLException + { + return new ReturnComposite().addressees; + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SPIActions.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SPIActions.java new file mode 100644 index 00000000..c969ad09 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SPIActions.java @@ -0,0 +1,474 @@ +/* + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.Connection; +import java.sql.Date; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.Time; +import java.util.logging.Logger; + +import org.postgresql.pljava.SavepointListener; +import org.postgresql.pljava.Session; +import org.postgresql.pljava.SessionManager; +import org.postgresql.pljava.TransactionListener; + +import org.postgresql.pljava.annotation.Function; +import static org.postgresql.pljava.annotation.Function.Effects.*; +import org.postgresql.pljava.annotation.SQLAction; + +/** + * Some methods used for testing the SPI JDBC driver. 
+ * + * @author Thomas Hallgren + */ +@SQLAction(provides = "employees tables", install = { + "CREATE TABLE javatest.employees1" + + " (" + + " id int PRIMARY KEY," + + " name varchar(200)," + + " salary int" + + " )", + + "CREATE TABLE javatest.employees2" + + " (" + + " id int PRIMARY KEY," + + " name varchar(200)," + + " salary int," + + " transferDay date," + + " transferTime time" + + " )" + }, remove = { + "DROP TABLE javatest.employees2", + "DROP TABLE javatest.employees1" +} +) +@SQLAction(requires = "issue228", install = "SELECT javatest.issue228()") +public class SPIActions { + private static final String SP_CHECKSTATE = "sp.checkState"; + + private static final SavepointListener spListener = new SavepointListener() { + @Override + public void onAbort(Session session, Savepoint savepoint, + Savepoint parent) throws SQLException { + log("Abort of savepoint " + savepoint.getSavepointId()); + nextState(session, 3, 0); + } + + @Override + public void onCommit(Session session, Savepoint savepoint, + Savepoint parent) throws SQLException { + log("Commit of savepoint " + savepoint.getSavepointId()); + nextState(session, 3, 4); + } + + @Override + public void onStart(Session session, Savepoint savepoint, + Savepoint parent) throws SQLException { + log("Start of savepoint " + savepoint.getSavepointId()); + nextState(session, 0, 1); + } + }; + + @Function(schema="javatest", effects=STABLE) + public static String getDateAsString() throws SQLException { + ResultSet rs = null; + Statement stmt = null; + Connection conn = DriverManager + .getConnection("jdbc:default:connection"); + try { + stmt = conn.createStatement(); + rs = stmt.executeQuery("SELECT CURRENT_DATE"); + if (rs.next()) + return rs.getDate(1).toString(); + return "Date could not be retrieved"; + } finally { + if (rs != null) + rs.close(); + if (stmt != null) + stmt.close(); + conn.close(); + } + } + + @Function(schema="javatest", effects=STABLE) + public static String getTimeAsString() throws SQLException { + ResultSet rs = null; + Statement stmt = null; + Connection conn = DriverManager + .getConnection("jdbc:default:connection"); + try { + stmt = conn.createStatement(); + rs = stmt.executeQuery("SELECT CURRENT_TIME"); + if (rs.next()) + return rs.getTime(1).toString(); + return "Time could not be retrieved"; + } finally { + if (rs != null) + rs.close(); + if (stmt != null) + stmt.close(); + conn.close(); + } + } + + static void log(String msg) throws SQLException { + // GCJ has a somewhat serious bug (reported) + // + if ("GNU libgcj" + .equals( + SessionManager.current().frozenSystemProperties() + .getProperty("java.vm.name"))) { + System.out.print("INFO: "); + System.out.println(msg); + } else + Logger.getAnonymousLogger().config(msg); + } + + static void warn(String msg) throws SQLException { + // GCJ has a somewhat serious bug (reported) + // + if ("GNU libgcj" + .equals( + SessionManager.current().frozenSystemProperties() + .getProperty("java.vm.name"))) { + System.out.print("WARNING: "); + System.out.println(msg); + } else + Logger.getAnonymousLogger().warning(msg); + } + + @Function(schema="javatest", effects=IMMUTABLE) + public static int maxFromSetReturnExample(int base, int increment) + throws SQLException { + int max = Integer.MIN_VALUE; + Connection conn = DriverManager + .getConnection("jdbc:default:connection"); + PreparedStatement stmt = null; + ResultSet rs = null; + + try { + stmt = conn + .prepareStatement("SELECT base FROM setReturnExample(?, ?)"); + stmt.setInt(1, base); + stmt.setInt(2, increment); + rs = 
stmt.executeQuery(); + while (rs.next()) { + base = rs.getInt(1); + if (base > max) + max = base; + } + return base; + } finally { + if (rs != null) + rs.close(); + if (stmt != null) + stmt.close(); + conn.close(); + } + } + + /** + * Test of bug #1556 + */ + @Function(schema="javatest") + public static void nestedStatements(int innerCount) throws SQLException { + Connection connection = DriverManager + .getConnection("jdbc:default:connection"); + Statement statement = connection.createStatement(); + + // Create a set of ID's so that we can do somthing semi-useful during + // the long loop. + // + statement.execute("DELETE FROM javatest.employees1"); + statement.execute("INSERT INTO javatest.employees1 VALUES(" + + "1, 'Calvin Forrester', 10000)"); + statement.execute("INSERT INTO javatest.employees1 VALUES(" + + "2, 'Edwin Archer', 20000)"); + statement.execute("INSERT INTO javatest.employees1 VALUES(" + + "3, 'Rebecka Shawn', 30000)"); + statement.execute("INSERT INTO javatest.employees1 VALUES(" + + "4, 'Priscilla Johnson', 25000)"); + + int idx = 1; + ResultSet results = statement + .executeQuery("SELECT * FROM javatest.hugeResult(" + innerCount + + ")"); + while (results.next()) { + Statement innerStatement = connection.createStatement(); + innerStatement + .executeUpdate("UPDATE javatest.employees1 SET salary = salary + 1 WHERE id=" + + idx); + innerStatement.close(); + if (++idx == 5) + idx = 0; + } + results.close(); + statement.close(); + connection.close(); + } + + @SuppressWarnings("removal") // getAttribute / setAttribute + private static void nextState(Session session, int expected, int next) + throws SQLException { + Integer state = (Integer) session.getAttribute(SP_CHECKSTATE); + if (state == null || state.intValue() != expected) + throw new SQLException(SP_CHECKSTATE + ": Expected " + expected + + ", got " + state); + session.setAttribute(SP_CHECKSTATE, next); + } + + @Function(schema="javatest", effects=IMMUTABLE) + @SuppressWarnings("removal") // setAttribute + public static int testSavepointSanity() throws SQLException { + Connection conn = DriverManager + .getConnection("jdbc:default:connection"); + + // Create an anonymous savepoint. + // + log("Attempting to set an anonymous savepoint"); + Session currentSession = SessionManager.current(); + currentSession.setAttribute(SP_CHECKSTATE, 0); + currentSession.addSavepointListener(spListener); + + Savepoint sp = conn.setSavepoint(); + nextState(currentSession, 1, 2); + try { + Statement stmt = conn.createStatement(); + log("Attempting to set a SAVEPOINT using SQL (should fail)"); + stmt.execute("SAVEPOINT foo"); + } catch (SQLException e) { + log("It failed allright. Everything OK then"); + log("Rolling back to anonymous savepoint"); + + nextState(currentSession, 2, 3); + conn.rollback(sp); + nextState(currentSession, 1, 5); + return 1; + } finally { + currentSession.removeSavepointListener(spListener); + } + throw new SQLException( + "SAVEPOINT through SQL succeeded. That's bad news!"); + } + + /** + * Confirm JDBC behavior of Savepoint, in particular that a Savepoint + * rolled back to still exists and can be rolled back to again or released. 
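The behavior that issue228 and testSavepointSanity exercise reduces to a small recovery idiom usable inside any PL/Java function; the sketch below is not part of the example class, uses only imports already present in it, and the deliberately failing duplicate insert is just an illustration.

// a sketch, not part of the example class
static void recoverable() throws SQLException
{
	Connection c = DriverManager.getConnection("jdbc:default:connection");
	Savepoint sp = c.setSavepoint();
	try (Statement s = c.createStatement())
	{
		s.execute("INSERT INTO javatest.employees1 VALUES (1, 'dup', 0)");
		s.execute("INSERT INTO javatest.employees1 VALUES (1, 'dup', 0)");
		c.releaseSavepoint(sp);   // success: discard the savepoint
	}
	catch ( SQLException e )
	{
		c.rollback(sp);           // failure: the outer transaction survives
		// per issue228 above, sp itself is still valid here and may be
		// rolled back to again or released
	}
}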
+ */ + @Function(schema="javatest", provides="issue228") + public static void issue228() throws SQLException + { + boolean ok = true; + Connection conn = + DriverManager.getConnection("jdbc:default:connection"); + Statement s = conn.createStatement(); + try + { + Savepoint alice = conn.setSavepoint("alice"); + s.execute("SET LOCAL TIME ZONE 1"); + Savepoint bob = conn.setSavepoint("bob"); + s.execute("SET LOCAL TIME ZONE 2"); + conn.rollback(bob); + s.execute("SET LOCAL TIME ZONE 3"); + conn.releaseSavepoint(bob); + try + { + conn.rollback(bob); + ok = false; + warn("Savepoint \"bob\" should be invalid after release"); + } + catch ( SQLException e ) + { + if ( ! "3B001".equals(e.getSQLState()) ) + throw e; + } + conn.rollback(alice); + bob = conn.setSavepoint("bob"); + s.execute("SET LOCAL TIME ZONE 4"); + conn.rollback(alice); + try + { + conn.releaseSavepoint(bob); + ok = false; + warn( + "Savepoint \"bob\" should be invalid after outer rollback"); + } + catch ( SQLException e ) + { + if ( ! "3B001".equals(e.getSQLState()) ) + throw e; + } + conn.rollback(alice); + s.execute("SET LOCAL TIME ZONE 5"); + conn.releaseSavepoint(alice); + } + finally + { + s.close(); + if ( ok ) + log("issue 228 tests ok"); + } + } + + @Function(schema="javatest", effects=IMMUTABLE) + @SuppressWarnings("removal") // setAttribute + public static int testTransactionRecovery() throws SQLException { + Connection conn = DriverManager + .getConnection("jdbc:default:connection"); + + // Create an anonymous savepoint. + // + log("Attempting to set an anonymous savepoint"); + Session currentSession = SessionManager.current(); + currentSession.setAttribute(SP_CHECKSTATE, 0); + currentSession.addSavepointListener(spListener); + + Statement stmt = conn.createStatement(); + Savepoint sp = conn.setSavepoint(); + nextState(currentSession, 1, 2); + + try { + log("Attempting to execute a statement with a syntax error"); + stmt.execute("THIS MUST BE A SYNTAX ERROR"); + } catch (SQLException e) { + log("It failed. 
Let's try to recover " + + "by rolling back to anonymous savepoint"); + nextState(currentSession, 2, 3); + conn.rollback(sp); + nextState(currentSession, 1, 5); + log("Rolled back."); + log("Now let's try to execute a correct statement."); + + currentSession.setAttribute(SP_CHECKSTATE, 0); + sp = conn.setSavepoint(); + nextState(currentSession, 1, 2); + ResultSet rs = stmt.executeQuery("SELECT 'OK'"); + while (rs.next()) { + log("Expected: OK; Retrieved: " + rs.getString(1)); + } + rs.close(); + stmt.close(); + nextState(currentSession, 2, 3); + conn.releaseSavepoint(sp); + nextState(currentSession, 4, 5); + return 1; + } finally { + currentSession.removeSavepointListener(spListener); + } + + // Should never get here + return -1; + } + + @Function(schema="javatest", name="transferPeople") + public static int transferPeopleWithSalary(int salary) throws SQLException { + Connection conn = DriverManager + .getConnection("jdbc:default:connection"); + PreparedStatement select = null; + PreparedStatement insert = null; + PreparedStatement delete = null; + ResultSet rs = null; + + String stmt; + try { + stmt = "SELECT id, name, salary FROM employees1 WHERE salary > ?"; + log(stmt); + select = conn.prepareStatement(stmt); + + stmt = "INSERT INTO employees2(id, name, salary, transferDay, transferTime) VALUES (?, ?, ?, ?, ?)"; + log(stmt); + insert = conn.prepareStatement(stmt); + + stmt = "DELETE FROM employees1 WHERE id = ?"; + log(stmt); + delete = conn.prepareStatement(stmt); + + log("assigning parameter value " + salary); + select.setInt(1, salary); + log("Executing query"); + rs = select.executeQuery(); + int rowNo = 0; + log("Doing next"); + while (rs.next()) { + log("Processing row " + ++rowNo); + int id = rs.getInt(1); + String name = rs.getString(2); + int empSal = rs.getInt(3); + + insert.setInt(1, id); + insert.setString(2, name); + insert.setInt(3, empSal); + long now = System.currentTimeMillis(); + insert.setDate(4, new Date(now)); + insert.setTime(5, new Time(now)); + int nRows = insert.executeUpdate(); + log("Insert processed " + nRows + " rows"); + + delete.setInt(1, id); + nRows = delete.executeUpdate(); + log("Delete processed " + nRows + " rows"); + log("Doing next"); + } + if (rowNo == 0) + log("No row found"); + return rowNo; + } finally { + if (select != null) + select.close(); + if (insert != null) + insert.close(); + if (delete != null) + delete.close(); + conn.close(); + } + } + + static TransactionListener s_tlstnr; + + public static void registerTransactionListener() throws SQLException + { + Session currentSession = SessionManager.current(); + if ( null == s_tlstnr ) + { + s_tlstnr = new XactListener(); + currentSession.addTransactionListener(s_tlstnr); + } + else + { + currentSession.removeTransactionListener(s_tlstnr); + s_tlstnr = null; + } + } + + static class XactListener implements TransactionListener + { + public void onAbort(Session s) + { + System.err.println("aborting a transaction"); + } + public void onCommit(Session s) + { + System.err.println("committing a transaction"); + } + public void onPrepare(Session s) + { + System.err.println("preparing a transaction"); + } + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SetOfRecordTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SetOfRecordTest.java new file mode 100644 index 00000000..49abf138 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/SetOfRecordTest.java @@ -0,0 +1,72 @@ +/* + * Copyright 
(c) 2004-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.postgresql.pljava.ResultSetHandle; +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; + +/** + * Example implementing the {@code ResultSetHandle} interface, to return + * the {@link ResultSet} from any SQL {@code SELECT} query passed as a string + * to the {@link #executeSelect executeSelect} function. + */ +@SQLAction(requires="selecttorecords fn", +install= +" SELECT " + +" CASE WHEN r IS DISTINCT FROM ROW('Foo'::varchar, 1::integer, 1.5::float, " + +" 23.67::decimal(8,2), '2005-06-01'::date, '20:56'::time, " + +" '192.168'::cidr) " + +" THEN javatest.logmessage('WARNING', 'SetOfRecordTest not ok') " + +" ELSE javatest.logmessage('INFO', 'SetOfRecordTest ok') " + +" END " + +" FROM " + +" javatest.executeselecttorecords( " + +" 'select ''Foo'', 1, 1.5::float, 23.67, ''2005-06-01'', " + +" ''20:56''::time, ''192.168.0''') " + +" AS r(t_varchar varchar, t_integer integer, t_float float, " + +" t_decimal decimal(8,2), t_date date, t_time time, t_cidr cidr)" +) +public class SetOfRecordTest implements ResultSetHandle { + + @Function(schema="javatest", name="executeselecttorecords", + provides="selecttorecords fn") + public static ResultSetHandle executeSelect(String selectSQL) + throws SQLException { + return new SetOfRecordTest(selectSQL); + } + + private final PreparedStatement m_statement; + + public SetOfRecordTest(String selectSQL) throws SQLException { + Connection conn = DriverManager + .getConnection("jdbc:default:connection"); + m_statement = conn.prepareStatement(selectSQL); + } + + @Override + public void close() throws SQLException { + m_statement.close(); + } + + @Override + public ResultSet getResultSet() throws SQLException { + return m_statement.executeQuery(); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java index 214b8a44..bfdbf8c0 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -9,41 +9,71 @@ * Contributors: * Tada AB * Purdue University + * Chapman Flack */ package org.postgresql.pljava.example.annotation; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; import java.sql.SQLException; +import java.sql.SQLIntegrityConstraintViolationException; +import java.sql.Statement; import org.postgresql.pljava.TriggerData; import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; import org.postgresql.pljava.annotation.Trigger; import static org.postgresql.pljava.annotation.Trigger.Called.*; +import static org.postgresql.pljava.annotation.Trigger.Constraint.*; import static org.postgresql.pljava.annotation.Trigger.Event.*; +import static org.postgresql.pljava.annotation.Trigger.Scope.*; import static org.postgresql.pljava.annotation.Function.Security.*; +import static org.postgresql.pljava.example.LoggerTest.logMessage; + /** * Example creating a couple of tables, and a function to be called when - * triggered by insertion into either table. + * triggered by insertion into either table. In PostgreSQL 10 or later, + * also create a function and trigger that uses transition tables. + *
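+ *
+ * For the transition-table trigger declared below on {@code foobar_2}, the
+ * generated DDL corresponds roughly to a PostgreSQL 10+ statement of this
+ * shape (a sketch for orientation, not the exact statement the SQL generator
+ * emits):
+ *
+ * CREATE TRIGGER ... AFTER UPDATE ON javatest.foobar_2
+ *   REFERENCING OLD TABLE AS oldrows NEW TABLE AS newrows
+ *   FOR EACH STATEMENT EXECUTE PROCEDURE ...;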

    + * This example relies on {@code implementor} tags reflecting the PostgreSQL + * version, set up in the {@link ConditionalDDR} example. Transition tables + * appear in PG 10. + */ +@SQLAction( + provides = "foobar tables", + install = { + "CREATE TABLE javatest.foobar_1 ( username text, stuff text )", + "CREATE TABLE javatest.foobar_2 ( username text, value numeric )" + }, + remove = { + "DROP TABLE javatest.foobar_2", + "DROP TABLE javatest.foobar_1" + } +) +@SQLAction( + requires = "constraint triggers", + install = "INSERT INTO javatest.foobar_2(value) VALUES (45)" +) +@SQLAction( + requires = "foobar triggers", + provides = "foobar2_42", + install = "INSERT INTO javatest.foobar_2(value) VALUES (42)" +) +@SQLAction( + requires = { "transition triggers", "foobar2_42" }, + install = "UPDATE javatest.foobar_2 SET value = 43 WHERE value = 42" +) +/* + * Note for another day: this would seem an excellent place to add a + * regression test for github issue #134 (make sure invocations of a + * trigger do not fail with SPI_ERROR_UNCONNECTED). However, any test + * here that runs from the deployment descriptor will be running when + * SPI is already connected, so a regression would not be caught. + * A proper test for it will have to wait for a proper testing harness + * invoking tests from outside PL/Java itself. */ -@SQLActions({ - @SQLAction( - provides = "foobar tables", - install = { - "CREATE TABLE javatest.foobar_1 ( username text, stuff text )", - "CREATE TABLE javatest.foobar_2 ( username text, value numeric )" - }, - remove = { - "DROP TABLE javatest.foobar_2", - "DROP TABLE javatest.foobar_1" - } - ), - @SQLAction( - requires = "foobar triggers", - install = "INSERT INTO javatest.foobar_2(value) VALUES (42)" - ) -}) public class Triggers { /** @@ -56,11 +86,72 @@ public class Triggers security = INVOKER, triggers = { @Trigger(called = BEFORE, table = "foobar_1", events = { INSERT } ), - @Trigger(called = BEFORE, table = "foobar_2", events = { INSERT } ) + @Trigger(called = BEFORE, scope = ROW, table = "foobar_2", + events = { INSERT } ) }) public static void insertUsername(TriggerData td) throws SQLException { + ResultSet nrs = td.getNew(); // expect NPE in a DELETE/STATEMENT trigger + String col2asString = nrs.getString(2); + if ( "43".equals(col2asString) ) + td.suppress(); + nrs.updateString( "username", "bob"); + } + + /** + * Examine old and new rows in reponse to a trigger. + * Transition tables first became available in PostgreSQL 10. + */ + @Function( + implementor = "postgresql_ge_100000", + requires = "foobar tables", + provides = "transition triggers", + schema = "javatest", + security = INVOKER, + triggers = { + @Trigger(called = AFTER, table = "foobar_2", events = { UPDATE }, + tableOld = "oldrows", tableNew = "newrows" ) + }) + + public static void examineRows(TriggerData td) + throws SQLException + { + Connection co = DriverManager.getConnection("jdbc:default:connection"); + Statement st = co.createStatement(); + ResultSet rs = st.executeQuery( + "SELECT o.value, n.value" + + " FROM oldrows o FULL JOIN newrows n USING (username)"); + rs.next(); + int oval = rs.getInt(1); + int nval = rs.getInt(2); + if ( 42 == oval && 43 == nval ) + logMessage( "INFO", "trigger transition table test ok"); + else + logMessage( "WARNING", String.format( + "trigger transition table oval %d nval %d", oval, nval)); + } + + /** + * Throw exception if value to be inserted is 44. 
+ */ + @Function( + requires = "foobar tables", + provides = "constraint triggers", + schema = "javatest", + security = INVOKER, + triggers = { + @Trigger(called = AFTER, table = "foobar_2", events = { INSERT }, + scope = ROW, constraint = NOT_DEFERRABLE ) + }) + + public static void disallow44(TriggerData td) + throws SQLException + { + ResultSet nrs = td.getNew(); + if ( 44 == nrs.getInt( "value") ) + throw new SQLIntegrityConstraintViolationException( + "44 shall not be inserted", "23000"); } } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TypeRoundTripper.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TypeRoundTripper.java new file mode 100644 index 00000000..8c315155 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/TypeRoundTripper.java @@ -0,0 +1,507 @@ +/* + * Copyright (c) 2018-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; +import static java.lang.reflect.Modifier.isPublic; +import static java.lang.reflect.Modifier.isStatic; + +import java.lang.reflect.Array; + +import java.sql.Connection; +import static java.sql.DriverManager.getConnection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.Types; +import static java.sql.Types.VARCHAR; + +import java.sql.SQLException; +import java.sql.SQLDataException; +import java.sql.SQLNonTransientException; + +import java.util.Arrays; + +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLType; + +/** + * A class to simplify testing of PL/Java's mappings between PostgreSQL and + * Java/JDBC types. + *

    + * Provides one function, {@link #roundTrip roundTrip()}. Its single input + * parameter is an unspecified row type, so you can pass it a row that has + * exactly one column of any type. + *

    + * Its return type is also an unspecified row type, so you need to follow the + * function call with a column definition list of up to six columns. Each + * requested output column must have its name (case-insensitively) and type + * drawn from this table: + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
+ *<table>
+ *<caption>Items the roundTrip function can return</caption>
+ *<tr><th>Column name</th><th>Column type</th><th>What is returned</th></tr>
+ *<tr><td>TYPEPG</td><td>any text/varchar</td><td>The PostgreSQL type name</td></tr>
+ *<tr><td>TYPEJDBC</td><td>any text/varchar</td><td>The JDBC Types constant</td></tr>
+ *<tr><td>CLASSJDBC</td><td>any text/varchar</td><td>Name of the Java class JDBC claims (in metadata) it will instantiate</td></tr>
+ *<tr><td>CLASS</td><td>any text/varchar</td><td>Name of the Java class JDBC did instantiate</td></tr>
+ *<tr><td>TOSTRING</td><td>any text/varchar</td><td>Result of {@code toString()} on the object returned by
+ * {@code ResultSet.getObject()} ({@code Arrays.toString} if it is a primitive
+ * array, {@code Arrays.deepToString} if an array of reference type)</td></tr>
+ *<tr><td>ROUNDTRIPPED</td><td>same as input column</td><td>Result of passing the object returned by {@code ResultSet.getObject()}
+ * directly to {@code ResultSet.updateObject()}</td></tr>
+ *</table>
    + *

    + * Serving suggestion: + *

    + *SELECT
    + *  orig = roundtripped AS good, *
    + *FROM
    + *  (VALUES (timestamptz '2017-08-21 18:25:29.900005Z')) AS p(orig),
    + *  roundtrip(p) AS (roundtripped timestamptz);
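+ *
+ * The same call can request several of the informational columns at once; a
+ * minimal sketch, patterned on the issue192 check below and assuming the
+ * javatest schema is on the search path:
+ *
+ *SELECT *
+ *FROM
+ *  (VALUES (point '0,0')) AS p,
+ *  roundtrip(p) AS (typepg text, classjdbc text, class text, tostring text);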
    + *
    + */ +@SQLAction( + requires = {"TypeRoundTripper.roundTrip", "point mirror type"}, + install = { + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'timestamp roundtrip passes')" + + " ELSE javatest.logmessage('WARNING', 'timestamp roundtrip fails')" + + " END" + + " FROM" + + " (VALUES" + + " (timestamp '2017-08-21 18:25:29.900005')," + + " (timestamp '1970-03-07 17:37:49.300009')," + + " (timestamp '1919-05-29 13:08:33.600001')" + + " ) AS p(orig)," + + " roundtrip(p) AS (roundtripped timestamp)", + + " SELECT" + + " CASE WHEN every(orig = roundtripped)" + + " THEN javatest.logmessage('INFO', 'timestamptz roundtrip passes')" + + " ELSE javatest.logmessage('WARNING', 'timestamptz roundtrip fails')" + + " END" + + " FROM" + + " (VALUES" + + " (timestamptz '2017-08-21 18:25:29.900005Z')," + + " (timestamptz '1970-03-07 17:37:49.300009Z')," + + " (timestamptz '1919-05-29 13:08:33.600001Z')" + + " ) AS p(orig)," + + " roundtrip(p) AS (roundtripped timestamptz)", + + " SELECT" + + " CASE WHEN classjdbc = 'org.postgresql.pljava.example.annotation.Point'" + + " THEN javatest.logmessage('INFO', 'issue192 test passes')" + + " ELSE javatest.logmessage('WARNING', 'issue192 test fails')" + + " END" + + " FROM" + + " (VALUES (point '0,0')) AS p," + + " roundtrip(p) AS (classjdbc text)", + + " SELECT" + + " CASE WHEN every(outcome.ok)" + + " THEN javatest.logmessage('INFO', 'boolean[] passes')" + + " ELSE javatest.logmessage('WARNING', 'boolean[] fails')" + + " END" + + " FROM" + + " (SELECT '{t,null,f}'::boolean[]) AS p(orig)," + + " (VALUES (''), ('[Ljava.lang.Boolean;'), ('[Z')) as q(rqcls)," + + " roundtrip(p, rqcls) AS (class text, roundtripped boolean[])," + + " LATERAL (SELECT" + + " (rqcls = class OR rqcls = '')" + + " AND roundtripped =" + + " CASE WHEN class LIKE '[_' THEN array_replace(orig, null, false)" + + " ELSE orig END" + + " ) AS outcome(ok)", + + " SELECT" + + " CASE WHEN every(outcome.ok)" + + " THEN javatest.logmessage('INFO', '\"char\"[] passes')" + + " ELSE javatest.logmessage('WARNING', '\"char\"[] fails')" + + " END" + + " FROM" + + " (SELECT '{A,null,B}'::\"char\"[]) AS p(orig)," + + " (VALUES (''), ('[Ljava.lang.Byte;'), ('[B')) as q(rqcls)," + + " roundtrip(p, rqcls) AS (class text, roundtripped \"char\"[])," + + " LATERAL (SELECT" + + " (rqcls = class OR rqcls = '')" + + " AND roundtripped =" + + " CASE WHEN class LIKE '[_' THEN array_replace(orig, null, 0::\"char\")" + + " ELSE orig END" + + " ) AS outcome(ok)", + + " SELECT" + + " CASE WHEN every(outcome.ok)" + + " THEN javatest.logmessage('INFO', 'bytea passes')" + + " ELSE javatest.logmessage('WARNING', 'bytea fails')" + + " END" + + " FROM" + + " (SELECT '\\x010203'::bytea) AS p(orig)," + + " (VALUES (''), ('[B')) as q(rqcls)," + + " roundtrip(p, rqcls) AS (class text, roundtripped bytea)," + + " LATERAL (SELECT" + + " (rqcls = class OR rqcls = '')" + + " AND roundtripped = orig" + + " ) AS outcome(ok)", + + " SELECT" + + " CASE WHEN every(outcome.ok)" + + " THEN javatest.logmessage('INFO', 'int2[] passes')" + + " ELSE javatest.logmessage('WARNING', 'int2[] fails')" + + " END" + + " FROM" + + " (SELECT '{1,null,3}'::int2[]) AS p(orig)," + + " (VALUES (''), ('[Ljava.lang.Short;'), ('[S')) as q(rqcls)," + + " roundtrip(p, rqcls) AS (class text, roundtripped int2[])," + + " LATERAL (SELECT" + + " (rqcls = class OR rqcls = '')" + + " AND roundtripped =" + + " CASE WHEN class LIKE '[_' THEN array_replace(orig, null, 0::int2)" + + " ELSE orig END" + + " ) AS outcome(ok)", + + 
" SELECT" + + " CASE WHEN every(outcome.ok)" + + " THEN javatest.logmessage('INFO', 'int4[] passes')" + + " ELSE javatest.logmessage('WARNING', 'int4[] fails')" + + " END" + + " FROM" + + " (SELECT '{1,null,3}'::int4[]) AS p(orig)," + + " (VALUES (''), ('[Ljava.lang.Integer;'), ('[I')) as q(rqcls)," + + " roundtrip(p, rqcls) AS (class text, roundtripped int4[])," + + " LATERAL (SELECT" + + " (rqcls = class OR rqcls = '')" + + " AND roundtripped =" + + " CASE WHEN class LIKE '[_' THEN array_replace(orig, null, 0::int4)" + + " ELSE orig END" + + " ) AS outcome(ok)", + + " SELECT" + + " CASE WHEN every(outcome.ok)" + + " THEN javatest.logmessage('INFO', 'int8[] passes')" + + " ELSE javatest.logmessage('WARNING', 'int8[] fails')" + + " END" + + " FROM" + + " (SELECT '{1,null,3}'::int8[]) AS p(orig)," + + " (VALUES (''), ('[Ljava.lang.Long;'), ('[J')) as q(rqcls)," + + " roundtrip(p, rqcls) AS (class text, roundtripped int8[])," + + " LATERAL (SELECT" + + " (rqcls = class OR rqcls = '')" + + " AND roundtripped =" + + " CASE WHEN class LIKE '[_' THEN array_replace(orig, null, 0::int8)" + + " ELSE orig END" + + " ) AS outcome(ok)", + + " SELECT" + + " CASE WHEN every(outcome.ok)" + + " THEN javatest.logmessage('INFO', 'float4[] passes')" + + " ELSE javatest.logmessage('WARNING', 'float4[] fails')" + + " END" + + " FROM" + + " (SELECT '{1,null,3}'::float4[]) AS p(orig)," + + " (VALUES (''), ('[Ljava.lang.Float;'), ('[F')) as q(rqcls)," + + " roundtrip(p, rqcls) AS (class text, roundtripped float4[])," + + " LATERAL (SELECT" + + " (rqcls = class OR rqcls = '')" + + " AND roundtripped =" + + " CASE WHEN class LIKE '[_' THEN array_replace(orig, null, 0::float4)" + + " ELSE orig END" + + " ) AS outcome(ok)", + + " SELECT" + + " CASE WHEN every(outcome.ok)" + + " THEN javatest.logmessage('INFO', 'float8[] passes')" + + " ELSE javatest.logmessage('WARNING', 'float8[] fails')" + + " END" + + " FROM" + + " (SELECT '{1,null,3}'::float8[]) AS p(orig)," + + " (VALUES (''), ('[Ljava.lang.Double;'), ('[D')) as q(rqcls)," + + " roundtrip(p, rqcls) AS (class text, roundtripped float8[])," + + " LATERAL (SELECT" + + " (rqcls = class OR rqcls = '')" + + " AND roundtripped =" + + " CASE WHEN class LIKE '[_' THEN array_replace(orig, null, 0::float8)" + + " ELSE orig END" + + " ) AS outcome(ok)", + + " SELECT" + + " CASE WHEN every(outcome.ok)" + + " THEN javatest.logmessage('INFO', 'text[] passes')" + + " ELSE javatest.logmessage('WARNING', 'text[] fails')" + + " END" + + " FROM" + + " (SELECT '{foo,null,bar}'::text[]) AS p(orig)," + + " (VALUES (''), ('[Ljava.lang.String;')) as q(rqcls)," + + " roundtrip(p, rqcls) AS (class text, roundtripped text[])," + + " LATERAL (SELECT" + + " (rqcls = class OR rqcls = '')" + + " AND roundtripped = orig" + + " ) AS outcome(ok)", + } +) +public class TypeRoundTripper +{ + private TypeRoundTripper() { } + + /** + * Function accepting one parameter of row type (one column, any type) + * and returning a row with up to six columns (use a column definition list + * after the function call, choose column names from TYPEPG, TYPEJDBC, + * CLASSJDBC, CLASS, TOSTRING, ROUNDTRIPPED where any of the first five + * must have text/varchar type, while ROUNDTRIPPED must match the type of + * the input column). + * @param in The input row value (required to have exactly one column). + * @param classname Name of class to be explicitly requested (JDBC 4.1 + * feature) from {@code getObject}; pass an empty string (the default) to + * make no such explicit request. 
Accepts the form {@code Class.getName} + * would produce: canonical names or spelled-out primitives if not an array + * type, otherwise prefix left-brackets and primitive letter codes or + * {@code L}classname{@code ;}. + * @param prepare Whether the object retrieved from {@code in} should be + * passed as a parameter to an identity {@code PreparedStatement} and the + * result of that be returned. If false (the default), the value from + * {@code in} is simply forwarded directly to {@code out}. + * @param out The output row (supplied by PL/Java, representing the column + * definition list that follows the call of this function in SQL). + * @throws SQLException if {@code in} does not have exactly one column, if + * {@code out} has more than six, if a requested column name in {@code out} + * is not among those recognized, if a column of {@code out} is not of its + * required type, or if other stuff goes wrong. + */ + @Function( + schema = "javatest", + type = "RECORD", + provides = "TypeRoundTripper.roundTrip" + ) + public static boolean roundTrip( + ResultSet in, @SQLType(defaultValue="") String classname, + @SQLType(defaultValue="false") boolean prepare, ResultSet out) + throws SQLException + { + ResultSetMetaData inmd = in.getMetaData(); + ResultSetMetaData outmd = out.getMetaData(); + + Class clazz = null; + if ( ! "".equals(classname) ) + clazz = loadClass(classname); + + if ( 1 != inmd.getColumnCount() ) + throw new SQLDataException( + "in parameter must be a one-column row type", "22000"); + + int outcols = outmd.getColumnCount(); + if ( 6 < outcols ) + throw new SQLDataException( + "result description may have no more than six columns", + "22000"); + + String inTypePG = inmd.getColumnTypeName(1); + int inTypeJDBC = inmd.getColumnType(1); + Object val = (null == clazz) ? in.getObject(1) : in.getObject(1, clazz); + + if ( prepare ) + { + Connection c = getConnection("jdbc:default:connection"); + PreparedStatement ps = c.prepareStatement("SELECT ?"); + ps.setObject(1, val); + ResultSet rs = ps.executeQuery(); + rs.next(); + val = (null == clazz) ? rs.getObject(1) : rs.getObject(1, clazz); + rs.close(); + ps.close(); + c.close(); + } + + for ( int i = 1; i <= outcols; ++ i ) + { + String what = outmd.getColumnLabel(i); + + if ( "TYPEPG".equalsIgnoreCase(what) ) + { + assertTypeJDBC(outmd, i, VARCHAR); + out.updateObject(i, inTypePG); + } + else if ( "TYPEJDBC".equalsIgnoreCase(what) ) + { + assertTypeJDBC(outmd, i, VARCHAR); + out.updateObject(i, typeNameJDBC(inTypeJDBC)); + } + else if ( "CLASSJDBC".equalsIgnoreCase(what) ) + { + assertTypeJDBC(outmd, i, VARCHAR); + out.updateObject(i, inmd.getColumnClassName(1)); + } + else if ( "CLASS".equalsIgnoreCase(what) ) + { + assertTypeJDBC(outmd, i, VARCHAR); + out.updateObject(i, val.getClass().getName()); + } + else if ( "TOSTRING".equalsIgnoreCase(what) ) + { + assertTypeJDBC(outmd, i, VARCHAR); + out.updateObject(i, toString(val)); + } + else if ( "ROUNDTRIPPED".equalsIgnoreCase(what) ) + { + if ( ! 
inTypePG.equals(outmd.getColumnTypeName(i)) ) + throw new SQLDataException( + "Result ROUNDTRIPPED column must have same type as input", + "22000"); + out.updateObject(i, val); + } + else + throw new SQLDataException( + "Output column label \""+ what + "\" should be one of: " + + "TYPEPG, TYPEJDBC, CLASSJDBC, CLASS, TOSTRING, " + + "ROUNDTRIPPED", + "22000"); + } + + return true; + } + + static void assertTypeJDBC(ResultSetMetaData md, int i, int t) + throws SQLException + { + if ( md.getColumnType(i) != t ) + throw new SQLDataException( + "Result column " + i + " must be of JDBC type " + + typeNameJDBC(t)); + } + + static String typeNameJDBC(int t) + { + for ( Field f : Types.class.getFields() ) + { + int m = f.getModifiers(); + if ( isPublic(m) && isStatic(m) && int.class == f.getType() ) + try + { + if ( f.getInt(null) == t ) + return f.getName(); + } + catch ( IllegalAccessException e ) { } + } + return String.valueOf(t); + } + + private static Class loadClass(String className) + throws SQLException + { + String noBrackets = className.replaceFirst("^\\[++", ""); + int ndims = (className.length() - noBrackets.length()); + + /* + * The naming conventions from Class.getName() could hardly be less + * convenient. If *not* an array, it's the same as the canonical name, + * with the primitive names spelled out. If it *is* an array, the + * primitives get their one-letter codes, and other class names have L + * prefix and ; suffix. Condense the two cases here into one offbeat + * hybrid form that will be used below. + */ + if ( 0 == ndims ) + noBrackets = + ("L" + noBrackets + + ":booleanZ:byteB:shortS:charC:intI:longJ:floatF:doubleD") + .replaceFirst( + "^L(\\w++)(?=:)(?:\\w*+:)*\\1(\\w)(?::.*+)?+$|:.++$", + "$2"); + else + noBrackets = noBrackets.replaceFirst(";$", ""); + + /* + * Invariant: thanks to the above normalization, whether array or not, + * noBrackets will now have this form: either the first (and only) + * character is one of the primitive character codes, or the first + * character is L and the rest is a class name (with no ; at the end). + */ + + Class c; + + switch ( noBrackets.charAt(0) ) + { + case 'Z': c = boolean.class; break; + case 'B': c = byte.class; break; + case 'S': c = short.class; break; + case 'C': c = char.class; break; + case 'I': c = int.class; break; + case 'J': c = long.class; break; + case 'F': c = float.class; break; + case 'D': c = double.class; break; + default: + try + { + noBrackets = noBrackets.substring(1); + c = Class.forName(noBrackets); + } + catch ( ClassNotFoundException e ) + { + throw new SQLNonTransientException( + "No such class: " + noBrackets, "46103", e); + } + } + + if ( 0 != ndims ) + c = Array.newInstance(c, new int[ndims]).getClass(); + + return c; + } + + private static String toString(Object o) + { + if ( ! 
o.getClass().isArray() ) + return o.toString(); + if (Object[].class.isInstance(o)) + return Arrays.deepToString(Object[].class.cast(o)); + if (boolean[].class.isInstance(o)) + return Arrays.toString(boolean[].class.cast(o)); + if (byte[].class.isInstance(o)) + return Arrays.toString(byte[].class.cast(o)); + if (short[].class.isInstance(o)) + return Arrays.toString(short[].class.cast(o)); + if (int[].class.isInstance(o)) + return Arrays.toString(int[].class.cast(o)); + if (long[].class.isInstance(o)) + return Arrays.toString(long[].class.cast(o)); + if (char[].class.isInstance(o)) + return Arrays.toString(char[].class.cast(o)); + if (float[].class.isInstance(o)) + return Arrays.toString(float[].class.cast(o)); + if (double[].class.isInstance(o)) + return Arrays.toString(double[].class.cast(o)); + return null; + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UDTScalarIOTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UDTScalarIOTest.java index 7d970b41..659a5e2a 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UDTScalarIOTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UDTScalarIOTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016 Tada AB and other contributors, as listed below. + * Copyright (c) 2016-2025 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -15,6 +15,8 @@ import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.net.MalformedURLException; @@ -25,7 +27,7 @@ import java.io.StringReader; import java.nio.ByteBuffer; import java.nio.CharBuffer; -import java.nio.charset.Charset; +import static java.nio.charset.StandardCharsets.UTF_8; import java.nio.charset.CharacterCodingException; import java.nio.charset.CharsetDecoder; import java.nio.charset.CharsetEncoder; @@ -54,9 +56,8 @@ * supported JDBC data types, which it writes on output, and reads/verifies on * input. */ -@SQLAction(requires={"udtscalariotest type", "udtscalariotest boot fn"}, - install={ - "SELECT javatest.udtscalariotest()", // force class to resolve +@SQLAction(requires= { "udtscalariotest type" }, + install = { "SELECT CAST('' AS javatest.udtscalariotest)" // test send/recv }) @BaseUDT(schema="javatest", provides="udtscalariotest type") @@ -96,18 +97,16 @@ public class UDTScalarIOTest implements SQLData s_gedicht = s_gedicht + s_gedicht + s_gedicht; // x3 s_gedicht = s_gedicht + s_gedicht + s_gedicht; // x9 - ByteBuffer bb = Charset.forName("UTF-8").newEncoder().encode( + ByteBuffer bb = UTF_8.newEncoder().encode( CharBuffer.wrap(s_gedicht)); s_utfgedicht = new byte[bb.limit()]; bb.get(s_utfgedicht); - s_url = new URL("http://tada.github.io/pljava/"); + s_url = new URI("http://tada.github.io/pljava/").toURL(); } - catch ( CharacterCodingException e ) - { - throw new RuntimeException(e); - } - catch ( MalformedURLException e ) + catch ( + CharacterCodingException | + URISyntaxException | MalformedURLException e ) { throw new RuntimeException(e); } @@ -217,23 +216,4 @@ public void readSQL(SQLInput stream, String typeName) throws SQLException if ( ! s_url.equals(stream.readURL()) ) throw new SQLException("url mismatch"); } - - /** - * A no-op function that forces the UDTScalarIOTest class to be loaded. 
- * This is only necessary because the deployment-descriptor install - * actions contain a query making use of this type, and PostgreSQL does - * not expect type in/out/send/recv functions to need an updated - * snapshot, so it will try to find this class in the snapshot from - * before the jar was installed, and fail. By providing this function, - * which defaults to volatile so it gets an updated snapshot, and - * calling it first, the class will be found and loaded; once it is - * loaded, the user-defined type operations are able to find it. - *

    - * Again, this is only an issue when trying to make use of the newly - * loaded UDT from right within the deployment descriptor for the jar. - */ - @Function(schema="javatest", provides="udtscalariotest boot fn") - public static void udtscalariotest() - { - } } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UnicodeRoundTripTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UnicodeRoundTripTest.java index 1911a760..c317dab2 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UnicodeRoundTripTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UnicodeRoundTripTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -15,7 +15,6 @@ import java.sql.SQLException; import org.postgresql.pljava.annotation.SQLAction; -import org.postgresql.pljava.annotation.SQLActions; import org.postgresql.pljava.annotation.Function; /** @@ -35,65 +34,58 @@ * calls this function on each (1k array, 1k string) pair, and counts a failure * if {@code matched} is false or the original and returned arrays or strings * do not match as seen in SQL. + *
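+ *
+ * A single block can also be spot-checked by hand; a minimal sketch, assuming
+ * the example jar's functions are on the search path:
+ *
+ * SELECT * FROM unicodetest('Hi', ARRAY[72,105]);
+ *
+ * which should report {@code matched} true and echo the inputs back.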

    + * This example sets an {@code implementor} tag based on a PostgreSQL condition, + * as further explained in the {@link ConditionalDDR} example. */ -@SQLActions({ - @SQLAction(provides="postgresql_unicodetest", install= -" select case " + -" when 90000 <= cast(current_setting('server_version_num') as integer) " + -" and 'UTF8' = current_setting('server_encoding') " + -" then set_config('pljava.implementors', 'postgresql_unicodetest,' || " + -" current_setting('pljava.implementors'), true) " + -" end" - ), - @SQLAction(requires="unicodetest fn", - implementor="postgresql_unicodetest", - install= -" with " + -" usable_codepoints ( cp ) as ( " + -" select generate_series(1,x'd7ff'::int) " + -" union all " + -" select generate_series(x'e000'::int,x'10ffff'::int) " + +@SQLAction(provides="postgresql_unicodetest", install= + "SELECT CASE" + + " WHEN 'UTF8' = current_setting('server_encoding')" + + " THEN set_config('pljava.implementors', 'postgresql_unicodetest,' ||" + + " current_setting('pljava.implementors'), true) " + + "END" +) +@SQLAction(requires="unicodetest fn", +implementor="postgresql_unicodetest", +install= +" WITH " + +" usable_codepoints ( cp ) AS ( " + +" SELECT generate_series(1,x'd7ff'::int) " + +" UNION ALL " + +" SELECT generate_series(x'e000'::int,x'10ffff'::int) " + " ), " + -" test_inputs ( groupnum, cparray, s ) as ( " + -" select " + -" cp / 1024 as groupnum, " + -" array_agg(cp order by cp), string_agg(chr(cp), '' order by cp) " + -" from usable_codepoints " + -" group by groupnum " + +" test_inputs ( groupnum, cparray, s ) AS ( " + +" SELECT " + +" cp / 1024 AS groupnum, " + +" array_agg(cp ORDER BY cp), string_agg(chr(cp), '' ORDER BY cp) " + +" FROM usable_codepoints " + +" GROUP BY groupnum " + " ), " + -" test_outputs as ( " + -" select groupnum, cparray, s, unicodetest(s, cparray) as roundtrip " + -" from test_inputs " + +" test_outputs AS ( " + +" SELECT groupnum, cparray, s, unicodetest(s, cparray) AS roundtrip " + +" FROM test_inputs " + " ), " + -" test_failures as ( " + -" select * " + -" from test_outputs " + -" where " + -" cparray != (roundtrip).cparray or s != (roundtrip).s " + -" or not (roundtrip).matched " + +" test_failures AS ( " + +" SELECT * " + +" FROM test_outputs " + +" WHERE " + +" cparray != (roundtrip).cparray OR s != (roundtrip).s " + +" OR NOT (roundtrip).matched " + " ), " + -" test_summary ( n_failing_groups, first_failing_group ) as ( " + -" select count(*), min(groupnum) from test_failures " + +" test_summary ( n_failing_groups, first_failing_group ) AS ( " + +" SELECT count(*), min(groupnum) FROM test_failures " + " ) " + -" select " + -" case when n_failing_groups > 0 then " + -" javatest.logmessage('WARNING', format( " + -" '%s 1k codepoint ranges had mismatches, first is block starting 0x%s', " + -" n_failing_groups, to_hex(1024 * first_failing_group))) " + -" else " + +" SELECT " + +" CASE WHEN n_failing_groups > 0 THEN " + +" javatest.logmessage('WARNING', n_failing_groups || " + +" ' 1k codepoint ranges had mismatches, first is block starting 0x' || " + +" to_hex(1024 * first_failing_group)) " + +" ELSE " + " javatest.logmessage('INFO', " + " 'all Unicode codepoint ranges roundtripped successfully.') " + -" end " + -" from test_summary" - ), - @SQLAction( - install= - "CREATE TYPE unicodetestrow AS " + - "(matched boolean, cparray integer[], s text)", - remove="DROP TYPE unicodetestrow", - provides="unicodetestrow type" - ) -}) +" END " + +" FROM test_summary" +) public class UnicodeRoundTripTest { /** * This function takes a 
string and an array of ints constructed in PG, @@ -111,8 +103,8 @@ public class UnicodeRoundTripTest { * @param rs OUT (matched, cparray, s) as described above * @return true to indicate the OUT tuple is not null */ - @Function(type="unicodetestrow", - requires="unicodetestrow type", provides="unicodetest fn") + @Function(out={"matched boolean", "cparray integer[]", "s text"}, + provides="unicodetest fn") public static boolean unicodetest(String s, int[] ints, ResultSet rs) throws SQLException { boolean ok = true; diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UsingProperties.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UsingProperties.java index ddd9f497..417853db 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UsingProperties.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UsingProperties.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -9,6 +9,7 @@ * Contributors: * Tada AB * Purdue University + * Chapman Flack */ package org.postgresql.pljava.example.annotation; @@ -22,9 +23,11 @@ import java.util.Iterator; import java.util.Map; import java.util.Properties; +import java.util.ResourceBundle; import java.util.logging.Logger; import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.annotation.SQLAction; /** * An example that retrieves a {@code Properties} resource, and returns @@ -32,7 +35,36 @@ * interface. * @author Thomas Hallgren */ -public class UsingProperties implements ResultSetProvider +@SQLAction(requires = {"propertyExampleAnno", "propertyExampleRB"}, install = { + "WITH" + + " expected AS (VALUES" + + " ('adjective' ::varchar(200), 'avaricious' ::varchar(200))," + + " ('noun', 'platypus')" + + " )" + + "SELECT" + + " CASE WHEN" + + " 2 = count(prop) AND every(prop IN (SELECT expected FROM expected))" + + " THEN javatest.logmessage('INFO', 'get resource passes')" + + " ELSE javatest.logmessage('WARNING', 'get resource fails')" + + " END" + + " FROM" + + " propertyExampleAnno() AS prop", + + "WITH" + + " expected AS (VALUES" + + " ('adjective' ::varchar(200), 'avaricious' ::varchar(200))," + + " ('noun', 'platypus')" + + " )" + + "SELECT" + + " CASE WHEN" + + " 2 = count(prop) AND every(prop IN (SELECT expected FROM expected))" + + " THEN javatest.logmessage('INFO', 'get ResourceBundle passes')" + + " ELSE javatest.logmessage('WARNING', 'get ResourceBundle fails')" + + " END" + + " FROM" + + " propertyExampleRB() AS prop" +}) +public class UsingProperties implements ResultSetProvider.Large { private static Logger s_logger = Logger.getAnonymousLogger(); private final Iterator m_propertyIterator; @@ -41,7 +73,9 @@ public UsingProperties() throws IOException { Properties v = new Properties(); - InputStream propStream = this.getClass().getResourceAsStream("example.properties"); + InputStream propStream = + this.getClass().getResourceAsStream("example.properties"); + if(propStream == null) { s_logger.fine("example.properties was null"); @@ -56,7 +90,34 @@ public UsingProperties() } } - public boolean assignRowValues(ResultSet receiver, int currentRow) + /** + * This constructor (distinguished by signature) reads the same property + * file, but using the {@code 
ResourceBundle} machinery instead of + * {@code Properties}. + */ + private UsingProperties(Void usingResourceBundle) + { + ResourceBundle b = + ResourceBundle.getBundle(getClass().getPackageName() + ".example"); + + Iterator keys = b.getKeys().asIterator(); + + m_propertyIterator = new Iterator>() + { + public boolean hasNext() + { + return keys.hasNext(); + } + + public Map.Entry next() + { + String k = keys.next(); + return Map.entry(k, b.getString(k)); + } + }; + } + + public boolean assignRowValues(ResultSet receiver, long currentRow) throws SQLException { if(!m_propertyIterator.hasNext()) @@ -75,7 +136,7 @@ public boolean assignRowValues(ResultSet receiver, int currentRow) * Return the contents of the {@code example.properties} resource, * one (key,value) row per entry. */ - @Function( type = "javatest._properties") + @Function(type = "javatest._properties", provides = "propertyExampleAnno") public static ResultSetProvider propertyExampleAnno() throws SQLException { @@ -89,6 +150,17 @@ public static ResultSetProvider propertyExampleAnno() } } + /** + * Return the contents of the {@code example.properties} resource, + * one (key,value) row per entry, using {@code ResourceBundle} to load it. + */ + @Function(type = "javatest._properties", provides = "propertyExampleRB") + public static ResultSetProvider propertyExampleRB() + throws SQLException + { + return new UsingProperties(null); + } + public void close() { } diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UsingPropertiesAsScalarSet.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UsingPropertiesAsScalarSet.java index 0e8a2582..3d5226a8 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UsingPropertiesAsScalarSet.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/UsingPropertiesAsScalarSet.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2016 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -37,7 +37,7 @@ public static Iterator getProperties() throws SQLException { StringBuilder bld = new StringBuilder(); - ArrayList list = new ArrayList(); + ArrayList list = new ArrayList<>(); Connection conn = DriverManager.getConnection("jdbc:default:connection"); Statement stmt = conn.createStatement(); try diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Variadic.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Variadic.java new file mode 100644 index 00000000..58e8c83a --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Variadic.java @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import static java.util.Arrays.stream; +import java.util.Objects; + +import org.postgresql.pljava.annotation.Function; +import static org.postgresql.pljava.annotation.Function.Effects.IMMUTABLE; +import static + org.postgresql.pljava.annotation.Function.OnNullInput.RETURNS_NULL; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLType; + +/** + * Provides example methods to illustrate variadic functions. + *
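+ *
+ * For instance, the {@code javaformat} wrapper declared in the deployment
+ * descriptor below can be called like this (mirroring one of the checks below;
+ * note the cast, so the variadic argument does not have unknown type):
+ *
+ * SELECT javatest.javaformat('Hello, %s', 'world'::text);  -- Hello, world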

    + * The key is the {@code @Function} annotation declaring the function variadic + * to PostgreSQL. The Java method parameter is declared as an ordinary array, + * not with Java's {@code ...} syntax; in fact, that would be impossible for a + * function with a composite return type (where the Java signature would have to + * include a {@code ResultSet} parameter after the variadic input parameter). + */ +@SQLAction( + requires = { "sumOfSquares", "sumOfSquaresBoxed" }, + install = { + /* + * In addition to the two sumOfSquares functions that are defined in + * this class using annotations, emit some direct SQL to declare a + * javaformat function that refers directly to java.lang.String.format, + * which is a function declared variadic in Java. + */ + "CREATE FUNCTION javatest.javaformat(" + + " format pg_catalog.text," + + " VARIADIC args pg_catalog.anyarray" + + " DEFAULT CAST(ARRAY[] AS pg_catalog.text[]))" + + " RETURNS pg_catalog.text" + + " RETURNS NULL ON NULL INPUT" + + " LANGUAGE java" + + " AS 'java.lang.String=" + + " java.lang.String.format(java.lang.String,java.lang.Object[])'", + + "COMMENT ON FUNCTION javatest.javaformat(" + + " pg_catalog.text, VARIADIC pg_catalog.anyarray) IS '" + + "Invoke Java''s String.format with a format string and any number of " + + "arguments. This is not quite as general as the Java method implies, " + + "because, while the variadic argument is declared ''anyarray'' and " + + "its members can have any type, PostgreSQL requires all of them to " + + "have the same type in any given call. Furthermore, in the VARIADIC " + + "anyarray case, as here, the actual arguments must not all have " + + "''unknown'' type; if supplying bare literals, one must be cast to " + + "a type. PostgreSQL will not recognize a call of a variadic function " + + "unless at least one argument to populate the variadic parameter is " + + "supplied; to allow calls that don''t pass any, give the variadic " + + "parameter an empty-array default, as done here.'", + + /* + * Test a bunch of variadic calls. + */ + "SELECT" + + " CASE" + + " WHEN s.ok AND d.ok" + + " THEN javatest.logmessage('INFO', 'variadic calls ok')" + + " ELSE javatest.logmessage('WARNING', 'variadic calls ng')" + + " END" + + " FROM" + + " (SELECT" + + " pg_catalog.every(expect IS NOT DISTINCT FROM got)" + + " FROM" + + " (VALUES" + + " (" + + " 'Hello, world'," + + " javatest.javaformat('Hello, %s', 'world'::text)" + + " )" + + " ) AS t(expect, got)" + + " ) AS s(ok)," + + " (SELECT" + + " pg_catalog.every(expect IS NOT DISTINCT FROM got)" + + " FROM" + + " (VALUES" + + " (14.0, javatest.sumOfSquares(1, 2, 3))," + + " (14.0, javatest.sumOfSquares(1, 2, null, 3))," + + " ( 0.0, javatest.sumOfSquares())," + + " (14.0, javatest.sumOfSquaresBoxed(1, 2, 3))," + + " (null, javatest.sumOfSquaresBoxed(1, 2, null, 3))" + + " ) AS t(expect, got)" + + " ) AS d(ok)" + }, + + remove = "DROP FUNCTION javatest.javaformat(pg_catalog.text,anyarray)" +) +public class Variadic { + private Variadic() { } // do not instantiate + + /** + * Compute a double-precision sum of squares, returning null if any input + * value is null. + *

    + * The {@code RETURNS_NULL} annotation does not mean the array collecting + * the variadic arguments cannot have null entries; it only means PostgreSQL + * will never call this function with null for the array itself. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL, + variadic = true, provides = "sumOfSquaresBoxed" + ) + public static Double sumOfSquaresBoxed(Double[] vals) + { + if ( stream(vals).anyMatch(Objects::isNull) ) + return null; + + return + stream(vals).mapToDouble(Double::doubleValue).map(v -> v*v).sum(); + } + + /** + * Compute a double-precision sum of squares, treating any null input + * as zero. + *

    + * The {@code RETURNS_NULL} annotation does not mean the array collecting + * the variadic arguments cannot have null entries; it only means PostgreSQL + * will never call this function with null for the array itself. Because + * the Java parameter type here is primitive and cannot represent nulls, + * PL/Java will have silently replaced any nulls in the input with zeros. + *
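+ *
+ * A few concrete calls, taken from the checks in the deployment descriptor
+ * above, show how the two variants differ:
+ *
+ * SELECT javatest.sumOfSquares(1, 2, 3);            -- 14
+ * SELECT javatest.sumOfSquares(1, 2, null, 3);      -- 14, nulls become zeros
+ * SELECT javatest.sumOfSquaresBoxed(1, 2, null, 3); -- null
+ * SELECT javatest.sumOfSquares();                   -- 0, via the empty-array default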

    + * This version also demonstrates using {@link SQLType @SQLType} to give + * the variadic parameter an empty-array default, so PostgreSQL will allow + * the function to be called with no corresponding arguments. Without that, + * PostgreSQL won't recognize a call to the function unless at least one + * argument corresponding to the variadic parameter is supplied. + */ + @Function( + schema = "javatest", effects = IMMUTABLE, onNullInput = RETURNS_NULL, + variadic = true, provides = "sumOfSquares" + ) + public static double sumOfSquares(@SQLType(defaultValue={}) double[] vals) + { + return stream(vals).map(v -> v*v).sum(); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/VarlenaUDTTest.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/VarlenaUDTTest.java index e2526f00..c9982490 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/VarlenaUDTTest.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/VarlenaUDTTest.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015- Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2023 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -18,6 +18,8 @@ import org.postgresql.pljava.annotation.SQLAction; import org.postgresql.pljava.annotation.BaseUDT; +import org.postgresql.pljava.annotation.Function; +import static org.postgresql.pljava.annotation.Function.Effects.IMMUTABLE; /** * A User Defined Type with varlena storage, testing github issue 52. @@ -30,8 +32,9 @@ */ @SQLAction(requires="varlena UDT", install= " SELECT CASE v::text = v::javatest.VarlenaUDTTest::text " + -" WHEN true THEN javatest.logmessage('INFO', 'works for ' || v) " + -" ELSE javatest.logmessage('WARNING', 'fails for ' || v) " + +" WHEN true " + +" THEN javatest.logmessage('INFO', 'VarlenaUDTTest works for ' || v) " + +" ELSE javatest.logmessage('WARNING', 'VarlenaUDTTest fails for ' || v) " + " END " + " FROM (VALUES (('32767')), (('32768')), (('65536')), (('1048576'))) " + " AS t ( v )" @@ -43,6 +46,7 @@ public class VarlenaUDTTest implements SQLData { public VarlenaUDTTest() { } + @Function(effects=IMMUTABLE) public static VarlenaUDTTest parse( String s, String typname) { int i = Integer.parseInt( s); VarlenaUDTTest u = new VarlenaUDTTest(); @@ -51,6 +55,7 @@ public static VarlenaUDTTest parse( String s, String typname) { return u; } + @Function(effects=IMMUTABLE) public String toString() { return String.valueOf( apop); } @@ -59,11 +64,13 @@ public String getSQLTypeName() { return typname; } + @Function(effects=IMMUTABLE) public void writeSQL( SQLOutput stream) throws SQLException { for ( int i = 0 ; i < apop ; ++ i ) stream.writeByte( (byte)'a'); } + @Function(effects=IMMUTABLE) public void readSQL( SQLInput stream, String typname) throws SQLException { this.typname = typname; int i = 0; diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/XMLRenderedTypes.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/XMLRenderedTypes.java new file mode 100644 index 00000000..812233d1 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/XMLRenderedTypes.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2019-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.annotation; + +import java.sql.SQLXML; + +import java.sql.SQLException; + +import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLType; + +import static org.postgresql.pljava.example.LoggerTest.logMessage; + +/** + * Class illustrating use of {@link SQLXML} to operate on non-XML data types + * for which PL/Java provides an XML rendering. + *
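+ *
+ * For example, a stored column-default expression can be rendered as XML (a
+ * minimal sketch along the lines of the check below, assuming at least one
+ * row exists in {@code pg_attrdef}):
+ *
+ * SELECT javatest.pgNodeTreeAsXML(adbin) FROM pg_catalog.pg_attrdef LIMIT 1;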

    + * Everything mentioning the type XML here needs a conditional implementor tag + * in case of being loaded into a PostgreSQL instance built without that type. + */ +@SQLAction(implementor="postgresql_xml", requires="pgNodeTreeAsXML", install= +"WITH" + +" a(t) AS (SELECT adbin FROM pg_catalog.pg_attrdef LIMIT 1)" + +" SELECT" + +" CASE WHEN pgNodeTreeAsXML(t) IS DOCUMENT" + +" THEN javatest.logmessage('INFO', 'pgNodeTreeAsXML ok')" + +" ELSE javatest.logmessage('WARNING', 'pgNodeTreeAsXML ng')" + +" END" + +" FROM a" +) +public class XMLRenderedTypes +{ + @Function( + schema="javatest", implementor="postgresql_xml", + provides="pgNodeTreeAsXML" + ) + public static SQLXML pgNodeTreeAsXML(@SQLType("pg_node_tree") SQLXML pgt) + throws SQLException + { + return pgt; + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/package-info.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/package-info.java index f8c64f3e..20ea8b40 100644 --- a/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/package-info.java +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/package-info.java @@ -1,8 +1,6 @@ /** - * The first examples that were converted to test the annotation-driven SQL generator instead of using hand-written SQL - * deployment code. + * The first examples that were converted to test the annotation-driven + * SQL generator instead of using hand-written SQL deployment code. * @author Thomas Hallgren * @author Chapman Flack */ diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/package-info.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/package-info.java new file mode 100644 index 00000000..8a348b8c --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/package-info.java @@ -0,0 +1,7 @@ +/** + * The examples that have been around the longest, and are deployed using + * hand-written SQL deployment code (see {@code src/main/resources/deployment}), + * not having been reworked to use annotations yet. + * @author Thomas Hallgren + */ +package org.postgresql.pljava.example; diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/S9.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/S9.java new file mode 100644 index 00000000..d14c8f5e --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/S9.java @@ -0,0 +1,3386 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.example.saxon; + +import java.math.BigDecimal; +import java.math.BigInteger; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import static java.sql.ResultSetMetaData.columnNoNulls; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Types; + +import java.sql.SQLException; +import java.sql.SQLDataException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLNonTransientException; +import java.sql.SQLSyntaxErrorException; + +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.OffsetTime; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; +import static java.time.ZoneOffset.UTC; + +import static java.util.Arrays.asList; +import static java.util.Arrays.fill; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import javax.xml.transform.Source; +import javax.xml.transform.Result; + +import static javax.xml.XMLConstants.W3C_XML_SCHEMA_NS_URI; +import static javax.xml.XMLConstants.XML_NS_URI; +import static javax.xml.XMLConstants.XML_NS_PREFIX; +import static javax.xml.XMLConstants.XMLNS_ATTRIBUTE_NS_URI; +import static javax.xml.XMLConstants.XMLNS_ATTRIBUTE; + +import net.sf.saxon.event.Receiver; + +import net.sf.saxon.lib.ConversionRules; +import net.sf.saxon.lib.NamespaceConstant; + +import static net.sf.saxon.om.NameChecker.isValidNCName; + +import net.sf.saxon.query.StaticQueryContext; + +import net.sf.saxon.regex.RegexIterator; +import net.sf.saxon.regex.RegularExpression; + +import net.sf.saxon.s9api.Destination; +import net.sf.saxon.s9api.DocumentBuilder; +import net.sf.saxon.s9api.ItemType; +import net.sf.saxon.s9api.ItemTypeFactory; +import net.sf.saxon.s9api.OccurrenceIndicator; +import net.sf.saxon.s9api.Processor; +import net.sf.saxon.s9api.QName; +import net.sf.saxon.s9api.SAXDestination; +import net.sf.saxon.s9api.SequenceType; +import static net.sf.saxon.s9api.SequenceType.makeSequenceType; +import net.sf.saxon.s9api.XdmAtomicValue; +import static net.sf.saxon.s9api.XdmAtomicValue.makeAtomicValue; +import net.sf.saxon.s9api.XdmEmptySequence; +import net.sf.saxon.s9api.XdmItem; +import net.sf.saxon.s9api.XdmNode; +import static net.sf.saxon.s9api.XdmNodeKind.DOCUMENT; +import net.sf.saxon.s9api.XdmValue; +import net.sf.saxon.s9api.XdmSequenceIterator; +import net.sf.saxon.s9api.XQueryCompiler; +import net.sf.saxon.s9api.XQueryEvaluator; +import net.sf.saxon.s9api.XQueryExecutable; + +import net.sf.saxon.s9api.SaxonApiException; + +import net.sf.saxon.trans.XPathException; + +import net.sf.saxon.serialize.SerializationProperties; + +import net.sf.saxon.type.AtomicType; +import net.sf.saxon.type.Converter; + +import net.sf.saxon.value.AtomicValue; +import net.sf.saxon.value.Base64BinaryValue; +import net.sf.saxon.value.CalendarValue; +import net.sf.saxon.value.HexBinaryValue; +import net.sf.saxon.value.StringValue; +import static net.sf.saxon.value.StringValue.getStringLength; + +import org.postgresql.pljava.ResultSetProvider; + 
+import org.postgresql.pljava.annotation.Function; +import org.postgresql.pljava.annotation.SQLAction; +import org.postgresql.pljava.annotation.SQLType; +import static org.postgresql.pljava.annotation.Function.OnNullInput.CALLED; + +/* For the xmltext function, which only needs plain SAX and not Saxon */ + +import javax.xml.transform.sax.SAXResult; +import org.xml.sax.ContentHandler; +import org.xml.sax.SAXException; + +/** + * Class illustrating use of XQuery with Saxon as the + * implementation, using its native "s9api". + *

    + * Supplies alternative, XML Query-based (as the SQL/XML standard dictates) + * implementation of some of SQL/XML, where the implementation in core + * PostgreSQL is limited to the capabilities of XPath (and XPath 1.0, at that). + *

+ * Without the syntactic sugar built into the core PostgreSQL parser, calls to + * a function in this class can look a bit more verbose in SQL, but reflect a + * straightforward rewriting from the standard syntax. For example, suppose + * there is a table {@code catalog_as_xml} with a single row whose {@code x} + * column is a (respectably sized) XML document recording the stuff in + * {@code pg_catalog}. It could be created like this: + *

    + * CREATE TABLE catalog_as_xml(x) AS
    + *   SELECT schema_to_xml('pg_catalog', false, true, '');
    + *
    + *

+ * <h2>Functions/predicates from ISO 9075-14 SQL/XML</h2>

    + *

+ * <h3>XMLQUERY</h3>

    + *

    + * In the syntax of the SQL/XML standard, here is a query that would return + * an XML element representing the declaration of a function with a specified + * name: + *

    + * SELECT XMLQUERY('/pg_catalog/pg_proc[proname eq $FUNCNAME]'
    + *                 PASSING BY VALUE x, 'numeric_avg' AS FUNCNAME
    + *                 RETURNING CONTENT EMPTY ON EMPTY)
    + * FROM catalog_as_xml;
    + *
    + *

    + * It binds the 'context item' of the query to {@code x}, and the {@code FUNCNAME} + * parameter to the given value, then evaluates the query and returns XML + * "CONTENT" (a tree structure with a document node at the root, but not + * necessarily meeting all the requirements of an XML "DOCUMENT"). It can be + * rewritten as this call to the {@link #xq_ret_content xq_ret_content} method: + *

    + * SELECT javatest.xq_ret_content('/pg_catalog/pg_proc[proname eq $FUNCNAME]',
    + *                                PASSING => p, nullOnEmpty => false)
    + * FROM catalog_as_xml,
    + * LATERAL (SELECT x AS ".", 'numeric_avg' AS "FUNCNAME") AS p;
    + *
    + *

    + * In the rewritten form, the form of result wanted ({@code RETURNING CONTENT}) + * is implicit in the called function name ({@code xq_ret_content}), and the + * parameters to pass to the query are moved out to a separate {@code SELECT} + * that supplies their values, types, and names (with the context item now given + * the name ".") and is passed by its alias into the query function. + *

    + * Because of an unconditional uppercasing that PL/Java's JDBC driver currently + * applies to column names, any parameter names, such as {@code FUNCNAME} above, + * must be spelled in uppercase where used in the XQuery text, or they will not + * be recognized. Because the unconditional uppercasing is highly likely to be + * dropped in a future PL/Java release, it is wisest until then to use only + * parameter names that really are uppercase, both in the XQuery text where they + * are used and in the SQL expression that supplies them. In PostgreSQL, + * identifiers that are not quoted are lowercased, so they must be both + * uppercase and quoted, in the SQL syntax, to be truly uppercase. + *

    + * In the standard, parameters and results (of XML types) can be passed + * {@code BY VALUE} or {@code BY REF}, where the latter means that the same + * nodes will retain their XQuery node identities over calls (note that this is + * a meaning unrelated to what "by value" and "by reference" usually mean in + * PostgreSQL's documentation). PostgreSQL's implementation of the XML type + * provides no way for {@code BY REF} semantics to be implemented, so everything + * happening here happens {@code BY VALUE} implicitly, and does not need to be + * specified. + *

    XMLEXISTS

    + *

    + * The function {@link #xmlexists xmlexists} here implements the + * standard function of the same name. Because it is the same name, it has to + * be either schema-qualified or double-quoted in a call to avoid confusion + * with the reserved word. In the syntax of the SQL/XML standard, here is a + * query returning a boolean value indicating whether a function with the + * specified name is declared: + *

    + * SELECT XMLEXISTS('/pg_catalog/pg_proc[proname eq $FUNCNAME]'
    + *                  PASSING BY VALUE x, 'numeric_avg' AS FUNCNAME)
    + * FROM catalog_as_xml;
    + *
    + *

    + * It can be rewritten as this call to the {@link #xmlexists xmlexists} method: + *

    + * SELECT "xmlexists"('/pg_catalog/pg_proc[proname eq $FUNCNAME]',
    + *                    PASSING => p)
    + * FROM catalog_as_xml,
    + * LATERAL (SELECT x AS ".", 'numeric_avg' AS "FUNCNAME") AS p;
    + *
    + *

    XMLTABLE

    + *

    + * The function {@link #xmltable xmltable} here implements (much of) the + * standard function of the same name. Because it is the same name, it has to + * be either schema-qualified or double-quoted in a call to avoid confusion + * with the reserved word. A rewritten form of the first example in the PostgreSQL manual could be: + *

    + * SELECT xmltable.*
    + * FROM
    + *	xmldata,
    + *
    + *	LATERAL (SELECT data AS ".", 'not specified'::text AS "DPREMIER") AS p,
    + *
    + *	"xmltable"('//ROWS/ROW', PASSING => p, COLUMNS => ARRAY[
    + *	 'data(@id)', null, 'COUNTRY_NAME',
    + *	 'COUNTRY_ID', 'SIZE[@unit eq "sq_km"]',
    + *	 'concat(SIZE[@unit ne "sq_km"], " ", SIZE[@unit ne "sq_km"]/@unit)',
    + *	 'let $e := PREMIER_NAME
    + *	  return if ( empty($e) ) then $DPREMIER else $e'
    + *	]) AS (
    + *	 id int, ordinality int8, "COUNTRY_NAME" text, country_id text,
    + *	 size_sq_km float, size_other text, premier_name text
    + *	);
    + *
    + *

    + * In the first column expression, without the {@code data()} function, the + * result would be a bare attribute node (one not enclosed in an XML element). + * Many implementations will accept a bare attribute as a column expression + * result, and simply assume the attribute's value is wanted, but it appears + * that a strict implementation of the spec must raise {@code err:XPTY0004} in + * such a case. This implementation is meant to be strict, so the attribute is + * wrapped in {@code data()} to extract and return its value. (See + * "About bare attribute nodes" in {@link #assignRowValues assignRowValues} + * for more explanation.) + *

    + * The {@code DPREMIER} parameter passed from SQL to the XQuery expression is + * spelled in uppercase (and also, in the SQL expression supplying it, quoted), + * for the reasons explained above for the {@code xq_ret_content} function. + *

    XMLCAST

    + *

    + * An ISO standard cast expression like + *

    + * XMLCAST(v AS wantedtype)
    + *
    + * can be rewritten with this idiom and the {@link #xmlcast xmlcast} function + * provided here: + *
    + * (SELECT r FROM (SELECT v) AS o, xmlcast(o) AS (r wantedtype))
    + *
    + *
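    + * For instance (an illustrative query, assuming these example functions are
    + * installed in the {@code javatest} schema), casting an XML value to an SQL
    + * integer might look like:
    + *
    + * SELECT r
    + * FROM (SELECT '<n>42</n>'::xml) AS o, javatest.xmlcast(o) AS (r int);
    + *
    + * which should produce the integer 42.
    + *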

    XQuery regular-expression functions in ISO 9075-2 Foundations

    + * The methods {@link #like_regex like_regex}, + * {@link #occurrences_regex occurrences_regex}, + * {@link #position_regex position_regex}, + * {@link #substring_regex substring_regex}, and + * {@link #translate_regex translate_regex} provide, with slightly altered + * syntax, the ISO SQL predicate and functions of the same names. + *

    + * For the moment, they will only match newlines in the way W3C XQuery + * specifies, not in the more-flexible Unicode-compatible way ISO SQL specifies, + * and for the ones where ISO SQL allows {@code USING CHARACTERS} or + * {@code USING OCTETS}, only {@code USING CHARACTERS} will work. + *

    Extensions

    + *

    XQuery module prolog allowed

    + *

    + * Where any function here accepts an XQuery + * "expression" according to the SQL specification, in fact an XQuery + * "main module" will be accepted. Therefore, the query can be preceded by + * a prolog declaring namespaces, options, local variables and functions, etc. + *
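    + * For example (an illustrative call), a main module whose prolog declares a
    + * namespace prefix should be accepted where an expression is expected:
    + *
    + * SELECT javatest.xq_ret_content(
    + *  'declare namespace ex = "http://example.org/ns";
    + *   <ex:greeting>hello</ex:greeting>',
    + *  nullOnEmpty => false);
    + *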

    Saxon extension to XQuery regular expressions

    + *

    + * Saxon's implementation of XQuery regular expressions will accept a + * nonstandard flag string ending with {@code ;j} to use Java regular + * expressions rather than XQuery ones. That extension is available in the + * XQuery regular-expression methods provided here. + * @author Chapman Flack + */ +@SQLAction( + implementor = "postgresql_xml", // skip it all if no xml support + requires = "presentOnClassPath", + provides = "saxon9api", + install = + "SELECT CASE WHEN" + + " presentOnClassPath('net.sf.saxon.s9api.QName')" + + "THEN" + + " CAST(" + + " set_config('pljava.implementors', 'saxon9api,' || " + + " current_setting('pljava.implementors'), true)" + + " AS void" + + " )" + + "ELSE" + + " logMessage('INFO', 'Saxon examples skipped: s9api classes missing')" + + "END" +) +public class S9 implements ResultSetProvider.Large +{ + private S9( + XdmSequenceIterator xsi, + XQueryEvaluator[] columnXQEs, + SequenceType[] columnStaticTypes, + XMLBinary enc) + { + m_sequenceIterator = xsi; + m_columnXQEs = columnXQEs; + m_columnStaticTypes = columnStaticTypes; + m_atomize = new AtomizingFunction [ columnStaticTypes.length ]; + m_xmlbinary = enc; + } + + final XdmSequenceIterator m_sequenceIterator; + final XQueryEvaluator[] m_columnXQEs; + final SequenceType[] m_columnStaticTypes; + final SequenceType s_01untypedAtomic = makeSequenceType( + ItemType.UNTYPED_ATOMIC, OccurrenceIndicator.ZERO_OR_ONE); + final AtomizingFunction[] m_atomize; + final XMLBinary m_xmlbinary; + Binding.Assemblage m_outBindings; + + static final Connection s_dbc; + static final Processor s_s9p = new Processor(false); + static final ItemTypeFactory s_itf = new ItemTypeFactory(s_s9p); + + static final Pattern s_intervalSigns; + static final Pattern s_intervalSignSite; + + enum XMLBinary { HEX, BASE64 }; + enum Nulls { ABSENT, NIL }; + + static + { + try + { + s_dbc = DriverManager.getConnection("jdbc:default:connection"); + + /* + * XML Schema thinks an ISO 8601 duration must have no sign + * anywhere but at the very beginning before the P. PostgreSQL + * thinks that's the one place a sign must never be, and instead + * it should appear in front of every numeric field. (PostgreSQL + * accepts input where the signs vary, and there are cases where it + * cannot be normalized away: P1M-1D is a thing, and can't be + * simplified until anchored at a date to know how long the month + * is! The XML Schema type simply can't represent that, so mapping + * of such a value must simply fail, as we'll ensure below.) + * So, here's a regex with a capturing group for a leading -, and + * one for any field-leading -, and one for the absence of a field- + * leading -. Any PostgreSQL or XS duration ought to match overall, + * but the capturing group matches should be either (f,f,t) or + * (f,t,f) for a PostgreSQL duration, or either (f,f,t) or (t,f,t) + * for an XS duration. (f,t,t) would be a PostgreSQL interval with + * mixed signs, and inconvertible. + */ + s_intervalSigns = Pattern.compile( + "(-)?+(?:[PYMWDTH](?:(?:(-)|())\\d++)?+)++(?:(?:[.,]\\d*+)?+S)?+"); + /* + * To convert from the leading-sign form, need to find every spot + * where a digit follows a [PYMWDTH] to insert a - there. 
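 + * For example, in "-P1Y2M" this pattern matches the zero-width spots
 + * just before the "1" and the "2" (each digit that directly follows one
 + * of [PYMWDTH]), which is where the field-leading "-" signs would go.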
+ */ + s_intervalSignSite = Pattern.compile("(?<=[PYMWDTH])(?=\\d)"); + } + catch ( SQLException e ) + { + throw new ExceptionInInitializerError(e); + } + } + + static class PredefinedQueryHolders + { + static final XQueryCompiler s_xqc = s_s9p.newXQueryCompiler(); + static final QName s_qEXPR = new QName("EXPR"); + + static class DocumentWrapUnwrap + { + static final XQueryExecutable INSTANCE; + + static + { + try + { + INSTANCE = s_xqc.compile( + "declare construction preserve;" + + "declare variable $EXPR as item()* external;" + + "data(document{$EXPR}/child::node())"); + } + catch ( SaxonApiException e ) + { + throw new ExceptionInInitializerError(e); + } + } + } + } + + /** + * PostgreSQL (as of 12) lacks the XMLTEXT function, so here it is. + *

    + * As long as PostgreSQL does not have the {@code XML(SEQUENCE)} type, + * this can only be the {@code XMLTEXT(sve RETURNING CONTENT)} flavor, which + * does create a text node with {@code sve} as its value, but returns the + * text node wrapped in a document node. + *
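    + * For example (an illustrative call, assuming this example jar is installed
    + * so the function exists in the {@code javatest} schema):
    + *
    + * SELECT javatest.xmltext('Hello <world> & co.');
    + *
    + * should return XML content in which the markup characters have been escaped,
    + * along the lines of Hello &lt;world&gt; &amp; co.
    + *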

    + * This function doesn't actually require Saxon, but otherwise fits in with + * the theme here, implementing missing parts of SQL/XML for PostgreSQL. + * @param sve SQL string value to use in a text node + * @return XML content, the text node wrapped in a document node + */ + @Function(implementor="saxon9api", schema="javatest") + public static SQLXML xmltext(String sve) throws SQLException + { + SQLXML rx = s_dbc.createSQLXML(); + ContentHandler ch = rx.setResult(SAXResult.class).getHandler(); + + try + { + ch.startDocument(); + /* + * It seems XMLTEXT() should be such a trivial function to write, + * but already it reveals a subtlety in the SAX API docs. They say + * the third argument to characters() is "the number of characters + * to read from the array" and that follows a long discussion of how + * individual characters can (with code points above U+FFFF) consist + * of more than one Java char value. + * + * And yet, when you try it out (and include some characters above + * U+FFFF in the input), you discover the third argument isn't the + * number of characters, has to be the number of Java char values. + */ + ch.characters(sve.toCharArray(), 0, sve.length()); + ch.endDocument(); + } + catch ( SAXException e ) + { + rx.free(); + throw new SQLException(e.getMessage(), e); + } + + return rx; + } + + /** + * An implementation of XMLCAST. + *

    + * Will be declared to take and return type {@code RECORD}, where each must + * have exactly one component, just because that makes it easy to use + * existing JDBC metadata queries to find out the operand and target SQL + * data types. + *

    + * Serving suggestion: rewrite this ISO standard expression + *

    +	 * XMLCAST(v AS wantedtype)
    +	 *
    + * to this idiomatic one: + *
    +	 * (SELECT r FROM (SELECT v) AS o, xmlcast(o) AS (r wantedtype))
    +	 *
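    + * As a further illustration (assuming {@code bytea} is reported to JDBC as a
    + * binary type, so it maps to an XML Schema binary type), a binary value might
    + * be cast to XML with base64 encoding like this:
    + *
    + * SELECT r
    + * FROM (SELECT '\xdeadbeef'::bytea) AS o,
    + *      javatest.xmlcast(o, base64 => true) AS (r xml);
    + *
    + * which should yield the content 3q2+7w==.
    + *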
    + * @param operand a one-row, one-column record supplied by the caller, whose + * one typed value is the operand to be cast. + * @param base64 true if binary SQL values should be base64-encoded in XML; + * if false (the default), values will be encoded in hex. + * @param target a one-row, one-column record supplied by PL/Java from the + * {@code AS} clause after the function call, whose one column's type is the + * type to be cast to. + */ + @Function( + implementor="saxon9api", + schema="javatest", + type="pg_catalog.record", + onNullInput=CALLED, + settings="IntervalStyle TO iso_8601" + ) + public static boolean xmlcast( + ResultSet operand, @SQLType(defaultValue="false") Boolean base64, + ResultSet target) + throws SQLException + { + if ( null == operand ) + throw new SQLDataException( + "xmlcast \"operand\" must be (in this implementation) " + + "a non-null row type", "22004"); + + if ( null == base64 ) + throw new SQLDataException( + "xmlcast \"base64\" must be true or false, not null", "22004"); + XMLBinary enc = base64 ? XMLBinary.BASE64 : XMLBinary.HEX; + + assert null != target : "PL/Java supplied a null output record???"; + + if ( 1 != operand.getMetaData().getColumnCount() ) + throw new SQLDataException( + "xmlcast \"operand\" must be a row type with exactly " + + "one component", "22000"); + + if ( 1 != target.getMetaData().getColumnCount() ) + throw new SQLDataException( + "xmlcast \"target\" must be a row type with exactly " + + "one component", "22000"); + + Binding.Parameter op = + new BindingsFromResultSet(operand, false).iterator().next(); + + Binding.Parameter tg = + new BindingsFromResultSet(target, null).iterator().next(); + + int sd = op.typeJDBC(); + int td = tg.typeJDBC(); + + int castcase = + (Types.SQLXML == sd ? 2 : 0) | (Types.SQLXML == td ? 1 : 0); + + switch ( castcase ) + { + case 0: // neither sd nor td is an XML type + throw new SQLSyntaxErrorException( + "at least one of xmlcast \"operand\" or \"target\" must " + + "be of XML type", "42804"); + case 3: // both XML + /* + * In an implementation closely following the spec, this case would + * be handled in parse analysis and rewritten from an XMLCAST to a + * plain CAST, and this code would never see it. This is a plain + * example function without benefit of a parser that can do that. + * In a DBMS with all the various SQL:2006 XML subtypes, there would + * be nontrivial work to do here, but casting from PostgreSQL's one + * XML type to itself is more of a warm-up exercise. 
+ */ + target.updateSQLXML(1, operand.getSQLXML(1)); + return true; + case 1: // something non-XML being cast to XML + assertCanCastAsXmlSequence(sd, "operand"); + Object v = op.valueJDBC(); + if ( null == v ) + { + target.updateNull(1); + return true; + } + ItemType xsbt = + mapSQLDataTypeToXMLSchemaDataType(op, enc, Nulls.ABSENT); + Iterator tv = + xmlCastAsSequence(v, enc, xsbt).iterator(); + try + { + target.updateSQLXML(1, + returnContent(tv, /*nullOnEmpty*/ false)); + } + catch ( SaxonApiException | XPathException e ) + { + throw new SQLException(e.getMessage(), "10000", e); + } + return true; + case 2: // XML being cast to something non-XML + assertCanCastAsXmlSequence(td, "target"); + SQLXML sx = operand.getSQLXML(1); + if ( null == sx ) + { + target.updateNull(1); + return true; + } + DocumentBuilder dBuilder = s_s9p.newDocumentBuilder(); + Source source = sx.getSource(null); + try + { + XdmValue xv = dBuilder.build(source); + XQueryEvaluator xqe = + PredefinedQueryHolders.DocumentWrapUnwrap.INSTANCE.load(); + xqe.setExternalVariable(PredefinedQueryHolders.s_qEXPR, xv); + xv = xqe.evaluate(); + /* + * It's zero-or-one, or XPTY0004 was thrown here. + */ + if ( 0 == xv.size() ) + { + target.updateNull(1); + return true; + } + XdmAtomicValue av = (XdmAtomicValue)xv; + xmlCastAsNonXML( + av, ItemType.UNTYPED_ATOMIC, tg, target, 1, enc); + } + catch ( SaxonApiException | XPathException e ) + { + throw new SQLException(e.getMessage(), "10000", e); + } + return true; + } + + throw new SQLFeatureNotSupportedException( + "cannot yet xmlcast from " + op.typePG() + + " to " + tg.typePG(), "0A000"); + } + + /** + * A simple example corresponding to {@code XMLQUERY(expression + * PASSING BY VALUE passing RETURNING CONTENT {NULL|EMPTY} ON EMPTY)}. + * @param expression An XQuery expression. Must not be {@code null} (in the + * SQL standard {@code XMLQUERY} syntax, it is not even allowed to be an + * SQL expression at all, only a string literal). + * @param nullOnEmpty pass {@code true} to get a null return in place of + * an empty sequence, or {@code false} to just get the empty sequence. + * @param passing A row value whose columns will be supplied to the query + * as parameters. Columns with names (typically supplied with {@code AS}) + * appear as predeclared external variables with matching names (in no + * namespace) in the query, with types derived from the SQL types of the + * row value's columns. There may be one (and no more than one) + * column with {@code AS "."} which, if present, will be bound as the + * context item. (The name {@code ?column?}, which PostgreSQL uses for an + * otherwise-unnamed column, is also accepted, which will often allow the + * context item to be specified with no {@code AS} at all. Beware, though, + * that PostgreSQL likes to invent column names from any function or type + * name that may appear in the value expression, so this shorthand will not + * always work, while {@code AS "."} will.) PL/Java's internal JDBC uppercases all column + * names, so any uses of the corresponding variables in the query must have + * the names in upper case. It is safest to also uppercase their appearances + * in the SQL (for which, in PostgreSQL, they must be quoted), so that the + * JDBC uppercasing is not being relied on. It is likely to be dropped in a + * future PL/Java release. + * @param namespaces An even-length String array where, of each pair of + * consecutive entries, the first is a namespace prefix and the second is + * the URI to which to bind it. 
The zero-length prefix sets the default + * element and type namespace; if the prefix has zero length, the URI may + * also have zero length, to declare that unprefixed elements are in no + * namespace. + */ + @Function( + implementor="saxon9api", + schema="javatest", + onNullInput=CALLED, + settings="IntervalStyle TO iso_8601" + ) + public static SQLXML xq_ret_content( + String expression, Boolean nullOnEmpty, + @SQLType(defaultValue={}) ResultSet passing, + @SQLType(defaultValue={}) String[] namespaces) + throws SQLException + { + /* + * The expression itself may not be null (in the standard, it isn't + * even allowed to be dynamic, and can only be a string literal!). + */ + if ( null == expression ) + throw new SQLDataException( + "XMLQUERY expression may not be null", "22004"); + + if ( null == nullOnEmpty ) + throw new SQLDataException( + "XMLQUERY nullOnEmpty may not be null", "22004"); + + try + { + XdmSequenceIterator x1 = + evalXQuery(expression, passing, namespaces); + return null == x1 ? null : returnContent(x1, nullOnEmpty); + } + catch ( SaxonApiException | XPathException e ) + { + throw new SQLException(e.getMessage(), "10000", e); + } + } + + /** + * An implementation of {@code XMLEXISTS(expression + * PASSING BY VALUE passing)}, using genuine XQuery. + * @param expression An XQuery expression. Must not be {@code null} (in the + * SQL standard {@code XMLQUERY} syntax, it is not even allowed to be an + * SQL expression at all, only a string literal). + * @param passing A row value whose columns will be supplied to the query + * as parameters. Columns with names (typically supplied with {@code AS}) + * appear as predeclared external variables with matching names (in no + * namespace) in the query, with types derived from the SQL types of the + * row value's columns. There may be one (and no more than one) + * column with {@code AS "."} which, if present, will be bound as the + * context item. (The name {@code ?column?}, which PostgreSQL uses for an + * otherwise-unnamed column, is also accepted, which will often allow the + * context item to be specified with no {@code AS} at all. Beware, though, + * that PostgreSQL likes to invent column names from any function or type + * name that may appear in the value expression, so this shorthand will not + * always work, while {@code AS "."} will.) PL/Java's internal JDBC uppercases all column + * names, so any uses of the corresponding variables in the query must have + * the names in upper case. It is safest to also uppercase their appearances + * in the SQL (for which, in PostgreSQL, they must be quoted), so that the + * JDBC uppercasing is not being relied on. It is likely to be dropped in a + * future PL/Java release. + * @param namespaces An even-length String array where, of each pair of + * consecutive entries, the first is a namespace prefix and the second is + * the URI to which to bind it. The zero-length prefix sets the default + * element and type namespace; if the prefix has zero length, the URI may + * also have zero length, to declare that unprefixed elements are in no + * namespace. + * @return True if the expression evaluates to a nonempty sequence, false if + * it evaluates to an empty one. Null if a context item is passed and its + * SQL value is null. 
+ */ + @Function( + implementor="saxon9api", + schema="javatest", + onNullInput=CALLED, + settings="IntervalStyle TO iso_8601" + ) + public static Boolean xmlexists( + String expression, + @SQLType(defaultValue={}) ResultSet passing, + @SQLType(defaultValue={}) String[] namespaces) + throws SQLException + { + /* + * The expression itself may not be null (in the standard, it isn't + * even allowed to be dynamic, and can only be a string literal!). + */ + if ( null == expression ) + throw new SQLDataException( + "XMLEXISTS expression may not be null", "22004"); + + XdmSequenceIterator x1 = + evalXQuery(expression, passing, namespaces); + if ( null == x1 ) + return null; + if ( ! x1.hasNext() ) + return false; + x1.close(); + return true; + } + + /** + * Implementation factor of XMLEXISTS and XMLQUERY. + * @return null if a context item is passed and its SQL value is null + */ + private static XdmSequenceIterator evalXQuery( + String expression, ResultSet passing, String[] namespaces) + throws SQLException + { + Binding.Assemblage bindings = new BindingsFromResultSet(passing, true); + + try + { + XQueryCompiler xqc = createStaticContextWithPassedTypes( + bindings, namespaceBindings(namespaces)); + + XQueryEvaluator xqe = xqc.compile(expression).load(); + + if ( storePassedValuesInDynamicContext(xqe, bindings, true) ) + return null; + + /* + * For now, punt on whether the is evaluated + * with XML 1.1 or 1.0 lexical rules.... XXX + */ + return xqe.iterator(); + } + catch ( SaxonApiException | XPathException e ) + { + throw new SQLException(e.getMessage(), "10000", e); + } + } + + /** + * Perform the final steps of something {@code RETURNING CONTENT}, + * with or without {@code nullOnEmpty}. + *

    + * The effects are to be the same as if the supplied sequence were passed + * as {@code $EXPR} to {@code document{$EXPR}}. + */ + private static SQLXML returnContent( + Iterator x, boolean nullOnEmpty) + throws SQLException, SaxonApiException, XPathException + { + if ( nullOnEmpty && ! x.hasNext() ) + return null; + + SQLXML rsx = s_dbc.createSQLXML(); + /* + * Keep this simple by requesting a specific type of Result rather + * than letting PL/Java choose. It happens (though this is a detail of + * the implementation) that SAXResult won't be a bad choice. + */ + SAXResult sr = rsx.setResult(SAXResult.class); + /* + * Michael Kay recommends the following as equivalent to the SQL/XML- + * mandated behavior of evaluating document{$x}. + * https://sourceforge.net/p/saxon/mailman/message/36969060/ + */ + SAXDestination d = new SAXDestination(sr.getHandler()); + Receiver r = d.getReceiver( + s_s9p.getUnderlyingConfiguration().makePipelineConfiguration(), + new SerializationProperties()); + r.open(); + while ( x.hasNext() ) + r.append(x.next().getUnderlyingValue()); + r.close(); + return rsx; + } + + /** + * An implementation of (much of) XMLTABLE, using genuine XML Query. + *

    + * The {@code columns} array must supply a valid XML Query expression for + * every column in the column definition list that follows the call of this + * function in SQL, except that the column for ordinality, if wanted, is + * identified by a {@code null} entry in {@code columns}. Syntax sugar in + * the standard allows an omitted column expression to imply an element test + * for an element with the same name as the column; that doesn't work here. + *

    + * For now, this implementation lacks the ability to specify defaults for + * when a column expression produces an empty sequence. It is possible to + * do defaults explicitly by rewriting a query expression expr as + * {@code let $e := }expr{@code return if(empty($e))then $D else $e} + * and supplying the default D as another query parameter, though + * such defaults will be evaluated only once when {@code xmltable} is called + * and will not be able to refer to other values in an output row. + * @param rows The single XQuery expression whose result sequence generates + * the rows of the resulting table. Must not be null. + * @param columns Array of XQuery expressions, exactly as many as result + * columns in the column definition list that follows the SQL call to this + * function. This array must not be null. It is allowed for one element (and + * no more than one) to be null, marking the corresponding column to be + * "FOR ORDINALITY" (the column must be of "exact numeric with scale zero" + * type; PostgreSQL supports 64-bit row counters, so {@code int8} is + * recommended). + * @param passing A row value whose columns will be supplied to the query + * as parameters, just as described for + * {@link #xq_ret_content xq_ret_content()}. If a context item is supplied, + * it is the context item for the {@code rows} query (the {@code columns} + * queries get their context item from the {@code rows} query's result). Any + * named parameters supplied here are available both in the {@code rows} + * expression and (though this goes beyond the standard) in every expression + * of {@code columns}, with their values unchanging from row to row. + * @param namespaces An even-length String array where, of each pair of + * consecutive entries, the first is a namespace prefix and the second is + * to URI to which to bind it, just as described for + * {@link #xq_ret_content xq_ret_content()}. + * @param base64 whether the effective, in-scope 'xmlbinary' setting calls + * for base64 or (the default, false) hexadecimal. + */ + @Function( + implementor="saxon9api", + schema="javatest", + onNullInput=CALLED, + settings="IntervalStyle TO iso_8601" + ) + public static ResultSetProvider xmltable( + String rows, String[] columns, + @SQLType(defaultValue={}) ResultSet passing, + @SQLType(defaultValue={}) String[] namespaces, + @SQLType(defaultValue="false") Boolean base64) + throws SQLException + { + if ( null == rows ) + throw new SQLDataException( + "XMLTABLE row expression may not be null", "22004"); + + if ( null == columns ) + throw new SQLDataException( + "XMLTABLE columns expression array may not be null", "22004"); + + if ( null == base64 ) + throw new SQLDataException( + "XMLTABLE base64 parameter may not be null", "22004"); + XMLBinary enc = base64 ? 
XMLBinary.BASE64 : XMLBinary.HEX; + + Binding.Assemblage rowBindings = + new BindingsFromResultSet(passing, true); + + Iterable> namespacepairs = + namespaceBindings(namespaces); + + XQueryEvaluator[] columnXQEs = new XQueryEvaluator[ columns.length ]; + SequenceType[] columnStaticTypes = new SequenceType[ columns.length ]; + + try + { + XQueryCompiler rowXQC = createStaticContextWithPassedTypes( + rowBindings, namespacepairs); + + XQueryExecutable rowXQX = rowXQC.compile(rows); + + Binding.Assemblage columnBindings = + new BindingsFromXQX(rowXQX, rowBindings); + + XQueryCompiler columnXQC = createStaticContextWithPassedTypes( + columnBindings, namespacepairs); + + boolean ordinalitySeen = false; + for ( int i = 0; i < columns.length; ++ i ) + { + String expr = columns[i]; + if ( null == expr ) + { + if ( ordinalitySeen ) + throw new SQLSyntaxErrorException( + "No more than one column expression may be null " + + "(=> \"for ordinality\")", "42611"); + ordinalitySeen = true; + continue; + } + XQueryExecutable columnXQX = columnXQC.compile(expr); + columnStaticTypes[i] = makeSequenceType( + columnXQX.getResultItemType(), + columnXQX.getResultCardinality()); + columnXQEs[i] = columnXQX.load(); + storePassedValuesInDynamicContext( + columnXQEs[i], columnBindings, false); + } + + XQueryEvaluator rowXQE = rowXQX.load(); + XdmSequenceIterator rowIterator; + if ( storePassedValuesInDynamicContext(rowXQE, rowBindings, true) ) + rowIterator = (XdmSequenceIterator) + XdmEmptySequence.getInstance().iterator(); + else + rowIterator = rowXQE.iterator(); + return new S9(rowIterator, columnXQEs, columnStaticTypes, enc); + } + catch ( SaxonApiException | XPathException e ) + { + throw new SQLException(e.getMessage(), "10000", e); + } + } + + /** + * Called when PostgreSQL has no need for more rows of the tabular result. + */ + @Override + public void close() + { + m_sequenceIterator.close(); + } + + /** + * Produce and return one row of + * the {@code XMLTABLE} result table per call. + *

    + * The row expression has already been compiled and its evaluation begun, + * producing a sequence iterator. The column XQuery expressions have all + * been compiled and are ready to evaluate, and the compiler's static + * analysis has bounded the data types they will produce. Because of the + * way the set-returning function protocol works, we don't know the types + * of the SQL output columns yet, until the first call of this function, + * when the {@code receive} parameter's {@code ResultSetMetaData} can be + * inspected to find out. So that will be the first thing done when called + * with {@code currentRow} of zero. + *

    + * Each call will then: (a) get the next value from the row expression's + * sequence iterator, then for each column, (b) evaluate that column's + * XQuery expression on the row value, and (c) assign that column's result + * to the SQL output column, casting to the proper type (which the SQL/XML + * spec has very exacting rules on how to do). + *

    + * A note before going any further: this implementation, while fairly + * typical of a PostgreSQL set-returning user function, is not the + * way the SQL/XML spec defines {@code XMLTABLE}. The official behavior of + * {@code XMLTABLE} is defined in terms of a rewriting, at the SQL level, + * into a much-expanded SQL query where each result column appears as an + * {@code XMLQUERY} call applying the column expression, wrapped in an + * {@code XMLCAST} to the result column type (with a + * {@code CASE WHEN XMLEXISTS} thrown in to support column defaults). + *

    + * As an ordinary user function, this example cannot rely on any fancy + * query rewriting during PostgreSQL's parse analysis. The slight syntax + * desugaring needed to transform a standard {@code XMLTABLE} call into a + * call of this "xmltable" is not too hard to learn and do by hand, but no + * one would ever want to write out by hand the whole longwinded "official" + * expansion prescribed in the spec. So this example is a compromise. + *

    + * The main thing lost in the compromise is the handling of column defaults. + * The full rewriting with per-column SQL expressions means that each + * column default expression can be evaluated exactly when/if needed, which + * is often the desired behavior. This implementation as an ordinary + * function, whose arguments all get evaluated ahead of the call, can't + * really do that. Otherwise, there's nothing in the spec that's inherently + * unachievable in this implementation. + *

    + * Which brings us to the matter of casting each column expression result + * to the proper type for its SQL result column. + *

    + * Like any spec, {@code SQL/XML} does not mandate that an implementation + * must be done in exactly the way presented in the spec (rewritten so each + * column value is produced by an {@code XMLQUERY} wrapped in an + * {@code XMLCAST}). The requirement is to produce the equivalent result. + *

    + * A look at the rewritten query shows that each column XQuery result value + * must be representable as some value in SQL's type system, not once, but + * twice: first as the result returned by {@code XMLQUERY} and passed along + * to {@code XMLCAST}, and finally with the output column's type as the + * result of the {@code XMLCAST}. + *

    + * Now, the output column type can be whatever is wanted. Importantly, it + * can be either an XML type, or any ordinary SQL scalar type, like a + * {@code float} or a {@code date}. Likewise, the XQuery column expression + * may have produced some atomic value (like an {@code xs:double} or + * {@code xs:date}), or some XML node, or any sequence of any of those. + *

    + * What are the choices for the type in the middle: the SQL value returned + * by {@code XMLQUERY} and passed on to {@code XMLCAST}? + *

    + * There are two. An ISO-standard SQL {@code XMLQUERY} can specify + * {@code RETURNING SEQUENCE} or {@code RETURNING CONTENT}. The first option + * produces the type {@code XML(SEQUENCE)}, a useful type that PostgreSQL + * does not currently have. {@code XML(SEQUENCE)} can hold exactly whatever + * an XQuery expression can produce: a sequence of any length, of any + * mixture of atomic values and XML nodes (even such oddities as attribute + * nodes outside of any element), in any order. An {@code XML(SEQUENCE)} + * value need not look anything like what "XML" normally brings to mind. + *

    + * With the other option, {@code RETURNING CONTENT}, the result of + * {@code XMLQUERY} has to be something that PostgreSQL's {@code xml} type + * could store: a serialized document with XML structure, but without the + * strict requirements of exactly one root element with no text outside it. + * At the limit, a completely non-XMLish string of ordinary text is + * perfectly acceptable XML {@code CONTENT}, as long as it uses the right + * {@code &...;} escapes for any characters that could look like XML markup. + *

    + * {@code XMLCAST} is able to accept either form as input, and deliver it + * to the output column as whatever type is needed. But the spec leaves no + * wiggle room as to which form to use: + *

      + *
    • If the result column type is {@code XML(SEQUENCE)}, then the + * {@code XMLQUERY} is to specify {@code RETURNING SEQUENCE}. It produces + * the column's result type directly, so the {@code XMLCAST} has nothing + * to do. + *
    • In every other case (every other case), the {@code XMLQUERY} + * is to specify {@code RETURNING CONTENT}. + *
    + *

    + * At first blush, that second rule should sound crazy. Imagine a column + * definition like + *

    +	 * growth float8 PATH 'math:pow(1.0 + $RATE, count(year))'
    +	 *
    + * The expression produces an {@code xs:double}, which can be assigned + * directly to a PostgreSQL {@code float8}, but the rule in the spec will + * have it first converted to a decimal string representation, made into + * a text node, wrapped in a document node, and returned as XML, to be + * passed along to {@code XMLCAST}, which parses it, discards the wrapping + * document node, parses the text content as a double, and returns that as + * a proper value of the result column type (which, in this example, it + * already is). + *

    + * The spec does not go into why this rule was chosen. The only rationale + * that makes sense to me is that the {@code XML(SEQUENCE)} data type + * is an SQL feature (X190) that not every implementation will support, + * so the spec has to define {@code XMLTABLE} using a rewritten query that + * can work on systems that do not have that type. (PostgreSQL itself, at + * present, does not have it.) + *

    + * The first rule, when {@code XML(SEQUENCE)} is the result column type, + * will naturally never be in play except on a system that has that type, in + * which case it can be used directly. But even such a system must still + * produce, in all other cases, results that match what a system without + * that type would produce. All those cases are therefore defined as if + * going the long way through {@code XML(CONTENT)}. + *

    + * Whenever the XQuery expression can be known to produce a (possibly empty + * or) singleton sequence of an atomic type, the long round trip can be + * shown to be idempotent, and we can skip right to casting the atomic type + * to the SQL result column type. A few other cases could be short-circuited + * the same way. But in general, for cases involving nodes or non-singleton + * sequences, it is safest to follow the spec punctiliously; the steps are + * defined in terms of XQuery constructs like {@code document {...}} and + * {@code data()}, which have specs of their own with many traps for the + * unwary, and the XQuery library provides implementations of them that are + * already tested and correct. + *

    + * Though most of the work can be done by the XQuery library, it may be + * helpful to look closely at just what the specification entails. + *

    + * Again, but for the case of an {@code XML(SEQUENCE)} result column, in all + * other cases the result must pass through + * {@code XMLQUERY(... RETURNING CONTENT EMPTY ON EMPTY)}. That, in turn, is + * defined as equivalent to {@code XMLQUERY(... RETURNING SEQUENCE)} with + * the result then passed to {@code XMLDOCUMENT(... RETURNING CONTENT)}, + * whose behavior is that of a + * document node constructor in XQuery, with + * construction mode {@code preserve}. The first step of that behavior + * is the same as Step 1e in the processing of + * direct element + * constructor content. The remaining steps are those laid out for the + * document node constructor. + *

    + * Clarity demands flattening this nest of specifications into a single + * ordered list of the steps to apply: + *

      + *
    • Any item in the sequence that is an array is flattened (its elements + * become items in the sequence). + *
    • If any item is a function, {@code err:XQTY0105} is raised. + *
    • Any sequence {@code $s} of adjacent atomic values is replaced by + * {@code string-join($s, ' ')}. + *
    • Any XML node in the sequence is copied (as detailed in the spec). + *
    • After all the above, any document node that may exist in the resulting + * sequence is flattened (replaced by its children). + *
    • A single text node is produced for any run of adjacent text nodes in + * the sequence (including any that have newly become adjacent by the + * flattening of document nodes), by concatenation with no separator (unlike + * the earlier step where atomic values were concatenated with a space as + * the separator). + *
    • If the sequence directly contains any attribute or namespace node, + * {@code err:XPTY0004} is raised. More on this below. + *
    • The sequence resulting from the preceding steps is wrapped in one + * new document node (as detailed in the spec). + *
    + *
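    + * As a small worked illustration of those steps: given the sequence
    + * ("a", "b", <e>c</e>), the two adjacent atomic values are joined into the
    + * string "a b" (which ends up as a text node), the element node is copied,
    + * and the result is wrapped in a new document node whose content serializes
    + * roughly as a b<e>c</e>.
    + *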

    + * At this point, the result could be returned to SQL as a value of + * {@code XML(CONTENT(ANY))} type, to be passed to an {@code XMLCAST} + * invocation. This implementation avoids that, and simply proceeds with the + * existing Java in-memory representation of the document tree, to the + * remaining steps entailed in an {@code XMLCAST} to the output column type: + *

      + *
    • If the result column type is an XML type, rewriting would turn the + * {@code XMLCAST} into a simple {@code CAST} and that's that. Otherwise, + * the result column has some non-XML, SQL type, and: + *
    • The algorithm "Removing XQuery document nodes from an XQuery sequence" + * is applied. By construction, we know the only such node is the one the + * whole sequence was recently wrapped in, two steps ago (you get your + * house back, you get your dog back, you get your truck back...). + *
    • That sequence of zero or more XML nodes is passed to the + * fn:data + * function, producing a sequence of zero or more atomic values, which will + * all have type {@code xs:untypedAtomic} (because the document-wrapping + * stringified any original atomic values and wrapped them in text nodes, + * for which the + * typed-value is {@code xs:untypedAtomic} by definition). This sequence + * also has cardinality zero-or-more, and may be shorter or longer than the + * original. + *
    • If the sequence is empty, the result column is assigned {@code NULL} + * (or the column's default value, if one was specified). Otherwise, the + * sequence is known to have length one or more, and: + *
    • The spec does not say this (which may be an oversight or bug), but the + * sequence must be checked for length greater than one, raising + * {@code err:XPTY0004} in that case. The following steps require it to be a + * singleton. + *
    • It is labeled as a singleton sequence of {@code xs:anyAtomicType} and + * used as input to an XQuery {@code cast as} expression. (Alternatively, it + * could be labeled a one-or-more sequence of {@code xs:anyAtomicType}, + * leaving the length check to be done by {@code cast as}, which would raise + * the same error {@code err:XPTY0004}, if longer than one.) + *
    • The {@code cast as} is to the XQuery type determined as in + * {@code determineXQueryFormalType} below, based on the SQL type of the + * result column; or, if the SQL type is a date/time type with no time zone, + * there is a first {@code cast as} to a specific XSD date/time type, which + * is (if it has a time zone) first adjusted to UTC, then stripped of its + * time zone, followed by a second {@code cast as} from that type to the one + * determined from the result column type. Often, that will be the same type + * as was used for the time zone adjustment, and the second {@code cast as} + * will have nothing to do. + *
    • The XQuery value resulting from the cast is converted and assigned to + * the SQL-typed result column, a step with many details but few surprises, + * therefore left for the morbidly curious to explore in the code. The flip + * side of the time zone removal described above happens here: if the SQL + * column type expects a time zone and the incoming value lacks one, it is + * given a zone of UTC. + *
    + *

    + * The later steps above, those following the length-one check, are + * handled by {@code xmlCastAsNonXML} below. + *

    + * The earlier steps, from the start through the {@code XMLCAST} early steps + * of document-node unwrapping, can all be applied by letting the original + * result sequence be {@code $EXPR} in the expression: + *

    +	 * declare construction preserve;
    +	 * data(document { $EXPR } / child::node())
    +	 *
    + * which may seem a bit of an anticlimax after seeing how many details lurk + * behind those tidy lines of code. + *
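    + * As a quick check of what those two lines accomplish: if $EXPR is bound to
    + * the singleton xs:double value 1.05e0, then document { $EXPR } produces a
    + * document node containing just the text node "1.05", and
    + * data(.../child::node()) returns the single xs:untypedAtomic value "1.05",
    + * which the later steps (xmlCastAsNonXML below) can then cast to the result
    + * column's SQL type.
    + *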

    + * About bare attribute nodes + *

    + * One consequence of the rules above deserves special attention. + * Consider something like: + *

    +	 * XMLTABLE('.' PASSING '<a foo="bar"/>' COLUMNS c1 VARCHAR PATH 'a/@foo');
    +	 *
    + *

    + * The result of the column expression is an XML attribute node all on its + * own, with name {@code foo} and value {@code bar}, not enclosed in any + * XML element. In the data type {@code XML(SEQUENCE)}, an attribute node + * can appear standalone like that, but not in {@code XML(CONTENT)}. + *

    + * Db2, Oracle, and even the XPath-based pseudo-XMLTABLE built into + * PostgreSQL, will all accept that query and produce the result "bar". + *

    + * However, a strict interpretation of the spec cannot produce that result, + * because the result column type ({@code VARCHAR}) is not + * {@code XML(SEQUENCE)}, meaning the result must be as if passed through + * {@code XMLDOCUMENT(... RETURNING CONTENT)}, and the XQuery + * {@code document { ... }} constructor is required to raise + * {@code err:XPTY0004} upon encountering any bare attribute node. The + * apparently common, convenient behavior of returning the attribute node's + * value component is not, strictly, conformant. + *

    + * This implementation will raise {@code err:XPTY0004}. That can be avoided + * by simply wrapping any such bare attribute in {@code data()}: + *

    +	 * ... COLUMNS c1 VARCHAR PATH 'a/data(@foo)');
    +	 *
    + *

    + * It is possible the spec has an editorial mistake and did not intend to + * require an error for this usage, in which case this implementation can + * be changed to match a future clarification of the spec. + */ + @Override + public boolean assignRowValues(ResultSet receive, long currentRow) + throws SQLException + { + if ( 0 == currentRow ) + { + m_outBindings = new BindingsFromResultSet(receive, m_columnXQEs); + int i = -1; + AtomizingFunction atomizer = null; + for ( Binding.Parameter p : m_outBindings ) + { + SequenceType staticType = m_columnStaticTypes [ ++ i ]; + /* + * A null in m_columnXQEs identifies the ORDINALITY column, + * if any. Assign nothing to m_atomize[i], it won't be used. + */ + if ( null == m_columnXQEs [ i ] ) + continue; + + if ( Types.SQLXML == p.typeJDBC() ) + continue; + + /* + * Ok, the output column type is non-XML; choose an atomizer, + * either a simple identity if the result type is statically + * known to be zero-or-one atomic, or the long way through the + * general-purpose one. If the type is statically known to be + * the empty sequence (weird, but not impossible), the identity + * atomizer suffices and we're on to the next column. + */ + OccurrenceIndicator occur = staticType.getOccurrenceIndicator(); + if ( OccurrenceIndicator.ZERO == occur ) + { + m_atomize [ i ] = (v, col) -> v; + continue; + } + + /* So, it isn't known to be empty. If the column + * expression type isn't known to be atomic, or isn't known to + * be zero-or-one, then the general-purpose atomizer--a trip + * through data(document { ... } / child::node())--must be used. + * This atomizer will definitely produce a sequence of length + * zero or one, raising XPTY0004 otherwise. So the staticType + * can be replaced by xs:anyAtomicType?. xmlCastAsNonXML will + * therefore be passed xs:anyAtomicType, as in the spec. + * BUT NO ... Saxon is more likely to find a converter from + * xs:untypedAtomic than from xs:anyAtomicType. + */ + ItemType itemType = staticType.getItemType(); + if ( occur.allowsMany() + || ! ItemType.ANY_ATOMIC_VALUE.subsumes(itemType) + /* + * The following tests may be punctilious to a fault. If we + * have a bare Saxon atomic type of either xs:base64Binary + * or xs:hexBinary type, Saxon will happily and successfully + * convert it to a binary string; but if we have the same + * thing as a less-statically-determinate type that we'll + * put through the atomizer, the conversion will fail unless + * its encoding matches the m_xmlbinary setting. That could + * seem weirdly unpredictable to a user, so we'll just + * (perversely) disallow the optimization (which would + * succeed) in the cases where the specified, unoptimized + * behavior would be to fail. + */ + || ItemType.HEX_BINARY.subsumes(itemType) + && (XMLBinary.HEX != m_xmlbinary) + || ItemType.BASE64_BINARY.subsumes(itemType) + && (XMLBinary.BASE64 != m_xmlbinary) + ) + { + if ( null == atomizer ) + { + XQueryEvaluator docWrapUnwrap = PredefinedQueryHolders + .DocumentWrapUnwrap.INSTANCE.load(); + atomizer = (v, col) -> + { + docWrapUnwrap.setExternalVariable( + PredefinedQueryHolders.s_qEXPR, v); + v = docWrapUnwrap.evaluate(); + /* + * It's already zero-or-one, or XPTY0004 was thrown + */ + return v; + }; + } + m_atomize [ i ] = atomizer; + /* + * The spec wants anyAtomicType below instead of + * untypedAtomic. But Saxon's getConverter is more likely + * to fail to find a converter from anyAtomicType to an + * arbitrary type, than from untypedAtomic. So use that. 
+ */ + m_columnStaticTypes [ i ] = s_01untypedAtomic; + } + else + { + /* + * We know we'll be getting zero-or-one atomic value, so + * the atomizing function can be the identity. + */ + m_atomize [ i ] = (v, col) -> v; + } + } + } + + if ( ! m_sequenceIterator.hasNext() ) + return false; + + ++ currentRow; // for use as 1-based ordinality column + + XdmItem it = m_sequenceIterator.next(); + + int i = 0; + for ( Binding.Parameter p : m_outBindings ) + { + XQueryEvaluator xqe = m_columnXQEs [ i ]; + AtomizingFunction atomizer = m_atomize [ i ]; + SequenceType staticType = m_columnStaticTypes [ i++ ]; + + if ( null == xqe ) + { + receive.updateLong( i, currentRow); + continue; + } + + try + { + xqe.setContextItem(it); + + if ( null == atomizer ) /* => result type was found to be XML */ + { + receive.updateSQLXML( + i, returnContent(xqe.iterator(), false)); + continue; + } + + XdmValue x1 = xqe.evaluate(); + x1 = atomizer.apply(x1, i); + + /* + * The value is now known to be atomic and either exactly + * one or zero-or-one. May as well just use size() to see if + * it's empty. + */ + if ( 0 == x1.size() ) + { + receive.updateNull(i); // XXX Handle defaults some day + continue; + } + XdmAtomicValue av = (XdmAtomicValue)x1.itemAt(0); + xmlCastAsNonXML( + av, staticType.getItemType(), p, receive, i, m_xmlbinary); + } + catch ( SaxonApiException | XPathException e ) + { + throw new SQLException(e.getMessage(), "10000", e); + } + } + return true; + } + + /** + * Store the values of any passed parameters and/or context item into the + * dynamic context, returning true if the overall query should + * short-circuit and return null. + *

    + * The specification requires the overall query to return null if a + * context item is specified in the bindings and its value is null. + * @param xqe XQuery evaluator into which to store the values. + * @param passing The bindings whose values should be installed. + * @param setContextItem True to handle the context item, if present in the + * bindings. False to skip any processing of the context item, in cases + * where the caller will handle that. + * @return True if the overall query's return should be null, false if the + * query should proceed to evaluation. + */ + private static boolean storePassedValuesInDynamicContext( + XQueryEvaluator xqe, Binding.Assemblage passing, boolean setContextItem) + throws SQLException, SaxonApiException + { + /* + * Is there or is there not a context item? + */ + if ( ! setContextItem || null == passing.contextItem() ) + { + /* "... there is no context item in XDC." */ + } + else + { + Object cve = passing.contextItem().valueJDBC(); + if ( null == cve ) + return true; + XdmValue ci; + if ( cve instanceof XdmNode ) // XXX support SEQUENCE input someday + { + ci = (XdmNode)cve; + } + else + ci = xmlCastAsSequence( + cve, XMLBinary.HEX, passing.contextItem().typeXS()); + switch ( ci.size() ) + { + case 0: + /* "... there is no context item in XDC." */ + break; + case 1: + xqe.setContextItem(ci.itemAt(0)); + break; + default: + throw new SQLDataException( + "invalid XQuery context item", "2200V"); + } + } + + /* + * For each XQV: + */ + for ( Binding.Parameter p : passing ) + { + String name = p.name(); + Object v = p.valueJDBC(); + XdmValue vv; + if ( null == v ) + vv = XdmEmptySequence.getInstance(); + else if ( v instanceof XdmNode ) // XXX support SEQUENCE someday + { + vv = (XdmNode)v; + } + else + vv = xmlCastAsSequence( + v, XMLBinary.HEX, p.typeXS().getItemType()); + xqe.setExternalVariable(new QName(name), vv); + } + + return false; + } + + /** + * Return a s9api {@link XQueryCompiler XQueryCompiler} with static context + * preconfigured as the Syntax Rules dictate. + * @param pt The single-row ResultSet representing the passed parameters + * and context item, if any. + * @param namespaces namespace keys and values to be declared. + */ + private static XQueryCompiler createStaticContextWithPassedTypes( + Binding.Assemblage pt, Iterable> namespaces) + throws SQLException, XPathException + { + XQueryCompiler xqc = s_s9p.newXQueryCompiler(); + xqc.declareNamespace( + "sqlxml", "http://standards.iso.org/iso9075/2003/sqlxml"); + // https://sourceforge.net/p/saxon/mailman/message/20318550/ : + xqc.declareNamespace("xdt", W3C_XML_SCHEMA_NS_URI); + + for ( Map.Entry e : namespaces ) + xqc.declareNamespace(e.getKey(), e.getValue()); + + /* + * This business of predeclaring global external named variables + * is not an s9api-level advertised ability in Saxon, hence the + * various getUnderlying.../getStructured... methods here to access + * the things that make it happen. + */ + StaticQueryContext sqc = xqc.getUnderlyingStaticContext(); + + for ( Binding.Parameter p : pt ) + { + String name = p.name(); + int ct = p.typeJDBC(); + assertCanCastAsXmlSequence(ct, name); + SequenceType st = p.typeXS(); + sqc.declareGlobalVariable( + new QName(name).getStructuredQName(), + st.getUnderlyingSequenceType(), null, true); + } + + /* + * Apply syntax rules to the context item, if any. 
+ */ + Binding.ContextItem ci = pt.contextItem(); + if ( null != ci ) + { + int ct = ci.typeJDBC(); + assertCanCastAsXmlSequence(ct, "(context item)"); + ItemType it = ci.typeXS(); + xqc.setRequiredContextItemType(it); + } + + return xqc; + } + + /** + * Check that something's type is "convertible to XML(SEQUENCE) + * according to the Syntax Rules of ... {@code }." + * That turns out not to be a very high bar; not much is excluded + * by those rules except collection, row, structured, or + * reference typed {@code }s. + * @param jdbcType The {@link Types JDBC type} to be checked. + * @param what A string to include in the exception message if the + * check fails. + * @throws SQLException if {@code jdbcType} is one of the prohibited types. + */ + private static void assertCanCastAsXmlSequence(int jdbcType, String what) + throws SQLException + { + if ( Types.ARRAY == jdbcType || Types.STRUCT == jdbcType + || Types.REF == jdbcType ) + throw new SQLSyntaxErrorException( + "The type of \"" + what + "\" is not suitable for " + + "XMLCAST to XML(SEQUENCE).", "42804"); + } + + /** + * The "determination of an XQuery formal type notation" algorithm. + *

    + * This is relied on for parameters and context items passed to + * {@code XMLQUERY} and therefore, {@code XMLTABLE} (and also, in the spec, + * {@code XMLDOCUMENT} and {@code XMLPI}). Note that it does not + * take an {@code XMLBinary} parameter, but rather imposes hexadecimal form + * unconditionally, so in the contexts where this is called, any + * {@code xmlbinary} setting is ignored. + * @param b a {@code Binding} from which the JDBC type can be retrieved + * @param forContextItem whether the type being derived is for a context + * item or (if false) for a named parameter. + * @return a {@code SequenceType} (always a singleton in the + * {@code forContextItem} case) + */ + private static SequenceType determineXQueryFormalType( + Binding b, boolean forContextItem) + throws SQLException + { + int sd = b.typeJDBC(); + OccurrenceIndicator suffix; + /* + * The SQL/XML standard uses a formal type notation straight out of + * the XQuery 1.0 and XPath 2.0 Formal Semantics document, and that is + * strictly more fine-grained and expressive than anything you can + * actually say in the form of XQuery SequenceTypes. This method will + * simply return the nearest approximation in the form of a sequence + * type; some of the standard's distinct formal type notations will + * collapse into the same SequenceType. + * That also means the various cases laid out in the standard will, + * here, all simply assign some ItemType to 'it', and therefore the + * tacking on of the occurrence suffix can be factored out for the + * very end. + */ + ItemType it; + + if ( forContextItem ) + suffix = OccurrenceIndicator.ONE; + // else if sd is XML(SEQUENCE) - we don't have this type yet + // suffix = OccurrenceIndicator.ZERO_OR_MORE; + /* + * Go through the motions of checking isNullable, though PL/Java's JDBC + * currently hardcodes columnNullableUnknown. Maybe someday it won't. + */ + else if ( b.knownNonNull() ) + suffix = OccurrenceIndicator.ONE; + else + suffix = OccurrenceIndicator.ZERO_OR_ONE; + + // Define ET... for {DOCUMENT|CONTENT}(XMLSCHEMA) case ... not supported + + // if SD is XML(DOCUMENT(UNTYPED)) - not currently tracked, can't tell + // it = s_itf.getDocumentTest(item type for xdt:untyped); + // else if SD is XML(DOCUMENT(ANY)) - not currently tracked, can't tell + // it = s_itf.getDocumentTest(item type for xs:anyType); + // else if SD is XML(DOCUMENT(XMLSCHEMA)) - unsupported and can't tell + // it = s_itf.getDocumentTest(the ET... we didn't define earlier) + // else if SD is XML(CONTENT(UNTYPED)) - which we're not tracking ... + // at s9api granularity, there's no test for this that's not same as: + // else if SD is XML(CONTENT(ANY)) - which we must assume for ANY XML + if ( Types.SQLXML == sd ) + it = s_itf.getNodeKindTest(DOCUMENT); + // else if SD is XML(CONTENT(XMLSCHEMA)) - we don't track and can't tell + // at s9api granularity, there's no test that means this anyway. + // else if SD is XML(SEQUENCE) - we really should have this type, but no + // it = it.ANY_ITEM + else // it ain't XML, it's some SQL type + { + ItemType xmlt = mapSQLDataTypeToXMLSchemaDataType( + b, XMLBinary.HEX, Nulls.ABSENT); + // ItemType pt = xmlt.getUnderlyingItemType().getPrimitiveType() + // .somehowGetFromUnderlyingPTBackToS9apiPT() - ugh, the hard part + /* + * The intention here is to replace any derived type with the + * primitive type it is based on, *except* for three types that are + * technically derived: integer (from decimal), yearMonthDuration + * and dayTimeDuration (from duration). 
Those are not replaced, so + * they stand, as if they were honorary primitive types. + * + * For now, it's simplified greatly by mapSQLDataType... skipping + * the construction of a whole derived XML Schema snippet, and just + * returning the type we want anyway. Also, no need to dive under + * the s9api layer to try to make getPrimitiveType work. + */ + it = xmlt; + } + + SequenceType xftn = makeSequenceType(it, suffix); + return xftn; + } + + @SuppressWarnings("fallthrough") + private static ItemType mapSQLDataTypeToXMLSchemaDataType( + Binding b, XMLBinary xmlbinary, Nulls nulls) + throws SQLException + { + /* + * Nearly all of the fussing about specified in the standard + * for this method is to create XML Schema derived types that + * accurately reflect the typmod information for the SQL type + * in question. Then, in determineXQueryFormalType (the only + * client of this method so far!), all of that is thrown away + * and our painstakingly specified derived type is replaced with + * the primitive type we based it on. That simplifies a lot. :) + * For now, forget the derived XML Schema declarations, and just + * return the primitive types they would be based on. + * + * The need for the nulls parameter vanishes if no XML Schema snippets + * are to be generated. + * + * If the full XML Schema snippet generation ever proves to be + * needed, one hacky way to get it would be with a SELECT + * query_to_xmlschema('SELECT null::type-in-question', false, false, + * '') where the same derivations are already implemented (though it + * produces some different results; that work may have been done from + * an earlier version of the standard). + */ + switch ( b.typeJDBC() ) + { + case Types.CHAR: + case Types.VARCHAR: + case Types.CLOB: + return ItemType.STRING; + + case Types.BINARY: + case Types.VARBINARY: + case Types.BLOB: + return XMLBinary.HEX == xmlbinary ? + ItemType.HEX_BINARY : ItemType.BASE64_BINARY; + + case Types.NUMERIC: + case Types.DECIMAL: + /* + * Go through the motions to get the scale and do this right, + * though PL/Java's getScale currently hardcodes a -1 return. + * Maybe someday it won't. + */ + int scale = b.scale(); + return 0 == scale ? ItemType.INTEGER : ItemType.DECIMAL; + + case Types.INTEGER: + return ItemType.INT; + case Types.SMALLINT: + return ItemType.SHORT; + case Types.BIGINT: + return ItemType.LONG; + + case Types.REAL: + return ItemType.FLOAT; // could check P, MINEXP, MAXEXP here. + case Types.FLOAT: + assert false; // PG should always report either REAL or DOUBLE + /*FALLTHROUGH*/ + case Types.DOUBLE: + return ItemType.DOUBLE; + + case Types.BOOLEAN: + return ItemType.BOOLEAN; + + case Types.DATE: + return ItemType.DATE; + + case Types.TIME: + return ItemType.TIME; + + case Types.TIME_WITH_TIMEZONE: + return ItemType.TIME; // restrictive facet would make sense here + + case Types.TIMESTAMP: + return ItemType.DATE_TIME; + + case Types.TIMESTAMP_WITH_TIMEZONE: + return ItemType.DATE_TIME_STAMP; // xsd 1.1 equivalent of facet! 
+ + // There's no JDBC Types.INTERVAL; handle it after switch + + // Good luck finding out from JDBC if it's a domain + + // PG doesn't have DISTINCT types per se + + // PL/Java's JDBC doesn't support PostgreSQL's arrays as ARRAY + + // PG doesn't seem to have multisets (JDBC doesn't grok them either) + + // Types.SQLXML we could recognize, but for determineFormalTypes it has + // been handled already, and it's not yet clear what would be + // appropriate to return (short of the specified XMLSchema snippet), + // probably just document. + + // So punt all these for now; what hasn't been handled in this switch + // can be handled specially after the switch falls through, and what + // isn't, isn't supported just now. + } + + String typeName = b.typePG(); + if ( "interval".equals(typeName) ) + { + /* + * XXX This isn't right yet; it needs to be refined to a + * YEAR_MONTH_DURATION or a DAY_TIME_DURATION in the appropriate + * cases, and for that it needs access to the typmod information + * for the type, which getColumnTypeName doesn't now provide. + */ + return ItemType.DURATION; + } + + throw new SQLNonTransientException(String.format( + "Mapping SQL type \"%s\" to XML type not supported", typeName), + "0N000"); + } + + /** + * Implement that portion of the {@code } specification where + * the target data type is sequence, and (for now, anyway) the source is + * not an XML type; the only caller, so far, handles that case separately. + * @param v The SQL value to be cast (in the form of an Object from JDBC). + * @param enc Whether binary values should be encoded in hex or base 64. + * @param xst The formal static XS type derived from the SQL type of v. + * @return An {@code XdmValue}, {@code null} if {@code v} is null. + */ + private static XdmValue xmlCastAsSequence( + Object v, XMLBinary enc, ItemType xst) + throws SQLException + { + if ( null == v ) + return null; + /* + * What happens next in the standard is one of the most breathtaking + * feats of obscurantism in the whole document. It begins, plausibly + * enough, by using mapValuesOfSQLTypesToValuesOfXSTypes to produce + * the lexical form of the XS type (but with XML metacharacters escaped, + * if it's a string type). Then: + * 1. That lexical form is to be fed to an XML parser, producing an + * XQuery document node that NEVER can be a well-formed document (it + * is expected to satisfy document { text ? } where the text node is + * just the lexical value form we started with, now with the escaped + * metacharacters unescaped again as a consequence of parsing). For + * some source types, mapValuesOfSQLTypesToValuesOfXSTypes can + * produce a string that parses to XML with element content: row + * types, arrays, multisets, XML. Clearly, those cases can't satisfy + * the formal type assumed here, and they are cases this routine + * won't be expected to handle: XML handled separately by the caller, + * arrays/structs/etc. being ruled out by assertCanCastAsXmlSequence. + * 2. That document node is made the $TEMP parameter of an XML Query, + * '$TEMP cast as XSBTN' (where XSBTN is a QName for the result type + * chosen according to the rules) and the sequence resulting from + * that query is the result of the cast. + * + * Step (1) can only succeed if the XML parser doesn't insist on well- + * formed documents, as the stock JRE parser emphatically does. 
And the + * ultimate effect of that whole dance is that the cast in (2) casts a + * document node to the target type, which means the document node gets + * atomized, which, for a document node, means everything is thrown away + * save the concatenated values of its descendant text nodes (or node, + * in this case; haven't we seen that value somewhere before?), assigned + * the type xs:untypedAtomic, and then that's operated on by the cast. + * + * Because this implementation's in PL/Java, the value v received here + * has already been mapped from an SQL type to a Java type according to + * JDBC's rules as PL/Java implements them, so there's one degree of + * removal from the specified algorithm anyway. And the s9api + * XdmAtomicValue already has constructors from several of the expected + * Java types, as well as one taking a lexical form and explicit type. + * Beause this is /example/ code, rather than slavishly implementing the + * specified algorithm, it will assume that that is either roughly or + * exactly equivalent to what these s9api constructors in fact do, and + * just use them; conformance-testing code could then check for exact + * equivalence if there's enough interest to write it. + * + * So, we will NOT start with this: + * + * String xmlv = mapValuesOfSQLTypesToValuesOfXSTypes( + * v, enc, Nulls.ABSENT, true); + * + * Instead, we'll derive this type first ... + */ + ItemType xsbt; + // year-month interval type => xsbt = YEAR_MONTH_DURATION + // day-time interval type => xsbt = DAY_TIME_DURATION + xsbt = xst; // we have a winner! + // xs non-built-in atomic type => xsbt = getPrimitiveType(ugh). + + /* + * ... and then use this method instead: + */ + try + { + return mapJDBCofSQLvalueToXdmAtomicValue(v, enc, xsbt); + } + catch ( SaxonApiException | XPathException e ) + { + throw new SQLException(e.getMessage(), "10000", e); + } + } + + @FunctionalInterface + interface CastingFunction + { + AtomicValue apply(AtomicValue v) throws XPathException; + } + + @FunctionalInterface + interface CasterSupplier + { + CastingFunction get() throws SQLException, XPathException; + } + + @FunctionalInterface + interface AtomizingFunction + { + /** + * @param v sequence to be atomized + * @param columnIndex only to include in exception if result has more + * than one item + */ + XdmValue apply(XdmValue v, int columnIndex) + throws SaxonApiException, XPathException; + } + + private static XPathException noPrimitiveCast(ItemType vt, ItemType xt) + { + return new XPathException( + "Casting from " + vt.getTypeName() + " to " + xt.getTypeName() + + " can never succeed", "XPTY0004"); + } + + /** + * Handle the case of XMLCAST to a non-XML target type when the cast operand + * is already a single atomic value. + *
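+ * For example (purely as an illustration of the branches implemented
+ * below): with a {@code varchar} result column the cast ends in
+ * {@code ResultSet.updateString} with the value's lexical form, while a
+ * {@code date} column is stored via {@code updateObject} with a
+ * {@code java.time.LocalDate}.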

    + * The caller, if operating on a sequence, must itself handle the case of + * an empty sequence (returning null, per General Rule 4c in :2011), or a + * sequence of length greater than one (raising XPTY0004, which is not + * specified in :2011, but the exclusion of such a sequence is implicit in + * rules 4g and 4h; Db2 silently drops all but the first item, unlike + * Oracle, which raises XPTY0004). + * @param av The atomic operand value + * @param p The parameter binding, recording the needed type information + * @param rs ResultSet into which the value will be stored + * @param col Index of the result column + */ + private static void xmlCastAsNonXML( + XdmAtomicValue av, ItemType vt, + Binding.Parameter p, ResultSet rs, int col, XMLBinary enc) + throws SQLException, XPathException + { + XdmAtomicValue bv; + ItemType xt = p.typeXT(enc); + + CastingFunction caster = p.atomicCaster(vt, () -> + { + ConversionRules rules = vt.getConversionRules(); + Converter c1; + ItemType t1; + Converter c2; + + switch ( p.typeJDBC() ) + { + case Types.TIMESTAMP: + t1 = ItemType.DATE_TIME; + break; + case Types.TIME: + t1 = ItemType.TIME; + break; + case Types.DATE: + t1 = ItemType.DATE; + break; + default: + c1 = rules.getConverter( + (AtomicType)vt.getUnderlyingItemType(), + (AtomicType)xt.getUnderlyingItemType()); + if ( null == c1 ) + throw noPrimitiveCast(vt, xt); + return (AtomicValue v) -> c1.convert(v).asAtomic(); + } + /* + * Nothing left here but the rest of the three date/timey cases + * partly handled above. + */ + c1 = rules.getConverter( + (AtomicType)vt.getUnderlyingItemType(), + (AtomicType)t1.getUnderlyingItemType()); + c2 = rules.getConverter( + (AtomicType)t1.getUnderlyingItemType(), + (AtomicType)xt.getUnderlyingItemType()); + if ( null == c1 || null == c2 ) + throw noPrimitiveCast(vt, xt); + return (AtomicValue v) -> + { + v = c1.convert(v).asAtomic(); + v = ((CalendarValue)v).adjustTimezone(0).removeTimezone(); + return c2.convert(v).asAtomic(); + }; + }); + + bv = makeAtomicValue(caster.apply(av.getUnderlyingValue())); + + if ( ItemType.STRING.subsumes(xt) ) + rs.updateString(col, bv.getStringValue()); + + else if ( ItemType.HEX_BINARY.subsumes(xt) ) + rs.updateBytes(col, + ((HexBinaryValue)bv.getUnderlyingValue()).getBinaryValue()); + else if ( ItemType.BASE64_BINARY.subsumes(xt) ) + rs.updateBytes(col, + ((Base64BinaryValue)bv.getUnderlyingValue()).getBinaryValue()); + + else if ( ItemType.DECIMAL.subsumes(xt) ) + rs.updateObject(col, bv.getValue()); + + /* + * The standard calls for throwing "data exception - numeric value out + * of range" rather than forwarding a float or double inf, -inf, or nan + * to SQL, but PostgreSQL supports those values, and these conversions + * preserve them. + * Because of the collapsing in typeXT(), xt will never be FLOAT, + * only DOUBLE. JDBC is supposed to handle assigning a double to a float + * column, anyway. + */ + else if ( ItemType.DOUBLE.subsumes(xt) ) + rs.updateObject(col, bv.getValue()); + + else if ( ItemType.DATE.subsumes(xt) ) + rs.updateObject(col, bv.getLocalDate()); + else if ( ItemType.DATE_TIME.subsumes(xt) ) + { + if ( ((CalendarValue)bv.getUnderlyingValue()).hasTimezone() ) + rs.updateObject(col, bv.getOffsetDateTime()); + else + { + LocalDateTime jv = bv.getLocalDateTime(); + rs.updateObject(col, + Types.TIMESTAMP_WITH_TIMEZONE == p.typeJDBC() ? 
+ jv.atOffset(UTC) : jv); + } + } + else if ( ItemType.TIME.subsumes(xt) ) // no handy tz/notz distinction + { + if ( ((CalendarValue)bv.getUnderlyingValue()).hasTimezone() ) + rs.updateObject(col, OffsetTime.parse(bv.getStringValue())); + else + { + LocalTime jv = LocalTime.parse(bv.getStringValue()); + rs.updateObject(col, + Types.TIME_WITH_TIMEZONE == p.typeJDBC() ? + jv.atOffset(UTC) : jv); + } + } + + else if ( ItemType.YEAR_MONTH_DURATION.subsumes(xt) ) + rs.updateString(col, toggleIntervalRepr(bv.getStringValue())); + else if ( ItemType.DAY_TIME_DURATION.subsumes(xt) ) + rs.updateString(col, toggleIntervalRepr(bv.getStringValue())); + else if ( ItemType.DURATION.subsumes(xt) ) // need this case for now + rs.updateString(col, toggleIntervalRepr(bv.getStringValue())); + + else if ( ItemType.BOOLEAN.subsumes(xt) ) + rs.updateObject(col, bv.getValue()); + else + throw new SQLNonTransientException(String.format( + "Mapping XML type \"%s\" to SQL value not supported", xt), + "0N000"); + } + + /** + * Like the "Mapping values of SQL data types to values of XML Schema + * data types" algorithm, except after the SQL values have already been + * converted to Java values according to JDBC rules. + *
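+ * For example (an illustration of the cases below): a Java {@code Integer}
+ * received for an SQL {@code integer} column, whose declared formal type is
+ * {@code xs:int}, is handed directly to an {@code XdmAtomicValue}
+ * constructor, with no detour through a lexical (string) form.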

    + * Also, this uses Saxon s9api constructors for the XML Schema values, which + * accept the Java types directly. As a consequence, where the target type + * {@code xst} is {@code xs:hexBinary} or {@code xs:base64Binary}, that type + * will be produced, regardless of the passed {@code encoding}. This might + * not be strictly correct, but is probably safest until an oddity in the + * spec can be clarified: {@code determineXQueryFormalType} will always + * declare {@code xs:hexBinary} as the type for an SQL byte string, and it + * would violate type safety to construct a value here that honors the + * {@code encoding} parameter but isn't of the declared formal type. + */ + private static XdmAtomicValue mapJDBCofSQLvalueToXdmAtomicValue( + Object dv, XMLBinary encoding, ItemType xst) + throws SQLException, SaxonApiException, XPathException + { + if ( ItemType.STRING.equals(xst) ) + return new XdmAtomicValue((String)dv); + + if ( ItemType.HEX_BINARY.equals(xst) ) + return makeAtomicValue(new HexBinaryValue((byte[])dv)); + if ( ItemType.BASE64_BINARY.equals(xst) ) + return makeAtomicValue(new Base64BinaryValue((byte[])dv)); + + if ( ItemType.INTEGER.equals(xst) ) + return new XdmAtomicValue(((BigInteger)dv).toString(), xst); + if ( ItemType.DECIMAL.equals(xst) ) + return new XdmAtomicValue((BigDecimal)dv); + if ( ItemType.INT.equals(xst) ) + return new XdmAtomicValue((Integer)dv); + if ( ItemType.SHORT.equals(xst) ) + return new XdmAtomicValue((Short)dv); + if ( ItemType.LONG.equals(xst) ) + return new XdmAtomicValue((Long)dv); + if ( ItemType.FLOAT.equals(xst) ) + return new XdmAtomicValue((Float)dv); + if ( ItemType.DOUBLE.equals(xst) ) + return new XdmAtomicValue((Double)dv); + + if ( ItemType.BOOLEAN.equals(xst) ) + return new XdmAtomicValue((Boolean)dv); + + if ( ItemType.DATE.equals(xst) ) + { + if ( dv instanceof LocalDate ) + return new XdmAtomicValue((LocalDate)dv); + return new XdmAtomicValue(dv.toString(), xst); + } + + if ( ItemType.TIME.equals(xst) ) + return new XdmAtomicValue(dv.toString(), xst); + + if ( ItemType.DATE_TIME.equals(xst) ) + { + if ( dv instanceof LocalDateTime ) + return new XdmAtomicValue((LocalDateTime)dv); + return new XdmAtomicValue(dv.toString(), xst); + } + + if ( ItemType.DATE_TIME_STAMP.equals(xst) ) + { + if ( dv instanceof OffsetDateTime ) + return new XdmAtomicValue((OffsetDateTime)dv); + return new XdmAtomicValue(dv.toString(), xst); + } + + if ( ItemType.DURATION.equals(xst) ) + return new XdmAtomicValue(toggleIntervalRepr((String)dv), xst); + + throw new SQLNonTransientException(String.format( + "Mapping SQL value to XML type \"%s\" not supported", xst), + "0N000"); + } + + /* + * Toggle the lexical representation of an interval/duration between the + * form PostgreSQL likes and the form XML Schema likes. Only negative values + * are affected. Positive values are returned unchanged, as are those that + * don't fit any expected form; those will probably be reported as malformed + * by whatever tries to consume them. + */ + static String toggleIntervalRepr(String lex) + { + Matcher m = s_intervalSigns.matcher(lex); + if ( ! 
m.matches() ) + return lex; // it's weird, just don't touch it + if ( -1 == m.start(1) ) + { + if ( -1 != m.start(2) && -1 == m.start(3) ) // it's PG negative + return '-' + lex.replace("-", ""); // make it XS negative + } + else if ( -1 == m.start(2) && -1 != m.start(3) )// it's XS negative + return m.usePattern(s_intervalSignSite) // make it PG negative + .reset(lex.substring(1)).replaceAll("-"); + return lex; // it's either positive, or weird, just don't touch it + } + + static Iterable> namespaceBindings(String[] nbs) + throws SQLException + { + if ( 1 == nbs.length % 2 ) + throw new SQLSyntaxErrorException( + "Namespace binding array must have even length", "42000"); + Map m = new HashMap<>(); + + for ( int i = 0; i < nbs.length; i += 2 ) + { + String prefix = nbs[i]; + String uri = nbs[1 + i]; + + if ( null == prefix || null == uri ) + throw new SQLDataException( + "Namespace binding array elements must not be null", + "22004"); + + if ( ! "".equals(prefix) ) + { + if ( ! isValidNCName(prefix) ) + throw new SQLSyntaxErrorException( + "Not an XML NCname: \"" + prefix + '"', "42602"); + if ( XML_NS_PREFIX.equals(prefix) + || XMLNS_ATTRIBUTE.equals(prefix) ) + throw new SQLSyntaxErrorException( + "Namespace prefix may not be xml or xmlns", "42939"); + if ( XML_NS_URI.equals(uri) + || XMLNS_ATTRIBUTE_NS_URI.equals(uri) ) + throw new SQLSyntaxErrorException( + "Namespace URI has a disallowed value", "42P17"); + if ( "".equals(uri) ) + throw new SQLSyntaxErrorException( + "URI for non-default namespace may not be zero-length", + "42P17"); + } + + String was = m.put(prefix.intern(), uri.intern()); + + if ( null != was ) + throw new SQLSyntaxErrorException( + "Namespace prefix \"" + prefix + "\" multiply bound (" + + "to \"" + was + "\" and \"" + uri + "\")", "42712"); + } + + return Collections.unmodifiableSet(m.entrySet()); + } + + static class Binding + { + String typePG() throws SQLException + { + if ( null != m_typePG ) + return m_typePG; + return m_typePG = implTypePG(); + } + + int typeJDBC() throws SQLException + { + if ( null != m_typeJDBC ) + return m_typeJDBC; + int tj = implTypeJDBC(); + /* + * The JDBC types TIME_WITH_TIMEZONE and TIMESTAMP_WITH_TIMEZONE + * first appear in JDBC 4.2 / Java 8. PL/Java's JDBC driver does + * not yet return those values. As a workaround until it does, + * recheck here using the PG type name string, if TIME or TIMESTAMP + * is the JDBC type that the driver returned. + * + * Also for backward compatibility, the driver still returns + * Types.OTHER for XML, rather than Types.SQLXML. Check and fix that + * here too. + */ + switch ( tj ) + { + case Types.OTHER: + if ( "xml".equals(typePG()) ) + tj = Types.SQLXML; + break; + case Types.TIME: + if ( "timetz".equals(typePG()) ) + tj = Types.TIME_WITH_TIMEZONE; + break; + case Types.TIMESTAMP: + if ( "timestamptz".equals(typePG()) ) + tj = Types.TIMESTAMP_WITH_TIMEZONE; + break; + default: + } + return m_typeJDBC = tj; + } + + Object valueJDBC() throws SQLException + { + if ( m_valueJDBCValid ) + return m_valueJDBC; + /* + * When JDBC 4.2 added support for the JSR 310 date/time types, for + * back-compatibility purposes, it did not change what types a plain + * getObject(...) would return for them, which could break existing + * code. Instead, it's necessary to use the form of getObject that + * takes a Class, and ask for the new classes explicitly. + * + * Similarly, PL/Java up through 1.5.0 has always returned a String + * from getObject for a PostgreSQL xml type. 
Here, the JDBC standard + * provides that a SQLXML object should be returned, and that should + * happen in a future major PL/Java release, but for now, the plain + * getObject will still return String, so it is also necessary to + * ask for the SQLXML type explicitly. In fact, we will ask for + * XdmNode, as it might be referred to more than once (if a + * parameter), and a SQLXML can't be read more than once, nor would + * there be any sense in building an XdmNode from it more than once. + */ + switch ( typeJDBC() ) + { + case Types.DATE: + return setValueJDBC(implValueJDBC(LocalDate.class)); + case Types.TIME: + return setValueJDBC(implValueJDBC(LocalTime.class)); + case Types.TIME_WITH_TIMEZONE: + return setValueJDBC(implValueJDBC(OffsetTime.class)); + case Types.TIMESTAMP: + return setValueJDBC(implValueJDBC(LocalDateTime.class)); + case Types.TIMESTAMP_WITH_TIMEZONE: + return setValueJDBC(implValueJDBC(OffsetDateTime.class)); + case Types.SQLXML: + return setValueJDBC(implValueJDBC(XdmNode.class)); + default: + } + return setValueJDBC(implValueJDBC()); + } + + boolean knownNonNull() throws SQLException + { + if ( null != m_knownNonNull ) + return m_knownNonNull; + return m_knownNonNull = implKnownNonNull(); + } + + int scale() throws SQLException + { + if ( null != m_scale ) + return m_scale; + return m_scale = implScale(); + } + + static class ContextItem extends Binding + { + /** + * Return the XML Schema type of this input binding for a context + * item. + *

    + * Because it is based on {@code determinXQueryFormalType}, this + * method is not parameterized by {@code XMLBinary}, and will always + * map a binary-string SQL type to {@code xs:hexBinary}. + */ + ItemType typeXS() throws SQLException + { + if ( null != m_typeXS ) + return m_typeXS; + SequenceType st = implTypeXS(true); + assert OccurrenceIndicator.ONE == st.getOccurrenceIndicator(); + return m_typeXS = st.getItemType(); + } + + protected ItemType m_typeXS; + } + + static class Parameter extends Binding + { + String name() + { + return m_name; + } + + SequenceType typeXS() throws SQLException + { + if ( null != m_typeXS ) + return m_typeXS; + return m_typeXS = implTypeXS(false); + } + + /** + * Return the XML Schema type collapsed according to the Syntax Rule + * deriving {@code XT} for {@code XMLCAST}. + *
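+ * For example (an illustration of the collapsing as the overriding
+ * implementation does it): the {@code xs:short} or {@code xs:long} that
+ * {@code typeXS} would report for an SQL {@code smallint} or {@code bigint}
+ * collapses to {@code xs:integer}, {@code xs:float} collapses to
+ * {@code xs:double}, and {@code xs:dateTimeStamp} to {@code xs:dateTime}.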

    + * The intent of the rule is unclear, but it involves collapsing + * certain sets of more-specific types that {@code typeXS} might + * return into common supertypes, for use only in an intermediate + * step of {@code xmlCastAsNonXML}. Unlike {@code typeXS}, this + * method must be passed an {@code XMLBinary} parameter reflecting + * the hex/base64 choice currently in scope. + * @param enc whether to use {@code xs:hexBinary} or + * {@code xs:base64Binary} as the XML Schema type corresponding to a + * binary-string SQL type. + */ + ItemType typeXT(XMLBinary enc) throws SQLException + { + throw new UnsupportedOperationException( + "typeXT() on synthetic binding"); + } + + /** + * Memoize and return a casting function from a given + * {@code ItemType} to the type of this parameter. + *

    + * Used only by {@code xmlCastAsNonXML}, which does all the work + * of constructing the function; this merely allows it to be + * remembered, if many casts to the same output parameter will be + * made (as by {@code xmltable}). + */ + CastingFunction atomicCaster(ItemType it, CasterSupplier s) + throws SQLException, XPathException + { + throw new UnsupportedOperationException( + "atomicCaster() on synthetic binding"); + } + + protected SequenceType m_typeXS; + + private final String m_name; + + /** + * @param name The SQL name of the parameter + * @param checkName True if the name must be a valid NCName (as for + * an input parameter from SQL to the XML query context), or false + * if the name doesn't matter (as when it describes a result, or the + * sole input value of an XMLCAST. + * @throws SQLException if the name of a checked input parameter + * isn't a valid NCName. + */ + protected Parameter(String name, boolean checkName) + throws SQLException + { + if ( checkName && ! isValidNCName(name) ) + throw new SQLSyntaxErrorException( + "Not an XML NCname: \"" + name + '"', "42602"); + m_name = name; + } + } + + protected String m_typePG; + protected Integer m_typeJDBC; + protected Boolean m_knownNonNull; + protected Integer m_scale; + private Object m_valueJDBC; + private boolean m_valueJDBCValid; + protected Object setValueJDBC(Object v) + { + m_valueJDBCValid = true; + return m_valueJDBC = v; + } + + protected String implTypePG() throws SQLException + { + throw new UnsupportedOperationException( + "typePG() on synthetic binding"); + } + + protected int implTypeJDBC() throws SQLException + { + throw new UnsupportedOperationException( + "typeJDBC() on synthetic binding"); + } + + protected boolean implKnownNonNull() throws SQLException + { + throw new UnsupportedOperationException( + "knownNonNull() on synthetic binding"); + } + + protected int implScale() throws SQLException + { + throw new UnsupportedOperationException( + "scale() on synthetic binding"); + } + + protected Object implValueJDBC() throws SQLException + { + throw new UnsupportedOperationException( + "valueJDBC() on synthetic binding"); + } + + /* + * This implementation just forwards to the type-less version, then + * fails if that did not return the wanted type. Override if a smarter + * behavior is possible. + */ + protected T implValueJDBC(Class type) throws SQLException + { + return type.cast(implValueJDBC()); + } + + protected SequenceType implTypeXS(boolean forContextItem) + throws SQLException + { + return determineXQueryFormalType(this, forContextItem); + } + + static class Assemblage implements Iterable + { + ContextItem contextItem() { return m_contextItem; } + + @Override + public Iterator iterator() + { + return m_params.iterator(); + } + + protected ContextItem m_contextItem; + protected Collection m_params = Collections.emptyList(); + } + } + + static class BindingsFromResultSet extends Binding.Assemblage + { + /** + * Construct the bindings from a ResultSet representing input parameters + * to an XML query. + * @param rs ResultSet representing the input parameters. Column names + * "." and "?COLUMN?" are treated specially, and used to supply the + * query's context item; every other column name must be a valid NCName, + * and neither any named parameter nor the context item may be mentioned + * more than once. + * @param checkNames True if the input parameter names matter (a name of + * "." or "?COLUMN?" 
will define the context item, and any other name + * must be a valid NCName); false to skip such checking (as for the + * single input value to XMLCAST, whose name doesn't matter). + * @throws SQLException if names are duplicated or invalid. + */ + BindingsFromResultSet(ResultSet rs, boolean checkNames) + throws SQLException + { + m_resultSet = rs; + m_rsmd = rs.getMetaData(); + + int nParams = m_rsmd.getColumnCount(); + ContextItem contextItem = null; + Map n2b = new HashMap<>(); + + if ( 0 < nParams ) + m_dBuilder = s_s9p.newDocumentBuilder(); + + for ( int i = 1; i <= nParams; ++i ) + { + String label = m_rsmd.getColumnLabel(i); + if ( checkNames && + ("?COLUMN?".equals(label) || ".".equals(label)) ) + { + if ( null != contextItem ) + throw new SQLSyntaxErrorException( + "Context item supplied more than once (at " + + contextItem.m_idx + " and " + i + ')', "42712"); + contextItem = new ContextItem(i); + continue; + } + + Parameter was = + (Parameter)n2b.put( + label, new Parameter(label, i, checkNames)); + if ( null != was ) + throw new SQLSyntaxErrorException( + "Name \"" + label + "\" duplicated at positions " + + was.m_idx + " and " + i, "42712"); + } + + m_contextItem = contextItem; + m_params = n2b.values(); + } + + /** + * Construct the bindings from a ResultSet representing output + * parameters (as from XMLTABLE). + * @param rs ResultSet representing the result parameters. Names have + * no particular significance and are not subject to any checks. + * @param exprs Compiled evaluators for the supplied column expressions. + * The number of these must match the number of columns in {@code rs}. + * One of these (and no more than one; the caller will have enforced + * that) is allowed to be null, making the corresponding column + * "FOR ORDINALITY". An ordinality column will be checked to ensure it + * has an SQL type that is (ahem) "exact numeric with scale 0 (zero)." + * May be null if this is some other general-purpose output result set, + * not for an XMLTABLE. + * @throws SQLException if numbers of columns and expressions don't + * match, or there is an ordinality column and its type is not suitable. 
+ */ + @SuppressWarnings("fallthrough") + BindingsFromResultSet(ResultSet rs, XQueryEvaluator[] exprs) + throws SQLException + { + m_resultSet = rs; + m_rsmd = rs.getMetaData(); + + int nParams = m_rsmd.getColumnCount(); + if ( null != exprs && nParams != exprs.length ) + throw new SQLSyntaxErrorException( + "Not as many supplied column expressions as output columns", + "42611"); + + Binding.Parameter[] ps = new Binding.Parameter[ nParams ]; + + for ( int i = 1; i <= nParams; ++i ) + { + String label = m_rsmd.getColumnLabel(i); + Parameter p = new Parameter(label, i, false); + ps [ i - 1 ] = p; + if ( null != exprs && null == exprs [ i - 1 ] ) + { + switch ( p.typeJDBC() ) + { + case Types.INTEGER: + case Types.SMALLINT: + case Types.BIGINT: + break; + case Types.NUMERIC: + case Types.DECIMAL: + int scale = p.scale(); + if ( 0 == scale || -1 == scale ) + break; + /*FALLTHROUGH*/ + default: + throw new SQLSyntaxErrorException( + "Column FOR ORDINALITY must have an exact numeric" + + " type with scale zero.", "42611"); + } + } + } + + m_params = asList(ps); + } + + private ResultSet m_resultSet; + private ResultSetMetaData m_rsmd; + DocumentBuilder m_dBuilder; + + T typedValueAtIndex(int idx, Class type) throws SQLException + { + if ( XdmNode.class != type ) + return m_resultSet.getObject(idx, type); + try + { + SQLXML sx = m_resultSet.getObject(idx, SQLXML.class); + return type.cast( + m_dBuilder.build(sx.getSource((Class)null))); + } + catch ( SaxonApiException e ) + { + throw new SQLException(e.getMessage(), "10000", e); + } + } + + class ContextItem extends Binding.ContextItem + { + final int m_idx; + + ContextItem(int index) { m_idx = index; } + + protected String implTypePG() throws SQLException + { + return m_rsmd.getColumnTypeName(m_idx); + } + + protected int implTypeJDBC() throws SQLException + { + return m_rsmd.getColumnType(m_idx); + } + + protected int implScale() throws SQLException + { + return m_rsmd.getScale(m_idx); + } + + protected Object implValueJDBC() throws SQLException + { + return m_resultSet.getObject(m_idx); + } + + protected T implValueJDBC(Class type) throws SQLException + { + return typedValueAtIndex(m_idx, type); + } + } + + class Parameter extends Binding.Parameter + { + final int m_idx; + private ItemType m_typeXT; + private CastingFunction m_atomCaster; + private ItemType m_lastCastFrom; + + Parameter(String name, int index, boolean isInput) + throws SQLException + { + super(name, isInput); + m_idx = index; + } + + @Override + ItemType typeXT(XMLBinary enc) throws SQLException + { + if ( null != m_typeXT ) + return m_typeXT; + + ItemType it = + mapSQLDataTypeToXMLSchemaDataType(this, enc, Nulls.ABSENT); + if ( ! ItemType.ANY_ATOMIC_VALUE.subsumes(it) ) + return m_typeXT = it; + + if ( it.equals(ItemType.INTEGER) ) + { + int tj = typeJDBC(); + if ( Types.NUMERIC == tj || Types.DECIMAL == tj ) + it = ItemType.DECIMAL; + } + else if ( ItemType.INTEGER.subsumes(it) ) + it = ItemType.INTEGER; + else if ( ItemType.FLOAT.subsumes(it) ) + it = ItemType.DOUBLE; + else if ( ItemType.DATE_TIME_STAMP.subsumes(it) ) + it = ItemType.DATE_TIME; + + return m_typeXT = it; + } + + @Override + CastingFunction atomicCaster(ItemType it, CasterSupplier s) + throws SQLException, XPathException + { + if ( null == m_atomCaster || ! 
it.equals(m_lastCastFrom) ) + { + m_atomCaster = s.get(); + m_lastCastFrom = it; + } + return m_atomCaster; + } + + protected String implTypePG() throws SQLException + { + return m_rsmd.getColumnTypeName(m_idx); + } + + protected int implTypeJDBC() throws SQLException + { + return m_rsmd.getColumnType(m_idx); + } + + protected boolean implKnownNonNull() throws SQLException + { + return columnNoNulls == m_rsmd.isNullable(m_idx); + } + + protected int implScale() throws SQLException + { + return m_rsmd.getScale(m_idx); + } + + protected Object implValueJDBC() throws SQLException + { + return m_resultSet.getObject(m_idx); + } + + protected T implValueJDBC(Class type) throws SQLException + { + return typedValueAtIndex(m_idx, type); + } + } + } + + static class BindingsFromXQX extends Binding.Assemblage + { + /** + * Construct a new assemblage of bindings for the static context of an + * XMLTABLE column expression. It will have the same named-parameter + * bindings passed to the row expression, but the static type of the + * context item will be the result type of the row expression. The + * {@code ContextItem} in this assemblage will have no associated value; + * the caller is responsible for retrieving that from the row evaluator + * and storing it in the column expression context every iteration. + * @param xqx The result of compiling the row expression; its + * compiler-determined static result type will be used as the static + * context item type. + * @param params The bindings supplied to the row expression. Its named + * parameters will be copied as the named parameters here. + */ + BindingsFromXQX(XQueryExecutable xqx, Binding.Assemblage params) + { + m_params = params.m_params; + m_contextItem = new ContextItem(xqx.getResultItemType()); + } + + static class ContextItem extends Binding.ContextItem + { + ContextItem(ItemType it) + { + m_typeXS = it; + /* + * There needs to be a dummy JDBC type to return when queried + * for purposes of assertCanCastAsXmlSequence. It can literally + * be any type outside of the few that method rejects. Because + * the XS type is already known, nothing else will need to ask + * for this, or care. + */ + m_typeJDBC = Types.OTHER; + } + } + } + + /* + * The XQuery-regular-expression-based functions added in 9075-2:2006. + * + * For each function below, a parameter is marked //strict if the spec + * explicitly says the result is NULL when that parameter is NULL. The + * parameters not marked //strict (including the non-standard w3cNewlines + * added here) all have non-null defaults, so by executive decision, these + * functions will all get the onNullInput=RETURNS_NULL treatment, so none of + * the null-checking has to be done here. At worst, that may result in a + * mystery NULL return rather than an error, if someone explicitly passes + * NULL to one of the parameters with a non-null default. + */ + + /* + * Check valid range of 'from' and supported 'usingOctets'. + * + * Every specified function that has a start position FROM and a USING + * clause starts with a check that the start position is in range. This + * function factors out that test, returning true if the start position is + * /out of range/ (triggering the caller to return the special result + * defined for that case), returning false if the value is in range, or + * throwing an exception if the length unit specified in the USING clause + * isn't supported. 
+ */ + private static boolean usingAndLengthCheck( + String in, int from, boolean usingOctets, String function) + throws SQLException + { + if ( usingOctets ) + throw new SQLFeatureNotSupportedException( + '"' + function + "\" does not yet support USING OCTETS", + "0A000"); + return ( 1 > from || from > getStringLength(in) ); + } + + private static void newlinesCheck(boolean w3cNewlines, String function) + throws SQLException + { + if ( ! w3cNewlines ) + throw new SQLFeatureNotSupportedException( + '"' + function + "\" does not yet support the ISO SQL newline" + + " conventions, only the original W3C XQuery ones" + + " (HINT: pass w3cNewlines => true)", "0A000"); + } + + private static RegularExpression compileRE(String pattern, String flags) + throws SQLException + { + try + { + return s_s9p.getUnderlyingConfiguration() + .compileRegularExpression(pattern, flags, "XP30", null); + } + catch ( XPathException e ) + { + if ( NamespaceConstant.ERR.equals(e.getErrorCodeNamespace()) ) + { + if ( "FORX0001".equals(e.getErrorCodeLocalPart()) ) + throw new SQLDataException( + "invalid XQuery option flag", "2201T", e); + if ( "FORX0002".equals(e.getErrorCodeLocalPart()) ) + throw new SQLDataException( + "invalid XQuery regular expression", "2201S", e); + } + throw new SQLException( + "compiling XQuery regular expression: " + e.getMessage(), e); + } + } + + private static CharSequence replace( + RegularExpression re, CharSequence in, CharSequence with) + throws SQLException + { + /* + * Report the standard-mandated error if replacing a zero-length match. + * Strictly speaking, this is a test of the length of the match, not of + * the input string. Here, though, this private method is only called by + * translate_regex, which always passes only the portion of the input + * string that matched, so the test is equivalent. + * As to why the SQL committee would make such a point of disallowing + * replacement of a zero-length match, that's a good question. See + * s_intervalSignSite in this very file for an example where replacing + * a zero-length match is just what's wanted. (But that pattern relies + * on lookahead/lookbehind operators, which XQuery regular expressions + * don't have.) + * When the underlying library is Saxon, there is an Easter egg: if a + * regular expression is compiled with a 'flags' string ending in ";j", + * a Java regular expression is produced instead of an XQuery one (with + * standards conformance cast to the wind). That can be detected with + * getFlags() on the regular expression: not looking for ";j", because + * that has been stripped out, but for "d" which is a Java regex flag + * that Saxon sets by default, and is not a valid XQuery regex flag. + * If the caller has used Saxon's Easter egg to get a Java regex, here + * is another Easter egg to go with it, allowing zero-length matches + * to be replaced if that's what the caller wants to do. + */ + if ( 0 == in.length() && ! 
re.getFlags().contains("d") ) + throw new SQLDataException( + "attempt to replace a zero-length string", "2201U"); + try + { + return re.replace(in, with); + } + catch ( XPathException e ) + { + if ( NamespaceConstant.ERR.equals(e.getErrorCodeNamespace()) ) + { + if ( "FORX0003".equals(e.getErrorCodeLocalPart()) ) + throw new SQLDataException( + "attempt to replace a zero-length string", "2201U", e); + if ( "FORX0004".equals(e.getErrorCodeLocalPart()) ) + throw new SQLDataException( + "invalid XQuery replacement string", "2201V", e); + } + throw new SQLException( + "replacing regular expression match: " + e.getMessage(), e); + } + } + + interface MatchVector + { + int groups(); + int position(int group); + int length(int group); + } + + interface ListOfMatchVectors + { + /** + * Return the MatchVector for one occurrence of a match. + *

    + * Any previously-returned MatchVector is invalid after another get. + * In multiple calls to get, the occurrence parameter must be strictly + * increasing. + * After get has returned null, it should not be called again. + */ + MatchVector get(int occurrence) throws SQLException; + void close(); + } + + static class LOMV + implements ListOfMatchVectors, MatchVector, RegexIterator.MatchHandler + { + private RegexIterator m_ri; + private int m_pos; + private int m_occurrence; + + LOMV(int startPos, RegexIterator ri) + { + m_ri = ri; + m_pos = startPos; + } + + static ListOfMatchVectors of( + String pattern, String flags, String in, int from) + throws SQLException + { + RegularExpression re = compileRE(pattern, flags); + return of(re, in, from); + } + + static ListOfMatchVectors of(RegularExpression re, String in, int from) + { + RegexIterator ri = + re.analyze(in.substring(in.offsetByCodePoints(0, from - 1))); + return new LOMV(from, ri); + } + + private int[] m_begPositions; + private int[] m_endPositions; + + @Override // ListOfMatchVectors + public MatchVector get(int occurrence) throws SQLException + { + try + { + StringValue sv; + for ( ;; ) + { + sv = m_ri.next(); + if ( null == sv ) + return null; + if ( m_ri.isMatching() ) + if ( ++ m_occurrence == occurrence ) + break; + m_pos += sv.getStringLength(); + } + + if ( null == m_begPositions ) + { + int groups = m_ri.getNumberOfGroups(); + /* + * Saxon's Apache-derived XQuery engine will report a number + * of groups counting $0 (so it will be 1 even if no capture + * groups were defined in the expression). In contrast, the + * Java regex engine that you get with the Saxon ";j" Easter + * egg does not count $0 (so arrays need groups+1 entries). + * It's hard to tell from here which flavor was used, plus + * the Saxon behavior might change some day, so just spend + * the extra + 1 every time. + */ + m_begPositions = new int [ groups + 1 ]; + m_endPositions = new int [ groups + 1 ]; + } + + m_begPositions [ 0 ] = m_pos; + + fill(m_begPositions, 1, m_begPositions.length, 0); + fill(m_endPositions, 1, m_endPositions.length, 0); + m_ri.processMatchingSubstring(this); + + m_endPositions [ 0 ] = m_pos; + + return this; + } + catch ( XPathException e ) + { + throw new SQLException( + "evaluating XQuery regular expression: " + e.getMessage(), + e); + } + } + + @Override + public void close() + { + m_ri.close(); + } + + @Override // MatchVector + public int groups() + { + return m_begPositions.length - 1; + } + + @Override + public int position(int groupNumber) + { + return m_begPositions [ groupNumber ]; + } + + @Override + public int length(int groupNumber) + { + return + m_endPositions [ groupNumber ] - m_begPositions [ groupNumber ]; + } + + @Override // MatchHandler + public void characters(CharSequence s) + { + m_pos += getStringLength(s); + } + + @Override + public void onGroupStart(int groupNumber) + { + m_begPositions [ groupNumber ] = m_pos; + } + + @Override + public void onGroupEnd(int groupNumber) + { + m_endPositions [ groupNumber ] = m_pos; + } + } + + /** + * Function form of the ISO SQL + * {@code }. + *

    + * Rewrite the standard form + *

    +	 * value LIKE_REGEX pattern FLAG flags
    +	 *
    + * into this form: + *
    +	 * like_regex(value, pattern, flag => flags)
    +	 *
    + * where the {@code flag} parameter defaults to no flags if omitted. + *
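+ * An illustrative call (hypothetical values; assuming the Saxon examples
+ * jar is installed, so that this function exists as
+ * {@code javatest.like_regex}):
+	 *
+	 * SELECT javatest.like_regex('Hello World', '^hel',
+	 *                            flag => 'i', w3cNewlines => true);
+	 *
+ * should return true, the case-insensitive pattern matching at the start of
+ * the string; {@code w3cNewlines => true} is needed because, as noted below,
+ * only the W3C newline behavior is currently provided.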

    + * The SQL standard specifies that pattern elements sensitive to newlines + * (namely {@code ^}, {@code $}, {@code \s}, {@code \S}, and {@code .}) are + * to support the various representations of newline set out in + * Unicode Technical + * Standard #18, RL1.6. That behavior differs from the standard W3C + * XQuery newline handling, as described for + * the flags + * {@code m} and {@code s} and for + * the + * multicharacter escapes {@code \s} and {@code \S}. As an extension to + * ISO SQL, passing {@code w3cNewlines => true} requests the standard W3C + * XQuery behavior rather than the UTS#18 behevior for newlines. If the + * underlying XQuery library only provides the W3C behavior, calls without + * {@code w3cNewlines => true} will throw exceptions. + * @param value The string to be tested against the pattern. + * @param pattern The XQuery regular expression. + * @param flag Optional string of + * flags adjusting + * the regular expression behavior. + * @param w3cNewlines Pass true to allow the regular expression to recognize + * newlines according to the W3C XQuery rules rather than those of ISO SQL. + * @return True if the supplied value matches the pattern. Null if any + * parameter is null. + * @throws SQLException SQLDataException with SQLSTATE 2201S if the regular + * expression is invalid, 2201T if the flags string is invalid; + * SQLFeatureNotSupportedException (0A000) if (in the current + * implementation) w3cNewlines is false or omitted. + */ + @Function(implementor="saxon9api", schema="javatest") + public static boolean like_regex( + String value, //strict + String pattern, //strict + @SQLType(defaultValue="") String flag, //strict + @SQLType(defaultValue="false") boolean w3cNewlines + ) + throws SQLException + { + newlinesCheck(w3cNewlines, "like_regex"); + return compileRE(pattern, flag).containsMatch(value); + } + + /** + * Syntax-sugar-free form of the ISO SQL + * {@code OCCURRENCES_REGEX} function: + * how many times does a pattern occur in a string? + *
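+ * For instance (hypothetical values, using the rewritten argument form
+ * described below, and assuming the Saxon examples jar is installed so that
+ * this function exists as {@code javatest.occurrences_regex}):
+	 *
+	 * SELECT javatest.occurrences_regex('o', "in" => 'how do you do',
+	 *                                   w3cNewlines => true);
+	 *
+ * should return 4, one for each occurrence of the pattern in the string.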

    + * Rewrite the standard form + *

    +	 * OCCURRENCES_REGEX(pattern FLAG flags IN str FROM position USING units)
    +	 *
    + * into this form: + *
    +	 * occurrences_regex(pattern, flag => flags, "in" => str,
    +	 *                   "from" => position, usingOctets => true|false)
    +	 *
    + * where all of the named parameters are optional except pattern and "in", + * and the standard {@code USING CHARACTERS} becomes + * {@code usingOctets => false}, which is the default, and + * {@code USING OCTETS} becomes {@code usingOctets => true}. See also + * {@link #like_regex like_regex} regarding the {@code w3cNewlines} + * parameter. + * @param pattern XQuery regular expression to seek in the input string. + * @param in The input string. + * @param flag Optional string of + * flags adjusting + * the regular expression behavior. + * @param from Starting position in the input string, 1 by default. + * @param usingOctets Whether position is counted in characters (actual + * Unicode characters, not any smaller encoded unit, not even Java char), + * which is the default, or (when true) in octets of the string's encoded + * form. + * @param w3cNewlines Pass true to allow the regular expression to recognize + * newlines according to the W3C XQuery rules rather than those of ISO SQL. + * @return The number of occurrences of the pattern in the input string, + * starting from the specified position. Null if any parameter is null; -1 + * if the start position is less than 1 or beyond the end of the string. + * @throws SQLException SQLDataException with SQLSTATE 2201S if the regular + * expression is invalid, 2201T if the flags string is invalid; + * SQLFeatureNotSupportedException (0A000) if (in the current + * implementation) usingOctets is true, or w3cNewlines is false or omitted. + */ + @Function(implementor="saxon9api", schema="javatest") + public static int occurrences_regex( + String pattern, //strict + @SQLType(name="\"in\"") String in, //strict + @SQLType(defaultValue="") String flag, //strict + @SQLType(name="\"from\"", defaultValue="1") int from, + @SQLType(defaultValue="false") boolean usingOctets, + @SQLType(defaultValue="false") boolean w3cNewlines + ) + throws SQLException + { + if ( usingAndLengthCheck(in, from, usingOctets, "occurrences_regex") ) + return -1; // note: not the same as in position_regex! + newlinesCheck(w3cNewlines, "occurrences_regex"); + + ListOfMatchVectors lomv = LOMV.of(pattern, flag, in, from); + + for ( int i = 1 ;; ++ i ) + if ( null == lomv.get(i) ) + return i - 1; + } + + /** + * Syntax-sugar-free form of the ISO SQL + * {@code POSITION_REGEX} function: + * where does a pattern, or part of it, occur in a string? + *
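+ * For instance (hypothetical values, using the rewritten argument form
+ * described below, and assuming the Saxon examples jar is installed so that
+ * this function exists as {@code javatest.position_regex}):
+	 *
+	 * SELECT javatest.position_regex('do', "in" => 'how do you do',
+	 *                                occurrence => 2, w3cNewlines => true);
+	 *
+ * should return 12, the character position where the second occurrence of
+ * the pattern begins (or 14, just past its end, with {@code after => true}).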

    + * Rewrite the standard forms + *

    +	 * POSITION_REGEX(START pattern FLAG flags IN str FROM position
    +	 *                OCCURRENCE n GROUP m)
    +	 * POSITION_REGEX(AFTER pattern FLAG flags IN str FROM position
    +	 *                OCCURRENCE n GROUP m)
    +	 *
    + * into these forms, respectively: + *
    +	 * position_regex(pattern, flag => flags, "in" => str,
    +	 *                "from" => position, occurrence => n,
    +	 *                "group" => m)
    +	 * position_regex(pattern, flag => flags, "in" => str,
    +	 *                "from" => position, occurrence => n,
    +	 *                "group" => m, after => true)
    +	 *
    + * where all of the named parameters are optional except pattern and "in". + * See also {@link #occurrences_regex occurrences_regex} regarding the + * {@code usingOctets} parameter, and {@link #like_regex like_regex} + * regarding {@code w3cNewlines}. + * @param pattern XQuery regular expression to seek in the input string. + * @param in The input string. + * @param flag Optional string of + * flags adjusting + * the regular expression behavior. + * @param from Starting position in the input string, 1 by default. + * @param usingOctets Whether position is counted in characters (actual + * Unicode characters, not any smaller encoded unit, not even Java char), + * which is the default, or (when true) in octets of the string's encoded + * form. + * @param after Whether to return the position where the match starts + * (when false, the default), or just after the match ends (when true). + * @param occurrence If specified as an integer n (default 1), returns the + * position starting (or after) the nth match of the pattern in the string. + * @param group If zero (the default), returns the position starting (or + * after) the match of the whole pattern overall, otherwise if an integer m, + * the position starting or after the mth parenthesized group in (the nth + * occurrence of) the pattern. + * @param w3cNewlines Pass true to allow the regular expression to recognize + * newlines according to the W3C XQuery rules rather than those of ISO SQL. + * @return The position, in the specified units, starting or just after, + * the nth occurrence (or mth capturing group of the nth occurrence) of the + * pattern in the input string, starting from the specified position. Null + * if any parameter is null; zero if the start position is less than 1 or + * beyond the end of the string, if occurrence is less than 1 or greater + * than the number of matches, or if group is less than zero or greater than + * the number of parenthesized capturing groups in the pattern. + * @throws SQLException SQLDataException with SQLSTATE 2201S if the regular + * expression is invalid, 2201T if the flags string is invalid; + * SQLFeatureNotSupportedException (0A000) if (in the current + * implementation) usingOctets is true, or w3cNewlines is false or omitted. + */ + @Function(implementor="saxon9api", schema="javatest") + public static int position_regex( + String pattern, //strict + @SQLType(name="\"in\"") String in, //strict + @SQLType(defaultValue="") String flag, //strict + @SQLType(name="\"from\"", defaultValue="1") int from, + @SQLType(defaultValue="false") boolean usingOctets, + @SQLType(defaultValue="false") boolean after, + @SQLType(defaultValue="1") int occurrence, //strict + @SQLType(name="\"group\"", defaultValue="0") int group, //strict + @SQLType(defaultValue="false") boolean w3cNewlines + ) + throws SQLException + { + if ( 1 > occurrence ) + return 0; + if ( 0 > group ) // test group > ngroups after compiling regex + return 0; + if ( usingAndLengthCheck(in, from, usingOctets, "position_regex") ) + return 0; // note: not the same as in occurrences_regex! + newlinesCheck(w3cNewlines, "position_regex"); + + ListOfMatchVectors lomv = LOMV.of(pattern, flag, in, from); + + MatchVector mv = lomv.get(occurrence); + if ( null == mv || mv.groups() < group ) + return 0; + + return mv.position(group) + (after ? mv.length(group) : 0); + } + + /** + * Syntax-sugar-free form of the ISO SQL + * {@code SUBSTRING_REGEX} function: + * return a substring specified by a pattern match in a string. + *
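+ * For instance (hypothetical values, using the rewritten argument form
+ * described below, and assuming the Saxon examples jar is installed so that
+ * this function exists as {@code javatest.substring_regex}):
+	 *
+	 * SELECT javatest.substring_regex('([a-z]+)@([a-z]+)',
+	 *                                 "in" => 'user@example',
+	 *                                 "group" => 2, w3cNewlines => true);
+	 *
+ * should return 'example', the part matched by the second parenthesized
+ * group.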

    + * Rewrite the standard form + *

    +	 * SUBSTRING_REGEX(pattern FLAG flags IN str FROM position
    +	 *                 OCCURRENCE n GROUP m)
    +	 *
    + * into this form: + *
    +	 * substring_regex(pattern, flag => flags, "in" => str,
    +	 *                 "from" => position, occurrence => n,
    +	 *                 "group" => m)
    +	 *
    + * where all of the named parameters are optional except pattern and "in". + * See also {@link #position_regex position_regex} regarding the + * {@code occurrence} and {@code "group"} parameters, + * {@link #occurrences_regex occurrences_regex} regarding + * {@code usingOctets}, and {@link #like_regex like_regex} + * regarding {@code w3cNewlines}. + * @param pattern XQuery regular expression to seek in the input string. + * @param in The input string. + * @param flag Optional string of + * flags adjusting + * the regular expression behavior. + * @param from Starting position in the input string, 1 by default. + * @param usingOctets Whether position is counted in characters (actual + * Unicode characters, not any smaller encoded unit, not even Java char), + * which is the default, or (when true) in octets of the string's encoded + * form. + * @param occurrence If specified as an integer n (default 1), returns the + * nth match of the pattern in the string. + * @param group If zero (the default), returns the match of the whole + * pattern overall, otherwise if an integer m, the match of the mth + * parenthesized group in (the nth occurrence of) the pattern. + * @param w3cNewlines Pass true to allow the regular expression to recognize + * newlines according to the W3C XQuery rules rather than those of ISO SQL. + * @return The substring matching the nth occurrence (or mth capturing group + * of the nth occurrence) of the pattern in the input string, starting from + * the specified position. Null if any parameter is null, if the start + * position is less than 1 or beyond the end of the string, if occurrence is + * less than 1 or greater than the number of matches, or if group is less + * than zero or greater than the number of parenthesized capturing groups in + * the pattern. + * @throws SQLException SQLDataException with SQLSTATE 2201S if the regular + * expression is invalid, 2201T if the flags string is invalid; + * SQLFeatureNotSupportedException (0A000) if (in the current + * implementation) usingOctets is true, or w3cNewlines is false or omitted. + */ + @Function(implementor="saxon9api", schema="javatest") + public static String substring_regex( + String pattern, //strict + @SQLType(name="\"in\"") String in, //strict + @SQLType(defaultValue="") String flag, //strict + @SQLType(name="\"from\"", defaultValue="1") int from, + @SQLType(defaultValue="false") boolean usingOctets, + @SQLType(defaultValue="1") int occurrence, //strict + @SQLType(name="\"group\"", defaultValue="0") int group, //strict + @SQLType(defaultValue="false") boolean w3cNewlines + ) + throws SQLException + { + if ( 1 > occurrence ) + return null; + if ( 0 > group ) // test group > ngroups after compiling regex + return null; + if ( usingAndLengthCheck(in, from, usingOctets, "substring_regex") ) + return null; + newlinesCheck(w3cNewlines, "substring_regex"); + + ListOfMatchVectors lomv = LOMV.of(pattern, flag, in, from); + + MatchVector mv = lomv.get(occurrence); + if ( null == mv || mv.groups() < group ) + return null; + + int codePointPos = mv.position(group); + int codePointLen = mv.length(group); + + int utf16pos = in.offsetByCodePoints(0, codePointPos - 1); + int utf16end = in.offsetByCodePoints(utf16pos, codePointLen); + + return in.substring(utf16pos, utf16end); + } + + /** + * Syntax-sugar-free form of the ISO SQL + * {@code TRANSLATE_REGEX} function: + * return a string constructed from the input string by replacing one + * specified occurrence, or all occurrences, of a matching pattern. + *

    + * Rewrite the standard forms + *

    +	 * TRANSLATE_REGEX(pattern FLAG flags IN str WITH repl FROM position
    +	 *                 OCCURRENCE ALL)
    +	 * TRANSLATE_REGEX(pattern FLAG flags IN str WITH repl FROM position
    +	 *                 OCCURRENCE n)
    +	 *
    + * into these forms, respectively: + *
    +	 * translate_regex(pattern, flag => flags, "in" => str,
    +	 *                 "with" => repl, "from" => position)
    +	 * translate_regex(pattern, flag => flags, "in" => str,
    +	 *                 "with" => repl, "from" => position,
    +	 *                 occurrence => n)
    +	 *
    + * where all of the named parameters are optional except pattern and "in" + * (the default for "with" is the empty string, resulting in matches being + * deleted). + * See also {@link #position_regex position_regex} regarding the + * {@code occurrence} parameter, + * {@link #occurrences_regex occurrences_regex} regarding + * {@code usingOctets}, and {@link #like_regex like_regex} + * regarding {@code w3cNewlines}. + *

+ * For the specified occurrence (or all occurrences), the matching portion
+ * s of the string is replaced as by the XQuery function
+ * replace(s, pattern, repl, flags). The repl string
+ * may contain {@code $0} to refer to the entire matched substring, or
+ * {@code $}m to refer to the mth parenthesized capturing
+ * group in the pattern.
+ * @param pattern XQuery regular expression to seek in the input string.
+ * @param in The input string.
+ * @param flag Optional string of
+ * flags adjusting
+ * the regular expression behavior.
+ * @param with The replacement string, possibly with $m references.
+ * @param from Starting position in the input string, 1 by default.
+ * @param usingOctets Whether position is counted in characters (actual
+ * Unicode characters, not any smaller encoded unit, not even Java char),
+ * which is the default, or (when true) in octets of the string's encoded
+ * form.
+ * @param occurrence If specified as an integer n (default 0 for "ALL"),
+ * replace the nth match of the pattern in the string.
+ * @param w3cNewlines Pass true to allow the regular expression to recognize
+ * newlines according to the W3C XQuery rules rather than those of ISO SQL.
+ * @return The input string with one occurrence or all occurrences of the
+ * pattern replaced, as described above. Null if any parameter is null, or
+ * if the start position is less than 1 or beyond the end of the string.
+ * The input string unchanged if occurrence is less than zero or exceeds the
+ * number of matches.
+ * @throws SQLException SQLDataException with SQLSTATE 2201S if the regular
+ * expression is invalid, 2201T if the flags string is invalid; 2201U if
+ * replacing where the pattern has matched a substring of zero length; 2201V
+ * if the replacement string has improper form (a backslash must be used to
+ * escape any dollar sign or backslash intended literally);
+ * SQLFeatureNotSupportedException (0A000) if (in the current
+ * implementation) usingOctets is true, or w3cNewlines is false or omitted.
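+	 *
+ * For instance (hypothetical values; assuming the Saxon examples jar is
+ * installed so that this function exists as
+ * {@code javatest.translate_regex}):
+	 *
+	 * SELECT javatest.translate_regex('cat', "in" => 'cat and cat',
+	 *                                 "with" => 'dog', occurrence => 2,
+	 *                                 w3cNewlines => true);
+	 *
+ * should return 'cat and dog', only the second occurrence being replaced;
+ * leaving {@code occurrence} at its default replaces every occurrence.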
+ */ + @Function(implementor="saxon9api", schema="javatest") + public static String translate_regex( + String pattern, //strict + @SQLType(name="\"in\"") String in, //strict + @SQLType(defaultValue="") String flag, //strict + @SQLType(name="\"with\"", defaultValue="") String with, //strict + @SQLType(name="\"from\"", defaultValue="1") int from, + @SQLType(defaultValue="false") boolean usingOctets, + @SQLType(defaultValue="0" /* ALL */) int occurrence, + @SQLType(defaultValue="false") boolean w3cNewlines + ) + throws SQLException + { + if ( usingAndLengthCheck(in, from, usingOctets, "translate_regex") ) + return null; + newlinesCheck(w3cNewlines, "translate_regex"); + if ( 0 > occurrence ) + return in; + + RegularExpression re = compileRE(pattern, flag); + + ListOfMatchVectors lomv = LOMV.of(re, in, from); + + MatchVector mv; + int codePointPos; + int codePointLen; + int utf16pos; + int utf16end; + + if ( 0 < occurrence ) + { + mv = lomv.get(occurrence); + if ( null == mv ) + return in; + + codePointPos = mv.position(0); + codePointLen = mv.length(0); + + utf16pos = in.offsetByCodePoints(0, codePointPos - 1); + utf16end = in.offsetByCodePoints(utf16pos, codePointLen); + + return + in.substring(0, utf16pos) + + replace(re, in.substring(utf16pos, utf16end), with) + + in.substring(utf16end); + } + + StringBuilder sb = new StringBuilder(); + utf16end = 0; + + for ( int i = 1; null != (mv = lomv.get(i)); ++ i ) + { + codePointPos = mv.position(0); + codePointLen = mv.length(0); + + utf16pos = in.offsetByCodePoints(0, codePointPos - 1); + + sb.append(in.substring(utf16end, utf16pos)); + + utf16end = in.offsetByCodePoints(utf16pos, codePointLen); + + sb.append(replace(re, in.substring(utf16pos, utf16end), with)); + } + + return sb.append(in.substring(utf16end)).toString(); + } +} diff --git a/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/package-info.java b/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/package-info.java new file mode 100644 index 00000000..1e842952 --- /dev/null +++ b/pljava-examples/src/main/java/org/postgresql/pljava/example/saxon/package-info.java @@ -0,0 +1,7 @@ +/** + * Examples using the Saxon-HE library for XML processing. + * Only built if {@code -Psaxon-examples} is given on the {@code mvn} command + * line (which will also cause Saxon-HE to be downloaded). 
+ * @author Chapman Flack + */ +package org.postgresql.pljava.example.saxon; diff --git a/pljava-examples/src/main/resources/deployment/examples.ddr b/pljava-examples/src/main/resources/deployment/examples.ddr index 2844414b..cf3bc52f 100644 --- a/pljava-examples/src/main/resources/deployment/examples.ddr +++ b/pljava-examples/src/main/resources/deployment/examples.ddr @@ -3,106 +3,6 @@ SQLActions[ ] = { CREATE SCHEMA javatest; BEGIN PostgreSQL SET search_path TO javatest,public ENd postgreSQL; - CREATE FUNCTION javatest.java_getTimestamp() - RETURNS timestamp - AS 'org.postgresql.pljava.example.Parameters.getTimestamp' - LANGUAGE java; - - CREATE FUNCTION javatest.java_getTimestamptz() - RETURNS timestamptz - AS 'org.postgresql.pljava.example.Parameters.getTimestamp' - LANGUAGE java; - - CREATE FUNCTION javatest.print(date) - RETURNS void - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(timetz) - RETURNS void - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(timestamptz) - RETURNS void - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print("char") - RETURNS "char" - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(bytea) - RETURNS bytea - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(int2) - RETURNS int2 - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(int2[]) - RETURNS int2[] - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(int4) - RETURNS int4 - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(int4[]) - RETURNS int4[] - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(int8) - RETURNS int8 - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(int8[]) - RETURNS int8[] - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(real) - RETURNS real - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(real[]) - RETURNS real[] - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(double precision) - RETURNS double precision - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.print(double precision[]) - RETURNS double precision[] - AS 'org.postgresql.pljava.example.Parameters.print' - LANGUAGE java; - - CREATE FUNCTION javatest.printObj(int[]) - RETURNS int[] - AS 'org.postgresql.pljava.example.Parameters.print(java.lang.Integer[])' - LANGUAGE java; - - CREATE FUNCTION javatest.java_addOne(int) - RETURNS int - AS 'org.postgresql.pljava.example.Parameters.addOne(java.lang.Integer)' - IMMUTABLE LANGUAGE java; - - CREATE FUNCTION javatest.nullOnEven(int) - RETURNS int - AS 'org.postgresql.pljava.example.Parameters.nullOnEven' - IMMUTABLE LANGUAGE java; - CREATE FUNCTION javatest.java_getSystemProperty(varchar) RETURNS varchar AS 'java.lang.System.getProperty' @@ -179,27 +79,6 @@ SQLActions[ ] = { FOR EACH ROW EXECUTE PROCEDURE moddatetime (moddate); - CREATE TABLE javatest.employees1 - ( - id int PRIMARY KEY, - name varchar(200), - salary int - ); - - CREATE 
TABLE javatest.employees2 - ( - id int PRIMARY KEY, - name varchar(200), - salary int, - transferDay date, - transferTime time - ); - - CREATE FUNCTION javatest.transferPeople(int) - RETURNS int - AS 'org.postgresql.pljava.example.SPIActions.transferPeopleWithSalary' - LANGUAGE java; - CREATE TYPE javatest._testSetReturn AS (base integer, incbase integer, ctime timestamptz); @@ -233,16 +112,6 @@ SQLActions[ ] = { AS 'org.postgresql.pljava.example.HugeResultSet.executeSelect' LANGUAGE java; - CREATE FUNCTION javatest.maxFromSetReturnExample(int, int) - RETURNS int - AS 'org.postgresql.pljava.example.SPIActions.maxFromSetReturnExample' - IMMUTABLE LANGUAGE java; - - CREATE FUNCTION javatest.nestedStatements(int) - RETURNS void - AS 'org.postgresql.pljava.example.SPIActions.nestedStatements' - LANGUAGE java; - CREATE TYPE javatest._properties AS (name varchar(200), value varchar(200)); @@ -276,26 +145,6 @@ SQLActions[ ] = { AS 'org.postgresql.pljava.example.Users.listNonSupers' LANGUAGE java; - CREATE FUNCTION javatest.testSavepointSanity() - RETURNS int - AS 'org.postgresql.pljava.example.SPIActions.testSavepointSanity' - IMMUTABLE LANGUAGE java; - - CREATE FUNCTION javatest.testTransactionRecovery() - RETURNS int - AS 'org.postgresql.pljava.example.SPIActions.testTransactionRecovery' - IMMUTABLE LANGUAGE java; - - CREATE FUNCTION javatest.getDateAsString() - RETURNS varchar - AS 'org.postgresql.pljava.example.SPIActions.getDateAsString' - STABLE LANGUAGE java; - - CREATE FUNCTION javatest.getTimeAsString() - RETURNS varchar - AS 'org.postgresql.pljava.example.SPIActions.getTimeAsString' - STABLE LANGUAGE java; - CREATE FUNCTION javatest.logMessage(varchar, varchar) RETURNS void AS 'org.postgresql.pljava.example.LoggerTest.logMessage' @@ -343,39 +192,6 @@ SQLActions[ ] = { AS 'org.postgresql.pljava.example.ResultSetTest.executeSelect' LANGUAGE java; - CREATE FUNCTION javatest.executeSelectToRecords(varchar) - RETURNS SETOF RECORD - AS 'org.postgresql.pljava.example.SetOfRecordTest.executeSelect' - LANGUAGE java; - - CREATE FUNCTION javatest.countNulls(record) - RETURNS int - AS 'org.postgresql.pljava.example.Parameters.countNulls' - LANGUAGE java; - - CREATE FUNCTION javatest.countNulls(int[]) - RETURNS int - AS 'org.postgresql.pljava.example.Parameters.countNulls(java.lang.Integer[])' - LANGUAGE java; - - /* - * An example using the ANY type - */ - CREATE FUNCTION javatest.loganyelement(anyelement) - RETURNS anyelement - AS 'org.postgresql.pljava.example.AnyTest.logAnyElement' - LANGUAGE java IMMUTABLE STRICT; - - CREATE FUNCTION javatest.logany("any") - RETURNS void - AS 'org.postgresql.pljava.example.AnyTest.logAny' - LANGUAGE java IMMUTABLE STRICT; - - CREATE FUNCTION javatest.makearray(anyelement) - RETURNS anyarray - AS 'org.postgresql.pljava.example.AnyTest.makeArray' - LANGUAGE java IMMUTABLE STRICT; - END INSTALL", "BEGIN REMOVE diff --git a/pljava-examples/src/site/markdown/index.md b/pljava-examples/src/site/markdown/index.md index 36bd108b..1591ea94 100644 --- a/pljava-examples/src/site/markdown/index.md +++ b/pljava-examples/src/site/markdown/index.md @@ -9,6 +9,23 @@ If you arrived here from a search for PL/Java examples, you probably want (Note: the source browser link shows the current development sources, which may differ from a particular release.) 
+### Optionally-built example code for XML processing with Saxon + +The [optional example code][exsaxon] for providing actual XML Query-based +alternatives to PostgreSQL's XPath 1.0-based query and `XMLTABLE` functions +does not get built by default, because it pulls in the sizeable [Saxon-HE][] +library from Saxonica, and because (unlike the rest of PL/Java) it requires +Java 8. + +To include these optional functions when building the examples, be sure to use +a Java 8 build environment, and add `-Psaxon-examples` to the `mvn` command +line. The functions are [documented here][exsaxon]. + + [esug]: ../examples/examples.html [tbtes]: https://github.com/tada/pljava/tree/master/pljava-examples/src/main/java/org/postgresql/pljava/example [rtj]: apidocs/index.html +[appcds]: ../install/appcds.html +[j9cds]: ../install/oj9vmopt.html#How_to_set_up_class_sharing_in_OpenJ9 +[Saxon-HE]: http://www.saxonica.com/html/products/products.html +[exsaxon]: ../examples/saxon.html diff --git a/pljava-packaging/build.xml b/pljava-packaging/build.xml index 452c73bf..65155b4a 100644 --- a/pljava-packaging/build.xml +++ b/pljava-packaging/build.xml @@ -1,29 +1,7 @@ - + - - - - - - - - - - - - - - - - - - - - - - - + - + + - @@ -273,7 +247,7 @@ jos.close(); - @@ -281,6 +255,94 @@ jos.close(); simple update is possible, just repeat the next entry, with the from-version changed. --> + + + + + + + + + + + + + + + + + + + + + + + excludes="pljava.sql pljava--.sql pljava--unpackaged--.sql"/> + + - +(no script here; see Node.java for the rules to resolve using pg_config) diff --git a/pljava-packaging/pom.xml b/pljava-packaging/pom.xml index e469d4b2..7de1f8dc 100644 --- a/pljava-packaging/pom.xml +++ b/pljava-packaging/pom.xml @@ -4,7 +4,7 @@ org.postgresql pljava.app - 1.5.0 + 1.6.10 pljava-packaging PL/Java packaging @@ -22,11 +22,6 @@ pljava ${project.version} - - org.postgresql - pljava-deploy - ${project.version} - org.postgresql pljava-examples @@ -36,7 +31,7 @@ org.postgresql pljava-so ${project.version} - nar + pom @@ -68,6 +63,40 @@ pljava-so-${project.version} + + + + pgjdbc + + + org.postgresql + postgresql + [42.6.0,) + + + + + + + pgjdbc-ng + + + com.impossibl.pgjdbc-ng + pgjdbc-ng-all + [0.8.4,0.8.8),(0.8.8,) + + + @@ -80,10 +109,48 @@ + + org.postgresql + pljava-pgxs + ${pljava.pgxs.version} + + + set-ver-classifier + initialize + + scripted-goal + + + + + + + + org.apache.maven.plugins maven-resources-plugin - 2.7 pljava extension files @@ -105,6 +172,9 @@ compile + + none + @@ -117,7 +187,7 @@ org.apache.ant ant - [1.10.9,) + 1.10.11 @@ -138,4 +208,135 @@ + + + + + org.postgresql + pljava-pgxs + ${pljava.pgxs.version} + + + + scripted-report + + + + + + + + + diff --git a/pljava-packaging/src/main/java/JarX.java b/pljava-packaging/src/main/java/JarX.java index 01892f7d..07b6f84a 100644 --- a/pljava-packaging/src/main/java/JarX.java +++ b/pljava-packaging/src/main/java/JarX.java @@ -26,7 +26,6 @@ import java.util.zip.ZipOutputStream; import javax.script.ScriptEngine; import javax.script.ScriptEngineManager; -import javax.script.ScriptException; /** * Distribute your work as a self-extracting jar file by including one file, @@ -38,7 +37,7 @@ * The text conversion offered by JarX is useful if your distribution will * include text files, source, documentation, scripts, etc., and your recipients * have platforms with different newline conventions. - *

    Text conversion background

    + *

    Text conversion background

    * There are two issues in the cross-platform delivery of text files. *
    1. Different platforms indicate the end of a line differently. * The UNIX convention uses the single character LINE FEED; the (old) Macintosh @@ -57,7 +56,7 @@ * within the archive that are actually known to contain text. * Passing binary data or class files through character and newline * transformations will corrupt them. - *

      The ZIP approach and why it loses

      + *

      The ZIP approach and why it loses

      * The popular zip format on which jar is based already has a provision for * newline (but not character set) conversion. Each entry includes a text/binary * bit, and the unzip program applies newline conversion while extracting, but @@ -79,7 +78,7 @@ * the text bit. That can happen, and has happened, to class files in zip * archives if the recipient uses unzip -a, and causes significant misery if * the package is widely distributed. - *

      A better way

      + *

      A better way

      * Even though the jar format is based on zip, it would be a mistake to make jar * tools that rely on the zip text/binary bit, because common * practice has made that bit unreliable. What's needed is a standard way for @@ -109,7 +108,7 @@ * allows explicit specification of the character encoding used in a jar entry, * and the extracting program can automatically convert into the encoding used * on the local system. (But see Call to action below.) - *

      What JarX Does

      + *

      What JarX Does

      * Content-Type entries in a Manifest were introduced in Java 1.3 * but are compatible with earlier jar specifications; a jar file containing * such entries can be processed without any trouble by any jar tool compliant @@ -133,7 +132,7 @@ * archive that can be executed to unpack itself on any Java 1.6 or later * virtual machine, performing all automatic conversions and requiring no jar * tool at all. - *

      Building a Jar

      + *

      Building a Jar

      * To build a jar file, first prepare the manifest, using any text editor or, * more likely, a script. Include a Name: entry for every file * to be included in the jar. JarX.Build archives only the files named in @@ -157,7 +156,7 @@ * foo.jar names the jar you want to create. * The order of files in the jar will be the order of their names in the * manifest. - *

      Special manifest attributes

      + *

      Special manifest attributes

      * For 2016, JarX now recognizes some special manifest attributes: *
      *
      _JarX_CharsetInArchive
      @@ -222,13 +221,27 @@ * and nestable) are allowed outside of the quoted strings. * *
      - *

      Extracting a jar

      + *

      Alternative to {@code ScriptEngine} for a path resolver

      + * With the removal of Nashorn in Java 15, leaving no scripting language that + * can be assumed present in the Java runtime, a script in the manifest may + * no longer be the simplest way to customize the resolution of path names when + * extracting. This class has been refactored now to expose two methods, + * {@link #prepareResolver(String) prepareResolver} and + * {@link #resolve(String,String) resolve}, easily overridden in a subclass. + * The value of the {@code _JarX_PathResolver} main attribute is passed to + * {@code prepareResolver} as a string (so it can be parsed in any way useful to + * the subclass, not necessarily as described above, or ignored), and + * {@code resolve} is passed the stored path and platform path, and returns the + * platform path unchanged or a replacement. A self-extracting jar with + * resolution can be made without depending on any script engine, by placing + * two classes in the jar, JarX and the subclass, and naming the + * subclass as the jar's {@code Main-Class}. It needs a {@code main} method that + * simply instantiates the class and calls {@code extract()}. + *
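+ *
+ * A minimal sketch of such a subclass (the class name and resolution policy
+ * here are only placeholders):
+ *
+	 * public class Unpack extends JarX {
+	 *   public static void main(String[] args) throws Exception {
+	 *     new Unpack().extract();
+	 *   }
+	 *   public void prepareResolver(String v) { } // attribute ignored here
+	 *   public String resolve(String stored, String platform) {
+	 *     return platform; // or a corrected path, or null to skip the entry
+	 *   }
+	 * }
+ *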

      Extracting a jar

      * The command java -jar foo.jar is all it takes * to extract a jar. The Main-Class entry in the manifest * identifies the entry point of JarX so it does not need to be specified. - *

      - * JarX - *

      Call to action

      + *

      Call to action

      * At the moment, Sun's Jar File Specification contains a mistake in the * description of a content type that could lead to implementations * that reject valid content types. Squash this bug before it bites: @@ -238,7 +251,7 @@ * for * *Bug #4310708. - *

      Miscellany

      + *

      Miscellany

      * This class is a little sloppy and relatively slow, especially the Build side * when converting plain text files. The idea for JarX is a natural outgrowth * of the Java 1.3 manifest standard and I have suggested that the functionality @@ -396,9 +409,6 @@ public boolean holdsIgnoreCase( String value, short... type) { return is( type) && value.equalsIgnoreCase( this.value); } - /**Name of the JarX class file as stored in the jar*/ - public static final String me - = JarX.class.getName().replace('.', '/') + ".class"; /**Name of the manifest file as stored in the jar*/ public static final String manifestName = "META-INF/MANIFEST.MF"; /**The (fixed) encoding used for manifest content*/ @@ -438,16 +448,37 @@ public void extract() throws Exception { break; if ( null == mf ) { mf = jis.getManifest(); - if ( null != mf ) - setDefaults( mf.getMainAttributes()); + if ( null != mf ) { + Attributes mainAttributes = mf.getMainAttributes(); + setDefaults( mainAttributes); + if ( null != mainAttributes ) { + String v = mainAttributes.getValue( PATHRESOLVER); + if ( null != v ) + prepareResolver( v); + } + } } - if ( ! je.getName().equals( me) ) + if ( notMe( je.getName()) ) extract( je, jis); jis.closeEntry(); } jis.close(); } + + /** True if the passed name is not the in-jar name of this class or + * related classes that should not be extracted. + *

      + * If not overridden, this method returns false only for names matching the + * class of {@code this} or any ancestral superclass. Interfaces are not + * considered. A subclass could apply a different policy. + */ + public boolean notMe( String name) { + for ( Class c = getClass(); null != c; c = c.getSuperclass() ) + if ( name.equals( c.getName().replace('.', '/') + ".class") ) + return false; + return true; + } /**Examine the main attributes to set any defaults. * Includes loading the required script engine if a name resolver script @@ -466,14 +497,20 @@ public void setDefaults( Attributes mainAttributes) { defaultReadPermission = readPermission; defaultWritePermission = writePermission; defaultExecutePermission = executePermission; + } - if ( null == mainAttributes ) - return; - - String v = mainAttributes.getValue( PATHRESOLVER); - if ( null == v ) - return; - + /**Prepare a resolver of pathnames, given the value of the PATHRESOLVER + * main attribute. + *

      + * If not overridden in a subclass, this method parses it as a MIME type and + * script as described in the class comments, loads a {@code ScriptEngine} + * for the MIME type, and saves references to the engine in + * {@code resolverEngine} and the script in {@code resolverScript}. + * @param v value of the _JarX_PathResolver main attribute + * @throws Exception this implementation throws no checked exceptions, but an + * overriding implementation may + */ + public void prepareResolver( String v) throws Exception { JarX[] toks = structuredFieldBody( v, 0); if ( toks.length < 4 || ! toks[0].is( ATOM) @@ -512,6 +549,30 @@ else if ( toks[i].is( QUOTEDSTRING) ) resolverScript = script.toString(); } + /**Called with every path to be extracted; returns a possibly-corrected path. + *

      + * If not overridden in a subclass, this method returns s unchanged + * if no {@code resolverScript} has been set, and otherwise invokes the script + * with {@code storedPath} bound to orig, {@code platformPath} and + * {@code computedPath} both bound to plat, then returns the value + * bound to {@code computedPath} when the script has returned. + * @param orig The path as stored in the archive, always /-separated + * @param plat The path after only replacing / with the platform separator + * @return plat unchanged, or a corrected location for extracting the entry, + * or null to suppress extracting the entry + * @throws Exception this implementation may throw ScriptException, an + * overriding implementation may throw others + */ + public String resolve(String orig, String plat) throws Exception { + if ( null == resolverScript ) + return plat; + resolverEngine.put( "storedPath", orig); + resolverEngine.put( "platformPath", plat); + resolverEngine.put( "computedPath", plat); + resolverEngine.eval( resolverScript); + return (String)resolverEngine.get( "computedPath"); + } + /**Set instance variables for text/binary and permissions treatment * according to the passed Attributes. * @param atts Usually a per-entry attribute set, but {@code classify} is @@ -649,11 +710,11 @@ protected boolean archiveCharsetFromType( JarX[] type) { /**Extract a single entry, performing any appropriate conversion *@param je JarEntry for the current entry *@param is InputStream with the current entry content - *@throws IOException for any problem involving I/O - *@throws ScriptException for any problem involving the script engine + *@throws Exception IOException for any problem involving I/O, ScriptException + * possible from the non-overridden path resolver, others possible in an + * overridden implementation */ - public void extract( JarEntry je, InputStream is) - throws IOException, ScriptException { + public void extract( JarEntry je, InputStream is) throws Exception { classify( je.getAttributes(), true); String orig = je.getName(); @@ -662,13 +723,9 @@ public void extract( JarEntry je, InputStream is) if ( File.separatorChar != '/' ) s = s.replace( '/', File.separatorChar); - if ( null != resolverScript ) { - resolverEngine.put( "storedPath", orig); - resolverEngine.put( "platformPath", s); - resolverEngine.put( "computedPath", s); - resolverEngine.eval( resolverScript); - s = (String)resolverEngine.get( "computedPath"); - } + s = resolve( orig, s); + if ( null == s ) + return; System.err.print( s + " "); @@ -735,7 +792,10 @@ public void extract( JarEntry je, InputStream is) } } - tmpf.renameTo( f); + if ( ! tmpf.renameTo( f) ) { + if ( ! f.delete() || ! tmpf.renameTo( f) ) + System.err.println( "RENAME FAILED!"); + } } /**Copy content from an input to an output stream until end. diff --git a/pljava-packaging/src/main/java/Node.java b/pljava-packaging/src/main/java/Node.java new file mode 100644 index 00000000..cabed18e --- /dev/null +++ b/pljava-packaging/src/main/java/Node.java @@ -0,0 +1,3135 @@ +/* + * Copyright (c) 2015-2024 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack (this file, 2020) + * PostgreSQL Global Development Group, Michael Paquier, Alvaro Herrera + * (PostgresNode.pm, 2015, of which similar methods here are ports) + */ +package org.postgresql.pljava.packaging; + +import org.gjt.cuspy.JarX; + +import java.io.InputStream; + +import static java.lang.System.getProperty; +import static java.lang.System.setProperty; + +import java.nio.ByteBuffer; +import static java.nio.charset.Charset.defaultCharset; + +import java.util.regex.Matcher; +import static java.util.regex.Pattern.compile; + +/* + * For "Node" behavior: + */ + +import static java.lang.ProcessBuilder.Redirect.INHERIT; +import java.lang.reflect.InvocationHandler; // flexible SAM allowing exceptions +import java.lang.reflect.UndeclaredThrowableException; +import static java.lang.Thread.interrupted; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles.Lookup; +import static java.lang.invoke.MethodHandles.explicitCastArguments; +import static java.lang.invoke.MethodHandles.filterReturnValue; +import static java.lang.invoke.MethodHandles.publicLookup; +import static java.lang.invoke.MethodType.methodType; + +import static java.net.InetAddress.getLoopbackAddress; +import static java.net.URLEncoder.encode; +import java.net.ServerSocket; + +import static java.nio.charset.StandardCharsets.US_ASCII; + +import static java.nio.file.Files.createTempFile; +import static java.nio.file.Files.createTempDirectory; +import static java.nio.file.Files.deleteIfExists; +import static java.nio.file.Files.exists; +import static java.nio.file.Files.getLastModifiedTime; +import static java.nio.file.Files.lines; +import static java.nio.file.Files.walk; +import static java.nio.file.Files.write; +import java.nio.file.Path; +import java.nio.file.Paths; +import static java.nio.file.StandardWatchEventKinds.*; +import java.nio.file.WatchEvent; +import java.nio.file.WatchKey; +import java.nio.file.WatchService; + +import java.nio.file.AccessDeniedException; +import java.nio.file.NoSuchFileException; + +import java.sql.Connection; +import static java.sql.DriverManager.drivers; +import static java.sql.DriverManager.getConnection; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.Statement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.Types; + +import java.sql.SQLException; +import java.sql.SQLWarning; + +import javax.sql.rowset.RowSetProvider; +import javax.sql.rowset.WebRowSet; +import javax.sql.rowset.RowSetMetaDataImpl; + +import java.util.ArrayDeque; +import java.util.Base64; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Objects; +import static java.util.Objects.requireNonNull; +import java.util.Properties; +import java.util.Random; +import java.util.Spliterator; +import static java.util.Spliterator.IMMUTABLE; +import static java.util.Spliterator.NONNULL; +import static java.util.Spliterator.ORDERED; +import static java.util.Spliterators.spliteratorUnknownSize; +import java.util.WeakHashMap; + +import java.util.concurrent.Callable; // like a Supplier but allows exceptions! 
+import java.util.concurrent.CancellationException; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +import java.util.function.BooleanSupplier; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; + +import java.util.jar.JarFile; + +import java.util.stream.IntStream; +import java.util.stream.Stream; +import static java.util.stream.StreamSupport.stream; + +/** + * Extends the JarX extraction tool to provide a {@code resolve} method that + * replaces prefixes {@code pljava/foo/} in path names stored in the archive + * with the result of {@code pg_config --foo}. + *
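+ *
+ * For example (the file name here is only illustrative), an entry stored as
+ * {@code pljava/pkglibdir/libpljava-so-1.6.10.so} would be extracted into the
+ * directory reported by {@code pg_config --pkglibdir}.
+ *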

      + * As this represents a second extra {@code .class} file that has to be added + * to the installer jar anyway, it will also contain some methods intended to be + * useful for tasks related to installation and testing. The idea is not to go + * overboard, but supply a few methods largely modeled on the most basic ones of + * PostgreSQL's {@code PostgreSQL::Test::Cluster} Perl module (formerly named + * {@code PostgresNode}, from which the name of this class was taken). The + * methods can be invoked from {@code jshell} if its classpath includes the + * installer jar (and one of the PostgreSQL JDBC drivers). + *
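+ *
+ * For example, a basic session (the node name is only illustrative, and each
+ * call can throw an exception):
+ *
+	 * Node n = Node.get_new_node("np");
+	 * n.init();
+	 * n.start();
+	 * try ( Connection c = n.connect() )
+	 * {
+	 *   // issue queries over c
+	 * }
+	 * n.stop();
+	 * n.clean_node();
+ *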

      + * An + * introduction with examples + * is available. + *

      + * Unlike the many capabilities of {@code PostgreSQL::Test::Cluster}, this only + * deals in TCP sockets bound to {@code localhost} + * ({@code StandardProtocolFamily.UNIX} + * finally arrived in Java 16 but this class does not support it yet) and only + * a few of the most basic operations. + *

      + * As in JarX itself, some liberties with coding style may be taken here to keep + * this one extra {@code .class} file from proliferating into a bunch of them. + *

      + * As the testing-related methods here are intended for ad-hoc or scripted use + * in {@code jshell}, they are typically declared to throw any checked + * exception, without further specifics. There are many overloads of methods + * named {@code q} and {@code qp} (mnemonic of query and query-print), to make + * interactive use in {@code jshell} comfortable with just a few static imports. + */ +public class Node extends JarX { + + private Matcher m_prefix; + private int m_fsepLength; + private String m_lineSep; + private boolean m_dryrun = false; + + private static Node s_jarxHelper = new Node(null, 0, null, null); + private static boolean s_jarProcessed = false; + private static String s_examplesJar; + private static String s_sharedObject; + + /** + * Performs an ordinary installation, using {@code pg_config} or the + * corresponding system properties to learn where the files belong, and + * unpacking the files (not including this class or its ancestors) there. + */ + public static void main(String[] args) throws Exception + { + if ( args.length > 0 ) + { + System.err.println("usage: java -jar filename.jar"); + System.exit(1); + } + + s_jarxHelper.extract(); + } + + /** + * Extracts the jar contents, just as done in the normal case of running + * this class with {@code java -jar}. + *

      + * Only to be called on the singleton instance {@code s_jarxHelper}. + *

      + * For a version that doesn't really extract anything, but still primes the + * {@code resolve} method to know where things should be extracted, + * see {@link #dryExtract}. + */ + @Override + public void extract() throws Exception + { + super.extract(); + s_jarProcessed = true; + } + + /** + * Prepares the resolver, ignoring the passed string (ordinarily a script or + * rules); this resolver's rules are hardcoded. + */ + @Override + public void prepareResolver(String v) throws Exception + { + m_prefix = compile("^pljava/([^/]+dir)(?![^/])").matcher(""); + m_fsepLength = getProperty("file.separator").length(); + m_lineSep = getProperty("line.separator"); + } + + /** + * Replaces a prefix {@code pljava/}key in a path to be extracted + * with the value of the {@code pgconfig.}key system property, or + * the result of invoking {@code pg_config} (or the exact executable named + * in the {@code pgconfig} system property, if present) with the option + * {@code --}key. + */ + @Override + public String resolve(String storedPath, String platformPath) + throws Exception + { + if ( m_prefix.reset(storedPath).lookingAt() ) + { + int prefixLength = m_prefix.end(); + String key = m_prefix.group(1); + String propkey = "pgconfig." + key; + String replacement = getProperty(propkey); + if ( null == replacement ) + { + String pgc = getProperty("pgconfig", "pg_config"); + ProcessBuilder pb = new ProcessBuilder(pgc, "--"+key); + pb.redirectError(ProcessBuilder.Redirect.INHERIT); + Process proc = pb.start(); + byte[] output; + try ( InputStream instream = proc.getInputStream() ) + { + proc.getOutputStream().close(); + output = instream.readAllBytes(); + } + finally + { + int status = proc.waitFor(); + if ( 0 != status ) + { + System.err.println( + "ERROR: pg_config status is "+status); + System.exit(1); + } + } + /* + * pg_config output is the saved value followed by one \n only. + * However, on Windows, the C library treats stdout as text mode + * by default, and pg_config does nothing to change that, so the + * single \n written by pg_config gets turned to \r\n before it + * arrives here. The earlier use of the trim() method papered + * over the problem, but trim() can remove too much. Simply have + * to assume that the string will end with line.separator, and + * remove that. + */ + replacement = defaultCharset().newDecoder() + .decode(ByteBuffer.wrap(output, 0, output.length)) + .toString(); + assert replacement.endsWith(m_lineSep); + replacement = replacement.substring(0, + replacement.length() - m_lineSep.length()); + setProperty(propkey, replacement); + } + int plen = m_fsepLength - 1; /* original separator had length 1 */ + plen += prefixLength; + replacement += platformPath.substring(plen); + if ( -1 != storedPath.indexOf("/pljava-examples-") ) + s_examplesJar = replacement; + else if ( storedPath.matches( + "pljava/pkglibdir/(?:lib)?+pljava-so-.*") ) + s_sharedObject = replacement; + if ( ! m_dryrun ) + return replacement; + return null; + } + + System.err.println("WARNING: extraneous jar entry not extracted: " + + storedPath); + return null; + } + + /* + * Members below this point represent the state and behavior of an instance + * of this class that is acting as a "Node" rather than as the JarX helper. + */ + + /** + * True if the platform is determined to be Windows. + *

      + * On Windows, {@link #forWindowsCRuntime forWindowsCRuntime} should be + * applied to any {@code ProcessBuilder} before invoking it; the details of + * the transformation applied by + * {@link #asPgCtlInvocation asPgCtlInvocation} change, and + * {@link #use_pg_ctl use_pg_ctl} may prove useful, as {@code pg_ctl} on + * Windows is able to drop administrative privileges that would otherwise + * prevent {@code postgres} from starting. + */ + public static final boolean s_isWindows = + getProperty("os.name").startsWith("Windows"); + + /** + * The first form of PostgreSQL JDBC driver connection URL found to be + * recognized by an available driver, or {@code URL_FORM_NONE}. + */ + public static final int s_urlForm; + + /** + * Value of {@link #s_urlForm s_urlForm} indicating no available JDBC driver + * was found to accept any of the supported connection URL forms. + */ + public static final int URL_FORM_NONE = -1; + + /** + * Value of {@link #s_urlForm s_urlForm} indicating an available JDBC driver + * reported accepting a connection URL in the PGJDBC form starting with + * {@code "jdbc:postgresql:"}. + */ + public static final int URL_FORM_PGJDBC = 0; + + /** + * Value of {@link #s_urlForm s_urlForm} indicating an available JDBC driver + * reported accepting a connection URL in the pgjdbc-ng form starting with + * {@code "jdbc:pgsql:"}. + */ + public static final int URL_FORM_PGJDBCNG = 1; + + /** + * A function to map an {@code SQLWarning} to a rough classification + * (info, warning) of its severity. + *

      + * If the PGJDBC {@code PSQLWarning} class is available for access to the + * severity tag from the backend, "warning" will be returned if that tag is + * {@code WARNING}, and "info" will be returned in any other case. (The next + * more severe backup level is {@code ERROR}, which would not appear here as + * an {@code SQLWarning}.) + *

      + * If the severity tag is not available, "info" will be returned if the + * class (leftmost two positions of SQLState) is 00, otherwise "warning". + */ + private static final Function s_toSeverity; + + private static String s_WARNING_localized = "WARNING"; + + /** + * Changes the severity string used to recognize when the backend is sending + * a {@code WARNING}. + *

      + * When the driver is PGJDBC, the classification done here of + * {@code SQLWarning} instances into actual warning messages or informative + * ones depends on a tag ("WARNING" in English) that the backend delivers + * in the local language. For the classification to happen correctly when + * a different language is selected, use this method to supply the string + * (for example, "PERINGATAN" in Indonesian) that the backend uses for + * warnings in that language. + */ + public static void set_WARNING_localized(String s) + { + s_WARNING_localized = requireNonNull(s); + } + + static + { + String[] candidateURLs = { "jdbc:postgresql:", "jdbc:pgsql:x" }; + s_urlForm = + IntStream.range(0, candidateURLs.length) + .filter(i -> + drivers().anyMatch(d -> + { + try + { + return d.acceptsURL(candidateURLs[i]); + } + catch ( SQLException e ) + { + throw new ExceptionInInitializerError(e); + } + })) + .findFirst() + .orElse(URL_FORM_NONE); + + Function toSeverity = Node::toSeverityFallback; + + try + { + Class psqlWarning = + Class.forName("org.postgresql.util.PSQLWarning"); + Class sErrMessage = + Class.forName("org.postgresql.util.ServerErrorMessage"); + + Lookup pub = publicLookup(); + + MethodHandle getserrm = + pub.findVirtual(psqlWarning, "getServerErrorMessage", + methodType(sErrMessage)); + MethodHandle getSev = + pub.findVirtual(sErrMessage, "getSeverity", + methodType(String.class)); + + MethodHandle h = explicitCastArguments( + filterReturnValue(getserrm, getSev), + methodType(String.class, Object.class)); + + toSeverity = w -> + { + if ( psqlWarning.isInstance(w) ) + { + try + { + String s = (String)h.invokeExact(psqlWarning.cast(w)); + if ( null == s || s_WARNING_localized.equals(s) ) + return "warning"; + return "info"; + } + catch ( Throwable t ) + { + throw new UndeclaredThrowableException(t, t.getMessage()); + } + } + + return toSeverityFallback(w); + }; + } + catch ( ReflectiveOperationException e ) + { + } + + s_toSeverity = toSeverity; + } + + private static String toSeverityFallback(SQLWarning w) + { + if ( w.getSQLState().startsWith("00") ) + return "info"; + else + return "warning"; + } + + /** + * A state (see {@link #stateMachine stateMachine}) that expects nothing + * (if the driver is pgjdbc-ng) or a zero row count (if the driver is + * PGJDBC). + *

      + * For some utility statements (such as {@code CREATE EXTENSION}) with no + * result, the pgjdbc-ng driver will produce no result, while the PGJDBC + * driver produces a zero count, as it would for a DML statement that did + * not affect any rows. This state handles either case. + *

      + * When {@code URL_FORM_PGJDBCNG == s_urlForm}, this state consumes nothing + * and moves to the numerically next state. Otherwise (JDBC), it checks + * that the current object is a zero row count, consuming it and moving to + * the numerically next state if it is, returning false otherwise. + */ + public static final InvocationHandler NOTHING_OR_PGJDBC_ZERO_COUNT=(o,p,q)-> + { + int myStateNum = (int)q[0]; + if ( URL_FORM_PGJDBCNG == s_urlForm ) + return -(1 + myStateNum); + return 0 == as(Long.class, o) ? 1 + myStateNum : false; + }; + + /** + * Name of a "Node"; null for an ordinary Node instance. + */ + private final String m_name; + + /** + * A TCP port on {@code localhost} that was free when {@code get_new_node} + * was called, and is likeliest to still be free if {@code start} is then + * called without undue delay. + */ + private final int m_port; + + /** + * A temporary base directory chosen and created in {@code java.io.tmpdir} + * by {@code get_new_node} and removed by {@code clean_node}. + */ + private final Path m_basedir; + + /** + * A password generated at {@code get_new_node} time, and used by + * {@code init} as the database-superuser password passed to {@code initdb}. + */ + private final String m_password; + + /** + * The server process handle after a successful {@code start} + * via {@code pg_ctl}; null again after a successful {@code stop}. + *

      + * If {@code pg_ctl} was not used, this will be null and {@code m_server} + * will have a value. + */ + private ProcessHandle m_serverHandle; + + /** + * The server process after a successful {@code start}; null again after a + * successful {@code stop}. + *

      + * If {@code pg_ctl} was used to start the server, this will be null and + * {@code m_serverHandle} will have a value after {@code wait_for_pid_file}. + */ + private Process m_server; + + /** + * A count of connections, used to supply a distinct default + * {@code ApplicationName} per connection. + */ + private long m_connCount = 0; + + /** + * Whether to invoke {@code postgres} directly when starting the server, + * or use {@code pg_ctl} to start and stop it. + *

      + * On Windows, {@code pg_ctl} is able to drop administrator rights and + * start the server from an account that would otherwise trigger + * the server's refusal to start from a privileged account. + */ + private boolean m_usePostgres = true; + + /** + * A weakly-held collection of {@link Connection}s, so that any remaining + * unclosed when {@link #stop(UnaryOperator) stop} is called can be closed + * then. + *

      + * Java takes care of removing {@code Connection}s from this map as they + * become unreachable. In case any become unreachable before being closed, + * both supported JDBC drivers have cleaner actions that will eventually + * close them. + */ + private final WeakHashMap m_connections; + + /** + * True during a {@link #stop(UnaryOperator) stop} call. + *

      + * Used to prevent any new unclosed {@code Connection} being added to + * {@link m_connections m_connections} undetected. + */ + private boolean m_stopping = false; + + /** + * Identifying information for a "node" instance, or for the singleton + * extractor instance. + */ + @Override + public String toString() + { + if ( null == m_name ) + return "Extractor instance"; + return "\"Node\": " + m_name; + } + + /** + * Constructs an instance; all nulls for the parameters are passed by the + * static initializer to make the singleton extractor instance, and any + * other instance is constructed by {@code get_new_node} for controlling + * a PostgreSQL instance. + */ + private Node(String nodeName, int port, Path basedir, String password) + { + m_name = nodeName; + m_port = port; + m_basedir = basedir; + m_password = password; + m_connections = null == nodeName ? null : new WeakHashMap<>(); + } + + /** + * Returns a new {@code Node} that can be used to initialize and start a + * PostgreSQL instance. + *

      + * Establishes a VM shutdown hook that will stop the server (if started) + * and recursively remove the basedir before the VM exits. + */ + public static Node get_new_node(String name) throws Exception + { + byte[] pwbytes = new byte [ 6 ]; + new Random().nextBytes(pwbytes); + Node n = new Node( + requireNonNull(name), + get_free_port(), + createTempDirectory("t_pljava_" + name + "_data"), + Base64.getEncoder().encodeToString(pwbytes)); + Thread t = + new Thread(() -> + { + try + { + n.stop(); + n.clean_node(); + } + catch ( Exception e ) + { + e.printStackTrace(); + } + }, "Node " + name + " shutdown"); + Runtime.getRuntime().addShutdownHook(t); + return n; + } + + /** + * Returns a TCP port on the loopback interface that is free at the moment + * this method is called. + */ + public static int get_free_port() throws Exception + { + try (ServerSocket s = new ServerSocket(0, 0, getLoopbackAddress())) + { + return s.getLocalPort(); + } + } + + /** + * Recursively removes the basedir and its descendants. + */ + public void clean_node() throws Exception + { + clean_node(false); + } + + /** + * Recursively removes the basedir (unless keepRoot) + * and its descendants. + * @param keepRoot if true, the descendants are removed, but not the basedir + * itself. + */ + public void clean_node(boolean keepRoot) throws Exception + { + /* + * How can Java *still* not have a deleteTree()? + */ + ArrayDeque stk = new ArrayDeque<>(); + for ( Path p : (Iterable)walk(m_basedir)::iterator ) + { + while ( ! stk.isEmpty() && ! p.startsWith(stk.peek()) ) + { + Path toDelete = stk.pop(); + try + { + deleteIfExists(toDelete); + } + catch ( AccessDeniedException e ) + { + if (!toDelete.equals(data_dir().resolve("postmaster.pid"))) + throw e; + /* + * See comments for stopViaPgCtl regarding this weirdness. + */ + Thread.sleep(500); + deleteIfExists(toDelete); + } + } + stk.push(p); + } + if ( keepRoot ) + stk.pollLast(); + for ( Path p : stk ) + deleteIfExists(p); + } + + /** + * Processes the jar without really extracting, to compute + * the path mappings. + */ + private static void dryExtract() throws Exception + { + if ( s_jarProcessed ) + return; + try + { + s_jarxHelper.m_dryrun = true; + s_jarxHelper.extract(); + } + finally + { + s_jarxHelper.m_dryrun = false; + } + } + + /** + * Given a path from the archive, or any path resembling one in + * the archive (that is, always {@code /} as the separator, and starting + * with {@code pljava/}key where {@code --}key is known + * to {@code pg_config}, returns the platform-specific path where it would + * be installed. + */ + private static String resolve(String archivePath) throws Exception + { + return s_jarxHelper.resolve( + archivePath, Paths.get("", archivePath.split("/")).toString()); + } + + /** + * Returns the directory name to be used as the PostgreSQL data directory + * for this node. + */ + public Path data_dir() + { + return m_basedir.resolve("pgdata"); + } + + /** + * Like {@code init()} but returns an {@code AutoCloseable} that will + * recursively remove the files and directories under the basedir + * (but not the basedir itself) on the exit of a calling + * try-with-resources scope. 
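+ *
+ * For example (illustrative only), used with try-with-resources:
+ *
+	 * try ( AutoCloseable cluster = n.initialized_cluster();
+	 *       AutoCloseable server  = n.started_server() )
+	 * {
+	 *   // connect and run tests here
+	 * }
+ *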
+ */ + public AutoCloseable initialized_cluster() + throws Exception + { + return initialized_cluster(Map.of(), UnaryOperator.identity()); + } + + /** + * Like {@code init()} but returns an {@code AutoCloseable} that will + * recursively remove the files and directories under the basedir + * (but not the basedir itself) on the exit of a calling + * try-with-resources scope. + */ + public AutoCloseable initialized_cluster(Map suppliedOptions) + throws Exception + { + return initialized_cluster(suppliedOptions, UnaryOperator.identity()); + } + + /** + * Like {@link #init(Map,UnaryOperator) init()} but returns + * an {@code AutoCloseable} that will + * recursively remove the files and directories under the basedir + * (but not the basedir itself) on the exit of a calling + * try-with-resources scope. + */ + public AutoCloseable initialized_cluster( + UnaryOperator tweaks) + throws Exception + { + return initialized_cluster(Map.of(), tweaks); + } + + /** + * Like {@link #init(Map,UnaryOperator) init()} but returns + * an {@code AutoCloseable} that will + * recursively remove the files and directories under the basedir + * (but not the basedir itself) on the exit of a calling + * try-with-resources scope. + */ + public AutoCloseable initialized_cluster( + Map suppliedOptions, + UnaryOperator tweaks) + throws Exception + { + init(suppliedOptions, tweaks); + return () -> + { + clean_node(true); + }; + } + + /** + * Invokes {@code initdb} for the node, passing default options appropriate + * for this setting. + */ + public void init() throws Exception + { + init(Map.of(), UnaryOperator.identity()); + } + + /** + * Invokes {@code initdb} for the node, with suppliedOptions + * overriding or supplementing the ones that would be passed by default. + */ + public void init(Map suppliedOptions) throws Exception + { + init(suppliedOptions, UnaryOperator.identity()); + } + + /** + * Invokes {@code initdb} for the node, passing default options appropriate + * for this setting, and {@linkplain #init(Map,UnaryOperator) tweaks} to be + * applied to the {@code ProcessBuilder} before it is started. + */ + public void init(UnaryOperator tweaks) throws Exception + { + init(Map.of(), tweaks); + } + + /** + * Invokes {@code initdb} for the node, with suppliedOptions + * overriding or supplementing the ones that would be passed by default, + * and tweaks to be applied to the {@code ProcessBuilder} + * before it is started. + *

      + * By default, {@code postgres} will be the name of the superuser, UTF-8 + * will be the encoding, {@code auth-local} will be {@code peer} and + * {@code auth-host} will be {@code md5}. The initialization will skip + * {@code fsync} for speed rather than safety (if something goes wrong, just + * {@code clean_node()} and start over). + *
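+ *
+ * For example (illustrative only), supplying an extra initdb option:
+ *
+	 * n.init(Map.of("--locale", "C"));
+ *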

      + * The {@code initdb} that will be run is the one in the {@code bindir} + * reported by {@code pg_config} (or set by {@code -Dpgconfig.bindir}). + * @param suppliedOptions a Map where each key is an option to initdb + * (for example, --encoding), and the value corresponds. + * @param tweaks a lambda applicable to the {@code ProcessBuilder} to + * further configure it. On Windows, the tweaks will be applied ahead of + * transformation of the arguments by + * {@link #forWindowsCRuntime forWindowsCRuntime}. + */ + public void init( + Map suppliedOptions, + UnaryOperator tweaks) throws Exception + { + dryExtract(); + /* + * For extract/install purposes, there is already a resolve() method + * that expands keys like pljava/bindir to pg_config --bindir output. + */ + String initdb = resolve("pljava/bindir/initdb"); + + if ( s_isWindows ) + { + /* + * This is irksome. The mingw64 postgresql package has both + * initdb.exe and initdb, a bash script that runs it under winpty. + * If the script were not there, the .exe suffix would be added + * implicitly, but with both there, we try to exec the bash script. + */ + Path p1 = Paths.get(initdb); + Path p2 = Paths.get(initdb + ".exe"); + if ( exists(p1) && exists(p2) ) + initdb = p2.toString(); + } + + Path pwfile = createTempFile(m_basedir, "pw", ""); + + Map options = new HashMap<>(suppliedOptions); + options.putIfAbsent("--pgdata", data_dir().toString()); + options.putIfAbsent("--username", "postgres"); + options.putIfAbsent("--encoding", "utf-8"); + options.putIfAbsent("--pwfile", pwfile.toString()); + options.putIfAbsent("--auth-local", "peer"); + options.putIfAbsent("--auth-host", "md5"); + options.putIfAbsent("-N", null); + + String[] args = + Stream.concat( + Stream.of(initdb), + options.entrySet().stream() + .flatMap(e -> + null == e.getValue() + ? Stream.of(e.getKey()) + : Stream.of(e.getKey(), e.getValue())) + ) + .toArray(String[]::new); + + try + { + write(pwfile, List.of(m_password), US_ASCII); + ProcessBuilder pb = + new ProcessBuilder(args) + .redirectOutput(INHERIT) + .redirectError(INHERIT); + pb = tweaks.apply(pb); + + if ( s_isWindows ) + pb = forWindowsCRuntime(pb); + + Process p = pb.start(); + p.getOutputStream().close(); + if ( 0 != p.waitFor() ) + throw new AssertionError( + "Nonzero initdb result: " + p.waitFor()); + } + finally + { + deleteIfExists(pwfile); + } + } + + /** + * Like {@code start()} but returns an {@code AutoCloseable} that will + * stop the server on the exit of a calling try-with-resources scope. + */ + public AutoCloseable started_server() + throws Exception + { + return started_server(Map.of(), UnaryOperator.identity()); + } + + /** + * Like {@code start()} but returns an {@code AutoCloseable} that will + * stop the server on the exit of a calling try-with-resources scope. + */ + public AutoCloseable started_server(Map suppliedOptions) + throws Exception + { + return started_server(suppliedOptions, UnaryOperator.identity()); + } + + /** + * Like {@link #start(Map,UnaryOperator) start()} but returns + * an {@code AutoCloseable} that will + * stop the server on the exit of a calling try-with-resources scope. + *

      + * Supplied tweaks will be applied to the {@code ProcessBuilder} + * used to start the server; if {@code pg_ctl} is being used, they will also + * be applied when running {@code pg_ctl stop} to stop it. + */ + public AutoCloseable started_server(UnaryOperator tweaks) + throws Exception + { + return started_server(Map.of(), tweaks); + } + + /** + * Like {@link #start(Map,UnaryOperator) start()} but returns + * an {@code AutoCloseable} that will + * stop the server on the exit of a calling try-with-resources scope. + *

      + * Supplied tweaks will be applied to the {@code ProcessBuilder} + * used to start the server; if {@code pg_ctl} is being used, they will also + * be applied when running {@code pg_ctl stop} to stop it. + */ + public AutoCloseable started_server( + Map suppliedOptions, + UnaryOperator tweaks) + throws Exception + { + start(suppliedOptions, tweaks); + return () -> + { + stop(tweaks); + }; + } + + /** + * Starts a PostgreSQL server for the node with default options appropriate + * for this setting. + */ + public void start() throws Exception + { + start(Map.of(), UnaryOperator.identity()); + } + + /** + * Starts a PostgreSQL server for the node, with suppliedOptions + * overriding or supplementing the ones that would be passed by default. + */ + public void start(Map suppliedOptions) throws Exception + { + start(suppliedOptions, UnaryOperator.identity()); + } + + /** + * Starts a PostgreSQL server for the node, passing default options + * appropriate for this setting, and + * {@linkplain #start(Map,UnaryOperator) tweaks} to be + * applied to the {@code ProcessBuilder} before it is started. + */ + public void start(UnaryOperator tweaks) throws Exception + { + start(Map.of(), tweaks); + } + + /** + * Starts a PostgreSQL server for the node, with suppliedOptions + * overriding or supplementing the ones that would be passed by default, and + * tweaks to be applied to the {@code ProcessBuilder} before it + * is started. + *

      + * By default, the server will listen only on the loopback interface and + * not on any Unix-domain socket, on the port selected when this Node was + * created, and for a maximum of 16 connections. Its cluster name will be + * the name given to this Node, and fsync will be off to favor speed over + * durability. The log line prefix will be shortened to just the node name + * and (when connected) the {@code application_name}. + *
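+ *
+ * For example (illustrative only), raising the server's log verbosity:
+ *
+	 * n.start(Map.of("log_min_messages", "debug1"));
+ *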

      + * The server that will be run is the one in the {@code bindir} + * reported by {@code pg_config} (or set by {@code -Dpgconfig.bindir}). + *

      + * If the server is PostgreSQL 10 or later, it is definitely ready to accept + * connections when this method returns. If not, it is highly likely to be + * ready, but no test connection has been made to confirm it. + * @param suppliedOptions a Map where the key is a configuration variable + * name as seen in {@code postgresql.conf} or passed to the server with + * {@code -c} and the value corresponds. + * @param tweaks a lambda applicable to the {@code ProcessBuilder} to + * further configure it. Under {@link #use_pg_ctl use_pg_ctl(true)}, the + * tweaks are applied after the arguments have been transformed by + * {@link #asPgCtlInvocation asPgCtlInvocation}. On Windows, they are + * applied ahead of transformation of the arguments by + * {@link #forWindowsCRuntime forWindowsCRuntime}. + */ + public void start( + Map suppliedOptions, + UnaryOperator tweaks) throws Exception + { + if ( null != m_server && m_server.isAlive() ) + throw new IllegalStateException( + "node \"" + m_name + "\" is already running"); + + if ( null != m_serverHandle && m_serverHandle.isAlive() ) + throw new IllegalStateException( + "node \"" + m_name + "\" is already running"); + + dryExtract(); + + Stream cmd = Stream.of(resolve("pljava/bindir/postgres")); + + Map options = new HashMap<>(suppliedOptions); + options.putIfAbsent("data_directory", data_dir().toString()); + options.putIfAbsent("listen_addresses", + getLoopbackAddress().getHostAddress()); + options.putIfAbsent("port", "" + m_port); + options.putIfAbsent("unix_socket_directories", ""); + options.putIfAbsent("max_connections", "16"); + options.putIfAbsent("fsync", "off"); + options.putIfAbsent("cluster_name", m_name); + options.putIfAbsent("log_line_prefix", + m_name.replace("%", "%%") + ":%q%a:"); + + String[] args = + Stream.concat( + cmd, + options.entrySet().stream() + .flatMap(e -> + "data_directory".equals(e.getKey()) + ? Stream.of("-D", e.getValue()) + : Stream.of("-c", e.getKey() + "=" + e.getValue()) + ) + ) + .toArray(String[]::new); + + ProcessBuilder pb = + new ProcessBuilder(args) + .redirectOutput(INHERIT) + .redirectError(INHERIT); + + if ( ! m_usePostgres ) + pb = asPgCtlInvocation(pb); + + pb = tweaks.apply(pb); + + if ( s_isWindows ) + pb = forWindowsCRuntime(pb); + + Process p = pb.start(); + p.getOutputStream().close(); + try + { + wait_for_pid_file(p, p.info()); + if ( m_usePostgres ) + m_server = p; // else wait_for_pid_file has set m_serverHandle + } + finally + { + if ( m_server == p ) + return; + if ( p.isAlive() ) + p.destroy(); + } + } + + + /** + * Stops the server instance associated with this Node. + *

      + * Has the effect of {@link #stop(UnaryOperator) stop(tweaks)} without + * any tweaks. + */ + public void stop() throws Exception + { + stop(UnaryOperator.identity()); + } + + /** + * Stops the server instance associated with this Node. + *

      + * No effect if it has not been started or has already been stopped, but + * a message to standard error is logged if the server had been started and + * the process is found to have exited unexpectedly. + * @param tweaks tweaks to apply to a ProcessBuilder; unused unless + * {@code pg_ctl} will be used to stop the server. When used, they are + * applied ahead of the transformation of the arguments by + * {@link #forWindowsCRuntime forWindowsCRuntime} used on Windows. + */ + public void stop(UnaryOperator tweaks) throws Exception + { + if ( null == ( m_usePostgres ? m_server : m_serverHandle ) ) + return; + + try + { + Connection[] connections; + + synchronized ( this ) + { + m_stopping = true; + connections = // Java >= 10: use a List and List.copyOf + m_connections.keySet().stream().toArray(Connection[]::new); + m_connections.clear(); + } + + for ( Connection c : connections ) + { + try + { + c.close(); + } + catch ( Exception e ) + { + } + } + + if ( ! m_usePostgres ) + { + stopViaPgCtl(tweaks); + return; + } + if ( m_server.isAlive() ) + { + m_server.destroy(); + m_server.waitFor(); + m_server = null; + return; + } + System.err.println("Server had already exited with status " + + m_server.exitValue()); + m_server = null; + } + finally + { + synchronized ( this ) + { + m_stopping = false; + } + } + } + + private void stopViaPgCtl(UnaryOperator tweaks) + throws Exception + { + if ( ! m_serverHandle.isAlive() ) + { + System.err.println("Server had already exited"); + m_serverHandle = null; + return; + } + + String pg_ctl = resolve("pljava/bindir/pg_ctl"); + ProcessBuilder pb = new ProcessBuilder( + pg_ctl, "stop", "-D", data_dir().toString(), "-m", "fast") + .redirectOutput(INHERIT) + .redirectError(INHERIT); + pb = tweaks.apply(pb); + + if ( s_isWindows ) + pb = forWindowsCRuntime(pb); + + Process p = pb.start(); + p.getOutputStream().close(); + + if ( 0 != p.waitFor() ) + { + /* + * Here is a complication. On Windows, pg_ctl suffers from a race + * condition that can occasionally cause it to exit with a nonzero + * status and a "permission denied" message about postmaster.pid, + * while the server is otherwise successfully stopped: + * www.postgresql.org/message-id/16922.1520722108%40sss.pgh.pa.us + * + * Without capturing the stderr of the process (too much bother), we + * won't know for sure if that is the message, but if the exit value + * was nonzero, just wait a bit and see if the server has gone away; + * if it has, don't worry about it. + */ + Thread.sleep(1000); + if ( m_serverHandle.isAlive() ) + throw new AssertionError( + "Nonzero pg_ctl stop result: " + p.waitFor()); + } + m_serverHandle = null; + } + + /** + * Sets whether to use {@code pg_ctl} to start and stop the server + * (if true), or start {@code postgres} and stop it directly (if false, + * the default). + *

      + * On Windows, {@code pg_ctl} is able to drop administrator rights and + * start the server from an account that would otherwise trigger + * the server's refusal to start from a privileged account. + */ + public void use_pg_ctl(boolean setting) + { + if ( null != m_server || null != m_serverHandle ) + throw new IllegalStateException( + "use_pg_ctl may not be called while server is started"); + m_usePostgres = ! setting; + } + + /** + * Returns a {@code Connection} to the server associated with this Node, + * using default properties appropriate for this setting. + */ + public Connection connect() throws Exception + { + return connect(new Properties()); + } + + /** + * Returns a {@code Connection} to the server associated with this Node, + * with suppliedProperties overriding or supplementing the ones + * that would be passed by default. + */ + public Connection connect(Map suppliedProperties) + throws Exception + { + Properties p = new Properties(); + p.putAll(suppliedProperties); + return connect(p); + } + + /** + * Returns a {@code Connection} to the server associated with this Node, + * with supplied properties p overriding or supplementing the ones + * that would be passed by default. + *

      + * By default, the connection is to the {@code postgres} database as the + * {@code postgres} user, using the password internally generated for this + * node, and with an {@code application_name} generated from a counter of + * connections for this node. + */ + public Connection connect(Properties p) throws Exception + { + String url; + String dbNameKey; + String appNameKey; + + switch ( s_urlForm ) + { + case URL_FORM_PGJDBC: + url = "jdbc:postgresql://localhost:" + m_port + '/'; + dbNameKey = "PGDBNAME"; + appNameKey = "ApplicationName"; + break; + case URL_FORM_PGJDBCNG: + url = "jdbc:pgsql://localhost:" + m_port + '/'; + dbNameKey = "database.name"; + appNameKey = "application.name"; + break; + default: + throw new UnsupportedOperationException( + "no recognized JDBC driver found to connect to the node"); + } + + p = (Properties)p.clone(); + p.putIfAbsent(dbNameKey, "postgres"); + p.putIfAbsent("user", "postgres"); + p.putIfAbsent("password", m_password); + p.computeIfAbsent(appNameKey, o -> "Conn" + (m_connCount++)); + + if ( URL_FORM_PGJDBCNG == s_urlForm ) + { + /* + * Contrary to its documentation, pgjdbc-ng does *not* accept a URL + * with the database name omitted. It is no use having it in the + * properties here; it must be appended to the URL. + */ + url += encode(p.getProperty(dbNameKey), "UTF-8"); + } + + Connection c = getConnection(url, p); + + synchronized ( this ) + { + if ( m_stopping ) + { + try + { + throw new IllegalStateException( + "Node " + m_name + " is being stopped"); + } + finally + { + c.close(); // add any exception as 'suppressed' to above + } + } + m_connections.put(c, null); + return c; + } + } + + /** + * Sets a configuration variable on the server. + *
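A small sketch of supplementing those defaults, assuming a running Node n and this class's helpers in scope; "ApplicationName" is the PGJDBC property key, while pgjdbc-ng would use "application.name".

    import java.sql.Connection;
    import java.util.Map;

    try ( Connection c = n.connect(Map.of("ApplicationName", "doc-example")) )
    {
        qp(c, "SHOW application_name");
    }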

      + * This deserves a convenience method because the most familiar PostgreSQL + * syntax for SET doesn't lend itself to parameterization. + * @return a {@linkplain #q(Statement,Callable) result stream} from + * executing the statement + */ + public static Stream setConfig( + Connection c, String settingName, String newValue, boolean isLocal) + throws Exception + { + PreparedStatement ps = + c.prepareStatement("SELECT pg_catalog.set_config(?,?,?)"); + ps.setString(1, settingName); + ps.setString(2, newValue); + ps.setBoolean(3, isLocal); + return q(ps, ps::execute); + } + + /** + * Loads PL/Java (with a {@code LOAD} command, + * not {@code CREATE EXTENSION}). + *

      + * This was standard procedure in PostgreSQL versions that pre-dated the + * extension support. It is largely obsolete with the advent of + * {@code CREATE EXTENSION}, but still has one distinct use case: + * this is what will work if you do not have administrative access + * to install PL/Java's files in the standard directories where + * {@code CREATE EXTENSION} expects them, but can only place them in some + * other location the server can read. Then you simply have to make sure + * that {@code pljava.module_path} is set correctly to locate the jar files, + * and give the correct shared-object path to {@code LOAD} (which this + * method does). + *

      + * It is also useful to see better diagnostics if something is going wrong, + * as PostgreSQL severely suppresses diagnostic messages during + * {@code CREATE EXTENSION}. + * @return a {@linkplain #q(Statement,Callable) result stream} from + * executing the statement + */ + public static Stream loadPLJava(Connection c) throws Exception + { + dryExtract(); + Statement s = c.createStatement(); + String whatToLoad = s_sharedObject; + + /* + * MinGW-w64 does not fail if the .lib suffix is left in place, but + * MSVC does, and MinGW-w64 also allows it to be removed. + */ + if ( s_isWindows ) + whatToLoad = whatToLoad.replaceFirst("\\.lib$", ""); + + String sql = "LOAD " + s.enquoteLiteral(whatToLoad); + return q(s, () -> s.execute(sql)); + } + + /** + * Installs a jar. + * @return a {@linkplain #q(Statement,Callable) result stream} from + * executing the statement + */ + public static Stream installJar( + Connection c, String uri, String jarName, boolean deploy) + throws Exception + { + PreparedStatement ps = + c.prepareStatement("SELECT sqlj.install_jar(?,?,?)"); + ps.setString(1, uri); + ps.setString(2, jarName); + ps.setBoolean(3, deploy); + return q(ps, ps::execute); + } + + /** + * Removes a jar. + * @return a {@linkplain #q(Statement,Callable) result stream} from + * executing the statement + */ + public static Stream removeJar( + Connection c, String jarName, boolean undeploy) + throws Exception + { + PreparedStatement ps = + c.prepareStatement("SELECT sqlj.remove_jar(?,?)"); + ps.setString(1, jarName); + ps.setBoolean(2, undeploy); + return q(ps, ps::execute); + } + + /** + * Sets the class path for a schema. + * @return a {@linkplain #q(Statement,Callable) result stream} from + * executing the statement + */ + public static Stream setClasspath( + Connection c, String schema, String... jarNames) + throws Exception + { + PreparedStatement ps = + c.prepareStatement("SELECT sqlj.set_classpath(?,?)"); + ps.setString(1, schema); + ps.setString(2, String.join(":", jarNames)); + return q(ps, ps::execute); + } + + /** + * Appends a jar to a schema's class path if not already included. + * @return a {@linkplain #q(Statement,Callable) result stream} that + * includes, on success, a one-column {@code void} result set with a single + * row if the jar was added to the path, and no rows if the jar was already + * included. + */ + public static Stream appendClasspathIf( + Connection c, String schema, String jarName) + throws Exception + { + PreparedStatement ps = c.prepareStatement( + "SELECT" + + " sqlj.set_classpath(" + + " schema," + + " pg_catalog.concat_ws(" + + " ':'," + + " VARIADIC oldpath OPERATOR(pg_catalog.||) ARRAY[jar]" + + " )" + + " )" + + "FROM" + + " (VALUES (?, CAST (? AS pg_catalog.text))) AS p(schema, jar)," + + " COALESCE(" + + " pg_catalog.regexp_split_to_array(" + + " sqlj.get_classpath(schema)," + + " ':'" + + " )," + + " CAST (ARRAY[] AS pg_catalog.text[])" + + " ) AS t(oldpath)" + + "WHERE" + + " jar OPERATOR(pg_catalog.<>) ALL (oldpath)" + ); + ps.setString(1, schema); + ps.setString(2, jarName); + return q(ps, ps::execute); + } + + /** + * Executes some arbitrary SQL + * @return a {@linkplain #q(Statement,Callable) result stream} from + * executing the statement + */ + public static Stream q(Connection c, String sql) throws Exception + { + Statement s = c.createStatement(); + return q(s, () -> s.execute(sql)); + } + + /** + * Produces a {@code Stream} of the (in JDBC, possibly multiple) results + * from some {@code execute} method on a {@code Statement}. + *
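For orientation, a sketch of how the helpers above might compose in one session, assuming a connection c and this class's helpers in scope; the module path, jar URI, and jar name are placeholders, not values this class supplies.

    qp(setConfig(c, "pljava.module_path",                           // hypothetical install location
        "/opt/pljava/pljava.jar:/opt/pljava/pljava-api.jar", false));
    qp(loadPLJava(c));
    qp(installJar(c, "file:/tmp/myapp.jar", "myapp", true));        // hypothetical jar and name
    qp(setClasspath(c, "public", "myapp"));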

      + * This is how, for example, to prepare, then examine the results of, a + * {@code PreparedStatement}: + *

      +	 * PreparedStatement ps = conn.prepareStatement("select foo(?,?)");
      +	 * ps.setInt(1, 42);
      +	 * ps.setString(2, "surprise!");
      +	 * q(ps, ps::execute);
      +	 *
      + *

      + * Each result in the stream will be an instance of one of: + * {@code ResultSet}, {@code Long} (an update count, positive or zero), + * {@code SQLWarning}, or some other {@code SQLException}. A warning or + * exception may have others chained to it, which its own {@code iterator} + * or {@code forEach} methods should be used to traverse; or, use + * {@code flatMap(}{@link #flattenDiagnostics Node::flattenDiagnostics}) to + * obtain a stream presenting each diagnostic in a chain in turn. The + * {@code Callable} interface supplying the work to be done allows any + * checked exception, but any {@code Throwable} outside the + * {@code SQLException} hierarchy will simply be rethrown from here rather + * than delivered in the stream. Any {@code Throwable} thrown by + * work will result in the {@code Statement} being closed. + * Otherwise, it remains open until the returned stream is closed. + *
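A sketch of consuming such a stream by hand, assuming a connection c and this class's helpers in scope; it simply branches on the element kinds listed above.

    import java.sql.ResultSet;
    import java.util.stream.Stream;

    try ( Stream<Object> results = q(c, "VALUES (1), (2)") )
    {
        for ( Object o : (Iterable<Object>)results::iterator )
        {
            if ( o instanceof ResultSet )
                qp((ResultSet)o);                      // dump the rows
            else if ( o instanceof Throwable )
                qp((Throwable)o);                      // a warning or error
            else
                System.out.println("update count: " + o);
        }
    }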

      + * Exists mainly to encapsulate the rather fiddly logic of extracting that + * sequence of results using the {@code Statement} API. + * @param s the Statement from which to extract results + * @param work a Callable that will invoke one of the Statement's execute + * methods returning a boolean that indicates whether the first result is a + * ResultSet. Although the Callable interface requires the boolean result to + * be boxed, it must not return null. + * @return a Stream as described above. + */ + public static Stream q(final Statement s, Callable work) + throws Exception + { + final Object[] nextHolder = new Object [ 1 ]; + Object seed; + boolean isResultSet; + + /* + * The Statement must not be closed in a finally, or a + * try-with-resources, because if successful it needs to remain open + * as long as the result stream is being read. It will be closed when + * the stream is. + * + * However, in any exceptional exit, the Statement must be closed here. + */ + try + { + isResultSet = work.call(); + } + catch (Throwable t) + { + s.close(); + if ( t instanceof SQLException ) + return Stream.of(t); + throw t; + } + + final Supplier resultSet = () -> + { + try + { + return s.getResultSet(); + } + catch ( SQLException e ) + { + return e; + } + }; + + final Supplier updateCount = () -> + { + try + { + long count = s.getLargeUpdateCount(); + return ( -1 == count ) ? null : count; + } + catch ( SQLException e ) + { + return e; + } + }; + + final Supplier warnings = () -> + { + try + { + SQLWarning w = s.getWarnings(); + if ( null != w ) + { + try + { + s.clearWarnings(); + } + catch ( SQLException e ) + { + nextHolder [ 0 ] = e; + } + } + return w; + } + catch ( SQLException e ) + { + return e; + } + }; + + /* + * First get warnings, if any. + * There is a remote chance this can return an exception rather than a + * warning, an even more remote chance it returns a warning and leaves + * an exception in nextHolder. + * Only if it did neither is there any point in proceeding to get an + * update count or result set. + * If we do, and there was a warning, we use the warning as the seed and + * save the first update count or result set in nextHolder. + */ + seed = warnings.get(); + if ( (null == seed || seed instanceof SQLWarning) + && null == nextHolder [ 0 ] ) + { + Object t; + if ( isResultSet ) + t = resultSet.get(); + else + t = updateCount.get(); + if ( null == seed ) + seed = t; + else + nextHolder [ 0 ] = t; + } + + UnaryOperator next = o -> + { + if ( o instanceof SQLException && !(o instanceof SQLWarning) ) + return null; + + o = nextHolder [ 0 ]; + if ( null != o ) + { + nextHolder [ 0 ] = null; + return o; + } + + o = warnings.get(); + if ( null != o ) + return o; + + try + { + if ( s.getMoreResults() ) + return resultSet.get(); + return updateCount.get(); + } + catch ( SQLException e ) + { + return e; + } + }; + + return Stream.iterate(seed, Objects::nonNull, next) + .onClose(() -> + { + try + { + s.close(); + } + catch ( SQLException e ) + { + } + } + ); + } + + /** + * Analogously to {@link #q(Statement,Callable) q(Statement,...)}, produces + * a {@code Stream} with an element for each row of a {@code ResultSet}, + * interleaved with any {@code SQLWarning}s reported on the result set, or + * an {@code SQLException} if one is thrown. + *

      + * This is supplied chiefly for use driving a + * {@link #stateMachine state machine} to verify + * contents of a result set. For each row, the element in the stream will be + * an instance of {@code Long}, counting up from 1 (intended to match the + * result set's {@code getRow} but without relying on it, as JDBC does not + * require every implementation to support it). By itself, of course, this + * does not convey any of the content of the row; the lambdas representing + * the machine states should close over the result set and query it for + * content, perhaps not even using the object supplied here (except to + * detect when it is a warning or exception rather than a row number). + * The row position of the result set will have been updated, and should not + * be otherwise modified when this method is being used to walk through the + * results. + *

      + * For the same reason, don't try any funny business like sorting the stream + * in any way. The {@code ResultSet} will only be read forward, and each row + * only once. Simple filtering, {@code dropWhile}/{@code takeWhile}, and so + * on will work, but may be more conveniently rolled into the design of a + * {@link #stateMachine state machine}, as nearly any use of a + * {@code ResultSet} can throw {@code SQLException} and therefore isn't + * convenient in the stream API. + *

      + * Passing this result to {@code qp} as if it came from a {@code Statement} + * could lead to confusion, as the {@code Long} elements would be printed as + * update counts rather than row numbers. + * @param rs a ResultSet + * @return a Stream as described above + */ + public static Stream q(final ResultSet rs) + throws Exception + { + final Object[] nextHolder = new Object [ 1 ]; + final long[] nextRowNumber = new long [ 1 ]; + nextRowNumber [ 0 ] = 1L; + Object seed; + + /* + * The ResultSet must not be closed in a finally, or a + * try-with-resources, because if successful it needs to remain open + * as long as the result stream is being read. It will be closed when + * the stream is. + * + * However, in any exceptional exit, the ResultSet must be closed here. + */ + + final Supplier row = () -> + { + try + { + if ( rs.next() ) + return nextRowNumber [ 0 ] ++; + return null; + } + catch ( SQLException e ) + { + return e; + } + }; + + final Supplier warnings = () -> + { + try + { + SQLWarning w = rs.getWarnings(); + if ( null != w ) + { + try + { + rs.clearWarnings(); + } + catch ( SQLException e ) + { + nextHolder [ 0 ] = e; + } + } + return w; + } + catch ( SQLException e ) + { + return e; + } + }; + + /* + * First get warnings, if any. + * There is a remote chance this can return an exception rather than a + * warning, an even more remote chance it returns a warning and leaves + * an exception in nextHolder. + * Only if it did neither is there any point in proceeding to get a row. + * If we do, and there was a warning, we use the warning as the seed and + * save the first row in nextHolder. + */ + seed = warnings.get(); + if ( (null == seed || seed instanceof SQLWarning) + && null == nextHolder [ 0 ] ) + { + Object t = row.get(); + if ( null == seed ) + seed = t; + else + nextHolder [ 0 ] = t; + } + + UnaryOperator next = o -> + { + if ( o instanceof SQLException && !(o instanceof SQLWarning) ) + return null; + + o = nextHolder [ 0 ]; + if ( null != o ) + { + nextHolder [ 0 ] = null; + return o; + } + + o = warnings.get(); + if ( null != o ) + return o; + + return row.get(); + }; + + return Stream.iterate(seed, Objects::nonNull, next) + .onClose(() -> + { + try + { + rs.close(); + } + catch ( SQLException e ) + { + } + } + ); + } + + /** + * Produces a {@code Stream} with an element for each column of a + * {@code ResultSet}. + *

      + * This is another convenience method for use chiefly in driving a + * {@link #stateMachine state machine} to check per-column values + * or metadata for a {@code ResultSet}. It is, in fact, nothing other than + * {@code IntStream.rangeClosed(1, rsmd.getColumnCount()).boxed()} but typed + * as {@code Stream}. + *

      + * As with {@link #q(ResultSet) q(ResultSet)}, the column number supplied + * here conveys no actual column data or metadata. The lambdas representing + * the machine states should close over the {@code ResultSetMetaData} or + * corresponding {@code ResultSet} object, or both, and use the column + * number from this stream to index them. + * @param rsmd a ResultSetMetaData object + * @return a Stream as described above + */ + public static Stream q(final ResultSetMetaData rsmd) + throws Exception + { + return + IntStream.rangeClosed(1, rsmd.getColumnCount()) + .mapToObj(i -> (Object)i); + } + + /** + * Produces a {@code Stream} with an element for each parameter of a + * {@code PreparedStatement}. + *

      + * This is another convenience method for use chiefly in driving a + * {@link #stateMachine state machine} to check per-parameter metadata. + * It is, in fact, nothing other than + * {@code IntStream.rangeClosed(1, rsmd.getParameterCount()).boxed()} + * but typed as {@code Stream}. + *

      + * As with {@link #q(ResultSet) q(ResultSet)}, the column number supplied + * here conveys no actual parameter metadata. The lambdas representing + * the machine states should close over the {@code ParameterMetaData} object + * and use the parameter number from this stream to index it. + * @param pmd a ParameterMetaData object + * @return a Stream as described above + */ + public static Stream q(final ParameterMetaData pmd) + throws Exception + { + return + IntStream.rangeClosed(1, pmd.getParameterCount()) + .mapToObj(i -> (Object)i); + } + + /** + * Executes some arbitrary SQL and passes + * the {@linkplain #q(Statement,Callable) result stream} + * to {@link #qp(Stream)} for printing to standard output. + */ + public static void qp(Connection c, String sql) throws Exception + { + qp(q(c, sql)); + } + + /** + * Invokes some {@code execute} method on a {@code Statement} and passes + * the {@linkplain #q(Statement,Callable) result stream} + * to {@link #qp(Stream)} for printing to standard output. + *

      + * This is how, for example, to prepare, then print the results of, a + * {@code PreparedStatement}: + *

      +	 * PreparedStatement ps = conn.prepareStatement("select foo(?,?)");
      +	 * ps.setInt(1, 42);
      +	 * ps.setString(2, "surprise!");
      +	 * qp(ps, ps::execute);
      +	 *
      + * The {@code Statement} will be closed. + */ + public static void qp(Statement s, Callable work) throws Exception + { + qp(q(s, work)); + } + + /** + * Returns true if the examples jar includes the + * {@code org.postgresql.pljava.example.saxon.S9} class (meaning the + * appropriate Saxon jar must be installed and on the classpath first before + * the examples jar can be deployed, unless {@code check_function_bodies} + * is {@code off} to skip dependency checking). + */ + public static boolean examplesNeedSaxon() throws Exception + { + dryExtract(); + try ( JarFile jf = new JarFile(s_examplesJar) ) + { + return jf.stream().anyMatch(e -> + "org/postgresql/pljava/example/saxon/S9.class" + .equals(e.getName())); + } + } + + /** + * Installs the examples jar, under the name {@code examples}. + *

      + * The jar is specified by a {@code file:} URI and the path is the one where + * this installer installed (or would have installed) it. + * @return a {@linkplain #q(Statement,Callable) result stream} from + * executing the statement + */ + public static Stream installExamples(Connection c, boolean deploy) + throws Exception + { + dryExtract(); + String uri = Paths.get(s_examplesJar).toUri() + .toString().replaceFirst("^file:///", "file:/"); + return installJar(c, uri, "examples", deploy); + } + + /** + * Installs the examples jar, under the name {@code examples}, and appends + * it to the class path for schema {@code public}. + *

      + * The return of a concatenated result stream from two consecutive + * statements might be likely to fail in cases where the first + * statement has any appreciable data to return, but the drivers seem to + * handle it at least in this case where each statement just returns one + * row / one column of {@code void}. And it is convenient. + * @return a combined {@linkplain #q(Statement,Callable) result stream} from + * executing the statements + */ + public static Stream installExamplesAndPath( + Connection c, boolean deploy) + throws Exception + { + Stream s1 = installExamples(c, deploy); + Stream s2 = appendClasspathIf(c, "public", "examples"); + return Stream.concat(s1, s2); + } + + /** + * Installs a Saxon jar under the name {@code saxon}, given the path to a + * local Maven repo and the needed version of Saxon, assuming the jar has + * been downloaded there already. + * @return a {@linkplain #q(Statement,Callable) result stream} from + * executing the statement + */ + public static Stream installSaxon( + Connection c, String repo, String version) + throws Exception + { + Path p = Paths.get( + repo, "net", "sf", "saxon", "Saxon-HE", version, + "Saxon-HE-" + version + ".jar"); + return installJar(c, "file:" + p, "saxon", false); + } + + /** + * Installs a Saxon jar under the name {@code saxon}, and appends it to the + * class path for schema {@code public}. + * @return a combined {@linkplain #q(Statement,Callable) result stream} from + * executing the statements + */ + public static Stream installSaxonAndPath( + Connection c, String repo, String version) + throws Exception + { + Stream s1 = installSaxon(c, repo, version); + Stream s2 = appendClasspathIf(c, "public", "saxon"); + return Stream.concat(s1, s2); + } + + /** + * A four-fer: installs Saxon, adds it to the class path, then installs the + * examples jar, and updates the classpath to include both. + * @param repo the base directory of a local Maven repository into which the + * Saxon jar has been downloaded + * @param version the needed version of Saxon + * @param deploy whether to run the example jar's deployment code + * @return a combined {@linkplain #q(Statement,Callable) result stream} from + * executing the statements + */ + public static Stream installSaxonAndExamplesAndPath( + Connection c, String repo, String version, boolean deploy) + throws Exception + { + Stream s1 = installSaxonAndPath(c, repo, version); + Stream s2 = installExamplesAndPath(c, deploy); + return Stream.concat(s1, s2); + } + + /** + * A flat-mapping function to expand any {@code SQLException} or + * {@code SQLWarning} instance in a result stream into the stream of + * possibly multiple linked diagnostics and causes in the encounter order + * of the {@code SQLException} iterator. + *
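A sketch of how these installers might be combined in a CI-style script, assuming a connection c; the mavenRepo default and the Saxon version are placeholders.

    String repo = System.getProperty("mavenRepo", "/root/.m2/repository"); // placeholder
    if ( examplesNeedSaxon() )
        qp(installSaxonAndExamplesAndPath(c, repo, "12.4", true));         // placeholder version
    else
        qp(installExamplesAndPath(c, true));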

      + * Any other object is returned in a singleton stream. + *

      + * To flatten just the chain of {@code SQLWarning} or {@code SQLException} + * but with each of those retaining its own list of {@code cause}s, see + * {@link #semiFlattenDiagnostics semiFlattenDiagnostics}. + */ + public static Stream flattenDiagnostics(Object oneResult) + { + if ( oneResult instanceof SQLException ) + { + Spliterator s = spliteratorUnknownSize( + ((SQLException)oneResult).iterator(), + IMMUTABLE | NONNULL | ORDERED); + return stream(s, false); + } + return Stream.of(oneResult); + } + + /** + * A flat-mapping function to expand any {@code SQLException} or + * {@code SQLWarning} instance in a result stream into the stream of + * possibly multiple linked diagnostics in the order produced by + * {@code getNextException} or {@code getNextWarning}. + *

      + * Unlike {@code flattenDiagnostics}, this method does not descend into + * chains of causes; those may be retrieved in the usual way from the + * throwables returned on this stream. + *

      + * Any other object is returned in a singleton stream. + */ + public static Stream semiFlattenDiagnostics(Object oneResult) + { + UnaryOperator next; + + if ( oneResult instanceof SQLWarning ) + next = o -> ((SQLWarning)o).getNextWarning(); + else if ( oneResult instanceof SQLException ) + next = o -> ((SQLException)o).getNextException(); + else + return Stream.of(oneResult); + + return Stream.iterate(oneResult, Objects::nonNull, next); + } + + /** + * Prints streamed results of a {@code Statement} in (somewhat) readable + * fashion. + *
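A sketch of using the flatteners, assuming a connection c; the deliberately failing query is arbitrary, and Node::peek is used in the terminal step because, unlike qp(Object), it declares no checked exception.

    import java.sql.SQLException;
    import java.util.stream.Stream;

    try ( Stream<Object> s = q(c, "SELECT no_such_function_xyz()") )  // intentionally failing query
    {
        s.flatMap(Node::flattenDiagnostics)
         .filter(o -> o instanceof SQLException)      // SQLWarning is a subclass, so both kinds pass
         .forEach(Node::peek);                        // printed as error/warning elements
    }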

      + * Uses {@code writeXml} of {@code WebRowSet}, which is very verbose, but + * about the easiest way to readably dump a {@code ResultSet} in just a + * couple lines of code. + *

      + * The supplied stream is flattened (see + * {@link #semiFlattenDiagnostics semiFlattenDiagnostics}) so that any + * chained {@code SQLException}s or {@code SQLWarning}s are printed + * in sequence. + */ + public static void qp(Stream s) throws Exception + { + qp(s, Node::semiFlattenDiagnostics); + } + + /** + * Prints streamed results of a {@code Statement} in (somewhat) readable + * fashion, with a choice of flattener for diagnostics. + *

+ * For flattener, see {@link #flattenDiagnostics flattenDiagnostics} + * or {@link #semiFlattenDiagnostics semiFlattenDiagnostics}. + */ + public static void qp( + Stream<Object> s, Function<Object,Stream<Object>> flattener) + throws Exception + { + try ( Stream<Object> flat = s.flatMap(flattener) ) + { + for ( Object o : (Iterable<Object>)flat::iterator ) + qp(o); + } + } + + /** + * Overload of {@code qp} for direct application to any one {@code Object} + * obtained from a result stream. + *

      + * Simply applies the specialized treatment appropriate to the class of + * the object. + */ + public static void qp(Object o) throws Exception + { + if ( o instanceof ResultSet ) + { + try (ResultSet rs = (ResultSet)o) + { + qp(rs); + } + } + else if ( o instanceof Long ) + System.out.println(""); + else if ( o instanceof Throwable ) + qp((Throwable)o); + else + System.out.println(""); + } + + /** + * Prints an object in the manner of {@link #qp(Object) qp}, but in a way + * suitable for use in {@link Stream#peek Stream.peek}. + *

      + * If o is a {@code ResultSet}, only its metadata will be printed; + * its position will not be disturbed and it will not be closed. This method + * throws no checked exceptions, as the {@code Stream} API requires; any + * that is caught will be printed as if by {@link #qp(Throwable) qp}. + */ + public static void peek(Object o) + { + try + { + int[] dims = voidResultSetDims(o, true); // only peek + + if ( null != dims ) + { + System.out.printf(voidResultSet, dims[0], dims[1]); + return; + } + + if ( o instanceof ResultSet ) + qp(((ResultSet)o).getMetaData()); + else + qp(o); + } + catch ( Exception e ) + { + qp(e); + } + } + + private static final String voidResultSet = "%n"; + + /** + * Overload of {@code qp} for direct application to a {@code ResultSet}. + *
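A sketch of the intended Stream.peek usage, assuming a connection c and this class's helpers in scope; the query is arbitrary.

    import java.sql.ResultSet;
    import java.util.stream.Stream;

    try ( Stream<Object> s = q(c, "TABLE pg_catalog.pg_language") )
    {
        long nResults = s.peek(Node::peek)             // one-line metadata trace per result
                         .filter(o -> o instanceof ResultSet)
                         .count();
        System.out.println(nResults + " result set(s)");
    }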

      + * Sometimes one has a {@code ResultSet} that didn't come from executing + * a query, such as from a JDBC metadata method. This prints it the same way + * {@code qp} on a query result would. The {@code ResultSet} is not closed + * (but will have been read through the last row). + *

      + * A result set with no columns of type other than {@code void} will be + * printed in an abbreviated form, showing its number of rows and columns + * as reported by {@link #voidResultSetDims voidResultSetDims}. + */ + public static void qp(ResultSet rs) throws Exception + { + int[] dims = voidResultSetDims(rs); + + if ( null != dims ) + { + System.out.printf(voidResultSet, dims[0], dims[1]); + return; + } + + WebRowSet wrs = RowSetProvider.newFactory().createWebRowSet(); + try + { + wrs.populate(rs); + wrs.writeXml(System.out); + } + finally + { + wrs.close(); + } + } + + /** + * Overload of {@code qp} for examining {@code ParameterMetaData}. + *

      + * Continuing in the spirit of getting something reasonably usable without + * a lot of code, this fakes up a {@code ResultSetMetaData} with the same + * values present in the {@code ParameterMetaData} (and nothing for the + * ones that aren't, like names), and then uses {@code WebRowSet.writeXml} + * as if dumping a result set. + *

      + * For getting a quick idea what the parameters are, it's good enough. + */ + public static void qp(ParameterMetaData md) throws Exception + { + RowSetMetaDataImpl mdi = new RowSetMetaDataImpl(); + mdi.setColumnCount(md.getParameterCount()); + for ( int i = 1; i <= md.getParameterCount(); ++ i ) + { + mdi.setColumnType(i, md.getParameterType(i)); + mdi.setColumnTypeName(i, md.getParameterTypeName(i)); + int precision = md.getPrecision(i); + mdi.setPrecision(i, precision > 0 ? precision : 0); + mdi.setScale(i, md.getScale(i)); + mdi.setNullable(i, md.isNullable(i)); + mdi.setSigned(i, md.isSigned(i)); + } + qp(mdi); + } + + /** + * Overload of {@code qp} for examining {@code ResultSetMetaData}. + *

      + * This makes an empty {@code WebRowSet} with the copied metadata, and dumps + * it with {@code writeXml}. Owing to a few missing setters on Java's + * {@link RowSetMetaDataImpl}, a few {@code ResultSetMetaData} attributes + * will not have been copied; they'll be wrong (unless the real values + * happen to match the defaults). That could be fixed by extending that + * class, but that would require yet another extra class file added to the + * installer jar. + */ + public static void qp(ResultSetMetaData md) throws Exception + { + RowSetMetaDataImpl mdi = new RowSetMetaDataImpl(); + mdi.setColumnCount(md.getColumnCount()); + for ( int i = 1; i <= md.getColumnCount(); ++ i ) + { + mdi.setColumnType(i, md.getColumnType(i)); + mdi.setColumnTypeName(i, md.getColumnTypeName(i)); + int precision = md.getPrecision(i); + mdi.setPrecision(i, precision > 0 ? precision : 0); + mdi.setScale(i, md.getScale(i)); + mdi.setNullable(i, md.isNullable(i)); + mdi.setSigned(i, md.isSigned(i)); + + mdi.setAutoIncrement(i, md.isAutoIncrement(i)); + mdi.setCaseSensitive(i, md.isCaseSensitive(i)); + mdi.setCatalogName(i, md.getCatalogName(i)); + mdi.setColumnDisplaySize(i, md.getColumnDisplaySize(i)); + mdi.setColumnLabel(i, md.getColumnLabel(i)); + mdi.setColumnName(i, md.getColumnName(i)); + mdi.setCurrency(i, md.isCurrency(i)); + mdi.setSchemaName(i, md.getSchemaName(i)); + mdi.setSearchable(i, md.isSearchable(i)); + mdi.setTableName(i, md.getTableName(i)); + + /* + * Attributes that RowSetMetaDataImpl simply forgets to provide + * setters for. It is what it is. + columnClassName + isDefinitelyWritable + isReadOnly + isWritable + */ + } + qp(mdi); + } + + private static void qp(RowSetMetaDataImpl mdi) throws Exception + { + try (WebRowSet wrs = RowSetProvider.newFactory().createWebRowSet()) + { + wrs.setMetaData(mdi); + wrs.writeXml(System.out); + } + } + + /** + * Prints a {@code Throwable} retrieved from a result stream, with + * special handling for {@code SQLException} and {@code SQLWarning}. + *
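A sketch of dumping both kinds of metadata for a prepared statement, assuming a connection c; whether getMetaData describes an unexecuted statement varies by driver.

    import java.sql.PreparedStatement;
    import java.sql.ResultSetMetaData;

    try ( PreparedStatement ps = c.prepareStatement(
              "SELECT CAST(? AS pg_catalog.int4), CAST(? AS pg_catalog.text)") )
    {
        qp(ps.getParameterMetaData());  // rendered through the faked-up ResultSetMetaData
        ResultSetMetaData rmd = ps.getMetaData();
        if ( null != rmd )              // some drivers describe only after execution
            qp(rmd);
    }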

      + * In keeping with the XMLish vibe established by + * {@link #qp(Stream) qp} for other items in a result + * stream, this will render a {@code Throwable} as an {@code error}, + * {@code warning}, or {@code info} element (PostgreSQL's finer + * distinctions of severity are not exposed by every JDBC driver's API.) + *

      + * An element will have a {@code message} attribute if it has a message. + * It will have a {@code code} attribute containing the SQLState, if it is + * an instance of {@code SQLException}, unless it is rendered as an + * {@code info} element and the state is {@code 00000}. An instance of + * {@code SQLWarning} will be rendered as a {@code warning} unless its class + * (two leftmost code positions) is {@code 00}, in which case it will be + * {@code info}. Anything else is an {@code error}. + */ + public static void qp(Throwable t) + { + String[] parts = classify(t); + StringBuilder b = new StringBuilder("<" + parts[0]); + if ( null != parts[1] ) + b.append(" code=").append(asAttribute(parts[1])); + if ( null != parts[2] ) + b.append(" message=").append(asAttribute(parts[2])); + System.out.println(b.append("/>")); + } + + /** + * Returns an array of three {@code String}s, element, sqlState, + * and message, as would be printed by {@link #qp(Throwable)}. + *

      + * The first string will be: (1) if the throwable is an {@code SQLWarning}, + * "info" if its class (leftmost two positions of SQLState) is 00, otherwise + * "warning"; (2) for any other throwable, "error". These are constant + * strings and therefore interned. + *

      + * The second string will be null if the throwable is outside the + * {@code SQLException} hierarchy, or if the first string is "info" and the + * SQLState is exactly 00000; otherwise it will be the SQLState. + *

      + * The third string will be as returned by {@code getMessage}, and may be + * null if the throwable was not constructed with a message. + *

      + * If an {@code SQLWarning} is of the PGJDBC driver's {@code PSQLWarning} + * class and the backend's severity tag is available, it will be used to + * determine the first string, in place of the "starts with 00" rule. A tag + * of "WARNING" (or null) produces "warning", while any other tag produces + * "info". + */ + public static String[] classify(Throwable t) + { + String msg = t.getMessage(); + String sqlState = null; + String element = "error"; + if ( t instanceof SQLException ) + { + sqlState = ((SQLException)t).getSQLState(); + if ( t instanceof SQLWarning ) + { + element = s_toSeverity.apply((SQLWarning)t); + if ( "info".equals(element) && "00000".equals(sqlState) ) + sqlState = null; + } + } + return new String[] { element, sqlState, msg }; + } + + /** + * Escapes a string as an XML attribute. + *
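A sketch of using classify to decide whether a script step actually failed, assuming a connection c; the statement shown normally raises only a notice, which should classify as "info".

    import java.util.stream.Stream;

    boolean failed = false;
    try ( Stream<Object> s = q(c, "DROP TABLE IF EXISTS no_such_table") ) // emits only a notice
    {
        failed = s.flatMap(Node::semiFlattenDiagnostics)
                  .filter(o -> o instanceof Throwable)
                  .map(o -> classify((Throwable)o)[0])  // "info", "warning", or "error"
                  .anyMatch("error"::equals);
    }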

+ * Right on the borderline of trivial enough to implement here rather than + * using `java.xml` APIs (even though those are available in `jshell` too, + * transitively supplied by our reliance on `java.sql`). + */ + private static String asAttribute(String s) + { + int[] aposquot = new int[2]; + s.codePoints().forEach(c -> + { + if ( '\'' == c ) + ++ aposquot[0]; + else if ( '"' == c ) + ++ aposquot[1]; + }); + char delim = aposquot[0] > aposquot[1] ? '"' : '\''; + Matcher m = compile('"' == delim ? "[<&\"]" : "[<&']").matcher(s); + s = m.replaceAll(r -> + { + switch (r.group()) + { + case "<": return "&lt;"; + case "&": return "&amp;"; + case "'": return "&apos;"; + case "\"": return "&quot;"; + } + throw new AssertionError(); + }); + return delim + s + delim; + } + + /** + * Determines whether an object is a {@code ResultSet} with no columns of + * any type other than {@code void}, to allow abbreviated output of result + * sets produced by the common case of queries that call {@code void} + * functions. + *

      + * Returns null if o is not a {@code ResultSet}, or if its columns + * are not all of {@code void} type. Otherwise, returns a two-element + * integer array giving the rows (index 0 in the array) and columns (index + * 1) of the result set. + *

      + * If this method returns non-null, the result set is left positioned on its + * last row. + * @param o Object to check + * @param peek whether to avoid moving the row cursor. If true, and all of + * the columns are indeed void, the result array will have the column count + * at index 1 and -1 at index 0. + * @return null or a two-element int[], as described above + */ + public static int[] voidResultSetDims(Object o, boolean peek) + throws Exception + { + if ( ! (o instanceof ResultSet) ) + return null; + + ResultSet rs = (ResultSet)o; + ResultSetMetaData md = rs.getMetaData(); + int cols = md.getColumnCount(); + int rows = 0; + + for ( int c = 1; c <= cols; ++c ) + if ( Types.OTHER != md.getColumnType(c) + || ! "void".equals(md.getColumnTypeName(c)) ) + return null; + + if ( peek ) + rows = -1; + else if ( URL_FORM_PGJDBCNG == s_urlForm ) + { + rs.last(); // last(), getRow() appears to work, in pgjdbc-ng + rows = rs.getRow(); + } + else + { + while ( rs.next() ) // PGJDBC requires this unless rs is scrollable + ++ rows; + } + + return new int[] { rows, cols }; + } + + /** + * Equivalent to + * {@link #voidResultSetDims(Object,boolean) voidResultSetDims(o,false)}; + */ + public static int[] voidResultSetDims(Object o) throws Exception + { + return voidResultSetDims(o, false); + } + + /** + * A predicate testing that an object is a {@code ResultSet} that has only + * columns of {@code void} type, and the expected number of rows + * and columns. + *

      + * The expected result of a query that calls one {@code void}-typed, + * non-set-returning function could be checked with + * {@code isVoidResultSet(rs, 1, 1)}. + */ + public static boolean isVoidResultSet(Object o, int rows, int columns) + throws Exception + { + int[] dims = voidResultSetDims(o); + + return null != dims && rows == dims[0] && columns == dims[1]; + } + + /** + * Executes a state machine specified in the form of + * a list of lambdas representing its states, to verify that a + * {@linkplain #q(Statement,Callable) result stream} is as expected. + *

      + * Treats the list of lambdas as a set of consecutively-numbered states + * (the first in the list is state number 1, and is the initial state). + * At each step of the machine, the current state is applied to the current + * input object, and may return an {@code Integer} or a {@code Boolean}. + *

      + * If an integer, its absolute value selects the next state. A positive + * integer consumes the current input item, so the next state will be + * applied to the next item of input. A negative integer transitions to the + * selected next state without consuming the current input item, so it will + * be examined again in the newly selected state. + *

      + * If boolean, {@code false} indicates that the machine cannot proceed; the + * supplied reporter will be passed an explanatory string and this + * method returns false. A state that returns {@code true} indicates the + * machine has reached an accepting state. + *

+ * No item of input is allowed to be null; null is reserved to be the + * end-of-input symbol. If a state returns {@code true} (accept) + * when applied to null at the end of input, the machine has matched and + * this method returns true. A state may also return a negative integer in + * this case, to shift to another state while looking at the end of input. + * A positive integer (attempting to consume the end of input), or a false + * return, will cause an explanatory message to the reporter and a + * false return from this method. + *

      + * A state may return {@code true} (accept) when looking at a non-null + * input item, but the input will be checked to confirm it has no more + * elements. Otherwise, the machine has tried to accept before matching + * all the input, and this method will return false. + *

      + * To avoid defining a new functional interface, each state is represented + * by {@link InvocationHandler}, an existing functional interface with a + * versatile argument list and permissive {@code throws} clause. Each state + * must be represented as a lambda with three parameters (the convention + * {@code (o,p,q)} is suggested), of which only the first is normally used. + * If Java ever completes the transition to {@code _} as an unused-parameter + * marker, the suggested convention will be {@code (o,_,_)}, unless the + * third (q) is also needed for special purposes (more below). + *

      + * As the input item passed to each state is typed {@code Object}, and as + * null can only represent the end of input, it may be common for a state to + * both cast an input to an expected type and confirm it is not null. + * The {@link #as as} method combines those operations. If its argument + * either is null or cannot be cast to the wanted type, {@code as} will + * throw a specific instance of {@code ClassCastException}, which will be + * treated, when caught by {@code stateMachine}, just as if the state + * had returned {@code false}. + *

      + * The third parameter to an {@code InvocationHandler} is an {@code Object} + * array, and is here used to pass additional information that may at times + * be of use in a state. The first element of the array holds the boxed form + * of the current (1-based) state number. As a state must indicate the next + * state by returning an absolute state number, having the state's own + * number available opens the possibility of reusable presupplied state + * implementations that do not depend on their absolute position. + * @param name A name for this state machine, used only in exception + * messages if it fails to match all the input + * @param reporter a Consumer to accept a diagnostic string if the machine + * fails to match, defaulting if null to {@code System.err::println} + * @param input A Stream of input items, of which none may be null + * @param states Lambdas representing states of the machine + * @return true if an accepting state was reached coinciding with the end + * of input + * @throws Exception Anything that could be thrown during evaluation of the + * input stream or any state + */ + public static boolean stateMachine( + String name, Consumer reporter, Stream input, + InvocationHandler... states) + throws Exception + { + if ( null == reporter ) + reporter = System.err::println; + + try ( input ) + { + Iterator in = input.iterator(); + int currentState = 0; + int stepCount = 0; + int inputCount = 0; + Object currentInput = null; + boolean hasCurrent = false; + Object result; + + while ( hasCurrent || in.hasNext() ) + { + ++ stepCount; + if ( ! hasCurrent ) + { + currentInput = in.next(); + ++ inputCount; + if ( null == currentInput ) + throw new UnsupportedOperationException( + "Input to stateMachine() must " + + "not contain null values"); + hasCurrent = true; + } + + result = + invoke(states[currentState], currentState, currentInput); + + if ( result instanceof Boolean ) + { + if ( (Boolean)result && ! in.hasNext() ) + return true; + reporter.accept(String.format( + "stateMachine \"%s\" in state %d at step %d: %s", + name, 1 + currentState, stepCount, (Boolean)result + ? String.format( + "transitioned to ACCEPT after %d input items but " + + "with input remaining", inputCount) + : String.format( + "could not proceed, looking at input %d: %s", + inputCount, currentInput))); + return false; + } + + currentState = (Integer)result; + if ( currentState > 0 ) + hasCurrent = false; + else + currentState *= -1; + + -- currentState; + } + + for ( ;; ) + { + ++ stepCount; + result = invoke(states[currentState], currentState, null); + if ( result instanceof Boolean && (Boolean)result ) + return true; + else if ( result instanceof Integer && 0 > (Integer)result ) + { + currentState = -1 - (Integer)result; + continue; + } + break; + } + + reporter.accept(String.format( + "stateMachine \"%s\" in state %d at step %d: " + + "does not accept at end of input after %d items", + name, 1 + currentState, stepCount, inputCount)); + return false; + } + } + + /** + * Casts o to class clazz, testing it also for null. + *
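A sketch of a small machine over a query's result stream, assuming a connection c and this class's helpers in scope; it expects one ResultSet whose single row holds 42, and for brevity it does not allow for interleaved warnings.

    import java.sql.ResultSet;

    boolean ok = stateMachine(
        "answer check", null,
        q(c, "VALUES (42)"),
        // state 1: expect a ResultSet whose single row holds 42; consume it, go to state 2
        (o, p, q) ->
        {
            ResultSet rs = as(ResultSet.class, o);
            return rs.next() && 42 == rs.getInt(1) ? 2 : false;
        },
        // state 2: accept at end of input
        (o, p, q) -> null == o);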

+ * This is meant as a shorthand in implementing states for + * {@link #stateMachine stateMachine}. If o either is null or + * is not castable to the desired type, a distinguished instance of + * {@code ClassCastException} will be thrown, which is treated specially + * if caught by {@code stateMachine} while evaluating a state. + */ + public static <T> T as(Class<T> clazz, Object o) + { + if ( clazz.isInstance(o) ) + return clazz.cast(o); + throw failedAsException; + } + + private static final ClassCastException failedAsException = + new ClassCastException(); + + /** + * Invokes the state handler h, passing it the current object + * o and, for special purposes, the state index (adjusted to + * be 1-based). + *

      + * Conforming to the existing {@code InvocationHandler} interface, the + * state index is passed in boxed form as element zero of an {@code Object} + * array passed as the third argument. + */ + private static Object invoke(InvocationHandler h, int stateIdx, Object o) + throws Exception + { + try + { + return h.invoke(o, null, new Object[] { 1 + stateIdx }); + } + catch ( ClassCastException e ) + { + if ( failedAsException == e ) + return false; + throw e; + } + catch ( Exception e ) + { + throw e; + } + catch ( Throwable t ) + { + throw (Error)t; + } + } + + /* + * For parsing the postmaster.pid file, these have been the lines at least + * back to 9.1, except PM_STATUS appeared in 10. That's too bad; before 10 + * it isn't possible to wait for a status of ready, which may necessitate + * just retrying the initial connection if the timing is unlucky. + * Cribbed from . + */ + private static final int LOCK_FILE_LINE_PID = 1; + private static final int LOCK_FILE_LINE_DATA_DIR = 2; + private static final int LOCK_FILE_LINE_START_TIME = 3; + private static final int LOCK_FILE_LINE_PORT = 4; + private static final int LOCK_FILE_LINE_SOCKET_DIR = 5; + private static final int LOCK_FILE_LINE_LISTEN_ADDR = 6; + private static final int LOCK_FILE_LINE_SHMEM_KEY = 7; + private static final int LOCK_FILE_LINE_PM_STATUS = 8; + private static final String PM_STATUS_READY = "ready "; + + /** + * Waits for the {@code postmaster.pid} file to have the right contents + * (the right pid for process p, and ready status for PG 10+). + *

      + * The {@code PostgreSQL:Test:Cluster} version of this is also used when + * shutting down, and waits for the file to go away; that could be + * implemented here, but not today. + */ + private void wait_for_pid_file(Process p, ProcessHandle.Info info) + throws Exception + { + Path datadir = data_dir(); + Path pidfile = datadir.resolve("postmaster.pid"); + Path pidonly = pidfile.getFileName(); + + /* + * If m_usePostgres is true, the p passed above is the actual postgres + * process, and we can compare its pid to what's in the pidfile. + * If pg_ctl was used, it's just the pid of the pg_ctl process, and + * instead of "checking" the pid in the pidfile, construct a process + * handle from it, to be saved as the handle of the server. + */ + Predicate checkPid = + m_usePostgres + ? (s -> Long.parseLong(s[LOCK_FILE_LINE_PID - 1]) == p.pid()) + : (s -> + { + long pid = Long.parseLong(s[LOCK_FILE_LINE_PID - 1]); + m_serverHandle = ProcessHandle.of(pid).get(); + return true; + } + ); + + /* + * The isAlive check is a simple check on p in the m_usePostgres case. + * Otherwise, p is the pg_ctl process and probably has exited already; + * the handle assigned to m_serverHandle must be checked. If no handle + * has been assigned yet, just assume alive. The prospect of an + * unbounded wait (server process exiting before its pid could be + * collected from the pid file) should not be realizable, as long as + * pg_ctl itself waits long enough for the file to be present. + */ + BooleanSupplier isAlive = + m_usePostgres + ? (() -> p.isAlive()) + : (() -> null != m_serverHandle ? m_serverHandle.isAlive() : true); + + StringBuilder tracepoints = new StringBuilder(); + Matcher dejavu = compile("(.+?)(?:\\1){16,}").matcher(tracepoints); + Consumer trace = c -> + { + tracepoints.insert(0, c); + if ( ! dejavu.reset().lookingAt() ) + return; + tracepoints.reverse(); + String preamble = + tracepoints.substring(0, tracepoints.length() - dejavu.end()); + String cycle = + tracepoints.substring(tracepoints.length() - dejavu.end(1)); + throw new CancellationException( + "Guru Meditation #" + preamble + "." + cycle); + }; + + trace.accept('A'); + if ( ! m_usePostgres ) + if ( 0 != p.waitFor() ) + throw new IllegalStateException( + "pg_ctl exited with status " + p.exitValue()); + trace.accept('B'); + + /* + * Initialize a watch service just in case the postmaster.pid file + * isn't there or has the wrong contents when we first look, + * and we need to wait for something to happen to it. + */ + try (WatchService watcher = datadir.getFileSystem().newWatchService()) + { + WatchKey key = + datadir.register(watcher, ENTRY_CREATE, ENTRY_MODIFY); + + for ( ;; ) + { + trace.accept('C'); + try + { + if ( getLastModifiedTime(pidfile).toInstant().plusSeconds(1) + .isBefore(info.startInstant().get()) ) + throw new NoSuchFileException("honest!"); + /* + * That was kind of a lie, but it's older than the + * process, so catching the exception below and waiting + * for it to change will be the right thing to do. 
+ */ + + trace.accept('D'); + String[] status; + try ( Stream lines = lines(pidfile) ) + { + status = lines.toArray(String[]::new); + } + if ( (status.length == LOCK_FILE_LINE_PM_STATUS) + && checkPid.test(status) + && PM_STATUS_READY.equals( + status[LOCK_FILE_LINE_PM_STATUS - 1]) ) + return; + trace.accept('E'); + if ( + ( + status.length == LOCK_FILE_LINE_SHMEM_KEY + || s_isWindows + && status.length == LOCK_FILE_LINE_LISTEN_ADDR + ) + && checkPid.test(status) + && waitPrePG10() ) + return; + trace.accept('F'); + } + catch (NoSuchFileException e) + { + trace.accept('G'); + } + + /* + * The file isn't there yet, or isn't fully written or "ready" + */ + for ( ;; ) + { + if ( ! isAlive.getAsBoolean() ) + throw new IllegalStateException( + "Server process exited while awaiting \"ready\"" + + ( + m_usePostgres + ? " with status " + p.exitValue() + : "" + ) + ); + trace.accept('H'); + WatchKey k = watcher.poll(250, MILLISECONDS); + trace.accept('I'); + if ( interrupted() ) + throw new InterruptedException(); + trace.accept('J'); + if ( null == k ) + break; // timed out; check again just in case + trace.accept('K'); + assert key.equals(k); // it's the only one we registered + boolean recheck = k.pollEvents().stream() + .anyMatch(e -> + { + WatchEvent.Kind kind = e.kind(); + if ( OVERFLOW == kind ) + return true; + if ( ENTRY_CREATE == kind && + pidonly.equals( + ENTRY_CREATE.type().cast(e.context())) ) + return true; + if ( ENTRY_MODIFY == kind && + pidonly.equals( + ENTRY_MODIFY.type().cast(e.context())) ) + return true; + return false; + } + ); + trace.accept('L'); + if ( recheck ) + break; + trace.accept('M'); + k.reset(); + } + } + } + catch ( final Throwable t ) + { + /* + * In the ! m_usePostgres case, m_serverHandle gets unconditionally + * set in checkPid; don't let that escape if completing abruptly. + */ + m_serverHandle = null; + throw t; + } + } + + /** + * Checks whether the server being started is earlier than PG 10 and, if so, + * sleeps for a period expected to be adequate for it to become ready to + * accept connections, then returns true. + *

      + * This is called from the generic {@code wait_for_pid_file}, only if the + * file has already appeared and has all entries but {@code PM_STATUS}. That + * could mean it is a pre-PG10 server that will not write {@code PM_STATUS}, + * or a PG 10 or later server that was caught in mid-write to the file. + *

      + * Return false if it is PG 10 or later, in which case the caller should + * continue waiting for {@code PM_STATUS_READY} to appear. + *

      + * The fixed wait in the pre-PG10 case should not need to be terribly long, + * because this method isn't called until the PID file has already appeared, + * so that much of server startup has already occurred. + */ + private boolean waitPrePG10() throws Exception + { + if ( lines(data_dir().resolve("PG_VERSION")).limit(1).noneMatch( + s -> s.contains(".")) ) + return false; + Thread.sleep(2000); // and hope + return true; + } + + /* + * Workarounds for ProcessBuilder command argument preservation problems + * in various circumstances. Each of the functions below acts on a + * ProcessBuilder by possibly modifying its 'command' argument vector into + * such a form that the intended target will be correctly invoked with the + * original arguments. + * + * - Java's Windows implementation faces a near-impossible task because of + * the variety of parsing rules that could be applied by some arbitrary + * invoked program. Here, with the simplifying assumption that the program + * will be one of initdb, postgres, or pg_ctl, all C programs using the C + * run-time code to parse command lines, and checking to exclude a few + * cases that can't be reliably handled, the simpler problem is tractable. + * + * - pg_ctl itself is surprisingly problem-ridden. Here the starting point + * is an argument list intended for invoking postgres directly, which will + * be transformed into one to start postgres via pg_ctl. The only options + * handled here are the ones start() might supply: -D for the datadir and + * -c for options, which will be rewritten as -o values for pg_ctl. + */ + + /* + * The same method is duplicated in pljava-pgxs/PGXSUtils.java . While making + * changes to this method, review the other occurrence also and replicate the + * changes there if desirable. + */ + /** + * Adjusts the command arguments of a {@code ProcessBuilder} so that they + * will be recovered correctly on Windows by a target C/C++ program using + * the argument parsing algorithm of the usual C run-time code, when it is + * known that the command will not be handled first by {@code cmd}. + *

      + * This transformation must account for the way the C runtime will + * ultimately parse the parameters apart, and also for the behavior of + * Java's runtime in assembling the command line that the invoked process + * will receive. + * @param pb a ProcessBuilder whose command has been set to an executable + * that parses parameters using the C runtime rules, and arguments as they + * should result from parsing. + * @return The same ProcessBuilder, with the argument list rewritten as + * necessary to produce the original list as a result of Windows C runtime + * parsing, + * @throws IllegalArgumentException if the ProcessBuilder does not have at + * least the first command element (the executable to run) + * @throws UnsupportedOperationException if the arguments passed, or system + * properties in effect, produce a case this transformation cannot handle + */ + public static ProcessBuilder forWindowsCRuntime(ProcessBuilder pb) + { + ListIterator args = pb.command().listIterator(); + if ( ! args.hasNext() ) + throw new IllegalArgumentException( + "ProcessBuilder command must not be empty"); + + /* + * The transformation implemented here must reflect the parsing rules + * of the C run-time code, and the rules are taken from: + * http://www.daviddeley.com/autohotkey/parameters/parameters.htm#WINARGV + * + * It must also take careful account of what the Java runtime does to + * the arguments before the target process is launched, and line numbers + * in comments below refer to this version of the source: + * http://hg.openjdk.java.net/jdk9/jdk9/jdk/file/65464a307408/src/java.base/windows/classes/java/lang/ProcessImpl.java + * + * 1. Throw Unsupported if the jdk.lang.Process.allowAmbiguousCommands + * system property is in force. + * + * Why? + * a. It is never allowed under a SecurityManager, so to allow it + * at all would allow code's behavior to change depending on + * whether a SecurityManager is in place. + * b. It results in a different approach to preparing the arguments + * (line 364) that would have to be separately analyzed. + * + * Do not test this property with Boolean.getBoolean: that returns true + * only if the value equalsIgnoreCase("true"), which does not match the + * test in the Java runtime (line 362). + */ + String propVal = getProperty("jdk.lang.Process.allowAmbiguousCommands"); + if ( null != propVal && ! "false".equalsIgnoreCase(propVal) ) + throw new UnsupportedOperationException( + "forWindowsCRuntime transformation does not support operation" + + " with jdk.lang.Process.allowAmbiguousCommands in effect"); + + /* + * 2. Throw Unsupported if the executable path name contains a " + * + * Why? Because getExecutablePath passes true, unconditionally, to + * isQuoted (line 303), so it will throw IllegalArgumentException if + * there is any " in the executable path. The catch block for that + * exception (line 383) will make a highly non-correctness-preserving + * attempt to join and reparse the arguments, using + * getTokensFromCommand (line 198), which uses a regexp (line 188) + * that does not even remotely resemble the C runtime parsing rules. + * + * Possible future work: this case could be handled by rewriting the + * entire command as an invocation via CMD or another shell. + */ + String executable = args.next(); + if ( executable.contains("\"") ) + throw new UnsupportedOperationException( + "forWindowsCRuntime does not support invoking an executable" + + " whose name contains a \" character"); + + /* + * 3. 
Throw Unsupported if the executable path ends in .cmd or .bat + * (case-insensitively). + * + * Why? For those extensions, the Java runtime will select different + * rules (line 414). + * a. Those rules would need to be separately analyzed. + * b. They will reject (line 286) any argument that contains a " + * + * Possible future work: this case could be handled by rewriting the + * entire command as an invocation via CMD or another shell (which is + * exactly the suggestion in the exception message that would be + * produced if an argument contains a "). + */ + if ( executable.matches(".*\\.(?i:cmd|bat)$") ) + throw new UnsupportedOperationException( + "forWindowsCRuntime does not support invoking a command" + + " whose name ends in .cmd or .bat"); + + /* + * 4. There is a worrisome condition in the Java needsEscaping check + * (line 277), where it would conclude that escaping is NOT needed + * if an argument both starts and ends with a " character. In other + * words, it would treat that case (and just that case) not as + * characters that are part of the content and need to be escaped, + * but as a sign that its job has somehow already been done. + * + * However, that will not affect this transformation, because our + * rule 5 below will ensure that any leading " has a \ added before, + * and therefore the questionable Java code will never see from us + * an arg that both starts and ends with a ". + * + * There is one edge case where this behavior of the Java runtime + * will be relied on (see rule 7 below). + */ + + while ( args.hasNext() ) + { + String arg = args.next(); + + /* + * 5. While the Java runtime code will add " at both ends of the + * argument IF the argument contains space, tab, <, or >, it does + * so with zero attention to any existing " characters in the + * content of the argument. Those must, of course, be escaped so + * the C runtime parser will not see them as ending the quoted + * region. By those rules, a " is escaped by a \ and a \ is only + * special if it is followed by a " (or in a sequence of \ + * ultimately leading to a "). The needed transformation is to + * find any instance of n backslashes (n may be zero) followed + * by a ", and replace that match with 2n+1 \ followed by the ". + * + * This transformation is needed whether or not the Java runtime + * will be adding " at start and end. If it does not, the same + * \ escaping is needed so the C runtime will not see a " as + * beginning a quoted region. + */ + String transformed = arg.replaceAll("(\\\\*+)(\")", "$1$1\\\\$2"); + + /* + * 6. Only if the Java runtime will be adding " at start and end + * (i.e., only if the arg contains space, tab, <, or >), there is + * one more case where \ can be special: at the very end of the + * arg (where it will end up followed by a " when the Java + * runtime has done its thing). The Java runtime is semi-aware of + * this case (line 244): it will add a single \ if it sees that + * the arg ends with a \. However, that isn't the needed action, + * which is to double ALL consecutive \ characters ending the + * arg. + * + * So the action needed here is to double all-but-one of any + * consecutive \ characters at the end of the arg, leaving one + * that will be doubled by the Java code. + */ + if ( transformed.matches("(?s:[^ \\t<>]*+.++)") ) + transformed = transformed.replaceFirst( + "(\\\\)(\\\\*+)$", "$1$2$2"); + + /* + * 7. If the argument is the empty string, it must be represented + * as "" or it will simply disappear. 
The Java runtime will not + * do that for us (after all, the empty string does not contain + * space, tab, <, or >), so it has to be done here, replacing the + * arg with exactly "". + * + * This is the one case where we produce a value that both starts + * and ends with a " character, thereby triggering the Java + * runtime behavior described in (4) above, so the Java runtime + * will avoid trying to further "protect" the string we have + * produced here. For this one case, that 'worrisome' behavior is + * just what we want. + */ + if ( transformed.isEmpty() ) + transformed = "\"\""; + + if ( ! transformed.equals(arg) ) + args.set(transformed); + } + + return pb; + } + + /** + * Adjusts the command arguments of a {@code ProcessBuilder} that would + * directly invoke {@code postgres} to start a server, so that it will + * instead start {@code postgres} via {@code pg_ctl}. + *

      + * {@code pg_ctl} constructs a command line for {@code cmd.exe} (on Windows) + * or {@code /bin/sh} (elsewhere), which in turn will launch + * {@code postgres}. The way {@code pg_ctl} handles options ({@code -o}) + * requires this transformation to be platform-aware and quote them + * correctly for {@code sh} or {@code cmd} as appropriate. + *
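As a concrete (made-up) illustration of the rewrite on a non-Windows host, again written as if inside this class:

    // Sketch (paths invented): a command aimed directly at postgres ...
    static void pgCtlExample()
    {
        ProcessBuilder pb = new ProcessBuilder(
            "/usr/lib/postgresql/12/bin/postgres",
            "-D", "/tmp/test data",
            "-c", "port=5433");
        asPgCtlInvocation(pb);
        // ... now invokes pg_ctl instead; pb.command() is roughly:
        // [.../pg_ctl, start, -D, /tmp/test data, -o, -c 'port=5433']
    }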

      + * The result of this transformation still has to be received intact by + * {@code pg_ctl} itself, which requires (on Windows) a subsequent + * application of {@code forWindowsCRuntime} as well. + * @param pb a ProcessBuilder whose command has been set to an executable + * path for {@code postgres}, with only {@code -D} and {@code -c} options. + * @return The same ProcessBuilder, with the argument list rewritten to + * invoke {@code pg_ctl start} with the same {@code -D} and any other + * options supplied by {@code -o}. + * @throws IllegalArgumentException if the ProcessBuilder does not have at + * least the first command element (the executable to run) + * @throws UnsupportedOperationException if the arguments passed + * produce a case this transformation cannot handle + */ + public static ProcessBuilder asPgCtlInvocation(ProcessBuilder pb) + { + ListIterator args = pb.command().listIterator(); + if ( ! args.hasNext() ) + throw new IllegalArgumentException( + "ProcessBuilder command must not be empty"); + + Matcher datadirDisallow = + compile(s_isWindows ? "[\"^%]" : "[\"\\\\$]").matcher(""); + + Path executable = Paths.get(args.next()); + if ( ! executable.endsWith("postgres") ) + throw new UnsupportedOperationException( + "expected executable path to end with postgres"); + executable = executable.getParent().resolve("pg_ctl"); + args.set(executable.toString()); + args.add("start"); + + while ( args.hasNext() ) + { + String arg = args.next(); + switch ( arg ) + { + case "-D": + if ( datadirDisallow.reset(args.next()).find() ) + throw new UnsupportedOperationException( + "datadir with \", " + + (s_isWindows ? "^, or %" : "\\, or $") + + " character is likely to be messed up by pg_ctl"); + break; + + case "-c": + args.set("-o"); + String setting = args.next(); + if ( s_isWindows ) + { + /* + * The result of this transformation will be what pg_ctl + * passes to cmd. Because it will be (a) passed to cmd, and + * then (b) passed to postgres (a C program), it can use + * exactly the simplified "putting it together" rules from + * http://www.daviddeley.com/autohotkey/parameters/parameters.htm#CPP + * + * Because this is only about the handoff from pg_ctl to cmd + * to postgres, it does not need to handle the tricks of + * getting safely through the Java runtime. Getting what + * this transformation produces safely from Java to pg_ctl + * (another C program) is the job of forWindowsCRuntime. + */ + setting = setting.replaceAll("(\\\\++)(\"|$)","$1$1\\\\$2"); + setting = setting.replaceAll("([<>|&()^])", "^$1"); + setting = "^\"" + setting + "^\""; + } + else + { + /* + * The simple Bourne-shell rule for safely quoting an + * argument is like a glass of cool water on a hot day. + */ + setting = "'" + setting.replace("'", "'\\''") + "'"; + } + args.set("-c " + setting); + break; + + default: + throw new UnsupportedOperationException( + "asPgCtlInvocation does not handle postgres option \"" + + arg + "\""); + } + } + + return pb; + } +} diff --git a/pljava-packaging/src/main/resources/pljava--unpackaged--.sql b/pljava-packaging/src/main/resources/pljava--unpackaged--.sql new file mode 100644 index 00000000..9d56fb94 --- /dev/null +++ b/pljava-packaging/src/main/resources/pljava--unpackaged--.sql @@ -0,0 +1,82 @@ +\echo Use "CREATE EXTENSION pljava FROM UNPACKAGED" to load this file. \quit + +/* + This script can "update" from any unpackaged PL/Java version supported by + the automigration code within PL/Java itself. 
The schema migration is first + touched off by the LOAD command, and then the ALTER EXTENSION commands gather + up the member objects according to the current schema version. + */ + +DROP TABLE IF EXISTS +@extschema@."see doc: do CREATE EXTENSION PLJAVA in new session"; +CREATE TABLE +@extschema@."see doc: do CREATE EXTENSION PLJAVA in new session" +(path, exnihilo) AS +SELECT CAST('${module.pathname}' AS text), false; +LOAD '${module.pathname}'; + +/* + Why the CREATE / DROP? When faced with a LOAD command, PostgreSQL only does it + if the library has not been loaded already in the session (as could have + happened if, for example, a PL/Java function has already been called). If the + LOAD was skipped, there could still be an old-layout schema, because the + migration only happens in an actual LOAD. To avoid confusion later, it's + helpful to fail fast in that case. The loadpath table should have been dropped + by the LOAD actions, so the re-CREATE/DROP here will incur a (cryptic, but + dependable) error if those actions didn't happen. The error message will + include the table name, which is why the table name is phrased as an error + message. + + The solution to a problem detected here is simply to exit the + session and repeat the CREATE EXTENSION in a new session where PL/Java has not + been loaded yet. + */ +CREATE TABLE +@extschema@."see doc: do CREATE EXTENSION PLJAVA in new session"(); +DROP TABLE +@extschema@."see doc: do CREATE EXTENSION PLJAVA in new session"; + +/* + The language-handler functions do not need to be explicitly added, because the + LOAD actions always CREATE OR REPLACE them, which makes them extension members. + Since the validators were added for 1.6.0, the language entries are also always + CREATE OR REPLACEd, so they don't have to be mentioned here either. 
+ */ + +ALTER EXTENSION pljava ADD + FUNCTION sqlj.add_type_mapping(character varying,character varying); +ALTER EXTENSION pljava ADD + FUNCTION sqlj.alias_java_language( + character varying,boolean,boolean,character varying); +ALTER EXTENSION pljava ADD + FUNCTION sqlj.drop_type_mapping(character varying); +ALTER EXTENSION pljava ADD + FUNCTION sqlj.get_classpath(character varying); +ALTER EXTENSION pljava ADD + FUNCTION sqlj.install_jar(bytea,character varying,boolean); +ALTER EXTENSION pljava ADD + FUNCTION sqlj.install_jar(character varying,character varying,boolean); +ALTER EXTENSION pljava ADD + FUNCTION sqlj.remove_jar(character varying,boolean); +ALTER EXTENSION pljava ADD + FUNCTION sqlj.replace_jar(bytea,character varying,boolean); +ALTER EXTENSION pljava ADD + FUNCTION sqlj.replace_jar(character varying,character varying,boolean); +ALTER EXTENSION pljava ADD + FUNCTION sqlj.set_classpath(character varying,character varying); + +ALTER EXTENSION pljava ADD TABLE sqlj.classpath_entry; +ALTER EXTENSION pljava ADD TABLE sqlj.jar_descriptor; +ALTER EXTENSION pljava ADD TABLE sqlj.jar_entry; +ALTER EXTENSION pljava ADD TABLE sqlj.jar_repository; +ALTER EXTENSION pljava ADD TABLE sqlj.typemap_entry; + +ALTER EXTENSION pljava ADD SEQUENCE sqlj.jar_entry_entryid_seq; +ALTER EXTENSION pljava ADD SEQUENCE sqlj.jar_repository_jarid_seq; +ALTER EXTENSION pljava ADD SEQUENCE sqlj.typemap_entry_mapid_seq; + +SELECT pg_catalog.pg_extension_config_dump('@extschema@.jar_repository', ''); +SELECT pg_catalog.pg_extension_config_dump('@extschema@.jar_entry', ''); +SELECT pg_catalog.pg_extension_config_dump('@extschema@.jar_descriptor', ''); +SELECT pg_catalog.pg_extension_config_dump('@extschema@.classpath_entry', ''); +SELECT pg_catalog.pg_extension_config_dump('@extschema@.typemap_entry', ''); diff --git a/pljava-packaging/src/main/resources/pljava--unpackaged.sql b/pljava-packaging/src/main/resources/pljava--unpackaged.sql index 86767df4..6dcce382 100644 --- a/pljava-packaging/src/main/resources/pljava--unpackaged.sql +++ b/pljava-packaging/src/main/resources/pljava--unpackaged.sql @@ -1,80 +1,25 @@ -\echo Use "CREATE EXTENSION pljava FROM UNPACKAGED" to load this file. \quit +\echo 'Use "CREATE EXTENSION pljava VERSION unpackaged" to load this file.' +\echo 'Then start a new connection (use \\c in psql) and' +\echo 'use "ALTER EXTENSION pljava UPDATE" to complete packaging.' \quit /* - This script can "update" from any unpackaged PL/Java version supported by - the automigration code within PL/Java itself. The schema migration is first - touched off by the LOAD command, and then the ALTER EXTENSION commands gather - up the member objects according to the current schema version. + * PostgreSQL 13 drops support for CREATE EXTENSION ... FROM unpackaged; + * on the rationale that any sensible site has already updated old unpackaged + * extensions to their extension versions. For PL/Java, though, there is still + * a realistic scenario where it ends up installed as 'unpackaged': if a + * CREATE EXTENSION failed because a setting needed adjustment, the admin + * supplied the right setting, and the installation then succeeded. That leaves + * PL/Java installed, but not as a packaged extension. The old CREATE EXTENSION + * ... FROM unpackaged; syntax was the perfect recovery method for that. It will + * still work in versions < 13. + * + * For PostgreSQL 13, recovery now requires two steps instead. 
The first step + * is CREATE EXTENSION pljava VERSION unpackaged; which will use this script to + * simply confirm the unpackaged installation has already happened, and + * otherwise do absolutely nothing. The second step (which must happen in a new + * session) is ALTER EXTENSION pljava UPDATE; which will package it as the + * latest extension version, even running the exact script that CREATE EXTENSION + * ... FROM unpackaged; would have run to do it. */ -DROP TABLE IF EXISTS -@extschema@."see doc: do CREATE EXTENSION PLJAVA in new session"; -CREATE TABLE -@extschema@."see doc: do CREATE EXTENSION PLJAVA in new session" -(path, exnihilo) AS -SELECT CAST('${module.pathname}' AS text), false; -LOAD '${module.pathname}'; - -/* - Why the CREATE / DROP? When faced with a LOAD command, PostgreSQL only does it - if the library has not been loaded already in the session (as could have - happened if, for example, a PL/Java function has already been called). If the - LOAD was skipped, there could still be an old-layout schema, because the - migration only happens in an actual LOAD. To avoid confusion later, it's - helpful to fail fast in that case. The loadpath table should have been dropped - by the LOAD actions, so the re-CREATE/DROP here will incur a (cryptic, but - dependable) error if those actions didn't happen. The error message will - include the table name, which is why the table name is phrased as an error - message. - - The solution to a problem detected here is simply to exit the - session and repeat the CREATE EXTENSION in a new session where PL/Java has not - been loaded yet. - */ -CREATE TABLE -@extschema@."see doc: do CREATE EXTENSION PLJAVA in new session"(); -DROP TABLE -@extschema@."see doc: do CREATE EXTENSION PLJAVA in new session"; - -/* - The language-hander functions do not need to be explicitly added, because the - LOAD actions always CREATE OR REPLACE them, which makes them extension members. 
- */ - -ALTER EXTENSION pljava ADD LANGUAGE java; -ALTER EXTENSION pljava ADD LANGUAGE javau; - -ALTER EXTENSION pljava ADD - FUNCTION sqlj.add_type_mapping(character varying,character varying); -ALTER EXTENSION pljava ADD - FUNCTION sqlj.drop_type_mapping(character varying); -ALTER EXTENSION pljava ADD - FUNCTION sqlj.get_classpath(character varying); -ALTER EXTENSION pljava ADD - FUNCTION sqlj.install_jar(bytea,character varying,boolean); -ALTER EXTENSION pljava ADD - FUNCTION sqlj.install_jar(character varying,character varying,boolean); -ALTER EXTENSION pljava ADD - FUNCTION sqlj.remove_jar(character varying,boolean); -ALTER EXTENSION pljava ADD - FUNCTION sqlj.replace_jar(bytea,character varying,boolean); -ALTER EXTENSION pljava ADD - FUNCTION sqlj.replace_jar(character varying,character varying,boolean); -ALTER EXTENSION pljava ADD - FUNCTION sqlj.set_classpath(character varying,character varying); - -ALTER EXTENSION pljava ADD TABLE sqlj.classpath_entry; -ALTER EXTENSION pljava ADD TABLE sqlj.jar_descriptor; -ALTER EXTENSION pljava ADD TABLE sqlj.jar_entry; -ALTER EXTENSION pljava ADD TABLE sqlj.jar_repository; -ALTER EXTENSION pljava ADD TABLE sqlj.typemap_entry; - -ALTER EXTENSION pljava ADD SEQUENCE sqlj.jar_entry_entryid_seq; -ALTER EXTENSION pljava ADD SEQUENCE sqlj.jar_repository_jarid_seq; -ALTER EXTENSION pljava ADD SEQUENCE sqlj.typemap_entry_mapid_seq; - -SELECT pg_catalog.pg_extension_config_dump('@extschema@.jar_repository', ''); -SELECT pg_catalog.pg_extension_config_dump('@extschema@.jar_entry', ''); -SELECT pg_catalog.pg_extension_config_dump('@extschema@.jar_descriptor', ''); -SELECT pg_catalog.pg_extension_config_dump('@extschema@.classpath_entry', ''); -SELECT pg_catalog.pg_extension_config_dump('@extschema@.typemap_entry', ''); +SELECT sqlj.get_classpath('public'); -- just fail unless already installed diff --git a/pljava-packaging/src/main/resources/pljava.policy b/pljava-packaging/src/main/resources/pljava.policy new file mode 100644 index 00000000..6753bfdd --- /dev/null +++ b/pljava-packaging/src/main/resources/pljava.policy @@ -0,0 +1,131 @@ +// +// Security policy for PL/Java. These grants are intended to add to those +// contained in the java.policy file of the standard Java installation. +// + + +// +// This grant is unconditional. It adds these properties to the standard Java +// list of system properties that any code may read. +// +grant { + // "standard" properties that can be read by anyone, by analogy to the + // ones so treated in Java itself. + // + permission java.util.PropertyPermission + "org.postgresql.version", "read"; + permission java.util.PropertyPermission + "org.postgresql.pljava.version", "read"; + permission java.util.PropertyPermission + "org.postgresql.pljava.native.version", "read"; + + permission java.util.PropertyPermission + "org.postgresql.pljava.udt.byteorder.*", "read"; + + permission java.util.PropertyPermission + "org.postgresql.server.encoding", "read"; + permission java.util.PropertyPermission + "user.language", "read"; + + // PostgreSQL allows SELECT current_database() or SHOW cluster_name anyway. + // + permission java.util.PropertyPermission + "org.postgresql.database", "read"; + permission java.util.PropertyPermission + "org.postgresql.cluster", "read"; + + // SQL/JRT specifies this property. + // + permission java.util.PropertyPermission + "sqlj.defaultconnection", "read"; + + // This property is read in the innards of Java 9 and 10, but they forgot + // to add a permission for it. Not needed for Java 11 and later. 
+ // + permission java.util.PropertyPermission + "jdk.lang.ref.disableClearBeforeEnqueue", "read"; + + // Something similar happened in Java 14 (not yet fixed in 15). + // + permission java.util.PropertyPermission + "java.util.concurrent.ForkJoinPool.common.maximumSpares", "read"; +}; + + +// +// This grant is specific to the internal implementation of PL/Java itself, +// which needs these permissions for its own operations. +// +// Historically, PL/Java has been able to read any file on the server filesystem +// when a file: URL is passed to sqlj.install_jar or sqlj.replace_jar. Such a +// broad grant is not necessary, and can be narrowed below if desired. +// +grant codebase "${org.postgresql.pljava.codesource}" { + permission java.lang.RuntimePermission + "charsetProvider"; + permission java.lang.RuntimePermission + "createClassLoader"; + permission java.lang.RuntimePermission + "getProtectionDomain"; + permission java.net.NetPermission + "specifyStreamHandler"; + permission java.util.logging.LoggingPermission + "control"; + permission java.security.SecurityPermission + "createAccessControlContext"; + + // This gives the PL/Java implementation code permission to read + // any file, which it only exercises on behalf of sqlj.install_jar() + // or sqlj.replace_jar() when called with a file: URL. + // + // There would be nothing wrong with restricting this permission to + // a specific directory, if all jar files to be loaded will be found there, + // or, if they will be hosted on a remote server, a permission like + // java.net.URLPermission "https://example.com/jars/*", "GET:Accept" + // etc. + // + permission java.io.FilePermission + "<<ALL FILES>>", "read"; +}; + + +// +// This grant defines the mapping onto Java of PostgreSQL's "trusted language" +// category. When PL/Java executes a function whose SQL declaration names +// a language that was declared WITH the TRUSTED keyword, it will have these +// permissions, if any (in addition to whatever others might be granted to all +// code, or to its specific jar, etc.). +// +grant principal org.postgresql.pljava.PLPrincipal$Sandboxed * { +}; + + +// +// This grant defines the mapping onto Java of PostgreSQL's "untrusted language" +// category. When PL/Java executes a function whose SQL declaration names +// a language that was declared WITHOUT the TRUSTED keyword, it will have these +// permissions (in addition to whatever others might be granted to all code, or +// to its specific jar, etc.). +// +grant principal org.postgresql.pljava.PLPrincipal$Unsandboxed * { + + // Java does not circumvent operating system access controls; this grant + // will still be limited to what the OS allows a PostgreSQL backend process + // to do. + permission java.io.FilePermission + "<<ALL FILES>>", "read,readlink,write,delete"; +}; + + +// +// This grant applies to a specific PL/Java sandboxed language named java_tzset +// (if such a language exists) and grants functions created in that language +// permission to adjust the time zone. There is an example method in the +// org.postgresql.pljava.example.annotation.PreJSR310 class, which needs to +// temporarily adjust the time zone for a test. That example also uses +// sqlj.alias_java_language to create the java_tzset "language" when deployed, +// and DROP LANGUAGE to remove it when undeployed. 
+// +grant principal org.postgresql.pljava.PLPrincipal$Sandboxed "java_tzset" { + permission java.util.PropertyPermission "user.timezone", "write"; +}; diff --git a/pljava-packaging/src/site/markdown/index.md b/pljava-packaging/src/site/markdown/index.md new file mode 100644 index 00000000..1e0773a4 --- /dev/null +++ b/pljava-packaging/src/site/markdown/index.md @@ -0,0 +1,38 @@ +## About PL/Java packaging + +The `pljava-packaging` subproject builds a single `jar` file that contains +the files (including the API, implementation, and examples `jar` files, +native code shared object, and PostgreSQL extension control files) that must +be unpacked into a PostgreSQL installation so PL/Java can be used. These files +could have been wrapped in a `tar` or `zip` format instead, but any site where +PL/Java will be used necessarily has Java installed, and therefore support for +the `jar` format, so it is an obvious choice. + +The resulting `jar` can be simply extracted using the `jar` tool, and the files +moved to the proper locations, or it can be run with `java -jar`. It contains +two extra `.class` files to give it a very simple self-extracting behavior: +it will run `pg_config` to learn where PostgreSQL is installed, and extract +PL/Java's files into the correct locations. See [Installing PL/Java][install] +for the details. + +If the file is simply extracted using the `jar` tool, those two added class +files will also be extracted, and can be deleted; they are not needed for +PL/Java's operation. + +### Use with `jshell` as a testing environment + +The added classes supply some additional methods, unused during a simple +installation with `java -jar`, but accessible from Java's [JShell][] +scripting tool if it is launched with this `jar` on its classpath. +That allows `jshell` to serve as an environment for scripting tests +of PL/Java in a running PostgreSQL instance, with capabilities similar to +(and modeled on) the [PostgresNode][] Perl module distributed with PostgreSQL. + +See [this introduction][nodetut] and the javadoc for [the Node class][node] +for details. 
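A session might look roughly like the following sketch. The jar name and every method name below are hypothetical placeholders; the Node javadoc just cited and the linked introduction document the real API.

    // jshell --class-path <the installer jar>      (name omitted; see install docs)
    // Inside jshell, something along these lines (all names hypothetical):
    Node n = Node.get_new_node("test");  // hypothetical: define a scratch cluster
    n.init();                            // hypothetical: run initdb
    n.start();                           // hypothetical: start a server
    // ... exercise PL/Java over JDBC, much as PostgresNode supports in Perl ...
    n.stop();                            // hypothetical: shut the server down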
+ +[install]: ../install/install.html +[JShell]: https://docs.oracle.com/javase/9/jshell/introduction-jshell.htm +[PostgresNode]: https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/test/perl/PostgresNode.pm;h=aec3b9a;hb=e640093 +[node]: apidocs/org/postgresql/pljava/packaging/Node.html +[nodetut]: ../develop/node.html diff --git a/pljava-pgxs/pom.xml b/pljava-pgxs/pom.xml new file mode 100644 index 00000000..3d66c5fc --- /dev/null +++ b/pljava-pgxs/pom.xml @@ -0,0 +1,241 @@ + + 4.0.0 + + org.postgresql + pljava.app + 1.6.10 + + + pljava-pgxs + maven-plugin + + PL/Java PGXS + The maven plugin to build native code used inside PL/Java + + + + + org.apache.maven + maven-plugin-api + ${maven.version} + + + org.apache.maven.plugin-tools + maven-plugin-annotations + 3.6.0 + + + org.apache.maven.reporting + maven-reporting-impl + 3.0.0 + + + org.apache.maven.reporting + maven-reporting-api + 3.0 + + + + + + nashornmod + + [15,) + + + + org.openjdk.nashorn + nashorn-core + 15.4 + + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + none + + + + + org.apache.maven.plugins + maven-plugin-plugin + 3.6.0 + + true + + + + + mojo-descriptor + + descriptor + + + + help-goal + + helpmojo + + + + + + + + + + + org.apache.maven.plugins + maven-plugin-plugin + 3.6.0 + + + + org.postgresql + pljava-pgxs + ${project.version} + + + + scripted-report + + + + + + + + + + + diff --git a/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/AbstractPGXS.java b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/AbstractPGXS.java new file mode 100644 index 00000000..09d2a651 --- /dev/null +++ b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/AbstractPGXS.java @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2020-2024 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Kartik Ohri + * Chapman Flack + */ +package org.postgresql.pljava.pgxs; + +import java.nio.file.Path; +import java.util.List; +import java.util.Map; +import java.util.regex.MatchResult; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +/** + * Class to act as a blueprint for platform-specific build configurations in a + * {@code pom.xml}. + *

      + * A {@code scripted-goal} configuration in the POM should contain a script + * that somehow selects and supplies a concrete implementation of this abstract + * class. + *

      + * In {@code pljava-so/pom.xml}, a block of {@code application/javascript} is + * supplied that contains a {@code configuration} array of JS objects, each of + * which has a {@code name} entry, a {@code probe} function returning true on + * some supported platform, and the necessary functions to serve as an + * implementation of this class. The script selects one whose probe succeeds + * and, using JSR 223 magic, makes an instance of this class from it. + *
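For orientation, a directly coded Java implementation (rather than a script-supplied one) could look like the following minimal sketch, assuming a gcc-style toolchain and the compile/link signatures shown below. It is illustrative only and not part of the patch.

    import org.postgresql.pljava.pgxs.AbstractPGXS;

    import java.nio.file.Path;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    // Minimal sketch for a gcc-like toolchain. Error handling is reduced to
    // returning the process exit status; object placement under targetPath
    // is elided for brevity in compile().
    public class GccPGXS extends AbstractPGXS
    {
        @Override
        public int compile(String compiler, List<String> files, Path targetPath,
            List<String> includes, Map<String, String> defines, List<String> flags)
        {
            List<String> cmd = new ArrayList<>();
            cmd.add(compiler);
            cmd.addAll(formatIncludes(includes)); // inherited -I formatting
            cmd.addAll(formatDefines(defines));   // inherited -D formatting
            cmd.addAll(flags);
            cmd.add("-c");
            cmd.addAll(files);
            return run(cmd);
        }

        @Override
        public int link(String linker, List<String> flags, List<String> files,
            Path targetPath)
        {
            List<String> cmd = new ArrayList<>();
            cmd.add(linker);
            cmd.add("-shared");
            cmd.add("-o");
            cmd.add(targetPath.toString());
            cmd.addAll(files);
            cmd.addAll(flags);
            return run(cmd);
        }

        private int run(List<String> cmd)
        {
            try
            {
                return new ProcessBuilder(cmd).inheritIO().start().waitFor();
            }
            catch ( Exception e )
            {
                e.printStackTrace();
                return -1;
            }
        }
    }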

+ * The script can make use of convenience methods implemented here, and also + * a number of items (such as a {@code runCommand} function) presupplied in the + * script engine's binding scope by + * {@link PGXSUtils#getScriptEngine PGXSUtils.getScriptEngine} and by + * {@link ScriptingMojo#execute ScriptingMojo.execute}. + */ +public abstract class AbstractPGXS +{ + /** + * Performs platform-specific compilation of a set of {@code .c} files with + * the specified compiler, target path, includes, defines, and flags. + *

+ * An implementation should make any needed adjustments to the includes, + * defines, and flags, format everything appropriately for the compiler + * in question, execute it, and return an exit status (zero on success). + */ + public abstract int compile( + String compiler, List<String> files, Path targetPath, + List<String> includes, Map<String, String> defines, List<String> flags); + + /** + * Performs platform-specific linking of a set of object files with + * the specified linker and flags, to produce the shared object at the + * specified target path. + *

+ * An implementation should make any needed adjustments to the flags, format + * everything appropriately for the linker in question, execute it, and + * return an exit status (zero on success). + */ + public abstract int link( + String linker, List<String> flags, List<String> files, Path targetPath); + + /** + * Returns a list with all items prefixed with the correct include flag symbol. + * + * This is the default implementation for formatting the list of includes, + * and prefixes the includes with {@code -I}. For compilers like MSVC that + * require different formatting, the script should supply an overriding + * implementation of this method. + */ + public List<String> formatIncludes(List<String> includesList) + { + return includesList.stream().map(s -> "-I" + s) + .collect(Collectors.toList()); + } + + /** + * Returns a list with all defines represented correctly. + * + * This is the default implementation for formatting the map of defines. + * Each item is prefixed with {@code -D}. If the name is mapped to a + * non-null value, an {@code =} is appended, followed by the value. For + * compilers like MSVC that require different formatting, the script should + * supply an overriding implementation of this method. + */ + public List<String> formatDefines(Map<String, String> definesMap) + { + return definesMap.entrySet().stream() + .map(entry -> { + String define = "-D" + entry.getKey(); + if (entry.getValue() != null) + define += "=" + entry.getValue(); + return define; + }) + .collect(Collectors.toList()); + } + + /** + * Returns the requested {@code pg_config} property as a list of individual + * flags split at whitespace, except when quoted, and the quotes removed. + *
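For example, with an invented property value, the splitting behaves like this sketch (where pgxs is any AbstractPGXS instance):

    // Hypothetical value; real pg_config output varies by installation.
    List<String> flags =
        pgxs.getPgConfigPropertyAsList("-I'/opt/pg 13/include' -O2 -Wall");
    // flags is now: ["-I/opt/pg 13/include", "-O2", "-Wall"]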

      + * The assumed quoting convention is single straight quotes around regions + * to be protected, which do not have to be an entire argument. This method + * doesn't handle a value that contains a single quote as content; + * the intended convention for that case doesn't seem to be documented, and + * PostgreSQL's own build breaks in such a case, so there is little need, + * for now, to support it here. We don't know, for now, whether the + * convention implemented here is also right on Windows. + */ + public List getPgConfigPropertyAsList(String properties) { + Pattern pattern = Pattern.compile("(?:[^\\s']++|'(?:[^']*+)')++"); + Matcher matcher = pattern.matcher(properties); + return matcher.results() + .map(MatchResult::group) + .map(s -> s.replace("'", "")) + .collect(Collectors.toList()); + } +} diff --git a/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/GoalScript.java b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/GoalScript.java new file mode 100644 index 00000000..828dd1d3 --- /dev/null +++ b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/GoalScript.java @@ -0,0 +1,18 @@ +package org.postgresql.pljava.pgxs; + +import org.apache.maven.plugin.AbstractMojoExecutionException; + +/** + * Enables obtaining an interface from the script using + * {@link javax.script.Invocable} in order to correctly handle errors. + */ +public interface GoalScript { + + /** + * Executes the driver code for running the script. + * @return MojoExecutionException or MojoFailureException in case of error, + * null in case of successful execution + */ + AbstractMojoExecutionException execute(); + +} diff --git a/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/PGXSUtils.java b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/PGXSUtils.java new file mode 100644 index 00000000..bb216106 --- /dev/null +++ b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/PGXSUtils.java @@ -0,0 +1,806 @@ +/* + * Copyright (c) 2020-2024 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + * Kartik Ohri + */ +package org.postgresql.pljava.pgxs; + +import org.apache.maven.plugin.logging.Log; +import org.apache.maven.project.MavenProject; +import org.codehaus.plexus.configuration.PlexusConfiguration; + +import javax.script.ScriptContext; +import javax.script.ScriptEngine; +import javax.script.ScriptEngineManager; +import javax.tools.Diagnostic; +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.CharacterCodingException; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Objects; +import java.util.function.BiConsumer; +import java.util.function.BinaryOperator; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.ToIntFunction; +import java.util.jar.JarFile; +import java.util.jar.Manifest; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static java.lang.System.getProperty; +import static java.util.stream.Collectors.joining; +import static java.util.stream.Stream.iterate; +import static javax.script.ScriptContext.ENGINE_SCOPE; + +/** + * Utility methods to simplify and hide the bland implementation details + * for writing JavaScript snippets. + */ +public final class PGXSUtils +{ + /** + * maven project for which plugin is executed + */ + private final MavenProject project; + + /** + * maven plugin logger for diagnostics + */ + private final Log log; + + private static final Pattern mustBeQuotedForC = Pattern.compile( + "([\"\\\\]|(?<=\\?)\\?(?=[=(/)'-]))|" + // (just insert backslash) + "([\\a\b\\f\\n\\r\\t\\x0B])|" + // (use specific escapes) + "(\\p{Cc}((?=\\p{XDigit}))?)" // use hex, note whether an XDigit follows + ); + + public PGXSUtils (MavenProject project, Log log) + { + this.project = project; + this.log = log; + } + + /** + * Returns a ScriptEngine with some useful engine-scoped bindings + * supplied for the convenience of the script. + *

      + * These bindings are placed in the engine's scope: + *

+ * <dl>
+ * <dt>project</dt><dd>The Maven project instance</dd>
+ * <dt>utils</dt><dd>This object</dd>
+ * <dt>error, warn, info, debug</dt><dd>Consumers of {@link CharSequence} that
+ * will log a message through Maven with the corresponding severity</dd>
+ * <dt>diag</dt><dd>A BiConsumer of {@link Diagnostic.Kind} and a
+ * {@link CharSequence}, to log a message through Maven with its severity
+ * determined by the {@code Diagnostic.Kind}.</dd>
+ * <dt>runCommand</dt><dd>A function from {@link ProcessBuilder} to {@code int}
+ * that will run the specified command and return its exit status. The
+ * command and arguments are first logged through Maven at {@code debug}
+ * level.</dd>
+ * <dt>runWindowsCRuntimeCommand</dt><dd>A function from {@link ProcessBuilder}
+ * to {@code int} that will apply the
+ * {@link #forWindowsCRuntime forWindowsCRuntime} transformation to the
+ * arguments and then run the specified command and return its exit
+ * status. The command and arguments are first logged through Maven at
+ * {@code debug} level, and before the transformation is applied.</dd>
+ * <dt>buildPaths</dt><dd>Separates a list of pathnames into those that belong
+ * on a class path and those that belong on a module path.</dd>
+ * <dt>getPgConfigProperty</dt><dd>Returns the output of {@code pg_config} when
+ * run with the given single argument.</dd>
+ * <dt>isProfileActive</dt><dd>Predicate indicating whether a named Maven
+ * profile is active.</dd>
+ * <dt>quoteStringForC</dt><dd>Transforms a {@code String} into a C string
+ * literal representing it.</dd>
+ * <dt>resolve</dt><dd>A direct reference to the {@code Path.resolve} overload
+ * with {@code Path} parameter types, to work around some versions of
+ * graaljs being unable to determine which overload a script intends.</dd>
+ * <dt>setProjectProperty</dt><dd>Sets a property of the Maven project to a
+ * supplied value.</dd>
+ * </dl>
      + * + * @param script the script block element in the configuration block of the + * plugin in the project object model. Its {@code mimetype} or + * {@code engine} attribute will be used to find a suitable engine + * @return ScriptEngine based on the engine and/or MIME type provided in the + * script block + */ + ScriptEngine getScriptEngine(PlexusConfiguration script) + { + /* + * Set the polyglot.js.nashorn-compat system property to true if it is + * unset and this is Java >= 15. It would be preferable to set this in + * a pom profile rather than hardcoding it here; properties-maven-plugin + * can do it, but that doesn't happen in the 'site' lifecycle, and we + * use scripting in reports too. In Java >= 15, the Nashorn JavaScript + * engine isn't available, and a profile will have arranged for Graal's + * JavaScript engine to be on the classpath, but it doesn't behave + * compatibly with Nashorn unless this property is set. + */ + if ( 0 <= Runtime.version().compareTo(Runtime.Version.parse("15-ea")) ) + System.getProperties() + .putIfAbsent("polyglot.js.nashorn-compat", "true"); + + ScriptEngine engine = null; + try + { + String engineName = script.getAttribute("engine"); + String mimeType = script.getAttribute("mimetype"); + + if (engineName == null && mimeType == null) + throw new IllegalArgumentException("Neither script engine nor" + + " mimetype defined."); + else + { + ScriptEngineManager manager = + new ScriptEngineManager(new ScriptEngineLoader( + ScriptingMojo.class.getClassLoader())); + + if (engineName != null) + engine = manager.getEngineByName(engineName); + + if (mimeType != null) + if (engine != null) + { + if ( ! engine.getFactory().getMimeTypes() + .contains(mimeType) ) + log.warn("Specified engine does " + + "not have given mime type : " + mimeType); + } + else + engine = manager.getEngineByMimeType(mimeType); + + if (engine == null) + throw new IllegalArgumentException("No suitable engine " + + "found for specified engine name or mime type"); + } + log.debug("Loaded script engine " + engine); + } catch (Exception e) { + log.error(e); + } + + ScriptContext context = engine.getContext(); + + /* + * Give the script convenient access to the Maven project and this + * object. + */ + context.setAttribute("project", project, ENGINE_SCOPE); + context.setAttribute("utils", this, ENGINE_SCOPE); + + /* + * Give the script some convenient methods for logging to the Maven log. + * Only supply the versions with one CharSequence parameter, in case of + * a script engine that might not handle overloads well. The script may + * have another way to get access to the Log instance and use its other + * methods; these are just for convenience. + */ + context.setAttribute("error", + (Consumer) log::error, ENGINE_SCOPE); + context.setAttribute("warn", + (Consumer) log::warn, ENGINE_SCOPE); + context.setAttribute("info", + (Consumer) log::info, ENGINE_SCOPE); + context.setAttribute("debug", + (Consumer) log::debug, ENGINE_SCOPE); + + /* + * Also provide a specialized method useful for a script that may + * handle diagnostics from Java tools. 
+ */ + context.setAttribute("diag", + (BiConsumer)((kind,content) -> + { + switch ( kind ) + { + case ERROR: + log.error(content); + break; + case MANDATORY_WARNING: + case WARNING: + log.warn(content); + break; + case NOTE: + log.info(content); + break; + case OTHER: + log.debug(content); + break; + } + } + ), ENGINE_SCOPE); + + /* + * Supply a runCommand function to which the script can supply + * a ProcessBuilder after configuring it as needed, and an alias + * runWindowsCRuntimeCommand that does the same, but applies the + * forWindowsCRuntime transformation to the ProcessBuilder's arguments + * first. Two aliases are used so that the command arguments can be + * logged (at debug level) in either case, and before the transformation + * is applied, in the Windows case. + */ + context.setAttribute("runCommand", + (ToIntFunction) b -> + { + log.debug("To run: " + b.command()); + return runCommand(b); + }, ENGINE_SCOPE); + + context.setAttribute("runWindowsCRuntimeCommand", + (ToIntFunction) b -> + { + log.debug("To run (needs WindowsCRuntime transformation): " + + b.command()); + return runCommand(forWindowsCRuntime(b)); + }, ENGINE_SCOPE); + + /* + * Convenient access to some other methods provided here. + */ + context.setAttribute("buildPaths", + (Function, Map>) this::buildPaths, + ENGINE_SCOPE); + + context.setAttribute("getPgConfigProperty", + (Function) p -> + { + try + { + return getPgConfigProperty(p); + } + catch ( Exception e ) + { + log.error(e); + return null; + } + }, ENGINE_SCOPE); + + context.setAttribute("isProfileActive", + (Function) this::isProfileActive, + ENGINE_SCOPE); + + context.setAttribute("quoteStringForC", + (Function) this::quoteStringForC, ENGINE_SCOPE); + + context.setAttribute("setProjectProperty", + (BiConsumer)this::setProjectProperty, ENGINE_SCOPE); + + /* + * A graaljs bug (graalvm/graaljs#254) means that when you are passing + * a Path object to Path.resolve (which has overloads taking a Path or + * a String), graaljs can't decide which one you mean. Provide a resolve + * (Path,Path) function to make it a little more blindingly obvious. + */ + context.setAttribute("resolve", (BinaryOperator)Path::resolve, + ENGINE_SCOPE); + + return engine; + } + + /** + * Returns the input wrapped in double quotes and with internal characters + * escaped where appropriate using the C conventions. + * + * @param s string to be escaped + * @return a C string literal representing s + */ + public String quoteStringForC (String s) + { + Matcher m = mustBeQuotedForC.matcher(s); + StringBuffer b = new StringBuffer(); + while (m.find()) + { + if (-1 != m.start(1)) // things that just need a backslash + m.appendReplacement(b, "\\\\$1"); + else if (-1 != m.start(2)) // things with specific escapes + { + char ec = 0; + switch (m.group(2)) // switch/case uses === + { + case "\u0007": + ec = 'a'; + break; + case "\b": + ec = 'b'; + break; + case "\f": + ec = 'f'; + break; + case "\n": + ec = 'n'; + break; + case "\r": + ec = 'r'; + break; + case "\t": + ec = 't'; + break; + case "\u000B": + ec = 'v'; + break; + } + m.appendReplacement(b, "\\\\" + ec); + } + else // it's group 3, use hex escaping + { + m.appendReplacement(b, + "\\\\x" + Integer.toHexString( + m.group(3).codePointAt(0)) + + (-1 == m.start(4) ? "" : "\"\"")); // XDigit follows? + } + } + return "\"" + m.appendTail(b) + "\""; + } + + /** + * Returns the string decoded from input bytes using default platform + * charset. 
+ * + * @param bytes byte array to be decoded + * @return string decoded from input bytes + * @throws CharacterCodingException if unable to decode bytes using + * default platform charset + */ + public String defaultCharsetDecodeStrict (byte[] bytes) + throws CharacterCodingException + { + return Charset.defaultCharset().newDecoder() + .decode(ByteBuffer.wrap(bytes)).toString(); + } + + /** + * Returns the output, decoded using default platform charset, of the + * {@code pg_config} command executed with the single supplied argument. + *
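A typical call, assuming pg_config is on the PATH or pgsql.pgconfig points at it (a sketch, not from the patch; from a configured script the instance is reachable through the 'utils' binding described above):

    // "--bindir" is a standard pg_config option.
    String bindir = utils.getPgConfigProperty("--bindir");
    // e.g. /usr/lib/postgresql/12/bin  (the value depends on the installation)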

      + * If multiple versions of {@code pg_config} are available or + * {@code pg_config} is not present on the path, the system property + * {@code pgsql.pgconfig} should be set as an absolute path to the desired + * executable. + * + * @param pgConfigArgument argument to be passed to the command + * @return output of the input command executed with the input argument + * @throws IOException if unable to read output of the command + * @throws InterruptedException if command does not complete successfully + */ + public String getPgConfigProperty (String pgConfigArgument) + throws IOException, InterruptedException + { + String pgConfigCommand = + System.getProperty("pgsql.pgconfig", "pg_config"); + + ProcessBuilder processBuilder = + new ProcessBuilder(pgConfigCommand, pgConfigArgument); + processBuilder.redirectError(ProcessBuilder.Redirect.INHERIT); + Process process = processBuilder.start(); + process.getOutputStream().close(); + byte[] bytes = process.getInputStream().readAllBytes(); + + int exitCode = process.waitFor(); + if (exitCode != 0) + throw new InterruptedException("pg_config process failed and " + + "exited with " + exitCode); + String pgConfigOutput = defaultCharsetDecodeStrict(bytes); + return pgConfigOutput.substring(0, + pgConfigOutput.length() - System.lineSeparator().length()); + } + + /** + * Reports the detailed {@code PG_VERSION_STR} for the PostgreSQL version + * found to build against. + *

      + * This should be found as a C string literal after + * {@code #define PG_VERSION_STR} in + * includedir_server/{@code pg_config.h}. + *

      + * If the value can be found, it is logged at {@code info} level. Otherwise, + * the exception(s) responsible will be logged at {@code debug} level. + * @param includedir_server pass the result of a previous + * {@code getPgConfigProperty(..., "--includedir_server")} + */ + public void reportPostgreSQLVersion(String includedir_server) + { + Path pg_config_h = Paths.get(includedir_server, "pg_config.h"); + try + { + log.info( + defaultCharsetDecodeStrict(Files.readAllBytes(pg_config_h)) + .replaceFirst( + "(?ms).*^#define\\s++PG_VERSION_STR\\s++(?-s:(.++))$.*+", + "Found $1") + ); + } + catch ( IOException | IndexOutOfBoundsException e ) + { + log.debug( + "in reportPostgreSQLVersion: " + + iterate(e, Objects::nonNull, Throwable::getCause) + .map(Object::toString).collect(joining("\nCaused by: ")) + ); + } + } + + /** + * Sets the value of a property for the current project. + * + * @param property key to use for property + * @param value the value of property to set + */ + public void setProjectProperty (String property, String value) + { + project.getProperties().setProperty(property, value); + } + + /** + * Returns a ProcessBuilder with suitable defaults and arguments added + * by the supplied consumer. + * + * @param consumer function which adds arguments to the ProcessBuilder + * @return ProcessBuilder with input arguments and suitable defaults + */ + public ProcessBuilder processBuilder(Consumer> consumer) + { + ProcessBuilder processBuilder = new ProcessBuilder(); + consumer.accept(processBuilder.command()); + processBuilder.redirectError(ProcessBuilder.Redirect.INHERIT); + processBuilder.redirectOutput(ProcessBuilder.Redirect.INHERIT); + processBuilder.directory(new File(project.getBuild().getDirectory(), + "pljava-pgxs")); + return processBuilder; + } + + /** + * Executes a ProcessBuilder and returns the exit code of the process. + * + * @param processBuilder to execute + * @return exit code of the executed process or -1 if an exception occurs + * during execution + */ + public int runCommand(ProcessBuilder processBuilder) + { + Path outputDirectoryPath = processBuilder.directory().toPath(); + try + { + if (!Files.exists(outputDirectoryPath)) + Files.createDirectories(outputDirectoryPath); + Process process = processBuilder.start(); + return process.waitFor(); + } catch (Exception e) { + log.error(e); + } + return -1; + } + + /** + * Returns true if the profile with given name exists and is active, false + * otherwise. + *

+ * A warning is logged if no profile with the input name exists in the + * current project. + * + * @param profileName name of profile to check + * @return true if profile exists and is active, false otherwise + */ + public boolean isProfileActive(String profileName) + { + boolean isValidProfile = + project.getModel().getProfiles().stream() + .anyMatch(profile -> profile.getId().equals(profileName)); + + if (!isValidProfile) + { + log.warn(profileName + " does not exist in " + project.getName()); + return false; + } + + return project.getActiveProfiles().stream() + .anyMatch(profile -> profile.getId().equals(profileName)); + } + + /** + * Returns a two-element map with {@code classpath} and + * {@code modulepath} as keys and their joined string paths as the + * respective values. + *
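As an illustration with invented path elements (a sketch; 'utils' is the bound PGXSUtils instance):

    Map<String, String> paths = utils.buildPaths(List.of(
        "/home/build/.m2/repository/org/example/modular-lib-1.0.jar",
        "/home/build/.m2/repository/org/example/plain-lib-1.0.jar"));
    String classpath  = paths.get("classpath");
    String modulepath = paths.get("modulepath");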

      + * For each supplied element, + * {@link #shouldPlaceOnModulepath shouldPlaceOnModulepath} is used to + * determine which path the element is added to. + * + * @param elements list of elements to build classpath and modulepath from + * @return a map containing the {@code classpath} and {@code modulepath} + * as separate elements + */ + public Map buildPaths(List elements) + { + List modulepathElements = new ArrayList<>(); + List classpathElements = new ArrayList<>(); + String pathSeparator = System.getProperty("path.separator"); + try + { + for (String element : elements) + { + if (element.contains(pathSeparator)) + log.warn(String.format("cannot add %s to path because " + + "it contains path separator %s", + element, pathSeparator)); + else if (shouldPlaceOnModulepath(element)) + modulepathElements.add(element); + else + classpathElements.add(element); + } + } + catch (Exception e) + { + log.error(e); + } + String modulepath = String.join(pathSeparator, modulepathElements); + String classpath = String.join(pathSeparator, classpathElements); + return Map.of("classpath", classpath, "modulepath", modulepath); + } + + /** + * Returns true if the element should be placed on the module path. + *

+ * A file path element should be placed on the module path if it points to
+ * <ol>
+ * <li>a directory with a top level {@code module-info.class} file</li>
+ * <li>a {@code JAR} file having a {@code module-info.class} entry or the
+ * {@code Automatic-Module-Name} as a manifest attribute</li>
+ * </ol>
      + * + * @param filePath the filepath to check + * @return true if input path should go on modulepath, false otherwise + * @throws IOException any thrown by the underlying file operations + */ + public boolean shouldPlaceOnModulepath(String filePath) + throws IOException + { + Path path = Paths.get(filePath); + if (Files.isDirectory(path)) + { + Path moduleInfoFile = path.resolve("module-info.class"); + return Files.exists(moduleInfoFile); + } + + if (path.getFileName().toString().endsWith(".jar")) + { + try(JarFile jarFile = new JarFile(path.toFile())) + { + if (jarFile.getEntry("module-info.class") != null) + return true; + Manifest manifest = jarFile.getManifest(); + if (manifest == null) + return false; + return manifest.getMainAttributes() + .containsKey("Automatic-Module-Name"); + } + } + return false; + } + + /** + * Returns a list of files with given extension in and below + * the input directory. + * + * @param sourceDirectory root of the tree of files to list + * @param extension to filter files to be selected + * @return list of strings of absolute paths of files + */ + public List getFilesWithExtension(Path sourceDirectory, + String extension) + { + try + { + return Files + .walk(sourceDirectory) + .filter(Files::isRegularFile) + .map(Path::toAbsolutePath) + .map(Path::toString) + .filter(path -> path.endsWith(extension)) + .collect(java.util.stream.Collectors.toList()); + } catch (Exception e) { + log.error(e); + } + return null; + } + + /* + * This method is duplicated in pljava-packaging/Node.java. If making + * changes to this method, review the other occurrence also and replicate + * the changes there if desirable. + */ + /** + * Adjust the command arguments of a {@code ProcessBuilder} so that they + * will be recovered correctly on Windows by a target C/C++ program using + * the argument parsing algorithm of the usual C run-time code, when it is + * known that the command will not be handled first by {@code cmd}. + *

      + * This transformation must account for the way the Windows C runtime will + * ultimately parse the parameters apart, and also for the behavior of + * Java's runtime in assembling the command line that the invoked process + * will receive. + * @param pb a ProcessBuilder whose command has been set to an executable + * that parses parameters using the C runtime rules, and arguments as they + * should result from parsing. + * @return The same ProcessBuilder, with the argument list rewritten as + * necessary to produce the original list as a result of Windows C runtime + * parsing. + * @throws IllegalArgumentException if the ProcessBuilder does not have at + * least the first command element (the executable to run) + * @throws UnsupportedOperationException if the arguments passed, or system + * properties in effect, produce a case this transformation cannot handle + */ + public ProcessBuilder forWindowsCRuntime(ProcessBuilder pb) + { + ListIterator args = pb.command().listIterator(); + if ( ! args.hasNext() ) + throw new IllegalArgumentException( + "ProcessBuilder command must not be empty"); + + /* + * The transformation implemented here must reflect the parsing rules + * of the C run-time code, and the rules are taken from: + * http://www.daviddeley.com/autohotkey/parameters/parameters.htm#WINARGV + * + * It must also take careful account of what the Java runtime does to + * the arguments before the target process is launched, and line numbers + * in comments below refer to this version of the source: + * http://hg.openjdk.java.net/jdk9/jdk9/jdk/file/65464a307408/src/java.base/windows/classes/java/lang/ProcessImpl.java + * + * 1. Throw Unsupported if the jdk.lang.Process.allowAmbiguousCommands + * system property is in force. + * + * Why? + * a. It is never allowed under a SecurityManager, so to allow it + * at all would allow code's behavior to change depending on + * whether a SecurityManager is in place. + * b. It results in a different approach to preparing the arguments + * (line 364) that would have to be separately analyzed. + * + * Do not test this property with Boolean.getBoolean: that returns true + * only if the value equalsIgnoreCase("true"), which does not match the + * test in the Java runtime (line 362). + */ + String propVal = getProperty("jdk.lang.Process.allowAmbiguousCommands"); + if ( null != propVal && ! "false".equalsIgnoreCase(propVal) ) + throw new UnsupportedOperationException( + "forWindowsCRuntime transformation does not support operation" + + " with jdk.lang.Process.allowAmbiguousCommands in effect"); + + /* + * 2. Throw Unsupported if the executable path name contains a " + * + * Why? Because getExecutablePath passes true, unconditionally, to + * isQuoted (line 303), so it will throw IllegalArgumentException if + * there is any " in the executable path. The catch block for that + * exception (line 383) will make a highly non-correctness-preserving + * attempt to join and reparse the arguments, using + * getTokensFromCommand (line 198), which uses a regexp (line 188) + * that does not even remotely resemble the C runtime parsing rules. + * + * Possible future work: this case could be handled by rewriting the + * entire command as an invocation via CMD or another shell. + */ + String executable = args.next(); + if ( executable.contains("\"") ) + throw new UnsupportedOperationException( + "forWindowsCRuntime does not support invoking an executable" + + " whose name contains a \" character"); + + /* + * 3. 
Throw Unsupported if the executable path ends in .cmd or .bat + * (case-insensitively). + * + * Why? For those extensions, the Java runtime will select different + * rules (line 414). + * a. Those rules would need to be separately analyzed. + * b. They will reject (line 286) any argument that contains a " + * + * Possible future work: this case could be handled by rewriting the + * entire command as an invocation via CMD or another shell (which is + * exactly the suggestion in the exception message that would be + * produced if an argument contains a "). + */ + if ( executable.matches(".*\\.(?i:cmd|bat)$") ) + throw new UnsupportedOperationException( + "forWindowsCRuntime does not support invoking a command" + + " whose name ends in .cmd or .bat"); + + /* + * 4. There is a worrisome condition in the Java needsEscaping check + * (line 277), where it would conclude that escaping is NOT needed + * if an argument both starts and ends with a " character. In other + * words, it would treat that case (and just that case) not as + * characters that are part of the content and need to be escaped, + * but as a sign that its job has somehow already been done. + * + * However, that will not affect this transformation, because our + * rule 5 below will ensure that any leading " has a \ added before, + * and therefore the questionable Java code will never see from us + * an arg that both starts and ends with a ". + * + * There is one edge case where this behavior of the Java runtime + * will be relied on (see rule 7 below). + */ + + while ( args.hasNext() ) + { + String arg = args.next(); + + /* + * 5. While the Java runtime code will add " at both ends of the + * argument IF the argument contains space, tab, <, or >, it does + * so with zero attention to any existing " characters in the + * content of the argument. Those must, of course, be escaped so + * the C runtime parser will not see them as ending the quoted + * region. By those rules, a " is escaped by a \ and a \ is only + * special if it is followed by a " (or in a sequence of \ + * ultimately leading to a "). The needed transformation is to + * find any instance of n backslashes (n may be zero) followed + * by a ", and replace that match with 2n+1 \ followed by the ". + * + * This transformation is needed whether or not the Java runtime + * will be adding " at start and end. If it does not, the same + * \ escaping is needed so the C runtime will not see a " as + * beginning a quoted region. + */ + String transformed = arg.replaceAll("(\\\\*+)(\")", "$1$1\\\\$2"); + + /* + * 6. Only if the Java runtime will be adding " at start and end + * (i.e., only if the arg contains space, tab, <, or >), there is + * one more case where \ can be special: at the very end of the + * arg (where it will end up followed by a " when the Java + * runtime has done its thing). The Java runtime is semi-aware of + * this case (line 244): it will add a single \ if it sees that + * the arg ends with a \. However, that isn't the needed action, + * which is to double ALL consecutive \ characters ending the + * arg. + * + * So the action needed here is to double all-but-one of any + * consecutive \ characters at the end of the arg, leaving one + * that will be doubled by the Java code. + */ + if ( transformed.matches("(?s:[^ \\t<>]*+.++)") ) + transformed = transformed.replaceFirst( + "(\\\\)(\\\\*+)$", "$1$2$2"); + + /* + * 7. If the argument is the empty string, it must be represented + * as "" or it will simply disappear. 
The Java runtime will not + * do that for us (after all, the empty string does not contain + * space, tab, <, or >), so it has to be done here, replacing the + * arg with exactly "". + * + * This is the one case where we produce a value that both starts + * and ends with a " character, thereby triggering the Java + * runtime behavior described in (4) above, so the Java runtime + * will avoid trying to further "protect" the string we have + * produced here. For this one case, that 'worrisome' behavior is + * just what we want. + */ + if ( transformed.isEmpty() ) + transformed = "\"\""; + + if ( ! transformed.equals(arg) ) + args.set(transformed); + } + + return pb; + } + +} diff --git a/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/RelativizingFileManager.java b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/RelativizingFileManager.java new file mode 100644 index 00000000..85756a47 --- /dev/null +++ b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/RelativizingFileManager.java @@ -0,0 +1,329 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ + +package org.postgresql.pljava.pgxs; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Writer; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; + +import java.nio.charset.Charset; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.CharsetEncoder; + +import java.nio.file.Path; + +import java.util.Collection; +import java.util.Iterator; + +import java.util.regex.Pattern; + +import java.util.stream.Collectors; +import java.util.stream.Stream; +import static java.util.stream.StreamSupport.stream; + +import javax.tools.DocumentationTool; // mentioned in javadoc +import javax.tools.FileObject; +import javax.tools.ForwardingJavaFileManager; +import javax.tools.ForwardingJavaFileObject; +import javax.tools.JavaFileManager; +import javax.tools.JavaFileObject; +import static javax.tools.JavaFileObject.Kind.HTML; +import javax.tools.StandardJavaFileManager; + +/** + * A {@link ForwardingJavaFileManager} that interposes when asked for an output + * file of type {@code HTML}, and rewrites {@code href} URLs that contain + * {@code RELDOTS} as a component. + *
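(Illustrative aside on the forWindowsCRuntime transformation above, not part of the patch: a minimal, self-contained sketch that applies the same regular expressions as rules 5 and 6, plus the empty-argument rule 7, to a few hypothetical argument strings. The class name and sample values are invented for illustration only.)

    import java.util.List;

    public class WindowsQuotingSketch
    {
        /* Rule 5: n backslashes followed by a " become 2n+1 backslashes and the ". */
        static String escapeQuotes(String arg)
        {
            return arg.replaceAll("(\\\\*+)(\")", "$1$1\\\\$2");
        }

        /* Rule 6: only when the Java runtime will quote the arg (it contains
         * space, tab, < or >), double all but one of any trailing backslashes;
         * the Java runtime itself doubles the last one. */
        static String fixTrailingBackslashes(String arg)
        {
            if ( arg.matches("(?s:[^ \\t<>]*+.++)") )
                return arg.replaceFirst("(\\\\)(\\\\*+)$", "$1$2$2");
            return arg;
        }

        public static void main(String[] args)
        {
            for ( String s : List.of("say \"hi\"", "C:\\dir with space\\\\", "") )
            {
                String t = fixTrailingBackslashes(escapeQuotes(s));
                if ( t.isEmpty() )
                    t = "\"\"";   /* rule 7: an empty argument must be passed as "" */
                System.out.println("[" + s + "] -> [" + t + "]");
            }
        }
    }

So [say "hi"] becomes [say \"hi\"], a trailing \\ becomes \\\ (the Java runtime then adds the fourth \ before its closing "), and an empty argument becomes "".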

<h2>Purpose</h2>

      + *

      + * This file manager is intended for use with the {@link DocumentationTool} + * when {@code -linkoffline} is used to generate links between subprojects + * (for example, {@code pljava-examples} to {@code pljava-api}). Maven's + * {@code site:stage} will copy the generated per-subproject documentation trees + * into a single {@code staging} directory, which can be relocated, deployed to + * web servers, etc. Therefore, it is both reasonable and desirable for the + * subproject API docs to refer to each other by relative links. However, the + * documentation for {@code -linkoffline} states that the relative links should + * be given as if from the output destination ({@code -d}) directory. That + * implies that the tool will add the right number of {@code ../} components, + * when generating links in a file some levels below the {@code -d} directory, + * so that the resulting relative URL will be correct. And it doesn't. The tool + * simply doesn't. + *

      + * As a workaround, the {@code -linkoffline} option can be told to produce URLs + * that contain {@code RELDOTS}, for example, + * {@code ../../RELDOTS/pljava-api/apidocs}, and this file manager can be used + * when running the tool. As the HTML files are written, any {@code href} URL + * that begins with zero or more {@code ../} followed by {@code RELDOTS} will + * have the {@code RELDOTS} replaced with the right number of {@code ../} to + * ascend from that file's containing directory to the output destination + * directory, resulting in relative URLs that are correct in files at any depth + * in the API docs tree. + *
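(Illustrative aside, not part of the patch: a tiny hypothetical demonstration of that rewriting, reusing the pattern the file manager compiles below; the depth and URL are made up.)

    import java.util.regex.Pattern;

    public class ReldotsSketch
    {
        public static void main(String[] args)
        {
            /* The same pattern the file manager uses to spot href URLs with RELDOTS. */
            Pattern toReplace =
                Pattern.compile("(\\shref=\"(?:\\.\\./)*+)RELDOTS/");

            /* Suppose the HTML file sits three directory levels below -d. */
            String dots = "../".repeat(3);

            String html =
                "<a href=\"../../RELDOTS/pljava-api/apidocs/index.html\">API</a>";

            System.out.println(toReplace.matcher(html).replaceAll("$1" + dots));
            /* prints: <a href="../../../../../pljava-api/apidocs/index.html">API</a> */
        }
    }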

      + * An alert reader will notice that {@code RELDOTS} is expanded to exactly what + * {@code {@docRoot}} is supposed to expand to. But experiment showed that + * {@code {@docRoot}} does not get expanded in a {@code -linkoffline} URL. + *

<h2>Limitations</h2>

      + * The postprocessing is done blindly to any rules of HTML syntax. It will + * simply replace {@code RELDOTS} in any substring of the content resembling + * href="../RELDOTS/ (with any number, zero or more, of + * {@code ../} before the {@code RELDOTS}). The example in the preceding + * sentence was written carefully to avoid being rewritten in this comment. + *

      + * Only the form with a double quote is recognized, as the javadoc tool does not + * appear to generate the single-quoted form. + */ +public class RelativizingFileManager +extends ForwardingJavaFileManager +implements StandardJavaFileManager +{ + private final Charset outputEncoding; + + /** + * Construct a {@code RelativizingFileManager}, given the underlying file + * manager from {@link DocumentationTool#getStandardFileManager}, and the + * output encoding to be used. + *

      + * The javadoc tool requests {@link OutputStream}s for its output files, and + * supplies content already encoded, so the encoding is needed in order to + * decode them here for simple processing (as {@code java.util.regex} does + * not offer byte-domain flavors of patterns and matchers), then re-encode + * the result. + *

      + * The file manager constructed here must still be configured by passing + * the necessary subset of the desired javadoc options to + * {@link #handleFirstOptions handleFirstOptions}. + * @param fileManager the original file manager to be wrapped by this one + * @param outputEncoding the encoding that the caller will be using when + * writing bytes to an output file from this manager + */ + public RelativizingFileManager( + StandardJavaFileManager fileManager, + Charset outputEncoding) + { + super(fileManager); + this.outputEncoding = outputEncoding; + } + + static final Pattern toReplace = Pattern.compile( + "(\\shref=\"(?:\\.\\./)*+)RELDOTS/"); + + /** + * Overridden to return the superclass result unchanged unless the requested + * file is of kind {@code HTML}, and in that case to return a file object + * that will interpose on the {@code OutputStream} and apply the rewriting. + */ + @Override + public FileObject getFileForOutput( + Location location /* location */, + String packageName, + String relativePath, + FileObject sibling) + throws IOException + { + FileObject fo = fileManager.getFileForOutput( + location, packageName, relativePath, sibling); + if ( ! (fo instanceof JavaFileObject) ) + return fo; + JavaFileObject jfo = (JavaFileObject)fo; + if ( ! (HTML == jfo.getKind()) ) + return fo; + + Path fp = asPath(fo); + Path r = + stream(getLocationAsPaths(location).spliterator(), false) + .filter(p -> fp.startsWith(p)).findAny().get(); + + int depth = r.relativize(fp).getNameCount() - 1; // -1 for file name + + if ( location.isModuleOrientedLocation() ) + ++ depth; + + final String dots = Stream.generate(() -> "../").limit(depth) + .collect(Collectors.joining()); + + return new ForwardingJavaFileObject<>(jfo) + { + @Override + public OutputStream openOutputStream() throws IOException + { + final OutputStream os = fileObject.openOutputStream(); + + return new ByteArrayOutputStream() + { + private boolean closed = false; + + @Override + public void close() throws IOException + { + if ( closed ) + return; + closed = true; + super.close(); + + try (os; Writer w = + new OutputStreamWriter(os, + outputEncoding.newEncoder())) + { + ByteBuffer bb = ByteBuffer.wrap(buf, 0, count); + CharBuffer cb = + outputEncoding.newDecoder().decode(bb); + String fixed = toReplace.matcher(cb).replaceAll( + "$1" + dots); + w.append(fixed); + } + } + }; + } + }; + } + + /** + * Call {@link #handleOption handleOption} on as many of the first supplied + * options as the file manager recognizes. + *

      + * Returns when {@link #handleOption handleOption} first returns false, + * indicating an option the file manager does not recognize. + *
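(Illustrative aside, not part of the patch: a sketch of wiring this file manager into the DocumentationTool, assuming the option list and source files are already known; the paths and option values below are placeholders.)

    import java.nio.charset.StandardCharsets;
    import java.util.List;
    import javax.tools.DocumentationTool;
    import javax.tools.JavaFileObject;
    import javax.tools.StandardJavaFileManager;
    import javax.tools.ToolProvider;
    import org.postgresql.pljava.pgxs.RelativizingFileManager;

    public class RelativizingJavadocSketch
    {
        public static void main(String[] args) throws Exception
        {
            DocumentationTool tool = ToolProvider.getSystemDocumentationTool();
            StandardJavaFileManager std =
                tool.getStandardFileManager(null, null, StandardCharsets.UTF_8);

            RelativizingFileManager fm =
                new RelativizingFileManager(std, StandardCharsets.UTF_8);

            /* Options the file manager recognizes (-d, -encoding, ...) are placed
             * first, so the same list configures the file manager and the tool. */
            List<String> options = List.of(
                "-d", "pljava-examples/target/site/apidocs",
                "-encoding", "UTF-8",
                "-linkoffline", "../../RELDOTS/pljava-api/apidocs",
                    "pljava-api/target/site/apidocs");
            fm.handleFirstOptions(options);

            Iterable<? extends JavaFileObject> units =
                std.getJavaFileObjects("pljava-examples/src/main/java/Example.java");

            tool.getTask(null, fm, null, null, options, units).call();
        }
    }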

      + * As the options recognized by the standard file manager are generally + * those among the "Standard Options" that javadoc inherits from javac + * (including the various location-setting options such as + * {@code -classpath}, as well as {@code -encoding}), with a little care to + * place those first in the argument list to be passed to the tool itself, + * the same list can be passed to this method to configure the file manager, + * without any more complicated option recognition needed here. + * @param firstOptions an Iterable of options, where those recognized by a + * file manager must be first + */ + public void handleFirstOptions(Iterable firstOptions) + { + Iterator it = firstOptions.iterator(); + + while ( it.hasNext() ) + if ( ! handleOption(it.next(), it) ) + break; + } + + /* + * The file manager supplied by the tool is an instance of + * StandardJavaFileManager. There is no forwarding version of that, so we + * must extend ForwardingJavaFileManager and then supply forwarding versions + * of all methods added in StandardJavaFileManager. Those boilerplate + * forwarding methods follow. + */ + + @Override + public Iterable + getJavaFileObjectsFromFiles(Iterable files) + { + return fileManager.getJavaFileObjectsFromFiles(files); + } + + // @Override only when support horizon advances to >= Java 13 + public Iterable + getJavaFileObjectsFromPaths(Collection paths) + { + return fileManager.getJavaFileObjectsFromPaths(paths); + } + + @Override + public Iterable + getJavaFileObjectsFromPaths(Iterable paths) + { + return fileManager.getJavaFileObjectsFromPaths(paths); + } + + @Override + public Iterable + getJavaFileObjects(File... files) + { + return fileManager.getJavaFileObjects(files); + } + + @Override + public Iterable + getJavaFileObjects(Path... paths) + { + return fileManager.getJavaFileObjects(paths); + } + + @Override + public Iterable + getJavaFileObjectsFromStrings(Iterable names) + { + return fileManager.getJavaFileObjectsFromStrings(names); + } + + @Override + public Iterable + getJavaFileObjects(String... names) + { + return fileManager.getJavaFileObjects(names); + } + + @Override + public void setLocation(Location location, Iterable files) + throws IOException + { + fileManager.setLocation(location, files); + } + + @Override + public void setLocationFromPaths( + Location location, + Collection paths) + throws IOException + { + fileManager.setLocationFromPaths(location, paths); + } + + @Override + public void setLocationForModule( + Location location, + String moduleName, + Collection paths) + throws IOException + { + fileManager.setLocationForModule(location, moduleName, paths); + } + + @Override + public Iterable getLocation(Location location) + { + return fileManager.getLocation(location); + } + + @Override + public Iterable getLocationAsPaths(Location location) + { + return fileManager.getLocationAsPaths(location); + } + + @Override + public Path asPath(FileObject file) + { + return fileManager.asPath(file); + } + + @Override + public void setPathFactory(PathFactory f) + { + fileManager.setPathFactory(f); + } +} diff --git a/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/ReportScript.java b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/ReportScript.java new file mode 100644 index 00000000..8194920c --- /dev/null +++ b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/ReportScript.java @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + * Kartik Ohri + */ +package org.postgresql.pljava.pgxs; + +import org.apache.maven.reporting.MavenReportException; + +import java.util.Locale; + +/** + * Provides reasonable defaults and other required methods for + * using JavaScript to during {@code Site} lifecycle phase to configure a + * {@code MavenReport}. + */ +public interface ReportScript +{ + /** + * @param report instance of {@link ReportScriptingMojo} + * @return whether the report is an external report + * @see ReportScriptingMojo#isExternalReport() + */ + default boolean isExternalReport(ReportScriptingMojo report) + { + return report.isExternalReportDefault(); + } + + /** + * @param report instance of {@link ReportScriptingMojo} + * @return category name of the report + * @see ReportScriptingMojo#getCategoryName() + */ + default String getCategoryName(ReportScriptingMojo report) + { + return report.getCategoryNameDefault(); + } + + /** + * @param report instance of {@link ReportScriptingMojo} + * @return whether the report can be generated + * @see ReportScriptingMojo#canGenerateReport() + */ + default boolean canGenerateReport(ReportScriptingMojo report) + { + return report.canGenerateReportDefault(); + } + + /** + * @param report instance of {@link ReportScriptingMojo} + * @return path of the report relative to target site directory + * @see ReportScriptingMojo#getCategoryName() + */ + String getOutputName (ReportScriptingMojo report); + + /** + * @param report instance of {@link ReportScriptingMojo} + * @param locale preferred locale for the name + * @return name of the report + * @see ReportScriptingMojo#getName(Locale) + */ + String getName (ReportScriptingMojo report, Locale locale); + + /** + * @param report instance of {@link ReportScriptingMojo} + * @param locale preferred locale for the description + * @return description of the report + * @see ReportScriptingMojo#getDescription(Locale) + */ + String getDescription (ReportScriptingMojo report, Locale locale); + + /** + * @param report instance of {@link ReportScriptingMojo} + * @param locale Locale to use for any locale-sensitive content in + * the report + * @return null if execution completed successfully, Exception that occurred + * during execution otherwise + * @see ReportScriptingMojo#executeReport(Locale) + */ + MavenReportException executeReport(ReportScriptingMojo report, Locale locale); +} diff --git a/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/ReportScriptingMojo.java b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/ReportScriptingMojo.java new file mode 100644 index 00000000..4990daa1 --- /dev/null +++ b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/ReportScriptingMojo.java @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2020-2024 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + * Kartik Ohri + */ +package org.postgresql.pljava.pgxs; + +import org.apache.maven.doxia.sink.Sink; +import org.apache.maven.plugins.annotations.Execute; +import org.apache.maven.plugins.annotations.LifecyclePhase; +import org.apache.maven.plugins.annotations.Mojo; +import org.apache.maven.plugins.annotations.Parameter; +import org.apache.maven.project.MavenProject; +import org.apache.maven.reporting.AbstractMavenReport; +import org.apache.maven.reporting.MavenReportException; +import org.codehaus.plexus.configuration.PlexusConfiguration; + +import javax.script.Invocable; +import javax.script.ScriptEngine; +import java.util.Locale; + +/** + * Maven plugin goal to use JavaScript (or another JSR 223 script engine) + * for configuring + * {@link org.apache.maven.reporting.MavenReport} during the + * {@link LifecyclePhase#SITE}. + *

      + * This plugin goal intends to allow the use of scripting in the {@code SITE} + * lifecycle phase with the help of {@link ReportScript}. The motivation behind + * this is the inability to use Maven AntRun in the {@code SITE} phase. + */ +@Mojo(name = "scripted-report") +@Execute(phase = LifecyclePhase.NONE) +public class ReportScriptingMojo extends AbstractMavenReport +{ + /** + * The script to be used to produce the report, in the scripting language + * identified by its {@code mimetype} or {@code engine} attribute. + *

      + * The scripting language must be supported by an engine that implements + * {@link Invocable}, and the script, when evaluated, must define functions + * that correspond to all of the abstract methods of {@link ReportScript}, + * and any of the default methods that it wishes to override. + */ + @Parameter + public PlexusConfiguration script; + + private ReportScript reportScript; + + private PGXSUtils utils; + + /** + * Creates an instance of {@link ReportScript} using methods defined in + * the JavaScript snippet in configuration of the report in {@code pom.xml}. + * Does nothing if the instance is already initialized. + */ + private void setReportScript() + { + if ( null != reportScript ) + return; + + try + { + utils = new PGXSUtils(project, getLog()); + ScriptEngine engine = utils.getScriptEngine(script); + String scriptText = script.getValue(); + engine.eval(scriptText); + reportScript = ((Invocable)engine).getInterface(ReportScript.class); + } + catch (Exception e) + { + getLog().error(e); + } + } + + /** + * Queries the script for the report output path relative to the target site + * directory. + *
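(Illustrative aside, not part of the patch: in a pom the script is typically JavaScript, but the shape it must have is easiest to see as a hypothetical Java implementation of ReportScript; only the abstract methods are required, the defaulted ones may be omitted.)

    import java.util.Locale;
    import org.apache.maven.reporting.MavenReportException;
    import org.postgresql.pljava.pgxs.ReportScript;
    import org.postgresql.pljava.pgxs.ReportScriptingMojo;

    public class ExampleReportScript implements ReportScript
    {
        public String getOutputName(ReportScriptingMojo report)
        {
            return "example-report";      /* served as example-report.html */
        }

        public String getName(ReportScriptingMojo report, Locale locale)
        {
            return "Example Report";
        }

        public String getDescription(ReportScriptingMojo report, Locale locale)
        {
            return "Shows the methods a scripted report must define";
        }

        public MavenReportException executeReport(
            ReportScriptingMojo report, Locale locale)
        {
            try
            {
                /* ... generate the report's output here ... */
                return null;              /* null signals success */
            }
            catch ( Exception e )
            {
                return report.exceptionWrap(e);
            }
        }
    }

A JavaScript snippet in the script element simply defines functions with the same names and parameters; setReportScript then obtains this interface from the engine via Invocable.getInterface.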

      + * This value will be used by {@code Maven} to provide a link to the report + * from {@code index.html}. + *

      + * Calls {@code setReportScript} to ensure that the instance of + * {@link ReportScript} is available. Invokes + * {@code getOutputName(report)} defined by the script snippet + * associated with the report. No default implementation is provided; the + * script must implement this method. + */ + @Override + public String getOutputName () + { + setReportScript(); + return reportScript.getOutputName(this); + } + + /** + * Queries the script to return false if this report will produce output + * through a supplied {@link Sink}, or true if it is 'external', producing + * its output some other way. + *

      + * Calls {@code setReportScript} to ensure that the instance of + * {@link ReportScript} is available. Invokes + * {@code isExternalReport(report)} if defined in the script + * snippet associated with the report. Otherwise, the implementation + * inherited by this class is effectively invoked. + */ + @Override + public boolean isExternalReport () + { + setReportScript(); + return reportScript.isExternalReport(this); + } + + /** + * Queries the script for the name of this report to be used + * by {@code Maven} for display in {@code index.html}. + *

      + * Calls {@code setReportScript} to ensure that the instance of + * {@link ReportScript} is available. Invokes + * {@code getName(report, locale)} defined by the script + * snippet associated with the report. No default implementation is + * provided; the script must implement this method. + */ + @Override + public String getName (Locale locale) + { + setReportScript(); + return reportScript.getName(this, locale); + } + + /** + * Queries the script for the description of this report, to be used + * by {@code Maven} for display in {@code index.html}. + *

      + * Calls {@code setReportScript} to ensure that the instance of + * {@link ReportScript} is available. Invokes + * {@code getDescription(report, locale)} defined in the script + * snippet associated with the report. No default implementation is + * provided; the script must implement this method. + */ + @Override + public String getDescription (Locale locale) + { + setReportScript(); + return reportScript.getDescription(this, locale); + } + + /** + * Queries the script for the category name of this report, used + * by {@code Maven} to place the report under the correct heading + * in {@code index.html}. + *

      + * Calls {@code setReportScript} to ensure that the instance of + * {@link ReportScript} is available. Invokes + * {@code getCategoryName(report)} if defined by the script + * snippet associated with the report. Otherwise, the implementation + * inherited by this class is effectively invoked. + */ + @Override + public String getCategoryName () + { + setReportScript(); + return reportScript.getCategoryName(this); + } + + /** + * Queries the script as to whether this report can be generated. + *

      + * Calls {@code setReportScript} to ensure that the instance of + * {@link ReportScript} is available. Invokes + * {@code canGenerateReport(report)} if defined by the script + * snippet. Otherwise, the implementation inherited by this class is + * effectively invoked. + */ + @Override + public boolean canGenerateReport () + { + setReportScript(); + return reportScript.canGenerateReport(this); + } + + /** + * {@inheritDoc} + *

      + * Calls {@code setReportScript} to ensure that the instance of + * {@link ReportScript} is available. Invokes its + * {@code executeReport(report, locale)}, passing this instance and + * the supplied locale. + */ + @Override + protected void executeReport (Locale locale) throws MavenReportException + { + setReportScript(); + MavenReportException exception = reportScript.executeReport(this, locale); + if (exception != null) + throw exception; + } + + /** + * {@inheritDoc} + */ + @Override + public MavenProject getProject () + { + return super.getProject(); + } + + /** + * {@inheritDoc} + */ + @Override + public String getInputEncoding () + { + return super.getInputEncoding(); + } + + /** + * {@inheritDoc} + */ + @Override + public String getOutputEncoding () + { + return super.getOutputEncoding(); + } + + /** + * Default implementation for + * {@link ReportScript#isExternalReport(ReportScriptingMojo)}. Invoked if + * {@code isExternalReport(report)} is not defined in the script + * snippet associated with the report. + */ + boolean isExternalReportDefault () + { + return super.isExternalReport(); + } + + /** + * Default implementation of + * {@link ReportScript#getCategoryName(ReportScriptingMojo)}. Invoked if + * {@code getCategoryName(report)} is not defined in the script + * snippet associated with the report. + */ + String getCategoryNameDefault () + { + return super.getCategoryName(); + } + + /** + * Default implementation of + * {@link ReportScript#canGenerateReport(ReportScriptingMojo)}. Invoked if + * {@code canGenerateReport(report)} is not defined in the script + * snippet associated with the report. + */ + boolean canGenerateReportDefault () + { + return super.canGenerateReport(); + } + + /** + * Wraps the input object in a {@link MavenReportException}. + * + * The exception returned is constructed as follows: + *

        + *
      • If {@code object} is null, the exception message indicates the same. + *
      • If {@code object} is already a {@link MavenReportException}, it is + * returned as is. + *
      • If {@code object} is any other {@link Throwable}, it is used as + * the wrapping exception's cause. + *
      • If {@code object} is a {@link String}, it is used as + * the wrapping exception's message. + *
• If it is any other object, the wrapping exception's message is set in + * this format: Class name of object: String representation of object.
      + * + * @param object to wrap in MavenReportException + * @return object wrapped inside a {@link MavenReportException} + */ + public MavenReportException exceptionWrap(Object object) + { + if (object == null) + return new MavenReportException("Script threw a null value"); + else if (object instanceof MavenReportException) + return (MavenReportException) object; + else if (object instanceof Throwable) + { + Throwable t = (Throwable) object; + MavenReportException exception = + new MavenReportException(t.getMessage()); + exception.initCause(t); + return exception; + } + else if (object instanceof String) + return new MavenReportException((String) object); + else + return new MavenReportException(object.getClass().getCanonicalName() + + ": " + object.toString()); + } +} diff --git a/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/ScriptEngineLoader.java b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/ScriptEngineLoader.java new file mode 100644 index 00000000..0cd497c9 --- /dev/null +++ b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/ScriptEngineLoader.java @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.pgxs; + +import java.io.IOException; +import java.net.URL; +import java.util.Enumeration; + +/** + * A {@code ClassLoader} with (effectively) two parents, the inherited one + * and Java's platform class loader. + *

      + * This loader will be given to the {@code ScriptEngineManager}. The + * inherited loader supplied by Maven does not have Java's platform + * class loader as its parent (or ancestor), which leaves Java's + * {@code ServiceLoader} mechanism unable to find Nashorn's script engine. + * Therefore, this loader will declare the Java platform class loader + * as its actual parent, and search the Maven-supplied class loader for + * whatever the platform class loader does not find. + *
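(Illustrative aside, not part of the patch: a sketch of that intended use, written as if from this same package because the class is package-private; the engine short name is only an example.)

    package org.postgresql.pljava.pgxs;

    import javax.script.ScriptEngine;
    import javax.script.ScriptEngineManager;

    class ScriptEngineLoaderSketch
    {
        static ScriptEngine loadEngine(ClassLoader mavenLoader, String shortName)
        {
            /* Parent is the platform loader; Maven's loader is the fallback. */
            ClassLoader loader = new ScriptEngineLoader(mavenLoader);
            ScriptEngineManager manager = new ScriptEngineManager(loader);
            return manager.getEngineByName(shortName);    /* e.g. "javascript" */
        }
    }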

      + * This could pose a risk of class version conflicts if the Maven-supplied + * loader has defined classes that are also known to the platform loader. + * It would be safer to delegate to Maven's loader first and the parent as + * fallback. That would require overriding more of {@code ClassLoader}'s + * default functionality, though. With any luck, the targeted use of this + * loader only with the {@code ScriptEngineManager} will minimize the risk, + * already low because it would be odd to override classes of the Java + * platform itself. + */ +class ScriptEngineLoader extends ClassLoader +{ + private final ClassLoader mavenLoader; + + ScriptEngineLoader(ClassLoader mavenLoader) + { + super("pgxsScriptLoader", ClassLoader.getPlatformClassLoader()); + this.mavenLoader = mavenLoader; + } + + /** + * Delegate to the Maven-supplied loader. + *

      + * This is called by the {@code super} implementation of + * {@code loadClass} only after the parent loader has drawn a blank, + * so there is nothing left to do but see if the Maven-supplied loader + * has the class. + */ + @Override + protected Class findClass(String name) throws ClassNotFoundException + { + Class rslt = mavenLoader.loadClass(name); + return rslt; + } + + /** + * Delegate to the Maven-supplied loader for finding a resource. + *

      + * This is called by the {@code super} implementation of + * {@code getResource} only after the parent loader has drawn a blank, + * so there is nothing left to do but see if the Maven-supplied loader + * has the resource. + */ + @Override + protected URL findResource(String name) + { + URL rslt = mavenLoader.getResource(name); + return rslt; + } + + /** + * Delegate to the Maven-supplied loader for finding a resource. + *

      + * This is called by the {@code super} implementation of + * {@code getResources} after enumerating the resources available from + * the parent loader. This method needs only to return the resources + * available from the Maven-supplied loader; the caller will combine the + * two enumerations. + */ + @Override + protected Enumeration findResources(String name) throws IOException + { + Enumeration rslt = mavenLoader.getResources(name); + return rslt; + } +} diff --git a/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/ScriptingMojo.java b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/ScriptingMojo.java new file mode 100644 index 00000000..489f679b --- /dev/null +++ b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/ScriptingMojo.java @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2020-2024 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + * Kartik Ohri + */ +package org.postgresql.pljava.pgxs; + +import org.apache.maven.execution.MavenSession; +import org.apache.maven.plugin.AbstractMojo; +import org.apache.maven.plugin.AbstractMojoExecutionException; +import org.apache.maven.plugin.MojoExecutionException; +import org.apache.maven.plugin.MojoFailureException; +import org.apache.maven.plugins.annotations.LifecyclePhase; +import org.apache.maven.plugins.annotations.Mojo; +import org.apache.maven.plugins.annotations.Parameter; +import org.apache.maven.plugins.annotations.ResolutionScope; +import org.apache.maven.project.MavenProject; +import org.codehaus.plexus.configuration.PlexusConfiguration; + +import javax.script.Invocable; +import javax.script.ScriptEngine; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Function; + +import static javax.script.ScriptContext.ENGINE_SCOPE; + +/** + * Maven plugin goal to use JavaScript (or another JSR 223 script engine) + * during any of build lifecycle phases. + *

      + * The Mojo provides a limited subset of the functionality of the Maven AntRun + * Plugin. This is intentional to simplify usage, as this Maven plugin is + * specifically targeted at building PL/Java native code. + */ +@Mojo(name = "scripted-goal", defaultPhase = LifecyclePhase.COMPILE, + requiresDependencyResolution = ResolutionScope.TEST) +public class ScriptingMojo extends AbstractMojo +{ + @Parameter(defaultValue = "${project}", readonly = true) + private MavenProject project; + + @Parameter(defaultValue = "${session}", readonly = true) + private MavenSession session; + + + @Parameter + private PlexusConfiguration script; + + private PGXSUtils utils; + + /** + * Executes the script code inside the {@code script} tag in the plugin + * configuration. + *
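(Illustrative aside, not part of the patch: a sketch of what the script-supplied execute function is expected to do, judging only from how ScriptingMojo.execute() below invokes it; the class and the work inside the try block are hypothetical. In a real script, plugin is the engine-scope binding listed just below.)

    package org.postgresql.pljava.pgxs;

    import org.apache.maven.plugin.AbstractMojoExecutionException;

    class ExampleGoal
    {
        /* Mirrors the contract ScriptingMojo.execute() relies on: return null
         * on success, or an exception for the mojo to rethrow. */
        static AbstractMojoExecutionException execute(ScriptingMojo plugin)
        {
            try
            {
                /* ... the real work of the goal goes here ... */
                return null;
            }
            catch ( Exception e )
            {
                /* true selects MojoExecutionException as the wrapper */
                return plugin.exceptionWrap(e, true);
            }
        }
    }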

      + * Uses {@link PGXSUtils#getScriptEngine PGXSUtils.getScriptEngine} + * to instantiate the engine, and then makes these items available in + * the engine's scope (in addition to those placed there by + * {@link PGXSUtils#getScriptEngine getScriptEngine} itself): + *

      + *
      session
      The Maven session object + *
      plugin
      This object + *
      + */ + @Override + public void execute () throws MojoExecutionException, MojoFailureException + { + try + { + utils = new PGXSUtils(project, getLog()); + String scriptText = script.getValue(); + ScriptEngine engine = utils.getScriptEngine(script); + + engine.getContext().setAttribute("session", session, ENGINE_SCOPE); + engine.getContext().setAttribute("plugin", this, ENGINE_SCOPE); + + engine.eval(scriptText); + + GoalScript goal = + ((Invocable) engine).getInterface(GoalScript.class); + + AbstractMojoExecutionException exception = goal.execute(); + if (exception != null) + throw exception; + } + catch (MojoFailureException | MojoExecutionException e) + { + throw e; + } + catch (Exception e) + { + throw (MojoExecutionException) exceptionWrap(e, true); + } + } + + /** + * Wraps the input object in an {@link AbstractMojoExecutionException}. + * + * The returned exception is constructed as follows: + *
        + *
      • If {@code object} is null, then {@link MojoExecutionException} is + * used to wrap and the message indicates that a null value was thrown + * by the script. + *
      • If {@code object} is already a {@link MojoExecutionException}, it is + * returned as is. + *
      • If {@code object} is already a {@link MojoFailureException}, it is + * returned as is. + *
      • For the steps below, the wrapping exception is chosen according to + * the value of the {@code scriptFailure} parameter. + *
      • If {@code object} is any other {@link Throwable}, set it as + * the wrapping exception's cause. + *
      • If {@code object} is a {@link String}, set it as the wrapping + * exception's message. + *
      • For any other object, the message of the exception is set in + * this format: Class name of object: String representation of object. + *
      + * + * @param object an object to wrap in an AbstractMojoExecutionException + * @param scriptFailure if true, use a MojoExecutionException for wrapping, + * otherwise use MojoFailureException. This parameter + * is ignored if the object is null or an instance of + * MojoExecutionException or MojoFailureException + * @return object wrapped inside an {@link AbstractMojoExecutionException} + */ + public AbstractMojoExecutionException exceptionWrap(Object object, + boolean scriptFailure) + { + BiFunction + createException = scriptFailure ? MojoExecutionException::new : + MojoFailureException::new; + + AbstractMojoExecutionException exception; + if (object == null) + exception = new MojoExecutionException("Script threw a null value"); + else if (object instanceof MojoExecutionException) + exception = (MojoExecutionException) object; + else if (object instanceof MojoFailureException) + exception = (MojoFailureException) object; + else if (object instanceof Throwable) + { + Throwable t = (Throwable) object; + exception = createException.apply(t.getMessage(), t); + } + else if (object instanceof String) + exception = createException.apply((String) object, null); + else + { + String message = object.getClass().getCanonicalName() + ": " + + object.toString(); + exception = createException.apply(message, null); + } + return exception; + } + +} diff --git a/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/package-info.java b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/package-info.java new file mode 100644 index 00000000..68ca19d3 --- /dev/null +++ b/pljava-pgxs/src/main/java/org/postgresql/pljava/pgxs/package-info.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + * Kartik Ohri + */ +/** + * The PL/Java PGXS package provides the necessary maven plugin goals to build + * the PL/Java Native C code. It also provides a maven plugin goal for utilising + * JavaScript during maven SITE lifecycle phase. + */ +package org.postgresql.pljava.pgxs; \ No newline at end of file diff --git a/pljava-pgxs/src/site/markdown/index.md b/pljava-pgxs/src/site/markdown/index.md new file mode 100644 index 00000000..2a8639bd --- /dev/null +++ b/pljava-pgxs/src/site/markdown/index.md @@ -0,0 +1,8 @@ +## About PL/Java PGXS + +The `pljava-pgxs` subproject is a Maven plugin that builds the +native C code in `pljava-so` and allows using JavaScript for +configuring Maven Reports; this machine-generated page is a +project summary for developers of PL/Java. + +See the `pljava-pgxs` [plugin documentation](plugin-info.html). diff --git a/pljava-pgxs/src/test/java/AbstractPGXSMock.java b/pljava-pgxs/src/test/java/AbstractPGXSMock.java new file mode 100644 index 00000000..69c8ec11 --- /dev/null +++ b/pljava-pgxs/src/test/java/AbstractPGXSMock.java @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Kartik Ohri + */ +import org.postgresql.pljava.pgxs.AbstractPGXS; + +import java.nio.file.Path; +import java.util.List; +import java.util.Map; + +public class AbstractPGXSMock extends AbstractPGXS +{ + @Override + public int compile(String compiler, List files, Path targetPath, + List includes, Map defines, + List flags) + { + throw new UnsupportedOperationException(); + } + + @Override + public int link(String linker, List flags, List files, + Path targetPath) + { + throw new UnsupportedOperationException(); + } +} diff --git a/pljava-pgxs/src/test/java/PgConfigPropertyAsListTest.java b/pljava-pgxs/src/test/java/PgConfigPropertyAsListTest.java new file mode 100644 index 00000000..2455e08b --- /dev/null +++ b/pljava-pgxs/src/test/java/PgConfigPropertyAsListTest.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2020-2021 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Kartik Ohri + * Chapman Flack + */ + +import org.junit.Before; +import org.junit.Test; +import org.postgresql.pljava.pgxs.AbstractPGXS; + +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class PgConfigPropertyAsListTest { + + AbstractPGXS pgxs; + @Before + public void setup() { + pgxs = new AbstractPGXSMock(); + } + + @Test + public void testSimpleExample() + { + List actualResult = pgxs.getPgConfigPropertyAsList( + "foo 'bar' 'foo bar'"); + List expectedResult = List.of("foo", "bar", "foo bar"); + assertEquals(expectedResult, actualResult); + } + + @Test + public void testPracticalExample() + { + List actualResult = pgxs.getPgConfigPropertyAsList( + "-Wl,--as-needed -Wl,-rpath,'/usr/local/pgsql/lib',--enable-new-dtags"); + List expectedResult = List.of("-Wl,--as-needed", + "-Wl,-rpath,/usr/local/pgsql/lib,--enable-new-dtags"); + assertEquals(expectedResult, actualResult); + } + + @Test + public void testWhitespaceInQuotes() + { + List actualResult = pgxs.getPgConfigPropertyAsList( + "-Wl,--as-needed -Wl,-rpath,'/usr/local test/pgsql/lib',--enable-new-dtags"); + List expectedResult = List.of("-Wl,--as-needed", + "-Wl,-rpath,/usr/local test/pgsql/lib,--enable-new-dtags"); + assertEquals(expectedResult, actualResult); + } + + @Test + public void testMultipleSpaceSeparator() + { + List actualResult = pgxs.getPgConfigPropertyAsList( + "-Wl,--as-needed -Wl,-rpath,'/usr/local test/pgsql/lib',--enable-new-dtags"); + List expectedResult = List.of("-Wl,--as-needed", + "-Wl,-rpath,/usr/local test/pgsql/lib,--enable-new-dtags"); + assertEquals(expectedResult, actualResult); + } + +} diff --git a/pljava-so/aol.solaris-gcc.properties b/pljava-so/aol.solaris-gcc.properties deleted file mode 100644 index 03bbc7c0..00000000 --- a/pljava-so/aol.solaris-gcc.properties +++ /dev/null @@ -1,144 +0,0 @@ -## -# AOL properties for compilation on Solaris (sparc and x86, 32 and 64 bit) -# using the GNU tools. Supplied by Eugenie V. 
Lyzenko: -# http://lists.pgfoundry.org/pipermail/pljava-dev/2016/002508.html -# -# To use these definitions, add -Dnar.aolProperties=path/to/this/file -# on the mvn command line. -## - -# -# Solaris SPARC GNU compilation -# -sparc.SunOS.linker=g++ - -sparc.SunOS.gpp.cpp.compiler=g++ -sparc.SunOS.gpp.cpp.defines=SOLARIS2 -sparc.SunOS.gpp.cpp.options= -sparc.SunOS.gpp.cpp.includes=**/*.cc **/*.cpp **/*.cxx -sparc.SunOS.gpp.cpp.excludes= - -sparc.SunOS.gpp.c.compiler=gcc -sparc.SunOS.gpp.c.defines=SOLARIS2 -sparc.SunOS.gpp.c.options= -sparc.SunOS.gpp.c.includes=**/*.c -sparc.SunOS.gpp.c.excludes= - -sparc.SunOS.gpp.fortran.compiler=gfortran -sparc.SunOS.gpp.fortran.defines=SOLARIS2 -sparc.SunOS.gpp.fortran.options= -sparc.SunOS.gpp.fortran.includes=**/*.f **/*.for **/*.f90 -sparc.SunOS.gpp.fortran.excludes= - -sparc.SunOS.gpp.java.include=include;include/solaris -sparc.SunOS.gpp.java.runtimeDirectory=jre/lib/sparc/server - -sparc.SunOS.gpp.linker.systemLibs=pthread:shared - -sparc.SunOS.gpp.lib.prefix=lib -sparc.SunOS.gpp.shared.prefix=lib -sparc.SunOS.gpp.static.extension=a -sparc.SunOS.gpp.shared.extension=so* -sparc.SunOS.gpp.plugin.extension=so -sparc.SunOS.gpp.jni.extension=so -sparc.SunOS.gpp.executable.extension= - -# FIXME to be removed when NAR-6 -sparc.SunOS.gcc.static.extension=a -sparc.SunOS.gcc.shared.extension=so* -sparc.SunOS.gcc.plugin.extension=so -sparc.SunOS.gcc.jni.extension=so - -# -# Solaris SPARC 64-bit GNU compilation -# -sparcv9.SunOS.linker=g++ - -sparcv9.SunOS.gpp.cpp.compiler=g++ -sparcv9.SunOS.gpp.cpp.defines=SOLARIS2 GNU_GCC -sparcv9.SunOS.gpp.cpp.options=-Wall -Wno-long-long -Wpointer-arith -Wconversion -fPIC -m64 -sparcv9.SunOS.gpp.cpp.includes=**/*.cc **/*.cpp **/*.cxx -sparcv9.SunOS.gpp.cpp.excludes= - -sparcv9.SunOS.gpp.c.compiler=gcc -sparcv9.SunOS.gpp.c.defines=SOLARIS2 GNU_GCC -sparcv9.SunOS.gpp.c.options=-Wall -Wno-long-long -Wpointer-arith -Wconversion -fPIC -m64 -I/usr/sfw/include -sparcv9.SunOS.gpp.c.includes=**/*.c -sparcv9.SunOS.gpp.c.excludes= - -sparcv9.SunOS.gpp.java.include=include;include/solaris -sparcv9.SunOS.gpp.java.runtimeDirectory=jre/lib/sparcv9/server - -# options for gcc linker front end -sparcv9.SunOS.gpp.linker.options=-m64 -sparcv9.SunOS.gpp.linker.systemLibs=pthread:shared - -sparcv9.SunOS.gpp.lib.prefix=lib -sparcv9.SunOS.gpp.shared.prefix=lib -sparcv9.SunOS.gpp.static.extension=a -sparcv9.SunOS.gpp.shared.extension=so* -sparcv9.SunOS.gpp.plugin.extension=so -sparcv9.SunOS.gpp.jni.extension=so -sparcv9.SunOS.gpp.executable.extension= - -# FIXME to be removed when NAR-6 -sparcv9.SunOS.gcc.static.extension=a -sparcv9.SunOS.gcc.shared.extension=so* -sparcv9.SunOS.gcc.plugin.extension=so -sparcv9.SunOS.gcc.jni.extension=so - -# -# Solaris x86 32-bit GNU compilation -# -x86.SunOS.linker=g++ - -x86.SunOS.gpp.cpp.compiler=g++ -x86.SunOS.gpp.cpp.defines=SOLARIS2 GNU_GCC -x86.SunOS.gpp.cpp.options=-Wall -Wno-long-long -Wpointer-arith -Wconversion -x86.SunOS.gpp.cpp.includes=**/*.cc **/*.cpp **/*.cxx -x86.SunOS.gpp.cpp.excludes= - -x86.SunOS.gpp.c.compiler=gcc -x86.SunOS.gpp.c.defines=SOLARIS2 GNU_GCC -x86.SunOS.gpp.c.options=-Wall -Wno-long-long -Wpointer-arith -Wconversion -I/usr/sfw/include -x86.SunOS.gpp.c.includes=**/*.c -x86.SunOS.gpp.c.excludes= - -x86.SunOS.gpp.lib.prefix=lib -x86.SunOS.gpp.shared.prefix=lib -x86.SunOS.gpp.static.extension=a -x86.SunOS.gpp.shared.extension=so - -# -# Solaris x86_64 GNU compilation -# -amd64.SunOS.linker=g++ - -amd64.SunOS.gpp.cpp.compiler=g++ -amd64.SunOS.gpp.cpp.defines=SOLARIS2 GNU_GCC 
-amd64.SunOS.gpp.cpp.options=-Wall -Wno-long-long -Wpointer-arith -Wconversion -fPIC -m64 -amd64.SunOS.gpp.cpp.includes=**/*.cc **/*.cpp **/*.cxx -amd64.SunOS.gpp.cpp.excludes= - -amd64.SunOS.gpp.c.compiler=gcc -amd64.SunOS.gpp.c.defines=SOLARIS2 GNU_GCC -amd64.SunOS.gpp.c.options=-Wall -Wno-long-long -Wpointer-arith -Wconversion -fPIC -m64 -I/usr/sfw/include -amd64.SunOS.gpp.c.includes=**/*.c -amd64.SunOS.gpp.c.excludes= - -# options for gcc linker front end -amd64.SunOS.gpp.linker.options=-m64 - -amd64.SunOS.gpp.lib.prefix=lib -amd64.SunOS.gpp.shared.prefix=lib -amd64.SunOS.gpp.static.extension=a -amd64.SunOS.gpp.shared.extension=so -amd64.SunOS.gpp.plugin.extension=so -amd64.SunOS.gpp.jni.extension=so -amd64.SunOS.gpp.executable.extension= - -# FIXME to be removed when NAR-6 -amd64.SunOS.gcc.static.extension=a -amd64.SunOS.gcc.shared.extension=so* -amd64.SunOS.gcc.plugin.extension=so -amd64.SunOS.gcc.jni.extension=so diff --git a/pljava-so/build.xml b/pljava-so/build.xml deleted file mode 100644 index 0b77725d..00000000 --- a/pljava-so/build.xml +++ /dev/null @@ -1,107 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/pljava-so/pom.xml b/pljava-so/pom.xml index 4ed89f25..5dbe68fa 100644 --- a/pljava-so/pom.xml +++ b/pljava-so/pom.xml @@ -1,362 +1,430 @@ - + 4.0.0 org.postgresql pljava.app - 1.5.0 + 1.6.10 pljava-so PL/Java backend native code - Generates the pljava (.so, .dll, etc.) library which gets loaded by the PostgreSQL backend + Generates the pljava (.so, .dll, etc.) library which gets loaded + by the PostgreSQL backend - nar - - - - 0 - - - - - - linkpglibs - - - - com.github.maven-nar - nar-maven-plugin - 3.10.1 - - - - - ecpg - shared - ${PGSQL_LIBDIR} - - - pgtypes - shared - ${PGSQL_LIBDIR} - - - pq - shared - ${PGSQL_LIBDIR} - - - - - - - - - - - osx - - - mac os x - - - - - - com.github.maven-nar - nar-maven-plugin - - - - - - - - - - - - - - - compiler-msvc - - - env.VCINSTALLDIR - - - - - - com.github.maven-nar - nar-maven-plugin - - - - - ${MSVC_RINT} - - - ${PGSQL_INCLUDEDIR-SERVER}/port/win32 - ${PGSQL_INCLUDEDIR-SERVER}/port/win32_msvc - ${basedir}/src/main/include/fallback/win32 - - - - - - postgres - shared - ${PGSQL_PKGLIBDIR} - - - - - - - - - - - compiler-mingw64 - - - env.MSYSTEM - MINGW64 - - - - - - com.github.maven-nar - nar-maven-plugin - - - - ${PGSQL_INCLUDEDIR-SERVER}/port/win32 - ${basedir}/src/main/include/fallback/win32 - - - - g++ - - - - - - postgres - shared - ${PGSQL_PKGLIBDIR} - - - - - - - - - - - wnosign - - - - com.github.maven-nar - nar-maven-plugin - 3.10.1 - - - - - - - - - - - - - - - needsrunpath - - - pgsql.runpath - - - - -Wl,-rpath= - - - - - com.github.maven-nar - nar-maven-plugin - 3.10.1 - - - - - - - - - - - - + pom - - - org.apache.maven.plugins - maven-antrun-plugin - 1.7 + org.postgresql + pljava-pgxs + ${pljava.pgxs.version} - - - pg_config to pgsql.properties - initialize - - run - - - - - - - - - - + - - - com.github.maven-nar - nar-maven-plugin - 3.10.1 - true - - - ${nar.cores} - - - - ${basedir}/../pljava/target/classes/ - ${basedir}/../pljava-api/target/classes/ - - ${basedir}/../pljava/target/classes/ - - java.sql.Types - - - - - - - - PLJAVA_SO_VERSION=${project.version} - - - - ${PGSQL_INCLUDEDIR} - ${PGSQL_INCLUDEDIR-SERVER} - ${PGSQL_PKGINCLUDEDIR}/internal - ${basedir}/src/main/include/ - ${basedir}/target/nar/javah-include/ - - - - - - - - - - - plugin - - false 
- - - - - - - - - - - org.eclipse.m2e - lifecycle-mapping - 1.0.0 - - - - - - org.apache.maven.plugins - maven-antrun-plugin - [1.7,) - - run - - - - - - - - - org.codehaus.mojo - properties-maven-plugin - [1.0-alpha-2,) - - - read-project-properties - - - - - - - - - - com.github.maven-nar - nar-maven-plugin - 3.10.1 - - nar-compile - nar-download - nar-gnu-configure - nar-gnu-make - nar-gnu-process - nar-gnu-resources - nar-javah - nar-system-generate - nar-resources - nar-testCompile - nar-testDownload - nar-testUnpack - nar-unpack - nar-validate - nar-vcproj - - - - - - - - - - - - diff --git a/pljava-so/src/main/c/Backend.c b/pljava-so/src/main/c/Backend.c index 33204fc0..9206ee16 100644 --- a/pljava-so/src/main/c/Backend.c +++ b/pljava-so/src/main/c/Backend.c @@ -1,12 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2009, 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause * - * @author Thomas Hallgren + * Contributors: + * Tada AB - Thomas Hallgren + * PostgreSQL Global Development Group + * Chapman Flack */ #include #include @@ -20,12 +23,16 @@ #include #include #include +#include #include #include #include +#ifdef GP_VERSION_NUM +#include +#endif #if PG_VERSION_NUM >= 120000 - #ifdef HAVE_DLOPEN + #if defined(HAVE_DLOPEN) || PG_VERSION_NUM >= 160000 && ! defined(WIN32) #include #endif #define pg_dlopen(f) dlopen((f), RTLD_NOW | RTLD_GLOBAL) @@ -45,6 +52,8 @@ #include #include "org_postgresql_pljava_internal_Backend.h" +#include "org_postgresql_pljava_internal_Backend_EarlyNatives.h" +#include "pljava/DualState.h" #include "pljava/Invocation.h" #include "pljava/InstallHelper.h" #include "pljava/Function.h" @@ -55,11 +64,6 @@ #include "pljava/SPI.h" #include "pljava/type/String.h" -#if PG_VERSION_NUM >= 90300 -#include "utils/timeout.h" -#endif - -#define pg_unreachable() abort() /* Include the 'magic block' that PostgreSQL 8.2 and up will use to ensure * that a module is not loaded into an incompatible server. 
@@ -103,7 +107,7 @@ extern PLJAVADLLEXPORT void _PG_init(void); #define LOCAL_REFERENCE_COUNT 128 -jlong mainThreadId; +MemoryContext JavaMemoryContext; static pthread_t s_mainThreadIdForHandler; static bool s_handlerSubstituted = false; @@ -111,14 +115,32 @@ static pqsigfunc s_oldHandlerFunc = NULL; static JavaVM* s_javaVM = 0; static jclass s_Backend_class; -static jmethodID s_setTrusted; +static bool s_startingVM; + +/* + * GUC states + */ static char* libjvmlocation; +static char* vmoptions; +static char* modulepath; +static char* implementors; +static char* policy_urls; +static char* allow_unenforced; static int statementCacheSize; +static bool allow_unenforced_udt; +static bool pljavaDebug; static bool pljavaReleaseLingeringSavepoints; -static bool s_currentTrust; +static bool pljavaEnabled; + +static int java_thread_pg_entry; static int s_javaLogLevel; +#if PG_VERSION_NUM < 100000 bool integerDateTimes = false; +static void checkIntTimeType(void); +#endif + +static char s_path_var_sep; extern void Invocation_initialize(void); extern void Exception_initialize(void); @@ -131,12 +153,29 @@ extern void Session_initialize(void); extern void PgSavepoint_initialize(void); extern void XactListener_initialize(void); extern void SubXactListener_initialize(void); +extern void SQLChunkIOOrder_initialize(void); extern void SQLInputFromChunk_initialize(void); extern void SQLOutputToChunk_initialize(void); -extern void SQLInputFromTuple_initialize(void); extern void SQLOutputToTuple_initialize(void); +/* + * These typedefs are not exposed in Java's jni.h. Apparently you are supposed + * to be really determined if you want to use them. These are copy/pasted from + * src/hotspot/share/runtime/arguments.hpp. One silver lining is that they can + * be spelled here without the * used in the original, enabling them to be used + * succinctly to declare matching prototypes. + */ +typedef void JNICALL abort_hook_t(void); +typedef void JNICALL exit_hook_t(jint code); +typedef jint JNICALL vfprintf_hook_t(FILE *fp, const char *fmt, va_list args) + pg_attribute_printf(2, 0); + +/* + * This private type is used here as a dynamically-sized list of JavaVMOption, + * which will later be copied to a struct of type JavaVMInitArgs (a type that + * jni.h does expose). 
+ */ typedef struct { JavaVMOption* options; unsigned int size; @@ -144,18 +183,22 @@ typedef struct { } JVMOptList; static jint initializeJavaVM(JVMOptList*); +static char *get_jni_errmsg(jint jnicode); static void JVMOptList_init(JVMOptList*); static void JVMOptList_delete(JVMOptList*); static void JVMOptList_add(JVMOptList*, const char*, void*, bool); static void JVMOptList_addVisualVMName(JVMOptList*); +static void JVMOptList_addModuleMain(JVMOptList*); static void addUserJVMOptions(JVMOptList*); -static void checkIntTimeType(void); -static char* getClassPath(const char*); -static jint JNICALL my_vfprintf(FILE*, const char*, va_list); +static char* getModulePath(const char*); +static abort_hook_t my_abort; +static exit_hook_t my_exit; +static vfprintf_hook_t my_vfprintf; static void _destroyJavaVM(int, Datum); static void initPLJavaClasses(void); static void initJavaSession(void); static void reLogWithChangedLevel(int); +static void registerGUCOptions(void); #ifndef WIN32 #define USE_PLJAVA_SIGHANDLERS @@ -171,6 +214,7 @@ enum initstage IS_FORMLESS_VOID, IS_GUCS_REGISTERED, IS_CAND_JVMLOCATION, + IS_CAND_POLICYURLS, IS_PLJAVA_ENABLED, IS_CAND_JVMOPENED, IS_CREATEVM_SYM_FOUND, @@ -179,6 +223,7 @@ enum initstage IS_JAVAVM_STARTED, IS_SIGHANDLERS, IS_PLJAVA_FOUND, + IS_PLJAVA_INSTALLING, IS_COMPLETE }; @@ -188,10 +233,311 @@ static bool jvmStartedAtLeastOnce = false; static bool alteredSettingsWereNeeded = false; static bool loadAsExtensionFailed = false; static bool seenVisualVMName; +static bool seenModuleMain; static char const visualVMprefix[] = "-Dvisualvm.display.name="; +static char const moduleMainPrefix[] = "-Djdk.module.main="; +static char const policyUrlsGUC[] = "pljava.policy_urls"; +static char const unenforcedGUC[] = "pljava.allow_unenforced"; + +/* + * In a background worker, _PG_init may be called very early, before much of + * the state needed during PL/Java initialization has even been set up. When + * that case is detected, initsequencer needs to go just as far as + * IS_GUCS_REGISTERED and then bail. The GUC assign hooks may then also be + * invoked as GUC values get copied from the lead process; they also need to + * return quickly (accomplished by checking this flag in ASSIGNRETURNIFNXACT). + * Further initialization is thus deferred until the first actual call arrives + * at the call handler, which resets this flag and rejoins the initsequencer. + * The same lazy approach needs to be followed during a pg_upgrade (which test- + * loads libraries, thus calling _PG_init). This flag is set for either case. + */ +static bool deferInit = false; + +/* + * Whether Backend_warnJEP411() should emit a warning when called. + * Initially true, because it may be called very early from the deferInit check, + * if pg_upgrade is happening, and should always warn in that case. Thereafter + * false, unless set true in the initsequencer because InstallHelper_groundwork + * will be called (PL/Java being installed or upgraded), or in the validator + * handler because a PL/Java function has been declared or redeclared. + */ +static bool warnJEP411 = true; + +/* + * Becomes true upon initialization of the Backend class if the Java property + * setting java.security.manager=disallow was explicitly in pljava.vmoptions. + * That is how to request the fallback nothing-is-enforced mode of operation + * that is the only mode available on Java >= 24. Only when all Java code is + * 100% trusted should PL/Java be run in this mode. 
+ */ +static bool withoutEnforcement = false; + +/* + * Don't bother with the warning unless the JVM in use is later than Java 11. + * 11 is the LTS release prior to the one where JEP 411 gets interesting (17). + * If a site is sticking to LTS releases, there will be plenty of time to warn + * on 17. If a site moves with non-LTS releases, start warning as soon as + * anything > 11 is used. + * + * Initially true, so there will be a warning unconditionally in a case + * (pg_upgrade) where a JVM hasn't been launched to learn its version). + */ +static bool javaGT11 = true; +static bool javaGE17 = false; static void initsequencer(enum initstage is, bool tolerant); +static bool check_libjvm_location( + char **newval, void **extra, GucSource source); +static bool check_vmoptions( + char **newval, void **extra, GucSource source); +static bool check_modulepath( + char **newval, void **extra, GucSource source); +static bool check_policy_urls( + char **newval, void **extra, GucSource source); +static bool check_enabled( + bool *newval, void **extra, GucSource source); +static bool check_allow_unenforced_udt( + bool *newval, void **extra, GucSource source); +static bool check_java_thread_pg_entry( + int *newval, void **extra, GucSource source); + +/* Check hooks will always allow "setting" a value that is the same as + * current; otherwise, it would be frustrating to have just found settings + * that work, and be unable to save them with ALTER DATABASE SET ... because + * the check hook is called for that too, and would say it is too late.... + */ + +static bool check_libjvm_location( + char **newval, void **extra, GucSource source) +{ + if ( initstage < IS_CAND_JVMOPENED ) + return true; + if ( libjvmlocation == *newval ) + return true; + if ( libjvmlocation && *newval && 0 == strcmp(libjvmlocation, *newval) ) + return true; + GUC_check_errmsg( + "too late to change \"pljava.libjvm_location\" setting"); + GUC_check_errdetail( + "Changing the setting can have no effect after " + "PL/Java has found and opened the library it points to."); + GUC_check_errhint( + "To try a different value, exit this session and start a new one."); + return false; +} + +static bool check_vmoptions( + char **newval, void **extra, GucSource source) +{ + if ( initstage < IS_JAVAVM_OPTLIST ) + return true; + if ( vmoptions == *newval ) + return true; + if ( vmoptions && *newval && 0 == strcmp(vmoptions, *newval) ) + return true; + GUC_check_errmsg( + "too late to change \"pljava.vmoptions\" setting"); + GUC_check_errdetail( + "Changing the setting can have no effect after " + "PL/Java has started the Java virtual machine."); + GUC_check_errhint( + "To try a different value, exit this session and start a new one."); + return false; +} + +static bool check_modulepath( + char **newval, void **extra, GucSource source) +{ + if ( initstage < IS_JAVAVM_OPTLIST ) + return true; + if ( modulepath == *newval ) + return true; + if ( modulepath && *newval && 0 == strcmp(modulepath, *newval) ) + return true; + GUC_check_errmsg( + "too late to change \"pljava.module_path\" setting"); + GUC_check_errdetail( + "Changing the setting has no effect after " + "PL/Java has started the Java virtual machine."); + GUC_check_errhint( + "To try a different value, exit this session and start a new one."); + return false; +} + +static bool check_policy_urls( + char **newval, void **extra, GucSource source) +{ + if ( initstage < IS_JAVAVM_OPTLIST ) + return true; + if ( policy_urls == *newval ) + return true; + if ( policy_urls && *newval && 0 == 
strcmp(policy_urls, *newval) ) + return true; + GUC_check_errmsg( + "too late to change \"pljava.policy_urls\" setting"); + GUC_check_errdetail( + "Changing the setting has no effect after " + "PL/Java has started the Java virtual machine."); + GUC_check_errhint( + "To try a different value, exit this session and start a new one."); + return false; +} + +static bool check_enabled( + bool *newval, void **extra, GucSource source) +{ + if ( initstage < IS_PLJAVA_ENABLED ) + return true; + if ( *newval ) + return true; + GUC_check_errmsg( + "too late to change \"pljava.enable\" setting"); + GUC_check_errdetail( + "Start-up has progressed past the point where it is checked."); + GUC_check_errhint( + "For another chance, exit this session and start a new one."); + return false; +} + +static bool check_allow_unenforced_udt( + bool *newval, void **extra, GucSource source) +{ + if ( initstage < IS_PLJAVA_FOUND ) + return true; + if ( *newval || ! allow_unenforced_udt ) + return true; + GUC_check_errmsg( + "too late to change \"pljava.allow_unenforced_udt\" setting"); + GUC_check_errdetail( + "Once set, it cannot be reset in the same session."); + GUC_check_errhint( + "For another chance, exit this session and start a new one."); + return false; +} + +static bool check_java_thread_pg_entry( + int *newval, void **extra, GucSource source) +{ + if ( initstage < IS_PLJAVA_FOUND ) + return true; + if ( java_thread_pg_entry == *newval ) + return true; + GUC_check_errmsg( + "too late to change \"pljava.java_thread_pg_entry\" setting"); + GUC_check_errdetail( + "Start-up has progressed past the point where it is checked."); + GUC_check_errhint( + "For another chance, exit this session and start a new one."); + return false; +} + +#define ASSIGNHOOK(name,type) \ + static void \ + CppConcat(assign_,name)(type newval, void *extra); \ + static void \ + CppConcat(assign_,name)(type newval, void *extra) +#define ASSIGNRETURN(thing) +#define ASSIGNRETURNIFCHECK(thing) +#define ASSIGNRETURNIFNXACT(thing) \ + if (! 
deferInit && pljavaViableXact()) ; else return +#define ASSIGNSTRINGHOOK(name) ASSIGNHOOK(name, const char *) + +#define ASSIGNENUMHOOK(name) ASSIGNHOOK(name,int) +#define ENUMBOOTVAL(entry) ((entry).val) +#define ENUMHOOKRET true + +static const struct config_enum_entry java_thread_pg_entry_options[] = { + {"allow", 0, false}, /* numeric value is bit-coded: */ + {"error", 1, false}, /* 1: C code should refuse JNI calls on wrong thread */ + /* 2: C code shouldn't call MonitorEnter/MonitorExit */ + {"block", 3, false}, /* (3: check thread AND skip MonitorEnter/Exit) */ + /* 4: *Java* code should refuse wrong-thread calls */ + {"throw", 6, false}, /* (6: check in Java AND skip C MonitorEnter/Exit) */ + {NULL, 0, false} +}; + +ASSIGNSTRINGHOOK(libjvm_location) +{ + ASSIGNRETURNIFCHECK(newval); + libjvmlocation = (char *)newval; + if ( IS_FORMLESS_VOID < initstage && initstage < IS_CAND_JVMOPENED ) + { + ASSIGNRETURNIFNXACT(newval); + alteredSettingsWereNeeded = true; + initsequencer( initstage, true); + } + ASSIGNRETURN(newval); +} + +ASSIGNSTRINGHOOK(vmoptions) +{ + ASSIGNRETURNIFCHECK(newval); + vmoptions = (char *)newval; + if ( IS_FORMLESS_VOID < initstage && initstage < IS_JAVAVM_OPTLIST ) + { + ASSIGNRETURNIFNXACT(newval); + alteredSettingsWereNeeded = true; + initsequencer( initstage, true); + } + ASSIGNRETURN(newval); +} + +ASSIGNSTRINGHOOK(modulepath) +{ + ASSIGNRETURNIFCHECK(newval); + modulepath = (char *)newval; + if ( IS_FORMLESS_VOID < initstage && initstage < IS_JAVAVM_OPTLIST ) + { + ASSIGNRETURNIFNXACT(newval); + alteredSettingsWereNeeded = true; + initsequencer( initstage, true); + } + ASSIGNRETURN(newval); +} + +ASSIGNSTRINGHOOK(policy_urls) +{ + ASSIGNRETURNIFCHECK(newval); + policy_urls = (char *)newval; + if ( IS_FORMLESS_VOID < initstage && initstage < IS_JAVAVM_OPTLIST ) + { + alteredSettingsWereNeeded = true; + ASSIGNRETURNIFNXACT(newval); + initsequencer( initstage, true); + } + ASSIGNRETURN(newval); +} + +ASSIGNSTRINGHOOK(allow_unenforced) +{ + ASSIGNRETURNIFCHECK(newval); + allow_unenforced = (char *)newval; + if ( IS_PLJAVA_FOUND < initstage ) + Function_clearFunctionCache(); + ASSIGNRETURN(newval); +} + +ASSIGNHOOK(enabled, bool) +{ + ASSIGNRETURNIFCHECK(true); + pljavaEnabled = newval; + if ( IS_FORMLESS_VOID < initstage && initstage < IS_PLJAVA_ENABLED ) + { + ASSIGNRETURNIFNXACT(true); + alteredSettingsWereNeeded = true; + initsequencer( initstage, true); + } + ASSIGNRETURN(true); +} + +ASSIGNENUMHOOK(java_thread_pg_entry) +{ + int val = newval; + ASSIGNRETURNIFCHECK(ENUMHOOKRET); + pljava_JNI_setThreadPolicy( !!(val&1) /*error*/, !(val&2) /*monitorops*/); + ASSIGNRETURN(ENUMHOOKRET); +} /* * There are a few ways to arrive in the initsequencer. * 1. From _PG_init (called exactly once when the library is loaded for ANY @@ -213,6 +559,17 @@ static void initsequencer(enum initstage is, bool tolerant); * to succeed. * 3. From a GUC assign hook, if the user has updated a setting that might allow * initialization to succeed. It resumes from where it left off. + * 4. From the validator handler, if initialization isn't complete yet. That + * will definitely happen during pg_upgrade, which is a case where deferInit + * will have been set. The validator will then clear deferInit and try to get + * further in the init sequence. Importantly, pg_upgrade also sets + * check_function_bodies false, which limits the validator's work to a syntax + * check of the AS string. 
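As a reading aid (this adds no code; it is only the preprocessor output, written out by hand), here is roughly what the ASSIGNSTRINGHOOK(libjvm_location) definition above expands to: an ordinary PostgreSQL string-GUC assign hook. ASSIGNRETURNIFCHECK and ASSIGNRETURN vanish, and ASSIGNRETURNIFNXACT becomes the deferInit / pljavaViableXact guard:

static void
assign_libjvm_location(const char *newval, void *extra);
static void
assign_libjvm_location(const char *newval, void *extra)
{
	libjvmlocation = (char *)newval;
	if ( IS_FORMLESS_VOID < initstage && initstage < IS_CAND_JVMOPENED )
	{
		if (! deferInit && pljavaViableXact()) ; else return;
		alteredSettingsWereNeeded = true;
		initsequencer( initstage, true);
	}
}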
The validator therefore will not need to obtain a + * schemaLoader or do anything else that requires the sqlj schema to be fully + * populated (as, during pg_upgrade, it may not yet be). However, the + * validator handler must avoid any action that sets pljavaLoadPath, as a + * non-NULL value there would be treated below as case 1a, and trigger an + * attempt to set up the sqlj schema. * * In all cases, the sequence must progress as far as starting the VM and * initializing the PL/Java classes. In all cases except 1a, that's enough, @@ -236,11 +593,51 @@ static void initsequencer(enum initstage is, bool tolerant) { case IS_FORMLESS_VOID: initstage = IS_GUCS_REGISTERED; + if ( deferInit ) + return; + warnJEP411 = false; + /*FALLTHROUGH*/ case IS_GUCS_REGISTERED: - libjvmlocation = strdup("libjvm.so"); + if ( NULL == libjvmlocation ) + { + ereport(WARNING, ( + errmsg("Java virtual machine not yet loaded"), + errdetail("location of libjvm is not configured"), + errhint("SET pljava.libjvm_location TO the correct " + "path to the jvm library (libjvm.so or jvm.dll, etc.)"))); + goto check_tolerant; + } + initstage = IS_CAND_JVMLOCATION; + /*FALLTHROUGH*/ + + case IS_CAND_JVMLOCATION: + if ( NULL == policy_urls ) + { + ereport(WARNING, ( + errmsg("Java virtual machine not yet loaded"), + errdetail("Java policy URL(s) not configured"), + errhint("SET pljava.policy_urls TO the security policy " + "files PL/Java is to use."))); + goto check_tolerant; + } + initstage = IS_CAND_POLICYURLS; + /*FALLTHROUGH*/ + case IS_CAND_POLICYURLS: + if ( ! pljavaEnabled ) + { + ereport(WARNING, ( + errmsg("Java virtual machine not yet loaded"), + errdetail( + "Pausing because \"pljava.enable\" is set \"off\". "), + errhint( + "After changing any other settings as necessary, set it " + "\"on\" to proceed."))); + goto check_tolerant; + } initstage = IS_PLJAVA_ENABLED; + /*FALLTHROUGH*/ case IS_PLJAVA_ENABLED: libjvm_handle = pg_dlopen(libjvmlocation); @@ -252,6 +649,7 @@ static void initsequencer(enum initstage is, bool tolerant) goto check_tolerant; } initstage = IS_CAND_JVMOPENED; + /*FALLTHROUGH*/ case IS_CAND_JVMOPENED: pljava_createvm = @@ -274,10 +672,13 @@ static void initsequencer(enum initstage is, bool tolerant) goto check_tolerant; } initstage = IS_CREATEVM_SYM_FOUND; + /*FALLTHROUGH*/ case IS_CREATEVM_SYM_FOUND: s_javaLogLevel = INFO; +#if PG_VERSION_NUM < 100000 checkIntTimeType(); +#endif HashMap_initialize(); /* creates things in TopMemoryContext */ #ifdef PLJAVA_DEBUG /* Hard setting for debug. Don't forget to recompile... @@ -285,27 +686,41 @@ static void initsequencer(enum initstage is, bool tolerant) pljava_debug = 1; #endif initstage = IS_MISC_ONCE_DONE; + /*FALLTHROUGH*/ case IS_MISC_ONCE_DONE: JVMOptList_init(&optList); /* uses CurrentMemoryContext */ seenVisualVMName = false; + seenModuleMain = false; addUserJVMOptions(&optList); if ( ! seenVisualVMName ) JVMOptList_addVisualVMName(&optList); + if ( ! 
seenModuleMain ) + JVMOptList_addModuleMain(&optList); + JVMOptList_add(&optList, "abort", (void*)my_abort, true); + JVMOptList_add(&optList, "exit", (void*)my_exit, true); JVMOptList_add(&optList, "vfprintf", (void*)my_vfprintf, true); JVMOptList_add(&optList, "-Xss2m", 0, true); #ifndef GCJ JVMOptList_add(&optList, "-Xrs", 0, true); #endif - effectiveClassPath = getClassPath("-Djava.class.path="); - if(effectiveClassPath != 0) + effectiveModulePath = getModulePath("--module-path="); + if(effectiveModulePath != 0) { - JVMOptList_add(&optList, effectiveClassPath, 0, true); + JVMOptList_add(&optList, effectiveModulePath, 0, true); } initstage = IS_JAVAVM_OPTLIST; + /*FALLTHROUGH*/ case IS_JAVAVM_OPTLIST: + /* Register an on_proc_exit handler that destroys the VM if it has + * been started. It will also log a last-ditch message if the VM happens + * to rudely call exit() rather than returning a non-OK result. + */ + on_proc_exit(_destroyJavaVM, 0); + s_startingVM = true; JNIresult = initializeJavaVM(&optList); /* frees the optList */ + s_startingVM = false; if( JNI_OK != JNIresult ) { initstage = IS_MISC_ONCE_DONE; /* optList has been freed */ @@ -313,8 +728,8 @@ static void initsequencer(enum initstage is, bool tolerant) "jint wider than long int?!"); ereport(WARNING, (errmsg("failed to create Java virtual machine"), - errdetail("JNI_CreateJavaVM returned an error code: %ld", - (long int)JNIresult), + errdetail("JNI_CreateJavaVM returned an error code: %ld (%s)", + (long int)JNIresult, get_jni_errmsg(JNIresult)), jvmStartedAtLeastOnce ? errhint("Because an earlier attempt during this session " "did start a VM before failing, this probably means your " @@ -326,16 +741,15 @@ static void initsequencer(enum initstage is, bool tolerant) jvmStartedAtLeastOnce = true; elog(DEBUG2, "successfully created Java virtual machine"); initstage = IS_JAVAVM_STARTED; + /*FALLTHROUGH*/ case IS_JAVAVM_STARTED: #ifdef USE_PLJAVA_SIGHANDLERS pqsignal(SIGINT, pljavaStatementCancelHandler); pqsignal(SIGTERM, pljavaDieHandler); #endif - /* Register an on_proc_exit handler that destroys the VM - */ - on_proc_exit(_destroyJavaVM, 0); initstage = IS_SIGHANDLERS; + /*FALLTHROUGH*/ case IS_SIGHANDLERS: Invocation_pushBootContext(&ctx); @@ -373,26 +787,38 @@ static void initsequencer(enum initstage is, bool tolerant) { /* JVM initialization failed for some reason. Destroy * the VM if it exists. Perhaps the user will try - * fixing the pljava.classpath and make a new attempt. + * fixing the pljava.module_path and make a new attempt. */ ereport(WARNING, ( errmsg("failed to load initial PL/Java classes"), - errhint("The most common reason is that \"pljava_classpath\" " - "needs to be set, naming the proper \"pljava.jar\" file.") + errhint("The most common reason is that \"pljava.module_path\" " + "needs to be set, naming the proper \"pljava.jar\" " + "and \"pljava-api.jar\" files, separated by the correct " + "path separator for this platform.") )); + pljava_DualState_unregister(); _destroyJavaVM(0, 0); goto check_tolerant; } + /*FALLTHROUGH*/ case IS_PLJAVA_FOUND: - greeting = InstallHelper_hello(); + greeting = InstallHelper_hello(); /*adjusts, freezes system properties*/ ereport(NULL != pljavaLoadPath ? 
NOTICE : DEBUG1, ( errmsg("PL/Java loaded"), errdetail("versions:\n%s", greeting))); pfree(greeting); + initstage = IS_PLJAVA_INSTALLING; + /*FALLTHROUGH*/ + + case IS_PLJAVA_INSTALLING: if ( NULL != pljavaLoadPath ) + { + warnJEP411 = javaGT11; InstallHelper_groundwork(); /* sqlj schema, language handlers, ...*/ + } initstage = IS_COMPLETE; + /*FALLTHROUGH*/ case IS_COMPLETE: pljavaLoadingAsExtension = false; @@ -411,18 +837,13 @@ static void initsequencer(enum initstage is, bool tolerant) * are just function parameters with evaluation order unknown. */ StringInfoData buf; -#if PG_VERSION_NUM >= 90200 -#define MOREHINT \ - appendStringInfo(&buf, \ - "using ALTER DATABASE %s SET ... FROM CURRENT or ", \ - pljavaDbName()), -#else -#define MOREHINT -#endif + ereport(NOTICE, ( errmsg("PL/Java successfully started after adjusting settings"), (initStringInfo(&buf), - MOREHINT + appendStringInfo(&buf, \ + "using ALTER DATABASE %s SET ... FROM CURRENT or ", \ + pljavaDbName()), errhint("The settings that worked should be saved (%s" "in the \"%s\" file). For a reminder of what has been set, " "try: SELECT name, setting FROM pg_settings WHERE name LIKE" @@ -431,9 +852,18 @@ static void initsequencer(enum initstage is, bool tolerant) superuser() ? PG_GETCONFIGOPTION("config_file") : "postgresql.conf")))); -#undef MOREHINT + if ( loadAsExtensionFailed ) { +#if PG_VERSION_NUM < 130000 +#define MOREHINT \ + "\"CREATE EXTENSION pljava FROM unpackaged\"" +#else +#define MOREHINT \ + "\"CREATE EXTENSION pljava VERSION unpackaged\", " \ + "then (after starting another new session) " \ + "\"ALTER EXTENSION pljava UPDATE\"" +#endif ereport(NOTICE, (errmsg( "PL/Java load successful after failed CREATE EXTENSION"), errdetail( @@ -443,9 +873,11 @@ static void initsequencer(enum initstage is, bool tolerant) "the working settings are saved, exit this session, and " "in a new session, either: " "1. if committed, run " - "\"CREATE EXTENSION pljava FROM unpackaged\", or 2. " + MOREHINT + ", or 2. " "if rolled back, simply \"CREATE EXTENSION pljava\" again." 
))); +#undef MOREHINT } } return; @@ -506,7 +938,7 @@ static void reLogWithChangedLevel(int level) else if ( ERRCODE_WARNING == category || ERRCODE_NO_DATA == category || ERRCODE_SUCCESSFUL_COMPLETION == category ) sqlstate = ERRCODE_INTERNAL_ERROR; -#if PG_VERSION_NUM >= 90500 + edata->elevel = level; edata->sqlerrcode = sqlstate; PG_TRY(); @@ -520,56 +952,44 @@ static void reLogWithChangedLevel(int level) } PG_END_TRY(); FreeErrorData(edata); -#else - if (!errstart(level, edata->filename, edata->lineno, - edata->funcname, NULL)) - { - FreeErrorData(edata); - return; - } - - errcode(sqlstate); - if (edata->message) - errmsg("%s", edata->message); - if (edata->detail) - errdetail("%s", edata->detail); - if (edata->detail_log) - errdetail_log("%s", edata->detail_log); - if (edata->hint) - errhint("%s", edata->hint); - if (edata->context) - errcontext("%s", edata->context); /* this may need to be trimmed */ -#if PG_VERSION_NUM >= 90300 - if (edata->schema_name) - err_generic_string(PG_DIAG_SCHEMA_NAME, edata->schema_name); - if (edata->table_name) - err_generic_string(PG_DIAG_TABLE_NAME, edata->table_name); - if (edata->column_name) - err_generic_string(PG_DIAG_COLUMN_NAME, edata->column_name); - if (edata->datatype_name) - err_generic_string(PG_DIAG_DATATYPE_NAME, edata->datatype_name); - if (edata->constraint_name) - err_generic_string(PG_DIAG_CONSTRAINT_NAME, edata->constraint_name); -#endif - if (edata->internalquery) - internalerrquery(edata->internalquery); - - FreeErrorData(edata); - errfinish(0); -#endif } void _PG_init() { - if ( IS_PLJAVA_FOUND == initstage ) + char *sep; + + if ( IS_PLJAVA_INSTALLING == initstage ) return; /* creating handler functions will cause recursive call */ - pljavaCheckExtension( NULL); + + InstallHelper_earlyHello(); + + /* + * Find the platform's path separator. Java knows it, but that's no help in + * preparing the launch options before it is launched. PostgreSQL knows what + * it is, but won't directly say; give it some choices and it'll pick one. + * Alternatively, let Maven or Ant determine and add a -D at build time from + * the path.separator property. Maybe that's cleaner? 
+ */ + sep = first_path_var_separator(":;"); + if ( NULL == sep ) + elog(ERROR, + "PL/Java cannot determine the path separator this platform uses"); + s_path_var_sep = *sep; + + if ( IS_FORMLESS_VOID == initstage ) + registerGUCOptions(); + + if ( InstallHelper_shouldDeferInit() ) + deferInit = true; + else + pljavaCheckExtension( NULL); initsequencer( initstage, true); } static void initPLJavaClasses(void) { - jfieldID tlField; + jfieldID fID; + int javaMajor; JNINativeMethod backendMethods[] = { { @@ -583,11 +1003,6 @@ static void initPLJavaClasses(void) Java_org_postgresql_pljava_internal_Backend_isReleaseLingeringSavepoints }, { - "_getLibraryPath", - "()Ljava/lang/String;", - Java_org_postgresql_pljava_internal_Backend__1getLibraryPath - }, - { "_getConfigOption", "(Ljava/lang/String;)Ljava/lang/String;", Java_org_postgresql_pljava_internal_Backend__1getConfigOption @@ -612,62 +1027,87 @@ static void initPLJavaClasses(void) "()Z", Java_org_postgresql_pljava_internal_Backend__1isCreatingExtension }, + { + "_allowingUnenforcedUDT", + "()Z", + Java_org_postgresql_pljava_internal_Backend__1allowingUnenforcedUDT + }, + { + "_myLibraryPath", + "()Ljava/lang/String;", + Java_org_postgresql_pljava_internal_Backend__1myLibraryPath + }, + { + "_pokeJEP411", + "(Ljava/lang/Class;Ljava/lang/Object;)V", + Java_org_postgresql_pljava_internal_Backend__1pokeJEP411 + }, + { 0, 0, 0 } + }; + + JNINativeMethod earlyMethods[] = + { + { + "_forbidOtherThreads", + "()Z", + Java_org_postgresql_pljava_internal_Backend_00024EarlyNatives__1forbidOtherThreads + }, + { + "_defineClass", + "(Ljava/lang/String;Ljava/lang/ClassLoader;[B)Ljava/lang/Class;", + Java_org_postgresql_pljava_internal_Backend_00024EarlyNatives__1defineClass + }, { 0, 0, 0 } }; + jclass cls; + + JavaMemoryContext = AllocSetContextCreate(TopMemoryContext, + "PL/Java", + ALLOCSET_DEFAULT_SIZES); Exception_initialize(); - elog(DEBUG2, "checking for a PL/Java Backend class on the given classpath"); - s_Backend_class = PgObject_getJavaClass( - "org/postgresql/pljava/internal/Backend"); + elog(DEBUG2, + "checking for a PL/Java Backend class on the given module path"); + + cls = PgObject_getJavaClass( + "org/postgresql/pljava/internal/Backend$EarlyNatives"); + PgObject_registerNatives2(cls, earlyMethods); + + cls = PgObject_getJavaClass("org/postgresql/pljava/internal/Backend"); elog(DEBUG2, "successfully loaded Backend class"); + s_Backend_class = JNI_newGlobalRef(cls); PgObject_registerNatives2(s_Backend_class, backendMethods); - tlField = PgObject_getStaticJavaField(s_Backend_class, "THREADLOCK", "Ljava/lang/Object;"); - JNI_setThreadLock(JNI_getStaticObjectField(s_Backend_class, tlField)); + fID = PgObject_getStaticJavaField(s_Backend_class, "JAVA_MAJOR", "I"); + javaMajor = JNI_getStaticIntField(s_Backend_class, fID); + javaGT11 = 11 < javaMajor; + javaGE17 = 17 <= javaMajor; + + fID = PgObject_getStaticJavaField(s_Backend_class,\ + "WITHOUT_ENFORCEMENT", "Z"); + withoutEnforcement = JNI_getStaticBooleanField(s_Backend_class, fID); + + fID = PgObject_getStaticJavaField(s_Backend_class, + "THREADLOCK", "Ljava/lang/Object;"); + JNI_setThreadLock(JNI_getStaticObjectField(s_Backend_class, fID)); Invocation_initialize(); Exception_initialize2(); SPI_initialize(); Type_initialize(); + pljava_DualState_initialize(); Function_initialize(); Session_initialize(); PgSavepoint_initialize(); XactListener_initialize(); SubXactListener_initialize(); + SQLChunkIOOrder_initialize(); /* safely caches relevant system properties */ 
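An aside that may help in reading the long native symbol names registered above: they follow the standard JNI mangling rules, where package dots become '_', an underscore inside a Java name becomes "_1", and the '$' of a nested class becomes "_00024". For example (the method here is hypothetical, shown only to illustrate the rule):

/*
 * Java side (hypothetical):
 *   package org.postgresql.pljava.internal;
 *   class Backend { static native int _fooBar(); }
 *
 * C side: "Java_" + mangled class name + "_" + mangled method name.
 */
JNIEXPORT jint JNICALL
Java_org_postgresql_pljava_internal_Backend__1fooBar(JNIEnv *env, jclass cls);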
SQLInputFromChunk_initialize(); SQLOutputToChunk_initialize(); - SQLInputFromTuple_initialize(); SQLOutputToTuple_initialize(); InstallHelper_initialize(); - - s_setTrusted = PgObject_getStaticJavaMethod(s_Backend_class, "setTrusted", "(Z)V"); -} - -/** - * Initialize security - */ -void Backend_setJavaSecurity(bool trusted) -{ - if(trusted != s_currentTrust) - { - /* GCJ has major issues here. Real work on SecurityManager and - * related classes has just started in version 4.0.0. - */ -#ifndef GCJ - JNI_callStaticVoidMethod(s_Backend_class, s_setTrusted, (jboolean)trusted); - if(JNI_exceptionCheck()) - { - JNI_exceptionDescribe(); - JNI_exceptionClear(); - ereport(ERROR, ( - errcode(ERRCODE_INTERNAL_ERROR), - errmsg("Unable to initialize java security"))); - } -#endif - s_currentTrust = trusted; - } } int Backend_setJavaLogLevel(int logLevel) @@ -676,18 +1116,147 @@ int Backend_setJavaLogLevel(int logLevel) s_javaLogLevel = logLevel; return oldLevel; } - + +static const char DEATH_HINT[] = + "Depending on log_min_messages and whether logging_collector is active, " + "relevant information may be near this message in the server log. If " + "during VM startup, pljava.vmoptions and other pljava.* settings should " + "be checked for mistakes or incompatibility with the Java version of the " + "library pljava.libjvm_location points to. Causes can include a misspelled " + "entry in pljava.module_path or a jar that can't be opened on that path. " + "If during \"CREATE EXTENSION pljava\" and there is little information in " + "the log, try in a new session with LOAD rather than CREATE EXTENSION."; + +static void onJVMExitOrAbort(void); + +static void JNICALL my_abort() +{ + onJVMExitOrAbort(); + ereport(FATAL, ( + errcode(ERRCODE_CLASS_SQLJRT), + errmsg("PostgreSQL backend exiting because Java VM requested abort"), + errdetail("Abort requested %s.", + s_startingVM ? "during VM startup" : "by already started VM"), + errhint(DEATH_HINT) + )); +} + +static void JNICALL my_exit(jint code) +{ + onJVMExitOrAbort(); + ereport(FATAL, ( + errcode(ERRCODE_CLASS_SQLJRT), + errmsg("PostgreSQL backend exiting because Java VM requested exit " + "with code %d", (int)code), + errdetail("Exit requested %s.", + s_startingVM ? "during VM startup" : "by already started VM"), + errhint(DEATH_HINT) + )); +} + +static void onJVMExitOrAbort() +{ + /* + * We will later hit the proc_exit handler, which will try to destroy the + * already-gone JVM if this reference is non-null. + */ + s_javaVM = NULL; + /* + * This does a PostgreSQL UnregisterResourceReleaseCallback, which should + * be painless if the callback hasn't been registered yet. The key is to + * avoid triggering a DualState callback that tries a JNI upcall into + * the already-gone JVM. + */ + pljava_DualState_unregister(); +} + /** * Special purpose logging function called from JNI when verbose is enabled. 
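For orientation before the state machine that follows (the exact text varies by JVM build, so this sample is only illustrative), the -Xcheck:jni chatter being filtered arrives as a capacity warning followed by stack-trace and lock lines, roughly:

/*
 *   WARNING: JNI local refs: 33, exceeds capacity: 32
 *       at com.sun.management.internal.DiagnosticCommandImpl.<something>(Native Method)
 *       at ...
 *       - locked <0x...> (a java.lang.Object)
 *
 * The warning line and the trace lines after it are consumed; only when the
 * trace does not point into DiagnosticCommandImpl is a single ereport(INFO)
 * emitted instead, carrying the counts and the first trace line as detail.
 */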
*/ static jint JNICALL my_vfprintf(FILE* fp, const char* format, va_list args) { + static char const * const cap_format = + "WARNING: JNI local refs: %u, exceeds capacity: %u"; + static char const at_prefix[] = "\tat "; + static char const locked_prefix[] = "\t- locked <"; + static char const class_prefix[] = "(a "; + static char const culprit[] = + " com.sun.management.internal.DiagnosticCommandImpl."; + static char const nostack[] = + "No stacktrace, probably called from PostgreSQL"; + static enum matchstate + { + VFP_INITIAL, + VFP_MAYBE, + VFP_ATE_AT, + VFP_ATE_LOCKED + } + state = VFP_INITIAL; + static unsigned int lastlive, lastcap; + char buf[1024]; char* ep; char* bp = buf; + unsigned int live, cap; + int got; + char const *detail; vsnprintf(buf, sizeof(buf), format, args); + /* Try to eliminate annoying -Xcheck:jni messages from deep in JMX that + * nothing can be done about here. + */ + for ( ;; state = VFP_INITIAL ) + { + switch ( state ) + { + case VFP_INITIAL: + got = sscanf(buf, cap_format, &live, &cap); + if ( 2 != got ) + break; + lastlive = live; + lastcap = cap; + state = VFP_MAYBE; + return 0; + + case VFP_MAYBE: + if ( 0 != strncmp(buf, at_prefix, sizeof at_prefix - 1) ) + detail = nostack; + else + { + detail = buf; + state = VFP_ATE_AT; + if ( NULL != strstr(buf, culprit) ) + return 0; + } + ereport(INFO, ( + errmsg_internal(cap_format, lastlive, lastcap), + errdetail_internal("%s", detail), + errhint( + "To pinpoint location, set a breakpoint on this ereport " + "and follow stacktrace to a functionExit(), its caller " + "(a JNI method), and the immediate caller of that."))); + if ( nostack == detail ) + continue; + return 0; + + case VFP_ATE_AT: + if ( 0 == strncmp(buf, at_prefix, sizeof at_prefix - 1) ) + return 0; /* remain in ATE_AT state */ + if ( 0 != strncmp(buf, locked_prefix, sizeof locked_prefix - 1) ) + continue; + state = VFP_ATE_LOCKED; + return 0; + + case VFP_ATE_LOCKED: + if ( 0 != strncmp(buf, class_prefix, sizeof class_prefix - 1) ) + continue; + state = VFP_ATE_AT; + return 0; + } + break; + } + /* Trim off trailing newline and other whitespace. */ ep = bp + strlen(bp) - 1; @@ -714,30 +1283,32 @@ static void appendPathParts(const char* path, StringInfoData* bld, HashMap uniqu for (;;) { char* pathPart; + char* sep; size_t len; + if(*path == 0) break; - len = strcspn(path, ";:"); + sep = first_path_var_separator(path); - if(len == 1 && *(path+1) == ':' && isalnum(*path)) - /* - * Windows drive designator, leave it "as is". - */ - len = strcspn(path+2, ";:") + 2; - else - if(len == 0) - { + if(sep == path) + { /* Ignore zero length components. 
*/ ++path; continue; - } + } + + if ( NULL == sep ) + len = strlen(path); + else + len = sep - path; initStringInfo(&buf); if(*path == '$') { - if(len == 7 || (strcspn(path, "/\\") == 7 && strncmp(path, "$libdir", 7) == 0)) + if( (len == 7 || first_dir_separator(path) == path + 7) + && strncmp(path, "$libdir", 7) == 0) { char pathbuf[MAXPGPATH]; get_pkglib_path(my_exec_path, pathbuf); @@ -748,7 +1319,8 @@ static void appendPathParts(const char* path, StringInfoData* bld, HashMap uniqu else ereport(ERROR, ( errcode(ERRCODE_INVALID_NAME), - errmsg("invalid macro name '%*s' in PL/Java classpath", (int)len, path))); + errmsg("invalid macro name '%*s' in PL/Java module path", + (int)len, path))); } if(len > 0) @@ -763,58 +1335,29 @@ static void appendPathParts(const char* path, StringInfoData* bld, HashMap uniqu if(HashMap_size(unique) == 0) appendStringInfo(bld, "%s", prefix); else -#if defined(WIN32) - appendStringInfoChar(bld, ';'); -#else - appendStringInfoChar(bld, ':'); -#endif + appendStringInfoChar(bld, s_path_var_sep); appendStringInfo(bld, "%s", pathPart); HashMap_putByString(unique, pathPart, (void*)1); } pfree(pathPart); if(*path == 0) break; - ++path; /* Skip ':' */ + ++path; /* Skip path var separator */ } } /* - * Get the CLASSPATH. Result is always freshly palloc'd. + * Get the module path. Result is always freshly palloc'd. + * No longer relies on an environment variable. What CLASSPATH variable might + * happen to be randomly set in the environment of a PostgreSQL backend? */ -static char* getClassPath(const char* prefix) +static char* getModulePath(const char* prefix) { char* path; HashMap unique = HashMap_create(13, CurrentMemoryContext); StringInfoData buf; initStringInfo(&buf); - - /* Put the pljava installed in the $libdir first in the path */ - appendPathParts("$libdir/java/pljava.jar", &buf, unique, prefix); - -#if 0 - /* - * Currently pljava.classpath is user setable, which makes this a - * security problem. If CLASSPATH needs to be setable beyond simply - * locating the pljava.jar file then this requires modification. - * - * The Greenplum version of pljava currently uses the classpath guc - * differently anyhow due to differences in storing the jar files - * in the filesystem rather than in the database. - */ - appendPathParts(pljava_classpath, &buf, unique, prefix); - - /* - * For this to be useful it needs to be propagated from the - * master to all the segments, otherwise it wouldn't be the - * same everyplace and that would be a problem. - * - * Using a jvm_classpath GUC makes more architectural sense, - * for it to be secure it would need to be super-user only, - * possibly conf file only. 
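To make the effect of getModulePath()/appendPathParts() concrete (the setting and paths below are hypothetical; real values depend on the installation and on pljava.module_path): on a platform whose path separator is ':', a setting such as

	pljava.module_path = '$libdir/pljava/pljava-api.jar:$libdir/pljava/pljava.jar'

has each '$libdir' expanded via get_pkglib_path(), duplicate entries dropped through the HashMap, and the parts rejoined with the detected separator behind the prefix, producing one JVM option of roughly

	--module-path=/usr/lib/postgresql/lib/pljava/pljava-api.jar:/usr/lib/postgresql/lib/pljava/pljava.jar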
- */ - appendPathParts(getenv("CLASSPATH"), &buf, unique, prefix); -#endif - + appendPathParts(modulepath, &buf, unique, prefix); PgObject_free((PgObject)unique); path = buf.data; if(strlen(path) == 0) @@ -873,12 +1416,7 @@ static void PLCatchupInterruptHandler(int signo) } static sigjmp_buf recoverBuf; -static void terminationTimeoutHandler( -#if PG_VERSION_NUM >= 90300 -#else - int signum -#endif -) +static void terminationTimeoutHandler() { kill(MyProcPid, SIGQUIT); @@ -902,42 +1440,27 @@ static void _destroyJavaVM(int status, Datum dummy) { Invocation ctx; #ifdef USE_PLJAVA_SIGHANDLERS - -#if PG_VERSION_NUM >= 90300 TimeoutId tid; -#else - pqsigfunc saveSigAlrm; -#endif - Invocation_pushInvocation(&ctx, false); + Invocation_pushBootContext(&ctx); if(sigsetjmp(recoverBuf, 1) != 0) { elog(DEBUG2, "needed to forcibly shut down the Java virtual machine"); s_javaVM = 0; + currentInvocation = 0; return; } -#if PG_VERSION_NUM >= 90300 - InitializeTimeouts(); /* establishes SIGALRM handler */ tid = RegisterTimeout(USER_TIMEOUT, terminationTimeoutHandler); -#else - saveSigAlrm = pqsignal(SIGALRM, terminationTimeoutHandler); - enable_sig_alarm(5000, false); -#endif + enable_timeout_after(tid, 5000); elog(DEBUG2, "shutting down the Java virtual machine"); JNI_destroyVM(s_javaVM); -#if PG_VERSION_NUM >= 90300 disable_timeout(tid, false); #else - disable_sig_alarm(false); - pqsignal(SIGALRM, saveSigAlrm); -#endif - -#else - Invocation_pushInvocation(&ctx, false); + Invocation_pushBootContext(&ctx); elog(DEBUG2, "shutting down the Java virtual machine"); JNI_destroyVM(s_javaVM); #endif @@ -1004,6 +1527,9 @@ static void JVMOptList_add(JVMOptList* jol, const char* optString, void* extraIn if ( 0 == strncmp(optString, visualVMprefix, sizeof visualVMprefix - 1) ) seenVisualVMName = true; + if ( 0 == strncmp(optString, moduleMainPrefix, sizeof moduleMainPrefix-1) ) + seenModuleMain = true; + elog(DEBUG2, "Added JVM option string \"%s\"", optString); } @@ -1021,6 +1547,15 @@ static void JVMOptList_addVisualVMName(JVMOptList* jol) JVMOptList_add(jol, buf.data, 0, false); } +static void JVMOptList_addModuleMain(JVMOptList* jol) +{ + StringInfoData buf; + initStringInfo(&buf); + appendStringInfo(&buf, "%s%s", + moduleMainPrefix, "org.postgresql.pljava"); + JVMOptList_add(jol, buf.data, 0, false); +} + /* Split JVM options. The string is split on whitespace unless the * whitespace is found within a string or is escaped by backslash. A * backslash escaped quote is not considered a string delimiter. @@ -1102,8 +1637,8 @@ static void addUserJVMOptions(JVMOptList* optList) static void initJavaSession(void) { jclass sessionClass = PgObject_getJavaClass("org/postgresql/pljava/internal/Session"); - jmethodID init = PgObject_getStaticJavaMethod(sessionClass, "init", "()J"); - mainThreadId = JNI_callStaticLongMethod(sessionClass, init); + jmethodID init = PgObject_getStaticJavaMethod(sessionClass, "init", "()V"); + JNI_callStaticVoidMethod(sessionClass, init); JNI_deleteLocalRef(sessionClass); if(JNI_exceptionCheck()) @@ -1116,6 +1651,7 @@ static void initJavaSession(void) } } +#if PG_VERSION_NUM < 100000 static void checkIntTimeType(void) { const char* idt = PG_GETCONFIGOPTION("integer_datetimes"); @@ -1123,6 +1659,7 @@ static void checkIntTimeType(void) integerDateTimes = (strcmp(idt, "on") == 0); elog(DEBUG2, integerDateTimes ? 
"Using integer_datetimes" : "Not using integer_datetimes"); } +#endif static char *get_jni_errmsg(jint jnicode) { @@ -1152,7 +1689,7 @@ static jint initializeJavaVM(JVMOptList *optList) vm_args.nOptions = optList->size; vm_args.options = optList->options; - vm_args.version = JNI_VERSION_1_4; + vm_args.version = JNI_VERSION_9; vm_args.ignoreUnrecognized = JNI_FALSE; elog(DEBUG2, "creating Java virtual machine"); @@ -1170,31 +1707,230 @@ static jint initializeJavaVM(JVMOptList *optList) return jstat; } -static Datum internalCallHandler(bool trusted, PG_FUNCTION_ARGS); - -/* -* this is for backward compatibility with 4.x versions as -* we have pljavau_call_handler in the pg_pltemplate -*/ +#define GUCBOOTVAL(v) (v), +#define GUCBOOTASSIGN(a, v) +#define GUCFLAGS(f) (f), +#define GUCCHECK(h) (h), + +#define BOOL_GUC(name, short_desc, long_desc, valueAddr, bootValue, context, \ + flags, check_hook, assign_hook, show_hook) \ + GUCBOOTASSIGN((valueAddr), (bootValue)) \ + DefineCustomBoolVariable((name), (short_desc), (long_desc), (valueAddr), \ + GUCBOOTVAL(bootValue) (context), GUCFLAGS(flags) GUCCHECK(check_hook) \ + (assign_hook), (show_hook)) + +#define INT_GUC(name, short_desc, long_desc, valueAddr, bootValue, minValue, \ + maxValue, context, flags, check_hook, assign_hook, show_hook) \ + GUCBOOTASSIGN((valueAddr), (bootValue)) \ + DefineCustomIntVariable((name), (short_desc), (long_desc), (valueAddr), \ + GUCBOOTVAL(bootValue) (minValue), (maxValue), (context), \ + GUCFLAGS(flags) GUCCHECK(check_hook) (assign_hook), (show_hook)) + +#define STRING_GUC(name, short_desc, long_desc, valueAddr, bootValue, context, \ + flags, check_hook, assign_hook, show_hook) \ + GUCBOOTASSIGN((char const **)(valueAddr), (bootValue)) \ + DefineCustomStringVariable((name), (short_desc), (long_desc), (valueAddr), \ + GUCBOOTVAL(bootValue) (context), GUCFLAGS(flags) GUCCHECK(check_hook) \ + (assign_hook), (show_hook)) + +#define ENUM_GUC(name, short_desc, long_desc, valueAddr, bootValue, options, \ + context, flags, check_hook, assign_hook, show_hook) \ + GUCBOOTASSIGN((valueAddr), (bootValue)) \ + DefineCustomEnumVariable((name), (short_desc), (long_desc), (valueAddr), \ + GUCBOOTVAL(bootValue) (options), (context), GUCFLAGS(flags) \ + GUCCHECK(check_hook) (assign_hook), (show_hook)) + +#ifndef PLJAVA_LIBJVMDEFAULT +#define PLJAVA_LIBJVMDEFAULT "libjvm" +#endif -extern PLJAVADLLEXPORT Datum pljavau_call_handler(PG_FUNCTION_ARGS); -PG_FUNCTION_INFO_V1(pljavau_call_handler); -Datum pljavau_call_handler(PG_FUNCTION_ARGS) -{ - return internalCallHandler(false, fcinfo); -} +#define PLJAVA_ENABLE_DEFAULT true -extern PLJAVADLLEXPORT Datum pljava_call_handler(PG_FUNCTION_ARGS); -PG_FUNCTION_INFO_V1(pljava_call_handler); +#if PG_VERSION_NUM < 110000 +#define PLJAVA_IMPLEMENTOR_FLAGS GUC_LIST_INPUT | GUC_LIST_QUOTE +#else +#define PLJAVA_IMPLEMENTOR_FLAGS GUC_LIST_INPUT +#endif -/* - * This is the entry point for all trusted calls. - */ -Datum pljava_call_handler(PG_FUNCTION_ARGS) +static void registerGUCOptions(void) { - return internalCallHandler(true, fcinfo); + static char pathbuf[MAXPGPATH]; + + STRING_GUC( + "pljava.libjvm_location", + "Path to the libjvm (.so, .dll, etc.) 
file in Java's jre/lib area", + NULL, /* extended description */ + &libjvmlocation, + PLJAVA_LIBJVMDEFAULT, + PGC_SUSET, + GUC_SUPERUSER_ONLY, /* flags */ + check_libjvm_location, + assign_libjvm_location, + NULL); /* show hook */ + + STRING_GUC( + "pljava.vmoptions", + "Options sent to the JVM when it is created", + NULL, /* extended description */ + &vmoptions, + NULL, /* boot value */ + PGC_SUSET, + GUC_SUPERUSER_ONLY, /* flags */ + check_vmoptions, + assign_vmoptions, + NULL); /* show hook */ + + STRING_GUC( + "pljava.module_path", + "Module path to be used by the JVM", + NULL, /* extended description */ + &modulepath, + InstallHelper_defaultModulePath(pathbuf,s_path_var_sep),/* boot value */ + PGC_SUSET, + GUC_SUPERUSER_ONLY, /* flags */ + check_modulepath, + assign_modulepath, + NULL); /* show hook */ + + STRING_GUC( + policyUrlsGUC, + "URLs to Java security policy file(s) for PL/Java's use", + "Quote each URL and separate with commas. Any URL may begin (inside " + "the quotes) with n= where n is the index of the Java " + "policy.url.n property to set. If not specified, the first will " + "become policy.url.2 (following the JRE-installed policy) with " + "subsequent entries following in sequence. The last entry may be a " + "bare = (still quoted) to prevent use of any higher-numbered policy " + "URLs from the java.security file.", + &policy_urls, + "\"file:${org.postgresql.sysconfdir}/pljava.policy\",\"=\"", + PGC_SUSET, + PLJAVA_IMPLEMENTOR_FLAGS | GUC_SUPERUSER_ONLY, + check_policy_urls, /* check hook */ + assign_policy_urls, + NULL); /* show hook */ + + STRING_GUC( + unenforcedGUC, + "Which PL/Java-based PLs may execute without security enforcement", + "List the language names (such as javau) separated by commas. When " + "PL/Java is loaded with -Djava.security.manager=disallow (as is " + "needed on Java 24 and later), only functions in the languages named " + "here can be executed.", + &allow_unenforced, + NULL, /* boot value */ + PGC_SUSET, + PLJAVA_IMPLEMENTOR_FLAGS | GUC_SUPERUSER_ONLY, + NULL, /* check hook */ + assign_allow_unenforced, + NULL); /* show hook */ + + BOOL_GUC( + "pljava.debug", + "Stop the backend to attach a debugger", + NULL, /* extended description */ + &pljavaDebug, + false, /* boot value */ + PGC_USERSET, + 0, /* flags */ + NULL, /* check hook */ + NULL, NULL); /* assign hook, show hook */ + + INT_GUC( + "pljava.statement_cache_size", + "Size of the prepared statement MRU cache", + NULL, /* extended description */ + &statementCacheSize, + 11, /* boot value */ + 0, 512, /* min, max values */ + PGC_USERSET, + 0, /* flags */ + NULL, /* check hook */ + NULL, NULL); /* assign hook, show hook */ + + BOOL_GUC( + "pljava.release_lingering_savepoints", + "If true, lingering savepoints will be released on function exit. 
" + "If false, they will be rolled back", + NULL, /* extended description */ + &pljavaReleaseLingeringSavepoints, + false, /* boot value */ + PGC_USERSET, + 0, /* flags */ + NULL, /* check hook */ + NULL, NULL); /* assign hook, show hook */ + + BOOL_GUC( + "pljava.enable", + "If off, the Java virtual machine will not be started until set on.", + "This is mostly of use on PostgreSQL versions < 9.2, where option " + "settings changed before LOADing PL/Java may be rejected, so they must " + "be made after LOAD, but before the virtual machine is started.", + &pljavaEnabled, + PLJAVA_ENABLE_DEFAULT, /* boot value */ + PGC_USERSET, + 0, /* flags */ + check_enabled, /* check hook */ + assign_enabled, + NULL); /* show hook */ + + BOOL_GUC( + "pljava.allow_unenforced_udt", + "Whether PL/Java-based \"mapped UDT\" data conversion functions are " + "allowed to execute without security enforcement", + NULL, /* extended description */ + &allow_unenforced_udt, + false, /* boot value */ + PGC_SUSET, + GUC_SUPERUSER_ONLY, /* flags */ + check_allow_unenforced_udt, /* check hook */ + NULL, NULL); /* assign hook, show hook */ + + STRING_GUC( + "pljava.implementors", + "Implementor names recognized in deployment descriptors", + NULL, /* extended description */ + &implementors, + "postgresql", /* boot value */ + PGC_USERSET, + PLJAVA_IMPLEMENTOR_FLAGS, + NULL, /* check hook */ + NULL, NULL); /* assign hook, show hook */ + + ENUM_GUC( + "pljava.java_thread_pg_entry", + "Policy for entry to PG code by Java threads other than the main one", + "If 'allow', any Java thread can enter PG while the main thread has " + "entered Java. If 'error', any thread other than the main one will " + "incur an exception if it tries to enter PG. If 'block', the main " + "thread will never release its lock, so any other thread that tries " + "to enter PG will indefinitely block. If 'throw', like 'error', other " + "threads will incur an exception, but earlier: it will be thrown " + "in Java, before the JNI boundary into C is even crossed.", + &java_thread_pg_entry, + ENUMBOOTVAL(java_thread_pg_entry_options[0]), /* allow */ + java_thread_pg_entry_options, + PGC_USERSET, + 0, /* flags */ + check_java_thread_pg_entry, /* check hook */ + assign_java_thread_pg_entry, + NULL); /* display hook */ + + EmitWarningsOnPlaceholders("pljava"); } +#undef GUCBOOTVAL +#undef GUCBOOTASSIGN +#undef GUCFLAGS +#undef GUCCHECK +#undef BOOL_GUC +#undef INT_GUC +#undef STRING_GUC +#undef ENUM_GUC +#undef PLJAVA_ENABLE_DEFAULT +#undef PLJAVA_IMPLEMENTOR_FLAGS + +static inline Datum internalCallHandler(bool trusted, PG_FUNCTION_ARGS); extern PLJAVADLLEXPORT Datum javau_call_handler(PG_FUNCTION_ARGS); PG_FUNCTION_INFO_V1(javau_call_handler); @@ -1218,10 +1954,13 @@ Datum java_call_handler(PG_FUNCTION_ARGS) return internalCallHandler(true, fcinfo); } -static Datum internalCallHandler(bool trusted, PG_FUNCTION_ARGS) +static inline Datum +internalCallHandler(bool trusted, PG_FUNCTION_ARGS) { Invocation ctx; Datum retval = 0; + Oid funcoid = fcinfo->flinfo->fn_oid; + bool forTrigger = CALLED_AS_TRIGGER(fcinfo); #ifdef USE_PLJAVA_SIGHANDLERS /* @@ -1246,42 +1985,200 @@ static Datum internalCallHandler(bool trusted, PG_FUNCTION_ARGS) * It's cheap, and can be followed back to the right language and * handler function entries later if needed. */ - *(trusted ? &pljavaTrustedOid : &pljavaUntrustedOid) - = fcinfo->flinfo->fn_oid; + *(trusted ? 
&pljavaTrustedOid : &pljavaUntrustedOid) = funcoid; if ( IS_COMPLETE != initstage ) { + deferInit = false; initsequencer( initstage, false); - - /* Force initial setting - */ - s_currentTrust = !trusted; } - Invocation_pushInvocation(&ctx, trusted); + Invocation_pushInvocation(&ctx); PG_TRY(); { - Function function = Function_getFunction(fcinfo); - if(CALLED_AS_TRIGGER(fcinfo)) + retval = Function_invoke( + funcoid, trusted, forTrigger, false, true, fcinfo); + Invocation_popInvocation(false); + } + PG_CATCH(); + { + Invocation_popInvocation(true); + PG_RE_THROW(); + } + PG_END_TRY(); + return retval; +} + +static Datum internalValidator(bool trusted, PG_FUNCTION_ARGS); + +extern PLJAVADLLEXPORT Datum javau_validator(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1(javau_validator); + +Datum javau_validator(PG_FUNCTION_ARGS) +{ + return internalValidator(false, fcinfo); +} + +extern PLJAVADLLEXPORT Datum java_validator(PG_FUNCTION_ARGS); +PG_FUNCTION_INFO_V1(java_validator); + +Datum java_validator(PG_FUNCTION_ARGS) +{ + return internalValidator(true, fcinfo); +} + +static Datum internalValidator(bool trusted, PG_FUNCTION_ARGS) +{ + Oid funcoid = PG_GETARG_OID(0); + Invocation ctx; + Oid *oidSaveLocation = NULL; + +#ifdef GP_VERSION_NUM + /* QEs should not run validator logic that can do DDL. */ + if ( ! IS_QD_OR_SINGLENODE() ) + PG_RETURN_VOID(); +#endif + + bool ok = CheckFunctionValidatorAccess(fcinfo->flinfo->fn_oid, funcoid); + /* + * CheckFunctionValidatorAccess reserves a possible future behavior where + * it returns false and this validator should immediately return. Here we + * abuse that convention slightly by first checking an additional constraint + * on function creation in withoutEnforcing mode. That, arguably, is a check + * that should never be skipped, just like the permission checks made in + * CheckFunctionValidatorAccess itself. + */ + if ( withoutEnforcement && trusted && ! superuser() ) + ereport(ERROR, ( + errmsg( + "trusted PL/Java language restricted to superuser when " + "\"java.security.manager\"=\"disallow\""), + errdetail( + "This PL/Java version enforces security policy using important " + "Java features that upstream Java has disabled as of Java 24, " + "as described in JEP 486. In Java 18 through 23, enforcement is " + "still available, but requires " + "\"-Djava.security.manager=allow\" in \"pljava.vmoptions\". " + "The alternative \"-Djava.security.manager=disallow\" permits " + "use on Java 24 and later, but with no enforcement and no " + "distinction between trusted and untrusted. In this mode, only " + "a superuser may use even a 'trusted' PL/Java language") + )); + if ( ! ok ) + PG_RETURN_VOID(); + + /* + * In the call handler, which could be called heavily, funcoid gets + * unconditionally stored to one of these two locations, rather than + * spending extra cycles deciding whether to store it or not. A validator + * will not be called as heavily, and can afford to check here whether + * an Oid needs to be stored or not. The situation to avoid is where + * funcoid gets stored here, as an Oid from which PL/Java's library path can + * be found, but the function then gets rejected by the validator, leaving + * the stored Oid invalid and useless for that purpose. Therefore, choose + * here whether and where to store it, but store it only within the PG_TRY + * block, and replace with InvalidOid again in the PG_CATCH. 
+ */ + if ( trusted ) + { + if ( InvalidOid == pljavaTrustedOid ) + oidSaveLocation = &pljavaTrustedOid; + } + else + { + if ( InvalidOid == pljavaUntrustedOid ) + oidSaveLocation = &pljavaUntrustedOid; + } + + if ( IS_PLJAVA_INSTALLING > initstage ) + { + if ( check_function_bodies ) /* We're gonna need a JVM */ { - /* Called as a trigger procedure - */ - retval = Function_invokeTrigger(function, fcinfo); + deferInit = false; + initsequencer( initstage, false); } - else + else /* Can try to start one, but if no go, just assume function's ok */ { - /* Called as a function - */ - retval = Function_invoke(function, fcinfo); + initsequencer( initstage, true); + if ( IS_PLJAVA_INSTALLING > initstage ) + { + if ( javaGT11 ) + warnJEP411 = true; + PG_RETURN_VOID(); + } } + } + + Invocation_pushInvocation(&ctx); + PG_TRY(); + { + if ( NULL != oidSaveLocation ) + *oidSaveLocation = funcoid; + + Function_invoke( + funcoid, trusted, false, true, check_function_bodies, NULL); Invocation_popInvocation(false); } PG_CATCH(); { + if ( NULL != oidSaveLocation ) + *oidSaveLocation = InvalidOid; + Invocation_popInvocation(true); PG_RE_THROW(); } PG_END_TRY(); - return retval; + + if ( javaGT11 ) + warnJEP411 = true; + PG_RETURN_VOID(); +} + +/* + * Called at the ends of committing transactions to emit a warning about future + * JEP 411 impacts, at most once per session, if any PL/Java functions were + * declared or redeclared in the transaction, or if PL/Java was installed or + * upgraded. Also called from InstallHelper, if pg_upgrade is happening. + * Yes, this is a bit tangled. The tracking of function declaration happens + * above in the validator handler, and PL/Java installation/upgrade is detected + * in the initsequencer. + */ +void Backend_warnJEP411(bool isCommit) +{ + static bool warningEmitted = false; /* once only per session */ + + if ( ! warnJEP411 || withoutEnforcement || warningEmitted ) + return; + + if ( ! isCommit ) + { + warnJEP411 = false; + return; + } + + warningEmitted = true; + + ereport(javaGE17 ? WARNING : NOTICE, ( + errmsg( + "[JEP 411] migration advisory: Java version 24 and later " + "cannot run PL/Java %s with policy enforcement", SO_VERSION_STRING), + errdetail( + "This PL/Java version enforces security policy using important " + "Java features that upstream Java has disabled as of Java 24, " + "as described in JEP 486. In Java 18 through 23, enforcement is " + "still available, but requires " + "\"-Djava.security.manager=allow\" in \"pljava.vmoptions\". "), + errhint( + "For migration planning, this version of PL/Java can still " + "enforce policy in Java versions up to and including 23, " + "and Java 17 and 21 are positioned as long-term support releases. " + "Java 24 and later can be used, if wanted, WITH ABSOLUTELY NO " + "EXPECTATIONS OF SECURITY POLICY ENFORCEMENT, by adding " + "\"-Djava.security.manager=disallow\" in \"pljava.vmoptions\". " + "This mode should be considered only if all Java code to be used " + "is considered well vetted and trusted. 
" + "For details on how PL/Java will adapt, please bookmark " + "https://github.com/tada/pljava/wiki/JEP-411") + )); } /**************************************** @@ -1289,7 +2186,7 @@ static Datum internalCallHandler(bool trusted, PG_FUNCTION_ARGS) ****************************************/ JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void* reserved) { - return JNI_VERSION_1_4; + return JNI_VERSION_9; } /* @@ -1308,7 +2205,22 @@ JNICALL Java_org_postgresql_pljava_internal_Backend__1getConfigOption(JNIEnv* en { PG_TRY(); { - const char *value = PG_GETCONFIGOPTION(key); + const char *value; + if ( 0 != strncmp(policyUrlsGUC, key, 7) ) + goto fallback; + if ( 0 == strcmp(policyUrlsGUC+7, key+7) ) + { + value = policy_urls; + goto finish; + } + if ( 0 == strcmp(unenforcedGUC+7, key+7) ) + { + value = allow_unenforced; + goto finish; + } +fallback: + value = PG_GETCONFIGOPTION(key); +finish: pfree(key); if(value != 0) result = String_createJavaStringFromNTS(value); @@ -1411,24 +2323,141 @@ Java_org_postgresql_pljava_internal_Backend__1isCreatingExtension(JNIEnv *env, j /* * Class: org_postgresql_pljava_internal_Backend - * Method: _getLibraryPath + * Method: _allowingUnenforcedUDT + * Signature: ()Z + */ +JNIEXPORT jboolean JNICALL +Java_org_postgresql_pljava_internal_Backend__1allowingUnenforcedUDT(JNIEnv *env, jclass cls) +{ + return allow_unenforced_udt; +} + +/* + * Class: org_postgresql_pljava_internal_Backend + * Method: _myLibraryPath * Signature: ()Ljava/lang/String; */ JNIEXPORT jstring JNICALL -JNICALL Java_org_postgresql_pljava_internal_Backend__1getLibraryPath(JNIEnv* env, jclass cls) +Java_org_postgresql_pljava_internal_Backend__1myLibraryPath(JNIEnv *env, jclass cls) { - jstring result = 0; + jstring result = NULL; BEGIN_NATIVE - PG_TRY(); - { - result = String_createJavaStringFromNTS(pkglib_path); - } - PG_CATCH(); + + if ( NULL == pljavaLoadPath ) { - Exception_throw_ERROR("GetLibraryPath"); + Oid funcoid = pljavaTrustedOid; + + if ( InvalidOid == funcoid ) + funcoid = pljavaUntrustedOid; + if ( InvalidOid == funcoid ) + return NULL; + + /* + * Result not needed, but pljavaLoadPath is set as a side effect. + */ + InstallHelper_isPLJavaFunction(funcoid, NULL, NULL); } - PG_END_TRY(); + + if ( NULL != pljavaLoadPath ) + result = String_createJavaStringFromNTS(pljavaLoadPath); + END_NATIVE + return result; } + +/* + * Class: org_postgresql_pljava_internal_Backend + * Method: _pokeJEP411 + * Signature: (Ljava/lang/Class;Ljava/lang/Object;)V + * + * This method is hideously dependent on unexposed JDK internals. But then, + * the fact that it's needed at all is hideous already. Java, any language, + * is classic infrastructure. Other layers, like this, are built atop it, and + * others in turn use those layers. The idea that the language developers would + * arrogate to themselves the act of sending an inappropriately low-level + * message directly to ultimate users, insisting that the stack layers above + * cannot intercept it and notify the higher-level users in terms that fit + * the abstractions meaningful there, leaves an uneasy picture of how + * a development team can begin to lose sight of who is providing what to whom + * and why. + * + * At least as of the time of this writing, System has a CallersHolder class + * holding a map recording classes for which the warning has already been sent. + * Poking the 'caller' class into that map works to suppress the warning. 
+ */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_Backend__1pokeJEP411(JNIEnv *env, jclass cls, jclass caller, jobject token) +{ + jclass callersHolder; + jfieldID callers; + jobject map; + jclass mapClass; + jmethodID put; + + BEGIN_NATIVE + + callersHolder = JNI_findClass("java/lang/System$CallersHolder"); + if ( NULL == callersHolder ) + goto failed; + + callers = JNI_getStaticFieldID(callersHolder, "callers", "Ljava/util/Map;"); + if ( NULL == callers ) + goto failed; + + map = JNI_getStaticObjectField(callersHolder, callers); + if ( NULL == map ) + goto failed; + + mapClass = JNI_getObjectClass(map); + put = JNI_getMethodID(mapClass, + "put", "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;"); + + JNI_callObjectMethodLocked(map, put, caller, token); + goto done; + +failed: + JNI_exceptionClear(); + +done: + END_NATIVE +} + +/* + * Class: org_postgresql_pljava_internal_Backend_EarlyNatives + * Method: _forbidOtherThreads + * Signature: ()Z + */ +JNIEXPORT jboolean JNICALL +Java_org_postgresql_pljava_internal_Backend_00024EarlyNatives__1forbidOtherThreads(JNIEnv *env, jclass cls) +{ + return (java_thread_pg_entry & 4) ? JNI_TRUE : JNI_FALSE; +} + +/* + * Class: org_postgresql_pljava_internal_Backend_EarlyNatives + * Method: _defineClass + * Signature: (Ljava/lang/String;Ljava/lang/ClassLoader;[B)Ljava/lang/Class; + */ +JNIEXPORT jclass JNICALL +Java_org_postgresql_pljava_internal_Backend_00024EarlyNatives__1defineClass(JNIEnv *env, jclass cls, jstring name, jobject loader, jbyteArray image) +{ + const char *utfName; + jbyte *bytes; + jsize nbytes; + jclass newcls; + static bool oneShot = false; + + if ( oneShot ) + return NULL; + oneShot = true; + + utfName = (*env)->GetStringUTFChars(env, name, NULL); + bytes = (*env)->GetByteArrayElements(env, image, NULL); + nbytes = (*env)->GetArrayLength(env, image); + newcls = (*env)->DefineClass(env, utfName, loader, bytes, nbytes); + (*env)->ReleaseByteArrayElements(env, image, bytes, JNI_ABORT); + (*env)->ReleaseStringUTFChars(env, name, utfName); +return newcls; +} diff --git a/pljava-so/src/main/c/DualState.c b/pljava-so/src/main/c/DualState.c new file mode 100644 index 00000000..1732b6e5 --- /dev/null +++ b/pljava-so/src/main/c/DualState.c @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack + */ + +#include "org_postgresql_pljava_internal_DualState_SinglePfree.h" +#include "org_postgresql_pljava_internal_DualState_SingleMemContextDelete.h" +#include "org_postgresql_pljava_internal_DualState_SingleFreeTupleDesc.h" +#include "org_postgresql_pljava_internal_DualState_SingleHeapFreeTuple.h" +#include "org_postgresql_pljava_internal_DualState_SingleFreeErrorData.h" +#include "org_postgresql_pljava_internal_DualState_SingleSPIfreeplan.h" +#include "org_postgresql_pljava_internal_DualState_SingleSPIcursorClose.h" +#include "pljava/DualState.h" + +#include "pljava/Backend.h" +#include "pljava/Exception.h" +#include "pljava/Invocation.h" +#include "pljava/PgObject.h" +#include "pljava/JNICalls.h" +#include "pljava/SPI.h" + +/* + * Includes for objects dependent on DualState, so they can be initialized here. 
+ * (If there's a .c file that has no corresponding .h file because there would + * be only an ..._initialize method in it and nothing else at all, just declare + * its init method here.) + */ +#include "pljava/type/ErrorData.h" +extern void pljava_ExecutionPlan_initialize(void); +#include "pljava/type/Portal.h" +#include "pljava/type/Relation.h" +#include "pljava/type/SingleRowReader.h" +#include "pljava/type/TriggerData.h" +#include "pljava/type/Tuple.h" +#include "pljava/type/TupleDesc.h" +#include "pljava/SQLInputFromTuple.h" +#include "pljava/VarlenaWrapper.h" + +static jclass s_DualState_class; + +static jmethodID s_DualState_resourceOwnerRelease; +static jmethodID s_DualState_cleanEnqueuedInstances; + +static jobject s_DualState_key; + +static void resourceReleaseCB(ResourceReleasePhase phase, + bool isCommit, bool isTopLevel, void *arg); + +/* + * Return a capability that is only expected to be accessible to native code. + */ +jobject pljava_DualState_key(void) +{ + return s_DualState_key; +} + +/* + * Rather than using finalizers (deprecated in recent Java anyway), which can + * increase the number of threads needing to interact with PG, DualState objects + * will be enqueued on a ReferenceQueue when their referents become unreachable, + * and this function should be called from strategically-chosen points in native + * code so the thread already interacting with PG will clean the enqueued items. + */ +void pljava_DualState_cleanEnqueuedInstances(void) +{ + JNI_callStaticVoidMethodLocked(s_DualState_class, + s_DualState_cleanEnqueuedInstances); +} + +/* + * Called when the lifespan/scope of a particular PG resource owner is about to + * expire, to make the associated DualState objects inaccessible from Java. As + * described in DualState.java, the argument will often be a PG ResourceOwner + * (when this function is called by resourceReleaseCB), but pointers to other + * structures can also be used (such a pointer clearly can't be confused with a + * ResourceOwner existing at the same time). In PG 9.5+, it could be a + * MemoryContext, with a MemoryContextCallback established to call this + * function. For items whose scope is limited to a single PL/Java function + * invocation, this can be a pointer to the Invocation. + */ +void pljava_DualState_nativeRelease(void *ro) +{ + /* + * This static assertion does not need to be in every file that uses + * PointerGetJLong, but it should be somewhere once, so here it is. 
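The comment above notes that, besides a ResourceOwner, a MemoryContext can act as the lifespan key, with a MemoryContextCallback arranged to call this function. A minimal sketch of that wiring, assuming a context cxt whose reset or deletion should invalidate the associated DualState instances (the function names here are invented for illustration and are not added by the patch):

#include "postgres.h"	/* brings in MemoryContextCallback and palloc APIs */
#include "pljava/DualState.h"

/* Invoked when the context is reset or deleted; arg is the context itself. */
static void
dualStateScopeGone(void *arg)
{
	pljava_DualState_nativeRelease(arg);
}

/* Tie DualState instances keyed on cxt to that context's lifetime. */
static void
tieDualStateToContext(MemoryContext cxt)
{
	MemoryContextCallback *cb =
		MemoryContextAlloc(cxt, sizeof(MemoryContextCallback));

	cb->func = dualStateScopeGone;
	cb->arg = cxt;
	MemoryContextRegisterResetCallback(cxt, cb);
}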
+ */ + StaticAssertStmt(sizeof (uintptr_t) <= sizeof (jlong), + "uintptr_t will not fit in jlong on this platform"); + + JNI_callStaticVoidMethodLocked(s_DualState_class, + s_DualState_resourceOwnerRelease, + PointerGetJLong(ro)); +} + +void pljava_DualState_initialize(void) +{ + jclass clazz; + jmethodID ctor; + + JNINativeMethod singlePfreeMethods[] = + { + { + "_pfree", + "(J)V", + Java_org_postgresql_pljava_internal_DualState_00024SinglePfree__1pfree + }, + { 0, 0, 0 } + }; + + JNINativeMethod singleMemContextDeleteMethods[] = + { + { + "_memContextDelete", + "(J)V", + Java_org_postgresql_pljava_internal_DualState_00024SingleMemContextDelete__1memContextDelete + }, + { 0, 0, 0 } + }; + + JNINativeMethod singleFreeTupleDescMethods[] = + { + { + "_freeTupleDesc", + "(J)V", + Java_org_postgresql_pljava_internal_DualState_00024SingleFreeTupleDesc__1freeTupleDesc + }, + { 0, 0, 0 } + }; + + JNINativeMethod singleHeapFreeTupleMethods[] = + { + { + "_heapFreeTuple", + "(J)V", + Java_org_postgresql_pljava_internal_DualState_00024SingleHeapFreeTuple__1heapFreeTuple + }, + { 0, 0, 0 } + }; + + JNINativeMethod singleFreeErrorDataMethods[] = + { + { + "_freeErrorData", + "(J)V", + Java_org_postgresql_pljava_internal_DualState_00024SingleFreeErrorData__1freeErrorData + }, + { 0, 0, 0 } + }; + + JNINativeMethod singleSPIfreeplanMethods[] = + { + { + "_spiFreePlan", + "(J)V", + Java_org_postgresql_pljava_internal_DualState_00024SingleSPIfreeplan__1spiFreePlan + }, + { 0, 0, 0 } + }; + + JNINativeMethod singleSPIcursorCloseMethods[] = + { + { + "_spiCursorClose", + "(J)V", + Java_org_postgresql_pljava_internal_DualState_00024SingleSPIcursorClose__1spiCursorClose + }, + { 0, 0, 0 } + }; + + s_DualState_class = (jclass)JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/internal/DualState")); + s_DualState_resourceOwnerRelease = PgObject_getStaticJavaMethod( + s_DualState_class, "resourceOwnerRelease", "(J)V"); + s_DualState_cleanEnqueuedInstances = PgObject_getStaticJavaMethod( + s_DualState_class, "cleanEnqueuedInstances", "()V"); + + clazz = (jclass)PgObject_getJavaClass( + "org/postgresql/pljava/internal/DualState$Key"); + ctor = PgObject_getJavaMethod(clazz, "", "()V"); + s_DualState_key = JNI_newGlobalRef(JNI_newObject(clazz, ctor)); + JNI_deleteLocalRef(clazz); + + clazz = (jclass)PgObject_getJavaClass( + "org/postgresql/pljava/internal/DualState$SinglePfree"); + PgObject_registerNatives2(clazz, singlePfreeMethods); + JNI_deleteLocalRef(clazz); + + clazz = (jclass)PgObject_getJavaClass( + "org/postgresql/pljava/internal/DualState$SingleMemContextDelete"); + PgObject_registerNatives2(clazz, singleMemContextDeleteMethods); + JNI_deleteLocalRef(clazz); + + clazz = (jclass)PgObject_getJavaClass( + "org/postgresql/pljava/internal/DualState$SingleFreeTupleDesc"); + PgObject_registerNatives2(clazz, singleFreeTupleDescMethods); + JNI_deleteLocalRef(clazz); + + clazz = (jclass)PgObject_getJavaClass( + "org/postgresql/pljava/internal/DualState$SingleHeapFreeTuple"); + PgObject_registerNatives2(clazz, singleHeapFreeTupleMethods); + JNI_deleteLocalRef(clazz); + + clazz = (jclass)PgObject_getJavaClass( + "org/postgresql/pljava/internal/DualState$SingleFreeErrorData"); + PgObject_registerNatives2(clazz, singleFreeErrorDataMethods); + JNI_deleteLocalRef(clazz); + + clazz = (jclass)PgObject_getJavaClass( + "org/postgresql/pljava/internal/DualState$SingleSPIfreeplan"); + PgObject_registerNatives2(clazz, singleSPIfreeplanMethods); + JNI_deleteLocalRef(clazz); + + clazz = (jclass)PgObject_getJavaClass( 
+ "org/postgresql/pljava/internal/DualState$SingleSPIcursorClose"); + PgObject_registerNatives2(clazz, singleSPIcursorCloseMethods); + JNI_deleteLocalRef(clazz); + + RegisterResourceReleaseCallback(resourceReleaseCB, NULL); + + /* + * Call initialize() methods of known classes built upon DualState. + */ + pljava_ErrorData_initialize(); + pljava_ExecutionPlan_initialize(); + pljava_Portal_initialize(); + pljava_Relation_initialize(); + pljava_SingleRowReader_initialize(); + pljava_SQLInputFromTuple_initialize(); + pljava_TriggerData_initialize(); + pljava_TupleDesc_initialize(); + pljava_Tuple_initialize(); + pljava_VarlenaWrapper_initialize(); +} + +void pljava_DualState_unregister(void) +{ + UnregisterResourceReleaseCallback(resourceReleaseCB, NULL); +} + +static void resourceReleaseCB(ResourceReleasePhase phase, + bool isCommit, bool isTopLevel, void *arg) +{ + /* + * The way ResourceOwnerRelease is implemented, callbacks to loadable + * modules (like us!) happen /after/ all of the built-in releasey actions + * for a particular phase. So, by looking for RESOURCE_RELEASE_LOCKS here, + * we actually end up executing after all the built-in lock-related stuff + * has been released, but before any of the built-in stuff released in the + * RESOURCE_RELEASE_AFTER_LOCKS phase. Which, at least for the currently + * implemented DualState subclasses, is about the right time. + */ + if ( RESOURCE_RELEASE_LOCKS != phase ) + return; + + pljava_DualState_nativeRelease(CurrentResourceOwner); + + if ( isTopLevel ) + Backend_warnJEP411(isCommit); +} + + + +/* + * Class: org_postgresql_pljava_internal_DualState_SinglePfree + * Method: _pfree + * Signature: (J)V + * + * Cadged from JavaWrapper.c + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_DualState_00024SinglePfree__1pfree( + JNIEnv* env, jobject _this, jlong pointer) +{ + BEGIN_NATIVE_NO_ERRCHECK + pfree(JLongGet(void *, pointer)); + END_NATIVE +} + + + +/* + * Class: org_postgresql_pljava_internal_DualState_SingleMemContextDelete + * Method: _memContextDelete + * Signature: (J)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_DualState_00024SingleMemContextDelete__1memContextDelete( + JNIEnv* env, jobject _this, jlong pointer) +{ + BEGIN_NATIVE_NO_ERRCHECK + MemoryContextDelete(JLongGet(MemoryContext, pointer)); + END_NATIVE +} + + + +/* + * Class: org_postgresql_pljava_internal_DualState_SingleFreeTupleDesc + * Method: _freeTupleDesc + * Signature: (J)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_DualState_00024SingleFreeTupleDesc__1freeTupleDesc( + JNIEnv* env, jobject _this, jlong pointer) +{ + BEGIN_NATIVE_NO_ERRCHECK + FreeTupleDesc(JLongGet(TupleDesc, pointer)); + END_NATIVE +} + + + +/* + * Class: org_postgresql_pljava_internal_DualState_SingleHeapFreeTuple + * Method: _heapFreeTuple + * Signature: (J)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_DualState_00024SingleHeapFreeTuple__1heapFreeTuple( + JNIEnv* env, jobject _this, jlong pointer) +{ + BEGIN_NATIVE_NO_ERRCHECK + heap_freetuple(JLongGet(HeapTuple, pointer)); + END_NATIVE +} + + + +/* + * Class: org_postgresql_pljava_internal_DualState_SingleFreeErrorData + * Method: _freeErrorData + * Signature: (J)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_DualState_00024SingleFreeErrorData__1freeErrorData( + JNIEnv* env, jobject _this, jlong pointer) +{ + BEGIN_NATIVE_NO_ERRCHECK + FreeErrorData(JLongGet(ErrorData *, pointer)); + END_NATIVE +} + + + +/* + * Class: 
org_postgresql_pljava_internal_DualState_SingleSPIfreeplan + * Method: _spiFreePlan + * Signature: (J)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_DualState_00024SingleSPIfreeplan__1spiFreePlan( + JNIEnv* env, jobject _this, jlong pointer) +{ + BEGIN_NATIVE_NO_ERRCHECK + PG_TRY(); + { + SPI_freeplan(JLongGet(SPIPlanPtr, pointer)); + } + PG_CATCH(); + { + Exception_throw_ERROR("SPI_freeplan"); + } + PG_END_TRY(); + END_NATIVE +} + + + +/* + * Class: org_postgresql_pljava_internal_DualState_SingleSPIcursorClose + * Method: _spiCursorClose + * Signature: (J)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_DualState_00024SingleSPIcursorClose__1spiCursorClose( + JNIEnv* env, jobject _this, jlong pointer) +{ + BEGIN_NATIVE_NO_ERRCHECK + PG_TRY(); + { + /* + * This code copied from its former location in Portal.c, for reasons + * not really explained there, is different from most of the other + * javaStateReleased actions here, by virtue of being conditional; it + * does nothing if the current Invocation's errorOccurred flag is set, + * or during an end-of-expression-context callback from the executor. + */ + if ( NULL != currentInvocation && ! currentInvocation->errorOccurred + && ! currentInvocation->inExprContextCB ) + SPI_cursor_close(JLongGet(Portal, pointer)); + } + PG_CATCH(); + { + Exception_throw_ERROR("SPI_cursor_close"); + } + PG_END_TRY(); + END_NATIVE +} diff --git a/pljava-so/src/main/c/Exception.c b/pljava-so/src/main/c/Exception.c index f37e9812..a6bfb673 100644 --- a/pljava-so/src/main/c/Exception.c +++ b/pljava-so/src/main/c/Exception.c @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -19,15 +25,19 @@ jclass Class_class; jmethodID Class_getName; +jmethodID Class_getCanonicalName; jclass ServerException_class; jmethodID ServerException_getErrorData; -jmethodID ServerException_init; +jmethodID ServerException_obtain; jclass Throwable_class; jmethodID Throwable_getMessage; jmethodID Throwable_printStackTrace; +static jclass UnhandledPGException_class; +static jmethodID UnhandledPGException_obtain; + jclass IllegalArgumentException_class; jmethodID IllegalArgumentException_init; @@ -38,8 +48,14 @@ jmethodID SQLException_getSQLState; jclass UnsupportedOperationException_class; jmethodID UnsupportedOperationException_init; +jclass NoSuchFieldError_class; jclass NoSuchMethodError_class; +bool Exception_isPGUnhandled(jthrowable ex) +{ + return JNI_isInstanceOf(ex, UnhandledPGException_class); +} + void Exception_featureNotSupported(const char* requestedFeature, const char* introVersion) { @@ -53,7 +69,13 @@ Exception_featureNotSupported(const char* requestedFeature, const char* introVer appendStringInfoString(&buf, requestedFeature); appendStringInfoString(&buf, " lacks support in PostgreSQL version "); appendStringInfo(&buf, "%d.%d", - PG_VERSION_NUM / 10000, (PG_VERSION_NUM / 100) % 100); + PG_VERSION_NUM / 10000, +#if PG_VERSION_NUM >= 100000 + (PG_VERSION_NUM) % 10000 +#else + (PG_VERSION_NUM / 100) % 100 +#endif + ); appendStringInfoString(&buf, ". It was introduced in version "); appendStringInfoString(&buf, introVersion); @@ -149,17 +171,34 @@ void Exception_throwSPI(const char* function, int errCode) SPI_result_code_string(errCode)); } +void Exception_throw_unhandled() +{ + jobject ex; + PG_TRY(); + { + ex = JNI_callStaticObjectMethodLocked( + UnhandledPGException_class, UnhandledPGException_obtain); + JNI_throw(ex); + } + PG_CATCH(); + { + elog(WARNING, "Exception while generating exception"); + } + PG_END_TRY(); +} + void Exception_throw_ERROR(const char* funcName) { jobject ex; PG_TRY(); { - jobject ed = ErrorData_getCurrentError(); + jobject ed = pljava_ErrorData_getCurrentError(); FlushErrorState(); - ex = JNI_newObject(ServerException_class, ServerException_init, ed); - currentInvocation->errorOccured = true; + ex = JNI_callStaticObjectMethodLocked( + ServerException_class, ServerException_obtain, ed); + currentInvocation->errorOccurred = true; elog(DEBUG2, "Exception in function %s", funcName); @@ -192,16 +231,29 @@ void Exception_initialize(void) UnsupportedOperationException_class = (jclass)JNI_newGlobalRef(PgObject_getJavaClass("java/lang/UnsupportedOperationException")); UnsupportedOperationException_init = PgObject_getJavaMethod(UnsupportedOperationException_class, "", "(Ljava/lang/String;)V"); + NoSuchFieldError_class = (jclass)JNI_newGlobalRef(PgObject_getJavaClass("java/lang/NoSuchFieldError")); NoSuchMethodError_class = (jclass)JNI_newGlobalRef(PgObject_getJavaClass("java/lang/NoSuchMethodError")); Class_getName = PgObject_getJavaMethod(Class_class, "getName", "()Ljava/lang/String;"); + Class_getCanonicalName = PgObject_getJavaMethod(Class_class, + "getCanonicalName", "()Ljava/lang/String;"); } extern void Exception_initialize2(void); void Exception_initialize2(void) { ServerException_class = 
		(jclass)JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/ServerException"));
-	ServerException_init = PgObject_getJavaMethod(ServerException_class, "<init>", "(Lorg/postgresql/pljava/internal/ErrorData;)V");
+	ServerException_obtain = PgObject_getStaticJavaMethod(
+		ServerException_class, "obtain",
+		"(Lorg/postgresql/pljava/internal/ErrorData;)"
+		"Lorg/postgresql/pljava/internal/ServerException;");
 	ServerException_getErrorData = PgObject_getJavaMethod(ServerException_class, "getErrorData", "()Lorg/postgresql/pljava/internal/ErrorData;");
+
+	UnhandledPGException_class = (jclass)JNI_newGlobalRef(
+		PgObject_getJavaClass(
+			"org/postgresql/pljava/internal/UnhandledPGException"));
+	UnhandledPGException_obtain = PgObject_getStaticJavaMethod(
+		UnhandledPGException_class, "obtain",
+		"()Lorg/postgresql/pljava/internal/UnhandledPGException;");
 }
diff --git a/pljava-so/src/main/c/ExecutionPlan.c b/pljava-so/src/main/c/ExecutionPlan.c
index 6f81a151..4ece3b11 100644
--- a/pljava-so/src/main/c/ExecutionPlan.c
+++ b/pljava-so/src/main/c/ExecutionPlan.c
@@ -1,8 +1,14 @@
 /*
- * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden
- * Distributed under the terms shown in the file COPYRIGHT
- * found in the root folder of this project or at
- * http://eng.tada.se/osprojects/COPYRIGHT.html
+ * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the The BSD 3-Clause License
+ * which accompanies this distribution, and is available at
+ * http://opensource.org/licenses/BSD-3-Clause
+ *
+ * Contributors:
+ *   Tada AB
+ *   Chapman Flack
 *
 * @author Thomas Hallgren
 */
@@ -11,6 +17,7 @@
 #include
 #include "org_postgresql_pljava_internal_ExecutionPlan.h"
+#include "pljava/DualState.h"
 #include "pljava/Invocation.h"
 #include "pljava/Exception.h"
 #include "pljava/Function.h"
@@ -28,16 +35,26 @@
 /* Class 07 - Dynamic SQL Error */
 #define ERRCODE_PARAMETER_COUNT_MISMATCH MAKE_SQLSTATE('0','7', '0','0','1')
+#define SPI_READONLY_DEFAULT \
+	org_postgresql_pljava_internal_ExecutionPlan_SPI_READONLY_DEFAULT
+#define SPI_READONLY_FORCED \
+	org_postgresql_pljava_internal_ExecutionPlan_SPI_READONLY_FORCED
+#define SPI_READONLY_CLEARED \
+	org_postgresql_pljava_internal_ExecutionPlan_SPI_READONLY_CLEARED
+
+static jclass s_ExecutionPlan_class;
+static jmethodID s_ExecutionPlan_init;
+
 /* Make this datatype available to the postgres system.
 */
-extern void ExecutionPlan_initialize(void);
-void ExecutionPlan_initialize(void)
+extern void pljava_ExecutionPlan_initialize(void);
+void pljava_ExecutionPlan_initialize(void)
 {
 	JNINativeMethod methods[] =
 	{
 		{
 			"_cursorOpen",
-			"(JJLjava/lang/String;[Ljava/lang/Object;)Lorg/postgresql/pljava/internal/Portal;",
+			"(JLjava/lang/String;[Ljava/lang/Object;S)Lorg/postgresql/pljava/internal/Portal;",
 			Java_org_postgresql_pljava_internal_ExecutionPlan__1cursorOpen
 		},
 		{
@@ -47,25 +64,28 @@
 		},
 		{
 			"_execute",
-			"(JJ[Ljava/lang/Object;I)I",
+			"(J[Ljava/lang/Object;SI)I",
 			Java_org_postgresql_pljava_internal_ExecutionPlan__1execute
 		},
 		{
 			"_prepare",
-			"(JLjava/lang/String;[Lorg/postgresql/pljava/internal/Oid;)J",
+			"(Ljava/lang/Object;Ljava/lang/String;[Lorg/postgresql/pljava/internal/Oid;)Lorg/postgresql/pljava/internal/ExecutionPlan;",
			Java_org_postgresql_pljava_internal_ExecutionPlan__1prepare
 		},
-		{
-			"_invalidate",
-			"(J)V",
-			Java_org_postgresql_pljava_internal_ExecutionPlan__1invalidate
-		},
 		{ 0, 0, 0 }
 	};
 	PgObject_registerNatives("org/postgresql/pljava/internal/ExecutionPlan", methods);
+
+	s_ExecutionPlan_class = (jclass)JNI_newGlobalRef(PgObject_getJavaClass(
+		"org/postgresql/pljava/internal/ExecutionPlan"));
+	s_ExecutionPlan_init = PgObject_getJavaMethod(s_ExecutionPlan_class,
+		"<init>",
+		"(Lorg/postgresql/pljava/internal/DualState$Key;J"
+		"Ljava/lang/Object;J)V");
 }
-static bool coerceObjects(void* ePlan, jobjectArray jvalues, Datum** valuesPtr, char** nullsPtr)
+static bool coerceObjects(
+	SPIPlanPtr ePlan, jobjectArray jvalues, Datum** valuesPtr, char** nullsPtr)
 {
 	char* nulls = 0;
 	Datum* values = 0;
@@ -91,7 +111,7 @@ static bool coerceObjects(void* ePlan, jobjectArray jvalues, Datum** valuesPtr,
 			jobject value = JNI_getObjectArrayElement(jvalues, idx);
 			if(value != 0)
 			{
-				values[idx] = Type_coerceObject(type, value);
+				values[idx] = Type_coerceObjectBridged(type, value);
 				JNI_deleteLocalRef(value);
 			}
 			else
@@ -119,33 +139,37 @@ static bool coerceObjects(void* ePlan, jobjectArray jvalues, Datum** valuesPtr,
 /*
 * Class:     org_postgresql_pljava_internal_ExecutionPlan
 * Method:    _cursorOpen
- * Signature: (JJLjava/lang/String;[Ljava/lang/Object;)Lorg/postgresql/pljava/internal/Portal;
+ * Signature: (JLjava/lang/String;[Ljava/lang/Object;S)Lorg/postgresql/pljava/internal/Portal;
 */
JNIEXPORT jobject JNICALL
-Java_org_postgresql_pljava_internal_ExecutionPlan__1cursorOpen(JNIEnv* env, jclass clazz, jlong _this, jlong threadId, jstring cursorName, jobjectArray jvalues)
+Java_org_postgresql_pljava_internal_ExecutionPlan__1cursorOpen(JNIEnv* env, jobject jplan, jlong _this, jstring cursorName, jobjectArray jvalues, jshort readonly_spec)
 {
 	jobject jportal = 0;
 	if(_this != 0)
 	{
 		BEGIN_NATIVE
 		STACK_BASE_VARS
-		STACK_BASE_PUSH(threadId)
+		STACK_BASE_PUSH(env)
 		PG_TRY();
 		{
-			Ptr2Long p2l;
+			SPIPlanPtr plan = JLongGet(SPIPlanPtr, _this);
 			Datum* values = 0;
 			char* nulls = 0;
-			p2l.longVal = _this;
-			if(coerceObjects(p2l.ptrVal, jvalues, &values, &nulls))
+			if(coerceObjects(plan, jvalues, &values, &nulls))
 			{
 				Portal portal;
 				char* name = 0;
+				bool read_only;
 				if(cursorName != 0)
 					name = String_createNTS(cursorName);
 				Invocation_assertConnect();
+				if ( SPI_READONLY_DEFAULT == readonly_spec )
+					read_only = Function_isCurrentReadOnly();
+				else
+					read_only = (SPI_READONLY_FORCED == readonly_spec);
 				portal = SPI_cursor_open(
-					name, p2l.ptrVal, values, nulls, Function_isCurrentReadOnly());
+					name, plan, values, nulls, read_only);
 				if(name != 0)
 					pfree(name);
 				if(values != 0)
@@ -153,7 
+177,7 @@ Java_org_postgresql_pljava_internal_ExecutionPlan__1cursorOpen(JNIEnv* env, jcla if(nulls != 0) pfree(nulls); - jportal = Portal_create(portal); + jportal = pljava_Portal_create(portal, jplan); } } PG_CATCH(); @@ -182,10 +206,8 @@ Java_org_postgresql_pljava_internal_ExecutionPlan__1isCursorPlan(JNIEnv* env, jc BEGIN_NATIVE PG_TRY(); { - Ptr2Long p2l; - p2l.longVal = _this; Invocation_assertConnect(); - result = (jboolean)SPI_is_cursor_plan(p2l.ptrVal); + result = (jboolean)SPI_is_cursor_plan(JLongGet(SPIPlanPtr, _this)); } PG_CATCH(); { @@ -200,28 +222,32 @@ Java_org_postgresql_pljava_internal_ExecutionPlan__1isCursorPlan(JNIEnv* env, jc /* * Class: org_postgresql_pljava_internal_ExecutionPlan * Method: _execute - * Signature: (JJ[Ljava/lang/Object;I)V + * Signature: (J[Ljava/lang/Object;SI)V */ JNIEXPORT jint JNICALL -Java_org_postgresql_pljava_internal_ExecutionPlan__1execute(JNIEnv* env, jclass clazz, jlong _this, jlong threadId, jobjectArray jvalues, jint count) +Java_org_postgresql_pljava_internal_ExecutionPlan__1execute(JNIEnv* env, jclass clazz, jlong _this, jobjectArray jvalues, jshort readonly_spec, jint count) { jint result = 0; if(_this != 0) { BEGIN_NATIVE STACK_BASE_VARS - STACK_BASE_PUSH(threadId) + STACK_BASE_PUSH(env) PG_TRY(); { - Ptr2Long p2l; + SPIPlanPtr plan = JLongGet(SPIPlanPtr, _this); Datum* values = 0; char* nulls = 0; - p2l.longVal = _this; - if(coerceObjects(p2l.ptrVal, jvalues, &values, &nulls)) + if(coerceObjects(plan, jvalues, &values, &nulls)) { + bool read_only; Invocation_assertConnect(); + if ( SPI_READONLY_DEFAULT == readonly_spec ) + read_only = Function_isCurrentReadOnly(); + else + read_only = (SPI_READONLY_FORCED == readonly_spec); result = (jint)SPI_execute_plan( - p2l.ptrVal, values, nulls, Function_isCurrentReadOnly(), (int)count); + plan, values, nulls, read_only, (int)count); if(result < 0) Exception_throwSPI("execute_plan", result); @@ -245,19 +271,20 @@ Java_org_postgresql_pljava_internal_ExecutionPlan__1execute(JNIEnv* env, jclass /* * Class: org_postgresql_pljava_internal_ExecutionPlan * Method: _prepare - * Signature: (JLjava/lang/String;[Lorg/postgresql/pljava/internal/Oid;)J; + * Signature: (Ljava/lang/Object;Ljava/lang/String;[Lorg/postgresql/pljava/internal/Oid;)Lorg/postgresql/pljava/internal/ExecutionPlan; */ -JNIEXPORT jlong JNICALL -Java_org_postgresql_pljava_internal_ExecutionPlan__1prepare(JNIEnv* env, jclass clazz, jlong threadId, jstring jcmd, jobjectArray paramTypes) +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_internal_ExecutionPlan__1prepare(JNIEnv* env, jclass clazz, jobject key, jstring jcmd, jobjectArray paramTypes) { - jlong result = 0; + jobject result = 0; + int spi_ret; BEGIN_NATIVE STACK_BASE_VARS - STACK_BASE_PUSH(threadId) + STACK_BASE_PUSH(env) PG_TRY(); { char* cmd; - void* ePlan; + SPIPlanPtr ePlan; int paramCount = 0; Oid* paramOids = 0; @@ -286,14 +313,16 @@ Java_org_postgresql_pljava_internal_ExecutionPlan__1prepare(JNIEnv* env, jclass Exception_throwSPI("prepare", SPI_result); else { - Ptr2Long p2l; - /* Make the plan durable */ - p2l.longVal = 0L; /* ensure that the rest is zeroed out */ - p2l.ptrVal = SPI_saveplan(ePlan); - result = p2l.longVal; - SPI_freeplan(ePlan); /* Get rid of the original, nobody can see it anymore */ + spi_ret = SPI_keepplan(ePlan); + if ( 0 != spi_ret ) + Exception_throwSPI("keepplan", spi_ret); + + result = JNI_newObjectLocked( + s_ExecutionPlan_class, s_ExecutionPlan_init, + /* (jlong)0 as resource owner: the saved plan isn't transient */ + 
pljava_DualState_key(), (jlong)0, key, PointerGetJLong(ePlan)); } } PG_CATCH(); @@ -305,32 +334,3 @@ Java_org_postgresql_pljava_internal_ExecutionPlan__1prepare(JNIEnv* env, jclass END_NATIVE return result; } - -/* - * Class: org_postgresql_pljava_internal_ExecutionPlan - * Method: _invalidate - * Signature: (J)V - */ -JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_ExecutionPlan__1invalidate(JNIEnv* env, jclass clazz, jlong _this) -{ - /* The plan is not cached as a normal JavaHandle since its made - * persistent. - */ - if(_this != 0) - { - BEGIN_NATIVE_NO_ERRCHECK - PG_TRY(); - { - Ptr2Long p2l; - p2l.longVal = _this; - SPI_freeplan(p2l.ptrVal); - } - PG_CATCH(); - { - Exception_throw_ERROR("SPI_freeplan"); - } - PG_END_TRY(); - END_NATIVE - } -} diff --git a/pljava-so/src/main/c/Function.c b/pljava-so/src/main/c/Function.c index c8f8c495..8a03d65d 100644 --- a/pljava-so/src/main/c/Function.c +++ b/pljava-so/src/main/c/Function.c @@ -1,11 +1,17 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack */ +#include "org_postgresql_pljava_internal_Function.h" +#include "org_postgresql_pljava_internal_Function_EarlyNatives.h" #include "pljava/PgObject_priv.h" #include "pljava/Exception.h" #include "pljava/InstallHelper.h" @@ -13,6 +19,8 @@ #include "pljava/Function.h" #include "pljava/HashMap.h" #include "pljava/Iterator.h" +#include "pljava/JNICalls.h" +#include "pljava/type/Composite.h" #include "pljava/type/Oid.h" #include "pljava/type/String.h" #include "pljava/type/TriggerData.h" @@ -20,13 +28,20 @@ #include +#include #include #include #include #include #include +#ifndef pg_unreachable #define pg_unreachable() abort() +#endif + +#if PG_VERSION_NUM >= 160000 +#define PG_FUNCNAME_MACRO __func__ +#endif #ifdef _MSC_VER # define strcasecmp _stricmp @@ -35,12 +50,37 @@ #define PARAM_OIDS(procStruct) (procStruct)->proargtypes.values -static jclass s_Loader_class; -static jclass s_ClassLoader_class; -static jmethodID s_Loader_getSchemaLoader; -static jmethodID s_Loader_getTypeMap; -static jmethodID s_ClassLoader_loadClass; +#define COUNTCHECK(refs, prims) ((jshort)(((refs) << 8) | ((prims) & 0xff))) + +jobject pljava_Function_NO_LOADER; + +static jclass s_Function_class; +static jclass s_ParameterFrame_class; +static jclass s_EntryPoints_class; +static jmethodID s_Function_create; +static jmethodID s_Function_getClassIfUDT; +static jmethodID s_Function_udtReadHandle; +static jmethodID s_Function_udtParseHandle; +static jmethodID s_Function_udtWriteHandle; +static jmethodID s_Function_udtToStringHandle; +static jmethodID s_ParameterFrame_push; +static jmethodID s_ParameterFrame_pop; +static jmethodID s_EntryPoints_invoke; +static jmethodID s_EntryPoints_udtWriteInvoke; +static jmethodID s_EntryPoints_udtToStringInvoke; +static jmethodID s_EntryPoints_udtReadInvoke; +static jmethodID s_EntryPoints_udtParseInvoke; static PgObjectClass s_FunctionClass; +static Type s_pgproc_Type; + +static inline Datum 
invokeTrigger(Function self, PG_FUNCTION_ARGS); + +static jobjectArray s_referenceParameters; +static jvalue s_primitiveParameters [ 1 + 255 ]; + +static jshort * const s_countCheck = + (jshort *)(((char *)s_primitiveParameters) + + org_postgresql_pljava_internal_Function_s_offset_paramCounts); struct Function_ { @@ -64,6 +104,12 @@ struct Function_ */ jclass clazz; + /** + * Global reference to the class loader for the schema in which this + * function is declared. + */ + jobject schemaLoader; + union { struct @@ -75,9 +121,14 @@ struct Function_ bool isMultiCall; /* - * The number of parameters + * The number of reference parameters + */ + uint16 numRefParams; + + /* + * The number of primitive parameters */ - int32 numParams; + uint16 numPrimParams; /* * Array containing one type for eeach parameter. @@ -92,14 +143,17 @@ struct Function_ /* * The type map used when mapping parameter and return types. We * need to store it here in order to cope with dynamic types (any - * and anyarray) + * and anyarray). This is now slightly redundant, as it could be got + * from schemaLoader at the cost of a couple JNI calls, but this was + * here first. */ jobject typeMap; - /* - * The static method that should be called. + /** + * EntryPoints.Invocable to the resolved Java method implementing + * the function. */ - jmethodID method; + jobject invocable; } nonudt; struct @@ -125,29 +179,16 @@ static struct Function_ s_initWriter; Function Function_INIT_WRITER = &s_initWriter; -typedef struct ParseResultData -{ - char* buffer; /* The buffer to pfree once we are done */ - const char* returnType; - const char* className; - const char* methodName; - const char* parameters; - bool isUDT; -} ParseResultData; - -typedef ParseResultData *ParseResult; - static HashMap s_funcMap = 0; -static jclass s_Loader_class; -static jmethodID s_Loader_getSchemaLoader; - static void _Function_finalize(PgObject func) { Function self = (Function)func; JNI_deleteGlobalRef(self->clazz); + JNI_deleteGlobalRef(self->schemaLoader); if(!self->isUDT) { + JNI_deleteGlobalRef(self->func.nonudt.invocable); if(self->func.nonudt.typeMap != 0) JNI_deleteGlobalRef(self->func.nonudt.typeMap); if(self->func.nonudt.paramTypes != 0) @@ -158,287 +199,368 @@ static void _Function_finalize(PgObject func) extern void Function_initialize(void); void Function_initialize(void) { + JNINativeMethod earlyMethods[] = + { + { + "_parameterArea", + "([Ljava/lang/Object;)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_internal_Function_00024EarlyNatives__1parameterArea + }, + { 0, 0, 0 } + }; + + JNINativeMethod functionMethods[] = + { + { + "_storeToNonUDT", + "(JLjava/lang/ClassLoader;Ljava/lang/Class;ZZLjava/util/Map;IILjava/lang/String;[I[Ljava/lang/String;[Ljava/lang/String;)Z", + Java_org_postgresql_pljava_internal_Function__1storeToNonUDT + }, + { + "_storeToUDT", + "(JLjava/lang/ClassLoader;Ljava/lang/Class;ZII" + ")V", + Java_org_postgresql_pljava_internal_Function__1storeToUDT + }, + { + "_reconcileTypes", + "(J[Ljava/lang/String;[Ljava/lang/String;I)V", + Java_org_postgresql_pljava_internal_Function__1reconcileTypes + }, + { 0, 0, 0 } + }; + + jclass cls; + jfieldID fld; + + StaticAssertStmt(org_postgresql_pljava_internal_Function_s_sizeof_jvalue + == sizeof (jvalue), "Function.java has wrong size for Java JNI jvalue"); + s_funcMap = HashMap_create(59, TopMemoryContext); - - s_Loader_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/sqlj/Loader")); - s_Loader_getSchemaLoader = PgObject_getStaticJavaMethod(s_Loader_class, 
"getSchemaLoader", "(Ljava/lang/String;)Ljava/lang/ClassLoader;"); - s_Loader_getTypeMap = PgObject_getStaticJavaMethod(s_Loader_class, "getTypeMap", "(Ljava/lang/String;)Ljava/util/Map;"); - s_ClassLoader_class = JNI_newGlobalRef(PgObject_getJavaClass("java/lang/ClassLoader")); - s_ClassLoader_loadClass = PgObject_getJavaMethod(s_ClassLoader_class, "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;"); + cls = PgObject_getJavaClass( + "org/postgresql/pljava/internal/Function$EarlyNatives"); + PgObject_registerNatives2(cls, earlyMethods); + JNI_deleteLocalRef(cls); + + s_ParameterFrame_class = JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/internal/Function$ParameterFrame")); + s_ParameterFrame_push = PgObject_getStaticJavaMethod(s_ParameterFrame_class, + "push", "()V"); + s_ParameterFrame_pop = PgObject_getStaticJavaMethod(s_ParameterFrame_class, + "pop", "()V"); + + s_Function_class = JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/internal/Function")); + s_Function_create = PgObject_getStaticJavaMethod(s_Function_class, "create", + "(JLjava/sql/ResultSet;Ljava/lang/String;Ljava/lang/String;ZZZZ)" + "Lorg/postgresql/pljava/internal/EntryPoints$Invocable;"); + s_Function_getClassIfUDT = PgObject_getStaticJavaMethod(s_Function_class, + "getClassIfUDT", + "(Ljava/sql/ResultSet;Ljava/lang/String;)" + "Ljava/lang/Class;"); + + s_EntryPoints_class = JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/internal/EntryPoints")); + s_EntryPoints_invoke = PgObject_getStaticJavaMethod( + s_EntryPoints_class, + "invoke", + "(Lorg/postgresql/pljava/internal/EntryPoints$Invocable;)" + "Ljava/lang/Object;"); + + s_EntryPoints_udtWriteInvoke = PgObject_getStaticJavaMethod( + s_EntryPoints_class, + "udtWriteInvoke", + "(Lorg/postgresql/pljava/internal/EntryPoints$Invocable;" + "Ljava/sql/SQLData;Ljava/sql/SQLOutput;" + ")V"); + s_EntryPoints_udtToStringInvoke = PgObject_getStaticJavaMethod( + s_EntryPoints_class, + "udtToStringInvoke", + "(Lorg/postgresql/pljava/internal/EntryPoints$Invocable;" + "Ljava/sql/SQLData;)Ljava/lang/String;"); + s_EntryPoints_udtReadInvoke = PgObject_getStaticJavaMethod( + s_EntryPoints_class, + "udtReadInvoke", + "(Lorg/postgresql/pljava/internal/EntryPoints$Invocable;" + "Ljava/sql/SQLInput;" + "Ljava/lang/String;)Ljava/sql/SQLData;"); + s_EntryPoints_udtParseInvoke = PgObject_getStaticJavaMethod( + s_EntryPoints_class, + "udtParseInvoke", + "(Lorg/postgresql/pljava/internal/EntryPoints$Invocable;" + "Ljava/lang/String;" + "Ljava/lang/String;)Ljava/sql/SQLData;"); + + s_Function_udtReadHandle = PgObject_getStaticJavaMethod(s_Function_class, + "udtReadHandle", "(Ljava/lang/Class;Ljava/lang/String;Z)" + "Lorg/postgresql/pljava/internal/EntryPoints$Invocable;"); + s_Function_udtParseHandle = PgObject_getStaticJavaMethod(s_Function_class, + "udtParseHandle", "(Ljava/lang/Class;Ljava/lang/String;Z)" + "Lorg/postgresql/pljava/internal/EntryPoints$Invocable;"); + s_Function_udtWriteHandle = PgObject_getStaticJavaMethod(s_Function_class, + "udtWriteHandle", "(Ljava/lang/Class;Ljava/lang/String;Z)" + "Lorg/postgresql/pljava/internal/EntryPoints$Invocable;"); + s_Function_udtToStringHandle = + PgObject_getStaticJavaMethod(s_Function_class, + "udtToStringHandle", "(Ljava/lang/Class;Ljava/lang/String;Z)" + "Lorg/postgresql/pljava/internal/EntryPoints$Invocable;"); + + PgObject_registerNatives2(s_Function_class, functionMethods); + + cls = PgObject_getJavaClass("org/postgresql/pljava/sqlj/Loader"); + fld = PgObject_getStaticJavaField(cls, + 
"SENTINEL", "Ljava/lang/ClassLoader;"); + pljava_Function_NO_LOADER = + JNI_newGlobalRef(JNI_getStaticObjectField(cls, fld)); + JNI_deleteLocalRef(cls); s_FunctionClass = PgObjectClass_create("Function", sizeof(struct Function_), _Function_finalize); + + s_pgproc_Type = Composite_obtain(ProcedureRelation_Rowtype_Id); } -static void buildSignature(Function self, StringInfo sign, Type retType, bool alt) +jobject pljava_Function_refInvoke(Function self) { - Type* tp = self->func.nonudt.paramTypes; - Type* ep = tp + self->func.nonudt.numParams; - - appendStringInfoChar(sign, '('); - while(tp < ep) - appendStringInfoString(sign, Type_getJNISignature(*tp++)); - - if(!self->func.nonudt.isMultiCall && Type_isOutParameter(retType)) - appendStringInfoString(sign, Type_getJNISignature(retType)); - - appendStringInfoChar(sign, ')'); - appendStringInfoString(sign, Type_getJNIReturnSignature(retType, self->func.nonudt.isMultiCall, alt)); + return JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_invoke, self->func.nonudt.invocable); } -static void parseParameters(Function self, Oid* dfltIds, const char* paramDecl) +void pljava_Function_voidInvoke(Function self) { - char c; - int idx = 0; - int top = self->func.nonudt.numParams; - bool lastIsOut = !self->func.nonudt.isMultiCall - && Type_isOutParameter(self->func.nonudt.returnType); - StringInfoData sign; - Type deflt; - const char* jtName; - bool gotone = false; - for( ; ; ++ paramDecl ) - { - c = *paramDecl; - /* all whitespace has already been stripped by getAS() */ + JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_invoke, self->func.nonudt.invocable); +} - if ( '\0' != c && ',' != c ) - { - if ( ! gotone ) /* first character of a param type has been seen. */ - { - if(idx >= top) - { - if(!(lastIsOut && idx == top)) - ereport(ERROR, ( - errcode(ERRCODE_SYNTAX_ERROR), - errmsg("AS (Java): expected %d parameter types, " - "found more", top))); - } - gotone = true; - initStringInfo(&sign); - } - appendStringInfoChar(&sign, c); - continue; - } +jboolean pljava_Function_booleanInvoke(Function self) +{ + JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_invoke, self->func.nonudt.invocable); + return s_primitiveParameters[0].z; +} - if ( ! gotone ) - { - if ( '\0' == c ) - break; - ereport(ERROR, ( - errcode(ERRCODE_SYNTAX_ERROR), - errmsg("AS (Java): expected parameter type, found comma"))); - } +jbyte pljava_Function_byteInvoke(Function self) +{ + JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_invoke, self->func.nonudt.invocable); + return s_primitiveParameters[0].b; +} - /* so, got one. */ - deflt = (idx == top) - ? self->func.nonudt.returnType : self->func.nonudt.paramTypes[idx]; - jtName = Type_getJavaTypeName(deflt); - if ( strcmp(jtName, sign.data) != 0 ) - { - Oid did; - Type repl; - if(idx == top) - /* - * Last parameter is the OUT parameter. It has no corresponding - * entry in the dfltIds array. 
- */ - did = InvalidOid; - else - did = dfltIds[idx]; +jshort pljava_Function_shortInvoke(Function self) +{ + JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_invoke, self->func.nonudt.invocable); + return s_primitiveParameters[0].s; +} - repl = Type_fromJavaType(did, sign.data); - if(!Type_canReplaceType(repl, deflt)) - repl = Type_getCoerceIn(repl, deflt); +jchar pljava_Function_charInvoke(Function self) +{ + JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_invoke, self->func.nonudt.invocable); + return s_primitiveParameters[0].c; +} - if(idx == top) - self->func.nonudt.returnType = repl; - else - self->func.nonudt.paramTypes[idx] = repl; - } - pfree(sign.data); +jint pljava_Function_intInvoke(Function self) +{ + JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_invoke, self->func.nonudt.invocable); + return s_primitiveParameters[0].i; +} - ++idx; - if ( '\0' == c ) - break; - gotone = false; - } +jfloat pljava_Function_floatInvoke(Function self) +{ + JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_invoke, self->func.nonudt.invocable); + return s_primitiveParameters[0].f; +} - /* - * We are done. - */ - if(lastIsOut) - ++top; - if(idx != top) - ereport(ERROR, ( - errcode(ERRCODE_SYNTAX_ERROR), - errmsg("AS (Java): expected %d parameter types, found fewer", - top))); -} - -static char* getAS(HeapTuple procTup, char** epHolder) -{ - char c; - char* cp1; - char* cp2; - char* bp; - bool atStart = true; - bool passedFirst = false; - bool isNull = false; - Datum tmp = SysCacheGetAttr(PROCOID, procTup, Anum_pg_proc_prosrc, &isNull); - if(isNull) - { - ereport(ERROR, ( - errcode(ERRCODE_SYNTAX_ERROR), - errmsg("'AS' clause of Java function cannot be NULL"))); - } +jlong pljava_Function_longInvoke(Function self) +{ + JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_invoke, self->func.nonudt.invocable); + return s_primitiveParameters[0].j; +} - bp = pstrdup(DatumGetCString(DirectFunctionCall1(textout, tmp))); +jdouble pljava_Function_doubleInvoke(Function self) +{ + JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_invoke, self->func.nonudt.invocable); + return s_primitiveParameters[0].d; +} - /* Strip all whitespace except the first one if it occures after - * some alpha numeric characers and before some other alpha numeric - * characters. We insert a '=' when that happens since it delimits - * the return value from the method name. +/* + * 'Reserve' the static parameter frame for (refArgCount,primArgCount) reference + * and primitive parameters, respectively, pushing temporarily out of the way + * any current contents, detected by a non-(0,0) existing reservation. Returns + * the sum of its two arguments. + * + * The corresponding pop of the earlier contents will happen at + * Invocation_popInvocation time, so this scheme is only appropriately used for + * calls that happen within the scope of an Invocation, as conventional PL/Java + * function calls do. + * + * It is possible to reserve (0,0) space, though no existing frame will be + * saved/restored in that case. Caution: the two count arguments here count only + * parameters, not the possibility that a primitive-returning function uses a + * slot in the frame for its return. The primitive call wrappers must make their + * own arrangements to save the typically-only-one-jvalue affected by that use + * and restore it on both normal and exceptional return paths. 
To streamline the + * most common case, Invocation_{push,pop}Invocation will unconditionally save + * the first jvalue slot, and restore it if the more heavyweight frame-pushing + * has not been used. That spares a primitive call wrapper the cycles of + * managing another PG_TRY block. Any wrapper that will use more than the first + * jvalue slot for returns, though, must handle its own normal and exceptional + * cleanup. + */ +static inline jsize +reserveParameterFrame(jsize refArgCount, jsize primArgCount) +{ + jshort newCounts = COUNTCHECK(refArgCount, primArgCount); + + /* The *s_countCheck field in the parameter area will be zero unless + * this is a recursive invocation (believed only possible via a UDT + * function called while converting the parameters for some outer + * invocation). It could also be zero if this is a recursive invocation + * but the outer one involves no parameters, which won't happen if UDT + * conversion for a parameter is the only way to get here, and even if + * it happens, we still don't need to save its frame because there is + * nothing there that we'll clobber. */ - cp1 = cp2 = bp; - while((c = *cp1++) != 0) + if ( 0 != newCounts && 0 != *s_countCheck ) { - if(isspace(c)) - { - if(atStart || passedFirst) - continue; - - while((c = *cp1++) != 0) - if(!isspace(c)) - break; - - if(c == 0) - break; - - if(isalpha(c)) - *cp2++ = '='; - passedFirst = true; - } - atStart = false; - if(!isalnum(c)) - passedFirst = true; - *cp2++ = c; + JNI_callStaticVoidMethodLocked( + s_ParameterFrame_class, s_ParameterFrame_push); + /* Record, in currentInvocation, that a frame was pushed; the pop + * will happen in Invocation_popInvocation, which our caller + * arranges for both normal return and PG_CATCH cases. + */ + currentInvocation->frameLimits = FRAME_LIMITS_PUSHED; } - *cp2 = 0; - *epHolder = cp2; - return bp; + *s_countCheck = newCounts; + + return refArgCount + primArgCount; } -static void parseUDT(ParseResult info, char* bp, char* ep) +/* + * This should happen everywhere reserveParameterFrame happens, but is factored + * out to allow a couple of call sites to optimize out one or the other. As with + * reserveParameterFrame, the undoing of this happens in popFrame below. + * + * currentInvocation->savedLoader can have a "not known" value (which has to be + * distinct from null, because null is a perfectly cromulent context classloader + * as far as Java is concerned). Leaving it that way will mean no restoration at + * popInvocation time. The loaderUpdater may leave it that way in some cases, + * in a bid to reduce overhead if the same loader's wanted again. + */ +static inline void +installContextLoader(Function self) { - char* ip = ep - 1; - while(ip > bp && *ip != ']') - --ip; - - if(ip == bp) - { - ereport(ERROR, ( - errcode(ERRCODE_SYNTAX_ERROR), - errmsg("Missing ending ']' in UDT declaration"))); - } - *ip = 0; /* Terminate class name */ - info->className = bp; - info->methodName = ip + 1; - info->isUDT = true; + (*JNI_loaderUpdater)(self->schemaLoader); } /* - * Zeros info before setting any of its fields. + * Not intended for any caller but Invocation_popInvocation. */ -static void parseFunction(ParseResult info, HeapTuple procTup) +void pljava_Function_popFrame(bool heavy) { - /* The user's function definition must be the fully - * qualified name of a java method short of parameter - * signature. 
- */ - char* ip; - char* ep; - char* bp = getAS(procTup, &ep); + if ( heavy ) + JNI_callStaticVoidMethod(s_ParameterFrame_class, s_ParameterFrame_pop); + + if ( pljava_Function_NO_LOADER == currentInvocation->savedLoader ) + return; - memset(info, 0, sizeof(ParseResultData)); - info->buffer = bp; + (*JNI_loaderRestorer)(); +} - /* The AS clause can have two formats +/* + * Invoke an Invocable that was obtained by invoking an Invocable for a + * set-returning-function that returns results in value-per-call style. + * Pass true for 'close' when no more results are wanted. Will always overwrite + * *result; check the boolean return value to determine whether that is a real + * result (true) or the end of results was reached (false). + */ +jboolean pljava_Function_vpcInvoke( + Function self, jobject invocable, jobject rowcollect, jlong call_cntr, + jboolean close, jobject *result) +{ + /* + * When retrieving the very first row, this call happens under the same + * Invocation as the call to the user function itself that returned this + * invocable (and may, rarely, have pushed a ParameterFrame). What does + * the reservation below imply for ParameterFrame management? * - * "." [ "(" ["," ... ] ")" ] - * or - * "UDT" "[" "]" - * where is one of "input", "output", "receive" or "send" + * It's ok, because the user function's invocation will have cleared the + * static area parameter counts; this reservation will therefore not see a + * need to push a frame. If one was pushed for the user function itself, it + * remains on top, to be popped when the Invocation is. */ - if(ep - bp >= 4 && strncasecmp(bp, "udt[", 4) == 0) - { - parseUDT(info, bp + 4, ep); - return; - } + reserveParameterFrame(1, 2); + + JNI_setObjectArrayElement(s_referenceParameters, 0, rowcollect); + s_primitiveParameters[0].j = call_cntr; + s_primitiveParameters[1].z = close; + *result = JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_invoke, invocable); + return s_primitiveParameters[0].z; +} - info->isUDT = false; +void pljava_Function_udtWriteInvoke( + jobject invocable, jobject value, jobject stream) +{ + JNI_callStaticVoidMethod(s_EntryPoints_class, + s_EntryPoints_udtWriteInvoke, invocable, value, stream); +} - /* Scan backwards from ep. - */ - ip = ep - 1; - if(*ip == ')') - { - /* We have an explicit parameter type declaration - */ - *ip-- = 0; - while(ip > bp && *ip != '(') - --ip; +jstring pljava_Function_udtToStringInvoke(jobject invocable, jobject value) +{ + return JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_udtToStringInvoke, invocable, value); +} - if(ip == bp) - { - ereport(ERROR, ( - errcode(ERRCODE_SYNTAX_ERROR), - errmsg("Unbalanced parenthesis"))); - } +jobject pljava_Function_udtReadInvoke( + jobject invocable, jobject stream, jstring typeName) +{ + return JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_udtReadInvoke, invocable, stream, typeName); +} - info->parameters = ip + 1; - *ip-- = 0; - } +jobject pljava_Function_udtParseInvoke( + jobject parseInvocable, jstring stringRep, jstring typeName) +{ + return JNI_callStaticObjectMethod(s_EntryPoints_class, + s_EntryPoints_udtParseInvoke, parseInvocable, stringRep, typeName); +} - /* Find last '.' occurrence. 
- */ - while(ip > bp && *ip != '.') - --ip; +static jobject obtainUDTHandle( + jmethodID which, jclass clazz, char *langName, bool trusted); - if(ip == bp) - { - ereport(ERROR, ( - errcode(ERRCODE_SYNTAX_ERROR), - errmsg("Did not find ."))); - } - info->methodName = ip + 1; - *ip = 0; - - /* Check if we have a return type declaration - */ - while(--ip > bp) - { - if(*ip == '=') - { - info->className = ip + 1; - *ip = 0; - break; - } - } +jobject pljava_Function_udtReadHandle( + jclass clazz, char *langName, bool trusted) +{ + return obtainUDTHandle( + s_Function_udtReadHandle, clazz, langName, trusted); +} - if(info->className != 0) - info->returnType = bp; - else - info->className = bp; +jobject pljava_Function_udtWriteHandle( + jclass clazz, char *langName, bool trusted) +{ + return obtainUDTHandle( + s_Function_udtWriteHandle, clazz, langName, trusted); +} - elog(DEBUG3, "className = '%s', methodName = '%s', parameters = '%s', returnType = '%s'", - info->className == 0 ? "null" : info->className, - info->methodName == 0 ? "null" : info->methodName, - info->parameters == 0 ? "null" : info->parameters, - info->returnType == 0 ? "null" : info->returnType); +static inline jobject +obtainUDTHandle( + jmethodID which, jclass clazz, char *langName, bool trusted) +{ + jstring jname = String_createJavaStringFromNTS(langName); + jobject result = JNI_callStaticObjectMethod(s_Function_class, + which, clazz, jname, trusted ? JNI_TRUE : JNI_FALSE); + JNI_deleteLocalRef(jname); + return result; } -static jstring getSchemaName(int namespaceOid) +static inline jstring +getSchemaName(int namespaceOid) { HeapTuple nspTup = PgObject_getValidTuple(NAMESPACEOID, namespaceOid, "namespace"); Form_pg_namespace nspStruct = (Form_pg_namespace)GETSTRUCT(nspTup); @@ -447,284 +569,269 @@ static jstring getSchemaName(int namespaceOid) return schemaName; } -static void setupTriggerParams(Function self, ParseResult info) -{ - if(info->parameters != 0) - ereport(ERROR, ( - errcode(ERRCODE_SYNTAX_ERROR), - errmsg("Triggers can not have a java parameter declaration"))); - - self->func.nonudt.returnType = Type_fromJavaType(InvalidOid, "void"); - - /* Parameters are not used when calling triggers. - */ - self->func.nonudt.numParams = 1; - self->func.nonudt.paramTypes = (Type*)MemoryContextAlloc(GetMemoryChunkContext(self), sizeof(Type)); - self->func.nonudt.paramTypes[0] = Type_fromJavaType(InvalidOid, "org.postgresql.pljava.TriggerData"); -} - -static void setupUDT(Function self, ParseResult info, Form_pg_proc procStruct) +/* + * This checks specifically for a "Java-based scalar" a/k/a BaseUDT, + * and will call UDT_registerUDT accordingly if it is. 
+ */ +Type Function_checkTypeBaseUDT(Oid typeId, Form_pg_type typeStruct) { - Oid udtId = 0; - HeapTuple typeTup; - Form_pg_type pgType; + HeapTuple procTup; + Datum d; + Form_pg_proc procStruct; + Type t = NULL; + jstring schemaName; + jclass clazz = NULL; + jclass t_clazz = NULL; - if(strcasecmp("input", info->methodName) == 0) + Oid procId[4] = { - self->func.udt.udtFunction = UDT_input; - udtId = procStruct->prorettype; - } - else if(strcasecmp("output", info->methodName) == 0) + typeStruct->typinput, typeStruct->typreceive, + typeStruct->typsend, typeStruct->typoutput + }; + jmethodID getter[4] = { - self->func.udt.udtFunction = UDT_output; - udtId = PARAM_OIDS(procStruct)[0]; - } - else if(strcasecmp("receive", info->methodName) == 0) + s_Function_udtParseHandle, s_Function_udtReadHandle, + s_Function_udtWriteHandle, s_Function_udtToStringHandle + }; + char *langName[4] = { NULL, NULL, NULL, NULL }; + bool trusted[4]; + jobject handle[4] = { NULL, NULL, NULL, NULL }; + int i; + + for ( i = 0; i < 4; ++ i ) { - self->func.udt.udtFunction = UDT_receive; - udtId = procStruct->prorettype; + if ( ! InstallHelper_isPLJavaFunction( + procId[i], &langName[i], &trusted[i]) ) + break; } - else if(strcasecmp("send", info->methodName) == 0) + + /* + * If that loop did not find all four support functions to be PL/Java ones, + * we have struck out; pfree anything it did find and return the bad news. + */ + if ( i < 4 ) { - self->func.udt.udtFunction = UDT_send; - udtId = PARAM_OIDS(procStruct)[0]; + for ( ; i >= 0 ; -- i ) + if ( NULL != langName[i] ) + pfree(langName[i]); + return NULL; } - else + + /* + * At this point, it is looking like a PL/Java BaseUDT; we have the four + * support function oids and the language names and trusted flags to go + * with them. + * + * Must still confirm that (1) each one is declared with a UDT[classname] + * style of AS string (getClassIfUDT returns null if not), (2) the named + * class inherits from SQLData (ClassCastException happens if not), and + * (3) that's the same class for all four of them (bail from this loop + * and goto classMismatch if not). We'll also consider it a classMismatch + * if some but not all getClassIfUDT results are null. + * + * Provided none of that goes wrong, obtain their handles in this loop too. + */ + for ( i = 0; i < 4; ++ i ) { - ereport(ERROR, ( - errcode(ERRCODE_SYNTAX_ERROR), - errmsg("Unknown UDT function %s", info->methodName))); - } + /* + * Get the pg_proc info corresponding to support function i, + * needed by getClassIfUDT(). 
+ */ + procTup = PgObject_getValidTuple(PROCOID, procId[i], "function"); + procStruct = (Form_pg_proc)GETSTRUCT(procTup); + schemaName = getSchemaName(procStruct->pronamespace); + d = heap_copy_tuple_as_datum( + procTup, Type_getTupleDesc(s_pgproc_Type, 0)); - typeTup = PgObject_getValidTuple(TYPEOID, udtId, "type"); - pgType = (Form_pg_type)GETSTRUCT(typeTup); - self->func.udt.udt = UDT_registerUDT(self->clazz, udtId, pgType, 0, true); - ReleaseSysCache(typeTup); -} + t_clazz = (jclass)JNI_callStaticObjectMethod(s_Function_class, + s_Function_getClassIfUDT, Type_coerceDatum(s_pgproc_Type, d), + schemaName); -static jclass Function_loadClass(jstring schemaName, char const *className); + pfree((void *)d); + JNI_deleteLocalRef(schemaName); + ReleaseSysCache(procTup); -Type Function_checkTypeUDT(Oid typeId, Form_pg_type typeStruct) -{ - ParseResultData info; - HeapTuple procTup; - Form_pg_proc procStruct; - Type t = NULL; - jstring schemaName; - jclass clazz; + /* + * Save the first clazz returned; for subsequent ones, just confirm it's + * not different, then delete the extra local ref. + */ + if ( 0 == i ) + clazz = t_clazz; + else + { + if ( JNI_FALSE == JNI_isSameObject(clazz, t_clazz) ) + goto classMismatch; + JNI_deleteLocalRef(t_clazz); + } - if ( ! InstallHelper_isPLJavaFunction(typeStruct->typinput) - || ! InstallHelper_isPLJavaFunction(typeStruct->typoutput) - || ! InstallHelper_isPLJavaFunction(typeStruct->typreceive) - || ! InstallHelper_isPLJavaFunction(typeStruct->typsend) ) - return NULL; + if ( NULL == clazz ) + continue; - /* typinput as good as any, all four had better be in same class */ - procTup = PgObject_getValidTuple(PROCOID, typeStruct->typinput, "function"); - parseFunction(&info, procTup); - if ( ! info.isUDT ) - goto finally; + handle[i] = obtainUDTHandle(getter[i], clazz, langName[i], trusted[i]); + } - procStruct = (Form_pg_proc)GETSTRUCT(procTup); - schemaName = getSchemaName(procStruct->pronamespace); - clazz = Function_loadClass(schemaName, info.className); - JNI_deleteLocalRef(schemaName); - t = (Type)UDT_registerUDT(clazz, typeId, typeStruct, 0, true); + /* + * We can only be here if getClassIfUDT returned the same value for clazz + * all four times. But that value could have been NULL; no UDT if so. + */ + if ( NULL != clazz ) + t = (Type)UDT_registerUDT(clazz, typeId, typeStruct, 0, true, + handle[0], handle[1], handle[2], handle[3]); + /* + * UDT_registerUDT will already have called JNI_deleteLocalRef on the + * four handles. (Or clazz was NULL and there aren't any to delete anyway.) 
+ */ + JNI_deleteLocalRef(clazz); + for ( i = 0; i < 4; ++ i ) + pfree(langName[i]); -finally: - pfree(info.buffer); - ReleaseSysCache(procTup); return t; + +classMismatch: + while ( i --> 0 ) + JNI_deleteLocalRef(handle[i]); + for ( i = 0; i < 4; ++ i ) + pfree(langName[i]); + JNI_deleteLocalRef(clazz); + JNI_deleteLocalRef(t_clazz); + ereport(ERROR, (errmsg( + "PL/Java UDT with oid %u declares input/output/send/recv functions " + "in more than one class", typeId))); + pg_unreachable(); /* MSVC otherwise is not convinced */ } -static void setupFunctionParams(Function self, ParseResult info, Form_pg_proc procStruct, PG_FUNCTION_ARGS) +static Function Function_create( + Oid funcOid, bool trusted, bool forTrigger, + bool forValidator, bool checkBody) { - Oid* paramOids; - MemoryContext ctx = GetMemoryChunkContext(self); - int32 top = (int32)procStruct->pronargs;; + Function self; + HeapTuple procTup = + PgObject_getValidTuple(PROCOID, funcOid, "function"); + Form_pg_proc procStruct = (Form_pg_proc)GETSTRUCT(procTup); + HeapTuple lngTup = + PgObject_getValidTuple(LANGOID, procStruct->prolang, "language"); + Form_pg_language lngStruct = (Form_pg_language)GETSTRUCT(lngTup); + jstring lname = String_createJavaStringFromNTS(NameStr(lngStruct->lanname)); + bool ltrust = lngStruct->lanpltrusted; + jstring schemaName; + Datum d; + jobject invocable; - self->func.nonudt.numParams = top; - self->func.nonudt.isMultiCall = procStruct->proretset; - self->func.nonudt.returnType = Type_fromOid(procStruct->prorettype, self->func.nonudt.typeMap); + if ( trusted != ltrust ) + elog(ERROR, + "function with oid %u invoked through wrong call handler " + "for %strusted language %s", funcOid, ltrust ? "" : "un", + NameStr(lngStruct->lanname)); - if(top > 0) - { - int idx; - paramOids = PARAM_OIDS(procStruct); - self->func.nonudt.paramTypes = (Type*)MemoryContextAlloc(ctx, top * sizeof(Type)); + d = heap_copy_tuple_as_datum(procTup, Type_getTupleDesc(s_pgproc_Type, 0)); - for(idx = 0; idx < top; ++idx) - self->func.nonudt.paramTypes[idx] = Type_fromOid(paramOids[idx], self->func.nonudt.typeMap); - } - else - { - self->func.nonudt.paramTypes = 0; - paramOids = 0; - } + schemaName = getSchemaName(procStruct->pronamespace); - if(info->parameters != 0) - parseParameters(self, paramOids, info->parameters); + self = /* will rely on the fact that allocInstance zeroes memory */ + (Function)PgObjectClass_allocInstance(s_FunctionClass,TopMemoryContext); - if(info->returnType != 0) + PG_TRY(); { - const char* jtName = Type_getJavaTypeName(self->func.nonudt.returnType); - if(strcmp(jtName, info->returnType) != 0) - { - Type repl = Type_fromJavaType(Type_getOid(self->func.nonudt.returnType), info->returnType); - if(!Type_canReplaceType(repl, self->func.nonudt.returnType)) - repl = Type_getCoerceOut(repl, self->func.nonudt.returnType); - self->func.nonudt.returnType = repl; - } + invocable = + JNI_callStaticObjectMethod(s_Function_class, s_Function_create, + PointerGetJLong(self), Type_coerceDatum(s_pgproc_Type, d), lname, + schemaName, + trusted ? JNI_TRUE : JNI_FALSE, + forTrigger ? JNI_TRUE : JNI_FALSE, + forValidator ? JNI_TRUE : JNI_FALSE, + checkBody ? JNI_TRUE : JNI_FALSE); } -} - -static void Function_init(Function self, ParseResult info, Form_pg_proc procStruct, PG_FUNCTION_ARGS) -{ - StringInfoData sign; - - /* Get the ClassLoader for the schema that this function belongs to - */ - jstring schemaName = getSchemaName(procStruct->pronamespace); - - /* Install the type map for the current schema. 
This must be done ASAP since - * many other functions (including obtaining the loader) depends on it. - */ - jobject tmp = JNI_callStaticObjectMethod(s_Loader_class, s_Loader_getTypeMap, schemaName); - self->func.nonudt.typeMap = JNI_newGlobalRef(tmp); - JNI_deleteLocalRef(tmp); - - self->readOnly = (procStruct->provolatile != PROVOLATILE_VOLATILE); - self->isUDT = info->isUDT; - - currentInvocation->function = self; - - self->clazz = Function_loadClass(schemaName, info->className); + PG_CATCH(); + { + JNI_deleteLocalRef(schemaName); + ReleaseSysCache(lngTup); + ReleaseSysCache(procTup); + pfree(self); /* would otherwise leak into TopMemoryContext */ + PG_RE_THROW(); + } + PG_END_TRY(); JNI_deleteLocalRef(schemaName); + ReleaseSysCache(lngTup); + ReleaseSysCache(procTup); - if(self->isUDT) - { - setupUDT(self, info, procStruct); - return; - } + /* + * One of four things has happened, the product of two binary choices: + * - This Function turns out to be either a UDT function, or a nonUDT one. + * - it is now fully initialized and should be returned, or it isn't, and + * should be pfree()d. (Validator calls don't have to do the whole job.) + * + * If Function.create returned a non-NULL result, this is a fully + * initialized, non-UDT function, ready to save and use. (That can happen + * even during validation; if checkBody is true, enough work is done to get + * a complete result, so we might as well save it.) + * + * If it returned NULL, this is either an incompletely-initialized non-UDT + * function, or it is a UDT function (whether fully initialized or not; it + * is always NULL for a UDT function). If it is a UDT function and not + * complete, it should be pfree()d. If complete, it has already been + * registered with the UDT machinery and should be saved. We can arrange + * (see _storeToUDT below) for the isUDT flag to be left false if the UDT + * initialization isn't complete; that collapses the need-to-pfree cases + * into one case here (Function.create returned NULL && ! isUDT). + * + * Because allocInstance zeroes memory, isUDT is reliably false even if + * the Java code bailed early. + */ - if(CALLED_AS_TRIGGER(fcinfo)) + if ( NULL != invocable ) { - self->func.nonudt.typeMap = 0; - setupTriggerParams(self, info); + self->func.nonudt.invocable = JNI_newGlobalRef(invocable); + JNI_deleteLocalRef(invocable); } - else + else if ( ! self->isUDT ) { - setupFunctionParams(self, info, procStruct, fcinfo); + pfree(self); + if ( forValidator ) + return NULL; + elog(ERROR, + "failed to create a PL/Java function (oid %u) and not validating", + funcOid); } - - initStringInfo(&sign); - buildSignature(self, &sign, self->func.nonudt.returnType, false); - - elog(DEBUG2, "Obtaining method %s.%s %s", info->className, info->methodName, sign.data); - self->func.nonudt.method = JNI_getStaticMethodIDOrNull(self->clazz, info->methodName, sign.data); - - if(self->func.nonudt.method == 0) - { - char* origSign = sign.data; - Type altType = 0; - Type realRetType = self->func.nonudt.returnType; - - elog(DEBUG2, "Method %s.%s %s not found", info->className, info->methodName, origSign); - - if(Type_isPrimitive(self->func.nonudt.returnType)) - { - /* - * One valid reason for not finding the method is when - * the return type used in the signature is a primitive and - * the true return type of the method is the object class that - * corresponds to that primitive. 
- */ - altType = Type_getObjectType(self->func.nonudt.returnType); - realRetType = altType; - } - else if(strcmp(Type_getJavaTypeName(self->func.nonudt.returnType), "java.sql.ResultSet") == 0) - { - /* - * Another reason might be that we expected a ResultSetProvider - * but the implementation returns a ResultSetHandle that needs to be - * wrapped. The wrapping is internal so we retain the original - * return type anyway. - */ - altType = realRetType; - } - - if(altType != 0) - { - JNI_exceptionClear(); - initStringInfo(&sign); - buildSignature(self, &sign, altType, true); - - elog(DEBUG2, "Obtaining method %s.%s %s", info->className, info->methodName, sign.data); - self->func.nonudt.method = JNI_getStaticMethodIDOrNull(self->clazz, info->methodName, sign.data); - - if(self->func.nonudt.method != 0) - self->func.nonudt.returnType = realRetType; - } - if(self->func.nonudt.method == 0) - PgObject_throwMemberError(self->clazz, info->methodName, origSign, true, true); - - if(sign.data != origSign) - pfree(origSign); - } - pfree(sign.data); + return self; } /* - * Return a global ref to the loaded class. + * Get a Function using a function Oid. If the function is not found, one + * will be created based on the class and method name denoted in the "AS" + * clause, the parameter types, and the return value of the function + * description. If "forTrigger" is true, the parameter type and + * return value of the function will be fixed to: + * void (org.postgresql.pljava.TriggerData td) + * + * If forValidator is true, forTrigger is disregarded, and will be determined + * from the function's pg_proc entry. If forValidator is false, checkBody has no + * meaning. + * + * If called with forValidator true, may return NULL. The validator doesn't + * use the result. + * + * In all other cases, this Function has been stored + * in currentInvocation->function upon successful return from here. */ -static jclass Function_loadClass(jstring schemaName, char const *className) -{ - jobject tmp; - jobject loader; - jstring classJstr; - jclass clazz; - /* Get the ClassLoader for the schema that this function belongs to - */ - loader = JNI_callStaticObjectMethod(s_Loader_class, - s_Loader_getSchemaLoader, schemaName); - - elog(DEBUG2, "Loading class %s", className); - classJstr = String_createJavaStringFromNTS(className); - - tmp = JNI_callObjectMethod(loader, s_ClassLoader_loadClass, classJstr); - JNI_deleteLocalRef(loader); - JNI_deleteLocalRef(classJstr); - - clazz = (jclass)JNI_newGlobalRef(tmp); - JNI_deleteLocalRef(tmp); - return clazz; -} - -static Function Function_create(PG_FUNCTION_ARGS) +static inline Function +getFunction( + Oid funcOid, bool trusted, bool forTrigger, + bool forValidator, bool checkBody) { - ParseResultData info; - Function self = (Function)PgObjectClass_allocInstance(s_FunctionClass, TopMemoryContext); - HeapTuple procTup = PgObject_getValidTuple(PROCOID, fcinfo->flinfo->fn_oid, "function"); - - parseFunction(&info, procTup); - Function_init(self, &info, (Form_pg_proc)GETSTRUCT(procTup), fcinfo); - - pfree(info.buffer); - ReleaseSysCache(procTup); - return self; -} + Function func = + forValidator ? 
NULL : (Function)HashMap_getByOid(s_funcMap, funcOid); -Function Function_getFunction(PG_FUNCTION_ARGS) -{ - Oid funcOid = fcinfo->flinfo->fn_oid; - Function func = (Function)HashMap_getByOid(s_funcMap, funcOid); - if(func == 0) + if ( NULL == func ) { - func = Function_create(fcinfo); - HashMap_putByOid(s_funcMap, funcOid, func); + func = Function_create( + funcOid, trusted, forTrigger, forValidator, checkBody); + if ( NULL != func ) + HashMap_putByOid(s_funcMap, funcOid, func); } + + currentInvocation->function = func; return func; } @@ -776,88 +883,189 @@ void Function_clearFunctionCache(void) PgObject_free((PgObject)oldMap); } -Datum Function_invoke(Function self, PG_FUNCTION_ARGS) +/* + * Type_isPrimitive() by itself returns true for both, say, int and int[]. + * That is sometimes relied on, as in the code that would accept Integer[] + * as a replacement for int[]. + * + * However, it isn't correct for determining whether the thing should be passed + * to Java as a primitive or a reference, because of course no Java array is a + * primitive. Hence this method, which requires both Type_isPrimitive to be true + * and that the type is not an array. + */ +static inline bool +passAsPrimitive(Type t) +{ + return Type_isPrimitive(t) && (NULL == Type_getElementType(t)); +} + +Datum +Function_invoke( + Oid funcoid, bool trusted, bool forTrigger, bool forValidator, + bool checkBody, PG_FUNCTION_ARGS) { + Function self; Datum retVal; - int32 top; - jvalue* args; - Type invokerType; + Size passedArgCount; + Type invokerType; + bool skipParameterConversion = false; + + self = getFunction(funcoid, trusted, forTrigger, forValidator, checkBody); + + if ( forValidator ) + PG_RETURN_VOID(); + + if ( forTrigger ) + return invokeTrigger(self, fcinfo); fcinfo->isnull = false; - currentInvocation->function = self; if(self->isUDT) return self->func.udt.udtFunction(self->func.udt.udt, fcinfo); - if(self->func.nonudt.isMultiCall && SRF_IS_FIRSTCALL()) - Invocation_assertDisconnect(); + if ( self->func.nonudt.isMultiCall ) + { + if ( SRF_IS_FIRSTCALL() ) + { + /* A class loader or other mechanism might have connected already. + * This connection must be dropped since its parent context + * is wrong. + */ + Invocation_assertDisconnect(); + } + else + { + /* In PL/Java's implementation of the ValuePerCall SRF protocol, the + * passed parameters from SQL only matter on the first call. All + * subsequent calls are either hasNext()/next() on an Iterator, or + * assignRowValues on a ResultSetProvider, and none of those methods + * will receive the SQL-passed parameters. So there is no need to + * spend cycles to convert them and populate the parameter area. + */ + skipParameterConversion = true; + } + } + + passedArgCount = PG_NARGS(); + + if ( ! skipParameterConversion ) + { + jsize reservedArgCount = reserveParameterFrame( + self->func.nonudt.numRefParams, self->func.nonudt.numPrimParams); + + if ( passedArgCount != reservedArgCount + && passedArgCount + 1 != reservedArgCount ) /* the OUT arg case */ + elog(ERROR, "function expecting %u arguments passed %u", + (unsigned int)reservedArgCount, (unsigned int)passedArgCount); + } + + installContextLoader(self); - top = self->func.nonudt.numParams; - - /* Leave room for one extra parameter. Functions that returns unmapped - * composite types must have a single row ResultSet as an OUT parameter. - */ - args = (jvalue*)palloc((top + 1) * sizeof(jvalue)); invokerType = self->func.nonudt.returnType; - if(top > 0) + if ( passedArgCount > 0 && ! 
skipParameterConversion ) { int32 idx; + int32 refIdx = 0; + int32 primIdx = 0; Type* types = self->func.nonudt.paramTypes; + jvalue coerced; - /* a class loader or other mechanism might have connected already. This - * connection must be dropped since its parent context is wrong. - */ if(Type_isDynamic(invokerType)) - invokerType = Type_getRealType(invokerType, get_fn_expr_rettype(fcinfo->flinfo), self->func.nonudt.typeMap); + invokerType = Type_getRealType(invokerType, + get_fn_expr_rettype(fcinfo->flinfo), self->func.nonudt.typeMap); - for(idx = 0; idx < top; ++idx) + for(idx = 0; idx < passedArgCount; ++idx) { + Type paramType = types[idx]; + bool passPrimitive = passAsPrimitive(paramType); + if(PG_ARGISNULL(idx)) + { /* * Set this argument to zero (or null in case of object) */ - args[idx].j = 0L; + if ( passPrimitive ) + s_primitiveParameters[primIdx++].j = 0L; + else + ++ refIdx; /* array element is already initially null */ + } else { - Type paramType = types[idx]; if(Type_isDynamic(paramType)) - paramType = Type_getRealType(paramType, get_fn_expr_argtype(fcinfo->flinfo, idx), self->func.nonudt.typeMap); - args[idx] = Type_coerceDatum(paramType, PG_GETARG_DATUM(idx)); + paramType = Type_getRealType(paramType, + get_fn_expr_argtype(fcinfo->flinfo, idx), + self->func.nonudt.typeMap); + coerced = Type_coerceDatum(paramType, PG_GETARG_DATUM(idx)); + if ( passPrimitive ) + s_primitiveParameters[primIdx++] = coerced; + else + JNI_setObjectArrayElement( + s_referenceParameters, refIdx++, coerced.l); } } } retVal = self->func.nonudt.isMultiCall - ? Type_invokeSRF(invokerType, self->clazz, self->func.nonudt.method, args, fcinfo) - : Type_invoke(invokerType, self->clazz, self->func.nonudt.method, args, fcinfo); + ? Type_invokeSRF(invokerType, self, fcinfo) + : Type_invoke(invokerType, self, fcinfo); - pfree(args); return retVal; } -Datum Function_invokeTrigger(Function self, PG_FUNCTION_ARGS) +/* + * Invoke a trigger. Wrap the TriggerData in org.postgresql.pljava.TriggerData + * object, make the call, and unwrap the resulting Tuple. + */ +static inline Datum +invokeTrigger(Function self, PG_FUNCTION_ARGS) { - jvalue arg; + jobject jtd; Datum ret; - arg.l = TriggerData_create((TriggerData*)fcinfo->context); - if(arg.l == 0) + TriggerData *td = (TriggerData*)fcinfo->context; + jtd = pljava_TriggerData_create(td); + if(jtd == 0) return 0; - currentInvocation->function = self; - Type_invoke(self->func.nonudt.returnType, self->clazz, self->func.nonudt.method, &arg, fcinfo); + reserveParameterFrame(1, 0); + installContextLoader(self); + + JNI_setObjectArrayElement(s_referenceParameters, 0, jtd); + +#if PG_VERSION_NUM >= 100000 + currentInvocation->triggerData = td; + /* Also starting in PG 10, Invocation_assertConnect must be called before + * the getTriggerReturnTuple below. That could be done right here, but at + * the risk of changing the memory context from what the invoked trigger + * function expects. More cautiously, add the assertConnect later, after + * the trigger function has returned. + */ +#endif + Type_invoke(self->func.nonudt.returnType, self, fcinfo); fcinfo->isnull = false; if(JNI_exceptionCheck()) ret = 0; else { - /* A new Tuple may or may not be created here. If it is, ensure that - * it is created in the upper SPI context. + /* A new Tuple may or may not be created here. Ensure that, if it is, + * it is created in the upper context (even after connecting SPI, should + * that be necessary). 
+ */ + MemoryContext currCtx; +#if PG_VERSION_NUM >= 100000 + /* If the invoked trigger function didn't connect SPI, do that here + * (getTriggerReturnTuple now needs it), but there will be no need to + * register the triggerData in that case. */ - MemoryContext currCtx = Invocation_switchToUpperContext(); - ret = PointerGetDatum(TriggerData_getTriggerReturnTuple(arg.l, &fcinfo->isnull)); + currentInvocation->triggerData = NULL; + Invocation_assertConnect(); +#endif + currCtx = Invocation_switchToUpperContext(); + ret = PointerGetDatum( + pljava_TriggerData_getTriggerReturnTuple( + jtd, &fcinfo->isnull)); /* Triggers are not allowed to set the fcinfo->isnull, even when * they return null. @@ -867,10 +1075,30 @@ Datum Function_invokeTrigger(Function self, PG_FUNCTION_ARGS) MemoryContextSwitchTo(currCtx); } - JNI_deleteLocalRef(arg.l); + JNI_deleteLocalRef(jtd); return ret; } +/* + * Most slots in the parameter area are set directly in invoke() or + * invokeTrigger() above. The only caller of this is Composite_invoke, which + * needs to set one parameter (always the last one, and a reference type). + * So this function, though with an API that could be general, for now only + * handles the case where index is -1 and the last parameter has reference type. + */ +void pljava_Function_setParameter(Function self, int index, jvalue value) +{ + int numRefs = self->func.nonudt.numRefParams; + if ( -1 != index || 1 > numRefs ) + elog(ERROR, "unsupported index in pljava_Function_setParameter"); + /* + * Thinking to Assert(!passAsPrimitive(self->func.nonudt.paramTypes[...]))? + * Nice idea, but you would index beyond paramTypes; that synthetic last + * OUT tuple entry isn't represented there. + */ + JNI_setObjectArrayElement(s_referenceParameters, numRefs - 1, value.l); +} + bool Function_isCurrentReadOnly(void) { /* function will be 0 during resolve of class and java function. At @@ -881,3 +1109,308 @@ bool Function_isCurrentReadOnly(void) return currentInvocation->function->readOnly; } +jobject Function_currentLoader(void) +{ + Function f; + + if ( NULL == currentInvocation ) + return NULL; + f = currentInvocation->function; + if ( NULL == f ) + return NULL; + return f->schemaLoader; +} + +/* + * Class: org_postgresql_pljava_internal_Function_EarlyNatives + * Method: _parameterArea + * Signature: ([Ljava/lang/Object;)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL + Java_org_postgresql_pljava_internal_Function_00024EarlyNatives__1parameterArea( + JNIEnv *env, jclass cls, jobjectArray referenceParams) +{ + /* + * This native method will use *env directly, not BEGIN_NATIVE / END_NATIVE: + * it is only called once in early initialization on the primordial thread. 
+ */ + s_referenceParameters = (*env)->NewGlobalRef(env, referenceParams); + pljava_Invocation_shareFrame(s_primitiveParameters, s_countCheck); + return (*env)->NewDirectByteBuffer( + env, &s_primitiveParameters, sizeof s_primitiveParameters); +} + +/* + * Class: org_postgresql_pljava_internal_Function + * Method: _storeToNonUDT + * Signature: (JLjava/lang/ClassLoader;Ljava/lang/Class;ZZLjava/util/Map;IILjava/lang/String;[I[Ljava/lang/String;[Ljava/lang/String;)Z + */ +JNIEXPORT jboolean JNICALL + Java_org_postgresql_pljava_internal_Function__1storeToNonUDT( + JNIEnv *env, jclass jFunctionClass, jlong wrappedPtr, jobject schemaLoader, + jclass clazz, jboolean readOnly, jboolean isMultiCall, jobject typeMap, + jint numParams, jint returnType, jstring returnJType, + jintArray paramTypes, jobjectArray paramJTypes, jobjectArray outJTypes) +{ + Function self; + MemoryContext ctx; + jstring jtn; + int i = 0; + uint16 refParams = 0; + uint16 primParams = 0; + bool returnTypeIsOutParameter = false; + + self = JLongGet(Function, wrappedPtr); + ctx = GetMemoryChunkContext(self); + + BEGIN_NATIVE_NO_ERRCHECK + PG_TRY(); + { + self->isUDT = false; + self->readOnly = (JNI_TRUE == readOnly); + self->schemaLoader = JNI_newGlobalRef(schemaLoader); + self->clazz = JNI_newGlobalRef(clazz); + self->func.nonudt.isMultiCall = (JNI_TRUE == isMultiCall); + self->func.nonudt.typeMap = + (NULL == typeMap) ? NULL : JNI_newGlobalRef(typeMap); + + if ( NULL != returnJType ) + { + char *rjtc = String_createNTS(returnJType); + self->func.nonudt.returnType = Type_fromJavaType(returnType, rjtc); + pfree(rjtc); + } + else + self->func.nonudt.returnType = Type_fromOid(returnType, typeMap); + + if ( 0 < numParams ) + { + jint *paramOids; + self->func.nonudt.paramTypes = + (Type *)MemoryContextAlloc(ctx, numParams * sizeof (Type)); + paramOids = JNI_getIntArrayElements(paramTypes, NULL); + for ( i = 0 ; i < numParams ; ++ i ) + { + if ( NULL != paramJTypes ) + { + jstring pjt = JNI_getObjectArrayElement(paramJTypes, i); + if ( NULL != pjt ) + { + char *pjtc = String_createNTS(pjt); + JNI_deleteLocalRef(pjt); + self->func.nonudt.paramTypes[i] = + Type_fromJavaType(paramOids[i], pjtc); + pfree(pjtc); + continue; + } + } + self->func.nonudt.paramTypes[i] = + Type_fromOid(paramOids[i], typeMap); + } + JNI_releaseIntArrayElements(paramTypes, paramOids, JNI_ABORT); + + for ( i = 0 ; i < numParams ; ++ i ) + { + jtn = String_createJavaStringFromNTS(Type_getJavaTypeName( + self->func.nonudt.paramTypes[i])); + JNI_setObjectArrayElement(outJTypes, i, jtn); + JNI_deleteLocalRef(jtn); + if ( passAsPrimitive(self->func.nonudt.paramTypes[i]) ) + ++ primParams; + else + ++ refParams; + } + } + + /* Store Java type name of return type at outJTypes[i], where i (after + * all of the above) indexes the last element of outJTypes. 
+ */ + jtn = String_createJavaStringFromNTS(Type_getJavaTypeName( + self->func.nonudt.returnType)); + JNI_setObjectArrayElement(outJTypes, i, jtn); + JNI_deleteLocalRef(jtn); + + returnTypeIsOutParameter = + Type_isOutParameter(self->func.nonudt.returnType); + } + PG_CATCH(); + { + Exception_throw_ERROR(PG_FUNCNAME_MACRO); + } + PG_END_TRY(); + + if ( returnTypeIsOutParameter && JNI_TRUE != isMultiCall ) + ++ refParams; + + self->func.nonudt.numRefParams = refParams; + self->func.nonudt.numPrimParams = primParams; + + END_NATIVE + return returnTypeIsOutParameter; +} + +/* + * Class: org_postgresql_pljava_internal_Function + * Method: _storeToUDT + * Signature: (JLjava/lang/ClassLoader;Ljava/lang/Class;ZII)V + */ +JNIEXPORT void JNICALL + Java_org_postgresql_pljava_internal_Function__1storeToUDT( + JNIEnv *env, jclass jFunctionClass, jlong wrappedPtr, jobject schemaLoader, + jclass clazz, jboolean readOnly, jint funcInitial, jint udtId) +{ + Function self; + HeapTuple typeTup; + Form_pg_type pgType; + + self = JLongGet(Function, wrappedPtr); + + BEGIN_NATIVE_NO_ERRCHECK + PG_TRY(); + { + typeTup = PgObject_getValidTuple(TYPEOID, udtId, "type"); + pgType = (Form_pg_type)GETSTRUCT(typeTup); + + /* + * Check typisdefined first. During validation, it will probably be + * false, as the functions are created while the type is just a shell. + * In that case, leave isUDT false, which will trigger Function_create + * to pfree the unusable proto-Function. + * + * In that case, don't store anything needing special deallocation + * such as JNI references; Function_create will do a blind pfree only. + */ + if ( pgType->typisdefined ) + { + self->isUDT = true; + self->readOnly = (JNI_TRUE == readOnly); + self->schemaLoader = JNI_newGlobalRef(schemaLoader); + self->clazz = JNI_newGlobalRef(clazz); + + /* + * Only a BaseUDT has SQL-declared PL/Java I/O functions, so only + * a BaseUDT can arrive at this code. Its four I/O functions are + * most easily looked up by Function_checkTypeBaseUDT, which has to + * exist separately anyway in case the UDT is first encountered by + * the Type machinery instead of an explicit function invocation. + */ + self->func.udt.udt = (UDT) + Function_checkTypeBaseUDT((Oid)udtId, pgType); + + switch ( funcInitial ) + { + case 'i': self->func.udt.udtFunction = UDT_input; break; + case 'o': self->func.udt.udtFunction = UDT_output; break; + case 'r': self->func.udt.udtFunction = UDT_receive; break; + case 's': self->func.udt.udtFunction = UDT_send; break; + default: + elog(ERROR, + "PL/Java jar/native code mismatch: unexpected UDT func ID"); + } + } + ReleaseSysCache(typeTup); + } + PG_CATCH(); + { + Exception_throw_ERROR(PG_FUNCNAME_MACRO); + } + PG_END_TRY(); + END_NATIVE +} + +/* + * Class: org_postgresql_pljava_internal_Function + * Method: _reconcileTypes + * Signature: (J[Ljava/lang/String;[Ljava/lang/String;I)V + */ +JNIEXPORT void JNICALL + Java_org_postgresql_pljava_internal_Function__1reconcileTypes( + JNIEnv *env, jclass jFunctionClass, jlong wrappedPtr, + jobjectArray resolvedTypes, jobjectArray explicitTypes, jint index) +{ + Function self; + Type origType; + Type replType; + Oid typeId; + char *javaName; + jstring javaNameString; + + /* The Java code will pass index -1 to indicate the special case of + * reconciling the return type instead of a parameter type. This is + * a bit convoluted in order to reproduce the behavior of the + * original C parseParameters. The explicit return type is at numParams. + * OR ... 
the Java code will pass -2 in a *different* case of adapting the + * return type, which in this case is the only element in a length-one + * explicitTypes array ... and in this case a coercer, if needed, will be + * built with getCoerceOut instead of getCoerceIn. (The use of getCoerceIn + * for the -1 case seems unconvincing; it is a faithful copy of what the + * C parseParameters did, but applying it to the return type may have been + * an oversight.) The resolvedTypes array in this case is still full length, + * and the resulting return type name still goes at the end of it. + */ + bool actOnReturnType = ( -1 == index || -2 == index ); + bool coerceOutAndSingleton = ( -2 == index ); + + self = JLongGet(Function, wrappedPtr); + + BEGIN_NATIVE_NO_ERRCHECK + PG_TRY(); + { + if ( actOnReturnType ) + { + index = JNI_getArrayLength(resolvedTypes) - 1; + origType = self->func.nonudt.returnType; + typeId = InvalidOid; + } + else + { + origType = self->func.nonudt.paramTypes[index]; + typeId = Type_getOid(origType); + } + + javaNameString = JNI_getObjectArrayElement(explicitTypes, + coerceOutAndSingleton ? 0 : index); + + javaName = String_createNTS(javaNameString); + + replType = Type_fromJavaType(typeId, javaName); + pfree(javaName); + + if ( ! Type_canReplaceType(replType, origType) ) + { + if ( coerceOutAndSingleton ) + replType = Type_getCoerceOut(replType, origType); + else + replType = Type_getCoerceIn(replType, origType); + } + + if ( actOnReturnType ) + self->func.nonudt.returnType = replType; + else + { + self->func.nonudt.paramTypes[index] = replType; + if ( passAsPrimitive(origType) != passAsPrimitive(replType) ) + { + if ( Type_isPrimitive(replType) ) + { + -- self->func.nonudt.numRefParams; + ++ self->func.nonudt.numPrimParams; + } + else + { + ++ self->func.nonudt.numRefParams; + -- self->func.nonudt.numPrimParams; + } + } + } + + JNI_setObjectArrayElement(resolvedTypes, index, javaNameString); + } + PG_CATCH(); + { + Exception_throw_ERROR(PG_FUNCNAME_MACRO); + } + PG_END_TRY(); + + END_NATIVE +} diff --git a/pljava-so/src/main/c/HashMap.c b/pljava-so/src/main/c/HashMap.c index f1b6978e..069599bf 100644 --- a/pljava-so/src/main/c/HashMap.c +++ b/pljava-so/src/main/c/HashMap.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include "pljava/HashMap_priv.h" @@ -42,6 +46,7 @@ HashKeyClass HashKeyClass_alloc(const char* className, Size instanceSize, Finali static HashKeyClass s_OidKeyClass; static HashKeyClass s_StringKeyClass; static HashKeyClass s_OpaqueKeyClass; +static HashKeyClass s_StringOidKeyClass; static PgObjectClass s_EntryClass; static PgObjectClass s_HashMapClass; @@ -125,14 +130,12 @@ static void StringKey_init(StringKey self, const char* keyVal) } /* - * We use the Oid itself as the hashCode. + * We use the pointer itself (32 bits of it, without the low 3) as the hashCode. 
*/ static uint32 _OpaqueKey_hashCode(HashKey self) { - Ptr2Long p2l; - p2l.longVal = 0L; /* ensure that the rest is zeroed out */ - p2l.ptrVal = ((OpaqueKey)self)->key; - return (uint32)(p2l.longVal >> 3); + uintptr_t p = (uintptr_t) ((OpaqueKey)self)->key; + return (uint32)(p >> 3); } /* @@ -150,6 +153,41 @@ static void OpaqueKey_init(OpaqueKey self, void* keyVal) self->key = keyVal; } +/* + * Create a copy of this StringOidKey + */ +static HashKey _StringOidKey_clone(HashKey self, MemoryContext ctx) +{ + HashKey clone = _StringKey_clone(self, ctx); + ((StringOidKey)clone)->oid = ((StringOidKey)self)->oid; + return clone; +} + +/* + * Compare with another HashKey. + */ +static bool _StringOidKey_equals(HashKey self, HashKey other) +{ + return other->m_class == self->m_class /* Same class */ + && strcmp(((StringKey)self)->key, ((StringKey)other)->key) == 0 + && ((StringOidKey)self)->oid == ((StringOidKey)other)->oid; +} + +/* + * @return a hash code value for this object. + */ +static uint32 _StringOidKey_hashCode(HashKey self) +{ + return ((uint32)((StringOidKey)self)->oid) ^ _StringKey_hashCode(self); +} + +static void StringOidKey_init(StringOidKey self, const char* string, Oid oid) +{ + StringKey_init((StringKey)self, string); + ((HashKey)self)->m_class = s_StringOidKeyClass; + self->oid = oid; +} + /* * An Entry that holds the binding between the * key and an associated value. @@ -369,6 +407,13 @@ void* HashMap_getByString(HashMap self, const char* key) return HashMap_get(self, (HashKey)&stringKey); } +void* HashMap_getByStringOid(HashMap self, const char* string, Oid oid) +{ + struct StringOidKey_ stringOidKey; + StringOidKey_init(&stringOidKey, string, oid); + return HashMap_get(self, (HashKey)&stringOidKey); +} + void* HashMap_putByOid(HashMap self, Oid oid, void* value) { struct OidKey_ oidKey; @@ -390,6 +435,14 @@ void* HashMap_putByString(HashMap self, const char* key, void* value) return HashMap_put(self, (HashKey)&stringKey, value); } +void* HashMap_putByStringOid(HashMap self, const char* string, Oid oid, + void* value) +{ + struct StringOidKey_ stringOidKey; + StringOidKey_init(&stringOidKey, string, oid); + return HashMap_put(self, (HashKey)&stringOidKey, value); +} + void* HashMap_removeByOid(HashMap self, Oid oid) { struct OidKey_ oidKey; @@ -411,6 +464,13 @@ void* HashMap_removeByString(HashMap self, const char* key) return HashMap_remove(self, (HashKey)&stringKey); } +void* HashMap_removeByStringOid(HashMap self, const char* string, Oid oid) +{ + struct StringOidKey_ stringOidKey; + StringOidKey_init(&stringOidKey, string, oid); + return HashMap_remove(self, (HashKey)&stringOidKey); +} + uint32 HashMap_size(HashMap self) { return self->size; @@ -437,4 +497,10 @@ void HashMap_initialize(void) s_StringKeyClass->hashCode = _StringKey_hashCode; s_StringKeyClass->equals = _StringKey_equals; s_StringKeyClass->clone = _StringKey_clone; + + s_StringOidKeyClass = HashKeyClass_alloc("StringOidKey", + sizeof(struct StringOidKey_), _StringKey_finalize); /* same finalize */ + s_StringOidKeyClass->hashCode = _StringOidKey_hashCode; + s_StringOidKeyClass->equals = _StringOidKey_equals; + s_StringOidKeyClass->clone = _StringOidKey_clone; } diff --git a/pljava-so/src/main/c/InstallHelper.c b/pljava-so/src/main/c/InstallHelper.c index d36025bc..59e3089b 100644 --- a/pljava-so/src/main/c/InstallHelper.c +++ b/pljava-so/src/main/c/InstallHelper.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2019 Tada AB and other contributors, as listed below. 
+ * Copyright (c) 2015-2024 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -10,37 +10,34 @@ * Chapman Flack */ #include -#if PG_VERSION_NUM >= 90300 #include -#else -#include -#endif #include #include #include -#include -#if PG_VERSION_NUM >= 90100 +#include #include -#endif #include #include #include #include +#include #include #include #include #include +#include #include +#ifdef GP_VERSION_NUM +#include +#endif + #if PG_VERSION_NUM >= 120000 #include #define GetNamespaceOid(k1) \ GetSysCacheOid1(NAMESPACENAME, Anum_pg_namespace_oid, k1) -#elif PG_VERSION_NUM >= 90000 -#define GetNamespaceOid(k1) GetSysCacheOid1(NAMESPACENAME, k1) #else -#define SearchSysCache1(cid, k1) SearchSysCache(cid, k1, 0, 0, 0) -#define GetNamespaceOid(k1) GetSysCacheOid(NAMESPACENAME, k1, 0, 0, 0) +#define GetNamespaceOid(k1) GetSysCacheOid1(NAMESPACENAME, k1) #endif #include "pljava/InstallHelper.h" @@ -51,34 +48,21 @@ #include "pljava/PgObject.h" #include "pljava/type/String.h" -#define pg_unreachable() abort() - -/* - * CppAsString2 first appears in PG8.4. Once the compatibility target reaches - * 8.4, this fallback will not be needed. - */ -#ifndef CppAsString2 -#define CppAsString2(x) CppAsString(x) -#endif - +#if PG_VERSION_NUM < 170000 +#define AmAutoVacuumWorkerProcess() IsAutoVacuumWorkerProcess() +#define AmBackgroundWorkerProcess() IsBackgroundWorker /* - * Before 9.1, there was no creating_extension. Before 9.5, it did not have - * PGDLLIMPORT and so was not visible in Windows. In either case, just define - * it to be false, but also define CREATING_EXTENSION_HACK if on Windows and - * it needs to be tested for in some roundabout way. + * As of 9.6.1, IsBackgroundWorker still does not + * have PGDLLIMPORT, but MyBgworkerEntry != NULL can be used in MSVC instead. + * + * One thing it's needed for is to avoid dereferencing MyProcPort in a + * background worker, where it's not set. */ -#if PG_VERSION_NUM < 90100 || defined(_MSC_VER) && PG_VERSION_NUM < 90500 -#define creating_extension false -#if PG_VERSION_NUM >= 90100 -#define CREATING_EXTENSION_HACK -#endif -#endif - -#ifndef PLJAVA_SO_VERSION -#error "PLJAVA_SO_VERSION needs to be defined to compile this file." 
-#else -#define SO_VERSION_STRING CppAsString2(PLJAVA_SO_VERSION) +#if defined(_MSC_VER) +#include +#define IsBackgroundWorker (MyBgworkerEntry != NULL) #endif +#endif /* PG_VERSION_NUM < 170000 */ /* * The name of the table the extension scripts will create to pass information @@ -91,11 +75,13 @@ static jclass s_InstallHelper_class; static jmethodID s_InstallHelper_hello; static jmethodID s_InstallHelper_groundwork; +static jfieldID s_InstallHelper_MANAGE_CONTEXT_LOADER; static bool extensionExNihilo = false; -static void checkLoadPath( bool *livecheck); -static void getExtensionLoadPath(); +static void checkLoadPath(void); +static void getExtensionLoadPath(void); +static char *origUserName(); char const *pljavaLoadPath = NULL; @@ -112,28 +98,55 @@ bool pljavaViableXact() char *pljavaDbName() { + if ( AmAutoVacuumWorkerProcess() || AmBackgroundWorkerProcess() ) + { + char *shortlived; + static char *longlived; + if ( NULL == longlived ) + { + shortlived = get_database_name(MyDatabaseId); + if ( NULL != shortlived ) + { + longlived = MemoryContextStrdup(TopMemoryContext, shortlived); + pfree(shortlived); + } + } + return longlived; + } return MyProcPort->database_name; } +static char *origUserName() +{ + if ( AmAutoVacuumWorkerProcess() || AmBackgroundWorkerProcess() ) + { + char *shortlived; + static char *longlived; + if ( NULL == longlived ) + { + shortlived = GetUserNameFromId(GetAuthenticatedUserId(), false); + longlived = MemoryContextStrdup(TopMemoryContext, shortlived); + pfree(shortlived); + } + return longlived; + } + return MyProcPort->user_name; +} + char const *pljavaClusterName() { /* - * If PostgreSQL isn't at least 9.5, there can't BE a cluster name, and if - * it is, then there's always one (even if it is an empty string), so - * PG_GETCONFIGOPTION is safe. + * In PostgreSQL of at least 9.5, there's always one (even if it is an empty + * string), so PG_GETCONFIGOPTION is safe. */ -#if PG_VERSION_NUM < 90500 - return ""; -#else return PG_GETCONFIGOPTION("cluster_name"); -#endif } void pljavaCheckExtension( bool *livecheck) { if ( ! creating_extension ) { - checkLoadPath( livecheck); + checkLoadPath(); return; } if ( NULL != livecheck ) @@ -156,19 +169,17 @@ void pljavaCheckExtension( bool *livecheck) * on Windows. So if livecheck isn't null, this function only needs to proceed * as far as the CREATING_EXTENSION_HACK and then return. 
*/ -static void checkLoadPath( bool *livecheck) +static void checkLoadPath() { List *l; Node *ut; LoadStmt *ls; + PlannedStmt *ps; -#ifndef CREATING_EXTENSION_HACK - if ( NULL != livecheck ) - return; -#endif if ( NULL == ActivePortal ) return; l = ActivePortal->stmts; + if ( NULL == l ) return; if ( 1 < list_length( l) ) @@ -179,23 +190,26 @@ static void checkLoadPath( bool *livecheck) elog(DEBUG2, "got null for first statement from ActivePortal"); return; } - if ( T_LoadStmt != nodeTag(ut) ) -#ifdef CREATING_EXTENSION_HACK - if ( T_CreateExtensionStmt == nodeTag(ut) ) + + if ( T_PlannedStmt == nodeTag(ut) ) + { + ps = (PlannedStmt *)ut; + if ( CMD_UTILITY != ps->commandType ) { - if ( NULL != livecheck ) - { - *livecheck = true; - return; - } - getExtensionLoadPath(); - if ( NULL != pljavaLoadPath ) - pljavaLoadingAsExtension = true; + elog(DEBUG2, "ActivePortal has PlannedStmt command type %u", + ps->commandType); + return; } -#endif - return; - if ( NULL != livecheck ) + ut = ps->utilityStmt; + if ( NULL == ut ) + { + elog(DEBUG2, "got null for utilityStmt from PlannedStmt"); + return; + } + } + if ( T_LoadStmt != nodeTag(ut) ) return; + ls = (LoadStmt *)ut; if ( NULL == ls->filename ) { @@ -208,6 +222,10 @@ static void checkLoadPath( bool *livecheck) static void getExtensionLoadPath() { +#ifdef GP_VERSION_NUM + if ( ! IS_QD_OR_SINGLENODE() ) + return; +#endif MemoryContext curr; Datum dtm; bool isnull; @@ -252,7 +270,7 @@ static void getExtensionLoadPath() * * If a string is returned, it has been palloc'd in the current context. */ -char *pljavaFnOidToLibPath(Oid fnOid) +char *pljavaFnOidToLibPath(Oid fnOid, char **langName, bool *trusted) { bool isnull; HeapTuple procTup; @@ -292,13 +310,15 @@ char *pljavaFnOidToLibPath(Oid fnOid) elog(ERROR, "cache lookup failed for language %u", langId); langStruct = (Form_pg_language) GETSTRUCT(langTup); handlerOid = langStruct->lanplcallfoid; - ReleaseSysCache(langTup); /* * PL/Java has certainly got a function call handler, so if this language * hasn't, PL/Java it's not. */ if ( InvalidOid == handlerOid ) + { + ReleaseSysCache(langTup); return NULL; + } /* * Da capo al coda ... handlerOid is another function to be looked up. @@ -311,7 +331,10 @@ char *pljavaFnOidToLibPath(Oid fnOid) * If the call handler's not a C function, this isn't PL/Java.... */ if ( ClanguageId != procStruct->prolang ) + { + ReleaseSysCache(langTup); return NULL; + } /* * Now that the handler is known to be a C function, it should have a @@ -321,6 +344,11 @@ char *pljavaFnOidToLibPath(Oid fnOid) SysCacheGetAttr(PROCOID, procTup, Anum_pg_proc_probin, &isnull); if ( isnull ) elog(ERROR, "null probin for C function %u", handlerOid); + if ( NULL != langName ) + *langName = pstrdup(NameStr(langStruct->lanname)); + if ( NULL != trusted ) + *trusted = langStruct->lanpltrusted; + ReleaseSysCache(langTup); probinstring = /* TextDatumGetCString(probinattr); */ DatumGetCString(DirectFunctionCall1(textout, probinattr)); /*archaic*/ ReleaseSysCache(procTup); @@ -336,13 +364,25 @@ char *pljavaFnOidToLibPath(Oid fnOid) return probinstring; } -bool InstallHelper_isPLJavaFunction(Oid fn) +bool InstallHelper_shouldDeferInit() +{ + if ( AmAutoVacuumWorkerProcess() || AmBackgroundWorkerProcess() ) + return true; + + if ( ! 
IsBinaryUpgrade ) + return false; + + Backend_warnJEP411(true); + return true; +} + +bool InstallHelper_isPLJavaFunction(Oid fn, char **langName, bool *trusted) { char *itsPath; char *pljPath; bool result = false; - itsPath = pljavaFnOidToLibPath(fn); + itsPath = pljavaFnOidToLibPath(fn, langName, trusted); if ( NULL == itsPath ) return false; @@ -350,9 +390,9 @@ bool InstallHelper_isPLJavaFunction(Oid fn) { pljPath = NULL; if ( InvalidOid != pljavaTrustedOid ) - pljPath = pljavaFnOidToLibPath(pljavaTrustedOid); + pljPath = pljavaFnOidToLibPath(pljavaTrustedOid, NULL, NULL); if ( NULL == pljPath && InvalidOid != pljavaUntrustedOid ) - pljPath = pljavaFnOidToLibPath(pljavaUntrustedOid); + pljPath = pljavaFnOidToLibPath(pljavaUntrustedOid, NULL, NULL); if ( NULL == pljPath ) { elog(WARNING, "unable to determine PL/Java's load path"); @@ -368,35 +408,60 @@ bool InstallHelper_isPLJavaFunction(Oid fn) return result; } -char const *InstallHelper_defaultClassPath(char *pathbuf) +char const *InstallHelper_defaultModulePath(char *pathbuf, char pathsep) { char * const pbend = pathbuf + MAXPGPATH; char *pbp = pathbuf; size_t remaining; - size_t verlen = strlen(SO_VERSION_STRING); + int would_have_sprinted; get_share_path(my_exec_path, pathbuf); join_path_components(pathbuf, pathbuf, "pljava"); - join_path_components(pathbuf, pathbuf, "pljava-"); + join_path_components(pathbuf, pathbuf, "pljava"); /* puts \0 where - goes */ for ( ; pbp < pbend && '\0' != *pbp ; ++ pbp ) ; if ( pbend == pbp ) return NULL; - remaining = pbend - pbp; - if ( remaining < verlen + 5 ) + /* + * pbp now points to a \0 that should later be replaced with a hyphen. + * The \0-terminated string starting at pathbuf can, for now, be reused + * as an argument to snprintf. + */ + + remaining = (pbend - pbp) - 1; + + would_have_sprinted = snprintf(pbp + 1, remaining, "%s.jar%c%s-api-%s.jar", + SO_VERSION_STRING, pathsep, pathbuf, SO_VERSION_STRING); + + if ( would_have_sprinted >= remaining ) return NULL; - snprintf(pbp, remaining, "%s.jar", SO_VERSION_STRING); + *pbp = '-'; /* overwrite the \0 so now it's a single string. 
*/ return pathbuf; } +void InstallHelper_earlyHello() +{ + elog(DEBUG2, + "pljava-so-" SO_VERSION_STRING " built for (" PG_VERSION_STR ")"); +} + char *InstallHelper_hello() { char pathbuf[MAXPGPATH]; Invocation ctx; jstring nativeVer; + jstring serverBuiltVer; + jstring serverRunningVer; +#if PG_VERSION_NUM >= 120000 + FunctionCallInfoBaseData +#else + FunctionCallInfoData +#endif + fcinfo; + text *runningVer; jstring user; jstring dbname; jstring clustername; @@ -407,11 +472,24 @@ char *InstallHelper_hello() jstring greeting; char *greetingC; char const *clusternameC = pljavaClusterName(); + jboolean manageContext = JNI_getStaticBooleanField(s_InstallHelper_class, + s_InstallHelper_MANAGE_CONTEXT_LOADER); + + pljava_JNI_threadInitialize(JNI_TRUE == manageContext); Invocation_pushBootContext(&ctx); nativeVer = String_createJavaStringFromNTS(SO_VERSION_STRING); - user = String_createJavaStringFromNTS(MyProcPort->user_name); - dbname = String_createJavaStringFromNTS(MyProcPort->database_name); + serverBuiltVer = String_createJavaStringFromNTS(PG_VERSION_STR); + + InitFunctionCallInfoData(fcinfo, NULL, 0, + InvalidOid, /* collation */ + NULL, NULL); + runningVer = DatumGetTextP(pgsql_version(&fcinfo)); + serverRunningVer = String_createJavaString(runningVer); + pfree(runningVer); + + user = String_createJavaStringFromNTS(origUserName()); + dbname = String_createJavaStringFromNTS(pljavaDbName()); if ( '\0' == *clusternameC ) clustername = NULL; else @@ -430,9 +508,13 @@ char *InstallHelper_hello() greeting = JNI_callStaticObjectMethod( s_InstallHelper_class, s_InstallHelper_hello, - nativeVer, user, dbname, clustername, ddir, ldir, sdir, edir); + nativeVer, serverBuiltVer, serverRunningVer, + user, dbname, clustername, + ddir, ldir, sdir, edir); JNI_deleteLocalRef(nativeVer); + JNI_deleteLocalRef(serverBuiltVer); + JNI_deleteLocalRef(serverRunningVer); JNI_deleteLocalRef(user); JNI_deleteLocalRef(dbname); if ( NULL != clustername ) @@ -450,8 +532,14 @@ char *InstallHelper_hello() void InstallHelper_groundwork() { Invocation ctx; - Invocation_pushInvocation(&ctx, false); + bool snapshot_set = false; + Invocation_pushInvocation(&ctx); ctx.function = Function_INIT_WRITER; + if ( ! 
ActiveSnapshotSet() ) + { + PushActiveSnapshot(GetTransactionSnapshot()); + snapshot_set = true; + } PG_TRY(); { char const *lpt = LOADPATH_TBL_NAME; @@ -469,10 +557,18 @@ void InstallHelper_groundwork() JNI_deleteLocalRef(pljlp); JNI_deleteLocalRef(jlpt); JNI_deleteLocalRef(jlptq); + if ( snapshot_set ) + { + PopActiveSnapshot(); + } Invocation_popInvocation(false); } PG_CATCH(); { + if ( snapshot_set ) + { + PopActiveSnapshot(); + } Invocation_popInvocation(true); PG_RE_THROW(); } @@ -483,11 +579,14 @@ void InstallHelper_initialize() { s_InstallHelper_class = (jclass)JNI_newGlobalRef(PgObject_getJavaClass( "org/postgresql/pljava/internal/InstallHelper")); + s_InstallHelper_MANAGE_CONTEXT_LOADER = PgObject_getStaticJavaField( + s_InstallHelper_class, "MANAGE_CONTEXT_LOADER", "Z"); s_InstallHelper_hello = PgObject_getStaticJavaMethod(s_InstallHelper_class, "hello", "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;" "Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;" - "Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;"); + "Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;" + "Ljava/lang/String;)Ljava/lang/String;"); s_InstallHelper_groundwork = PgObject_getStaticJavaMethod( s_InstallHelper_class, "groundwork", "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;ZZ)V"); diff --git a/pljava-so/src/main/c/Invocation.c b/pljava-so/src/main/c/Invocation.c index da46497d..43f3c141 100644 --- a/pljava-so/src/main/c/Invocation.c +++ b/pljava-so/src/main/c/Invocation.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include #include @@ -15,39 +19,46 @@ #include "pljava/PgObject.h" #include "pljava/JNICalls.h" #include "pljava/Backend.h" +#include "pljava/DualState.h" +#include "pljava/Exception.h" #define pg_unreachable() abort() #define LOCAL_FRAME_SIZE 128 -struct CallLocal_ -{ - /** - * Pointer to the call local structure. - */ - void* pointer; - - /** - * The invocation where this CallLocal was allocated - */ - Invocation* invocation; - - /** - * Next CallLocal in a double linked list - */ - CallLocal* next; - - /** - * Previous CallLocal in a double linked list - */ - CallLocal* prev; -}; - +static jclass s_Invocation_class; static jmethodID s_Invocation_onExit; +static jfieldID s_Invocation_s_unhandled; static unsigned int s_callLevel = 0; Invocation* currentInvocation; +/* + * Two features of the calling convention for PL/Java functions will be handled + * here in Invocation to keep wrappers in Function simple. A PL/Java function + * may use static primitive slot 0 to return a primitive value, so that will + * always be saved in an Invocation struct and restored on both normal and + * exceptional return paths, when the heavier-weight full pushing of a Java + * ParameterFrame has not occurred. 
Likewise, the heavy full push is skipped if + * either the current or the new frame limits are (0,0), which means for such + * cases the frame limits themselves must be saved and restored the same way. + */ +static jvalue *s_primSlot0; +static jshort *s_frameLimits; + +/* + * To keep these values somewhat encapsulated, Function.c calls this function + * during its initialization to share them, rather than simply making them + * global. + */ +void pljava_Invocation_shareFrame(jvalue *slot0, jshort *limits) +{ + if ( 0 != s_primSlot0 || 0 != s_frameLimits ) + return; + s_primSlot0 = slot0; + s_frameLimits = limits; +} + extern void Invocation_initialize(void); void Invocation_initialize(void) { @@ -78,16 +89,32 @@ void Invocation_initialize(void) }; cls = PgObject_getJavaClass("org/postgresql/pljava/jdbc/Invocation"); + s_Invocation_class = JNI_newGlobalRef(cls); PgObject_registerNatives2(cls, invocationMethods); - s_Invocation_onExit = PgObject_getJavaMethod(cls, "onExit", "()V"); + s_Invocation_onExit = PgObject_getJavaMethod(cls, "onExit", "(Z)V"); + s_Invocation_s_unhandled = PgObject_getStaticJavaField( + cls, "s_unhandled", "Ljava/sql/SQLException;"); JNI_deleteLocalRef(cls); } void Invocation_assertConnect(void) { + int rslt; if(!currentInvocation->hasConnected) { - SPI_connect(); + rslt = SPI_connect(); + if ( SPI_OK_CONNECT != rslt ) + elog(ERROR, "SPI_connect returned %s", + SPI_result_code_string(rslt)); +#if PG_VERSION_NUM >= 100000 + if ( NULL != currentInvocation->triggerData ) + { + rslt = SPI_register_trigger_data(currentInvocation->triggerData); + if ( SPI_OK_TD_REGISTER != rslt ) + elog(WARNING, "SPI_register_trigger_data returned %s", + SPI_result_code_string(rslt)); + } +#endif currentInvocation->hasConnected = true; } } @@ -101,6 +128,13 @@ void Invocation_assertDisconnect(void) } } +/* + * Return the type map held by the innermost executing PL/Java function's + * schema loader (the initiating loader that was used to resolve the function). + * The type map is a map from Java Oid objects to Class class objects, + * as resolved by that loader. This is effectively Function_currentLoader() + * followed by JNI-invoking getTypeMap on the loader, but cached to avoid JNI). + */ jobject Invocation_getTypeMap(void) { Function f = currentInvocation->function; @@ -109,157 +143,125 @@ jobject Invocation_getTypeMap(void) void Invocation_pushBootContext(Invocation* ctx) { + JNI_pushLocalFrame(LOCAL_FRAME_SIZE); ctx->invocation = 0; ctx->function = 0; - ctx->trusted = false; + ctx->frameLimits = 0; + ctx->primSlot0.j = 0L; + ctx->savedLoader = 0; ctx->hasConnected = false; ctx->upperContext = CurrentMemoryContext; - ctx->errorOccured = false; + ctx->errorOccurred = false; ctx->inExprContextCB = false; ctx->previous = 0; - ctx->callLocals = 0; +#if PG_VERSION_NUM >= 100000 + ctx->triggerData = 0; +#endif currentInvocation = ctx; ++s_callLevel; } void Invocation_popBootContext(void) { + JNI_popLocalFrame(0); currentInvocation = 0; --s_callLevel; + /* + * Nothing is done here with savedLoader. It is just set to 0 in + * pushBootContext (uses can precede allocation of the sentinel value), + * and PL/Java functions (which could save a value) aren't called in a + * boot context. 
+ */ } -void Invocation_pushInvocation(Invocation* ctx, bool trusted) +void Invocation_pushInvocation(Invocation* ctx) { JNI_pushLocalFrame(LOCAL_FRAME_SIZE); ctx->invocation = 0; ctx->function = 0; - ctx->trusted = trusted; + ctx->frameLimits = *s_frameLimits; + ctx->primSlot0 = *s_primSlot0; + ctx->savedLoader = pljava_Function_NO_LOADER; ctx->hasConnected = false; ctx->upperContext = CurrentMemoryContext; - ctx->errorOccured = false; + ctx->errorOccurred = false; ctx->inExprContextCB = false; ctx->previous = currentInvocation; - ctx->callLocals = 0; +#if PG_VERSION_NUM >= 100000 + ctx->triggerData = 0; +#endif currentInvocation = ctx; - Backend_setJavaSecurity(trusted); ++s_callLevel; } void Invocation_popInvocation(bool wasException) { - CallLocal* cl; Invocation* ctx = currentInvocation->previous; + bool heavy = FRAME_LIMITS_PUSHED == currentInvocation->frameLimits; + bool unhandled = currentInvocation->errorOccurred; - if(currentInvocation->invocation != 0) + /* + * If the more heavyweight parameter-frame push wasn't done, do + * the lighter cleanup here. + */ + if ( ! heavy ) { - if(!wasException) - JNI_callVoidMethod(currentInvocation->invocation, s_Invocation_onExit); - JNI_deleteGlobalRef(currentInvocation->invocation); + /* + * The lighter-weight cleanup. + */ + *s_frameLimits = currentInvocation->frameLimits; + *s_primSlot0 = currentInvocation->primSlot0; } + pljava_Function_popFrame(heavy); - if(currentInvocation->hasConnected) - SPI_finish(); - - JNI_popLocalFrame(0); - if(ctx != 0) - { - PG_TRY(); - { - Backend_setJavaSecurity(ctx->trusted); - } - PG_CATCH(); - { - elog(FATAL, "Failed to reinstate untrusted security after a trusted call or vice versa"); - } - PG_END_TRY(); - MemoryContextSwitchTo(ctx->upperContext); - } - - /** - * Reset all local wrappers that has been allocated during this call. Yank them - * from the double linked list but do *not* remove them. + /* + * If a Java Invocation instance was created and associated with this + * invocation, delete the reference (after calling its onExit method, + * indicating whether the return is exceptional or not). */ - cl = currentInvocation->callLocals; - if(cl != 0) + if(currentInvocation->invocation != 0) { - CallLocal* first = cl; - do - { - cl->pointer = 0; - cl->invocation = 0; - cl = cl->next; - } while(cl != first); + JNI_callVoidMethodLocked( + currentInvocation->invocation, s_Invocation_onExit, + (wasException || unhandled) + ? JNI_TRUE : JNI_FALSE); + JNI_deleteGlobalRef(currentInvocation->invocation); } - currentInvocation = ctx; - --s_callLevel; -} -void Invocation_freeLocalWrapper(jlong wrapper) -{ - Ptr2Long p2l; - Invocation* ctx; - CallLocal* cl; - CallLocal* prev; - - p2l.longVal = wrapper; - cl = (CallLocal*)p2l.ptrVal; - prev = cl->prev; - if(prev != cl) + if ( unhandled ) { - /* Disconnect - */ - CallLocal* next = cl->next; - prev->next = next; - next->prev = prev; - } + jthrowable ex = (jthrowable)JNI_getStaticObjectField( + s_Invocation_class, s_Invocation_s_unhandled); + bool already_hit = Exception_isPGUnhandled(ex); + JNI_setStaticObjectField( + s_Invocation_class, s_Invocation_s_unhandled, NULL); - /* If this CallLocal is freed before its owning invocation was - * popped then there's a risk that this is the first CallLocal - * in the list. - */ - ctx = cl->invocation; - if(ctx != 0 && ctx->callLocals == cl) - { - if(prev == cl) - prev = 0; - ctx->callLocals = prev; + JNI_exceptionStacktraceAtLevel(ex, + wasException ? DEBUG2 : already_hit ? 
WARNING : DEBUG1); } - pfree(cl); -} -void* Invocation_getWrappedPointer(jlong wrapper) -{ - Ptr2Long p2l; - p2l.longVal = wrapper; - return ((CallLocal*)p2l.ptrVal)->pointer; -} + /* + * Do nativeRelease for any DualState instances scoped to this invocation. + */ + pljava_DualState_nativeRelease(currentInvocation); -jlong Invocation_createLocalWrapper(void* pointer) -{ - /* Create a local wrapper for the pointer + /* + * Check for any DualState objects that became unreachable and can be freed. */ - Ptr2Long p2l; - CallLocal* cl = (CallLocal*)MemoryContextAlloc(JavaMemoryContext, sizeof(CallLocal)); - CallLocal* prev = currentInvocation->callLocals; - if(prev == 0) + pljava_DualState_cleanEnqueuedInstances(); + + if(currentInvocation->hasConnected) + SPI_finish(); + + JNI_popLocalFrame(0); + + if(ctx != 0) { - currentInvocation->callLocals = cl; - cl->prev = cl; - cl->next = cl; + MemoryContextSwitchTo(ctx->upperContext); } - else - { - CallLocal* next = prev->next; - cl->prev = prev; - cl->next = next; - prev->next = cl; - next->prev = cl; - } - cl->pointer = pointer; - cl->invocation = currentInvocation; - p2l.longVal = 0L; /* ensure that the rest is zeroed out */ - p2l.ptrVal = cl; - return p2l.longVal; + + currentInvocation = ctx; + --s_callLevel; } MemoryContext @@ -298,7 +300,7 @@ Java_org_postgresql_pljava_jdbc_Invocation__1getCurrent(JNIEnv* env, jclass cls) JNIEXPORT void JNICALL Java_org_postgresql_pljava_jdbc_Invocation__1clearErrorCondition(JNIEnv* env, jclass cls) { - currentInvocation->errorOccured = false; + currentInvocation->errorOccurred = false; } /* @@ -309,5 +311,15 @@ Java_org_postgresql_pljava_jdbc_Invocation__1clearErrorCondition(JNIEnv* env, jc JNIEXPORT void JNICALL Java_org_postgresql_pljava_jdbc_Invocation__1register(JNIEnv* env, jobject _this) { - currentInvocation->invocation = (*env)->NewGlobalRef(env, _this); + if ( NULL == currentInvocation->invocation ) + { + currentInvocation->invocation = (*env)->NewGlobalRef(env, _this); + return; + } + if ( (*env)->IsSameObject(env, currentInvocation->invocation, _this) ) + return; + BEGIN_NATIVE + Exception_throw(ERRCODE_INTERNAL_ERROR, + "mismanaged PL/Java invocation stack"); + END_NATIVE } diff --git a/pljava-so/src/main/c/JNICalls.c b/pljava-so/src/main/c/JNICalls.c index 4a9ad543..2ce8e386 100644 --- a/pljava-so/src/main/c/JNICalls.c +++ b/pljava-so/src/main/c/JNICalls.c @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2021 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -21,14 +27,113 @@ JNIEnv* jniEnv; jint (JNICALL *pljava_createvm)(JavaVM **, void **, void *); +void* mainThreadId; /* declared in pljava.h */ + +static JNIEnv* primordialJNIEnv; + static jobject s_threadLock; +static bool s_refuseOtherThreads = false; +static bool s_doMonitorOps = true; + +static jclass s_Thread_class; +static jmethodID s_Thread_currentThread; +static jfieldID s_Thread_contextLoader; + +static jobject s_threadObject; + +void pljava_JNI_setThreadPolicy(bool refuseOtherThreads, bool doMonitorOps) +{ + s_refuseOtherThreads = refuseOtherThreads; + s_doMonitorOps = doMonitorOps; +} + +/* + * This file contains very specialized methods for updating the context + * class loader of a thread, because this is where they can be implemented + * without the overhead of several calls to wrappers defined here. + * + * More lightweight implementations of those can be chosen if the selected + * thread policy precludes native access from any but the primordial thread. + */ +JNI_ContextLoaderUpdater *JNI_loaderUpdater; +JNI_ContextLoaderRestorer *JNI_loaderRestorer; + +static JNI_ContextLoaderUpdater _noopUpdater; +static JNI_ContextLoaderRestorer _noopRestorer; + +static JNI_ContextLoaderUpdater _lightUpdater; +static JNI_ContextLoaderRestorer _lightRestorer; + +static JNI_ContextLoaderUpdater _heavyUpdater; +static JNI_ContextLoaderRestorer _heavyRestorer; + +void pljava_JNI_threadInitialize(bool manageLoader) +{ + if ( ! manageLoader ) + { + JNI_loaderUpdater = _noopUpdater; + JNI_loaderRestorer = _noopRestorer; + return; + } + + s_Thread_class = JNI_newGlobalRef(PgObject_getJavaClass( + "java/lang/Thread")); + s_Thread_currentThread = PgObject_getStaticJavaMethod( + s_Thread_class, + "currentThread", + "()" + "Ljava/lang/Thread;"); + s_Thread_contextLoader = JNI_getFieldIDOrNull(s_Thread_class, + "contextClassLoader", "Ljava/lang/ClassLoader;"); + + if ( NULL == s_Thread_contextLoader ) + { + ereport(WARNING, ( + errmsg("unable to manage thread context classloaders in this JVM") + )); + JNI_loaderUpdater = _noopUpdater; + JNI_loaderRestorer = _noopRestorer; + } + else if ( s_refuseOtherThreads || ! s_doMonitorOps ) + { + s_threadObject = + JNI_newGlobalRef( + JNI_callStaticObjectMethod( + s_Thread_class, s_Thread_currentThread)); + JNI_loaderUpdater = _lightUpdater; + JNI_loaderRestorer = _lightRestorer; + } + else + { + JNI_loaderUpdater = _heavyUpdater; + JNI_loaderRestorer = _heavyRestorer; + } +} + + +/* + * BEGIN_JAVA and END_JAVA are used in JNI wrappers that are not expected to + * invoke Java methods; all they do is play the game with the scope of the JNI + * env value that was devised to fail fast if the intended pattern for PL/Java's + * JNI usage isn't followed. + * + * BEGIN_CALL and END_CALL add to that the releasing of the "THREADLOCK" + * monitor when calling into Java, and reacquiring it on return, that support + * the java_thread_pg_entry=allow mode of operation, and also checking for + * exceptions and turning them into PostgreSQL ereports. + * + * The _MONITOR_HELD flavors of those skip the monitor operations but still do + * the exception checks. 
They are used in a select few *Locked flavors of + * method call wrappers used where only known and lightweight Java methods will + * be invoked and not arbitrary methods of user code. + */ #define BEGIN_JAVA { JNIEnv* env = jniEnv; jniEnv = 0; #define END_JAVA jniEnv = env; } #define BEGIN_CALL \ BEGIN_JAVA \ - if((*env)->MonitorExit(env, s_threadLock) < 0) \ + if(s_doMonitorOps && ((*env)->MonitorExit(env, s_threadLock) < 0)) \ elog(ERROR, "Java exit monitor failure"); #define END_CALL endCall(env); } @@ -42,18 +147,34 @@ static void elogExceptionMessage(JNIEnv* env, jthrowable exh, int logLevel) { StringInfoData buf; int sqlState = ERRCODE_INTERNAL_ERROR; - jclass exhClass = (*env)->GetObjectClass(env, exh); - jstring jtmp = (jstring)(*env)->CallObjectMethod(env, exhClass, Class_getName); JNIEnv* saveEnv = jniEnv; + jclass exhClass = (*env)->GetObjectClass(env, exh); + jstring jtmp = + (jstring)(*env)->CallObjectMethod(env, exhClass, Class_getName); + /* ExceptionOccurred check is found below ... */ initStringInfo(&buf); jniEnv = env; /* Used by the String operations */ - String_appendJavaString(&buf, jtmp); + + if ( 0 == (*env)->ExceptionOccurred(env) ) /* ... here */ + String_appendJavaString(&buf, jtmp); + else + { + (*env)->ExceptionClear(env); + appendStringInfoString(&buf, ""); + } + (*env)->DeleteLocalRef(env, exhClass); (*env)->DeleteLocalRef(env, jtmp); jtmp = (jstring)(*env)->CallObjectMethod(env, exh, Throwable_getMessage); + if ( 0 != (*env)->ExceptionOccurred(env) ) + { + (*env)->ExceptionClear(env); + jtmp = 0; + } + if(jtmp != 0) { appendStringInfoString(&buf, ": "); @@ -64,6 +185,12 @@ static void elogExceptionMessage(JNIEnv* env, jthrowable exh, int logLevel) if((*env)->IsInstanceOf(env, exh, SQLException_class)) { jtmp = (*env)->CallObjectMethod(env, exh, SQLException_getSQLState); + if ( 0 != (*env)->ExceptionOccurred(env) ) + { + (*env)->ExceptionClear(env); + jtmp = 0; + } + if(jtmp != 0) { char* s = String_createNTS(jtmp); @@ -78,10 +205,13 @@ static void elogExceptionMessage(JNIEnv* env, jthrowable exh, int logLevel) ereport(logLevel, (errcode(sqlState), errmsg("%s", buf.data))); } -static void printStacktrace(JNIEnv* env, jobject exh) +static void printStacktrace(JNIEnv* env, jobject exh, int elevel) { -#ifndef _MSC_VER - if(DEBUG1 >= log_min_messages || DEBUG1 >= client_min_messages) +#if 100002<=PG_VERSION_NUM || \ + 90607<=PG_VERSION_NUM && PG_VERSION_NUM<100000 || \ + 90511<=PG_VERSION_NUM && PG_VERSION_NUM< 90600 || \ + ! defined(_MSC_VER) + if(elevel >= log_min_messages || elevel >= client_min_messages) #else /* This is gross, but only happens as often as an exception escapes Java * code to be rethrown. 
There is some renewed interest on pgsql-hackers to @@ -92,8 +222,9 @@ static void printStacktrace(JNIEnv* env, jobject exh) || 0 == strncmp("debug", PG_GETCONFIGOPTION("client_min_messages"), 5) ) #endif { - int currLevel = Backend_setJavaLogLevel(DEBUG1); + int currLevel = Backend_setJavaLogLevel(elevel); (*env)->CallVoidMethod(env, exh, Throwable_printStackTrace); + (*env)->ExceptionOccurred(env); /* sop for JNI exception-check check */ Backend_setJavaLogLevel(currLevel); } } @@ -104,20 +235,26 @@ static void endCall(JNIEnv* env) if(exh != 0) (*env)->ExceptionClear(env); - if((*env)->MonitorEnter(env, s_threadLock) < 0) + if(s_doMonitorOps && ((*env)->MonitorEnter(env, s_threadLock) < 0)) elog(ERROR, "Java enter monitor failure"); jniEnv = env; if(exh != 0) { - printStacktrace(env, exh); + printStacktrace(env, exh, DEBUG1); if((*env)->IsInstanceOf(env, exh, ServerException_class)) { /* Rethrow the server error. */ jobject jed = (*env)->CallObjectMethod(env, exh, ServerException_getErrorData); + if ( 0 != (*env)->ExceptionOccurred(env) ) + { + (*env)->ExceptionClear(env); + jed = 0; + } + if(jed != 0) - ReThrowError(ErrorData_getErrorData(jed)); + ReThrowError(pljava_ErrorData_getErrorData(jed)); } /* There's no return from this call. */ @@ -134,14 +271,20 @@ static void endCallMonitorHeld(JNIEnv* env) jniEnv = env; if(exh != 0) { - printStacktrace(env, exh); + printStacktrace(env, exh, DEBUG1); if((*env)->IsInstanceOf(env, exh, ServerException_class)) { /* Rethrow the server error. */ jobject jed = (*env)->CallObjectMethod(env, exh, ServerException_getErrorData); + if ( 0 != (*env)->ExceptionOccurred(env) ) + { + (*env)->ExceptionClear(env); + jed = 0; + } + if(jed != 0) - ReThrowError(ErrorData_getErrorData(jed)); + ReThrowError(pljava_ErrorData_getErrorData(jed)); } /* There's no return from this call. */ @@ -151,6 +294,15 @@ static void endCallMonitorHeld(JNIEnv* env) bool beginNativeNoErrCheck(JNIEnv* env) { + if ( s_refuseOtherThreads && env != primordialJNIEnv ) + { + env = JNI_setEnv(env); + Exception_throw(ERRCODE_INTERNAL_ERROR, + "Attempt by non-initial thread to enter PostgreSQL from Java"); + JNI_setEnv(env); + return false; + } + if((env = JNI_setEnv(env)) != 0) { /* The backend is *not* awaiting the return of a call to the JVM @@ -175,15 +327,14 @@ bool beginNative(JNIEnv* env) return false; } - if(currentInvocation->errorOccured) + if(currentInvocation->errorOccurred) { /* An elog with level higher than ERROR was issued. The transaction * state is unknown. There's no way the JVM is allowed to enter the * backend at this point. */ env = JNI_setEnv(env); - Exception_throw(ERRCODE_INTERNAL_ERROR, - "An attempt was made to call a PostgreSQL backend function after an elog(ERROR) had been issued"); + Exception_throw_unhandled(); JNI_setEnv(env); return false; } @@ -323,6 +474,25 @@ jlong JNI_callLongMethodV(jobject object, jmethodID methodID, va_list args) return result; } +jlong JNI_callLongMethodLocked(jobject object, jmethodID methodID, ...) +{ + jlong result; + va_list args; + va_start(args, methodID); + result = JNI_callLongMethodLockedV(object, methodID, args); + va_end(args); + return result; +} + +jlong JNI_callLongMethodLockedV(jobject object, jmethodID methodID, va_list args) +{ + jlong result; + BEGIN_CALL_MONITOR_HELD + result = (*env)->CallLongMethodV(env, object, methodID, args); + END_CALL_MONITOR_HELD + return result; +} + jshort JNI_callShortMethod(jobject object, jmethodID methodID, ...) 
{ jshort result; @@ -380,6 +550,16 @@ jobject JNI_callObjectMethodLockedV(jobject object, jmethodID methodID, va_list return result; } +jboolean JNI_callStaticBooleanMethod(jclass clazz, jmethodID methodID, ...) +{ + jboolean result; + va_list args; + va_start(args, methodID); + result = JNI_callStaticBooleanMethodV(clazz, methodID, args); + va_end(args); + return result; +} + jboolean JNI_callStaticBooleanMethodA(jclass clazz, jmethodID methodID, jvalue* args) { jboolean result; @@ -389,6 +569,25 @@ jboolean JNI_callStaticBooleanMethodA(jclass clazz, jmethodID methodID, jvalue* return result; } +jboolean JNI_callStaticBooleanMethodV(jclass clazz, jmethodID methodID, va_list args) +{ + jboolean result; + BEGIN_CALL + result = (*env)->CallStaticBooleanMethodV(env, clazz, methodID, args); + END_CALL + return result; +} + +jbyte JNI_callStaticByteMethod(jclass clazz, jmethodID methodID, ...) +{ + jbyte result; + va_list args; + va_start(args, methodID); + result = JNI_callStaticByteMethodV(clazz, methodID, args); + va_end(args); + return result; +} + jbyte JNI_callStaticByteMethodA(jclass clazz, jmethodID methodID, jvalue* args) { jbyte result; @@ -398,6 +597,63 @@ jbyte JNI_callStaticByteMethodA(jclass clazz, jmethodID methodID, jvalue* args) return result; } +jbyte JNI_callStaticByteMethodV(jclass clazz, jmethodID methodID, va_list args) +{ + jbyte result; + BEGIN_CALL + result = (*env)->CallStaticByteMethodV(env, clazz, methodID, args); + END_CALL + return result; +} + +jshort JNI_callStaticShortMethod(jclass clazz, jmethodID methodID, ...) +{ + jshort result; + va_list args; + va_start(args, methodID); + result = JNI_callStaticShortMethodV(clazz, methodID, args); + va_end(args); + return result; +} + +jshort JNI_callStaticShortMethodV(jclass clazz, jmethodID methodID, va_list args) +{ + jshort result; + BEGIN_CALL + result = (*env)->CallStaticShortMethodV(env, clazz, methodID, args); + END_CALL + return result; +} + +jchar JNI_callStaticCharMethod(jclass clazz, jmethodID methodID, ...) +{ + jchar result; + va_list args; + va_start(args, methodID); + result = JNI_callStaticCharMethodV(clazz, methodID, args); + va_end(args); + return result; +} + +jchar JNI_callStaticCharMethodV(jclass clazz, jmethodID methodID, va_list args) +{ + jchar result; + BEGIN_CALL + result = (*env)->CallStaticCharMethodV(env, clazz, methodID, args); + END_CALL + return result; +} + +jdouble JNI_callStaticDoubleMethod(jclass clazz, jmethodID methodID, ...) +{ + jdouble result; + va_list args; + va_start(args, methodID); + result = JNI_callStaticDoubleMethodV(clazz, methodID, args); + va_end(args); + return result; +} + jdouble JNI_callStaticDoubleMethodA(jclass clazz, jmethodID methodID, jvalue* args) { jdouble result; @@ -407,6 +663,25 @@ jdouble JNI_callStaticDoubleMethodA(jclass clazz, jmethodID methodID, jvalue* ar return result; } +jdouble JNI_callStaticDoubleMethodV(jclass clazz, jmethodID methodID, va_list args) +{ + jdouble result; + BEGIN_CALL + result = (*env)->CallStaticDoubleMethodV(env, clazz, methodID, args); + END_CALL + return result; +} + +jfloat JNI_callStaticFloatMethod(jclass clazz, jmethodID methodID, ...) 
+{ + jfloat result; + va_list args; + va_start(args, methodID); + result = JNI_callStaticFloatMethodV(clazz, methodID, args); + va_end(args); + return result; +} + jfloat JNI_callStaticFloatMethodA(jclass clazz, jmethodID methodID, jvalue* args) { jfloat result; @@ -416,6 +691,25 @@ jfloat JNI_callStaticFloatMethodA(jclass clazz, jmethodID methodID, jvalue* args return result; } +jfloat JNI_callStaticFloatMethodV(jclass clazz, jmethodID methodID, va_list args) +{ + jfloat result; + BEGIN_CALL + result = (*env)->CallStaticFloatMethodV(env, clazz, methodID, args); + END_CALL + return result; +} + +jint JNI_callStaticIntMethod(jclass clazz, jmethodID methodID, ...) +{ + jint result; + va_list args; + va_start(args, methodID); + result = JNI_callStaticIntMethodV(clazz, methodID, args); + va_end(args); + return result; +} + jint JNI_callStaticIntMethodA(jclass clazz, jmethodID methodID, jvalue* args) { jint result; @@ -425,6 +719,15 @@ jint JNI_callStaticIntMethodA(jclass clazz, jmethodID methodID, jvalue* args) return result; } +jint JNI_callStaticIntMethodV(jclass clazz, jmethodID methodID, va_list args) +{ + jint result; + BEGIN_CALL + result = (*env)->CallStaticIntMethodV(env, clazz, methodID, args); + END_CALL + return result; +} + jlong JNI_callStaticLongMethod(jclass clazz, jmethodID methodID, ...) { jlong result; @@ -531,6 +834,21 @@ void JNI_callStaticVoidMethodV(jclass clazz, jmethodID methodID, va_list args) END_CALL } +void JNI_callStaticVoidMethodLocked(jclass clazz, jmethodID methodID, ...) +{ + va_list args; + va_start(args, methodID); + JNI_callStaticVoidMethodLockedV(clazz, methodID, args); + va_end(args); +} + +void JNI_callStaticVoidMethodLockedV(jclass clazz, jmethodID methodID, va_list args) +{ + BEGIN_CALL_MONITOR_HELD + (*env)->CallStaticVoidMethodV(env, clazz, methodID, args); + END_CALL_MONITOR_HELD +} + void JNI_callVoidMethod(jobject object, jmethodID methodID, ...) { va_list args; @@ -566,7 +884,11 @@ jint JNI_createVM(JavaVM** javaVM, JavaVMInitArgs* vmArgs) JNIEnv* env = 0; jint jstat = pljava_createvm(javaVM, (void **)&env, vmArgs); if(jstat == JNI_OK) + { jniEnv = env; + primordialJNIEnv = env; + mainThreadId = env; + } return jstat; } @@ -632,12 +954,20 @@ void JNI_exceptionDescribe(void) if(exh != 0) { (*env)->ExceptionClear(env); - printStacktrace(env, exh); + printStacktrace(env, exh, DEBUG1); elogExceptionMessage(env, exh, WARNING); } END_JAVA } +void JNI_exceptionStacktraceAtLevel(jthrowable exh, int elevel) +{ + BEGIN_JAVA + elogExceptionMessage(env, exh, elevel); + printStacktrace(env, exh, elevel); + END_JAVA +} + jthrowable JNI_exceptionOccurred(void) { jthrowable result; @@ -722,6 +1052,33 @@ jfieldID JNI_getFieldID(jclass clazz, const char* name, const char* sig) return result; } +jfieldID JNI_getFieldIDOrNull(jclass clazz, const char* name, const char* sig) +{ + jfieldID result; + jobject exh; + BEGIN_CALL + result = (*env)->GetFieldID(env, clazz, name, sig); + if(result == 0) { + exh = (*env)->ExceptionOccurred(env); + if ( 0 != exh ) + { + /* + * Ignore a NoSuchFieldError, but not any other exception. + * This operation order (first clear the pending exception, then + * do the IsInstanceOf check, then Throw again if not the expected + * class) avoids a benign -Xcheck:JNI warning about calling + * IsInstanceOf while an exception is pending. + */ + (*env)->ExceptionClear(env); + if ( ! 
(*env)->IsInstanceOf(env, exh, NoSuchFieldError_class) ) + (*env)->Throw(env, exh); + (*env)->DeleteLocalRef(env, exh); + } + } + END_CALL + return result; +} + jfloat* JNI_getFloatArrayElements(jfloatArray array, jboolean* isCopy) { jfloat* result; @@ -857,15 +1214,43 @@ jmethodID JNI_getStaticMethodIDOrNull(jclass clazz, const char* name, const char result = (*env)->GetStaticMethodID(env, clazz, name, sig); if(result == 0) { exh = (*env)->ExceptionOccurred(env); - if ( 0 == exh - || (*env)->IsInstanceOf(env, exh, NoSuchMethodError_class) ) - (*env)->ExceptionClear(env); /* NoSuch... is only thing to ignore */ - (*env)->DeleteLocalRef(env, exh); + if ( 0 != exh ) + { + /* + * Ignore a NoSuchMethodError, but not any other exception. + * This operation order (first clear the pending exception, then + * do the IsInstanceOf check, then Throw again if not the expected + * class) avoids a benign -Xcheck:JNI warning about calling + * IsInstanceOf while an exception is pending. + */ + (*env)->ExceptionClear(env); + if ( ! (*env)->IsInstanceOf(env, exh, NoSuchMethodError_class) ) + (*env)->Throw(env, exh); + (*env)->DeleteLocalRef(env, exh); + } } END_CALL return result; } +jboolean JNI_getStaticBooleanField(jclass clazz, jfieldID field) +{ + jboolean result; + BEGIN_JAVA + result = (*env)->GetStaticBooleanField(env, clazz, field); + END_JAVA + return result; +} + +jint JNI_getStaticIntField(jclass clazz, jfieldID field) +{ + jint result; + BEGIN_JAVA + result = (*env)->GetStaticIntField(env, clazz, field); + END_JAVA + return result; +} + jobject JNI_getStaticObjectField(jclass clazz, jfieldID field) { jobject result; @@ -1078,6 +1463,25 @@ jobject JNI_newObjectV(jclass clazz, jmethodID ctor, va_list args) return result; } +jobject JNI_newObjectLocked(jclass clazz, jmethodID ctor, ...) +{ + jobject result; + va_list args; + va_start(args, ctor); + result = JNI_newObjectLockedV(clazz, ctor, args); + va_end(args); + return result; +} + +jobject JNI_newObjectLockedV(jclass clazz, jmethodID ctor, va_list args) +{ + jobject result; + BEGIN_CALL_MONITOR_HELD + result = (*env)->NewObjectV(env, clazz, ctor, args); + END_CALL_MONITOR_HELD + return result; +} + void JNI_releaseByteArrayElements(jbyteArray array, jbyte* elems, jint mode) { BEGIN_JAVA @@ -1192,6 +1596,13 @@ void JNI_setLongArrayRegion(jlongArray array, jsize start, jsize len, jlong* buf END_JAVA } +void JNI_setIntField(jobject object, jfieldID field, jint value) +{ + BEGIN_JAVA + (*env)->SetIntField(env, object, field, value); + END_JAVA +} + void JNI_setLongField(jobject object, jfieldID field, jlong value) { BEGIN_JAVA @@ -1213,11 +1624,18 @@ void JNI_setShortArrayRegion(jshortArray array, jsize start, jsize len, jshort* END_JAVA } +void JNI_setStaticObjectField(jclass clazz, jfieldID field, jobject value) +{ + BEGIN_JAVA + (*env)->SetStaticObjectField(env, clazz, field, value); + END_JAVA +} + void JNI_setThreadLock(jobject lockObject) { BEGIN_JAVA s_threadLock = (*env)->NewGlobalRef(env, lockObject); - if((*env)->MonitorEnter(env, s_threadLock) < 0) + if(NULL != s_threadLock && (*env)->MonitorEnter(env, s_threadLock) < 0) elog(ERROR, "Java enter monitor failure (initial)"); END_JAVA } @@ -1230,3 +1648,124 @@ jint JNI_throw(jthrowable obj) END_JAVA return result; } + +/* + * Implementations of the context class loader updater and restorer. + * The loader reference passed in is not to be deleted. If saved anywhere, + * a new global ref is to be taken, and later deleted when restored. 
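
The JNI_getFieldIDOrNull and JNI_getStaticMethodIDOrNull wrappers above share one idiom worth spelling out: clear the pending exception first, only then test its class, and re-throw it only if it is not the one error being tolerated, so that IsInstanceOf is never called with an exception still pending (which -Xcheck:jni would flag). A minimal standalone sketch of that idiom, not part of this patch; the helper name and the embedded-JVM scaffolding are illustrative only and use nothing beyond the standard JNI invocation API:

    #include <jni.h>
    #include <stdio.h>

    /*
     * Sketch of the idiom used by JNI_getFieldIDOrNull above: clear the pending
     * exception before inspecting it, and re-throw it only if it is not the one
     * error class we are prepared to tolerate.
     */
    static jfieldID getFieldIDTolerating(JNIEnv *env, jclass cls,
        const char *name, const char *sig, jclass toleratedErrorClass)
    {
        jfieldID id = (*env)->GetFieldID(env, cls, name, sig);
        if ( NULL == id )
        {
            jthrowable exh = (*env)->ExceptionOccurred(env);
            if ( NULL != exh )
            {
                (*env)->ExceptionClear(env);      /* clear first ... */
                if ( ! (*env)->IsInstanceOf(env, exh, toleratedErrorClass) )
                    (*env)->Throw(env, exh);      /* ... re-throw anything else */
                (*env)->DeleteLocalRef(env, exh);
            }
        }
        return id;
    }

    int main(void)
    {
        JavaVM *jvm;
        JNIEnv *env;
        JavaVMInitArgs args = { JNI_VERSION_1_8, 0, NULL, JNI_FALSE };
        jclass nsfe;
        jclass string;
        jfieldID id;

        if ( JNI_OK != JNI_CreateJavaVM(&jvm, (void **)&env, &args) )
            return 1;

        nsfe   = (*env)->FindClass(env, "java/lang/NoSuchFieldError");
        string = (*env)->FindClass(env, "java/lang/String");

        /* String has no field of this name, so the lookup fails quietly. */
        id = getFieldIDTolerating(env, string, "noSuchField", "I", nsfe);

        printf("field ID %p, exception pending: %d\n",
            (void *)id, (int)(*env)->ExceptionCheck(env));

        (*jvm)->DestroyJavaVM(jvm);
        return 0;
    }

Run against any recent JDK, this prints a null field ID with no exception left pending, which is exactly the contract the *OrNull wrappers provide to their callers.
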
+ */ + +static inline void _updaterCommon(JNIEnv *env, jobject thread, jobject loader) +{ + jobject old = (*env)->GetObjectField(env, thread, s_Thread_contextLoader); + + /* + * If it is not already the loader we want, change it, and set + * currentInvocation->savedLoader to restore it later. If this is + * a top-level invocation, we don't care what it gets restored to, so lie, + * and save loader there instead of old. If there are many consecutive + * top-level calls with the same context loader, that will save work later. + * + * If it is already the loader we want, again we check for a top-level call, + * and can leave currentInvocation->savedLoader completely unset in that + * case, so the restore call will be skipped completely. If not a top-level + * call, though, see that it gets restored to what the caller might expect, + * even if it somehow got changed. + */ + + if ( ! (*env)->IsSameObject(env, old, loader) ) + { + (*env)->SetObjectField(env, thread, s_Thread_contextLoader, loader); + + currentInvocation->savedLoader = (*env)->NewGlobalRef(env, + ( NULL == currentInvocation->previous ) ? loader : old); + } + else if ( NULL != currentInvocation->previous ) + currentInvocation->savedLoader = (*env)->NewGlobalRef(env, old); + + (*env)->DeleteLocalRef(env, old); +} + +static void _heavyUpdater(jobject loader) +{ + jobject thread; + jobject exh; + + BEGIN_JAVA + + thread = + (*env)->CallStaticObjectMethod(env, + s_Thread_class, s_Thread_currentThread); /* should never fail */ + + exh = (*env)->ExceptionOccurred(env); /* but mollify -Xcheck:jni anyway */ + if(exh != 0) + { + (*env)->ExceptionClear(env); + elogExceptionMessage(env, exh, ERROR); + } + + _updaterCommon(env, thread, loader); + + (*env)->DeleteLocalRef(env, thread); + + END_JAVA +} + +void _heavyRestorer() +{ + jobject thread; + jobject value; + jobject exh; + + BEGIN_JAVA + + thread = + (*env)->CallStaticObjectMethod(env, + s_Thread_class, s_Thread_currentThread); /* should never fail */ + + exh = (*env)->ExceptionOccurred(env); /* but mollify -Xcheck:jni anyway */ + if(exh != 0) + { + (*env)->ExceptionClear(env); + elogExceptionMessage(env, exh, ERROR); + } + + value = currentInvocation->savedLoader; + + (*env)->SetObjectField(env, thread, s_Thread_contextLoader, value); + (*env)->DeleteGlobalRef(env, value); + (*env)->DeleteLocalRef(env, thread); + + END_JAVA +} + +static void _lightUpdater(jobject loader) +{ + BEGIN_JAVA + + _updaterCommon(env, s_threadObject, loader); + + END_JAVA +} + +void _lightRestorer() +{ + jobject value; + + BEGIN_JAVA + + value = currentInvocation->savedLoader; + + (*env)->SetObjectField(env, s_threadObject, s_Thread_contextLoader, value); + (*env)->DeleteGlobalRef(env, value); + + END_JAVA +} + +static void _noopUpdater(jobject loader) +{ +} + +void _noopRestorer() +{ +} diff --git a/pljava-so/src/main/c/PgObject.c b/pljava-so/src/main/c/PgObject.c index 902e2ac3..3b0b5f7c 100644 --- a/pljava-so/src/main/c/PgObject.c +++ b/pljava-so/src/main/c/PgObject.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB - Thomas Hallgren + * Chapman Flack */ #include #include @@ -18,9 +22,9 @@ static bool s_loopLock = false; static jclass s_Class_class = 0; static jmethodID s_Class_getName = 0; -/* effectiveClassPath is set at initialization time (in Backend.c) +/* effectiveModulePath is set at initialization time (in Backend.c) */ -const char* effectiveClassPath; +const char* effectiveModulePath; void PgObject_free(PgObject object) { @@ -64,12 +68,7 @@ const char* PgObjectClass_getName(PgObjectClass self) return self->name; } -void _PgObject_pureVirtualCalled(PgObject object) -{ - ereport(ERROR, (errmsg("Pure virtual method called"))); -} - -static char* PgObject_getClassName(jclass cls) +char* PgObject_getClassName(jclass cls) { jstring jstr; char* tmp; @@ -114,8 +113,9 @@ jclass PgObject_getJavaClass(const char* className) JNI_exceptionClear(); } ereport(ERROR, ( - errmsg("Unable to load class %s using CLASSPATH '%s'", - className, effectiveClassPath == 0 ? "null" : effectiveClassPath))); + errmsg("Unable to load class %s using module path '%s'", + className, effectiveModulePath == 0 ? "null" : + effectiveModulePath))); } return cls; } diff --git a/pljava-so/src/main/c/PgSavepoint.c b/pljava-so/src/main/c/PgSavepoint.c index ac6cdeee..45fcca81 100644 --- a/pljava-so/src/main/c/PgSavepoint.c +++ b/pljava-so/src/main/c/PgSavepoint.c @@ -1,56 +1,125 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2024 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack */ #include +#include #include #include #include "org_postgresql_pljava_internal_PgSavepoint.h" +#include "pljava/PgSavepoint.h" #include "pljava/Exception.h" +#include "pljava/Invocation.h" #include "pljava/type/String.h" #include "pljava/SPI.h" -#define pg_unreachable() abort() +/* + * Workaround for issue #260, PostgreSQL API breakage by EnterpriseDB. They + * added a ReleaseCurrentSubTransactionEx function with an added argument, and + * made ReleaseCurrentSubTransaction call it, passing false. But instead of + * leaving ReleaseCurrentSubTransaction an actual function that does so, which + * would not have been an API break, they made it a macro instead, with the + * result that its address cannot be taken. The reporter of the issue had an + * inquiry open with EDB for four months trying to get specifics on what + * versions have that issue, with no useful response. So this workaround is just + * conditioned on finding ReleaseCurrentSubTransaction defined as a macro. 
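
The shape of that workaround is easy to reproduce outside PostgreSQL. A minimal standalone sketch, not part of this patch (all names here are made up), showing why a function-like macro cannot be passed where a function pointer is expected, and how an addressable wrapper restores that:

    #include <stdio.h>

    /* Originally a plain function ... */
    static void real_release(int flags)
    {
        printf("released, flags=%d\n", flags);
    }

    /* ... later turned into a function-like macro that forwards an extra
     * argument. From here on, &ReleaseThing is a compile error: a macro has
     * no address that could be handed to unwind()-style code. */
    #define ReleaseThing() real_release(0)

    /* The workaround: wrap the macro in a real, addressable function. */
    static void addressableRelease(void)
    {
        ReleaseThing();
    }

    int main(void)
    {
        void (*f)(void) = addressableRelease; /* usable as a callback again */
        f();
        return 0;
    }
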
+ */ +#ifdef ReleaseCurrentSubTransaction +static void addressableRelease(void); +static void addressableRelease() +{ + ReleaseCurrentSubTransaction(); +} +#undef ReleaseCurrentSubTransaction +#define ReleaseCurrentSubTransaction addressableRelease +#endif +static jclass s_PgSavepoint_class; +static jmethodID s_forId; +static jfieldID s_nestLevel; extern void PgSavepoint_initialize(void); +static void unwind(void (*f)(void), jint xid, jint nestingLevel); +static void assertXid(SubTransactionId); + +jobject pljava_PgSavepoint_forId(SubTransactionId subId) +{ + return JNI_callStaticObjectMethodLocked(s_PgSavepoint_class, s_forId, + (jint)subId); +} + void PgSavepoint_initialize(void) { JNINativeMethod methods[] = { { "_set", - "(Ljava/lang/String;)J", + "(Ljava/lang/String;)I", Java_org_postgresql_pljava_internal_PgSavepoint__1set }, { "_release", - "(J)V", + "(II)V", Java_org_postgresql_pljava_internal_PgSavepoint__1release }, { "_rollback", - "(J)V", + "(II)V", Java_org_postgresql_pljava_internal_PgSavepoint__1rollback }, - { - "_getName", - "(J)Ljava/lang/String;", - Java_org_postgresql_pljava_internal_PgSavepoint__1getName - }, - { - "_getId", - "(J)I", - Java_org_postgresql_pljava_internal_PgSavepoint__1getId - }, { 0, 0, 0 } }; - PgObject_registerNatives("org/postgresql/pljava/internal/PgSavepoint", methods); + PgObject_registerNatives("org/postgresql/pljava/internal/PgSavepoint", + methods); + + /* + * I would rather put this at the top, but it counts as a statement, and + * would trigger a declaration-after-statement warning. + */ + StaticAssertStmt(sizeof(SubTransactionId) <= sizeof(jint), + "SubTransactionId wider than jint?!"); + + s_PgSavepoint_class = JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/internal/PgSavepoint")); + s_forId = + PgObject_getStaticJavaMethod(s_PgSavepoint_class, "forId", + "(I)Lorg/postgresql/pljava/internal/PgSavepoint;"); + s_nestLevel = + PgObject_getJavaField(s_PgSavepoint_class, "m_nestLevel", "I"); +} + +static void unwind(void (*f)(void), jint xid, jint nestingLevel) +{ + while ( nestingLevel < GetCurrentTransactionNestLevel() ) + f(); + + if ( nestingLevel == GetCurrentTransactionNestLevel() ) + { + assertXid((SubTransactionId)xid); + f(); + } +} + +static void assertXid(SubTransactionId xid) +{ + if(xid != GetCurrentSubTransactionId()) + { + /* Oops. Rollback to top level transaction. 
+ */ + ereport(ERROR, ( + errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), + errmsg("Subtransaction mismatch at txlevel %d", + GetCurrentTransactionNestLevel()))); + } } /**************************************** @@ -59,119 +128,73 @@ void PgSavepoint_initialize(void) /* * Class: org_postgresql_pljava_internal_PgSavepoint * Method: _set - * Signature: (Ljava/lang/String;)J; + * Signature: (Ljava/lang/String;)I; */ -JNIEXPORT jlong JNICALL -Java_org_postgresql_pljava_internal_PgSavepoint__1set(JNIEnv* env, jclass cls, jstring jname) +JNIEXPORT jint JNICALL +Java_org_postgresql_pljava_internal_PgSavepoint__1set(JNIEnv* env, jobject this, jstring jname) { - jlong result = 0; + jint xid = 0; BEGIN_NATIVE PG_TRY(); { - Ptr2Long p2l; char* name = String_createNTS(jname); - MemoryContext currCtx = MemoryContextSwitchTo(JavaMemoryContext); - p2l.longVal = 0L; /* ensure that the rest is zeroed out */ - p2l.ptrVal = SPI_setSavepoint(name); - result = p2l.longVal; - MemoryContextSwitchTo(currCtx); - pfree(name); + Invocation_assertConnect(); + JNI_setIntField(this, s_nestLevel, 1+GetCurrentTransactionNestLevel()); + BeginInternalSubTransaction(name); + xid = GetCurrentSubTransactionId(); + if ( NULL != name ) + pfree(name); } PG_CATCH(); { - Exception_throw_ERROR("SPI_setSavepoint"); + Exception_throw_ERROR("setSavepoint"); } PG_END_TRY(); END_NATIVE - return result; + return xid; } /* * Class: org_postgresql_pljava_internal_PgSavepoint - * Method: _getName - * Signature: (J)Ljava/lang/String; + * Method: _release + * Signature: (II)V */ -JNIEXPORT jstring JNICALL -Java_org_postgresql_pljava_internal_PgSavepoint__1getName(JNIEnv* env, jclass clazz, jlong _this) +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_PgSavepoint__1release(JNIEnv* env, jclass clazz, jint xid, jint nestLevel) { - jstring result = 0; - if(_this != 0) + BEGIN_NATIVE + PG_TRY(); { - BEGIN_NATIVE - Ptr2Long p2l; - p2l.longVal = _this; - result = String_createJavaStringFromNTS(((Savepoint*)p2l.ptrVal)->name); - END_NATIVE + unwind(ReleaseCurrentSubTransaction, xid, nestLevel); } - return result; -} - -/* - * Class: org_postgresql_pljava_internal_PgSavepoint - * Method: _getId - * Signature: (J)I - */ -JNIEXPORT jint JNICALL -Java_org_postgresql_pljava_internal_PgSavepoint__1getId(JNIEnv* env, jclass clazz, jlong _this) -{ - jint result = (jint)InvalidSubTransactionId; - if(_this != 0) + PG_CATCH(); { - Ptr2Long p2l; - p2l.longVal = _this; - result = (jint)((Savepoint*)p2l.ptrVal)->xid; + Exception_throw_ERROR("releaseSavepoint"); } - return result; + PG_END_TRY(); + END_NATIVE } /* * Class: org_postgresql_pljava_internal_PgSavepoint - * Method: _release - * Signature: (J)V + * Method: _rollback + * Signature: (II)V */ JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_PgSavepoint__1release(JNIEnv* env, jclass clazz, jlong _this) +Java_org_postgresql_pljava_internal_PgSavepoint__1rollback(JNIEnv* env, jclass clazz, jint xid, jint nestLevel) { - if(_this != 0) + BEGIN_NATIVE + PG_TRY(); { - BEGIN_NATIVE - Ptr2Long p2l; - p2l.longVal = _this; - PG_TRY(); - { - SPI_releaseSavepoint((Savepoint*)p2l.ptrVal); - } - PG_CATCH(); - { - Exception_throw_ERROR("SPI_releaseSavepoint"); - } - PG_END_TRY(); - END_NATIVE + unwind(RollbackAndReleaseCurrentSubTransaction, xid, nestLevel); +#if PG_VERSION_NUM < 100000 + SPI_restore_connection(); +#endif } -} - -/* - * Class: org_postgresql_pljava_internal_PgSavepoint - * Method: _rollback - * Signature: (J)V - */ -JNIEXPORT void JNICALL 
-Java_org_postgresql_pljava_internal_PgSavepoint__1rollback(JNIEnv* env, jclass clazz, jlong _this) -{ - if(_this != 0) + PG_CATCH(); { - BEGIN_NATIVE - Ptr2Long p2l; - p2l.longVal = _this; - PG_TRY(); - { - SPI_rollbackSavepoint((Savepoint*)p2l.ptrVal); - } - PG_CATCH(); - { - Exception_throw_ERROR("SPI_rollbackSavepoint"); - } - PG_END_TRY(); - END_NATIVE + Exception_throw_ERROR("rollbackSavepoint"); } + PG_END_TRY(); + END_NATIVE } diff --git a/pljava-so/src/main/c/SPI.c b/pljava-so/src/main/c/SPI.c index fd7a657b..3d891b30 100644 --- a/pljava-so/src/main/c/SPI.c +++ b/pljava-so/src/main/c/SPI.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include "org_postgresql_pljava_internal_SPI.h" #include "pljava/SPI.h" @@ -19,10 +23,9 @@ #include #endif -#define pg_unreachable() abort() - - -Savepoint* infant = 0; +#define CONFIRMCONST(c) \ +StaticAssertStmt((c) == (org_postgresql_pljava_internal_##c), \ + "Java/C value mismatch for " #c) extern void SPI_initialize(void); void SPI_initialize(void) @@ -30,12 +33,12 @@ void SPI_initialize(void) JNINativeMethod methods[] = { { "_exec", - "(JLjava/lang/String;I)I", - Java_org_postgresql_pljava_internal_SPI__1exec + "(Ljava/lang/String;I)I", + Java_org_postgresql_pljava_internal_SPI__1exec }, { "_getProcessed", - "()I", + "()J", Java_org_postgresql_pljava_internal_SPI__1getProcessed }, { @@ -56,6 +59,54 @@ void SPI_initialize(void) { 0, 0, 0 }}; PgObject_registerNatives("org/postgresql/pljava/internal/SPI", methods); + + /* + * Statically assert that the Java code has the right values for these. + * I would rather have this at the top, but these count as statements and + * would trigger a declaration-after-statment warning. 
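
The CONFIRMCONST pattern can be tried in isolation. A minimal standalone sketch, not part of this patch: it uses plain C11 static_assert at file scope where the patch uses PostgreSQL's StaticAssertStmt (which is usable where a statement is allowed, hence the remark above about placement), and the constant name and value are invented stand-ins for the SPI header and the generated Java-constants header. The same cross-checking idiom reappears later in this patch in SubXactListener.c and TypeOid.c.

    #include <assert.h>    /* C11: static_assert */

    /* Invented stand-ins: the value as a C header defines it ... */
    #define SPI_OK_EXAMPLE 42
    /* ... and the copy a generated Java-constants header would carry. */
    #define org_postgresql_pljava_internal_SPI_OK_EXAMPLE 42

    /* Same shape as CONFIRMCONST above: if the two copies ever drift apart,
     * the build fails here instead of misbehaving at run time. */
    #define CONFIRMCONST(c) \
        static_assert((c) == (org_postgresql_pljava_internal_##c), \
            "Java/C value mismatch for " #c)

    CONFIRMCONST(SPI_OK_EXAMPLE);

    int main(void)
    {
        return 0;
    }
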
+ */ + CONFIRMCONST(SPI_ERROR_CONNECT); + CONFIRMCONST(SPI_ERROR_COPY); + CONFIRMCONST(SPI_ERROR_OPUNKNOWN); + CONFIRMCONST(SPI_ERROR_UNCONNECTED); + CONFIRMCONST(SPI_ERROR_CURSOR); + CONFIRMCONST(SPI_ERROR_ARGUMENT); + CONFIRMCONST(SPI_ERROR_PARAM); + CONFIRMCONST(SPI_ERROR_TRANSACTION); + CONFIRMCONST(SPI_ERROR_NOATTRIBUTE); + CONFIRMCONST(SPI_ERROR_NOOUTFUNC); + CONFIRMCONST(SPI_ERROR_TYPUNKNOWN); +#if PG_VERSION_NUM >= 100000 + CONFIRMCONST(SPI_ERROR_REL_DUPLICATE); + CONFIRMCONST(SPI_ERROR_REL_NOT_FOUND); +#endif + + CONFIRMCONST(SPI_OK_CONNECT); + CONFIRMCONST(SPI_OK_FINISH); + CONFIRMCONST(SPI_OK_FETCH); + CONFIRMCONST(SPI_OK_UTILITY); + CONFIRMCONST(SPI_OK_SELECT); + CONFIRMCONST(SPI_OK_SELINTO); + CONFIRMCONST(SPI_OK_INSERT); + CONFIRMCONST(SPI_OK_DELETE); + CONFIRMCONST(SPI_OK_UPDATE); + CONFIRMCONST(SPI_OK_CURSOR); + CONFIRMCONST(SPI_OK_INSERT_RETURNING); + CONFIRMCONST(SPI_OK_DELETE_RETURNING); + CONFIRMCONST(SPI_OK_UPDATE_RETURNING); + CONFIRMCONST(SPI_OK_REWRITTEN); +#if PG_VERSION_NUM >= 100000 + CONFIRMCONST(SPI_OK_REL_REGISTER); + CONFIRMCONST(SPI_OK_REL_UNREGISTER); + CONFIRMCONST(SPI_OK_TD_REGISTER); +#endif +#if PG_VERSION_NUM >= 150000 + CONFIRMCONST(SPI_OK_MERGE); +#endif + +#if PG_VERSION_NUM >= 110000 + CONFIRMCONST(SPI_OPT_NONATOMIC); +#endif } /**************************************** @@ -64,10 +115,10 @@ void SPI_initialize(void) /* * Class: org_postgresql_pljava_internal_SPI * Method: _exec - * Signature: (JLjava/lang/String;I)I + * Signature: (Ljava/lang/String;I)I */ JNIEXPORT jint JNICALL -Java_org_postgresql_pljava_internal_SPI__1exec(JNIEnv* env, jclass cls, jlong threadId, jstring cmd, jint count) +Java_org_postgresql_pljava_internal_SPI__1exec(JNIEnv* env, jclass cls, jstring cmd, jint count) { jint result = 0; @@ -76,7 +127,7 @@ Java_org_postgresql_pljava_internal_SPI__1exec(JNIEnv* env, jclass cls, jlong th if(command != 0) { STACK_BASE_VARS - STACK_BASE_PUSH(threadId) + STACK_BASE_PUSH(env) PG_TRY(); { Invocation_assertConnect(); @@ -100,12 +151,12 @@ Java_org_postgresql_pljava_internal_SPI__1exec(JNIEnv* env, jclass cls, jlong th /* * Class: org_postgresql_pljava_internal_SPI * Method: _getProcessed - * Signature: ()I + * Signature: ()J */ -JNIEXPORT jint JNICALL +JNIEXPORT jlong JNICALL Java_org_postgresql_pljava_internal_SPI__1getProcessed(JNIEnv* env, jclass cls) { - return (jint)SPI_processed; + return (jlong)SPI_processed; } /* @@ -153,56 +204,3 @@ Java_org_postgresql_pljava_internal_SPI__1freeTupTable(JNIEnv* env, jclass cls) END_NATIVE } } - -static void assertXid(SubTransactionId xid) -{ - if(xid != GetCurrentSubTransactionId()) - { - /* Oops. Rollback to top level transaction. 
- */ - ereport(ERROR, ( - errcode(ERRCODE_INVALID_TRANSACTION_TERMINATION), - errmsg("Subtransaction mismatch at txlevel %d", - GetCurrentTransactionNestLevel()))); - } -} - -Savepoint* SPI_setSavepoint(const char* name) -{ - Savepoint* sp = (Savepoint*)palloc(sizeof(Savepoint) + strlen(name)); - Invocation_assertConnect(); - sp->nestingLevel = GetCurrentTransactionNestLevel() + 1; - strcpy(sp->name, name); - infant = sp; - BeginInternalSubTransaction(sp->name); - infant = 0; - sp->xid = GetCurrentSubTransactionId(); - return sp; -} - -void SPI_releaseSavepoint(Savepoint* sp) -{ - while(sp->nestingLevel < GetCurrentTransactionNestLevel()) - ReleaseCurrentSubTransaction(); - - if(sp->nestingLevel == GetCurrentTransactionNestLevel()) - { - assertXid(sp->xid); - ReleaseCurrentSubTransaction(); - } - pfree(sp); -} - -void SPI_rollbackSavepoint(Savepoint* sp) -{ - while(sp->nestingLevel < GetCurrentTransactionNestLevel()) - RollbackAndReleaseCurrentSubTransaction(); - - if(sp->nestingLevel == GetCurrentTransactionNestLevel()) - { - assertXid(sp->xid); - RollbackAndReleaseCurrentSubTransaction(); - } - SPI_restore_connection(); - pfree(sp); -} diff --git a/pljava-so/src/main/c/SQLChunkIOOrder.c b/pljava-so/src/main/c/SQLChunkIOOrder.c new file mode 100644 index 00000000..8663eeaf --- /dev/null +++ b/pljava-so/src/main/c/SQLChunkIOOrder.c @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2025 TADA AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +#include + +#include "pljava/PgObject.h" + +extern void SQLChunkIOOrder_initialize(void); +void SQLChunkIOOrder_initialize(void) +{ + /* + * Nothing more is needed here than to cause the class's static initializer + * to run (at the chosen time, from native code, before user Java code could + * have altered the needed system properties). + * + * The JNI_FindClass mentions that it initializes the named class, but only + * says so in one place, does not clearly say it returns an initialized + * class, and does not mention ExceptionInInitializerError as a possible + * exception. + * + * GetStaticFieldID clearly says it causes an uninitialized class to be + * initialized, and lists ExceptionInInitializerError as a possible + * exception. So, just to be sure, a field ID is fetched here. + */ + jclass cls = PgObject_getJavaClass( + "org/postgresql/pljava/jdbc/SQLChunkIOOrder"); + PgObject_getStaticJavaField(cls, "MIRROR_J2P", "Ljava/nio/ByteOrder;"); +} diff --git a/pljava-so/src/main/c/SQLInputFromTuple.c b/pljava-so/src/main/c/SQLInputFromTuple.c index bf12d1be..56388771 100644 --- a/pljava-so/src/main/c/SQLInputFromTuple.c +++ b/pljava-so/src/main/c/SQLInputFromTuple.c @@ -1,88 +1,50 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ #include -#include "pljava/type/HeapTupleHeader.h" +#include "pljava/type/SingleRowReader.h" #include "pljava/type/TupleDesc.h" +#include "pljava/DualState.h" #include "pljava/Invocation.h" #include "pljava/SQLInputFromTuple.h" -#include "org_postgresql_pljava_jdbc_SQLInputFromTuple.h" - -#define pg_unreachable() abort() - - static jclass s_SQLInputFromTuple_class; static jmethodID s_SQLInputFromTuple_init; -jobject SQLInputFromTuple_create(HeapTupleHeader hth, TupleDesc td) +jobject pljava_SQLInputFromTuple_create(HeapTupleHeader hth) { - jobject tupleDesc; + jlong heapTup = PointerGetJLong(hth); + jlong lifespan = PointerGetJLong(currentInvocation); jobject result; - jlong pointer; + jobject jtd = pljava_SingleRowReader_getTupleDesc(hth); - if(hth == 0) - return 0; + result = + JNI_newObjectLocked(s_SQLInputFromTuple_class, s_SQLInputFromTuple_init, + pljava_DualState_key(), lifespan, heapTup, jtd); - tupleDesc = TupleDesc_create(td); - pointer = Invocation_createLocalWrapper(hth); - result = JNI_newObject(s_SQLInputFromTuple_class, s_SQLInputFromTuple_init, pointer, tupleDesc); - JNI_deleteLocalRef(tupleDesc); + JNI_deleteLocalRef(jtd); return result; } /* Make this datatype available to the postgres system. */ -extern void SQLInputFromTuple_initialize(void); -void SQLInputFromTuple_initialize(void) -{ - JNINativeMethod methods[] = - { - { - "_getObject", - "(JJI)Ljava/lang/Object;", - Java_org_postgresql_pljava_jdbc_SQLInputFromTuple__1getObject - }, - { - "_free", - "(J)V", - Java_org_postgresql_pljava_jdbc_SQLInputFromTuple__1free - }, - { 0, 0, 0 } - }; - - s_SQLInputFromTuple_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/jdbc/SQLInputFromTuple")); - PgObject_registerNatives2(s_SQLInputFromTuple_class, methods); - s_SQLInputFromTuple_init = PgObject_getJavaMethod(s_SQLInputFromTuple_class, "", "(JLorg/postgresql/pljava/internal/TupleDesc;)V"); -} - -/**************************************** - * JNI methods - ****************************************/ - -/* - * Class: org_postgresql_pljava_jdbc_SQLInputFromTuple - * Method: _free - * Signature: (J)V - */ -JNIEXPORT void JNICALL -Java_org_postgresql_pljava_jdbc_SQLInputFromTuple__1free(JNIEnv* env, jobject _this, jlong hth) -{ - HeapTupleHeader_free(env, hth); -} - -/* - * Class: org_postgresql_pljava_jdbc_SQLInputFromTuple - * Method: _getObject - * Signature: (JJI)Ljava/lang/Object; - */ -JNIEXPORT jobject JNICALL -Java_org_postgresql_pljava_jdbc_SQLInputFromTuple__1getObject(JNIEnv* env, jclass clazz, jlong hth, jlong jtd, jint attrNo) +void pljava_SQLInputFromTuple_initialize(void) { - return HeapTupleHeader_getObject(env, hth, jtd, attrNo); + jclass cls = + PgObject_getJavaClass("org/postgresql/pljava/jdbc/SQLInputFromTuple"); + s_SQLInputFromTuple_init = PgObject_getJavaMethod(cls, "", + "(Lorg/postgresql/pljava/internal/DualState$Key;JJLorg/postgresql/pljava/internal/TupleDesc;)V"); + s_SQLInputFromTuple_class = JNI_newGlobalRef(cls); + JNI_deleteLocalRef(cls); } diff --git a/pljava-so/src/main/c/SQLOutputToChunk.c b/pljava-so/src/main/c/SQLOutputToChunk.c index df1d00c4..88bca855 100644 --- a/pljava-so/src/main/c/SQLOutputToChunk.c +++ b/pljava-so/src/main/c/SQLOutputToChunk.c @@ -1,8 +1,14 @@ /* - * 
Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -22,14 +28,11 @@ static jmethodID s_Buffer_position; jobject SQLOutputToChunk_create(StringInfo data, bool isJavaBasedScalar) { jobject dbb; - Ptr2Long p2l; - p2l.longVal = 0L; /* ensure that the rest is zeroed out */ - p2l.ptrVal = data; dbb = JNI_newDirectByteBuffer(data->data, data->maxlen); if ( 0 < data->len ) JNI_callObjectMethodLocked(dbb, s_Buffer_position, data->len); return JNI_newObject(s_SQLOutputToChunk_class, s_SQLOutputToChunk_init, - p2l.longVal, dbb, isJavaBasedScalar ? JNI_TRUE : JNI_FALSE); + PointerGetJLong(data), dbb, isJavaBasedScalar ? JNI_TRUE : JNI_FALSE); } void SQLOutputToChunk_close(jobject stream) @@ -80,15 +83,11 @@ void SQLOutputToChunk_initialize(void) JNIEXPORT jobject JNICALL Java_org_postgresql_pljava_jdbc_SQLOutputToChunk__1ensureCapacity (JNIEnv *env, jclass cls, jlong hdl, jobject bb, jint pos, jint needed) { - Ptr2Long p2l; - StringInfo str; + StringInfo str = JLongGet(StringInfo, hdl); char *oldp; int oldmax; BEGIN_NATIVE - p2l.ptrVal = (void *)0; /* ensure that the rest is zeroed out */ - p2l.longVal = hdl; - str = (StringInfo)p2l.ptrVal; str->len = pos; oldp = str->data; oldmax = str->maxlen; diff --git a/pljava-so/src/main/c/SQLOutputToTuple.c b/pljava-so/src/main/c/SQLOutputToTuple.c index 53c54e0f..bd6938f4 100644 --- a/pljava-so/src/main/c/SQLOutputToTuple.c +++ b/pljava-so/src/main/c/SQLOutputToTuple.c @@ -1,14 +1,20 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ #include +#include "pljava/type/Type.h" #include "pljava/type/TupleDesc.h" -#include "pljava/type/JavaWrapper.h" #include "pljava/SQLOutputToTuple.h" #define pg_unreachable() abort() @@ -20,7 +26,7 @@ static jmethodID s_SQLOutputToTuple_getTuple; jobject SQLOutputToTuple_create(TupleDesc td) { - jobject tupleDesc = TupleDesc_create(td); + jobject tupleDesc = pljava_TupleDesc_create(td); jobject result = JNI_newObject(s_SQLOutputToTuple_class, s_SQLOutputToTuple_init, tupleDesc); JNI_deleteLocalRef(tupleDesc); return result; @@ -28,15 +34,15 @@ jobject SQLOutputToTuple_create(TupleDesc td) HeapTuple SQLOutputToTuple_getTuple(jobject sqlOutput) { - Ptr2Long p2l; + jlong jTup; if(sqlOutput == 0) return 0; - p2l.longVal = JNI_callLongMethod(sqlOutput, s_SQLOutputToTuple_getTuple); - if(p2l.longVal == 0) + jTup = JNI_callLongMethod(sqlOutput, s_SQLOutputToTuple_getTuple); + if(jTup == 0) return 0; - return (HeapTuple)p2l.ptrVal; + return JLongGet(HeapTuple, jTup); } /* Make this datatype available to the postgres system. diff --git a/pljava-so/src/main/c/Session.c b/pljava-so/src/main/c/Session.c index b4ab4696..fa5d9b66 100644 --- a/pljava-so/src/main/c/Session.c +++ b/pljava-so/src/main/c/Session.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include #include @@ -12,9 +16,6 @@ #include "pljava/Session.h" #include "pljava/type/AclId.h" -#define pg_unreachable() abort() - - extern void Session_initialize(void); void Session_initialize(void) { @@ -44,9 +45,21 @@ Java_org_postgresql_pljava_internal_Session__1setUser( /* No error checking since this might be a restore of user in * a finally block after an exception. */ + bool wasLocalChange = false; + int secContext; + Oid dummy; BEGIN_NATIVE_NO_ERRCHECK - SetUserIdAndContext(AclId_getAclId(aclId), true); + if (InSecurityRestrictedOperation()) + ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg( + "cannot set parameter \"%s\" within security-restricted operation", + "role"))); + GetUserIdAndSecContext(&dummy, &secContext); + wasLocalChange = 0 != ( secContext & SECURITY_LOCAL_USERID_CHANGE ); + if ( isLocalChange ) + secContext |= SECURITY_LOCAL_USERID_CHANGE; + else + secContext &= ~SECURITY_LOCAL_USERID_CHANGE; + SetUserIdAndSecContext(AclId_getAclId(aclId), secContext); END_NATIVE - return JNI_TRUE; + return wasLocalChange ? 
JNI_TRUE : JNI_FALSE; } - diff --git a/pljava-so/src/main/c/SubXactListener.c b/pljava-so/src/main/c/SubXactListener.c index c07e7545..65ce670d 100644 --- a/pljava-so/src/main/c/SubXactListener.c +++ b/pljava-so/src/main/c/SubXactListener.c @@ -1,44 +1,59 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include "pljava/Backend.h" #include "pljava/Exception.h" -#include "pljava/SPI.h" +#include "pljava/PgSavepoint.h" #include "org_postgresql_pljava_internal_SubXactListener.h" #include static jclass s_SubXactListener_class; -static jmethodID s_SubXactListener_onStart; -static jmethodID s_SubXactListener_onCommit; -static jmethodID s_SubXactListener_onAbort; +static jmethodID s_SubXactListener_invokeListeners; -static void subXactCB(SubXactEvent event, SubTransactionId mySubid, SubTransactionId parentSubid, void* arg) +static void subXactCB( + SubXactEvent event, SubTransactionId mySubid, SubTransactionId parentSubid, + void* arg) { - Ptr2Long p2l; - p2l.longVal = 0L; /* ensure that the rest is zeroed out */ - p2l.ptrVal = arg; + /* + * Map the subids to PgSavepoints first - this function upcalls into Java + * without releasing the Backend.THREADLOCK monitor, so the called methods + * can know they're on the PG thread; Backend.threadMayEnterPG() is true. + * It is important to look up mySubid before parentSubid, as it is possible + * a new PgSavepoint instance is under construction in the 'nursery', and + * will be assigned the first id to be looked up. + */ + jobject sp = pljava_PgSavepoint_forId(mySubid); + jobject parent = pljava_PgSavepoint_forId(parentSubid); + + /* + * These upcalls are made with the monitor released. We are, of course, ON + * the PG thread, but this time with no monitor held to prevent another + * thread from stepping in. These methods should not blindly assert + * Backend.threadMayEnterPG(), as for some java_thread_pg_entry settings it + * won't be true. This is the legacy behavior, so not changed for 1.5.x. + * + * The event ordinal can simply be passed to Java, as long as upstream + * hasn't changed the order; list the known ones in a switch, for a better + * chance that a clever compiler will warn if upstream has added any. 
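
The "list the known ones in a switch" remark relies on a common compiler behavior: with GCC or Clang and -Wall (specifically -Wswitch), a switch over an enum that has no default branch warns when an enumerator is left without a case. A minimal standalone sketch, not part of this patch, with an invented enum standing in for SubXactEvent:

    #include <stdio.h>

    /* Invented stand-in for SubXactEvent. */
    typedef enum
    {
        EVT_START_SUB,
        EVT_COMMIT_SUB,
        EVT_ABORT_SUB,
        EVT_PRE_COMMIT_SUB
    } Event;

    static void dispatch(Event e)
    {
        /* Deliberately no default branch: if a new enumerator is added to
         * Event, GCC/Clang with -Wall (-Wswitch) warn that it is unhandled. */
        switch ( e )
        {
            case EVT_START_SUB:
            case EVT_COMMIT_SUB:
            case EVT_ABORT_SUB:
            case EVT_PRE_COMMIT_SUB:
                printf("forwarding event %d\n", (int)e);
        }
    }

    int main(void)
    {
        dispatch(EVT_COMMIT_SUB);
        return 0;
    }
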
+ */ switch(event) { case SUBXACT_EVENT_START_SUB: - { - Ptr2Long infant2l; - infant->xid = mySubid; - infant2l.longVal = 0L; /* ensure that the rest is zeroed out */ - infant2l.ptrVal = infant; - JNI_callStaticVoidMethod(s_SubXactListener_class, s_SubXactListener_onStart, p2l.longVal, infant2l.longVal, parentSubid); - } - break; case SUBXACT_EVENT_COMMIT_SUB: - JNI_callStaticVoidMethod(s_SubXactListener_class, s_SubXactListener_onCommit, p2l.longVal, mySubid, parentSubid); - break; case SUBXACT_EVENT_ABORT_SUB: - JNI_callStaticVoidMethod(s_SubXactListener_class, s_SubXactListener_onAbort, p2l.longVal, mySubid, parentSubid); + case SUBXACT_EVENT_PRE_COMMIT_SUB: + JNI_callStaticVoidMethod(s_SubXactListener_class, + s_SubXactListener_invokeListeners, (jint)event, sp, parent); } } @@ -48,38 +63,49 @@ void SubXactListener_initialize(void) JNINativeMethod methods[] = { { "_register", - "(J)V", + "()V", Java_org_postgresql_pljava_internal_SubXactListener__1register }, { "_unregister", - "(J)V", + "()V", Java_org_postgresql_pljava_internal_SubXactListener__1unregister }, - { 0, 0, 0 }}; + { 0, 0, 0 } + }; PgObject_registerNatives("org/postgresql/pljava/internal/SubXactListener", methods); - s_SubXactListener_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/SubXactListener")); - s_SubXactListener_onAbort = PgObject_getStaticJavaMethod(s_SubXactListener_class, "onAbort", "(JII)V"); - s_SubXactListener_onCommit = PgObject_getStaticJavaMethod(s_SubXactListener_class, "onCommit", "(JII)V"); - s_SubXactListener_onStart = PgObject_getStaticJavaMethod(s_SubXactListener_class, "onStart", "(JJI)V"); + s_SubXactListener_class = JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/internal/SubXactListener")); + s_SubXactListener_invokeListeners = + PgObject_getStaticJavaMethod(s_SubXactListener_class, "invokeListeners", + "(ILorg/postgresql/pljava/internal/PgSavepoint;" + "Lorg/postgresql/pljava/internal/PgSavepoint;)V"); + +#define CONFIRMCONST(c) \ +StaticAssertStmt((SUBXACT_EVENT_##c) == \ +(org_postgresql_pljava_internal_SubXactListener_##c), \ + "Java/C value mismatch for " #c) + + CONFIRMCONST( START_SUB ); + CONFIRMCONST( COMMIT_SUB ); + CONFIRMCONST( ABORT_SUB ); + CONFIRMCONST( PRE_COMMIT_SUB ); } /* * Class: org_postgresql_pljava_internal_SubXactListener * Method: _register - * Signature: (J)V + * Signature: ()V */ JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_SubXactListener__1register(JNIEnv* env, jclass cls, jlong listenerId) +Java_org_postgresql_pljava_internal_SubXactListener__1register(JNIEnv* env, jclass cls) { BEGIN_NATIVE PG_TRY(); { - Ptr2Long p2l; - p2l.longVal = listenerId; - RegisterSubXactCallback(subXactCB, p2l.ptrVal); + RegisterSubXactCallback(subXactCB, NULL); } PG_CATCH(); { @@ -92,17 +118,15 @@ Java_org_postgresql_pljava_internal_SubXactListener__1register(JNIEnv* env, jcla /* * Class: org_postgresql_pljava_internal_SubXactListener * Method: _unregister - * Signature: (J)V + * Signature: ()V */ JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_SubXactListener__1unregister(JNIEnv* env, jclass cls, jlong listenerId) +Java_org_postgresql_pljava_internal_SubXactListener__1unregister(JNIEnv* env, jclass cls) { BEGIN_NATIVE PG_TRY(); { - Ptr2Long p2l; - p2l.longVal = listenerId; - UnregisterSubXactCallback(subXactCB, p2l.ptrVal); + UnregisterSubXactCallback(subXactCB, NULL); } PG_CATCH(); { diff --git a/pljava-so/src/main/c/TypeOid.c b/pljava-so/src/main/c/TypeOid.c new file mode 100644 index 00000000..81005326 --- 
/dev/null +++ b/pljava-so/src/main/c/TypeOid.c @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2019-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ + +#include +#include + +#include "pljava/pljava.h" +#include "org_postgresql_pljava_jdbc_TypeOid.h" + +/* + * A compilation unit with no run-time purpose, merely to hold a bunch of + * StaticAssertStmts to confirm at compile time that we haven't fat-fingered + * any of the OID constants that are known to the Java code. + */ + +#define CONFIRMCONST(c) \ +StaticAssertStmt((c) == (org_postgresql_pljava_jdbc_TypeOid_##c), \ + "Java/C value mismatch for " #c) + +/* + * Class: org_postgresql_pljava_jdbc_TypeOid + * Method: _dummy + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_org_postgresql_pljava_jdbc_TypeOid__1dummy(JNIEnv * env, jclass cls) +{ + CONFIRMCONST(InvalidOid); + CONFIRMCONST(INT2OID); + CONFIRMCONST(INT4OID); + CONFIRMCONST(INT8OID); + CONFIRMCONST(TEXTOID); + CONFIRMCONST(NUMERICOID); + CONFIRMCONST(FLOAT4OID); + CONFIRMCONST(FLOAT8OID); + CONFIRMCONST(BOOLOID); + CONFIRMCONST(DATEOID); + CONFIRMCONST(TIMEOID); + CONFIRMCONST(TIMESTAMPOID); + CONFIRMCONST(TIMESTAMPTZOID); + CONFIRMCONST(BYTEAOID); + CONFIRMCONST(VARCHAROID); + CONFIRMCONST(OIDOID); + CONFIRMCONST(BPCHAROID); + CONFIRMCONST(PG_NODE_TREEOID); + CONFIRMCONST(TRIGGEROID); +} diff --git a/pljava-so/src/main/c/VarlenaWrapper.c b/pljava-so/src/main/c/VarlenaWrapper.c new file mode 100644 index 00000000..079852bf --- /dev/null +++ b/pljava-so/src/main/c/VarlenaWrapper.c @@ -0,0 +1,503 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack + */ + +#include + +#if PG_VERSION_NUM < 130000 +#include +#define detoast_external_attr heap_tuple_fetch_attr +#else +#include +#endif + +#include +#include +#include + +#include "org_postgresql_pljava_internal_VarlenaWrapper_Input_State.h" +#include "org_postgresql_pljava_internal_VarlenaWrapper_Output_State.h" +#include "pljava/VarlenaWrapper.h" +#include "pljava/DualState.h" + +#include "pljava/PgObject.h" +#include "pljava/JNICalls.h" + +#if PG_VERSION_NUM < 90600 +#define get_toast_snapshot() NULL +#elif PG_VERSION_NUM < 180000 +#define get_toast_snapshot() GetOldestSnapshot() +#else +#include +#endif + +#define _VL_TYPE struct varlena * + +#if PG_VERSION_NUM < 140000 +#define VARATT_EXTERNAL_GET_EXTSIZE(toast_pointer) ((toast_pointer).va_extsize) +#endif + +#define INITIALSIZE 1024 + +static jclass s_VarlenaWrapper_class; +static jmethodID s_VarlenaWrapper_adopt; + +static jclass s_VarlenaWrapper_Input_class; +static jclass s_VarlenaWrapper_Output_class; + +static jmethodID s_VarlenaWrapper_Input_init; + +static jmethodID s_VarlenaWrapper_Output_init; + +static jfieldID s_VarlenaWrapper_Input_State_varlena; + +/* + * For VarlenaWrapper.Output, define a dead-simple "expanded object" format + * consisting of linked allocated blocks, so if a long value is being written, + * it does not have to get repeatedly reallocated and copied. The "expanded + * object" form is a valid sort of PostgreSQL Datum, and can be passed around + * in that form, and reparented between memory contexts with different + * lifetimes; when a time comes that PostgreSQL needs it in a 'flattened' + * form, it will use these 'methods' to flatten it, and that's when the one + * final reallocation and copy will happen. + */ + +static Size VOS_get_flat_size(ExpandedObjectHeader *eohptr); +static void VOS_flatten_into(ExpandedObjectHeader *eohptr, + void *result, Size allocated_size); + +static const ExpandedObjectMethods VOS_methods = +{ + VOS_get_flat_size, + VOS_flatten_into +}; + +typedef struct ExpandedVarlenaOutputStreamNode ExpandedVarlenaOutputStreamNode; + +struct ExpandedVarlenaOutputStreamNode +{ + ExpandedVarlenaOutputStreamNode *next; + Size size; +}; + +typedef struct ExpandedVarlenaOutputStreamHeader +{ + ExpandedObjectHeader hdr; + ExpandedVarlenaOutputStreamNode *tail; + Size total_size; +} ExpandedVarlenaOutputStreamHeader; + + + +/* + * Create and return a VarlenaWrapper.Input allowing Java to read the content + * of an existing Datum d, which must be a varlena type (assumed, not checked + * here). + * + * The datum will be copied (detoasting if need be) into a memory context with + * parent as its parent, so it can be efficiently reparented later if adopted, + * and the VarlenaWrapper will be associated with the ResourceOwner ro, which + * determines its lifespan (if not adopted). The ResourceOwner needs to be one + * that will be released no later than the memory context itself. 
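
The park-or-detoast choice made by pljava_VarlenaWrapper_Input below can be read off in isolation. A minimal standalone model, not part of this patch: the function name is invented, "parked" stands for the size of the form that would be kept as-is (a toast pointer, or still-compressed data) and "actual" for the size once fully detoasted; the 4096-byte floor and the half-size test mirror the conditions in the function that follows.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /*
     * Parking only pays off for reasonably large values whose parked form is
     * less than half the detoasted size; otherwise detoast eagerly.
     */
    static bool detoastEagerly(size_t parked, size_t actual)
    {
        if ( actual < 4096 )          /* small value: just detoast it now */
            return true;
        if ( (actual >> 1) < parked ) /* not enough savings to defer the work */
            return true;
        return false;
    }

    int main(void)
    {
        printf("%d\n", (int)detoastEagerly(  64,   1000)); /* 1: eager */
        printf("%d\n", (int)detoastEagerly( 100, 100000)); /* 0: park it */
        return 0;
    }
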
+ */ +jobject pljava_VarlenaWrapper_Input( + Datum d, MemoryContext parent, ResourceOwner ro) +{ + jobject vr; + jobject dbb; + MemoryContext mc; + MemoryContext prevcxt; + _VL_TYPE vl; + jlong jro; + jlong jcxt; + jlong jpin; + jlong jdatum; + Size parked; + Size actual; + Snapshot pin = NULL; + + vl = (_VL_TYPE) DatumGetPointer(d); + + if ( VARATT_IS_EXTERNAL_INDIRECT(vl) ) /* at most once; can't be nested */ + { + struct varatt_indirect redirect; + VARATT_EXTERNAL_GET_POINTER(redirect, vl); + vl = (_VL_TYPE)redirect.pointer; + d = PointerGetDatum(vl); + } + + parked = VARSIZE_ANY(vl); + actual = toast_raw_datum_size(d) - VARHDRSZ; + + mc = AllocSetContextCreate(parent, "PL/Java VarlenaWrapper.Input", + ALLOCSET_START_SMALL_SIZES); + + prevcxt = MemoryContextSwitchTo(mc); + + if ( actual < 4096 || (actual >> 1) < parked ) + goto justDetoastEagerly; + if ( VARATT_IS_EXTERNAL_EXPANDED(vl) ) + goto justDetoastEagerly; + if ( VARATT_IS_EXTERNAL_ONDISK(vl) ) + { + pin = get_toast_snapshot(); + if ( NULL == pin ) + { + /* + * Unable to register a snapshot and just park the tiny pointer. + * If it points to compressed data, can still park that rather than + * fully detoasting. + */ + struct varatt_external toast_pointer; + VARATT_EXTERNAL_GET_POINTER(toast_pointer, vl); + parked = VARATT_EXTERNAL_GET_EXTSIZE(toast_pointer) + VARHDRSZ; + if ( (actual >> 1) < parked ) /* not compressed enough to bother */ + goto justDetoastEagerly; + vl = detoast_external_attr(vl); /* fetch without decompressing */ + d = PointerGetDatum(vl); + dbb = NULL; + goto constructResult; + } + pin = RegisterSnapshotOnOwner(pin, ro); + } + +/* parkAndDetoastLazily: */ + vl = (_VL_TYPE) DatumGetPointer(datumCopy(d, false, -1)); + dbb = NULL; + goto constructResult; + +justDetoastEagerly: + vl = (_VL_TYPE) PG_DETOAST_DATUM_COPY(d); + parked = actual + VARHDRSZ; + dbb = JNI_newDirectByteBuffer(VARDATA(vl), actual); + +constructResult: + MemoryContextSwitchTo(prevcxt); + + jro = PointerGetJLong(ro); + jcxt = PointerGetJLong(mc); + jpin = PointerGetJLong(pin); + jdatum = PointerGetJLong(vl); + + vr = JNI_newObjectLocked(s_VarlenaWrapper_Input_class, + s_VarlenaWrapper_Input_init, pljava_DualState_key(), + jro, jcxt, jpin, jdatum, + (jlong)parked, (jlong)actual, dbb); + + if ( NULL != dbb ) + JNI_deleteLocalRef(dbb); + + return vr; +} + +/* + * Create and return a VarlenaWrapper.Output, initially empty, into which Java + * can write. + * + * The datum will be assembled in the memory context mc, and the VarlenaWrapper + * will be associated with the ResourceOwner ro, which determines its lifespan. + * The ResourceOwner needs to be one that will be released no later than + * the memory context itself. + * + * After Java has written the content, native code can obtain the Datum by + * calling pljava_VarlenaWrapper_Output_adopt(). + */ +jobject pljava_VarlenaWrapper_Output(MemoryContext parent, ResourceOwner ro) +{ + ExpandedVarlenaOutputStreamHeader *evosh; + jobject vos; + jobject dbb; + MemoryContext mc; + jlong jro; + jlong jcxt; + jlong jdatum; + + mc = AllocSetContextCreate(parent, "PL/Java VarlenaWrapper.Output", + ALLOCSET_START_SMALL_SIZES); + /* + * Allocate an initial chunk sized to contain the expanded V.O.S. header, + * plus the header and data for one node to hold INITIALSIZE data bytes. + */ + evosh = MemoryContextAlloc(mc, + sizeof *evosh + sizeof *(evosh->tail) + INITIALSIZE); + /* + * Initialize the expanded object header and its pointer to the first node. 
+ */ + EOH_init_header(&(evosh->hdr), &VOS_methods, mc); + evosh->total_size = VARHDRSZ; + evosh->tail = (ExpandedVarlenaOutputStreamNode *)(evosh + 1); + /* + * Initialize that first node. + */ + evosh->tail->next = evosh->tail; + /* evosh->tail->size will be filled in by _nextBuffer() later */ + + jro = PointerGetJLong(ro); + jcxt = PointerGetJLong(mc); + jdatum = PointerGetJLong(DatumGetPointer(EOHPGetRWDatum(&(evosh->hdr)))); + + /* + * The data bytes begin right after the node header struct. + */ + dbb = JNI_newDirectByteBuffer(evosh->tail + 1, INITIALSIZE); + + vos = JNI_newObjectLocked(s_VarlenaWrapper_Output_class, + s_VarlenaWrapper_Output_init, pljava_DualState_key(), + jro, jcxt, jdatum, dbb); + JNI_deleteLocalRef(dbb); + + return vos; +} + +/* + * Adopt a VarlenaWrapper (if Output, after Java code has written and closed it) + * and leave it no longer accessible from Java. It may be an 'expanded' datum, + * in PG 9.5+ where there are such things. Otherwise, it will be an ordinary + * flat one (the ersatz 'expanded' form used internally here then being only an + * implementation detail, not exposed to the caller); its memory context is + * unchanged. + */ +Datum pljava_VarlenaWrapper_adopt(jobject vlw) +{ + jlong adopted; + + adopted = JNI_callLongMethodLocked(vlw, s_VarlenaWrapper_adopt, + pljava_DualState_key()); + + return PointerGetDatum(JLongGet(Pointer, adopted)); +} + +static Size VOS_get_flat_size(ExpandedObjectHeader *eohptr) +{ + ExpandedVarlenaOutputStreamHeader *evosh = + (ExpandedVarlenaOutputStreamHeader *)eohptr; + return evosh->total_size; +} + +static void VOS_flatten_into(ExpandedObjectHeader *eohptr, + void *result, Size allocated_size) +{ + ExpandedVarlenaOutputStreamHeader *evosh = + (ExpandedVarlenaOutputStreamHeader *)eohptr; + ExpandedVarlenaOutputStreamNode *node = evosh->tail; + + Assert(allocated_size == evosh->total_size); + SET_VARSIZE(result, allocated_size); + result = VARDATA(result); + + do + { + node = node->next; + memcpy(result, node + 1, node->size); + result = (char *)result + node->size; + } + while ( node != evosh->tail ); +} + +void pljava_VarlenaWrapper_initialize(void) +{ + jclass clazz; + JNINativeMethod methodsIn[] = + { + { + "_unregisterSnapshot", + "(JJ)V", + Java_org_postgresql_pljava_internal_VarlenaWrapper_00024Input_00024State__1unregisterSnapshot + }, + { + "_detoast", + "(JJJJ)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_internal_VarlenaWrapper_00024Input_00024State__1detoast + }, + { + "_fetch", + "(JJ)J", + Java_org_postgresql_pljava_internal_VarlenaWrapper_00024Input_00024State__1fetch + }, + { 0, 0, 0 } + }; + JNINativeMethod methodsOut[] = + { + { + "_nextBuffer", + "(JII)Ljava/nio/ByteBuffer;", + Java_org_postgresql_pljava_internal_VarlenaWrapper_00024Output_00024State__1nextBuffer + }, + { 0, 0, 0 } + }; + + s_VarlenaWrapper_class = + (jclass)JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/internal/VarlenaWrapper")); + s_VarlenaWrapper_Input_class = + (jclass)JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/internal/VarlenaWrapper$Input")); + s_VarlenaWrapper_Output_class = + (jclass)JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/internal/VarlenaWrapper$Output")); + + s_VarlenaWrapper_Input_init = PgObject_getJavaMethod( + s_VarlenaWrapper_Input_class, "<init>", + "(Lorg/postgresql/pljava/internal/DualState$Key;" + "JJJJJJLjava/nio/ByteBuffer;)V"); + + s_VarlenaWrapper_Output_init = PgObject_getJavaMethod( + s_VarlenaWrapper_Output_class, "<init>", +
"(Lorg/postgresql/pljava/internal/DualState$Key;" + "JJJLjava/nio/ByteBuffer;)V"); + + s_VarlenaWrapper_adopt = PgObject_getJavaMethod( + s_VarlenaWrapper_class, "adopt", + "(Lorg/postgresql/pljava/internal/DualState$Key;)J"); + + clazz = PgObject_getJavaClass( + "org/postgresql/pljava/internal/VarlenaWrapper$Input$State"); + + PgObject_registerNatives2(clazz, methodsIn); + + s_VarlenaWrapper_Input_State_varlena = PgObject_getJavaField( + clazz, "m_varlena", "J"); + + JNI_deleteLocalRef(clazz); + + clazz = PgObject_getJavaClass( + "org/postgresql/pljava/internal/VarlenaWrapper$Output$State"); + + PgObject_registerNatives2(clazz, methodsOut); + + JNI_deleteLocalRef(clazz); +} + +/* + * Class: org_postgresql_pljava_internal_VarlenaWrapper_Input_State + * Method: _unregisterSnapshot + * Signature: (JJ)V + */ +JNIEXPORT void JNICALL +Java_org_postgresql_pljava_internal_VarlenaWrapper_00024Input_00024State__1unregisterSnapshot + (JNIEnv *env, jobject _this, jlong snapshot, jlong ro) +{ + BEGIN_NATIVE_NO_ERRCHECK + UnregisterSnapshotFromOwner( + JLongGet(Snapshot, snapshot), JLongGet(ResourceOwner, ro)); + END_NATIVE +} + +/* + * Class: org_postgresql_pljava_internal_VarlenaWrapper_Input_State + * Method: _detoast + * Signature: (JJJJ)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_internal_VarlenaWrapper_00024Input_00024State__1detoast + (JNIEnv *env, jobject _this, jlong vl, jlong cxt, jlong snap, jlong resOwner) +{ + _VL_TYPE vlp = JLongGet(_VL_TYPE, vl); + _VL_TYPE detoasted; + MemoryContext prevcxt; + jobject dbb = NULL; + + BEGIN_NATIVE_NO_ERRCHECK + + prevcxt = MemoryContextSwitchTo(JLongGet(MemoryContext, cxt)); + + detoasted = (_VL_TYPE) PG_DETOAST_DATUM_COPY(PointerGetDatum(vlp)); + + MemoryContextSwitchTo(prevcxt); + + JNI_setLongField(_this, + s_VarlenaWrapper_Input_State_varlena, PointerGetJLong(detoasted)); + pfree(vlp); + + if ( 0 != snap ) + UnregisterSnapshotFromOwner( + JLongGet(Snapshot, snap), JLongGet(ResourceOwner, resOwner)); + + dbb = JNI_newDirectByteBuffer( + VARDATA(detoasted), VARSIZE_ANY_EXHDR(detoasted)); + + END_NATIVE + + return dbb; +} + +/* + * Class: org_postgresql_pljava_internal_VarlenaWrapper_Input_State + * Method: _fetch + * Signature: (JJ)J + * + * Assumption: this is only called when a snapshot has been registered (meaning + * the varlena is EXTERNAL_ONDISK) and the snapshot is soon to be unregistered. + * All that's needed is to 'fetch' the representation from disk, in case the + * toast rows could be subject to vacuuming after the snapshot is unregistered. + * A fetch is not a full detoast; if what's fetched is compressed, it stays + * compressed. This method does not need to unregister the snapshot, as that + * will happen soon anyway. It does pfree the toast pointer. 
+ */ +JNIEXPORT jlong JNICALL Java_org_postgresql_pljava_internal_VarlenaWrapper_00024Input_00024State__1fetch + (JNIEnv *env, jobject _this, jlong varlena, jlong memContext) +{ + _VL_TYPE vl = JLongGet(_VL_TYPE, varlena); + MemoryContext prevcxt; + _VL_TYPE fetched = NULL; + + BEGIN_NATIVE_NO_ERRCHECK; + prevcxt = MemoryContextSwitchTo(JLongGet(MemoryContext, memContext)); + fetched = detoast_external_attr(vl); + pfree(vl); + MemoryContextSwitchTo(prevcxt); + END_NATIVE; + + return PointerGetJLong(fetched); +} + +/* + * Class: org_postgresql_pljava_internal_VarlenaWrapper_Output_State + * Method: _nextBuffer + * Signature: (JII)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_internal_VarlenaWrapper_00024Output_00024State__1nextBuffer + (JNIEnv *env, jobject _this, + jlong varlenaPtr, jint currentBufPosition, jint desiredCapacity) +{ + ExpandedVarlenaOutputStreamHeader *evosh; + ExpandedVarlenaOutputStreamNode *node; + Datum d = PointerGetDatum(JLongGet(Pointer, varlenaPtr)); + jobject dbb = NULL; + + evosh = (ExpandedVarlenaOutputStreamHeader *)DatumGetEOHP(d); + evosh->tail->size = currentBufPosition; + evosh->total_size += currentBufPosition; + + if ( 0 == desiredCapacity ) + return NULL; + + BEGIN_NATIVE + /* + * This adjustment of desiredCapacity is arbitrary and amenable to + * performance experimentation. For initial signs of life, ignore the + * desiredCapacity hint completely and use a hardwired size. + */ + desiredCapacity = 8180; + + node = (ExpandedVarlenaOutputStreamNode *) + MemoryContextAlloc(evosh->hdr.eoh_context, desiredCapacity); + node->next = evosh->tail->next; + evosh->tail->next = node; + evosh->tail = node; + + dbb = JNI_newDirectByteBuffer(node + 1, desiredCapacity - sizeof *node); + END_NATIVE + + return dbb; +} diff --git a/pljava-so/src/main/c/XactListener.c b/pljava-so/src/main/c/XactListener.c index ec96793f..09632618 100644 --- a/pljava-so/src/main/c/XactListener.c +++ b/pljava-so/src/main/c/XactListener.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack */ #include "pljava/Backend.h" #include "pljava/Exception.h" @@ -13,27 +17,37 @@ #include "access/xact.h" static jclass s_XactListener_class; -static jmethodID s_XactListener_onAbort; -static jmethodID s_XactListener_onCommit; -static jmethodID s_XactListener_onPrepare; +static jmethodID s_XactListener_invokeListeners; static void xactCB(XactEvent event, void* arg) { - Ptr2Long p2l; - p2l.longVal = 0L; /* ensure that the rest is zeroed out */ - p2l.ptrVal = arg; + /* + * Upstream has, regrettably, not merely added events over the years, but + * changed their order, so a mapping is needed. Use a switch with the known + * cases enumerated, to improve the chance that a clever compiler will warn + * if yet more have been added, and initialize 'mapped' to a value that the + * Java code won't mistake for a real one. 
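The enumerate-every-case shape of that switch can be seen in miniature below. The enum, constants, and function here are hypothetical stand-ins, but the idea is the same: a macro-generated case list with no default, so a -Wswitch-style warning fires if the upstream enum grows, plus a sentinel value for anything unmapped.

#include <stdio.h>

/* A stand-in for the upstream enum, whose membership and order may change. */
typedef enum { EV_COMMIT, EV_ABORT, EV_PREPARE } UpstreamEvent;

/* Stable constants the other side of the interface relies on. */
#define STABLE_COMMIT  0
#define STABLE_ABORT   1
#define STABLE_PREPARE 2

static int map_event(UpstreamEvent ev)
{
#define CASE(c) case EV_##c: return STABLE_##c
	switch ( ev )
	{
		CASE( COMMIT );
		CASE( ABORT );
		CASE( PREPARE );
	}
#undef CASE
	return -1;   /* a value the receiver cannot mistake for a real event */
}

int main(void)
{
	printf("%d %d\n", map_event(EV_ABORT), map_event(EV_PREPARE));
	return 0;
}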
+ */ +#define CASE(c) \ +case XACT_EVENT_##c: \ + mapped = org_postgresql_pljava_internal_XactListener_##c; \ + break + + jint mapped = -1; switch(event) { - case XACT_EVENT_ABORT: - JNI_callStaticVoidMethod(s_XactListener_class, s_XactListener_onAbort, p2l.longVal); - break; - case XACT_EVENT_COMMIT: - JNI_callStaticVoidMethod(s_XactListener_class, s_XactListener_onCommit, p2l.longVal); - break; - case XACT_EVENT_PREPARE: - JNI_callStaticVoidMethod(s_XactListener_class, s_XactListener_onPrepare, p2l.longVal); - break; + CASE( COMMIT ); + CASE( ABORT ); + CASE( PREPARE ); + CASE( PRE_COMMIT ); + CASE( PRE_PREPARE ); + CASE( PARALLEL_COMMIT ); + CASE( PARALLEL_ABORT ); + CASE( PARALLEL_PRE_COMMIT ); } + + JNI_callStaticVoidMethod(s_XactListener_class, + s_XactListener_invokeListeners, mapped); } extern void XactListener_initialize(void); @@ -42,38 +56,37 @@ void XactListener_initialize(void) JNINativeMethod methods[] = { { "_register", - "(J)V", - Java_org_postgresql_pljava_internal_XactListener__1register + "()V", + Java_org_postgresql_pljava_internal_XactListener__1register }, { "_unregister", - "(J)V", - Java_org_postgresql_pljava_internal_XactListener__1unregister + "()V", + Java_org_postgresql_pljava_internal_XactListener__1unregister }, - { 0, 0, 0 }}; + { 0, 0, 0 } + }; PgObject_registerNatives("org/postgresql/pljava/internal/XactListener", methods); - s_XactListener_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/XactListener")); - s_XactListener_onAbort = PgObject_getStaticJavaMethod(s_XactListener_class, "onAbort", "(J)V"); - s_XactListener_onCommit = PgObject_getStaticJavaMethod(s_XactListener_class, "onCommit", "(J)V"); - s_XactListener_onPrepare = PgObject_getStaticJavaMethod(s_XactListener_class, "onPrepare", "(J)V"); + s_XactListener_class = JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/internal/XactListener")); + s_XactListener_invokeListeners = PgObject_getStaticJavaMethod( + s_XactListener_class, "invokeListeners", "(I)V"); } /* * Class: org_postgresql_pljava_internal_XactListener * Method: _register - * Signature: (J)V + * Signature: ()V */ JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_XactListener__1register(JNIEnv* env, jclass cls, jlong listenerId) +Java_org_postgresql_pljava_internal_XactListener__1register(JNIEnv* env, jclass cls) { BEGIN_NATIVE PG_TRY(); { - Ptr2Long p2l; - p2l.longVal = listenerId; - RegisterXactCallback(xactCB, p2l.ptrVal); + RegisterXactCallback(xactCB, NULL); } PG_CATCH(); { @@ -86,17 +99,15 @@ Java_org_postgresql_pljava_internal_XactListener__1register(JNIEnv* env, jclass /* * Class: org_postgresql_pljava_internal_XactListener * Method: _unregister - * Signature: (J)V + * Signature: ()V */ JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_XactListener__1unregister(JNIEnv* env, jclass cls, jlong listenerId) +Java_org_postgresql_pljava_internal_XactListener__1unregister(JNIEnv* env, jclass cls) { BEGIN_NATIVE PG_TRY(); { - Ptr2Long p2l; - p2l.longVal = listenerId; - UnregisterXactCallback(xactCB, p2l.ptrVal); + UnregisterXactCallback(xactCB, NULL); } PG_CATCH(); { diff --git a/pljava-so/src/main/c/type/AclId.c b/pljava-so/src/main/c/type/AclId.c index f3620802..723543bc 100644 --- a/pljava-so/src/main/c/type/AclId.c +++ b/pljava-so/src/main/c/type/AclId.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -22,7 +22,11 @@ #include "org_postgresql_pljava_internal_AclId.h" #include "pljava/Exception.h" -#define pg_unreachable() abort() +#if PG_VERSION_NUM >= 160000 +#include +#define pg_namespace_aclcheck(oid,rid,mode) \ + object_aclcheck(NamespaceRelationId, (oid), (rid), (mode)) +#endif static jclass s_AclId_class; static jmethodID s_AclId_init; @@ -186,11 +190,7 @@ Java_org_postgresql_pljava_internal_AclId__1getName(JNIEnv* env, jobject aclId) { result = String_createJavaStringFromNTS( GetUserNameFromId( -#if PG_VERSION_NUM >= 90500 AclId_getAclId(aclId), /* noerr= */ false -#else - AclId_getAclId(aclId) -#endif ) ); } diff --git a/pljava-so/src/main/c/type/Array.c b/pljava-so/src/main/c/type/Array.c index d65173ce..64c09ddb 100644 --- a/pljava-so/src/main/c/type/Array.c +++ b/pljava-so/src/main/c/type/Array.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include "pljava/type/Type_priv.h" #include "pljava/type/Array.h" @@ -82,7 +86,6 @@ static jvalue _Array_coerceDatum(Type self, Datum arg) values = att_addlength_datum(values, elemLength, PointerGetDatum(values)); values = (char*)att_align_nominal(values, elemAlign); - } } result.l = (jobject)objArray; @@ -130,12 +133,21 @@ static Datum _Array_coerceObject(Type self, jobject objArray) PG_RETURN_ARRAYTYPE_P(v); } +/* + * For an array, canReplaceType can be computed a bit more generously. + * The primitive types are coded so that a boxed scalar can replace its + * corresponding primitive but not vice versa. For primitive arrays, we can also + * accept the other direction, that is, when getObjectType(self) == other. That + * will work because every primitive Type foo does contain _fooArray_coerceDatum + * and _fooArray_coerceObject and can handle both directions. + */ static bool _Array_canReplaceType(Type self, Type other) { Type oe = Type_getElementType(other); if ( oe == 0 ) return false; - return Type_canReplaceType(Type_getElementType(self), oe); + return Type_canReplaceType(Type_getElementType(self), oe) + || Type_getObjectType(self) == other; } Type Array_fromOid(Oid typeId, Type elementType) @@ -177,4 +189,3 @@ Type Array_fromOid2(Oid typeId, Type elementType, DatumCoercer coerceDatum, Obje self->objectType = Array_fromOid(typeId, Type_getObjectType(elementType)); return self; } - diff --git a/pljava-so/src/main/c/type/Boolean.c b/pljava-so/src/main/c/type/Boolean.c index d30ebc04..fc93ac14 100644 --- a/pljava-so/src/main/c/type/Boolean.c +++ b/pljava-so/src/main/c/type/Boolean.c @@ -1,12 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. 
* - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause * - * @author Thomas Hallgren + * Contributors: + * Tada AB + * PostgreSQL Global Development Group + * Chapman Flack */ #include "pljava/type/Type_priv.h" #include "pljava/type/Array.h" @@ -14,16 +17,15 @@ static TypeClass s_booleanClass; static jclass s_Boolean_class; -static jclass s_BooleanArray_class; static jmethodID s_Boolean_init; static jmethodID s_Boolean_booleanValue; /* * boolean primitive type. */ -static Datum _boolean_invoke(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +static Datum _boolean_invoke(Type self, Function fn, PG_FUNCTION_ARGS) { - jboolean v = JNI_callStaticBooleanMethodA(cls, method, args); + jboolean v = pljava_Function_booleanInvoke(fn); return BoolGetDatum(v); } @@ -75,20 +77,8 @@ static Datum _booleanArray_coerceObject(Type self, jobject booleanArray) v = createArrayType(nElems, sizeof(jboolean), BOOLOID, false); - if(!JNI_isInstanceOf( booleanArray, s_BooleanArray_class)) - JNI_getBooleanArrayRegion((jbooleanArray)booleanArray, 0, + JNI_getBooleanArrayRegion((jbooleanArray)booleanArray, 0, nElems, (jboolean*)ARR_DATA_PTR(v)); - else - { - int idx = 0; - jboolean *array = (jboolean*)ARR_DATA_PTR(v); - - for(idx = 0; idx < nElems; ++idx) - { - array[idx] = JNI_callBooleanMethod(JNI_getObjectArrayElement(booleanArray, idx), - s_Boolean_booleanValue); - } - } PG_RETURN_ARRAYTYPE_P(v); } @@ -129,7 +119,6 @@ void Boolean_initialize(void) TypeClass cls; s_Boolean_class = JNI_newGlobalRef(PgObject_getJavaClass("java/lang/Boolean")); - s_BooleanArray_class = JNI_newGlobalRef(PgObject_getJavaClass("[Ljava/lang/Boolean;")); s_Boolean_init = PgObject_getJavaMethod(s_Boolean_class, "", "(Z)V"); s_Boolean_booleanValue = PgObject_getJavaMethod(s_Boolean_class, "booleanValue", "()Z"); diff --git a/pljava-so/src/main/c/type/Byte.c b/pljava-so/src/main/c/type/Byte.c index 8d9fbe7d..ec8fb21e 100644 --- a/pljava-so/src/main/c/type/Byte.c +++ b/pljava-so/src/main/c/type/Byte.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include "pljava/type/Type_priv.h" #include "pljava/type/Array.h" @@ -23,9 +27,9 @@ static jmethodID s_Byte_byteValue; /* * byte primitive type. 
*/ -static Datum _byte_invoke(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +static Datum _byte_invoke(Type self, Function fn, PG_FUNCTION_ARGS) { - jbyte v = JNI_callStaticByteMethodA(cls, method, args); + jbyte v = pljava_Function_byteInvoke(fn); return CharGetDatum(v); } diff --git a/pljava-so/src/main/c/type/Coerce.c b/pljava-so/src/main/c/type/Coerce.c index 20ae39af..f898d5b8 100644 --- a/pljava-so/src/main/c/type/Coerce.c +++ b/pljava-so/src/main/c/type/Coerce.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include "pljava/type/Type_priv.h" #include "pljava/type/Coerce.h" @@ -32,10 +36,10 @@ struct Coerce_ typedef struct Coerce_* Coerce; -static Datum _Coerce_invoke(Type type, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +static Datum _Coerce_invoke(Type type, Function fn, PG_FUNCTION_ARGS) { Coerce self = (Coerce)type; - Datum arg = Type_invoke(self->innerType, cls, method, args, fcinfo); + Datum arg = Type_invoke(self->innerType, fn, fcinfo); if(arg != 0) { MemoryContext currCtx = Invocation_switchToUpperContext(); diff --git a/pljava-so/src/main/c/type/Composite.c b/pljava-so/src/main/c/type/Composite.c index de04b2d6..5e14c058 100644 --- a/pljava-so/src/main/c/type/Composite.c +++ b/pljava-so/src/main/c/type/Composite.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include #include @@ -13,7 +17,7 @@ #include "pljava/type/Type_priv.h" #include "pljava/type/Composite.h" #include "pljava/type/TupleDesc.h" -#include "pljava/type/HeapTupleHeader.h" +#include "pljava/type/SingleRowReader.h" #include "pljava/Invocation.h" #include "org_postgresql_pljava_jdbc_SingleRowReader.h" @@ -45,9 +49,6 @@ static jclass s_ResultSetHandle_class; static jclass s_ResultSetPicker_class; static jmethodID s_ResultSetPicker_init; -static jclass s_SingleRowReader_class; -static jmethodID s_SingleRowReader_init; - static jclass s_SingleRowWriter_class; static jmethodID s_SingleRowWriter_init; static jmethodID s_SingleRowWriter_getTupleAndClear; @@ -61,13 +62,11 @@ static jobject _createWriter(jobject tupleDesc) static HeapTuple _getTupleAndClear(jobject jrps) { - Ptr2Long p2l; - if(jrps == 0) return 0; - p2l.longVal = JNI_callLongMethod(jrps, s_SingleRowWriter_getTupleAndClear); - return (HeapTuple)p2l.ptrVal; + return JLongGet(HeapTuple, + JNI_callLongMethod(jrps, s_SingleRowWriter_getTupleAndClear)); } /* @@ -78,19 +77,21 @@ static HeapTuple _getTupleAndClear(jobject jrps) * true. If so, the values are obtained in the form of a HeapTuple which in * turn is returned (as a Datum) from this method. */ -static Datum _Composite_invoke(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +static Datum _Composite_invoke(Type self, Function fn, PG_FUNCTION_ARGS) { bool hasRow; Datum result = 0; TupleDesc tupleDesc = Type_getTupleDesc(self, fcinfo); - jobject jtd = TupleDesc_create(tupleDesc); - jobject singleRowWriter = _createWriter(jtd); - int numArgs = fcinfo->nargs; + jobject jtd = pljava_TupleDesc_create(tupleDesc); + jvalue singleRowWriter; + singleRowWriter.l = _createWriter(jtd); + /* + * Caller guarantees room for one extra reference parameter, so it will go + * at index (length - 1). + */ + pljava_Function_setParameter(fn, -1, singleRowWriter); - // Caller guarantees room for one extra slot - // - args[numArgs].l = singleRowWriter; - hasRow = (JNI_callStaticBooleanMethodA(cls, method, args) == JNI_TRUE); + hasRow = (pljava_Function_booleanInvoke(fn) == JNI_TRUE); if(hasRow) { @@ -98,7 +99,7 @@ static Datum _Composite_invoke(Type self, jclass cls, jmethodID method, jvalue* * durable context. 
*/ MemoryContext currCtx = Invocation_switchToUpperContext(); - HeapTuple tuple = _getTupleAndClear(singleRowWriter); + HeapTuple tuple = _getTupleAndClear(singleRowWriter.l); result = HeapTupleGetDatum(tuple); MemoryContextSwitchTo(currCtx); } @@ -106,22 +107,10 @@ static Datum _Composite_invoke(Type self, jclass cls, jmethodID method, jvalue* fcinfo->isnull = true; JNI_deleteLocalRef(jtd); - JNI_deleteLocalRef(singleRowWriter); + JNI_deleteLocalRef(singleRowWriter.l); return result; } -static jobject _Composite_getSRFProducer(Type self, jclass cls, jmethodID method, jvalue* args) -{ - jobject tmp = JNI_callStaticObjectMethodA(cls, method, args); - if(tmp != 0 && JNI_isInstanceOf(tmp, s_ResultSetHandle_class)) - { - jobject wrapper = JNI_newObject(s_ResultSetPicker_class, s_ResultSetPicker_init, tmp); - JNI_deleteLocalRef(tmp); - tmp = wrapper; - } - return tmp; -} - static jobject _Composite_getSRFCollector(Type self, PG_FUNCTION_ARGS) { jobject tmp1; @@ -130,24 +119,14 @@ static jobject _Composite_getSRFCollector(Type self, PG_FUNCTION_ARGS) if(tupleDesc == 0) ereport(ERROR, (errmsg("Unable to find tuple descriptor"))); - tmp1 = TupleDesc_create(tupleDesc); + tmp1 = pljava_TupleDesc_create(tupleDesc); tmp2 = _createWriter(tmp1); JNI_deleteLocalRef(tmp1); return tmp2; } -static bool _Composite_hasNextSRF(Type self, jobject rowProducer, jobject rowCollector, jint callCounter) -{ - /* Obtain next row using the RowCollector as a parameter to the - * ResultSetProvider.assignRowValues method. - */ - return (JNI_callBooleanMethod(rowProducer, - s_ResultSetProvider_assignRowValues, - rowCollector, - callCounter) == JNI_TRUE); -} - -static Datum _Composite_nextSRF(Type self, jobject rowProducer, jobject rowCollector) +static Datum _Composite_datumFromSRF( + Type self, jobject row, jobject rowCollector) { Datum result = 0; HeapTuple tuple = _getTupleAndClear(rowCollector); @@ -156,29 +135,19 @@ static Datum _Composite_nextSRF(Type self, jobject rowProducer, jobject rowColle return result; } -static void _Composite_closeSRF(Type self, jobject rowProducer) -{ - JNI_callVoidMethod(rowProducer, s_ResultSetProvider_close); -} - /* Assume that the Datum is a HeapTupleHeader and convert it into * a SingleRowReader instance. */ static jvalue _Composite_coerceDatum(Type self, Datum arg) { - jobject tupleDesc; jvalue result; - jlong pointer; HeapTupleHeader hth = DatumGetHeapTupleHeader(arg); result.l = 0; if(hth == 0) return result; - tupleDesc = HeapTupleHeader_getTupleDesc(hth); - pointer = Invocation_createLocalWrapper(hth); - result.l = JNI_newObject(s_SingleRowReader_class, s_SingleRowReader_init, pointer, tupleDesc); - JNI_deleteLocalRef(tupleDesc); + result.l = pljava_SingleRowReader_create(hth); return result; } @@ -229,15 +198,6 @@ static TupleDesc _Composite_getTupleDesc(Type self, PG_FUNCTION_ARGS) return td; } -static const char* _Composite_getJNIReturnSignature(Type self, bool forMultiCall, bool useAltRepr) -{ - return forMultiCall - ? (useAltRepr - ? 
"Lorg/postgresql/pljava/ResultSetHandle;" - : "Lorg/postgresql/pljava/ResultSetProvider;") - : "Z"; -} - Type Composite_obtain(Oid typeId) { Composite infant = (Composite)TypeClass_allocInstance(s_CompositeClass, typeId); @@ -257,25 +217,6 @@ Type Composite_obtain(Oid typeId) extern void Composite_initialize(void); void Composite_initialize(void) { - JNINativeMethod methods[] = - { - { - "_getObject", - "(JJI)Ljava/lang/Object;", - Java_org_postgresql_pljava_jdbc_SingleRowReader__1getObject - }, - { - "_free", - "(J)V", - Java_org_postgresql_pljava_jdbc_SingleRowReader__1free - }, - { 0, 0, 0 } - }; - - s_SingleRowReader_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/jdbc/SingleRowReader")); - PgObject_registerNatives2(s_SingleRowReader_class, methods); - s_SingleRowReader_init = PgObject_getJavaMethod(s_SingleRowReader_class, "", "(JLorg/postgresql/pljava/internal/TupleDesc;)V"); - s_SingleRowWriter_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/jdbc/SingleRowWriter")); s_SingleRowWriter_init = PgObject_getJavaMethod(s_SingleRowWriter_class, "", "(Lorg/postgresql/pljava/internal/TupleDesc;)V"); s_SingleRowWriter_getTupleAndClear = PgObject_getJavaMethod(s_SingleRowWriter_class, "getTupleAndClear", "()J"); @@ -294,39 +235,9 @@ void Composite_initialize(void) s_CompositeClass->getTupleDesc = _Composite_getTupleDesc; s_CompositeClass->coerceDatum = _Composite_coerceDatum; s_CompositeClass->invoke = _Composite_invoke; - s_CompositeClass->getSRFProducer = _Composite_getSRFProducer; s_CompositeClass->getSRFCollector = _Composite_getSRFCollector; - s_CompositeClass->hasNextSRF = _Composite_hasNextSRF; - s_CompositeClass->nextSRF = _Composite_nextSRF; - s_CompositeClass->closeSRF = _Composite_closeSRF; - s_CompositeClass->getJNIReturnSignature = _Composite_getJNIReturnSignature; + s_CompositeClass->datumFromSRF = _Composite_datumFromSRF; s_CompositeClass->outParameter = true; Type_registerType2(InvalidOid, "java.sql.ResultSet", Composite_obtain); } - -/**************************************** - * JNI methods - ****************************************/ - -/* - * Class: org_postgresql_pljava_jdbc_SingleRowReader - * Method: _free - * Signature: (J)V - */ -JNIEXPORT void JNICALL -Java_org_postgresql_pljava_jdbc_SingleRowReader__1free(JNIEnv* env, jobject _this, jlong hth) -{ - HeapTupleHeader_free(env, hth); -} - -/* - * Class: org_postgresql_pljava_jdbc_SingleRowReader - * Method: _getObject - * Signature: (JJI)Ljava/lang/Object; - */ -JNIEXPORT jobject JNICALL -Java_org_postgresql_pljava_jdbc_SingleRowReader__1getObject(JNIEnv* env, jclass clazz, jlong hth, jlong jtd, jint attrNo) -{ - return HeapTupleHeader_getObject(env, hth, jtd, attrNo); -} diff --git a/pljava-so/src/main/c/type/Date.c b/pljava-so/src/main/c/type/Date.c index 84c23cbd..c3ec4347 100644 --- a/pljava-so/src/main/c/type/Date.c +++ b/pljava-so/src/main/c/type/Date.c @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -21,6 +27,69 @@ static jclass s_Date_class; static jmethodID s_Date_init; static jmethodID s_Date_getTime; +static TypeClass s_LocalDateClass; +/* + * The following statics are specific to Java 8 +, and will be initialized + * only on demand (pre-8 application code will have no way to demand them). + */ +static jclass s_LocalDate_class; +static jmethodID s_LocalDate_ofEpochDay; +static jmethodID s_LocalDate_toEpochDay; + +/* + * LocalDate data type. This is introduced with JDBC 4.2 and Java 8. For + * backward-compatibility reasons it does not become the default class returned + * by getObject() for a PostgreSQL date, but application code in Java 8+ can and + * should prefer it, by passing LocalDate.class to getObject. It represents a + * purely local, non-zoned, notion of date, which is exactly what PostgreSQL + * date represents, so the correspondence is direct, with no need to fudge up + * some timezone info just to shoehorn the data into java.sql.Date. + */ + +/* + * This only answers true for (same class or) DATEOID. + * The obtainer (below) only needs to construct and remember one instance. + */ +static bool _LocalDate_canReplaceType(Type self, Type other) +{ + TypeClass cls = Type_getClass(other); + return Type_getClass(self) == cls || Type_getOid(other) == DATEOID; +} + +static jvalue _LocalDate_coerceDatum(Type self, Datum arg) +{ + DateADT pgDate = DatumGetDateADT(arg); + jlong days = (jlong)pgDate + EPOCH_DIFF; + jvalue result; + result.l = JNI_callStaticObjectMethod( + s_LocalDate_class, s_LocalDate_ofEpochDay, days); + return result; +} + +static Datum _LocalDate_coerceObject(Type self, jobject date) +{ + jlong days = + JNI_callLongMethod(date, s_LocalDate_toEpochDay) - EPOCH_DIFF; + return DateADTGetDatum((DateADT)(days)); +} + +static Type _LocalDate_obtain(Oid typeId) +{ + static Type instance = NULL; + if ( NULL == instance ) + { + s_LocalDate_class = JNI_newGlobalRef(PgObject_getJavaClass( + "java/time/LocalDate")); + s_LocalDate_ofEpochDay = PgObject_getStaticJavaMethod(s_LocalDate_class, + "ofEpochDay", "(J)Ljava/time/LocalDate;"); + s_LocalDate_toEpochDay = PgObject_getJavaMethod(s_LocalDate_class, + "toEpochDay", "()J"); + + instance = TypeClass_allocInstance(s_LocalDateClass, DATEOID); + } + return instance; +} + /* * Date data type. Postgres will pass and expect number of days since * Jan 01 2000. Java uses number of millisecs since midnight Jan 01 1970. 
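The conversions above lean on EPOCH_DIFF, which the surrounding code treats as the offset in days from the Unix epoch (1970-01-01, Java's epoch day 0) to the PostgreSQL date epoch (2000-01-01, DateADT day 0), that is, 10957 days; the macro itself is defined elsewhere in the PL/Java sources. A self-contained round-trip check of that arithmetic (the local EPOCH_DIFF here is re-declared only for the sketch):

#include <stdio.h>
#include <stdint.h>

/* 10957 days separate 1970-01-01 from 2000-01-01 (7 leap days in between). */
#define EPOCH_DIFF 10957

int main(void)
{
	int32_t pgDate = 366;  /* 2001-01-01 in PostgreSQL days; 2000 was a leap year */
	int64_t epochDay = (int64_t)pgDate + EPOCH_DIFF;        /* ofEpochDay() input  */
	int32_t roundTrip = (int32_t)(epochDay - EPOCH_DIFF);   /* toEpochDay() result */
	printf("pg %d -> epoch day %lld -> pg %d\n",
		pgDate, (long long)epochDay, roundTrip);
	return 0;
}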
@@ -28,8 +97,8 @@ static jmethodID s_Date_getTime; static jvalue _Date_coerceDatum(Type self, Datum arg) { DateADT pgDate = DatumGetDateADT(arg); - int64 ts = (int64)pgDate * INT64CONST(86400000000); - int tz = Timestamp_getTimeZone_id(ts); + int64 ts = (int64)pgDate * INT64CONST(43200000000); + int tz = Timestamp_getTimeZone_id(ts); /* ts in 2 usec units */ jlong date = (jlong)(pgDate + EPOCH_DIFF); @@ -42,8 +111,12 @@ static jvalue _Date_coerceDatum(Type self, Datum arg) static Datum _Date_coerceObject(Type self, jobject date) { - jlong milliSecs = JNI_callLongMethod(date, s_Date_getTime) - INT64CONST(86400000) * EPOCH_DIFF; - jlong secs = milliSecs / 1000 - Timestamp_getTimeZone_id(milliSecs * 1000); + jlong milliSecs = + JNI_callLongMethod(date, s_Date_getTime) + - INT64CONST(86400000) * EPOCH_DIFF; + jlong secs = + milliSecs / 1000 + - Timestamp_getTimeZone_id(milliSecs * 500); /* those 2 usec units */ return DateADTGetDatum((DateADT)(secs / 86400)); } @@ -62,4 +135,13 @@ void Date_initialize(void) s_Date_class = JNI_newGlobalRef(PgObject_getJavaClass("java/sql/Date")); s_Date_init = PgObject_getJavaMethod(s_Date_class, "", "(J)V"); s_Date_getTime = PgObject_getJavaMethod(s_Date_class, "getTime", "()J"); + + cls = TypeClass_alloc("type.LocalDate"); + cls->JNISignature = "Ljava/time/LocalDate;"; + cls->javaTypeName = "java.time.LocalDate"; + cls->canReplaceType = _LocalDate_canReplaceType; + cls->coerceDatum = _LocalDate_coerceDatum; + cls->coerceObject = _LocalDate_coerceObject; + s_LocalDateClass = cls; + Type_registerType2(InvalidOid, "java.time.LocalDate", _LocalDate_obtain); } diff --git a/pljava-so/src/main/c/type/Double.c b/pljava-so/src/main/c/type/Double.c index da4258fc..241ba15b 100644 --- a/pljava-so/src/main/c/type/Double.c +++ b/pljava-so/src/main/c/type/Double.c @@ -1,12 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause * - * @author Thomas Hallgren + * Contributors: + * Tada AB + * PostgreSQL Global Development Group + * Chapman Flack */ #include "pljava/type/Type_priv.h" #include "pljava/type/Array.h" @@ -14,7 +17,6 @@ static TypeClass s_doubleClass; static jclass s_Double_class; -static jclass s_DoubleArray_class; static jmethodID s_Double_init; static jmethodID s_Double_doubleValue; @@ -29,9 +31,9 @@ static Datum _asDatum(jdouble v) return ret; } -static Datum _double_invoke(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +static Datum _double_invoke(Type self, Function fn, PG_FUNCTION_ARGS) { - return _asDatum(JNI_callStaticDoubleMethodA(cls, method, args)); + return _asDatum(pljava_Function_doubleInvoke(fn)); } static jvalue _double_coerceDatum(Type self, Datum arg) @@ -80,21 +82,8 @@ static Datum _doubleArray_coerceObject(Type self, jobject doubleArray) nElems = JNI_getArrayLength((jarray)doubleArray); v = createArrayType(nElems, sizeof(jdouble), FLOAT8OID, false); - if(!JNI_isInstanceOf( doubleArray, s_DoubleArray_class)) - JNI_getDoubleArrayRegion((jdoubleArray)doubleArray, 0, + JNI_getDoubleArrayRegion((jdoubleArray)doubleArray, 0, nElems, (jdouble*)ARR_DATA_PTR(v)); - else - { - int idx = 0; - jdouble *array = (jdouble*)ARR_DATA_PTR(v); - - for(idx = 0; idx < nElems; ++idx) - { - array[idx] = JNI_callDoubleMethod(JNI_getObjectArrayElement(doubleArray, idx), - s_Double_doubleValue); - } - - } PG_RETURN_ARRAYTYPE_P(v); } @@ -135,7 +124,6 @@ void Double_initialize(void) TypeClass cls; s_Double_class = JNI_newGlobalRef(PgObject_getJavaClass("java/lang/Double")); - s_DoubleArray_class = JNI_newGlobalRef(PgObject_getJavaClass("[Ljava/lang/Double;")); s_Double_init = PgObject_getJavaMethod(s_Double_class, "", "(D)V"); s_Double_doubleValue = PgObject_getJavaMethod(s_Double_class, "doubleValue", "()D"); diff --git a/pljava-so/src/main/c/type/ErrorData.c b/pljava-so/src/main/c/type/ErrorData.c index 4bc5186d..a9d86591 100644 --- a/pljava-so/src/main/c/type/ErrorData.c +++ b/pljava-so/src/main/c/type/ErrorData.c @@ -1,12 +1,19 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ #include "org_postgresql_pljava_internal_ErrorData.h" +#include "pljava/DualState.h" #include "pljava/Exception.h" #include "pljava/type/Type_priv.h" #include "pljava/type/ErrorData.h" @@ -16,32 +23,34 @@ static jclass s_ErrorData_class; static jmethodID s_ErrorData_init; static jmethodID s_ErrorData_getNativePointer; -jobject ErrorData_getCurrentError(void) +jobject pljava_ErrorData_getCurrentError(void) { - Ptr2Long p2l; jobject jed; MemoryContext curr = MemoryContextSwitchTo(JavaMemoryContext); ErrorData* errorData = CopyErrorData(); MemoryContextSwitchTo(curr); - p2l.longVal = 0L; /* ensure that the rest is zeroed out */ - p2l.ptrVal = errorData; - jed = JNI_newObject(s_ErrorData_class, s_ErrorData_init, p2l.longVal); + /* + * Passing (jlong)0 as the ResourceOwner means this will never be matched by + * a nativeRelease call; that's appropriate (for now) as the ErrorData copy + * is being made into JavaMemoryContext, which never gets reset, so only + * unreachability from the Java side will free it. + */ + jed = JNI_newObjectLocked(s_ErrorData_class, s_ErrorData_init, + pljava_DualState_key(), (jlong)0, PointerGetJLong(errorData)); return jed; } -ErrorData* ErrorData_getErrorData(jobject jed) +ErrorData* pljava_ErrorData_getErrorData(jobject jed) { - Ptr2Long p2l; - p2l.longVal = JNI_callLongMethod(jed, s_ErrorData_getNativePointer); - return (ErrorData*)p2l.ptrVal; + return JLongGet(ErrorData *, + JNI_callLongMethod(jed, s_ErrorData_getNativePointer)); } /* Make this datatype available to the postgres system. 
*/ -extern void ErrorData_initialize(void); -void ErrorData_initialize(void) +void pljava_ErrorData_initialize(void) { JNINativeMethod methods[] = { { @@ -124,17 +133,13 @@ void ErrorData_initialize(void) "(J)I", Java_org_postgresql_pljava_internal_ErrorData__1getSavedErrno }, - { - "_free", - "(J)V", - Java_org_postgresql_pljava_internal_ErrorData__1free - }, { 0, 0, 0 } }; s_ErrorData_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/ErrorData")); PgObject_registerNatives2(s_ErrorData_class, methods); - s_ErrorData_init = PgObject_getJavaMethod(s_ErrorData_class, "", "(J)V"); + s_ErrorData_init = PgObject_getJavaMethod(s_ErrorData_class, "", + "(Lorg/postgresql/pljava/internal/DualState$Key;JJ)V"); s_ErrorData_getNativePointer = PgObject_getJavaMethod(s_ErrorData_class, "getNativePointer", "()J"); } @@ -150,9 +155,7 @@ void ErrorData_initialize(void) JNIEXPORT jint JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getErrorLevel(JNIEnv* env, jclass cls, jlong _this) { - Ptr2Long p2l; - p2l.longVal = _this; - return ((ErrorData*)p2l.ptrVal)->elevel; + return JLongGet(ErrorData *, _this)->elevel; } /* @@ -164,9 +167,8 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getMes { jstring result = 0; BEGIN_NATIVE_NO_ERRCHECK - Ptr2Long p2l; - p2l.longVal = _this; - result = String_createJavaStringFromNTS(((ErrorData*)p2l.ptrVal)->message); + result = + String_createJavaStringFromNTS(JLongGet(ErrorData *, _this)->message); END_NATIVE return result; } @@ -183,12 +185,10 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getSql char buf[6]; int errCode; int idx; - Ptr2Long p2l; - p2l.longVal = _this; /* unpack MAKE_SQLSTATE code */ - errCode = ((ErrorData*)p2l.ptrVal)->sqlerrcode; + errCode = JLongGet(ErrorData *, _this)->sqlerrcode; for (idx = 0; idx < 5; ++idx) { buf[idx] = (char)PGUNSIXBIT(errCode); /*why not cast in macro?*/ @@ -208,9 +208,7 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getSql */ JNIEXPORT jboolean JNICALL Java_org_postgresql_pljava_internal_ErrorData__1isOutputToServer(JNIEnv* env, jclass cls, jlong _this) { - Ptr2Long p2l; - p2l.longVal = _this; - return (jboolean)((ErrorData*)p2l.ptrVal)->output_to_server; + return (jboolean)JLongGet(ErrorData *, _this)->output_to_server; } /* @@ -220,9 +218,7 @@ JNIEXPORT jboolean JNICALL Java_org_postgresql_pljava_internal_ErrorData__1isOut */ JNIEXPORT jboolean JNICALL Java_org_postgresql_pljava_internal_ErrorData__1isOutputToClient(JNIEnv* env, jclass cls, jlong _this) { - Ptr2Long p2l; - p2l.longVal = _this; - return (jboolean)((ErrorData*)p2l.ptrVal)->output_to_client; + return (jboolean)JLongGet(ErrorData *, _this)->output_to_client; } /* @@ -232,9 +228,11 @@ JNIEXPORT jboolean JNICALL Java_org_postgresql_pljava_internal_ErrorData__1isOut */ JNIEXPORT jboolean JNICALL Java_org_postgresql_pljava_internal_ErrorData__1isShowFuncname(JNIEnv* env, jclass cls, jlong _this) { - Ptr2Long p2l; - p2l.longVal = _this; - return (jboolean)((ErrorData*)p2l.ptrVal)->show_funcname; +#if PG_VERSION_NUM < 140000 + return (jboolean)JLongGet(ErrorData *, _this)->show_funcname; +#else + return JNI_FALSE; +#endif } /* @@ -246,9 +244,8 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getFil { jstring result = 0; BEGIN_NATIVE_NO_ERRCHECK - Ptr2Long p2l; - p2l.longVal = _this; - result = String_createJavaStringFromNTS(((ErrorData*)p2l.ptrVal)->filename); + result = + 
String_createJavaStringFromNTS(JLongGet(ErrorData *, _this)->filename); END_NATIVE return result; } @@ -260,9 +257,7 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getFil */ JNIEXPORT jint JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getLineno(JNIEnv* env, jclass cls, jlong _this) { - Ptr2Long p2l; - p2l.longVal = _this; - return (jint)((ErrorData*)p2l.ptrVal)->lineno; + return (jint)JLongGet(ErrorData *, _this)->lineno; } /* @@ -274,9 +269,8 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getFun { jstring result = 0; BEGIN_NATIVE_NO_ERRCHECK - Ptr2Long p2l; - p2l.longVal = _this; - result = String_createJavaStringFromNTS(((ErrorData*)p2l.ptrVal)->funcname); + result = + String_createJavaStringFromNTS(JLongGet(ErrorData *, _this)->funcname); END_NATIVE return result; } @@ -290,9 +284,8 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getDet { jstring result = 0; BEGIN_NATIVE_NO_ERRCHECK - Ptr2Long p2l; - p2l.longVal = _this; - result = String_createJavaStringFromNTS(((ErrorData*)p2l.ptrVal)->detail); + result = + String_createJavaStringFromNTS(JLongGet(ErrorData *, _this)->detail); END_NATIVE return result; } @@ -306,9 +299,8 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getHin { jstring result = 0; BEGIN_NATIVE_NO_ERRCHECK - Ptr2Long p2l; - p2l.longVal = _this; - result = String_createJavaStringFromNTS(((ErrorData*)p2l.ptrVal)->hint); + result = + String_createJavaStringFromNTS(JLongGet(ErrorData *, _this)->hint); END_NATIVE return result; } @@ -322,9 +314,8 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getCon { jstring result = 0; BEGIN_NATIVE_NO_ERRCHECK - Ptr2Long p2l; - p2l.longVal = _this; - result = String_createJavaStringFromNTS(((ErrorData*)p2l.ptrVal)->context); + result = + String_createJavaStringFromNTS(JLongGet(ErrorData *, _this)->context); END_NATIVE return result; } @@ -336,9 +327,7 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getCon */ JNIEXPORT jint JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getCursorPos(JNIEnv* env, jclass cls, jlong _this) { - Ptr2Long p2l; - p2l.longVal = _this; - return (jint)((ErrorData*)p2l.ptrVal)->cursorpos; + return (jint)JLongGet(ErrorData *, _this)->cursorpos; } /* @@ -348,9 +337,7 @@ JNIEXPORT jint JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getCursor */ JNIEXPORT jint JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getInternalPos(JNIEnv* env, jclass cls, jlong _this) { - Ptr2Long p2l; - p2l.longVal = _this; - return (jint)((ErrorData*)p2l.ptrVal)->internalpos; + return (jint)JLongGet(ErrorData *, _this)->internalpos; } /* @@ -363,9 +350,8 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getInt jstring result = 0; BEGIN_NATIVE_NO_ERRCHECK - Ptr2Long p2l; - p2l.longVal = _this; - result = String_createJavaStringFromNTS(((ErrorData*)p2l.ptrVal)->internalquery); + result = String_createJavaStringFromNTS( + JLongGet(ErrorData *, _this)->internalquery); END_NATIVE return result; @@ -378,22 +364,5 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getInt */ JNIEXPORT jint JNICALL Java_org_postgresql_pljava_internal_ErrorData__1getSavedErrno(JNIEnv* env, jclass cls, jlong _this) { - Ptr2Long p2l; - p2l.longVal = _this; - return (jint)((ErrorData*)p2l.ptrVal)->saved_errno; -} - -/* - * Class: org_postgresql_pljava_internal_ErrorData - * Method: _free - * Signature: (J)V - */ 
-JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_ErrorData__1free(JNIEnv* env, jobject _this, jlong pointer) -{ - BEGIN_NATIVE_NO_ERRCHECK - Ptr2Long p2l; - p2l.longVal = pointer; - FreeErrorData(p2l.ptrVal); - END_NATIVE + return (jint)JLongGet(ErrorData *, _this)->saved_errno; } diff --git a/pljava-so/src/main/c/type/Float.c b/pljava-so/src/main/c/type/Float.c index 2b8df105..f853097f 100644 --- a/pljava-so/src/main/c/type/Float.c +++ b/pljava-so/src/main/c/type/Float.c @@ -1,12 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause * - * @author Thomas Hallgren + * Contributors: + * Tada AB + * PostgreSQL Global Development Group + * Chapman Flack */ #include "pljava/type/Type_priv.h" #include "pljava/type/Array.h" @@ -14,7 +17,6 @@ static TypeClass s_floatClass; static jclass s_Float_class; -static jclass s_FloatArray_class; static jmethodID s_Float_init; static jmethodID s_Float_floatValue; @@ -29,9 +31,9 @@ static Datum _asDatum(jfloat v) return ret; } -static Datum _float_invoke(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +static Datum _float_invoke(Type self, Function fn, PG_FUNCTION_ARGS) { - return _asDatum(JNI_callStaticFloatMethodA(cls, method, args)); + return _asDatum(pljava_Function_floatInvoke(fn)); } static jvalue _float_coerceDatum(Type self, Datum arg) @@ -82,20 +84,8 @@ static Datum _floatArray_coerceObject(Type self, jobject floatArray) v = createArrayType(nElems, sizeof(jfloat), FLOAT4OID, false); - if(!JNI_isInstanceOf( floatArray, s_FloatArray_class)) - JNI_getFloatArrayRegion((jfloatArray)floatArray, 0, + JNI_getFloatArrayRegion((jfloatArray)floatArray, 0, nElems, (jfloat*)ARR_DATA_PTR(v)); - else - { - int idx = 0; - jfloat *array = (jfloat*)ARR_DATA_PTR(v); - - for(idx = 0; idx < nElems; ++idx) - { - array[idx] = JNI_callFloatMethod(JNI_getObjectArrayElement(floatArray, idx), - s_Float_floatValue); - } - } PG_RETURN_ARRAYTYPE_P(v); } @@ -136,7 +126,6 @@ void Float_initialize(void) TypeClass cls; s_Float_class = JNI_newGlobalRef(PgObject_getJavaClass("java/lang/Float")); - s_FloatArray_class = JNI_newGlobalRef(PgObject_getJavaClass("[Ljava/lang/Float;")); s_Float_init = PgObject_getJavaMethod(s_Float_class, "", "(F)V"); s_Float_floatValue = PgObject_getJavaMethod(s_Float_class, "floatValue", "()F"); diff --git a/pljava-so/src/main/c/type/HeapTupleHeader.c b/pljava-so/src/main/c/type/HeapTupleHeader.c deleted file mode 100644 index dd27f44b..00000000 --- a/pljava-so/src/main/c/type/HeapTupleHeader.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2012 PostgreSQL Global Development Group - * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License - * - * @author Thomas Hallgren - */ -#include "pljava/type/Type_priv.h" -#include "pljava/type/HeapTupleHeader.h" - -#include -#include -#include - 
-#include "pljava/Exception.h" -#include "pljava/Invocation.h" -#include "pljava/type/TupleDesc.h" - -jobject HeapTupleHeader_getTupleDesc(HeapTupleHeader ht) -{ - jobject result = 0; - TupleDesc tupleDesc = - lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(ht), - HeapTupleHeaderGetTypMod(ht)); - - if (tupleDesc != NULL) - { - result = TupleDesc_create(tupleDesc); - /* - * TupleDesc_create() creates a copy of the tuple descriptor, so - * can release this now - */ - ReleaseTupleDesc(tupleDesc); - } - - return result; -} - -jobject HeapTupleHeader_getObject(JNIEnv* env, jlong hth, jlong jtd, jint attrNo) -{ - jobject result = 0; - HeapTupleHeader self = (HeapTupleHeader)Invocation_getWrappedPointer(hth); - if(self != 0 && jtd != 0) - { - Ptr2Long p2l; - p2l.longVal = jtd; - BEGIN_NATIVE - PG_TRY(); - { - Oid typeId = SPI_gettypeid((TupleDesc)p2l.ptrVal, (int)attrNo); - if(!OidIsValid(typeId)) - { - Exception_throw(ERRCODE_INVALID_DESCRIPTOR_INDEX, - "Invalid attribute number \"%d\"", (int)attrNo); - } - else - { - Datum binVal; - bool wasNull = false; - Type type = Type_fromOid(typeId, Invocation_getTypeMap()); - if(Type_isPrimitive(type)) - /* - * This is a primitive type - */ - type = Type_getObjectType(type); - - binVal = GetAttributeByNum(self, (AttrNumber)attrNo, &wasNull); - if(!wasNull) - result = Type_coerceDatum(type, binVal).l; - } - } - PG_CATCH(); - { - Exception_throw_ERROR("GetAttributeByNum"); - } - PG_END_TRY(); - END_NATIVE - } - return result; - -} - -void HeapTupleHeader_free(JNIEnv* env, jlong hth) -{ - BEGIN_NATIVE_NO_ERRCHECK - Invocation_freeLocalWrapper(hth); - END_NATIVE -} diff --git a/pljava-so/src/main/c/type/Integer.c b/pljava-so/src/main/c/type/Integer.c index 729c9baf..fde680b5 100644 --- a/pljava-so/src/main/c/type/Integer.c +++ b/pljava-so/src/main/c/type/Integer.c @@ -1,12 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause * - * @author Thomas Hallgren + * Contributors: + * Tada AB + * PostgreSQL Global Development Group + * Chapman Flack */ #include "pljava/type/Type_priv.h" #include "pljava/type/Array.h" @@ -14,16 +17,15 @@ static TypeClass s_intClass; static jclass s_Integer_class; -static jclass s_IntegerArray_class; static jmethodID s_Integer_init; static jmethodID s_Integer_intValue; /* * int primitive type. 
*/ -static Datum _int_invoke(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +static Datum _int_invoke(Type self, Function fn, PG_FUNCTION_ARGS) { - jint iv = JNI_callStaticIntMethodA(cls, method, args); + jint iv = pljava_Function_intInvoke(fn); return Int32GetDatum(iv); } @@ -75,20 +77,8 @@ static Datum _intArray_coerceObject(Type self, jobject intArray) v = createArrayType(nElems, sizeof(jint), INT4OID, false); - if(!JNI_isInstanceOf( intArray, s_IntegerArray_class)) - JNI_getIntArrayRegion((jintArray)intArray, 0, nElems, (jint*)ARR_DATA_PTR(v)); - else - { - int idx = 0; - jint *array = (jint*)ARR_DATA_PTR(v); - - for(idx = 0; idx < nElems; ++idx) - { - array[idx] = JNI_callIntMethod(JNI_getObjectArrayElement(intArray, idx), - s_Integer_intValue); - } - } - + JNI_getIntArrayRegion( + (jintArray)intArray, 0, nElems, (jint*)ARR_DATA_PTR(v)); PG_RETURN_ARRAYTYPE_P(v); } @@ -129,7 +119,6 @@ void Integer_initialize(void) TypeClass cls; s_Integer_class = JNI_newGlobalRef(PgObject_getJavaClass("java/lang/Integer")); - s_IntegerArray_class = JNI_newGlobalRef(PgObject_getJavaClass("[Ljava/lang/Integer;")); s_Integer_init = PgObject_getJavaMethod(s_Integer_class, "", "(I)V"); s_Integer_intValue = PgObject_getJavaMethod(s_Integer_class, "intValue", "()I"); diff --git a/pljava-so/src/main/c/type/JavaWrapper.c b/pljava-so/src/main/c/type/JavaWrapper.c deleted file mode 100644 index 2300d9a4..00000000 --- a/pljava-so/src/main/c/type/JavaWrapper.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html - * - * @author Thomas Hallgren - */ -#include "org_postgresql_pljava_internal_JavaWrapper.h" -#include "pljava/type/Type_priv.h" -#include "pljava/type/JavaWrapper.h" -#include "pljava/Backend.h" -#include "pljava/Exception.h" - -static jclass s_JavaWrapper_class; -static jfieldID s_JavaWrapper_m_pointer; - -MemoryContext JavaMemoryContext; - -static jlong _getPointer(jobject managed) -{ - if(managed == 0) - { - Exception_throw(ERRCODE_INTERNAL_ERROR, "Null JavaWrapper object"); - return 0; - } - - return JNI_getLongField(managed, s_JavaWrapper_m_pointer); -} - -static Datum _JavaWrapper_coerceObject(Type self, jobject nStruct) -{ - Ptr2Long p2l; - p2l.longVal = _getPointer(nStruct); - return PointerGetDatum(p2l.ptrVal); -} - -TypeClass JavaWrapperClass_alloc(const char* name) -{ - TypeClass self = TypeClass_alloc(name); - self->coerceObject = _JavaWrapper_coerceObject; - return self; -} - -extern void JavaWrapper_initialize(void); -void JavaWrapper_initialize(void) -{ - JNINativeMethod methods[] = - { - { - "_free", - "(J)V", - Java_org_postgresql_pljava_internal_JavaWrapper__1free - }, - { 0, 0, 0 } - }; - - s_JavaWrapper_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/JavaWrapper")); - PgObject_registerNatives2(s_JavaWrapper_class, methods); - s_JavaWrapper_m_pointer = PgObject_getJavaField(s_JavaWrapper_class, "m_pointer", "J"); - - JavaMemoryContext = AllocSetContextCreate(TopMemoryContext, - "PL/Java", - ALLOCSET_DEFAULT_SIZES); -} - -/* - * Class: org_postgresql_pljava_internal_JavaWrapper - * Method: _free - * Signature: (J)V - */ -JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_JavaWrapper__1free(JNIEnv* env, jobject _this, jlong pointer) -{ - BEGIN_NATIVE_NO_ERRCHECK - Ptr2Long p2l; - p2l.longVal = pointer; - pfree(p2l.ptrVal); - 
END_NATIVE -} diff --git a/pljava-so/src/main/c/type/LargeObject.c b/pljava-so/src/main/c/type/LargeObject.c deleted file mode 100644 index 537d77bc..00000000 --- a/pljava-so/src/main/c/type/LargeObject.c +++ /dev/null @@ -1,432 +0,0 @@ -/* - * Copyright (c) 2004-2016 Tada AB and other contributors, as listed below. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the The BSD 3-Clause License - * which accompanies this distribution, and is available at - * http://opensource.org/licenses/BSD-3-Clause - * - * Contributors: - * Tada AB - * Chapman Flack - */ -#include - -#include "org_postgresql_pljava_internal_LargeObject.h" -#include "pljava/Exception.h" -#include "pljava/Invocation.h" -#include "pljava/type/Type_priv.h" -#include "pljava/type/Oid.h" -#include "pljava/type/LargeObject.h" - -static jclass s_LargeObject_class; -static jmethodID s_LargeObject_init; - -#if PG_VERSION_NUM < 90300 -#define OFFSETNARROWCAST (int) -#else -#define OFFSETNARROWCAST -#endif - -/* - * org.postgresql.pljava.type.LargeObject type. - */ -jobject LargeObject_create(LargeObjectDesc* lo) -{ - jobject jlo; - Ptr2Long loH; - - if(lo == 0) - return 0; - - loH.longVal = 0L; /* ensure that the rest is zeroed out */ - loH.ptrVal = lo; - jlo = JNI_newObject(s_LargeObject_class, s_LargeObject_init, loH.longVal); - return jlo; -} - -extern void LargeObject_initialize(void); -void LargeObject_initialize(void) -{ - TypeClass cls; - JNINativeMethod methods[] = - { - { - "_create", - "(I)Lorg/postgresql/pljava/internal/Oid;", - Java_org_postgresql_pljava_internal_LargeObject__1create - }, - { - "_drop", - "(Lorg/postgresql/pljava/internal/Oid;)I", - Java_org_postgresql_pljava_internal_LargeObject__1drop - }, - { - "_open", - "(Lorg/postgresql/pljava/internal/Oid;I)Lorg/postgresql/pljava/internal/LargeObject;", - Java_org_postgresql_pljava_internal_LargeObject__1open - }, - { - "_close", - "(J)V", - Java_org_postgresql_pljava_internal_LargeObject__1close - }, - { - "_getId", - "(J)Lorg/postgresql/pljava/internal/Oid;", - Java_org_postgresql_pljava_internal_LargeObject__1getId - }, - { - "_length", - "(J)J", - Java_org_postgresql_pljava_internal_LargeObject__1length - }, - { - "_seek", - "(JJI)J", - Java_org_postgresql_pljava_internal_LargeObject__1seek - }, - { - "_tell", - "(J)J", - Java_org_postgresql_pljava_internal_LargeObject__1tell - }, - { - "_truncate", - "(JJ)V", - Java_org_postgresql_pljava_internal_LargeObject__1truncate - }, - { - "_read", - "(J[B)I", - Java_org_postgresql_pljava_internal_LargeObject__1read - }, - { - "_write", - "(J[B)I", - Java_org_postgresql_pljava_internal_LargeObject__1write - }, - { 0, 0, 0 } - }; - - s_LargeObject_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/LargeObject")); - PgObject_registerNatives2(s_LargeObject_class, methods); - s_LargeObject_init = PgObject_getJavaMethod(s_LargeObject_class, "", "(J)V"); - - cls = TypeClass_alloc("type.LargeObject"); - cls->JNISignature = "Lorg/postgresql/pljava/internal/LargeObject;"; - cls->javaTypeName = "org.postgresql.pljava.internal.LargeObject"; - Type_registerType("org.postgresql.pljava.internal.LargeObject", TypeClass_allocInstance(cls, InvalidOid)); -} - -/**************************************** - * JNI methods - ****************************************/ -/* - * Class: org_postgresql_pljava_internal_LargeObject - * Method: _create - * Signature: (I)Lorg/postgresql/pljava/internal/LargeObject; - */ -JNIEXPORT jobject JNICALL 
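The JNINativeMethod tables used throughout these files pair a Java method name and JVM type descriptor with a C entry point. Below is a self-contained sketch of the same registration step expressed with the raw JNI RegisterNatives call; the class name org/example/NativeHandle and its _length native are hypothetical, and PgObject_registerNatives2 is assumed to be a thin wrapper over this mechanism.

#include <jni.h>

static jlong JNICALL Example_length(JNIEnv *env, jclass cls, jlong handle)
{
    (void)env; (void)cls;
    return handle;   /* placeholder body for illustration */
}

static void register_example_natives(JNIEnv *env)
{
    /* Java-side name, JVM descriptor, C implementation */
    JNINativeMethod methods[] =
    {
        { "_length", "(J)J", (void *)Example_length },
    };
    jclass cls = (*env)->FindClass(env, "org/example/NativeHandle");

    if (cls != NULL)
        (*env)->RegisterNatives(env, cls, methods,
            (jint)(sizeof methods / sizeof methods[0]));
}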
-Java_org_postgresql_pljava_internal_LargeObject__1create(JNIEnv* env, jclass cls, jint flags) -{ - jobject result = 0; - - BEGIN_NATIVE - PG_TRY(); - { - result = Oid_create(inv_create((int)flags)); - } - PG_CATCH(); - { - Exception_throw_ERROR("inv_create"); - } - PG_END_TRY(); - END_NATIVE - - return result; -} - -/* - * Class: org_postgresql_pljava_internal_LargeObject - * Method: _drop - * Signature: (Lorg/postgresql/pljava/internal/Oid;)I - */ -JNIEXPORT jint JNICALL -Java_org_postgresql_pljava_internal_LargeObject__1drop(JNIEnv* env, jclass cls, jobject oid) -{ - jint result = -1; - BEGIN_NATIVE - PG_TRY(); - { - result = inv_drop(Oid_getOid(oid)); - } - PG_CATCH(); - { - Exception_throw_ERROR("inv_drop"); - } - PG_END_TRY(); - END_NATIVE - return result; -} - -/* - * Class: org_postgresql_pljava_internal_LargeObject - * Method: _open - * Signature: (Lorg/postgresql/pljava/internal/Oid;I)Lorg/postgresql/pljava/internal/LargeObject; - */ -JNIEXPORT jobject JNICALL -Java_org_postgresql_pljava_internal_LargeObject__1open(JNIEnv* env, jclass cls, jobject oid, jint flags) -{ - jobject result = 0; - BEGIN_NATIVE - PG_TRY(); - { - result = LargeObject_create(inv_open(Oid_getOid(oid), (int)flags, JavaMemoryContext)); - } - PG_CATCH(); - { - Exception_throw_ERROR("inv_open"); - } - PG_END_TRY(); - END_NATIVE - return result; -} - - -/* - * Class: org_postgresql_pljava_internal_LargeObject - * Method: _close - * Signature: (J)V - */ -JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_LargeObject__1close(JNIEnv* env, jclass cls, jlong _this) -{ - LargeObjectDesc* self = Invocation_getWrappedPointer(_this); - if(self != 0) - { - BEGIN_NATIVE - PG_TRY(); - { - inv_close(self); - } - PG_CATCH(); - { - Exception_throw_ERROR("inv_close"); - } - PG_END_TRY(); - END_NATIVE - } -} - -/* - * Class: org_postgresql_pljava_internal_LargeObject - * Method: _getId - * Signature: (J)Lorg/postgresql/pljava/internal/Oid; - */ -JNIEXPORT jobject JNICALL -Java_org_postgresql_pljava_internal_LargeObject__1getId(JNIEnv* env, jclass cls, jlong _this) -{ - jobject result = 0; - LargeObjectDesc* self = Invocation_getWrappedPointer(_this); - if(self != 0) - { - BEGIN_NATIVE - result = Oid_create(self->id); - END_NATIVE - } - return result; -} - -/* - * Class: org_postgresql_pljava_internal_LargeObject - * Method: _length - * Signature: (J)J - */ -JNIEXPORT jlong JNICALL -Java_org_postgresql_pljava_internal_LargeObject__1length(JNIEnv* env, jclass cls, jlong _this) -{ - jlong result = 0; - LargeObjectDesc* self = Invocation_getWrappedPointer(_this); - if(self != 0) - { - BEGIN_NATIVE - PG_TRY(); - { - /* There's no inv_length call so we use inv_seek on - * a temporary LargeObjectDesc. 
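The deleted _length method just above measures a large object by seeking to SEEK_END on a throwaway copy of the descriptor, so the caller's read position is left alone. The same goal with an ordinary POSIX file descriptor looks like the save/seek/restore sketch below (an analogy only; it is not PostgreSQL's inv_* API).

#include <sys/types.h>
#include <unistd.h>

/* Report the length of an open file without disturbing its offset. */
static off_t length_without_moving(int fd)
{
    off_t saved = lseek(fd, 0, SEEK_CUR);   /* remember where we were */
    off_t end   = lseek(fd, 0, SEEK_END);   /* the length */
    lseek(fd, saved, SEEK_SET);             /* put the position back */
    return end;
}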
- */ - LargeObjectDesc lod; - memcpy(&lod, self, sizeof(LargeObjectDesc)); - result = (jlong)inv_seek(&lod, OFFSETNARROWCAST 0L, SEEK_END); - } - PG_CATCH(); - { - Exception_throw_ERROR("inv_seek"); - } - PG_END_TRY(); - END_NATIVE - } - return result; -} - -/* - * Class: org_postgresql_pljava_internal_LargeObject - * Method: _seek - * Signature: (JJI)J - */ -JNIEXPORT jlong JNICALL -Java_org_postgresql_pljava_internal_LargeObject__1seek(JNIEnv* env, jclass cls, jlong _this, jlong pos, jint whence) -{ - jlong result = 0; - LargeObjectDesc* self = Invocation_getWrappedPointer(_this); - if(self != 0) - { - BEGIN_NATIVE - PG_TRY(); - { - result = (jlong)inv_seek(self, OFFSETNARROWCAST pos, (int)whence); - } - PG_CATCH(); - { - Exception_throw_ERROR("inv_seek"); - } - PG_END_TRY(); - END_NATIVE - } - return result; -} - -/* - * Class: org_postgresql_pljava_internal_LargeObject - * Method: _tell - * Signature: (J)J - */ -JNIEXPORT jlong JNICALL -Java_org_postgresql_pljava_internal_LargeObject__1tell(JNIEnv* env, jclass cls, jlong _this) -{ - jlong result = 0; - LargeObjectDesc* self = Invocation_getWrappedPointer(_this); - if(self != 0) - { - BEGIN_NATIVE - PG_TRY(); - { - result = (jlong)inv_tell(self); - } - PG_CATCH(); - { - Exception_throw_ERROR("inv_tell"); - } - PG_END_TRY(); - END_NATIVE - } - return result; -} - -/* - * Class: org_postgresql_pljava_internal_LargeObject - * Method: _truncate - * Signature: (JJ)V - */ -JNIEXPORT void JNICALL Java_org_postgresql_pljava_internal_LargeObject__1truncate - (JNIEnv *env, jclass cls, jlong _this, jlong pos) -{ -#if PG_VERSION_NUM < 80200 - Exception_featureNotSupported("truncate() for large object", "8.2"); -#else - LargeObjectDesc* self = Invocation_getWrappedPointer(_this); - if(self != 0) - { - BEGIN_NATIVE - PG_TRY(); - { - inv_truncate(self, OFFSETNARROWCAST pos); - } - PG_CATCH(); - { - Exception_throw_ERROR("inv_truncate"); - } - PG_END_TRY(); - END_NATIVE - } -#endif -} - -/* - * Class: org_postgresql_pljava_internal_LargeObject - * Method: _read - * Signature: (J[B)I - */ -JNIEXPORT jint JNICALL -Java_org_postgresql_pljava_internal_LargeObject__1read(JNIEnv* env, jclass cls, jlong _this, jbyteArray buf) -{ - jint result = -1; - LargeObjectDesc* self = Invocation_getWrappedPointer(_this); - - if(self != 0 && buf != 0) - { - BEGIN_NATIVE - jint nBytes = JNI_getArrayLength(buf); - if(nBytes != 0) - { - jbyte* byteBuf = JNI_getByteArrayElements(buf, 0); - if(byteBuf != 0) - { - PG_TRY(); - { - result = (jint)inv_read(self, (char*)byteBuf, (int)nBytes); - JNI_releaseByteArrayElements(buf, byteBuf, 0); - } - PG_CATCH(); - { - JNI_releaseByteArrayElements(buf, byteBuf, JNI_ABORT); - Exception_throw_ERROR("inv_read"); - } - PG_END_TRY(); - } - } - END_NATIVE - } - return result; -} - -/* - * Class: org_postgresql_pljava_internal_LargeObject - * Method: _write - * Signature: (J[B)I - */ -JNIEXPORT jint JNICALL -Java_org_postgresql_pljava_internal_LargeObject__1write(JNIEnv* env, jclass cls, jlong _this, jbyteArray buf) -{ - jint result = -1; - LargeObjectDesc* self = Invocation_getWrappedPointer(_this); - - if(self != 0 && buf != 0) - { - BEGIN_NATIVE - jint nBytes = JNI_getArrayLength(buf); - if(nBytes != 0) - { - jbyte* byteBuf = JNI_getByteArrayElements(buf, 0); - if(byteBuf != 0) - { - PG_TRY(); - { - result = (jint)inv_write(self, (char*)byteBuf, nBytes); - - /* No need to copy bytes back, hence the JNI_ABORT */ - JNI_releaseByteArrayElements(buf, byteBuf, JNI_ABORT); - } - PG_CATCH(); - { - JNI_releaseByteArrayElements(buf, byteBuf, 
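The _read and _write natives above follow the standard JNI pinning discipline: GetByteArrayElements to obtain (possibly a copy of) the array contents, then ReleaseByteArrayElements with mode 0 to copy changes back (the read path) or JNI_ABORT to discard them (the write path, and any error path). A minimal standalone sketch of the read-only case:

#include <jni.h>
#include <string.h>

/* Copy up to cap bytes out of a Java byte[] into dst; returns bytes copied. */
static size_t copy_from_java_bytes(JNIEnv *env, jbyteArray buf,
                                   char *dst, size_t cap)
{
    jsize n = (*env)->GetArrayLength(env, buf);
    jbyte *elems = (*env)->GetByteArrayElements(env, buf, NULL);
    size_t toCopy;

    if (elems == NULL)
        return 0;                 /* an OutOfMemoryError is already pending */

    toCopy = ((size_t)n < cap) ? (size_t)n : cap;
    memcpy(dst, elems, toCopy);

    /* We only read from the array, so JNI_ABORT skips the write-back. */
    (*env)->ReleaseByteArrayElements(env, buf, elems, JNI_ABORT);
    return toCopy;
}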
JNI_ABORT); - Exception_throw_ERROR("inv_write"); - } - PG_END_TRY(); - } - } - END_NATIVE - } - return result; -} diff --git a/pljava-so/src/main/c/type/Long.c b/pljava-so/src/main/c/type/Long.c index 4e326b7d..45c9ef37 100644 --- a/pljava-so/src/main/c/type/Long.c +++ b/pljava-so/src/main/c/type/Long.c @@ -1,12 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause * - * @author Thomas Hallgren + * Contributors: + * Tada AB + * PostgreSQL Global Development Group + * Chapman Flack */ #include "pljava/type/Type_priv.h" #include "pljava/type/Array.h" @@ -14,7 +17,6 @@ static TypeClass s_longClass; static jclass s_Long_class; -static jclass s_LongArray_class; static jmethodID s_Long_init; static jmethodID s_Long_longValue; @@ -29,9 +31,9 @@ static Datum _asDatum(jlong v) return ret; } -static Datum _long_invoke(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +static Datum _long_invoke(Type self, Function fn, PG_FUNCTION_ARGS) { - return _asDatum(JNI_callStaticLongMethodA(cls, method, args)); + return _asDatum(pljava_Function_longInvoke(fn)); } static jvalue _long_coerceDatum(Type self, Datum arg) @@ -82,19 +84,8 @@ static Datum _longArray_coerceObject(Type self, jobject longArray) v = createArrayType(nElems, sizeof(jlong), INT8OID, false); - if(!JNI_isInstanceOf( longArray, s_LongArray_class)) - JNI_getLongArrayRegion((jlongArray)longArray, 0, nElems, (jlong*)ARR_DATA_PTR(v)); - else - { - int idx = 0; - jlong *array = (jlong*)ARR_DATA_PTR(v); - - for(idx = 0; idx < nElems; ++idx) - { - array[idx] = JNI_callLongMethod(JNI_getObjectArrayElement(longArray, idx), - s_Long_longValue); - } - } + JNI_getLongArrayRegion( + (jlongArray)longArray, 0, nElems, (jlong*)ARR_DATA_PTR(v)); PG_RETURN_ARRAYTYPE_P(v); } @@ -135,7 +126,6 @@ void Long_initialize(void) TypeClass cls; s_Long_class = JNI_newGlobalRef(PgObject_getJavaClass("java/lang/Long")); - s_LongArray_class = JNI_newGlobalRef(PgObject_getJavaClass("[Ljava/lang/Long;")); s_Long_init = PgObject_getJavaMethod(s_Long_class, "", "(J)V"); s_Long_longValue = PgObject_getJavaMethod(s_Long_class, "longValue", "()J"); diff --git a/pljava-so/src/main/c/type/Oid.c b/pljava-so/src/main/c/type/Oid.c index fce5899c..8fcb13bd 100644 --- a/pljava-so/src/main/c/type/Oid.c +++ b/pljava-so/src/main/c/type/Oid.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include @@ -18,6 +22,7 @@ #include "pljava/type/Oid.h" #include "pljava/type/String.h" #include "pljava/Exception.h" +#include "pljava/Function.h" #include "pljava/Invocation.h" #define pg_unreachable() abort() @@ -34,6 +39,14 @@ static jobject s_OidOid; jobject Oid_create(Oid oid) { jobject joid; + /* + * This is a natural place to have a StaticAssertStmt making sure the + * ubiquitous PG type 'Oid' fits in a jint. If it is ever removed from here + * or this code goes away, it should go someplace else. If it ever produces + * an error, don't assume the only things that need fixing will be in this + * file or nearby.... + */ + StaticAssertStmt(sizeof(Oid) <= sizeof(jint), "Oid wider than jint?!"); if(OidIsValid(oid)) joid = JNI_newObject(s_Oid_class, s_Oid_init, oid); else @@ -105,13 +118,39 @@ Oid Oid_forSqlType(int sqlType) case java_sql_Types_DATALINK: typeId = TEXTOID; break; -/* case java_sql_Types_NULL: + case java_sql_Types_NULL: case java_sql_Types_OTHER: case java_sql_Types_JAVA_OBJECT: case java_sql_Types_DISTINCT: case java_sql_Types_STRUCT: case java_sql_Types_ARRAY: - case java_sql_Types_REF: */ + case java_sql_Types_REF: + typeId = InvalidOid; /* Not yet mapped */ + break; + + /* JDBC 4.0 - present in Java 6 and later, no need to conditionalize */ + case java_sql_Types_SQLXML: +#ifdef XMLOID /* but PG can have been built without libxml */ + typeId = XMLOID; +#else + typeId = InvalidOid; +#endif + break; + case java_sql_Types_ROWID: + case java_sql_Types_NCHAR: + case java_sql_Types_NVARCHAR: + case java_sql_Types_LONGNVARCHAR: + case java_sql_Types_NCLOB: + typeId = InvalidOid; /* Not yet mapped */ + break; + + case java_sql_Types_TIME_WITH_TIMEZONE: + typeId = TIMETZOID; + break; + case java_sql_Types_TIMESTAMP_WITH_TIMEZONE: + typeId = TIMESTAMPTZOID; + break; + case java_sql_Types_REF_CURSOR: default: typeId = InvalidOid; /* Not yet mapped */ break; @@ -158,6 +197,11 @@ void Oid_initialize(void) "(I)Ljava/lang/String;", Java_org_postgresql_pljava_internal_Oid__1getJavaClassName }, + { + "_getCurrentLoader", + "()Ljava/lang/ClassLoader;", + Java_org_postgresql_pljava_internal_Oid__1getCurrentLoader + }, { 0, 0, 0 }}; jobject tmp; @@ -218,11 +262,7 @@ Java_org_postgresql_pljava_internal_Oid__1forTypeName(JNIEnv* env, jclass cls, j PG_TRY(); { int32 typmod = 0; -#if PG_VERSION_NUM < 90400 - parseTypeString(typeNameOrOid, &typeId, &typmod); -#else parseTypeString(typeNameOrOid, &typeId, &typmod, 0); -#endif } PG_CATCH(); { @@ -269,3 +309,18 @@ Java_org_postgresql_pljava_internal_Oid__1getJavaClassName(JNIEnv* env, jclass c END_NATIVE return result; } + +/* + * Class: org_postgresql_pljava_internal_Oid + * Method: _getCurrentLoader + * Signature: ()Ljava/lang/ClassLoader; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_internal_Oid__1getCurrentLoader(JNIEnv *env, jclass cls) +{ + jobject result = NULL; + BEGIN_NATIVE + result = Function_currentLoader(); + END_NATIVE + return result; +} diff --git a/pljava-so/src/main/c/type/Portal.c b/pljava-so/src/main/c/type/Portal.c index 2edc7864..6804943c 100644 --- a/pljava-so/src/main/c/type/Portal.c +++ b/pljava-so/src/main/c/type/Portal.c @@ -1,12 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2008, 2010, 2011 
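The StaticAssertStmt added in Oid_create above is a compile-time guarantee, not a runtime check. A standalone C11 analogue, with stand-in typedefs because the real Oid and jint come from postgres.h and jni.h:

#include <stdint.h>

typedef uint32_t FakeOid;    /* stand-in for PostgreSQL's Oid */
typedef int32_t  FakeJint;   /* stand-in for JNI's jint */

/* Fails the build, not the server, if the assumption ever breaks. */
_Static_assert(sizeof(FakeOid) <= sizeof(FakeJint), "Oid wider than jint?!");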
PostgreSQL Global Development Group + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause * - * @author Thomas Hallgren + * Contributors: + * Tada AB + * PostgreSQL Global Development Group + * Chapman Flack */ #include #include @@ -15,6 +18,7 @@ #include "org_postgresql_pljava_internal_Portal.h" #include "pljava/Backend.h" +#include "pljava/DualState.h" #include "pljava/Exception.h" #include "pljava/Invocation.h" #include "pljava/HashMap.h" @@ -31,73 +35,26 @@ static jclass s_Portal_class; static jmethodID s_Portal_init; -static jfieldID s_Portal_pointer; - -typedef void (*PortalCleanupProc)(Portal portal); - -static HashMap s_portalMap = 0; -static PortalCleanupProc s_originalCleanupProc = 0; - -static void _pljavaPortalCleanup(Portal portal) -{ - /* - * Remove this object from the cache and clear its - * handle. - */ - jobject jportal = (jobject)HashMap_removeByOpaque(s_portalMap, portal); - if(jportal) - { - - JNI_setLongField(jportal, s_Portal_pointer, 0); - JNI_deleteGlobalRef(jportal); - } - - portal->cleanup = s_originalCleanupProc; - if(s_originalCleanupProc != 0) - { - (*s_originalCleanupProc)(portal); - } -} /* - * org.postgresql.pljava.type.Tuple type. + * org.postgresql.pljava.type.Portal type. */ -jobject Portal_create(Portal portal) +jobject pljava_Portal_create(Portal portal, jobject jplan) { - jobject jportal = 0; - if(portal != 0) - { - jportal = (jobject)HashMap_getByOpaque(s_portalMap, portal); - if(jportal == 0) - { - Ptr2Long p2l; - p2l.longVal = 0L; /* ensure that the rest is zeroed out */ - p2l.ptrVal = portal; - - /* We need to know when a portal is dropped so that we - * don't attempt to drop it twice. - */ - if(s_originalCleanupProc == 0) - s_originalCleanupProc = portal->cleanup; + jobject jportal; + if(portal == 0) + return NULL; - jportal = JNI_newObject(s_Portal_class, s_Portal_init, p2l.longVal); - HashMap_putByOpaque(s_portalMap, portal, JNI_newGlobalRef(jportal)); + jportal = JNI_newObjectLocked(s_Portal_class, s_Portal_init, + pljava_DualState_key(), + PointerGetJLong(portal->resowner), PointerGetJLong(portal), jplan); - /* - * Fail the day the backend decides to utilize the pointer for multiple - * purposes. - */ - Assert(portal->cleanup == s_originalCleanupProc); - portal->cleanup = _pljavaPortalCleanup; - } - } return jportal; } /* Make this datatype available to the postgres system. 
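The code removed above interposed on portal->cleanup: remember the backend's original callback, install PL/Java's own, and chain to the original when it fires (the DualState machinery now makes this unnecessary). A standalone sketch of that interposition pattern, with a made-up Resource type standing in for Portal:

#include <stddef.h>

typedef struct Resource Resource;
typedef void (*CleanupProc)(Resource *);

struct Resource
{
    CleanupProc cleanup;
    /* ... other backend-owned fields ... */
};

static CleanupProc s_originalCleanup;

static void interposedCleanup(Resource *r)
{
    /* Do our own bookkeeping first (the old code dropped its HashMap
     * entry and global ref here), then restore and chain. */
    r->cleanup = s_originalCleanup;
    if (s_originalCleanup != NULL)
        s_originalCleanup(r);
}

static void watch(Resource *r)
{
    if (s_originalCleanup == NULL)
        s_originalCleanup = r->cleanup;   /* remember it once */
    r->cleanup = interposedCleanup;
}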
*/ -extern void Portal_initialize(void); -void Portal_initialize(void) +void pljava_Portal_initialize(void) { JNINativeMethod methods[] = { @@ -108,7 +65,7 @@ void Portal_initialize(void) }, { "_getPortalPos", - "(J)I", + "(J)J", Java_org_postgresql_pljava_internal_Portal__1getPortalPos }, { @@ -118,15 +75,10 @@ void Portal_initialize(void) }, { "_fetch", - "(JJZI)I", + "(JZJ)J", Java_org_postgresql_pljava_internal_Portal__1fetch }, { - "_close", - "(J)V", - Java_org_postgresql_pljava_internal_Portal__1close - }, - { "_isAtEnd", "(J)Z", Java_org_postgresql_pljava_internal_Portal__1isAtEnd @@ -137,13 +89,8 @@ void Portal_initialize(void) Java_org_postgresql_pljava_internal_Portal__1isAtStart }, { - "_isPosOverflow", - "(J)Z", - Java_org_postgresql_pljava_internal_Portal__1isPosOverflow - }, - { "_move", - "(JJZI)I", + "(JZJ)J", Java_org_postgresql_pljava_internal_Portal__1move }, { 0, 0, 0 } @@ -151,9 +98,8 @@ void Portal_initialize(void) s_Portal_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/Portal")); PgObject_registerNatives2(s_Portal_class, methods); - s_Portal_init = PgObject_getJavaMethod(s_Portal_class, "", "(J)V"); - s_Portal_pointer = PgObject_getJavaField(s_Portal_class, "m_pointer", "J"); - s_portalMap = HashMap_create(13, TopMemoryContext); + s_Portal_init = PgObject_getJavaMethod(s_Portal_class, "", + "(Lorg/postgresql/pljava/internal/DualState$Key;JJLorg/postgresql/pljava/internal/ExecutionPlan;)V"); } /**************************************** @@ -163,17 +109,15 @@ void Portal_initialize(void) /* * Class: org_postgresql_pljava_internal_Portal * Method: _getPortalPos - * Signature: (J)I + * Signature: (J)J */ -JNIEXPORT jint JNICALL +JNIEXPORT jlong JNICALL Java_org_postgresql_pljava_internal_Portal__1getPortalPos(JNIEnv* env, jclass clazz, jlong _this) { - jint result = 0; + jlong result = 0; if(_this != 0) { - Ptr2Long p2l; - p2l.longVal = _this; - result = (jint)((Portal)p2l.ptrVal)->portalPos; + result = (jlong)JLongGet(Portal, _this)->portalPos; } return result; } @@ -181,24 +125,34 @@ Java_org_postgresql_pljava_internal_Portal__1getPortalPos(JNIEnv* env, jclass cl /* * Class: org_postgresql_pljava_internal_Portal * Method: _fetch - * Signature: (JJZI)I + * Signature: (JZJ)J */ -JNIEXPORT jint JNICALL -Java_org_postgresql_pljava_internal_Portal__1fetch(JNIEnv* env, jclass clazz, jlong _this, jlong threadId, jboolean forward, jint count) +JNIEXPORT jlong JNICALL +Java_org_postgresql_pljava_internal_Portal__1fetch(JNIEnv* env, jclass clazz, jlong _this, jboolean forward, jlong count) { - jint result = 0; + jlong result = 0; if(_this != 0) { BEGIN_NATIVE - Ptr2Long p2l; STACK_BASE_VARS - STACK_BASE_PUSH(threadId) + STACK_BASE_PUSH(env) + + /* + * One call to cleanEnqueued... is made in Invocation_popInvocation, + * when any PL/Java function returns to PostgreSQL. But what of a + * PL/Java function that loops through a lot of data before returning? + * It could be important to call cleanEnqueued... from some other + * strategically-chosen places, and this seems a good one. We get here + * every fetchSize (default 1000? See SPIStatement) rows retrieved. 
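The comment above explains why pljava_DualState_cleanEnqueuedInstances() is also called here and not only when a call returns to PostgreSQL: a long-running fetch loop would otherwise let dead native-backed Java objects pile up. A trivial standalone sketch of that batching idea, with a stub standing in for the real cleanup call:

#include <stddef.h>

static void clean_enqueued_instances(void)
{
    /* stand-in for pljava_DualState_cleanEnqueuedInstances() */
}

static void fetch_all(long totalRows, long fetchSize)
{
    long fetched;
    for (fetched = 0; fetched < totalRows; fetched += fetchSize)
    {
        /* ... fetch up to fetchSize rows and hand them to Java ... */
        clean_enqueued_instances();   /* once per batch, not per row */
    }
}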
+ */ + pljava_DualState_cleanEnqueuedInstances(); - p2l.longVal = _this; PG_TRY(); { - SPI_cursor_fetch((Portal)p2l.ptrVal, forward == JNI_TRUE, (int)count); - result = (jint)SPI_processed; + Invocation_assertConnect(); + SPI_cursor_fetch(JLongGet(Portal, _this), forward == JNI_TRUE, + (long)count); + result = (jlong)SPI_processed; } PG_CATCH(); { @@ -223,9 +177,7 @@ Java_org_postgresql_pljava_internal_Portal__1getName(JNIEnv* env, jclass clazz, if(_this != 0) { BEGIN_NATIVE - Ptr2Long p2l; - p2l.longVal = _this; - result = String_createJavaStringFromNTS(((Portal)p2l.ptrVal)->name); + result = String_createJavaStringFromNTS(JLongGet(Portal, _this)->name); END_NATIVE } return result; @@ -243,51 +195,12 @@ Java_org_postgresql_pljava_internal_Portal__1getTupleDesc(JNIEnv* env, jclass cl if(_this != 0) { BEGIN_NATIVE - Ptr2Long p2l; - p2l.longVal = _this; - result = TupleDesc_create(((Portal)p2l.ptrVal)->tupDesc); + result = pljava_TupleDesc_create(JLongGet(Portal, _this)->tupDesc); END_NATIVE } return result; } -/* - * Class: org_postgresql_pljava_internal_Portal - * Method: _invalidate - * Signature: (J)V - */ -JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_Portal__1close(JNIEnv* env, jclass clazz, jlong _this) -{ - /* We don't use error checking here since we don't want an exception - * caused by another exception when we attempt to close. - */ - if(_this != 0) - { - Ptr2Long p2l; - p2l.longVal = _this; - BEGIN_NATIVE_NO_ERRCHECK - Portal portal = (Portal)p2l.ptrVal; - - /* Reset our own cleanup callback if needed. No need to come in - * the backway - */ - - jobject jportal = (jobject)HashMap_removeByOpaque(s_portalMap, portal); - if(jportal) - { - JNI_deleteGlobalRef(jportal); - } - - if(portal->cleanup == _pljavaPortalCleanup) - portal->cleanup = s_originalCleanupProc; - - if(!(currentInvocation->errorOccured || currentInvocation->inExprContextCB)) - SPI_cursor_close(portal); - END_NATIVE - } -} - /* * Class: org_postgresql_pljava_internal_Portal * Method: _isAtStart @@ -299,9 +212,7 @@ Java_org_postgresql_pljava_internal_Portal__1isAtStart(JNIEnv* env, jclass clazz jboolean result = JNI_FALSE; if(_this != 0) { - Ptr2Long p2l; - p2l.longVal = _this; - result = (jboolean)((Portal)p2l.ptrVal)->atStart; + result = (jboolean)JLongGet(Portal, _this)->atStart; } return result; } @@ -317,28 +228,7 @@ Java_org_postgresql_pljava_internal_Portal__1isAtEnd(JNIEnv* env, jclass clazz, jboolean result = JNI_FALSE; if(_this != 0) { - Ptr2Long p2l; - p2l.longVal = _this; - result = (jboolean)((Portal)p2l.ptrVal)->atEnd; - } - return result; -} - -/* - * Class: org_postgresql_pljava_internal_Portal - * Method: _isPosOverflow - * Signature: (J)Z - */ -JNIEXPORT jboolean JNICALL -Java_org_postgresql_pljava_internal_Portal__1isPosOverflow(JNIEnv* env, jclass clazz, jlong _this) -{ - jboolean result = JNI_FALSE; - if(_this != 0) - { - Ptr2Long p2l; - p2l.longVal = _this; - /* should not overflow as the pos is int64 */ - result = JNI_FALSE; + result = (jboolean)JLongGet(Portal, _this)->atEnd; } return result; } @@ -346,24 +236,24 @@ Java_org_postgresql_pljava_internal_Portal__1isPosOverflow(JNIEnv* env, jclass c /* * Class: org_postgresql_pljava_internal_Portal * Method: _move - * Signature: (JJZI)I + * Signature: (JZJ)J */ -JNIEXPORT jint JNICALL -Java_org_postgresql_pljava_internal_Portal__1move(JNIEnv* env, jclass clazz, jlong _this, jlong threadId, jboolean forward, jint count) +JNIEXPORT jlong JNICALL +Java_org_postgresql_pljava_internal_Portal__1move(JNIEnv* env, jclass clazz, jlong _this, 
jboolean forward, jlong count) { - jint result = 0; + jlong result = 0; if(_this != 0) { BEGIN_NATIVE - Ptr2Long p2l; STACK_BASE_VARS - STACK_BASE_PUSH(threadId) + STACK_BASE_PUSH(env) - p2l.longVal = _this; PG_TRY(); { - SPI_cursor_move((Portal)p2l.ptrVal, forward == JNI_TRUE, (int)count); - result = (jint)SPI_processed; + Invocation_assertConnect(); + SPI_cursor_move( + JLongGet(Portal, _this), forward == JNI_TRUE, (long)count); + result = (jlong)SPI_processed; } PG_CATCH(); { diff --git a/pljava-so/src/main/c/type/Relation.c b/pljava-so/src/main/c/type/Relation.c index 91c915ed..8455d1f3 100644 --- a/pljava-so/src/main/c/type/Relation.c +++ b/pljava-so/src/main/c/type/Relation.c @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -10,6 +16,7 @@ #include #include "org_postgresql_pljava_internal_Relation.h" +#include "pljava/DualState.h" #include "pljava/Exception.h" #include "pljava/Invocation.h" #include "pljava/SPI.h" @@ -27,24 +34,23 @@ static jmethodID s_Relation_init; /* * org.postgresql.pljava.Relation type. */ -jobject Relation_create(Relation td) +jobject pljava_Relation_create(Relation r) { - return (td == 0) ? 0 : JNI_newObject( + if ( NULL == r ) + return NULL; + + return JNI_newObjectLocked( s_Relation_class, s_Relation_init, - Invocation_createLocalWrapper(td)); + pljava_DualState_key(), + PointerGetJLong(currentInvocation), + PointerGetJLong(r)); } -extern void Relation_initialize(void); -void Relation_initialize(void) +void pljava_Relation_initialize(void) { JNINativeMethod methods[] = { - { - "_free", - "(J)V", - Java_org_postgresql_pljava_internal_Relation__1free - }, { "_getName", "(J)Ljava/lang/String;", @@ -70,25 +76,13 @@ void Relation_initialize(void) s_Relation_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/Relation")); PgObject_registerNatives2(s_Relation_class, methods); - s_Relation_init = PgObject_getJavaMethod(s_Relation_class, "", "(J)V"); + s_Relation_init = PgObject_getJavaMethod(s_Relation_class, "", + "(Lorg/postgresql/pljava/internal/DualState$Key;JJ)V"); } /**************************************** * JNI methods ****************************************/ -/* - * Class: org_postgresql_pljava_internal_Relation - * Method: _free - * Signature: (J)V - */ -JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_Relation__1free(JNIEnv* env, jobject _this, jlong pointer) -{ - BEGIN_NATIVE_NO_ERRCHECK - Invocation_freeLocalWrapper(pointer); - END_NATIVE -} - /* * Class: org_postgresql_pljava_internal_Relation * Method: _getName @@ -98,7 +92,8 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_Relation__1getName(JNIEnv* env, jclass clazz, jlong _this) { jstring result = 0; - Relation self = Invocation_getWrappedPointer(_this); + Relation self = JLongGet(Relation, _this); + if(self != 0) { BEGIN_NATIVE @@ -127,7 +122,8 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_Relation__1getSchema(JNIEnv* env, jclass 
clazz, jlong _this) { jstring result = 0; - Relation self = Invocation_getWrappedPointer(_this); + Relation self = JLongGet(Relation, _this); + if(self != 0) { BEGIN_NATIVE @@ -156,11 +152,12 @@ JNIEXPORT jobject JNICALL Java_org_postgresql_pljava_internal_Relation__1getTupleDesc(JNIEnv* env, jclass clazz, jlong _this) { jobject result = 0; - Relation self = Invocation_getWrappedPointer(_this); + Relation self = JLongGet(Relation, _this); + if(self != 0) { BEGIN_NATIVE - result = TupleDesc_create(self->rd_att); + result = pljava_TupleDesc_create(self->rd_att); END_NATIVE } return result; @@ -170,19 +167,27 @@ Java_org_postgresql_pljava_internal_Relation__1getTupleDesc(JNIEnv* env, jclass * Class: org_postgresql_pljava_internal_Relation * Method: _modifyTuple * Signature: (JJ[I[Ljava/lang/Object;)Lorg/postgresql/internal/pljava/Tuple; + * + * Note: starting with PostgreSQL 10, SPI_modifytuple must be run with SPI + * 'connected'. However, the caller likely wants a result living in a memory + * context longer-lived than SPI's. (At present, the only calls of this method + * originate in Function_invokeTrigger, which does switchToUpperContext() just + * for that reason.) Blindly adding Invocation_assertConnect() here would alter + * the behavior of subsequent palloc()s (not just in SPI_modifytuple, but also + * in, e.g., Tuple_create). So, given there's only one caller, let it be the + * caller's responsibility to ensure SPI is connected AND that a suitable + * memory context is selected for the result the caller wants. */ JNIEXPORT jobject JNICALL Java_org_postgresql_pljava_internal_Relation__1modifyTuple(JNIEnv* env, jclass clazz, jlong _this, jlong _tuple, jintArray _indexes, jobjectArray _values) { - Relation self = Invocation_getWrappedPointer(_this); jobject result = 0; + Relation self = JLongGet(Relation, _this); + if(self != 0 && _tuple != 0) { - Ptr2Long p2l; - p2l.longVal = _tuple; - BEGIN_NATIVE - HeapTuple tuple = (HeapTuple)p2l.ptrVal; + HeapTuple tuple = JLongGet(HeapTuple, _tuple); PG_TRY(); { jint idx; @@ -227,7 +232,7 @@ Java_org_postgresql_pljava_internal_Relation__1modifyTuple(JNIEnv* env, jclass c type = Type_fromOid(typeId, typeMap); value = JNI_getObjectArrayElement(_values, idx); if(value != 0) - values[idx] = Type_coerceObject(type, value); + values[idx] = Type_coerceObjectBridged(type, value); else { if(nulls == 0) @@ -261,7 +266,7 @@ Java_org_postgresql_pljava_internal_Relation__1modifyTuple(JNIEnv* env, jclass c } PG_END_TRY(); if(tuple != 0) - result = Tuple_create(tuple); + result = pljava_Tuple_create(tuple); END_NATIVE } return result; diff --git a/pljava-so/src/main/c/type/SQLXMLImpl.c b/pljava-so/src/main/c/type/SQLXMLImpl.c new file mode 100644 index 00000000..157f4251 --- /dev/null +++ b/pljava-so/src/main/c/type/SQLXMLImpl.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2018-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
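The note above on _modifyTuple puts the burden on the caller to have SPI connected and a suitably long-lived memory context selected. A hedged sketch of the usual pattern for the latter, assuming the standard PostgreSQL headers; the helper name is illustrative:

#include "postgres.h"
#include "access/htup_details.h"

/* Copy a tuple into a caller-chosen context so it survives SPI_finish(). */
static HeapTuple
copy_tuple_to_context(HeapTuple tuple, MemoryContext callerContext)
{
    MemoryContext old = MemoryContextSwitchTo(callerContext);
    HeapTuple copy = heap_copytuple(tuple);   /* allocated in callerContext */

    MemoryContextSwitchTo(old);
    return copy;
}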
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +#include + +#include "org_postgresql_pljava_jdbc_SQLXMLImpl.h" + +#include "pljava/type/Type_priv.h" +#include "pljava/VarlenaWrapper.h" + +static TypeClass s_SQLXMLClass; +static jclass s_SQLXML_class; +static jmethodID s_SQLXML_adopt; +static jclass s_SQLXML_Readable_PgXML_class; +static jmethodID s_SQLXML_Readable_PgXML_init; +static jclass s_SQLXML_Writable_class; +static jmethodID s_SQLXML_Writable_init; + +static TypeClass s_SQLXMLClass_Synthetic; +static jclass s_SQLXML_Readable_Synthetic_class; +static jmethodID s_SQLXML_Readable_Synthetic_init; + +static bool _SQLXML_canReplaceType(Type self, Type other); +static jvalue _SQLXML_coerceDatum(Type self, Datum arg); +static Datum _SQLXML_coerceObject(Type self, jobject sqlxml); +static Type _SQLXML_obtain(Oid typeId); + +/* + * It is possible to install PL/Java in a PostgreSQL instance that was built + * without libxml and the native XML data type. It could even be useful for + * SQLXML to be usable in those circumstances, so the canReplaceType method + * will return true if the native type is text. (An exact match on TEXTOID is + * required, for now at least, because over in String.c, canReplaceType answers + * true for any native type that has text in/out conversions, and we do NOT want + * SQLXML to willy/nilly expose the internals of just any of those. + */ +static bool _SQLXML_canReplaceType(Type self, Type other) +{ + TypeClass cls = Type_getClass(other); + return + Type_getClass(self) == cls || +#if defined(XMLOID) + Type_getOid(other) == XMLOID || +#endif + Type_getOid(other) == PG_NODE_TREEOID || /* a synthetic rendering */ + Type_getOid(other) == TEXTOID; +} + +static jvalue _SQLXML_coerceDatum(Type self, Datum arg) +{ + jvalue result; + jobject vwi = pljava_VarlenaWrapper_Input( + arg, TopTransactionContext, TopTransactionResourceOwner); + result.l = JNI_newObject( + s_SQLXML_Readable_PgXML_class, s_SQLXML_Readable_PgXML_init, + vwi, Type_getOid(self)); + JNI_deleteLocalRef(vwi); + return result; +} + +static jvalue _SQLXML_coerceDatum_synthetic(Type self, Datum arg) +{ + jvalue result; + jobject vwi = pljava_VarlenaWrapper_Input( + arg, TopTransactionContext, TopTransactionResourceOwner); + result.l = JNI_newObject( + s_SQLXML_Readable_Synthetic_class, s_SQLXML_Readable_Synthetic_init, + vwi, Type_getOid(self)); + JNI_deleteLocalRef(vwi); + return result; +} + +static Datum _SQLXML_coerceObject(Type self, jobject sqlxml) +{ + jobject vw = JNI_callStaticObjectMethodLocked( + s_SQLXML_class, s_SQLXML_adopt, sqlxml, Type_getOid(self)); + Datum d = pljava_VarlenaWrapper_adopt(vw); + JNI_deleteLocalRef(vw); + if ( VARATT_IS_EXTERNAL_EXPANDED_RW(DatumGetPointer(d)) ) + return TransferExpandedObject(d, CurrentMemoryContext); + + MemoryContextSetParent( + GetMemoryChunkContext(DatumGetPointer(d)), CurrentMemoryContext); + + return d; +} + +/* + * A Type can be 'registered' two ways. In one case, a single instance can be + * created with TypeClass_allocInstance(2)? and assigned a fixed Oid, and that + * instance then passed to Type_registerType along with the Java name. 
+ * + * The other way is not to allocate any Type instance up front, but instead + * to call Type_registerType2, passing just the type's canonical Oid, the Java + * name, and an 'obtainer' function, like this one. + * + * The difference appears when this TypeClass has a _canReplaceType function + * that allows it to serve more than one PostgreSQL type (as, indeed, SQLXML + * now does and can). With the first registration style, the same Type instance + * will be used for any of the PostgreSQL types accepted by the _canReplaceType + * function. With the second style, the obtainer will be called to produce a + * distinct Type instance (sharing the same TypeClass) for each one, recording + * its own PostgreSQL Oid. + * + * SQLXML has a need to run a content verifier when 'bouncing' a readable + * instance back to PostgreSQL, and ideally only to do so when the Oids at + * create and adopt time are different, so it cannot make do with the singleton + * type instance, and needs to use Type_registerType2 with an obtainer. + * + * The obtainer can, however, cache a single instance per supported oid, of + * which there are, so far, only two (one, in PG instances without XML). + */ +static Type _SQLXML_obtain(Oid typeId) +{ + static Type textInstance; + Oid allowedId = InvalidOid; + bool synthetic = false; + Type *cache; +#if defined(XMLOID) + static Type xmlInstance; +#endif + static Type pgNodeTreeInstance; + + switch ( typeId ) + { + case PG_NODE_TREEOID: + allowedId = PG_NODE_TREEOID; + synthetic = true; + cache = &pgNodeTreeInstance; + break; + default: + if ( TEXTOID == typeId ) + { + cache = &textInstance; + allowedId = TEXTOID; + } +#if defined(XMLOID) + else + { + allowedId = XMLOID; + cache = &xmlInstance; + } +#endif + } + if ( NULL == *cache ) + *cache = TypeClass_allocInstance( + synthetic ? s_SQLXMLClass_Synthetic : s_SQLXMLClass, allowedId); + return *cache; +} + +/* Make this datatype available to the postgres system. + */ +extern void pljava_SQLXMLImpl_initialize(void); +void pljava_SQLXMLImpl_initialize(void) +{ + jclass clazz; + JNINativeMethod methods[] = + { + { + "_newWritable", + "()Ljava/sql/SQLXML;", + Java_org_postgresql_pljava_jdbc_SQLXMLImpl__1newWritable + }, + { 0, 0, 0 } + }; + + TypeClass cls = TypeClass_alloc("type.SQLXML"); + cls->JNISignature = "Ljava/sql/SQLXML;"; + cls->javaTypeName = "java.sql.SQLXML"; + cls->canReplaceType = _SQLXML_canReplaceType; + cls->coerceDatum = _SQLXML_coerceDatum; + cls->coerceObject = _SQLXML_coerceObject; + s_SQLXMLClass = cls; + + Type_registerType2(InvalidOid, "java.sql.SQLXML", _SQLXML_obtain); + + cls = TypeClass_alloc("type.SQLXML"); + cls->JNISignature = "Ljava/sql/SQLXML;"; + cls->javaTypeName = "java.sql.SQLXML"; + cls->canReplaceType = _SQLXML_canReplaceType; + cls->coerceDatum = _SQLXML_coerceDatum_synthetic; + /* cls->coerceObject = _SQLXML_coerceObject; */ + s_SQLXMLClass_Synthetic = cls; /* what happens if I don't register it? 
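The two registration styles described in this comment differ in when a Type instance comes into being. A standalone, deliberately simplified sketch of the obtainer style, caching one instance per Oid it is asked for (the types, the Oid value 25 for text, and the two-slot cache are illustrative assumptions, not PL/Java's real data structures):

#include <stdint.h>
#include <stdlib.h>

typedef uint32_t OidSketch;
typedef struct { OidSketch oid; /* ... per-type state ... */ } TypeSketch;

/* Lazily build and cache a distinct instance per supported type Oid. */
static TypeSketch *obtain_example(OidSketch typeId)
{
    static TypeSketch *forText, *forOther;   /* one slot per supported Oid */
    TypeSketch **slot = (typeId == 25) ? &forText : &forOther; /* 25: text */

    if (*slot == NULL)
    {
        *slot = (TypeSketch *)calloc(1, sizeof **slot);
        if (*slot != NULL)
            (*slot)->oid = typeId;   /* each instance remembers its own Oid */
    }
    return *slot;
}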
*/ + + s_SQLXML_class = JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/jdbc/SQLXMLImpl")); + s_SQLXML_adopt = PgObject_getStaticJavaMethod(s_SQLXML_class, "adopt", + "(Ljava/sql/SQLXML;I)Lorg/postgresql/pljava/internal/VarlenaWrapper;"); + + s_SQLXML_Readable_PgXML_class = JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/jdbc/SQLXMLImpl$Readable$PgXML")); + s_SQLXML_Readable_PgXML_init = PgObject_getJavaMethod( + s_SQLXML_Readable_PgXML_class, + "", "(Lorg/postgresql/pljava/internal/VarlenaWrapper$Input;I)V"); + + s_SQLXML_Readable_Synthetic_class = JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/jdbc/SQLXMLImpl$Readable$Synthetic")); + s_SQLXML_Readable_Synthetic_init = PgObject_getJavaMethod( + s_SQLXML_Readable_Synthetic_class, + "", "(Lorg/postgresql/pljava/internal/VarlenaWrapper$Input;I)V"); + + s_SQLXML_Writable_class = JNI_newGlobalRef(PgObject_getJavaClass( + "org/postgresql/pljava/jdbc/SQLXMLImpl$Writable")); + s_SQLXML_Writable_init = PgObject_getJavaMethod(s_SQLXML_Writable_class, + "", "(Lorg/postgresql/pljava/internal/VarlenaWrapper$Output;)V"); + + clazz = PgObject_getJavaClass("org/postgresql/pljava/jdbc/SQLXMLImpl"); + PgObject_registerNatives2(clazz, methods); + JNI_deleteLocalRef(clazz); +} + +/* + * Class: org_postgresql_pljava_jdbc_SQLXMLImpl + * Method: _newWritable + * Signature: ()Ljava/sql/SQLXML; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_jdbc_SQLXMLImpl__1newWritable + (JNIEnv *env, jclass sqlxml_class) +{ + jobject sqlxml = NULL; + jobject vwo; + BEGIN_NATIVE + vwo = pljava_VarlenaWrapper_Output( + TopTransactionContext, TopTransactionResourceOwner); + sqlxml = JNI_newObjectLocked( + s_SQLXML_Writable_class, s_SQLXML_Writable_init, vwo); + END_NATIVE + return sqlxml; +} diff --git a/pljava-so/src/main/c/type/Short.c b/pljava-so/src/main/c/type/Short.c index 265690c4..88a6d9ae 100644 --- a/pljava-so/src/main/c/type/Short.c +++ b/pljava-so/src/main/c/type/Short.c @@ -1,12 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause * - * @author Thomas Hallgren + * Contributors: + * Tada AB + * PostgreSQL Global Development Group + * Chapman Flack */ #include "pljava/type/Type_priv.h" #include "pljava/type/Array.h" @@ -14,16 +17,15 @@ static TypeClass s_shortClass; static jclass s_Short_class; -static jclass s_ShortArray_class; static jmethodID s_Short_init; static jmethodID s_Short_shortValue; /* * short primitive type. 
*/ -static Datum _short_invoke(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +static Datum _short_invoke(Type self, Function fn, PG_FUNCTION_ARGS) { - jshort v = JNI_callStaticShortMethodA(cls, method, args); + jshort v = pljava_Function_shortInvoke(fn); return Int16GetDatum(v); } @@ -74,19 +76,8 @@ static Datum _shortArray_coerceObject(Type self, jobject shortArray) nElems = JNI_getArrayLength((jarray)shortArray); v = createArrayType(nElems, sizeof(jshort), INT2OID, false); - if(!JNI_isInstanceOf( shortArray, s_ShortArray_class)) - JNI_getShortArrayRegion((jshortArray)shortArray, 0, nElems, (jshort*)ARR_DATA_PTR(v)); - else - { - int idx = 0; - jshort *array = (jshort*)ARR_DATA_PTR(v); - - for(idx = 0; idx < nElems; ++idx) - { - array[idx] = JNI_callShortMethod(JNI_getObjectArrayElement(shortArray, idx), - s_Short_shortValue); - } - } + JNI_getShortArrayRegion( + (jshortArray)shortArray, 0, nElems, (jshort*)ARR_DATA_PTR(v)); PG_RETURN_ARRAYTYPE_P(v); } @@ -127,7 +118,6 @@ void Short_initialize(void) TypeClass cls; s_Short_class = JNI_newGlobalRef(PgObject_getJavaClass("java/lang/Short")); - s_ShortArray_class = JNI_newGlobalRef(PgObject_getJavaClass("[Ljava/lang/Short;")); s_Short_init = PgObject_getJavaMethod(s_Short_class, "", "(S)V"); s_Short_shortValue = PgObject_getJavaMethod(s_Short_class, "shortValue", "()S"); diff --git a/pljava-so/src/main/c/type/SingleRowReader.c b/pljava-so/src/main/c/type/SingleRowReader.c new file mode 100644 index 00000000..50736ebb --- /dev/null +++ b/pljava-so/src/main/c/type/SingleRowReader.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * PostgreSQL Global Development Group + * Chapman Flack + * + * @author Thomas Hallgren + */ +#include "pljava/type/Type_priv.h" +#include "pljava/type/SingleRowReader.h" + +#include +#include +#include + +#include "pljava/DualState.h" +#include "pljava/Exception.h" +#include "pljava/Invocation.h" +#include "pljava/type/TupleDesc.h" + +#include "org_postgresql_pljava_jdbc_SingleRowReader.h" + +static jclass s_SingleRowReader_class; +static jmethodID s_SingleRowReader_init; + +jobject pljava_SingleRowReader_getTupleDesc(HeapTupleHeader ht) +{ + jobject result; + TupleDesc tupleDesc = + lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(ht), + HeapTupleHeaderGetTypMod(ht)); + result = pljava_TupleDesc_create(tupleDesc); + /* + * pljava_TupleDesc_create() creates a copy of the tuple descriptor, so + * can release this now + */ + ReleaseTupleDesc(tupleDesc); + return result; +} + +jobject pljava_SingleRowReader_create(HeapTupleHeader ht) +{ + jobject result; + jobject jtd = pljava_SingleRowReader_getTupleDesc(ht); + + result = + JNI_newObjectLocked(s_SingleRowReader_class, s_SingleRowReader_init, + pljava_DualState_key(), PointerGetJLong(currentInvocation), + PointerGetJLong(ht), jtd); + + JNI_deleteLocalRef(jtd); + return result; +} + +/* Make this datatype available to the postgres system. 
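pljava_SingleRowReader_getTupleDesc below leans on the reference counting behind lookup_rowtype_tupdesc: pin, copy what is needed, release. A hedged sketch of that discipline in isolation, assuming the usual PostgreSQL headers:

#include "postgres.h"
#include "access/tupdesc.h"
#include "utils/typcache.h"

/* Return a private copy of a row type's descriptor, dropping the pin. */
static TupleDesc
copy_rowtype_tupdesc(Oid typeId, int32 typmod)
{
    TupleDesc pinned = lookup_rowtype_tupdesc(typeId, typmod);
    TupleDesc copy = CreateTupleDescCopyConstr(pinned);

    ReleaseTupleDesc(pinned);   /* the copy is ours; the cache entry is not */
    return copy;
}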
+ */ +void pljava_SingleRowReader_initialize(void) +{ + JNINativeMethod methods[] = + { + { + "_getObject", + "(JJILjava/lang/Class;)Ljava/lang/Object;", + Java_org_postgresql_pljava_jdbc_SingleRowReader__1getObject + }, + { 0, 0, 0 } + }; + jclass cls = + PgObject_getJavaClass("org/postgresql/pljava/jdbc/SingleRowReader"); + PgObject_registerNatives2(cls, methods); + s_SingleRowReader_init = PgObject_getJavaMethod(cls, "", + "(Lorg/postgresql/pljava/internal/DualState$Key;JJLorg/postgresql/pljava/internal/TupleDesc;)V"); + s_SingleRowReader_class = JNI_newGlobalRef(cls); + JNI_deleteLocalRef(cls); +} + + +/**************************************** + * JNI methods + ****************************************/ + +/* + * Class: org_postgresql_pljava_jdbc_SingleRowReader + * Method: _getObject + * Signature: (JJILjava/lang/Class;)Ljava/lang/Object; + */ +JNIEXPORT jobject JNICALL +Java_org_postgresql_pljava_jdbc_SingleRowReader__1getObject(JNIEnv* env, jclass clazz, jlong hth, jlong jtd, jint attrNo, jclass rqcls) +{ + jobject result = 0; + if(hth != 0 && jtd != 0) + { + BEGIN_NATIVE + PG_TRY(); + { + Type type = pljava_TupleDesc_getColumnType( + JLongGet(TupleDesc, jtd), (int) attrNo); + if (type != 0) + { + Datum binVal; + bool wasNull = false; + binVal = GetAttributeByNum( + JLongGet(HeapTupleHeader, hth), + (AttrNumber)attrNo, &wasNull); + if(!wasNull) + result = Type_coerceDatumAs(type, binVal, rqcls).l; + } + } + PG_CATCH(); + { + Exception_throw_ERROR("GetAttributeByNum"); + } + PG_END_TRY(); + END_NATIVE + } + return result; +} diff --git a/pljava-so/src/main/c/type/String.c b/pljava-so/src/main/c/type/String.c index dfe386ce..8b462394 100644 --- a/pljava-so/src/main/c/type/String.c +++ b/pljava-so/src/main/c/type/String.c @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB - Thomas Hallgren + * Chapman Flack + * Francisco Miguel Biete Banon */ #include "pljava/type/String_priv.h" #include "pljava/HashMap.h" @@ -55,9 +60,9 @@ jvalue _String_coerceDatum(Type self, Datum arg) { jvalue result; char* tmp = DatumGetCString(FunctionCall3( - &((String)self)->textOutput, + &((PLJString)self)->textOutput, arg, - ObjectIdGetDatum(((String)self)->elementType), + ObjectIdGetDatum(((PLJString)self)->elementType), Int32GetDatum(-1))); result.l = String_createJavaStringFromNTS(tmp); pfree(tmp); @@ -79,19 +84,19 @@ Datum _String_coerceObject(Type self, jobject jstr) JNI_deleteLocalRef(jstr); ret = FunctionCall3( - &((String)self)->textInput, + &((PLJString)self)->textInput, CStringGetDatum(tmp), - ObjectIdGetDatum(((String)self)->elementType), + ObjectIdGetDatum(((PLJString)self)->elementType), Int32GetDatum(-1)); pfree(tmp); return ret; } -static String String_create(TypeClass cls, Oid typeId) +static PLJString String_create(TypeClass cls, Oid typeId) { HeapTuple typeTup = PgObject_getValidTuple(TYPEOID, typeId, "type"); Form_pg_type pgType = (Form_pg_type)GETSTRUCT(typeTup); - String self = (String)TypeClass_allocInstance(cls, typeId); + PLJString self = (PLJString)TypeClass_allocInstance(cls, typeId); MemoryContext ctx = GetMemoryChunkContext(self); fmgr_info_cxt(pgType->typoutput, &self->textOutput, ctx); fmgr_info_cxt(pgType->typinput, &self->textInput, ctx); @@ -105,7 +110,7 @@ Type String_obtain(Oid typeId) return (Type)StringClass_obtain(s_StringClass, typeId); } -String StringClass_obtain(TypeClass self, Oid typeId) +PLJString StringClass_obtain(TypeClass self, Oid typeId) { return String_create(self, typeId); } @@ -122,12 +127,17 @@ jstring String_createJavaString(text* t) Size srcLen = VARSIZE(t) - VARHDRSZ; if(srcLen == 0) return s_the_empty_string; - + if ( s_two_step_conversion ) { utf8 = (char*)pg_do_encoding_conversion((unsigned char*)src, (int)srcLen, s_server_encoding, PG_UTF8); - srcLen = strlen(utf8); + /* pg_do_encoding_conversion may return the source argument + * unchanged in more circumstances than you'd expect. As the source + * argument isn't NUL-terminated, don't call strlen on it. + */ + if (utf8 != src) + srcLen = strlen(utf8); } bytebuf = JNI_newDirectByteBuffer(utf8, srcLen); charbuf = JNI_callObjectMethodLocked(s_CharsetDecoder_instance, @@ -159,7 +169,11 @@ jstring String_createJavaStringFromNTS(const char* cp) { utf8 = (char*)pg_do_encoding_conversion((unsigned char*)cp, (int)sz, s_server_encoding, PG_UTF8); - sz = strlen(utf8); + /* Here the source is NUL-terminated, so calling strlen on it + * would be safe, but unnecessary all the same. + */ + if ( utf8 != cp ) + sz = strlen(utf8); } bytebuf = JNI_newDirectByteBuffer((void *)utf8, sz); charbuf = JNI_callObjectMethodLocked(s_CharsetDecoder_instance, @@ -200,7 +214,12 @@ text* String_createText(jstring javaString) { denc = (char*)pg_do_encoding_conversion( (unsigned char*)denc, (int)dencLen, PG_UTF8, s_server_encoding); - dencLen = strlen(denc); + /* pg_do_encoding_conversion may return the source argument + * unchanged in more circumstances than you'd expect. As the source + * argument isn't NUL-terminated, don't call strlen on it. 
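The guards added in String.c (two above, one completed just below) all address the same pitfall: pg_do_encoding_conversion can return its source argument unchanged, and here the source is not NUL-terminated. A hedged sketch of the safe pattern, assuming the usual PostgreSQL headers; the helper is illustrative:

#include "postgres.h"
#include "mb/pg_wchar.h"
#include <string.h>

/* Convert src (srcLen bytes, not NUL-terminated) between encodings and
 * report the result's length without ever calling strlen on src itself. */
static void
convert_tracking_length(char *src, size_t srcLen, int srcEnc, int dstEnc,
                        char **out, size_t *outLen)
{
    char *converted = (char *) pg_do_encoding_conversion(
        (unsigned char *) src, (int) srcLen, srcEnc, dstEnc);

    *out = converted;
    /* Only a freshly converted (palloc'd, NUL-terminated) result is safe
     * to measure with strlen; otherwise the length is unchanged. */
    *outLen = (converted != src) ? strlen(converted) : srcLen;
}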
+ */ + if (denc != sid.data) + dencLen = strlen(denc); } varSize = dencLen + VARHDRSZ; @@ -373,19 +392,19 @@ void String_initialize(void) static void String_initialize_codec() { + /* + * Wondering why this function doesn't bother deleting its many local refs? + * The call is wrapped in pushLocalFrame/popLocalFrame in the caller. + */ jmethodID string_intern = PgObject_getJavaMethod(s_String_class, "intern", "()Ljava/lang/String;"); jstring empty = JNI_newStringUTF( ""); - jstring u8Name = JNI_newStringUTF( "UTF-8"); - jclass charset_class = PgObject_getJavaClass("java/nio/charset/Charset"); - jmethodID charset_forName = PgObject_getStaticJavaMethod(charset_class, - "forName", "(Ljava/lang/String;)Ljava/nio/charset/Charset;"); + jclass charset_class = + PgObject_getJavaClass("java/nio/charset/Charset"); jmethodID charset_newDecoder = PgObject_getJavaMethod(charset_class, "newDecoder", "()Ljava/nio/charset/CharsetDecoder;"); jmethodID charset_newEncoder = PgObject_getJavaMethod(charset_class, "newEncoder", "()Ljava/nio/charset/CharsetEncoder;"); - jobject u8cs = JNI_callStaticObjectMethod(charset_class, charset_forName, - u8Name); jclass decoder_class = PgObject_getJavaClass("java/nio/charset/CharsetDecoder"); jclass encoder_class = @@ -398,11 +417,44 @@ static void String_initialize_codec() jfieldID underflow = PgObject_getStaticJavaField(result_class, "UNDERFLOW", "Ljava/nio/charset/CoderResult;"); jclass buffer_class = PgObject_getJavaClass("java/nio/Buffer"); + jobject servercs; + + /* + * Records what the final state of s_two_step_conversion will be, but the + * static is left at its initial value until all preparations are complete. + */ + bool two_step_when_ready = s_two_step_conversion; + + s_server_encoding = GetDatabaseEncoding(); + + if ( PG_SQL_ASCII == s_server_encoding ) + { + jmethodID forname = + PgObject_getStaticJavaMethod(charset_class, + "forName", "(Ljava/lang/String;)Ljava/nio/charset/Charset;"); + jstring sql_ascii = JNI_newStringUTF("X-PGSQL_ASCII"); + + two_step_when_ready = false; + + servercs = JNI_callStaticObjectMethodLocked(charset_class, + forname, sql_ascii); + } + else + { + jclass scharset_class = + PgObject_getJavaClass("java/nio/charset/StandardCharsets"); + jfieldID scharset_UTF_8 = PgObject_getStaticJavaField(scharset_class, + "UTF_8", "Ljava/nio/charset/Charset;"); + + two_step_when_ready = PG_UTF8 != s_server_encoding; + + servercs = JNI_getStaticObjectField(scharset_class, scharset_UTF_8); + } s_CharsetDecoder_instance = - JNI_newGlobalRef(JNI_callObjectMethod(u8cs, charset_newDecoder)); + JNI_newGlobalRef(JNI_callObjectMethod(servercs, charset_newDecoder)); s_CharsetEncoder_instance = - JNI_newGlobalRef(JNI_callObjectMethod(u8cs, charset_newEncoder)); + JNI_newGlobalRef(JNI_callObjectMethod(servercs, charset_newEncoder)); s_CharsetDecoder_decode = PgObject_getJavaMethod(decoder_class, "decode", "(Ljava/nio/ByteBuffer;)Ljava/nio/CharBuffer;"); s_CharsetEncoder_encode = PgObject_getJavaMethod(encoder_class, "encode", @@ -428,7 +480,6 @@ static void String_initialize_codec() s_the_empty_string = JNI_newGlobalRef( JNI_callObjectMethod(empty, string_intern)); - s_server_encoding = GetDatabaseEncoding(); - s_two_step_conversion = PG_UTF8 != s_server_encoding; + s_two_step_conversion = two_step_when_ready; uninitialized = false; } diff --git a/pljava-so/src/main/c/type/Time.c b/pljava-so/src/main/c/type/Time.c index 819f9b84..31fd16ed 100644 --- a/pljava-so/src/main/c/type/Time.c +++ b/pljava-so/src/main/c/type/Time.c @@ -1,12 +1,18 @@ /* - * Copyright (c) 
2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ -#include +#include #include #include @@ -17,23 +23,215 @@ #include "pljava/type/Time.h" #include "pljava/type/Timestamp.h" -#define pg_unreachable() abort() - /* - * Time type. Postgres will pass (and expect in return) a local Time. - * The Java java.sql.Time is UTC time and not a perfect fit. Perhaps - * a LocalTime object should be added to the Java domain? + * Types time and timetz. This compilation unit supplies code for both + * PostgreSQL types. The legacy JDBC mapping for both is to java.sql.Time, which + * holds an implicit timezone offset and therefore can't be an equally good fit + * for both. Also, it loses precision: PostgreSQL maintains microseconds, but + * java.sql.Time only holds milliseconds. + * + * Java 8 and JDBC 4.2 introduce java.time.LocalTime and java.time.OffsetTime, + * which directly fit PG's time and timetz, respectively. For compatibility + * reasons, the legacy behavior of getObject (with no Class parameter) is + * unchanged, and still returns the data weirdly shoehorned into java.sql.Time. + * But Java 8 application code can and should use the form of getObject with a + * Class parameter to request java.time.LocalTime or java.time.OffsetTime, as + * appropriate. + * + * The legacy shoehorning adjusts the PostgreSQL-maintained time by its + * associated offset (in the timetz case), or by the current value of the server + * timezone offset (in the time case). Which convention is weirder? */ static jclass s_Time_class; static jmethodID s_Time_init; static jmethodID s_Time_getTime; +static TypeClass s_LocalTimeClass; +static TypeClass s_OffsetTimeClass; +/* + * The following statics are specific to Java 8 +, and will be initialized + * only on demand (pre-8 application code will have no way to demand them). + */ +static Type s_LocalTimeInstance; +static jclass s_LocalTime_class; +static jmethodID s_LocalTime_ofNanoOfDay; +static jmethodID s_LocalTime_toNanoOfDay; +static Type s_OffsetTimeInstance; +static jclass s_OffsetTime_class; +static jmethodID s_OffsetTime_of; +static jmethodID s_OffsetTime_toLocalTime; +static jmethodID s_OffsetTime_getOffset; +static jclass s_ZoneOffset_class; +static jmethodID s_ZoneOffset_ofTotalSeconds; +static jmethodID s_ZoneOffset_getTotalSeconds; + +/* + * This only answers true for (same class or) TIMEOID. + * The obtainer (below) only needs to construct and remember one instance. + */ +static bool _LocalTime_canReplaceType(Type self, Type other) +{ + TypeClass cls = Type_getClass(other); + return Type_getClass(self) == cls || Type_getOid(other) == TIMEOID; +} + +static jvalue _LocalTime_coerceDatum(Type self, Datum arg) +{ + jlong nanos = +#if PG_VERSION_NUM < 100000 + (!integerDateTimes) ? 
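The conversions continuing just below move between PostgreSQL's microseconds-since-midnight representation and java.time.LocalTime's nano-of-day, clamping PostgreSQL's legal 24:00:00 just under LocalTime's exclusive upper bound. A standalone sketch of that arithmetic, with a locally defined constant standing in for USECS_PER_DAY:

#include <stdint.h>

#define SKETCH_USECS_PER_DAY INT64_C(86400000000)

/* PostgreSQL time (microseconds) -> LocalTime nano-of-day. */
static int64_t micros_to_localtime_nanos(int64_t micros)
{
    int64_t nanos = 1000 * micros;

    if (nanos == 1000 * SKETCH_USECS_PER_DAY) /* PG permits exactly 24:00:00 */
        --nanos;                  /* LocalTime's maximum is one nanosecond less */
    return nanos;
}

/* LocalTime nano-of-day -> PostgreSQL time. The +1 makes values one
 * nanosecond shy of a whole microsecond (such as the clamped end-of-day)
 * round up; everything else simply truncates. */
static int64_t localtime_nanos_to_micros(int64_t nanos)
{
    return (nanos + 1) / 1000;
}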
(jlong)floor(1e9 * DatumGetFloat8(arg)) : +#endif + 1000 * DatumGetInt64(arg); + jvalue result; + if ( 1000L * USECS_PER_DAY == nanos ) + -- nanos; + result.l = JNI_callStaticObjectMethod( + s_LocalTime_class, s_LocalTime_ofNanoOfDay, nanos); + return result; +} + +static Datum _LocalTime_coerceObject(Type self, jobject time) +{ + jlong nanos = JNI_callLongMethod(time, s_LocalTime_toNanoOfDay); + return +#if PG_VERSION_NUM < 100000 + (!integerDateTimes) ? Float8GetDatum(((double)nanos) / 1e9) : +#endif + Int64GetDatum((nanos + 1) / 1000); +} + +static Type _LocalTime_obtain(Oid typeId) +{ + if ( NULL == s_LocalTimeInstance ) + { + s_LocalTime_class = JNI_newGlobalRef(PgObject_getJavaClass( + "java/time/LocalTime")); + s_LocalTime_ofNanoOfDay = PgObject_getStaticJavaMethod(s_LocalTime_class, + "ofNanoOfDay", "(J)Ljava/time/LocalTime;"); + s_LocalTime_toNanoOfDay = PgObject_getJavaMethod(s_LocalTime_class, + "toNanoOfDay", "()J"); + + s_LocalTimeInstance = + TypeClass_allocInstance(s_LocalTimeClass, TIMEOID); + } + return s_LocalTimeInstance; +} + +/* + * This only answers true for (same class or) TIMETZOID. + * The obtainer (below) only needs to construct and remember one instance. + */ +static bool _OffsetTime_canReplaceType(Type self, Type other) +{ + TypeClass cls = Type_getClass(other); + return Type_getClass(self) == cls || Type_getOid(other) == TIMETZOID; +} + +static jvalue _OffsetTime_coerceDatum(Type self, Datum arg) +{ + jvalue localTime; + jobject zoneOffset; + int32 offsetSecs; + jvalue result; + +#if PG_VERSION_NUM < 100000 + if ( !integerDateTimes ) + { + TimeTzADT_dd* tza = (TimeTzADT_dd*)DatumGetPointer(arg); + localTime = + Type_coerceDatum(s_LocalTimeInstance, Float8GetDatum(tza->time)); + offsetSecs = tza->zone; + } + else +#endif + { + TimeTzADT_id* tza = (TimeTzADT_id*)DatumGetPointer(arg); + localTime = + Type_coerceDatum(s_LocalTimeInstance, Int64GetDatum(tza->time)); + offsetSecs = tza->zone; + } + + zoneOffset = JNI_callStaticObjectMethod(s_ZoneOffset_class, + s_ZoneOffset_ofTotalSeconds, - offsetSecs); /* PG/Java signs differ */ + + result.l = JNI_callStaticObjectMethod( + s_OffsetTime_class, s_OffsetTime_of, localTime.l, zoneOffset); + + JNI_deleteLocalRef(localTime.l); + JNI_deleteLocalRef(zoneOffset); + + return result; +} + +static Datum _OffsetTime_coerceObject(Type self, jobject time) +{ + jobject localTime = JNI_callObjectMethod(time, s_OffsetTime_toLocalTime); + jobject zoneOffset = JNI_callObjectMethod(time, s_OffsetTime_getOffset); + jint offsetSecs = + - /* PG/Java signs differ */ + JNI_callIntMethod(zoneOffset, s_ZoneOffset_getTotalSeconds); + Datum result; + +#if PG_VERSION_NUM < 100000 + if ( !integerDateTimes ) + { + TimeTzADT_dd* tza = (TimeTzADT_dd*)palloc(sizeof(TimeTzADT_dd)); + tza->zone = offsetSecs; + tza->time = + DatumGetFloat8(Type_coerceObject(s_LocalTimeInstance, localTime)); + result = PointerGetDatum(tza); + } + else +#endif + { + TimeTzADT_id* tza = (TimeTzADT_id*)palloc(sizeof(TimeTzADT_id)); + tza->zone = offsetSecs; + tza->time = + DatumGetInt64(Type_coerceObject(s_LocalTimeInstance, localTime)); + result = PointerGetDatum(tza); + } + + JNI_deleteLocalRef(localTime); + JNI_deleteLocalRef(zoneOffset); + return result; +} + +static Type _OffsetTime_obtain(Oid typeId) +{ + if ( NULL == s_OffsetTimeInstance ) + { + _LocalTime_obtain(TIMEOID); /* Make sure LocalTime statics are there */ + + s_OffsetTime_class = JNI_newGlobalRef(PgObject_getJavaClass( + "java/time/OffsetTime")); + s_OffsetTime_of = 
PgObject_getStaticJavaMethod(s_OffsetTime_class, "of", + "(Ljava/time/LocalTime;Ljava/time/ZoneOffset;)" + "Ljava/time/OffsetTime;"); + s_OffsetTime_toLocalTime = PgObject_getJavaMethod(s_OffsetTime_class, + "toLocalTime", "()Ljava/time/LocalTime;"); + s_OffsetTime_getOffset = PgObject_getJavaMethod(s_OffsetTime_class, + "getOffset", "()Ljava/time/ZoneOffset;"); + + s_ZoneOffset_class = JNI_newGlobalRef(PgObject_getJavaClass( + "java/time/ZoneOffset")); + s_ZoneOffset_ofTotalSeconds = PgObject_getStaticJavaMethod( + s_ZoneOffset_class, "ofTotalSeconds", "(I)Ljava/time/ZoneOffset;"); + s_ZoneOffset_getTotalSeconds = PgObject_getJavaMethod( + s_ZoneOffset_class, "getTotalSeconds", "()I"); + + s_OffsetTimeInstance = + TypeClass_allocInstance(s_OffsetTimeClass, TIMETZOID); + } + return s_OffsetTimeInstance; +} + static jlong msecsAtMidnight(void) { pg_time_t now = (pg_time_t)time(NULL) / 86400; return INT64CONST(1000) * (jlong)(now * 86400); } +#if PG_VERSION_NUM < 100000 static jvalue Time_coerceDatumTZ_dd(Type self, double t, bool tzAdjust) { jlong mSecs; @@ -45,6 +243,7 @@ static jvalue Time_coerceDatumTZ_dd(Type self, double t, bool tzAdjust) result.l = JNI_newObject(s_Time_class, s_Time_init, mSecs + msecsAtMidnight()); return result; } +#endif static jvalue Time_coerceDatumTZ_id(Type self, int64 t, bool tzAdjust) { @@ -65,11 +264,13 @@ static jlong Time_getMillisecsToday(Type self, jobject jt, bool tzAdjust) return mSecs; } +#if PG_VERSION_NUM < 100000 static double Time_coerceObjectTZ_dd(Type self, jobject jt, bool tzAdjust) { jlong mSecs = Time_getMillisecsToday(self, jt, tzAdjust); return ((double)mSecs) / 1000.0; /* Convert to seconds */ } +#endif static int64 Time_coerceObjectTZ_id(Type self, jobject jt, bool tzAdjust) { @@ -79,16 +280,22 @@ static int64 Time_coerceObjectTZ_id(Type self, jobject jt, bool tzAdjust) static jvalue _Time_coerceDatum(Type self, Datum arg) { - return integerDateTimes - ? Time_coerceDatumTZ_id(self, DatumGetInt64(arg), true) - : Time_coerceDatumTZ_dd(self, DatumGetFloat8(arg), true); + return +#if PG_VERSION_NUM < 100000 + (!integerDateTimes) ? + Time_coerceDatumTZ_dd(self, DatumGetFloat8(arg), true) : +#endif + Time_coerceDatumTZ_id(self, DatumGetInt64(arg), true); } static Datum _Time_coerceObject(Type self, jobject time) { - return integerDateTimes - ? Int64GetDatum(Time_coerceObjectTZ_id(self, time, true)) - : Float8GetDatum(Time_coerceObjectTZ_dd(self, time, true)); + return +#if PG_VERSION_NUM < 100000 + (!integerDateTimes) ? 
+ Float8GetDatum(Time_coerceObjectTZ_dd(self, time, true)) : +#endif + Int64GetDatum(Time_coerceObjectTZ_id(self, time, true)); } /* @@ -99,38 +306,42 @@ static Datum _Time_coerceObject(Type self, jobject time) static jvalue _Timetz_coerceDatum(Type self, Datum arg) { jvalue val; - if(integerDateTimes) - { - TimeTzADT_id* tza = (TimeTzADT_id*)DatumGetPointer(arg); - int64 t = tza->time + (int64)tza->zone * 1000000; /* Convert to UTC */ - val = Time_coerceDatumTZ_id(self, t, false); - } - else +#if PG_VERSION_NUM < 100000 + if(!integerDateTimes) { TimeTzADT_dd* tza = (TimeTzADT_dd*)DatumGetPointer(arg); double t = tza->time + tza->zone; /* Convert to UTC */ val = Time_coerceDatumTZ_dd(self, t, false); } + else +#endif + { + TimeTzADT_id* tza = (TimeTzADT_id*)DatumGetPointer(arg); + int64 t = tza->time + (int64)tza->zone * 1000000; /* Convert to UTC */ + val = Time_coerceDatumTZ_id(self, t, false); + } return val; } static Datum _Timetz_coerceObject(Type self, jobject time) { Datum datum; - if(integerDateTimes) +#if PG_VERSION_NUM < 100000 + if(!integerDateTimes) { - TimeTzADT_id* tza = (TimeTzADT_id*)palloc(sizeof(TimeTzADT_id)); - tza->time = Time_coerceObjectTZ_id(self, time, false); + TimeTzADT_dd* tza = (TimeTzADT_dd*)palloc(sizeof(TimeTzADT_dd)); + tza->time = Time_coerceObjectTZ_dd(self, time, false); tza->zone = Timestamp_getCurrentTimeZone(); - tza->time -= (int64)tza->zone * 1000000; /* Convert UTC to local time */ + tza->time -= tza->zone; /* Convert UTC to local time */ datum = PointerGetDatum(tza); } else +#endif { - TimeTzADT_dd* tza = (TimeTzADT_dd*)palloc(sizeof(TimeTzADT_dd)); - tza->time = Time_coerceObjectTZ_dd(self, time, false); + TimeTzADT_id* tza = (TimeTzADT_id*)palloc(sizeof(TimeTzADT_id)); + tza->time = Time_coerceObjectTZ_id(self, time, false); tza->zone = Timestamp_getCurrentTimeZone(); - tza->time -= tza->zone; /* Convert UTC to local time */ + tza->time -= (int64)tza->zone * 1000000; /* Convert UTC to local time */ datum = PointerGetDatum(tza); } return datum; @@ -157,4 +368,22 @@ void Time_initialize(void) cls->coerceDatum = _Timetz_coerceDatum; cls->coerceObject = _Timetz_coerceObject; Type_registerType("java.sql.Time", TypeClass_allocInstance(cls, TIMETZOID)); + + cls = TypeClass_alloc("type.LocalTime"); + cls->JNISignature = "Ljava/time/LocalTime;"; + cls->javaTypeName = "java.time.LocalTime"; + cls->coerceDatum = _LocalTime_coerceDatum; + cls->coerceObject = _LocalTime_coerceObject; + cls->canReplaceType = _LocalTime_canReplaceType; + s_LocalTimeClass = cls; + Type_registerType2(InvalidOid, "java.time.LocalTime", _LocalTime_obtain); + + cls = TypeClass_alloc("type.OffsetTime"); + cls->JNISignature = "Ljava/time/OffsetTime;"; + cls->javaTypeName = "java.time.OffsetTime"; + cls->coerceDatum = _OffsetTime_coerceDatum; + cls->coerceObject = _OffsetTime_coerceObject; + cls->canReplaceType = _OffsetTime_canReplaceType; + s_OffsetTimeClass = cls; + Type_registerType2(InvalidOid, "java.time.OffsetTime", _OffsetTime_obtain); } diff --git a/pljava-so/src/main/c/type/Timestamp.c b/pljava-so/src/main/c/type/Timestamp.c index cbf9ecdb..7cd76a6c 100644 --- a/pljava-so/src/main/c/type/Timestamp.c +++ b/pljava-so/src/main/c/type/Timestamp.c @@ -1,14 +1,18 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2007, 2008, 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. 
* - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause * - * @author Thomas Hallgren + * Contributors: + * Tada AB + * Thomas Hallgren + * PostgreSQL Global Development Group + * Chapman Flack */ -#include +#include #include #include @@ -17,17 +21,45 @@ #include "pljava/type/Type_priv.h" #include "pljava/type/Timestamp.h" -#define pg_unreachable() abort() - #define EPOCH_DIFF (((uint32)86400) * (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE)) /* - * Timestamp type. Postgres will pass (and expect in return) a local timestamp. - * Java on the other hand has no object that represents local time (localization - * is added when the object is converted to/from readable form). Hence, all - * postgres timestamps must be converted from local time to UTC when passed as - * a parameter to a Java method and all Java Timestamps must be converted from UTC - * to localtime when returned to postgres. + * Types timestamp and timestamptz. This compilation unit supplies code for both + * PostgreSQL types. The legacy JDBC mapping for both is to java.sql.Timestamp, + * which holds an implicit timezone offset and therefore can't be an equally + * good fit for both. + * + * Java 8 and JDBC 4.2 introduce java.time.LocalDateTime and + * java.time.OffsetDateTime, which more directly fit PG's timestamp and + * timestamptz, respectively. For compatibility reasons, the legacy behavior of + * getObject (with no Class parameter) is unchanged, and still returns the data + * weirdly shoehorned into java.sql.Timestamp. But Java 8 application code can + * and should use the form of getObject with a Class parameter to request + * java.time.LocalDateTime or java.time.OffsetDateTime, as appropriate. + * + * Note that it is somewhat misleading for PostgreSQL to call one of these types + * TIMESTAMP WITH TIME ZONE. The stored form does not, in fact, include a time + * zone (and this is in contrast to TIME WITH TIME ZONE, which does). Instead, + * what PostgreSQL means by TIMESTAMP WITH TIMEZONE is that a zone can be given + * (or inferred from the session) when a value is input, and used to adjust the + * value to UTC, and, likewise, the stored UTC value can be output converted to + * a given (or implicit) zone offset. Meanwhile, a TIMESTAMP WITHOUT TIME ZONE + * is just stored as the number of seconds from epoch that would make a clock + * on UTC show the same date and time as the value input. + * + * When producing a java.time.LocalDateTime from a timestamp and vice versa, + * the conversion is just what you would think. When producing an OffsetDateTime + * from a timestamptz, the OffsetDateTime will always have offset zero from UTC. + * That's what the stored PostgreSQL data represents; to produce anything else + * would be lying. When receiving an OffsetDateTime into PostgreSQL, of course + * any zone offset it contains will be used to adjust the value to UTC for + * storage. + * + * The legacy behavior when mapping timestamp and timestamptz to + * java.sql.Timestamp is that a timestamptz is converted in both directions + * without alteration, and a (local!) 
timestamp is *adjusted as if to UTC from + * the current session's implicit timezone* (and vice versa when receiving + * a value). Weird or not, that's how PL/Java has always done it. */ static jclass s_Timestamp_class; static jmethodID s_Timestamp_init; @@ -38,6 +70,178 @@ static jmethodID s_Timestamp_setNanos; static TypeClass s_TimestampClass; static TypeClass s_TimestamptzClass; +static TypeClass s_LocalDateTimeClass; +static TypeClass s_OffsetDateTimeClass; +/* + * The following statics are specific to Java 8 +, and will be initialized + * only on demand (pre-8 application code will have no way to demand them). + */ +static Type s_LocalDateTimeInstance; +static jclass s_LocalDateTime_class; +static jmethodID s_LocalDateTime_ofEpochSecond; +static jmethodID s_LocalDateTime_atOffset; +static Type s_OffsetDateTimeInstance; +static jclass s_OffsetDateTime_class; +static jmethodID s_OffsetDateTime_of; +static jmethodID s_OffsetDateTime_toEpochSecond; +static jmethodID s_OffsetDateTime_getNano; +static jobject s_ZoneOffset_UTC; + +static Type _LocalDateTime_obtain(Oid); +static Type _OffsetDateTime_obtain(Oid); + +#if PG_VERSION_NUM < 100000 +static int32 Timestamp_getTimeZone_dd(double dt); +#endif + +/* + * This only answers true for (same class or) TIMESTAMPOID. + * The obtainer (below) only needs to construct and remember one instance. + */ +static bool _LocalDateTime_canReplaceType(Type self, Type other) +{ + TypeClass cls = Type_getClass(other); + return Type_getClass(self) == cls || Type_getOid(other) == TIMESTAMPOID; +} + +static jvalue _LocalDateTime_coerceDatum(Type self, Datum arg) +{ + jint onlyMicros; + jlong secs; + jvalue result; +#if PG_VERSION_NUM < 100000 + if ( !integerDateTimes ) + { + double fracSecs = DatumGetFloat8(arg); + secs = (jlong)floor(fracSecs); + fracSecs -= (double)secs; + onlyMicros = ((jint)floor(2e6 * fracSecs) + 1) / 2; + } + else +#endif + { + int64 micros = DatumGetInt64(arg); + /* Expect number of microseconds since 01 Jan 2000. Tease out a + * non-negative sub-second microseconds value (whether this C compiler's + * signed % has trunc or floor behavior). Factor a 2 out right away to + * avoid wraparound when flooring near the most negative values. 
+ */ + int lowBit = (int)(micros & 1); + micros = (micros ^ lowBit) / 2; + onlyMicros = (jint)(((micros % 500000) + 500000) % 500000); + secs = (micros - onlyMicros) / 500000; + onlyMicros = (onlyMicros << 1) | lowBit; + } + result.l = JNI_callStaticObjectMethod(s_LocalDateTime_class, + s_LocalDateTime_ofEpochSecond, + EPOCH_DIFF + secs, 1000 * onlyMicros, s_ZoneOffset_UTC); + return result; +} + +static Datum _LocalDateTime_coerceObject(Type self, jobject timestamp) +{ + jobject offsetDateTime = JNI_callObjectMethod(timestamp, + s_LocalDateTime_atOffset, s_ZoneOffset_UTC); + Datum result = Type_coerceObject(s_OffsetDateTimeInstance, offsetDateTime); + JNI_deleteLocalRef(offsetDateTime); + return result; +} + +static Type _LocalDateTime_obtain(Oid typeId) +{ + if ( NULL == s_LocalDateTimeInstance ) + { + jclass zoneOffsetCls = PgObject_getJavaClass("java/time/ZoneOffset"); + jfieldID fldUTC = PgObject_getStaticJavaField( + zoneOffsetCls, "UTC", "Ljava/time/ZoneOffset;"); + s_ZoneOffset_UTC = JNI_newGlobalRef(JNI_getStaticObjectField( + zoneOffsetCls, fldUTC)); + JNI_deleteLocalRef(zoneOffsetCls); + + s_LocalDateTime_class = JNI_newGlobalRef(PgObject_getJavaClass( + "java/time/LocalDateTime")); + s_LocalDateTime_ofEpochSecond = PgObject_getStaticJavaMethod( + s_LocalDateTime_class, "ofEpochSecond", + "(JILjava/time/ZoneOffset;)Ljava/time/LocalDateTime;"); + s_LocalDateTime_atOffset = PgObject_getJavaMethod(s_LocalDateTime_class, + "atOffset", "(Ljava/time/ZoneOffset;)Ljava/time/OffsetDateTime;"); + + s_OffsetDateTime_class = JNI_newGlobalRef(PgObject_getJavaClass( + "java/time/OffsetDateTime")); + s_OffsetDateTime_toEpochSecond = PgObject_getJavaMethod( + s_OffsetDateTime_class, "toEpochSecond", "()J"); + s_OffsetDateTime_getNano = PgObject_getJavaMethod( + s_OffsetDateTime_class, "getNano", "()I"); + + s_LocalDateTimeInstance = + TypeClass_allocInstance(s_LocalDateTimeClass, TIMESTAMPOID); + + if ( NULL == s_OffsetDateTimeInstance ) + _OffsetDateTime_obtain(TIMESTAMPTZOID); + } + return s_LocalDateTimeInstance; +} + +/* + * This only answers true for (same class or) TIMESTAMPTZOID. + * The obtainer (below) only needs to construct and remember one instance. 
+ */ +static bool _OffsetDateTime_canReplaceType(Type self, Type other) +{ + TypeClass cls = Type_getClass(other); + return Type_getClass(self) == cls || Type_getOid(other) == TIMESTAMPTZOID; +} + +static jvalue _OffsetDateTime_coerceDatum(Type self, Datum arg) +{ + jvalue localDateTime = Type_coerceDatum(s_LocalDateTimeInstance, arg); + jvalue result; + result.l = JNI_callStaticObjectMethod(s_OffsetDateTime_class, + s_OffsetDateTime_of, localDateTime.l, s_ZoneOffset_UTC); + JNI_deleteLocalRef(localDateTime.l); + return result; +} + +static Datum _OffsetDateTime_coerceObject(Type self, jobject timestamp) +{ + jlong epochSec = JNI_callLongMethod( + timestamp, s_OffsetDateTime_toEpochSecond) - EPOCH_DIFF; + jint nanos = JNI_callIntMethod(timestamp, s_OffsetDateTime_getNano); + Datum result; + +#if PG_VERSION_NUM < 100000 + if ( !integerDateTimes ) + { + double secs = (double)epochSec + ((double)nanos)/1e9; + result = Float8GetDatum(secs); + } + else +#endif + { + result = Int64GetDatum(1000000L * epochSec + nanos / 1000); + } + + return result; +} + +static Type _OffsetDateTime_obtain(Oid typeId) +{ + if ( NULL == s_OffsetDateTimeInstance ) + { + s_OffsetDateTimeInstance = + TypeClass_allocInstance(s_OffsetDateTimeClass, TIMESTAMPTZOID); + + if ( NULL == s_LocalDateTimeInstance ) + _LocalDateTime_obtain(TIMESTAMPOID); + + s_OffsetDateTime_of = PgObject_getStaticJavaMethod( + s_OffsetDateTime_class, "of", + "(Ljava/time/LocalDateTime;Ljava/time/ZoneOffset;)" + "Ljava/time/OffsetDateTime;"); + } + return s_OffsetDateTimeInstance; +} + static bool _Timestamp_canReplaceType(Type self, Type other) { TypeClass cls = Type_getClass(other); @@ -47,16 +251,26 @@ static bool _Timestamp_canReplaceType(Type self, Type other) static jvalue Timestamp_coerceDatumTZ_id(Type self, Datum arg, bool tzAdjust) { jvalue result; + jint uSecs; + jlong mSecs; int64 ts = DatumGetInt64(arg); - int tz = Timestamp_getTimeZone_id(ts); - /* Expect number of microseconds since 01 Jan 2000 + /* Expect number of microseconds since 01 Jan 2000. Tease out a non-negative + * sub-second microseconds value (whether this C compiler's signed % + * has trunc or floor behavior). Factor a 2 out right away to + * avoid wraparound when flooring near the most negative values. 
*/ - jlong mSecs = ts / 1000; /* Convert to millisecs */ - jint uSecs = (jint)(ts % 1000000); /* preserve microsecs */ + int lowBit = (int)(ts & 1); + ts = (ts ^ lowBit) / 2; + uSecs = (jint)(((ts % 500000) + 500000) % 500000); + mSecs = (ts - uSecs) / 500; /* Convert to millisecs */ + uSecs = (uSecs << 1) | lowBit; if(tzAdjust) + { + int tz = Timestamp_getTimeZone_id(ts); /* function expects halved ts */ mSecs += tz * 1000; /* Adjust from local time to UTC */ + } /* Adjust for diff between Postgres and Java (Unix) */ mSecs += ((jlong)EPOCH_DIFF) * 1000L; @@ -67,9 +281,10 @@ static jvalue Timestamp_coerceDatumTZ_id(Type self, Datum arg, bool tzAdjust) return result; } +#if PG_VERSION_NUM < 100000 static jvalue Timestamp_coerceDatumTZ_dd(Type self, Datum arg, bool tzAdjust) { - jlong mSecs; + jlong secs; jint uSecs; jvalue result; double ts = DatumGetFloat8(arg); @@ -80,40 +295,61 @@ static jvalue Timestamp_coerceDatumTZ_dd(Type self, Datum arg, bool tzAdjust) if(tzAdjust) ts += tz; /* Adjust from local time to UTC */ ts += EPOCH_DIFF; /* Adjust for diff between Postgres and Java (Unix) */ - mSecs = (jlong) floor(ts * 1000.0); /* Convert to millisecs */ - uSecs = (jint) ((ts - floor(ts)) * 1000000.0); /* Preserve microsecs */ - result.l = JNI_newObject(s_Timestamp_class, s_Timestamp_init, mSecs); + secs = (jlong) floor(ts); /* Take just the secs */ + uSecs = (((jint) ((ts - (double)secs) * 2e6)) + 1) / 2; /* Preserve usecs */ + result.l = JNI_newObject(s_Timestamp_class, s_Timestamp_init, secs * 1000); if(uSecs != 0) JNI_callVoidMethod(result.l, s_Timestamp_setNanos, uSecs * 1000); return result; } +#endif static jvalue Timestamp_coerceDatumTZ(Type self, Datum arg, bool tzAdjust) { - return integerDateTimes - ? Timestamp_coerceDatumTZ_id(self, arg, tzAdjust) - : Timestamp_coerceDatumTZ_dd(self, arg, tzAdjust); + return +#if PG_VERSION_NUM < 100000 + (!integerDateTimes) ? Timestamp_coerceDatumTZ_dd(self, arg, tzAdjust) : +#endif + Timestamp_coerceDatumTZ_id(self, arg, tzAdjust); } static Datum Timestamp_coerceObjectTZ_id(Type self, jobject jts, bool tzAdjust) { int64 ts; + int lowBit; jlong mSecs = JNI_callLongMethod(jts, s_Timestamp_getTime); jint nSecs = JNI_callIntMethod(jts, s_Timestamp_getNanos); + /* + * getNanos() should have supplied non-negative nSecs, whether mSecs is + * positive or negative. So mSecs needs to be floor()ed to a multiple of + * 1000 ms, whether this C compiler does signed integer division with floor + * or trunc. 
+ */ + mSecs -= ((mSecs % 1000) + 1000) % 1000; mSecs -= ((jlong)EPOCH_DIFF) * 1000L; - ts = mSecs * 1000L; /* Convert millisecs to microsecs */ - if(nSecs != 0) - ts += nSecs / 1000; /* Convert nanosecs to microsecs */ - if(tzAdjust) - ts -= ((jlong)Timestamp_getTimeZone_id(ts)) * 1000000L; /* Adjust from UTC to local time */ + ts = mSecs * 500L; /* millisecs to microsecs, save a factor of 2 for now */ + if(tzAdjust) /* Adjust from UTC to local time; function expects halved ts */ + ts -= ((jlong)Timestamp_getTimeZone_id(ts)) * 500000L; + nSecs /= 1000; /* ok, now they are really microsecs */ + lowBit = nSecs & 1; + nSecs >>= 1; /* nSecs >= 0 so >> has a defined C result */ + ts = 2 * (ts + nSecs) | lowBit; return Int64GetDatum(ts); } +#if PG_VERSION_NUM < 100000 static Datum Timestamp_coerceObjectTZ_dd(Type self, jobject jts, bool tzAdjust) { double ts; jlong mSecs = JNI_callLongMethod(jts, s_Timestamp_getTime); jint nSecs = JNI_callIntMethod(jts, s_Timestamp_getNanos); + /* + * getNanos() should have supplied non-negative nSecs, whether mSecs is + * positive or negative. So mSecs needs to be floor()ed to a multiple of + * 1000 ms, whether this C compiler does signed integer division with floor + * or trunc. + */ + mSecs -= ((mSecs % 1000) + 1000) % 1000; ts = ((double)mSecs) / 1000.0; /* Convert to seconds */ ts -= EPOCH_DIFF; if(nSecs != 0) @@ -122,12 +358,15 @@ static Datum Timestamp_coerceObjectTZ_dd(Type self, jobject jts, bool tzAdjust) ts -= Timestamp_getTimeZone_dd(ts); /* Adjust from UTC to local time */ return Float8GetDatum(ts); } +#endif static Datum Timestamp_coerceObjectTZ(Type self, jobject jts, bool tzAdjust) { - return integerDateTimes - ? Timestamp_coerceObjectTZ_id(self, jts, tzAdjust) - : Timestamp_coerceObjectTZ_dd(self, jts, tzAdjust); + return +#if PG_VERSION_NUM < 100000 + (!integerDateTimes) ? Timestamp_coerceObjectTZ_dd(self, jts, tzAdjust) : +#endif + Timestamp_coerceObjectTZ_id(self, jts, tzAdjust); } static jvalue _Timestamp_coerceDatum(Type self, Datum arg) @@ -160,31 +399,47 @@ static Datum _Timestamptz_coerceObject(Type self, jobject ts) return Timestamp_coerceObjectTZ(self, ts, false); } +/* + * The argument to this function is in seconds from the PostgreSQL epoch, and + * the return is a time zone offset in seconds west of Greenwich. + */ static int32 Timestamp_getTimeZone(pg_time_t time) { -#ifdef _MSC_VER - /* This is gross, but pg_tzset has a cache, so not as gross as you think. - * There is some renewed interest on pgsql-hackers to find a good answer for - * the MSVC PGDLLIMPORT nonsense, so this may not have to stay gross. - */ - char const *tzname = PG_GETCONFIGOPTION("timezone"); - struct pg_tm* tx = pg_localtime(&time, pg_tzset(tzname)); -#else struct pg_tm* tx = pg_localtime(&time, session_timezone); -#endif + if ( NULL == tx ) + ereport(ERROR, ( + errcode(ERRCODE_DATA_EXCEPTION), + errmsg("could not resolve timestamp: %m") + )); return -(int32)tx->tm_gmtoff; } +/* + * This is only used here and in Date.c. The caller must know that the argument + * is not a PostgreSQL int64 Timestamp, but, rather, one of those divided by 2. 
+ */ int32 Timestamp_getTimeZone_id(int64 dt) { return Timestamp_getTimeZone( - (dt / INT64CONST(1000000) + (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * 86400)); + dt / INT64CONST(500000) + + (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * 86400 + ); } -int32 Timestamp_getTimeZone_dd(double dt) +#if PG_VERSION_NUM < 100000 +static int32 Timestamp_getTimeZone_dd(double dt) { + if ( TIMESTAMP_NOT_FINITE(dt) ) + { + errno = EOVERFLOW; + ereport(ERROR, ( + errcode(ERRCODE_DATA_EXCEPTION), + errmsg("could not resolve timestamp: %m") + )); + } return Timestamp_getTimeZone( (pg_time_t)rint(dt + (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * 86400)); } +#endif int32 Timestamp_getCurrentTimeZone(void) { @@ -218,4 +473,24 @@ void Timestamp_initialize(void) cls->coerceObject = _Timestamptz_coerceObject; Type_registerType("java.sql.Timestamp", TypeClass_allocInstance(cls, TIMESTAMPTZOID)); s_TimestamptzClass = cls; + + cls = TypeClass_alloc("type.LocalDateTime"); + cls->JNISignature = "Ljava/time/LocalDateTime;"; + cls->javaTypeName = "java.time.LocalDateTime"; + cls->coerceDatum = _LocalDateTime_coerceDatum; + cls->coerceObject = _LocalDateTime_coerceObject; + cls->canReplaceType = _LocalDateTime_canReplaceType; + s_LocalDateTimeClass = cls; + Type_registerType2(InvalidOid, "java.time.LocalDateTime", + _LocalDateTime_obtain); + + cls = TypeClass_alloc("type.OffsetDateTime"); + cls->JNISignature = "Ljava/time/OffsetDateTime;"; + cls->javaTypeName = "java.time.OffsetDateTime"; + cls->coerceDatum = _OffsetDateTime_coerceDatum; + cls->coerceObject = _OffsetDateTime_coerceObject; + cls->canReplaceType = _OffsetDateTime_canReplaceType; + s_OffsetDateTimeClass = cls; + Type_registerType2(InvalidOid, "java.time.OffsetDateTime", + _OffsetDateTime_obtain); } diff --git a/pljava-so/src/main/c/type/TriggerData.c b/pljava-so/src/main/c/type/TriggerData.c index b8bc0c74..32322548 100644 --- a/pljava-so/src/main/c/type/TriggerData.c +++ b/pljava-so/src/main/c/type/TriggerData.c @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -10,9 +16,9 @@ #include #include "org_postgresql_pljava_internal_TriggerData.h" #include "pljava/Invocation.h" +#include "pljava/DualState.h" #include "pljava/Exception.h" #include "pljava/type/Type_priv.h" -#include "pljava/type/JavaWrapper.h" #include "pljava/type/String.h" #include "pljava/type/TriggerData.h" #include "pljava/type/Tuple.h" @@ -25,21 +31,25 @@ static jclass s_TriggerData_class; static jmethodID s_TriggerData_init; static jmethodID s_TriggerData_getTriggerReturnTuple; -jobject TriggerData_create(TriggerData* triggerData) +jobject pljava_TriggerData_create(TriggerData* triggerData) { - return (triggerData == 0) ? 
0 : JNI_newObject( + if ( NULL == triggerData ) + return NULL; + + return JNI_newObjectLocked( s_TriggerData_class, s_TriggerData_init, - Invocation_createLocalWrapper(triggerData)); + pljava_DualState_key(), + PointerGetJLong(currentInvocation), + PointerGetJLong(triggerData)); } -HeapTuple TriggerData_getTriggerReturnTuple(jobject jtd, bool* wasNull) +HeapTuple pljava_TriggerData_getTriggerReturnTuple(jobject jtd, bool* wasNull) { - Ptr2Long p2l; HeapTuple ret = 0; - p2l.longVal = JNI_callLongMethod(jtd, s_TriggerData_getTriggerReturnTuple); - if(p2l.longVal != 0) - ret = heap_copytuple((HeapTuple)p2l.ptrVal); + jlong jht = JNI_callLongMethod(jtd, s_TriggerData_getTriggerReturnTuple); + if(jht != 0) + ret = heap_copytuple(JLongGet(HeapTuple, jht));/* unconditional copy? */ else *wasNull = true; return ret; @@ -47,17 +57,12 @@ /* Make this datatype available to the postgres system. */ -extern void TriggerData_initialize(void); -void TriggerData_initialize(void) +void pljava_TriggerData_initialize(void) { TypeClass cls; + jclass jcls; JNINativeMethod methods[] = { - { - "_free", - "(J)V", - Java_org_postgresql_pljava_internal_TriggerData__1free - }, { "_getRelation", "(J)Lorg/postgresql/pljava/internal/Relation;", @@ -121,11 +126,15 @@ { 0, 0, 0 } }; - s_TriggerData_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/TriggerData")); - PgObject_registerNatives2(s_TriggerData_class, methods); + jcls = PgObject_getJavaClass("org/postgresql/pljava/internal/TriggerData"); + PgObject_registerNatives2(jcls, methods); - s_TriggerData_init = PgObject_getJavaMethod(s_TriggerData_class, "<init>", "(J)V"); - s_TriggerData_getTriggerReturnTuple = PgObject_getJavaMethod(s_TriggerData_class, "getTriggerReturnTuple", "()J"); + s_TriggerData_init = PgObject_getJavaMethod(jcls, "<init>", + "(Lorg/postgresql/pljava/internal/DualState$Key;JJ)V"); + s_TriggerData_getTriggerReturnTuple = PgObject_getJavaMethod( + jcls, "getTriggerReturnTuple", "()J"); + s_TriggerData_class = JNI_newGlobalRef(jcls); + JNI_deleteLocalRef(jcls); /* Use interface name for signatures.
*/ @@ -139,19 +148,6 @@ void TriggerData_initialize(void) * JNI methods ****************************************/ -/* - * Class: org_postgresql_pljava_internal_TriggerData - * Method: _free - * Signature: (J)V - */ -JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_TriggerData__1free(JNIEnv* env, jobject _this, jlong pointer) -{ - BEGIN_NATIVE_NO_ERRCHECK - Invocation_freeLocalWrapper(pointer); - END_NATIVE -} - /* * Class: org_postgresql_pljava_TriggerData * Method: _getRelation @@ -161,11 +157,11 @@ JNIEXPORT jobject JNICALL Java_org_postgresql_pljava_internal_TriggerData__1getRelation(JNIEnv* env, jclass clazz, jlong _this) { jobject result = 0; - TriggerData* self = Invocation_getWrappedPointer(_this); + TriggerData* self = JLongGet(TriggerData *, _this); if(self != 0) { BEGIN_NATIVE - result = Relation_create(self->tg_relation); + result = pljava_Relation_create(self->tg_relation); END_NATIVE } return result; @@ -180,11 +176,11 @@ JNIEXPORT jobject JNICALL Java_org_postgresql_pljava_internal_TriggerData__1getTriggerTuple(JNIEnv* env, jclass clazz, jlong _this) { jobject result = 0; - TriggerData* self = Invocation_getWrappedPointer(_this); + TriggerData* self = JLongGet(TriggerData *, _this); if(self != 0) { BEGIN_NATIVE - result = Tuple_create(self->tg_trigtuple); + result = pljava_Tuple_create(self->tg_trigtuple); END_NATIVE } return result; @@ -199,11 +195,11 @@ JNIEXPORT jobject JNICALL Java_org_postgresql_pljava_internal_TriggerData__1getNewTuple(JNIEnv* env, jclass clazz, jlong _this) { jobject result = 0; - TriggerData* self = Invocation_getWrappedPointer(_this); + TriggerData* self = JLongGet(TriggerData *, _this); if(self != 0) { BEGIN_NATIVE - result = Tuple_create(self->tg_newtuple); + result = pljava_Tuple_create(self->tg_newtuple); END_NATIVE } return result; @@ -218,7 +214,7 @@ JNIEXPORT jobjectArray JNICALL Java_org_postgresql_pljava_internal_TriggerData__1getArguments(JNIEnv* env, jclass clazz, jlong _this) { jobjectArray result = 0; - TriggerData* self = Invocation_getWrappedPointer(_this); + TriggerData* self = JLongGet(TriggerData *, _this); if(self != 0) { char** cpp; @@ -249,7 +245,7 @@ JNIEXPORT jstring JNICALL Java_org_postgresql_pljava_internal_TriggerData__1getName(JNIEnv* env, jclass clazz, jlong _this) { jstring result = 0; - TriggerData* self = Invocation_getWrappedPointer(_this); + TriggerData* self = JLongGet(TriggerData *, _this); if(self != 0) { BEGIN_NATIVE @@ -268,7 +264,7 @@ JNIEXPORT jboolean JNICALL Java_org_postgresql_pljava_internal_TriggerData__1isFiredAfter(JNIEnv* env, jclass clazz, jlong _this) { jboolean result = JNI_FALSE; - TriggerData* self = Invocation_getWrappedPointer(_this); + TriggerData* self = JLongGet(TriggerData *, _this); if(self != 0) result = (jboolean)TRIGGER_FIRED_AFTER(self->tg_event); return result; @@ -283,7 +279,7 @@ JNIEXPORT jboolean JNICALL Java_org_postgresql_pljava_internal_TriggerData__1isFiredBefore(JNIEnv* env, jclass clazz, jlong _this) { jboolean result = JNI_FALSE; - TriggerData* self = Invocation_getWrappedPointer(_this); + TriggerData* self = JLongGet(TriggerData *, _this); if(self != 0) result = (jboolean)TRIGGER_FIRED_BEFORE(self->tg_event); return result; @@ -298,7 +294,7 @@ JNIEXPORT jboolean JNICALL Java_org_postgresql_pljava_internal_TriggerData__1isFiredForEachRow(JNIEnv* env, jclass clazz, jlong _this) { jboolean result = JNI_FALSE; - TriggerData* self = Invocation_getWrappedPointer(_this); + TriggerData* self = JLongGet(TriggerData *, _this); if(self != 0) result = 
(jboolean)TRIGGER_FIRED_FOR_ROW(self->tg_event); return result; @@ -313,7 +309,7 @@ JNIEXPORT jboolean JNICALL Java_org_postgresql_pljava_internal_TriggerData__1isFiredForStatement(JNIEnv* env, jclass clazz, jlong _this) { jboolean result = JNI_FALSE; - TriggerData* self = Invocation_getWrappedPointer(_this); + TriggerData* self = JLongGet(TriggerData *, _this); if(self != 0) result = (jboolean)TRIGGER_FIRED_FOR_STATEMENT(self->tg_event); return result; @@ -328,7 +324,7 @@ JNIEXPORT jboolean JNICALL Java_org_postgresql_pljava_internal_TriggerData__1isFiredByDelete(JNIEnv* env, jclass clazz, jlong _this) { jboolean result = JNI_FALSE; - TriggerData* self = Invocation_getWrappedPointer(_this); + TriggerData* self = JLongGet(TriggerData *, _this); if(self != 0) result = (jboolean)TRIGGER_FIRED_BY_DELETE(self->tg_event); return result; @@ -343,7 +339,7 @@ JNIEXPORT jboolean JNICALL Java_org_postgresql_pljava_internal_TriggerData__1isFiredByInsert(JNIEnv* env, jclass clazz, jlong _this) { jboolean result = JNI_FALSE; - TriggerData* self = Invocation_getWrappedPointer(_this); + TriggerData* self = JLongGet(TriggerData *, _this); if(self != 0) result = (jboolean)TRIGGER_FIRED_BY_INSERT(self->tg_event); return result; @@ -358,7 +354,7 @@ JNIEXPORT jboolean JNICALL Java_org_postgresql_pljava_internal_TriggerData__1isFiredByUpdate(JNIEnv* env, jclass clazz, jlong _this) { jboolean result = JNI_FALSE; - TriggerData* self = Invocation_getWrappedPointer(_this); + TriggerData* self = JLongGet(TriggerData *, _this); if(self != 0) result = (jboolean)TRIGGER_FIRED_BY_UPDATE(self->tg_event); return result; diff --git a/pljava-so/src/main/c/type/Tuple.c b/pljava-so/src/main/c/type/Tuple.c index 7b76384f..ed699035 100644 --- a/pljava-so/src/main/c/type/Tuple.c +++ b/pljava-so/src/main/c/type/Tuple.c @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -12,6 +18,7 @@ #include "org_postgresql_pljava_internal_Tuple.h" #include "pljava/Backend.h" +#include "pljava/DualState.h" #include "pljava/Exception.h" #include "pljava/type/Type_priv.h" #include "pljava/type/Tuple.h" @@ -25,75 +32,76 @@ static jmethodID s_Tuple_init; /* * org.postgresql.pljava.type.Tuple type. 
*/ -jobject Tuple_create(HeapTuple ht) +jobject pljava_Tuple_create(HeapTuple ht) { jobject jht = 0; if(ht != 0) { MemoryContext curr = MemoryContextSwitchTo(JavaMemoryContext); - jht = Tuple_internalCreate(ht, true); + jht = pljava_Tuple_internalCreate(ht, true); MemoryContextSwitchTo(curr); } return jht; } -jobjectArray Tuple_createArray(HeapTuple* vals, jint size, bool mustCopy) +jobjectArray pljava_Tuple_createArray(HeapTuple* vals, jint size, bool mustCopy) { jobjectArray tuples = JNI_newObjectArray(size, s_Tuple_class, 0); while(--size >= 0) { - jobject heapTuple = Tuple_internalCreate(vals[size], mustCopy); + jobject heapTuple = pljava_Tuple_internalCreate(vals[size], mustCopy); JNI_setObjectArrayElement(tuples, size, heapTuple); JNI_deleteLocalRef(heapTuple); } return tuples; } -jobject Tuple_internalCreate(HeapTuple ht, bool mustCopy) +jobject pljava_Tuple_internalCreate(HeapTuple ht, bool mustCopy) { jobject jht; - Ptr2Long htH; if(mustCopy) ht = heap_copytuple(ht); - htH.longVal = 0L; /* ensure that the rest is zeroed out */ - htH.ptrVal = ht; - jht = JNI_newObject(s_Tuple_class, s_Tuple_init, htH.longVal); + /* + * Passing (jlong)0 as the ResourceOwner means this will never be matched by a + * nativeRelease call; that's appropriate (for now) as the Tuple copy is + * being made into JavaMemoryContext, which never gets reset, so only + * unreachability from the Java side will free it. + * XXX? this seems like a lot of tuple copying. + */ + jht = JNI_newObjectLocked(s_Tuple_class, s_Tuple_init, + pljava_DualState_key(), (jlong)0, PointerGetJLong(ht)); return jht; } static jvalue _Tuple_coerceDatum(Type self, Datum arg) { jvalue result; - result.l = Tuple_create((HeapTuple)DatumGetPointer(arg)); + result.l = pljava_Tuple_create((HeapTuple)DatumGetPointer(arg)); return result; } /* Make this datatype available to the postgres system. 
*/ -extern void Tuple_initialize(void); -void Tuple_initialize(void) +extern void pljava_Tuple_initialize(void); +void pljava_Tuple_initialize(void) { TypeClass cls; JNINativeMethod methods[] = { { "_getObject", - "(JJI)Ljava/lang/Object;", + "(JJILjava/lang/Class;)Ljava/lang/Object;", Java_org_postgresql_pljava_internal_Tuple__1getObject }, - { - "_free", - "(J)V", - Java_org_postgresql_pljava_internal_Tuple__1free - }, { 0, 0, 0 }}; s_Tuple_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/Tuple")); PgObject_registerNatives2(s_Tuple_class, methods); - s_Tuple_init = PgObject_getJavaMethod(s_Tuple_class, "<init>", "(J)V"); + s_Tuple_init = PgObject_getJavaMethod(s_Tuple_class, "<init>", + "(Lorg/postgresql/pljava/internal/DualState$Key;JJ)V"); - cls = JavaWrapperClass_alloc("type.Tuple"); + cls = TypeClass_alloc("type.Tuple"); cls->JNISignature = "Lorg/postgresql/pljava/internal/Tuple;"; cls->javaTypeName = "org.postgresql.pljava.internal.Tuple"; cls->coerceDatum = _Tuple_coerceDatum; @@ -101,18 +109,19 @@ void Tuple_initialize(void) } jobject -Tuple_getObject(TupleDesc tupleDesc, HeapTuple tuple, int index) +pljava_Tuple_getObject( + TupleDesc tupleDesc, HeapTuple tuple, int index, jclass rqcls) { jobject result = 0; PG_TRY(); { - Type type = TupleDesc_getColumnType(tupleDesc, index); + Type type = pljava_TupleDesc_getColumnType(tupleDesc, index); if(type != 0) { bool wasNull = false; Datum binVal = SPI_getbinval(tuple, tupleDesc, (int)index, &wasNull); if(!wasNull) - result = Type_coerceDatum(type, binVal).l; + result = Type_coerceDatumAs(type, binVal, rqcls).l; } } PG_CATCH(); @@ -130,34 +139,17 @@ Tuple_getObject(TupleDesc tupleDesc, HeapTuple tuple, int index) /* * Class: org_postgresql_pljava_internal_Tuple * Method: _getObject - * Signature: (JJI)Ljava/lang/Object; + * Signature: (JJILjava/lang/Class;)Ljava/lang/Object; */ JNIEXPORT jobject JNICALL -Java_org_postgresql_pljava_internal_Tuple__1getObject(JNIEnv* env, jclass cls, jlong _this, jlong _tupleDesc, jint index) +Java_org_postgresql_pljava_internal_Tuple__1getObject(JNIEnv* env, jclass cls, jlong _this, jlong _tupleDesc, jint index, jclass rqcls) { jobject result = 0; - Ptr2Long p2l; - p2l.longVal = _this; BEGIN_NATIVE - HeapTuple self = (HeapTuple)p2l.ptrVal; - p2l.longVal = _tupleDesc; - result = Tuple_getObject((TupleDesc)p2l.ptrVal, self, (int)index); + HeapTuple self = JLongGet(HeapTuple, _this); + result = pljava_Tuple_getObject(JLongGet(TupleDesc, _tupleDesc), self, + (int)index, rqcls); END_NATIVE return result; } - -/* - * Class: org_postgresql_pljava_internal_Tuple - * Method: _free - * Signature: (J)V - */ -JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_Tuple__1free(JNIEnv* env, jobject _this, jlong pointer) -{ - BEGIN_NATIVE_NO_ERRCHECK - Ptr2Long p2l; - p2l.longVal = pointer; - heap_freetuple(p2l.ptrVal); - END_NATIVE -} diff --git a/pljava-so/src/main/c/type/TupleDesc.c b/pljava-so/src/main/c/type/TupleDesc.c index e4ac1586..73ed5257 100644 --- a/pljava-so/src/main/c/type/TupleDesc.c +++ b/pljava-so/src/main/c/type/TupleDesc.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved.
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include #include @@ -12,6 +16,7 @@ #include "org_postgresql_pljava_internal_TupleDesc.h" #include "pljava/Backend.h" +#include "pljava/DualState.h" #include "pljava/Exception.h" #include "pljava/Invocation.h" #include "pljava/type/Type_priv.h" @@ -31,31 +36,41 @@ static jmethodID s_TupleDesc_init; * TupleDesc, which will be freed later when Java code calls the native method * _free(). Therefore the caller is done with its TupleDesc when this returns. */ -jobject TupleDesc_create(TupleDesc td) +jobject pljava_TupleDesc_create(TupleDesc td) { jobject jtd = 0; if(td != 0) { MemoryContext curr = MemoryContextSwitchTo(JavaMemoryContext); - jtd = TupleDesc_internalCreate(td); + jtd = pljava_TupleDesc_internalCreate(td); MemoryContextSwitchTo(curr); } return jtd; } -jobject TupleDesc_internalCreate(TupleDesc td) +jobject pljava_TupleDesc_internalCreate(TupleDesc td) { jobject jtd; - Ptr2Long tdH; td = CreateTupleDescCopyConstr(td); - tdH.longVal = 0L; /* ensure that the rest is zeroed out */ - tdH.ptrVal = td; - jtd = JNI_newObject(s_TupleDesc_class, s_TupleDesc_init, tdH.longVal, (jint)td->natts); + /* + * Passing (jlong)0 as the ResourceOwner means this will never be matched by a + * nativeRelease call; that's appropriate (for now) as the TupleDesc copy is + * being made into JavaMemoryContext, which never gets reset, so only + * unreachability from the Java side will free it. + * XXX what about invalidating if DDL alters the column layout? + */ + jtd = JNI_newObjectLocked(s_TupleDesc_class, s_TupleDesc_init, + pljava_DualState_key(), (jlong)0, PointerGetJLong(td), (jint)td->natts); return jtd; } -Type TupleDesc_getColumnType(TupleDesc tupleDesc, int index) +/* + * Returns NULL if an exception has been thrown for an invalid attribute index + * (caller should expeditiously return), otherwise the Type for the column data + * (the one representing the boxing Object type, in the primitive case). + */ +Type pljava_TupleDesc_getColumnType(TupleDesc tupleDesc, int index) { Type type; Oid typeId = SPI_gettypeid(tupleDesc, index); @@ -65,7 +80,7 @@ Type TupleDesc_getColumnType(TupleDesc tupleDesc, int index) "Invalid attribute index \"%d\"", (int)index); type = 0; } - else + else /* Type_objectTypeFromOid returns boxed types, when that matters */ type = Type_objectTypeFromOid(typeId, Invocation_getTypeMap()); return type; } @@ -73,14 +88,14 @@ Type TupleDesc_getColumnType(TupleDesc tupleDesc, int index) static jvalue _TupleDesc_coerceDatum(Type self, Datum arg) { jvalue result; - result.l = TupleDesc_create((TupleDesc)DatumGetPointer(arg)); + result.l = pljava_TupleDesc_create((TupleDesc)DatumGetPointer(arg)); return result; } /* Make this datatype available to the postgres system. 
*/ -extern void TupleDesc_initialize(void); -void TupleDesc_initialize(void) +extern void pljava_TupleDesc_initialize(void); +void pljava_TupleDesc_initialize(void) { TypeClass cls; JNINativeMethod methods[] = { @@ -104,18 +119,14 @@ "(JI)Lorg/postgresql/pljava/internal/Oid;", Java_org_postgresql_pljava_internal_TupleDesc__1getOid }, - { - "_free", - "(J)V", - Java_org_postgresql_pljava_internal_TupleDesc__1free - }, { 0, 0, 0 }}; s_TupleDesc_class = JNI_newGlobalRef(PgObject_getJavaClass("org/postgresql/pljava/internal/TupleDesc")); PgObject_registerNatives2(s_TupleDesc_class, methods); - s_TupleDesc_init = PgObject_getJavaMethod(s_TupleDesc_class, "<init>", "(JI)V"); + s_TupleDesc_init = PgObject_getJavaMethod(s_TupleDesc_class, "<init>", + "(Lorg/postgresql/pljava/internal/DualState$Key;JJI)V"); - cls = JavaWrapperClass_alloc("type.TupleDesc"); + cls = TypeClass_alloc("type.TupleDesc"); cls->JNISignature = "Lorg/postgresql/pljava/internal/TupleDesc;"; cls->javaTypeName = "org.postgresql.pljava.internal.TupleDesc"; cls->coerceDatum = _TupleDesc_coerceDatum; @@ -140,9 +151,7 @@ Java_org_postgresql_pljava_internal_TupleDesc__1getColumnName(JNIEnv* env, jclas PG_TRY(); { char* name; - Ptr2Long p2l; - p2l.longVal = _this; - name = SPI_fname((TupleDesc)p2l.ptrVal, (int)index); + name = SPI_fname(JLongGet(TupleDesc, _this), (int)index); if(name == 0) { Exception_throw(ERRCODE_INVALID_DESCRIPTOR_INDEX, @@ -177,11 +186,9 @@ Java_org_postgresql_pljava_internal_TupleDesc__1getColumnIndex(JNIEnv* env, jcla char* name = String_createNTS(colName); if(name != 0) { - Ptr2Long p2l; - p2l.longVal = _this; PG_TRY(); { - result = SPI_fnumber((TupleDesc)p2l.ptrVal, name); + result = SPI_fnumber(JLongGet(TupleDesc, _this), name); if(result == SPI_ERROR_NOATTRIBUTE) { Exception_throw(ERRCODE_UNDEFINED_COLUMN, @@ -210,36 +217,36 @@ Java_org_postgresql_pljava_internal_TupleDesc__1formTuple(JNIEnv* env, jclass cl jobject result = 0; BEGIN_NATIVE - Ptr2Long p2l; - p2l.longVal = _this; PG_TRY(); { jint idx; HeapTuple tuple; MemoryContext curr; - TupleDesc self = (TupleDesc)p2l.ptrVal; + TupleDesc self = JLongGet(TupleDesc, _this); int count = self->natts; Datum* values = (Datum*)palloc(count * sizeof(Datum)); - bool* nulls = (bool *)palloc(count * sizeof(bool)); - jobject typeMap = Invocation_getTypeMap(); + bool* nulls = palloc(count * sizeof(bool)); + jobject typeMap = Invocation_getTypeMap(); /* a global ref */ memset(values, 0, count * sizeof(Datum)); - memset(nulls, true, count); /* all values null initially */ + memset(nulls, true, count * sizeof(bool));/*all values null initially*/ for(idx = 0; idx < count; ++idx) { jobject value = JNI_getObjectArrayElement(jvalues, idx); if(value != 0) { - Type type = Type_fromOid(SPI_gettypeid(self, idx + 1), typeMap); - values[idx] = Type_coerceObject(type, value); + /* Obtain boxed types here too, when that matters.
*/ + Type type = Type_objectTypeFromOid(SPI_gettypeid(self, idx + 1), typeMap); + values[idx] = Type_coerceObjectBridged(type, value); nulls[idx] = false; + JNI_deleteLocalRef(value); } } curr = MemoryContextSwitchTo(JavaMemoryContext); tuple = heap_form_tuple(self, values, nulls); - result = Tuple_internalCreate(tuple, false); + result = pljava_Tuple_internalCreate(tuple, false); MemoryContextSwitchTo(curr); pfree(values); pfree(nulls); @@ -253,21 +260,6 @@ Java_org_postgresql_pljava_internal_TupleDesc__1formTuple(JNIEnv* env, jclass cl return result; } -/* - * Class: org_postgresql_pljava_internal_TupleDesc - * Method: _free - * Signature: (J)V - */ -JNIEXPORT void JNICALL -Java_org_postgresql_pljava_internal_TupleDesc__1free(JNIEnv* env, jobject _this, jlong pointer) -{ - BEGIN_NATIVE_NO_ERRCHECK - Ptr2Long p2l; - p2l.longVal = pointer; - FreeTupleDesc((TupleDesc)p2l.ptrVal); - END_NATIVE -} - /* * Class: org_postgresql_pljava_internal_TupleDesc * Method: _getOid @@ -279,11 +271,9 @@ Java_org_postgresql_pljava_internal_TupleDesc__1getOid(JNIEnv* env, jclass cls, jobject result = 0; BEGIN_NATIVE - Ptr2Long p2l; - p2l.longVal = _this; PG_TRY(); { - Oid typeId = SPI_gettypeid((TupleDesc)p2l.ptrVal, (int)index); + Oid typeId = SPI_gettypeid(JLongGet(TupleDesc, _this), (int)index); if(!OidIsValid(typeId)) { Exception_throw(ERRCODE_INVALID_DESCRIPTOR_INDEX, diff --git a/pljava-so/src/main/c/type/TupleTable.c b/pljava-so/src/main/c/type/TupleTable.c index 17382fff..b45a5440 100644 --- a/pljava-so/src/main/c/type/TupleTable.c +++ b/pljava-so/src/main/c/type/TupleTable.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include #include @@ -15,7 +19,9 @@ #include "pljava/type/Tuple.h" #include "pljava/type/TupleDesc.h" -#define pg_unreachable() abort() +#if PG_VERSION_NUM < 120000 +#define ExecCopySlotHeapTuple(tts) ExecCopySlotTuple((tts)) +#endif static jclass s_TupleTable_class; static jmethodID s_TupleTable_init; @@ -32,9 +38,9 @@ jobject TupleTable_createFromSlot(TupleTableSlot* tts) curr = MemoryContextSwitchTo(JavaMemoryContext); - tupdesc = TupleDesc_internalCreate(tts->tts_tupleDescriptor); + tupdesc = pljava_TupleDesc_internalCreate(tts->tts_tupleDescriptor); tuple = ExecCopySlotHeapTuple(tts); - tuples = Tuple_createArray(&tuple, 1, false); + tuples = pljava_Tuple_createArray(&tuple, 1, false); MemoryContextSwitchTo(curr); @@ -44,23 +50,29 @@ jobject TupleTable_createFromSlot(TupleTableSlot* tts) jobject TupleTable_create(SPITupleTable* tts, jobject knownTD) { jobjectArray tuples; + uint64 tupcount; MemoryContext curr; if(tts == 0) return 0; +#if PG_VERSION_NUM < 130000 + tupcount = tts->alloced - tts->free; +#else + tupcount = tts->numvals; +#endif + if ( tupcount > PG_INT32_MAX ) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("a PL/Java TupleTable cannot represent more than " + "INT32_MAX rows"))); + curr = MemoryContextSwitchTo(JavaMemoryContext); if(knownTD == 0) - knownTD = TupleDesc_internalCreate(tts->tupdesc); + knownTD = pljava_TupleDesc_internalCreate(tts->tupdesc); - uint64 tupcount; - #if PG_VERSION_NUM < 130000 - tupcount = tts->alloced - tts->free; - #else - tupcount = tts->numvals; - #endif - tuples = Tuple_createArray(tts->vals, (jint)tupcount, true); + tuples = pljava_Tuple_createArray(tts->vals, (jint)tupcount, true); MemoryContextSwitchTo(curr); return JNI_newObject(s_TupleTable_class, s_TupleTable_init, knownTD, tuples); diff --git a/pljava-so/src/main/c/type/Type.c b/pljava-so/src/main/c/type/Type.c index f4475de3..1d9e122d 100644 --- a/pljava-so/src/main/c/type/Type.c +++ b/pljava-so/src/main/c/type/Type.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include #include @@ -26,34 +30,15 @@ #include "pljava/HashMap.h" #include "pljava/SPI.h" +#ifndef pg_unreachable #define pg_unreachable() abort() - -#if PG_VERSION_NUM < 80300 -typedef enum CoercionPathType -{ - COERCION_PATH_NONE, /* failed to find any coercion pathway */ - COERCION_PATH_FUNC, /* apply the specified coercion function */ - COERCION_PATH_RELABELTYPE, /* binary-compatible cast, no function */ - COERCION_PATH_ARRAYCOERCE, /* need an ArrayCoerceExpr node */ - COERCION_PATH_COERCEVIAIO /* need a CoerceViaIO node */ -} CoercionPathType; - -static CoercionPathType fcp(Oid targetTypeId, Oid sourceTypeId, - CoercionContext ccontext, Oid *funcid); -static CoercionPathType fcp(Oid targetTypeId, Oid sourceTypeId, - CoercionContext ccontext, Oid *funcid) -{ - if ( find_coercion_pathway(targetTypeId, sourceTypeId, ccontext, funcid) ) - return *funcid != InvalidOid ? - COERCION_PATH_FUNC : COERCION_PATH_RELABELTYPE; - else - return COERCION_PATH_NONE; -} -#define find_coercion_pathway fcp #endif -#if PG_VERSION_NUM < 90500 -#define DomainHasConstraints(x) true +#if PG_VERSION_NUM < 110000 +static Oid BOOLARRAYOID; +static Oid CHARARRAYOID; +static Oid FLOAT8ARRAYOID; +static Oid INT8ARRAYOID; #endif static HashMap s_typeByOid; @@ -76,36 +61,109 @@ static jclass s_Iterator_class; static jmethodID s_Iterator_hasNext; static jmethodID s_Iterator_next; -/* Structure used in multi function calls (calls returning - * SETOF ) +static jclass s_TypeBridge_Holder_class; +static jmethodID s_TypeBridge_Holder_className; +static jmethodID s_TypeBridge_Holder_defaultOid; +static jmethodID s_TypeBridge_Holder_payload; + +/* + * Structure used to retain state of set-returning functions using the + * SFRM_ValuePerCall protocol (the only one PL/Java currently supports). In that + * protocol, PostgreSQL will make repeated calls arriving at Type_invokeSRF + * below, which returns one result row on each call (and then a no-more-results + * result). This struct holds necessary context through the sequence of calls. + * + * If PostgreSQL is satisfied before the whole set has been returned, the + * _endOfSetCB below will be invoked to clean up the work in progress, and also + * needs this stashed information. */ typedef struct { Type elemType; + Function fn; jobject rowProducer; jobject rowCollector; + /* + * Invocation instance, if any, the Java counterpart to currentInvocation + * the C struct. There isn't one unless it gets asked for, then if it is, + * it's saved here, so even though the C currentInvocation really is new on + * each entry from PG, Java will see one Invocation instance throughout the + * sequence of calls. + */ jobject invocation; - MemoryContext rowContext; + /* + * Two pieces of state from Invocation.c's management of SPI connection, + * effectively keeping one such connection alive through the sequence of + * calls. I could easily be led to question the advisability of even doing + * that, but it has a long history in PL/Java, so changing it might call for + * some careful analysis. 
+ */ MemoryContext spiContext; bool hasConnected; - bool trusted; } CallContextData; +/* + * Called during evaluation of a set-returning function, at various points after + * calls into Java code could have instantiated an Invocation, or connected SPI. + * Does not stash elemType, rowProducer, or rowCollector; those are all + * unconditionally set in the first-call initialization, and spiContext to zero. + */ +static void stashCallContext(CallContextData *ctxData) +{ + bool wasConnected = ctxData->hasConnected; + + ctxData->hasConnected = currentInvocation->hasConnected; + + ctxData->invocation = currentInvocation->invocation; + + if ( wasConnected ) + return; + + /* + * If SPI has been connected for the first time, capture the memory context + * it imposed. Curiously, this is not used again except in _closeIteration. + */ + if(ctxData->hasConnected) + ctxData->spiContext = CurrentMemoryContext; +} + +/* + * Called either at normal completion of a set-returning function, or by the + * _endOfSetCB if PostgreSQL doesn't want all the results. + */ static void _closeIteration(CallContextData* ctxData) { + jobject dummy; currentInvocation->hasConnected = ctxData->hasConnected; currentInvocation->invocation = ctxData->invocation; - Type_closeSRF(ctxData->elemType, ctxData->rowProducer); + /* + * Why pass 1 as the call_cntr? We won't always have the actual call_cntr + * value at _closeIteration time (the _endOfSetCB isn't passed it), and the + * Java interfaces being used don't need it (close() isn't passed a row + * number), but at least 1 is different from zero, in case vpcInvoke has + * a reason to distinguish the first call (in the same invocation as the + * overall setup) from subsequent ones. + */ + pljava_Function_vpcInvoke( + ctxData->fn, ctxData->rowProducer, NULL, 1, JNI_TRUE, &dummy); + JNI_deleteGlobalRef(ctxData->rowProducer); if(ctxData->rowCollector != 0) JNI_deleteGlobalRef(ctxData->rowCollector); - MemoryContextDelete(ctxData->rowContext); if(ctxData->hasConnected && ctxData->spiContext != 0) { - /* Connect during SRF_IS_FIRSTCALL(). Switch context back to what - * it was at that time and disconnect. + /* + * SPI was connected. We will (1) switch back to the memory context that + * was imposed by SPI_connect, then (2) disconnect. SPI_finish will have + * switched back to whatever memory context was current when SPI_connect + * was called, and that context had better still be valid. It might be + * the executor's multi_call_memory_ctx, if the SPI_connect happened + * during initialization of the rowProducer or rowCollector, or the + * executor's per-row context, if it happened later. Both of those are + * still valid at this point. The final step (3) is to switch back to + * the context we had before (1) and (2) happened. */ MemoryContext currCtx = MemoryContextSwitchTo(ctxData->spiContext); Invocation_assertDisconnect(); @@ -113,18 +171,37 @@ static void _closeIteration(CallContextData* ctxData) } } +/* + * Called by PostgreSQL if abandoning the collection of set-returning-function + * results early. 
+ */ static void _endOfSetCB(Datum arg) { - Invocation topCall; - bool saveInExprCtxCB; + Invocation ctx; CallContextData* ctxData = (CallContextData*)DatumGetPointer(arg); - if(currentInvocation == 0) - Invocation_pushInvocation(&topCall, ctxData->trusted); - saveInExprCtxCB = currentInvocation->inExprContextCB; - currentInvocation->inExprContextCB = true; - _closeIteration(ctxData); - currentInvocation->inExprContextCB = saveInExprCtxCB; + /* + * Even if there is an invocation already on the stack, there is no + * convincing reason to think this callback belongs to it; PostgreSQL + * will make this callback when the expression context we did belong to + * is being torn down. This is not a hot operation; it only happens in + * rare cases when an SRF has been called and not completely consumed. + * So just unconditionally set up a context for this call, and clean up + * our own mess. + */ + PG_TRY(); + { + Invocation_pushInvocation(&ctx); + currentInvocation->inExprContextCB = true; + _closeIteration(ctxData); + Invocation_popInvocation(false); + } + PG_CATCH(); + { + Invocation_popInvocation(true); + PG_RE_THROW(); + } + PG_END_TRY(); } static Type _getCoerce(Type self, Type other, Oid fromOid, Oid toOid, @@ -132,9 +209,7 @@ static Type _getCoerce(Type self, Type other, Oid fromOid, Oid toOid, Type Type_getCoerceIn(Type self, Type other) { - elog(DEBUG2, "Type_getCoerceIn(%s,%s)", - format_type_be(self->typeId), - format_type_be(other->typeId)); + elog(DEBUG2, "Type_getCoerceIn(%d,%d)", self->typeId, other->typeId); return _getCoerce(self, other, other->typeId, self->typeId, &(self->inCoercions), Coerce_createIn); } @@ -142,9 +217,7 @@ Type Type_getCoerceIn(Type self, Type other) Type Type_getCoerceOut(Type self, Type other) { - elog(DEBUG2, "Type_getCoerceOut(%s,%s)", - format_type_be(self->typeId), - format_type_be(other->typeId)); + elog(DEBUG2, "Type_getCoerceOut(%d,%d)", self->typeId, other->typeId); return _getCoerce(self, other, self->typeId, other->typeId, &(self->outCoercions), Coerce_createOut); } @@ -167,26 +240,26 @@ static Type _getCoerce(Type self, Type other, Oid fromOid, Oid toOid, switch ( cpt ) { case COERCION_PATH_NONE: - elog(ERROR, "no conversion function from %s to %s", - format_type_be(fromOid), - format_type_be(toOid)); + elog(ERROR, "no conversion function from (regtype) %d to %d", + fromOid, toOid); + pg_unreachable(); /*elog(ERROR is already so marked; what's with gcc?*/ case COERCION_PATH_RELABELTYPE: /* * Binary compatible type. No need for a special coercer. * Unless ... it's a domain .... */ if ( ! 
IsBinaryCoercible(fromOid, toOid) && DomainHasConstraints(toOid)) - elog(WARNING, "disregarding domain constraints of %s", - format_type_be(toOid)); + elog(WARNING, "disregarding domain constraints of (regtype) %d", + toOid); return self; case COERCION_PATH_COERCEVIAIO: - elog(ERROR, "COERCEVIAIO not implemented from %s to %s", - format_type_be(fromOid), - format_type_be(toOid)); + elog(ERROR, "COERCEVIAIO not implemented from (regtype) %d to %d", + fromOid, toOid); + pg_unreachable(); case COERCION_PATH_ARRAYCOERCE: - elog(ERROR, "ARRAYCOERCE not implemented from %s to %s", - format_type_be(fromOid), - format_type_be(toOid)); + elog(ERROR, "ARRAYCOERCE not implemented from (regtype) %d to %d", + fromOid, toOid); + pg_unreachable(); case COERCION_PATH_FUNC: break; } @@ -219,11 +292,59 @@ jvalue Type_coerceDatum(Type self, Datum value) return self->typeClass->coerceDatum(self, value); } +jvalue Type_coerceDatumAs(Type self, Datum value, jclass rqcls) +{ + jstring rqcname; + char *rqcname0; + Type rqtype; + + if ( NULL == rqcls || Type_getJavaClass(self) == rqcls ) + return Type_coerceDatum(self, value); + + rqcname = JNI_callObjectMethod(rqcls, Class_getCanonicalName); + rqcname0 = String_createNTS(rqcname); + JNI_deleteLocalRef(rqcname); + rqtype = Type_fromJavaType(self->typeId, rqcname0); + pfree(rqcname0); + if ( Type_canReplaceType(rqtype, self) ) + return Type_coerceDatum(rqtype, value); + return Type_coerceDatum(self, value); +} + Datum Type_coerceObject(Type self, jobject object) { return self->typeClass->coerceObject(self, object); } +Datum Type_coerceObjectBridged(Type self, jobject object) +{ + jstring rqcname; + char *rqcname0; + Type rqtype; + + if ( JNI_FALSE == JNI_isInstanceOf(object, s_TypeBridge_Holder_class) ) + return Type_coerceObject(self, object); + + rqcname = JNI_callObjectMethod(object, s_TypeBridge_Holder_className); + rqcname0 = String_createNTS(rqcname); + JNI_deleteLocalRef(rqcname); + rqtype = Type_fromJavaType(self->typeId, rqcname0); + pfree(rqcname0); + if ( ! Type_canReplaceType(rqtype, self) ) + { + /* + * Ignore the TypeBridge in this one oddball case that results from the + * existence of two Types both mapping Java's byte[]. 
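The COERCION_PATH_* switch in _getCoerce above is driven by PostgreSQL's parse_coerce API. Outside of PL/Java's Type machinery the same decision looks roughly like this; check_cast is an illustrative name, and COERCION_EXPLICIT is just one possible coercion context, not necessarily the one _getCoerce passes.

#include "postgres.h"
#include "parser/parse_coerce.h"

/*
 * Return the oid of a conversion function from fromOid to toOid,
 * InvalidOid if the types are binary compatible, or raise an error.
 */
static Oid
check_cast(Oid fromOid, Oid toOid)
{
	Oid			funcId;
	CoercionPathType pathtype =
		find_coercion_pathway(toOid, fromOid, COERCION_EXPLICIT, &funcId);

	switch (pathtype)
	{
		case COERCION_PATH_FUNC:
			return funcId;		/* a function must be called to convert */
		case COERCION_PATH_RELABELTYPE:
			return InvalidOid;	/* binary compatible, nothing to call */
		default:
			ereport(ERROR,
					(errcode(ERRCODE_CANNOT_COERCE),
					 errmsg("no usable conversion from type %u to type %u",
							fromOid, toOid)));
	}
	pg_unreachable();
}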
+ */ + if ( BYTEAOID == self->typeId && CHARARRAYOID == rqtype->typeId ) + rqtype = self; + else + elog(ERROR, "type bridge failure"); + } + object = JNI_callObjectMethod(object, s_TypeBridge_Holder_payload); + return Type_coerceObject(rqtype, object); +} + char Type_getAlign(Type self) { return self->align; @@ -287,11 +408,6 @@ const char* Type_getJNISignature(Type self) return self->typeClass->getJNISignature(self); } -const char* Type_getJNIReturnSignature(Type self, bool forMultiCall, bool useAltRepr) -{ - return self->typeClass->getJNIReturnSignature(self, forMultiCall, useAltRepr); -} - Type Type_getArrayType(Type self, Oid arrayTypeId) { Type arrayType = self->arrayType; @@ -336,14 +452,14 @@ TupleDesc Type_getTupleDesc(Type self, PG_FUNCTION_ARGS) return self->typeClass->getTupleDesc(self, fcinfo); } -Datum Type_invoke(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +Datum Type_invoke(Type self, Function fn, PG_FUNCTION_ARGS) { - return self->typeClass->invoke(self, cls, method, args, fcinfo); + return self->typeClass->invoke(self, fn, fcinfo); } -Datum Type_invokeSRF(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +Datum Type_invokeSRF(Type self, Function fn, PG_FUNCTION_ARGS) { - bool hasRow; + jobject row; CallContextData* ctxData; FuncCallContext* context; MemoryContext currCtx; @@ -357,12 +473,23 @@ Datum Type_invokeSRF(Type self, jclass cls, jmethodID method, jvalue* args, PG_F /* create a function context for cross-call persistence */ context = SRF_FIRSTCALL_INIT(); + + /* + * Before creating the rowProducer (and rowCollector, if applicable), + * switch to the SRF_FIRSTCALL_INIT-created multi_call_memory_ctx that + * is not reset between calls. The motivation seems clear enough (allow + * the first-call initialization to allocate things in a context that + * will last through the sequence), though it is not clear whether + * anything in existing PL/Java code in fact does so (other than our + * allocation of ctxData below, which could perhaps just be a direct + * MemoryContextAllocZero). + */ currCtx = MemoryContextSwitchTo(context->multi_call_memory_ctx); - /* Call the declared Java function. It returns an instance that can produce - * the rows. + /* Call the declared Java function. It returns an instance + * that can produce the rows. */ - tmp = Type_getSRFProducer(self, cls, method, args); + tmp = pljava_Function_refInvoke(fn); if(tmp == 0) { Invocation_assertDisconnect(); @@ -371,10 +498,11 @@ Datum Type_invokeSRF(Type self, jclass cls, jmethodID method, jvalue* args, PG_F SRF_RETURN_DONE(context); } - ctxData = (CallContextData*)palloc(sizeof(CallContextData)); + ctxData = (CallContextData*)palloc0(sizeof(CallContextData)); context->user_fctx = ctxData; ctxData->elemType = self; + ctxData->fn = fn; ctxData->rowProducer = JNI_newGlobalRef(tmp); JNI_deleteLocalRef(tmp); @@ -382,53 +510,57 @@ Datum Type_invokeSRF(Type self, jclass cls, jmethodID method, jvalue* args, PG_F * to produce the row. If one is needed, it's created here. 
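The context switching in the first-call block above is the usual MemoryContextSwitchTo save-and-restore idiom: whatever is palloc'd while multi_call_memory_ctx is current survives for the whole SRF sequence, while allocations made in the caller's per-row context vanish when that context is reset. In isolation (alloc_for_whole_sequence is an illustrative name):

#include "postgres.h"
#include "funcapi.h"

/*
 * Allocate per-sequence state for a set-returning function in the context
 * that outlives the individual calls, then restore whatever context was
 * current on entry.
 */
static void *
alloc_for_whole_sequence(FuncCallContext *funcctx, Size size)
{
	MemoryContext oldcxt =
		MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
	void	   *state = palloc0(size);

	MemoryContextSwitchTo(oldcxt);
	return state;
}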
*/ tmp = Type_getSRFCollector(self, fcinfo); - if(tmp == 0) - ctxData->rowCollector = 0; - else + if(tmp != 0) { ctxData->rowCollector = JNI_newGlobalRef(tmp); JNI_deleteLocalRef(tmp); } - ctxData->trusted = currentInvocation->trusted; - ctxData->hasConnected = currentInvocation->hasConnected; - ctxData->invocation = currentInvocation->invocation; - if(ctxData->hasConnected) - ctxData->spiContext = CurrentMemoryContext; - else - ctxData->spiContext = 0; - - ctxData->rowContext = AllocSetContextCreate(context->multi_call_memory_ctx, - "PL/Java row context", - ALLOCSET_DEFAULT_SIZES); + stashCallContext(ctxData); /* Register callback to be called when the function ends */ - RegisterExprContextCallback(((ReturnSetInfo*)fcinfo->resultinfo)->econtext, _endOfSetCB, PointerGetDatum(ctxData)); + RegisterExprContextCallback( + ((ReturnSetInfo*)fcinfo->resultinfo)->econtext, + _endOfSetCB, PointerGetDatum(ctxData)); + + /* + * Switch back to the context on entry, which by caller arrangement is + * one that gets reset between calls. Thus here at the conclusion of the + * first-call initialization, the context invariant below is satisfied. + */ MemoryContextSwitchTo(currCtx); } + /* + * Invariant: whether this is the first call and the SRF_IS_FIRSTCALL block + * above just completed, or this is a subsequent call, at this point, the + * memory context is the per-row one supplied by the executor (which gets + * reset between calls). + */ + context = SRF_PERCALL_SETUP(); ctxData = (CallContextData*)context->user_fctx; - MemoryContextReset(ctxData->rowContext); - currCtx = MemoryContextSwitchTo(ctxData->rowContext); + currCtx = CurrentMemoryContext; /* save executor's per-row context */ currentInvocation->hasConnected = ctxData->hasConnected; currentInvocation->invocation = ctxData->invocation; - hasRow = Type_hasNextSRF(self, ctxData->rowProducer, ctxData->rowCollector, (jint)context->call_cntr); - - ctxData->hasConnected = currentInvocation->hasConnected; - ctxData->invocation = currentInvocation->invocation; - currentInvocation->hasConnected = false; - currentInvocation->invocation = 0; - - if(hasRow) + if(JNI_TRUE == pljava_Function_vpcInvoke(ctxData->fn, + ctxData->rowProducer, ctxData->rowCollector, (jlong)context->call_cntr, + JNI_FALSE, &row)) { - Datum result = Type_nextSRF(self, ctxData->rowProducer, ctxData->rowCollector); + Datum result = Type_datumFromSRF(self, row, ctxData->rowCollector); + JNI_deleteLocalRef(row); + stashCallContext(ctxData); + currentInvocation->hasConnected = false; + currentInvocation->invocation = 0; MemoryContextSwitchTo(currCtx); SRF_RETURN_NEXT(context, result); } + stashCallContext(ctxData); + currentInvocation->hasConnected = false; + currentInvocation->invocation = 0; MemoryContextSwitchTo(currCtx); /* Unregister this callback and call it manually. We do this because @@ -456,7 +588,23 @@ bool Type_isPrimitive(Type self) Type Type_fromJavaType(Oid typeId, const char* javaTypeName) { - CacheEntry ce = (CacheEntry)HashMap_getByString(s_obtainerByJavaName, javaTypeName); + /* + * Do an initial lookup with InvalidOid as the oid part of the key. Multiple + * entries for the same Java name and distinct oids are not anticipated + * except for arrays. + */ + CacheEntry ce = (CacheEntry)HashMap_getByStringOid( + s_obtainerByJavaName, javaTypeName, InvalidOid); + + /* + * If no entry was found using InvalidOid and a valid typeId is provided + * and the wanted Java type is an array, repeat the lookup using the typeId. 
+ */ + if ( NULL == ce && InvalidOid != typeId + && NULL != strchr(javaTypeName, ']') ) + ce = (CacheEntry)HashMap_getByStringOid( + s_obtainerByJavaName, javaTypeName, typeId); + if(ce == 0) { size_t jtlen = strlen(javaTypeName) - 2; @@ -490,6 +638,82 @@ Type Type_fromOidCache(Oid typeId) return (Type)HashMap_getByOid(s_typeByOid, typeId); } +/* + * Return NULL unless typeId represents a MappedUDT as found in the typeMap, + * in which case return a freshly-registered UDT Type. + * + * A MappedUDT's supporting functions don't have SQL declarations, from which + * an ordinary function's PLPrincipal and initiating class loader would be + * determined, so when obtaining the support function handles below, NULL will + * be passed as the language name, indicating that information isn't available, + * and won't be baked into the handles. + * + * A MappedUDT only has the two support functions readSQL and writeSQL. + * The I/O support functions parse and toString are only for a BaseUDT, so + * they do not need to be looked up here. + * + * The typeStruct argument supplies the type's name and namespace to + * UDT_registerUDT, as well as the by-value, length, and alignment common to + * any registered Type. + * + * A complication, though: in principle, this is a function on two variables, + * typeId and typeMap. (The typeStruct is functionally dependent on typeId.) + * But registration of the first one to be encountered will enter it in caches + * that depend only on the typeId (or Java class name, for the other direction) + * from that point on. This is longstanding PL/Java behavior, but XXX. + */ +static inline Type +checkTypeMappedUDT(Oid typeId, jobject typeMap, Form_pg_type typeStruct) +{ + jobject joid; + jclass typeClass; + Type type; + jobject readMH; + jobject writeMH; + TupleDesc tupleDesc; + bool hasTupleDesc; + + if ( NULL == typeMap ) + return NULL; + + joid = Oid_create(typeId); + typeClass = (jclass)JNI_callObjectMethod(typeMap, s_Map_get, joid); + JNI_deleteLocalRef(joid); + + if ( NULL == typeClass ) + return NULL; + + if ( -2 == typeStruct->typlen ) + { + JNI_deleteLocalRef(typeClass); + ereport(ERROR, ( + errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg( + "type mapping in PL/Java for %s with NUL-terminated(-2) " + "storage not supported", + format_type_be_qualified(typeId)) + )); + } + + readMH = pljava_Function_udtReadHandle( typeClass, NULL, true); + writeMH = pljava_Function_udtWriteHandle(typeClass, NULL, true); + + tupleDesc = lookup_rowtype_tupdesc_noerror(typeId, -1, true); + hasTupleDesc = NULL != tupleDesc; + if ( hasTupleDesc ) + ReleaseTupleDesc(tupleDesc); + + type = (Type)UDT_registerUDT( + typeClass, typeId, typeStruct, hasTupleDesc, false, + NULL, readMH, writeMH, NULL); + /* + * UDT_registerUDT calls JNI_deleteLocalRef on readMH and writeMH. 
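Reduced to its essentials, the tuple-descriptor probe used above looks like this; type_has_tupdesc is an illustrative name.

#include "postgres.h"
#include "access/tupdesc.h"
#include "utils/typcache.h"

/*
 * True if typeId has a registered row type (composite), false for plain
 * scalars; no error is raised either way.
 */
static bool
type_has_tupdesc(Oid typeId)
{
	TupleDesc	td = lookup_rowtype_tupdesc_noerror(typeId, -1, true);

	if (td == NULL)
		return false;

	ReleaseTupleDesc(td);		/* drop the reference obtained above */
	return true;
}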
+ */ + + JNI_deleteLocalRef(typeClass); + return type; +} + Type Type_fromOid(Oid typeId, jobject typeMap) { CacheEntry ce; @@ -497,7 +721,7 @@ Type Type_fromOid(Oid typeId, jobject typeMap) Form_pg_type typeStruct; Type type = Type_fromOidCache(typeId); - if(type != 0) + if ( NULL != type ) return type; typeTup = PgObject_getValidTuple(TYPEOID, typeId, "type"); @@ -505,12 +729,14 @@ Type Type_fromOid(Oid typeId, jobject typeMap) if(typeStruct->typelem != 0 && typeStruct->typlen == -1) { - type = Type_getArrayType(Type_fromOid(typeStruct->typelem, typeMap), typeId); + type = Type_getArrayType( + Type_fromOid(typeStruct->typelem, typeMap), typeId); goto finally; } /* For some reason, the anyarray is *not* an array with anyelement as the * element type. We'd like to see it that way though. + * XXX would we, or does that mistake something intended in PostgreSQL? */ if(typeId == ANYARRAYOID) { @@ -527,40 +753,35 @@ Type Type_fromOid(Oid typeId, jobject typeMap) goto finally; } - if(typeMap != 0) - { - jobject joid = Oid_create(typeId); - jclass typeClass = (jclass)JNI_callObjectMethod(typeMap, s_Map_get, joid); - - JNI_deleteLocalRef(joid); - if(typeClass != 0) - { - TupleDesc tupleDesc = lookup_rowtype_tupdesc_noerror(typeId, -1, true); - bool hasTupleDesc = NULL != tupleDesc; - if ( hasTupleDesc ) - ReleaseTupleDesc(tupleDesc); - type = (Type)UDT_registerUDT(typeClass, typeId, typeStruct, hasTupleDesc, false); - JNI_deleteLocalRef(typeClass); - goto finally; - } - } + /* + * Perhaps we have found a MappedUDT. If so, this check will register and + * return it. + */ + type = checkTypeMappedUDT(typeId, typeMap, typeStruct); + if ( NULL != type ) + goto finally; /* Composite and record types will not have a TypeObtainer registered */ - if(typeStruct->typtype == 'c' || (typeStruct->typtype == 'p' && typeId == RECORDOID)) + if(typeStruct->typtype == 'c' + || (typeStruct->typtype == 'p' && typeId == RECORDOID)) { type = Composite_obtain(typeId); goto finally; } ce = (CacheEntry)HashMap_getByOid(s_obtainerByOid, typeId); - if(ce == 0) + if ( NULL == ce ) { - type = Function_checkTypeUDT(typeId, typeStruct); - if ( 0 != type ) + /* + * Perhaps we have found a BaseUDT. If so, this check will register and + * return it. + */ + type = Function_checkTypeBaseUDT(typeId, typeStruct); + if ( NULL != type ) goto finally; /* - * Default to String and standard textin/textout coersion. + * Default to String and standard textin/textout coercion. * Note: if the AS spec includes a Java signature, and the corresponding * Java type is not String, that will trigger a call to * Type_fromJavaType to see if a mapping is registered that way. 
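The typelem/typlen test near the top of Type_fromOid is how the catalogs mark a true (varlena) array type; the element/array relationship it relies on can also be read through lsyscache, e.g.:

#include "postgres.h"
#include "catalog/pg_type.h"
#include "utils/lsyscache.h"

/*
 * Sketch of the element-type/array-type mapping that Type_fromOid and
 * Type_getArrayType navigate, using catalog lookups.
 */
static void
array_relationship_example(void)
{
	Oid			elem = INT4OID;
	Oid			arr = get_array_type(elem);		/* the array-of-int4 type */
	Oid			back = get_element_type(arr);	/* INT4OID again */

	elog(DEBUG2, "array of %u is %u; element of %u is %u",
		 elem, arr, arr, back);
}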
If not, @@ -593,11 +814,11 @@ bool _Type_canReplaceType(Type self, Type other) return self->typeClass == other->typeClass; } -Datum _Type_invoke(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +Datum _Type_invoke(Type self, Function fn, PG_FUNCTION_ARGS) { MemoryContext currCtx; Datum ret; - jobject value = JNI_callStaticObjectMethodA(cls, method, args); + jobject value = pljava_Function_refInvoke(fn); if(value == 0) { fcinfo->isnull = true; @@ -619,36 +840,14 @@ static Type _Type_createArrayType(Type self, Oid arrayTypeId) return Array_fromOid(arrayTypeId, self); } -static jobject _Type_getSRFProducer(Type self, jclass cls, jmethodID method, jvalue* args) -{ - return JNI_callStaticObjectMethodA(cls, method, args); -} - static jobject _Type_getSRFCollector(Type self, PG_FUNCTION_ARGS) { return 0; } -static bool _Type_hasNextSRF(Type self, jobject rowProducer, jobject rowCollector, jint callCounter) -{ - return (JNI_callBooleanMethod(rowProducer, s_Iterator_hasNext) == JNI_TRUE); -} - -static Datum _Type_nextSRF(Type self, jobject rowProducer, jobject rowCollector) -{ - jobject tmp = JNI_callObjectMethod(rowProducer, s_Iterator_next); - Datum result = Type_coerceObject(self, tmp); - JNI_deleteLocalRef(tmp); - return result; -} - -static void _Type_closeSRF(Type self, jobject rowProducer) -{ -} - -jobject Type_getSRFProducer(Type self, jclass cls, jmethodID method, jvalue* args) +static Datum _Type_datumFromSRF(Type self, jobject row, jobject rowCollector) { - return self->typeClass->getSRFProducer(self, cls, method, args); + return Type_coerceObject(self, row); } jobject Type_getSRFCollector(Type self, PG_FUNCTION_ARGS) @@ -656,19 +855,9 @@ jobject Type_getSRFCollector(Type self, PG_FUNCTION_ARGS) return self->typeClass->getSRFCollector(self, fcinfo); } -bool Type_hasNextSRF(Type self, jobject rowProducer, jobject rowCollector, jint callCounter) -{ - return self->typeClass->hasNextSRF(self, rowProducer, rowCollector, callCounter); -} - -Datum Type_nextSRF(Type self, jobject rowProducer, jobject rowCollector) +Datum Type_datumFromSRF(Type self, jobject row, jobject rowCollector) { - return self->typeClass->nextSRF(self, rowProducer, rowCollector); -} - -void Type_closeSRF(Type self, jobject rowProducer) -{ - self->typeClass->closeSRF(self, rowProducer); + return self->typeClass->datumFromSRF(self, row, rowCollector); } static Type _Type_getRealType(Type self, Oid realId, jobject typeMap) @@ -681,11 +870,6 @@ static const char* _Type_getJNISignature(Type self) return self->typeClass->JNISignature; } -static const char* _Type_getJNIReturnSignature(Type self, bool forMultiCall, bool useAltRepr) -{ - return forMultiCall ? 
"Ljava/util/Iterator;" : Type_getJNISignature(self); -} - TupleDesc _Type_getTupleDesc(Type self, PG_FUNCTION_ARGS) { ereport(ERROR, @@ -694,6 +878,73 @@ TupleDesc _Type_getTupleDesc(Type self, PG_FUNCTION_ARGS) return 0; /* Keep compiler happy */ } +static void addTypeBridge(jclass c, jmethodID m, char const *cName, Oid oid) +{ + jstring jcn = String_createJavaStringFromNTS(cName); + JNI_callStaticObjectMethodLocked(c, m, jcn, oid); + JNI_deleteLocalRef(jcn); +} + +static void initializeTypeBridges() +{ + jclass cls; + jmethodID ofClass; + jmethodID ofInterface; + + cls = PgObject_getJavaClass("org/postgresql/pljava/jdbc/TypeBridge"); + ofClass = PgObject_getStaticJavaMethod(cls, "ofClass", + "(Ljava/lang/String;I)Lorg/postgresql/pljava/jdbc/TypeBridge;"); + ofInterface = PgObject_getStaticJavaMethod(cls, "ofInterface", + "(Ljava/lang/String;I)Lorg/postgresql/pljava/jdbc/TypeBridge;"); + + addTypeBridge(cls, ofClass, "java.time.LocalDate", DATEOID); + addTypeBridge(cls, ofClass, "java.time.LocalDateTime", TIMESTAMPOID); + addTypeBridge(cls, ofClass, "java.time.LocalTime", TIMEOID); + addTypeBridge(cls, ofClass, "java.time.OffsetDateTime", TIMESTAMPTZOID); + addTypeBridge(cls, ofClass, "java.time.OffsetTime", TIMETZOID); + + /* + * TypeBridges that allow Java primitive array types to be passed to things + * expecting their boxed counterparts. An oddball case is byte[], given the + * default oid BYTEAOID here instead of CHARARRAYOID following the pattern, + * because there is a whole 'nother (see byte_array.c) Type that also maps + * byte[] on the Java side, but bytea for PostgreSQL (I am not at all sure + * what I think of that), and bridging it to a different Oid here would + * break it as a parameter to prepared statements that were working. So + * cater to that use, while possibly complicating the new use that was not + * formerly possible. + * + * There is no bridge for char[], because PL/Java has no Type that maps it + * to anything in PostgreSQL. 
+ */ + addTypeBridge(cls, ofClass, "boolean[]", BOOLARRAYOID); + addTypeBridge(cls, ofClass, "byte[]", BYTEAOID); + addTypeBridge(cls, ofClass, "short[]", INT2ARRAYOID); + addTypeBridge(cls, ofClass, "int[]", INT4ARRAYOID); + addTypeBridge(cls, ofClass, "long[]", INT8ARRAYOID); + addTypeBridge(cls, ofClass, "float[]", FLOAT4ARRAYOID); + addTypeBridge(cls, ofClass, "double[]", FLOAT8ARRAYOID); + + addTypeBridge(cls, ofInterface, "java.sql.SQLXML", +#if defined(XMLOID) + XMLOID +#else + TEXTOID +#endif + ); + + JNI_deleteLocalRef(cls); + + cls = PgObject_getJavaClass("org/postgresql/pljava/jdbc/TypeBridge$Holder"); + s_TypeBridge_Holder_class = JNI_newGlobalRef(cls); + s_TypeBridge_Holder_className = PgObject_getJavaMethod(cls, "className", + "()Ljava/lang/String;"); + s_TypeBridge_Holder_defaultOid = PgObject_getJavaMethod(cls, "defaultOid", + "()I"); + s_TypeBridge_Holder_payload = PgObject_getJavaMethod(cls, "payload", + "()Ljava/lang/Object;"); +} + /* * Shortcuts to initializers of known types */ @@ -715,23 +966,16 @@ extern void Timestamp_initialize(void); extern void Oid_initialize(void); extern void AclId_initialize(void); -extern void ErrorData_initialize(void); -extern void LargeObject_initialize(void); extern void String_initialize(void); extern void byte_array_initialize(void); -extern void JavaWrapper_initialize(void); -extern void ExecutionPlan_initialize(void); -extern void Portal_initialize(void); -extern void Relation_initialize(void); -extern void TriggerData_initialize(void); -extern void Tuple_initialize(void); -extern void TupleDesc_initialize(void); extern void TupleTable_initialize(void); extern void Composite_initialize(void); +extern void pljava_SQLXMLImpl_initialize(void); + extern void Type_initialize(void); void Type_initialize(void) { @@ -760,28 +1004,58 @@ void Type_initialize(void) Oid_initialize(); AclId_initialize(); - ErrorData_initialize(); - LargeObject_initialize(); byte_array_initialize(); - JavaWrapper_initialize(); - ExecutionPlan_initialize(); - Portal_initialize(); - TriggerData_initialize(); - Relation_initialize(); - TupleDesc_initialize(); - Tuple_initialize(); TupleTable_initialize(); Composite_initialize(); + pljava_SQLXMLImpl_initialize(); s_Map_class = JNI_newGlobalRef(PgObject_getJavaClass("java/util/Map")); - s_Map_get = PgObject_getJavaMethod(s_Map_class, "get", "(Ljava/lang/Object;)Ljava/lang/Object;"); + s_Map_get = PgObject_getJavaMethod( + s_Map_class, "get", "(Ljava/lang/Object;)Ljava/lang/Object;"); + + s_Iterator_class = JNI_newGlobalRef( + PgObject_getJavaClass("java/util/Iterator")); + s_Iterator_hasNext = PgObject_getJavaMethod( + s_Iterator_class, "hasNext", "()Z"); + s_Iterator_next = PgObject_getJavaMethod( + s_Iterator_class, "next", "()Ljava/lang/Object;"); + +#if PG_VERSION_NUM < 110000 + BOOLARRAYOID = get_array_type(BOOLOID); + CHARARRAYOID = get_array_type(CHAROID); + FLOAT8ARRAYOID = get_array_type(FLOAT8OID); + INT8ARRAYOID = get_array_type(INT8OID); +#endif + + initializeTypeBridges(); +} + +static Type unimplementedTypeObtainer(Oid typeId); +static jvalue unimplementedDatumCoercer(Type, Datum); +static Datum unimplementedObjectCoercer(Type, jobject); - s_Iterator_class = JNI_newGlobalRef(PgObject_getJavaClass("java/util/Iterator")); - s_Iterator_hasNext = PgObject_getJavaMethod(s_Iterator_class, "hasNext", "()Z"); - s_Iterator_next = PgObject_getJavaMethod(s_Iterator_class, "next", "()Ljava/lang/Object;"); +static Type unimplementedTypeObtainer(Oid typeId) +{ + ereport(ERROR, + (errmsg("no type obtainer registered for 
type oid %ud", typeId))); + pg_unreachable(); +} + +static jvalue unimplementedDatumCoercer(Type t, Datum d) +{ + ereport(ERROR, + (errmsg("no datum coercer registered for type oid %ud", t->typeId))); + pg_unreachable(); +} + +static Datum unimplementedObjectCoercer(Type t, jobject o) +{ + ereport(ERROR, + (errmsg("no object coercer registered for type oid %ud", t->typeId))); + pg_unreachable(); } /* @@ -789,10 +1063,12 @@ void Type_initialize(void) */ TypeClass TypeClass_alloc(const char* typeName) { - return TypeClass_alloc2(typeName, sizeof(struct TypeClass_), sizeof(struct Type_)); + return TypeClass_alloc2( + typeName, sizeof(struct TypeClass_), sizeof(struct Type_)); } -TypeClass TypeClass_alloc2(const char* typeName, Size classSize, Size instanceSize) +TypeClass TypeClass_alloc2( + const char* typeName, Size classSize, Size instanceSize) { TypeClass self = (TypeClass)MemoryContextAlloc(TopMemoryContext, classSize); PgObjectClass_init((PgObjectClass)self, typeName, instanceSize, 0); @@ -800,18 +1076,14 @@ TypeClass TypeClass_alloc2(const char* typeName, Size classSize, Size instanceSi self->javaTypeName = ""; self->javaClass = 0; self->canReplaceType = _Type_canReplaceType; - self->coerceDatum = (DatumCoercer)_PgObject_pureVirtualCalled; - self->coerceObject = (ObjectCoercer)_PgObject_pureVirtualCalled; + self->coerceDatum = unimplementedDatumCoercer; + self->coerceObject = unimplementedObjectCoercer; self->createArrayType = _Type_createArrayType; self->invoke = _Type_invoke; - self->getSRFProducer = _Type_getSRFProducer; self->getSRFCollector = _Type_getSRFCollector; - self->hasNextSRF = _Type_hasNextSRF; - self->nextSRF = _Type_nextSRF; - self->closeSRF = _Type_closeSRF; + self->datumFromSRF = _Type_datumFromSRF; self->getTupleDesc = _Type_getTupleDesc; self->getJNISignature = _Type_getJNISignature; - self->getJNIReturnSignature = _Type_getJNIReturnSignature; self->dynamic = false; self->outParameter = false; self->getRealType = _Type_getRealType; @@ -831,7 +1103,8 @@ Type TypeClass_allocInstance(TypeClass cls, Oid typeId) */ Type TypeClass_allocInstance2(TypeClass cls, Oid typeId, Form_pg_type pgType) { - Type t = (Type)PgObjectClass_allocInstance((PgObjectClass)(cls), TopMemoryContext); + Type t = (Type) + PgObjectClass_allocInstance((PgObjectClass)(cls), TopMemoryContext); t->typeId = typeId; t->arrayType = 0; t->elementType = 0; @@ -863,15 +1136,29 @@ Type TypeClass_allocInstance2(TypeClass cls, Oid typeId, Form_pg_type pgType) /* * Register this type. */ -static void _registerType(Oid typeId, const char* javaTypeName, Type type, TypeObtainer obtainer) +static void _registerType( + Oid typeId, const char* javaTypeName, Type type, TypeObtainer obtainer) { - CacheEntry ce = (CacheEntry)MemoryContextAlloc(TopMemoryContext, sizeof(CacheEntryData)); + CacheEntry ce = (CacheEntry) + MemoryContextAlloc(TopMemoryContext, sizeof(CacheEntryData)); ce->typeId = typeId; ce->type = type; ce->obtainer = obtainer; if(javaTypeName != 0) - HashMap_putByString(s_obtainerByJavaName, javaTypeName, ce); + { + /* + * The s_obtainerByJavaName cache is now keyed by Java name and an oid, + * rather than Java name alone, to address an issue affecting arrays. + * To avoid changing other behavior, the oid used in the hash key will + * be InvalidOid always, unless the Java name being registered is + * an array type and the caller has passed a valid oid. + */ + Oid keyOid = (NULL == strchr(javaTypeName, ']')) + ? 
InvalidOid + : typeId; + HashMap_putByStringOid(s_obtainerByJavaName, javaTypeName, keyOid, ce); + } if(typeId != InvalidOid && HashMap_getByOid(s_obtainerByOid, typeId) == 0) HashMap_putByOid(s_obtainerByOid, typeId, ce); @@ -879,10 +1166,11 @@ static void _registerType(Oid typeId, const char* javaTypeName, Type type, TypeO void Type_registerType(const char* javaTypeName, Type type) { - _registerType(type->typeId, javaTypeName, type, (TypeObtainer)_PgObject_pureVirtualCalled); + _registerType(type->typeId, javaTypeName, type, unimplementedTypeObtainer); } -void Type_registerType2(Oid typeId, const char* javaTypeName, TypeObtainer obtainer) +void Type_registerType2( + Oid typeId, const char* javaTypeName, TypeObtainer obtainer) { _registerType(typeId, javaTypeName, 0, obtainer); } diff --git a/pljava-so/src/main/c/type/UDT.c b/pljava-so/src/main/c/type/UDT.c index f67c4b24..16f87bf6 100644 --- a/pljava-so/src/main/c/type/UDT.c +++ b/pljava-so/src/main/c/type/UDT.c @@ -1,14 +1,21 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ #include #include #include +#include #include #include #include @@ -16,15 +23,44 @@ #include "pljava/type/UDT_priv.h" #include "pljava/type/String.h" #include "pljava/type/Tuple.h" +#include "pljava/Function.h" #include "pljava/Invocation.h" #include "pljava/SQLInputFromChunk.h" #include "pljava/SQLOutputToChunk.h" #include "pljava/SQLInputFromTuple.h" #include "pljava/SQLOutputToTuple.h" -#if PG_VERSION_NUM >= 90000 -#include -#endif +/* + * This code, as currently constituted, makes these assumptions that limit how + * Java can implement a (scalar) UDT: + * + * ASSUMPTION 1: If a Java UDT is declared with INTERNALLENGTH -2 (indicating + * that its internal representation is a variable-length sequence + * of nonzero bytes terminated by a zero byte), this code ASSUMES + * that the internal representation and the human-readable one + * (defined by typinput/typoutput) have to be identical ... an + * assumption apparently made because typinput/typoutput consume + * and produce the type cstring, whose internallength is also -2. + * + * ASSUMPTION 2: Whatever the UDT's internal representation is, its binary + * exchange representation (defined by typreceive/typsend) has to + * be identical to that. + * + * This list of assumptions could grow with further review of the code. + * + * Comments will be added below to tag code that embodies these assumptions. + * + * The current pattern for a scalar UDT has another difficulty: it relies on + * toString for producing the external representation, which is a general + * Object method declared to have nothing to throw. And the general expectation + * for toString is to produce some nice representation, but not necessarily + * always the literally re-parsable representation of something. And the scalar + * readSQL/writeSQL implementations impose a 16-bit limit on lengths of things. 
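ASSUMPTION 1 rests on what INTERNALLENGTH -2 means at the Datum level: the stored value is simply a pointer to a NUL-terminated byte string, the same shape as the cstring that typinput/typoutput traffic in, which is why the internal and external forms can be copied back and forth with no conversion at all. A sketch of that equivalence (nothing here is PL/Java-specific):

#include "postgres.h"

/*
 * For a type declared with INTERNALLENGTH = -2, the Datum is a pointer to
 * a NUL-terminated string, so the "internal" and "cstring" forms round-trip
 * with nothing more than a copy.
 */
static Datum
copy_minus2_datum(Datum value)
{
	char	   *internal = DatumGetCString(value);

	/* a pstrdup is all UDT_input/UDT_output need in this case */
	return CStringGetDatum(pstrdup(internal));
}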
+ * + * Idea for future: add another scalar UDT pattern using different methods, and + * without the current readSQL/writeSQL limitations. Continue to recognize the + * parse/toString pattern and provide the old behavior for compatibility. + */ #define pg_unreachable() abort() @@ -32,15 +68,21 @@ static jobject coerceScalarDatum(UDT self, Datum arg) { jobject result; int32 dataLen = Type_getLength((Type)self); - jclass javaClass = Type_getJavaClass((Type)self); - bool isJavaBasedScalar = 0 != self->toString; + bool isJavaBasedScalar = 0 != self->parse; if(dataLen == -2) { /* Data is a zero terminated string */ jstring jstr = String_createJavaStringFromNTS(DatumGetCString(arg)); - result = JNI_callStaticObjectMethod(javaClass, self->parse, jstr, self->sqlTypeName); + /* + * ASSUMPTION 1 is in play here. 'arg' here is a Datum holding this + * UDT's internal representation, and will now be passed to 'parse', the + * same method that is specified to parse a value from the human-used + * external representation. + */ + result = pljava_Function_udtParseInvoke( + self->parse, jstr, self->sqlTypeName); JNI_deleteLocalRef(jstr); } else @@ -75,11 +117,11 @@ static jobject coerceScalarDatum(UDT self, Datum arg) data = DatumGetPointer(arg); } } - result = JNI_newObject(javaClass, self->init); inputStream = SQLInputFromChunk_create(data, dataLen, isJavaBasedScalar); - JNI_callVoidMethod(result, self->readSQL, inputStream, self->sqlTypeName); + result = pljava_Function_udtReadInvoke( + self->readSQL, inputStream, self->sqlTypeName); SQLInputFromChunk_close(inputStream); } return result; @@ -87,13 +129,11 @@ static jobject coerceScalarDatum(UDT self, Datum arg) static jobject coerceTupleDatum(UDT udt, Datum arg) { - jobject result = JNI_newObject(Type_getJavaClass((Type)udt), udt->init); - Oid typeId = ((Type)udt)->typeId; - TupleDesc tupleDesc = lookup_rowtype_tupdesc_noerror(typeId, -1, true); + jobject result; jobject inputStream = - SQLInputFromTuple_create(DatumGetHeapTupleHeader(arg), tupleDesc); - ReleaseTupleDesc(tupleDesc); - JNI_callVoidMethod(result, udt->readSQL, inputStream, udt->sqlTypeName); + pljava_SQLInputFromTuple_create(DatumGetHeapTupleHeader(arg)); + result = pljava_Function_udtReadInvoke( + udt->readSQL, inputStream, udt->sqlTypeName); JNI_deleteLocalRef(inputStream); return result; } @@ -102,10 +142,15 @@ static Datum coerceScalarObject(UDT self, jobject value) { Datum result; int32 dataLen = Type_getLength((Type)self); - bool isJavaBasedScalar = 0 != self->toString; + bool isJavaBasedScalar = 0 != self->parse; if(dataLen == -2) { - jstring jstr = (jstring)JNI_callObjectMethod(value, self->toString); + /* + * ASSUMPTION 1 is in play here: the toString method, specified to + * produce the human-used external representation, is being called here + * to produce this UDT's internal representation. 
+ */ + jstring jstr = pljava_Function_udtToStringInvoke(self->toString, value); char* tmp = String_createNTS(jstr); result = CStringGetDatum(tmp); JNI_deleteLocalRef(jstr); @@ -130,7 +175,7 @@ static Datum coerceScalarObject(UDT self, jobject value) enlargeStringInfo(&buffer, dataLen); outputStream = SQLOutputToChunk_create(&buffer, isJavaBasedScalar); - JNI_callVoidMethod(value, self->writeSQL, outputStream); + pljava_Function_udtWriteInvoke(self->writeSQL, value, outputStream); SQLOutputToChunk_close(outputStream); if(dataLen < 0) @@ -143,8 +188,11 @@ static Datum coerceScalarObject(UDT self, jobject value) { ereport(ERROR, ( errcode(ERRCODE_CANNOT_COERCE), - errmsg("UDT for Oid %d produced image with incorrect size. Expected %d, was %d", - Type_getOid((Type)self), dataLen, buffer.len))); + errmsg( + "UDT for Oid %d produced image with incorrect size. " + "Expected %d, was %d", + Type_getOid((Type)self), dataLen, buffer.len) + )); } if (passByValue) { memset(&result, 0, SIZEOF_DATUM); @@ -173,7 +221,7 @@ static Datum coerceTupleObject(UDT self, jobject value) TupleDesc tupleDesc = lookup_rowtype_tupdesc_noerror(typeId, -1, true); jobject sqlOutput = SQLOutputToTuple_create(tupleDesc); ReleaseTupleDesc(tupleDesc); - JNI_callVoidMethod(value, self->writeSQL, sqlOutput); + pljava_Function_udtWriteInvoke(self->writeSQL, value, sqlOutput); tuple = SQLOutputToTuple_getTuple(sqlOutput); if(tuple != 0) result = HeapTupleGetDatum(tuple); @@ -252,12 +300,24 @@ Datum UDT_input(UDT udt, PG_FUNCTION_ARGS) if(Type_getLength((Type)udt) == -2) { + /* + * ASSUMPTION 1 is in play here. UDT_input is passed a cstring holding + * the human-used external representation, and, just because this UDT is + * also declared with length -2, that external representation is being + * copied directly here as the internal representation, without even + * invoking any of the UDT's code. + */ if(txt != 0) txt = pstrdup(txt); PG_RETURN_CSTRING(txt); } + /* + * Length != -2 so we do the expected: call parse to construct a Java object + * from the external representation, then _UDT_coerceObject to get the + * internal representation from the object. + */ jstr = String_createJavaStringFromNTS(txt); - obj = JNI_callStaticObjectMethod(Type_getJavaClass((Type)udt), udt->parse, jstr, udt->sqlTypeName); + obj = pljava_Function_udtParseInvoke(udt->parse, jstr, udt->sqlTypeName); JNI_deleteLocalRef(jstr); return _UDT_coerceObject((Type)udt, obj); @@ -276,12 +336,24 @@ Datum UDT_output(UDT udt, PG_FUNCTION_ARGS) { txt = PG_GETARG_CSTRING(0); if(txt != 0) + /* + * ASSUMPTION 1 is in play here. UDT_output returns a cstring to contain + * the human-used external representation, and, just because this UDT's + * internal representation is also declared with length -2, the internal + * is being copied directly as the external representation, without even + * invoking any of the UDT's code. + */ txt = pstrdup(txt); } else { + /* + * Length != -2 so we do the expected: call _UDT_coerceDatum to + * construct a Java object from the internal representation, then + * toString to get the external representation from the object. 
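UDT_input and UDT_output here (and UDT_receive/UDT_send below) are what ultimately back a PL/Java type's typinput, typoutput, typreceive and typsend support functions. For comparison, the bare C-level contract of a typinput function alone, in the style of the PostgreSQL documentation's complex example; complex_in and its format are illustrative, not part of PL/Java:

#include "postgres.h"
#include "fmgr.h"

typedef struct Complex
{
	double		x;
	double		y;
} Complex;

PG_FUNCTION_INFO_V1(complex_in);

/* typinput: cstring in, internal representation out */
Datum
complex_in(PG_FUNCTION_ARGS)
{
	char	   *str = PG_GETARG_CSTRING(0);
	Complex    *result = (Complex *) palloc(sizeof(Complex));

	if (sscanf(str, " ( %lf , %lf )", &result->x, &result->y) != 2)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
				 errmsg("invalid input syntax for complex: \"%s\"", str)));

	PG_RETURN_POINTER(result);
}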
+ */ jobject value = _UDT_coerceDatum((Type)udt, PG_GETARG_DATUM(0)).l; - jstring jstr = (jstring)JNI_callObjectMethod(value, udt->toString); + jstring jstr = pljava_Function_udtToStringInvoke(udt->toString, value); MemoryContext currCtx = Invocation_switchToUpperContext(); txt = String_createNTS(jstr); @@ -306,6 +378,10 @@ Datum UDT_receive(UDT udt, PG_FUNCTION_ARGS) noTypmodYet(udt, fcinfo); + /* + * ASSUMPTION 2 is in play here. The external byte stream is being received + * and directly stored as the internal representation of the type. + */ if(dataLen == -1) return bytearecv(fcinfo); @@ -328,6 +404,10 @@ Datum UDT_send(UDT udt, PG_FUNCTION_ARGS) errcode(ERRCODE_CANNOT_COERCE), errmsg("UDT with Oid %d is not scalar", Type_getOid((Type)udt)))); + /* + * ASSUMPTION 2 is in play here. The internal representation of the type + * is being transmitted directly as the external byte stream. + */ if(dataLen == -1) return byteasend(fcinfo); @@ -344,9 +424,12 @@ bool UDT_isScalar(UDT udt) return ! udt->hasTupleDesc; } -/* Make this datatype available to the postgres system. +/* Make this datatype available to the postgres system. The four ...MH arguments + * are passed to JNI_deleteLocalRef after being saved as global references. */ -UDT UDT_registerUDT(jclass clazz, Oid typeId, Form_pg_type pgType, bool hasTupleDesc, bool isJavaBasedScalar) +UDT UDT_registerUDT(jclass clazz, Oid typeId, Form_pg_type pgType, + bool hasTupleDesc, bool isJavaBasedScalar, jobject parseMH, jobject readMH, + jobject writeMH, jobject toStringMH) { jstring jcn; MemoryContext currCtx; @@ -370,12 +453,21 @@ UDT UDT_registerUDT(jclass clazz, Oid typeId, Form_pg_type pgType, bool hasTuple { ereport(ERROR, ( errcode(ERRCODE_CANNOT_COERCE), - errmsg("Attempt to register UDT with Oid %d failed. Oid appoints a non UDT type", typeId))); + errmsg( + "Attempt to register UDT with Oid %d failed. " + "Oid appoints a non UDT type", + typeId) + )); } + JNI_deleteLocalRef(parseMH); + JNI_deleteLocalRef(readMH); + JNI_deleteLocalRef(writeMH); + JNI_deleteLocalRef(toStringMH); return (UDT)existing; } - nspTup = PgObject_getValidTuple(NAMESPACEOID, pgType->typnamespace, "namespace"); + nspTup = PgObject_getValidTuple( + NAMESPACEOID, pgType->typnamespace, "namespace"); nspStruct = (Form_pg_namespace)GETSTRUCT(nspTup); /* Concatenate namespace + '.' + typename @@ -412,7 +504,8 @@ UDT UDT_registerUDT(jclass clazz, Oid typeId, Form_pg_type pgType, bool hasTuple *sp++ = ';'; *sp = 0; - udtClass = TypeClass_alloc2("type.UDT", sizeof(struct TypeClass_), sizeof(struct UDT_)); + udtClass = TypeClass_alloc2( + "type.UDT", sizeof(struct TypeClass_), sizeof(struct UDT_)); udtClass->JNISignature = classSignature; udtClass->javaTypeName = className; @@ -425,11 +518,10 @@ UDT UDT_registerUDT(jclass clazz, Oid typeId, Form_pg_type pgType, bool hasTuple udt->sqlTypeName = JNI_newGlobalRef(sqlTypeName); JNI_deleteLocalRef(sqlTypeName); - udt->init = PgObject_getJavaMethod(clazz, "", "()V"); - if(isJavaBasedScalar) { - /* A scalar mapping that is implemented in Java will have the static method: + /* A scalar mapping that is implemented in Java (a BaseUDT, as declared + * in Java source annotations) will have the static method: * * T parse(String stringRep, String sqlTypeName); * @@ -437,29 +529,37 @@ UDT UDT_registerUDT(jclass clazz, Oid typeId, Form_pg_type pgType, bool hasTuple * * String toString(); * - * instance method. A pure mapping (i.e. no Java I/O methods) will not have - * this. + * instance method. A MappedUDT (i.e. 
no Java I/O methods) will not + * have them. */ - udt->toString = PgObject_getJavaMethod(clazz, "toString", "()Ljava/lang/String;"); /* The parse method is a static method on the class with the signature * (Ljava/lang/String;Ljava/lang/String;) */ - sp = palloc(signatureLen + 40); - strcpy(sp, "(Ljava/lang/String;Ljava/lang/String;)"); - strcpy(sp + 38, classSignature); - udt->parse = PgObject_getStaticJavaMethod(clazz, "parse", sp); - pfree(sp); + if ( NULL == parseMH || NULL == toStringMH ) + elog(ERROR, + "PL/Java UDT with oid %u registered without both i/o handles", + typeId); + udt->parse = JNI_newGlobalRef(parseMH); + udt->toString = JNI_newGlobalRef(toStringMH); + JNI_deleteLocalRef(parseMH); + JNI_deleteLocalRef(toStringMH); } else { - udt->toString = 0; - udt->parse = 0; + udt->parse = NULL; + udt->toString = NULL; } udt->hasTupleDesc = hasTupleDesc; - udt->readSQL = PgObject_getJavaMethod(clazz, "readSQL", "(Ljava/sql/SQLInput;Ljava/lang/String;)V"); - udt->writeSQL = PgObject_getJavaMethod(clazz, "writeSQL", "(Ljava/sql/SQLOutput;)V"); + if ( NULL == readMH || NULL == writeMH ) + elog(ERROR, + "PL/Java UDT with oid %u registered without both r/w handles", + typeId); + udt->readSQL = JNI_newGlobalRef(readMH); + udt->writeSQL = JNI_newGlobalRef(writeMH); + JNI_deleteLocalRef(readMH); + JNI_deleteLocalRef(writeMH); Type_registerType(className, (Type)udt); return udt; } diff --git a/pljava-so/src/main/c/type/Void.c b/pljava-so/src/main/c/type/Void.c index 6205e68b..0b6ad627 100644 --- a/pljava-so/src/main/c/type/Void.c +++ b/pljava-so/src/main/c/type/Void.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include #include @@ -17,9 +21,9 @@ /* * void primitive type. */ -static Datum _void_invoke(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS) +static Datum _void_invoke(Type self, Function fn, PG_FUNCTION_ARGS) { - JNI_callStaticVoidMethodA(cls, method, args); + pljava_Function_voidInvoke(fn); fcinfo->isnull = true; return 0; } diff --git a/pljava-so/src/main/c/type/byte_array.c b/pljava-so/src/main/c/type/byte_array.c index 5b2d3efb..91e3103a 100644 --- a/pljava-so/src/main/c/type/byte_array.c +++ b/pljava-so/src/main/c/type/byte_array.c @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #include "pljava/Exception.h" #include "pljava/type/Type_priv.h" diff --git a/fixes/gcj/java_sql_Types.h b/pljava-so/src/main/include/fallback/jdbc/java_sql_Types.h similarity index 61% rename from fixes/gcj/java_sql_Types.h rename to pljava-so/src/main/include/fallback/jdbc/java_sql_Types.h index addae231..2788899d 100644 --- a/fixes/gcj/java_sql_Types.h +++ b/pljava-so/src/main/include/fallback/jdbc/java_sql_Types.h @@ -1,73 +1,101 @@ -/* DO NOT EDIT THIS FILE - it is machine generated */ -#include -/* Header for class java_sql_Types */ - -#ifndef _Included_java_sql_Types -#define _Included_java_sql_Types -#ifdef __cplusplus -extern "C" { -#endif -#undef java_sql_Types_BIT -#define java_sql_Types_BIT -7L -#undef java_sql_Types_TINYINT -#define java_sql_Types_TINYINT -6L -#undef java_sql_Types_SMALLINT -#define java_sql_Types_SMALLINT 5L -#undef java_sql_Types_INTEGER -#define java_sql_Types_INTEGER 4L -#undef java_sql_Types_BIGINT -#define java_sql_Types_BIGINT -5L -#undef java_sql_Types_FLOAT -#define java_sql_Types_FLOAT 6L -#undef java_sql_Types_REAL -#define java_sql_Types_REAL 7L -#undef java_sql_Types_DOUBLE -#define java_sql_Types_DOUBLE 8L -#undef java_sql_Types_NUMERIC -#define java_sql_Types_NUMERIC 2L -#undef java_sql_Types_DECIMAL -#define java_sql_Types_DECIMAL 3L -#undef java_sql_Types_CHAR -#define java_sql_Types_CHAR 1L -#undef java_sql_Types_VARCHAR -#define java_sql_Types_VARCHAR 12L -#undef java_sql_Types_LONGVARCHAR -#define java_sql_Types_LONGVARCHAR -1L -#undef java_sql_Types_DATE -#define java_sql_Types_DATE 91L -#undef java_sql_Types_TIME -#define java_sql_Types_TIME 92L -#undef java_sql_Types_TIMESTAMP -#define java_sql_Types_TIMESTAMP 93L -#undef java_sql_Types_BINARY -#define java_sql_Types_BINARY -2L -#undef java_sql_Types_VARBINARY -#define java_sql_Types_VARBINARY -3L -#undef java_sql_Types_LONGVARBINARY -#define java_sql_Types_LONGVARBINARY -4L -#undef java_sql_Types_NULL -#define java_sql_Types_NULL 0L -#undef java_sql_Types_OTHER -#define java_sql_Types_OTHER 1111L -#undef java_sql_Types_JAVA_OBJECT -#define java_sql_Types_JAVA_OBJECT 2000L -#undef java_sql_Types_DISTINCT -#define java_sql_Types_DISTINCT 2001L -#undef java_sql_Types_STRUCT -#define java_sql_Types_STRUCT 2002L -#undef java_sql_Types_ARRAY -#define java_sql_Types_ARRAY 2003L -#undef java_sql_Types_BLOB -#define java_sql_Types_BLOB 2004L -#undef java_sql_Types_CLOB -#define java_sql_Types_CLOB 2005L -#undef java_sql_Types_REF -#define java_sql_Types_REF 2006L -#undef java_sql_Types_DATALINK -#define java_sql_Types_DATALINK 70L -#undef java_sql_Types_BOOLEAN -#define java_sql_Types_BOOLEAN 16L -#ifdef __cplusplus -} -#endif -#endif +/* + * FALLBACK COPY of machine-generated file from Java 8. + * This file can be generated from the java.sql.Types class using the javah + * utility present in JDK versions through 9. Java 10 eliminates the javah + * utility, in favor of the -h option to javac, leaving no way to h a class, + * like java.sql.Types, for which the source is not present. Hence, when + * building on Java 10 or later, this fallback file will be used. 
As of Java 12, + * there have been no changes to these constants since Java 8, and this file is + * only needed by one PL/Java source file (type/Oid.c) that will probably go + * away soon. Therefore, no more elaborate workaround seems necessary. + */ +#include +/* Header for class java_sql_Types */ + +#ifndef _Included_java_sql_Types +#define _Included_java_sql_Types +#ifdef __cplusplus +extern "C" { +#endif +#undef java_sql_Types_BIT +#define java_sql_Types_BIT -7L +#undef java_sql_Types_TINYINT +#define java_sql_Types_TINYINT -6L +#undef java_sql_Types_SMALLINT +#define java_sql_Types_SMALLINT 5L +#undef java_sql_Types_INTEGER +#define java_sql_Types_INTEGER 4L +#undef java_sql_Types_BIGINT +#define java_sql_Types_BIGINT -5L +#undef java_sql_Types_FLOAT +#define java_sql_Types_FLOAT 6L +#undef java_sql_Types_REAL +#define java_sql_Types_REAL 7L +#undef java_sql_Types_DOUBLE +#define java_sql_Types_DOUBLE 8L +#undef java_sql_Types_NUMERIC +#define java_sql_Types_NUMERIC 2L +#undef java_sql_Types_DECIMAL +#define java_sql_Types_DECIMAL 3L +#undef java_sql_Types_CHAR +#define java_sql_Types_CHAR 1L +#undef java_sql_Types_VARCHAR +#define java_sql_Types_VARCHAR 12L +#undef java_sql_Types_LONGVARCHAR +#define java_sql_Types_LONGVARCHAR -1L +#undef java_sql_Types_DATE +#define java_sql_Types_DATE 91L +#undef java_sql_Types_TIME +#define java_sql_Types_TIME 92L +#undef java_sql_Types_TIMESTAMP +#define java_sql_Types_TIMESTAMP 93L +#undef java_sql_Types_BINARY +#define java_sql_Types_BINARY -2L +#undef java_sql_Types_VARBINARY +#define java_sql_Types_VARBINARY -3L +#undef java_sql_Types_LONGVARBINARY +#define java_sql_Types_LONGVARBINARY -4L +#undef java_sql_Types_NULL +#define java_sql_Types_NULL 0L +#undef java_sql_Types_OTHER +#define java_sql_Types_OTHER 1111L +#undef java_sql_Types_JAVA_OBJECT +#define java_sql_Types_JAVA_OBJECT 2000L +#undef java_sql_Types_DISTINCT +#define java_sql_Types_DISTINCT 2001L +#undef java_sql_Types_STRUCT +#define java_sql_Types_STRUCT 2002L +#undef java_sql_Types_ARRAY +#define java_sql_Types_ARRAY 2003L +#undef java_sql_Types_BLOB +#define java_sql_Types_BLOB 2004L +#undef java_sql_Types_CLOB +#define java_sql_Types_CLOB 2005L +#undef java_sql_Types_REF +#define java_sql_Types_REF 2006L +#undef java_sql_Types_DATALINK +#define java_sql_Types_DATALINK 70L +#undef java_sql_Types_BOOLEAN +#define java_sql_Types_BOOLEAN 16L +#undef java_sql_Types_ROWID +#define java_sql_Types_ROWID -8L +#undef java_sql_Types_NCHAR +#define java_sql_Types_NCHAR -15L +#undef java_sql_Types_NVARCHAR +#define java_sql_Types_NVARCHAR -9L +#undef java_sql_Types_LONGNVARCHAR +#define java_sql_Types_LONGNVARCHAR -16L +#undef java_sql_Types_NCLOB +#define java_sql_Types_NCLOB 2011L +#undef java_sql_Types_SQLXML +#define java_sql_Types_SQLXML 2009L +#undef java_sql_Types_REF_CURSOR +#define java_sql_Types_REF_CURSOR 2012L +#undef java_sql_Types_TIME_WITH_TIMEZONE +#define java_sql_Types_TIME_WITH_TIMEZONE 2013L +#undef java_sql_Types_TIMESTAMP_WITH_TIMEZONE +#define java_sql_Types_TIMESTAMP_WITH_TIMEZONE 2014L +#ifdef __cplusplus +} +#endif +#endif diff --git a/pljava-so/src/main/include/pljava/Backend.h b/pljava-so/src/main/include/pljava/Backend.h index 3c21897a..e0e8ca30 100644 --- a/pljava-so/src/main/include/pljava/Backend.h +++ b/pljava-so/src/main/include/pljava/Backend.h @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * 
http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB - Thomas Hallgren + * PostgreSQL Global Development Group + * Chapman Flack */ #ifndef __pljava_Backend_h #define __pljava_Backend_h @@ -21,30 +26,34 @@ extern "C" { * * @author Thomas Hallgren *****************************************************************/ -extern bool integerDateTimes; +#ifndef PLJAVA_SO_VERSION +#error "PLJAVA_SO_VERSION needs to be defined to compile this file." +#else +#define SO_VERSION_STRING CppAsString2(PLJAVA_SO_VERSION) +#endif -void Backend_setJavaSecurity(bool trusted); +#if PG_VERSION_NUM < 100000 +extern bool integerDateTimes; +#endif int Backend_setJavaLogLevel(int logLevel); +/* + * Called at the ends of committing transactions to emit a warning about future + * JEP 411 impacts, at most once per session, if any PL/Java functions were + * declared or redeclared in the transaction, or if PL/Java was installed or + * upgraded. Also called from InstallHelper, if pg_upgrade is happening. Yes, + * this is a bit tangled. The tracking of function declaration and + * install/upgrade is encapsulated in Backend.c. If isCommit is false, + * no warning is emitted, and the tracking bit is reset. + */ +void Backend_warnJEP411(bool isCommit); + #ifdef PG_GETCONFIGOPTION #error The macro PG_GETCONFIGOPTION needs to be renamed. #endif -/* - * PG_VERSION_NUM >= 80400 is for GPDB5 compatible - * Change PG_VERSION_NUM >= 90200 (from 90100) for master - * branch compatible - */ -#if PG_VERSION_NUM >= 90200 #define PG_GETCONFIGOPTION(key) GetConfigOption(key, false, true) -#elif PG_VERSION_NUM >= 90000 -#define PG_GETCONFIGOPTION(key) GetConfigOption(key, true) -#elif PG_VERSION_NUM >= 80400 -#define PG_GETCONFIGOPTION(key) GetConfigOption(key, true) -#else -#define PG_GETCONFIGOPTION(key) GetConfigOption(key) -#endif #ifdef __cplusplus } diff --git a/pljava-so/src/main/include/pljava/DualState.h b/pljava-so/src/main/include/pljava/DualState.h new file mode 100644 index 00000000..64e111c7 --- /dev/null +++ b/pljava-so/src/main/include/pljava/DualState.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +#ifndef __pljava_DualState_h +#define __pljava_DualState_h + +#include +#include + +#include "pljava/pljava.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern jobject pljava_DualState_key(void); + +extern void pljava_DualState_cleanEnqueuedInstances(void); + +extern void pljava_DualState_initialize(void); + +extern void pljava_DualState_unregister(void); + +extern void pljava_DualState_nativeRelease(void *); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/pljava-so/src/main/include/pljava/Exception.h b/pljava-so/src/main/include/pljava/Exception.h index e9b10a9e..048ef214 100644 --- a/pljava-so/src/main/include/pljava/Exception.h +++ b/pljava-so/src/main/include/pljava/Exception.h @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -11,14 +17,6 @@ #include "pljava/PgObject.h" -#if PG_VERSION_NUM < 90500 -#ifdef __GNUC__ -#define pg_attribute_printf(f,a) __attribute__((format(printf, f, a))) -#else -#define pg_attribute_printf(f,a) -#endif -#endif - #ifdef __cplusplus extern "C" { #endif @@ -31,7 +29,15 @@ extern "C" { *******************************************************************/ /* - * Trows an UnsupportedOperationException informing the caller that the + * Tests whether ex is an instance of UnhandledPGException, an SQLException + * subclass that is created when an attempted call into PostgreSQL internals + * cannot be made because of an earlier unhandled ServerException. + * An UnhandledPGException will have, as its cause, the earlier ServerException. + */ +extern bool Exception_isPGUnhandled(jthrowable ex); + +/* + * Throws an UnsupportedOperationException informing the caller that the * requested feature doesn't exist in the current version, it was introduced * starting with the intro version. */ @@ -67,11 +73,19 @@ extern void Exception_throwSPI(const char* function, int errCode); /* * This method will raise a Java ServerException based on an ErrorData obtained - * by a call to CopyErrorData. It will NOT do a longjmp. It's intended use is + * by a call to CopyErrorData. It will NOT do a longjmp. Its intended use is * in PG_CATCH clauses. */ extern void Exception_throw_ERROR(const char* function); +/* + * This method will raise a Java UnhandledPGException based on a ServerException + * that has been stored at some earlier time and not yet resolved (as by + * a rollback). Its intended use is from beginNative in JNICalls when + * errorOccurred is found to be true. + */ +extern void Exception_throw_unhandled(void); + /* * Throw an exception indicating that wanted member could not be * found. This is an ereport(ERROR...) 
so theres' no return from @@ -79,6 +93,9 @@ extern void Exception_throw_ERROR(const char* function); */ extern void Exception_throwMemberError(const char* memberName, const char* signature, bool isMethod, bool isStatic); +extern jclass NoSuchFieldError_class; +extern jclass NoSuchMethodError_class; + #ifdef __cplusplus } #endif diff --git a/pljava-so/src/main/include/pljava/Function.h b/pljava-so/src/main/include/pljava/Function.h index 5f3b27dc..005d0e5f 100644 --- a/pljava-so/src/main/include/pljava/Function.h +++ b/pljava-so/src/main/include/pljava/Function.h @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2021 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack */ #ifndef __pljava_Function_h #define __pljava_Function_h @@ -39,32 +43,102 @@ extern "C" { extern void Function_clearFunctionCache(void); /* - * Get a Function using a function Oid. If the function is not found, one - * will be created based on the class and method name denoted in the "AS" - * clause, the parameter types, and the return value of the function - * description. If "isTrigger" is set to true, the parameter type and - * return value of the function will be fixed to: - * - * org.postgresql.pljava.Tuple (org.postgresql.pljava.TriggerData td) + * Determine whether the type represented by typeId is declared as a + * "Java-based scalar" a/k/a BaseUDT and, if so, return a freshly-registered + * UDT Type for it; otherwise return NULL. + */ +extern Type Function_checkTypeBaseUDT(Oid typeId, Form_pg_type typeStruct); + +/* + * First translate a function Oid to a Function (looking it up according to the + * trusted, forTrigger, forValidator, and checkBody parameters), and then + * (unless forValidator is true) invoke it: i.e. coerce the parameters, call the + * java method, and coerce the return value back to a Datum. The return-value + * coercion is handled by a convention where this call will delegate to the Type + * representing the SQL return type. That will call back on one of the flavors + * of fooInvoke below corresponding to the return type of the Java method, and + * then coerce that to the intended SQL type. + * + * If forValidator is true, NULL may be passed in the PG_FUNCTION_ARGS position. + * and NULL is returned immediately on successful validation. + */ +extern Datum Function_invoke( + Oid funcoid, + bool trusted, bool forTrigger, bool forValidator, bool checkBody, + PG_FUNCTION_ARGS); + +/* + * Most slots in the parameter area are set directly in invoke() or + * invokeTrigger() above. The only caller of this is Composite_invoke, which + * needs to set one parameter (always the last one, and a reference type). + * So this function, though with an API that could be general, for now only + * handles the case where index is -1 and the last parameter has reference type. + */ +extern void pljava_Function_setParameter(Function self, int idx, jvalue val); + +/* + * Not intended for any caller other than Invocation_popInvocation. 
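/*
 * A minimal illustrative sketch of the Function_invoke entry point described
 * above: a call handler resolves the function Oid from fcinfo and delegates.
 * The handler name and the trusted/checkBody choices are assumptions for the
 * example.
 */
#include "postgres.h"
#include "fmgr.h"
#include "commands/trigger.h"
#include "pljava/Function.h"

PG_FUNCTION_INFO_V1(example_call_handler);

Datum
example_call_handler(PG_FUNCTION_ARGS)
{
	Oid  funcOid    = fcinfo->flinfo->fn_oid;
	bool forTrigger = CALLED_AS_TRIGGER(fcinfo);

	/* trusted language, not a validator call, no body check requested */
	return Function_invoke(funcOid, true, forTrigger, false, false, fcinfo);
}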
+ * 'heavy' indicates that the heavy form of parameter-frame saving has been used + * and must be undone. */ -extern Function Function_getFunction(PG_FUNCTION_ARGS); +extern void pljava_Function_popFrame(bool heavy); -extern Type Function_checkTypeUDT(Oid typeId, Form_pg_type typeStruct); +/* + * These actually invoke a target Java method (returning, respectively, a + * reference type or one of the Java primitive types). The arguments to the + * method have already been coerced, and segregated into reference types (stored + * in the Object array references) and primitives (stored in a C array of jvalue + * covered by a direct byte buffer, primitives). + */ +extern jobject pljava_Function_refInvoke(Function self); +extern void pljava_Function_voidInvoke(Function self); +extern jboolean pljava_Function_booleanInvoke(Function self); +extern jbyte pljava_Function_byteInvoke(Function self); +extern jshort pljava_Function_shortInvoke(Function self); +extern jchar pljava_Function_charInvoke(Function self); +extern jint pljava_Function_intInvoke(Function self); +extern jfloat pljava_Function_floatInvoke(Function self); +extern jlong pljava_Function_longInvoke(Function self); +extern jdouble pljava_Function_doubleInvoke(Function self); /* - * Invoke a function, i.e. coerce the parameters, call the java method, and - * coerce the return value back to a Datum. + * Call the invocable that was returned by the invocation of a set-returning + * user function that observes the SFRM_ValuePerCall protocol. Call with + * close == JNI_FALSE to retrieve the next row if any, JNI_TRUE when done (which + * may be before all rows have been retrieved). Returns JNI_TRUE/JNI_FALSE to + * indicate whether a row was retrieved, AND puts a value (or null) in *result. */ -extern Datum Function_invoke(Function self, PG_FUNCTION_ARGS); +extern jboolean pljava_Function_vpcInvoke( + Function self, jobject invocable, jobject rowcollect, jlong call_cntr, + jboolean close, jobject *result); /* - * Invoke a trigger. Wrap the TriggerData in org.postgresql.pljava.TriggerData - * object, make the call, and unwrap the resulting Tuple. + * These are exposed so they can be called back from type/UDT.c. + * There is one for each flavor of UDT supporting function. */ -extern Datum Function_invokeTrigger(Function self, PG_FUNCTION_ARGS); +extern void pljava_Function_udtWriteInvoke( + jobject invocable, jobject value, jobject stream); +extern jstring pljava_Function_udtToStringInvoke( + jobject invocable, jobject value); +extern jobject pljava_Function_udtReadInvoke( + jobject invocable, jobject stream, jstring typeName); +extern jobject pljava_Function_udtParseInvoke( + jobject invocable, jstring stringRep, jstring typeName); /* - * Returns the Type Map that is associated with the function + * These are exposed so they can be called back from type/Type.c when it is + * registering a MappedUDT. A MappedUDT has these two support functions, + * but never the parse/toString ones a BaseUDT has. + */ +extern jobject pljava_Function_udtWriteHandle( + jclass clazz, char *langName, bool trusted); +extern jobject pljava_Function_udtReadHandle( + jclass clazz, char *langName, bool trusted); + +/* + * Returns the type map that is held by the function's schema loader (the + * initiating loader that was used when the function was resolved). It is a map + * from Java Oid objects to Class objects, as resolved by that loader. 
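/*
 * A minimal illustrative sketch of the SFRM_ValuePerCall protocol described
 * above for pljava_Function_vpcInvoke. The function name, the row handling,
 * and the placement of the final close call are assumptions for the example.
 */
#include "pljava/Function.h"

static void
example_value_per_call(Function fn, jobject invocable, jobject rowcollect)
{
	jobject row = NULL;
	jlong call_cntr = 0;

	/* close == JNI_FALSE: fetch rows until none is returned */
	while ( pljava_Function_vpcInvoke(fn, invocable, rowcollect, call_cntr,
				JNI_FALSE, &row) )
	{
		/* hand 'row' (which may be null) to the SRF machinery here */
		++ call_cntr;
	}

	/* close == JNI_TRUE: done (this form may also be used to stop early) */
	pljava_Function_vpcInvoke(fn, invocable, rowcollect, call_cntr,
		JNI_TRUE, &row);
}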
*/ extern jobject Function_getTypeMap(Function self); @@ -74,11 +148,32 @@ extern jobject Function_getTypeMap(Function self); */ extern bool Function_isCurrentReadOnly(void); +/* + * Return a global reference to the initiating (schema) class loader used + * to load the currently-executing function. + * + * Invocation_getTypeMap is equivalent to calling this and then JNI-invoking + * getTypeMap on the returned loader (cast to PL/Java's loader subclass). + */ +extern jobject Function_currentLoader(void); + /* * A nameless Function singleton with the property ! isCurrentReadOnly() */ extern Function Function_INIT_WRITER; +/* + * A distinguished single JNI global classloader reference, to be used as + * a "no loader" sentinel value in context classloader management (as Java + * considers null to be a meaningful setContextClassLoader argument). Should any + * logic error lead to Java trying to use this object as a loader, null pointer + * exceptions will result, rather than the arbitrary behavior possible if using + * an arbitrary value or object of the wrong type. + * + * As this is a global reference and the only one, it can be compared with ==. + */ +extern jobject pljava_Function_NO_LOADER; + #ifdef __cplusplus } #endif diff --git a/pljava-so/src/main/include/pljava/HashMap.h b/pljava-so/src/main/include/pljava/HashMap.h index 9954149c..7ee27e23 100644 --- a/pljava-so/src/main/include/pljava/HashMap.h +++ b/pljava-so/src/main/include/pljava/HashMap.h @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #ifndef __pljava_HashMap_h #define __pljava_HashMap_h @@ -76,6 +80,12 @@ extern void* HashMap_getByString(HashMap self, const char* key); */ extern void* HashMap_getByOid(HashMap self, Oid key); +/* + * Returns the object stored using the given null + * terminated string and Oid, or NULL if no such object can be found. + */ +extern void* HashMap_getByStringOid(HashMap self, const char* string, Oid oid); + /* * Returns the object stored using the given Opaque pointer or NULL * if no such object can be found. @@ -111,6 +121,14 @@ extern void* HashMap_putByOid(HashMap self, Oid key, void* value); */ extern void* HashMap_putByOpaque(HashMap self, void* key, void* value); +/* + * Stores the given value under the given null terminated string and oid. If + * an old value was stored using this key, the old value is returned. + * Otherwise this method returns NULL. + */ +extern void* HashMap_putByStringOid(HashMap self, const char* string, Oid oid, + void* value); + /* * Removes the value stored under the given key. The the old value * (if any) is returned. @@ -135,6 +153,12 @@ extern void* HashMap_removeByOid(HashMap self, Oid key); */ extern void* HashMap_removeByOpaque(HashMap self, void* key); +/* + * Removes the value stored under the given key. The old value + * (if any) is returned. The key associated with the value is deleted.
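/*
 * A minimal illustrative sketch of the string+Oid keyed entry points declared
 * in this file; the cache, name, and value arguments are assumptions for the
 * example.
 */
#include "pljava/HashMap.h"

static void
example_string_oid_cache(HashMap cache, const char *name, Oid typeId,
	void *newValue)
{
	void *previous = HashMap_putByStringOid(cache, name, typeId, newValue);
	void *found    = HashMap_getByStringOid(cache, name, typeId);

	(void) previous;    /* NULL unless an earlier value was replaced */
	(void) found;       /* newValue, looked up again by the same key */

	/* drop the entry; the key stored with the value is deleted as well */
	HashMap_removeByStringOid(cache, name, typeId);
}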
+ */ +extern void* HashMap_removeByStringOid(HashMap self, const char* str, Oid oid); + /* * Returns the number of entries currently in the HashMap */ @@ -161,8 +185,8 @@ extern void* Entry_setValue(Entry self, void* value); extern HashKey Entry_getKey(Entry self); /************************************************************* - * The HashKey is an abstract class. Currently, three different - * implementations are used. Oid, Opaque (void*), and String. + * The HashKey is an abstract class. Currently, four different + * implementations are used. Oid, Opaque (void*), String, and StringOid. *************************************************************/ /* diff --git a/pljava-so/src/main/include/pljava/HashMap_priv.h b/pljava-so/src/main/include/pljava/HashMap_priv.h index f7c2fba8..0cc8cf42 100644 --- a/pljava-so/src/main/include/pljava/HashMap_priv.h +++ b/pljava-so/src/main/include/pljava/HashMap_priv.h @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #ifndef __pljava_HashMap_priv_h #define __pljava_HashMap_priv_h @@ -89,6 +93,17 @@ struct StringKey_ }; typedef struct StringKey_* StringKey; +/* + * HashKey for a string and an Oid. + */ +struct StringOidKey_ +{ + struct StringKey_ StringKey_extension; + + Oid oid; +}; +typedef struct StringOidKey_* StringOidKey; + /* * Default clone method. Allocates a new instance in the given MemoryContext * and copies the orginial HashKey using memcpy and the size stated in the diff --git a/pljava-so/src/main/include/pljava/InstallHelper.h b/pljava-so/src/main/include/pljava/InstallHelper.h index 8fee6ccc..65acdc61 100644 --- a/pljava-so/src/main/include/pljava/InstallHelper.h +++ b/pljava-so/src/main/include/pljava/InstallHelper.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2021 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -18,6 +18,15 @@ * included only in InstallHelper.c and will not clutter most other code. */ +/* + * CppAsString2 first appears in PG8.4. Once the compatibility target reaches + * 8.4, this fallback will not be needed. Used in InstallHelper and Backend, + * both of which include this file. + */ +#ifndef CppAsString2 +#define CppAsString2(x) CppAsString(x) +#endif + /* * The path from which this library is being loaded, which is surprisingly * tricky to find (and wouldn't be, if PostgreSQL called _PG_init functions @@ -55,30 +64,38 @@ extern bool pljavaLoadingAsExtension; * isPLJavaFunction can use the stashed information to determine whether an * arbitrary function Oid is a function built on PL/Java, without relying on * assumptions about the language name, etc. + * + * It can return the language name and/or trusted flag if non-null pointers + * are supplied, as it will be looking up the language anyway. 
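/*
 * A minimal illustrative sketch of the out-parameter form declared just below:
 * the language name and trusted flag are filled in when non-null pointers are
 * passed. The log message wording is an assumption for the example.
 */
#include "postgres.h"
#include "pljava/InstallHelper.h"

static void
example_report_language(Oid fn)
{
	char *langName = NULL;
	bool  trusted  = false;

	if ( InstallHelper_isPLJavaFunction(fn, &langName, &trusted) )
		elog(DEBUG1, "function %u uses PL/Java language \"%s\" (%strusted)",
			fn, langName, trusted ? "" : "un");
}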
*/ -extern char *pljavaFnOidToLibPath(Oid fn); +extern char *pljavaFnOidToLibPath(Oid fn, char **langName, bool *trusted); extern Oid pljavaTrustedOid, pljavaUntrustedOid; -extern bool InstallHelper_isPLJavaFunction(Oid fn); +extern bool InstallHelper_isPLJavaFunction( + Oid fn, char **langName, bool *trusted); /* * Return the name of the current database, from MyProcPort ... don't free it. + * In a background or autovacuum worker, there's no MyProcPort, and the name is + * found another way and strdup'd in TopMemoryContext. It'll keep; don't bother + * freeing it. */ -extern char *pljavaDbName(); +extern char *pljavaDbName(void); /* * Return the name of the cluster if it has been set (only possible in 9.5+), * or an empty string, never NULL. */ -extern char const *pljavaClusterName(); +extern char const *pljavaClusterName(void); /* - * Construct a default for pljava.classpath ($sharedir/pljava/pljava-$VER.jar) - * in pathbuf (which must have length at least MAXPGPATH), and return pathbuf, - * or NULL if the constructed path would not fit. + * Construct a default for pljava.module_path ($sharedir/pljava/pljava-$VER.jar + * and pljava-api-$VER.jar) in pathbuf (which must have length at least + * MAXPGPATH), and return pathbuf, or NULL if the constructed path would not + * fit. (pathbuf, pathSepChar). */ -extern char const *InstallHelper_defaultClassPath(char *); +extern char const *InstallHelper_defaultModulePath(char *, char); /* * Return true if in a 'viable' transaction (not aborted or abort pending). @@ -101,10 +118,39 @@ extern char const *InstallHelper_defaultClassPath(char *); * and disrupt the abort. The trickiest bit was finding available API to * recognize the ABORT_PENDING cases. */ -extern bool pljavaViableXact(); +extern bool pljavaViableXact(void); + +/* + * Backend's initsequencer needs to know whether it's being called in a 9.3+ + * background worker process, or during a pg_upgrade (in either case, the + * init sequence needs to be lazier). Those should both be simple tests of + * IsBackgroundWorker or IsBinaryUpgrade, except (wouldn't you know) for more + * version-specific Windows visibility issues, so the ugly details are in + * InstallHelper, and Backend just asks this nice function. + */ +extern bool InstallHelper_shouldDeferInit(void); -extern char *InstallHelper_hello(); +/* + * Emit a debug message as early as possible with the native code's version + * and build information. A nicer message is produced later by hello and + * includes both the native and Java versions, but that's too late if something + * goes wrong first. + */ +extern void InstallHelper_earlyHello(void); -extern void InstallHelper_groundwork(); +/* + * Perform early setup needed on every start (properties, security policy, etc.) + * and also construct and return a string of native code, Java code, and JVM + * version and build information, to be included in the "PL/Java loaded" + * message. + */ +extern char *InstallHelper_hello(void); + +/* + * Called only when the loading is directly due to CREATE EXTENSION or LOAD, and + * not simply to service a PL/Java function; checks for, and populates or brings + * up to date, as needed, the sqlj schema and its contents. 
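/*
 * A minimal illustrative sketch of InstallHelper_defaultModulePath, declared
 * above: the caller supplies a MAXPGPATH-sized buffer and a path separator
 * character. The ':' separator and the message wording are assumptions for
 * the example (the real separator is platform dependent).
 */
#include "postgres.h"
#include "pljava/InstallHelper.h"

static void
example_default_module_path(void)
{
	char pathbuf[MAXPGPATH];
	char const *defaultPath = InstallHelper_defaultModulePath(pathbuf, ':');

	if ( NULL == defaultPath )
		ereport(WARNING,
			(errmsg("default pljava.module_path would not fit in MAXPGPATH")));
	else
		elog(DEBUG1, "default pljava.module_path: %s", defaultPath);
}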
+ */ +extern void InstallHelper_groundwork(void); -extern void InstallHelper_initialize(); +extern void InstallHelper_initialize(void); diff --git a/pljava-so/src/main/include/pljava/Invocation.h b/pljava-so/src/main/include/pljava/Invocation.h index 7a67d3e3..28bebe41 100644 --- a/pljava-so/src/main/include/pljava/Invocation.h +++ b/pljava-so/src/main/include/pljava/Invocation.h @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2021 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -10,15 +16,15 @@ #define __pljava_Invocation_h #include +#if PG_VERSION_NUM >= 100000 +#include +#endif #include "pljava/pljava.h" #ifdef __cplusplus extern "C" { #endif -struct CallLocal_; -typedef struct CallLocal_ CallLocal; - struct Invocation_ { /** @@ -46,9 +52,29 @@ struct Invocation_ bool inExprContextCB; /** - * Set to true if the executing function is trusted + * The saved limits reserved in Function.c's static parameter frame, as a + * count of reference and primitive parameters combined in a short. + * FRAME_LIMITS_PUSHED is an otherwise invalid value used to record that the + * more heavyweight saving of the frame as a Java ParameterFrame instance + * has occurred. Otherwise, this value (and the primitive slot 0 value + * below) are simply restored when this Invocation is exited normally or + * exceptionally. */ - bool trusted; + jshort frameLimits; +#define FRAME_LIMITS_PUSHED ((jshort)-1) + + /** + * The saved value of the first primitive slot in Function's static + * parameter frame. Unless frameLimits above is FRAME_LIMITS_PUSHED, this + * value is simply restored when this Invocation is exited normally or + * exceptionally. + */ + jvalue primSlot0; + + /** + * The saved thread context classloader from before this invocation + */ + jobject savedLoader; /** * The currently executing Function. @@ -61,13 +87,16 @@ struct Invocation_ * be prevented until this flag is reset (by a rollback * of a savepoint or function exit). */ - bool errorOccured; + bool errorOccurred; +#if PG_VERSION_NUM >= 100000 /** - * List of call local structures that has been wrapped - * during this invocation. + * TriggerData pointer, if the function is being called as a trigger, + * so it can be passed to SPI_register_trigger_data if the function connects + * to SPI. 
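/*
 * A minimal illustrative sketch (PG 10+) of how the triggerData member
 * described above can be handed to SPI once the function connects; the
 * Invocation pointer parameter and the error wording are assumptions for the
 * example.
 */
#include "postgres.h"
#include "commands/trigger.h"
#include "executor/spi.h"
#include "pljava/Invocation.h"

static void
example_connect_spi(Invocation *inv)
{
	if ( SPI_OK_CONNECT != SPI_connect() )
		elog(ERROR, "SPI_connect failed");

#if PG_VERSION_NUM >= 100000
	if ( NULL != inv->triggerData &&
		SPI_OK_TD_REGISTER != SPI_register_trigger_data(inv->triggerData) )
		elog(ERROR, "SPI_register_trigger_data failed");
#endif
}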
*/ - CallLocal* callLocals; + TriggerData* triggerData; +#endif /** * The previous call context when nested function calls @@ -87,14 +116,17 @@ extern void Invocation_pushBootContext(Invocation* ctx); extern void Invocation_popBootContext(void); -extern void Invocation_pushInvocation(Invocation* ctx, bool trusted); +extern void Invocation_pushInvocation(Invocation* ctx); extern void Invocation_popInvocation(bool wasException); -extern jlong Invocation_createLocalWrapper(void* pointer); -extern void* Invocation_getWrappedPointer(jlong wrapper); -extern void Invocation_freeLocalWrapper(jlong wrapper); - +/* + * Return the type map held by the innermost executing PL/Java function's + * schema loader (the initiating loader that was used to resolve the function). + * The type map is a map from Java Oid objects to Class class objects, + * as resolved by that loader. This is effectively Function_currentLoader() + * followed by JNI-invoking getTypeMap on the loader, but cached to avoid JNI). + */ extern jobject Invocation_getTypeMap(void); /* @@ -107,6 +139,13 @@ extern jobject Invocation_getTypeMap(void); */ extern MemoryContext Invocation_switchToUpperContext(void); +/* + * Called only during Function's initialization to supply these values, making + * them cheap to access during pushInvocation/popInvocation, while still a bit + * more encapsulated than if they were made global. + */ +extern void pljava_Invocation_shareFrame(jvalue *slot0, jshort *limits); + #ifdef __cplusplus } #endif diff --git a/pljava-so/src/main/include/pljava/JNICalls.h b/pljava-so/src/main/include/pljava/JNICalls.h index a9252946..fe95b8c4 100644 --- a/pljava-so/src/main/include/pljava/JNICalls.h +++ b/pljava-so/src/main/include/pljava/JNICalls.h @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -43,6 +49,7 @@ extern jmethodID ServerException_init; extern jclass Class_class; extern jmethodID Class_getName; +extern jmethodID Class_getCanonicalName; extern jclass Throwable_class; extern jmethodID Throwable_getMessage; @@ -58,7 +65,37 @@ extern jmethodID SQLException_getSQLState; extern jclass UnsupportedOperationException_class; extern jmethodID UnsupportedOperationException_init; -extern jclass NoSuchMethodError_class; +/* + * Method called from Backend.c to set the thread policy. The first parameter + * indicates whether to throw an exception if a thread other than the main one + * tries to use BEGIN_NATIVE. The second indicates whether JNI calls should try + * to release the "threadlock" monitor when calling into Java and reacquire it + * on return. If false, the monitor will be held forever, blocking any other + * Java thread that tries to use the synchronized native methods. So, the + * combinations are: + * false, true: PL/Java's historical behavior: monitor is released/reacquired, + * other threads allowed into PG when the main thread is in Java. 
+ * true, true: Useful for checking whether application code has any other + * threads that try to enter PG; they will incur exceptions. + * true, false: Useful in production if all PG access is known to be done on + * the main thread only; other threads that try will simply block + * (JConsole can show them) rather than incurring exceptions; many + * monitor operations eliminated. + */ +extern void pljava_JNI_setThreadPolicy(bool,bool); + +/* + * Two specialized wrappers to reduce the overhead of multiple wrapped calls + * for a frequent sequence of operations. The threadInitialize method, called + * from Backend.c once the java_thread_pg_entry GUC setting is frozen in place, + * populates the function pointers with the appropriate implementations. + */ +extern void pljava_JNI_threadInitialize(bool manageLoader); +typedef void JNI_ContextLoaderUpdater(jobject loader); +typedef void JNI_ContextLoaderRestorer(void); + +extern JNI_ContextLoaderUpdater *JNI_loaderUpdater; +extern JNI_ContextLoaderRestorer *JNI_loaderRestorer; /* * A few very specialized JNI method-invocation wrappers, that do NOT do @@ -68,15 +105,23 @@ extern jclass NoSuchMethodError_class; * whole time. They are used in String.c for character set coding conversions, * which may frequently call Java methods that are never expected to have any * reason to block or reenter the backend. + * Also, they can be used with DualState and related objects, to be sure certain + * methods or constructors are called on a thread that holds the native lock. */ extern jobject JNI_callObjectMethodLocked(jobject object, jmethodID methodID, ...); extern jobject JNI_callObjectMethodLockedV(jobject object, jmethodID methodID, va_list args); extern jobject JNI_callStaticObjectMethodLocked(jclass clazz, jmethodID methodID, ...); extern jobject JNI_callStaticObjectMethodLockedV(jclass clazz, jmethodID methodID, va_list args); +extern void JNI_callStaticVoidMethodLocked(jclass clazz, jmethodID methodID, ...); +extern void JNI_callStaticVoidMethodLockedV(jclass clazz, jmethodID methodID, va_list args); extern jint JNI_callIntMethodLocked(jobject object, jmethodID methodID, ...); extern jint JNI_callIntMethodLockedV(jobject object, jmethodID methodID, va_list args); +extern jlong JNI_callLongMethodLocked(jobject object, jmethodID methodID, ...); +extern jlong JNI_callLongMethodLockedV(jobject object, jmethodID methodID, va_list args); extern void JNI_callVoidMethodLocked(jobject object, jmethodID methodID, ...); extern void JNI_callVoidMethodLockedV(jobject object, jmethodID methodID, va_list args); +extern jobject JNI_newObjectLocked(jclass clazz, jmethodID ctor, ...); +extern jobject JNI_newObjectLockedV(jclass clazz, jmethodID ctor, va_list args); /* * Misc JNIEnv mappings. See for more info. @@ -97,18 +142,32 @@ extern jobject JNI_callObjectMethod(jobject object, jmethodID methodID, ...
extern jobject JNI_callObjectMethodV(jobject object, jmethodID methodID, va_list args); extern jshort JNI_callShortMethod(jobject object, jmethodID methodID, ...); extern jshort JNI_callShortMethodV(jobject object, jmethodID methodID, va_list args); +extern jboolean JNI_callStaticBooleanMethod(jclass clazz, jmethodID methodID, ...); extern jboolean JNI_callStaticBooleanMethodA(jclass clazz, jmethodID methodID, jvalue* args); +extern jboolean JNI_callStaticBooleanMethodV(jclass clazz, jmethodID methodID, va_list args); +extern jbyte JNI_callStaticByteMethod(jclass clazz, jmethodID methodID, ...); extern jbyte JNI_callStaticByteMethodA(jclass clazz, jmethodID methodID, jvalue* args); +extern jbyte JNI_callStaticByteMethodV(jclass clazz, jmethodID methodID, va_list args); +extern jchar JNI_callStaticCharMethod(jclass clazz, jmethodID methodID, ...); +extern jchar JNI_callStaticCharMethodV(jclass clazz, jmethodID methodID, va_list args); +extern jdouble JNI_callStaticDoubleMethod(jclass clazz, jmethodID methodID, ...); extern jdouble JNI_callStaticDoubleMethodA(jclass clazz, jmethodID methodID, jvalue* args); +extern jdouble JNI_callStaticDoubleMethodV(jclass clazz, jmethodID methodID, va_list args); +extern jfloat JNI_callStaticFloatMethod(jclass clazz, jmethodID methodID, ...); extern jfloat JNI_callStaticFloatMethodA(jclass clazz, jmethodID methodID, jvalue* args); +extern jfloat JNI_callStaticFloatMethodV(jclass clazz, jmethodID methodID, va_list args); +extern jint JNI_callStaticIntMethod(jclass clazz, jmethodID methodID, ...); extern jint JNI_callStaticIntMethodA(jclass clazz, jmethodID methodID, jvalue* args); +extern jint JNI_callStaticIntMethodV(jclass clazz, jmethodID methodID, va_list args); extern jlong JNI_callStaticLongMethod(jclass clazz, jmethodID methodID, ...); extern jlong JNI_callStaticLongMethodA(jclass clazz, jmethodID methodID, jvalue* args); extern jlong JNI_callStaticLongMethodV(jclass clazz, jmethodID methodID, va_list args); extern jobject JNI_callStaticObjectMethod(jclass clazz, jmethodID methodID, ...); extern jobject JNI_callStaticObjectMethodA(jclass clazz, jmethodID methodID, jvalue* args); extern jobject JNI_callStaticObjectMethodV(jclass clazz, jmethodID methodID, va_list args); +extern jshort JNI_callStaticShortMethod(jclass clazz, jmethodID methodID, ...); extern jshort JNI_callStaticShortMethodA(jclass clazz, jmethodID methodID, jvalue* args); +extern jshort JNI_callStaticShortMethodV(jclass clazz, jmethodID methodID, va_list args); extern void JNI_callStaticVoidMethod(jclass clazz, jmethodID methodID, ...); extern void JNI_callStaticVoidMethodA(jclass clazz, jmethodID methodID, jvalue* args); extern void JNI_callStaticVoidMethodV(jclass clazz, jmethodID methodID, va_list args); @@ -122,6 +181,7 @@ extern jint JNI_destroyVM(JavaVM *vm); extern jboolean JNI_exceptionCheck(void); extern void JNI_exceptionClear(void); extern void JNI_exceptionDescribe(void); +extern void JNI_exceptionStacktraceAtLevel(jthrowable exh, int elevel); extern jthrowable JNI_exceptionOccurred(void); extern jclass JNI_findClass(const char* className); extern jsize JNI_getArrayLength(jarray array); @@ -130,6 +190,7 @@ extern void JNI_getByteArrayRegion(jbyteArray array, jsize start, jsize extern jboolean* JNI_getBooleanArrayElements(jbooleanArray array, jboolean* isCopy); extern void JNI_getBooleanArrayRegion(jbooleanArray array, jsize start, jsize len, jboolean* buf); extern jfieldID JNI_getFieldID(jclass clazz, const char* name, const char* sig); +extern jfieldID 
JNI_getFieldIDOrNull(jclass clazz, const char* name, const char* sig); extern jdouble* JNI_getDoubleArrayElements(jdoubleArray array, jboolean* isCopy); extern void JNI_getDoubleArrayRegion(jdoubleArray array, jsize start, jsize len, jdouble* buf); extern jfloat* JNI_getFloatArrayElements(jfloatArray array, jboolean* isCopy); @@ -148,6 +209,8 @@ extern void JNI_getShortArrayRegion(jshortArray array, jsize start, jsiz extern jfieldID JNI_getStaticFieldID(jclass clazz, const char* name, const char* sig); extern jmethodID JNI_getStaticMethodID(jclass clazz, const char* name, const char* sig); extern jmethodID JNI_getStaticMethodIDOrNull(jclass clazz, const char* name, const char* sig); +extern jboolean JNI_getStaticBooleanField(jclass clazz, jfieldID field); +extern jint JNI_getStaticIntField(jclass clazz, jfieldID field); extern jobject JNI_getStaticObjectField(jclass clazz, jfieldID field); extern const char* JNI_getStringUTFChars(jstring string, jboolean* isCopy); extern jboolean JNI_hasNullArrayElement(jobjectArray array); @@ -188,9 +251,11 @@ extern void JNI_setFloatArrayRegion(jfloatArray array, jsize start, jsiz extern void JNI_setIntArrayRegion(jintArray array, jsize start, jsize len, jint* buf); extern void JNI_setLongArrayRegion(jlongArray array, jsize start, jsize len, jlong* buf); extern void JNI_setShortArrayRegion(jshortArray array, jsize start, jsize len, jshort* buf); +extern void JNI_setIntField(jobject object, jfieldID field, jint value); extern void JNI_setLongField(jobject object, jfieldID field, jlong value); extern void JNI_setObjectArrayElement(jobjectArray array, jsize index, jobject value); extern void JNI_setThreadLock(jobject lockObject); +extern void JNI_setStaticObjectField(jclass clazz, jfieldID field, jobject value); extern jint JNI_throw(jthrowable obj); #ifdef __cplusplus diff --git a/pljava-so/src/main/include/pljava/PgObject.h b/pljava-so/src/main/include/pljava/PgObject.h index cd006219..fd18bce9 100644 --- a/pljava-so/src/main/include/pljava/PgObject.h +++ b/pljava-so/src/main/include/pljava/PgObject.h @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB - Thomas Hallgren + * Chapman Flack */ #ifndef __pljava_PgObject_h #define __pljava_PgObject_h @@ -31,9 +35,9 @@ struct PgObjectClass_; typedef struct PgObjectClass_* PgObjectClass; /* - * The effectiveClassPath is set at initialization time (in Backend.c) + * The effectiveModulePath is set at initialization time (in Backend.c) */ -extern const char* effectiveClassPath; +extern const char* effectiveModulePath; /* * Calles the virtual finalizer and deallocates memory occupided by the @@ -106,6 +110,11 @@ extern PgObjectClass PgObject_getClass(PgObject self); */ extern const char* PgObjectClass_getName(PgObjectClass self); +/* + * Returns the name of a Java class, as a palloc'd NTS. 
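/*
 * A minimal illustrative sketch of the declaration just below (assuming
 * postgres.h and this header are already included): the returned name is
 * palloc'd, so the caller may pfree it when done. The log message is an
 * assumption for the example.
 */
static void
example_log_class_name(jclass cls)
{
	char *name = PgObject_getClassName(cls);
	elog(DEBUG1, "working with Java class %s", name);
	pfree(name);
}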
+ */ +extern char* PgObject_getClassName(jclass cls); + #ifdef __cplusplus } #endif diff --git a/pljava-so/src/main/include/pljava/PgObject_priv.h b/pljava-so/src/main/include/pljava/PgObject_priv.h index b179a3b8..4c116b20 100644 --- a/pljava-so/src/main/include/pljava/PgObject_priv.h +++ b/pljava-so/src/main/include/pljava/PgObject_priv.h @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #ifndef __pljava_PgObject_priv_h #define __pljava_PgObject_priv_h @@ -54,12 +58,6 @@ struct PgObject_ PgObjectClass m_class; }; -/* - * Internal bogus. Someone forgot to replace a function - * pointer somewhere. - */ -extern void _PgObject_pureVirtualCalled(PgObject self); - /* * Throw an exception indicating that wanted member could not be * found. diff --git a/pljava-so/src/main/include/pljava/PgSavepoint.h b/pljava-so/src/main/include/pljava/PgSavepoint.h new file mode 100644 index 00000000..b5448f67 --- /dev/null +++ b/pljava-so/src/main/include/pljava/PgSavepoint.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +#ifndef __pljava_PgSavepoint_h +#define __pljava_PgSavepoint_h + +#ifdef __cplusplus +extern "C" { +#endif + +extern jobject pljava_PgSavepoint_forId(SubTransactionId); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/pljava-so/src/main/include/pljava/SPI.h b/pljava-so/src/main/include/pljava/SPI.h index 9d3548db..cc8975c6 100644 --- a/pljava-so/src/main/include/pljava/SPI.h +++ b/pljava-so/src/main/include/pljava/SPI.h @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #ifndef __pljava_SPI_h #define __pljava_SPI_h @@ -17,31 +21,6 @@ extern "C" { #endif -/*********************************************************************** - * Some needed additions to the SPI set of functions. - * - * @author Thomas Hallgren - * - ***********************************************************************/ - -typedef struct -{ - SubTransactionId xid; - int nestingLevel; - char name[1]; -} Savepoint; - -/* infant is set to the savepoint that is being created durin a setSavepoint call. 
- * It is used by the onStart callback. - */ -extern Savepoint* infant; - -extern Savepoint* SPI_setSavepoint(const char* name); - -extern void SPI_releaseSavepoint(Savepoint* sp); - -extern void SPI_rollbackSavepoint(Savepoint* sp); - #ifdef __cplusplus } /* end of extern "C" declaration */ #endif diff --git a/pljava-so/src/main/include/pljava/SQLInputFromTuple.h b/pljava-so/src/main/include/pljava/SQLInputFromTuple.h index 1483f92c..85f815a3 100644 --- a/pljava-so/src/main/include/pljava/SQLInputFromTuple.h +++ b/pljava-so/src/main/include/pljava/SQLInputFromTuple.h @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -24,7 +30,9 @@ extern "C" { * ***********************************************************************/ -extern jobject SQLInputFromTuple_create(HeapTupleHeader hth, TupleDesc td); +extern void pljava_SQLInputFromTuple_initialize(void); + +extern jobject pljava_SQLInputFromTuple_create(HeapTupleHeader hth); #ifdef __cplusplus } /* end of extern "C" declaration */ diff --git a/pljava-so/src/main/include/pljava/VarlenaWrapper.h b/pljava-so/src/main/include/pljava/VarlenaWrapper.h new file mode 100644 index 00000000..bbfd692f --- /dev/null +++ b/pljava-so/src/main/include/pljava/VarlenaWrapper.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2018 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +#ifndef __pljava_VarlenaWrapper_h +#define __pljava_VarlenaWrapper_h + +#include +#include + +#include "pljava/pljava.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern jobject pljava_VarlenaWrapper_Input( + Datum d, MemoryContext mc, ResourceOwner ro); + +extern jobject pljava_VarlenaWrapper_Output(MemoryContext mc, ResourceOwner ro); + +extern Datum pljava_VarlenaWrapper_adopt(jobject vlos); + +extern void pljava_VarlenaWrapper_initialize(void); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/pljava-so/src/main/include/pljava/pljava.h b/pljava-so/src/main/include/pljava/pljava.h index c400e8e8..feed703e 100644 --- a/pljava-so/src/main/include/pljava/pljava.h +++ b/pljava-so/src/main/include/pljava/pljava.h @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #ifndef __pljava_pljava_h #define __pljava_pljava_h @@ -33,6 +37,7 @@ extern int vsnprintf(char* buf, size_t count, const char* format, va_list arg); #include #include #include +#include /* * for now we have to support older compilers that don't have @@ -71,23 +76,13 @@ extern int vsnprintf(char* buf, size_t count, const char* format, va_list arg); #endif /* - * GETSTRUCT require "access/htup_details.h" to be included in PG9.3 + * This symbol was spelled without the underscores prior to PG 14. */ -#if PG_VERSION_NUM >= 90300 -#include "access/htup_details.h" +#if PG_VERSION_NUM < 140000 +#define PG_NODE_TREEOID PGNODETREEOID #endif - -/* The errorOccured will be set when a call from Java into one of the - * backend functions results in a elog that causes a longjmp (Levels >= ERROR) - * that was trapped using the PLJAVA_TRY/PLJAVA_CATCH macros. - * When this happens, all further calls from Java must be blocked since the - * state of the current transaction is unknown. Further more, once the function - * that initially called Java finally returns, the intended longjmp (the one - * to the original value of Warn_restart) must be made. - */ -extern jlong mainThreadId; -extern bool pljavaEntryFence(JNIEnv* env); +extern void* mainThreadId; extern JNIEnv* currentJNIEnv; extern MemoryContext JavaMemoryContext; @@ -116,31 +111,13 @@ extern MemoryContext JavaMemoryContext; * stack_base_ptr was static before PG 8.1. By executive decision, PL/Java now * has 8.1 as a back compatibility limit; no empty #defines here for earlier. 
*/ -#if 90104<=PG_VERSION_NUM || \ - 90008<=PG_VERSION_NUM && PG_VERSION_NUM<90100 || \ - 80412<=PG_VERSION_NUM && PG_VERSION_NUM<90000 || \ - 80319<=PG_VERSION_NUM && PG_VERSION_NUM<80400 #define NEED_MISCADMIN_FOR_STACK_BASE #define _STACK_BASE_TYPE pg_stack_base_t #define _STACK_BASE_SET saveStackBasePtr = set_stack_base() #define _STACK_BASE_RESTORE restore_stack_base(saveStackBasePtr) -#else -extern -#if PG_VERSION_NUM < 80200 -DLLIMPORT -#else -PGDLLIMPORT -#endif -char* stack_base_ptr; -#define _STACK_BASE_TYPE char* -#define _STACK_BASE_SET \ - saveStackBasePtr = stack_base_ptr; \ - stack_base_ptr = (char*)&saveMainThreadId -#define _STACK_BASE_RESTORE stack_base_ptr = saveStackBasePtr -#endif #define STACK_BASE_VARS \ - jlong saveMainThreadId = 0; \ + void* saveMainThreadId = 0; \ _STACK_BASE_TYPE saveStackBasePtr; #define STACK_BASE_PUSH(threadId) \ @@ -178,24 +155,20 @@ char* stack_base_ptr; * * Class 07 - Dynamic SQL Exception */ -#define ERRCODE_INVALID_DESCRIPTOR_INDEX MAKE_SQLSTATE('0','7', '0','0','9') - +#define ERRCODE_INVALID_DESCRIPTOR_INDEX MAKE_SQLSTATE('0','7', '0','0','9') /* - * Union used when coercing void* to jlong and vice versa + * Class 46 - SQL/JRT */ -typedef union +#define ERRCODE_CLASS_SQLJRT MAKE_SQLSTATE('4','6','0','0','0') + +static inline jlong +PointerGetJLong(const void *X) { - void* ptrVal; - jlong longVal; /* 64 bit quantity */ - struct - { - /* Used when calculating pointer hash in systems where - * a pointer is 64 bit - */ - uint32 intVal_1; - uint32 intVal_2; - } x64; -} Ptr2Long; + return (jlong)(uintptr_t)(X); +} + +#define JLongGet(T, X) \ + (AssertVariableIsOfTypeMacro(X, jlong), (T)(uintptr_t)(X)) struct Invocation_; typedef struct Invocation_ Invocation; diff --git a/pljava-so/src/main/include/pljava/type/ErrorData.h b/pljava-so/src/main/include/pljava/type/ErrorData.h index adc838bf..7f0a089b 100644 --- a/pljava-so/src/main/include/pljava/type/ErrorData.h +++ b/pljava-so/src/main/include/pljava/type/ErrorData.h @@ -1,15 +1,20 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ #ifndef __pljava_ErrorData_h #define __pljava_ErrorData_h -#include "pljava/type/JavaWrapper.h" #ifdef __cplusplus extern "C" { #endif @@ -24,13 +29,13 @@ extern "C" { * Create the org.postgresql.pljava.internal.ErrorData that represents * the current error obtaind from CopyErrorData(). */ -extern jobject ErrorData_getCurrentError(void); +extern jobject pljava_ErrorData_getCurrentError(void); /* * Extract the native ErrorData from a Java ErrorData. 
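/*
 * A minimal illustrative sketch, referring back to the PointerGetJLong and
 * JLongGet helpers added in pljava.h above: a native pointer travels to Java
 * as a jlong handle and is recovered with the matching macro. The use of a
 * Portal here is an assumption for the example.
 */
#include "postgres.h"
#include "utils/portal.h"
#include "pljava/pljava.h"

static jlong
example_wrap_portal(Portal portal)
{
	return PointerGetJLong(portal);
}

static Portal
example_unwrap_portal(jlong handle)
{
	/* JLongGet insists (at compile time) that 'handle' really is a jlong */
	return JLongGet(Portal, handle);
}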
*/ -extern ErrorData* ErrorData_getErrorData(jobject jerrorData); - +extern ErrorData* pljava_ErrorData_getErrorData(jobject jerrorData); +extern void pljava_ErrorData_initialize(void); #ifdef __cplusplus } diff --git a/pljava-so/src/main/include/pljava/type/HeapTupleHeader.h b/pljava-so/src/main/include/pljava/type/HeapTupleHeader.h deleted file mode 100644 index 5618dad0..00000000 --- a/pljava-so/src/main/include/pljava/type/HeapTupleHeader.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden -* Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html - * - * @author Thomas Hallgren - */ -#ifndef __pljava_type_HeapTupleHeader_h -#define __pljava_type_HeapTupleHeader_h - -#include "pljava/type/Type.h" -#ifdef __cplusplus -extern "C" { -#endif - -#include - -/***************************************************************** - * The HeapTupleHeader java class extends the NativeStruct and provides JNI - * access to some of the attributes of the HeapTupleHeader structure. - * - * @author Thomas Hallgren - *****************************************************************/ - -extern jobject HeapTupleHeader_getTupleDesc(HeapTupleHeader ht); - -extern jobject HeapTupleHeader_getObject(JNIEnv* env, jlong hth, jlong jtd, jint attrNo); - -extern void HeapTupleHeader_free(JNIEnv* env, jlong hth); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/pljava-so/src/main/include/pljava/type/JavaWrapper.h b/pljava-so/src/main/include/pljava/type/JavaWrapper.h deleted file mode 100644 index 7491dafc..00000000 --- a/pljava-so/src/main/include/pljava/type/JavaWrapper.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html - * - * @author Thomas Hallgren - */ -#ifndef __pljava_JavaWrapper_h -#define __pljava_JavaWrapper_h - -#include "pljava/type/Type.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/************************************************************************** - * The JavaWrapper is a Java class that maintains a pointer to a - * piece of memory allocated in the special JavaMemoryContext. - * - * @author Thomas Hallgren - *************************************************************************/ - -/* - * Allocates a new TypeClass and assigns a default coerceObject method used by - * all JavaWrapper derivates. 
- */ -extern TypeClass JavaWrapperClass_alloc(const char* name); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/pljava-so/src/main/include/pljava/type/LargeObject.h b/pljava-so/src/main/include/pljava/type/LargeObject.h deleted file mode 100644 index bdeaf510..00000000 --- a/pljava-so/src/main/include/pljava/type/LargeObject.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html - * - * @author Thomas Hallgren - */ -#ifndef __pljava_LargeObject_h -#define __pljava_LargeObject_h - -#include "pljava/type/Type.h" -#ifdef __cplusplus -extern "C" { -#endif - -#include - -/***************************************************************** - * The LargeObject java class extends the NativeStruct and provides JNI - * access to some of the attributes of the LargeObjectDesc structure. - * - * @author Thomas Hallgren - *****************************************************************/ - -/* - * Create the org.postgresql.pljava.LargeObject instance - */ -extern jobject LargeObject_create(LargeObjectDesc* lo); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/pljava-so/src/main/include/pljava/type/Portal.h b/pljava-so/src/main/include/pljava/type/Portal.h index 23a48a13..d523ecc3 100644 --- a/pljava-so/src/main/include/pljava/type/Portal.h +++ b/pljava-so/src/main/include/pljava/type/Portal.h @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * PostgreSQL Global Development Group + * Chapman Flack */ #ifndef __pljava_Portal_h #define __pljava_Portal_h @@ -18,16 +23,18 @@ extern "C" { #include /***************************************************************** - * The Portal java class extends the NativeStruct and provides JNI + * The Portal java class provides JNI * access to some of the attributes of the Portal structure. * * @author Thomas Hallgren *****************************************************************/ +extern void pljava_Portal_initialize(void); + /* * Create the org.postgresql.pljava.Portal instance */ -extern jobject Portal_create(Portal portal); +extern jobject pljava_Portal_create(Portal portal, jobject jplan); #ifdef __cplusplus } diff --git a/pljava-so/src/main/include/pljava/type/Relation.h b/pljava-so/src/main/include/pljava/type/Relation.h index e3d81d76..5fac1a4a 100644 --- a/pljava-so/src/main/include/pljava/type/Relation.h +++ b/pljava-so/src/main/include/pljava/type/Relation.h @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -17,7 +23,7 @@ extern "C" { #include /******************************************************************* - * The Relation java class extends the NativeStruct and provides JNI + * The Relation java class provides JNI * access to some of the attributes of the Relation structure. * * @author Thomas Hallgren @@ -26,7 +32,8 @@ extern "C" { /* * Create an instance of org.postgresql.pljava.Relation */ -extern jobject Relation_create(Relation rel); +extern jobject pljava_Relation_create(Relation rel); +extern void pljava_Relation_initialize(void); #ifdef __cplusplus } diff --git a/pljava-so/src/main/include/pljava/type/SingleRowReader.h b/pljava-so/src/main/include/pljava/type/SingleRowReader.h new file mode 100644 index 00000000..3894da80 --- /dev/null +++ b/pljava-so/src/main/include/pljava/type/SingleRowReader.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack + * + * @author Thomas Hallgren + */ +#ifndef __pljava_type_SingleRowReader_h +#define __pljava_type_SingleRowReader_h + +#include "pljava/type/Type.h" +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/***************************************************************** + * The SingleRowReader java class presents a ResultSet view of a + * single tuple, represented by a HeapTupleHeader and a TupleDesc + * describing its structure. + * + * @author Thomas Hallgren (as HeapTupleHeader.h) + *****************************************************************/ + +extern void pljava_SingleRowReader_initialize(void); + +extern jobject pljava_SingleRowReader_getTupleDesc(HeapTupleHeader); + +extern jobject pljava_SingleRowReader_create(HeapTupleHeader); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/pljava-so/src/main/include/pljava/type/String.h b/pljava-so/src/main/include/pljava/type/String.h index 6abf0752..9cc518db 100644 --- a/pljava-so/src/main/include/pljava/type/String.h +++ b/pljava-so/src/main/include/pljava/type/String.h @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB - Thomas Hallgren + * Chapman Flack + * Francisco Miguel Biete Banon */ #ifndef __pljava_type_String_h #define __pljava_type_String_h @@ -19,17 +24,21 @@ extern "C" { * The String class extends the Type and adds the members necessary to * perform standard Postgres textin/textout conversion. 
An instance of this * class will be used for all types that are not explicitly mapped. - * + * * The class also has some convenience routings for Java String manipulation. - * + * * @author Thomas Hallgren * + * Since commit 639a86e in PostgreSQL upstream, this struct can no longer + * have the name String, which is why it is now PLJString but in files still + * named String.[hc]. + * **************************************************************************/ extern jclass s_Object_class; extern jclass s_String_class; struct String_; -typedef struct String_* String; +typedef struct String_* PLJString; /* * Create a Java String object from a null terminated string. Conversion is @@ -73,7 +82,7 @@ extern text* String_createText(jstring javaString); extern Type String_obtain(Oid typeId); -extern String StringClass_obtain(TypeClass self, Oid typeId); +extern PLJString StringClass_obtain(TypeClass self, Oid typeId); #ifdef __cplusplus } diff --git a/pljava-so/src/main/include/pljava/type/Timestamp.h b/pljava-so/src/main/include/pljava/type/Timestamp.h index 19a66e12..c3ff9e80 100644 --- a/pljava-so/src/main/include/pljava/type/Timestamp.h +++ b/pljava-so/src/main/include/pljava/type/Timestamp.h @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2018 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -27,13 +33,14 @@ extern "C" { extern int Timestamp_getCurrentTimeZone(void); /* - * Returns the timezone fo the given Timestamp. Comes in two variants. - * The int64 variant will be used when PL/Java is used with a backend - * compiled with integer datetimes. The double variant will be used when - * this is not the case. + * Returns the timezone for the given Timestamp. This is an internal function + * and only declared here because Date.c uses it, and always this int64 variant, + * regardless of whether the backend was compiled with integer datetimes. The + * argument is not a PostgreSQL int64 Timestamp, but rather a PostgreSQL int64 + * Timestamp divided by two. The result is a time zone offset in seconds west + * of Greenwich. */ extern int32 Timestamp_getTimeZone_id(int64 t); -extern int32 Timestamp_getTimeZone_dd(double t); #ifdef __cplusplus } diff --git a/pljava-so/src/main/include/pljava/type/TriggerData.h b/pljava-so/src/main/include/pljava/type/TriggerData.h index d2b791af..df26d1c4 100644 --- a/pljava-so/src/main/include/pljava/type/TriggerData.h +++ b/pljava-so/src/main/include/pljava/type/TriggerData.h @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -18,7 +24,7 @@ extern "C" { #include /********************************************************************** - * The TriggerData java class extends the NativeStruct and provides JNI + * The TriggerData java class provides JNI * access to some of the attributes of the TriggerData structure. * * @author Thomas Hallgren @@ -27,12 +33,17 @@ extern "C" { /* * Create the org.postgresql.pljava.TriggerData object. */ -extern jobject TriggerData_create(TriggerData* triggerData); +extern jobject pljava_TriggerData_create(TriggerData* triggerData); /* * Obtains the returned Tuple after trigger has been processed. + * Note: starting with PG 10, it is the caller's responsibility to ensure SPI + * is connected (and that a longer-lived memory context than SPI's is selected, + * if the caller wants the result to survive SPI_finish). */ -extern HeapTuple TriggerData_getTriggerReturnTuple(jobject jtd, bool* wasNull); +extern HeapTuple pljava_TriggerData_getTriggerReturnTuple( + jobject jtd, bool* wasNull); +extern void pljava_TriggerData_initialize(void); #ifdef __cplusplus } diff --git a/pljava-so/src/main/include/pljava/type/Tuple.h b/pljava-so/src/main/include/pljava/type/Tuple.h index 53639212..97a3a108 100644 --- a/pljava-so/src/main/include/pljava/type/Tuple.h +++ b/pljava-so/src/main/include/pljava/type/Tuple.h @@ -1,15 +1,20 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ #ifndef __pljava_Tuple_h #define __pljava_Tuple_h -#include "pljava/type/JavaWrapper.h" #ifdef __cplusplus extern "C" { #endif @@ -26,14 +31,18 @@ extern "C" { /* * Create the org.postgresql.pljava.Tuple instance */ -extern jobject Tuple_create(HeapTuple tuple); -extern jobject Tuple_internalCreate(HeapTuple tuple, bool mustCopy); -extern jobjectArray Tuple_createArray(HeapTuple* tuples, jint size, bool mustCopy); +extern jobject pljava_Tuple_create(HeapTuple tuple); +extern jobject pljava_Tuple_internalCreate(HeapTuple tuple, bool mustCopy); +extern jobjectArray pljava_Tuple_createArray( + HeapTuple* tuples, jint size, bool mustCopy); /* - * Return a java object at given index from a HeapTuple + * Return a java object at given index from a HeapTuple (with a best effort to + * produce an object of class rqcls if it is not null). 
*/ -extern jobject Tuple_getObject(TupleDesc tupleDesc, HeapTuple tuple, int index); +extern jobject pljava_Tuple_getObject( + TupleDesc tupleDesc, HeapTuple tuple, int index, jclass rqcls); +extern void pljava_Tuple_initialize(void); #ifdef __cplusplus } diff --git a/pljava-so/src/main/include/pljava/type/TupleDesc.h b/pljava-so/src/main/include/pljava/type/TupleDesc.h index 7a1a6bd1..5b45489e 100644 --- a/pljava-so/src/main/include/pljava/type/TupleDesc.h +++ b/pljava-so/src/main/include/pljava/type/TupleDesc.h @@ -1,15 +1,20 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ #ifndef __pljava_TupleDesc_h #define __pljava_TupleDesc_h -#include "pljava/type/JavaWrapper.h" #ifdef __cplusplus extern "C" { #endif @@ -17,7 +22,7 @@ extern "C" { #include /******************************************************************** - * The TupleDesc java class extends the NativeStruct and provides JNI + * The TupleDesc java class provides JNI * access to some of the attributes of the TupleDesc structure. * * @author Thomas Hallgren @@ -28,13 +33,14 @@ extern "C" { * is NULL a Java exception has been initiated and the caller * should return to Java ASAP. */ -extern Type TupleDesc_getColumnType(TupleDesc tupleDesc, int index); +extern Type pljava_TupleDesc_getColumnType(TupleDesc tupleDesc, int index); /* * Create the org.postgresql.pljava.TupleDesc instance */ -extern jobject TupleDesc_create(TupleDesc tDesc); -extern jobject TupleDesc_internalCreate(TupleDesc tDesc); +extern jobject pljava_TupleDesc_create(TupleDesc tDesc); +extern jobject pljava_TupleDesc_internalCreate(TupleDesc tDesc); +extern void pljava_TupleDesc_initialize(void); #ifdef __cplusplus } diff --git a/pljava-so/src/main/include/pljava/type/Type.h b/pljava-so/src/main/include/pljava/type/Type.h index 97aacb5c..72170d98 100644 --- a/pljava-so/src/main/include/pljava/type/Type.h +++ b/pljava-so/src/main/include/pljava/type/Type.h @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #ifndef __pljava_type_Type_h #define __pljava_type_Type_h @@ -59,17 +63,36 @@ extern bool Type_isPrimitive(Type self); extern bool Type_canReplaceType(Type self, Type type); /* - * Translate a given Datum into a jvalue accorging to the type represented + * Translate a given Datum into a jvalue according to the type represented * by this instance. 
*/ extern jvalue Type_coerceDatum(Type self, Datum datum); +/* + * Translate a given Datum into a jvalue, where the type represented + * by this instance is derived from the PG type of the datum, and rqcls, if + * not NULL, is the Java class wanted by the caller (JDBC 4.1 feature). + * Reduces to Type_coerceDatum if rqcls is NULL, or there is no TypeClass that + * can replace this Type and produce the requested class. + */ +extern jvalue Type_coerceDatumAs(Type self, Datum datum, jclass rqcls); + /* * Translate a given Object into a Datum accorging to the type represented - * by this instance. + * by this instance. The caller must be certain that 'object' is an instance + * of a Java type expected by the coercer for this TypeClass. */ extern Datum Type_coerceObject(Type self, jobject object); +/* + * Translate a given Object into a Datum accorging to the type represented + * by this instance. The object may be an instance of TypeBridge.Holder holding + * an object of an alternate Java class than what the coercer for this TypeClass + * expects. Otherwise, it must be an object of the expected class, just as for + * Type_coerceObject. + */ +extern Datum Type_coerceObjectBridged(Type self, jobject object); + /* * Return a Type based on a Postgres Oid. Creates a new type if * necessary. @@ -151,12 +174,6 @@ extern const char* Type_getJavaTypeName(Type self); */ extern const char* Type_getJNISignature(Type self); -/* - * Returns the JNI signature used when returning instances - * of this type. - */ -extern const char* Type_getJNIReturnSignature(Type self, bool forMultiCall, bool useAltRepr); - /* * Returns the array Type. The type is created if it doesn't exist */ @@ -182,29 +199,6 @@ extern Oid Type_getOid(Type self); */ extern TupleDesc Type_getTupleDesc(Type self, PG_FUNCTION_ARGS); -/* - * Calls a java method using one of the CallMethodA routines where - * corresponds to the type represented by this instance and - * coerces the returned value into a Datum. - * - * The method will set the value pointed to by the wasNull parameter - * to true if the Java method returned null. The method expects that - * the wasNull parameter is set to false by the caller prior to the - * call. - */ -extern Datum Type_invoke(Type self, jclass clazz, jmethodID method, jvalue* args, PG_FUNCTION_ARGS); - -/* - * Calls a Set Returning Function (SRF). - */ -extern Datum Type_invokeSRF(Type self, jclass clazz, jmethodID method, jvalue* args, PG_FUNCTION_ARGS); - -/* - * Obtains the Java object that acts as the SRF producer. This instance will be - * called once for each row that should be produced. - */ -extern jobject Type_getSRFProducer(Type self, jclass clazz, jmethodID method, jvalue* args); - /* * Obtains the optional Java object that will act as the value collector for * the SRF producer. The collector typically manifest itself as an OUT @@ -213,19 +207,10 @@ extern jobject Type_getSRFProducer(Type self, jclass clazz, jmethodID method, jv extern jobject Type_getSRFCollector(Type self, PG_FUNCTION_ARGS); /* - * Called to determine if the producer will produce another row. - */ -extern bool Type_hasNextSRF(Type self, jobject producer, jobject collector, jint counter); - -/* - * Converts the next row into a Datum of the expected type. + * Return a Datum of the expected type, from the row collector (if any) and/or + * the value returned by the row producer. */ -extern Datum Type_nextSRF(Type self, jobject producer, jobject collector); - -/* - * Called at the end of an SRF iteration. 
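The rqcls parameter of Type_coerceDatumAs (and of pljava_Tuple_getObject above) serves JDBC 4.1's class-directed retrieval, ResultSet.getObject(int, Class). As a hedged sketch of what that looks like from the Java side of a PL/Java function: the query and class names are illustrative, the connection URL is PL/Java's usual server-side jdbc:default:connection, and requesting java.time.LocalDate assumes the installed PL/Java version supports that mapping.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;
    import java.time.LocalDate;

    public class GetObjectByClass
    {
        /*
         * Fetch the same column twice: once with the driver's default mapping,
         * once naming the wanted Java class, which is the request the
         * rqcls / Type_coerceDatumAs plumbing ultimately answers.
         */
        public static String show() throws SQLException
        {
            Connection c = DriverManager.getConnection("jdbc:default:connection");
            try ( Statement s = c.createStatement();
                  ResultSet rs = s.executeQuery("SELECT CURRENT_DATE") ) // illustrative query
            {
                rs.next();
                Object byDefault = rs.getObject(1);
                LocalDate requested = rs.getObject(1, LocalDate.class);
                return byDefault.getClass().getName() + " / " + requested;
            }
        }
    }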
- */ -extern void Type_closeSRF(Type self, jobject producer); +extern Datum Type_datumFromSRF(Type self, jobject row, jobject rowCollector); /* * Function used when obtaining a type based on an Oid @@ -233,6 +218,14 @@ extern void Type_closeSRF(Type self, jobject producer); * singleton. The only current exception from this is the * String since it makes use of functions stored in the * Form_pg_type structure. + * + * In adding JDBC 4.1 support, this is decreed: a TypeObtainer + * may return its singleton, if that's what it does, regardless + * of whether the Oid stored there matches the one passed to the + * obtainer. In other words, it may ignore the typeId argument. + * It's often appropriate for the caller to check the returned + * type with Type_canReplaceType to determine if it is usable + * for the intended purpose. */ typedef Type (*TypeObtainer)(Oid typeId); @@ -252,6 +245,27 @@ typedef Datum (*ObjectCoercer)(Type, jobject); extern void Type_registerType(const char* javaTypeName, Type type); extern void Type_registerType2(Oid typeId, const char* javaTypeName, TypeObtainer obtainer); +#include "pljava/Function.h" + +/* + * Call a java method using (ultimately) one of the CallMethod routines + * where corresponds to the type represented by this instance and + * coerce the returned value into a Datum. + * + * The method will set the value pointed to by the wasNull parameter + * to true if the Java method returned null. The method expects that + * the wasNull parameter is set to false by the caller prior to the + * call. + * + * The call into Java is made via a callback of Function_Invoke(fn). + */ +extern Datum Type_invoke(Type self, Function fn, PG_FUNCTION_ARGS); + +/* + * Calls a Set Returning Function (SRF). + */ +extern Datum Type_invokeSRF(Type self, Function fn, PG_FUNCTION_ARGS); + #ifdef __cplusplus } #endif diff --git a/pljava-so/src/main/include/pljava/type/Type_priv.h b/pljava-so/src/main/include/pljava/type/Type_priv.h index b3f01602..1b118f3d 100644 --- a/pljava-so/src/main/include/pljava/type/Type_priv.h +++ b/pljava-so/src/main/include/pljava/type/Type_priv.h @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * @author Thomas Hallgren + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ #ifndef __pljava_type_Type_priv_h #define __pljava_type_Type_priv_h @@ -89,24 +93,23 @@ struct TypeClass_ ObjectCoercer coerceObject; /* - * Calls a java method using one of the CallMethodA routines where - * corresponds to the type represented by this instance and - * coerces the returned value into a Datum. + * Call a java method using (ultimately) one of the CallMethod + * routines where corresponds to the type represented by this + * instance and coerce the returned value into a Datum. * * The method will set the value pointed to by the wasNull parameter * to true if the Java method returned null. The method expects that * the wasNull parameter is set to false by the caller prior to the * call. 
+ * + * The call into Java is made via a callback of + * Function_Invoke(fn, refArgs, primArgs). */ - Datum (*invoke)(Type self, jclass clazz, jmethodID method, jvalue* args, PG_FUNCTION_ARGS); + Datum (*invoke)(Type self, Function fn, PG_FUNCTION_ARGS); - jobject (*getSRFProducer)(Type self, jclass clazz, jmethodID method, jvalue* args); jobject (*getSRFCollector)(Type self, PG_FUNCTION_ARGS); - bool (*hasNextSRF)(Type self, jobject producer, jobject collector, jint counter); - Datum (*nextSRF)(Type self, jobject producer, jobject collector); - void (*closeSRF)(Type self, jobject producer); + Datum (*datumFromSRF)(Type self, jobject row, jobject collector); const char* (*getJNISignature)(Type self); - const char* (*getJNIReturnSignature)(Type self, bool forMultiCall, bool useAltRepr); /* * Returns the TupleDesc that corresponds to this type. @@ -168,7 +171,7 @@ extern bool _Type_canReplaceType(Type self, Type other); * Default version of invoke. Will make a JNI CallObjectMethod call and then * a call to self->coerceObject to create the Datum. */ -extern Datum _Type_invoke(Type self, jclass cls, jmethodID method, jvalue* args, PG_FUNCTION_ARGS); +extern Datum _Type_invoke(Type self, Function fn, PG_FUNCTION_ARGS); /* * Return the m_oid member of the Type. This is the default version of diff --git a/pljava-so/src/main/include/pljava/type/UDT.h b/pljava-so/src/main/include/pljava/type/UDT.h index c99dc60b..9323f027 100644 --- a/pljava-so/src/main/include/pljava/type/UDT.h +++ b/pljava-so/src/main/include/pljava/type/UDT.h @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -33,7 +39,22 @@ extern Datum UDT_send(UDT udt, PG_FUNCTION_ARGS); extern bool UDT_isScalar(UDT udt); -extern UDT UDT_registerUDT(jclass clazz, Oid typeId, Form_pg_type pgType, bool hasTupleDesc, bool isJavaBasedScalar); +/* + * Register that a Java class is the UDT implementation for a PostgreSQL typeID. + * + * Only one of hasTupleDesc / isJavaBasedScalar can be true, and the parseMH + * argument is only used in the scalar case. A readMH is needed for the scalar + * or the composite case. + * + * Non-null values for {parse,read,write,toString}MH can be passed as arguments + * here as a shortcut in case the registration is coming from Function.c and the + * handles are already known (they are in fact Invocables, but were method + * handles before, and MH still suggests their purpose and makes shorter names). + * If passed as NULL and needed, upcalls will be made to obtain them. 
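On the Java side, the readMH and writeMH handles mentioned here resolve to the standard java.sql.SQLData methods, while parseMH and toStringMH serve the textual pair used for Java-based scalar types. As a rough illustration only (the type and its fields are hypothetical, not anything registered by this change), a composite UDT's Java class has this shape:

    import java.sql.SQLData;
    import java.sql.SQLException;
    import java.sql.SQLInput;
    import java.sql.SQLOutput;

    /* Hypothetical composite UDT: a point with two double attributes. */
    public class ComplexPoint implements SQLData
    {
        private double x;
        private double y;
        private String typeName;   // remembered from readSQL

        @Override
        public String getSQLTypeName() throws SQLException
        {
            return typeName;
        }

        @Override
        public void readSQL(SQLInput stream, String typeName) throws SQLException
        {
            this.typeName = typeName;
            x = stream.readDouble();   // attributes read in declaration order
            y = stream.readDouble();
        }

        @Override
        public void writeSQL(SQLOutput stream) throws SQLException
        {
            stream.writeDouble(x);     // written back in the same order
            stream.writeDouble(y);
        }
    }

The essential point is that writeSQL emits the attributes in the same order readSQL consumes them; for the Java-based scalar case the class would additionally supply the parse/toString pair referred to above.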
+ */ +extern UDT UDT_registerUDT(jclass clazz, Oid typeId, Form_pg_type pgType, + bool hasTupleDesc, bool isJavaBasedScalar, jobject parseMH, jobject readMH, + jobject writeMH, jobject toStringMH); typedef Datum (*UDTFunction)(UDT udt, PG_FUNCTION_ARGS); diff --git a/pljava-so/src/main/include/pljava/type/UDT_priv.h b/pljava-so/src/main/include/pljava/type/UDT_priv.h index e8322da3..89ea80c0 100644 --- a/pljava-so/src/main/include/pljava/type/UDT_priv.h +++ b/pljava-so/src/main/include/pljava/type/UDT_priv.h @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack * * @author Thomas Hallgren */ @@ -31,11 +37,18 @@ struct UDT_ jstring sqlTypeName; bool hasTupleDesc; - jmethodID init; - jmethodID parse; - jmethodID toString; - jmethodID readSQL; - jmethodID writeSQL; + jobject parse; + jobject readSQL; + + /* + * At first glance, one might not retain writeSQL and toString handles + * per-UDT, as they are both inherited methods common to all UDTs and so + * do not depend on the class of the receiver. What these jobjects hold, + * though, is an Invocable, which carries an AccessControlContext, which is + * chosen at resolution time per-UDT or per-function, so they must be here. + */ + jobject writeSQL; + jobject toString; }; extern Datum _UDT_coerceObject(Type self, jobject jstr); diff --git a/pljava/pom.xml b/pljava/pom.xml index d26bac9f..c414454c 100644 --- a/pljava/pom.xml +++ b/pljava/pom.xml @@ -4,7 +4,7 @@ org.postgresql pljava.app - 1.5.0 + 1.6.10 pljava PL/Java backend Java code @@ -16,120 +16,203 @@ ${project.version} + org.apache.maven.plugins - maven-shade-plugin - 1.4 + maven-compiler-plugin - true - - - *:* - - META-INF/*.SF - META-INF/*.DSA - META-INF/*.RSA - - - + + -h + ${basedir}/target/javah-include + + --processor-module-path + ${basedir}/../pljava-api/target/pljava-api-${project.version}.jar + + + + org.postgresql.pljava.annotation.processing.DDRProcessor + + - - - package - - shade - - - + + + + - org.apache.maven.plugins - maven-jar-plugin - - - - - org/postgresql/pljava/ - - - - PL/Java API - - - ${project.dependencies[0].version} - - - ${project.organization.name} - - - - - org/postgresql/pljava/internal/ - - - ${project.name} - - - ${project.version} - - - ${project.organization.name} - - - - - org/postgresql/pljava/jdbc/ - - - ${project.name} - - - ${project.version} - - - ${project.organization.name} - - - - - org/postgresql/pljava/management/ - - - ${project.name} - - - ${project.version} - - - ${project.organization.name} - - - - - org/postgresql/pljava/sqlj/ - - - ${project.name} - - - ${project.version} - - - ${project.organization.name} - - - - - - + org.postgresql + pljava-pgxs + ${pljava.pgxs.version} + + + + scripted-report + + + + + + - + diff --git a/pljava/src/main/java/module-info.java b/pljava/src/main/java/module-info.java new file mode 100644 index 00000000..68923bbe --- /dev/null +++ b/pljava/src/main/java/module-info.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, 
as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ + +/** + * Contains PL/Java's internal implementation. + */ +module org.postgresql.pljava.internal +{ + requires java.base; + requires java.management; + requires org.postgresql.pljava; + + exports org.postgresql.pljava.mbeans; // bothers me, but only interfaces + + exports org.postgresql.pljava.elog to java.logging; + + exports org.postgresql.pljava.policy to java.base; // has custom Permission + + provides java.net.spi.URLStreamHandlerProvider + with org.postgresql.pljava.sqlj.Handler; + + provides java.nio.charset.spi.CharsetProvider + with org.postgresql.pljava.internal.SQL_ASCII.Provider; + + provides java.sql.Driver with org.postgresql.pljava.jdbc.SPIDriver; + + provides org.postgresql.pljava.Session + with org.postgresql.pljava.internal.Session; +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/ELogFormatter.java b/pljava/src/main/java/org/postgresql/pljava/elog/ELogFormatter.java similarity index 73% rename from pljava/src/main/java/org/postgresql/pljava/internal/ELogFormatter.java rename to pljava/src/main/java/org/postgresql/pljava/elog/ELogFormatter.java index d1edd219..ec7ca311 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/ELogFormatter.java +++ b/pljava/src/main/java/org/postgresql/pljava/elog/ELogFormatter.java @@ -1,10 +1,16 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ -package org.postgresql.pljava.internal; +package org.postgresql.pljava.elog; import java.io.PrintWriter; import java.io.StringWriter; @@ -23,8 +29,6 @@ public class ELogFormatter extends Formatter private final static MessageFormat s_tsFormatter = new MessageFormat( "{0,date,dd MMM yy} {0,time,HH:mm:ss} {1} {2}"); - private final static String s_lineSeparator = System.getProperty("line.separator"); - private final Date m_timestamp = new Date(); private final Object m_args[] = new Object[] { m_timestamp, null, null }; private final StringBuffer m_buffer = new StringBuffer(); @@ -48,9 +52,9 @@ public synchronized String format(LogRecord record) Throwable thrown = record.getThrown(); if(thrown != null) { - sb.append(s_lineSeparator); StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw); + pw.println(); /* line.separator safely cached in JVM initPhase1 */ record.getThrown().printStackTrace(pw); pw.close(); sb.append(sw.toString()); diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/ELogHandler.java b/pljava/src/main/java/org/postgresql/pljava/elog/ELogHandler.java similarity index 61% rename from pljava/src/main/java/org/postgresql/pljava/internal/ELogHandler.java rename to pljava/src/main/java/org/postgresql/pljava/elog/ELogHandler.java index 3cd27c45..86437e14 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/ELogHandler.java +++ b/pljava/src/main/java/org/postgresql/pljava/elog/ELogHandler.java @@ -1,10 +1,16 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ -package org.postgresql.pljava.internal; +package org.postgresql.pljava.elog; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -17,6 +23,8 @@ import java.util.logging.LogManager; import java.util.logging.LogRecord; +import org.postgresql.pljava.internal.Backend; + /** * Provides access to the loggin mechanism of the PostgreSQL server. * @@ -138,6 +146,55 @@ public static void init() } } + /** + * Obtains the "log_min_messages" configuration variable and + * translates it into a {@link Level} object. + * @return The Level that corresponds to the configuration variable. + */ + public static Level getPgLevel() + { + // We use this little trick to provide the correct config option + // without having to call back into the JNI code the first time + // around since that call will be a bit premature (it will come + // during JVM initialization and before the native method is + // registered). 
+ // + String[] options = { "log_min_messages", "client_min_messages" }; + Level finestLevel = null; + for ( String option : options ) + { + String pgLevel = Backend.getConfigOption(option); + if ( null == pgLevel ) + continue; + pgLevel = pgLevel.toLowerCase().trim(); + Level level = null; + if(pgLevel.equals("panic") || pgLevel.equals("fatal")) + level = Level.OFF; + else if(pgLevel.equals("error")) + level = Level.SEVERE; + else if(pgLevel.equals("warning")) + level = Level.WARNING; + else if(pgLevel.equals("notice")) + level = Level.CONFIG; + else if(pgLevel.equals("info")) + level = Level.INFO; + else if(pgLevel.equals("debug1")) + level = Level.FINE; + else if(pgLevel.equals("debug2")) + level = Level.FINER; + else if(pgLevel.equals("debug3") || pgLevel.equals("debug4") || pgLevel.equals("debug5")) + level = Level.FINEST; + if ( null == level ) + continue; + if ( null == finestLevel + || finestLevel.intValue() > level.intValue() ) + finestLevel = level; + } + if ( null == finestLevel ) + finestLevel = Level.ALL; + return finestLevel; + } + // Private method to configure an ELogHandler // private void configure() @@ -150,7 +207,11 @@ private void configure() { try { - this.setFilter((Filter)Class.forName(val.trim()).newInstance()); + this.setFilter((Filter) + Class.forName(val.trim()) + .getDeclaredConstructor() + .newInstance() + ); } catch (Exception e) { @@ -165,7 +226,11 @@ private void configure() { try { - this.setFormatter((Formatter)Class.forName(val.trim()).newInstance()); + this.setFormatter((Formatter) + Class.forName(val.trim()) + .getDeclaredConstructor() + .newInstance() + ); } catch (Exception e) { diff --git a/pljava/src/main/java/org/postgresql/pljava/elog/package-info.java b/pljava/src/main/java/org/postgresql/pljava/elog/package-info.java new file mode 100644 index 00000000..ad0218b6 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/elog/package-info.java @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Purdue University + */ +/** + *

      PL/Java's legacy bridge code between {@code java.util.logging} and + * PostgreSQL's error logging mechanisms, isolated here in a package that can be + * exported to the {@code java.logging} module, as that API requires. + */ +package org.postgresql.pljava.elog; diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/AclId.java b/pljava/src/main/java/org/postgresql/pljava/internal/AclId.java index f0956755..bd06aba3 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/AclId.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/AclId.java @@ -1,11 +1,19 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.internal; +import static org.postgresql.pljava.internal.Backend.doInPG; + import java.sql.SQLException; /** @@ -61,10 +69,7 @@ public int hashCode() */ public static AclId getUser() { - synchronized(Backend.THREADLOCK) - { - return _getUser(); - } + return doInPG(AclId::_getUser); } /** @@ -82,10 +87,7 @@ public static AclId getUser() */ public static AclId getOuterUser() { - synchronized(Backend.THREADLOCK) - { - return _getOuterUser(); - } + return doInPG(AclId::_getOuterUser); } /** @@ -111,10 +113,7 @@ public static AclId getSessionUser() */ public static AclId fromName(String name) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _fromName(name); - } + return doInPG(() -> _fromName(name)); } /** @@ -122,10 +121,7 @@ public static AclId fromName(String name) throws SQLException */ public String getName() { - synchronized(Backend.THREADLOCK) - { - return this._getName(); - } + return doInPG(this::_getName); } /** @@ -134,10 +130,7 @@ public String getName() */ public boolean hasSchemaCreatePermission(Oid oid) { - synchronized(Backend.THREADLOCK) - { - return this._hasSchemaCreatePermission(oid); - } + return doInPG(() -> _hasSchemaCreatePermission(oid)); } /** @@ -145,10 +138,7 @@ public boolean hasSchemaCreatePermission(Oid oid) */ public boolean isSuperuser() { - synchronized(Backend.THREADLOCK) - { - return this._isSuperuser(); - } + return doInPG(this::_isSuperuser); } /** diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Backend.java b/pljava/src/main/java/org/postgresql/pljava/internal/Backend.java index b18a32b7..a07c477d 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Backend.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Backend.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. 
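The AclId rewrite above changes only the synchronization idiom, not the public shape, so existing callers compile unchanged. A small usage sketch follows; it is meaningful only for code that can see the org.postgresql.pljava.internal package and only when running inside the backend, since every call re-enters PostgreSQL via doInPG.

    import org.postgresql.pljava.internal.AclId;

    final class WhoAmI
    {
        /* Report the effective and outer users, using the methods shown above. */
        static String describeCaller()
        {
            AclId effective = AclId.getUser();
            AclId outer = AclId.getOuterUser();
            return effective.getName()
                + (effective.isSuperuser() ? " (superuser)" : "")
                + ", called by " + outer.getName();
        }
    }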
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -12,20 +12,23 @@ */ package org.postgresql.pljava.internal; -import java.io.File; -import java.io.FilePermission; -import java.io.IOException; import java.io.InputStream; -import java.net.URL; -import java.net.URLConnection; -import java.security.Permission; +import java.io.IOException; + import java.sql.SQLException; -import java.util.PropertyPermission; -import java.util.logging.Level; -import java.util.logging.Logger; +import java.sql.SQLDataException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; -import org.postgresql.pljava.management.Commands; +import org.postgresql.pljava.elog.ELogHandler; // for javadoc +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import static org.postgresql.pljava.sqlgen.Lexicals.identifierFrom; +import static + org.postgresql.pljava.sqlgen.Lexicals.ISO_AND_PG_IDENTIFIER_CAPTURING; /** * Provides access to some useful routines in the PostgreSQL server. @@ -36,270 +39,291 @@ public class Backend /** * All native calls synchronize on this object. */ - public static final Object THREADLOCK = new Object(); - - private static Session s_session; - - public static synchronized Session getSession() - { - if(s_session == null) - s_session = new Session(); - return s_session; - } + public static final Object THREADLOCK; /** - * Returns the configuration option as read from the Global - * Unified Config package (GUC). - * @param key The name of the option. - * @return The value of the option. + * Will be {@code Boolean.TRUE} on the one primordial thread first entered + * from PG, and null on any other thread. */ - public static String getConfigOption(String key) + public static final ThreadLocal IAMPGTHREAD = new ThreadLocal<>(); + + public static final boolean WITHOUT_ENFORCEMENT = + "disallow".equals(System.getProperty("java.security.manager")); + + @SuppressWarnings("deprecation") // Java >= 10: .feature() + static final int JAVA_MAJOR = Runtime.version().major(); + + static { - synchronized(THREADLOCK) + IAMPGTHREAD.set(Boolean.TRUE); + THREADLOCK = EarlyNatives._forbidOtherThreads() ? null : new Object(); + /* + * With any luck, the static final null-or-not-ness of THREADLOCK will + * cause JIT to quickly specialize the doInPG() methods to one or the + * other branch of their code. + */ + + try ( InputStream is = + Backend.class.getResourceAsStream("EntryPoints.class") ) { - return _getConfigOption(key); + byte[] image = is.readAllBytes(); + EarlyNatives._defineClass( + "org/postgresql/pljava/internal/EntryPoints", + Backend.class.getClassLoader(), image); } - } + catch ( IOException e ) + { + throw new ExceptionInInitializerError(e); + } + } + + private static final Pattern s_gucList = Pattern.compile(String.format( + "\\G(?:%1$s)(?,\\s*+)?+", ISO_AND_PG_IDENTIFIER_CAPTURING)); /** - * returns the library path '$libdir' + * Do an operation on a thread with serialized access to call into + * PostgreSQL, returning a result. */ - public static String getLibraryPath() + public static T doInPG(Checked.Supplier op) + throws E { - synchronized(THREADLOCK) - { - return _getLibraryPath(); - } + if ( null != THREADLOCK ) + synchronized(THREADLOCK) + { + return op.get(); + } + assertThreadMayEnterPG(); + return op.get(); } /** - * Returns the size of the statement cache. - * @return the size of the statement cache. 
+ * Specialization of {@link #doInPG(Supplier) doInPG} for operations that + * return no result. This version must be present, as the Java compiler will + * not automagically match a void lambda or method reference to + * {@code Supplier}. */ - public static int getStatementCacheSize() + public static void doInPG(Checked.Runnable op) + throws E { - synchronized(THREADLOCK) - { - return _getStatementCacheSize(); - } + if ( null != THREADLOCK ) + synchronized(THREADLOCK) + { + op.run(); + return; + } + assertThreadMayEnterPG(); + op.run(); } /** - * Log a message using the internal elog command. - * @param logLevel The log level as defined in - * {@link ELogHandler}. - * @param str The message + * Specialization of {@link #doInPG(Supplier) doInPG} for operations that + * return a boolean result. This method need not be present: without it, the + * Java compiler will happily match boolean lambdas or method references to + * the generic method, at the small cost of some boxing/unboxing; providing + * this method simply allows that to be avoided. */ - static void log(int logLevel, String str) + public static boolean doInPG( + Checked.BooleanSupplier op) + throws E { - synchronized(THREADLOCK) - { - _log(logLevel, str); - } + if ( null != THREADLOCK ) + synchronized(THREADLOCK) + { + return op.getAsBoolean(); + } + assertThreadMayEnterPG(); + return op.getAsBoolean(); } - private static class PLJavaSecurityManager extends SecurityManager + /** + * Specialization of {@link #doInPG(Supplier) doInPG} for operations that + * return a double result. This method need not be present: without it, the + * Java compiler will happily match double lambdas or method references to + * the generic method, at the small cost of some boxing/unboxing; providing + * this method simply allows that to be avoided. + */ + public static double doInPG( + Checked.DoubleSupplier op) + throws E { - private boolean m_recursion = false; - - public void checkPermission(Permission perm) - { - this.nonRecursiveCheck(perm); - } - - public void checkPermission(Permission perm, Object context) - { - this.nonRecursiveCheck(perm); - } - - private synchronized void nonRecursiveCheck(Permission perm) - { - if(m_recursion) - // - // Something, probably a ClassLoader - // loading one of the referenced - // classes, caused a recursion. Well - // everything done within this method - // is permitted so we just return - // here. - // - return; - - m_recursion = true; - try - { - this.assertPermission(perm); - } - finally + if ( null != THREADLOCK ) + synchronized(THREADLOCK) { - m_recursion = false; + return op.getAsDouble(); } - } + assertThreadMayEnterPG(); + return op.getAsDouble(); + } - void assertPermission(Permission perm) - { - if(perm instanceof RuntimePermission) + /** + * Specialization of {@link #doInPG(Supplier) doInPG} for operations that + * return an int result. This method need not be present: without it, the + * Java compiler will happily match int lambdas or method references to + * the generic method, at the small cost of some boxing/unboxing; providing + * this method simply allows that to be avoided. 
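The doInPG family above replaces the scattered synchronized(THREADLOCK) blocks removed elsewhere in this change. A stripped-down, standalone rendering of the same shape (illustrative names only, not PL/Java's code) shows why parameterizing the functional interface over the exception type lets a checked exception surface unchanged at the call site; the primitive overloads in the real class exist only to avoid boxing.

    /* Illustrative analogue of the doInPG gate; not PL/Java's implementation. */
    public final class Gate
    {
        @FunctionalInterface
        public interface ThrowingSupplier<T, E extends Throwable>
        {
            T get() throws E;
        }

        private static final Object LOCK = new Object();

        /* Serialize the operation and re-expose its checked exception type E. */
        public static <T, E extends Throwable> T doSerialized(ThrowingSupplier<T, E> op)
        throws E
        {
            synchronized ( LOCK )
            {
                return op.get();
            }
        }

        public static void main(String[] args) throws Exception
        {
            /* The lambda throws IOException; E is inferred as IOException and
             * is declared by doSerialized, so the caller handles it normally. */
            String text = doSerialized(() -> java.nio.file.Files.readString(
                java.nio.file.Path.of("example.txt")));   // hypothetical file
            System.out.println(text.length());
        }
    }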
+ */ + public static int doInPG(Checked.IntSupplier op) + throws E + { + if ( null != THREADLOCK ) + synchronized(THREADLOCK) { - String name = perm.getName(); - if("*".equals(name) || "exitVM".equals(name)) - throw new SecurityException(); - else if("setSecurityManager".equals(name) - && !s_inSetTrusted) - // - // Attempt to set another - // security manager while not - // in the setTrusted method - // - throw new SecurityException(); + return op.getAsInt(); } - else if(perm instanceof PropertyPermission) + assertThreadMayEnterPG(); + return op.getAsInt(); + } + + /** + * Specialization of {@link #doInPG(Supplier) doInPG} for operations that + * return a long result. This method need not be present: without it, the + * Java compiler will happily match int lambdas or method references to + * the generic method, at the small cost of some boxing/unboxing; providing + * this method simply allows that to be avoided. + */ + public static long doInPG(Checked.LongSupplier op) + throws E + { + if ( null != THREADLOCK ) + synchronized(THREADLOCK) { - if(perm.getActions().indexOf("write") >= 0) - { - // We never allow this to be changed. - // As for UDT byteorder, the classes that use it only check - // once so it would be misleading to allow runtime changes; - // use pljava.vmoptions to provide an initial value. - // - String propName = perm.getName(); - if ( propName.equals("java.home") || propName.matches( - "org\\.postgresql\\.pljava\\.udt\\.byteorder(?:\\..*)?") - ) - throw new SecurityException(); - } + return op.getAsLong(); } - } + assertThreadMayEnterPG(); + return op.getAsLong(); } - private static boolean s_inSetTrusted = false; + /** + * Return true if the current thread may JNI-call into Postgres. + *

      + * In PL/Java's threading model, only one thread (or only one thread at a + * time, depending on the setting of {@code pljava.java_thread_pg_entry}) + * may make calls into the native PostgreSQL code. + *

      + * Note: The setting {@code pljava.java_thread_pg_entry=error} is an + * exception; under that setting this method will return true for any + * thread that acquires the {@code THREADLOCK} monitor, but any such thread + * that isn't the actual original PG thread will have an exception thrown + * if it calls into PG. + *

      + * Under the setting {@code pljava.java_thread_pg_entry=throw}, this method + * will only return true for the one primordial PG thread (and there is no + * {@code THREADLOCK} object to do any monitor operations on). + * @return true if the current thread is the one prepared to enter PG. + */ + public static boolean threadMayEnterPG() + { + if ( null != THREADLOCK ) + return Thread.holdsLock(THREADLOCK); + return Boolean.TRUE == IAMPGTHREAD.get(); + } - private static final SecurityManager s_untrustedSecurityManager = new PLJavaSecurityManager(); + /** + * Throw {@code IllegalStateException} if {@code threadMayEnterPG()} would + * return false. + *

      + * This method is only called in, and only correct for, the case where no + * {@code THREADLOCK} is in use and only the one primordial thread is ever + * allowed into PG. + */ + private static void assertThreadMayEnterPG() + { + if ( null == IAMPGTHREAD.get() ) + throw new IllegalStateException( + "Attempt by non-initial thread to enter PostgreSQL from Java"); + } /** - * This security manager will block all attempts to access the file system + * Returns the configuration option as read from the Global + * Unified Config package (GUC). + * @param key The name of the option. + * @return The value of the option. */ - private static final SecurityManager s_trustedSecurityManager = new PLJavaSecurityManager() + public static String getConfigOption(String key) { - void assertPermission(Permission perm) - { - if(perm instanceof FilePermission) - { - String actions = perm.getActions(); - if("read".equals(actions)) - { - // Allow read of /dev/random - // and /dev/urandom - - String fileName = perm.getName(); - - if ( "/dev/random".equals( fileName ) - || - "/dev/urandom".equals( fileName ) - ) - return; - - // Must be able to read - // timezone info etc. in the - // java installation - // directory. - // - String classpath = Backend._getConfigOption("pljava_classpath"); - String[] classpathArray = classpath.split(":"); - String jarpath = Backend.getLibraryPath() + "/java"; - - File gpJavaLib = new File(jarpath); - File javaHome = new File(System.getProperty("java.home")); - File accessedFile = new File(perm.getName()); - File fileDir = accessedFile.getParentFile(); - while(fileDir != null) - { - if(fileDir.equals(javaHome)) - return; - if(fileDir.equals(gpJavaLib)) - return; - // now search through the classpaths - for (int i = 0; i < classpathArray.length; i++) - { - File classPathEntry = new File(classpathArray[i]); - // need to check to see if we can read the directory as well as read the actual file. - if (fileDir.equals(classPathEntry.getPath()) || fileDir.equals(classPathEntry.getParentFile())) - return; - } - fileDir = fileDir.getParentFile(); - } - } - throw new SecurityException(perm.getActions() + " on " + perm.getName()); - } - super.assertPermission(perm); - } - }; + return doInPG(() -> _getConfigOption(key)); + } - public static void addClassImages(int jarId, String urlString) + public static List getListConfigOption(String key) throws SQLException { - InputStream urlStream = null; - boolean wasTrusted = (System.getSecurityManager() == s_trustedSecurityManager); + String s = getConfigOption(key); + if ( null == s ) + return null; - if(wasTrusted) - setTrusted(false); - - try - { - URL url = new URL(urlString); - URLConnection uc = url.openConnection(); - uc.connect(); - int sz = uc.getContentLength(); // once java6 obsolete, use ...Long - urlStream = uc.getInputStream(); - Commands.addClassImages(jarId, urlStream, sz); - } - catch(IOException e) - { - throw new SQLException("I/O exception reading jar file: " + e.getMessage()); - } - finally + final Matcher m = s_gucList.matcher(s); + ArrayList al = new ArrayList<>(); + while ( m.find() ) { - if(urlStream != null) - try { urlStream.close(); } catch(IOException e) {} - if(wasTrusted) - setTrusted(true); + al.add(identifierFrom(m)); + if ( null != m.group("more") ) + continue; + if ( ! 
m.hitEnd() ) + throw new SQLDataException(String.format( + "configuration option \"%1$s\" improper list syntax", + key), "22P02"); } + al.trimToSize(); + return Collections.unmodifiableList(al); + } + + /** + * Returns the size of the statement cache. + * @return the size of the statement cache. + */ + public static int getStatementCacheSize() + { + return doInPG(Backend::_getStatementCacheSize); + } + + /** + * Log a message using the internal elog command. + * @param logLevel The log level as defined in + * {@link ELogHandler}. + * @param str The message + */ + public static void log(int logLevel, String str) + { + doInPG(() -> _log(logLevel, str)); } public static void clearFunctionCache() { - synchronized(THREADLOCK) - { - _clearFunctionCache(); - } + doInPG(Backend::_clearFunctionCache); } public static boolean isCreatingExtension() { - synchronized(THREADLOCK) - { - return _isCreatingExtension(); - } + return doInPG(Backend::_isCreatingExtension); + } + + public static boolean allowingUnenforcedUDT() + { + return doInPG(Backend::_allowingUnenforcedUDT); } /** - * Called when the JVM is first booted and then everytime a switch - * is made between calling a trusted function versus an untrusted - * function. + * Returns the path of PL/Java's shared library. + * @throws SQLException if for some reason it can't be determined. */ - private static void setTrusted(boolean trusted) + public static String myLibraryPath() throws SQLException { - s_inSetTrusted = true; - try - { - Logger log = Logger.getAnonymousLogger(); - if(log.isLoggable(Level.FINE)) - log.fine("Using SecurityManager for " + (trusted ? "trusted" : "untrusted") + " language"); - System.setSecurityManager(trusted ? s_trustedSecurityManager : s_untrustedSecurityManager); - } - finally - { - s_inSetTrusted = false; - } + String result = doInPG(Backend::_myLibraryPath); + + if ( null != result ) + return result; + + throw new SQLException("Unable to retrieve PL/Java's library path"); + } + + /** + * Attempt (best effort, unexposed JDK internals) to suppress + * the layer-inappropriate JEP 411 warning when {@code InstallHelper} + * sets up permission enforcement. + */ + static void pokeJEP411() + { + _pokeJEP411(InstallHelper.class, true); } /** @@ -308,19 +332,28 @@ private static void setTrusted(boolean trusted) * when called from a thread other then the main thread and the main * thread has returned from the call into the JVM. */ - public native static boolean isCallingJava(); + public static native boolean isCallingJava(); /** * Returns the value of the GUC custom variable * pljava.release_lingering_savepoints. 
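The getListConfigOption matcher loop above leans on two Matcher features: the \G anchor keeps each find() contiguous with the previous match, and hitEnd() distinguishes "ran out of input" from "stopped at something unparsable". A simplified standalone sketch of the same loop shape follows; its identifier pattern is deliberately naive and merely stands in for the much richer ISO_AND_PG_IDENTIFIER_CAPTURING used above.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    final class GucListDemo
    {
        /* Accepts only unquoted identifiers; real code must also handle the
         * quoted forms, which is what the capturing pattern in Lexicals does. */
        private static final Pattern ELEMENT =
            Pattern.compile("\\G\\s*([A-Za-z_][A-Za-z_0-9]*)\\s*(,)?");

        static List<String> split(String s)
        {
            Matcher m = ELEMENT.matcher(s);
            List<String> result = new ArrayList<>();
            while ( m.find() )
            {
                result.add(m.group(1));
                if ( null != m.group(2) )
                    continue;          // a comma was consumed; expect another element
                if ( ! m.hitEnd() )    // no comma and input left over: malformed
                    throw new IllegalArgumentException("improper list syntax: " + s);
            }
            return result;
        }

        public static void main(String[] args)
        {
            System.out.println(split("alpha, bravo_2, charlie")); // [alpha, bravo_2, charlie]
        }
    }

With an input such as "alpha extra" the first element matches, no comma follows, and input remains, so the hitEnd() test fails and the list is rejected; that is the role the SQLSTATE 22P02 branch plays above.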
*/ - public native static boolean isReleaseLingeringSavepoints(); + public static native boolean isReleaseLingeringSavepoints(); - private native static String _getConfigOption(String key); - private native static String _getLibraryPath(); + private static native String _getConfigOption(String key); - private native static int _getStatementCacheSize(); - private native static void _log(int logLevel, String str); - private native static void _clearFunctionCache(); - private native static boolean _isCreatingExtension(); + private static native int _getStatementCacheSize(); + private static native void _log(int logLevel, String str); + private static native void _clearFunctionCache(); + private static native boolean _isCreatingExtension(); + private static native String _myLibraryPath(); + private static native void _pokeJEP411(Class caller, Object token); + private static native boolean _allowingUnenforcedUDT(); + + private static class EarlyNatives + { + private static native boolean _forbidOtherThreads(); + private static native Class _defineClass( + String name, ClassLoader loader, byte[] buf); + } } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/ByteBufferInputStream.java b/pljava/src/main/java/org/postgresql/pljava/internal/ByteBufferInputStream.java new file mode 100644 index 00000000..992fcc29 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/ByteBufferInputStream.java @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2018 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.io.InputStream; +import java.io.IOException; + +import java.nio.ByteBuffer; +import java.nio.InvalidMarkException; + +/** + * Wrap a readable {@link ByteBuffer} as an {@link InputStream}. + *

      + * An implementing class must provide a {@link #buffer} method that returns the + * {@code ByteBuffer}, and the method is responsible for knowing when the memory + * region windowed by the {@code ByteBuffer} is no longer to be accessed, and + * throwing an exception in that case. + *

      + * The implementing class may supply an object that the {@code InputStream} + * operations will be {@code synchronized} on. + *

      + * The underlying buffer's + * {@link ByteBuffer#position() position} and + * {@link ByteBuffer#mark() mark} are used to maintain the corresponding values + * for the input stream. + */ +public abstract class ByteBufferInputStream extends InputStream +{ + /** + * The object on which the {@code InputStream} operations will synchronize. + */ + protected final Object m_lock; + + /** + * Whether this stream is open; initially true. + */ + protected boolean m_open; + + /** + * Construct an instance whose critical sections will synchronize on the + * instance itself. + */ + protected ByteBufferInputStream() + { + m_lock = this; + m_open = true; + } + + /** + * Construct an instance, given an object on which to synchronize. + * @param lock The Object to synchronize on. + */ + protected ByteBufferInputStream(Object lock) + { + m_lock = lock; + m_open = true; + } + + /** + * Pin resources if necessary during a reading operation. + *

      + * This default implementation does nothing. A subclass should override it + * if (in addition to synchronizing on {@code m_lock}), some pinning of a + * resource is needed during access operations. + */ + protected void pin() throws IOException + { + } + + /** + * Unpin resources if necessary after a reading operation. + *

      + * This default implementation does nothing. + */ + protected void unpin() + { + } + + /** + * Return the {@link ByteBuffer} being wrapped, or throw an exception if the + * memory windowed by the buffer should no longer be accessed. + *

      + * The monitor on {@link #m_lock} is held when this method is called. + *

      + * This method also should throw an exception if {@link #m_open} is false. + * It is called everywhere that should happen, so it is the perfect place + * for the test, and allows the implementing class to use a customized + * message in the exception. + *

      + * All uses of the buffer in this class are preceded by {@code pin()} and + * followed by {@code unpin()} (whose default implementations in this class + * do nothing). If a subclass overrides {@code pin} with a version that + * throws the appropriate exception in either case or both, it is then + * redundant and unnecessary for {@code buffer} to check the same + * conditions. + */ + protected abstract ByteBuffer buffer() throws IOException; + + @Override + public int read() throws IOException + { + pin(); + try + { + synchronized ( m_lock ) + { + ByteBuffer src = buffer(); + if ( 0 < src.remaining() ) + return src.get(); + return -1; + } + } + finally + { + unpin(); + } + } + + @Override + public int read(byte[] b, int off, int len) throws IOException + { + pin(); + try + { + synchronized ( m_lock ) + { + ByteBuffer src = buffer(); + int has = src.remaining(); + if ( len > has ) + { + if ( 0 == has ) + return -1; + len = has; + } + src.get(b, off, len); + return len; + } + } + finally + { + unpin(); + } + } + + @Override + public long skip(long n) throws IOException + { + pin(); + try + { + synchronized ( m_lock ) + { + ByteBuffer src = buffer(); + int has = src.remaining(); + if ( n > has ) + n = has; + src.position(src.position() + (int)n); + return n; + } + } + finally + { + unpin(); + } + } + + @Override + public int available() throws IOException + { + pin(); + try + { + synchronized ( m_lock ) + { + return buffer().remaining(); + } + } + finally + { + unpin(); + } + } + + @Override + public void close() throws IOException + { + synchronized ( m_lock ) + { + if ( ! m_open ) + return; + m_open = false; + } + } + + @Override + public void mark(int readlimit) + { + synchronized ( m_lock ) + { + if ( ! m_open ) + return; + boolean gotPin = false; // Kludge to get pin() inside the try block + try + { + pin(); + gotPin = true; + buffer().mark(); + } + catch ( IOException e ) + { + /* + * The contract is for mark to throw no checked exception. + * An exception caught here probably means the state's no longer + * live, which will be signaled to the caller if another, + * throwing, method is then called. If not, no harm no foul. + */ + } + finally + { + if ( gotPin ) + unpin(); + } + } + } + + @Override + public void reset() throws IOException + { + synchronized ( m_lock ) + { + if ( ! m_open ) + return; + pin(); + try + { + buffer().reset(); + } + catch ( InvalidMarkException e ) + { + throw new IOException("reset attempted when mark not set"); + } + finally + { + unpin(); + } + } + } + + /** + * Return {@code true}; this class does support {@code mark} and + * {@code reset}. + */ + @Override + public boolean markSupported() + { + return true; + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/ByteBufferXMLReader.java b/pljava/src/main/java/org/postgresql/pljava/internal/ByteBufferXMLReader.java new file mode 100644 index 00000000..3a013fb6 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/ByteBufferXMLReader.java @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
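ByteBufferInputStream, added just above, leaves only buffer() abstract; pin() and unpin() default to no-ops. As a hedged illustration (the class name is hypothetical and nothing like it is added by this change), the minimum a concrete subclass owes is an m_open check and the buffer itself, since a heap-backed buffer needs no pinning:

    import java.io.IOException;
    import java.nio.ByteBuffer;

    import org.postgresql.pljava.internal.ByteBufferInputStream;

    /* Hypothetical: a ByteBufferInputStream over an ordinary heap array, whose
     * backing memory never disappears, so buffer() only enforces m_open. */
    class HeapByteBufferInputStream extends ByteBufferInputStream
    {
        private final ByteBuffer m_buf;

        HeapByteBufferInputStream(byte[] bytes)
        {
            m_buf = ByteBuffer.wrap(bytes);
        }

        @Override
        protected ByteBuffer buffer() throws IOException
        {
            if ( ! m_open )
                throw new IOException("read from closed stream");
            return m_buf;
        }
    }

A subclass windowing native memory would instead override pin()/unpin() to guard the lifetime of that memory during each read, as the class comments above anticipate.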
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.io.IOException; + +import java.nio.ByteBuffer; + +import java.sql.SQLException; + +import org.xml.sax.InputSource; +import org.xml.sax.SAXException; + +/** + * Wrap a readable {@link ByteBuffer} as a {@link SyntheticXMLReader}. + *

      + * An implementing class must provide a {@link #buffer} method that returns the + * {@code ByteBuffer}, and the method is responsible for knowing when the memory + * region windowed by the {@code ByteBuffer} is no longer to be accessed, and + * throwing an exception in that case (unless the class also overrides + * {@link #pin} and performs the check there instead). + *

      + * The underlying buffer's {@link ByteBuffer#position() position} may be used to + * maintain the XML reader's position. + */ +public abstract class ByteBufferXMLReader extends SyntheticXMLReader +{ + private boolean m_done = false; + + /** + * Pin resources as needed during a reading operation. + *

      + * The implementation is also responsible for tracking whether this + * instance has been closed, and throwing an exception if so. + */ + protected abstract void pin() throws SQLException; + + /** + * Unpin resources after a reading operation. + */ + protected abstract void unpin(); + + /** + * Return the {@link ByteBuffer} being wrapped. + *

      + * All uses of the buffer in this class are preceded by {@code pin()} and + * followed by {@code unpin()}. + */ + protected abstract ByteBuffer buffer() throws SQLException; + + /** + * Return null if no more events available, or an {@code EventCarrier} + * that carries one or more. + *

      + * Start- and end-document events are supplied by the caller, and so should + * not be supplied here. + *

      + * The pin on the underlying state is held. + * @param buf The buffer to read from. Its + * {@link ByteBuffer#position position} may be used to maintain input + * position. + * @return An {@link EventCarrier} representing some XML parse events, + * null if none remain. + */ + protected abstract EventCarrier next(ByteBuffer buf); + + /** + * This implementation invokes {@code next(ByteBuffer)} to get + * some more events. + *

      + * @return an {@link EventCarrier}, or null if no more. + */ + @Override + protected EventCarrier next() + { + if ( m_done ) + return null; + + boolean gotPin = false; // Kludge to get pin() inside the try block + try + { + pin(); + gotPin = true; + EventCarrier ec = next(buffer()); + if ( null == ec ) + m_done = true; + return ec; + } + catch ( Exception e ) + { + m_done = true; + return exceptionCarrier(e); + } + finally + { + if ( gotPin ) + unpin(); + } + } + + @Override + public void parse(InputSource input) throws IOException, SAXException + { + parse(); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Checked.java b/pljava/src/main/java/org/postgresql/pljava/internal/Checked.java new file mode 100644 index 00000000..3707c43c --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Checked.java @@ -0,0 +1,2667 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.util.OptionalDouble; +import java.util.OptionalInt; +import java.util.OptionalLong; +import java.util.NoSuchElementException; +import static java.util.Objects.requireNonNull; +import java.util.stream.BaseStream; + +/** + * Functional interfaces handling checked exceptions. + *

      + * It would be ideal if the compiler could preserve its union of possible + * thrown types as the inferred exception type of the functional interface + * method. Instead, it collapses the union to the nearest common supertype, + * which is less useful, as it becomes {@code Exception} rather quickly if + * the lambda can throw a few unrelated exceptions. It is still useful for + * short lambdas that throw only a few related exceptions. + *

      + * Also, the Java API lacks primitive + * {@code Consumer}/{@code Supplier}/{@code Optional} types for {@code byte}, + * {@code short}, {@code char}, {@code float}, and some of them for + * {@code boolean}. To allow a more orthogonal API for access to datum values, + * those are provided here, again supporting checked exceptions. Because these + * "bonus" types do not have checked-exception-less counterparts in the Java + * API, they do not strictly need the wrapper methods described next. + *
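As a sketch of one of these "bonus" primitive interfaces in use (illustrative only; the lambda body, SQLSTATE, and names are assumptions, not from the source), a checked byte-returning function over a ByteBuffer might look like this:

    import java.nio.ByteBuffer;
    import java.sql.SQLException;
    import org.postgresql.pljava.internal.Checked;

    class ToByteExample
    {
        // A checked, byte-returning function with no java.util.function
        // counterpart; use(...) serves only to pin down the type concisely.
        static final Checked.ToByteFunction<ByteBuffer,SQLException> FIRST_BYTE =
            Checked.ToByteFunction.use(b ->
            {
                if ( ! b.hasRemaining() )
                    throw new SQLException("empty datum", "22000");
                return b.get(b.position());
            });
    }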

      + * For interoperating with Java APIs that require the Java no-checked-exceptions + * versions of these interfaces, each checked interface here (for which a Java + * API no-checked version exists) has an {@code ederWrap} method that produces + * the Java no-checked version of the same interface, using a lightweight idiom + * advanced by Lukas Eder, developer of jOOλ. The checked exception is not + * wrapped, but simply flown under {@code javac}'s radar. That idiom is extended + * here with a corresponding {@code in} method to pass the wrapped interface + * into code that requires it, re-exposing the checked exception type. That + * makes possible constructions like: + * + *

      + * Stream<String> strs = ...;
      + * Writer w = ...;
      + * try {
      + *   Checked.Consumer.use((String s) -> w.write(s)) // throws IOException!
      + *     .in(c -> strs.forEach(c));
      + * }
      + * catch ( IOException e ) { ... }
      + *
      + * + * where the {@code Stream.forEach} method requires a Java {@code Consumer} + * that declares no checked exceptions. + *
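The idiom also has value-returning forms. The following hedged sketch (the allLines method, its parameters, and the java.nio.file calls are illustrative assumptions, not part of this diff) uses inReturning so the IOException from Files.readAllLines remains visible to the caller even though Stream.map only accepts an ordinary java.util.function.Function:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;
    import java.util.stream.Collectors;
    import org.postgresql.pljava.internal.Checked;

    class InReturningExample
    {
        // Returns all lines of all the given files. The IOException is flown
        // through ederWrap() inside the stream and re-exposed by inReturning().
        static List<String> allLines(List<Path> paths) throws IOException
        {
            return Checked.Function.use((Path p) -> Files.readAllLines(p))
                .inReturning(f ->
                    paths.stream()
                        .map(f)                 // f is the unchecked wrapper here
                        .flatMap(List::stream)
                        .collect(Collectors.toList()));
        }
    }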

      + * Such an idiom is, of course, contrary to an SEI CERT coding standard, + * and likely to produce surprises if the exception is 'flown' through deep + * layers of others' code that may contain {@code catch} blocks. That said, as + * a convenience for dealing with checked exceptions and simple Java APIs that + * cannot accept them, it can be useful as long as the intervening code through + * which the exception may be 'flown' is simple and short. + *

      + * The functional interfaces defined here that do not correspond to a + * Java API no-checked version, while not strictly needing an {@code ederWrap} + * method, have one anyway, a no-op identity function. That avoids arbitrary + * limits on which ones can participate in the {@code use(...).in(...)} idiom. + *

      + * Static {@code composed()} methods are provided here in place of the instance + * {@code compose} or {@code andThen} methods in Java's function API, which seem + * to challenge {@code javac}'s type inference when exception types are thrown + * in. A static {@code composed} method can substitute for {@code compose} or + * {@code andThen}, by ordering the parameters as desired. Likewise, static + * {@code and} and {@code or} methods are provided in place of the instance + * methods on Java's {@code Predicate}. + *
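For instance (a hedged sketch; the Path-based lambdas are illustrative assumptions, not from the source), composed() and and() can stand in where java.util.function would offer andThen and Predicate.and, and the result's exception type covers both steps:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;
    import org.postgresql.pljava.internal.Checked;

    class ComposedExample
    {
        // composed(first, after): read the lines, then take the first one.
        static final Checked.Function<Path,String,IOException> FIRST_LINE =
            Checked.composed(
                (Path p) -> Files.readAllLines(p),             // throws IOException
                (List<String> lines) -> lines.isEmpty() ? "" : lines.get(0));

        // and(first, after): short-circuits, so size() is only consulted for
        // regular files; the combined predicate still declares IOException.
        static final Checked.Predicate<Path,IOException> HAS_CONTENT =
            Checked.and(
                (Path p) -> Files.isRegularFile(p),            // no checked exception
                (Path p) -> Files.size(p) > 0);                // throws IOException
    }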

      + * Each functional interface declared here has a static {@code use(...)} method + * that can serve, as a concise alternative to casting, to constrain the type + * of a lambda expression when the compiler won't infer it. + *
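A small sketch of that (the Path parameter is an illustrative assumption): a bare lambda cannot initialize a var, but use(...) supplies the checked target type without a cast:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.postgresql.pljava.internal.Checked;

    class UseExample
    {
        static long sizeOf(Path p) throws IOException
        {
            // 'var' gives the lambda no target type; use(...) constrains it to
            // Checked.Supplier<Long,IOException> more readably than a cast would.
            var sizer = Checked.Supplier.use(() -> Files.size(p));
            return sizer.get(); // may throw IOException
        }
    }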

      + * A {@link AutoCloseable variant of AutoCloseable} with an exception-type + * parameter, and some {@link #closing(AutoCloseable) closing} methods (inspired + * by Python, for use with resources that do not already implement + * {@code AutoCloseable}), are also provided. + * + * @param <WT> The type this functional interface can be wrapped as by + * ederWrap(), which may be a corresponding Java functional interface that does + * not allow checked exceptions. + * @param <EX> The checked exception type (or least upper bound of the checked + * exception types) that the body of this functional interface may throw. + */ +public interface Checked<WT, EX extends Throwable> +{ + /** + * Throw an exception, unsafely cast to a different exception type, chiefly + * as used by Lukas Eder to fly a checked exception through a section of + * code that does not accept it. + *

      + * In PL/Java, this method is not intended to be called directly, but used + * transparently within the {@link #in in(...)} construct, which re-exposes + * the checked exception type {@code EX} to the compiler. + * @param The throwable type the argument t should be presented as. + * @param t A throwable. + * @throws E The argument t, represented to the compiler as of type E. + */ + @SuppressWarnings("unchecked") + static E ederThrow(Throwable t) throws E + { + throw (E) t; + } + + /** + * Wraps this {@code Checked} functional interfaces as its + * corresponding Java functional interface {@code WT}, which possibly + * does not allow checked exceptions. + *

      + * Checked exceptions of type {@code EX} may still, in reality, be thrown. + * This method is not intended to be called directly; it is used + * transparently by {@link #in in(...)}, which passes the wrapper type into + * code that requires it, but re-exposes the original checked exception type + * to the compiler. + */ + WT ederWrap(); + + /** + * Passes this functional interface, wrapped as its wrapper type {@code WT}, + * into a code body that requires that wrapper type, while remembering for + * the compiler the checked exception type that may, in fact, be thrown. + * + * @param Any exception type that the body, c, detectably can throw. + * @param c A Consumer to which this instance, wrapped as its corresponding + * functional interface WT, will be passed. + * @throws EX whatever can be thrown by the body of this instance + * @throws RX whatever can be thrown by the body c. + */ + default + void in(Consumer c) + throws EX, RX + { + c.accept(ederWrap()); + } + + /** + * Like {@link #in in(...)} but where the body returns a non-primitive type. + * + * @param Return type of the body f. + * @param Any exception type that the body, f, detectably can throw. + * @param f A ToDoubleFunction to which this instance, wrapped as its + * corresponding functional interface WT, will be passed. + * @throws EX whatever can be thrown by the body of this instance + * @throws RX whatever can be thrown by the body f. + */ + default + RT inReturning(Function f) + throws EX, RX + { + return f.apply(ederWrap()); + } + + /** + * Like {@link #in in(...)} but where the body returns a {@code double}. + * + * @param Any exception type that the body, f, detectably can throw. + * @param f A ToDoubleFunction to which this instance, wrapped as its + * corresponding functional interface WT, will be passed. + * @throws EX whatever can be thrown by the body of this instance + * @throws RX whatever can be thrown by the body f. + */ + default + double inDoubleReturning(ToDoubleFunction f) + throws EX, RX + { + return f.apply(ederWrap()); + } + + /** + * Like {@link #in in(...)} but where the body returns an {@code int}. + * + * @param Any exception type that the body, f, detectably can throw. + * @param f A ToIntFunction to which this instance, wrapped as its + * corresponding functional interface WT, will be passed. + * @throws EX whatever can be thrown by the body of this instance + * @throws RX whatever can be thrown by the body f. + */ + default + int inIntReturning(ToIntFunction f) + throws EX, RX + { + return f.apply(ederWrap()); + } + + /** + * Like {@link #in in(...)} but where the body returns a {@code long}. + * + * @param Any exception type that the body, f, detectably can throw. + * @param f A ToLongFunction to which this instance, wrapped as its + * corresponding functional interface WT, will be passed. + * @throws EX whatever can be thrown by the body of this instance + * @throws RX whatever can be thrown by the body f. + */ + default + long inLongReturning(ToLongFunction f) + throws EX, RX + { + return f.apply(ederWrap()); + } + + /** + * Like {@link #in in(...)} but where the body returns a {@code boolean}. + * + * @param Any exception type that the body, f, detectably can throw. + * @param f A Predicate to which this instance, wrapped as its + * corresponding functional interface WT, will be passed. + * @throws EX whatever can be thrown by the body of this instance + * @throws RX whatever can be thrown by the body f. 
+ */ + default + boolean inBooleanReturning(Predicate f) + throws EX, RX + { + return f.test(ederWrap()); + } + + + /** + * Like {@link #in in(...)} but where the body returns a {@code byte}. + *

      + * This method is provided for consistency of notation, even though it is + * not strictly needed because Java has no checked-exception-less + * counterpart of {@code ToByteFunction}. + * + * @param Any exception type that the body, f, detectably can throw. + * @param f A ToByteFunction to which this instance, wrapped as its + * corresponding functional interface WT, will be passed. + * @throws EX whatever can be thrown by the body of this instance + * @throws RX whatever can be thrown by the body f. + */ + default + byte inByteReturning(ToByteFunction f) + throws EX, RX + { + return f.apply(ederWrap()); + } + + /** + * Like {@link #in in(...)} but where the body returns a {@code short}. + *

      + * This method is provided for consistency of notation, even though it is + * not strictly needed because Java has no checked-exception-less + * counterpart of {@code ToShortFunction}. + * + * @param Any exception type that the body, f, detectably can throw. + * @param f A ToShortFunction to which this instance, wrapped as its + * corresponding functional interface WT, will be passed. + * @throws EX whatever can be thrown by the body of this instance + * @throws RX whatever can be thrown by the body f. + */ + default + short inShortReturning(ToShortFunction f) + throws EX, RX + { + return f.apply(ederWrap()); + } + + /** + * Like {@link #in in(...)} but where the body returns a {@code char}. + *

      + * This method is provided for consistency of notation, even though it is + * not strictly needed because Java has no checked-exception-less + * counterpart of {@code ToCharFunction}. + * + * @param Any exception type that the body, f, detectably can throw. + * @param f A ToCharFunction to which this instance, wrapped as its + * corresponding functional interface WT, will be passed. + * @throws EX whatever can be thrown by the body of this instance + * @throws RX whatever can be thrown by the body f. + */ + default + char inCharReturning(ToCharFunction f) + throws EX, RX + { + return f.apply(ederWrap()); + } + + /** + * Like {@link #in in(...)} but where the body returns a {@code float}. + *

      + * This method is provided for consistency of notation, even though it is + * not strictly needed because Java has no checked-exception-less + * counterpart of {@code ToFloatFunction}. + * + * @param Any exception type that the body, f, detectably can throw. + * @param f A ToFloatFunction to which this instance, wrapped as its + * corresponding functional interface WT, will be passed. + * @throws EX whatever can be thrown by the body of this instance + * @throws RX whatever can be thrown by the body f. + */ + default + float inFloatReturning(ToFloatFunction f) + throws EX, RX + { + return f.apply(ederWrap()); + } + + /* + * Short-circuiting predicate combinators. + */ + + /** + * Returns a {@code Predicate} that is the short-circuiting {@code AND} of + * two others. + * @param Greatest-lower-bound parameter type acceptable to first and + * after, and the parameter type of the resulting predicate. + * @param Least upper bound of the exception types thrown by first and + * after, representing the exception types thrown by the resulting + * predicate. + * @param first The predicate to be tested first. + * @param after The predicate to be tested next. + */ + static + Predicate and( + Predicate first, + Predicate after) + { + return t -> first.test(t) && after.test(t); + } + + /** + * Returns a {@code Predicate} that is the short-circuiting {@code OR} of + * two others. + * @param Greatest-lower-bound parameter type acceptable to first and + * after, and the parameter type of the resulting predicate. + * @param Least upper bound of the exception types thrown by first and + * after, representing the exception types thrown by the resulting + * predicate. + * @param first The predicate to be tested first. + * @param after The predicate to be tested next. + */ + static + Predicate or( + Predicate first, + Predicate after) + { + return t -> first.test(t) || after.test(t); + } + + /* + * composed() methods. + */ + + /** + * Returns a {@code Function} that is the composition of + * two others. + * @param Parameter type of the resulting function, and acceptable as + * parameter of first. + * @param Type subsuming the return type of first, and acceptable as + * parameter of after. + * @param Return type of the composed function, subsuming the return + * type of after. + * @param Least upper bound of the exception types thrown by first and + * after, representing the exception types thrown by the resulting + * function. + * @param first The first function to be applied. + * @param after The function applied to the result of first. + */ + static + Function composed( + Function first, + Function after) + { + return t -> after.apply(first.apply(t)); + } + + /** + * Returns a {@code BiConsumer} that is the composition of + * two others. + * @param First parameter type of the resulting BiConsumer, acceptable + * as first parameter to both first and after. + * @param Second parameter type of the resulting BiConsumer, acceptable + * as second parameter to both first and after. + * @param Least upper bound of the exception types thrown by first and + * after, representing the exception types thrown by the resulting + * biconsumer. + * @param first The first consumer to be applied. + * @param after The consumer next applied to the same inputs. + */ + static + BiConsumer composed( + BiConsumer first, + BiConsumer after) + { + return (t, u) -> + { + first.accept(t, u); + after.accept(t, u); + }; + } + + /** + * Returns a {@code Consumer} that is the composition of + * two others. 
+ * @param Parameter type of the resulting Consumer, acceptable + * as parameter to both first and after. + * @param Least upper bound of the exception types thrown by first and + * after, representing the exception types thrown by the resulting + * consumer. + * @param first The first consumer to be applied. + * @param after The consumer next applied to the same input. + */ + static + Consumer composed( + Consumer first, + Consumer after) + { + return t -> + { + first.accept(t); + after.accept(t); + }; + } + + /** + * Returns a {@code DoubleConsumer} that is the composition of + * two others. + * @param Least upper bound of the exception types thrown by first and + * after, representing the exception types thrown by the resulting + * consumer. + * @param first The first consumer to be applied. + * @param after The consumer next applied to the same input. + */ + static + DoubleConsumer composed( + DoubleConsumer first, + DoubleConsumer after) + { + return t -> + { + first.accept(t); + after.accept(t); + }; + } + + /** + * Returns an {@code IntConsumer} that is the composition of + * two others. + * @param Least upper bound of the exception types thrown by first and + * after, representing the exception types thrown by the resulting + * consumer. + * @param first The first consumer to be applied. + * @param after The consumer next applied to the same input. + */ + static + IntConsumer composed( + IntConsumer first, + IntConsumer after) + { + return t -> + { + first.accept(t); + after.accept(t); + }; + } + + /** + * Returns a {@code LongConsumer} that is the composition of + * two others. + * @param Least upper bound of the exception types thrown by first and + * after, representing the exception types thrown by the resulting + * consumer. + * @param first The first consumer to be applied. + * @param after The consumer next applied to the same input. + */ + static + LongConsumer composed( + LongConsumer first, + LongConsumer after) + { + return t -> + { + first.accept(t); + after.accept(t); + }; + } + + /** + * Version of {@link java.lang.AutoCloseable} with an exception-type + * parameter. + *

      + * This does not need {@code use} or {@code ederWrap} methods because Java's + * {@code AutoCloseable} already allows checked exceptions. The only trouble + * with the Java one is it can't be parameterized to narrow the thrown type + * from {@code Exception}. In Java's API docs, implementers are "strongly + * encouraged" to narrow their {@code throws} clauses, but that's only + * helpful where the compiler sees the specific implementing class. + */ + @FunctionalInterface + interface AutoCloseable + extends java.lang.AutoCloseable + { + @Override + void close() throws E; + } + + /** + * Returns its argument; shorthand for casting a suitable lambda to + * {@code AutoCloseable}. + *
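As a hedged sketch of that narrowing (the Connection-based body is an illustrative assumption, not from the source), declaring the resource as Checked.AutoCloseable<SQLException> lets the compiler see exactly which checked exception close() can throw, even through the interface type:

    import java.sql.Connection;
    import java.sql.SQLException;
    import org.postgresql.pljava.internal.Checked;

    class NarrowedCloseExample
    {
        static void rollingBack(Connection c) throws SQLException
        {
            Checked.AutoCloseable<SQLException> rollback = c::rollback;
            try ( rollback ) // Java 9+: an existing effectively-final resource
            {
                // work with c; close() is known to throw only SQLException
            }
        }
    }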

      + * Where some resource does not itself implement {@code AutoCloseable}, an + * idiom like the following, inspired by Python, can be used in Java 10 or + * later, and the compiler will infer that it can throw whatever + * {@code thing.release()} can throw: + *

      +	 *  try(var ac = closing(() -> { thing.release(); }))
      +	 *  {
      +	 *    ...
      +	 *  }
      +	 *
      + *

      + * Pre-Java 10, without {@code var}, you have to specify the exception type, + * but you still get the simple idiom without needing to declare some new + * interface: + *

      +	 *  try(Checked.AutoCloseable<ThingException> ac =
      +	 *		closing(() -> { thing.release(); }))
      +	 *  {
      +	 *    ...
      +	 *  }
      +	 *
      + * @param Least upper bound of exceptions that can be thrown by o + * @param o Lambda or method reference to serve as the close operation. + */ + static + AutoCloseable closing(AutoCloseable o) + { + return o; + } + + /** + * Wrap some payload and a 'closer' lambda as a {@code Closing} instance + * that can supply the payload and implements {@code AutoCloseable} using + * the lambda; useful in a {@code try}-with-resources when the payload + * itself does not implement {@code AutoCloseable}. + * @param Type of the payload. + * @param Least upper bound of exceptions that may be thrown at close. + * @param payload Any object. + * @param closer Lambda or method reference to serve as the close operation. + */ + static + Closing closing(T payload, AutoCloseable closer) + { + return new Closing<>(payload, closer); + } + + /** + * Given a stream and a lambda that should be invoked when it is closed, + * construct a new stream that runs that lambda when closed, and return a + * {@code Closing} instance with the new stream as its payload, which will + * be closed by the {@code close} action. + *
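A hedged sketch of the payload form (the Handle type is hypothetical, standing in for any resource that lacks AutoCloseable but has a release method that throws):

    import java.sql.SQLException;
    import org.postgresql.pljava.internal.Checked;
    import org.postgresql.pljava.internal.Checked.Closing;

    class PayloadClosingExample
    {
        // Hypothetical resource type: no AutoCloseable, but a release() that throws.
        interface Handle
        {
            void use() throws SQLException;
            void release() throws SQLException;
        }

        static void withHandle(Handle h) throws SQLException
        {
            // try-with-resources manages the handle; close() is known to throw
            // SQLException, nothing broader.
            try ( Closing<Handle,SQLException> c = Checked.closing(h, h::release) )
            {
                c.get().use();
            }
        }
    }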

      + * Intended for use in a {@code try}-with-resources. Any checked exception + * throwable by closer will be remembered as throwable by the + * {@code close} method of the returned {@code Closing} instance (and + * therefore will be considered throwable by the {@code try}-with-resources + * in which it is used. Any other code that calls {@code close} directly on + * the returned stream could be surprised by the checked exception, as a + * stream's {@code close} method is not declared to throw any. When used as + * intended in a {@code try}-with-resources, any such surprise is bounded + * by the scope of that statement. + * @param Type of the stream elements + * @param Type of the stream + * @param Least upper bound of exceptions that can be thrown by closer, + * and the declared throwable type of the close method of the returned + * Closing instance. + * @param stream Stream to have closer added as an action on close. + * @param closer Runnable to be executed when the returned stream is closed. + */ + static , E extends Exception> + Closing closing(S stream, Runnable closer) + { + S newStream = stream.onClose(closer.ederWrap()); + return new Closing<>(newStream, newStream::close); + } + + /** + * A class that can supply a {@code T} while also implementing + * {@code AutoCloseable}; suitable for use in a + * {@code try}-with-resources to wrap some value that does not itself + * implement {@code AutoCloseable}. + *
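And a sketch of the stream form (the BufferedReader and printing body are illustrative assumptions): the reader's close(), which throws IOException, is attached to the stream and surfaced through the Closing wrapper rather than surprising a direct caller of Stream.close():

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.util.stream.Stream;
    import org.postgresql.pljava.internal.Checked;
    import org.postgresql.pljava.internal.Checked.Closing;

    class StreamClosingExample
    {
        static void printAll(BufferedReader r) throws IOException
        {
            try ( Closing<Stream<String>,IOException> c =
                Checked.closing(r.lines(), r::close) )
            {
                c.get().forEach(System.out::println);
            }
        }
    }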

      + * Obtained via one of the {@code closing} methods above. + */ + /* + * This class also encloses the private interface Trivial, simply to make it + * private (a private interface can only exist within a class) to ensure it + * is only extended by other interfaces in this compilation unit (its + * default method includes an unchecked cast). It did not seem worth + * creating another entire class only to enclose a private interface. + */ + class Closing + implements java.util.function.Supplier, AutoCloseable + { + private final T m_payload; + private final AutoCloseable m_closer; + + private Closing(T payload, AutoCloseable closer) + { + m_payload = payload; + m_closer = requireNonNull(closer); + } + + @Override + public T get() + { + return m_payload; + } + + @Override + public void close() throws E + { + m_closer.close(); + } + + /** + * Superinterface of the functional interfaces declared here that do + * not have checked-exception-less counterparts in Java's API. + *

      + * These can all inherit a no-op default {@code ederWrap} that returns + * the instance unchanged, allowing them also to participate in the + * {@code use(...).in(...)} idiom for stylistic consistency even if it + * is not strictly necessary. + */ + private interface Trivial + , EX extends Throwable> + extends Checked + { + @Override + @SuppressWarnings("unchecked") + default WT ederWrap() + { + return (WT) this; + } + } + } + + /* + * Runnable. + */ + + /** + * Like {@link java.lang.Runnable} but with a body that can throw checked + * exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface Runnable + extends Checked + { + /** + * Execute the body of this {@code Runnable}. + */ + void run() throws E; + + @Override + default java.lang.Runnable ederWrap() + { + return () -> + { + try + { + run(); + } + catch ( Throwable t ) + { + throw Checked.ederThrow(t); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static Runnable use(Runnable o) + { + return o; + } + } + + /* + * Suppliers that have checked-exception-less counterparts in the Java API. + */ + + /** + * Like {@link java.util.function.Supplier} but with a body that can throw + * checked exceptions. + * @param Type the supplier will supply. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface Supplier + extends Checked, E> + { + /** + * Get the supplied value. + */ + T get() throws E; + + @Override + default java.util.function.Supplier ederWrap() + { + return () -> + { + try + { + return get(); + } + catch ( Throwable t ) + { + throw Checked.ederThrow(t); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *
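A hedged sketch of Checked.Runnable with the same use(...).in(...) idiom (the Writer/Lock body is an illustrative assumption, not from the source): the flush's IOException stays on this method's throws clause even though the inner code only sees a plain java.lang.Runnable:

    import java.io.IOException;
    import java.io.Writer;
    import java.util.concurrent.locks.Lock;
    import org.postgresql.pljava.internal.Checked;

    class RunnableExample
    {
        static void flushUnderLock(Writer w, Lock lock) throws IOException
        {
            Checked.Runnable.use(() -> w.flush())        // throws IOException
                .in(r -> { lock.lock(); try { r.run(); } finally { lock.unlock(); } });
        }
    }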

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static Supplier use(Supplier o) + { + return o; + } + } + + /** + * Like {@link java.util.function.BooleanSupplier} but with a body that can + * throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface BooleanSupplier + extends Checked + { + /** + * Get the supplied value. + */ + boolean getAsBoolean() throws E; + + @Override + default java.util.function.BooleanSupplier ederWrap() + { + return () -> + { + try + { + return getAsBoolean(); + } + catch ( Throwable t ) + { + throw Checked.ederThrow(t); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static + BooleanSupplier use(BooleanSupplier o) + { + return o; + } + } + + /** + * Like {@link java.util.function.DoubleSupplier} but with a body that can + * throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface DoubleSupplier + extends Checked + { + /** + * Get the supplied value. + */ + double getAsDouble() throws E; + + @Override + default java.util.function.DoubleSupplier ederWrap() + { + return () -> + { + try + { + return getAsDouble(); + } + catch ( Throwable t ) + { + throw Checked.ederThrow(t); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static DoubleSupplier use(DoubleSupplier o) + { + return o; + } + } + + /** + * Like {@link java.util.function.IntSupplier} but with a body that can + * throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface IntSupplier + extends Checked + { + /** + * Get the supplied value. + */ + int getAsInt() throws E; + + @Override + default java.util.function.IntSupplier ederWrap() + { + return () -> + { + try + { + return getAsInt(); + } + catch ( Throwable t ) + { + throw Checked.ederThrow(t); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static IntSupplier use(IntSupplier o) + { + return o; + } + } + + /** + * Like {@link java.util.function.LongSupplier} but with a body that can + * throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface LongSupplier + extends Checked + { + /** + * Get the supplied value. + */ + long getAsLong() throws E; + + @Override + default java.util.function.LongSupplier ederWrap() + { + return () -> + { + try + { + return getAsLong(); + } + catch ( Throwable t ) + { + throw Checked.ederThrow(t); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static LongSupplier use(LongSupplier o) + { + return o; + } + } + + /* + * Suppliers without checked-exception-less Java API counterparts. + */ + + /** + * A supplier of byte-valued results, with a body that can + * throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface ByteSupplier + extends Closing.Trivial, E> + { + /** + * Get the supplied value. + */ + byte getAsByte() throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static ByteSupplier use(ByteSupplier o) + { + return o; + } + } + + /** + * A supplier of short-valued results, with a body that can + * throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface ShortSupplier + extends Closing.Trivial, E> + { + /** + * Get the supplied value. + */ + short getAsShort() throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static ShortSupplier use(ShortSupplier o) + { + return o; + } + } + + /** + * A supplier of char-valued results, with a body that can + * throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface CharSupplier + extends Closing.Trivial, E> + { + /** + * Get the supplied value. + */ + char getAsChar() throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static CharSupplier use(CharSupplier o) + { + return o; + } + } + + /** + * A supplier of float-valued results, with a body that can + * throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface FloatSupplier + extends Closing.Trivial, E> + { + /** + * Get the supplied value. + */ + float getAsFloat() throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static FloatSupplier use(FloatSupplier o) + { + return o; + } + } + + /* + * Functions that have checked-exception-less counterparts in the Java API. + */ + + /** + * Like {@link java.util.function.Function} but with a body that can throw + * checked exceptions. + * @param Type of the function's parameter. + * @param Type of the function's result. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface Function + extends Checked, E> + { + /** + * Applies this function to the given argument. + */ + R apply(T t) throws E; + + @Override + default java.util.function.Function ederWrap() + { + return (t) -> + { + try + { + return apply(t); + } + catch ( Throwable thw ) + { + throw Checked.ederThrow(thw); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static + Function use(Function o) + { + return o; + } + } + + /** + * Like {@link java.util.function.ToDoubleFunction} but with a body that can + * throw checked exceptions. + * @param Type of the function's parameter. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface ToDoubleFunction + extends Checked, E> + { + /** + * Applies this function to the given argument. + */ + double apply(T t) throws E; + + @Override + default java.util.function.ToDoubleFunction ederWrap() + { + return (t) -> + { + try + { + return apply(t); + } + catch ( Throwable thw ) + { + throw Checked.ederThrow(thw); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static + ToDoubleFunction use(ToDoubleFunction o) + { + return o; + } + } + + /** + * Like {@link java.util.function.ToIntFunction} but with a body that can + * throw checked exceptions. + * @param Type of the function's parameter. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface ToIntFunction + extends Checked, E> + { + /** + * Applies this function to the given argument. + */ + int apply(T t) throws E; + + @Override + default java.util.function.ToIntFunction ederWrap() + { + return (t) -> + { + try + { + return apply(t); + } + catch ( Throwable thw ) + { + throw Checked.ederThrow(thw); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static + ToIntFunction use(ToIntFunction o) + { + return o; + } + } + + /** + * Like {@link java.util.function.ToLongFunction} but with a body that can + * throw checked exceptions. + * @param Type of the function's parameter. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface ToLongFunction + extends Checked, E> + { + /** + * Applies this function to the given argument. + */ + long apply(T t) throws E; + + @Override + default java.util.function.ToLongFunction ederWrap() + { + return (t) -> + { + try + { + return apply(t); + } + catch ( Throwable thw ) + { + throw Checked.ederThrow(thw); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static + ToLongFunction use(ToLongFunction o) + { + return o; + } + } + + /** + * Like {@link java.util.function.Predicate} but with a body that can + * throw checked exceptions. + * @param Type of the predicate's parameter. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface Predicate + extends Checked, E> + { + boolean test(T t) throws E; + + /** + * Evaluates this predicate on the given argument. + */ + default Predicate negate() + { + return t -> ! test(t); + } + + @Override + default java.util.function.Predicate ederWrap() + { + return (t) -> + { + try + { + return test(t); + } + catch ( Throwable thw ) + { + throw Checked.ederThrow(thw); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static + Predicate use(Predicate o) + { + return o; + } + } + + /* + * Functions without checked-exception-less Java API counterparts. + */ + + /** + * Represents a function that produces a byte-valued result and can + * throw checked exceptions. + * @param Type of the function's parameter. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface ToByteFunction + extends Closing.Trivial, E> + { + /** + * Applies this function to the given argument. + */ + byte apply(T t) throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static + ToByteFunction use(ToByteFunction o) + { + return o; + } + } + + /** + * Represents a function that produces a short-valued result and can + * throw checked exceptions. + * @param Type of the function's parameter. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface ToShortFunction + extends Closing.Trivial, E> + { + /** + * Applies this function to the given argument. + */ + short apply(T t) throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static + ToShortFunction use(ToShortFunction o) + { + return o; + } + } + + /** + * Represents a function that produces a char-valued result and can + * throw checked exceptions. + * @param Type of the function's parameter. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface ToCharFunction + extends Closing.Trivial, E> + { + /** + * Applies this function to the given argument. + */ + char apply(T t) throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static + ToCharFunction use(ToCharFunction o) + { + return o; + } + } + + /** + * Represents a function that produces a float-valued result and can + * throw checked exceptions. + * @param Type of the function's parameter. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface ToFloatFunction + extends Closing.Trivial, E> + { + /** + * Applies this function to the given argument. + */ + float apply(T t) throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static + ToFloatFunction use(ToFloatFunction o) + { + return o; + } + } + + /* + * Consumers that have checked-exception-less counterparts in the Java API. + */ + + /** + * Like {@link java.util.function.BiConsumer} but with a body that can + * throw checked exceptions. + * @param Type of the first argument to the operation. + * @param Type of the second argument to the operation. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface BiConsumer + extends Checked, E> + { + /** + * Performs this operation on the given arguments. + */ + void accept(T t, U u) throws E; + + @Override + default java.util.function.BiConsumer ederWrap() + { + return (t, u) -> + { + try + { + accept(t, u); + } + catch ( Throwable thw ) + { + throw Checked.ederThrow(thw); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static BiConsumer + use(BiConsumer o) + { + return o; + } + } + + /** + * Like {@link java.util.function.Consumer} but with a body that can + * throw checked exceptions. + * @param Type of the input to the operation. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface Consumer + extends Checked, E> + { + /** + * Performs this operation on the given argument. + */ + void accept(T t) throws E; + + @Override + default java.util.function.Consumer ederWrap() + { + return (t) -> + { + try + { + accept(t); + } + catch ( Throwable thw ) + { + throw Checked.ederThrow(thw); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static Consumer use(Consumer o) + { + return o; + } + } + + /** + * Like {@link java.util.function.DoubleConsumer} but with a body that can + * throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface DoubleConsumer + extends Checked + { + /** + * Performs this operation on the given argument. + */ + void accept(double value) throws E; + + @Override + default java.util.function.DoubleConsumer ederWrap() + { + return (t) -> + { + try + { + accept(t); + } + catch ( Throwable thw ) + { + throw Checked.ederThrow(thw); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static DoubleConsumer use(DoubleConsumer o) + { + return o; + } + } + + /** + * Like {@link java.util.function.IntConsumer} but with a body that can + * throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface IntConsumer + extends Checked + { + /** + * Performs this operation on the given argument. + */ + void accept(int value) throws E; + + @Override + default java.util.function.IntConsumer ederWrap() + { + return (t) -> + { + try + { + accept(t); + } + catch ( Throwable thw ) + { + throw Checked.ederThrow(thw); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static IntConsumer use(IntConsumer o) + { + return o; + } + } + + /** + * Like {@link java.util.function.LongConsumer} but with a body that can + * throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface LongConsumer + extends Checked + { + /** + * Performs this operation on the given argument. + */ + void accept(long value) throws E; + + @Override + default java.util.function.LongConsumer ederWrap() + { + return (t) -> + { + try + { + accept(t); + } + catch ( Throwable thw ) + { + throw Checked.ederThrow(thw); + } + }; + } + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static LongConsumer use(LongConsumer o) + { + return o; + } + } + + /* + * Consumers without checked-exception-less counterparts in the Java API. + */ + + /** + * Represents an operation that accepts a single boolean-valued argument + * and can throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface BooleanConsumer + extends Closing.Trivial, E> + { + /** + * Performs this operation on the given argument. + */ + void accept(boolean value) throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static + BooleanConsumer use(BooleanConsumer o) + { + return o; + } + } + + /** + * Represents an operation that accepts a single byte-valued argument + * and can throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface ByteConsumer + extends Closing.Trivial, E> + { + /** + * Performs this operation on the given argument. + */ + void accept(byte value) throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static ByteConsumer use(ByteConsumer o) + { + return o; + } + } + + /** + * Represents an operation that accepts a single short-valued argument + * and can throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface ShortConsumer + extends Closing.Trivial, E> + { + /** + * Performs this operation on the given argument. + */ + void accept(short value) throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static ShortConsumer use(ShortConsumer o) + { + return o; + } + } + + /** + * Represents an operation that accepts a single char-valued argument + * and can throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface CharConsumer + extends Closing.Trivial, E> + { + /** + * Performs this operation on the given argument. + */ + void accept(char value) throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static CharConsumer use(CharConsumer o) + { + return o; + } + } + + /** + * Represents an operation that accepts a single float-valued argument + * and can throw checked exceptions. + * @param Exception type that can be thrown by the body. + */ + @FunctionalInterface + interface FloatConsumer + extends Closing.Trivial, E> + { + /** + * Performs this operation on the given argument. + */ + void accept(float value) throws E; + + /** + * Shapes a lambda or method reference into an instance of this + * functional interface. + *

      + * This is simply an identity function that can take the place of a + * more unwieldy cast. + * @param Least upper bound of exception types o can throw. + * @param o The implementing lambda or method reference. + */ + static FloatConsumer use(FloatConsumer o) + { + return o; + } + } + + /* + * Optionals without checked-exception-less counterparts in the Java API. + * + * Rather than following Java's odd "Value-Based Class" conventions (which + * would require each class to be final and therefore preclude a None/Some + * implementation), these all have private constructors and constitute an + * effectively sealed hierarchy. Client code can and should treat them as + * value-based classes, and they will behave. + */ + + /** + * Head of a family of {@link java.util.Optional Optional}-like types + * covering the Java primitives that the {@code java.util.Optional...} + * classes do not cover, and whose methods that expect functional interfaces + * will accept the checked-exception versions declared here. + *
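A hedged sketch of the family in use (the ResultSet copy is an illustrative assumption, not from the source): ofNullable turns a possibly-null Boolean into an OptionalBoolean, and the checked SQLException thrown inside ifPresentOrElse remains visible to the compiler, unlike with java.util.Optional:

    import java.sql.ResultSet;
    import java.sql.SQLException;
    import static org.postgresql.pljava.internal.Checked.OptionalBase.ofNullable;

    class OptionalBooleanExample
    {
        static void copyFlag(ResultSet in, ResultSet out) throws SQLException
        {
            Boolean flag = in.getObject("flag", Boolean.class); // null if SQL NULL
            ofNullable(flag).ifPresentOrElse(
                b  -> out.updateBoolean("flag", b),             // may throw SQLException
                () -> out.updateNull("flag"));                  // likewise
        }
    }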

      + * Each {@code Optional}Foo class here should be treated as if + * it were a Java "value-based" class; that they might have this class as + * an ancestor, or any superclass/subclass relationships at all, may change + * and should not be relied on. It may be convenient to + * {@code import static} the {@code ofNullable} methods of this class, + * however, which even cover the {@code java.util}-supplied primitive + * optionals. + */ + abstract class OptionalBase + { + /** + * If a value is present, returns true, otherwise false. + */ + public boolean isPresent() + { + return false; + } + + @Override + public boolean equals(Object obj) + { + /* + * This is the equals() inherited by every EMPTY instance, and + * therefore can only return true when obj is an instance of the + * exact same type. + */ + return null != obj && getClass().equals(obj.getClass()); + } + + @Override + public int hashCode() + { + return 0; + } + + @Override + public String toString() + { + return getClass().getSimpleName() + ".empty"; + } + + /** + * Return an {@code OptionalDouble} representing the argument, empty + * if the argument is null. + */ + public static OptionalDouble ofNullable(Double value) + { + return null == value ? + OptionalDouble.empty() : OptionalDouble.of(value); + } + + /** + * Return an {@code OptionalInt} representing the argument, empty + * if the argument is null. + */ + public static OptionalInt ofNullable(Integer value) + { + return null == value ? + OptionalInt.empty() : OptionalInt.of(value); + } + + /** + * Return an {@code OptionalLong} representing the argument, empty + * if the argument is null. + */ + public static OptionalLong ofNullable(Long value) + { + return null == value ? + OptionalLong.empty() : OptionalLong.of(value); + } + + /** + * Return an {@code OptionalBoolean} representing the argument, empty + * if the argument is null. + */ + public static OptionalBoolean ofNullable(Boolean value) + { + return null == value ? + OptionalBoolean.EMPTY : OptionalBoolean.of(value); + } + + /** + * Return an {@code OptionalByte} representing the argument, empty + * if the argument is null. + */ + public static OptionalByte ofNullable(Byte value) + { + return null == value ? + OptionalByte.EMPTY : OptionalByte.of(value); + } + + /** + * Return an {@code OptionalShort} representing the argument, empty + * if the argument is null. + */ + public static OptionalShort ofNullable(Short value) + { + return null == value ? + OptionalShort.EMPTY : OptionalShort.of(value); + } + + /** + * Return an {@code OptionalChar} representing the argument, empty + * if the argument is null. + */ + public static OptionalChar ofNullable(Character value) + { + return null == value ? + OptionalChar.EMPTY : OptionalChar.of(value); + } + + /** + * Return an {@code OptionalFloat} representing the argument, empty + * if the argument is null. + */ + public static OptionalFloat ofNullable(Float value) + { + return null == value ? + OptionalFloat.EMPTY : OptionalFloat.of(value); + } + } + + /** + * A container object which may or may not contain a {@code boolean} value. + */ + class OptionalBoolean extends OptionalBase + { + /** + * An empty {@code OptionalBoolean}, for convenience; not to be used in + * identity-sensitive operations. + */ + public static final OptionalBoolean EMPTY = new OptionalBoolean(); + + /** + * An {@code OptionalBoolean} containing {@code false}, for convenience; + * not to be used in identity-sensitive operations. 
+ */ + public static final OptionalBoolean FALSE = new False(); + + /** + * An {@code OptionalBoolean} containing {@code true}, for convenience; + * not to be used in identity-sensitive operations. + */ + public static final OptionalBoolean TRUE = new True(); + + private OptionalBoolean() + { + } + + public static OptionalBoolean of(boolean value) + { + return value ? TRUE : FALSE; + } + + public boolean getAsBoolean() + { + throw new NoSuchElementException("No value present"); + } + + public void ifPresent( + BooleanConsumer action) + throws E + { + } + + public void ifPresentOrElse( + BooleanConsumer action, + Runnable emptyAction) + throws E + { + emptyAction.run(); + } + + public boolean orElse(boolean other) + { + return other; + } + + public boolean orElseGet( + BooleanSupplier supplier) + throws E + { + return supplier.getAsBoolean(); + } + + public boolean orElseThrow( + Supplier exceptionSupplier) + throws E + { + throw exceptionSupplier.get(); + } + + private abstract static class Present extends OptionalBoolean + { + private Present() + { + } + + @Override + public boolean isPresent() + { + return true; + } + + /* + * The inherited equals() works here too; this and obj must be both + * of class False or both of class True. + */ + + @Override + public int hashCode() + { + return Boolean.hashCode(getAsBoolean()); + } + + @Override + public String toString() + { + return "OptionalBoolean[" + getAsBoolean() + ']'; + } + + @Override + public abstract boolean getAsBoolean(); + + @Override + public void ifPresent( + BooleanConsumer action) + throws E + { + action.accept(getAsBoolean()); + } + + @Override + public void ifPresentOrElse( + BooleanConsumer action, + Runnable emptyAction) + throws E + { + action.accept(getAsBoolean()); + } + + @Override + public boolean orElse(boolean other) + { + return getAsBoolean(); + } + + @Override + public boolean orElseGet( + BooleanSupplier supplier) + throws E + { + return getAsBoolean(); + } + + @Override + public boolean orElseThrow( + Supplier exceptionSupplier) + throws E + { + return getAsBoolean(); + } + } + + private static final class False extends Present + { + private False() + { + } + + @Override + public boolean getAsBoolean() + { + return false; + } + } + + private static final class True extends Present + { + private True() + { + } + + @Override + public boolean getAsBoolean() + { + return true; + } + } + } + + /** + * A container object which may or may not contain a {@code byte} value. + */ + class OptionalByte extends OptionalBase + { + /** + * An empty {@code OptionalByte}, for convenience; not to be used in + * identity-sensitive operations. 
+ */ + public static final OptionalByte EMPTY = new OptionalByte(); + + private OptionalByte() + { + } + + public static OptionalByte of(byte value) + { + return new Present(value); + } + + public byte getAsByte() + { + throw new NoSuchElementException("No value present"); + } + + public void ifPresent( + ByteConsumer action) + throws E + { + } + + public void ifPresentOrElse( + ByteConsumer action, Runnable emptyAction) + throws E + { + emptyAction.run(); + } + + public byte orElse(byte other) + { + return other; + } + + public byte orElseGet( + ByteSupplier supplier) + throws E + { + return supplier.getAsByte(); + } + + public byte orElseThrow( + Supplier exceptionSupplier) + throws E + { + throw exceptionSupplier.get(); + } + + private static final class Present extends OptionalByte + { + private final byte m_value; + + private Present(byte value) + { + m_value = value; + } + + @Override + public boolean isPresent() + { + return true; + } + + @Override + public boolean equals(Object obj) + { + return obj instanceof Present + && (m_value == ((Present)obj).m_value); + } + + @Override + public int hashCode() + { + return Byte.hashCode(m_value); + } + + @Override + public String toString() + { + return "OptionalByte[" + m_value + ']'; + } + + @Override + public byte getAsByte() + { + return m_value; + } + + @Override + public void ifPresent( + ByteConsumer action) + throws E + { + action.accept(m_value); + } + + @Override + public void ifPresentOrElse( + ByteConsumer action, + Runnable emptyAction) + throws E + { + action.accept(m_value); + } + + @Override + public byte orElse(byte other) + { + return m_value; + } + + @Override + public byte orElseGet( + ByteSupplier supplier) + throws E + { + return m_value; + } + + @Override + public byte orElseThrow( + Supplier exceptionSupplier) + throws E + { + return m_value; + } + } + } + + /** + * A container object which may or may not contain a {@code short} value. + */ + class OptionalShort extends OptionalBase + { + /** + * An empty {@code OptionalShort}, for convenience; not to be used in + * identity-sensitive operations. 
+ */ + public static final OptionalShort EMPTY = new OptionalShort(); + + private OptionalShort() + { + } + + public static OptionalShort of(short value) + { + return new Present(value); + } + + public short getAsShort() + { + throw new NoSuchElementException("No value present"); + } + + public void ifPresent( + ShortConsumer action) + throws E + { + } + + public void ifPresentOrElse( + ShortConsumer action, + Runnable emptyAction) + throws E + { + emptyAction.run(); + } + + public short orElse(short other) + { + return other; + } + + public short orElseGet( + ShortSupplier supplier) + throws E + { + return supplier.getAsShort(); + } + + public short orElseThrow( + Supplier exceptionSupplier) + throws E + { + throw exceptionSupplier.get(); + } + + private static final class Present extends OptionalShort + { + private final short m_value; + + private Present(short value) + { + m_value = value; + } + + @Override + public boolean isPresent() + { + return true; + } + + @Override + public boolean equals(Object obj) + { + return obj instanceof Present + && (m_value == ((Present)obj).m_value); + } + + @Override + public int hashCode() + { + return Short.hashCode(m_value); + } + + @Override + public String toString() + { + return "OptionalShort[" + m_value + ']'; + } + + @Override + public short getAsShort() + { + return m_value; + } + + @Override + public void ifPresent( + ShortConsumer action) + throws E + { + action.accept(m_value); + } + + @Override + public void ifPresentOrElse( + ShortConsumer action, + Runnable emptyAction) + throws E + { + action.accept(m_value); + } + + @Override + public short orElse(short other) + { + return m_value; + } + + @Override + public short orElseGet( + ShortSupplier supplier) + throws E + { + return m_value; + } + + @Override + public short orElseThrow( + Supplier exceptionSupplier) + throws E + { + return m_value; + } + } + } + + /** + * A container object which may or may not contain a {@code char} value. + */ + class OptionalChar extends OptionalBase + { + /** + * An empty {@code OptionalChar}, for convenience; not to be used in + * identity-sensitive operations. 
+ */ + public static final OptionalChar EMPTY = new OptionalChar(); + + private OptionalChar() + { + } + + public static OptionalChar of(char value) + { + return new Present(value); + } + + public char getAsChar() + { + throw new NoSuchElementException("No value present"); + } + + public void ifPresent( + CharConsumer action) + throws E + { + } + + public void ifPresentOrElse( + CharConsumer action, Runnable emptyAction) + throws E + { + emptyAction.run(); + } + + public char orElse(char other) + { + return other; + } + + public char orElseGet( + CharSupplier supplier) + throws E + { + return supplier.getAsChar(); + } + + public char orElseThrow( + Supplier exceptionSupplier) + throws E + { + throw exceptionSupplier.get(); + } + + private static final class Present extends OptionalChar + { + private final char m_value; + + private Present(char value) + { + m_value = value; + } + + @Override + public boolean isPresent() + { + return true; + } + + @Override + public boolean equals(Object obj) + { + return obj instanceof Present + && (m_value == ((Present)obj).m_value); + } + + @Override + public int hashCode() + { + return Character.hashCode(m_value); + } + + @Override + public String toString() + { + return "OptionalChar[" + (int)m_value + ']'; + } + + @Override + public char getAsChar() + { + return m_value; + } + + @Override + public void ifPresent( + CharConsumer action) + throws E + { + action.accept(m_value); + } + + @Override + public void ifPresentOrElse( + CharConsumer action, + Runnable emptyAction) + throws E + { + action.accept(m_value); + } + + @Override + public char orElse(char other) + { + return m_value; + } + + @Override + public char orElseGet( + CharSupplier supplier) + throws E + { + return m_value; + } + + @Override + public char orElseThrow( + Supplier exceptionSupplier) + throws E + { + return m_value; + } + } + } + + /** + * A container object which may or may not contain a {@code float} value. + */ + class OptionalFloat extends OptionalBase + { + /** + * An empty {@code OptionalFloat}, for convenience; not to be used in + * identity-sensitive operations. 
+ */ + public static final OptionalFloat EMPTY = new OptionalFloat(); + + private OptionalFloat() + { + } + + public static OptionalFloat of(float value) + { + return new Present(value); + } + + public float getAsFloat() + { + throw new NoSuchElementException("No value present"); + } + + public void ifPresent( + FloatConsumer action) + throws E + { + } + + public void ifPresentOrElse( + FloatConsumer action, Runnable emptyAction) + throws E + { + emptyAction.run(); + } + + public float orElse(float other) + { + return other; + } + + public float orElseGet( + FloatSupplier supplier) + throws E + { + return supplier.getAsFloat(); + } + + public float orElseThrow( + Supplier exceptionSupplier) + throws E + { + throw exceptionSupplier.get(); + } + + private static final class Present extends OptionalFloat + { + private final float m_value; + + private Present(float value) + { + m_value = value; + } + + @Override + public boolean isPresent() + { + return true; + } + + @Override + public boolean equals(Object obj) + { + return obj instanceof Present + && (0 == Float.compare(m_value, ((Present)obj).m_value)); + } + + @Override + public int hashCode() + { + return Float.hashCode(m_value); + } + + @Override + public String toString() + { + return "OptionalFloat[" + m_value + ']'; + } + + @Override + public float getAsFloat() + { + return m_value; + } + + @Override + public void ifPresent( + FloatConsumer action) + throws E + { + action.accept(m_value); + } + + @Override + public void ifPresentOrElse( + FloatConsumer action, + Runnable emptyAction) + throws E + { + action.accept(m_value); + } + + @Override + public float orElse(float other) + { + return m_value; + } + + @Override + public float orElseGet( + FloatSupplier supplier) + throws E + { + return m_value; + } + + @Override + public float orElseThrow( + Supplier exceptionSupplier) + throws E + { + return m_value; + } + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/DualState.java b/pljava/src/main/java/org/postgresql/pljava/internal/DualState.java new file mode 100644 index 00000000..438bc4a9 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/DualState.java @@ -0,0 +1,2375 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import static java.lang.invoke.MethodHandles.lookup; +import java.lang.invoke.VarHandle; + +import java.lang.ref.ReferenceQueue; +import java.lang.ref.WeakReference; + +import java.sql.SQLException; + +import java.util.ArrayDeque; +import static java.util.Arrays.copyOf; +import java.util.Deque; +import java.util.HashMap; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Queue; + +import java.util.concurrent.CancellationException; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.LongAdder; +import static java.util.concurrent.locks.LockSupport.park; +import static java.util.concurrent.locks.LockSupport.unpark; + +import java.util.function.Supplier; + +import static java.lang.management.ManagementFactory.getPlatformMBeanServer; +import javax.management.ObjectName; +import javax.management.JMException; + +import org.postgresql.pljava.mbeans.DualStateStatistics; + +/** + * Base class for object state with corresponding Java and native components. + *

      + * A {@code DualState} object connects some state that exists in the JVM + * as well as some native/PostgreSQL resources. It will 'belong' to some Java + * object that holds a strong reference to it, and this state object is, + * in turn, a {@link WeakReference} to that object. Java state may be held in + * that object (if it needs only to be freed by the garbage collector when + * unreachable), or in this object if it needs some more specific cleanup. + * Native state will be referred to by this object. + *

      + * These interesting events are possible in the life cycle of a + * {@code DualState} object: + *

        + *
+ *<ul>
+ *<li>It is explicitly closed by the Java code using it. It, and any
+ * associated native state, should be released.</li>
+ *<li>It is found unreachable by the Java garbage collector. Again, any
+ * associated state should be released.</li>
+ *<li>Its associated native state is released or invalidated (such as by exit
+ * of a corresponding context). If the object is still reachable from Java, it
+ * must throw an exception for any future attempted access to its
+ * native state.</li>
+ *</ul>

      + * A subclass overrides the {@link #javaStateReleased javaStateReleased}, + * {@link #javaStateUnreachable javaStateUnreachable}, or + * {@link #nativeStateReleased nativeStateReleased} methods, respectively, + * to add behavior for those life cycle events. + *

      + * A subclass calls {@link #releaseFromJava releaseFromJava} to signal an event + * of the first kind. Events of the second kind are, naturally, detected by the + * Java garbage collector. To detect events of the third kind, a resource owner + * must be associated with the instance. + *

      + * A parameter to the {@code DualState} constructor is a {@code ResourceOwner}, + * a PostgreSQL implementation concept introduced in PG 8.0. A + * {@code nativeStateReleased} event occurs when the corresponding + * {@code ResourceOwner} is released in PostgreSQL. + *

      + * However, this class does not require the {@code resourceOwner} parameter to + * be, in all cases, a pointer to a PostgreSQL {@code ResourceOwner}. It is + * treated simply as an opaque {@code long} value, to be compared to a value + * passed at release time (as if in a {@code ResourceOwner} callback). Other + * values (such as pointers to other allocated structures, which of course + * cannot match any PG {@code ResourceOwner} existing at the same time) can also + * be used. In PostgreSQL 9.5 and later, a {@code MemoryContext} could be used, + * with its address passed to a {@code MemoryContextCallback} for release. For + * state that is scoped to a single invocation of a PL/Java function, the + * address of the {@code Invocation} can be used. Such references can be + * considered "generalized" resource owners. + *

      + * Java code may execute in multiple threads, but PostgreSQL is not + * multi-threaded; at any given time, there is no more than one thread that may + * safely make JNI calls into PostgreSQL routines (for that thread, + * {@code Backend.threadMayEnterPG()} returns true). Depending on the setting of + * the {@code pljava.java_thread_pg_entry} PostgreSQL configuration variable, + * that may be the same one thread for the duration of a session, or it may be + * possible for one thread to relinquish that status and another thread to take + * it: for the {@code pljava.java_thread_pg_entry} setting {@code allow}, the + * status is represented by holding the object monitor on + * {@code Backend.THREADLOCK}, and {@code Backend.threadMayEnterPG()} returns + * true for whatever thread holds it. Under that setting, there can be moments + * when {@code Backend.threadMayEnterPG()} is not true for any thread, if one + * has released the monitor and no other thread has yet acquired it. For brevity + * in what follows, "the PG thread" will be used to mean whatever thread, at a + * given moment, would observe {@code Backend.threadMayEnterPG()} to return + * true. + *

      + * Some methods of {@code DualState} and subclasses may be called from any Java + * thread, while some must be called from the PG thread. The life-cycle + * callbacks, {@code javaStateReleased}, {@code javaStateUnreachable}, and + * {@code nativeStateReleased}, are called by the implementation, and always + * on the PG thread. + *

      + * The Java Memory Model imposes strict conditions for updates to memory state + * made in one thread to be visible to other threads. Methods that are known to + * be called only on the PG thread can sidestep those complexities, at least + * to the extent that they manipulate only data structures not accessed in other + * threads. This is true even under the {@code pljava.java_thread_pg_entry} + * setting {@code allow}, where "the PG thread" may not always be the same + * thread. Because a Java synchronization event is involved whenever + * "the PG thread" changes, unbroken visibility is assured, just as it would be + * in one unchanging thread, so one can say "the PG thread" for convenience and + * without loss of generality. + *

      + * For the {@code nativeStateReleased} lifecycle event, rules for memory + * visibility are not enough; a mechanism for mutual exclusion is needed. The + * callback is made on the PG thread from PostgreSQL code that is in the process + * of invalidating the native state, and will do so once the callback returns. + * If any other Java thread is actively referring to that native state, there is + * no choice but to block the PG thread making the callback until such other + * threads are no longer relying on the native state. + *

      + * To that end, the {@link #pin pin} and {@link #unpin unpin} methods are + * provided, and must be used to surround any block of code that accesses the + * native state: + *

+ *<pre>
+ *pin();
+ *try
+ *{
+ *    ... code that dereferences or relies on
+ *    a valid native state ...
+ *}
+ *finally
+ *{
+ *    unpin();
+ *}
+ *</pre>
      + *

      + * Pins are lightweight, nonexclusive (any number of threads may simultaneously + * pin the same {@code DualState} instance), and reentrant (a single thread may + * obtain and release nested pins on the same instance). The code protected by a + * pin is ideally a short sequence representing a simple operation (reading a + * value, or refilling a small buffer with data) on the native state. The chief + * purpose of holding a pin is to hold off the possible invalidation of the + * native state until the pin is released. + *

      + * If either the native state or the Java state has been released already (by + * the resource owner callback or an explicit call to {@code releaseFromJava}, + * respectively), {@code pin()} will detect that and throw the appropriate + * exception. Otherwise, the state is safe to make use of until {@code unpin}. + * A subclass can customize the messages or {@code SQLSTATE} codes for the + * exceptions {@code pin()} may throw, by overriding one or more of + * {@link #identifierForMessage identifierForMessage}, + * {@link #invalidMessage invalidMessage}, + * {@link #releasedMessage releasedMessage}, + * {@link #invalidSqlState invalidSqlState}, or + * {@link #releasedSqlState releasedSqlState}. + *
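As an illustration of customizing those hooks, a hypothetical subclass might override two of them as below. This is a sketch under a stated assumption: the hooks are treated as no-argument methods returning String (their declarations are not shown in this hunk), and the identifier wording is invented.

```java
// Assumed shapes only: the hooks named above are taken here to be
// no-argument methods returning String; their declarations do not appear
// in this hunk, and the message text is invented for illustration.
@Override
protected String identifierForMessage()
{
    return "native tuple descriptor";
}

@Override
protected String invalidMessage()
{
    return identifierForMessage() + " is no longer valid";
}
```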

      + * Code that holds a pin may safely act on components of the native state from + * any thread, so long as the actions do not include native calls to PostgreSQL + * routines (directly or transitively). Access to the native memory through a + * direct byte buffer would be a permitted example, or even calls to JNI methods + * to retrieve fields from C {@code struct}s or chase pointers through a data + * structure, as long as only thread-safe routines from the C runtime are called + * and no routines of PostgreSQL itself, and as long as the memory or structure + * being accessed is known to be safe from modification by PostgreSQL while the + * pin is held. In the future, PL/Java may one day have an annotation that can + * be used to mark native methods that satisfy these limits; at present, there + * has been no effort to segregate them into those that do and those that don't. + * Native methods that may (under any circumstances!) invoke PG routines must + * be invoked on the PG thread. + *
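A short sketch of that discipline: a method of a hypothetical DualState subclass reads a value through a direct byte buffer over the native memory while pinned. buffer() is an invented accessor, and no PostgreSQL routine is called under the pin, so any thread may call this.

```java
// Sketch of a reader in a hypothetical DualState subclass. buffer() is an
// invented accessor returning a direct ByteBuffer that windows the native
// state; nothing here calls into PostgreSQL, so any thread may do this
// while holding the pin.
int firstField() throws java.sql.SQLException
{
    pin();      // throws if the native or Java state was already released
    try
    {
        return buffer().getInt(0);
    }
    finally
    {
        unpin();
    }
}
```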

      + * The exclusive counterparts to {@code pin} and {@code unpin} are + * {@link #lock lock} and {@link #unlock(int,boolean) unlock}, which are not + * expected to be used as widely. The chief use of {@code lock}/{@code unlock} + * is around the call to {@code nativeStateReleased} when handling a resource + * owner callback from PostgreSQL. They can be used in subclasses to surround + * modifications to the state, as needed. A {@code lock} will block until all + * earlier-acquired pins are released; subsequent pins block until the lock is + * released. Only the PG thread may use {@code lock}/{@code unlock}. An + * {@code upgrade} argument to {@code lock} allows the lock to be acquired + * when the PG thread already holds a pin; it should be specified + * only when inspection of the code identifies a nearby enclosing pin and + * confirms that the planned locked actions will not break the pinning code's + * assumptions. Pins can be freely acquired by the PG thread while it holds a + * lock; the coding convention's strict nesting assures they will all be + * released before the lock is. + *
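A minimal sketch of that subclass usage follows; replaceNativeBookkeeping is an invented name for a method of a DualState subclass, and it is assumed to run on the PG thread.

```java
// Sketch of a mutator in a hypothetical DualState subclass. Only the PG
// thread may take the exclusive lock; passing false means no upgrade from
// an already-held pin. The int returned by lock() must be passed back to
// the paired unlock().
void replaceNativeBookkeeping()
{
    int s = lock(false);
    try
    {
        // ... mutate state that concurrent pin-holders must never
        //     observe mid-change ...
    }
    finally
    {
        unlock(s);
    }
}
```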

      + * In an explicit call to {@code releaseFromJava}, which may be made from any + * thread, the instance is immediately, atomically, flagged as released. No + * subsequent pin will succeed. Pins already held are unaffected, so there must + * be no changes made to the state, at the time {@code releaseFromJava} is + * called, that could confuse any code that already holds a pin and is relying + * on the state. Such changes must be made in the {@code javaStateReleased} + * callback, which will execute only after release of the last pin, if any, and + * always on the PG thread. If the last pin is released by a thread other than + * the PG thread, the callback does not execute immediately, but via a queue + * that is polled from the PG thread at convenient points. + *

      + * Instances whose referents are found unreachable by Java's garbage collector + * are placed on the same queue, so their {@code javaStateUnreachable} callbacks + * will be executed on the PG thread when the queue is polled. The callbacks + * should clean up any lingering native state. + *

      + * As the callbacks are executed on the PG thread, any native calls they may + * need to make into PostgreSQL are allowed without extra ceremony. + *

      + * There are different abstract subclasses of {@code DualState} that wrap + * different sorts of PostgreSQL native state, and encapsulate what needs to be + * done when such state is released from the Java or native side. More such + * subclasses can be added as needed. + *

      + * A client class of {@code DualState} will typically contain a static nested + * class that further extends one of these abstract subclasses, and the client + * instance will hold a strong reference to an instance of that + * {@code DualState} subclass constructed at the same time. + *
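A rough sketch of that client pattern is shown below. MyThing, State, release, and m_nativePointer are invented names; the sketch assumes the generic form DualState<T> implied by the constructor parameters and assumes no abstract members beyond those visible in this hunk.

```java
// Illustrative sketch only, not part of this change. Construction happens
// on the PG thread, with a DualState.Key supplied by native code.
public class MyThing implements AutoCloseable
{
    private final State m_state;   // strong reference keeps the state live

    MyThing(DualState.Key cookie, long resourceOwner, long nativePointer)
    {
        m_state = new State(cookie, this, resourceOwner, nativePointer);
    }

    @Override
    public void close()
    {
        m_state.release();  // flags the state released; cleanup is deferred
    }

    private static final class State extends DualState<MyThing>
    {
        private final long m_nativePointer;  // e.g. a palloc'd struct

        private State(
            DualState.Key cookie, MyThing referent, long resourceOwner,
            long nativePointer)
        {
            super(cookie, referent, resourceOwner);
            m_nativePointer = nativePointer;
        }

        void release()
        {
            releaseFromJava();
        }

        @Override
        protected void javaStateUnreachable(boolean nativeStateLive)
        {
            if ( nativeStateLive )
            {
                // free m_nativePointer here; this callback runs on the PG
                // thread, so native calls into PostgreSQL are permitted
            }
        }

        @Override
        protected void nativeStateReleased(boolean javaStateLive)
        {
            // nothing native left to free; a later pin() will now throw
        }
    }
}
```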

+ * This class uses some private data structures, neither synchronized nor
+ * thread-safe, to track created instances through their life cycles. The
+ * design rests on the following requirements:

        + *
+ *<ul>
+ *<li>The structures are only traversed or modified during:
+ * <ul>
+ *  <li>Instance construction</li>
+ *  <li>Reference queue processing (instances found unreachable by Java's
+ *   garbage collector, or enqueued following {@code releaseFromJava})</li>
+ *  <li>Exit of a resource owner's scope</li>
+ * </ul></li>
+ *<li>There is only one PG thread, or only one at a time.</li>
+ *<li>Construction of any {@code DualState} instance is to take place only on
+ * the PG thread. The requirement to pass any constructor a
+ * {@code DualState.Key} instance, obtainable by native code, is intended to
+ * reinforce that convention. It is not abuse-proof, or intended as a security
+ * mechanism, but only a guard against programming mistakes.</li>
+ *<li>Reference queue processing takes place only at chosen points where a
+ * thread enters or exits native code, on the PG thread.</li>
+ *<li>Resource-owner callbacks originate in native code, on the PG thread.</li>
+ *</ul>
      + */ +public abstract class DualState extends WeakReference +{ + /** + * {@code DualState} objects Java no longer needs. + *

      + * They will turn up on this queue (with referent already set null) if + * the garbage collector has determined them to be unreachable. They can + * also arrive here (also with referent nulled) following + * {@code releaseFromJava}. + *

      + * The queue is only processed by a private method called on the PG thread + * in selected places where it makes sense to do so. + */ + private static final ReferenceQueue s_releasedInstances = + new ReferenceQueue<>(); + + /** + * {@code DualState} objects that arrived on {@code s_releasedInstances} + * before their time. + *

      + * A slim chance exists (see {@code releaseFromJava} code comments) for an + * instance occasionally to appear on {@code s_releasedInstances} + * before all pins on it have been released. In queue processing, they can + * be put back on this queue and polled again in a later pass. The rarity of + * the case doesn't suggest a need for anything more elaborate. + */ + private static final Deque> s_deferredReleased = + new ArrayDeque<>(); + + /** + * All instances in a non-transient native scope are added here upon + * creation, to keep them visible to the garbage collector. + *

      + * Because they are not in a transient native scope, only the + * {@code javaStateUnreachable} or {@code javaStateReleased} lifecycle + * events can occur, and in either case the object is in hand with no + * searching, and can be removed from this structure in O(1). + */ + private static final IdentityHashMap + s_unscopedInstances = new IdentityHashMap<>(); + + /** + * All native-scoped instances are added to this structure upon creation. + *

      + * The hash map takes a resource owner to the doubly-linked list of + * instances it owns. The list is implemented directly with the two list + * fields here (rather than by a Collections class), so that an instance can + * be unlinked with no searching in the case of {@code javaStateUnreachable} + * or {@code javaStateReleased}, where the instance to be unlinked is + * already at hand. The list head is of a dummy {@code DualState} subclass. + */ + private static final Map s_scopedInstances = + new HashMap<>(); + + /** Backward link in per-resource-owner list. */ + private DualState m_prev; + + /** Forward link in per-resource-owner list. */ + private DualState m_next; + + /** + * The sole thread (at a given moment) allowed to interact with Postgres and + * to acquire mutate locks on {@code DualState} instances. + *

      + * Depending on the setting of {@code pljava.java_thread_pg_entry}, this may + * refer to the same thread at all times, or be different threads, one at a + * time. + *

      + * Not volatile; atomic operations that follow any update to it will ensure + * its visibility. + */ + private static Thread s_mutatorThread; + + /** + * Tracker (using thread-local storage) of possibly re-entrant pins held + * on objects by the current thread. + *

      + * Organized as a stack, enforcing a strict nesting protocol for pins. + */ + private static final PinCount.Holder s_pinCount = new PinCount.Holder(); + + /** + * One (state object, pin count) entry on a stack of a thread's held pins. + */ + static final class PinCount + { + /** + * DualState object on which the pins counted by this entry are held. + */ + DualState m_referent; + /** + * Count of pins held on {@code m_referent} at this stack level. + *

      + * The stack may hold earlier entries tracking additional pins on the + * same object, if the thread took a pin on some other object in + * between. + */ + short m_count; + + /** + * Construct a new {@code PinCount} for a given referent, with count + * zero. + */ + PinCount(DualState referent) + { + if ( null == referent ) + throw new NullPointerException("null referent of a PinCount"); + m_referent = referent; + } + + /** + * Thread-local stack of {@code PinCount} entries. + */ + static final class Holder extends ThreadLocal + { + @Override + protected Manager initialValue() + { + return new Manager(); + } + + /** + * Increment a thread-local count of pins for a DualState object. + * @return null if there was already at least one pin counted for + * the object (that is, no real pin will need to be taken; this is + * a reentrant pin); otherwise, a {@code Supplier>} + * that can supply a preallocated queue prepopulated with + * the current thread, in case inflation is needed. + */ + Supplier> pin(DualState s) + { + boolean result = false; // assume a real pin must be taken + Manager counts = get(); + PinCount pc = counts.peek(); + if ( null == pc || ! pc.m_referent.equals(s) ) + { + result = counts.hasPin(s); + pc = counts.push(s); + } + if ( 0 < pc.m_count ++ || result ) + return null; + /* + * Ensure that counts.m_protoWaiters contains a preallocated + * queue with this thread already added to it, ready for + * immediate use by a contended pin that needs to inflate. + */ + if ( null == counts.m_protoWaiters ) + { + counts.m_protoWaiters = new ConcurrentLinkedQueue<>(); + counts.m_protoWaiters.add(Thread.currentThread()); + } + return counts; + } + + /** + * Decrement a thread-local count of pins for a DualState object. + * @return true if there remains at least one pin counted for + * the object (that is, no real pin will need to be released; + * this is a reentrant unpin). + */ + boolean unpin(DualState s) + { + Manager counts = get(); + PinCount pc = counts.peek(); + if ( null == pc || ! pc.m_referent.equals(s) ) + throw new IllegalThreadStateException( + "mispairing of DualState pin/unpin"); + if ( 0 == -- pc.m_count ) + { + counts.pop(); + return counts.hasPin(s); + } + return true; + } + + /** + * True if the current thread holds one or more pins on {@code s}. + */ + boolean hasPin(DualState s) + { + return get().hasPin(s); + } + } + + /** + * Open-coded implementation of as much of a Stack as PinCount needs. + *

      + * A lightweight stack implementation that also pools a few of the + * objects once pushed on it, for reuse, intended to produce less + * observed garbage than the earlier straight use of ArrayDeque. + */ + static final class Manager implements Supplier> + { + private static final int INITIAL_SIZE = 4; + private static final int POOL_TARGET = 2; + private PinCount[] m_array = new PinCount [ INITIAL_SIZE ]; + private int m_top = -1; + private int m_pooled = 0; + Queue m_protoWaiters; + + @Override + public Queue get() + { + Queue q = m_protoWaiters; + m_protoWaiters = null; + return q; + } + + PinCount peek() + { + if ( m_top >= 0 ) + return m_array [ m_top ]; + return null; + } + + /** + * A version of 'pop' that returns {@code void}. + *

      + * No caller above needs the value that was popped; {@code peek} is + * used for that. This method simply pops an element (and may, + * behind the scenes, reset its fields and pool it for reuse). + */ + void pop() + { + if ( m_top < 0 ) + throw new NoSuchElementException(); + if ( m_pooled >= POOL_TARGET ) + m_array [ m_top ] = null; + else + { + PinCount pc = m_array [ m_top ]; + pc.m_referent = null; + assert 0 == pc.m_count : "won't pop a nonzero PinCount"; + ++ m_pooled; + } + -- m_top; + } + + /** + * Obtain an entry from {@code allocate}, push and return it. + */ + PinCount push(DualState s) + { + PinCount pc = allocate(s); + ++ m_top; + if ( m_top < m_array.length ) + assert m_top + m_pooled < m_array.length : "stack v. pool"; + else + { + assert 0 == m_pooled : "pool will be empty if extending"; + m_array = copyOf(m_array, 2 * m_array.length); + } + m_array [ m_top ] = pc; + return pc; + } + + private PinCount allocate(DualState s) + { + if ( m_pooled > 0 ) + { + PinCount pc = m_array [ 1 + m_top ]; + -- m_pooled; + pc.m_referent = s; + return pc; + } + return new PinCount(s); + } + + /** + * True if stack of {@code PinCount}s contains any with a non-zero + * count for object {@code s}. + */ + private boolean hasPin(DualState s) + { + for ( int i = 1 + m_top; i --> 0; ) + { + PinCount pc = m_array [ i ]; + if ( pc.m_referent.equals(s) && 0 < pc.m_count ) + return true; + } + return false; + } + } + } + + /** Thread local record of when the PG thread is invoking callbacks. */ + private static final CleanupTracker s_inCleanup = new CleanupTracker(); + + /** Thread local boolean with pairing enter/exit operations. */ + static final class CleanupTracker extends ThreadLocal + { + boolean enter() + { + assert Backend.threadMayEnterPG() : m("inCleanup.enter thread"); + assert ! inCleanup() : m("inCleanup.enter re-entered"); + set(Boolean.TRUE); + return true; + } + + boolean exit() + { + assert inCleanup() : m("inCleanup.exit mispaired"); + set(Boolean.FALSE); + return true; + } + + boolean inCleanup() + { + return Boolean.TRUE == get(); + } + } + + /** + * {@code VarHandle} for applying atomic operations on the {@code m_state} + * field. + */ + private static final VarHandle s_stateVH; + + /** + * {@code VarHandle} for applying atomic operations on the {@code m_waiters} + * field. + */ + private static final VarHandle s_waitersVH; + + /** + * Bean to expose DualState allocation/release statistics to JMX management + * tools. + */ + private static final Statistics s_stats = new Statistics(); + + static { + try + { + s_stateVH = + lookup().findVarHandle(DualState.class, "m_state", int.class); + s_waitersVH = lookup().findVarHandle(DualState.class, "m_waiters", + Queue.class); + } + catch ( ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + + try + { + ObjectName n = new ObjectName( + "org.postgresql.pljava:type=DualState,name=Statistics"); + getPlatformMBeanServer().registerMBean(s_stats, n); + } + catch ( JMException e ) { /* XXX */ } + } + + /** + * Pointer value of the {@code ResourceOwner} this instance belongs to, + * if any. + */ + protected final long m_resourceOwner; + + /** + * Check that a cookie is valid, throwing an unchecked exception otherwise. + */ + protected static void checkCookie(Key cookie) + { + assert Backend.threadMayEnterPG(); + if ( ! 
Key.class.isInstance(cookie) ) + throw new UnsupportedOperationException( + "Operation on DualState instance without cookie"); + } + + /** Flag held in lock state showing the native state has been released. */ + private static final int NATIVE_RELEASED = 0x80000000; + /** Flag held in lock state showing the Java state has been released. */ + private static final int JAVA_RELEASED = 0x40000000; + /** Flag held in lock state showing a lock has been acquired. */ + private static final int MUTATOR_HOLDS = 0x20000000; + /** Flag held in lock state showing a lock is pending. */ + private static final int MUTATOR_WANTS = 0x10000000; + /** Reserved, clear bit above count of pinners awaiting lock release. */ + private static final int WAITERS_GUARD = 0x08000000; + /** Mask for count of pending pinners awaiting lock release. */ + private static final int WAITERS_MASK = 0x07ffc000; + /** The bit shift to get WAITERS count from PINNERS count. */ + private static final int WAITERS_SHIFT = 14; + /** Reserved, clear bit above count of current valid pins. */ + private static final int PINNERS_GUARD = 0x00002000; + /** Mask for count of current pinners holding valid pins. */ + private static final int PINNERS_MASK = 0x00001fff; + + /** Lock state, also records whether native or Java release has occurred. */ + private volatile int m_state = 0; + + /** Threads waiting for pins pending release of lock. */ + private Queue m_waiters; + + /** True if argument is zero. */ + static boolean z(int i) { return 0 == i; } + + /** + * Return the argument; convenient breakpoint target for failed assertions. + */ + static T m(T detail) + { + return detail; + } + + /** + * Construct a {@code DualState} instance with a reference to the Java + * object whose state it represents. + *

      + * Subclass constructors must accept a cookie parameter from the + * native caller, and pass it along to superclass constructors. That allows + * some confidence that constructor parameters representing native values + * are for real, and also that the construction is taking place on a thread + * holding the native lock, keeping the concurrency story simple. + * @param cookie Capability held by native code to invoke {@code DualState} + * constructors. + * @param referent The Java object whose state this instance represents. + * @param resourceOwner Pointer value of the native {@code ResourceOwner} + * whose release callback will indicate that this object's native state is + * no longer valid. If zero (a NULL pointer in C), it indicates that the + * state is held in long-lived native memory (such as JavaMemoryContext), + * and can only be released via {@code javaStateUnreachable} or + * {@code javaStateReleased}. + */ + protected DualState(Key cookie, T referent, long resourceOwner) + { + super(referent, s_releasedInstances); + + checkCookie(cookie); + + long scoped = 0L; + + m_resourceOwner = resourceOwner; + + assert Backend.threadMayEnterPG() : m("DualState construction"); + /* + * The following stanza publishes 'this' into one of the static data + * structures, for resource-owner-scoped or non-native-scoped instances, + * respectively. That may look like escape of 'this' from an unfinished + * constructor, but the structures are private, and only manipulated + * during construction and release, always on the thread cleared to + * enter PG. Depending on the pljava.java_thread_pg_entry setting, that + * might or might not always be the same thread: but if it isn't, a + * synchronizing action must occur when a different thread takes over. + * That will happen after this constructor returns, so the reference is + * safely published. + */ + if ( 0 != resourceOwner ) + { + scoped = 1L; + DualState.ListHead head = s_scopedInstances.get(resourceOwner); + if ( null == head ) + { + head = new DualState.ListHead(resourceOwner); + s_scopedInstances.put(resourceOwner, head); + } + m_prev = head; + m_next = ((DualState)head).m_next; + m_prev.m_next = m_next.m_prev = this; + } + else + s_unscopedInstances.put(this, this); + + s_stats.construct(scoped); + } + + /** + * Private constructor only for dummy instances to use as the list heads + * for per-resource-owner lists. + */ + private DualState(T referent, long resourceOwner) + { + super(referent); // as a WeakReference subclass, must have a referent + super.clear(); // but nobody ever said for how long. + m_resourceOwner = resourceOwner; + m_prev = m_next = this; + m_waiters = null; + } + + /** + * Method that will be called when the associated {@code ResourceOwner} + * is released, indicating that the native portion of the state + * is no longer valid. The implementing class should clean up + * whatever is appropriate to that event. + *

      + * This object's exclusive {@code lock()} will always be held when this + * method is called during resource owner release. The class whose state + * this is must use {@link #pin() pin()}, followed by + * {@link #unpin() unpin()} in a {@code finally} block, around every + * (ideally short) block of code that could refer to the native state. + *

      + * This default implementation does nothing. + * @param javaStateLive true is passed if the instance's "Java state" is + * still considered live, that is, {@code releaseFromJava} has not been + * called, and the garbage collector has not determined the referent to be + * unreachable. + */ + protected void nativeStateReleased(boolean javaStateLive) + { + } + + /** + * Method that will be called when the Java garbage collector has determined + * the referent object is no longer strongly reachable. This default + * implementation does nothing; a subclass should override it to do any + * cleanup, or release of native resources, that may be required. + *

      + * If the {@code nativeStateLive} parameter is false, this method must avoid + * any action (such as freeing) it would otherwise take on the associated + * native state; if it does not, double-free crashes can result. + *

      + * It is not necessary for this method to remove the instance from the + * live-instances data structures; that will have been done just before + * this method is called. + * @param nativeStateLive true is passed if the instance's "native state" is + * still considered live, that is, no resource-owner callback has been + * invoked to stamp it invalid (nor has it been "adopted"). + */ + protected void javaStateUnreachable(boolean nativeStateLive) + { + } + + /** + * Called after client code has called {@code releaseFromJava}, always on + * a thread for which {@code Backend.threadMayEnterPG()} is true, and after + * any pins held on the state have been released. + *

      + * This should not be called directly. When Java code has called + * {@code releaseFromJava}, the state will be changed to 'released' + * immediately, though without actually disturbing any state that might be + * referenced by threads with existing pins. This method will be called + * at some later time, always on a thread able to enter PG, and with no + * other threads having the native state pinned, so this is the place for + * any actual release of native state that may be needed. + *

      + * If the {@code nativeStateLive} parameter is false, this method must avoid + * any action (such as freeing) it would otherwise take on the associated + * native state; if it does not, double-free crashes can result. + *

      + * This default implementation calls {@code javaStateUnreachable}, which, in + * typical cases, will have the same cleanup to do. + * @param nativeStateLive true is passed if the instance's "native state" is + * still considered live, that is, no resource-owner callback has been + * invoked to stamp it invalid (nor has it been "adopted"). + */ + protected void javaStateReleased(boolean nativeStateLive) + { + javaStateUnreachable(nativeStateLive); + } + + /** + * What Java code will call to explicitly release this instance + * (in the implementation of {@code close}, for example). + *

      + * The state is immediately marked 'released' to prevent future use, while + * a call to {@code javaStateReleased} will be deferred until after any pins + * currently held on the state have been released. + */ + protected final void releaseFromJava() + { + /* + * The possibility of a race between GC and a releaseFromJava call seems + * remote; it stands to reason that whoever is calling releaseFromJava + * is holding a live reference to the referent, so it's not eligible for + * GC. Strictly, though, something with only a reference to the + * DualState could call this method. It would be a strange coding + * pattern, but nothing here prevents it. + * + * Of greater concern is what happens after releaseFromJava. The caller + * might well let go of any reference, creating a race to see who puts + * the object onto the ReferenceQueue first, GC or this method (or + * unpin, if there are pins to wait for), making it difficult for the + * reference queue drainer to distinguish which case it is handling. + * + * That can be avoided by doing an unconditional clear() here, so (as + * long as the referent was live when we started), the GC is relieved of + * any queuing duties, leaving us in control of the next steps. + * + * We must do the clear() and also detect whether it already was clear, + * by calling referent() first. Those two steps aren't an atom, but at + * first blush it looks safe to hold our own strong reference in r1 and + * so hold off any finding of unreachability from there to the clear(). + * + * Optimizing JIT could muddy the picture, though, by deciding the only + * use of r1 here is to compare it to null, which could be moved ahead + * of the clear(), in the absence of the reachabilityFence below. + * + * The m_state operations are the only ones with synchronizing effects, + * so we had better delay our unconditional clear() until after we know + * the JAVA_RELEASED flag has been CAS'd in. Otherwise we'd be unable to + * decide whether a referent-was-cleared-at-entry condition meant we + * were beaten by GC or by another releaseFromJava call. + * + * Note that Reference.isEnqueued() looks promising as a way to tell if + * the GC has enqueued something already, but it isn't: it goes back to + * false again as soon as a reference is removed from the queue, so it's + * hopelessly racy. + */ + T r1 = referent(); + + int s = (int)s_stateVH.getAndBitwiseOr(this, JAVA_RELEASED); + /* + * The state is now marked JAVA_RELEASED; that flag may have been set + * before us, or here now by us (the latter indicated by its absence + * in s). + * + * Now to find out if the referent was live at entry, and clear it. + */ + boolean releaseFlagWasClear = z(s & JAVA_RELEASED); + boolean refWasClearAtEntry = null == r1; + super.clear(); + reachabilityFence(r1); + + if ( refWasClearAtEntry ) + { + if ( releaseFlagWasClear ) + { + /* + * The garbage collector has already enqueued it; we are too + * late. Count the event and return. + * If there are any pins, two things will happen, one more bad, + * one less. The GC has no clue about waiting for unpins, so + * the queue drainer may receive (or already has, even) this + * object with active pins. It can detect that, and do something + * reasonable. + * Also, when unpin() releases the last one, it will want to + * enqueue the object again. That's already a non-problem: + * Reference.enqueue() only works one time. + */ + s_stats.gcReleaseRace(); + return; + } + else + { + /* + * We were beaten by another releaseFromJava call. 
That thread + * will be handling the remaining formalities. + */ + s_stats.releaseReleaseRace(); + return; + } + } + + if ( ! releaseFlagWasClear ) + return; // Other winning thread will handle the formalities. + + if ( !z(s & (WAITERS_MASK | PINNERS_MASK)) ) + return; // unpin() will schedule when the last pin is released + + scheduleJavaReleased(s); + } + + /** + * Throws {@code UnsupportedOperationException}; {@code releaseFromJava} + * must be used rather than calling this method directly. + */ + @Override + public final boolean enqueue() + { + throw new UnsupportedOperationException( + "directly calling enqueue() on a DualState object is not " + + "supported; use releaseFromJava()."); + } + + /** + * Throws {@code UnsupportedOperationException}; {@code releaseFromJava} + * must be used rather than calling this method directly. + */ + @Override + public final void clear() + { + /* + * Must relax this assertion, because for some reason the clear() method + * can be called from enqueue() when running on OpenJ9. See + * https://github.com/AdoptOpenJDK/openjdk-support/issues/42 + * + * So, spare the exception as long as the instance has in fact been + * released. + */ + int s = (int)s_stateVH.get(this); + if ( z(s & JAVA_RELEASED) ) + throw new UnsupportedOperationException( + "directly calling clear() on a DualState object is not " + + "supported; use releaseFromJava()."); + super.clear(); + } + + /** + * Throws {@code UnsupportedOperationException}; client code should already + * hold a reference. + */ + @Override + public final T get() + { + throw new UnsupportedOperationException( + "directly calling get() on a DualState object is not supported."); + } + + /** + * Used internally to obtain this object's referent. + */ + protected final T referent() + { + return super.get(); + } + + /** + * Obtain a pin on this state, throwing an appropriate exception if it + * is not still valid, blocking if necessary until release of a lock. + *

      + * Pins are re-entrant; a thread may obtain more than one on the same + * object, in strictly nested fashion. Only the outer acquisition (and + * corresponding release) will have any memory synchronization effect; + * likewise, only the outer acquisition will detect release of the object + * and throw the associated exception. + * @throws SQLException if the native state or the Java state has been + * released. + * @throws CancellationException if the thread is interrupted while waiting. + */ + public final void pin() throws SQLException + { + int r = _pin(); + if ( z(r) ) + return; + if ( !z(r & NATIVE_RELEASED) ) + throw new SQLException(invalidMessage(), invalidSqlState()); + throw new SQLException(releasedMessage(), releasedSqlState()); + } + + /** + * Obtain a pin on this state, if it is still valid, blocking if necessary + * until release of a lock. + *

      + * Pins are re-entrant; a thread may obtain more than one on the same + * object, in strictly nested fashion. Only the outer acquisition (and + * corresponding release) will have any memory synchronization effect; + * likewise, only the outer acquisition will detect release of the object + * and throw the associated exception. + * @return true if the state has already been released; this will often be + * used in a caller (such as a {@code close} or {@code free} operation) that + * will have nothing to do and return immediately if this method returns + * true. + * @throws CancellationException if the thread is interrupted while waiting. + */ + public final boolean pinUnlessReleased() + { + return !z(_pin()); + } + + /** + * Workhorse for {@code pin()} and {@code pinUnlessReleased()}. + * @return zero if the pin was obtained, otherwise {@code NATIVE_RELEASED}, + * {@code JAVA_RELEASED}, or both. + */ + private final int _pin() + { + /* + * The test for a reentrant pin will indicate its result by returning + * null (if the pin is reentrant and no further action is needed here) + * or a queue supplier, which is ready to supply a preallocated queue + * in case inflation is needed. + */ + Supplier> qSupplier = s_pinCount.pin(this); + + if ( null == qSupplier ) + return 0; // reentrant pin, no need for sync effort + + int s = 1 + (int)s_stateVH.getAndAdd(this, 1); // be optimistic + if ( z(s & ~ PINNERS_MASK) ) // nothing in s but a pin count? -> + return 0; // ... uncontended win! + if ( !z(s & (NATIVE_RELEASED | JAVA_RELEASED)) ) + return backoutPinBeforeEnqueue(s); + if ( !z(s & PINNERS_GUARD) ) + { + s = (int)s_stateVH.getAndAdd(this, -1); //recovery iffy in this case + s_pinCount.unpin(this); + throw new Error("DualState pin tracking capacity exceeded"); + } + /* + * The state is either MUTATOR_HOLDS or MUTATOR_WANTS. In either case, + * we're too late to get a pin right now, and need to join the waiters + * queue and move our bit from the PINNERS_MASK region to the + * WAITERS_MASK region (by adding the value of the least waiters bit + * minus one, which is equal to PINNERS_GUARD|PINNERS_MASK). + * + * Proceeding in that order allows the mutator thread (if it is in + * MUTATOR_HOLDS and already unparked), when it releases, to ensure it + * sees us in the queue, by spinning as long as it sees any bits in the + * 'wrong' area. + * + * If moving our bit leaves zero under PINNERS_MASK and it's the + * MUTATOR_WANTS case, we promote and unpark the mutator before parking. + */ + + Thread thr = Thread.currentThread(); + + /* + * Observation shows contention is very rare, so m_waiters can be left + * null for most DualState instances, and be 'inflated' by having a + * queue installed when first needed. That requires a null check here. + */ + if ( null != m_waiters ) + m_waiters.add(thr); + else + { + /* + * We install the queue with a CAS on m_waiters, which is enough to + * coordinate with any other thread trying to do this concurrently. + * It is not enough to synchronize with unlock(), which uses only a + * plain read on m_waiters. But it only does that after seeing our + * upcoming modification to m_state, which this happens before. + */ + if ( s_waitersVH.compareAndSet(this, null, qSupplier.get()) ) + { + /* + * We successfully inflated. The queue obtained from get() above + * already has this thread enqueued on it, so there is nothing + * else to do here. + */ + } + else + { + /* + * Somebody beat us to it. Their queue is just as good; use it. 
+ */ + m_waiters.add(thr); + } + } + + int t; + int u; + /* + * Top-of-loop invariant, s has either MUTATOR_HOLDS or MUTATOR_WANTS, + * and we're counted under PINNERS_MASK, but under WAITERS_MASK is where + * we belong. + * + * Construct t from s, but moving us under WAITERS_MASK; if that leaves + * zero under PINNERS_MASK and s has MUTATOR_WANTS, promote it to + * MUTATOR_HOLDS. Try to CAS the new state into place. + * + * The top-of-loop invariant must still hold if the CAS fails + * and s is refetched: a state without MUTATOR_HOLDS or MUTATOR_WANTS + * cannot be reached as long as we are looping, because our presence in + * the PINNERS count prevents a WANTS advancing to HOLDS, and also + * blocks the final CAS in the release of a HOLDS. + */ + for ( ;; s = u ) + { + t = s + (PINNERS_GUARD|PINNERS_MASK); + /* + * Not necessary to check here for NATIVE_RELEASED - it only gets + * set at the release of a lock, which is prevented while we spin. + * JAVA_RELEASED could have appeared, though. + */ + if ( !z(s & JAVA_RELEASED) ) + return backoutPinAfterEnqueue(s); + if ( !z(s & PINNERS_GUARD) ) + { + backoutPinAfterEnqueue(s); + throw new Error("DualState wait tracking capacity exceeded"); + } + if ( !z(t & MUTATOR_WANTS) && z(t & PINNERS_MASK) ) + t += MUTATOR_WANTS; // promote to MUTATOR_HOLDS, next bit left + u = (int)s_stateVH.compareAndExchange(this, s, t); + if ( s == u ) + break; + } + if ( !z(t & MUTATOR_HOLDS) && !z(s & MUTATOR_WANTS)) // promoted by us + unpark(s_mutatorThread); + + /* + * Invariant: t is the state before we park, and must have either + * MUTATOR_WANTS or MUTATOR_HOLDS (loop will exit if a state is fetched + * that has neither). + */ + for ( ;; t = s ) + { + if ( ! thr.isInterrupted() ) + park(this); + s = (int)s_stateVH.getVolatile(this); + if ( thr.isInterrupted() + || !z(s & (NATIVE_RELEASED | JAVA_RELEASED)) ) + return backoutPinAfterPark(t); + if ( !z(s & MUTATOR_HOLDS) ) // can only be a spurious unpark + continue; + if ( z(s & MUTATOR_WANTS) ) // no HOLDS, no WANTS, so + break; // we have our pin and are free to go + /* + * The newly-updated state has MUTATOR_WANTS. Check t (the pre-park + * state) to tease apart the cases for what that could mean. + */ + if ( !z(t & MUTATOR_HOLDS) ) // t, the pre-park state. + { + /* + * If MUTATOR_HOLDS was set when we parked, what the current + * state tells us is unambiguous: if it is now MUTATOR_WANTS, + * the earlier lock was released, we have our pin and are free + * to go, and the current MUTATOR_WANTS must wait for us. + */ + break; + } + /* + * This case is trickier. It was WANTS when we parked. The WANTS + * we now see could be the same WANTS we parked on, making this a + * spurious unpark, or it could be a new one that raced us to this + * point after the earlier one advanced to HOLDS, released, and + * unparked us. If that happened, we have our pin and are free to go + * (the new WANTS waits for us), and we can distinguish the cases + * because we are still in the queue in the former case but not the + * latter. (There is no race with the mutator thread draining the + * queue, because it does that with WANTS and HOLDS both clear, and + * remember, it is the only thread that gets to request a lock.) + */ + if ( m_waiters.contains(thr) ) + continue; + break; + } + return 0; + } + + /** + * Release a pin. + *

      + * If the current thread has pinned the same object more than once, only the + * last {@code unpin} will have any memory synchronization effect. + */ + public final void unpin() + { + if ( s_pinCount.unpin(this) ) + return; // it was a reentrant pin, no need for sync effort + + int s = 1; // start by assuming state is simple with only one pinner, us + int t; + int u; + for ( ;; s = u ) + { + assert 1 <= (s & PINNERS_MASK) : m("DualState unpin < 1 pinner"); + t = s - 1; + if ( !z(t & MUTATOR_WANTS) && z(t & PINNERS_MASK) ) + t += MUTATOR_WANTS; // promote to MUTATOR_HOLDS, next bit left + u = (int)s_stateVH.compareAndExchange(this, s, t); + if ( s == u ) + break; + } + + /* + * If there is a javaReleased event to schedule and a mutator to unpark, + * do them in that order, so the mutator will not see the event's + * clearing/enqueueing in progress. + */ + if ( !z(t & JAVA_RELEASED) && z(t & (WAITERS_MASK | PINNERS_MASK)) ) + scheduleJavaReleased(t); + if ( !z(t & MUTATOR_HOLDS) && !z(s & MUTATOR_WANTS)) // promoted by us + unpark(s_mutatorThread); + } + + /** + * Whether the current thread has pinned this object, for use in assertions. + * @return true if the current thread holds a(t least one) pin on + * the receiver, or is the PG thread and holds the lock. + */ + public final boolean pinnedByCurrentThread() + { + return s_pinCount.hasPin(this) || s_inCleanup.inCleanup(); + } + + /** + * Back out an in-progress pin before our thread has been placed on the + * queue. + * @param s the most recently known state + * @return the {@code NATIVE_RELEASED} and {@code JAVA_RELEASED} bits of + * the state + */ + private int backoutPinBeforeEnqueue(int s) + { + s_pinCount.unpin(this); + int t; + int u; + for ( ;; s = u ) + { + assert 1 <= (s & PINNERS_MASK) : m("backoutPinBeforeEnqueue"); + t = s - 1; + if ( !z(t & MUTATOR_WANTS) && z(t & PINNERS_MASK) ) + t += MUTATOR_WANTS; // promote to MUTATOR_HOLDS, next bit left + u = (int)s_stateVH.compareAndExchange(this, s, t); + if ( s == u ) + break; + } + /* + * See unpin() for why these are in this order. + */ + if ( !z(t & JAVA_RELEASED) && z(t & (WAITERS_MASK | PINNERS_MASK)) ) + scheduleJavaReleased(t); + if ( !z(t & MUTATOR_HOLDS) && !z(s & MUTATOR_WANTS)) // promoted by us + unpark(s_mutatorThread); + return t & (NATIVE_RELEASED | JAVA_RELEASED); + } + + /** + * Back out an in-progress pin after our thread has been placed on the + * queue, but before success of the CAS that counts us under WAITERS_MASK + * rather than PINNERS_MASK. + * @param s the most recently known state + * @return the {@code NATIVE_RELEASED} and {@code JAVA_RELEASED} bits of + * the state + */ + private int backoutPinAfterEnqueue(int s) + { + m_waiters.remove(Thread.currentThread()); + return backoutPinBeforeEnqueue(s); + } + + /** + * Back out a pin acquisition attempt from within the park loop (which + * includes a slim chance that the pin is, in fact, acquired by the time + * this method can complete, in which case the pin is immediately released). + *

      + * This is only called when a condition is detected during park for which an + * exception ought to be thrown. Therefore, after backing out, this method + * either throws the {@code CancellationException} (if thread interruption + * was the reason), or returns one or both of {@code NATIVE_RELEASED} or + * {@code JAVA_RELEASED}; it never returns zero. + * @param t prior state from before parking + * @throws CancellationException if the reason was thread interruption + * @return the {@code NATIVE_RELEASED} and {@code JAVA_RELEASED} bits of + * the state + */ + private int backoutPinAfterPark(int t) + { + boolean wasHolds = !z(t & MUTATOR_HOLDS); // t, the pre-park state + /* + * Quickly ADD a (fictitious) PINNER, which will reliably jam any more + * transitions by the mutator thread (WANTS to HOLDS, or HOLDS to + * released) while we determine what to do next. + */ + t = 1 + (int)s_stateVH.getAndAdd(this, 1); + + /* + * From the current state, determine whether our pin has in fact been + * acquired (so to back it out we must in fact unpin), or is still + * pending, using the same logic explained at the end of pin() above. + */ + boolean mustUnpin = + z(t & (MUTATOR_HOLDS | MUTATOR_WANTS)) + || z(t & MUTATOR_HOLDS) + && (wasHolds || ! m_waiters.contains(Thread.currentThread())); + + /* + * If the pin has been acquired and must be unpinned, we simply subtract + * the fictitious extra pinner added in this method, then unpin. + * + * Otherwise, we are still enqueued, and counted in the WAITERS region + * (along with our fictitious added PINNER). Subtract out the WAITER, + * which leaves (with the added PINNER) exactly the situation that + * backoutPinAfterEnqueue() knows how to clean up. + */ + + int delta; + if ( mustUnpin ) + { + delta = 1; + assert 1 < (t & PINNERS_MASK) : m("backoutPinAfterPark(acquired)"); + } + else + { + delta = 1 << WAITERS_SHIFT; + assert delta <= (t & WAITERS_MASK) : m("backoutPinAfterPark"); + } + t = (int)s_stateVH.getAndAdd(this, - delta) - delta; + + if ( mustUnpin ) + unpin(); + else + t = backoutPinAfterEnqueue(t); + /* + * One of the following conditions was the reason this method was + * called, so throw the appropriate exception. + */ + if ( Thread.interrupted() ) + throw (CancellationException) + new CancellationException("Interrupted waiting for pin") + .initCause(new InterruptedException()); + return t & (NATIVE_RELEASED | JAVA_RELEASED); + } + + /** + * Arrange the real work of cleaning up for an instance released by Java, + * as soon as there are no pins held on it. + *

      + * This is called immediately by {@code releaseFromJava} if there are no + * pins at the time; otherwise, it is called by {@code unpin} when the last + * pin is released. + *

      + * It could call {@code javaStateReleased} directly if the current thread + * may enter the native PostgreSQL code, otherwise adding the instance to + * the reference queue, to be handled when the queue is polled by such + * a thread. + *

      + * A complication arises because of a very slim chance that the instance has + * already been enqueued (see {@code releaseFromJava} for details), and + * should not be enqueued again. As if tailor-made for such a situation, the + * {@code Reference.enqueue} method only works one time, and is a no-op + * thereafter. Hence, a simple and workable scheme is to unconditionally + * call {@code enqueue} here (enqueuing the object, or not, if it already + * has been), and then call the queue drainer if this is the PG thread. + */ + private void scheduleJavaReleased(int s) + { + super.enqueue(); + + if ( Backend.threadMayEnterPG() ) + cleanEnqueuedInstances(); + } + + /** + * Take an exclusive lock in preparation to mutate the state. + *

      + * Only a thread for which {@code Backend.threadMayEnterPG()} returns true + * may acquire this lock. + * @param upgrade whether to acquire the lock without blocking even in the + * presence of a pin held by this thread; should be true only in cases where + * inspection shows a nearby enclosing pin whose assumptions clearly will + * not be violated by the actions to be taken under the lock. + * @return A semi-redacted version of the lock state, enough to discern + * whether it contains {@code NATIVE_RELEASED} or {@code JAVA_RELEASED} + * in case the caller cares, and for the paired {@code unlock} call to know + * whether this was a reentrant call, or should really be released. + */ + protected final int lock(boolean upgrade) + { + if ( ! Backend.threadMayEnterPG() ) + throw new IllegalThreadStateException( + "This thread may not mutate a DualState object"); + s_mutatorThread = Thread.currentThread(); + + assert !upgrade || pinnedByCurrentThread() : m("upgrade without pin"); + + int s = upgrade ? 1 : 0; // to start, assume simple state, no other pins + int t; + int u; + int contended = 0; + for ( ;; ) + { + t = s; + if ( upgrade ) + t += (PINNERS_GUARD|PINNERS_MASK); // hide my pin as a waiter + t |= ( !z(t & PINNERS_MASK) ? MUTATOR_WANTS : MUTATOR_HOLDS ); + u = (int)s_stateVH.compareAndExchange(this, s, t); + if ( s == u ) + break; + s = u; + if ( !z(s & MUTATOR_HOLDS) ) // surprise! this is a reentrant call. + return s & (NATIVE_RELEASED | JAVA_RELEASED | MUTATOR_HOLDS); + if ( z(s & PINNERS_MASK) ) + upgrade = false; // apparently we have no pin to upgrade + } + while ( z(t & MUTATOR_HOLDS) ) + { + contended = 1; + park(this); + t = (int)s_stateVH.getVolatile(this); + } + s_stats.lockContended(contended); + return t & (NATIVE_RELEASED | JAVA_RELEASED) | (upgrade? 1 : 0); + } + + /** + * Calls {@link #unlock(int,boolean) unlock(s, false)}. + * @param s must be the value returned by the {@code lock} call. + */ + protected final void unlock(int s) + { + unlock(s, false); + } + + /** + * Release a lock, optionally setting the {@code NATIVE_RELEASED} flag + * atomically in the process. + * @param s must be the value returned by the {@code lock} call. + * @param isNativeRelease whether to set the {@code NATIVE_RELEASED} flag. + */ + protected final void unlock(int s, boolean isNativeRelease) + { + int t; + int u; + if ( !z(s & MUTATOR_HOLDS) ) + { + /* + * The paired lock() determined it was already held (this was a + * reentrant acquisition), so that the obvious thing to do here + * is nothing. However, if the caller wants to set NATIVE_RELEASED + * (and it wasn't already), that has to happen, even if nothing + * else does. + */ + if ( isNativeRelease && z(s & NATIVE_RELEASED) ) + u = (int)s_stateVH.getAndBitwiseOr(this, NATIVE_RELEASED); + return; + } + + boolean upgrade = !z(s & 1); // saved there in the last line of lock() + + /* + * We are here, so this is a real unlock action. In the same motion, we + * will CAS in the NATIVE_RELEASED bit if the caller wants it. + */ + int release = isNativeRelease ? NATIVE_RELEASED : 0; + + s = MUTATOR_HOLDS; // start by assuming state is simple, just our lock + if ( upgrade ) + s |= 1 << WAITERS_SHIFT; // ok, assume our stashed pin is there too + for ( ;; ) + { + t = s & ~(MUTATOR_HOLDS|WAITERS_MASK|PINNERS_MASK); + t |= release | ( (s & WAITERS_MASK) >>> WAITERS_SHIFT ); + /* + * Zero the PINNERS region in s, so the CAS will fail if anything's + * there. 
During MUTATOR_HOLDS, the only bits under PINNERS_MASK + * represent new would-be pinners while they add themselves to the + * queue, so we just spin for them here until they've moved their + * bits under WAITERS_MASK where they belong. Then trust the queue. + */ + s &= ~ PINNERS_MASK; + u = (int)s_stateVH.compareAndExchange(this, s, t); + if ( s == u ) + break; + s = u; + assert !z(s & MUTATOR_HOLDS) : m("DualState mispaired unlock"); + /* + * Most of our CAS spins in this class require only that no other + * thread write s between our fetch and CAS, so 'starving' other + * threads can't last long, and in fact guarantees rapid success. + * This spin, however, could go on for as long as it takes for some + * other thread to enqueue itself and move its bit out of the way. + * That's still a very short spin, unless we are short of CPUs and + * actually competing with the thread we're waiting for. Hence + * onSpinWait seems prudent, if pinner count is the reason we spin. + */ + if ( !z(s & PINNERS_MASK) ) + Thread.onSpinWait(); + } + /* + * It's good to be the only thread allowed to mutate. Nobody else will + * touch this queue until the next time we want a mutate lock, so simply + * drain it and unpark every waiting thread. + */ + t &= PINNERS_MASK; // should equal the number of threads on the queue + if ( upgrade ) // my pin bit was stashed as a waiter, but nothing queued + -- t; + /* + * If no waiters (t is zero), we are done. Don't bother comparing zero + * to the queue size; inflation may not have supplied a queue yet. + */ + if ( 0 == t ) + return; + s_stats.pinContended(t); + Thread thr; + while ( null != (thr = m_waiters.poll()) ) + { + -- t; + unpark(thr); + } + assert 0 == t : m("Miscount of DualState wait queue"); + } + + /** + * Specialized version of {@link #lock lock} for use by code implementing an + * {@code adopt} operation (in which complete control of an object is handed + * back to PostgreSQL and it is dissociated from Java). + *

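+ * An illustrative sketch of how a subclass's adopt operation might pair this
+ * with {@code adoptionUnlock} (the method name and the accessor for the
+ * guarded native value are hypothetical):
+ *<pre>{@code
+ * long adopt(Key cookie) throws SQLException  // PG thread, pin already held
+ * {
+ *     adoptionLock(cookie);
+ *     try
+ *     {
+ *         return guardedNativeValue();  // hand the native state back to PG
+ *     }
+ *     finally
+ *     {
+ *         adoptionUnlock(cookie);
+ *     }
+ * }
+ * }</pre>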
      + * Can only be called on the PG thread, which must already hold a pin. + * No other thread can hold a pin, and neither the {@code NATIVE_RELEASED} + * nor {@code JAVA_RELEASED} flag may be set. This method is non-blocking + * and will simply throw an exception if these preconditions are not + * satisfied. + * @param cookie Capability held by native code to invoke special + * {@code DualState} methods. + */ + protected final void adoptionLock(Key cookie) throws SQLException + { + checkCookie(cookie); + s_mutatorThread = Thread.currentThread(); + assert pinnedByCurrentThread() : m("adoptionLock without pin"); + int s = 1; // must be: quiescent (our pin only), unreleased + int t = NATIVE_RELEASED | JAVA_RELEASED | MUTATOR_HOLDS + | 1 << WAITERS_SHIFT; + if ( ! (boolean)s_stateVH.compareAndSet(this, s, t) ) + throw new SQLException( + "Attempt by PostgreSQL to adopt a released or non-quiescent " + + "Java object"); + } + + /** + * Specialized version of {@link #unlock(int, boolean) unlock} for use by + * code implementing an {@code adopt} operation (in which complete control + * of an object is handed back to PostgreSQL and it is dissociated from + * Java). + *

      + * Must only be called on the PG thread, which must have acquired + * {@code adoptionLock}. Invokes the {@code nativeStateReleased} callback, + * then releases the lock, leaving both {@code NATIVE_RELEASED} + * and {@code JAVA_RELEASED} flags set. When the calling code releases the + * prior pin it was expected to hold, the {@code javaStateReleased} callback + * will execute. A value of false will be passed to both callbacks. + * @param cookie Capability held by native code to invoke special + * {@code DualState} methods. + */ + protected final void adoptionUnlock(Key cookie) throws SQLException + { + checkCookie(cookie); + int s = NATIVE_RELEASED | JAVA_RELEASED | MUTATOR_HOLDS + | 1 << WAITERS_SHIFT; + int t = NATIVE_RELEASED | JAVA_RELEASED | 1; + + /* + * nativeStateReleased is, as usual, executed while holding the lock. + */ + nativeStateReleased(false); + + if ( ! (boolean)s_stateVH.compareAndSet(this, s, t) ) + throw new SQLException("Release failed while adopting Java object"); + + /* + * The release of our pre-existing pin will take care of delisting and + * executing javaStateReleased. + */ + } + + /** + * Return a string identifying this object in a way useful within an + * exception message for use of this state after native release or Java + * release. + *

      + * This implementation returns the class name of the referent, or of this + * object if the referent has already been cleared. + */ + protected String identifierForMessage() + { + String id; + Object referent = referent(); + if ( null != referent ) + id = referent.getClass().getName(); + else + id = getClass().getName(); + return id; + } + + /** + * Return a string for an exception message reporting the use of this object + * after the native state has been released. + *

      + * This implementation returns {@code identifierForMessage()} with + * " used beyond its PostgreSQL lifetime" appended. + */ + protected String invalidMessage() + { + return identifierForMessage() + " used beyond its PostgreSQL lifetime"; + } + + /** + * Return a string for an exception message reporting the use of this object + * after the Java state has been released. + *

      + * This implementation returns {@code identifierForMessage()} with + * " used after released by Java" appended. + */ + protected String releasedMessage() + { + return identifierForMessage() + " used after released by Java"; + } + + /** + * Return the SQLSTATE appropriate for an attempt to use this object + * after its native state has been released. + *

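+ * A subclass guarding, say, cursor state could override this to report a
+ * more specific code; purely illustrative:
+ *<pre>{@code
+ * protected String invalidSqlState()
+ * {
+ *     return "34000";   // invalid_cursor_name
+ * }
+ * }</pre>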
      + * This implementation returns 55000, object not in prerequisite state. + */ + protected String invalidSqlState() + { + return "55000"; + } + + /** + * Return the SQLSTATE appropriate for an attempt to use this object + * after its Java state has been released. + *

      + * This implementation returns 55000, object not in prerequisite state. + */ + protected String releasedSqlState() + { + return "55000"; + } + + /** + * Produce a string describing this state object in a way useful for + * debugging, with such information as the associated {@code ResourceOwner} + * and whether the state is fresh or stale. + *

      + * This method calls {@link #toString(Object)} passing {@code this}. + * Subclasses are encouraged to override that method with versions that add + * subclass-specific details. + * @return Description of this state object. + */ + @Override + public String toString() + { + return toString(this); + } + + /** + * Produce a string with such details of this object as might be useful for + * debugging, starting with an abbreviated form of the class name of the + * supplied object. + *

      + * Subclasses are encouraged to override this and then call it, via super, + * passing the object unchanged, and then append additional + * subclass-specific details to the result. + *

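+ * For example, a subclass guarding a hypothetical native pointer
+ * {@code m_fooPtr} might do:
+ *<pre>{@code
+ * public String toString(Object o)
+ * {
+ *     return String.format("%s fooPointer(%x)", super.toString(o), m_fooPtr);
+ * }
+ * }</pre>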
      + * Because the recursion ends here, this one actually does construct the + * abbreviated form of the class name of the object, and use it at the start + * of the returned string. + * @param o An object whose class name (abbreviated by stripping the package + * prefix) will be used at the start of the string. Passing {@code null} is + * the same as passing {@code this}. + * @return Description of this state object, prefixed with the abbreviated + * class name of the passed object. + */ + public String toString(Object o) + { + Class c = (null == o ? this : o).getClass(); + String cn = c.getCanonicalName(); + int pnl = c.getPackageName().length(); + return String.format("%s owner:%x %s", + cn.substring(1 + pnl), m_resourceOwner, + z((int)s_stateVH.getVolatile(this) & NATIVE_RELEASED) + ? "fresh" : "stale"); + } + + /** + * Called only from native code by the {@code ResourceOwner} callback when a + * resource owner is being released. Must identify the live instances that + * have been registered to that owner, if any, and call their + * {@link #nativeStateReleased nativeStateReleased} methods. + * @param resourceOwner Pointer value identifying the resource owner being + * released. Calls can be received for resource owners to which no instances + * here have been registered. + *

      + * Some state subclasses may have their nativeStateReleased methods called + * from Java code, when it is clear the native state is no longer needed in + * Java. That doesn't remove the state instance from s_scopedInstances, + * though, so it will still eventually be seen by this loop and efficiently + * removed by the iterator. Hence the {@code NATIVE_RELEASED} test, to avoid + * invoking nativeStateReleased more than once. + */ + private static void resourceOwnerRelease(long resourceOwner) + { + long total = 0L, release = 0L; + + assert Backend.threadMayEnterPG() : m("resourceOwnerRelease thread"); + + DualState head = s_scopedInstances.remove(resourceOwner); + if ( null == head ) + return; + + DualState t = head.m_next; + head.m_prev = head.m_next = null; + for ( DualState s = t ; s != head ; s = t ) + { + t = s.m_next; + s.m_prev = s.m_next = null; + ++ total; + /* + * This lock() is part of DualState's contract with clients. + * They are responsible for pinning the state instance + * whenever they need the wrapped native state (which is verified + * to still be valid at that time) and for the duration of whatever + * operation needs access to that state. Taking this lock here + * ensures the native state is blocked from vanishing while it is + * actively in use. + */ + int state = s.lock(false); + try + { + if ( z(NATIVE_RELEASED & state) ) + { + ++ release; + s.nativeStateReleased( + z(JAVA_RELEASED & state) && null != s.referent()); + } + } + finally + { + s.unlock(state, true); // true -> ensure NATIVE_RELEASED is set. + } + } + + s_stats.resourceOwnerPoll(release, total); + } + + /** + * Called only from native code, at points where checking the + * freed/unreachable objects queue would be useful, or from + * {@code scheduleJavaReleased} when on the PG thread. Calls the + * {@link #javaStateUnreachable javaStateUnreachable} method for instances + * that were cleared and enqueued by the garbage collector; calls the + * {@link #javaStateReleased javaStateReleased} method for instances that + * have not yet been garbage collected, but were enqueued by Java code + * explicitly calling {@link #releaseFromJava releaseFromJava}. + */ + private static void cleanEnqueuedInstances() + { + long total = 0L, release = 0L, reDefer = 0L; + DualState s; + int nDeferred = s_deferredReleased.size(); + boolean isDeferred; + + assert s_inCleanup.enter(); // no-op when assertions disabled + try + { + for ( ;; ) + { + isDeferred = 0 < nDeferred; + if ( isDeferred ) + { + -- nDeferred; + s = s_deferredReleased.remove(); + } + else if ( null == (s = (DualState)s_releasedInstances.poll()) ) + break; + + int state = (int)s_stateVH.getVolatile(s); + if ( !z((PINNERS_MASK | WAITERS_MASK) & state) ) + { + s_deferredReleased.add(s); + if ( isDeferred ) + ++ reDefer; + continue; + } + + ++ total; + + s.delist(); + try + { + if ( !z(JAVA_RELEASED & state) ) + { + ++ release; + s.javaStateReleased(z(NATIVE_RELEASED & state)); + } + else if ( z(NATIVE_RELEASED & state) ) + s.javaStateUnreachable(z(NATIVE_RELEASED & state)); + } + catch ( Throwable t ) { } /* JDK 9 Cleaner ignores exceptions */ + } + } + finally + { + assert s_inCleanup.exit(); + } + + s_stats.referenceQueueDrain(total - release, release, total, reDefer); + } + + /** + * Remove this instance from the data structure holding it, for scoped + * instances if it has a non-zero resource owner, otherwise for unscoped + * instances. 
+ */ + private void delist() + { + assert Backend.threadMayEnterPG() : m("DualState delist thread"); + + if ( 0 == m_resourceOwner ) + { + if ( null != s_unscopedInstances.remove(this) ) + s_stats.delistUnscoped(); + return; + } + + if ( null == m_prev || null == m_next ) + return; + if ( this == m_prev.m_next ) + m_prev.m_next = m_next; + if ( this == m_next.m_prev ) + m_next.m_prev = m_prev; + m_prev = m_next = null; + s_stats.delistScoped(); + } + + /** + * Magic cookie needed as a constructor parameter to confirm that + * {@code DualState} subclass instances are being constructed from + * native code. + */ + public static final class Key + { + private static boolean constructed = false; + private Key() + { + synchronized ( Key.class ) + { + if ( constructed ) + throw new IllegalStateException("Duplicate DualState.Key"); + constructed = true; + } + } + } + + /** + * Dummy DualState concrete class whose instances only serve as list + * headers in per-resource-owner lists of instances. + */ + private static class ListHead extends DualState // because why not? + { + /** + * Construct a {@code ListHead} instance. As a subclass of + * {@code DualState}, it can't help having a resource owner field, so + * may as well use it to store the resource owner that the list is for, + * in case it's of interest in debugging. + * @param owner The resource owner + */ + private ListHead(long owner) + { + super("", owner); // An instance needs an object to be its referent + } + + @Override + public String toString(Object o) + { + return String.format( + "DualState.ListHead for resource owner %x", m_resourceOwner); + } + } + + /** + * A {@code DualState} subclass serving only to guard access to a single + * nonzero {@code long} value (typically a native pointer). + *

      + * Nothing in particular is done to the native resource at the time of + * {@code javaStateReleased} or {@code javaStateUnreachable}; if it is + * subject to reclamation, this class assumes it will be shortly, in the + * normal operation of the native code. This can be appropriate for native + * state that was set up by a native caller for a short lifetime, such as a + * single function invocation. + */ + public static abstract class SingleGuardedLong extends DualState + { + private final long m_guardedLong; + + protected SingleGuardedLong( + Key cookie, T referent, long resourceOwner, long guardedLong) + { + super(cookie, referent, resourceOwner); + m_guardedLong = guardedLong; + } + + @Override + public String toString(Object o) + { + return + String.format(formatString(), super.toString(o), m_guardedLong); + } + + /** + * Return a {@code printf} format string resembling + * {@code "%s something(%x)"} where the {@code %x} will be the value + * being guarded; the "something" should indicate what the value + * represents, or what will be done with it when released by Java. + */ + protected String formatString() + { + return "%s GuardedLong(%x)"; + } + + protected final long guardedLong() + { + assert pinnedByCurrentThread() : m("guardedLong() without pin"); + return m_guardedLong; + } + } + + /** + * A {@code DualState} subclass whose only native resource releasing action + * needed is {@code pfree} of a single pointer. + */ + public static abstract class SinglePfree extends SingleGuardedLong + { + protected SinglePfree( + Key cookie, T referent, long resourceOwner, long pfreeTarget) + { + super(cookie, referent, resourceOwner, pfreeTarget); + } + + @Override + protected String formatString() + { + return "%s pfree(%x)"; + } + + /** + * When the Java state is released or unreachable, a {@code pfree} + * call is made so the native memory is released without having to wait + * for release of its containing context. + */ + @Override + protected void javaStateUnreachable(boolean nativeStateLive) + { + assert Backend.threadMayEnterPG(); + if ( nativeStateLive ) + _pfree(guardedLong()); + } + + private native void _pfree(long pointer); + } + + /** + * A {@code DualState} subclass whose only native resource releasing action + * needed is {@code MemoryContextDelete} of a single context. + *

      + * This class may get called at the {@code nativeStateReleased} entry, not + * only if the native state is actually being released, but if it is being + * 'claimed' by native code for its own purposes. The effect is the same + * as far as Java is concerned; the object is no longer accessible, and the + * native code is responsible for whatever happens to it next. + */ + public static abstract class SingleMemContextDelete + extends SingleGuardedLong + { + protected SingleMemContextDelete( + Key cookie, T referent, long resourceOwner, long memoryContext) + { + super(cookie, referent, resourceOwner, memoryContext); + } + + @Override + public String formatString() + { + return "%s MemoryContextDelete(%x)"; + } + + /** + * When the Java state is released or unreachable, a + * {@code MemoryContextDelete} + * call is made so the native memory is released without having to wait + * for release of its parent context. + */ + @Override + protected void javaStateUnreachable(boolean nativeStateLive) + { + assert Backend.threadMayEnterPG(); + if ( nativeStateLive ) + _memContextDelete(guardedLong()); + } + + private native void _memContextDelete(long pointer); + } + + /** + * A {@code DualState} subclass whose only native resource releasing action + * needed is {@code FreeTupleDesc} of a single pointer. + */ + public static abstract class SingleFreeTupleDesc + extends SingleGuardedLong + { + protected SingleFreeTupleDesc( + Key cookie, T referent, long resourceOwner, long ftdTarget) + { + super(cookie, referent, resourceOwner, ftdTarget); + } + + @Override + public String formatString() + { + return "%s FreeTupleDesc(%x)"; + } + + /** + * When the Java state is released or unreachable, a + * {@code FreeTupleDesc} + * call is made so the native memory is released without having to wait + * for release of its containing context. + */ + @Override + protected void javaStateUnreachable(boolean nativeStateLive) + { + assert Backend.threadMayEnterPG(); + if ( nativeStateLive ) + _freeTupleDesc(guardedLong()); + } + + private native void _freeTupleDesc(long pointer); + } + + /** + * A {@code DualState} subclass whose only native resource releasing action + * needed is {@code heap_freetuple} of a single pointer. + */ + public static abstract class SingleHeapFreeTuple + extends SingleGuardedLong + { + protected SingleHeapFreeTuple( + Key cookie, T referent, long resourceOwner, long hftTarget) + { + super(cookie, referent, resourceOwner, hftTarget); + } + + @Override + public String formatString() + { + return"%s heap_freetuple(%x)"; + } + + /** + * When the Java state is released or unreachable, a + * {@code heap_freetuple} + * call is made so the native memory is released without having to wait + * for release of its containing context. + */ + @Override + protected void javaStateUnreachable(boolean nativeStateLive) + { + assert Backend.threadMayEnterPG(); + if ( nativeStateLive ) + _heapFreeTuple(guardedLong()); + } + + private native void _heapFreeTuple(long pointer); + } + + /** + * A {@code DualState} subclass whose only native resource releasing action + * needed is {@code FreeErrorData} of a single pointer. 
+ */ + public static abstract class SingleFreeErrorData + extends SingleGuardedLong + { + protected SingleFreeErrorData( + Key cookie, T referent, long resourceOwner, long fedTarget) + { + super(cookie, referent, resourceOwner, fedTarget); + } + + @Override + public String formatString() + { + return "%s FreeErrorData(%x)"; + } + + /** + * When the Java state is released or unreachable, a + * {@code FreeErrorData} + * call is made so the native memory is released without having to wait + * for release of its containing context. + */ + @Override + protected void javaStateUnreachable(boolean nativeStateLive) + { + assert Backend.threadMayEnterPG(); + if ( nativeStateLive ) + _freeErrorData(guardedLong()); + } + + private native void _freeErrorData(long pointer); + } + + /** + * A {@code DualState} subclass whose only native resource releasing action + * needed is {@code SPI_freeplan} of a single pointer. + */ + public static abstract class SingleSPIfreeplan + extends SingleGuardedLong + { + protected SingleSPIfreeplan( + Key cookie, T referent, long resourceOwner, long fpTarget) + { + super(cookie, referent, resourceOwner, fpTarget); + } + + @Override + public String formatString() + { + return "%s SPI_freeplan(%x)"; + } + + /** + * When the Java state is released or unreachable, an + * {@code SPI_freeplan} + * call is made so the native memory is released without having to wait + * for release of its containing context. + */ + @Override + protected void javaStateUnreachable(boolean nativeStateLive) + { + assert Backend.threadMayEnterPG(); + if ( nativeStateLive ) + _spiFreePlan(guardedLong()); + } + + private native void _spiFreePlan(long pointer); + } + + /** + * A {@code DualState} subclass whose only native resource releasing action + * needed is {@code SPI_cursor_close} of a single pointer. + */ + public static abstract class SingleSPIcursorClose + extends SingleGuardedLong + { + protected SingleSPIcursorClose( + Key cookie, T referent, long resourceOwner, long ccTarget) + { + super(cookie, referent, resourceOwner, ccTarget); + } + + @Override + public String formatString() + { + return "%s SPI_cursor_close(%x)"; + } + + /** + * When the Java state is released or unreachable, an + * {@code SPI_cursor_close} + * call is made so the native memory is released without having to wait + * for release of its containing context. + *

      + * For this class (and for reasons that weren't made + * obvious in the original code this reimplements), the native code will + * avoid calling {@code SPI_cursor_close} if the {@code Invocation}'s + * error-occurred flag is set, or during a callback from the executor + * through an {@code ExprContextCallbackFunction}. + */ + @Override + protected void javaStateUnreachable(boolean nativeStateLive) + { + assert Backend.threadMayEnterPG(); + if ( nativeStateLive ) + _spiCursorClose(guardedLong()); + } + + /* + * This code copied from its former location in Portal.c, for reasons + * not really explained there, is different from most of the other + * javaStateReleased actions here, by virtue of being conditional; it + * does nothing if the current Invocation's errorOccurred flag is set, + * or during an end-of-expression-context callback from the executor. + */ + private native void _spiCursorClose(long pointer); + } + + /** + * Bean exposing some {@code DualState} allocation and lifecycle statistics + * for viewing in a JMX management client. + */ + static class Statistics implements DualStateStatistics + { + public long getConstructed() + { + return constructed.sum(); + } + + public long getEnlistedScoped() + { + return enlistedScoped.sum(); + } + + public long getEnlistedUnscoped() + { + return enlistedUnscoped.sum(); + } + + public long getDelistedScoped() + { + return delistedScoped.sum(); + } + + public long getDelistedUnscoped() + { + return delistedUnscoped.sum(); + } + + public long getJavaUnreachable() + { + return javaUnreachable.sum(); + } + + public long getJavaReleased() + { + return javaReleased.sum(); + } + + public long getNativeReleased() + { + return nativeReleased.sum(); + } + + public long getResourceOwnerPasses() + { + return resourceOwnerPasses.sum(); + } + + public long getReferenceQueuePasses() + { + return referenceQueuePasses.sum(); + } + + public long getReferenceQueueItems() + { + return referenceQueueItems.sum(); + } + + public long getContendedLocks() + { + return contendedLocks.sum(); + } + + public long getContendedPins() + { + return contendedPins.sum(); + } + + public long getRepeatedlyDeferred() + { + return repeatedlyDeferred.sum(); + } + + public long getGcReleaseRaces() + { + return gcRelRaces.sum(); + } + + public long getReleaseReleaseRaces() + { + return relRelRaces.sum(); + } + + + private LongAdder constructed = new LongAdder(); + private LongAdder enlistedScoped = new LongAdder(); + private LongAdder enlistedUnscoped = new LongAdder(); + private LongAdder delistedScoped = new LongAdder(); + private LongAdder delistedUnscoped = new LongAdder(); + private LongAdder javaUnreachable = new LongAdder(); + private LongAdder javaReleased = new LongAdder(); + private LongAdder nativeReleased = new LongAdder(); + private LongAdder resourceOwnerPasses = new LongAdder(); + private LongAdder referenceQueuePasses = new LongAdder(); + private LongAdder referenceQueueItems = new LongAdder(); + private LongAdder contendedLocks = new LongAdder(); + private LongAdder contendedPins = new LongAdder(); + private LongAdder repeatedlyDeferred = new LongAdder(); + private LongAdder gcRelRaces = new LongAdder(); + private LongAdder relRelRaces = new LongAdder(); + + final void construct(long scoped) + { + constructed.increment(); + enlistedScoped.add(scoped); + enlistedUnscoped.add(1L - scoped); + } + + final void resourceOwnerPoll(long released, long total) + { + resourceOwnerPasses.increment(); + nativeReleased.add(released); + delistedScoped.add(total); + } + + 
final void javaRelease(long scoped, long unscoped) + { + javaReleased.increment(); + delistedScoped.add(scoped); + delistedUnscoped.add(unscoped); + } + + final void referenceQueueDrain( + long unreachable, long release, long total, long reDefer) + { + referenceQueuePasses.increment(); + referenceQueueItems.add(total); + javaUnreachable.add(unreachable); + javaReleased.add(release); + repeatedlyDeferred.add(reDefer); + } + + final void delistScoped() + { + delistedScoped.increment(); + } + + final void delistUnscoped() + { + delistedUnscoped.increment(); + } + + final void javaRelease() + { + javaReleased.increment(); + } + + final void lockContended(int n) + { + contendedLocks.add(n); + } + + final void pinContended(int n) + { + contendedPins.add(n); + } + + final void gcReleaseRace() + { + gcRelRaces.increment(); + } + + final void releaseReleaseRace() + { + gcRelRaces.increment(); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/EntryPoints.java b/pljava/src/main/java/org/postgresql/pljava/internal/EntryPoints.java new file mode 100644 index 00000000..707bbf56 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/EntryPoints.java @@ -0,0 +1,401 @@ +/* + * Copyright (c) 2020-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodType; +import static java.lang.invoke.MethodType.methodType; + +import java.security.AccessControlContext; +import java.security.AccessControlException; +import java.security.Permission; +import static java.security.AccessController.doPrivileged; +import java.security.PrivilegedAction; + +import java.sql.SQLData; +import java.sql.SQLException; +import java.sql.SQLNonTransientException; +import java.sql.SQLInput; +import java.sql.SQLOutput; + +import static java.util.Objects.requireNonNull; + +import org.postgresql.pljava.internal.UncheckedException; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; + +/* + * PrivilegedAction is used here in preference to PrivilegedExceptionAction, + * because a PrivilegedActionException can only wrap an Exception, but method + * handle invocation is declared to throw any Throwable. So there needs to be + * wrapping done even with PrivilegedExceptionAction, and whatever is wrapped + * as a runtime exception will propagate up through PrivilegedAction just fine, + * leaving only one flavor of wrapping to deal with rather than two. + */ + +/** + * A class to consolidate entry points from C to PL/Java functions. + *

      + * The *invoke methods in this class can be private, as they are invoked only + * from C via JNI, not from Java. + *

      + * The primary entry point is {@code invoke}. The supplied {@code Invocable}, + * created by {@code invocable} below for its caller {@code Function.create}, + * may contain a {@code MethodHandle}, or a {@code PrivilegedAction} that + * wraps one. The {@code MethodHandle} may have bound references to static + * parameter areas, and will fetch the actual parameters from there to the + * stack before invoking the (potentially reentrant) target method. Primitive + * return values are then stored (after the potentially reentrant method has + * returned) in the first slot of the static parameter area, to allow a single + * {@code Object}-returning {@code invoke} method to cover those cases, rather + * than versions for references, every primitive return type, and {@code void}. + * The {@code PrivilegedAction} is expected to return null for a {@code void} + * or primitive-typed target. + *

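+ * A toy sketch of that shape (not the real adaptation built in
+ * {@code Function.create}; {@code Toy}, {@code greet} and {@code REF_PARAMS}
+ * are made up, and static imports from
+ * {@code java.lang.invoke.MethodHandles} and {@code MethodType} are assumed):
+ *<pre>{@code
+ * // A static "parameter area" the C side would fill before calling invoke:
+ * static final Object[] REF_PARAMS = new Object[16];
+ * static String greet(String who) { return "Hello, " + who; }
+ *
+ * static Invocable<?> toyInvocable(AccessControlContext acc)
+ * throws ReflectiveOperationException
+ * {
+ *     MethodHandle target = lookup().findStatic(
+ *         Toy.class, "greet", methodType(String.class, String.class));
+ *     MethodHandle fetch0 = insertArguments(        // () -> REF_PARAMS[0]
+ *         arrayElementGetter(Object[].class), 0, (Object)REF_PARAMS, 0);
+ *     MethodHandle shaped = dropArguments(          // (acc) -> Object
+ *         collectArguments(
+ *             target.asType(methodType(Object.class, Object.class)),
+ *             0, fetch0),
+ *         0, AccessControlContext.class);
+ *     return invocable(shaped, acc);
+ * }
+ * }</pre>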
      + * The remaining {@code fooInvoke} methods here are for user-defined type (UDT) + * support. For now, those are not consolidated into the general {@code invoke} + * pattern, as UDT support methods may need to be called from the C code + * while it is populating the static parameter area for an ultimate target + * method, so they must be invocable without using the same area. + *

      + * An {@code Invocable} carries the {@code AccessControlContext} under which the + * invocation target will execute. + */ +class EntryPoints +{ + /** + * Prevent instantiation. + */ + private EntryPoints() + { + } + + private static final MethodType s_generalType = + methodType(Object.class, AccessControlContext.class); + private static final MethodType s_udtCtor = methodType(SQLData.class); + private static final MethodType s_udtParse = + methodType(SQLData.class, String.class, String.class); + + /** + * Wrap a {@code MethodHandle} in an {@code Invocable} suitable for + * passing directly to {@link #invoke invoke()}. + *

      + * The supplied method handle must have type + * {@code (AccessControlContext)Object}, and fetch any + * parameter values needed by its target from bound-in references to the + * static reference and primitive parameter areas. If its ultimate target + * has {@code void} or a primitive return type, the handle must be + * constructed to return null, storing any primitive value returned into + * the first static primitive-parameter slot. + *

      + * The {@code AccessControlContext} passed to the handle will be the same + * one under which it will be invoked, and so can be ignored in most cases. + * A handle for a value-per-call set-returning function can copy it into the + * {@code Invocable} that it creates from this function's result, to be used + * for iteratively retrieving the results. + */ + static Invocable invocable(MethodHandle mh, AccessControlContext acc) + { + if ( null == mh + || s_udtCtor.equals(mh.type()) || s_udtParse.equals(mh.type()) ) + return new Invocable(mh, acc); + + if ( ! s_generalType.equals(mh.type()) ) + throw new IllegalArgumentException( + "invocable() passed a MethodHandle with unexpected type"); + + /* + * The EntryPoints class is specially loaded early in PL/Java's startup + * to have unrestricted permissions, so that PL/Java user code can be + * granted permissions as desired in the policy and be able to use those + * without fussing with doPrivileged or having to grant the same + * permissions redundantly to PL/Java itself. + * + * Lambdas end up looking like distinct classes under the hood, but the + * "class" of a lambda is given the same protection domain as its host + * class, so the lambda created here shares the EntryPoints class's own + * specialness, making the scheme Just Work. + */ + PrivilegedAction a = () -> + { + try + { + return mh.invokeExact(acc); + } + catch ( Error | RuntimeException e ) + { + throw e; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + }; + + return new Invocable>(a, acc); + } + + /** + * Entry point for a general PL/Java function. + * @param target Invocable obtained from Function.create that will + * push the actual parameters and call the target method. + * @return The value returned by the target method, or null if the method + * has void type or returns a primitive (which will have been returned in + * the first static primitive parameter slot). + */ + private static Object invoke(Invocable> target) + throws Throwable + { + assert PrivilegedAction.class.isInstance(target.payload); + + return doPrivilegedAndUnwrap(target.payload, target.acc); + } + + /** + * Entry point for calling the {@code writeSQL} method of a UDT. + *

      + * Like {@code udtReadInvoke}, this is a distinct entry point in order to + * avoid use of the static parameter area. While this operation is not + * expected during preparation of a function's parameter list, it can occur + * during a function's execution, if it stores values of user-defined type + * into result sets, prepared-statement bindings, etc. Such uses are not + * individually surrounded by {@code pushInvocation}/{@code popInvocation} + * as ordinary function calls are, and the {@code ParameterFrame} save and + * restore mechanism relies on those, so it is better for this entry point + * also to be handled specially. + * @param target Invocable carrying the appropriate AccessControlContext + * (target's action is unused and expected to be null) + * @param o the UDT instance + * @param stream the SQLOutput stream on which the type's internal + * representation will be written + */ + private static void udtWriteInvoke( + Invocable target, SQLData o, SQLOutput stream) + throws Throwable + { + PrivilegedAction action = () -> + { + try + { + o.writeSQL(stream); + return null; + } + catch ( SQLException e ) + { + throw unchecked(e); + } + }; + + doPrivilegedAndUnwrap(action, target.acc); + } + + /** + * Entry point for calling the {@code toString} method of a UDT. + *

      + * This can be called during transformation of a UDT that has a + * NUL-terminated storage form, and without being separately wrapped in + * {@code pushInvocation}/{@code popInvocation}, so it gets its own entry + * point here to avoid use of the static parameter area. + * @param target Invocable carrying the appropriate AccessControlContext + * (target's action is unused and expected to be null) + * @param o the UDT instance + * @return the UDT's text representation + */ + private static String udtToStringInvoke(Invocable target, SQLData o) + { + PrivilegedAction action = () -> + { + return o.toString(); + }; + + return doPrivileged(action, target.acc); + } + + /** + * Entry point for calling the {@code readSQL} method of a UDT, after + * constructing an instance first. + *

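+ * The sort of user-defined type these UDT entry points drive might look like
+ * this (hypothetical example mapping a two-component PostgreSQL type):
+ *<pre>{@code
+ * public class Point implements SQLData
+ * {
+ *     private double m_x, m_y;
+ *     private String m_typeName;
+ *
+ *     public String getSQLTypeName() { return m_typeName; }
+ *
+ *     public void readSQL(SQLInput in, String typeName) throws SQLException
+ *     {
+ *         m_typeName = typeName;
+ *         m_x = in.readDouble();
+ *         m_y = in.readDouble();
+ *     }
+ *
+ *     public void writeSQL(SQLOutput out) throws SQLException
+ *     {
+ *         out.writeDouble(m_x);
+ *         out.writeDouble(m_y);
+ *     }
+ * }
+ * }</pre>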
      + * This gets its own entry point so parameters can be passed to it + * independently of the static parameter area used for ordinary function + * invocations. Should an ordinary function have a parameter that is of a + * user-defined type, this entry point is used to instantiate the Java + * form of that parameter during the assembly of the function's + * parameter list, so the static area is not touched here. + * @param target an Invocable that returns a new instance, on which readSQL + * will then be called. The Invocable's access control context will be in + * effect for both operations. + * @param stream the SQLInput stream from which to read the UDT's internal + * representation + * @param typeName the SQL type name to be associated with the instance + * @return the allocated and initialized instance + */ + private static SQLData udtReadInvoke( + Invocable target, SQLInput stream, String typeName) + throws Throwable + { + PrivilegedAction action = () -> + { + try + { + SQLData o = (SQLData)target.payload.invokeExact(); + o.readSQL(stream, typeName); + return o; + } + catch ( Error | RuntimeException e ) + { + throw e; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + }; + + return doPrivilegedAndUnwrap(action, target.acc); + } + + /** + * Entry point for calling the {@code parse} method of a UDT, which will + * construct an instance given its text representation. + *

      + * This can be called during transformation of a UDT that has a + * NUL-terminated storage form, and without being separately wrapped in + * {@code pushInvocation}/{@code popInvocation}, so it gets its own entry + * point here to avoid use of the static parameter area. + * @param target a MethodHandle to the class's static parse method, which + * will allocate and return an instance. + * @param textRep the text representation + * @param typeName the SQL type name to be associated with the instance + * @return the allocated and initialized instance + */ + private static SQLData udtParseInvoke( + Invocable target, String textRep, String typeName) + throws Throwable + { + PrivilegedAction action = () -> + { + try + { + return (SQLData)target.payload.invokeExact(textRep, typeName); + } + catch ( Error | RuntimeException e ) + { + throw e; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + }; + + return doPrivilegedAndUnwrap(action, target.acc); + } + + /** + * Factors out the common {@code doPrivileged} and unwrapping of possible + * wrapped checked exceptions for the above entry points. + */ + private static T doPrivilegedAndUnwrap( + PrivilegedAction action, AccessControlContext context) + throws SQLException + { + Throwable t; + try + { + return doPrivileged(action, context); + } + catch ( ExceptionInInitializerError e ) + { + t = e.getCause(); + } + catch ( Error e ) + { + throw e; + } + catch ( UncheckedException e ) + { + t = e.unwrap(); + } + catch ( Throwable e ) + { + t = e; + } + + if ( t instanceof SQLException ) + throw (SQLException)t; + + if ( t instanceof AccessControlException ) + { + Permission perm = ((AccessControlException)t).getPermission(); + if ( perm != null ) + throw new SecurityException( + perm.getActions() + " on " + perm.getName(), t); + throw (AccessControlException)t; + } + + if ( t instanceof SecurityException ) + throw (SecurityException)t; + + throw new SQLException(t.getMessage(), t); + } + + /** + * Called from {@code Function} to perform the initialization of a class, + * under a selected access control context. + */ + static Class loadAndInitWithACC( + String className, ClassLoader schemaLoader, AccessControlContext acc) + throws SQLException + { + PrivilegedAction> action = () -> + { + try + { + return Class.forName(className, true, schemaLoader); + } + catch ( ExceptionInInitializerError e ) + { + throw e; + } + catch ( LinkageError | ClassNotFoundException e ) + { + /* + * It would be odd to get a ClassNotFoundException here, as + * the caller had to look it up once already to decide what acc + * to use. But try telling that to javac. + */ + throw unchecked(new SQLNonTransientException( + "Initializing class " + className + ": " + e, "46103", e)); + } + }; + + return doPrivilegedAndUnwrap(action, acc); + } + + /** + * A class carrying a payload of some kind and an access control context + * to impose when it is invoked. + *

      + * The type of the payload will be specific to which entry point above + * will be used to invoke it. + */ + static final class Invocable + { + final T payload; + final AccessControlContext acc; + + Invocable(T payload, AccessControlContext acc) + { + this.payload = payload; + this.acc = acc; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/ErrorData.java b/pljava/src/main/java/org/postgresql/pljava/internal/ErrorData.java index cbdea9d0..e667d23f 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/ErrorData.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/ErrorData.java @@ -1,44 +1,105 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2021 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.internal; +import static org.postgresql.pljava.internal.Backend.doInPG; + +import java.lang.reflect.UndeclaredThrowableException; +import java.sql.SQLException; + /** * The ErrorData correspons to the ErrorData obtained * using an internal PostgreSQL CopyErrorData call. * * @author Thomas Hallgren */ -public class ErrorData extends JavaWrapper +public class ErrorData { - ErrorData(long pointer) + private final State m_state; + + ErrorData(DualState.Key cookie, long resourceOwner, long pointer) + { + m_state = new State(cookie, this, resourceOwner, pointer); + } + + private static class State + extends DualState.SingleFreeErrorData { - super(pointer); + private State( + DualState.Key cookie, ErrorData ed, long ro, long ht) + { + super(cookie, ed, ro, ht); + } + + /** + * Return the ErrorData pointer. + *

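+ * The preferred, non-transitional shape described in the following paragraph
+ * would look roughly like this (hypothetical method moved into this State
+ * class, so the pin covers the whole use of the native state):
+ *<pre>{@code
+ * private String message() throws SQLException
+ * {
+ *     pin();
+ *     try
+ *     {
+ *         return _getMessage(guardedLong()); // pin held across the JNI call
+ *     }
+ *     finally
+ *     {
+ *         unpin();
+ *     }
+ * }
+ * }</pre>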
      + * This is a transitional implementation: ideally, each method requiring + * the native state would be moved to this class, and hold the pin for + * as long as the state is being manipulated. Simply returning the + * guarded value out from under the pin, as here, is not great practice, + * but as long as the value is only used in instance methods of + * ErrorData, or subclasses, or something with a strong reference to + * this ErrorData, and only on a thread for which + * {@code Backend.threadMayEnterPG()} is true, disaster will not strike. + * It can't go Java-unreachable while an instance method's on the call + * stack, and the {@code Invocation} marking this state's native scope + * can't be popped before return of any method using the value. + */ + private long getErrorDataPtr() throws SQLException + { + pin(); + try + { + return guardedLong(); + } + finally + { + unpin(); + } + } } /** - * Returns The error level + * Return pointer to native ErrorData structure as a long; use only while + * a reference to this class is live and the THREADLOCK is held. */ - public int getErrorLevel() + private final long getNativePointer() { - synchronized(Backend.THREADLOCK) + try { - return _getErrorLevel(this.getNativePointer()); + return m_state.getErrorDataPtr(); + } + catch ( SQLException e ) + { + throw new UndeclaredThrowableException(e, e.getMessage()); } } + /** + * Returns The error level + */ + public int getErrorLevel() + { + return doInPG(() -> _getErrorLevel(this.getNativePointer())); + } + /** * Returns true if the error will be reported to the server log */ public boolean isOutputToServer() { - synchronized(Backend.THREADLOCK) - { - return _isOutputToServer(this.getNativePointer()); - } + return doInPG(() -> _isOutputToServer(this.getNativePointer())); } /** @@ -46,54 +107,45 @@ public boolean isOutputToServer() */ public boolean isOutputToClient() { - synchronized(Backend.THREADLOCK) - { - return _isOutputToClient(this.getNativePointer()); - } + return doInPG(() -> _isOutputToClient(this.getNativePointer())); } /** * Returns true if funcname inclusion is set + * @deprecated The property queried by this method was only used + * in PostgreSQL when communicating with old clients over the v2 + * frontend/backend protocol, superseded in PostgreSQL 7.4. In PG 14 + * and later, there is no such property, and this method will always + * return false. 
*/ + @Deprecated public boolean isShowFuncname() { - synchronized(Backend.THREADLOCK) - { - return _isShowFuncname(this.getNativePointer()); - } + return doInPG(() -> _isShowFuncname(this.getNativePointer())); } /** - * Returns The file where the error occured + * Returns The file where the error occurred */ public String getFilename() { - synchronized(Backend.THREADLOCK) - { - return _getFilename(this.getNativePointer()); - } + return doInPG(() -> _getFilename(this.getNativePointer())); } /** - * Returns The line where the error occured + * Returns The line where the error occurred */ public int getLineno() { - synchronized(Backend.THREADLOCK) - { - return _getLineno(this.getNativePointer()); - } + return doInPG(() -> _getLineno(this.getNativePointer())); } /** - * Returns the name of the function where the error occured + * Returns the name of the function where the error occurred */ public String getFuncname() { - synchronized(Backend.THREADLOCK) - { - return _getFuncname(this.getNativePointer()); - } + return doInPG(() -> _getFuncname(this.getNativePointer())); } /** @@ -101,10 +153,7 @@ public String getFuncname() */ public String getSqlState() { - synchronized(Backend.THREADLOCK) - { - return _getSqlState(this.getNativePointer()); - } + return doInPG(() -> _getSqlState(this.getNativePointer())); } /** @@ -112,10 +161,7 @@ public String getSqlState() */ public String getMessage() { - synchronized(Backend.THREADLOCK) - { - return _getMessage(this.getNativePointer()); - } + return doInPG(() -> _getMessage(this.getNativePointer())); } /** @@ -123,10 +169,7 @@ public String getMessage() */ public String getDetail() { - synchronized(Backend.THREADLOCK) - { - return _getDetail(this.getNativePointer()); - } + return doInPG(() -> _getDetail(this.getNativePointer())); } /** @@ -134,10 +177,7 @@ public String getDetail() */ public String getHint() { - synchronized(Backend.THREADLOCK) - { - return _getHint(this.getNativePointer()); - } + return doInPG(() -> _getHint(this.getNativePointer())); } /** @@ -145,10 +185,7 @@ public String getHint() */ public String getContextMessage() { - synchronized(Backend.THREADLOCK) - { - return _getContextMessage(this.getNativePointer()); - } + return doInPG(() -> _getContextMessage(this.getNativePointer())); } /** @@ -156,10 +193,7 @@ public String getContextMessage() */ public int getCursorPos() { - synchronized(Backend.THREADLOCK) - { - return _getCursorPos(this.getNativePointer()); - } + return doInPG(() -> _getCursorPos(this.getNativePointer())); } /** @@ -167,10 +201,7 @@ public int getCursorPos() */ public int getInternalPos() { - synchronized(Backend.THREADLOCK) - { - return _getInternalPos(this.getNativePointer()); - } + return doInPG(() -> _getInternalPos(this.getNativePointer())); } /** @@ -178,10 +209,7 @@ public int getInternalPos() */ public String getInternalQuery() { - synchronized(Backend.THREADLOCK) - { - return _getInternalQuery(this.getNativePointer()); - } + return doInPG(() -> _getInternalQuery(this.getNativePointer())); } /** @@ -189,10 +217,7 @@ public String getInternalQuery() */ public int getSavedErrno() { - synchronized(Backend.THREADLOCK) - { - return _getSavedErrno(this.getNativePointer()); - } + return doInPG(() -> _getSavedErrno(this.getNativePointer())); } private static native int _getErrorLevel(long pointer); @@ -211,5 +236,4 @@ public int getSavedErrno() private static native int _getInternalPos(long pointer); private static native String _getInternalQuery(long pointer); private static native int _getSavedErrno(long pointer); 
/* errno at entry */ - protected native void _free(long pointer); } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/ExecutionPlan.java b/pljava/src/main/java/org/postgresql/pljava/internal/ExecutionPlan.java index 9cd5593c..86ac4c41 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/ExecutionPlan.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/ExecutionPlan.java @@ -1,19 +1,65 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.internal; +import static org.postgresql.pljava.internal.Backend.doInPG; + import java.sql.SQLException; import java.util.Collections; import java.util.Map; import java.util.LinkedHashMap; /** - * The ExecutionPlan correspons to the execution plan obtained - * using an internal PostgreSQL SPI_prepare call. + * The {@code ExecutionPlan} corresponds to the execution plan obtained + * using an internal PostgreSQL {@code SPI_prepare} call. + *

      + * The {@code ExecutionPlan} is distinct from {@code SPIPreparedStatement} + * because of its greater specificity. The current {@code PreparedStatement} + * behavior (though it may, in future, change) is that the types of parameters + * are not inferred in a "PostgreSQL-up" manner (that is, by having PostgreSQL + * parse the SQL and report what types the parameters would need to have), but + * in a "Java-down" manner, driven by the types of the parameter values supplied + * to the {@code PreparedStatement} before executing it. An + * {@code ExecutionPlan} corresponds to a particular assignment of parameter + * types; a subsequent use of the same {@code PreparedStatement} with different + * parameter values may (depending on their types) lead to generation of a new + * plan, with the former plan displaced from the {@code PreparedStatement} and + * into the {@code PlanCache} implemented here. Another re-use of the same + * {@code PreparedStatement} with the original parameter types will displace the + * newer plan into the cache and retrieve the earlier one. + *

      + * The native state of a plan is not held in a transient context, so it is not + * subject to invalidation from the native side. The Java object is kept "live" + * (garbage-collection prevented) by being referenced either from the + * {@code Statement} that created it, or from the cache if it has been displaced + * there. The {@code close} method does not deallocate a plan, but simply moves + * it to the cache, where it may be found again if needed for the same SQL and + * parameter types. + *

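+ * In terms of this class's API, a round trip looks roughly like the
+ * following (illustrative only; {@code argTypes} is an assumed {@code Oid[]}
+ * describing the parameter types, and callers are normally the JDBC layer on
+ * a thread for which {@code Backend.threadMayEnterPG()} is true):
+ *<pre>{@code
+ * ExecutionPlan plan = ExecutionPlan.prepare("SELECT $1 + 1", argTypes);
+ * int status = plan.execute(
+ *     new Object[] { 41 }, SPI_READONLY_DEFAULT, 0); // 0: no row limit
+ * plan.close();  // not freed: displaced into the MRU plan cache for reuse
+ * }</pre>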
      + * At no time (except in passing) is a plan referred to both by the cache and by + * a {@code Statement}. It is cached when displaced out of its statement, + * and removed from the cache if it is later found there and claimed again by + * a statement, so that one {@code ExecutionPlan} does not end up getting + * shared by multiple statement instances. (There is nothing, however, + * thread-safe about these machinations.) + *

      + * There are not many ways for an {@code ExecutionPlan} to actually be freed. + * That will happen if it is evicted from the cache, either because it is oldest + * and the cache limit is reached, or when another plan is cached for the same + * SQL and parameter types; it will also happen if a {@code PreparedStatement} + * using the plan becomes unreferenced and garbage-collected without + * {@code close} being called (which would have moved the plan back to the + * cache). * * @author Thomas Hallgren */ @@ -23,12 +69,58 @@ public class ExecutionPlan static final float CACHE_LOAD_FACTOR = 0.75f; - private long m_pointer; + /* These three values must match those in ExecutionPlan.c */ + public static final short SPI_READONLY_DEFAULT = 0; + public static final short SPI_READONLY_FORCED = 1; + public static final short SPI_READONLY_CLEARED = 2; + + private final State m_state; + + private static class State + extends DualState.SingleSPIfreeplan + { + private State( + DualState.Key cookie, ExecutionPlan jep, long ro, long ep) + { + super(cookie, jep, ro, ep); + } + + /** + * Return the SPI execution-plan pointer. + *

      + * This is a transitional implementation: ideally, each method requiring + * the native state would be moved to this class, and hold the pin for + * as long as the state is being manipulated. Simply returning the + * guarded value out from under the pin, as here, is not great practice, + * but as long as the value is only used in instance methods of + * ExecutionPlan, or subclasses, or something with a strong reference to + * this ExecutionPlan, and only on a thread for which + * {@code Backend.threadMayEnterPG()} is true, disaster will not strike. + * It can't go Java-unreachable while a reference is on the call stack, + * and as long as we're on the thread that's in PG, the saved plan won't + * be popped before we return. + */ + private long getExecutionPlanPtr() throws SQLException + { + pin(); + try + { + return guardedLong(); + } + finally + { + unpin(); + } + } + } /** * MRU cache for prepared plans. + * The key type is Object, not PlanKey, because for a statement with no + * parameters, the statement itself is used as the key, rather than + * constructing a PlanKey. */ - static final class PlanCache extends LinkedHashMap + static final class PlanCache extends LinkedHashMap { private final int m_cacheSize; @@ -38,20 +130,17 @@ public PlanCache(int cacheSize) m_cacheSize = cacheSize; } - protected boolean removeEldestEntry(Map.Entry eldest) + @Override + protected boolean removeEldestEntry( + Map.Entry eldest) { if(this.size() <= m_cacheSize) return false; - ExecutionPlan evicted = (ExecutionPlan)eldest.getValue(); - synchronized(Backend.THREADLOCK) - { - if(evicted.m_pointer != 0) - { - _invalidate(evicted.m_pointer); - evicted.m_pointer = 0; - } - } + ExecutionPlan evicted = eldest.getValue(); + /* + * See close() below for why 'evicted' is not enqueue()d right here. + */ return true; } }; @@ -99,7 +188,7 @@ public int hashCode() } } - private static final Map s_planCache; + private static final Map s_planCache; private final Object m_key; @@ -111,10 +200,11 @@ public int hashCode() : cacheSize)); } - private ExecutionPlan(Object key, long pointer) + private ExecutionPlan(DualState.Key cookie, long resourceOwner, + Object planKey, long spiPlan) { - m_key = key; - m_pointer = pointer; + m_key = planKey; + m_state = new State(cookie, this, resourceOwner, spiPlan); } /** @@ -122,15 +212,16 @@ private ExecutionPlan(Object key, long pointer) */ public void close() { - ExecutionPlan old = (ExecutionPlan)s_planCache.put(m_key, this); - if(old != null && old.m_pointer != 0) - { - synchronized(Backend.THREADLOCK) - { - _invalidate(old.m_pointer); - old.m_pointer = 0; - } - } + ExecutionPlan old = s_planCache.put(m_key, this); + /* + * For now, do NOT immediately enqueue() a non-null 'old'. It could + * still be live via a Portal that is still retrieving results. Java + * reachability will determine when it isn't, in the natural course of + * things. + * If that turns out to keep plans around too long, something more + * elaborate can be done, involving coordination with the reachability + * of any referencing Portal. + */ } /** @@ -140,17 +231,20 @@ public void close() * @param cursorName Name of the cursor or null for a system * generated name. * @param parameters Values for the parameters. + * @param read_only One of the values {@code SPI_READONLY_DEFAULT}, + * {@code SPI_READONLY_FORCED}, or {@code SPI_READONLY_CLEARED} (in the + * default case, the native code will defer to + * {@code Function_isCurrentReadOnly}. * @return The Portal that represents the opened cursor. 
* @throws SQLException If the underlying native structure has gone stale. */ - public Portal cursorOpen(String cursorName, Object[] parameters) + public Portal cursorOpen( + String cursorName, Object[] parameters, short read_only) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _cursorOpen(m_pointer, System.identityHashCode(Thread - .currentThread()), cursorName, parameters); - } + return doInPG(() -> + _cursorOpen(m_state.getExecutionPlanPtr(), + cursorName, parameters, read_only)); } /** @@ -164,29 +258,29 @@ public Portal cursorOpen(String cursorName, Object[] parameters) */ public boolean isCursorPlan() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _isCursorPlan(m_pointer); - } + return doInPG(() -> _isCursorPlan(m_state.getExecutionPlanPtr())); } /** * Execute the plan using the internal SPI_execp function. * * @param parameters Values for the parameters. + * @param read_only One of the values {@code SPI_READONLY_DEFAULT}, + * {@code SPI_READONLY_FORCED}, or {@code SPI_READONLY_CLEARED} (in the + * default case, the native code will defer to + * {@code Function_isCurrentReadOnly}. * @param rowCount The maximum number of tuples to create. A value of * rowCount of zero is interpreted as no limit, * i.e., run to completion. * @return One of the status codes declared in class {@link SPI}. * @throws SQLException If the underlying native structure has gone stale. */ - public int execute(Object[] parameters, int rowCount) throws SQLException + public int execute(Object[] parameters, short read_only, int rowCount) + throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _execute(m_pointer, System.identityHashCode(Thread - .currentThread()), parameters, rowCount); - } + return doInPG(() -> + _execute(m_state.getExecutionPlanPtr(), + parameters, read_only, rowCount)); } /** @@ -206,29 +300,27 @@ public static ExecutionPlan prepare(String statement, Oid[] argTypes) ? (Object)statement : (Object)new PlanKey(statement, argTypes); - ExecutionPlan plan = (ExecutionPlan)s_planCache.remove(key); + ExecutionPlan plan = s_planCache.remove(key); if(plan == null) - { - synchronized(Backend.THREADLOCK) - { - plan = new ExecutionPlan(key, _prepare( - System.identityHashCode(Thread.currentThread()), statement, argTypes)); - } - } + plan = doInPG(() -> _prepare(key, statement, argTypes)); return plan; } - private static native Portal _cursorOpen(long pointer, long threadId, - String cursorName, Object[] parameters) throws SQLException; + /* + * Not static, so the Portal can hold a live reference to us in case we are + * evicted from the cache while it is still using the plan. 
+ */ + private native Portal _cursorOpen(long pointer, + String cursorName, Object[] parameters, short read_only) + throws SQLException; private static native boolean _isCursorPlan(long pointer) throws SQLException; - private static native int _execute(long pointer, long threadId, - Object[] parameters, int rowCount) throws SQLException; + private static native int _execute(long pointer, + Object[] parameters, short read_only, int rowCount) throws SQLException; - private static native long _prepare(long threadId, String statement, Oid[] argTypes) + private static native ExecutionPlan _prepare( + Object key, String statement, Oid[] argTypes) throws SQLException; - - private static native void _invalidate(long pointer); } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Function.java b/pljava/src/main/java/org/postgresql/pljava/internal/Function.java new file mode 100644 index 00000000..5f6580ac --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Function.java @@ -0,0 +1,2153 @@ +/* + * Copyright (c) 2016-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodHandles.Lookup; +import static java.lang.invoke.MethodHandles.arrayElementGetter; +import static java.lang.invoke.MethodHandles.arrayElementSetter; +import static java.lang.invoke.MethodHandles.collectArguments; +import static java.lang.invoke.MethodHandles.constant; +import static java.lang.invoke.MethodHandles.dropArguments; +import static java.lang.invoke.MethodHandles.empty; +import static java.lang.invoke.MethodHandles.exactInvoker; +import static java.lang.invoke.MethodHandles.explicitCastArguments; +import static java.lang.invoke.MethodHandles.filterArguments; +import static java.lang.invoke.MethodHandles.filterReturnValue; +import static java.lang.invoke.MethodHandles.foldArguments; +import static java.lang.invoke.MethodHandles.guardWithTest; +import static java.lang.invoke.MethodHandles.identity; +import static java.lang.invoke.MethodHandles.insertArguments; +import static java.lang.invoke.MethodHandles.lookup; +import static java.lang.invoke.MethodHandles.permuteArguments; +import static java.lang.invoke.MethodHandles.publicLookup; +import static java.lang.invoke.MethodHandles.zero; +import java.lang.invoke.MethodType; +import static java.lang.invoke.MethodType.methodType; +import java.lang.invoke.WrongMethodTypeException; + +import java.lang.reflect.Array; +import java.lang.reflect.Method; +import java.lang.reflect.GenericDeclaration; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.lang.reflect.TypeVariable; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +import java.security.AccessControlContext; +import java.security.CodeSigner; +import java.security.CodeSource; +import java.security.Principal; +import java.security.ProtectionDomain; + +import java.sql.ResultSet; +import java.sql.SQLData; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLInput; +import java.sql.SQLOutput; +import java.sql.SQLNonTransientException; +import 
java.sql.SQLSyntaxErrorException; + +import static java.util.Arrays.copyOf; +import static java.util.Collections.addAll; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import static java.util.regex.Pattern.compile; + +import javax.security.auth.Subject; +import javax.security.auth.SubjectDomainCombiner; + +import org.postgresql.pljava.PLPrincipal; +import org.postgresql.pljava.ResultSetHandle; +import org.postgresql.pljava.ResultSetProvider; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import static org.postgresql.pljava.internal.Backend.getListConfigOption; +import static org.postgresql.pljava.internal.Backend.WITHOUT_ENFORCEMENT; +import static org.postgresql.pljava.internal.Backend.allowingUnenforcedUDT; +import org.postgresql.pljava.internal.EntryPoints; +import org.postgresql.pljava.internal.EntryPoints.Invocable; +import static org.postgresql.pljava.internal.EntryPoints.invocable; +import static org.postgresql.pljava.internal.EntryPoints.loadAndInitWithACC; +import static org.postgresql.pljava.internal.Privilege.doPrivileged; +import static org.postgresql.pljava.jdbc.TypeOid.INVALID; +import static org.postgresql.pljava.jdbc.TypeOid.TRIGGEROID; +import org.postgresql.pljava.management.Commands; +import org.postgresql.pljava.sqlj.Loader; + +/** + * Methods to look up a PL/Java function and prepare it for invocation. + *

      + * This class contains, mostly, logic that was originally in {@code Function.c} + * and has been ported to Java for better maintainability and adaptability. Many + * methods here have similar or identical names to the C functions they replace. + *

      + * When a PL/Java function is called, the C call handler will call the C + * {@code Function_getFunction}, which will delegate to {@code Function_create} + * if the function has not already been called. That C function calls the + * {@link #create create} method here, which ultimately (after all parsing of + * the {@code CREATE FUNCTION ... AS ...} information, matching up of parameter + * and return types, etc.) will return an {@code Invocable} to use when + * invoking the function. + *
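To make that concrete, here is a hypothetical sketch (class, package, and method names invented for illustration) of the kind of ordinary public static method that the C call handler ultimately invokes through the {@code Invocable} returned by {@code create}; the {@code AS} string of the SQL declaration names the class and method that {@code create} resolves.

```java
// Hypothetical target of the machinery described above: a plain public
// static method in a class from a PL/Java-managed jar. The CREATE FUNCTION
// ... AS string would name this class and method; create() parses that,
// resolves the method, and wraps it in an Invocable for the C call handler.
package org.example;

public class Maths
{
    public static int add(int a, int b)
    {
        return a + b;
    }
}
```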

      + * This remains a hybrid approach, in which PL/Java's legacy C {@code Type} + * infrastructure is used for converting the parameter and return values, and a + * C {@code Function_} structure kept in a C hash table holds the details + * needed for invocation, including the {@code Invocable} created here. The + * methods in this class also use some JNI calls to contribute and retrieve + * additional details that belong in the C structure. + *

      + * The {@code Invocable} returned here by {@code create} will have return type + * {@code Object} in all cases, and no formal parameters. It can + * contain bound references to the static {@code s_referenceParameters} and + * {@code s_primitiveParameters} declared in this class, and will fetch the + * parameters from there (where invocation code in {@code Function.c} will have + * put them) at the time of invocation. The parameter areas are static, but + * invocation takes place only on the PG thread, and the method handles created + * here will have fetched all of the values to push on the stack before the + * (potentially reentrant) target method is invoked. If the method has a + * primitive return type, its return value will be placed in the first slot of + * {@code s_primitiveParameters} and the {@code Invocable} returns null. + * Naturally, the (potentially reentrant) target method has already returned + * before that value is placed in the static slot. + */ +public class Function +{ + /** + * Prevent instantiation. + */ + private Function() + { + } + + private static class EarlyNatives + { + /* + * Pass the Java-allocated s_referenceParameters area down to the C code + * and obtain the ByteBuffer over the C-allocated primitives area. + */ + private static native ByteBuffer _parameterArea(Object[] refs); + } + + /** + * Return null if the {@code prosrc} field in the provided {@code procTup} + * does not have the form of a UDT specification; if it does, return the + * associated class, loaded with the class loader for {@code schemaName}. + */ + public static Class getClassIfUDT( + ResultSet procTup, String schemaName) + throws SQLException + { + Matcher info = parse(procTup); + String className = info.group("udtcls"); + + if ( null == className ) + return null; + + Identifier.Simple schema = Identifier.Simple.fromCatalog(schemaName); + return + loadClass(Loader.getSchemaLoader(schema), className, null) + .asSubclass(SQLData.class); + } + + /** + * Like the original C function of the same name, using effectively the same + * inputs, but producing a {@code MethodType} instead of a JNI signature. + *
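As a rough, self-contained illustration of what {@code buildSignature} produces (independent of PL/Java's {@code loadClass} and schema loaders, and with primitive-name handling omitted), the following sketch maps an array of type names, return type last, onto a {@code MethodType}:

```java
import java.lang.invoke.MethodType;
import static java.lang.invoke.MethodType.methodType;

// Minimal sketch: resolve each name against a ClassLoader; the last name in
// the array is the return type, the rest are parameter types.
public class SignatureSketch
{
    static MethodType signatureOf(ClassLoader loader, String... typeNames)
        throws ClassNotFoundException
    {
        int n = typeNames.length - 1;
        Class<?>[] params = new Class<?>[n];
        for ( int i = 0; i < n; ++ i )
            params[i] = Class.forName(typeNames[i], false, loader);
        Class<?> ret = Class.forName(typeNames[n], false, loader);
        return methodType(ret, params);
    }

    public static void main(String[] args) throws Exception
    {
        ClassLoader l = SignatureSketch.class.getClassLoader();
        System.out.println(
            signatureOf(l, "java.lang.String", "java.lang.Integer"));
        // prints (String)Integer
    }
}
```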

      + * The return type is the last element of {@code jTypes}. + *

      + * {@code acc} is non-null if validating and class initializers should + * be run for parameter and return-type classes; in any other case it is + * null (see {@code loadClass}). + */ + private static MethodType buildSignature( + ClassLoader schemaLoader, String[] jTypes, AccessControlContext acc, + boolean commute, + boolean retTypeIsOutParameter, boolean isMultiCall, boolean altForm) + throws SQLException + { + /* + * Begin by assuming we won't include the "return" type among the + * parameter types. + */ + int rtIdx = jTypes.length - 1; + String retJType = jTypes[rtIdx]; + + /* + * As things are currently arranged, retTypeIsOutParameter is equivalent + * to "the return type is composite" and "the type is ResultSet". + */ + assert retTypeIsOutParameter == ("java.sql.ResultSet".equals(retJType)); + + /* + * And ... if the return type is composite, and this isn't a multi-call, + * then it does go at the end of the other parameters. + */ + if ( ! isMultiCall && retTypeIsOutParameter ) + ++ rtIdx; + + Class[] pTypes = new Class[ rtIdx ]; + + for ( int i = 0 ; i < rtIdx ; ++ i ) + pTypes[i] = loadClass(schemaLoader, jTypes[i], acc); + + if ( commute ) + { + Class t = pTypes[0]; + pTypes[0] = pTypes[1]; + pTypes[1] = t; + } + + Class returnType = + getReturnSignature(schemaLoader, retJType, acc, + retTypeIsOutParameter, isMultiCall, altForm); + + return methodType(returnType, pTypes); + } + + /** + * Return a {@code Class} object for the target method's return type. + *

      + * The C original in this case was a "virtual method" on {@code Type}, but + * only one "subclass" ({@code Composite}) ever overrode the default + * behavior. The default (for everything else but {@code Composite}) is to + * return the type unchanged in the non-multicall case, or {@code Iterator} + * (of that type) for multicall. + *

      + * The overridden behavior for a composite type is to return boolean in the + * non-multicall case, else one of {@code ResultSetHandle} or + * {@code ResultSetProvider} depending on {@code altForm}. + *
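For orientation, a hypothetical sketch (names invented) of the simplest of these shapes, the non-composite multi-call case, where the user method simply returns an {@code Iterator} of the element type; a composite result would instead return a {@code ResultSetProvider} or {@code ResultSetHandle} as described above.

```java
import java.util.Iterator;
import java.util.List;

// Hypothetical non-composite, set-returning function: one element is handed
// back per ValuePerCall invocation by the driver built later in this class.
public class Moods
{
    public static Iterator<String> moods()
    {
        return List.of("grumpy", "sleepy", "happy").iterator();
    }
}
```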

      + * {@code acc} is non-null if validating and class initializers should + * be run for parameter and return-type classes; in any other case it is + * null (see {@code loadClass}). + */ + private static Class getReturnSignature( + ClassLoader schemaLoader, String retJType, AccessControlContext acc, + boolean isComposite, boolean isMultiCall, boolean altForm) + throws SQLException + { + if ( ! isComposite ) + { + if ( isMultiCall ) + return Iterator.class; + return loadClass(schemaLoader, retJType, acc); + } + + /* The composite case */ + if ( isMultiCall ) + return altForm ? ResultSetHandle.class : ResultSetProvider.class; + return boolean.class; + } + + /** + * A Lookup to be used for the few functions inside this module that are + * allowed to be declared in SQL. + */ + private static Lookup s_lookup = + lookup().dropLookupMode(Lookup.PACKAGE); + + /** + * Return a Lookup appropriate to the target class, which will be a public + * Lookup unless the class is {@code Commands} in this module, whose public + * methods are the only ones inside this module that SQL is allowed to + * declare. + */ + private static Lookup lookupFor(Class clazz) + { + if ( Commands.class == clazz ) + return s_lookup; + return publicLookup().in(clazz); + } + + /** + * Replacement for {@code getMethodID} in the C code, but producing a + * {@code MethodHandle} instead. + *
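The lookup step itself boils down to a {@code findStatic} call against an appropriate {@code Lookup}. A minimal stand-alone sketch, using a JDK method rather than a user function and a plain {@code publicLookup} (the {@code lookupFor} above additionally scopes the lookup with {@code in(clazz)}):

```java
import java.lang.invoke.MethodHandle;
import static java.lang.invoke.MethodHandles.publicLookup;
import static java.lang.invoke.MethodType.methodType;

// Resolve a public static method to a MethodHandle from its declaring class,
// name, and MethodType.
public class LookupSketch
{
    public static void main(String[] args) throws ReflectiveOperationException
    {
        MethodHandle parse = publicLookup().findStatic(
            Integer.class, "parseInt",
            methodType(int.class, String.class));
        System.out.println(parse.type()); // (String)int
    }
}
```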

+ * This is called in the cases where {@code init} would return a non-null + * method name: the non-UDT cases. UDT methods are handled in their own + * special way. + *

      + * This may modify the last element (the return type) of the {@code jTypes} + * array, in the course of hunting for the right return type of the method. + *
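A minimal stand-alone sketch of that hunt, with invented class and method names: the first lookup uses the primitive return type, and on {@code NoSuchMethodException} the wrapper type obtained via {@code MethodType.wrap()} is tried instead.

```java
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles.Lookup;
import static java.lang.invoke.MethodHandles.lookup;
import java.lang.invoke.MethodType;
import static java.lang.invoke.MethodType.methodType;

public class RetrySketch
{
    static Integer answer() { return 42; }      // declared with the boxed type

    public static void main(String[] args) throws ReflectiveOperationException
    {
        Lookup l = lookup();
        MethodType mt = methodType(int.class);  // what the declaration implies
        MethodHandle h;
        try
        {
            h = l.findStatic(RetrySketch.class, "answer", mt);
        }
        catch ( NoSuchMethodException e )
        {
            Class<?> boxed = mt.wrap().returnType();     // int -> Integer
            h = l.findStatic(RetrySketch.class, "answer",
                mt.changeReturnType(boxed));
        }
        System.out.println(h.type()); // ()Integer
    }
}
```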

      + * For now, this is a near-facsimile of the C implementation. A further step + * of refactoring into clearer idiomatic Java can come later. + *

      + * {@code acc} is non-null if validating and class initializers should + * be run for parameter and return-type classes; in any other case it is + * null (see {@code loadClass}). + */ + private static MethodHandle getMethodHandle( + ClassLoader schemaLoader, Class clazz, String methodName, + AccessControlContext acc, boolean commute, + String[] jTypes, boolean retTypeIsOutParameter, boolean isMultiCall, + boolean[] returnTypeAdjusted) + throws SQLException + { + MethodType mt = + buildSignature(schemaLoader, jTypes, acc, commute, + retTypeIsOutParameter, isMultiCall, false); // try altForm false + + ReflectiveOperationException ex1 = null; + try + { + return lookupFor(clazz).findStatic(clazz, methodName, mt); + } + catch ( ReflectiveOperationException e ) + { + ex1 = e; + } + + MethodType origMT = mt; + Class altType = null; + boolean adjustReturnType = false; + boolean wrapWithPicker = false; + Class realRetType = + loadClass(schemaLoader, jTypes[jTypes.length-1], acc); + + /* COPIED COMMENT: + * One valid reason for not finding the method is when + * the return type used in the signature is a primitive and + * the true return type of the method is the object class that + * corresponds to that primitive. + */ + if ( realRetType.isPrimitive() ) + { + altType = methodType(realRetType).wrap().returnType(); + realRetType = altType; + adjustReturnType = true; + } + else if ( realRetType.isArray() + && realRetType.getComponentType().isPrimitive() ) + { + Class boxed = + methodType(realRetType.getComponentType()).wrap().returnType(); + altType = java.lang.reflect.Array.newInstance(boxed, 0).getClass(); + realRetType = altType; + adjustReturnType = true; + } + + /* COPIED COMMENT: + * Another reason might be that we expected a ResultSetProvider + * but the implementation returns a ResultSetHandle that needs to be + * wrapped. The wrapping is internal so we retain the original + * return type anyway. + */ + if ( ResultSet.class == realRetType ) + { + altType = realRetType; + wrapWithPicker = true; + } + + if ( null != altType ) + { + jTypes[jTypes.length - 1] = altType.getCanonicalName(); + mt = buildSignature(schemaLoader, jTypes, acc, commute, + retTypeIsOutParameter, isMultiCall, true); // retry altForm true + try + { + MethodHandle h = + lookupFor(clazz).findStatic(clazz, methodName, mt); + if ( returnTypeAdjusted != null ) + returnTypeAdjusted[0] = adjustReturnType; + return wrapWithPicker + ? filterReturnValue(h, s_wrapWithPicker) + : h; + } + catch ( ReflectiveOperationException e ) + { + SQLException sqe1 = + memberException(clazz, methodName, origMT, ex1, + true /*isStatic*/); + SQLException sqe2 = + memberException(clazz, methodName, mt, e, + true /*isStatic*/); + + /* + * If one of the exceptions is NoSuchMethodException and the + * other isn't, then the one that isn't carries news about + * a problem with a method that actually was found. If that's + * the second one, we'll just lie a little about the order and + * report it first. (We never promised what order we'd do the + * lookups in anyway, and the current Java-to-PG exception + * translation only preserves the "first" one's details.) + */ + if ( ex1 instanceof NoSuchMethodException + && ! 
(e instanceof NoSuchMethodException) ) + { + sqe2.setNextException(sqe1); + throw sqe2; + } + + sqe1.setNextException(sqe2); + throw sqe1; + } + } + + throw + memberException(clazz, methodName, origMT, ex1, true /*isStatic*/); + } + + /** + * Produce an exception for a class member not found, with a message that + * may include details from further down an exception's chain of causes. + */ + private static SQLException memberException( + Class clazz, String name, MethodType mt, + ReflectiveOperationException e, boolean isStatic) + { + /* + * The most useful detail message to include may not be that + * of e itself, but further down the chain of causes, particularly + * if e is IllegalAccessException, which handle lookup can throw even + * for causes that aren't illegal access but rather linkage errors. + */ + Throwable t, prev; + t = prev = e; + for ( Class c : List.of( + IllegalAccessException.class, LinkageError.class, + ClassNotFoundException.class, Void.class) ) + { + if ( ! c.isInstance(t) ) + { + t = prev; + break; + } + prev = t; + t = t.getCause(); + } + + String detail = (null == t) ? "" : (": " + t); + + return new SQLNonTransientException( + String.format("resolving %smethod %s.%s with signature %s%s", + (isStatic ? "static " : ""), + clazz.getCanonicalName(), name, mt, detail), + "38000"); + } + + /** + * Adapt an arbitrary static method's handle to what the C call handler + * expects to invoke. + *

      + * Java does not allow a {@code MethodHandle} to be invoked directly from + * JNI. The invocation has to pass through a Java method that in turn + * invokes the handle. So there has to be a common way to pass an arbitrary + * method's parameters, whether reference or primitive. + *

      + * The convention here will be that the C call handler segregates all of the + * incoming parameters into an {@code Object} array for all those of + * reference type, and a C array of {@code jvalue} for the primitives. + * Those arrays are static, and will be bound into the method handle + * produced here, which will fetch values from them when invoked. + *

      + * The job of this method is to take any static method handle {@code mh} and + * return a method handle that takes no parameters, and invokes the original + * handle with the parameter values unpacked to their proper positions. + *
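A much-simplified, stand-alone sketch of that idea, with an {@code int[]} standing in for the jvalue area and invented names throughout: {@code foldArguments} with zero-arity getters bound by {@code insertArguments} reduces an {@code (int,int)int} target to a {@code ()int} handle that fetches its arguments from the static area at call time.

```java
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import static java.lang.invoke.MethodHandles.foldArguments;
import static java.lang.invoke.MethodHandles.insertArguments;
import static java.lang.invoke.MethodType.methodType;

public class StaticAreaSketch
{
    static final int[] PRIMS = new int[2];   // stand-in for the jvalue area

    static int fetch(int slot) { return PRIMS[slot]; }

    static int add(int a, int b) { return a + b; }

    public static void main(String[] args) throws Throwable
    {
        MethodHandles.Lookup l = MethodHandles.lookup();
        MethodHandle target = l.findStatic(StaticAreaSketch.class, "add",
            methodType(int.class, int.class, int.class));
        MethodHandle getter = l.findStatic(StaticAreaSketch.class, "fetch",
            methodType(int.class, int.class));

        MethodHandle mh = target;
        // iterate parameter positions in reverse, folding in a bound getter
        // for each; every fold removes one parameter from the handle's type
        for ( int i = 1; i >= 0; -- i )
            mh = foldArguments(mh, i, insertArguments(getter, 0, i));

        PRIMS[0] = 40;
        PRIMS[1] = 2;
        System.out.println((int)mh.invokeExact());   // 42, fetched at call time
    }
}
```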

      + * The handle's return type will always be {@code Object}. If the target + * has {@code void} or a primitive return type, null will be returned. Any + * primitive value returned by the target will be found in the first static + * primitive parameter slot. This convention allows a single wrapper method + * for all return types. + */ + private static MethodHandle adaptHandle(MethodHandle mh) + { + MethodType mt = mh.type(); + int parameterCount = mt.parameterCount(); + int primitives = (int) + mt.parameterList().stream().filter(Class::isPrimitive).count(); + int references = parameterCount - primitives; + short countCheck = (short)((references << 8) | (primitives & 0xff)); + + /* + * "Erase" any/all reference types, parameter or return, to Object. + * Erasing the return type avoids wrong-method-type exceptions when + * invoking the handle from a wrapper method that returns Object. + * Erasing any reference parameter types is kind of a notational + * convenience: alternatively, each refGetter constructed below could + * be built to fetch from the Object array and cast to the non-erased + * parameter type, but this does the same for all of them in one + * swell foop. + */ + mh = mh.asType(mt.erase()); + + /* + * mh represents a method taking some arbitrary arguments a0,...,a(n-1) + * where n is parameterCount. It is the ultimate target, invoked as the + * last thing that happens at invocation time. + * + * Each step in this construction produces a new mh that invokes the one + * from the step before, after doing something useful first. Therefore, + * the mh from the *last* of these construction steps is what will be + * invoked *first* when a call is later made. In other words, the order + * of these steps has a reverse-chronological flavor, producing handles + * whose chronological sequence will be last-to-first during a call. + * + * As a first construction step (and therefore the last thing to happen + * before the target method is ultimately invoked), add a countsZeroer + * (if there were nonzero parameter counts) to announce that the values + * have all been fetched and the static parameter area is free for use. + * The countsZeroer has void return and no parameters, so it doesn't + * affect the constructed parameter list. + */ + if ( 0 != countCheck ) + mh = foldArguments(mh, 0, s_countsZeroer); + + /* + * Iterate through the parameter indices in reverse order. Each step + * takes a method handle with parameters a0,...,ak + * and produces a handle with one fewer: a0,...ak-1. + * At invocation time, this handle will fetch the value for ak (from + * either the Object[] or the ByteBuffer as ak is of reference or + * primitive type), and invoke the next (in construction order, prior) + * handle. + * + * The handle left at the end of this loop will expect no parameters. + */ + while ( parameterCount --> 0 ) + { + Class pt = mt.parameterType(parameterCount); + if ( pt.isPrimitive() ) + { + MethodHandle primGetter; + switch ( pt.getSimpleName() ) + { + case "boolean": primGetter = s_booleanGetter; break; + case "byte": primGetter = s_byteGetter; break; + case "short": primGetter = s_shortGetter; break; + case "char": primGetter = s_charGetter; break; + case "int": primGetter = s_intGetter; break; + case "float": primGetter = s_floatGetter; break; + case "long": primGetter = s_longGetter; break; + case "double": primGetter = s_doubleGetter; break; + default: + throw new AssertionError("unknown Java primitive type"); + } + /* + * Each getter takes one argument: a byte + * offset. 
Use "insertArguments" to bind in the offset for this + * parameter, so the resulting getter handle has no arguments. + * (That nomenclature again! Here at construction + * time, "insertArguments" produces a method handle with *fewer* + * than the one it starts with. It's later, at call time, when + * the value(s) will get "inserted" as it calls the prior handle + * that expects them.) + */ + int offset = (--primitives) * s_sizeof_jvalue; + primGetter = insertArguments(primGetter, 0, offset); + /* + * The "foldArguments" combinator. At this step, let k be + * parameterCount, so we are looking at a method handle that + * takes a0,...,ak, and foldArguments(..., primGetter) will + * produce a shorter one a0,...,ak-1. + * + * At invocation time, the handle will invoke the primGetter + * (which has arity 0) on the corresponding number of parameters + * (0) starting at position k (or parameterCount, if you will). + * The result of the primGetter will become ak in the invocation + * of the next underlying handle. + * + * *Ahead* of the primGetter here (so, at invocation time, + * *after* the prim has been got), fold in a primitiveZeroer + * bound to the same offset, so argument values won't lie around + * indefinitely in the static area. Because the zeroer has void + * return and (once bound) no arguments, it has no effect on the + * argument list being constructed here for the target method. + */ + mh = foldArguments(mh, 0, + insertArguments(s_primitiveZeroer, 0, offset)); + mh = foldArguments(mh, parameterCount, primGetter); + } + else + { + /* + * The same drill as above, only for reference-typed arguments, + * which will be fetched from the Object[]. + */ + MethodHandle refGetter = s_refGetter; + /* + * Again, s_refGetter has arity 1 (just the integer index); + * bind in the right index for this parameter, producing + * a getter with no argument. + * + * Also again, fold in a referenceNuller, both to prevent the + * lingering exposure of argument values in the static area and, + * as important, indefinitely holding the reference live. The + * nuller, once bound to the index, has no arguments and void + * return, so does not affect the argument list being built. + */ + int index = --references; + refGetter = insertArguments(refGetter, 0, index); + mh = foldArguments(mh, 0, + insertArguments(s_referenceNuller, 0, index)); + mh = foldArguments(mh, parameterCount, refGetter); + } + } + + /* + * If the target has a primitive return type, add a filter that stashes + * the return value in slot 0 of the primitives static area. A return + * value of reference type is simply returned. 
+ */ + Class rt = mt.returnType(); + if ( void.class == rt ) + mh = filterReturnValue(mh, s_voidToNull); + else if ( rt.isPrimitive() ) + { + MethodHandle primReturn; + switch ( rt.getSimpleName() ) + { + case "boolean": primReturn = s_booleanReturn; break; + case "byte": primReturn = s_byteReturn; break; + case "short": primReturn = s_shortReturn; break; + case "char": primReturn = s_charReturn; break; + case "int": primReturn = s_intReturn; break; + case "float": primReturn = s_floatReturn; break; + case "long": primReturn = s_longReturn; break; + case "double": primReturn = s_doubleReturn; break; + default: + throw new AssertionError("unknown Java primitive return type"); + } + + mh = filterReturnValue(mh, primReturn); + } + + /* + * The returned method handle will first confirm that it is being called + * with the expected numbers of reference and primitive parameters ready + * in the static parameter areas, throwing an exception if not. + */ + return foldArguments(mh, + insertArguments(s_paramCountsAre, 0, countCheck)); + } + + private static final MethodHandle s_booleanReturn; + private static final MethodHandle s_byteReturn; + private static final MethodHandle s_shortReturn; + private static final MethodHandle s_charReturn; + private static final MethodHandle s_intReturn; + private static final MethodHandle s_floatReturn; + private static final MethodHandle s_longReturn; + private static final MethodHandle s_doubleReturn; + private static final MethodHandle s_voidToNull; + private static final MethodHandle s_booleanGetter; + private static final MethodHandle s_byteGetter; + private static final MethodHandle s_shortGetter; + private static final MethodHandle s_charGetter; + private static final MethodHandle s_intGetter; + private static final MethodHandle s_floatGetter; + private static final MethodHandle s_longGetter; + private static final MethodHandle s_doubleGetter; + private static final MethodHandle s_refGetter; + private static final MethodHandle s_referenceNuller; + private static final MethodHandle s_primitiveZeroer; + private static final MethodHandle s_paramCountsAre; + private static final MethodHandle s_countsZeroer; + private static final MethodHandle s_nonNull; + private static final MethodHandle s_not; + private static final MethodHandle s_boxedNot; + + /* + * Handles used to retrieve rows using SFRM_ValuePerCall protocol, from a + * function that returns an Iterator or ResultSetProvider, respectively. + * (One that returns a ResultSetHandle gets its return value wrapped with a + * ResultSetPicker and is then treated as in the ResultSetProvider case.) + */ + private static final MethodHandle s_iteratorVPC; + private static final MethodHandle s_resultSetProviderVPC; + private static final MethodHandle s_wrapWithPicker; + + private static final int s_sizeof_jvalue = 8; // Function.c StaticAssertStmt + + /** + * An {@code AccessControlContext} representing "nobody special": it should + * enjoy whatever permissions the {@code Policy} grants to everyone, but no + * others. + * + * This will be clapped on top of any {@code Invocable} whose target isn't + * in a PL/Java-managed jar or in PL/Java itself; PL/Java has always allowed + * {@code CREATE FUNCTION} to name some Java library class directly, but in + * such a case, the permissions should still be limited to what the policy + * would allow a PL/Java function. 
+ */ + private static final AccessControlContext s_lid; + + /** + * An {@code AccessControlContext} representing "no other restrictions": + * it will be used to build the initial context for any {@code Invocable} + * whose target is in a PL/Java-managed jar, so that it will enjoy whatever + * permissions the policy grants to its jar directly. + */ + private static final AccessControlContext s_noLid; + + /* + * Static areas for passing reference and primitive parameters. A Java + * method can have no more than 255 parameters, so each area gets the + * worst-case allocation. s_primitiveParameters is a direct byte buffer + * over an array of 255 JNI jvalues. (It could be made half the size by + * taking into account the JVM convention that long/double take two slots + * each, but that can be a future optimization.) + * + * These areas will be bound into MethodHandle trees constructed over + * desired invocation targets. Such a constructed method handle will take + * care of pushing the arguments from the appropriate static slots. Because + * that happens before the target is invoked, and calls must happen on the + * PG thread, the static areas are safe for reentrant calls (but for an edge + * case involving UDTs among the parameters and a UDT function that incurs + * a reentrant call; it may never happen, but that's what the ParameterFrame + * class is for, below). + * + * Such a constructed method handle will be passed, wrapped in + * an Invocable, to EntryPoints.invoke, which is declared to return + * Object always. If the target returns a primitive value, the last act of + * the constructed method handle will be to store that in the first slot of + * s_primitiveParameters, where the C code will find it, and return null as + * its "Object" return value. + * + * The primitive parameters area is slightly larger than 255 jvalues; the + * next two bytes contain the numbers of actual parameters in the call (as + * an int16 with the count of reference parameters in the MSB, primitives + * in the LSB). Each constructed MethodHandle will have the corresponding + * int16 value bound in for comparison, and will throw an exception if + * invoked with the wrong parameter counts. + */ + private static final Object[] s_referenceParameters = new Object [ 255 ]; + private static final ByteBuffer s_primitiveParameters = + EarlyNatives._parameterArea(s_referenceParameters) + .order(ByteOrder.nativeOrder()); + private static final int s_offset_paramCounts = 255 * s_sizeof_jvalue; + + /** + * Class used to stack parameters for an in-construction call if needed for + * the (unlikely) re-entrant use of the static parameter area. + *
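A stand-alone sketch of the layout described above, with the buffer allocated from Java here rather than wrapping C memory: one 8-byte slot per possible parameter, and a trailing 16-bit count word packing the reference count in the high byte and the primitive count in the low byte.

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class ParamAreaSketch
{
    static final int SIZEOF_JVALUE = 8;

    public static void main(String[] args)
    {
        ByteBuffer prims = ByteBuffer
            .allocateDirect(255 * SIZEOF_JVALUE + 2)
            .order(ByteOrder.nativeOrder());

        prims.putInt(0 * SIZEOF_JVALUE, 42);        // an int in slot 0
        prims.putDouble(1 * SIZEOF_JVALUE, 2.5);    // a double in slot 1
        prims.putShort(255 * SIZEOF_JVALUE,         // counts: 1 ref, 2 prims
            (short)((1 << 8) | 2));

        short counts = prims.getShort(255 * SIZEOF_JVALUE);
        System.out.println("refs=" + (counts >>> 8)
            + " prims=" + (counts & 0xff)
            + " slot0=" + prims.getInt(0)
            + " slot1=" + prims.getDouble(SIZEOF_JVALUE));
    }
}
```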

      + * The need for this should be rare, as the only obvious cases for PL/Java + * upcalls that can occur while assembling another call's parameter list + * will be for UDTs that appear among those parameters. On the other hand, + * nothing restricts what a UDT method is allowed to do, so in the unlikely + * case it does something heavy enough to involve another upcall, there has + * to be a way for that to work. + */ + static class ParameterFrame + { + private static ParameterFrame s_stack = null; + private ParameterFrame m_prev; + private Object[] m_refs; + private byte[] m_prims; + + /** + * Construct a copy of the current in-progress parameter area. + */ + private ParameterFrame() + { + short counts = s_primitiveParameters.getShort(s_offset_paramCounts); + assert 0 != counts : "ParameterFrame() called when no parameters"; + + int refs = counts >>> 8; + int prims = counts & 0xff; + + if ( 0 < refs ) + m_refs = copyOf(s_referenceParameters, refs); + + if ( 0 < prims ) + { + m_prims = new byte [ prims * s_sizeof_jvalue ]; + // Java 13: s_primitiveParameters.get(0, m_prims); + s_primitiveParameters.get(m_prims).position(0); + } + + m_prev = s_stack; + } + + /** + * Push a copy of the current in-progress parameter area; called only + * via JNI. + *

      + * Only happens on the PG thread. + */ + private static void push() + { + s_stack = new ParameterFrame(); + s_primitiveParameters.putShort(s_offset_paramCounts, (short)0); + } + + /** + * Pop a stacked parameter frame; called only via JNI, only when + * the current invocation is known to have pushed one. + */ + private static void pop() + { + ParameterFrame f = s_stack; + s_stack = f.m_prev; + + int refs = 0; + int prims = 0; + + if ( null != f.m_refs ) + { + refs = f.m_refs.length; + System.arraycopy(f.m_refs, 0, s_referenceParameters, 0, refs); + } + + if ( null != f.m_prims ) + { + int len = f.m_prims.length; + prims = len / s_sizeof_jvalue; + // Java 13: s_primitiveParameters.put(0, f.m_prims); + s_primitiveParameters.put(f.m_prims).position(0); + } + + s_primitiveParameters.putShort(s_offset_paramCounts, + (short)((refs << 8) | (prims & 0xff))); + } + } + + static + { + Lookup l = publicLookup(); + Lookup myL = lookup(); + MethodHandle toVoid = empty(methodType(void.class, ByteBuffer.class)); + MethodHandle toNull = empty(methodType(Object.class, ByteBuffer.class)); + MethodHandle longSetter = null; + MethodType mt = methodType(byte.class, int.class); + MethodHandle mh; + + try + { + s_byteGetter = l.findVirtual(ByteBuffer.class, "get", mt) + .bindTo(s_primitiveParameters); + mt = mt.changeReturnType(short.class); + s_shortGetter = l.findVirtual(ByteBuffer.class, "getShort", mt) + .bindTo(s_primitiveParameters); + mt = mt.changeReturnType(char.class); + s_charGetter = l.findVirtual(ByteBuffer.class, "getChar", mt) + .bindTo(s_primitiveParameters); + mt = mt.changeReturnType(int.class); + s_intGetter = l.findVirtual(ByteBuffer.class, "getInt", mt) + .bindTo(s_primitiveParameters); + mt = mt.changeReturnType(float.class); + s_floatGetter = l.findVirtual(ByteBuffer.class, "getFloat", mt) + .bindTo(s_primitiveParameters); + mt = mt.changeReturnType(long.class); + s_longGetter = l.findVirtual(ByteBuffer.class, "getLong", mt) + .bindTo(s_primitiveParameters); + mt = mt.changeReturnType(double.class); + s_doubleGetter = l.findVirtual(ByteBuffer.class, "getDouble", mt) + .bindTo(s_primitiveParameters); + + mt = mt.changeReturnType(Object.class); + s_refGetter = arrayElementGetter(Object[].class) + .bindTo(s_referenceParameters); + + mt = methodType(boolean.class, byte.class); + mh = myL.findStatic(Function.class, "byteNonZero", mt); + s_booleanGetter = filterReturnValue(s_byteGetter, mh); + + mt = methodType(void.class, short.class); + mh = myL.findStatic(Function.class, "paramCountsAre", mt); + s_paramCountsAre = mh; + + s_voidToNull = zero(Object.class); + + mt = methodType(ByteBuffer.class, int.class, byte.class); + mh = l.findVirtual(ByteBuffer.class, "put", mt) + .bindTo(s_primitiveParameters); + s_byteReturn = filterReturnValue(insertArguments(mh, 0, 0), toNull); + + mt = mt.changeParameterType(1, short.class); + mh = l.findVirtual(ByteBuffer.class, "putShort", mt) + .bindTo(s_primitiveParameters); + s_shortReturn = + filterReturnValue(insertArguments(mh, 0, 0), toNull); + + mt = mt.changeParameterType(1, char.class); + mh = l.findVirtual(ByteBuffer.class, "putChar", mt) + .bindTo(s_primitiveParameters); + s_charReturn = filterReturnValue(insertArguments(mh, 0, 0), toNull); + + mt = mt.changeParameterType(1, int.class); + mh = l.findVirtual(ByteBuffer.class, "putInt", mt) + .bindTo(s_primitiveParameters); + s_intReturn = filterReturnValue(insertArguments(mh, 0, 0), toNull); + + mt = mt.changeParameterType(1, float.class); + mh = l.findVirtual(ByteBuffer.class, "putFloat", mt) + 
.bindTo(s_primitiveParameters); + s_floatReturn = + filterReturnValue(insertArguments(mh, 0, 0), toNull); + + mt = mt.changeParameterType(1, long.class); + mh = l.findVirtual(ByteBuffer.class, "putLong", mt) + .bindTo(s_primitiveParameters); + longSetter = filterReturnValue(mh, toVoid); + s_longReturn = filterReturnValue(insertArguments(mh, 0, 0), toNull); + + mt = mt.changeParameterType(1, double.class); + mh = l.findVirtual(ByteBuffer.class, "putDouble", mt) + .bindTo(s_primitiveParameters); + s_doubleReturn = + filterReturnValue(insertArguments(mh, 0, 0), toNull); + + mh = s_byteReturn; + s_booleanReturn = guardWithTest(identity(boolean.class), + dropArguments( + insertArguments(mh, 0, (byte)1), 0, boolean.class), + dropArguments( + insertArguments(mh, 0, (byte)0), 0, boolean.class)); + + s_referenceNuller = + insertArguments( + arrayElementSetter(Object[].class), 2, (Object)null) + .bindTo(s_referenceParameters); + + s_primitiveZeroer = insertArguments(longSetter, 1, 0L); + + s_countsZeroer = + insertArguments(s_primitiveZeroer, 0, s_offset_paramCounts); + + s_nonNull = l.findStatic(Objects.class, "nonNull", + methodType(boolean.class, Object.class)); + + s_not = guardWithTest(identity(boolean.class), + dropArguments(constant(boolean.class, false), 0, boolean.class), + dropArguments(constant(boolean.class, true), 0, boolean.class)); + + s_boxedNot = + guardWithTest( + explicitCastArguments(s_nonNull, + methodType(boolean.class, Boolean.class)), + explicitCastArguments(s_not, + methodType(Boolean.class, Boolean.class)), + identity(Boolean.class)); + + /* + * Build a bit of MethodHandle tree for invoking a set-returning + * user function that will implement the ValuePerCall protocol. + * Such a function will return either an Iterator or a + * ResultSetProvider (or a ResultSetHandle, more on that further + * below). Its MethodHandle, as obtained from AdaptHandle, of course + * has type ()Object (changed to (acc)Object before init() returns + * it, but ordinarily it will ignore the acc passed to it). + * + * The handle tree being built here will go on top of that, and will + * also ultimately have type (acc)Object. What it returns will be + * a new Invocable, carrying the same acc, and a handle tree built + * over the Iterator or ResultSetProvider that the user function + * returned. Part of that tree must depend on whether the return + * type is Iterator or ResultSet; the part being built here is the + * common part. It will have an extra first argument of type + * MethodHandle that can be bound to the ResultSetProvider- or + * Iterator-specific handle. If the user function returns null, so + * will this. + */ + + MethodHandle invocableMH = + myL.findStatic(EntryPoints.class, "invocable", methodType( + Invocable.class, + MethodHandle.class, AccessControlContext.class)); + + mh = l.findVirtual(MethodHandle.class, "bindTo", + methodType(MethodHandle.class, Object.class)); + + mh = collectArguments(invocableMH, 0, mh); + // (hdl, obj, acc) -> Invocable(hdl-bound-to-obj, acc) + + mh = guardWithTest(dropArguments(s_nonNull, 0, MethodHandle.class), + mh, empty(methodType(Invocable.class, + MethodHandle.class, Object.class, AccessControlContext.class + )) + ); + + mh = filterArguments(mh, 1, exactInvoker(methodType(Object.class))); + /* + * We are left with type (MethodHandle,MethodHandle,acc) -> + * Invocable. 
A first bindTo, passing the ResultSetProvider- or + * Iterator-specific tree fragment, will leave us with + * (MethodHandle,acc)Invocable, and that can be bound to any + * set-returning user function handle, leaving (acc)Invocable, which + * is just what we want. Keep this as vpcCommon (only erasing its + * return type to Object as EntryPoints.invoke will expect). + */ + MethodHandle vpcCommon = + mh.asType(mh.type().changeReturnType(Object.class)); + + /* + * VALUE-PER-CALL Iterator DRIVER + * + * Build the ValuePerCall adapter handle for a function that + * returns Iterator. ValuePerCall adapters will be invoked through + * the general mechanism, and fetch their arguments from the static + * area. They'll be passed one reference argument (a "row collector" + * for the ResultSetProvider case) and two primitives: a long + * call counter (zero on the first call) and a boolean (true when + * the caller wants to stop iteration, perhaps early). An Iterator + * has no use for the row collector or the call counter, so they + * simply won't be fetched; the end-iteration boolean will be + * fetched and will cause false+null to be returned, but will not + * necessarily release any resources promptly, as Iterator has no + * close() method. + * + * These adapters change up the return-value protocol a bit: they + * will return a reference (the value for the row) *and also* a + * boolean via the first primitive slot (false if the end of rows + * has been reached, in which case the reference returned is simply + * null and is not part of the result set). If the boolean is true + * and null is returned, the null is part of the result. + * + * mh1 and mh2 both have type (Iterator)Object and side effect of + * storing to primitive slot 0. Let mh1 be the hasNext case, + * returning a value and storing true, and mh2 the no-more case, + * storing false and returning null. (They don't have a primitive- + * zeroer for either argument, as the return will clobber the first + * slot anyway, and they can only be reached if the 'close' argument + * is already zero. This is the Iterator case, so the row-collector + * reference argument is assumed already null.) + * + * Start with a few constants for parameter getters (else it is + * easy to forget the (sizeof jvalue) for the primitive getters!). + */ + final int REF0 = 0; + final int REF1 = 1; + final int PRIM0 = 0; + final int PRIM1 = 1 * s_sizeof_jvalue; + + MethodHandle mh1 = identity(Object.class); + mh1 = dropArguments(mh1, 1, Object.class); // the null from boolRet + mh1 = collectArguments(mh1, 1, + insertArguments(s_booleanReturn, 0, true)); + mt = methodType(Object.class); + mh = l.findVirtual(Iterator.class, "next", mt); + mh1 = filterArguments(mh1, 0, mh); + + MethodHandle mh2 = insertArguments(s_booleanReturn, 0, false); + mh2 = dropArguments(mh2, 0, Iterator.class); + + mt = methodType(boolean.class); + mh = l.findVirtual(Iterator.class, "hasNext", mt); + mh = guardWithTest(mh, mh1, mh2); + mh = foldArguments(mh, 0, s_countsZeroer); + + /* + * The next (in construction order; first in execution) test is of + * the 'close' argument. Tack a primitiveZeroer onto mh2 for this + * one, as it'll execute in the argument-isn't-zero case. + */ + mh2 = foldArguments(mh2, 0, + insertArguments(s_primitiveZeroer, 0, PRIM1)); + mh2 = foldArguments(mh2, 0, s_countsZeroer); + + mh = guardWithTest( + insertArguments(s_booleanGetter, 0, PRIM1), mh2, mh); + + /* + * mh now has type (Iterator)Object. 
Erase the Iterator to Object + * (so this and the ResultSetProvider one can have a common type), + * give it an acc argument that it will ignore, bind it into + * vpcCommon, and we'll have the Iterator VPC adapter. + */ + mh = mh.asType(mh.type().erase()); + mh = dropArguments(mh, 1, AccessControlContext.class); + s_iteratorVPC = vpcCommon.bindTo(mh); + + /* + * VALUE-PER-CALL ResultSetProvider DRIVER + * + * The same drill as above, only to drive a ResultSetProvider. + * For now, this will always return a null reference, even when + * a row is retrieved; the thing it would return is just the + * row collector, which the C caller already has, and must extract + * the tuple from. If that could be done in Java, it would be + * a different story. + */ + mt = methodType(boolean.class, ResultSet.class, long.class); + mh1 = collectArguments(s_booleanReturn, 0, + l.findVirtual(ResultSetProvider.class, "assignRowValues", mt)); + mh1 = dropArguments(mh1, 0, boolean.class); + + /* + * The next (in construction order; first in execution) test is of + * the 'close' argument. If it is true, use mh2 to zero that prim + * slot, call close, and return false. + */ + mh2 = insertArguments(s_booleanReturn, 0, false); + mh2 = collectArguments(mh2, 0, + l.findVirtual(ResultSetProvider.class, "close", + methodType(void.class))); + mh2 = foldArguments(mh2, 0, + insertArguments(s_primitiveZeroer, 0, PRIM1)); + mh2 = dropArguments(mh2, 0, boolean.class); + mh2 = dropArguments(mh2, 2, ResultSet.class, long.class); + + mh = guardWithTest(identity(boolean.class), mh2, mh1); + mh = foldArguments(mh, 0, s_countsZeroer); + mh = foldArguments(mh, 0, + insertArguments(s_booleanGetter, 0, PRIM1)); + // ^^^ Test the 'close' flag, prim slot 1 (insert as arg 0) ^^^ + + mh = foldArguments(mh, 2, insertArguments(s_longGetter, 0, PRIM0)); + // ^^^ Get the row count, prim slot 0; return will clobber ^^^ + + /* + * mh now has type (ResultSetProvider,ResultSet)Object. Erase both + * argument types to Object now (so the ResultSet will match the + * refGetter here, and the result will be (Object)Object as expected + * below. + */ + mh = mh.asType(mh.type().erase()); + mh = foldArguments(mh, 1, + insertArguments(s_referenceNuller, 0, REF0)); + mh = foldArguments(mh, 1, insertArguments(s_refGetter, 0, REF0)); + // ^^^ Get and then null the row collector, ref slot 0 ^^^ + + /* + * mh now has type (Object)Object. Give it an acc argument that it + * will ignore, bind it into vpcCommon, and we'll have the + * ResultSetProvider VPC adapter. + */ + mh = dropArguments(mh, 1, AccessControlContext.class); + s_resultSetProviderVPC = vpcCommon.bindTo(mh); + + /* + * WRAPPER for ResultSetHandle to present it as ResultSetProvider + */ + mt = methodType(void.class, ResultSetHandle.class); + mh = myL.findConstructor(ResultSetPicker.class, mt); + s_wrapWithPicker = + mh.asType(mh.type().changeReturnType(ResultSetProvider.class)); + } + catch ( ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + + /* + * An empty ProtectionDomain array is all it takes to make s_noLid. + * (As far as doPrivileged is concerned, a null AccessControlContext has + * the same effect, but one can't attach a DomainCombiner to that.) + * + * A lid is a bit more work, but there's a method for that. + */ + s_noLid = new AccessControlContext(new ProtectionDomain[] {}); + s_lid = lidWithPrincipals(new Principal[0]); + } + + /** + * Construct a 'lid' {@code AccessControlContext}, optionally with + * associated {@code Principal}s. + *

      + * A 'lid' is a "nobody special" {@code AccessControlContext}: it isn't + * allowed any permission that isn't granted by the Policy to everybody, + * unless it also has a nonempty array of principals. With an empty array, + * there need be only one such lid, so it can be kept in a static. + *
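A stand-alone sketch of how such a lid is applied, using the two-argument {@code doPrivileged} (these {@code java.security} APIs are deprecated in recent JDKs, but they are the ones the surrounding code targets); the action runs with the lid's "nobody special" domain intersected into the effective permissions.

```java
import java.security.AccessControlContext;
import java.security.AccessController;
import java.security.CodeSigner;
import java.security.CodeSource;
import java.security.PrivilegedAction;
import java.security.ProtectionDomain;

public class LidSketch
{
    public static void main(String[] args)
    {
        // a real CodeSource with null URL and null signers, as described below
        AccessControlContext lid = new AccessControlContext(
            new ProtectionDomain[] {
                new ProtectionDomain(
                    new CodeSource(null, (CodeSigner[])null), null)
            });

        // the action's effective permissions are limited by the lid's domain
        String home = AccessController.doPrivileged(
            (PrivilegedAction<String>)() -> System.getProperty("java.home"),
            lid);
        System.out.println(home);
    }
}
```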

      + * This method also allows creating a lid with associated principals, + * because a {@code SubjectDomainCombiner} does not combine its subject into + * the domains of its inherited {@code AccessControlContext}, and + * that strains the principle of least astonishment if the code is being + * invoked through an SQL declaration that one expects would have a + * {@code PLPrincipal} associated. + *

      + * A null CodeSource is too strict; if your code source is null, you are + * somebody special in a bad way: no dynamic permissions for you! At + * least according to the default policy provider. + *

      + * So, to achieve mere "nobody special"-ness requires a real CodeSource + * with null URL and null code signers. + *

      + * The ProtectionDomain constructor allows the permissions parameter + * to be null, and says so in the javadocs. It seems to allow + * the principals parameter to be null too, but doesn't say that, so an + * array will always be expected here. + */ + private static AccessControlContext lidWithPrincipals(Principal[] ps) + { + return new AccessControlContext(new ProtectionDomain[] { + new ProtectionDomain( + new CodeSource(null, (CodeSigner[])null), + null, ClassLoader.getSystemClassLoader(), + Objects.requireNonNull(ps)) + }); + } + + /** + * Converts a byte value to boolean as the JNI code does, where any nonzero + * value is true. + *
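A small stand-alone demonstration of that conversion, contrasted with the {@code explicitCastArguments} behavior discussed in the next paragraph: the byte value 2 is true under the JNI-style any-nonzero rule but false under a low-bit cast.

```java
import java.lang.invoke.MethodHandle;
import static java.lang.invoke.MethodHandles.explicitCastArguments;
import static java.lang.invoke.MethodHandles.identity;
import static java.lang.invoke.MethodType.methodType;

public class ByteToBooleanSketch
{
    public static void main(String[] args) throws Throwable
    {
        byte b = 2;

        System.out.println(0 != b);    // true: the JNI-style rule

        // cast byte to boolean explicitly; only the low-order bit survives
        MethodHandle lowBit = explicitCastArguments(
            identity(byte.class), methodType(boolean.class, byte.class));
        System.out.println((boolean)lowBit.invokeExact(b)); // false
    }
}
```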

      + * There is a {@code MethodHandles.explicitCastArguments} that will cast a + * byte to boolean by considering only the low bit. I don't trust it. + */ + private static boolean byteNonZero(byte b) + { + return 0 != b; + } + + /** + * Throw a {@code WrongMethodTypeException} if the parameter counts in + * {@code counts} (references in the MSB, primitives in the LSB) do not + * match those at offset {@code s_offset_paramCounts} in the static passing + * area. + */ + private static void paramCountsAre(short counts) + { + short got = s_primitiveParameters.getShort(s_offset_paramCounts); + if ( counts != got ) + throw new WrongMethodTypeException(String.format( + "PL/Java invocation expects (%d reference/%d primitive) " + + "parameter count but passed (%d reference/%d primitive)", + (counts >>> 8), (counts & 0xff), + (got >>> 8), (got & 0xff))); + } + + /** + * Return an {@code Invocable} for the {@code writeSQL} method of + * a given UDT class. + *
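For orientation, a hypothetical sketch (type and field names invented) of the kind of scalar-UDT class these writeSQL/toString/readSQL/parse handles are aimed at: a public no-argument constructor, the {@code SQLData} read and write methods, and the static {@code parse} plus {@code toString} pair for the text form.

```java
import java.sql.SQLData;
import java.sql.SQLException;
import java.sql.SQLInput;
import java.sql.SQLOutput;

public class Rational implements SQLData
{
    private int num, den;
    private String typeName;

    public Rational() { }                       // needed by the read handle

    public static Rational parse(String input, String typeName)
    {
        String[] parts = input.split("/");
        Rational r = new Rational();
        r.num = Integer.parseInt(parts[0]);
        r.den = Integer.parseInt(parts[1]);
        r.typeName = typeName;
        return r;
    }

    @Override public String getSQLTypeName() { return typeName; }

    @Override public void readSQL(SQLInput in, String typeName)
    throws SQLException
    {
        num = in.readInt();
        den = in.readInt();
        this.typeName = typeName;
    }

    @Override public void writeSQL(SQLOutput out) throws SQLException
    {
        out.writeInt(num);
        out.writeInt(den);
    }

    @Override public String toString() { return num + "/" + den; }
}
```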

      + * While this is not expected to be used while transforming parameters for + * another function call as the UDT-read handle would be, it can still be + * used during a function's execution and without being separately wrapped + * in {@code pushInvocation}/{@code popInvocation}. The pushing and popping + * of {@code ParameterFrame} rely on invocation scoping, so it is better for + * the UDT-write method also to avoid using the static parameter area. + *

      + * The access control context of the {@code Invocable} returned here is used + * at the corresponding entry point; the payload is not. + */ + private static Invocable udtWriteHandle( + Class clazz, String language, boolean trusted) + throws SQLException + { + return invocable( + null, accessControlContextFor(clazz, language, trusted)); + } + + /** + * Return an {@code Invocable} for the {@code toString} method of + * a given UDT class (or any class, really). + *

      + * The access control context of the {@code Invocable} returned here is used + * at the corresponding entry point; the payload is not. + */ + private static Invocable udtToStringHandle( + Class clazz, String language, boolean trusted) + throws SQLException + { + return invocable( + null, accessControlContextFor(clazz, language, trusted)); + } + + /** + * Return a special {@code Invocable} for the {@code readSQL} method of + * a given UDT class. + *

      + * Because this can commonly be invoked while transforming parameters for + * another function call, it has a dedicated corresponding + * {@code EntryPoints} method and does not use the static parameter area. + * The {@code Invocable} created here is bound to the constructor of the + * type, takes no parameters, and simply returns the constructed instance; + * the {@code EntryPoints} method will then call {@code readSQL} on it and + * pass the stream and type-name arguments. The {@code AccessControlContext} + * assigned here will be in effect for both the constructor and the + * {@code readSQL} call. + */ + private static Invocable udtReadHandle( + Class clazz, String language, boolean trusted) + throws SQLException + { + Lookup l = lookupFor(clazz); + MethodHandle ctor; + + try + { + ctor = + l.findConstructor(clazz, methodType(void.class)) + .asType(methodType(SQLData.class)); // invocable() enforces this + } + catch ( ReflectiveOperationException e ) + { + throw new SQLNonTransientException( + "Java UDT implementing class " + clazz.getCanonicalName() + + " must have a no-argument public constructor", "38000", e); + } + + return invocable( + ctor, accessControlContextFor(clazz, language, trusted)); + } + + /** + * Return a "parse" {@code Invocable} for a given UDT class. + *
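A stand-alone sketch of the two handle shapes involved, with a trivial nested stand-in for a real UDT class: a no-argument constructor handle viewed as {@code ()SQLData}, and a static {@code parse(String,String)} handle with its return type likewise widened to {@code SQLData}.

```java
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles.Lookup;
import static java.lang.invoke.MethodHandles.lookup;
import static java.lang.invoke.MethodType.methodType;
import java.sql.SQLData;
import java.sql.SQLException;
import java.sql.SQLInput;
import java.sql.SQLOutput;

public class UdtHandleSketch
{
    public static class Udt implements SQLData
    {
        public Udt() { }
        public static Udt parse(String input, String typeName)
        {
            return new Udt();
        }
        @Override public String getSQLTypeName() { return "javatest.udt"; }
        @Override public void readSQL(SQLInput in, String typeName)
        throws SQLException { }
        @Override public void writeSQL(SQLOutput out)
        throws SQLException { }
    }

    public static void main(String[] args) throws ReflectiveOperationException
    {
        Lookup l = lookup();

        // constructor handle, return type widened to the SQLData interface
        MethodHandle ctor =
            l.findConstructor(Udt.class, methodType(void.class))
                .asType(methodType(SQLData.class));

        // parse handle, return type likewise widened
        MethodHandle parse =
            l.findStatic(Udt.class, "parse",
                methodType(Udt.class, String.class, String.class));
        parse = parse.asType(parse.type().changeReturnType(SQLData.class));

        System.out.println(ctor.type());  // ()SQLData
        System.out.println(parse.type()); // (String,String)SQLData
    }
}
```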

      + * The method can be invoked during the preparation of a parameter that has + * a NUL-terminated storage form, so it gets its own dedicated entry point + * and does not use the static parameter area. + */ + private static Invocable udtParseHandle( + Class clazz, String language, boolean trusted) + throws SQLException + { + Lookup l = lookupFor(clazz); + MethodHandle mh; + + try + { + mh = l.findStatic(clazz, "parse", + methodType(clazz, String.class, String.class)); + } + catch ( ReflectiveOperationException e ) + { + throw new SQLNonTransientException( + "Java scalar-UDT implementing class " + + clazz.getCanonicalName() + + " must have a public static parse(String,String) method", + "38000", e); + } + + return invocable( + mh.asType(mh.type().changeReturnType(SQLData.class)), + accessControlContextFor(clazz, language, trusted)); + } + + /** + * Parse the function specification in {@code procTup}, initializing most + * fields of the C {@code Function} structure, and returning an + * {@code Invocable} for invoking the method, or null in the + * case of a UDT. + */ + public static Invocable create( + long wrappedPtr, ResultSet procTup, String langName, String schemaName, + boolean trusted, boolean calledAsTrigger, + boolean forValidator, boolean checkBody) + throws SQLException + { + Matcher info = parse(procTup); + + /* + * Reject any TRANSFORM FOR TYPE clause at validation time, on + * the grounds that it will get ignored at invocation time anyway. + * The check could be made unconditional, and so catch at invocation + * time any function that might have been declared before this validator + * check was added. But simply ignoring the clause at invocation time + * (as promised...) keeps that path leaner. + */ + if ( forValidator && null != procTup.getObject("protrftypes") ) + throw new SQLFeatureNotSupportedException( + "a PL/Java function will not apply TRANSFORM FOR TYPE","0A000"); + + if ( forValidator && ! checkBody ) + return null; + + Identifier.Simple schema = Identifier.Simple.fromCatalog(schemaName); + + return init(wrappedPtr, info, procTup, schema, calledAsTrigger, + forValidator, langName, trusted); + } + + /** + * Retrieve the {@code prosrc} field from the provided {@code procTup}, and + * return it parsed as a {@code Matcher} object with named capturing groups. + */ + private static Matcher parse(ResultSet procTup) throws SQLException + { + String spec = getAS(procTup); + + Matcher m = specForms.matcher(spec); + if ( ! m.matches() ) + throw new SQLSyntaxErrorException( + "cannot parse AS string", "42601"); + + return m; + } + + /** + * Given the information passed to {@code create} and the {@code Matcher} + * object from {@code parse}, determine the type of function being created + * (ordinary, UDT, trigger) and initialize most of the C structure + * accordingly. + * @return an Invocable to invoke the implementing method, or + * null in the case of a UDT + */ + private static Invocable init( + long wrappedPtr, Matcher info, ResultSet procTup, + Identifier.Simple schema, boolean calledAsTrigger, boolean forValidator, + String language, boolean trusted) + throws SQLException + { + Map> typeMap = null; + String className = info.group("udtcls"); + boolean isUDT = (null != className); + + if ( ! 
isUDT ) + { + className = info.group("cls"); + typeMap = Loader.getTypeMap(schema); + } + + boolean readOnly = ((byte)'v' != procTup.getByte("provolatile")); + + ClassLoader schemaLoader = Loader.getSchemaLoader(schema); + Class clazz = loadClass(schemaLoader, className, null); + + AccessControlContext acc = + accessControlContextFor(clazz, language, trusted); + + /* + * false, to leave initialization until the function's first invocation, + * when naturally the right ContextClassLoader and AccessControlContext + * will be in place. Overkill to do more just for a low-impact OpenJ9 + * quirk. + */ + if ( false && forValidator + && clazz != loadClass(schemaLoader, className, acc) ) + throw new SQLException( + "Initialization of class \"" + className + "\" produced a " + + "different class object"); + + if ( isUDT ) + { + setupUDT(wrappedPtr, info, procTup, schemaLoader, + clazz.asSubclass(SQLData.class), readOnly); + return null; + } + + String[] resolvedTypes; + boolean isMultiCall = false; + boolean retTypeIsOutParameter = false; + boolean commute = (null != info.group("com")); + boolean negate = (null != info.group("neg")); + + if ( forValidator ) + calledAsTrigger = isTrigger(procTup); + + if ( calledAsTrigger ) + { + typeMap = null; + resolvedTypes = setupTriggerParams( + wrappedPtr, info, schemaLoader, clazz, readOnly); + } + else + { + boolean[] multi = new boolean[] { isMultiCall }; + boolean[] rtiop = new boolean[] { retTypeIsOutParameter }; + resolvedTypes = setupFunctionParams(wrappedPtr, info, procTup, + schemaLoader, clazz, readOnly, typeMap, multi, rtiop, commute); + isMultiCall = multi [ 0 ]; + retTypeIsOutParameter = rtiop [ 0 ]; + } + + String methodName = info.group("meth"); + + boolean[] returnTypeAdjusted = new boolean[] { false }; + MethodHandle handle = + getMethodHandle(schemaLoader, clazz, methodName, + null, // or acc to initialize parameter classes; overkill. + commute, resolvedTypes, retTypeIsOutParameter, isMultiCall, + returnTypeAdjusted) + .asFixedArity(); + if ( returnTypeAdjusted[0] ) + { + String explicitReturnType = + resolvedTypes[resolvedTypes.length - 1]; + doInPG(() -> _reconcileTypes(wrappedPtr, resolvedTypes, + new String[] { explicitReturnType }, -2)); + } + MethodType mt = handle.type(); + + if ( commute ) + { + Class[] types = mt.parameterArray(); + mt = mt + .changeParameterType(0, types[1]) + .changeParameterType(1, types[0]); + handle = retTypeIsOutParameter + ? permuteArguments(handle, mt, 1, 0, 2) + : permuteArguments(handle, mt, 1, 0); + } + + if ( negate ) + { + MethodHandle inverter = null; + Class rt = mt.returnType(); + if ( boolean.class == rt ) + inverter = s_not; + else if ( Boolean.class == rt ) + inverter = s_boxedNot; + + if ( null == inverter || retTypeIsOutParameter ) + throw new SQLSyntaxErrorException( + "wrong return type for transformation [negate]", "42P13"); + + handle = filterReturnValue(handle, inverter); + } + + handle = adaptHandle(handle); + + if ( isMultiCall ) + handle = ( + retTypeIsOutParameter ? s_resultSetProviderVPC : s_iteratorVPC + ).bindTo(handle); + else + handle = dropArguments(handle, 0, AccessControlContext.class); + + return invocable(handle, acc); + } + + /** + * Determine from a function's {@code pg_proc} entry whether it is a + * trigger function. + *
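A stand-alone sketch (invented names) of the [commute] and [negate] transformations applied in {@code init} above: {@code permuteArguments} swaps the two incoming arguments, and {@code filterReturnValue} inverts a boolean result, leaving the target method itself untouched.

```java
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import static java.lang.invoke.MethodHandles.filterReturnValue;
import static java.lang.invoke.MethodHandles.permuteArguments;
import static java.lang.invoke.MethodType.methodType;

public class CommuteNegateSketch
{
    static boolean lessThan(int a, int b) { return a < b; }

    static boolean not(boolean b) { return ! b; }

    public static void main(String[] args) throws Throwable
    {
        MethodHandles.Lookup l = MethodHandles.lookup();
        MethodHandle lt = l.findStatic(CommuteNegateSketch.class, "lessThan",
            methodType(boolean.class, int.class, int.class));
        MethodHandle inv = l.findStatic(CommuteNegateSketch.class, "not",
            methodType(boolean.class, boolean.class));

        // commute: reorder incoming (a,b) to the target's (b,a)
        MethodHandle gt = permuteArguments(lt, lt.type(), 1, 0);
        // negate: filter the boolean return value through an inverter
        MethodHandle ge = filterReturnValue(lt, inv);

        System.out.println((boolean)gt.invokeExact(3, 5)); // false: 5 < 3
        System.out.println((boolean)ge.invokeExact(3, 5)); // false: !(3 < 5)
    }
}
```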
<p>
      + * This is needed to implement a validator, as the function isn't being + * called, so "calledAsTrigger" can't be determined from the call context. + */ + private static boolean isTrigger(ResultSet procTup) + throws SQLException + { + return 0 == procTup.getInt("pronargs") + && TRIGGEROID == + procTup.getInt("prorettype"); // type Oid, but implements Number + } + + /** + * Select the {@code AccessControlContext} to be in effect when invoking + * a function. + *
<p>
      + * At present, the only choices are null (no additional restrictions) when + * the target class is in a PL/Java-loaded jar file, or the 'lid' when + * invoking anything else (such as code of the JRE itself, which would + * otherwise have all permissions). The 'lid' is constructed to be 'nobody + * special', so will have only those permissions the policy grants without + * conditions. No exception is made here for the few functions supplied by + * PL/Java's own {@code Commands} class; they get a lid. It is reasonable to + * ask them to use {@code doPrivileged} when appropriate. + *
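The expectation above that PL/Java's own Commands functions "use doPrivileged when appropriate" refers to the standard pattern sketched below; the class name and property name are invented for the example. Code whose own protection domain has been granted a permission in the policy asserts that grant rather than relying on the restricted 'lid' context of its callers.

    import java.security.AccessController;
    import java.security.PrivilegedAction;

    public class DoPrivilegedSketch
    {
        @SuppressWarnings("removal") // AccessController is deprecated for removal
        static String readConfiguredValue()
        {
            // Assert this code's own grant (e.g. a PropertyPermission in the
            // policy) so callers' more limited permissions don't block it.
            return AccessController.doPrivileged((PrivilegedAction<String>)
                () -> System.getProperty("org.example.some.property"));
        }
    }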
<p>
      + * When {@code WITHOUT_ENFORCEMENT} is true, any nonnull language + * must be named in {@code pljava.allow_unenforced}. PL/Java's own functions + * in the {@code Commands} class are exempt from that check. + */ + private static AccessControlContext accessControlContextFor( + Class clazz, String language, boolean trusted) + throws SQLException + { + Identifier.Simple langIdent = null; + if ( null != language ) + langIdent = Identifier.Simple.fromCatalog(language); + + if ( WITHOUT_ENFORCEMENT && clazz != Commands.class ) + { + if ( null == langIdent ) + { + if ( ! allowingUnenforcedUDT() ) + throw new SQLNonTransientException( + "PL/Java UDT data conversions for " + clazz + + " cannot execute because pljava.allow_unenforced_udt" + + " is off", "46000"); + } + else if ( Optional.ofNullable( + getListConfigOption("pljava.allow_unenforced") + ).orElseGet(List::of).stream().noneMatch(langIdent::equals) ) + throw new SQLNonTransientException( + "PL \"" + language + "\" not listed in " + + "pljava.allow_unenforced configuration setting", "46000"); + } + + Set p = + (null == langIdent) + ? Set.of() + : Set.of( + trusted + ? new PLPrincipal.Sandboxed(langIdent) + : new PLPrincipal.Unsandboxed(langIdent) + ); + + AccessControlContext acc = clazz.getClassLoader() instanceof Loader + ? s_noLid // policy already applies appropriate permissions + : p.isEmpty() // put a lid on permissions if calling JRE directly + ? s_lid + : lidWithPrincipals(p.toArray(new Principal[1])); + + /* + * A cache to avoid the following machinations might be good. + */ + return doPrivileged(() -> + new AccessControlContext(acc, new SubjectDomainCombiner( + new Subject(true, p, Set.of(), Set.of())))); + } + + /** + * The initialization specific to a UDT function. + */ + /* + * A MappedUDT will not have PL/Java I/O functions declared in SQL, + * and therefore will never reach this method. Ergo, this is handling a + * BaseUDT, which must have all four functions, not just the one + * happening to be looked up at this instant. Rather than looking up one + * handle here and leaving the C code to find the rest anyway, simply let + * the C code look up all four; Function.c already contains logic for doing + * that, which it has to have in case the UDT is first encountered by the + * Type machinery rather than in an explicit function call. + */ + private static void setupUDT( + long wrappedPtr, Matcher info, ResultSet procTup, + ClassLoader schemaLoader, Class clazz, + boolean readOnly) + throws SQLException + { + String udtFunc = info.group("udtfun"); + int udtInitial = Character.toLowerCase(udtFunc.charAt(0)); + Oid udtId; + + switch ( udtInitial ) + { + case 'i': + case 'r': + udtId = (Oid)procTup.getObject("prorettype"); + break; + case 's': + case 'o': + udtId = ((Oid[])procTup.getObject("proargtypes"))[0]; + break; + default: + throw new SQLException("internal error in PL/Java UDT parsing"); + } + + doInPG(() -> _storeToUDT(wrappedPtr, schemaLoader, + clazz, readOnly, udtInitial, udtId.intValue())); + } + + /** + * The initialization specific to a trigger function. 
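Since setupTriggerParams below pins the Java signature to a single org.postgresql.pljava.TriggerData parameter and a void return, a conforming trigger implementation has the shape sketched here; the class and method names are invented for illustration.

    import java.sql.SQLException;
    import org.postgresql.pljava.TriggerData;

    public class ExampleTriggers
    {
        public static void auditRow(TriggerData td) throws SQLException
        {
            // inspect the firing event and its tuples via td here
        }
    }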
+ */ + private static String[] setupTriggerParams( + long wrappedPtr, Matcher info, + ClassLoader schemaLoader, Class clazz, boolean readOnly) + throws SQLException + { + if ( null != info.group("sig") ) + throw new SQLSyntaxErrorException( + "Triggers may not have a Java parameter signature", "42601"); + + Oid retType = INVALID; + String retJType = "void"; + + Oid[] paramTypes = { INVALID }; + String[] paramJTypes = { "org.postgresql.pljava.TriggerData" }; + + return storeToNonUDT(wrappedPtr, schemaLoader, clazz, readOnly, + false /* isMultiCall */, + null /* typeMap */, retType, retJType, paramTypes, paramJTypes, + null /* [returnTypeIsOutputParameter] */); + } + + /** + * The initialization specific to an ordinary function. + */ + private static String[] setupFunctionParams( + long wrappedPtr, Matcher info, ResultSet procTup, + ClassLoader schemaLoader, Class clazz, + boolean readOnly, Map> typeMap, + boolean[] multi, boolean[] returnTypeIsOP, boolean commute) + throws SQLException + { + int numParams = procTup.getInt("pronargs"); + boolean isMultiCall = procTup.getBoolean("proretset"); + multi [ 0 ] = isMultiCall; + Oid[] paramTypes = null; + + Oid returnType = (Oid)procTup.getObject("prorettype"); + + if ( 0 < numParams ) + paramTypes = (Oid[])procTup.getObject("proargtypes"); + + String[] resolvedTypes = storeToNonUDT(wrappedPtr, schemaLoader, clazz, + readOnly, isMultiCall, typeMap, + returnType, null /* returnJType */, + paramTypes, null /* paramJTypes */, + returnTypeIsOP); + + boolean returnTypeIsOutputParameter = returnTypeIsOP[0]; + + String explicitSignature = info.group("sig"); + if ( null != explicitSignature ) + { + /* + * An explicit signature given for the Java method requires a call + * to parseParameters to reconcile those types with the ones in + * resolvedTypes that the mapping from SQL types suggested above. + */ + parseParameters( wrappedPtr, resolvedTypes, explicitSignature, + isMultiCall, returnTypeIsOutputParameter, commute); + } + + /* As in the original C setupFunctionParams, if an explicit Java return + * type is included in the AS string, now compare it to the previously + * resolved return type and adapt if they are different, like what + * happened just above in parseParameters for the parameters. A close + * look at parseParameters shows it can *also* have adjusted the return + * type ... that happens in the case where a composite value is returned + * using an appended OUT parameter and the actual function's return + * type is boolean. If that happened, the resolved type examined here + * will be the one parseParameters just put in - the actual type of the + * appended parameter - and if an explicit return type was also given + * in AS, that work just done will be overwritten by this to come. + * The case is probably one that has never come up in practice; it's + * probably not useful, but at the moment I am trying to duplicate the + * original behavior. + */ + + String explicitReturnType = info.group("ret"); + if ( null != explicitReturnType ) + { + String resolvedReturnType = resolvedTypes[resolvedTypes.length - 1]; + if ( ! explicitReturnType.equals(resolvedReturnType) ) + { + /* Once again overload the reconcileTypes native method with a + * very slightly different behavior, this one keyed by index -2. + * In this case, its explicitTypes parameter will be a one- + * element array containing only the return type ... and the + * coercer, if needed, will be constructed with getCoerceOut + * instead of getCoerceIn. 
+ */ + doInPG(() -> _reconcileTypes(wrappedPtr, resolvedTypes, + new String[] { explicitReturnType }, -2)); + } + } + + return resolvedTypes; + } + + /** + * Apply the legacy PL/Java rules for matching the types in the SQL + * declaration of the function with those in the Java method signature. + */ + private static void parseParameters( + long wrappedPtr, String[] resolvedTypes, String explicitSignature, + boolean isMultiCall, boolean returnTypeIsOutputParameter, + boolean commute) + throws SQLException + { + boolean lastIsOut = ( ! isMultiCall ) && returnTypeIsOutputParameter; + String[] explicitTypes = explicitSignature.isEmpty() ? + new String[0] : COMMA.split(explicitSignature); + + int expect = resolvedTypes.length - (lastIsOut ? 0 : 1); + + if ( expect != explicitTypes.length ) + throw new SQLSyntaxErrorException(String.format( + "AS (Java): expected %1$d parameter types, found %2$d", + expect, explicitTypes.length), "42601"); + + if ( commute ) + { + if ( explicitTypes.length != (lastIsOut ? 3 : 2) ) + throw new SQLSyntaxErrorException( + "wrong number of parameters for transformation [commute]", + "42P13"); + String t = explicitTypes[0]; + explicitTypes[0] = explicitTypes[1]; + explicitTypes[1] = t; + } + + doInPG(() -> + { + for ( int i = 0 ; i < resolvedTypes.length - 1 ; ++ i ) + { + if ( resolvedTypes[i].equals(explicitTypes[i]) ) + continue; + _reconcileTypes(wrappedPtr, resolvedTypes, explicitTypes, i); + } + }); + + if ( lastIsOut + && ! resolvedTypes[expect-1].equals(explicitTypes[expect-1]) ) + { + /* Use the same reconcileTypes native method to handle the return + * type also ... its behavior must change a bit, so use index -1 to + * identify this case. + */ + doInPG(() -> + _reconcileTypes(wrappedPtr, resolvedTypes, explicitTypes, -1)); + } + } + + /** + * Pattern for splitting an explicit signature on commas, relying on + * whitespace already being stripped by {@code getAS}. Will not match + * consecutive, leading, or trailing commas. + */ + private static final Pattern COMMA = compile("(?<=[^,]),(?=[^,])"); + + /** + * Return a class given a loader to use and a canonical type name, as used + * in explicit signatures in the AS string. Just a bit of gymnastics to + * turn that form of name into the right class, including for primitives, + * void, and arrays. + * + * @param valACC if non-null, force initialization of the loaded class, in + * an effort to bring forward as many possible errors as can be during + * validation. Initialization will run in this access control context. + */ + private static Class loadClass( + ClassLoader schemaLoader, String className, AccessControlContext valACC) + throws SQLException + { + boolean withoutInit = null == valACC; + Matcher m = typeNameInAS.matcher(className); + m.matches(); + className = m.group(1); + Class c; + + switch ( className ) + { + case "boolean": c = boolean.class; break; + case "byte": c = byte.class; break; + case "short": c = short.class; break; + case "int": c = int.class; break; + case "long": c = long.class; break; + case "char": c = char.class; break; + case "float": c = float.class; break; + case "double": c = double.class; break; + case "void": c = void.class; break; + default: + try + { + c = withoutInit + ? 
Class.forName(className, false, schemaLoader) + : loadAndInitWithACC(className, schemaLoader, valACC); + } + catch ( ClassNotFoundException | LinkageError e ) + { + throw new SQLNonTransientException( + "Resolving class " + className + ": " + e, "46103", e); + } + } + + if ( -1 != m.start(2) ) + { + int ndims = (m.end(2) - m.start(2)) / 2; + c = Array.newInstance(c, new int[ndims]).getClass(); + } + + return c; + } + + /** + * Get the "AS" string (also known as the {@code prosrc} field of the + * {@code pg_proc} tuple), with whitespace stripped, and with an {@code =} + * separating the return type, if any, from the method name, per the rules + * of the earlier C implementation. + */ + private static String getAS(ResultSet procTup) throws SQLException + { + String spec = procTup.getString("prosrc"); // has NOT NULL constraint + + /* COPIED COMMENT */ + /* Strip all whitespace except the first one if it occures after + * some alpha numeric characers and before some other alpha numeric + * characters. We insert a '=' when that happens since it delimits + * the return value from the method name. + */ + /* ANALYZED COMMENT */ + /* Original code skipped every isspace() character encountered while + * atStart or passedFirst was true. Initially true, atStart was reset + * by the first non-isspace character. Initially false, passedFirst + * was set by ANY encounter of a non-isspace non-isalnum, OR of any + * non-isspace following at least one isspace AFTER atStart was reset. + * The = was added if the non-isspace character satisfied isalpha. + */ + spec = stripEarlyWSinAS.matcher(spec).replaceFirst("$2="); + spec = stripOtherWSinAS.matcher(spec).replaceAll(""); + return spec; + } + + + /** + * Pattern used to strip early whitespace in an "AS" string. + */ + private static final Pattern stripEarlyWSinAS = compile( + "^(\\s*+)(\\p{Alnum}++)(\\s*+)(?=\\p{Alpha})" + ); + + /** + * Pattern used to strip the remaining whitespace in an "AS" string. + */ + private static final Pattern stripOtherWSinAS = compile( + "\\s*+" + ); + + /** + * Uncompiled pattern to recognize a Java identifier. + */ + private static final String javaIdentifier = String.format( + "\\p{%1$sStart}\\p{%1sPart}*+", "javaJavaIdentifier" + ); + + /** + * Uncompiled pattern to recognize a Java type name, possibly qualified, + * without array brackets. + */ + private static final String javaTypeName = String.format( + "(?:%1$s\\.)*%1$s", javaIdentifier + ); + + /** + * Uncompiled pattern to recognize one or more {@code []} array markers (the + * match length divided by two is the number of array dimensions). + */ + private static final String arrayDims = "(?:\\[\\])++"; + + /** + * The recognized forms of an "AS" string, distinguishable and broken out + * by named capturing groups. + *
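Before the grammar itself, a few sample AS strings (class, method, and type names invented for illustration) showing the forms the pattern is meant to recognize; getAS above strips whitespace before these are matched.

    public class ASStringExamples
    {
        static final String[] EXAMPLES = {
            // BaseUDT support function (case-insensitive UDT notation)
            "UDT[org.example.Complex] input",
            // plain method spec, no explicit signature
            "org.example.Util.answer",
            // explicit Java parameter signature
            "org.example.Util.concat(java.lang.String,java.lang.String)",
            // explicit Java return type ahead of the '='
            "java.lang.String=org.example.Util.concat(java.lang.String,java.lang.String)",
            // operator transformation prefix ([commute], [negate], or both)
            "[commute,negate] org.example.Ops.lessThan(int,int)"
        };

        public static void main(String[] args)
        {
            for ( String s : EXAMPLES )
                System.out.println(s);
        }
    }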
<p>
+ * Array brackets are of course not included in the class-name groups
+ * ({@code cls}, {@code udtcls}), so the caller will not have to check for
+ * the receiver class being an array.
+ * A check that it isn't a primitive may be in order, though.
+ */
+ private static final Pattern specForms = compile(String.format(
+ /* the UDT notation, which is case insensitive */
+ "(?i:udt\\[(?<udtcls>%1$s)\\](?<udtfun>input|output|receive|send))"
+ +
+ /* or the non-UDT form (which can't begin, insensitively, with UDT) */
+ "|(?!(?i:udt\\[))" +
+ /* allow a prefix like [commute] or [negate] or [commute,negate] */
+ "(?:\\[(?:" +
+ "(?:(?:(?<com>commute)|(?<neg>negate))(?:(?=\\])|,(?!\\])))" +
+ ")++\\])?+" +
+ /* and the long-standing method spec syntax */
+ "(?:(?<ret>%2$s)=)?+(?<cls>%1$s)\\.(?<meth>%3$s)" +
+ "(?:\\((?<sig>(?:(?:%2$s,)*+%2$s)?+)\\))?+",
+ javaTypeName,
+ javaTypeName + "(?:" + arrayDims + ")?+",
+ javaIdentifier
+ ));
+
+ /**
+ * The recognized form of a Java type name in an "AS" string.
+ * The first capturing group is the canonical name of a type; the second
+ * group, if present, matches one or more {@code []} array markers following
+ * the name (its length divided by two is the number of array dimensions).
+ */
+ private static final Pattern typeNameInAS = compile(
+ "(" + javaTypeName + ")(" + arrayDims + ")?+"
+ );
+
+ /**
+ * Test whether the type {@code t0} is, directly or indirectly,
+ * a specialization of generic type {@code c0}.
+ * @param t0 a type to be checked
+ * @param c0 known generic type to check for
+ * @return null if {@code t0} does not extend {@code c0}, otherwise the
+ * array of type arguments with which it specializes {@code c0}
+ */
+ private static Type[] specialization(Type t0, Class<?> c0)
+ {
+ Type t = t0;
+ Class<?> c;
+ ParameterizedType pt = null;
+ TypeBindings latestBindings = null;
+ Type[] actualArgs = null;
+
+ if ( t instanceof Class )
+ {
+ c = (Class<?>)t;
+ if ( ! c0.isAssignableFrom(c) )
+ return null;
+ if ( c0 == c )
+ return new Type[0];
+ }
+ else if ( t instanceof ParameterizedType )
+ {
+ pt = (ParameterizedType)t;
+ c = (Class<?>)pt.getRawType();
+ if ( ! c0.isAssignableFrom(c) )
+ return null;
+ if ( c0 == c )
+ actualArgs = pt.getActualTypeArguments();
+ else
+ latestBindings = new TypeBindings(null, pt);
+ }
+ else
+ throw new AssertionError(
+ "expected Class or ParameterizedType, got: " + t);
+
+ if ( null == actualArgs )
+ {
+ List<Type> pending = new LinkedList<>();
+ pending.add(c.getGenericSuperclass());
+ addAll(pending, c.getGenericInterfaces());
+
+ while ( ! pending.isEmpty() )
+ {
+ t = pending.remove(0);
+ if ( null == t )
+ continue;
+ if ( t instanceof Class )
+ {
+ c = (Class<?>)t;
+ if ( c0 == c )
+ return new Type[0];
+ }
+ else if ( t instanceof ParameterizedType )
+ {
+ pt = (ParameterizedType)t;
+ c = (Class<?>)pt.getRawType();
+ if ( c0 == c )
+ {
+ actualArgs = pt.getActualTypeArguments();
+ break;
+ }
+ if ( c0.isAssignableFrom(c) )
+ pending.add(new TypeBindings(latestBindings, pt));
+ }
+ else if ( t instanceof TypeBindings )
+ {
+ latestBindings = (TypeBindings)t;
+ continue;
+ }
+ else
+ throw new AssertionError(
+ "expected Class or ParameterizedType, got: " + t);
+ if ( !
c0.isAssignableFrom(c) ) + continue; + pending.add(c.getGenericSuperclass()); + addAll(pending, c.getGenericInterfaces()); + } + } + if ( null == actualArgs ) + throw new AssertionError( + "failed checking whether " + t0 + " specializes " + c0); + + for ( int i = 0; i < actualArgs.length; ++ i ) + if ( actualArgs[i] instanceof TypeVariable ) + actualArgs[i] = + latestBindings.resolve((TypeVariable)actualArgs[i]); + + return actualArgs; + } + + /** + * A class recording the bindings made in a ParameterizedType to the type + * parameters in a {@code GenericDeclaration}. Implements + * {@code Type} so it can be added to the {@code pending} queue in + * {@code specialization}. + *
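The essence of what specialization() computes, minus the breadth-first walk and the type-variable bookkeeping that TypeBindings handles, can be seen with plain java.lang.reflect calls; a toy sketch follows (the class names are invented).

    import java.lang.reflect.ParameterizedType;
    import java.util.ArrayList;

    public class SpecializationDemo
    {
        static class StringList extends ArrayList<String> { }

        public static void main(String[] args)
        {
            // the generic superclass of StringList is ArrayList<String>
            ParameterizedType pt =
                (ParameterizedType)StringList.class.getGenericSuperclass();
            // prints: class java.lang.String
            System.out.println(pt.getActualTypeArguments()[0]);
        }
    }

specialization() generalizes this to indirect supertypes, and resolves any type variables encountered along the way through the TypeBindings objects it queues.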
<p>
      + * In {@code specialization}, the tree of superclasses/superinterfaces will + * be searched breadth-first, with all of a node's immediate supers enqueued + * before any from the next level. By recording a node's type variable to + * type argument bindings in an object of this class, and enqueueing it + * before any of the node's supers, any type variables encountered as actual + * type arguments to any of those supers should be resolvable in the object + * of this class most recently dequeued. + */ + static class TypeBindings implements Type + { + private final TypeVariable[] formalTypeParams; + private final Type[] actualTypeArgs; + + TypeBindings(TypeBindings prior, ParameterizedType pt) + { + actualTypeArgs = pt.getActualTypeArguments(); + formalTypeParams = + ((GenericDeclaration)pt.getRawType()).getTypeParameters(); + assert actualTypeArgs.length == formalTypeParams.length; + + if ( null == prior ) + return; + + for ( int i = 0; i < actualTypeArgs.length; ++ i ) + { + Type t = actualTypeArgs[i]; + if ( actualTypeArgs[i] instanceof TypeVariable ) + actualTypeArgs[i] = prior.resolve((TypeVariable)t); + } + } + + Type resolve(TypeVariable v) + { + for ( int i = 0; i < formalTypeParams.length; ++ i ) + if ( formalTypeParams[i].equals(v) ) + return actualTypeArgs[i]; + throw new AssertionError("type binding not found for " + v); + } + } + + /** + * Wrap the native method to store the values computed in Java, for a + * non-UDT function, into the C {@code Function} structure. Returns an array + * of Java type names for the parameters, if any, as suggested by the C code + * based on the SQL types, and can indicate whether the method return type + * is an out parameter, if a one-element array of boolean is passed to + * receive that result. + */ + private static String[] storeToNonUDT( + long wrappedPtr, ClassLoader schemaLoader, Class clazz, + boolean readOnly, boolean isMultiCall, + Map> typeMap, + Oid returnType, String returnJType, Oid[] paramTypes, String[] pJTypes, + boolean[] returnTypeIsOutParameter) + { + int numParams; + int[] paramOids; + if ( null == paramTypes ) + { + numParams = 0; + paramOids = null; + } + else + { + numParams = paramTypes.length; + paramOids = new int [ numParams ]; + for ( int i = 0 ; i < numParams ; ++ i ) + paramOids[i] = paramTypes[i].intValue(); + } + + String[] outJTypes = new String [ 1 + numParams ]; + + boolean rtiop = + doInPG(() -> _storeToNonUDT( + wrappedPtr, schemaLoader, clazz, readOnly, isMultiCall, typeMap, + numParams, returnType.intValue(), returnJType, paramOids, + pJTypes, outJTypes)); + + if ( null != returnTypeIsOutParameter ) + returnTypeIsOutParameter[0] = rtiop; + + return outJTypes; + } + + private static native boolean _storeToNonUDT( + long wrappedPtr, ClassLoader schemaLoader, Class clazz, + boolean readOnly, boolean isMultiCall, + Map> typeMap, + int numParams, int returnType, String returnJType, + int[] paramTypes, String[] paramJTypes, String[] outJTypes); + + private static native void _storeToUDT( + long wrappedPtr, ClassLoader schemaLoader, + Class clazz, + boolean readOnly, int funcInitial, int udtOid); + + private static native void _reconcileTypes( + long wrappedPtr, String[] resolvedTypes, String[] explicitTypes, int i); +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/HeapTupleHeader.java b/pljava/src/main/java/org/postgresql/pljava/internal/HeapTupleHeader.java deleted file mode 100644 index 887c401d..00000000 --- a/pljava/src/main/java/org/postgresql/pljava/internal/HeapTupleHeader.java +++ 
/dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html - */ -package org.postgresql.pljava.internal; - -import java.sql.SQLException; - -/** - * The HeapTupleHeader correspons to the internal PostgreSQL - * HeapTupleHeader struct. - * - * @author Thomas Hallgren - */ -public class HeapTupleHeader extends JavaWrapper -{ - private final TupleDesc m_tupleDesc; - - HeapTupleHeader(long pointer, TupleDesc tupleDesc) - { - super(pointer); - m_tupleDesc = tupleDesc; - } - - /** - * Obtains a value from the underlying native HeapTupleHeader - * structure. - * @param index Index of value in the structure (one based). - * @return The value or null. - * @throws SQLException If the underlying native structure has gone stale. - */ - public final Object getObject(int index) - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - return _getObject(this.getNativePointer(), m_tupleDesc.getNativePointer(), index); - } - } - - /** - * Obtains the TupleDesc that describes the tuple and returns it. - * @return The TupleDesc that describes this tuple. - */ - public final TupleDesc getTupleDesc() - { - return m_tupleDesc; - } - - protected native void _free(long pointer); - - private static native Object _getObject(long pointer, long tupleDescPointer, int index) - throws SQLException; -} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/InstallHelper.java b/pljava/src/main/java/org/postgresql/pljava/internal/InstallHelper.java index 19def09b..4f4dd515 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/InstallHelper.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/InstallHelper.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2025 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -12,6 +12,17 @@ package org.postgresql.pljava.internal; import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.MalformedURLException; +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.Charset; +import java.security.NoSuchAlgorithmException; +import java.security.Policy; +import java.security.Security; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.DriverManager; @@ -22,13 +33,22 @@ import java.sql.Savepoint; import java.sql.Statement; import java.text.ParseException; -import java.util.Scanner; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import static java.nio.charset.StandardCharsets.UTF_8; import static java.sql.Types.VARCHAR; import org.postgresql.pljava.jdbc.SQLUtils; +import org.postgresql.pljava.jdbc.SPIReadOnlyControl; import org.postgresql.pljava.management.SQLDeploymentDescriptor; -import static org.postgresql.pljava.sqlgen.DDRWriter.eQuote; +import org.postgresql.pljava.nopolicy.FrozenProperties; +import org.postgresql.pljava.policy.TrialPolicy; +import static org.postgresql.pljava.annotation.processing.DDRWriter.eQuote; +import static org.postgresql.pljava.elog.ELogHandler.LOG_WARNING; +import static org.postgresql.pljava.internal.Backend.WITHOUT_ENFORCEMENT; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; +import static org.postgresql.pljava.sqlgen.Lexicals.Identifier.Simple; /** * Group of methods intended to streamline the PL/Java installation/startup @@ -38,18 +58,51 @@ */ public class InstallHelper { + static final boolean MANAGE_CONTEXT_LOADER; + + static + { + String manageLoaderProp = "org.postgresql.pljava.context.loader"; + String s = System.getProperty(manageLoaderProp); + if ( null == s ) + MANAGE_CONTEXT_LOADER = true; + else if ( "unmanaged".equals(s) ) + MANAGE_CONTEXT_LOADER = false; + else + { + MANAGE_CONTEXT_LOADER = false; + Backend.log(LOG_WARNING, + "value \"" + s + "\" for " + manageLoaderProp + + " unrecognized; using \"unmanaged\""); + } + } + private static void setPropertyIfNull( String property, String value) { if ( null == System.getProperty( property) ) System.setProperty( property, value); } + /** + * Perform miscellaneous early PL/Java initialization, and return a string + * detailing the versions of PL/Java, PostgreSQL, and Java in use, which the + * native caller can use in its "PL/Java loaded" (a/k/a "hello") + * triumphant {@code ereport}. + *
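The MANAGE_CONTEXT_LOADER block above reads the org.postgresql.pljava.context.loader system property, so selecting "unmanaged" behavior means passing that property to the backend JVM. One plausible route is sketched here via the pljava.vmoptions setting; the connection URL, credentials handling, and the choice of ALTER SYSTEM are illustrative assumptions (postgresql.conf or a per-database setting work as well), and the setting must be in place before the JVM is first loaded in a session.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class VMOptionsExample
    {
        public static void main(String[] args) throws Exception
        {
            // connection details are environment-specific
            try (Connection c =
                     DriverManager.getConnection("jdbc:postgresql:postgres");
                 Statement s = c.createStatement())
            {
                s.execute("ALTER SYSTEM SET pljava.vmoptions TO "
                    + "'-Dorg.postgresql.pljava.context.loader=unmanaged'");
                s.execute("SELECT pg_reload_conf()");
            }
        }
    }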
<p>
      + * This method calls {@code beginEnforcing} rather late, so that the policy + * needn't be cluttered with permissions for the operations only needed + * before that point. Policy is being enforced by the time this method + * returns (except in case of JEP 411 fallback as described at + * {@code beginEnforcing}). + */ public static String hello( - String nativeVer, String user, String dbname, String clustername, + String nativeVer, String serverBuiltVer, String serverRunningVer, + String user, String dbname, String clustername, String datadir, String libdir, String sharedir, String etcdir) + throws SQLException { String implVersion = - InstallHelper.class.getPackage().getImplementationVersion(); + InstallHelper.class.getModule().getDescriptor().rawVersion().get(); /* * visualvm.display.name is not really used as a property. jvisualvm * picks it up by looking for -Dvisualvm.display.name=something in the @@ -61,10 +114,15 @@ public static String hello( setPropertyIfNull( "org.postgresql.database", dbname); if ( null != clustername ) setPropertyIfNull( "org.postgresql.cluster", clustername); - setPropertyIfNull( "org.postgresql.datadir", datadir); - setPropertyIfNull( "org.postgresql.libdir", libdir); - setPropertyIfNull( "org.postgresql.sharedir", sharedir); - setPropertyIfNull( "org.postgresql.sysconfdir", etcdir); + + if ( ! WITHOUT_ENFORCEMENT ) + { + setPropertyIfNull( "org.postgresql.datadir", datadir); + setPropertyIfNull( "org.postgresql.libdir", libdir); + setPropertyIfNull( "org.postgresql.sharedir", sharedir); + setPropertyIfNull( "org.postgresql.sysconfdir", etcdir); + } + setPropertyIfNull( "org.postgresql.pljava.version", implVersion); setPropertyIfNull( "org.postgresql.pljava.native.version", nativeVer); setPropertyIfNull( "org.postgresql.version", @@ -74,32 +132,38 @@ public static String hello( */ setPropertyIfNull( "sqlj.defaultconnection", "jdbc:default:connection"); - /* - * Set the org.postgresql.pljava.udt.byteorder.{scalar,mirror}.{p2j,j2p} - * properties. For shorthand, defaults can be given in shorter property - * keys org.postgresql.pljava.udt.byteorder.{scalar,mirror} or even just - * org.postgresql.pljava.udt.byteorder for an overall default. These - * shorter keys are then removed from the system properties. - */ - String orderKey = "org.postgresql.pljava.udt.byteorder"; - String orderAll = System.getProperty(orderKey); - String orderScalar = System.getProperty(orderKey + ".scalar"); - String orderMirror = System.getProperty(orderKey + ".mirror"); - - if ( null == orderScalar ) - orderScalar = null != orderAll ? orderAll : "big_endian"; - if ( null == orderMirror ) - orderMirror = null != orderAll ? orderAll : "native"; + String encodingKey = "org.postgresql.server.encoding"; + String encName = System.getProperty(encodingKey); + if ( null == encName ) + encName = Backend.getConfigOption( "server_encoding"); + try + { + Charset cs = Charset.forName(encName); + org.postgresql.pljava.internal.Session.s_serverCharset = cs; // poke + System.setProperty(encodingKey, cs.name()); + } + catch ( IllegalArgumentException iae ) + { + System.clearProperty(encodingKey); + } - setPropertyIfNull(orderKey + ".scalar.p2j", orderScalar); - setPropertyIfNull(orderKey + ".scalar.j2p", orderScalar); + /* so it can be granted permissions in the pljava policy */ + if ( ! 
WITHOUT_ENFORCEMENT ) + { + System.setProperty( "org.postgresql.pljava.codesource", + InstallHelper.class.getProtectionDomain().getCodeSource() + .getLocation().toString()); - setPropertyIfNull(orderKey + ".mirror.p2j", orderMirror); - setPropertyIfNull(orderKey + ".mirror.j2p", orderMirror); + setPolicyURLs(); + } - System.clearProperty(orderKey); - System.clearProperty(orderKey + ".scalar"); - System.clearProperty(orderKey + ".mirror"); + /* + * PL/Java modifies no more system properties beyond this point. + * Take a defensive copy here that can be exposed through the Session + * API. + */ + org.postgresql.pljava.internal.Session.s_properties = + new FrozenProperties(System.getProperties()); /* * Construct the strings announcing the versions in use. @@ -117,9 +181,28 @@ public static String hello( String vmVer = System.getProperty( "java.vm.version"); String vmInfo = System.getProperty( "java.vm.info"); + if ( ! WITHOUT_ENFORCEMENT ) + { + try + { + // sqlj scheme must exist when reading policy + new URI("sqlj", "x", null).toURL(); + } + catch ( MalformedURLException | URISyntaxException e ) + { + throw new SecurityException( + "failed to create sqlj: URL scheme needed for security policy", + e); + } + + beginEnforcing(); + } + StringBuilder sb = new StringBuilder(); sb.append( "PL/Java native code (").append( nativeVer).append( ")\n"); sb.append( "PL/Java common code (").append( implVersion).append( ")\n"); + sb.append( "Built for (").append( serverBuiltVer).append( ")\n"); + sb.append( "Loaded in (").append( serverRunningVer).append( ")\n"); sb.append( jreName).append( " (").append( jreVer).append( ")\n"); sb.append( vmName).append( " (").append( vmVer); if ( null != vmInfo ) @@ -128,18 +211,215 @@ public static String hello( return sb.toString(); } + /** + * Set the URLs to be read by Java's Policy implementation according to + * pljava.policy_urls. That is a {@code GUC_LIST}-formatted config variable + * where each element can be a plain URL, or a URL prefixed with {@code n=} + * to set the index of the URL to be set or replaced to n. + * If no n is specified, the index following the last one set will + * be used; if the first URL in the list has no n, it will + * be placed at index 2 (after the presumed JRE installed policy at index + * 1). + *
<p>
      + * An entry with nothing after the {@code =} causes that and subsequent URL + * positions not to be processed, in case they had been set in the + * systemwide {@code java.security} file. As there is not actually a way to + * delete a security property, the code will simply replace any + * {@code policy.url.n} entries found at that index and higher with copies + * of the URL at the immediately preceding index. + */ + private static void setPolicyURLs() + throws SQLException + { + /* This index is incremented before setting each specified policy URL. + * Initializing it to 1 means the first URL set (if it does not specify + * an index) will be at 2, following the JRE's installed policy. Any URL + * entry can begin with n= in order to set URL number n instead (and + * any that follow will be in sequence after n, unless another n= is + * used). + */ + int urlIndex = 1; + + int stopIndex = -1; + + Pattern p = Pattern.compile( "^(?:(\\d++)?+=)?+"); + + String prevURL = null; + for (Simple u : Backend.getListConfigOption( "pljava.policy_urls")) + { + if ( -1 != stopIndex ) + throw new SQLNonTransientException( + "stop (=) entry must be last in pljava.policy_urls", + "F0000"); + ++ urlIndex; + String s = u.nonFolded(); + Matcher m = p.matcher(s); + if ( m.find() ) + { + s = s.substring(m.end()); + String i = m.group(1); + if ( null != i ) + urlIndex = Integer.parseInt(i); + if ( s.isEmpty() ) + stopIndex = urlIndex; + } + if ( urlIndex < 1 ) + throw new SQLNonTransientException( + "index (n=) must be >= 1 in pljava.policy_urls", + "F0000"); + int prevIndex = urlIndex - 1; + if ( urlIndex > 1 ) + { + prevURL = Security.getProperty( "policy.url." + prevIndex); + if ( null == prevURL ) + { + @SuppressWarnings("deprecation") // Java >= 10: feature() + boolean hint = + (2 == urlIndex) && 24 <= Runtime.version().major(); + + throw new SQLNonTransientException(String.format( + "URL at %d in pljava.policy_urls follows an unset URL" + + (hint ? (". " + jepSuffix) : ""), urlIndex), "F0000"); + } + } + if ( -1 != stopIndex ) + continue; /* should be last, but resume loop to make sure */ + Security.setProperty( "policy.url." + urlIndex, s); + } + if ( -1 == stopIndex ) + return; + + while ( null != Security.getProperty( "policy.url." + stopIndex) ) + { + Security.setProperty( "policy.url." + stopIndex, prevURL); + ++ stopIndex; + } + } + + /** + * From the point of successful call of this method, PL/Java is enforcing + * security policy (except in JEP 411 fallback case described below). + *
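The n= prefix handling described above comes down to the small pattern used in setPolicyURLs; the standalone sketch below (sample entries invented) shows how an entry splits into its optional index and its URL text, with the bare "=" stop entry yielding an empty URL.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class PolicyUrlEntryDemo
    {
        public static void main(String[] args)
        {
            Pattern p = Pattern.compile("^(?:(\\d++)?+=)?+");
            for ( String entry : new String[] {
                    "file:/etc/pljava/pljava.policy",  // no index given
                    "3=file:/etc/pljava/extra.policy", // explicit index 3
                    "=" } )                            // stop entry
            {
                Matcher m = p.matcher(entry);
                m.find();                  // the prefix is optional, so this always matches
                String index = m.group(1); // null when no n= prefix is present
                String url = entry.substring(m.end());
                System.out.println(index + " -> " + url);
            }
        }
    }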
<p>
      + * This method handles applying the {@code TrialPolicy} if that has been + * selected, and setting the security manager, which thereafter cannot be + * unset or changed (unless the policy has been edited to allow it). + *
<p>
+ * In the advent of JEP 411, this method must also head off the + * layer-inappropriate boilerplate warning message when running on Java 17 + * or later, and react if the operation has been disallowed or "degraded". + *
<p>
+ * The expected form of "degradation" as of Java 24 with JEP 486 is for + * {@code setSecurityManager} to throw + * {@code UnsupportedOperationException}. Nonetheless, we also check + * that {@code getSecurityManager} returns the instance we intended to set. + *
<p>
      + * JEP 486 explicitly allows the property {@code java.security.manager} to + * be set to {@code disallow} at invocation, and this detectably differs + * from its null default (despite the semantic equivalence), so that will be + * the setting to include in {@code pljava.vmoptions} to indicate that + * running without any policy enforcement is ok. When that property is so + * set, this method is not even called. + */ + private static void beginEnforcing() throws SQLException + { + String trialURI = System.getProperty( + "org.postgresql.pljava.policy.trial"); + + if ( null != trialURI ) + { + try + { + Policy.setPolicy( new TrialPolicy( trialURI)); + } + catch ( NoSuchAlgorithmException e ) + { + throw new SQLException(e.getMessage(), e); + } + } + + @SuppressWarnings("deprecation") // Java >= 10: feature() + int major = Runtime.version().major(); + + if ( 17 <= major ) + Backend.pokeJEP411(); + + try + { + SecurityManager sm = new SecurityManager(); + System.setSecurityManager( sm); + if ( sm == System.getSecurityManager() ) + return; + } + catch ( UnsupportedOperationException e ) + { + if ( 18 >= major ) + throw new SQLException( + "Unexpected failure enabling permission enforcement", e); + throw new SQLNonTransientException( + "[JEP 411] The Java version selected, " + Runtime.version() + + ", has not allowed PL/Java to enforce security policy. " + + ( 24 > major ? allowHint : "" ) + jepSuffix, "58000", e); + } + + throw new SQLNonTransientException( + "[JEP 411] The Java version selected, " + Runtime.version() + + ", cannot enforce security policy as this PL/Java version " + + "requires. " + ( 24 > major ? allowHint : "" ) + jepSuffix, + "58000"); + } + + private static final String jepSuffix = + "With Java 24 and later, this version of PL/Java can only operate " + + "with -Djava.security.manager=disallow set in pljava.vmoptions, " + + "resulting in no enforcement of any security expectations, no " + + "distinction between trusted and untrusted, and so on. If that is " + + "unacceptable, " + + "pljava.libjvm_location should be pointed to an earlier version " + + "of Java, or a newer PL/Java version should be used. For more " + + "explanation, please see " + + "https://github.com/tada/pljava/wiki/JEP-411"; + + private static final String allowHint = + "To enforce security policy in Java 18 through 23, the setting " + + "-Djava.security.manager=allow must be added in pljava.vmoptions. "; + + private static boolean isGpdb() + { + try ( + Connection c = SQLUtils.getDefaultConnection(); + PreparedStatement stmt = c.prepareStatement( + "SELECT pg_catalog.current_setting(?, true)") + ) + { + stmt.unwrap(SPIReadOnlyControl.class).clearReadOnly(); + stmt.setString(1, "gp_session_role"); + try ( ResultSet rs = stmt.executeQuery() ) + { + return rs.next() && rs.getString(1) != null; + } + } + catch ( SQLException e ) + { + throw unchecked(e); + } + } + + /** + * When PL/Java is loaded as an end-in-itself (that is, by {@code LOAD} + * on its own or from its extension script on {@code CREATE EXTENSION} or + * {@code ALTER EXTENSION}, not just in the course of handling a call of a + * Java function), this method will be called to ensure there is + * a schema {@code sqlj} and that it contains the right, possibly updated, + * stuff. 
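A quick way for Java code (a deployed PL/Java function, for instance) to observe which of the enforcement outcomes described in beginEnforcing above it ended up running under; a minimal sketch using only standard APIs, with an invented class name.

    public class EnforcementProbe
    {
        @SuppressWarnings("removal") // getSecurityManager is deprecated for removal
        public static String describe()
        {
            int feature = Runtime.version().feature();          // e.g. 17, 21, 24
            boolean enforcing = null != System.getSecurityManager();
            return "Java " + feature + ", policy enforcement "
                + (enforcing ? "active" : "not active");
        }
    }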
+ */ public static void groundwork( String module_pathname, String loadpath_tbl, String loadpath_tbl_quoted, boolean asExtension, boolean exNihilo) - throws SQLException, ParseException + throws SQLException, ParseException, IOException { - Connection c = null; - Statement s = null; - try + try(Connection c = SQLUtils.getDefaultConnection(); + Statement s = c.createStatement()) { - c = SQLUtils.getDefaultConnection(); - s = c.createStatement(); - schema(c, s); SchemaVariant sv = recognizeSchema(c, s, loadpath_tbl); @@ -151,6 +431,9 @@ public static void groundwork( throw new SQLNonTransientException( "sqlj schema not empty for CREATE EXTENSION pljava", "55000"); + if ( asExtension && ! exNihilo ) + preAbsorb(c, s); // handle possible update from unpackaged + handlers(c, s, module_pathname); languages(c, s); deployment(c, s, sv); @@ -168,13 +451,93 @@ public static void groundwork( */ s.execute("DROP TABLE sqlj." + loadpath_tbl_quoted); } - finally + } + + /** + * Absorb a few key objects into the extension, if they exist, before the + * operations that CREATE OR REPLACE them. + * + * Until postgres/postgres@b9b21ac, CREATE OR REPLACE would silently absorb + * the object, if preexisting, into the extension being created. Since that + * change, those CREATE OR REPLACE operations now fail if the object exists + * but is not yet a member of the extension. Therefore, this method is + * called first, to absorb those objects if they exist. Because this only + * matters when the objects do not yet belong to the extension (the old + * "FROM unpackaged" case), this method first checks and returns with no + * effect if javau_call_handler is already an extension member. + * + * Because it's possible to be updating from an older PL/Java version + * (for example, one without the validator functions), failure to add an + * expected object to the extension because the object doesn't exist yet + * is not treated here as an error. + */ + private static void preAbsorb( Connection c, Statement s) + throws SQLException + { + /* + * Do nothing if javau_call_handler is already an extension member. + */ + try ( + ResultSet rs = s.executeQuery( + "SELECT d.refobjid" + + " FROM" + + " pg_catalog.pg_namespace n" + + " JOIN pg_catalog.pg_proc p" + + " ON pronamespace OPERATOR(pg_catalog.=) n.oid" + + " JOIN pg_catalog.pg_depend d" + + " ON d.classid OPERATOR(pg_catalog.=) p.tableoid" + + " AND d.objid OPERATOR(pg_catalog.=) p.oid" + + " WHERE" + + " nspname OPERATOR(pg_catalog.=) 'sqlj'" + + " AND proname OPERATOR(pg_catalog.=) 'javau_call_handler'" + + " AND deptype OPERATOR(pg_catalog.=) 'e'" + ) + ) + { + if ( rs.next() ) + return; + } + + addExtensionUnless(c, s, "42883", "FUNCTION sqlj.java_call_handler()"); + addExtensionUnless(c, s, "42883", "FUNCTION sqlj.javau_call_handler()"); + addExtensionUnless(c, s, "42883", + "FUNCTION sqlj.java_validator(pg_catalog.oid)"); + addExtensionUnless(c, s, "42883", + "FUNCTION sqlj.javau_validator(pg_catalog.oid)"); + addExtensionUnless(c, s, "42704", "LANGUAGE java"); + addExtensionUnless(c, s, "42704", "LANGUAGE javaU"); + } + + /** + * Absorb obj into the pljava extension, unless it doesn't exist. + * Pass the sqlState expected when an obj of that type doesn't exist. 
+ */ + private static void addExtensionUnless( + Connection c, Statement s, String sqlState, String obj) + throws SQLException + { + Savepoint p = null; + try + { + p = c.setSavepoint(); + s.execute("ALTER EXTENSION pljava ADD " + obj); + c.releaseSavepoint(p); + } + catch ( SQLException sqle ) { - SQLUtils.close(s); - SQLUtils.close(c); + c.rollback(p); + if ( ! sqlState.equals(sqle.getSQLState()) ) + throw sqle; } } + /** + * Create the {@code sqlj} schema, adding an appropriate comment and + * granting {@code USAGE} to {@code public}. + *
<p>
      + * If the schema already exists, whatever comment and permissions it + * may have will not be disturbed. + */ private static void schema( Connection c, Statement s) throws SQLException { @@ -198,12 +561,24 @@ private static void schema( Connection c, Statement s) } } + /** + * Declare PL/Java's language handler functions. + *
<p>
      + * {@code CREATE OR REPLACE} is used so that the library path can be altered + * if this is an upgrade. + *
<p>
      + * All privileges are unconditionally revoked on the handler functions. + * PostgreSQL does not need permissions when it invokes them + * as language handlers. + *
<p>
      + * Each function will have a default comment added if no comment is present. + */ private static void handlers( Connection c, Statement s, String module_path) throws SQLException { s.execute( "CREATE OR REPLACE FUNCTION sqlj.java_call_handler()" + - " RETURNS language_handler" + + " RETURNS pg_catalog.language_handler" + " AS " + eQuote(module_path) + " LANGUAGE C"); s.execute("REVOKE ALL PRIVILEGES" + @@ -224,7 +599,7 @@ private static void handlers( Connection c, Statement s, String module_path) s.execute( "CREATE OR REPLACE FUNCTION sqlj.javau_call_handler()" + - " RETURNS language_handler" + + " RETURNS pg_catalog.language_handler" + " AS " + eQuote(module_path) + " LANGUAGE C"); s.execute("REVOKE ALL PRIVILEGES" + @@ -242,17 +617,80 @@ private static void handlers( Connection c, Statement s, String module_path) "COMMENT ON FUNCTION sqlj.javau_call_handler() IS '" + "Function-call handler for PL/Java''s untrusted/unsandboxed " + "language.'"); + + s.execute( + "CREATE OR REPLACE FUNCTION sqlj.javau_validator(pg_catalog.oid)" + + " RETURNS pg_catalog.void" + + " AS " + eQuote(module_path) + + " LANGUAGE C"); + s.execute("REVOKE ALL PRIVILEGES" + + " ON FUNCTION sqlj.javau_validator(pg_catalog.oid) FROM public"); + rs = s.executeQuery( + "SELECT pg_catalog.obj_description(CAST(" + + "'sqlj.javau_validator(pg_catalog.oid)' " + + "AS pg_catalog.regprocedure), " + + "'pg_proc')"); + rs.next(); + rs.getString(1); + noComment = rs.wasNull(); + rs.close(); + if ( noComment ) + s.execute( + "COMMENT ON FUNCTION " + + "sqlj.javau_validator(pg_catalog.oid) IS '" + + "Function declaration validator for PL/Java''s " + + "untrusted/unsandboxed language.'"); + + s.execute( + "CREATE OR REPLACE FUNCTION sqlj.java_validator(pg_catalog.oid)" + + " RETURNS pg_catalog.void" + + " AS " + eQuote(module_path) + + " LANGUAGE C"); + s.execute("REVOKE ALL PRIVILEGES" + + " ON FUNCTION sqlj.java_validator(pg_catalog.oid) FROM public"); + rs = s.executeQuery( + "SELECT pg_catalog.obj_description(CAST(" + + "'sqlj.java_validator(pg_catalog.oid)' " + + "AS pg_catalog.regprocedure), " + + "'pg_proc')"); + rs.next(); + rs.getString(1); + noComment = rs.wasNull(); + rs.close(); + if ( noComment ) + s.execute( + "COMMENT ON FUNCTION " + + "sqlj.java_validator(pg_catalog.oid) IS '" + + "Function declaration validator for PL/Java''s " + + "trusted/sandboxed language.'"); } + /** + * Declare PL/Java's basic two (trusted and untrusted) languages. + *
<p>
      + * If not declared already, they will have default permissions and comments + * applied. + *
<p>
      + * If they exist, {@code CREATE OR REPLACE} will be used, which takes care + * of adding the validator handler during an upgrade from a version that + * lacked it. No permission or comment changes are made in this case. + */ private static void languages( Connection c, Statement s) throws SQLException { + String validatorClause = ""; + if ( ! isGpdb() ) + validatorClause = " VALIDATOR sqlj.java_validator"; + + boolean created = false; Savepoint p = null; try { p = c.setSavepoint(); s.execute( - "CREATE TRUSTED LANGUAGE java HANDLER sqlj.java_call_handler"); + "CREATE TRUSTED LANGUAGE java HANDLER sqlj.java_call_handler" + + validatorClause); + created = true; s.execute( "COMMENT ON LANGUAGE java IS '" + "Trusted/sandboxed language for routines and types in " + @@ -267,11 +705,23 @@ private static void languages( Connection c, Statement s) throw sqle; } + if ( ! created ) /* existed already but may need validator added */ + s.execute( + "CREATE OR REPLACE " + + "TRUSTED LANGUAGE java HANDLER sqlj.java_call_handler" + + validatorClause); + + if ( ! validatorClause.isEmpty() ) + validatorClause = " VALIDATOR sqlj.javau_validator"; + + created = false; try { p = c.setSavepoint(); s.execute( - "CREATE LANGUAGE javaU HANDLER sqlj.javau_call_handler"); + "CREATE LANGUAGE javaU HANDLER sqlj.javau_call_handler" + + validatorClause); + created = true; s.execute( "COMMENT ON LANGUAGE javau IS '" + "Untrusted/unsandboxed language for routines and types in " + @@ -284,16 +734,24 @@ private static void languages( Connection c, Statement s) if ( ! "42710".equals(sqle.getSQLState()) ) throw sqle; } + + if ( ! created ) /* existed already but may need validator added */ + s.execute( + "CREATE OR REPLACE " + + "LANGUAGE javaU HANDLER sqlj.javau_call_handler" + + validatorClause); } /** * Execute the deployment descriptor for PL/Java itself, creating the - * expected tables, functions, etc. Will be skipped if tables conforming + * expected tables, functions, etc. + *
<p>
      + * Will be skipped if tables conforming * to the currently expected schema already seem to be there. If an earlier * schema variant is detected, attempt to migrate to the current one. */ private static void deployment( Connection c, Statement s, SchemaVariant sv) - throws SQLException, ParseException + throws SQLException, ParseException, IOException { if ( currentSchema == sv ) return; // assume (optimistically) that means there's nothing to do @@ -304,12 +762,72 @@ private static void deployment( Connection c, Statement s, SchemaVariant sv) return; } - InputStream is = InstallHelper.class.getResourceAsStream("/pljava.ddr"); - String raw = new Scanner(is, "utf-8").useDelimiter("\\A").next(); - SQLDeploymentDescriptor sdd = new SQLDeploymentDescriptor(raw); + deployViaDescriptor( c); + } + + /** + * Only execute the deployment descriptor for PL/Java itself; factored out + * of {@code deployment()} so it can be used also from schema migration to + * avoid duplicating SQL that appears there. + *
<p>
      + * Schema migration will use the wrapper method that changes the effective + * set of recognized implementor tags. + */ + private static void deployViaDescriptor( Connection c) + throws SQLException + { + SQLDeploymentDescriptor sdd; + try(InputStream is = + InstallHelper.class.getResourceAsStream("/pljava.ddr")) + { + CharBuffer cb = + UTF_8.newDecoder().decode( ByteBuffer.wrap( is.readAllBytes())); + sdd = new SQLDeploymentDescriptor(cb.toString()); + } + catch ( ParseException | IOException e ) + { + throw new SQLException( + "Could not load PL/Java's deployment descriptor: " + + e.getMessage(), "XX000", e); + } + sdd.install(c); } + /** + * Only execute the deployment descriptor for PL/Java itself, temporarily + * replacing the default set of implementor tags with a specified set, to + * selectively apply commands appearing in the descriptor. + */ + private static void deployViaDescriptor( + Connection c, Statement s, String implementors) + throws SQLException + { + s.execute( "SET LOCAL pljava.implementors TO " + + s.enquoteLiteral(implementors)); + + deployViaDescriptor( c); + + s.execute( "RESET pljava.implementors"); + } + + /** + * Query the database metadata for existence of a column in a table in the + * {@code sqlj} schema. Pass null for the column to simply check the table's + * existence. + */ + private static boolean hasColumn( + DatabaseMetaData md, String table, String column) + throws SQLException + { + try ( + ResultSet rs = md.getColumns( null, "sqlj", table, column) + ) + { + return rs.next(); + } + } + /** * Detect an existing PL/Java sqlj schema. Tests for changes between schema * variants that have appeared in PL/Java's git history and will return a @@ -318,7 +836,8 @@ private static void deployment( Connection c, Statement s, SchemaVariant sv) * messed up schema that never appeared in the git history, if it happened * to match on the tested parts. The variant EMPTY is returned if nothing is * in the schema (based on a direct query of pg_depend, which ought to be - * reliable) except an entry for the extension if applicable. A null return + * reliable) except an entry for the extension if applicable, or for the + * table temporarily created there during CREATE EXTENSION. A null return * indicates that whatever is there didn't match the tests for any known * variant. 
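The deployViaDescriptor overload above relies on Statement.enquoteLiteral (a JDBC 4.3 default method) to build the SET LOCAL command safely; the sketch below shows the same pattern from the point of view of code running inside the backend. The tag value mirrors the one the REL_1_6_0 migration below passes, and jdbc:default:connection is PL/Java's own in-backend connection, so this only runs from a PL/Java function (and thus inside a transaction, as SET LOCAL requires).

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class ImplementorTagExample
    {
        public static void run() throws Exception
        {
            try (Connection c =
                     DriverManager.getConnection("jdbc:default:connection");
                 Statement s = c.createStatement())
            {
                String tags = "alias_java_language";
                // enquoteLiteral wraps the value in single quotes and doubles
                // any embedded quotes, so it is safe to splice into the command
                s.execute("SET LOCAL pljava.implementors TO "
                    + s.enquoteLiteral(tags));
                // ... run the selectively-tagged commands here ...
                s.execute("RESET pljava.implementors");
            }
        }
    }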
*/ @@ -327,85 +846,101 @@ private static SchemaVariant recognizeSchema( throws SQLException { DatabaseMetaData md = c.getMetaData(); - ResultSet rs = md.getColumns( null, "sqlj", "jar_descriptor", null); - boolean seen = rs.next(); - rs.close(); - if ( seen ) + try ( + ResultSet rs = + md.getProcedures( null, "sqlj", "alias_java_language") + ) + { + if ( rs.next() ) + return SchemaVariant.REL_1_6_0; + } + + if ( hasColumn( md, "jar_descriptor", null) ) return SchemaVariant.UNREL20130301b; - rs = md.getColumns( null, "sqlj", "jar_descriptors", null); - seen = rs.next(); - rs.close(); - if ( seen ) + if ( hasColumn( md, "jar_descriptors", null) ) return SchemaVariant.UNREL20130301a; - rs = md.getColumns( null, "sqlj", "jar_repository", "jarmanifest"); - seen = rs.next(); - rs.close(); - if ( seen ) + if ( hasColumn( md, "jar_repository", "jarmanifest") ) return SchemaVariant.REL_1_3_0; - rs = md.getColumns( null, "sqlj", "typemap_entry", null); - seen = rs.next(); - rs.close(); - if ( seen ) + if ( hasColumn( md, "typemap_entry", null) ) return SchemaVariant.UNREL20060212; - rs = md.getColumns( null, "sqlj", "jar_repository", "jarowner"); - if ( rs.next() ) + try ( + ResultSet rs = + md.getColumns( null, "sqlj", "jar_repository", "jarowner") + ) { - int t = rs.getInt("DATA_TYPE"); - rs.close(); - if ( VARCHAR == t ) - return SchemaVariant.UNREL20060125; - return SchemaVariant.REL_1_1_0; + if ( rs.next() ) + { + if ( VARCHAR == rs.getInt("DATA_TYPE") ) + return SchemaVariant.UNREL20060125; + return SchemaVariant.REL_1_1_0; + } } - rs.close(); - rs = md.getColumns( null, "sqlj", "jar_repository", "deploymentdesc"); - seen = rs.next(); - rs.close(); - if ( seen ) + if ( hasColumn( md, "jar_repository", "deploymentdesc") ) return SchemaVariant.REL_1_0_0; - rs = md.getColumns( null, "sqlj", "jar_entry", null); - seen = rs.next(); - rs.close(); - if ( seen ) + if ( hasColumn( md, "jar_entry", null) ) return SchemaVariant.UNREL20040121; - rs = md.getColumns( null, "sqlj", "jar_repository", "jarimage"); - seen = rs.next(); - rs.close(); - if ( seen ) + if ( hasColumn( md, "jar_repository", "jarimage") ) return SchemaVariant.UNREL20040120; - PreparedStatement ps = c.prepareStatement( "SELECT count(*) " + - "FROM pg_catalog.pg_depend d, pg_catalog.pg_namespace n " + - "WHERE refclassid = 'pg_catalog.pg_namespace'::regclass " + - "AND refobjid = n.oid AND nspname = 'sqlj' " + - "AND deptype = 'n' " + - "AND NOT EXISTS ( " + - " SELECT 1 FROM " + - " pg_catalog.pg_class sqc JOIN pg_catalog.pg_namespace sqn " + - " ON relnamespace = sqn.oid " + - " WHERE " + - " nspname = 'pg_catalog' AND relname = 'pg_extension' " + - " AND classid = sqc.oid " + - " OR " + - " nspname = 'sqlj' AND relname = ?" + - " AND classid = 'pg_catalog.pg_class'::regclass " + - " AND objid = sqc.oid)"); - ps.setString(1, loadpath_tbl); - rs = ps.executeQuery(); - if ( rs.next() && 0 == rs.getInt(1) ) - { - rs.close(); - ps.close(); - return SchemaVariant.EMPTY; + try ( + PreparedStatement stmt = Checked.Supplier.use((() -> + { + PreparedStatement ps = c.prepareStatement( + /* + * Is the sqlj schema 'empty'? Count the pg_depend + * type 'n' dependency entries referring to the sqlj + * namespace ... + */ + "SELECT count(*)" + + "FROM" + + " pg_catalog.pg_depend d, pg_catalog.pg_namespace n " + + "WHERE" + + " refclassid OPERATOR(pg_catalog.=) n.tableoid " + + " AND refobjid OPERATOR(pg_catalog.=) n.oid" + + " AND nspname OPERATOR(pg_catalog.=) 'sqlj' " + + " AND deptype OPERATOR(pg_catalog.=) 'n' " + + /* + * ... 
but exclude from the count, if present: + */ + " AND NOT EXISTS ( " + + " SELECT 1 FROM " + + " pg_catalog.pg_class sqc" + + " JOIN pg_catalog.pg_namespace sqn" + + " ON relnamespace OPERATOR(pg_catalog.=) sqn.oid " + + " WHERE " + + /* + * (1) any dependency that is an extension (d.classid + * identifies pg_catalog.pg_extension) ... + */ + " nspname OPERATOR(pg_catalog.=) 'pg_catalog'" + + " AND" + + " relname OPERATOR(pg_catalog.=) 'pg_extension' " + + " AND d.classid OPERATOR(pg_catalog.=) sqc.oid " + + " OR " + + /* + * (2) any dependency that is the loadpath_tbl table + * we temporarily create in the extension script. + */ + " nspname OPERATOR(pg_catalog.=) 'sqlj'" + + " AND relname OPERATOR(pg_catalog.=) ?" + + " AND classid OPERATOR(pg_catalog.=) sqc.tableoid" + + " AND objid OPERATOR(pg_catalog.=) sqc.oid)"); + ps.setString(1, loadpath_tbl); + return ps; + })).get(); + ResultSet rs = stmt.executeQuery(); + ) + { + if ( rs.next() && 0 == rs.getInt(1) ) + return SchemaVariant.EMPTY; } - rs.close(); - ps.close(); return null; } @@ -416,10 +951,22 @@ private static SchemaVariant recognizeSchema( * up to date. */ private static final SchemaVariant currentSchema = - SchemaVariant.REL_1_5_0; + SchemaVariant.REL_1_6_0; private enum SchemaVariant { + REL_1_6_0 ("5565a3c9c4b8d6dd0b0f7fff4090d4e8120dc10a") + { + @Override + void migrateFrom( SchemaVariant sv, Connection c, Statement s) + throws SQLException + { + if ( REL_1_5_0 != sv ) + REL_1_5_0.migrateFrom( sv, c, s); + + deployViaDescriptor( c, s, "alias_java_language"); + } + }, REL_1_5_0 ("c51cffa34acd5a228325143ec29563174891a873") { @Override @@ -432,7 +979,7 @@ void migrateFrom( SchemaVariant sv, Connection c, Statement s) s.execute( "CREATE TABLE sqlj.jar_descriptor " + "(jarId, ordinal, entryId) AS SELECT " + - "CAST(jarId AS INT), CAST(0 AS INT2), " + + "CAST(jarId AS INT), CAST(0 AS pg_catalog.INT2), " + "deploymentDesc FROM sqlj.jar_repository " + "WHERE deploymentDesc IS NOT NULL"); s.execute( @@ -469,6 +1016,28 @@ void migrateFrom( SchemaVariant sv, Connection c, Statement s) UNREL20040120 ("5e4131738cd095b7ff6367d64f809f6cec6a7ba7"), EMPTY (null); + static final SchemaVariant REL_1_6_10 = REL_1_6_0; + static final SchemaVariant REL_1_6_9 = REL_1_6_0; + static final SchemaVariant REL_1_6_8 = REL_1_6_0; + static final SchemaVariant REL_1_6_7 = REL_1_6_0; + static final SchemaVariant REL_1_6_6 = REL_1_6_0; + static final SchemaVariant REL_1_6_5 = REL_1_6_0; + static final SchemaVariant REL_1_6_4 = REL_1_6_0; + static final SchemaVariant REL_1_6_3 = REL_1_6_0; + static final SchemaVariant REL_1_6_2 = REL_1_6_0; + static final SchemaVariant REL_1_6_1 = REL_1_6_0; + + static final SchemaVariant REL_1_5_8 = REL_1_5_0; + static final SchemaVariant REL_1_5_7 = REL_1_5_0; + static final SchemaVariant REL_1_5_6 = REL_1_5_0; + static final SchemaVariant REL_1_5_5 = REL_1_5_0; + static final SchemaVariant REL_1_5_4 = REL_1_5_0; + static final SchemaVariant REL_1_5_3 = REL_1_5_0; + static final SchemaVariant REL_1_5_2 = REL_1_5_0; + static final SchemaVariant REL_1_5_1 = REL_1_5_0; + static final SchemaVariant REL_1_5_1_BETA3 = REL_1_5_0; + static final SchemaVariant REL_1_5_1_BETA2 = REL_1_5_0; + static final SchemaVariant REL_1_5_1_BETA1 = REL_1_5_0; static final SchemaVariant REL_1_5_0_BETA3 = REL_1_5_0; static final SchemaVariant REL_1_5_0_BETA2 = REL_1_5_0_BETA3; static final SchemaVariant REL_1_5_0_BETA1 = REL_1_5_0_BETA2; diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/JavaWrapper.java 
b/pljava/src/main/java/org/postgresql/pljava/internal/JavaWrapper.java deleted file mode 100644 index 5b21ecc8..00000000 --- a/pljava/src/main/java/org/postgresql/pljava/internal/JavaWrapper.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root directory of this distribution or at - * http://eng.tada.se/osprojects/COPYRIGHT.html - */ -package org.postgresql.pljava.internal; - -public abstract class JavaWrapper -{ - private final long m_pointer; - - /** - * Creates an instance of this class that will be attached to a native - * structure represented by pointer. This constructor must only be called - * from native code. - * - * @param pointer The wapped pointer. - */ - protected JavaWrapper(long pointer) - { - m_pointer = pointer; - } - - public void finalize() - { - synchronized(Backend.THREADLOCK) - { - _free(m_pointer); - } - } - - /** - * Returns the native pointer - */ - public final long getNativePointer() - { - return m_pointer; - } - - /** - * Calls the C function pfree() with the given pointer as an argument. - * Subclasses may override this method if special handling is needed when - * freeing up the object. - * - * @param pointer The pointer to free. - */ - protected native void _free(long pointer); -} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/LargeObject.java b/pljava/src/main/java/org/postgresql/pljava/internal/LargeObject.java deleted file mode 100644 index 4725ae19..00000000 --- a/pljava/src/main/java/org/postgresql/pljava/internal/LargeObject.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright (c) 2004-2016 Tada AB and other contributors, as listed below. - * - * All rights reserved. This program and the accompanying materials - * are made available under the terms of the The BSD 3-Clause License - * which accompanies this distribution, and is available at - * http://opensource.org/licenses/BSD-3-Clause - * - * Contributors: - * Tada AB - * Chapman Flack - */ -package org.postgresql.pljava.internal; - -import java.sql.SQLException; - -/** - * The LargeObject correspons to the internal PostgreSQL - * LargeObjectDesc. - * - * @author Thomas Hallgren - */ -public class LargeObject extends JavaWrapper -{ - /** - * Write mode flag to be passed to {@link #create} and {@link #open} - */ - public static final int INV_WRITE = 0x00020000; - - /** - * Read mode flag to be passed to {@link #create} and {@link #open} - */ - public static final int INV_READ = 0x00040000; - - /** - * Flag returned by {@link #create} and {@link #open} - */ - public static final int IFS_RDLOCK = (1 << 0); - - /** - * Flag returned by {@link #create} and {@link #open} - */ - public static final int IFS_WRLOCK = (1 << 1); - - /** - * Flag to be passed to {@link #seek} denoting that the - * offset parameter should be treated as an absolute address. - */ - public static final int SEEK_SET = 0; - - /** - * Flag to be passed to {@link #seek} denoting that the - * offset parameter should be treated relative to the current - * address. - */ - public static final int SEEK_CUR = 1; - - /** - * Flag to be passed to {@link #seek} denoting that the - * offset parameter should be treated relative to the end - * of the data. - */ - public static final int SEEK_END = 2; - - LargeObject(long nativePointer) - { - super(nativePointer); - } - - /** - * Creates a LargeObject handle and returns the {@link Oid} of - * that handle. - * @param flags Flags to use for creation. 
- * @return A Oid that can be used in a call to {@link #open(Oid, int)} - * or {@link #drop(Oid)}. - * @throws SQLException - */ - public static Oid create(int flags) - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - return _create(flags); - } - } - - public static LargeObject open(Oid lobjId, int flags) - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - return _open(lobjId, flags); - } - } - - public void close() - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - _close(this.getNativePointer()); - } - } - - public static int drop(Oid lobjId) - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - return _drop(lobjId); - } - } - - public Oid getId() - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - return _getId(this.getNativePointer()); - } - } - - public long length() - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - return _length(this.getNativePointer()); - } - } - - public long seek(long offset, int whence) - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - return _seek(this.getNativePointer(), offset, whence); - } - } - - public long tell() - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - return _tell(this.getNativePointer()); - } - } - - public void truncate(long offset) - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - _truncate(this.getNativePointer(), offset); - } - } - - public int read(byte[] buf) - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - return _read(this.getNativePointer(), buf); - } - } - - public int write(byte[] buf) - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - return _write(this.getNativePointer(), buf); - } - } - - private static native Oid _create(int flags) - throws SQLException; - - private static native int _drop(Oid lobjId) - throws SQLException; - - private static native LargeObject _open(Oid lobjId, int flags) - throws SQLException; - - private static native void _close(long pointer) - throws SQLException; - - private static native Oid _getId(long pointer) - throws SQLException; - - private static native long _length(long pointer) - throws SQLException; - - private static native long _seek(long pointer, long offset, int whence) - throws SQLException; - - private static native long _tell(long pointer) - throws SQLException; - - private static native void _truncate(long pointer, long offset) - throws SQLException; - - private static native int _read(long pointer, byte[] buf) - throws SQLException; - - private static native int _write(long pointer, byte[] buf) - throws SQLException; -} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/MarkableSequenceInputStream.java b/pljava/src/main/java/org/postgresql/pljava/internal/MarkableSequenceInputStream.java new file mode 100644 index 00000000..e05305f7 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/MarkableSequenceInputStream.java @@ -0,0 +1,528 @@ +/* + * Copyright (c) 2018-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.io.InputStream; +import java.io.SequenceInputStream; +import java.io.IOException; + +import java.lang.reflect.UndeclaredThrowableException; + +import java.util.LinkedList; +import java.util.List; +import java.util.ListIterator; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CancellationException; + +/** + * Version of {@link SequenceInputStream} that supports + * {@code mark} and {@code reset}, to the extent its constituent input streams + * do. + *
<p>
      + * This class implements {@code mark} and {@code reset} by calling the + * corresponding methods on the underlying streams; it does not add buffering + * or have any means of providing {@code mark} and {@code reset} support if + * the underlying streams do not. + *
<p>
      + * As with {@code SequenceInputStream}, each underlying stream, when completely + * read and no longer needed, is closed to free resources. This instance itself + * will remain in "open at EOF" condition until explicitly closed, but does not + * prevent reclamation of the underlying streams. + *
<p>
      + * Unlike {@code SequenceInputStream}, this class can keep underlying streams + * open, after fully reading them, if a {@code mark} has been set, so that + * {@code reset} will be possible. When a mark is no longer needed, it can be + * canceled (by calling {@code mark} with a {@code readlimit} of 0) to again + * allow the underlying streams to be reclaimed as soon as possible. + */ +public class MarkableSequenceInputStream extends InputStream +{ + /** + * A sentinel value, needed because a {@code BlockingQueue} does not allow + * a null value to be enqueued. + */ + public static final InputStream NO_MORE = new InputStream() + { + @Override public int read() { return -1; } + }; + + private boolean m_closed = false; + private InputStream m_markedStream = null; + + private ListIterator m_streams; + private int m_readlimit_orig; + private int m_readlimit_curr; + private boolean m_markSupported; + private boolean m_markSupported_determined; + + /** + * Create a {@code MarkableSequenceInputStream} from one or more existing + * input streams. + * @param streams Sequence of {@code InputStream}s that will be read from + * in order. + * @throws NullPointerException if {@code streams} is {@code null}, or + * contains {@code null} for any stream. + */ + public MarkableSequenceInputStream(InputStream... streams) + { + if ( null == streams ) + throw new NullPointerException("MarkableSequenceInputStream(null)"); + LinkedList isl = new LinkedList<>(); + for ( InputStream s : streams ) + { + if ( null == s ) + throw new NullPointerException( + "MarkableSequenceInputStream(..., null, ...)"); + isl.add(s); + } + m_streams = isl.listIterator(); + } + + /** + * A {@code MarkableSequenceInputStream} that will receive streams to read, + * in order, over a {@code BlockingQueue}. + *
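A brief usage sketch of the class documented above; the example class name, byte values, and read pattern are invented for illustration, and it assumes code compiled with access to the org.postgresql.pljava.internal package:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    import org.postgresql.pljava.internal.MarkableSequenceInputStream;

    public class MSISExample
    {
        public static void main(String[] args) throws IOException
        {
            InputStream a = new ByteArrayInputStream(new byte[] { 1, 2, 3 });
            InputStream b = new ByteArrayInputStream(new byte[] { 4, 5, 6 });
            try ( InputStream in = new MarkableSequenceInputStream(a, b) )
            {
                in.read();      // reads 1 from the first stream
                in.mark(10);    // set a mark, good for up to 10 bytes
                in.read();      // 2
                in.read();      // 3
                in.read();      // 4, the read continues into the second stream
                in.reset();     // back to just after the first byte
                in.read();      // 2 again
                in.mark(0);     // cancel the mark so streams can be reclaimed
            }
        }
    }

(ByteArrayInputStream supports mark/reset, so markSupported() on the combined stream reports true.)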
<p>
      + * The thread supplying the queue should enqueue the value {@link #NO_MORE} + * following the last actual {@code InputStream} to read. (The sentinel is + * needed because a {@code BlockingQueue} does not allow null values.) + * @param queue Source of input streams to read. + * @throws NullPointerException if {@code queue} is {@code null}. + */ + public MarkableSequenceInputStream(BlockingQueue queue) + { + m_streams = + new FetchingListIterator<>( + new LinkedList(), queue, NO_MORE); + } + + /* + * This method depends on an invariant: the iterator's next() method, when + * called here, will return the current, active stream. Each consumer method + * is responsible for preserving that invariant by calling previous() once + * after obtaining, but not hitting EOF on, a stream from next(). + */ + private InputStream currentStream() throws IOException + { + if ( m_closed ) + throw new IOException("I/O on closed InputStream"); + if ( m_streams.hasNext() ) + return m_streams.next(); + return null; + } + + /* + * The invariant here is that a "current" stream was recently obtained, and + * can be re-obtained with previous(). This should not be called unless + * there is nothing left to read from that stream. + */ + private InputStream nextStream() throws IOException + { + if ( null == m_markedStream ) + { + m_streams.previous().close(); + assert ! m_streams.hasPrevious(); + m_streams.remove(); + if ( m_streams.hasNext() ) + return m_streams.next(); + } + else if ( m_streams.hasNext() ) + { + InputStream is = m_streams.next(); + is.mark(m_readlimit_curr); + return is; + } + return null; + } + + private void decrementLimit(long bytes) + { + assert 0 < bytes; + if ( null == m_markedStream ) + return; + if ( bytes < m_readlimit_curr ) + { + m_readlimit_curr -= bytes; + return; + } + mark(0); /* undo markage of underlying streams */ + } + + @Override + public int read() throws IOException + { + synchronized ( this ) + { + for ( InputStream s = currentStream(); null != s; s = nextStream() ) + { + int c = s.read(); + if ( -1 != c ) + { + decrementLimit(1); + m_streams.previous(); /* maintain "current" invariant */ + return c; + } + } + return -1; + } + } + + @Override + public int read(byte[] b, int off, int len) throws IOException + { + synchronized ( this ) + { + for ( InputStream s = currentStream(); null != s; s = nextStream() ) + { + int rslt = s.read(b, off, len); + if ( -1 != rslt ) + { + decrementLimit(rslt); + m_streams.previous(); /* maintain "current" invariant */ + return rslt; + } + } + return -1; + } + } + + @Override + public long skip(long n) throws IOException + { + synchronized ( this ) + { + long skipped; + long totalSkipped = 0; + InputStream s = currentStream(); + while ( null != s ) + { + skipped = s.skip(n); + n -= skipped; + decrementLimit(skipped); + totalSkipped += skipped; + if ( 0 >= n ) + { + m_streams.previous(); /* maintain "current" invariant */ + break; + } + /* + * A short count from skip doesn't have to mean EOF was reached. + * A read() will settle that question, though. + */ + if ( -1 != s.read() ) + { + n -= 1; + decrementLimit(1); + totalSkipped += 1; + continue; + } + /* + * Ok, it was EOF on that underlying stream. 
+ */ + s = nextStream(); + } + return totalSkipped; + } + } + + @Override + public int available() throws IOException + { + synchronized ( this ) + { + if ( m_closed ) + return 0; + InputStream s = currentStream(); + if ( null == s ) + return 0; + m_streams.previous(); /* maintain "current" invariant */ + return s.available(); + } + } + + @Override + public void close() throws IOException + { + synchronized ( this ) + { + if ( m_closed ) + return; + while ( m_streams.hasPrevious() ) + m_streams.previous(); + while ( m_streams.hasNext() ) + m_streams.next().close(); + m_streams = null; + m_closed = true; + } + } + + /** + * Marks the current position in this input stream. In this implementation, + * it is possible to 'cancel' a mark, by passing this method a + * {@code readlimit} of zero, returning the stream immediately to the state + * of having no mark. + */ + @Override + public void mark(int readlimit) + { + synchronized ( this ) + { + if ( m_closed ) + return; + + InputStream activeStream = null; + if ( m_streams.hasNext() ) + { + m_streams.next(); + activeStream = m_streams.previous(); + } + + if ( null != m_markedStream ) + { + for ( InputStream is = activeStream; is != m_markedStream; ) + is = m_streams.previous(); + /* + * Whether the above loop executed zero or more times, the last + * event on m_streams was a previous(), and returned the marked + * stream, and the next next() will also. + */ + m_markedStream = null; // so nextStream() will close things + /* + * It is safe to start off this loop with next(), because it + * will return the formerly marked stream, known to exist. + */ + for ( InputStream is = m_streams.next(); is != activeStream; ) + { + try + { + is = nextStream(); // will close stream and return next + } + catch ( IOException e ) + { + throw new UndeclaredThrowableException(e); + } + } + /* + * Leave the invariant the same whether this if block was taken + * or not. + */ + if ( null != activeStream ) + m_streams.previous(); + } + + if ( 0 >= readlimit ) /* setting instantly-invalid mark */ + { + m_readlimit_curr = m_readlimit_orig = 0; + return; + } + m_readlimit_curr = m_readlimit_orig = readlimit; + + if ( null == activeStream ) /* setting mark at EOF */ + return; + + m_markedStream = activeStream; + activeStream.mark(readlimit); + } + } + + @Override + public void reset() throws IOException + { + synchronized ( this ) + { + if ( m_closed ) + throw new IOException("reset on closed InputStream"); + + if ( null == m_markedStream ) + { + if ( 0 < m_readlimit_orig ) + return; // the mark-at-EOF case; reset allowed, no effect + throw new IOException("reset without mark"); + } + + InputStream is = currentStream(); + /* + * 'is' right now is the active stream, or null if we are at EOF; + * either way the first call to previous() coming up below will + * return an existing stream, the one we need (in reverse order) + * to reset first. + */ + + while ( true ) + { + is = m_streams.previous(); + is.reset(); + if ( is == m_markedStream ) + break; + is.mark(0); // release possible resources + } + m_readlimit_curr = m_readlimit_orig; + /* + * The invariant (that the next next() will return the stream we + * just touched) is already satisfied, as we obtained it with + * previous() above. + */ + } + } + + /** + * Tests if this input stream supports the mark and reset methods. + *
<p>
      + * By the API spec, this method's return is "an invariant property of a + * particular input stream instance." For any instance of this class, the + * result is determined by the first call to this method, and does not + * change thereafter. At the first call, the result is determined only by + * the underlying input streams remaining to be read (or, if a mark has been + * set, which is possible before checking this method, then by the + * underlying input streams including and following the one that was current + * when the mark was set). The result will be {@code true} unless any of + * those underlying streams reports it as {@code false}. + */ + @Override + public boolean markSupported() + { + synchronized ( this ) + { + if ( m_markSupported_determined ) + return m_markSupported; + if ( m_closed ) + return false; + + InputStream activeStream = null; + if ( m_streams.hasNext() ) + { + m_streams.next(); + activeStream = m_streams.previous(); + } + + if ( null != m_markedStream ) + { + for ( InputStream is = activeStream; is != m_markedStream; ) + is = m_streams.previous(); + } + + /* + * The next next() returns the marked stream (if there is one), or + * the active stream (if there is one). + */ + m_markSupported = true; + while ( m_streams.hasNext() ) + if ( ! m_streams.next().markSupported() ) + m_markSupported = false; + /* + * We've run to the end of the streams list. Back up to the active + * one. + */ + for ( InputStream is = null; is != activeStream; ) + is = m_streams.previous(); + + /* + * The "current" invariant is satisfied. + */ + m_markSupported_determined = true; + return m_markSupported; + } + } + + /** + * A {@code ListIterator} that will fetch an element from a + * {@code BlockingQueue} whenever {@code hasNext} would (otherwise) + * return {@code false}, adding it to the end of the list where the next + * {@code next()} will retrieve it. + *
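A sketch of the queue-fed mode this iterator makes possible, as described for the BlockingQueue constructor above; the producer thread, queue choice, and string contents are invented for illustration:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    import org.postgresql.pljava.internal.MarkableSequenceInputStream;
    import static org.postgresql.pljava.internal.MarkableSequenceInputStream.NO_MORE;

    public class QueueFedExample
    {
        public static void main(String[] args) throws IOException
        {
            BlockingQueue<InputStream> q = new LinkedBlockingQueue<>();
            InputStream in = new MarkableSequenceInputStream(q);

            new Thread(() ->
            {
                try
                {
                    q.put(new ByteArrayInputStream(
                        "Hello, ".getBytes(StandardCharsets.UTF_8)));
                    q.put(new ByteArrayInputStream(
                        "world".getBytes(StandardCharsets.UTF_8)));
                    q.put(NO_MORE);   // sentinel; null cannot be enqueued
                }
                catch ( InterruptedException e )
                {
                    Thread.currentThread().interrupt();
                }
            }).start();

            int c;
            while ( -1 != (c = in.read()) )   // blocks until the producer supplies data
                System.out.print((char) c);
            in.close();
        }
    }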
<p>
      + * It is possible for the {@code hasNext}, {@code next}, and + * {@code nextIndex} methods to throw {@link CancellationException}, if the + * thread is interrupted while they await something on the queue. + */ + public static class FetchingListIterator implements ListIterator + { + private final ListIterator m_li; + private BlockingQueue m_q; + private final E m_sentinel; + + /** + * Construct a {@code FetchingListIterator} given an existing list, + * a {@code BlockingQueue}, and a particular instance of the list's + * element type to use as an end-of-queue sentinel (it is not possible + * to enqueue a null value on a {@code BlockingQueue}). + * @param list An existing list. + * @param queue A blocking queue whose elements will be taken in order + * following any existing elements in the original list. + * @param sentinel A value that the supplier, feeding the blocking + * queue, will enqueue when no more actual values will be forthcoming. + * When an element is dequeued that matches this sentinel (per reference + * equality), it is not added to the list, and nothing more will be + * fetched from the queue. + * @throws NullPointerException if any parameter is null. + */ + public FetchingListIterator( + List list, BlockingQueue queue, E sentinel) + { + if ( null == list || null == queue || null == sentinel ) + throw new NullPointerException( + "a null parameter was passed to FetchingListIterator"); + + m_li = list.listIterator(); + m_q = queue; + m_sentinel = sentinel; + } + + @Override + public boolean hasNext() + { + boolean has = m_li.hasNext(); + E e; + if ( has || null == m_q ) + return has; + try + { + e = m_q.take(); + } + catch ( InterruptedException ex ) + { + m_q = null; + throw (CancellationException) + new CancellationException("Interrupted waiting for input") + .initCause(ex); + } + if ( m_sentinel == e ) + { + m_q = null; + return has; + } + m_li.add(e); + m_li.previous(); + return true; + } + + @Override + public E next() + { + hasNext(); + return m_li.next(); + } + + @Override + public int nextIndex() + { + hasNext(); + return m_li.nextIndex(); + } + + @Override public void add(E e) { m_li.add(e); } + @Override public boolean hasPrevious() { return m_li.hasPrevious(); } + @Override public E previous() { return m_li.previous(); } + @Override public int previousIndex() { return m_li.previousIndex(); } + @Override public void remove() { m_li.remove(); } + @Override public void set(E e) { m_li.set(e); } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/MarkableSequenceReader.java b/pljava/src/main/java/org/postgresql/pljava/internal/MarkableSequenceReader.java new file mode 100644 index 00000000..82b101d8 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/MarkableSequenceReader.java @@ -0,0 +1,387 @@ +/* + * Copyright (c) 2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.io.Reader; +import java.io.IOException; + +import java.lang.reflect.UndeclaredThrowableException; + +import java.util.LinkedList; +import java.util.List; +import java.util.ListIterator; + +/** + * Like a {@link MarkableSequenceInputStream} but for characters. + *
<p>
      + * This class implements {@code mark} and {@code reset} by calling the + * corresponding methods on the underlying readers; it does not add buffering + * or have any means of providing {@code mark} and {@code reset} support if + * the underlying readers do not. + *
<p>
      + * As with {@code SequenceInputStream}, each underlying reader, when completely + * read and no longer needed, is closed to free resources. This instance itself + * will remain in "open at EOF" condition until explicitly closed, but does not + * prevent reclamation of the underlying readers. + *
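A compact sketch of the character-stream analogue, using readers invented for illustration; note that a single array read returns only what the current underlying reader can supply, and the next call moves on to the following reader:

    import java.io.IOException;
    import java.io.Reader;
    import java.io.StringReader;

    import org.postgresql.pljava.internal.MarkableSequenceReader;

    public class MSRExample
    {
        public static void main(String[] args) throws IOException
        {
            try ( Reader r = new MarkableSequenceReader(
                new StringReader("abc"), new StringReader("def")) )
            {
                char[] buf = new char[4];
                r.mark(10);                 // StringReader supports mark/reset
                int n = r.read(buf, 0, 4);  // n == 3: "abc" from the first reader
                n = r.read(buf, 0, 4);      // n == 3: "def" from the second
                r.reset();                  // back to where the mark was set
            }
        }
    }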
<p>
      + * Unlike {@code SequenceInputStream}, this class can keep underlying readers + * open, after fully reading them, if a {@code mark} has been set, so that + * {@code reset} will be possible. When a mark is no longer needed, it can be + * canceled (by calling {@code mark} with a {@code readlimit} of 0) to again + * allow the underlying readers to be reclaimed as soon as possible. + */ +public class MarkableSequenceReader extends Reader +{ + private boolean m_closed = false; + private Reader m_markedStream = null; + + private ListIterator m_streams; + private int m_readlimit_orig; + private int m_readlimit_curr; + private boolean m_markSupported; + private boolean m_markSupported_determined; + + /** + * Create a {@code MarkableSequenceReader} from one or more existing + * readers. + * @param streams Sequence of {@code Reader}s that will be read from + * in order. + * @throws NullPointerException if {@code streams} is {@code null}, or + * contains {@code null} for any stream. + */ + public MarkableSequenceReader(Reader... streams) + { + if ( null == streams ) + throw new NullPointerException("MarkableSequenceReader(null)"); + LinkedList isl = new LinkedList<>(); + for ( Reader s : streams ) + { + if ( null == s ) + throw new NullPointerException( + "MarkableSequenceReader(..., null, ...)"); + isl.add(s); + } + m_streams = isl.listIterator(); + } + + /* + * This method depends on an invariant: the iterator's next() method, when + * called here, will return the current, active stream. Each consumer method + * is responsible for preserving that invariant by calling previous() once + * after obtaining, but not hitting EOF on, a stream from next(). + */ + private Reader currentStream() throws IOException + { + if ( m_closed ) + throw new IOException("I/O on closed Reader"); + if ( m_streams.hasNext() ) + return m_streams.next(); + return null; + } + + /* + * The invariant here is that a "current" stream was recently obtained, and + * can be re-obtained with previous(). This should not be called unless + * there is nothing left to read from that stream. + */ + private Reader nextStream() throws IOException + { + if ( null == m_markedStream ) + { + m_streams.previous().close(); + assert ! 
m_streams.hasPrevious(); + m_streams.remove(); + if ( m_streams.hasNext() ) + return m_streams.next(); + } + else if ( m_streams.hasNext() ) + { + Reader is = m_streams.next(); + is.mark(m_readlimit_curr); + return is; + } + return null; + } + + private void decrementLimit(long bytes) throws IOException + { + assert 0 < bytes; + if ( null == m_markedStream ) + return; + if ( bytes < m_readlimit_curr ) + { + m_readlimit_curr -= bytes; + return; + } + mark(0); /* undo markage of underlying streams */ + } + + @Override + public int read() throws IOException + { + synchronized ( this ) + { + for ( Reader s = currentStream(); null != s; s = nextStream() ) + { + int c = s.read(); + if ( -1 != c ) + { + decrementLimit(1); + m_streams.previous(); /* maintain "current" invariant */ + return c; + } + } + return -1; + } + } + + @Override + public int read(char[] b, int off, int len) throws IOException + { + synchronized ( this ) + { + for ( Reader s = currentStream(); null != s; s = nextStream() ) + { + int rslt = s.read(b, off, len); + if ( -1 != rslt ) + { + decrementLimit(rslt); + m_streams.previous(); /* maintain "current" invariant */ + return rslt; + } + } + return -1; + } + } + + @Override + public long skip(long n) throws IOException + { + synchronized ( this ) + { + long skipped; + long totalSkipped = 0; + Reader s = currentStream(); + while ( null != s ) + { + skipped = s.skip(n); + n -= skipped; + decrementLimit(skipped); + totalSkipped += skipped; + if ( 0 >= n ) + { + m_streams.previous(); /* maintain "current" invariant */ + break; + } + /* + * A short count from skip doesn't have to mean EOF was reached. + * A read() will settle that question, though. + */ + if ( -1 != s.read() ) + { + n -= 1; + decrementLimit(1); + totalSkipped += 1; + continue; + } + /* + * Ok, it was EOF on that underlying stream. + */ + s = nextStream(); + } + return totalSkipped; + } + } + + @Override + public void close() throws IOException + { + synchronized ( this ) + { + if ( m_closed ) + return; + while ( m_streams.hasPrevious() ) + m_streams.previous(); + while ( m_streams.hasNext() ) + m_streams.next().close(); + m_streams = null; + m_closed = true; + } + } + + /** + * Marks the current position in this reader. In this implementation, + * it is possible to 'cancel' a mark, by passing this method a + * {@code readlimit} of zero, returning the reader immediately to the state + * of having no mark. + */ + @Override + public void mark(int readlimit) throws IOException + { + synchronized ( this ) + { + if ( m_closed ) + return; + + Reader activeStream = null; + if ( m_streams.hasNext() ) + { + m_streams.next(); + activeStream = m_streams.previous(); + } + + if ( null != m_markedStream ) + { + for ( Reader is = activeStream; is != m_markedStream; ) + is = m_streams.previous(); + /* + * Whether the above loop executed zero or more times, the last + * event on m_streams was a previous(), and returned the marked + * stream, and the next next() will also. + */ + m_markedStream = null; // so nextStream() will close things + /* + * It is safe to start off this loop with next(), because it + * will return the formerly marked stream, known to exist. + */ + for ( Reader is = m_streams.next(); is != activeStream; ) + { + try + { + is = nextStream(); // will close stream and return next + } + catch ( IOException e ) + { + throw new UndeclaredThrowableException(e); + } + } + /* + * Leave the invariant the same whether this if block was taken + * or not. 
+ */ + if ( null != activeStream ) + m_streams.previous(); + } + + if ( 0 >= readlimit ) /* setting instantly-invalid mark */ + { + m_readlimit_curr = m_readlimit_orig = 0; + return; + } + m_readlimit_curr = m_readlimit_orig = readlimit; + + if ( null == activeStream ) /* setting mark at EOF */ + return; + + m_markedStream = activeStream; + activeStream.mark(readlimit); + } + } + + @Override + public void reset() throws IOException + { + synchronized ( this ) + { + if ( m_closed ) + throw new IOException("reset on closed Reader"); + + if ( null == m_markedStream ) + { + if ( 0 < m_readlimit_orig ) + return; // the mark-at-EOF case; reset allowed, no effect + throw new IOException("reset without mark"); + } + + Reader is = currentStream(); + /* + * 'is' right now is the active stream, or null if we are at EOF; + * either way the first call to previous() coming up below will + * return an existing stream, the one we need (in reverse order) + * to reset first. + */ + + while ( true ) + { + is = m_streams.previous(); + is.reset(); + if ( is == m_markedStream ) + break; + is.mark(0); // release possible resources + } + m_readlimit_curr = m_readlimit_orig; + /* + * The invariant (that the next next() will return the stream we + * just touched) is already satisfied, as we obtained it with + * previous() above. + */ + } + } + + /** + * Tests if this reader supports the mark and reset methods. + *
<p>
      + * For any instance of this class, the + * result is determined by the first call to this method, and does not + * change thereafter. At the first call, the result is determined only by + * the underlying readers remaining to be read (or, if a mark has been + * set, which is possible before checking this method, then by the + * underlying readers including and following the one that was current + * when the mark was set). The result will be {@code true} unless any of + * those underlying readers reports it as {@code false}. + */ + @Override + public boolean markSupported() + { + synchronized ( this ) + { + if ( m_markSupported_determined ) + return m_markSupported; + if ( m_closed ) + return false; + + Reader activeStream = null; + if ( m_streams.hasNext() ) + { + m_streams.next(); + activeStream = m_streams.previous(); + } + + if ( null != m_markedStream ) + { + for ( Reader is = activeStream; is != m_markedStream; ) + is = m_streams.previous(); + } + + /* + * The next next() returns the marked stream (if there is one), or + * the active stream (if there is one). + */ + m_markSupported = true; + while ( m_streams.hasNext() ) + if ( ! m_streams.next().markSupported() ) + m_markSupported = false; + /* + * We've run to the end of the streams list. Back up to the active + * one. + */ + for ( Reader is = null; is != activeStream; ) + is = m_streams.previous(); + + /* + * The "current" invariant is satisfied. + */ + m_markSupported_determined = true; + return m_markSupported; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/ObjectPoolImpl.java b/pljava/src/main/java/org/postgresql/pljava/internal/ObjectPoolImpl.java index 7b42c11f..5c259b12 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/ObjectPoolImpl.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/ObjectPoolImpl.java @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root directory of this distribution or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.internal; @@ -14,26 +20,27 @@ import org.postgresql.pljava.ObjectPool; import org.postgresql.pljava.PooledObject; -class ObjectPoolImpl implements ObjectPool +class ObjectPoolImpl implements ObjectPool { /** * An InstanceHandle is a link in a single linked list that * holds on to a ResultSetProvider. 
*/ - private static class PooledObjectHandle + private static class PooledObjectHandle { - private PooledObject m_instance; - private PooledObjectHandle m_next; + private T m_instance; + private PooledObjectHandle m_next; } private static Class[] s_ctorSignature = { ObjectPool.class }; private static PooledObjectHandle s_handlePool; - private static final IdentityHashMap s_poolCache = new IdentityHashMap(); + private static final IdentityHashMap,ObjectPoolImpl> + s_poolCache = new IdentityHashMap<>(); - private final Constructor m_ctor; - private PooledObjectHandle m_providerPool; + private final Constructor m_ctor; + private PooledObjectHandle m_providerPool; - private ObjectPoolImpl(Class c) + private ObjectPoolImpl(Class c) { if(!PooledObject.class.isAssignableFrom(c)) throw new IllegalArgumentException("Class " + c + " does not implement the " + @@ -55,13 +62,12 @@ private ObjectPoolImpl(Class c) /** * Obtain a pool for the given class. - * @param cls - * @return - * @throws SQLException */ - public static ObjectPoolImpl getObjectPool(Class cls) + @SuppressWarnings("unchecked") + public static ObjectPoolImpl + getObjectPool(Class cls) { - ObjectPoolImpl pool = (ObjectPoolImpl)s_poolCache.get(cls); + ObjectPoolImpl pool = (ObjectPoolImpl)s_poolCache.get(cls); if(pool == null) { pool = new ObjectPoolImpl(cls); @@ -70,11 +76,12 @@ public static ObjectPoolImpl getObjectPool(Class cls) return pool; } - public PooledObject activateInstance() + @SuppressWarnings("unchecked") + public T activateInstance() throws SQLException { - PooledObject instance; - PooledObjectHandle handle = m_providerPool; + T instance; + PooledObjectHandle handle = m_providerPool; if(handle != null) { m_providerPool = handle.m_next; @@ -90,7 +97,7 @@ public PooledObject activateInstance() { try { - instance = (PooledObject)m_ctor.newInstance(new Object[] { this }); + instance = m_ctor.newInstance(new Object[] { this }); } catch(InvocationTargetException e) { @@ -125,7 +132,7 @@ public PooledObject activateInstance() return instance; } - public void passivateInstance(PooledObject instance) + public void passivateInstance(T instance) throws SQLException { try @@ -141,21 +148,24 @@ public void passivateInstance(PooledObject instance) // Obtain a handle from the pool of handles so that // we have something to wrap the instance in. 
// - PooledObjectHandle handle = s_handlePool; + @SuppressWarnings("unchecked") + PooledObjectHandle handle = (PooledObjectHandle)s_handlePool; if(handle != null) s_handlePool = handle.m_next; else - handle = new PooledObjectHandle(); + handle = new PooledObjectHandle<>(); handle.m_instance = instance; handle.m_next = m_providerPool; m_providerPool = handle; } - public void removeInstance(PooledObject instance) throws SQLException + @SuppressWarnings("unchecked") + public void removeInstance(T instance) throws SQLException { PooledObjectHandle prev = null; - for(PooledObjectHandle handle = m_providerPool; handle != null; handle = handle.m_next) + for(PooledObjectHandle handle = m_providerPool; + handle != null; handle = handle.m_next) { if(handle.m_instance == instance) { diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Oid.java b/pljava/src/main/java/org/postgresql/pljava/internal/Oid.java index f3d0df32..01e0524b 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Oid.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Oid.java @@ -1,11 +1,20 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.internal; +import static org.postgresql.pljava.internal.Backend.doInPG; + +import java.sql.SQLData; import java.sql.SQLException; import java.util.HashMap; @@ -18,32 +27,22 @@ */ public class Oid extends Number { - private static final HashMap s_class2typeId = new HashMap(); + private static final HashMap,Oid> s_class2typeId = + new HashMap<>(); - private static final HashMap s_typeId2class = new HashMap(); - static - { - try - { - // Ensure that the SPI JDBC driver is loaded and registered - // with the java.sql.DriverManager. - // - Class.forName("org.postgresql.pljava.jdbc.SPIDriver"); - } - catch(ClassNotFoundException e) - { - throw new ExceptionInInitializerError(e); - } - } + private static final HashMap> s_typeId2class = + new HashMap<>(); /** - * Finds the PostgreSQL well known Oid for the given class. - * @param clazz The class. + * Finds the PostgreSQL well known Oid for the given Java object. + * @param obj The object. * @return The well known Oid or null if no such Oid could be found. 
*/ - public static Oid forJavaClass(Class clazz) + public static Oid forJavaObject(Object obj) throws SQLException { - return (Oid)s_class2typeId.get(clazz); + if ( obj instanceof SQLData ) + return forTypeName(((SQLData)obj).getSQLTypeName()); + return s_class2typeId.get(obj.getClass()); } /** @@ -55,10 +54,7 @@ public static Oid forJavaClass(Class clazz) public static Oid forTypeName(String typeString) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return new Oid(_forTypeName(typeString)); - } + return doInPG(() -> new Oid(_forTypeName(typeString))); } /** @@ -69,10 +65,7 @@ public static Oid forTypeName(String typeString) public static Oid forSqlType(int sqlType) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return new Oid(_forSqlType(sqlType)); - } + return doInPG(() -> new Oid(_forSqlType(sqlType))); } /** @@ -80,10 +73,7 @@ public static Oid forSqlType(int sqlType) */ public static Oid getTypeId() { - synchronized(Backend.THREADLOCK) - { - return _getTypeId(); - } + return doInPG(Oid::_getTypeId); } /** @@ -136,26 +126,29 @@ public float floatValue() public Class getJavaClass() throws SQLException { - Class c = (Class)s_typeId2class.get(this); - if(c == null) + Class c = s_typeId2class.get(this); + if(c != null) + return c; + return doInPG(() -> { - String className; - synchronized(Backend.THREADLOCK) - { - className = _getJavaClassName(m_native); - } + String className = _getJavaClassName(m_native); + ClassLoader loader = _getCurrentLoader(); + Class cc; try { - c = Class.forName(getCanonicalClassName(className, 0)); + String canonName = getCanonicalClassName(className, 0); + if ( null == loader ) + loader = getClass().getClassLoader(); + cc = Class.forName(canonName, true, loader); } catch(ClassNotFoundException e) { throw new SQLException(e.getMessage()); } - s_typeId2class.put(this, c); - s_class2typeId.put(c, this); - } - return c; + s_typeId2class.put(this, cc); + s_class2typeId.put(cc, this); + return cc; + }); } /** @@ -238,4 +231,12 @@ private native static int _forSqlType(int sqlType) private native static String _getJavaClassName(int nativeOid) throws SQLException; + + /** + * Return the (initiating, "schema") ClassLoader of the innermost + * currently-executing PL/Java function, or null if there is none or the + * schema loaders have since been cleared and the loader is gone. + */ + private native static ClassLoader _getCurrentLoader() + throws SQLException; } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/PgSavepoint.java b/pljava/src/main/java/org/postgresql/pljava/internal/PgSavepoint.java index 26477121..cbe5e8e6 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/PgSavepoint.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/PgSavepoint.java @@ -1,139 +1,313 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack */ package org.postgresql.pljava.internal; +import static org.postgresql.pljava.internal.Backend.doInPG; + import java.sql.Connection; import java.sql.SQLException; +import java.sql.Savepoint; +import java.sql.SQLNonTransientException; import java.util.Iterator; import java.util.WeakHashMap; import java.util.logging.Logger; /** + * Implementation of {@link Savepoint} for the SPI connection. + *
<p>
      + * It is an historical oddity that this is in the {@code .internal} package + * rather than {@code .jdbc}. * @author Thomas Hallgren */ -public class PgSavepoint implements java.sql.Savepoint +public class PgSavepoint implements Savepoint { - private static final WeakHashMap s_knownSavepoints = new WeakHashMap(); + /* + * Instances that might be live are tracked here in a WeakHashMap. The + * first one created (i.e., outermost) also has a strong reference held by + * the Invocation, so it can be zapped if lingering around at function exit. + * That automatically takes care of any later/inner ones, so it is not as + * critical to track those. Any instance that disappears from this + * WeakHashMap (because the application code has let go of all its live + * references) is an instance we don't have to fuss with. (That can mean, + * though, if any SavepointListeners are registered, and a reportable event + * happens to such a savepoint, listeners will be called with null instead + * of a Savepoint instance. That hasn't been changed, but is now documented + * over at SavepointListener, where it wasn't before.) + * + * Manipulations of this map take place only on the PG thread. + */ + private static final WeakHashMap s_knownSavepoints = + new WeakHashMap<>(); + + private static void forgetNestLevelsGE(int nestLevel) + { + assert Backend.threadMayEnterPG(); + Iterator it = s_knownSavepoints.keySet().iterator(); + while ( it.hasNext() ) + { + PgSavepoint sp = it.next(); + if ( sp.m_nestLevel < nestLevel ) + continue; + it.remove(); + sp.m_nestLevel = 0; // force exception on future attempts to use + } + } + + /* + * PostgreSQL allows an internal subtransaction to have a name, though it + * isn't used for much, and is allowed to be null. The JDBC Savepoint API + * also allows a name, so we will pass it along to PG if provided, and save + * it here for the API method getSavepointName. + */ + private final String m_name; + + /* + * The transaction ID assigned during BeginInternalSubTransaction is really + * the identifier that matters. An instance will briefly have the default + * value zero when constructed; the real value will be filled in during the + * native _set method. + */ + private int m_xactId; + + /* + * The nesting level will also have its default value briefly at + * construction, with the real value filled in by _set. Real values will + * be positive, so setting this back to zero can be used to signal + * onInvocationExit that nothing remains to do. Always manipulated and + * checked on "the PG thread". + */ + private int m_nestLevel; - private long m_pointer; + /* + * JDBC requires the rollback operation not "use up" the savepoint that is + * rolled back to (though it does discard any that were created after it). + * PL/Java has historically gotten that wrong. Changing that behavior could + * lead to unexpected warnings at function exit, if code written to the + * prior behavior has rolled back to a savepoint and then forgotten it, + * expecting it not to be found unreleased when the function exits. + * + * The behavior at function exit has historically been governed by the + * pljava.release_lingering_savepoints GUC: true => savepoint released, + * false => savepoint rolled back, with a warning either way. To accommodate + * savepoints that are still alive after rollback, a situation that formerly + * did not arise, create a third behavior: if such a 'reborn' savepoint is + * found "lingering" at function exit, it will be silently released. 
+ */ + private boolean m_reborn = false; - PgSavepoint(long pointer) + /* + * A complication arises if a savepoint listener has been registered: + * PostgreSQL will make the callback before BeginInternalSubTransaction + * has returned. The correct xactId will be passed to the callback, but it + * won't have been set in this object yet. The forId() method below can + * handle that by finding the object in this static and setting its m_xactId + * so it's fully initialized by the time it is passed to the listener. + * + * As all of this action (constructing and setting a new savepoint, calling + * a savepoint listener) can only happen on the PG thread, this static is + * effectively confined to one thread. + */ + private static PgSavepoint s_nursery; + + private PgSavepoint(String name) { - m_pointer = pointer; + m_name = name; } + /** + * Establish a savepoint; only to be called by + * {@link Connection#setSavepoint Connection.setSavepoint}, which makes the + * arrangements for {@link #onInvocationExit onInvocationExit} to be + * called. + */ public static PgSavepoint set(String name) throws SQLException { - synchronized(Backend.THREADLOCK) + return doInPG(() -> { - PgSavepoint sp = new PgSavepoint(_set(name)); + PgSavepoint sp = new PgSavepoint(name); + s_nursery = sp; + try + { + /* + * This assignment of m_xactId will be redundant if a listener + * already was called and filled in the ID, but harmless. + */ + sp.m_xactId = sp._set(name); + } + finally + { + s_nursery = null; + } s_knownSavepoints.put(sp, Boolean.TRUE); return sp; - } + }); } static PgSavepoint forId(int savepointId) { - if(savepointId != 0) + if(savepointId == 0) + return null; + return doInPG(() -> { - synchronized(Backend.THREADLOCK) + if ( null != s_nursery ) // can only be the Savepoint being set { - Iterator itor = s_knownSavepoints.keySet().iterator(); - while(itor.hasNext()) - { - PgSavepoint sp = (PgSavepoint)itor.next(); - if(savepointId == _getId(sp.m_pointer)) - return sp; - } + PgSavepoint sp = s_nursery; + sp.m_xactId = savepointId; + s_nursery = null; + return sp; } - } - return null; + for ( PgSavepoint sp : s_knownSavepoints.keySet() ) + { + if(savepointId == sp.m_xactId) + return sp; + } + return null; + }); } + @Override public int hashCode() { - return this.getSavepointId(); + return System.identityHashCode(this); } + @Override public boolean equals(Object o) { return (this == o); } + /** + * Release (commit) a savepoint; only to be called by + * {@link Connection#releaseSavepoint Connection.releaseSavepoint}. + */ public void release() throws SQLException { - synchronized(Backend.THREADLOCK) + doInPG(() -> { - _release(m_pointer); - s_knownSavepoints.remove(this); - m_pointer = 0; - } + if ( 0 == m_nestLevel ) + throw new SQLNonTransientException( + "attempt to release savepoint " + + (null != m_name ? ('"' + m_name + '"') : m_xactId) + + " that is no longer valid", "3B001"); + + _release(m_xactId, m_nestLevel); + forgetNestLevelsGE(m_nestLevel); + }); } + /** + * Roll back a savepoint; only to be called by + * {@link Connection#rollback(Savepoint) Connection.rollback}. + *
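From user code going through the SPI connection's JDBC layer, the behavior described in the next paragraph looks roughly like this sketch; the table name, savepoint name, and inserted values are invented for illustration:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Savepoint;
    import java.sql.Statement;

    public class SavepointExample
    {
        static void demo() throws SQLException
        {
            // Within a PL/Java function, this URL obtains the SPI connection.
            Connection c = DriverManager.getConnection("jdbc:default:connection");
            try ( Statement s = c.createStatement() )
            {
                Savepoint sp = c.setSavepoint("sp_example");
                s.execute("INSERT INTO example_tbl VALUES (1)");
                c.rollback(sp);          // undoes the insert ...
                s.execute("INSERT INTO example_tbl VALUES (2)");
                c.rollback(sp);          // ... and sp remains usable afterward
                c.releaseSavepoint(sp);  // release it before the function returns
            }
        }
    }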
<p>
      + * JDBC's rollback-to-savepoint operation discards all more-deeply-nested + * savepoints, but not the one that is the target of the rollback. That one + * remains active and can be used again. That behavior matches PostgreSQL's + * SQL-level savepoints, but here it has to be built on top of the + * "internal subtransaction" layer, and made to work the right way. + */ public void rollback() throws SQLException { - synchronized(Backend.THREADLOCK) + doInPG(() -> { - _rollback(m_pointer); - s_knownSavepoints.remove(this); - m_pointer = 0; - } + if ( 0 == m_nestLevel ) + throw new SQLNonTransientException( + "attempt to roll back to savepoint " + + (null != m_name ? ('"' + m_name + '"') : m_xactId) + + " that is no longer valid", "3B001"); + + _rollback(m_xactId, m_nestLevel); + + /* Forget only more-deeply-nested savepoints, NOT this one */ + forgetNestLevelsGE(1 + m_nestLevel); + + /* + * The "internal subtransaction" was used up by rolling back. To + * provide the correct JDBC behavior, where a savepoint is not + * used up by a rollback, transparently set a new internal one. + */ + try + { + s_nursery = this; + m_xactId = _set(m_name); + m_reborn = true; + } + finally + { + s_nursery = null; + } + }); } + @Override public String getSavepointName() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getName(m_pointer); - } + // XXX per JDBC, this should throw an exception rather than return null + return m_name; } + @Override public int getSavepointId() { - synchronized(Backend.THREADLOCK) - { - return _getId(m_pointer); - } + // XXX per JDBC, this should throw an exception if m_name ISN'T null + return m_xactId; } - public void onInvocationExit(Connection conn) + public void onInvocationExit(boolean withError) throws SQLException { - if(m_pointer == 0) + assert Backend.threadMayEnterPG(); + if(m_nestLevel == 0) return; Logger logger = Logger.getAnonymousLogger(); - if(Backend.isReleaseLingeringSavepoints()) + if(!withError && (m_reborn || Backend.isReleaseLingeringSavepoints())) { - logger.warning("Releasing savepoint '" + _getId(m_pointer) + - "' since its lifespan exceeds that of the function where it was set"); - conn.releaseSavepoint(this); + if ( ! m_reborn ) + logger.warning("Releasing savepoint '" + m_xactId + + "' since its lifespan exceeds that of the function where " + + "it was set"); + /* + * Perform release directly, not through Connection, which does + * other bookkeeping that's unnecessary on invocation exit. + */ + _release(m_xactId, m_nestLevel); + forgetNestLevelsGE(m_nestLevel); } else { - logger.warning("Rolling back to savepoint '" + _getId(m_pointer) + - "' since its lifespan exceeds that of the function where it was set"); - conn.rollback(this); + if ( ! withError || ! m_reborn ) + logger.warning("Rolling back to savepoint '" + m_xactId + + "' since its lifespan exceeds that of the function where " + + "it was set"); + /* + * Perform rollback directly, without Connection's unnecessary + * bookkeeping, and without resurrecting the savepoint this time. 
+ */ + _rollback(m_xactId, m_nestLevel); + forgetNestLevelsGE(m_nestLevel); } } - private static native long _set(String name) + private native int _set(String name) throws SQLException; - private static native void _release(long pointer) + private static native void _release(int xid, int nestLevel) throws SQLException; - private static native void _rollback(long pointer) + private static native void _rollback(int xid, int nestLevel) throws SQLException; - - private static native String _getName(long pointer); - - private static native int _getId(long pointer); } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Portal.java b/pljava/src/main/java/org/postgresql/pljava/internal/Portal.java index eefeee0c..363f2bb1 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Portal.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Portal.java @@ -1,11 +1,20 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.internal; +import org.postgresql.pljava.internal.SPI; // for javadoc +import static org.postgresql.pljava.internal.Backend.doInPG; + import java.sql.SQLException; /** @@ -16,11 +25,65 @@ */ public class Portal { - private long m_pointer; + private final State m_state; + + Portal(DualState.Key cookie, long ro, long pointer, ExecutionPlan plan) + { + m_state = new State(cookie, this, ro, pointer, plan); + } - Portal(long pointer) + private static class State + extends DualState.SingleSPIcursorClose { - m_pointer = pointer; + /* + * Hold a reference to the Java ExecutionPlan object as long as we might + * be using it, just to make sure Java unreachability doesn't cause it + * to mop up its native plan state while the portal might still want it. + */ + private ExecutionPlan m_plan; + + private State( + DualState.Key cookie, Portal referent, long ro, long portal, + ExecutionPlan plan) + { + super(cookie, referent, ro, portal); + m_plan = plan; + } + + /** + * Return the Portal pointer. + *
<p>
      + * This is a transitional implementation: ideally, each method requiring + * the native state would be moved to this class, and hold the pin for + * as long as the state is being manipulated. Simply returning the + * guarded value out from under the pin, as here, is not great practice, + * but as long as the value is only used in instance methods of + * Portal, or subclasses, or something with a strong reference to + * this Portal, and only on a thread for which + * {@code Backend.threadMayEnterPG()} is true, disaster will not strike. + * It can't go Java-unreachable while a reference is on the call stack, + * and as long as we're on the thread that's in PG, the saved plan won't + * be popped before we return. + */ + private long getPortalPtr() throws SQLException + { + pin(); + try + { + return guardedLong(); + } + finally + { + unpin(); + } + } + + @Override + protected void javaStateReleased(boolean nativeStateLive) + { + super.javaStateReleased(nativeStateLive); + m_plan = null; + } } /** @@ -29,144 +92,117 @@ public class Portal */ public void close() { - synchronized(Backend.THREADLOCK) - { - _close(m_pointer); - m_pointer = 0; - } + m_state.releaseFromJava(); } /** * Returns the name of this Portal. - * @throws SQLException if the handle to the native structur is stale. + * @throws SQLException if the handle to the native structure is stale. */ public String getName() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getName(m_pointer); - } + return doInPG(() -> _getName(m_state.getPortalPtr())); } /** * Returns the value of the portalPos attribute. - * @throws SQLException if the handle to the native structur is stale. + * @throws SQLException if the handle to the native structure is stale. */ - public int getPortalPos() + public long getPortalPos() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getPortalPos(m_pointer); - } + long pos = doInPG(() -> _getPortalPos(m_state.getPortalPtr())); + if ( pos < 0 ) + throw new ArithmeticException( + "portal position too large to report " + + "in a Java signed long"); + return pos; } /** * Returns the TupleDesc that describes the row Tuples for this * Portal. - * @throws SQLException if the handle to the native structur is stale. + * @throws SQLException if the handle to the native structure is stale. */ public TupleDesc getTupleDesc() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getTupleDesc(m_pointer); - } + return doInPG(() -> _getTupleDesc(m_state.getPortalPtr())); } /** * Performs an SPI_cursor_fetch. + *
<p>
      + * The fetched rows are parked at the C global {@code SPI_tuptable}; see + * {@link SPI#getTupTable SPI.getTupTable} for retrieving them. (While + * faithful to the way the C API works, this seems a bit odd as a Java API, + * and suggests that calls to this method and then {@code SPI.getTupTable} + * would ideally be done inside a single {@code doInPG}.) * @param forward Set to true for forward, false for backward. * @param count Maximum number of rows to fetch. * @return The actual number of fetched rows. - * @throws SQLException if the handle to the native structur is stale. + * @throws SQLException if the handle to the native structure is stale. */ - public int fetch(boolean forward, int count) + public long fetch(boolean forward, long count) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _fetch(m_pointer, System.identityHashCode(Thread.currentThread()), forward, count); - } + long fetched = + doInPG(() -> _fetch(m_state.getPortalPtr(), forward, count)); + if ( fetched < 0 ) + throw new ArithmeticException( + "fetched too many rows to report in a Java signed long"); + return fetched; } /** * Returns the value of the atEnd attribute. - * @throws SQLException if the handle to the native structur is stale. + * @throws SQLException if the handle to the native structure is stale. */ public boolean isAtEnd() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _isAtEnd(m_pointer); - } + return doInPG(() -> _isAtEnd(m_state.getPortalPtr())); } /** * Returns the value of the atStart attribute. - * @throws SQLException if the handle to the native structur is stale. + * @throws SQLException if the handle to the native structure is stale. */ public boolean isAtStart() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _isAtStart(m_pointer); - } - } - - /** - * Returns the value of the posOverflow attribute. - * @throws SQLException if the handle to the native structur is stale. - */ - public boolean isPosOverflow() - throws SQLException - { - synchronized(Backend.THREADLOCK) - { - return _isPosOverflow(m_pointer); - } - } - - /** - * Checks if the portal is still active. I can be closed either explicitly - * using the {@link #close()} mehtod or implicitly due to a pop of invocation - * context. - */ - public boolean isValid() - { - return m_pointer != 0; + return doInPG(() -> _isAtStart(m_state.getPortalPtr())); } /** * Performs an SPI_cursor_move. * @param forward Set to true for forward, false for backward. * @param count Maximum number of rows to fetch. - * @return The value of the global variable SPI_result. - * @throws SQLException if the handle to the native structur is stale. + * @return The actual number of rows moved. + * @throws SQLException if the handle to the native structure is stale. 
*/ - public int move(boolean forward, int count) + public long move(boolean forward, long count) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _move(m_pointer, System.identityHashCode(Thread.currentThread()), forward, count); - } + long moved = + doInPG(() -> _move(m_state.getPortalPtr(), forward, count)); + if ( moved < 0 ) + throw new ArithmeticException( + "moved too many rows to report in a Java signed long"); + return moved; } private static native String _getName(long pointer) throws SQLException; - private static native int _getPortalPos(long pointer) + private static native long _getPortalPos(long pointer) throws SQLException; private static native TupleDesc _getTupleDesc(long pointer) throws SQLException; - private static native int _fetch(long pointer, long threadId, boolean forward, int count) + private static native long _fetch(long pointer, boolean forward, long count) throws SQLException; private static native void _close(long pointer); @@ -176,10 +212,7 @@ private static native boolean _isAtEnd(long pointer) private static native boolean _isAtStart(long pointer) throws SQLException; - - private static native boolean _isPosOverflow(long pointer) - throws SQLException; - private static native int _move(long pointer, long threadId, boolean forward, int count) + private static native long _move(long pointer, boolean forward, long count) throws SQLException; } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Privilege.java b/pljava/src/main/java/org/postgresql/pljava/internal/Privilege.java new file mode 100644 index 00000000..65fcaf23 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Privilege.java @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.security.AccessControlContext; +import java.security.AccessController; +import java.security.Permission; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; + +/** + * Clean interface to the {@code doPrivileged...} methods on + * {@link AccessController AccessController}. + *

      + * This interface must remain non-exported + * from {@code org.postgresql.pljava.internal}. + *

      + * The reason, of course, is that the real methods on {@code AccessController} + * end up getting called from these wrappers, and will therefore apply the + * permissions granted to this module. As long as these methods are only + * accessible within this module, that isn't a problem. + *

      + * It would be great to develop this into an exportable API so user code + * could benefit, but that would be a much trickier undertaking, with editing of + * {@code AccessControlContext}s to snag the correct caller's context, and + * not for the faint of heart. + *

      + * Each method here comes in a flavor accepting a + * {@link Checked.Supplier Checked.Supplier}, matching any lambda that returns a + * reference type, and a flavor accepting a + * {@link Checked.Runnable Checked.Runnable} for {@code void} lambdas, because + * the compiler will not match those up with {@code Supplier}. + *
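For example, a hypothetical caller inside this module (the interface is not exported), with java.nio.file calls standing in for whatever privileged work is needed:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    final class PrivilegeExample
    {
        static byte[] slurp(String path) throws IOException      // Checked.Supplier flavor
        {
            return Privilege.doPrivileged(() -> Files.readAllBytes(Paths.get(path)));
        }

        static void remove(String path) throws IOException       // Checked.Runnable flavor
        {
            Privilege.doPrivileged(() -> Files.delete(Paths.get(path)));
        }
    }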

      + * Fuss no more with {@code PrivilegedExceptionAction} and catching + * {@code PrivilegedActionException}: just pass any of these methods a lambda. + * If the lambda throws a checked exception, so does the method. If the lambda + * throws some checked exceptions, the method throws their least common + * supertype, which is not as nice as throwing their union, and climbs all the + * way up to {@code Exception} if they are unrelated. But even so, you can now + * simply catch it, rather than catching a {@code PrivilegedActionException} and + * having to unwrap it first. + */ +public interface Privilege +{ + public static T doPrivileged( + Checked.Supplier op) + throws E + { + try + { + return (T)AccessController.doPrivileged( + (PrivilegedExceptionAction)op::get); + } + catch ( PrivilegedActionException pae ) + { + throw Checked.ederThrow(pae.getException()); + } + } + + public static T doPrivileged( + Checked.Supplier op, AccessControlContext acc) + throws E + { + try + { + return (T)AccessController.doPrivileged( + (PrivilegedExceptionAction)op::get, acc); + } + catch ( PrivilegedActionException pae ) + { + throw Checked.ederThrow(pae.getException()); + } + } + + public static T doPrivileged( + Checked.Supplier op, AccessControlContext acc, Permission... perms) + throws E + { + try + { + return (T)AccessController.doPrivileged( + (PrivilegedExceptionAction)op::get, acc, perms); + } + catch ( PrivilegedActionException pae ) + { + throw Checked.ederThrow(pae.getException()); + } + } + + public static T doPrivilegedWithCombiner( + Checked.Supplier op) + throws E + { + try + { + return (T)AccessController.doPrivilegedWithCombiner( + (PrivilegedExceptionAction)op::get); + } + catch ( PrivilegedActionException pae ) + { + throw Checked.ederThrow(pae.getException()); + } + } + + public static T doPrivilegedWithCombiner( + Checked.Supplier op, AccessControlContext acc, Permission... perms) + throws E + { + try + { + return (T)AccessController.doPrivilegedWithCombiner( + (PrivilegedExceptionAction)op::get, acc, perms); + } + catch ( PrivilegedActionException pae ) + { + throw Checked.ederThrow(pae.getException()); + } + } + + public static void doPrivileged( + Checked.Runnable op) + throws E + { + try + { + AccessController.doPrivileged((PrivilegedExceptionAction)() -> + { + op.run(); + return null; + }); + } + catch ( PrivilegedActionException pae ) + { + throw Checked.ederThrow(pae.getException()); + } + } + + public static void doPrivileged( + Checked.Runnable op, AccessControlContext acc) + throws E + { + try + { + AccessController.doPrivileged((PrivilegedExceptionAction)() -> + { + op.run(); + return null; + }, acc); + } + catch ( PrivilegedActionException pae ) + { + throw Checked.ederThrow(pae.getException()); + } + } + + public static void doPrivileged( + Checked.Runnable op, AccessControlContext acc, Permission... perms) + throws E + { + try + { + AccessController.doPrivileged((PrivilegedExceptionAction)() -> + { + op.run(); + return null; + }, acc, perms); + } + catch ( PrivilegedActionException pae ) + { + throw Checked.ederThrow(pae.getException()); + } + } + + public static void doPrivilegedWithCombiner( + Checked.Runnable op) + throws E + { + try + { + AccessController + .doPrivilegedWithCombiner((PrivilegedExceptionAction)() -> + { + op.run(); + return null; + }); + } + catch ( PrivilegedActionException pae ) + { + throw Checked.ederThrow(pae.getException()); + } + } + + public static void doPrivilegedWithCombiner( + Checked.Runnable op, AccessControlContext acc, Permission... 
perms) + throws E + { + try + { + AccessController + .doPrivilegedWithCombiner((PrivilegedExceptionAction)() -> + { + op.run(); + return null; + }, acc, perms); + } + catch ( PrivilegedActionException pae ) + { + throw Checked.ederThrow(pae.getException()); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Relation.java b/pljava/src/main/java/org/postgresql/pljava/internal/Relation.java index 816fb396..f405ed92 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Relation.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Relation.java @@ -1,11 +1,19 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.internal; +import static org.postgresql.pljava.internal.Backend.doInPG; + import java.sql.SQLException; /** @@ -14,13 +22,52 @@ * * @author Thomas Hallgren */ -public class Relation extends JavaWrapper +public class Relation { private TupleDesc m_tupleDesc; + private final State m_state; + + Relation(DualState.Key cookie, long resourceOwner, long pointer) + { + m_state = new State(cookie, this, resourceOwner, pointer); + } - Relation(long pointer) + private static class State + extends DualState.SingleGuardedLong { - super(pointer); + private State( + DualState.Key cookie, Relation r, long ro, long hth) + { + super(cookie, r, ro, hth); + } + + /** + * Return the Relation pointer. + *

      + * This is a transitional implementation: ideally, each method requiring + * the native state would be moved to this class, and hold the pin for + * as long as the state is being manipulated. Simply returning the + * guarded value out from under the pin, as here, is not great practice, + * but as long as the value is only used in instance methods of + * Relation, or subclasses, or something with a strong reference to + * this Relation, and only on a thread for which + * {@code Backend.threadMayEnterPG()} is true, disaster will not strike. + * It can't go Java-unreachable while an instance method's on the call + * stack, and the {@code Invocation} marking this state's native scope + * can't be popped before return of any method using the value. + */ + private long getRelationPtr() throws SQLException + { + pin(); + try + { + return guardedLong(); + } + finally + { + unpin(); + } + } } /** @@ -30,10 +77,7 @@ public class Relation extends JavaWrapper public String getName() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getName(this.getNativePointer()); - } + return doInPG(() -> _getName(m_state.getRelationPtr())); } /** @@ -43,10 +87,7 @@ public String getName() public String getSchema() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getSchema(this.getNativePointer()); - } + return doInPG(() -> _getSchema(m_state.getRelationPtr())); } /** @@ -58,18 +99,22 @@ public TupleDesc getTupleDesc() { if(m_tupleDesc == null) { - synchronized(Backend.THREADLOCK) - { - m_tupleDesc = _getTupleDesc(this.getNativePointer()); - } + m_tupleDesc = doInPG(() -> _getTupleDesc(m_state.getRelationPtr())); } return m_tupleDesc; } /** - * Creates a new Tuple by substituting new values for selected columns - * copying the columns of the original Tuple at other positions. The - * original Tuple is not modified.
+ * Creates a new {@code Tuple} by substituting new values for selected + * columns, copying the columns of the original {@code Tuple} at other + * positions. The original {@code Tuple} is not modified. + *

      + * Note: starting with PostgreSQL 10, this method can fail if SPI is not + * connected; it is the caller's responsibility in PG 10 and up + * to ensure that SPI is connected and that a longer-lived memory + * context than SPI's has been selected, if the caller wants the result of + * this call to survive {@code SPI_finish}. + * * @param original The tuple that serves as the source. * @param fieldNumbers An array of one based indexes denoting the positions that * are to receive modified values. @@ -81,14 +126,11 @@ public TupleDesc getTupleDesc() public Tuple modifyTuple(Tuple original, int[] fieldNumbers, Object[] values) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _modifyTuple(this.getNativePointer(), original.getNativePointer(), fieldNumbers, values); - } + return doInPG(() -> + _modifyTuple(m_state.getRelationPtr(), + original.getNativePointer(), fieldNumbers, values)); } - protected native void _free(long pointer); - private static native String _getName(long pointer) throws SQLException; diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/ResultSetPicker.java b/pljava/src/main/java/org/postgresql/pljava/internal/ResultSetPicker.java index 36871d7a..f1d7ae3c 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/ResultSetPicker.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/ResultSetPicker.java @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root directory of this distribution or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB - Thomas Hallgren + * Chapman Flack */ package org.postgresql.pljava.internal; @@ -13,7 +19,22 @@ import org.postgresql.pljava.ResultSetProvider; import org.postgresql.pljava.jdbc.SingleRowWriter; -public class ResultSetPicker implements ResultSetProvider +/** + * An adapter class used internally when a set-returning user function returns + * a {@code ResultSetHandle}, presenting it as a {@link ResultSetProvider} + * instead. + *

      + * Note on the current implementation: + * this class operates by fetching every field of every row of the result set + * as a Java object via the one-argument {@code getObject}, then storing it into + * the writable result set supplied by PL/Java. Apart from being rather + * inefficient, this can involve conversions through legacy types (such as + * {@code java.sql.Timestamp} when the JSR 310 {@code java.time} conversions are + * better specified). In cases where that isn't acceptable, the user function + * should be declared to return {@code ResultSetProvider} and do this work + * itself. + */ +public class ResultSetPicker implements ResultSetProvider.Large { private final ResultSetHandle m_resultSetHandle; private final ResultSet m_resultSet; @@ -25,7 +46,7 @@ public ResultSetPicker(ResultSetHandle resultSetHandle) m_resultSet = resultSetHandle.getResultSet(); } - public boolean assignRowValues(ResultSet receiver, int currentRow) + public boolean assignRowValues(ResultSet receiver, long currentRow) throws SQLException { if(m_resultSet == null || !m_resultSet.next()) diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/SPI.java b/pljava/src/main/java/org/postgresql/pljava/internal/SPI.java index c33370b7..98ac1993 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/SPI.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/SPI.java @@ -1,11 +1,19 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.internal; +import static org.postgresql.pljava.internal.Backend.doInPG; + /** * The SPI class provides access to some global * variables used by SPI. 
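Per that note, a user function that wants to avoid the per-field getObject copy can implement ResultSetProvider.Large directly; a minimal sketch (the entry-point name and the one-column bigint row type are assumptions for illustration):

    import java.sql.ResultSet;
    import java.sql.SQLException;
    import org.postgresql.pljava.ResultSetProvider;

    public class CountDown implements ResultSetProvider.Large
    {
        private final long m_start;

        private CountDown(long start) { m_start = start; }

        // entry point named in the function's CREATE FUNCTION ... AS clause
        public static ResultSetProvider countDown(long start)
        {
            return new CountDown(start);
        }

        @Override
        public boolean assignRowValues(ResultSet receiver, long currentRow)
        throws SQLException
        {
            if ( currentRow >= m_start )
                return false;                        // no more rows
            receiver.updateLong(1, m_start - currentRow); // write column 1 of this row
            return true;
        }

        @Override
        public void close()
        {
        }
    }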
@@ -14,28 +22,40 @@ */ public class SPI { - public static final int ERROR_CONNECT = -1; - public static final int ERROR_COPY = -2; - public static final int ERROR_OPUNKNOWN = -3; - public static final int ERROR_UNCONNECTED = -4; - public static final int ERROR_CURSOR = -5; - public static final int ERROR_ARGUMENT = -6; - public static final int ERROR_PARAM = -7; - public static final int ERROR_TRANSACTION = -8; - public static final int ERROR_NOATTRIBUTE = -9; - public static final int ERROR_NOOUTFUNC = -10; - public static final int ERROR_TYPUNKNOWN = -11; + public static final int ERROR_CONNECT = -1; + public static final int ERROR_COPY = -2; + public static final int ERROR_OPUNKNOWN = -3; + public static final int ERROR_UNCONNECTED = -4; + public static final int ERROR_CURSOR = -5; + public static final int ERROR_ARGUMENT = -6; + public static final int ERROR_PARAM = -7; + public static final int ERROR_TRANSACTION = -8; + public static final int ERROR_NOATTRIBUTE = -9; + public static final int ERROR_NOOUTFUNC = -10; + public static final int ERROR_TYPUNKNOWN = -11; + public static final int ERROR_REL_DUPLICATE = -12; + public static final int ERROR_REL_NOT_FOUND = -13; - public static final int OK_CONNECT = 1; - public static final int OK_FINISH = 2; - public static final int OK_FETCH = 3; - public static final int OK_UTILITY = 4; - public static final int OK_SELECT = 5; - public static final int OK_SELINTO = 6; - public static final int OK_INSERT = 7; - public static final int OK_DELETE = 8; - public static final int OK_UPDATE = 9; - public static final int OK_CURSOR = 10; + public static final int OK_CONNECT = 1; + public static final int OK_FINISH = 2; + public static final int OK_FETCH = 3; + public static final int OK_UTILITY = 4; + public static final int OK_SELECT = 5; + public static final int OK_SELINTO = 6; + public static final int OK_INSERT = 7; + public static final int OK_DELETE = 8; + public static final int OK_UPDATE = 9; + public static final int OK_CURSOR = 10; + public static final int OK_INSERT_RETURNING = 11; + public static final int OK_DELETE_RETURNING = 12; + public static final int OK_UPDATE_RETURNING = 13; + public static final int OK_REWRITTEN = 14; + public static final int OK_REL_REGISTER = 15; + public static final int OK_REL_UNREGISTER = 16; + public static final int OK_TD_REGISTER = 17; + public static final int OK_MERGE = 18; + + public static final int OPT_NONATOMIC = 1 << 0; /** * Execute a command using the internal SPI_exec function. @@ -44,32 +64,29 @@ public class SPI * of rowCount of zero is interpreted as no limit, i.e., * run to completion. * @return One of the declared status codes. + * @deprecated This seems never to have been used in git history of project. */ - public static int exec(String command, int rowCount) + @Deprecated + private static int exec(String command, int rowCount) { - synchronized(Backend.THREADLOCK) - { - return _exec(System.identityHashCode(Thread.currentThread()), command, rowCount); - } + return doInPG(() -> _exec(command, rowCount)); } public static void freeTupTable() { - synchronized(Backend.THREADLOCK) - { - _freeTupTable(); - } + doInPG(SPI::_freeTupTable); } /** * Returns the value of the global variable SPI_processed. 
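A usage note on the pattern these codes support (an illustrative fragment; it would live where SPI is visible, i.e. in org.postgresql.pljava.internal):

    static long countProcessedOrThrow() throws SQLException
    {
        int rc = SPI.getResult();
        if ( rc < 0 )
            throw new SQLException("SPI failed: " + SPI.getResultText(rc));
        return SPI.getProcessed();  // now a long; throws ArithmeticException on overflow
    }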
*/ - public static int getProcessed() + public static long getProcessed() { - synchronized(Backend.THREADLOCK) - { - return _getProcessed(); - } + long count = doInPG(SPI::_getProcessed); + if ( count < 0 ) + throw new ArithmeticException( + "too many rows processed to count in a Java signed long"); + return count; } /** @@ -77,10 +94,7 @@ public static int getProcessed() */ public static int getResult() { - synchronized(Backend.THREADLOCK) - { - return _getResult(); - } + return doInPG(SPI::_getResult); } /** @@ -88,91 +102,60 @@ public static int getResult() */ public static TupleTable getTupTable(TupleDesc known) { - synchronized(Backend.THREADLOCK) - { - return _getTupTable(known); - } + return doInPG(() -> _getTupTable(known)); } /** - * Returns a textual representatio of a result code + * Returns a textual representation of a result code. + */ + /* + * XXX PG 11 introduces a real SPI_result_code_string function. + * The strings it returns are like these with SPI_ prepended. */ public static String getResultText(int resultCode) { - String s; switch(resultCode) { - case ERROR_CONNECT: - s = "ERROR_CONNECT"; - break; - case ERROR_COPY: - s = "ERROR_COPY"; - break; - case ERROR_OPUNKNOWN: - s = "ERROR_OPUNKNOWN"; - break; - case ERROR_UNCONNECTED: - s = "ERROR_UNCONNECTED"; - break; - case ERROR_CURSOR: - s = "ERROR_CURSOR"; - break; - case ERROR_ARGUMENT: - s = "ERROR_ARGUMENT"; - break; - case ERROR_PARAM: - s = "ERROR_PARAM"; - break; - case ERROR_TRANSACTION: - s = "ERROR_TRANSACTION"; - break; - case ERROR_NOATTRIBUTE: - s = "ERROR_NOATTRIBUTE"; - break; - case ERROR_NOOUTFUNC: - s = "ERROR_NOOUTFUNC"; - break; - case ERROR_TYPUNKNOWN: - s = "ERROR_TYPUNKNOWN"; - break; - case OK_CONNECT: - s = "OK_CONNECT"; - break; - case OK_FINISH: - s = "OK_FINISH"; - break; - case OK_FETCH: - s = "OK_FETCH"; - break; - case OK_UTILITY: - s = "OK_UTILITY"; - break; - case OK_SELECT: - s = "OK_SELECT"; - break; - case OK_SELINTO: - s = "OK_SELINTO"; - break; - case OK_INSERT: - s = "OK_INSERT"; - break; - case OK_DELETE: - s = "OK_DELETE"; - break; - case OK_UPDATE: - s = "OK_UPDATE"; - break; - case OK_CURSOR: - s = "OK_CURSOR"; - break; - default: - s = "Unkown result code: " + resultCode; + case ERROR_CONNECT: return "ERROR_CONNECT"; + case ERROR_COPY: return "ERROR_COPY"; + case ERROR_OPUNKNOWN: return "ERROR_OPUNKNOWN"; + case ERROR_UNCONNECTED: return "ERROR_UNCONNECTED"; + case ERROR_CURSOR: return "ERROR_CURSOR"; + case ERROR_ARGUMENT: return "ERROR_ARGUMENT"; + case ERROR_PARAM: return "ERROR_PARAM"; + case ERROR_TRANSACTION: return "ERROR_TRANSACTION"; + case ERROR_NOATTRIBUTE: return "ERROR_NOATTRIBUTE"; + case ERROR_NOOUTFUNC: return "ERROR_NOOUTFUNC"; + case ERROR_TYPUNKNOWN: return "ERROR_TYPUNKNOWN"; + case ERROR_REL_DUPLICATE: return "ERROR_REL_DUPLICATE"; + case ERROR_REL_NOT_FOUND: return "ERROR_REL_NOT_FOUND"; + + case OK_CONNECT: return "OK_CONNECT"; + case OK_FINISH: return "OK_FINISH"; + case OK_FETCH: return "OK_FETCH"; + case OK_UTILITY: return "OK_UTILITY"; + case OK_SELECT: return "OK_SELECT"; + case OK_SELINTO: return "OK_SELINTO"; + case OK_INSERT: return "OK_INSERT"; + case OK_DELETE: return "OK_DELETE"; + case OK_UPDATE: return "OK_UPDATE"; + case OK_CURSOR: return "OK_CURSOR"; + case OK_INSERT_RETURNING: return "OK_INSERT_RETURNING"; + case OK_DELETE_RETURNING: return "OK_DELETE_RETURNING"; + case OK_UPDATE_RETURNING: return "OK_UPDATE_RETURNING"; + case OK_REWRITTEN: return "OK_REWRITTEN"; + case OK_REL_REGISTER: return "OK_REL_REGISTER"; + case OK_REL_UNREGISTER: 
return "OK_REL_UNREGISTER"; + case OK_TD_REGISTER: return "OK_TD_REGISTER"; + + default: return "Unknown result code: " + resultCode; } - return s; } - private native static int _exec(long threadId, String command, int rowCount); - private native static int _getProcessed(); + @Deprecated + private native static int _exec(String command, int rowCount); + + private native static long _getProcessed(); private native static int _getResult(); private native static void _freeTupTable(); private native static TupleTable _getTupTable(TupleDesc known); diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/SPIException.java b/pljava/src/main/java/org/postgresql/pljava/internal/SPIException.java index 2c6072c5..3a0d5220 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/SPIException.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/SPIException.java @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack */ package org.postgresql.pljava.internal; @@ -10,6 +16,7 @@ /** + * A Java exception constructed from a PostgreSQL SPI result code. * @author Thomas Hallgren */ public class SPIException extends SQLException diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/SQL_ASCII.java b/pljava/src/main/java/org/postgresql/pljava/internal/SQL_ASCII.java new file mode 100644 index 00000000..a9e5454a --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/SQL_ASCII.java @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2020-2021 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; + +import java.nio.charset.Charset; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.CoderResult; +import java.nio.charset.spi.CharsetProvider; +import static java.nio.charset.StandardCharsets.US_ASCII; + +import static java.util.Collections.singletonList; +import java.util.Iterator; +import java.util.List; + +/** + * An {@code SQL_ASCII}, a/k/a {@code X-PGSQL_ASCII}, "character set". + *

      + * This is a principled Java take on the PostgreSQL definition of + * SQL_ASCII as an encoding where the seven-bit ASCII values are + * themselves and the remaining eight-bit values are who-knows-what. + * It isn't appropriate to just copy byte values with no conversion, + * as that would amount to saying we know the values correspond to + * LATIN-1, which would be lying. Java strings are by definition Unicode, + * so it's not ok to go stuffing code points in that do not mean what + * Unicode defines those code points to mean. + *

      + * What this decoder does is decode the seven-bit ASCII values as + * themselves, and decode each eight-bit value into a pair of Unicode + * noncharacters, one from the range u+fdd8 to u+fddf, followed by one + * from u+fde0 to u+fdef, where the first one's four low bits are the + * four high bits of the original value, and the second has the low four. + * The encoder transparently reverses that. + *
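A worked example of that mapping, using the public Provider directly (the demo class is hypothetical and would have to live in this package, since SQL_ASCII itself is not public):

    import java.nio.charset.Charset;

    public class SqlAsciiDemo
    {
        public static void main(String[] args)
        {
            Charset cs = new SQL_ASCII.Provider().charsetForName("SQL_ASCII");

            // byte 0xC3: high nibble 0xC -> U+FDDC, low nibble 0x3 -> U+FDE3
            String s = new String(new byte[] { 'a', (byte)0xC3 }, cs);
            System.out.println("a\uFDDC\uFDE3".equals(s));  // true

            byte[] round = s.getBytes(cs);                  // the encoder reverses the pairing
            System.out.println((round[1] & 0xFF) == 0xC3);  // true
        }
    }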

      + * Those noncharacter code points are permanently defined in Unicode + * to have no glyphs, no correspondence to specific characters, and + * no interesting properties. Implementing this charset allows PL/Java + * code to work usefully in a database with SQL_ASCII encoding, when the + * expectation is that whatever the code needs to recognize, act on, or + * edit will be in ASCII, and any non-ASCII content can be passed along + * uninterpreted and unchanged. + */ +class SQL_ASCII extends Charset +{ + static class Holder + { + static final List s_list = + singletonList((Charset)new SQL_ASCII()); + } + + + public static class Provider extends CharsetProvider + { + static final String s_canonName = "X-PGSQL_ASCII"; + static final String[] s_aliases = { "SQL_ASCII" }; + + @Override + public Charset charsetForName(String charsetName) + { + if ( s_canonName.equalsIgnoreCase(charsetName) ) + return Holder.s_list.get(0); + for ( String s : s_aliases ) + if ( s.equalsIgnoreCase(charsetName) ) + return Holder.s_list.get(0); + return null; + } + + @Override + public Iterator charsets() + { + return Holder.s_list.iterator(); + } + } + + + private SQL_ASCII() + { + super(Provider.s_canonName, Provider.s_aliases); + } + + @Override + public boolean contains(Charset cs) + { + return this.equals(cs) || US_ASCII.equals(cs); + } + + @Override + public CharsetDecoder newDecoder() + { + return new Decoder(); + } + + @Override + public CharsetEncoder newEncoder() + { + return new Encoder(); + } + + + static class Decoder extends CharsetDecoder + { + Decoder() + { + super(Holder.s_list.get(0), 1.002f, 2.0f); + } + + @Override + protected CoderResult decodeLoop(ByteBuffer in, CharBuffer out) + { + int ipos = in.position(); + int opos = out.position(); + int ilim = in.limit(); + int olim = out.limit(); + + for ( ; ipos < ilim ; ++ ipos ) + { + char b = (char)(0xff & in.get(ipos)); + + if ( b < 128 ) + { + if ( opos == olim ) + { + in.position(ipos); + out.position(opos); + return CoderResult.OVERFLOW; + } + out.put(opos++, b); + } + else + { + if ( opos + 1 >= olim ) + { + in.position(ipos); + out.position(opos); + return CoderResult.OVERFLOW; + } + out.put(opos++, (char)(0xFDD0 | (b >> 4))); + out.put(opos++, (char)(0xFDE0 | (b & 0xf))); + } + } + in.position(ipos); + out.position(opos); + return CoderResult.UNDERFLOW; + } + } + + static class Encoder extends CharsetEncoder + { + Encoder() + { + super(Holder.s_list.get(0), 0.998f, 1.0f); + } + + @Override + protected CoderResult encodeLoop(CharBuffer in, ByteBuffer out) + { + int ipos = in.position(); + int opos = out.position(); + int ilim = in.limit(); + int olim = out.limit(); + + for ( ; ipos < ilim ; ++ ipos ) + { + if ( opos == olim ) + { + in.position(ipos); + out.position(opos); + return CoderResult.OVERFLOW; + } + + char c = in.get(ipos); + + if ( '\uFDD8' <= c && c < '\uFDE0' ) + { + if ( ipos + 1 == ilim ) + break; + + char d = in.get(ipos + 1); + + if ( '\uFDE0' > d || d > '\uFDEF' ) + { + in.position(ipos); + out.position(opos); + return CoderResult.malformedForLength(2); + } + c = (char)(( (c & 0xf) << 4 ) | (d & 0xf)); + ++ ipos; + } + else if ( c >= 128 ) + { + in.position(ipos); + out.position(opos); + return CoderResult.unmappableForLength(1); + } + out.put(opos++, (byte)c); + } + in.position(ipos); + out.position(opos); + return CoderResult.UNDERFLOW; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/ServerException.java b/pljava/src/main/java/org/postgresql/pljava/internal/ServerException.java index 
24f703bc..5c81053a 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/ServerException.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/ServerException.java @@ -1,14 +1,27 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack */ package org.postgresql.pljava.internal; import java.sql.SQLException; +import static java.util.Arrays.copyOfRange; + +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; + +import static org.postgresql.pljava.jdbc.Invocation.s_unhandled; + /** + * A Java exception constructed over a PostgreSQL error report. * @author Thomas Hallgren */ public class ServerException extends SQLException @@ -17,7 +30,24 @@ public class ServerException extends SQLException private transient final ErrorData m_errorData; - public ServerException(ErrorData errorData) + private static ServerException obtain(ErrorData errorData) + { + assert threadMayEnterPG() : "ServerException obtain() thread"; + + ServerException e = new ServerException(errorData); + + StackTraceElement[] es = e.getStackTrace(); + if ( null != es && 0 < es.length ) + e.setStackTrace(copyOfRange(es, 1, es.length)); + + if ( null == s_unhandled ) + s_unhandled = e; + else + s_unhandled.addSuppressed(e); + return e; + } + + private ServerException(ErrorData errorData) { super(errorData.getMessage(), errorData.getSqlState()); m_errorData = errorData; diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Session.java b/pljava/src/main/java/org/postgresql/pljava/internal/Session.java index 0f55cdad..283951fa 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Session.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Session.java @@ -1,31 +1,112 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.internal; +import java.nio.charset.Charset; + import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; +import java.sql.Savepoint; import java.sql.Statement; import java.util.HashMap; +import static java.util.Objects.requireNonNull; +import java.util.Properties; import org.postgresql.pljava.ObjectPool; +import org.postgresql.pljava.PooledObject; import org.postgresql.pljava.SavepointListener; import org.postgresql.pljava.TransactionListener; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + import org.postgresql.pljava.jdbc.SQLUtils; +import org.postgresql.pljava.elog.ELogHandler; + +import static org.postgresql.pljava.internal.Backend.doInPG; /** * An instance of this interface reflects the current session. The attribute - * store is transactional. + * store is deprecated. It had interesting transactional behavior until + * PL/Java 1.2.0, but since then it has behaved as any (non-null-allowing) Map. + * Anyone needing any sort of attribute store with transactional behavior will + * need to implement one and use a {@link TransactionListener} to keep it + * sync'd. * * @author Thomas Hallgren */ public class Session implements org.postgresql.pljava.Session { + public static Session provider() + { + return Holder.INSTANCE; + } + + private final Properties m_properties; + + private Session() + { + /* + * This strategy assumes that no user code will request a Session + * instance until after InstallHelper has poked the frozen properties + * into s_properties. + */ + m_properties = requireNonNull(s_properties); + } + + private static class Holder + { + static final Session INSTANCE = new Session(); + } + + /** + * An unmodifiable defensive copy of the Java system properties that will be + * put here by InstallHelper via package access at startup. + */ + static Properties s_properties; + + /** + * The Java charset corresponding to the server encoding, or null if none + * such was found. Put here by InstallHelper via package access at startup. + */ + static Charset s_serverCharset; + + @Override + public Properties frozenSystemProperties() + { + return m_properties; + } + + /** + * A static method (not part of the API-exposed Session interface) by which + * pljava implementation classes can get hold of the server charset without + * the indirection of getting a Session instance. If there turns out to be + * demand for client code to obtain it through the API, an interface method + * {@code serverCharset} can easily be added later. + * @return The Java Charset corresponding to the server's encoding, or null + * if no matching Java charset was found. That can happen if a corresponding + * Java charset really does exist but is not successfully found using the + * name reported by PostgreSQL. That can be worked around by giving the + * right name explicitly as the system property + * {@code org.postgresql.server.encoding} in {@code pljava.vmoptions} for + * the affected database (or cluster-wide, if the same encoding is used). 
+ */ + public static Charset implServerCharset() + { + return s_serverCharset; + } + + @SuppressWarnings("removal") private final TransactionalMap m_attributes = new TransactionalMap(new HashMap()); /** @@ -46,12 +127,24 @@ public void addSavepointListener(SavepointListener listener) SubXactListener.addListener(listener); } + /** + * Get an attribute from the session's attribute store. + * @deprecated {@code Session}'s attribute store once had a special, and + * possibly useful, transactional behavior, but since PL/Java 1.2.0 it has + * lacked that, and offers nothing you don't get with an ordinary + * {@code Map} (that forbids nulls). If some kind of store with + * transactional behavior is needed, it should be implemented in straight + * Java and kept in sync by using a {@link TransactionListener}. + */ + @Override + @SuppressWarnings("removal") + @Deprecated(since="1.5.3", forRemoval=true) public Object getAttribute(String attributeName) { return m_attributes.get(attributeName); } - public ObjectPool getObjectPool(Class cls) + public ObjectPool getObjectPool(Class cls) { return ObjectPoolImpl.getObjectPool(cls); } @@ -69,17 +162,43 @@ public String getOuterUserName() } @Override + @SuppressWarnings("removal") + @Deprecated(since="1.5.0", forRemoval=true) public String getSessionUserName() { return getOuterUserName(); } + /** + * Remove an attribute from the session's attribute store. + * @deprecated {@code Session}'s attribute store once had a special, and + * possibly useful, transactional behavior, but since PL/Java 1.2.0 it has + * lacked that, and offers nothing you don't get with an ordinary + * {@code Map} (that forbids nulls). If some kind of store with + * transactional behavior is needed, it should be implemented in straight + * Java and kept in sync by using a {@link TransactionListener}. + */ + @Override + @SuppressWarnings("removal") + @Deprecated(since="1.5.3", forRemoval=true) public void removeAttribute(String attributeName) { m_attributes.remove(attributeName); } + /** + * Set an attribute in the session's attribute store. + * @deprecated {@code Session}'s attribute store once had a special, and + * possibly useful, transactional behavior, but since PL/Java 1.2.0 it has + * lacked that, and offers nothing you don't get with an ordinary + * {@code Map} (that forbids nulls). If some kind of store with + * transactional behavior is needed, it should be implemented in straight + * Java and kept in sync by using a {@link TransactionListener}. 
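In that spirit, a minimal sketch of a transaction-scoped store kept in sync by a listener; the onCommit/onAbort/onPrepare callbacks (each receiving the Session) are assumed to be the TransactionListener interface, and registration would go through Session.addTransactionListener:

    import java.sql.SQLException;
    import java.util.HashMap;
    import java.util.Map;
    import org.postgresql.pljava.Session;
    import org.postgresql.pljava.TransactionListener;

    public class TxAttributeStore implements TransactionListener
    {
        private final Map<String,Object> m_committed = new HashMap<>();
        private Map<String,Object> m_working = new HashMap<>();

        public Object get(String key)           { return m_working.get(key); }
        public void   put(String key, Object v) { m_working.put(key, v); }

        @Override
        public void onCommit(Session session) throws SQLException
        {
            m_committed.clear();
            m_committed.putAll(m_working);          // keep the committed snapshot
        }

        @Override
        public void onAbort(Session session) throws SQLException
        {
            m_working = new HashMap<>(m_committed); // discard uncommitted changes
        }

        @Override
        public void onPrepare(Session session) throws SQLException
        {
        }
    }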
+ */ + @Override + @SuppressWarnings("removal") + @Deprecated(since="1.5.3", forRemoval=true) public void setAttribute(String attributeName, Object value) { m_attributes.put(attributeName, value); @@ -104,6 +223,8 @@ public void removeSavepointListener(SavepointListener listener) } @Override + @SuppressWarnings("removal") + @Deprecated(since="1.5.0", forRemoval=true) public void executeAsSessionUser(Connection conn, String statement) throws SQLException { @@ -115,22 +236,32 @@ public void executeAsOuterUser(Connection conn, String statement) throws SQLException { Statement stmt = conn.createStatement(); - synchronized(Backend.THREADLOCK) + doInPG(() -> { ResultSet rs = null; AclId outerUser = AclId.getOuterUser(); AclId effectiveUser = AclId.getUser(); + Savepoint sp = null; boolean wasLocalChange = false; boolean changeSucceeded = false; try { wasLocalChange = _setUser(outerUser, true); changeSucceeded = true; + sp = conn.setSavepoint(); if(stmt.execute(statement)) { rs = stmt.getResultSet(); rs.next(); } + conn.releaseSavepoint(sp); + sp = null; + } + catch ( SQLException sqle ) + { + if ( null != sp ) + conn.rollback(sp); + throw sqle; } finally { @@ -139,7 +270,7 @@ public void executeAsOuterUser(Connection conn, String statement) if ( changeSucceeded ) _setUser(effectiveUser, wasLocalChange); } - } + }); } /** @@ -147,24 +278,24 @@ public void executeAsOuterUser(Connection conn, String statement) * Currently used only in Commands.java. Not made visible API yet * because there has to be a more general way to do this. */ - public String getOuterUserSchema() + public Identifier.Simple getOuterUserSchema() throws SQLException { Statement stmt = SQLUtils.getDefaultConnection().createStatement(); - synchronized(Backend.THREADLOCK) + return doInPG(() -> { ResultSet rs = null; - AclId sessionUser = AclId.getSessionUser(); + AclId outerUser = AclId.getOuterUser(); AclId effectiveUser = AclId.getUser(); boolean wasLocalChange = false; boolean changeSucceeded = false; try { - wasLocalChange = _setUser(sessionUser, true); + wasLocalChange = _setUser(outerUser, true); changeSucceeded = true; rs = stmt.executeQuery("SELECT current_schema()"); if ( rs.next() ) - return rs.getString(1); + return Identifier.Simple.fromCatalog(rs.getString(1)); throw new SQLException("Unable to obtain current schema"); } finally @@ -174,21 +305,16 @@ public String getOuterUserSchema() if ( changeSucceeded ) _setUser(effectiveUser, wasLocalChange); } - } + }); } /** * Called from native code when the JVM is instantiated. */ - static long init() + static void init() throws SQLException { ELogHandler.init(); - - // Should be replace with a Thread.getId() once we abandon - // Java 1.4 - // - return System.identityHashCode(Thread.currentThread()); } private static native boolean _setUser(AclId userId, boolean isLocalChange); diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/SubXactListener.java b/pljava/src/main/java/org/postgresql/pljava/internal/SubXactListener.java index 88673c5b..0d72231f 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/SubXactListener.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/SubXactListener.java @@ -1,69 +1,120 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack */ package org.postgresql.pljava.internal; +import org.postgresql.pljava.SavepointListener; +import org.postgresql.pljava.Session; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.EntryPoints.Invocable; +import static org.postgresql.pljava.internal.Privilege.doPrivileged; + +import static java.security.AccessController.getContext; + +import java.sql.Savepoint; import java.sql.SQLException; -import java.util.HashMap; -import org.postgresql.pljava.SavepointListener; +import java.util.ArrayDeque; +import java.util.Deque; +import static java.util.Objects.requireNonNull; +import static java.util.stream.Collectors.toList; /** - * Class that enables registrations using the PostgreSQL RegisterSubXactCallback - * function. + * Class that enables registrations using the PostgreSQL + * {@code RegisterSubXactCallback} function. * * @author Thomas Hallgren */ class SubXactListener { - private static final HashMap s_listeners = new HashMap(); - - static void onAbort(long listenerId, int spId, int parentSpId) throws SQLException + @FunctionalInterface + private interface Target { - SavepointListener listener = (SavepointListener)s_listeners.get(new Long(listenerId)); - if(listener != null) - listener.onAbort(Backend.getSession(), PgSavepoint.forId(spId), PgSavepoint.forId(parentSpId)); + void accept(SavepointListener l, Session s, Savepoint sp, Savepoint p) + throws SQLException; } - static void onCommit(long listenerId, int spId, int parentSpId) throws SQLException + /* + * These must match the values of the PostgreSQL enum; StaticAssertStmt + * is used in the C source to produce errors (from compilers with the + * feature) if they do not. + */ + private static final int START_SUB = 0; + private static final int COMMIT_SUB = 1; + private static final int ABORT_SUB = 2; + private static final int PRE_COMMIT_SUB = 3; + + private static final Target[] s_refs = { - SavepointListener listener = (SavepointListener)s_listeners.get(new Long(listenerId)); - if(listener != null) - listener.onCommit(Backend.getSession(), PgSavepoint.forId(spId), PgSavepoint.forId(parentSpId)); - } + SavepointListener::onStart, + SavepointListener::onCommit, + SavepointListener::onAbort, + SavepointListener::onPreCommit + }; + + /* + * A non-thread-safe Deque; will be made safe by doing all mutations on the + * PG thread (even though actually calling into PG is necessary only when + * the size changes from 0 to 1 or 1 to 0). + */ + private static final Deque> s_listeners = + new ArrayDeque<>(); - static void onStart(long listenerId, long spPointer, int parentSpId) throws SQLException + private static void invokeListeners( + int eventIndex, PgSavepoint sp, PgSavepoint parent) + throws SQLException { - SavepointListener listener = (SavepointListener)s_listeners.get(new Long(listenerId)); - if(listener != null) - listener.onStart(Backend.getSession(), new PgSavepoint(spPointer), PgSavepoint.forId(parentSpId)); + Target target = s_refs[eventIndex]; + Session session = org.postgresql.pljava.internal.Session.provider(); + + // Take a snapshot. 
Handlers might unregister during event processing + for ( Invocable listener : + s_listeners.stream().collect(toList()) ) + { + doPrivileged(() -> + { + target.accept(listener.payload, session, sp, parent); + }, listener.acc); + } } static void addListener(SavepointListener listener) { - synchronized(Backend.THREADLOCK) + Invocable invocable = + new Invocable<>(requireNonNull(listener), getContext()); + + doInPG(() -> { - long key = System.identityHashCode(listener); - if(s_listeners.put(new Long(key), listener) != listener) - _register(key); - } + s_listeners.removeIf(v -> v.payload.equals(listener)); + s_listeners.push(invocable); + if( 1 == s_listeners.size() ) + _register(); + }); } static void removeListener(SavepointListener listener) { - synchronized(Backend.THREADLOCK) + doInPG(() -> { - long key = System.identityHashCode(listener); - if(s_listeners.remove(new Long(key)) == listener) - _unregister(key); - } + if ( ! s_listeners.removeIf(v -> v.payload.equals(listener)) ) + return; + if ( 0 == s_listeners.size() ) + _unregister(); + }); } - private static native void _register(long listenerId); + private static native void _register(); - private static native void _unregister(long listenerId); + private static native void _unregister(); } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/SyntheticXMLReader.java b/pljava/src/main/java/org/postgresql/pljava/internal/SyntheticXMLReader.java new file mode 100644 index 00000000..ca590c63 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/SyntheticXMLReader.java @@ -0,0 +1,899 @@ +/* + * Copyright (c) 2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.IOException; +import java.io.Reader; + +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +import java.nio.charset.Charset; +import static java.nio.charset.StandardCharsets.US_ASCII; + +import java.sql.SQLException; + +import static java.util.Collections.unmodifiableSet; +import java.util.EnumSet; +import java.util.Set; + +import org.w3c.dom.Node; + +import org.xml.sax.Attributes; +import org.xml.sax.ContentHandler; +import org.xml.sax.DTDHandler; +import org.xml.sax.EntityResolver; +import org.xml.sax.ErrorHandler; +import org.xml.sax.InputSource; +import org.xml.sax.XMLReader; +import org.xml.sax.SAXException; +import org.xml.sax.SAXNotRecognizedException; +import org.xml.sax.SAXNotSupportedException; + +import org.xml.sax.ext.Attributes2; +import org.xml.sax.ext.Attributes2Impl; +import org.xml.sax.ext.DeclHandler; +import org.xml.sax.ext.DefaultHandler2; +import org.xml.sax.ext.EntityResolver2; +import org.xml.sax.ext.LexicalHandler; + +/** + * Base class implementing the tedious parts of a SAX {@code XMLReader} whose + * XML content is synthesized on the fly. + *

      + * An implementing class provides {@link #parse(InputSource)} (which might do as + * little as ignoring its argument and calling {@code super.parse()}), and the + * workhorse {@link #next()}, which should return an {@link EventCarrier} on + * every call, then {@code null} when no parse events remain. An + * {@code EventCarrier} is a closure that can disgorge one or more SAX events + * onto SAX handlers (provided by this framework) when its + * {@link EventCarrier#toSAX() toSAX()} method is called. Start- and + * end-document events are synthesized here, so only the events in between + * should be generated by {@code EventCarrier}s. + *
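In a subclass, the parse override really can be that small (a fragment of a hypothetical implementing class):

    @Override
    public void parse(InputSource input) throws IOException, SAXException
    {
        parse();   // zero-argument driver: startDocument, the next() loop, endDocument
    }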

      + * An implementing class could return a single {@code EventCarrier} that will + * provide all of the XML content, or a sequence of {@code EventCarrier}s each + * supplying one event or more, at the discretion of the implementor; if the + * content might be large or complex to generate, breaking it into multiple + * {@code EventCarrier}s can provide a StAX-like ability to pull it in smaller + * pieces. + *
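However the events are carved up, the reader is consumed like any other SAX source; a sketch of handing a (hypothetical) concrete subclass to an identity transformer:

    import javax.xml.transform.Transformer;
    import javax.xml.transform.TransformerFactory;
    import javax.xml.transform.sax.SAXSource;
    import javax.xml.transform.stream.StreamResult;
    import org.xml.sax.InputSource;

    public class SyntheticSourceDemo
    {
        public static void serialize(SyntheticXMLReader reader) throws Exception
        {
            Transformer t = TransformerFactory.newInstance().newTransformer();
            // the InputSource is a placeholder; parse(InputSource) may ignore it
            t.transform(new SAXSource(reader, new InputSource()),
                        new StreamResult(System.out));
        }
    }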

      + * This odd hybrid based on SAX is used, rather than simply basing a synthetic + * XML source directly on StAX, because of the numerous bugs in the Java + * runtime's implementation of StAX-to-TRAX bridging. Those are not so much in + * StAX itself (a tidy API), nor in the TRAX transformer implementations, but in + * the JRE classes added to bridge the two when StAX was added. The worst are in + * the handling of XML 'content' fragments, which are explicitly permitted by + * SQL/XML. A look at the bridge classes' code does not show a complete lack of + * attention to that case, but the code is broken and, after so many years, + * will probably not have its behavior changed. Because the + * {@code java.sql.SQLXML} API is expressly designed to allow easily obtaining a + * default {@code Source} to pass to a TRAX transformation, and to handle + * SQL/XML content that can be fragmentary, it follows that the default flavor + * of {@code Source} to return (and to implement synthetically here) must not be + * StAX. SAX is well supported and plays well with TRAX, even for content + * fragments. + */ +public abstract class SyntheticXMLReader implements XMLReader +{ + /** + * A final, pre-allocated, read-only {@code Attributes2} that is empty, for + * convenience in {@code startElement} calls for elements with no + * attributes. + */ + public static final Attributes2 NO_ATTRIBUTES = new EmptyAttributes2(); + + /** + * A per-instance, pre-allocated {@code FluentAttributes2} that can be + * re-used (but not across threads) for convenience in {@code startElement} + * calls for elements with attributes. + */ + public final FluentAttributes2 m_attributes = new FluentAttributes2(); + + public enum SAX2FEATURE + { + EXTERNAL_GENERAL_ENTITIES("external-general-entities", null), + EXTERNAL_PARAMETER_ENTITIES("external-parameter-entities", null), + IS_STANDALONE("is-standalone"), + LEXICAL_HANDLER_PARAMETER_ENTITIES("lexical-handler/parameter-entities", + null), + NAMESPACES("namespaces", true), + NAMESPACE_PREFIXES("namespace-prefixes", true), + RESOLVE_DTD_URIS("resolve-dtd-uris", true), + STRING_INTERNING("string-interning", true), + UNICODE_NORMALIZATION_CHECKING("unicode-normalization-checking", true), + USE_ATTRIBUTES2("use-attributes2", false), + USE_LOCATOR2("use-locator2", false), + USE_ENTITY_RESOLVER2("use-entity-resolver2", true), + VALIDATION("validation", true), + XMLNS_URIS("xmlns-uris", true), + XML_1_1("xml-1.1", false); + + private static final String PREFIX = "http://xml.org/sax/features/"; + + static final Set STANDARD_DEFAULTS; + static final Set STANDARD_INITIALIZED; + + public final String featureId; + public final boolean writable; + public final Boolean standardDefault; // null if unspecified in standard + + public String featureUri() + { + return PREFIX + featureId; + } + + /** + * @return null if not recognized + */ + public static SAX2FEATURE fromUri(String uri) + { + if ( null != uri && uri.startsWith(PREFIX) ) + { + String s = uri.substring(PREFIX.length()); + for ( SAX2FEATURE f : values() ) + if ( f.featureId.equals(s) ) + return f; + } + return null; + } + + SAX2FEATURE(String id) + { + featureId = id; + writable = false; + standardDefault = null; + } + + SAX2FEATURE(String id, Boolean standardDefault) + { + featureId = id; + writable = true; + this.standardDefault = standardDefault; + } + + static + { + EnumSet dflts = EnumSet.noneOf(SAX2FEATURE.class); + EnumSet inits = dflts.clone(); + + for ( SAX2FEATURE f : values() ) + { + if ( null == f.standardDefault ) + 
continue; + if ( f.standardDefault.booleanValue() ) + dflts.add(f); + inits.add(f); + } + + STANDARD_DEFAULTS = unmodifiableSet(dflts); + STANDARD_INITIALIZED = unmodifiableSet(inits); + } + } + + public enum ApacheFeature + { + DISALLOW_DOCTYPE_DECL("disallow-doctype-decl", false), + XINCLUDE("xinclude", false), + LOAD_EXTERNAL_DTD("nonvalidating/load-external-dtd", true); + + private static final String PREFIX = "http://apache.org/xml/features/"; + + static final Set STANDARD_DEFAULTS; + + public final String featureId; + public final Boolean standardDefault; + + public String featureUri() + { + return PREFIX + featureId; + } + + /** + * @return null if not recognized + */ + public static ApacheFeature fromUri(String uri) + { + if ( null != uri && uri.startsWith(PREFIX) ) + { + String s = uri.substring(PREFIX.length()); + for ( ApacheFeature f : values() ) + if ( f.featureId.equals(s) ) + return f; + } + return null; + } + + ApacheFeature(String id, Boolean standardDefault) + { + featureId = id; + this.standardDefault = standardDefault; + } + + static + { + EnumSet dflts = EnumSet.noneOf(ApacheFeature.class); + + for ( ApacheFeature f : values() ) + { + if ( f.standardDefault.booleanValue() ) + dflts.add(f); + } + + STANDARD_DEFAULTS = unmodifiableSet(dflts); + } + } + + public enum SAX2PROPERTY + { + DECLARATION_HANDLER("declaration-handler", DeclHandler.class), + DOCUMENT_XML_VERSION("document-xml-version", String.class, false), + DOM_NODE("dom-node", Node.class), + LEXICAL_HANDLER("lexical-handler", LexicalHandler.class), + XML_STRING("xml-string", String.class, false); + + private static final String PREFIX = "http://xml.org/sax/properties/"; + + public final String propertyId; + public final boolean writable; + public final Class requiredClass; + + public String propertyUri() + { + return PREFIX + propertyId; + } + + public static SAX2PROPERTY fromUri(String uri) + throws SAXNotRecognizedException + { + if ( null != uri && uri.startsWith(PREFIX) ) + { + String s = uri.substring(PREFIX.length()); + for ( SAX2PROPERTY p : values() ) + if ( p.propertyId.equals(s) ) + return p; + } + throw new SAXNotRecognizedException(uri); + } + + public boolean valueOk(Object v) + { + return null == v || requiredClass.isInstance(v); + } + + SAX2PROPERTY(String id, Class reqClass) + { + propertyId = id; + requiredClass = reqClass; + this.writable = true; + } + + SAX2PROPERTY(String id, Class reqClass, boolean writable) + { + propertyId = id; + requiredClass = reqClass; + this.writable = writable; + } + } + + private Set m_featuresSet = + EnumSet.copyOf(SAX2FEATURE.STANDARD_DEFAULTS); + + private Set m_featuresKnown = + EnumSet.copyOf(SAX2FEATURE.STANDARD_INITIALIZED); + + private Set m_apacheFeaturesSet = + EnumSet.copyOf(ApacheFeature.STANDARD_DEFAULTS); + + private final Object[] m_propertyValue = + new Object[SAX2PROPERTY.values().length]; + + private final DefaultHandler2 m_dummy = new DefaultHandler2(); + + private ContentHandler m_contentHandler; + private DTDHandler m_dtdHandler; + private EntityResolver m_entityResolver; + private ErrorHandler m_errorHandler; + + /* + * The following (with names ending in _) are versions of the above meant + * for quick reference: whenever they are set, if the value is null, a dummy + * that supports the methods with no-ops will be substituted; if an + * EntityResolver (not an EntityResolver2) is supplied, it will be wrapped + * to implement the missing methods with no-ops. 
+ */ + private ContentHandler m_contentHandler_; + private DTDHandler m_dtdHandler_; + private EntityResolver2 m_entityResolver_; + private ErrorHandler m_errorHandler_; + private DeclHandler m_declHandler_; + private LexicalHandler m_lexicalHandler_; + + @Override + public boolean getFeature(String uri) + throws SAXNotRecognizedException, SAXNotSupportedException + { + SAX2FEATURE f = SAX2FEATURE.fromUri(uri); + if ( m_featuresKnown.contains(f) ) + return m_featuresSet.contains(f); + throw new SAXNotSupportedException(uri); + } + + @Override + public void setFeature(String uri, boolean value) + throws SAXNotRecognizedException, SAXNotSupportedException + { + SAX2FEATURE f = SAX2FEATURE.fromUri(uri); + if ( null != f ) + { + if ( ! f.writable ) + throw new SAXNotSupportedException(uri); + m_featuresKnown.add(f); + if ( value ) + m_featuresSet.add(f); + else + m_featuresSet.remove(f); + return; + } + ApacheFeature af = ApacheFeature.fromUri(uri); + if ( null != af ) + { + if ( value ) + m_apacheFeaturesSet.add(af); + else + m_apacheFeaturesSet.remove(af); + return; + } + throw new SAXNotRecognizedException(uri); + } + + @Override + public Object getProperty(String uri) + throws SAXNotRecognizedException, SAXNotSupportedException + { + SAX2PROPERTY p = SAX2PROPERTY.fromUri(uri); + return m_propertyValue[p.ordinal()]; + // XXX make some provision for unsupported settings + } + + @Override + public void setProperty(String uri, Object value) + throws SAXNotRecognizedException, SAXNotSupportedException + { + SAX2PROPERTY p = SAX2PROPERTY.fromUri(uri); + if ( ! p.writable || ! p.valueOk(value) ) + throw new SAXNotSupportedException(uri); + m_propertyValue[p.ordinal()] = value; + switch ( p ) + { + case DECLARATION_HANDLER: + m_declHandler_ = null == value ? m_dummy : (DeclHandler)value; + break; + case LEXICAL_HANDLER: + m_lexicalHandler_ = null == value ? m_dummy : (LexicalHandler)value; + break; + default: + } + } + + @Override + public void setEntityResolver(EntityResolver resolver) + { + m_entityResolver = resolver; + // XXX this should also be sensitive to USE_ENTITY_RESOLVER2 + if ( null == resolver ) + m_entityResolver_ = m_dummy; + else if ( resolver instanceof EntityResolver2 ) + m_entityResolver_ = (EntityResolver2)resolver; + else + m_entityResolver_ = new EntityResolverWrapper(resolver); + } + + @Override + public EntityResolver getEntityResolver() + { + return m_entityResolver; + } + + @Override + public void setDTDHandler(DTDHandler handler) + { + m_dtdHandler = handler; + m_dtdHandler_ = null != handler ? handler : m_dummy; + } + + @Override + public DTDHandler getDTDHandler() + { + return m_dtdHandler; + } + + @Override + public void setContentHandler(ContentHandler handler) + { + m_contentHandler = handler; + m_contentHandler_ = null != handler ? handler : m_dummy; + } + + @Override + public ContentHandler getContentHandler() + { + return m_contentHandler; + } + + @Override + public void setErrorHandler(ErrorHandler handler) + { + m_errorHandler = handler; + m_errorHandler_ = null != handler ? handler : m_dummy; + } + + @Override + public ErrorHandler getErrorHandler() + { + return m_errorHandler; + } + + /** + * The workhorse method for an implementing class to supply. + *

      + * It should return {@code null} when no more events are available, and + * until then, on each call should return an {@link EventCarrier} subclass + * whose {@link EventCarrier#toSAX() toSAX()} method will disgorge one or + * more SAX events. + * @return An EventCarrier, or null when no more events are to be returned. + */ + protected abstract EventCarrier next(); + + /** + * The only {@code parse} variant that the implementing class needs to + * supply. + *

      + * An implementation could do as little as ignoring its {@code InputSource} + * argument and calling the zero-argument {@code super.parse()}. + */ + @Override + public abstract void parse(InputSource input) + throws IOException, SAXException; + + /** + * If not overridden, calls {@link #parse(InputSource)} with the system-id + * wrapped in an {@code InputSource}. + */ + @Override + public void parse (String systemId) throws IOException, SAXException + { + parse(new InputSource(systemId)); + } + + /** + * Where the work happens. + *
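For instance, a subclass that draws all of its events from next() could satisfy the abstract method with nothing more than this (illustrative sketch):

    @Override
    public void parse(InputSource input) throws IOException, SAXException
    {
        // The InputSource is irrelevant; every event comes from next().
        super.parse();   // synthesizes startDocument ... endDocument around next()
    }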

      + * Synthesizes a {@code startDocument}, then loops calling {@code next()} + * until it returns null, calling {@code toSAX} on every returned + * {@code EventCarrier}, and finally synthesizes an {@code endDocument}. + */ + protected final void parse() throws IOException, SAXException + { + m_contentHandler_.startDocument(); + + EventCarrier c; + + try + { + while ( null != ( c = next() ) ) + c.toSAX(); + } + catch ( SQLException e ) + { + throw new IOException(e.getMessage(), e); + } + + m_contentHandler_.endDocument(); + } + + /** + * Produce an {@code EventCarrier} that wraps a checked exception and will + * rethrow it when used, which can be returned by the {@code next()} method, + * which is not declared to throw any checked exceptions itself. + *

      + * To simplify callers, the exception parameter is allowed to be a + * {@code RuntimeException}, in which case it will simply be rethrown here + * rather than wrapped. + * @param e An Exception, which may be a RuntimeException or checked. + * @return An EventCarrier wrapping the exception, if it is checked. + */ + protected EventCarrier exceptionCarrier(final Exception e) + { + if ( e instanceof RuntimeException ) + throw (RuntimeException)e; + return new ExceptionCarrier(e); + } + + /** + * Obtain a {@code Reader} given a system-id and a character set. + *

      + * If not overridden, this method delegates to + * {@link #sysIdToInputStream} and wraps the result in a {@code Reader} for + * the specified character set. + */ + protected Reader sysIdToReader(URI sysId, Charset cs) + throws IOException, SAXException + { + InputStream is = sysIdToInputStream(sysId); + if ( null == is ) + return null; + return new InputStreamReader(is, cs.newDecoder()); + } + + /** + * Obtain an {@code InputStream} given a system-id. + *

      + * If not overridden, this method tries {@code toURL().openStream()} on the + * supplied system-id, wrapping exceptions as needed to throw only those + * appropriate in a SAX method. + */ + protected InputStream sysIdToInputStream(URI sysId) + throws IOException, SAXException + { + try { + return sysId.toURL().openStream(); + } + catch ( MalformedURLException mue ) { + throw (SAXNotSupportedException) + new SAXNotSupportedException(sysId.toString()).initCause(mue); + } + } + + /** + * Obtain a {@code Reader} given an {@code InputSource}. + *
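An override might, for example, refuse to dereference system-ids at all, a plausible posture for a reader whose events are synthesized rather than parsed (hypothetical sketch):

    @Override
    protected InputStream sysIdToInputStream(URI sysId)
        throws IOException, SAXException
    {
        // Never fetch external resources; SAXNotSupportedException is a SAXException.
        throw new SAXNotSupportedException(
            "resolution of external resource not supported: " + sysId);
    }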

      + * If not overridden, this method returns the {@code Reader} directly + * contained in the source if there is one, or one that wraps the source's + * byte stream and character encoding if available, or the result of + * {@link #sysIdToReader sysIdToReader} on the source's system-id + * if available. + */ + protected Reader sourceToReader(InputSource input) + throws IOException, SAXException + { + Reader r = input.getCharacterStream(); + if ( null != r ) + return r; + + String encoding = input.getEncoding(); + Charset cs = (null != encoding) ? Charset.forName(encoding) : US_ASCII; + + InputStream is = input.getByteStream(); + if ( null != is ) + return new InputStreamReader(is, cs.newDecoder()); + + String sysId = input.getSystemId(); + if ( null == sysId ) + throw new SAXNotSupportedException(input.toString()); + + URI uri; + try { + uri = new URI(sysId); + } + catch ( URISyntaxException use ) { + throw (SAXNotSupportedException) + new SAXNotSupportedException(input.toString()).initCause(use); + } + + if ( ! uri.isAbsolute() ) + throw new IllegalArgumentException(uri.toString()); + + r = sysIdToReader(uri, cs); + if ( null != r ) + return r; + + throw new SAXNotSupportedException(input.toString()); + } + + /** + * Wrapper for an {@code EntityResolver} allowing it to be used as an + * {@code EntityResolver2}. + */ + static class EntityResolverWrapper extends DefaultHandler2 + { + private final EntityResolver m_entityResolver; + + EntityResolverWrapper(EntityResolver er) + { + m_entityResolver = er; + } + + @Override + public InputSource resolveEntity( + String name, String publicId, + String baseURI, String systemId) + throws SAXException, IOException + { + return resolveEntity(publicId, systemId); + } + + @Override + public InputSource resolveEntity(String publicId, String systemId) + throws SAXException, IOException + { + return m_entityResolver.resolveEntity(publicId, systemId); + } + } + + /** + * Base class for a closure carrying one or more SAX events. + *

      + * Only {@link #toSAX} needs to be provided by an implementing class. + * It can use {@link #content}, {@link #dtd}, {@link #entity}, {@link #err}, + * {@link #decl}, and {@link #lex} to obtain the various SAX handlers onto + * which it should disgorge events. Those methods never return null; a no-op + * handler will be returned if the consumer code did not register a handler + * of the corresponding type. + *

      + * Additional convenience methods are provided for generating the most + * common SAX parse events. + */ + public abstract class EventCarrier + { + protected ContentHandler content() + { + return m_contentHandler_; + } + + protected DTDHandler dtd() + { + return m_dtdHandler_; + } + + protected EntityResolver2 entity() + { + return m_entityResolver_; + } + + protected ErrorHandler err() + { + return m_errorHandler_; + } + + protected DeclHandler decl() + { + return m_declHandler_; + } + + protected LexicalHandler lex() + { + return m_lexicalHandler_; + } + + /** + * Return the per-instance, reusable + * {@link FluentAttributes2 FluentAttributes2} instance, without + * clearing it first, so the attributes from its last use can be + * reused or modified. + */ + protected FluentAttributes2 attrs() + { + return m_attributes; + } + + /** + * Return the per-instance, reusable + * {@link FluentAttributes2 FluentAttributes2} instance, + * clearing it first. + */ + protected FluentAttributes2 cleared() + { + return m_attributes.cleared(); + } + + /** + * Write a {@code String} value as character content. + */ + protected void characters(String s) throws SAXException + { + m_contentHandler_.characters(s.toCharArray(), 0, s.length()); + } + + /** + * Write a {@code String} value as a {@code CDATA} segment. + */ + protected void cdataCharacters(String s) throws SAXException + { + m_lexicalHandler_.startCDATA(); + try + { + m_contentHandler_.characters(s.toCharArray(), 0, s.length()); + } + finally + { + m_lexicalHandler_.endCDATA(); + } + } + + /** + * Start an element with only a local name and no attributes. + */ + protected void startElement(String localName) throws SAXException + { + m_contentHandler_.startElement( + "", localName, localName, NO_ATTRIBUTES); + } + + /** + * Start an element with only a local name, and attributes. + */ + protected void startElement(String localName, Attributes atts) + throws SAXException + { + m_contentHandler_.startElement("", localName, localName, atts); + } + + /** + * End an element with only a local name. + */ + protected void endElement(String localName) throws SAXException + { + m_contentHandler_.endElement("", localName, localName); + } + + public abstract void toSAX() + throws IOException, SAXException, SQLException; + } + + /** + * An {@code EventCarrier} that only wraps an exception, which will be + * rethrown when {@code toSAX()} is called, wrapped in a + * {@code SAXException} if it is not a {@code SAXException} or + * {@code IOException}. + */ + class ExceptionCarrier extends EventCarrier + { + private Exception e; + ExceptionCarrier(Exception e) + { + this.e = e; + } + + @Override + public void toSAX() + throws IOException, SAXException, SQLException + { + if ( e instanceof IOException ) + throw (IOException)e; + if ( e instanceof SAXException ) + throw (SAXException)e; + if ( e instanceof SQLException ) + throw (SQLException)e; + throw new SQLException(e.getMessage(), e); + } + } + + /** + * An immutable and empty collection of attributes. 
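Putting the pieces together, a subclass's next() might deliver one element per call, using the fluent attributes and convenience methods above, and fall back on exceptionCarrier for checked failures. A hypothetical sketch, in which nextRow() and rowValue() are stand-ins for whatever actually supplies the data:

    @Override
    protected EventCarrier next()
    {
        try
        {
            final String value = nextRow() ? rowValue() : null;   // placeholders
            if ( null == value )
                return null;                      // no more events
            return new EventCarrier()
            {
                @Override
                public void toSAX() throws IOException, SAXException, SQLException
                {
                    startElement("row", cleared().withAttribute("kind", "text"));
                    characters(value);
                    endElement("row");
                }
            };
        }
        catch ( Exception e )
        {
            return exceptionCarrier(e);   // rethrown when toSAX() is called
        }
    }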
+ */ + public static class EmptyAttributes2 extends Attributes2Impl + { + @Override + public void addAttribute( + String uri, String localName, String qName, + String type, String value) + { + throw new UnsupportedOperationException( + "addAttribute() to the NO_ATTRIBUTES instance"); + } + + @Override + public void setAttributes(Attributes atts) + { + throw new UnsupportedOperationException( + "setAttributes() to the NO_ATTRIBUTES instance"); + } + } + + /** + * Subclass of {@link Attributes2Impl} that also provides chainable methods + * so attribute information can be supplied in a fluent style. + */ + public static class FluentAttributes2 extends Attributes2Impl + { + public FluentAttributes2 cleared() + { + clear(); + return this; + } + + public FluentAttributes2 withAttribute(String localName) + { + addAttribute("", localName, localName, "CDATA", ""); + return this; + } + + public FluentAttributes2 withAttribute(String localName, String value) + { + addAttribute("", localName, localName, "CDATA", value); + return this; + } + + public FluentAttributes2 withAttribute(String uri, String localName, + String qName, String type, String value) + { + addAttribute(uri, localName, qName, type, value); + return this; + } + + public FluentAttributes2 withoutAttribute(int index) + { + removeAttribute(index); + return this; + } + + public FluentAttributes2 withAttribute(int index, String uri, + String localName, String qName, String type, String value) + { + setAttribute(index, uri, localName, qName, type, value); + return this; + } + + public FluentAttributes2 withAttributes(Attributes atts) + { + setAttributes(atts); + return this; + } + + public FluentAttributes2 withLocalName(int index, String localName) + { + setLocalName(index, localName); + return this; + } + + public FluentAttributes2 withQName(int index, String qName) + { + setQName(index, qName); + return this; + } + + public FluentAttributes2 withType(int index, String type) + { + setType(index, type); + return this; + } + + public FluentAttributes2 withURI(int index, String uri) + { + setURI(index, uri); + return this; + } + + public FluentAttributes2 withValue(int index, String value) + { + setValue(index, value); + return this; + } + + public FluentAttributes2 withDeclared(int index, boolean value) + { + setDeclared(index, value); + return this; + } + + public FluentAttributes2 withSpecified(int index, boolean value) + { + setSpecified(index, value); + return this; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/TransactionalMap.java b/pljava/src/main/java/org/postgresql/pljava/internal/TransactionalMap.java index 3c562605..a80b5491 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/TransactionalMap.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/TransactionalMap.java @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack */ package org.postgresql.pljava.internal; @@ -15,6 +21,12 @@ import java.util.AbstractCollection; import java.util.NoSuchElementException; +/* + * The smallest quantum of reason for adding an import: to avoid a javadoc error + * in a deprecation note. + */ +import org.postgresql.pljava.TransactionListener; + /** * A TransactionalMap acts as a modifiable front for a backing map. All * modifications can be reverted by a call to abort or propagated to @@ -23,8 +35,17 @@ * The map is not synchronized so care should be taken if multiple threads * will access the map. * + * @deprecated This class (a) isn't exposed in {@code pljava-api}, (b) is only + * used to implement the once-transactional attribute map in {@code Session}, + * and (c) hasn't had transactional behavior even there, since 3ab90e5 + * (November 2005). Future code needing any kind of store sync'd to PostgreSQL + * transactions should implement that behavior with Java's ordinary tools, using + * a {@link TransactionListener} to be kept in sync with transactions. + * * @author Thomas Hallgren */ +@Deprecated(since="1.5.3", forRemoval=true) +@SuppressWarnings("unchecked") // fix warnings in a deprecated class? no thanks. public class TransactionalMap extends HashMap { private static final long serialVersionUID = 5337569423915578121L; @@ -386,4 +407,4 @@ public Object next() return TransactionalMap.this.get(super.next()); } } -} \ No newline at end of file +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/TriggerData.java b/pljava/src/main/java/org/postgresql/pljava/internal/TriggerData.java index 986b065c..74368e0f 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/TriggerData.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/TriggerData.java @@ -1,14 +1,23 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.internal; import java.sql.ResultSet; import java.sql.SQLException; +import static org.postgresql.pljava.internal.Backend.doInPG; + +import org.postgresql.pljava.TriggerException; import org.postgresql.pljava.jdbc.TriggerResultSet; /** @@ -16,17 +25,74 @@ * * @author Thomas Hallgren */ -public class TriggerData extends JavaWrapper implements org.postgresql.pljava.TriggerData +public class TriggerData implements org.postgresql.pljava.TriggerData { private Relation m_relation; private TriggerResultSet m_old = null; private TriggerResultSet m_new = null; private Tuple m_newTuple; private Tuple m_triggerTuple; + private boolean m_suppress = false; + private final State m_state; + + TriggerData(DualState.Key cookie, long resourceOwner, long pointer) + { + m_state = new State(cookie, this, resourceOwner, pointer); + } + + private static class State + extends DualState.SingleGuardedLong + { + private State( + DualState.Key cookie, TriggerData td, long ro, long hth) + { + super(cookie, td, ro, hth); + } + + /** + * Return the TriggerData pointer. + *

      + * This is a transitional implementation: ideally, each method requiring + * the native state would be moved to this class, and hold the pin for + * as long as the state is being manipulated. Simply returning the + * guarded value out from under the pin, as here, is not great practice, + * but as long as the value is only used in instance methods of + * TriggerData, or subclasses, or something with a strong reference + * to this TriggerData, and only on a thread for which + * {@code Backend.threadMayEnterPG()} is true, disaster will not strike. + * It can't go Java-unreachable while an instance method's on the call + * stack, and the {@code Invocation} marking this state's native scope + * can't be popped before return of any method using the value. + */ + private long getTriggerDataPtr() throws SQLException + { + pin(); + try + { + return guardedLong(); + } + finally + { + unpin(); + } + } + } - TriggerData(long pointer) + private long getNativePointer() throws SQLException { - super(pointer); + return m_state.getTriggerDataPtr(); + } + + @Override + public void suppress() throws SQLException + { + if ( isFiredForStatement() ) + throw new TriggerException(this, + "Attempt to suppress operation in a STATEMENT trigger"); + if ( isFiredAfter() ) + throw new TriggerException(this, + "Attempt to suppress operation in an AFTER trigger"); + m_suppress = true; } /** @@ -90,16 +156,23 @@ public ResultSet getOld() throws SQLException * new and returns the native pointer of new tuple. This * method is called automatically by the trigger handler and should not * be called in any other way. + *
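The new suppress() is what a user trigger function calls to make getTriggerReturnTuple() return zero; a hypothetical PL/Java trigger function might use it like this (the column name is invented for illustration):

    // BEFORE EACH ROW trigger; suppress() silently drops the row operation
    // instead of raising an error. It is only legal BEFORE and FOR EACH ROW.
    public static void vetRow(org.postgresql.pljava.TriggerData td)
    throws java.sql.SQLException
    {
        java.sql.ResultSet nu = td.getNew();      // proposed new row
        if ( null == nu.getString("status") )     // "status": invented column
            td.suppress();
    }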

      + * Note: starting with PostgreSQL 10, this method can fail if SPI is not + * connected; it is the caller's responsibility in PG 10 and up + * to ensure that SPI is connected and that a longer-lived memory + * context than SPI's has been selected, if the caller wants the result of + * this call to survive {@code SPI_finish}. * * @return The modified tuple, or if no modifications have been made, the * original tuple. */ public long getTriggerReturnTuple() throws SQLException { - if(this.isFiredForStatement() || this.isFiredAfter()) + if(this.isFiredForStatement() || this.isFiredAfter() || m_suppress) // - // Only triggers fired before each row can have a return - // value. + // Only triggers fired for each row, and not AFTER, can have a + // nonzero return value. If such a trigger does return zero, it + // tells PostgreSQL to silently suppress the row operation involved. // return 0; @@ -141,10 +214,7 @@ public Relation getRelation() { if(m_relation == null) { - synchronized(Backend.THREADLOCK) - { - m_relation = _getRelation(this.getNativePointer()); - } + m_relation = doInPG(() -> _getRelation(this.getNativePointer())); } return m_relation; } @@ -167,10 +237,8 @@ public Tuple getTriggerTuple() { if(m_triggerTuple == null) { - synchronized(Backend.THREADLOCK) - { - m_triggerTuple = _getTriggerTuple(this.getNativePointer()); - } + m_triggerTuple = + doInPG(() -> _getTriggerTuple(this.getNativePointer())); } return m_triggerTuple; } @@ -191,10 +259,7 @@ public Tuple getNewTuple() { if(m_newTuple == null) { - synchronized(Backend.THREADLOCK) - { - m_newTuple = _getNewTuple(this.getNativePointer()); - } + m_newTuple = doInPG(() -> _getNewTuple(this.getNativePointer())); } return m_newTuple; } @@ -210,10 +275,7 @@ public Tuple getNewTuple() public String[] getArguments() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getArguments(this.getNativePointer()); - } + return doInPG(() -> _getArguments(this.getNativePointer())); } /** @@ -226,10 +288,7 @@ public String[] getArguments() public String getName() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getName(this.getNativePointer()); - } + return doInPG(() -> _getName(this.getNativePointer())); } /** @@ -242,10 +301,7 @@ public String getName() public boolean isFiredAfter() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _isFiredAfter(this.getNativePointer()); - } + return doInPG(() -> _isFiredAfter(this.getNativePointer())); } /** @@ -258,10 +314,7 @@ public boolean isFiredAfter() public boolean isFiredBefore() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _isFiredBefore(this.getNativePointer()); - } + return doInPG(() -> _isFiredBefore(this.getNativePointer())); } /** @@ -274,10 +327,7 @@ public boolean isFiredBefore() public boolean isFiredForEachRow() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _isFiredForEachRow(this.getNativePointer()); - } + return doInPG(() -> _isFiredForEachRow(this.getNativePointer())); } /** @@ -290,10 +340,7 @@ public boolean isFiredForEachRow() public boolean isFiredForStatement() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _isFiredForStatement(this.getNativePointer()); - } + return doInPG(() -> _isFiredForStatement(this.getNativePointer())); } /** @@ -305,10 +352,7 @@ public boolean isFiredForStatement() public boolean isFiredByDelete() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _isFiredByDelete(this.getNativePointer()); - } + return 
doInPG(() -> _isFiredByDelete(this.getNativePointer())); } /** @@ -320,10 +364,7 @@ public boolean isFiredByDelete() public boolean isFiredByInsert() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _isFiredByInsert(this.getNativePointer()); - } + return doInPG(() -> _isFiredByInsert(this.getNativePointer())); } /** @@ -335,13 +376,9 @@ public boolean isFiredByInsert() public boolean isFiredByUpdate() throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _isFiredByUpdate(this.getNativePointer()); - } + return doInPG(() -> _isFiredByUpdate(this.getNativePointer())); } - protected native void _free(long pointer); private static native Relation _getRelation(long pointer) throws SQLException; private static native Tuple _getTriggerTuple(long pointer) throws SQLException; private static native Tuple _getNewTuple(long pointer) throws SQLException; diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/Tuple.java b/pljava/src/main/java/org/postgresql/pljava/internal/Tuple.java index 4acdd8c4..ac4fc417 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/Tuple.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/Tuple.java @@ -1,11 +1,19 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.internal; +import static org.postgresql.pljava.internal.Backend.doInPG; + import java.sql.SQLException; /** @@ -14,36 +22,85 @@ * * @author Thomas Hallgren */ -public class Tuple extends JavaWrapper +public class Tuple { - Tuple(long pointer) + private final State m_state; + + Tuple(DualState.Key cookie, long resourceOwner, long pointer) + { + m_state = new State(cookie, this, resourceOwner, pointer); + } + + private static class State + extends DualState.SingleHeapFreeTuple + { + private State( + DualState.Key cookie, Tuple t, long ro, long ht) + { + super(cookie, t, ro, ht); + } + + /** + * Return the HeapTuple pointer. + *

      + * This is a transitional implementation: ideally, each method requiring + * the native state would be moved to this class, and hold the pin for + * as long as the state is being manipulated. Simply returning the + * guarded value out from under the pin, as here, is not great practice, + * but as long as the value is only used in instance methods of + * Tuple, or subclasses, or something with a strong reference + * to this Tuple, and only on a thread for which + * {@code Backend.threadMayEnterPG()} is true, disaster will not strike. + * It can't go Java-unreachable while an instance method's on the call + * stack, and the {@code Invocation} marking this state's native scope + * can't be popped before return of any method using the value. + */ + private long getHeapTuplePtr() throws SQLException + { + pin(); + try + { + return guardedLong(); + } + finally + { + unpin(); + } + } + } + + /** + * Return pointer to native HeapTuple structure as a long; use only while + * a reference to this class is live and the THREADLOCK is held. + */ + public final long getNativePointer() throws SQLException { - super(pointer); + return m_state.getHeapTuplePtr(); } /** * Obtains a value from the underlying native HeapTuple * structure. + *

      + * Conversion to a JDBC 4.1 specified class is best effort, if the native + * type system knows how to do so; otherwise, the return value can be + * whatever would have been returned in the legacy case. Caller beware! * @param tupleDesc The Tuple descriptor for this instance. * @param index Index of value in the structure (one based). + * @param type Desired Java class of the result, if the JDBC 4.1 version + * of {@code getObject} has been called; null in all the legacy cases. * @return The value or null. * @throws SQLException If the underlying native structure has gone stale. */ - public Object getObject(TupleDesc tupleDesc, int index) + public Object getObject(TupleDesc tupleDesc, int index, Class type) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getObject(this.getNativePointer(), tupleDesc.getNativePointer(), index); - } + return doInPG(() -> + _getObject(this.getNativePointer(), + tupleDesc.getNativePointer(), index, type)); } - /** - * Calls the backend function heap_freetuple(HeapTuple tuple) - * @param pointer The native pointer to the source HeapTuple - */ - protected native void _free(long pointer); - - private static native Object _getObject(long pointer, long tupleDescPointer, int index) + private static native Object _getObject( + long pointer, long tupleDescPointer, int index, Class type) throws SQLException; } diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/TupleDesc.java b/pljava/src/main/java/org/postgresql/pljava/internal/TupleDesc.java index 47c962b7..8dd5b343 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/TupleDesc.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/TupleDesc.java @@ -1,11 +1,19 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.internal; +import static org.postgresql.pljava.internal.Backend.doInPG; + import java.sql.SQLException; /** @@ -14,17 +22,66 @@ * * @author Thomas Hallgren */ -public class TupleDesc extends JavaWrapper +public class TupleDesc { + private final State m_state; private final int m_size; private Class[] m_columnClasses; - TupleDesc(long pointer, int size) throws SQLException + TupleDesc(DualState.Key cookie, long resourceOwner, long pointer, int size) + throws SQLException { - super(pointer); + m_state = new State(cookie, this, resourceOwner, pointer); m_size = size; } + private static class State + extends DualState.SingleFreeTupleDesc + { + private State( + DualState.Key cookie, TupleDesc td, long ro, long hth) + { + super(cookie, td, ro, hth); + } + + /** + * Return the TupleDesc pointer. + *
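The extra Class parameter on Tuple.getObject above mirrors the JDBC 4.1 getObject(int, Class) entry point; callers pass null on the legacy paths and the requested class otherwise. A sketch of the calling convention only (inside code already declared to throw SQLException):

    Object legacy = tuple.getObject(tupleDesc, 1, null);           // legacy behavior
    String typed  = (String) tuple.getObject(tupleDesc, 1, String.class);
                                    // best-effort conversion, per the note above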

      + * This is a transitional implementation: ideally, each method requiring + * the native state would be moved to this class, and hold the pin for + * as long as the state is being manipulated. Simply returning the + * guarded value out from under the pin, as here, is not great practice, + * but as long as the value is only used in instance methods of + * TupleDesc, or subclasses, or something with a strong reference + * to this TupleDesc, and only on a thread for which + * {@code Backend.threadMayEnterPG()} is true, disaster will not strike. + * It can't go Java-unreachable while an instance method's on the call + * stack, and the {@code Invocation} marking this state's native scope + * can't be popped before return of any method using the value. + */ + private long getTupleDescPtr() throws SQLException + { + pin(); + try + { + return guardedLong(); + } + finally + { + unpin(); + } + } + } + + /** + * Return pointer to native TupleDesc structure as a long; use only while + * a reference to this class is live and the THREADLOCK is held. + */ + public final long getNativePointer() throws SQLException + { + return m_state.getTupleDescPtr(); + } + /** * Returns the name of the column at index. * @param index The one based index of the column. @@ -35,10 +92,7 @@ public class TupleDesc extends JavaWrapper public String getColumnName(int index) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getColumnName(this.getNativePointer(), index); - } + return doInPG(() -> _getColumnName(this.getNativePointer(), index)); } /** @@ -51,10 +105,8 @@ public String getColumnName(int index) public int getColumnIndex(String colName) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getColumnIndex(this.getNativePointer(), colName.toLowerCase()); - } + return doInPG(() -> + _getColumnIndex(this.getNativePointer(), colName.toLowerCase())); } /** @@ -68,10 +120,7 @@ public int getColumnIndex(String colName) public Tuple formTuple(Object[] values) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _formTuple(this.getNativePointer(), values); - } + return doInPG(() -> _formTuple(this.getNativePointer(), values)); } /** @@ -91,12 +140,12 @@ public Class getColumnClass(int index) if(m_columnClasses == null) { m_columnClasses = new Class[m_size]; - synchronized(Backend.THREADLOCK) + doInPG(() -> { long _this = this.getNativePointer(); for(int idx = 0; idx < m_size; ++idx) m_columnClasses[idx] = _getOid(_this, idx+1).getJavaClass(); - } + }); } return m_columnClasses[index-1]; } @@ -107,18 +156,9 @@ public Class getColumnClass(int index) public Oid getOid(int index) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getOid(this.getNativePointer(), index); - } + return doInPG(() -> _getOid(this.getNativePointer(), index)); } - /** - * Calls the backend function FreeTupleDesc(TupleDesc desc) - * @param pointer The native pointer to the source TupleDesc - */ - protected native void _free(long pointer); - private static native String _getColumnName(long _this, int index) throws SQLException; private static native int _getColumnIndex(long _this, String colName) throws SQLException; private static native Tuple _formTuple(long _this, Object[] values) throws SQLException; diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/UncheckedException.java b/pljava/src/main/java/org/postgresql/pljava/internal/UncheckedException.java new file mode 100644 index 00000000..63fba7e8 --- /dev/null +++ 
b/pljava/src/main/java/org/postgresql/pljava/internal/UncheckedException.java @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.io.PrintStream; +import java.io.PrintWriter; + +/** + * An unchecked exception to efficiently wrap checked Throwables. + *

      + * This exception does not carry a message or stack trace of its own; most of + * its methods proxy through to those of its 'cause', so that it does not appear + * as an extra layer of indirection in a typical stack trace. It has one + * specific new method, {@link #unwrap unwrap}, to obtain the actual wrapped + * throwable (as {@code getCause} is proxied to return the wrapped throwable's + * cause). + */ +public final class UncheckedException extends RuntimeException +{ + /** + * Return the exception e as a {@code RuntimeException}. + *

      + * Intended for use in a {@code throw unchecked(e);} construct. + * If e is already an unchecked exception, it is simply returned; + * otherwise, it is returned wrapped. + * @return the supplied exception, possibly wrapped + */ + public static RuntimeException unchecked(Exception e) + { + if ( e instanceof RuntimeException ) + return (RuntimeException)e; + return new UncheckedException(e); + } + + /** + * Return the throwable t as a {@code RuntimeException}. + *

      + * Intended for use in a {@code throw unchecked(t);} construct. + * If t is already a {@code RuntimeException}, it is simply + * returned; if it is an {@code Error}, it is thrown from this method; + * otherwise, it is returned wrapped. + * @return the supplied exception, possibly wrapped + * @throws Error or a subclass, if that's what t is + */ + public static RuntimeException unchecked(Throwable t) + { + if ( t instanceof Error ) + throw (Error)t; + if ( t instanceof RuntimeException ) + return (RuntimeException)t; + return new UncheckedException(t); + } + + private UncheckedException(Throwable t) + { + super(null, null != t ? t : new NullPointerException( + "null 'cause' passed to UncheckedException constructor"), + true, false); + } + + /** + * Return the {@code Throwable} that this {@code UncheckedException} wraps. + *
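A typical use, with the wrapping undone at the eventual catch site (illustrative only; the throwing method is a placeholder):

    // assumes: import static org.postgresql.pljava.internal.UncheckedException.unchecked;

    Runnable r = () ->        // an API whose signature admits no checked exceptions
    {
        try
        {
            somethingThatThrowsSQLException();   // placeholder
        }
        catch ( Exception e )
        {
            throw unchecked(e);   // already a RuntimeException, or wrapped
        }
    };

    try
    {
        r.run();
    }
    catch ( UncheckedException u )
    {
        Throwable original = u.unwrap();   // the wrapped checked exception
        // handle or rethrow 'original' as appropriate
    }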

      + * The familiar inherited methods proxy through to the wrapped throwable + * (so {@code getCause} will return its cause, and so on); this + * distinct method is provided to undo the wrapping. + * @return the wrapped Throwable + */ + public Throwable unwrap() + { + return super.getCause(); + } + + @Override + public Throwable fillInStackTrace() + { + super.getCause().fillInStackTrace(); + return this; + } + + @Override + public Throwable getCause() + { + return super.getCause().getCause(); + } + + @Override + public String getLocalizedMessage() + { + return super.getCause().getLocalizedMessage(); + } + + @Override + public String getMessage() + { + return super.getCause().getMessage(); + } + + @Override + public StackTraceElement[] getStackTrace() + { + return super.getCause().getStackTrace(); + } + + @Override + public Throwable initCause(Throwable cause) + { + super.getCause().initCause(cause); + return this; + } + + @Override + public void printStackTrace() + { + super.getCause().printStackTrace(); + } + + @Override + public void printStackTrace(PrintStream s) + { + super.getCause().printStackTrace(s); + } + + @Override + public void printStackTrace(PrintWriter s) + { + super.getCause().printStackTrace(s); + } + + @Override + public void setStackTrace(StackTraceElement[] stackTrace) + { + super.getCause().setStackTrace(stackTrace); + } + + @Override + public String toString() + { + return "unchecked:" + super.getCause().toString(); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/UnhandledPGException.java b/pljava/src/main/java/org/postgresql/pljava/internal/UnhandledPGException.java new file mode 100644 index 00000000..95c01532 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/UnhandledPGException.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.sql.SQLException; + +import static java.util.Arrays.copyOfRange; + +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; + +import static org.postgresql.pljava.jdbc.Invocation.s_unhandled; + +/** + * A Java exception constructed over a {@link ServerException} that has been + * thrown but not recovered from (as by rolling back to a prior savepoint) + * before another attempt to call into PostgreSQL routines. + * @author Thomas Hallgren + */ +public class UnhandledPGException extends SQLException +{ + private static final long serialVersionUID = 1L; + + private static UnhandledPGException obtain() + { + assert threadMayEnterPG() : "UnhandledPGException.create thread"; + + SQLException e = s_unhandled; + + if ( e instanceof UnhandledPGException ) + return (UnhandledPGException)e; + else if ( ! 
(e instanceof ServerException) ) + throw new AssertionError("unexpected s_unhandled"); + + e = new UnhandledPGException((ServerException)e); + + StackTraceElement[] es = e.getStackTrace(); + if ( null != es && 0 < es.length ) + e.setStackTrace(copyOfRange(es, 1, es.length)); + + return (UnhandledPGException)(s_unhandled = e); + } + + private UnhandledPGException(ServerException e) + { + super( + "an earlier PostgreSQL exception (see Caused by:) prevents " + + "further calls into PostgreSQL until rollback of this " + + "transaction or a subtransaction / savepoint", "25P02", e); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaWrapper.java b/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaWrapper.java new file mode 100644 index 00000000..44432000 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaWrapper.java @@ -0,0 +1,1236 @@ +/* + * Copyright (c) 2019-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.io.Closeable; +import java.io.FilterInputStream; +import java.io.InputStream; +import java.io.OutputStream; + +import java.io.IOException; + +import java.nio.ByteBuffer; + +import java.sql.SQLException; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import static java.util.concurrent.Executors.privilegedCallable; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadFactory; + +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; + +import static org.postgresql.pljava.internal.Backend.doInPG; + +/** + * Interface that wraps a PostgreSQL native variable-length ("varlena") datum; + * implementing classes present an existing one to Java as a readable + * {@code InputStream}, or allow a new one to be constructed by presenting a + * writable {@code OutputStream}. + *

      + * Common to both is a method {@link #adopt adopt()}, allowing native + * code to reassert control over the varlena (for the writable variety, after + * Java code has written and closed it), after which it is no longer accessible + * from Java. + */ +public interface VarlenaWrapper extends Closeable +{ + /** + * Return the varlena address to native code and dissociate the varlena + * from Java. + * @param cookie Capability held by native code. + */ + long adopt(DualState.Key cookie) throws SQLException; + + /** + * Return a string describing this object in a way useful for debugging, + * prefixed with the name (abbreviated for comfort) of the class of the + * object passed in (the normal Java {@code toString()} method should pass + * {@code this}). + *

      + * Subclasses or consumers are encouraged to call this method and append + * further details specific to the subclass or consumer. The convention + * should be that the recursion will stop at some class that will actually + * construct the abbreviated class name of {@code o} and use it to prefix + * the returned value. + * @param o An object whose class name (possibly abbreviated) should be used + * to prefix the returned string. + * @return Description of this object. + */ + String toString(Object o); + + + + /** + * A class by which Java reads the content of a varlena. + * + * Associated with a {@code ResourceOwner} to bound the lifetime of + * the native reference; the chosen resource owner must be one that will be + * released no later than the memory context containing the varlena. + */ + public static class Input implements VarlenaWrapper + { + private long m_parkedSize; + private long m_bufferSize; + private final State m_state; + + /** + * Construct a {@code VarlenaWrapper.Input}. + * @param cookie Capability held by native code. + * @param resourceOwner Resource owner whose release will indicate that the + * underlying varlena is no longer valid. + * @param context Memory context in which the varlena is allocated. + * @param snapshot A snapshot that has been registered in case the + * parked varlena is TOASTed on disk, to keep the toast tuples from + * being vacuumed away. + * @param varlenaPtr Pointer value to the underlying varlena, to be + * {@code pfree}d when Java code closes or reclaims this object. + * @param parkedSize Size occupied by this datum in memory while it is + * "parked", that is, before the first call to a reading method. + * @param bufferSize Size that is or will be occupied by the detoasted + * content once a reading method has been called. + * @param buf Readable direct {@code ByteBuffer} constructed over the + * varlena's data bytes. + */ + private Input(DualState.Key cookie, long resourceOwner, + long context, long snapshot, long varlenaPtr, + long parkedSize, long bufferSize, ByteBuffer buf) + { + m_parkedSize = parkedSize; + m_bufferSize = bufferSize; + m_state = new State( + cookie, this, resourceOwner, + context, snapshot, varlenaPtr, buf); + } + + public void pin() throws SQLException + { + m_state.pin(); + } + + public boolean pinUnlessReleased() + { + return m_state.pinUnlessReleased(); + } + + public void unpin() + { + m_state.unpin(); + } + + public ByteBuffer buffer() throws SQLException + { + return m_state.buffer(); + } + + @Override + public void close() throws IOException + { + if ( pinUnlessReleased() ) + return; + try + { + m_state.releaseFromJava(); + } + finally + { + unpin(); + } + } + + @Override + public String toString() + { + return toString(this); + } + + @Override + public String toString(Object o) + { + return String.format("%s parked:%d buffer:%d", + m_state.toString(o), m_parkedSize, m_bufferSize); + } + + @Override + public long adopt(DualState.Key cookie) throws SQLException + { + m_state.pin(); + try + { + return m_state.adopt(cookie); + } + finally + { + m_state.unpin(); + } + } + + public class Stream + extends ByteBufferInputStream implements VarlenaWrapper + { + /** + * A duplicate of the {@code VarlenaWrapper.Input}'s byte buffer, + * so its {@code position} and {@code mark} can be updated by the + * {@code InputStream} operations without affecting the original + * (therefore multiple {@code Stream}s may read one {@code Input}). 
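A consumer handed such a Stream by native code can treat it as an ordinary InputStream; each Stream advances its own duplicate of the buffer. A hypothetical sketch:

    // Copies the varlena's content; closing the Stream also closes (releases
    // from Java) the Input it came from.
    static byte[] drain(VarlenaWrapper.Input.Stream s) throws IOException
    {
        try ( InputStream in = s;
              ByteArrayOutputStream sink = new ByteArrayOutputStream() )
        {
            byte[] buf = new byte[8192];
            for ( int n; -1 != (n = in.read(buf)); )
                sink.write(buf, 0, n);
            return sink.toByteArray();
        }
    }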
+ */ + private ByteBuffer m_movingBuffer; + + /* + * Overrides {@code ByteBufferInputStream} method and throws the + * exception type declared there. For other uses of pin in this + * class where SQLException is expected, just use + * {@code m_state.pin} directly. + */ + @Override + protected void pin() throws IOException + { + if ( ! m_open ) + throw new IOException("Read from closed VarlenaWrapper"); + try + { + Input.this.pin(); + } + catch ( SQLException e ) + { + throw new IOException(e.getMessage(), e); + } + } + + /* + * Unpin for use in {@code ByteBufferInputStream} or here; no + * throws-clause difference to blotch things up. + */ + protected void unpin() + { + Input.this.unpin(); + } + + @Override + public void close() throws IOException + { + if ( pinUnlessReleased() ) + return; + try + { + super.close(); + Input.this.close(); + } + finally + { + unpin(); + } + } + + @Override + public String toString(Object o) + { + return String.format("%s %s", + Input.this.toString(o), m_open ? "open" : "closed"); + } + + /** + * Apply a {@code Verifier} to the input data. + *

      + * This should only be necessary if the input wrapper is being used + * directly as an output item, and needs verification that it + * conforms to the format of the target type. + *

      + * The current position must be at the beginning of the stream. The + * verifier must leave it at the end to confirm the entire stream + * was examined. There should be no need to reset the position here, + * as the only anticipated use is during an {@code adopt}, and the + * native code will only care about the varlena's address. + */ + public void verify(Verifier v) throws SQLException + { + /* + * This is only called from some client code's adopt() method, + * calls to which are serialized through Backend.THREADLOCK + * anyway, so holding a pin here for the duration doesn't + * further limit concurrency. Hold m_state's monitor also to + * block any extraneous reading interleaved with the verifier. + */ + m_state.pin(); + try + { + ByteBuffer buf = buffer(); + synchronized ( m_state ) + { + if ( 0 != buf.position() ) + throw new SQLException( + "Variable-length input data to be verified " + + " not positioned at start", + "55000"); + InputStream dontCloseMe = new FilterInputStream(this) + { + @Override + public void close() throws IOException { } + }; + v.verify(dontCloseMe); + if ( 0 != buf.remaining() ) + throw new SQLException( + "Verifier finished prematurely"); + } + } + catch ( SQLException | RuntimeException e ) + { + throw e; + } + catch ( Exception e ) + { + throw new SQLException( + "Exception verifying variable-length data: " + + e.getMessage(), "XX000", e); + } + finally + { + m_state.unpin(); + } + } + + @Override + protected ByteBuffer buffer() throws IOException + { + try + { + if ( null == m_movingBuffer ) + { + ByteBuffer b = Input.this.buffer(); + m_movingBuffer = b.duplicate().order(b.order()); + } + return m_movingBuffer; + } + catch ( SQLException sqe ) + { + throw new IOException("Read from varlena failed", sqe); + } + } + + @Override + public long adopt(DualState.Key cookie) throws SQLException + { + Input.this.pin(); + try + { + if ( ! m_open ) + throw new SQLException( + "Cannot adopt VarlenaWrapper.Input after " + + "it is closed", "55000"); + return Input.this.adopt(cookie); + } + finally + { + Input.this.unpin(); + } + } + } + + + + private static class State + extends DualState.SingleMemContextDelete + { + private ByteBuffer m_buf; + private long m_snapshot; + private long m_varlena; + + private State( + DualState.Key cookie, Input vr, long resourceOwner, + long memContext, long snapshot, long varlenaPtr, ByteBuffer buf) + { + super(cookie, vr, resourceOwner, memContext); + m_snapshot = snapshot; + m_varlena = varlenaPtr; + m_buf = null == buf ? buf : buf.asReadOnlyBuffer(); + } + + private ByteBuffer buffer() throws SQLException + { + pin(); + try + { + if ( null != m_buf ) + return m_buf; + doInPG(() -> + { + m_buf = _detoast( + m_varlena, guardedLong(), m_snapshot, + m_resourceOwner).asReadOnlyBuffer(); + m_snapshot = 0; + }); + return m_buf; + } + finally + { + unpin(); + } + } + + private long adopt(DualState.Key cookie) throws SQLException + { + adoptionLock(cookie); + try + { + if ( 0 != m_snapshot ) + { + /* fetch, before snapshot released */ + m_varlena = _fetch(m_varlena, guardedLong()); + } + return m_varlena; + } + finally + { + adoptionUnlock(cookie); + } + } + + @Override + protected void nativeStateReleased(boolean javaStateLive) + { + assert Backend.threadMayEnterPG(); + super.nativeStateReleased(javaStateLive); + /* + * You might not expect to have to explicitly unregister a + * snapshot from the resource owner that is at this very + * moment being released, and will happily unregister the + * snapshot itself in the course of so doing. 
Ah, but it + * also happily logs a warning when it does that, so we need + * to have our toys picked up before it gets the chance. + */ + if ( 0 != m_snapshot ) + _unregisterSnapshot(m_snapshot, m_resourceOwner); + m_snapshot = 0; + m_buf = null; + } + + @Override + protected void javaStateUnreachable(boolean nativeStateLive) + { + assert Backend.threadMayEnterPG(); + super.javaStateUnreachable(nativeStateLive); + if ( 0 != m_snapshot ) + _unregisterSnapshot(m_snapshot, m_resourceOwner); + m_snapshot = 0; + m_buf = null; + } + + @Override + public String toString(Object o) + { + return String.format("%s snap:%x varlena:%x %s", + super.toString(o), m_snapshot, m_varlena, + String.valueOf(m_buf).replace("java.nio.", "")); + } + + /** + * Unregister a snapshot we've been holding. + */ + private native void + _unregisterSnapshot(long snap, long resOwner); + + /** + * Detoast the parked value; called when a method needing to read it + * has been invoked. + *

      + * Detoast the passed {@code varlena} into the same + * {@code memContext}, {@code pfree} the original, update the + * {@code m_varlena} instance field to point to the detoasted copy, + * and return a direct byte buffer that windows it. + *

      + * If {@code snapshot} is nonzero, unregister the snapshot from the + * resource owner. The caller may rely on this happening, and + * confidently set {@code m_snapshot} to zero after this call. + */ + private native ByteBuffer _detoast( + long varlena, long memContext, long snapshot, long resOwner); + + /** + * Merely fetch a parked value, when it does not need to be fully + * detoasted and readable, but simply retrieved from its TOAST rows + * before loss of the snapshot that may be protecting them from + * VACUUM. The original value is {@code pfree}d. + *

      + * The result may still have an 'extended' (for example, compressed) + * form. + */ + private native long _fetch(long varlena, long memContext); + } + } + + /** + * A class by which Java writes the content of a varlena as an OutputStream. + * + * Associated with a {@code ResourceOwner} to bound the lifetime of + * the native reference; the chosen resource owner must be one that will be + * released no later than the memory context containing the varlena. + */ + public class Output extends OutputStream implements VarlenaWrapper + { + private State m_state; + private boolean m_open = true; + + /** + * Construct a {@code VarlenaWrapper.Output}. + * @param cookie Capability held by native code. + * @param resourceOwner Resource owner whose release will indicate that + * the underlying varlena is no longer valid. + * @param context Pointer to memory context containing the underlying + * varlena; subject to {@code MemoryContextDelete} if Java code frees or + * reclaims this object. + * @param varlenaPtr Pointer value to the underlying varlena. + * @param buf Writable direct {@code ByteBuffer} constructed over (an + * initial region of) the varlena's data bytes. + */ + private Output(DualState.Key cookie, long resourceOwner, + long context, long varlenaPtr, ByteBuffer buf) + { + m_state = new State( + cookie, this, resourceOwner, context, varlenaPtr, buf); + } + + /** + * Set the {@link Verifier Verifier} to be used on content written to + * this varlena. + *

      + * A verifier must be set, either to {@link Verifier.NoOp NoOp} or a + * datatype-specific subclass of {@link Verifier.Base Base}, before + * writing can succeed. + *

      + * On construction, no verifier is set, so the datatype-specific code + * can determine whether the {@code NoOp} or a specific verifier will be + * needed. This method can only be called once, so that this class could + * then be exposed to client code as an {@code OutputStream} without + * allowing the verifier to be changed. + */ + public void setVerifier(Verifier v) throws IOException + { + if ( ! m_open ) + throw new IOException( + "I/O operation on closed VarlenaWrapper.Output"); + m_state.setVerifier(v); + } + + /** + * Return a ByteBuffer to write into. + *

      + * It will be the existing buffer if it has any remaining capacity to + * write into (even if it is less than desiredCapacity), otherwise a new + * buffer allocated with desiredCapacity as a hint (it may still be + * smaller than the hint, or larger). Call with desiredCapacity zero to + * indicate that writing is finished and make the varlena available for + * native code to adopt. + */ + private ByteBuffer buf(int desiredCapacity) throws IOException + { + if ( ! m_open ) + throw new IOException("Write on closed VarlenaWrapper.Output"); + try + { + return m_state.buffer(desiredCapacity); + } + catch ( SQLException sqe ) + { + throw new IOException("Write on varlena failed", sqe); + } + } + + /** + * Wrapper around the {@code pin} method of the native state, for sites + * where an {@code IOException} is needed rather than + * {@code SQLException}. + */ + private void pin() throws IOException + { + try + { + m_state.pin(); + } + catch ( SQLException e ) + { + throw new IOException(e.getMessage(), e); + } + } + + /** + * Wrapper around the {@code pinUnlessReleased} method of the native + * state. + */ + private boolean pinUnlessReleased() + { + return m_state.pinUnlessReleased(); + } + + @Override + public void write(int b) throws IOException + { + pin(); + try + { + ByteBuffer dst = buf(1); + dst.put((byte)(b & 0xff)); + } + finally + { + m_state.unpin(); + } + } + + @Override + public void write(byte[] b, int off, int len) throws IOException + { + pin(); + try + { + while ( 0 < len ) + { + ByteBuffer dst = buf(len); + int can = dst.remaining(); + if ( can > len ) + can = len; + dst.put(b, off, can); + off += can; + len -= can; + } + } + finally + { + m_state.unpin(); + } + } + + @Override + public void close() throws IOException + { + if ( pinUnlessReleased() ) + return; + try + { + if ( ! m_open ) + return; + m_state.setVerifierIfNone(); + buf(0); + m_open = false; + m_state.verify(); + } + finally + { + m_state.unpin(); + } + } + + /** + * Actually free a {@code VarlenaWrapper.Output}. + *

      + * {@code close()} does not do so, because the typical use of this class + * is to write to an instance, close it, then let some native code adopt + * it. If it turns out one won't be adopted and must be freed, use this + * method. + */ + public void free() throws IOException + { + close(); + m_state.releaseFromJava(); + } + + @Override + public long adopt(DualState.Key cookie) throws SQLException + { + m_state.pin(); + try + { + if ( m_open ) + throw new SQLException( + "Writing of VarlenaWrapper.Output not yet complete", + "55000"); + return m_state.adopt(cookie); + } + finally + { + m_state.unpin(); + } + } + + @Override + public String toString() + { + return toString(this); + } + + @Override + public String toString(Object o) + { + return String.format("%s %s", m_state.toString(o), + m_open ? "open" : "closed"); + } + + + + private static class State + extends DualState.SingleMemContextDelete + { + private ByteBuffer m_buf; + private long m_varlena; + private Verifier m_verifier; + + private State( + DualState.Key cookie, Output vr, + long resourceOwner, long memContext, long varlenaPtr, + ByteBuffer buf) + { + super(cookie, vr, resourceOwner, memContext); + m_varlena = varlenaPtr; + m_buf = buf; + } + + private ByteBuffer buffer(int desiredCapacity) throws SQLException + { + pin(); + try + { + if ( 0 < m_buf.remaining() && 0 < desiredCapacity ) + return m_buf; + ByteBuffer filledBuf = m_buf; + doInPG(() -> + { + int lstate = lock(true); // true -> upgrade my held pin + try + { + m_buf = _nextBuffer(m_varlena, m_buf.position(), + desiredCapacity); + } + finally + { + unlock(lstate); + } + }); + m_verifier.update(this, filledBuf); + if ( 0 == desiredCapacity ) + m_verifier.update(MarkableSequenceInputStream.NO_MORE); + return m_buf; + } + finally + { + unpin(); + } + } + + private long adopt(DualState.Key cookie) throws SQLException + { + adoptionLock(cookie); + try + { + return m_varlena; + } + finally + { + adoptionUnlock(cookie); + } + } + + private void setVerifier(Verifier v) + { + if ( null != m_verifier ) + throw new IllegalStateException( + "setVerifier when already set"); + if ( null == v ) + throw new NullPointerException("Null Verifier parameter"); + m_verifier = v.schedule(); + } + + /* + * Only for use in close() in case of early closing before the + * caller has set a verifier; make sure at least the NoOp verifier + * is there. 
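End to end, the expected life of an Output handed out by native code looks roughly like this (sketch only; 'payload' stands for whatever bytes the caller produces, and the Verifier names are the nested classes of this file):

    // A caller that fully controls the bytes it writes, so the NoOp verifier
    // suffices; a type exposing arbitrary client writes would install a
    // type-specific Verifier.Base subclass instead.
    void fill(VarlenaWrapper.Output out, byte[] payload) throws IOException
    {
        out.setVerifier(Verifier.NoOp.INSTANCE);  // must be set before writing
        out.write(payload);
        out.close();      // completes the varlena and awaits the verifier
        // Native code may now adopt(cookie); if it never will,
        // out.free() is what actually releases the native memory.
    }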
+ */ + private void setVerifierIfNone() + { + if ( null == m_verifier ) + m_verifier = Verifier.NoOp.INSTANCE; + } + + private void cancelVerifier() + { + try + { + m_verifier.cancel(); + } + catch ( Exception e ) + { + } + } + + private void verify() throws IOException // because called in close + { + try + { + m_verifier.finish(); + } + catch ( SQLException e ) + { + throw new IOException( + "Variable-length PostgreSQL data written failed " + + "verification", e); + } + } + + @Override + public String toString(Object o) + { + return String.format("%s varlena:%x %s", + super.toString(o), m_varlena, + String.valueOf(m_buf).replace("java.nio.", "")); + } + + @Override + protected void nativeStateReleased(boolean javaStateLive) + { + m_buf = null; + cancelVerifier(); + super.nativeStateReleased(javaStateLive); + } + + @Override + protected void javaStateUnreachable(boolean nativeStateLive) + { + m_buf = null; + cancelVerifier(); + super.javaStateUnreachable(nativeStateLive); + } + + private native ByteBuffer _nextBuffer( + long varlenaPtr, int currentBufPosition, int desiredCapacity); + } + } + + /** + * A {@code Verifier} verifies the proper form of content written to a + * {@code VarlenaWrapper.Output}. + *

      + * This is necessary only when the correctness of the written stream may be + * doubtful, as when an API spec requires exposing a method for client code + * to write arbitrary bytes. If a type implementation exposes only + * type-appropriate operations to client code, and always controls the byte + * stream written to the varlena, the {@code NoOp} verifier can be used. + *

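Illustrative sketch, not from the patch itself: the choice the preceding paragraph describes, between the cheap shared NoOp instance and a real verifier. The surrounding method and its parameters are hypothetical; only Verifier.NoOp.INSTANCE is taken from the code below.

    import org.postgresql.pljava.internal.VarlenaWrapper.Verifier;

    class VerifierChoice
    {
        // When the type implementation produced every byte itself, the shared
        // NoOp instance suffices; otherwise supply a type-specific subclass
        // of Verifier.Base (sketched further below).
        static Verifier choose(boolean bytesCameFromClientCode, Verifier typeSpecific)
        {
            return bytesCameFromClientCode ? typeSpecific : Verifier.NoOp.INSTANCE;
        }
    }
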
      + * {@code Verifier} itself cannot be instantiated or extended, except by its + * two immediate subclasses, {@link NoOp NoOp} and {@link Base Base}. + * Type-specific verifiers must extend {@code Base}. Exactly one instance of + * {@code NoOp}, {@link NoOp#INSTANCE NoOp.INSTANCE}, exists. + *

      + * A type-specific verifier must supply a {@link #verify} method that reads + * its input stream argument (which may be assumed to support + * {@link InputStream#mark mark} and {@link InputStream#reset reset} + * efficiently), and completes normally if the full stream is a complete and + * well-formed representation of the type. Otherwise, it must throw an + * exception. + *

      + * In use, a verifier is instantiated and {@link #schedule schedule()}d, + * which sets the {@code verify} method running in a separate thread. + * The {@code verify} method must not interact with PostgreSQL. + * The varlena wrapper code then passes buffers to it via {@link #update + * update()} as they are filled. A final call to {@link #finish}, in the + * thread interacting with PostgreSQL, waits for the verify task to + * complete and then rethrows the exception, if it threw one. It is possible + * to {@link #cancel} a {@code Verifier}. + *

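Illustrative sketch, not from the patch itself: the kind of type-specific verifier the preceding paragraphs call for. The class name and the particular check (that the content decodes as well-formed UTF-8) are hypothetical; scheduling, feeding of buffers, and finish()/cancel() are driven by the varlena wrapper code as described above.

    import java.io.InputStream;
    import java.io.InputStreamReader;
    import java.nio.charset.CharsetDecoder;
    import java.nio.charset.StandardCharsets;
    import org.postgresql.pljava.internal.VarlenaWrapper.Verifier;

    /** Hypothetical verifier: accepts only streams that decode as UTF-8. */
    class Utf8Verifier extends Verifier.Base
    {
        @Override
        protected void verify(InputStream is) throws Exception
        {
            // A freshly obtained CharsetDecoder REPORTs malformed input rather
            // than replacing it, so a bad byte sequence surfaces here as an
            // exception, which finish() rethrows in the PostgreSQL thread.
            CharsetDecoder dec = StandardCharsets.UTF_8.newDecoder();
            try ( InputStreamReader r = new InputStreamReader(is, dec) )
            {
                char[] sink = new char[8192];
                while ( -1 != r.read(sink) )
                    ; // just consume; a decoding error aborts verification
            }
        }
    }
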
      + * As an optimization, all those methods are no-ops in the {@code NoOp} + * class; no other thread is used, and no work is done. The {@code Base} + * class, unextended, also serves as a verifier that accepts anything, but + * goes through all the motions to do it. + */ + public static abstract class Verifier implements Callable + { + private final BlockingQueue m_queue; + private final CountDownLatch m_latch; + private volatile Future m_future; + + /* + * The design of Java's FutureTask strikes me as bizarre. One might + * think that the most natural way to use it for a task that does a + * certain thing and returns a result would be to extend it, override + * a particular method to do that thing, and submit it, ending up with + * one object that serves both as the task to be run and the Future. + * And that seems to be the exact design approach that its API + * /precludes/, because its only available constructors require passing + * a Runnable or Callable /that is some other object/. + * + * Can the constructor create a Callable that simply calls back to the + * verify method of this object, and pass that callable to the + * FutureTask constructor? No, because referring to 'this' before the + * supertype constructor has been called is a compile-time error. + * + * I really want one Verifier object that has all of: the verify() + * method being run in the executor, the update() method used to feed it + * stuff, and the Future-inspired methods for dealing with its status + * and result. And the only way I am seeing to get there is to have it + * submit() itself (in the schedule method) and then hold a reference to + * the Future created for it in that operation. Then it can have some + * Future-inspired methods that are more or less proxies to the methods + * on the Future itself, but then those have to deal with the chance + * that the Future reference hasn't been stored yet, so another whole + * synchronization puzzle crops up around the convenient synchronization + * tool. :( + * + * So, this method returns the Future, if we have it, or waits + * for the latch and /then/ returns the Future. + */ + private Future future() throws SQLException + { + Future f = m_future; + if ( null != f ) + return f; + try + { + m_latch.await(); + } + catch ( InterruptedException e ) + { + throw new SQLException("Waiting thread interrupted", e); + } + return m_future; + } + + /* + * Private constructor. The nested class NoOp can call it passing nulls. + */ + private Verifier( + BlockingQueue queue, + CountDownLatch latch) + { + m_queue = queue; + m_latch = latch; + } + + /* + * The nested class Base can call this one. Otherwise it's private, + * so no other direct subclasses are possible. + */ + private Verifier() + { + this(new LinkedBlockingQueue(), + new CountDownLatch(1)); + } + + protected void verify(InputStream is) throws Exception + { + do + { + is.skip(Long.MAX_VALUE); + } + while ( -1 != is.read() ); + } + + @Override + public final Void call() throws Exception + { + try ( InputStream is = new MarkableSequenceInputStream(m_queue) ) + { + verify(is); + return null; + } + } + + /** + * A Verifier that accepts any content, cheaply. 
+ */ + public static final class NoOp extends Verifier + { + private NoOp() { super(null, null); } + + public static final Verifier INSTANCE = new NoOp(); + + public Verifier schedule() + { + return this; + } + + public void update(InputStream is) throws SQLException + { + } + + public void update(Output.State state, ByteBuffer bb) + throws SQLException + { + } + + public void finish() throws SQLException + { + } + + public void cancel() throws SQLException + { + } + } + + /** + * Verifier to be extended to verify byte streams for specific types. + *

      + * A subclass should override {@link verify} with a method that reads + * the InputStream and throws an exception unless the entire stream was + * successfully read and represented a well-formed instance of the type. + */ + public static class Base extends Verifier + { + protected Base() { } + + @Override + public final Verifier schedule() + { + return super.schedule(); + } + + @Override + public final void update(InputStream is) throws SQLException + { + super.update(is); + } + + public final void update(Output.State state, ByteBuffer bb) + throws SQLException + { + super.update(state, bb); + } + + @Override + public final void finish() throws SQLException + { + super.finish(); + } + + @Override + public final void cancel() throws SQLException + { + super.cancel(); + } + } + + /** + * Set up the {@link #verify verify} method to be executed + * in another thread. + * @return This {@code Verifier} object. + */ + public Verifier schedule() + { + synchronized (m_latch) + { + if ( 1 == m_latch.getCount() ) + { + m_future = + LazyExecutorService.INSTANCE + .submit(privilegedCallable(this)); + m_latch.countDown(); + } + } + return this; + } + + /** + * Send the next {@code InputStream} of content to be verified. + *

      + * It is assumed, but not checked here, that any + * {@code InputStream} supplied to this method supports + * {@link InputStream#mark mark} and {@link InputStream#reset reset} + * efficiently. + *

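Illustrative sketch, not from the patch itself: a stream that lacks mark/reset support can be given it before being handed to update(). The helper and its names are hypothetical; BufferedInputStream is simply one standard way to satisfy the stated assumption.

    import java.io.BufferedInputStream;
    import java.io.InputStream;
    import java.sql.SQLException;
    import org.postgresql.pljava.internal.VarlenaWrapper.Verifier;

    class MarkableFeed
    {
        // BufferedInputStream adds mark()/reset() support over any stream, so
        // a raw stream lacking it can be wrapped before being passed along.
        static void feed(Verifier v, InputStream raw) throws SQLException
        {
            v.update(raw.markSupported() ? raw : new BufferedInputStream(raw));
        }
    }
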
      + * If the verifier has already thrown an exception, it will be rethrown + * here in the current thread. + * @param is InputStream representing the next range of bytes to be + * verified. + * @throws SQLException if a verification error has already been + * detected, the verifier has been cancelled, etc. + */ + public void update(InputStream is) throws SQLException + { + Future f = future(); + if ( f.isDone() ) + { + finish(); + throw new SQLException("Verifier finished prematurely"); + } + try + { + m_queue.put(is); + } + catch ( InterruptedException e ) + { + f.cancel(true); + throw (CancellationException) + new CancellationException("Waiting thread interrupted") + .initCause(e); + } + } + + /** + * Convenience method that calls {@link ByteBuffer#flip flip()} on a + * byte buffer, wraps it in a {@link BufferWrapper BufferWrapper}, and + * passes it to {@link update(InputStream)}. + *

      + * Note that the {@link NoOp NoOp} version of this method does none of + * that; in particular, the byte buffer will not have been flipped. This + * should not be a problem, as the thread passing the buffer to this + * method had better make no further use of it anyway. + * @param state The state object protecting the native memory. + * @param bb Byte buffer containing next range of content to verify. + * @throws SQLException if a verification error has already been + * detected, the verifier has been cancelled, etc. + */ + public void update(Output.State state, ByteBuffer bb) + throws SQLException + { + bb.flip(); + update(new BufferWrapper(state, bb)); + } + + /** + * Cancel this verifier. + */ + public void cancel() throws SQLException + { + Future f = future(); + f.cancel(true); + } + + /** + * Wait for the verify task and rethrow any exception it might + * have thrown. + * @throws SQLException any exception thrown by the verify method, or + * for unexpected conditions such as interruption while waiting. + */ + public void finish() throws SQLException + { + Future f = future(); + + try + { + f.get(); + } + catch ( InterruptedException inte ) + { + f.cancel(true); + throw (CancellationException) + new CancellationException("Waiting thread interrupted") + .initCause(inte); + } + catch ( ExecutionException exce ) + { + Throwable t = exce.getCause(); + if ( t instanceof SQLException ) + throw (SQLException) t; + if ( t instanceof RuntimeException ) + throw (RuntimeException) t; + throw new SQLException( + "Exception verifying variable-length data: " + + exce.getMessage(), "XX000", exce); + } + + if ( ! m_queue.isEmpty() ) + throw new SQLException("Verifier finished prematurely"); + } + + /** + * Lazy holder for a singleton instance of a thread-pool + * {@link ExecutorService}. + *

      + * If it ever happens later that other PL/Java components could have use + * for a thread pool, this could certainly be moved out of + * {@code VarlenaWrapper} to a more common place. + */ + static class LazyExecutorService + { + static final ExecutorService INSTANCE; + + static + { + final ThreadFactory dflttf = Executors.defaultThreadFactory(); + ThreadFactory daemtf = new ThreadFactory() + { + @Override + public Thread newThread(Runnable r) + { + Thread t = dflttf.newThread(r); + if ( null != t ) + { + t.setDaemon(true); + t.setName( + "varlenaVerify-" + t.getName().substring(5)); + } + return t; + } + }; + INSTANCE = Executors.newCachedThreadPool(daemtf); + } + } + + /** + * {@link ByteBufferInputStream ByteBufferInputStream} subclass that + * wraps a {@code ByteBuffer} and the {@link Output.State Output.State} + * that protects it. + *

      + * {@code BufferWrapper} installs itself as the + * {@code ByteBufferInputStream}'s lock object, so its methods + * synchronize on this rather than anything that would interfere with + * the writing thread. The {@code pin} and {@code unpin} methods, + * of course, forward to those of the native state object. + */ + static class BufferWrapper + extends ByteBufferInputStream + { + private ByteBuffer m_buf; + private Output.State m_nativeState; + + BufferWrapper(Output.State state, ByteBuffer buf) + { + // default superclass constructor uses 'this' as m_lock. + m_nativeState = state; + m_buf = buf; + } + + @Override + protected void pin() throws IOException + { + try + { + m_nativeState.pin(); + } + catch ( SQLException e ) + { + throw new IOException(e.getMessage(), e); + } + } + + @Override + protected void unpin() + { + m_nativeState.unpin(); + } + + @Override + protected ByteBuffer buffer() throws IOException + { + if ( ! m_open ) + throw new IOException( + "I/O operation on closed VarlenaWrapper.Verifier"); + /* + * Caller holds a pin already. + */ + return m_buf; + } + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaXMLRenderer.java b/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaXMLRenderer.java new file mode 100644 index 00000000..11bc2754 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/internal/VarlenaXMLRenderer.java @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2019-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.io.IOException; + +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.nio.charset.CharsetDecoder; + +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; + +/** + * Class adapting a {@code ByteBufferXMLReader} to a + * {@code VarlenaWrapper.Input}. + */ +public abstract class VarlenaXMLRenderer +extends ByteBufferXMLReader implements VarlenaWrapper +{ + private final VarlenaWrapper.Input m_input; + + protected final CharsetDecoder m_decoder; + + /** + * A duplicate of the {@code VarlenaWrapper.Input}'s byte buffer, + * so its {@code position} can be updated by the + * {@code XMLEventReader} operations without affecting the original + * (therefore multiple streams may read one {@code Input}). 
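Illustrative aside, not from the patch itself: the independence relied on here is a property of ByteBuffer.duplicate(), which shares content with the original buffer but keeps its own position, limit, and mark. A small standalone demonstration:

    import java.nio.ByteBuffer;

    class DuplicateDemo
    {
        public static void main(String[] args)
        {
            ByteBuffer original = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });
            // duplicate() shares content but not position/limit/mark; byte
            // order is not inherited, hence the explicit order(...) call.
            ByteBuffer moving = original.duplicate().order(original.order());

            moving.get();                             // advances only the duplicate
            System.out.println(original.position());  // 0: original untouched
            System.out.println(moving.position());    // 1
        }
    }
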
+ */ + private ByteBuffer m_movingBuffer; + + public VarlenaXMLRenderer(VarlenaWrapper.Input input) throws SQLException + { + m_input = input; + Charset cs = Session.implServerCharset(); + if ( null == cs ) + { + try + { + input.close(); + } + catch ( IOException e ) { } + throw new SQLFeatureNotSupportedException("SQLXML: no Java " + + "Charset found to match server encoding; perhaps set " + + "org.postgresql.server.encoding system property to a " + + "valid Java charset name for the same encoding?", "0A000"); + + } + m_decoder = cs.newDecoder(); + } + + @Override + public long adopt(DualState.Key cookie) throws SQLException + { + throw new UnsupportedOperationException( + "adopt() on a synthetic XML rendering"); + } + + @Override + public String toString() + { + return toString(this); + } + + @Override + public String toString(Object o) + { + return m_input.toString(o); + } + + @Override + protected void pin() throws SQLException + { + m_input.pin(); + } + + @Override + protected void unpin() + { + m_input.unpin(); + } + + @Override + protected ByteBuffer buffer() throws SQLException + { + if ( null == m_movingBuffer ) + { + ByteBuffer b = m_input.buffer(); + m_movingBuffer = b.duplicate().order(b.order()); + } + return m_movingBuffer; + } + + @Override + public void close() throws IOException + { + m_input.close(); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/internal/XactListener.java b/pljava/src/main/java/org/postgresql/pljava/internal/XactListener.java index e972e8da..5c61433e 100644 --- a/pljava/src/main/java/org/postgresql/pljava/internal/XactListener.java +++ b/pljava/src/main/java/org/postgresql/pljava/internal/XactListener.java @@ -1,69 +1,121 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2022 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack */ package org.postgresql.pljava.internal; +import org.postgresql.pljava.TransactionListener; + +import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.EntryPoints.Invocable; +import static org.postgresql.pljava.internal.Privilege.doPrivileged; + +import static java.security.AccessController.getContext; + import java.sql.SQLException; -import java.util.HashMap; -import org.postgresql.pljava.TransactionListener; +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.List; +import static java.util.Objects.requireNonNull; +import static java.util.stream.Collectors.toList; /** - * Class that enables registrations using the PostgreSQL RegisterXactCallback - * function. + * Class that enables registrations using the PostgreSQL + * {@code RegisterXactCallback} function. * * @author Thomas Hallgren */ class XactListener { - private static final HashMap s_listeners = new HashMap(); + /* + * These do not need to match the values of the PostgreSQL enum (which, over + * the years, has had members not merely added but reordered). The C code + * will map those to these. 
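Illustrative sketch, not from the patch itself: the listeners this class dispatches to are registered through the public API. It assumes the public Session.addTransactionListener method and the long-standing onCommit/onAbort/onPrepare callbacks; the listener class and its counters are hypothetical, and the newer callbacks (onPreCommit and so on) are assumed to have default implementations.

    import java.sql.SQLException;
    import org.postgresql.pljava.Session;
    import org.postgresql.pljava.SessionManager;
    import org.postgresql.pljava.TransactionListener;

    /** Hypothetical listener that merely counts transaction outcomes. */
    class CountingListener implements TransactionListener
    {
        static int commits, aborts;

        @Override
        public void onCommit(Session session) throws SQLException { ++commits; }

        @Override
        public void onAbort(Session session) throws SQLException { ++aborts; }

        @Override
        public void onPrepare(Session session) throws SQLException { }

        static void install() throws SQLException
        {
            SessionManager.current().addTransactionListener(new CountingListener());
        }
    }
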
+ */ + private static final int COMMIT = 0; + private static final int ABORT = 1; + private static final int PREPARE = 2; + private static final int PRE_COMMIT = 3; + private static final int PRE_PREPARE = 4; + private static final int PARALLEL_COMMIT = 5; + private static final int PARALLEL_ABORT = 6; + private static final int PARALLEL_PRE_COMMIT = 7; - static void onAbort(long listenerId) throws SQLException - { - TransactionListener listener = (TransactionListener)s_listeners.get(new Long(listenerId)); - if(listener != null) - listener.onAbort(Backend.getSession()); - } + private static final + List> s_refs = + List.of( + TransactionListener::onCommit, + TransactionListener::onAbort, + TransactionListener::onPrepare, + TransactionListener::onPreCommit, + TransactionListener::onPrePrepare, + TransactionListener::onParallelCommit, + TransactionListener::onParallelAbort, + TransactionListener::onParallelPreCommit + ); - static void onCommit(long listenerId) throws SQLException - { - TransactionListener listener = (TransactionListener)s_listeners.get(new Long(listenerId)); - if(listener != null) - listener.onCommit(Backend.getSession()); - } + /* + * A non-thread-safe Deque; will be made safe by doing all mutations on the + * PG thread (even though actually calling into PG is necessary only when + * the size changes from 0 to 1 or 1 to 0). + */ + private static final Deque> s_listeners = + new ArrayDeque<>(); - static void onPrepare(long listenerId) throws SQLException + private static void invokeListeners(int eventIndex) + throws SQLException { - TransactionListener listener = (TransactionListener)s_listeners.get(new Long(listenerId)); - if(listener != null) - listener.onPrepare(Backend.getSession()); + Checked.BiConsumer target = + s_refs.get(eventIndex); + Session session = Session.provider(); + + // Take a snapshot. Handlers might unregister during event processing + for ( Invocable listener : + s_listeners.stream().collect(toList()) ) + { + doPrivileged(() -> + { + target.accept(listener.payload, session); + }, listener.acc); + } } static void addListener(TransactionListener listener) { - synchronized(Backend.THREADLOCK) + Invocable invocable = + new Invocable<>(requireNonNull(listener), getContext()); + + doInPG(() -> { - long key = System.identityHashCode(listener); - if(s_listeners.put(new Long(key), listener) != listener) - _register(key); - } + s_listeners.removeIf(v -> v.payload.equals(listener)); + s_listeners.push(invocable); + if( 1 == s_listeners.size() ) + _register(); + }); } static void removeListener(TransactionListener listener) { - synchronized(Backend.THREADLOCK) + doInPG(() -> { - long key = System.identityHashCode(listener); - if(s_listeners.remove(new Long(key)) == listener) - _unregister(key); - } + if ( ! 
s_listeners.removeIf(v -> v.payload.equals(listener)) ) + return; + if ( 0 == s_listeners.size() ) + _unregister(); + }); } - private static native void _register(long listenerId); + private static native void _register(); - private static native void _unregister(long listenerId); + private static native void _unregister(); } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/AbstractResultSet.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/AbstractResultSet.java index 8e56055f..1233a893 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/AbstractResultSet.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/AbstractResultSet.java @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -31,8 +36,8 @@ import java.util.Map; /** - * The AbstractResultSet serves as a base class for implementations - * of the{@link java.sql.ResultSet} interface. All calls using columnNames are + * The {@code AbstractResultSet} serves as a base class for implementations + * of the {@link java.sql.ResultSet} interface. All calls using columnNames are * translated into the corresponding call with index position computed using * a call to {@link java.sql.ResultSet#findColumn(String) findColumn}. 
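Illustrative aside, not from the patch itself: the translation described in this javadoc means that, for any concrete subclass positioned on a row, a by-name accessor and the equivalent by-index call reach the same implementation. The column name is hypothetical.

    import java.sql.ResultSet;
    import java.sql.SQLException;

    class DelegationDemo
    {
        // Both calls end up in the same by-index getter of the subclass.
        static void sameResult(ResultSet rs) throws SQLException
        {
            int byName  = rs.getInt("amount");
            int byIndex = rs.getInt(rs.findColumn("amount"));
            assert byName == byIndex;
        }
    }
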
* @@ -40,340 +45,367 @@ */ public abstract class AbstractResultSet implements ResultSet { + // ************************************************************ + // Pre-JDBC 4 + // Getters-by-columnName mapped to getters-by-columnIndex + // ************************************************************ + + @Override public Array getArray(String columnName) throws SQLException { return this.getArray(this.findColumn(columnName)); } + @Override public InputStream getAsciiStream(String columnName) throws SQLException { return this.getAsciiStream(this.findColumn(columnName)); } + @Override public BigDecimal getBigDecimal(String columnName) throws SQLException { return this.getBigDecimal(this.findColumn(columnName)); } - /** - * @deprecated - */ + @SuppressWarnings("deprecation") @Override public BigDecimal getBigDecimal(String columnName, int scale) throws SQLException { return this.getBigDecimal(this.findColumn(columnName), scale); } + @Override public InputStream getBinaryStream(String columnName) throws SQLException { return this.getBinaryStream(this.findColumn(columnName)); } + @Override public Blob getBlob(String columnName) throws SQLException { return this.getBlob(this.findColumn(columnName)); } + @Override public boolean getBoolean(String columnName) throws SQLException { return this.getBoolean(this.findColumn(columnName)); } + @Override public byte getByte(String columnName) throws SQLException { return this.getByte(this.findColumn(columnName)); } + @Override public byte[] getBytes(String columnName) throws SQLException { return this.getBytes(this.findColumn(columnName)); } + @Override public Reader getCharacterStream(String columnName) throws SQLException { return this.getCharacterStream(this.findColumn(columnName)); } + @Override public Clob getClob(String columnName) throws SQLException { return this.getClob(this.findColumn(columnName)); } - public String getCursorName() - throws SQLException - { - return null; - } - + @Override public Date getDate(String columnName) throws SQLException { return this.getDate(this.findColumn(columnName)); } + @Override public Date getDate(String columnName, Calendar cal) throws SQLException { return this.getDate(this.findColumn(columnName), cal); } + @Override public double getDouble(String columnName) throws SQLException { return this.getDouble(this.findColumn(columnName)); } + @Override public float getFloat(String columnName) throws SQLException { return this.getFloat(this.findColumn(columnName)); } + @Override public int getInt(String columnName) throws SQLException { return this.getInt(this.findColumn(columnName)); } + @Override public long getLong(String columnName) throws SQLException { return this.getLong(this.findColumn(columnName)); } + @Override public Object getObject(String columnName) throws SQLException { return this.getObject(this.findColumn(columnName)); } - public Object getObject(String columnName, Map map) + @Override + public Object getObject(String columnName, Map> map) throws SQLException { return this.getObject(this.findColumn(columnName), map); } - public T getObject(int columnIndex, Class type) - throws SQLException - { - final Object obj = getObject( columnIndex ); - if ( obj.getClass().equals( type ) ) return (T) obj; - throw new SQLException( "Cannot convert " + obj.getClass().getName() + " to " + type ); - } - - public T getObject(String columnName, Class type) - throws SQLException - { - final Object obj = getObject( columnName ); - if ( obj.getClass().equals( type ) ) return (T) obj; - throw new SQLException( "Cannot convert 
" + obj.getClass().getName() + " to " + type ); - } - + @Override public Ref getRef(String columnName) throws SQLException { return this.getRef(this.findColumn(columnName)); } + @Override public short getShort(String columnName) throws SQLException { return this.getShort(this.findColumn(columnName)); } - public Statement getStatement() - throws SQLException - { - return null; - } - + @Override public String getString(String columnName) throws SQLException { return this.getString(this.findColumn(columnName)); } + @Override public Time getTime(String columnName) throws SQLException { return this.getTime(this.findColumn(columnName)); } + @Override public Time getTime(String columnName, Calendar cal) throws SQLException { return this.getTime(this.findColumn(columnName), cal); } + @Override public Timestamp getTimestamp(String columnName) throws SQLException { return this.getTimestamp(this.findColumn(columnName)); } + @Override public Timestamp getTimestamp(String columnName, Calendar cal) throws SQLException { return this.getTimestamp(this.findColumn(columnName), cal); } - /** - * @deprecated - */ + @SuppressWarnings("deprecation") @Override public InputStream getUnicodeStream(String columnName) throws SQLException { return this.getUnicodeStream(this.findColumn(columnName)); } + @Override public URL getURL(String columnName) throws SQLException { return this.getURL(this.findColumn(columnName)); } + // ************************************************************ + // Pre-JDBC 4 + // Updaters-by-columnName mapped to updaters-by-columnIndex + // ************************************************************ + + @Override public void updateArray(String columnName, Array x) throws SQLException { this.updateArray(this.findColumn(columnName), x); } + @Override public void updateAsciiStream(String columnName, InputStream x, int length) throws SQLException { this.updateAsciiStream(this.findColumn(columnName), x, length); } + @Override public void updateBigDecimal(String columnName, BigDecimal x) throws SQLException { this.updateBigDecimal(this.findColumn(columnName), x); } + @Override public void updateBinaryStream(String columnName, InputStream x, int length) throws SQLException { this.updateBinaryStream(this.findColumn(columnName), x, length); } + @Override public void updateBlob(String columnName, Blob x) throws SQLException { this.updateBlob(this.findColumn(columnName), x); } + @Override public void updateBoolean(String columnName, boolean x) throws SQLException { this.updateBoolean(this.findColumn(columnName), x); } + @Override public void updateByte(String columnName, byte x) throws SQLException { this.updateByte(this.findColumn(columnName), x); } + @Override public void updateBytes(String columnName, byte x[]) throws SQLException { this.updateBytes(this.findColumn(columnName), x); } + @Override public void updateCharacterStream(String columnName, Reader x, int length) throws SQLException { this.updateCharacterStream(this.findColumn(columnName), x, length); } + @Override public void updateClob(String columnName, Clob x) throws SQLException { this.updateClob(this.findColumn(columnName), x); } + @Override public void updateDate(String columnName, Date x) throws SQLException { this.updateDate(this.findColumn(columnName), x); } + @Override public void updateDouble(String columnName, double x) throws SQLException { this.updateDouble(this.findColumn(columnName), x); } + @Override public void updateFloat(String columnName, float x) throws SQLException { this.updateFloat(this.findColumn(columnName), x); } 
+ @Override public void updateInt(String columnName, int x) throws SQLException { this.updateInt(this.findColumn(columnName), x); } + @Override public void updateLong(String columnName, long x) throws SQLException { this.updateLong(this.findColumn(columnName), x); } + @Override public void updateNull(String columnName) throws SQLException { this.updateNull(this.findColumn(columnName)); } + @Override public void updateObject(String columnName, Object x) throws SQLException { this.updateObject(this.findColumn(columnName), x); } + @Override public void updateObject(String columnName, Object x, int scale) throws SQLException { this.updateObject(this.findColumn(columnName), x, scale); } + @Override public void updateRef(String columnName, Ref x) throws SQLException { this.updateRef(this.findColumn(columnName), x); } + @Override public void updateShort(String columnName, short x) throws SQLException { this.updateShort(this.findColumn(columnName), x); } + @Override public void updateString(String columnName, String x) throws SQLException { this.updateString(this.findColumn(columnName), x); } + @Override public void updateTime(String columnName, Time x) throws SQLException { this.updateTime(this.findColumn(columnName), x); } + @Override public void updateTimestamp(String columnName, Timestamp x) throws SQLException { @@ -381,382 +413,471 @@ public void updateTimestamp(String columnName, Timestamp x) } // ************************************************************ - // Non-implementation of JDBC 4 methods. + // Pre-JDBC 4 + // Trivial default implementations for some methods inquiring + // ResultSet status. // ************************************************************ - public void updateNClob(int columnIndex, NClob nClob) + /** + * Returns null if not overridden in a subclass. + */ + @Override + public String getCursorName() throws SQLException { - throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateNClob( int, NClob ) not implemented yet.", - "0A000" ); + return null; } - public void updateNClob(String columnLabel, NClob nClob) + /** + * Returns null if not overridden in a subclass. + */ + @Override + public Statement getStatement() throws SQLException { - throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateNClob( String, NClob ) not implemented yet.", - "0A000" ); + return null; } - public void updateNClob(int columnIndex, Reader reader) + // ************************************************************ + // Implementation of JDBC 4 methods. Methods go here if they + // don't throw SQLFeatureNotSupportedException; they can be + // considered implemented even if they do nothing useful, as + // long as that's an allowed behavior by the JDBC spec. 
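Illustrative aside, not from the patch itself: with the isWrapperFor/unwrap implementations added just below, the standard JDBC wrapper-inspection idiom behaves as the spec requires. The helper method is hypothetical.

    import java.sql.ResultSet;
    import java.sql.SQLException;

    class UnwrapDemo
    {
        // unwrap succeeds exactly when isWrapperFor reports true;
        // return null here rather than letting unwrap throw.
        static <T> T as(ResultSet rs, Class<T> iface) throws SQLException
        {
            return rs.isWrapperFor(iface) ? rs.unwrap(iface) : null;
        }
    }
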
+ // ************************************************************ + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { - throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateNClob( int, Reader ) not implemented yet.", - "0A000" ); + return iface.isInstance(this); } - public void updateNClob(int columnIndex, Reader reader, long length) + @Override + public T unwrap(Class iface) throws SQLException { - throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateNClob( int, Reader, long ) not implemented yet.", - "0A000" ); + if ( iface.isInstance(this) ) + return iface.cast(this); + throw new SQLFeatureNotSupportedException + ( this.getClass().getSimpleName() + + " does not wrap " + iface.getName(), + "0A000" ); } - public void updateNClob(String columnLabel, Reader reader) + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { - throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateNClob( String, Reader ) not implemented yet.", - "0A000" ); + return getObject(columnIndex, SQLXML.class); } - public void updateNClob(String columnLabel, Reader reader, long length) + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { - throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateNClob( String, Reader, long ) not implemented yet.", - "0A000" ); + return getObject(columnLabel, SQLXML.class); } - public void updateClob(int columnIndex, Reader reader) + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { - throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateClob( int, Reader ) not implemented yet.", - "0A000" ); + updateObject(columnIndex, xmlObject); } - public void updateClob(int columnIndex, Reader reader, long length) + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) + throws SQLException + { + updateObject(columnLabel, xmlObject); + } + + // ************************************************************ + // Non-implementation of JDBC 4 get methods. 
+ // ************************************************************ + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateClob( int, Reader, long ) not implemented yet.", + ".getNCharacterStream( String ) not implemented yet.", "0A000" ); } - public void updateClob(String columnLabel, Reader reader) + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateClob( String, Reader ) not implemented yet.", - "0A000" ); + ".gett( int ) not implemented yet.", "0A000" ); } - public void updateClob(String columnLabel, Reader reader, long length) + @Override + public NClob getNClob(String columnLabel) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateClob( String, Reader, long ) not implemented yet.", - "0A000" ); + ".getNClob( String ) not implemented yet.", "0A000" ); } - public void updateBlob(int columnIndex, InputStream inputStream) + @Override + public NClob getNClob(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateBlob( int, InputStream ) not implemented yet.", - "0A000" ); + ".getNClob( int ) not implemented yet.", "0A000" ); } - public void updateBlob(int columnIndex, InputStream inputStream, long length) + @Override + public String getNString(String columnLabel) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateBlob( int, InputStream, long ) not implemented yet.", + ".getNString( String ) not implemented yet.", "0A000" ); } + @Override + public String getNString(int columnIndex) + throws SQLException + { + throw new SQLFeatureNotSupportedException( this.getClass() + + ".getNString( int ) not implemented yet.", "0A000" ); + } - public void updateBlob(String columnLabel, InputStream inputStream) + @Override + public RowId getRowId(String columnLabel) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateBlob( String, InputStream ) not implemented yet.", - "0A000" ); + ".getRowId( String ) not implemented yet.", "0A000" ); } - public void updateBlob(String columnLabel, InputStream inputStream, long length) + @Override + public RowId getRowId(int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateBlob( String, InputStream, long ) not implemented yet.", - "0A000" ); + "getRowId( int ) not implemented yet.", "0A000" ); } - public void updateCharacterStream(int columnIndex, Reader x) + // ************************************************************ + // Non-implementation of JDBC 4 update methods. 
+ // ************************************************************ + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateCharacterStream( int, Reader ) not implemented yet.", - "0A000" ); + ".updateAsciiStream( String, InputStream ) not implemented yet.", "0A000" ); } - public void updateCharacterStream(int columnIndex, Reader x, long length) + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateCharacterStream( int, Reader, long ) not implemented yet.", - "0A000" ); + ".updateAsciiStream( String, InputStream, long ) not implemented yet.", "0A000" ); } - public void updateCharacterStream(String ColumnLabel, Reader x) + @Override + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateCharacterStream( String, Reader ) not implemented yet.", + ".updateAsciiStream( int, InputStream ) not implemented yet.", "0A000" ); } - public void updateCharacterStream(String ColumnLabel, Reader x, long length) + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateCharacterStream( String, Reader, long ) not implemented yet.", + ".updateAsciiStream( int, InputStream, long ) not implemented yet.", "0A000" ); } - + @Override public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateBinaryStream( String, InputStream ) not implemented yet.", + ".updateBinaryStream( String, InputStream ) not implemented yet.", "0A000" ); } + @Override public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateBinaryStream( String, InputStream, long ) not implemented yet.", + ".updateBinaryStream( String, InputStream, long ) not implemented yet.", "0A000" ); } + @Override public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateBinaryStream( int, InputStream ) not implemented yet.", + ".updateBinaryStream( int, InputStream ) not implemented yet.", "0A000" ); } + @Override public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateBinaryStream( int, InputStream, long ) not implemented yet.", + ".updateBinaryStream( int, InputStream, long ) not implemented yet.", "0A000" ); } - public void updateAsciiStream(String columnLabel, InputStream x) + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateAsciiStream( String, InputStream ) not implemented yet.", "0A000" ); + ".updateBlob( String, InputStream ) not implemented yet.", + "0A000" ); } - public void updateAsciiStream(String columnLabel, InputStream x, long length) + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateAsciiStream( String, 
InputStream, long ) not implemented yet.", "0A000" ); + ".updateBlob( String, InputStream, long ) not implemented yet.", + "0A000" ); } - public void updateAsciiStream(int columnIndex, InputStream x) + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateAsciiStream( int, InputStream ) not implemented yet.", + ".updateBlob( int, InputStream ) not implemented yet.", "0A000" ); } - public void updateAsciiStream(int columnIndex, InputStream x, long length) + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateAsciiStream( int, InputStream, long ) not implemented yet.", + ".updateBlob( int, InputStream, long ) not implemented yet.", "0A000" ); } - public void updateNCharacterStream(String columnLabel, Reader reader) + @Override + public void updateCharacterStream(String ColumnLabel, Reader x) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateNCharacterStream( String, Reader ) not implemented yet.", + ".updateCharacterStream( String, Reader ) not implemented yet.", "0A000" ); } - public void updateNCharacterStream(String columnLabel, Reader reader, long length) + @Override + public void updateCharacterStream(String ColumnLabel, Reader x, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateNCharacterStream( String, Reader, long ) not implemented yet.", + ".updateCharacterStream( String, Reader, long ) not implemented yet.", "0A000" ); } - public void updateNCharacterStream(int columnIndex, Reader reader) + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateNCharacterStream( int, Reader ) not implemented yet.", + ".updateCharacterStream( int, Reader ) not implemented yet.", "0A000" ); } - public void updateNCharacterStream(int columnIndex, Reader reader, long length) + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateNCharaterStream( int, Reader, long] ) not implemented yet.", + ".updateCharacterStream( int, Reader, long ) not implemented yet.", "0A000" ); } - public Reader getNCharacterStream(String columnLabel) + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".getNCharacterStream( String ) not implemented yet.", + ".updateClob( String, Reader ) not implemented yet.", "0A000" ); } - public Reader getNCharacterStream(int columnIndex) + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".gett( int ) not implemented yet.", "0A000" ); + ".updateClob( String, Reader, long ) not implemented yet.", + "0A000" ); } - public String getNString(int columnIndex) + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".getNString( int ) not implemented yet.", "0A000" ); + ".updateClob( int, Reader ) not implemented yet.", + "0A000" ); } - public String getNString(String columnLabel) + @Override + public void 
updateClob(int columnIndex, Reader reader, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".getNString( String ) not implemented yet.", + ".updateClob( int, Reader, long ) not implemented yet.", "0A000" ); } - public void updateSQLXML(int columnIndex, SQLXML xmlObject) + @Override + public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateSQLXML( int, SQLXML ) not implemented yet.", + ".updateNCharacterStream( String, Reader ) not implemented yet.", "0A000" ); } - public void updateSQLXML(String columnLabel, SQLXML xmlObject) + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateSQLXML( String, SQLXML ) not implemented yet.", + ".updateNCharacterStream( String, Reader, long ) not implemented yet.", "0A000" ); } - public SQLXML getSQLXML(int columnIndex) + @Override + public void updateNCharacterStream(int columnIndex, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".getSQLXML( int ) not implemented yet.", "0A000" ); + ".updateNCharacterStream( int, Reader ) not implemented yet.", + "0A000" ); } - public SQLXML getSQLXML(String columnLabel) + @Override + public void updateNCharacterStream(int columnIndex, Reader reader, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".getSQLXML( String ) not implemented yet.", "0A000" ); + ".updateNCharaterStream( int, Reader, long] ) not implemented yet.", + "0A000" ); } - public NClob getNClob(String columnLabel) + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".getNClob( String ) not implemented yet.", "0A000" ); + ".updateNClob( String, NClob ) not implemented yet.", + "0A000" ); } - public NClob getNClob(int columnIndex) + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".getNClob( int ) not implemented yet.", "0A000" ); + ".updateNClob( String, Reader ) not implemented yet.", + "0A000" ); } - public void updateNString(String columnLabel, String nString) + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateNString( String, String ) not implemented yet.", + ".updateNClob( String, Reader, long ) not implemented yet.", "0A000" ); } - public void updateNString(int columnIndex, String nString) + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateNString( String, Object[] ) not implemented yet.", + ".updateNClob( int, NClob ) not implemented yet.", "0A000" ); } - public void updateRowId(int columnIndex, RowId x) + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateRowId( int, RowId ) not implemented yet.", + ".updateNClob( int, Reader ) not implemented yet.", "0A000" ); } - public void updateRowId(String columnLabel, RowId x) + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws 
SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".updateRowId( String, RowId ) not implemented yet.", + ".updateNClob( int, Reader, long ) not implemented yet.", "0A000" ); } - public RowId getRowId(int columnIndex) + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - "getRowId( int ) not implemented yet.", "0A000" ); + ".updateNString( String, String ) not implemented yet.", + "0A000" ); } - public RowId getRowId(String columnLabel) + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { throw new SQLFeatureNotSupportedException( this.getClass() + - ".getRowId( String ) not implemented yet.", "0A000" ); + ".updateNString( String, Object[] ) not implemented yet.", + "0A000" ); } - public boolean isWrapperFor(Class iface) + @Override + public void updateRowId(String columnLabel, RowId x) throws SQLException { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".isWrapperFor( Class ) not implemented yet.", - "0A000" ); + throw new SQLFeatureNotSupportedException( this.getClass() + + ".updateRowId( String, RowId ) not implemented yet.", + "0A000" ); } - public T unwrap(Class iface) + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".unwrapClass( Class ) not implemented yet.", - "0A000" ); + throw new SQLFeatureNotSupportedException( this.getClass() + + ".updateRowId( int, RowId ) not implemented yet.", + "0A000" ); + } + + // ************************************************************ + // Implementation of JDBC 4.1 methods. + // ************************************************************ + + @Override + public T getObject(String columnName, Class type) + throws SQLException + { + return this.getObject(this.findColumn(columnName), type); } } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/AbstractResultSetMetaData.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/AbstractResultSetMetaData.java index 545ec34d..0fe1bb1e 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/AbstractResultSetMetaData.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/AbstractResultSetMetaData.java @@ -1,14 +1,21 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2018 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Filip Hrbek + * Chapman Flack */ package org.postgresql.pljava.jdbc; import java.sql.ResultSetMetaData; import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; import java.sql.Connection; import java.sql.DriverManager; @@ -416,4 +423,27 @@ private Connection getDefaultConnection() throws SQLException return m_conn; } + // ************************************************************ + // Implementation of JDBC 4 methods. 
Methods go here if they + // don't throw SQLFeatureNotSupportedException; they can be + // considered implemented even if they do nothing useful, as + // long as that's an allowed behavior by the JDBC spec. + // ************************************************************ + + public boolean isWrapperFor(Class iface) + throws SQLException + { + return iface.isInstance(this); + } + + public T unwrap(Class iface) + throws SQLException + { + if ( iface.isInstance(this) ) + return iface.cast(this); + throw new SQLFeatureNotSupportedException + ( this.getClass().getSimpleName() + + " does not wrap " + iface.getName(), + "0A000" ); + } } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/BlobValue.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/BlobValue.java index 9c4c0657..26e46238 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/BlobValue.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/BlobValue.java @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -19,6 +24,7 @@ import java.sql.SQLFeatureNotSupportedException; /** + * Implementation of {@link Blob} for the SPI connection. * @author Thomas Hallgren */ public class BlobValue extends InputStream implements Blob diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/ClobValue.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/ClobValue.java index eda4bd08..9e594967 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/ClobValue.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/ClobValue.java @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -21,6 +26,7 @@ import java.sql.SQLFeatureNotSupportedException; /** + * Implementation of {@link Clob} for the SPI connection. 
* @author Thomas Hallgren */ public class ClobValue extends Reader implements Clob diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/Invocation.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/Invocation.java index 3da1c84c..63e62bed 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/Invocation.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/Invocation.java @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -12,9 +18,26 @@ import java.util.logging.Logger; import org.postgresql.pljava.internal.Backend; +import static org.postgresql.pljava.internal.Backend.doInPG; import org.postgresql.pljava.internal.PgSavepoint; +import org.postgresql.pljava.internal.ServerException; // for javadoc +import org.postgresql.pljava.internal.UnhandledPGException; // for javadoc /** + * One invocation, from PostgreSQL, of functionality implemented using PL/Java. + *

      + * This class is the Java counterpart of the {@code struct Invocation_} in the + * C code, but while there is a new stack-allocated C structure on every entry + * from PG to PL/Java, no instance of this class is created unless requested + * (with {@link #current current()}); once requested, a reference to it is saved + * in the C struct for the duration of the invocation. + *

      + * One further piece of magic applies to set-returning functions. Under the + * value-per-call protocol, there is technically a new entry into PL/Java, and + * a new C {@code Invocation_} struct, for every row to be returned, but that + * low-level complication is hidden at this level: a single instance of this + * class, if once requested, will be remembered throughout the value-per-call + * sequence of calls. * @author Thomas Hallgren */ public class Invocation @@ -24,6 +47,61 @@ public class Invocation */ private static Invocation[] s_levels = new Invocation[10]; + /** + * Recent exception representing a PostgreSQL {@code ereport(ERROR} that has + * been thrown in Java but not yet resolved (as by rollback of the + * transaction or subtransaction / savepoint). + *
<p>
      + * Mutation happens on "the PG thread". + *
<p>
      + * This field should be non-null when and only when {@code errorOccurred} + * is true in the C {@code Invocation} struct. Both are set when such an + * exception is thrown, and cleared by + * {@link #clearErrorCondition clearErrorCondition}. + *
<p>
      + * One static field suffices, not one per invocation nesting level, because + * it will always be recognized and cleared on invocation exit (to any + * possible outer nest level), and {@code errorOccurred} is meant to prevent + * calling into any PostgreSQL functions that could reach an inner nest + * level. (On reflection, that reasoning ought to apply also to + * {@code errorOccurred} itself, but that has been the way it is for decades + * and this can be added without changing that.) + *
<p>
      + * On the first creation of a {@link ServerException ServerException}, that + * exception is stored here. If any later call into PostgreSQL is thwarted + * by finding {@code errorOccurred} true, the {@code ServerException} stored + * here will be replaced by an + * {@link UnhandledPGException UnhandledPGException} that has the original + * {@code ServerException} as its {@link Throwable#cause cause} and the new + * exception will be thrown. Once this field holds an + * {@code UnhandledPGException}, it will be reused and rethrown unchanged if + * further attempts to call into PostgreSQL are made. + *
<p>
      + * At invocation exit, the C {@code popInvocation} code knows whether the + * exit is normal or exceptional. If the exit is normal but + * {@code errorOccurred} is true, that means the exiting Java function + * caught a {@code ServerException} but without rethrowing it (or some + * higher-level exception) and also without resolving it (as with a + * rollback). That is a bug in the Java function, and the exception stored + * here can have its stacktrace logged. If it is the original + * {@code ServerException}, the logging will be skipped at levels quieter + * than {@code DEBUG1}. If the exception here is already + * {@code UnhandledPGException}, then at least one attempted PostgreSQL + * operation is known to have been thwarted because of it, and a stacktrace + * will be generated at {@code WARNING} level. + *
<p>
      + * If the invocation is being popped exceptionally, the exception probably + * is this one, or has this one in its cause chain, and longstanding code + * in {@code JNICalls.c::endCall} will have generated that stack trace at + * level {@code DEBUG1}. Should that not be the case, then a stacktrace of + * this exception can be obtained from {@code popInvocation} by bumping the + * level to {@code DEBUG2}. + *
<p>
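To make the error-condition lifecycle above concrete, here is a sketch, not taken from the PL/Java sources (the table name t is hypothetical), of a user function that resolves a ServerException by rolling back a savepoint; the rollback(Savepoint) path shown later in this diff calls Invocation.clearErrorCondition(), so the connection stays usable, while skipping the rollback would leave later PostgreSQL calls to fail with UnhandledPGException:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Savepoint;
    import java.sql.Statement;

    public class ErrorHandlingSketch
    {
        public static void tolerantInsert() throws SQLException
        {
            Connection conn =
                DriverManager.getConnection("jdbc:default:connection");
            Savepoint sp = conn.setSavepoint();
            try ( Statement s = conn.createStatement() )
            {
                s.execute("INSERT INTO t VALUES (1)"); // may ereport(ERROR)
                conn.releaseSavepoint(sp);
            }
            catch ( SQLException e )
            {
                // Rolling back the savepoint resolves the error condition,
                // clearing errorOccurred / s_unhandled, so this invocation
                // can keep calling into PostgreSQL.
                conn.rollback(sp);
            }
        }
    }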
      + * Public access so factory methods of {@code ServerException} and + * {@code UnhandledPGException}, in another package, can access it. + */ + public static SQLException s_unhandled; + /** * Nesting level for this invocation */ @@ -55,29 +133,6 @@ final PgSavepoint getSavepoint() return m_savepoint; } - private ArrayList m_preparedStatements; - - final void manageStatement(PreparedStatement statement) - { - if(m_preparedStatements == null) - m_preparedStatements = new ArrayList(); - m_preparedStatements.add(statement); - } - - final void forgetStatement(PreparedStatement statement) - { - if(m_preparedStatements == null) - return; - - int idx = m_preparedStatements.size(); - while(--idx >= 0) - if(m_preparedStatements.get(idx) == statement) - { - m_preparedStatements.remove(idx); - return; - } - } - /** * @param savepoint The savepoint to set. */ @@ -90,31 +145,13 @@ final void setSavepoint(PgSavepoint savepoint) * Called from the backend when the invokation exits. Should * not be invoked any other way. */ - public void onExit() + public void onExit(boolean withError) throws SQLException { try { if(m_savepoint != null) - m_savepoint.onInvocationExit(SPIDriver.getDefault()); - - if(m_preparedStatements != null) - { - int idx = m_preparedStatements.size(); - if(idx > 0) - { - Logger w = Logger.getAnonymousLogger(); - w.warning( - "Closing " + idx + " \"forgotten\" statement" - + ((idx > 1) ? "s" : "")); - while(--idx >= 0) - { - PreparedStatement stmt = (PreparedStatement)m_preparedStatements.get(idx); - w.fine("Closed: " + stmt); - stmt.close(); - } - } - } + m_savepoint.onInvocationExit(withError); } finally { @@ -127,7 +164,7 @@ public void onExit() */ public static Invocation current() { - synchronized(Backend.THREADLOCK) + return doInPG(() -> { Invocation curr = _getCurrent(); if(curr != null) @@ -156,15 +193,16 @@ public static Invocation current() s_levels[level] = curr; curr._register(); return curr; - } + }); } - static void clearErrorCondition() + public static void clearErrorCondition() { - synchronized(Backend.THREADLOCK) + doInPG(() -> { + s_unhandled = null; _clearErrorCondition(); - } + }); } /** diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/ObjectResultSet.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/ObjectResultSet.java index f92addc7..0393c5d1 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/ObjectResultSet.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/ObjectResultSet.java @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010 PostgreSQL Global Development Group + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -26,229 +31,361 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; -import java.io.UnsupportedEncodingException; +import static java.nio.charset.StandardCharsets.US_ASCII; /** + * Implements most getters in terms of {@link #getValue}, {@link #getNumber}, + * or a few other {@code ResultSet} getters that are so implemented, tracks + * {@link #wasNull wasNull}, and provides {@link #getObjectValue(int)} as the + * chief method for subclasses to implement; turns most updaters into + * {@link #updateObject(int,Object)}. * @author Thomas Hallgren */ public abstract class ObjectResultSet extends AbstractResultSet { private boolean m_wasNull = false; + /** + * Returns a private value updated by final methods in this class. + */ + @Override + public boolean wasNull() + { + return m_wasNull; + } /** * This is a noop since warnings are not supported. */ + @Override public void clearWarnings() throws SQLException { } + /** + * Returns null if not overridden in a subclass. + */ + @Override + public SQLWarning getWarnings() + throws SQLException + { + return null; + } + + /** + * Throws "unsupported" exception if not overridden in a subclass. + * @throws SQLException indicating that this feature is not supported. + */ + @Override + public ResultSetMetaData getMetaData() + throws SQLException + { + throw new UnsupportedFeatureException( + "ResultSet meta data is not yet implemented"); + } + + // ************************************************************ + // Pre-JDBC 4 + // Getters-by-columnIndex + // ************************************************************ + + /** + * Implemented over {@link #getValue getValue}. + */ + @Override public Array getArray(int columnIndex) throws SQLException { - return (Array)this.getValue(columnIndex, Array.class); + return getValue(columnIndex, Array.class); } + /** + * Implemented over {@link #getClob(int) getClob}. + */ + @Override public InputStream getAsciiStream(int columnIndex) throws SQLException { - Clob c = this.getClob(columnIndex); + Clob c = getClob(columnIndex); return (c == null) ? null : c.getAsciiStream(); } + /** + * Implemented over {@link #getValue getValue}. + */ + @Override public BigDecimal getBigDecimal(int columnIndex) throws SQLException { - return (BigDecimal)this.getValue(columnIndex, BigDecimal.class); + return getValue(columnIndex, BigDecimal.class); } /** - * @deprecated + * Throws "unsupported" exception. */ + @SuppressWarnings("deprecation") @Override public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { throw new UnsupportedFeatureException("getBigDecimal(int, int)"); } + /** + * Implemented over {@link #getBlob(int) getBlob}. + */ + @Override public InputStream getBinaryStream(int columnIndex) throws SQLException { - Blob b = this.getBlob(columnIndex); + Blob b = getBlob(columnIndex); return (b == null) ? null : b.getBinaryStream(); } - + /** + * Implemented over {@link #getBytes(int) getBytes}. + */ + @Override public Blob getBlob(int columnIndex) throws SQLException { - byte[] bytes = this.getBytes(columnIndex); + byte[] bytes = getBytes(columnIndex); return (bytes == null) ? 
null : new BlobValue(bytes); } + /** + * Implemented over {@link #getValue getValue}. + */ + @Override public boolean getBoolean(int columnIndex) throws SQLException { - Boolean b = (Boolean)this.getValue(columnIndex, Boolean.class); + Boolean b = getValue(columnIndex, Boolean.class); return (b == null) ? false : b.booleanValue(); } + /** + * Implemented over {@link #getNumber getNumber}. + */ + @Override public byte getByte(int columnIndex) throws SQLException { - Number b = this.getNumber(columnIndex, byte.class); + Number b = getNumber(columnIndex, byte.class); return (b == null) ? 0 : b.byteValue(); } + /** + * Implemented over {@link #getValue getValue}. + */ + @Override public byte[] getBytes(int columnIndex) throws SQLException { - return (byte[])this.getValue(columnIndex, byte[].class); + return getValue(columnIndex, byte[].class); } + /** + * Implemented over {@link #getClob(int) getClob}. + */ + @Override public Reader getCharacterStream(int columnIndex) throws SQLException { - Clob c = this.getClob(columnIndex); + Clob c = getClob(columnIndex); return (c == null) ? null : c.getCharacterStream(); } + /** + * Implemented over {@link #getString(int) getString}. + */ + @Override public Clob getClob(int columnIndex) throws SQLException { - String str = this.getString(columnIndex); + String str = getString(columnIndex); return (str == null) ? null : new ClobValue(str); } + /** + * Implemented over {@link #getValue getValue}. + */ + @Override public Date getDate(int columnIndex) throws SQLException { - return (Date)this.getValue(columnIndex, Date.class); + return getValue(columnIndex, Date.class); } + /** + * Implemented over {@link #getValue(int,Class,Calendar) getValue}. + */ + @Override public Date getDate(int columnIndex, Calendar cal) throws SQLException { - return (Date)this.getValue(columnIndex, Date.class, cal); + return getValue(columnIndex, Date.class, cal); } + /** + * Implemented over {@link #getNumber getNumber}. + */ + @Override public double getDouble(int columnIndex) throws SQLException { - Number d = this.getNumber(columnIndex, double.class); + Number d = getNumber(columnIndex, double.class); return (d == null) ? 0 : d.doubleValue(); } + /** + * Implemented over {@link #getNumber getNumber}. + */ + @Override public float getFloat(int columnIndex) throws SQLException { - Number f = this.getNumber(columnIndex, float.class); + Number f = getNumber(columnIndex, float.class); return (f == null) ? 0 : f.floatValue(); } + /** + * Implemented over {@link #getNumber getNumber}. + */ + @Override public int getInt(int columnIndex) throws SQLException { - Number i = this.getNumber(columnIndex, int.class); + Number i = getNumber(columnIndex, int.class); return (i == null) ? 0 : i.intValue(); } + /** + * Implemented over {@link #getNumber getNumber}. + */ + @Override public long getLong(int columnIndex) throws SQLException { - Number l = this.getNumber(columnIndex, long.class); + Number l = getNumber(columnIndex, long.class); return (l == null) ? 0 : l.longValue(); } /** - * ResultSetMetaData is not yet supported. - * @throws SQLException indicating that this feature is not supported. + * Implemented over {@link #getObjectValue(int) getObjectValue}. + * Final because it records {@code wasNull} for use by other methods. 
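As a usage note on the wasNull tracking above: the primitive getters return 0 or false for SQL NULL, so a caller disambiguates in the usual JDBC way. A brief sketch against a hypothetical table:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class WasNullSketch
    {
        public static Integer maybeNull() throws SQLException
        {
            Connection conn =
                DriverManager.getConnection("jdbc:default:connection");
            try ( Statement s = conn.createStatement();
                  ResultSet rs = s.executeQuery(
                      "SELECT quantity FROM orders LIMIT 1") ) // hypothetical
            {
                if ( ! rs.next() )
                    return null;
                int q = rs.getInt(1);           // 0 when the column was NULL
                return rs.wasNull() ? null : q; // wasNull() disambiguates
            }
        }
    }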
*/ - public ResultSetMetaData getMetaData() - throws SQLException - { - throw new UnsupportedFeatureException("ResultSet meta data is not yet implemented"); - } - + @Override public final Object getObject(int columnIndex) throws SQLException { - Object value = this.getObjectValue(columnIndex); + Object value = getObjectValue(columnIndex); m_wasNull = (value == null); return value; } + /** + * Implemented over {@link #getObjectValue(int,Map) getObjectValue}. + * Final because it records {@code wasNull} for use by other methods. + */ + @Override public final Object getObject(int columnIndex, Map map) throws SQLException { - Object value = this.getObjectValue(columnIndex, map); + Object value = getObjectValue(columnIndex, map); m_wasNull = (value == null); return value; } + /** + * Implemented over {@link #getValue getValue}. + */ + @Override public Ref getRef(int columnIndex) throws SQLException { - return (Ref)this.getValue(columnIndex, Ref.class); + return getValue(columnIndex, Ref.class); } + /** + * Implemented over {@link #getNumber getNumber}. + */ + @Override public short getShort(int columnIndex) throws SQLException { - Number s = this.getNumber(columnIndex, short.class); + Number s = getNumber(columnIndex, short.class); return (s == null) ? 0 : s.shortValue(); } + /** + * Implemented over {@link #getValue getValue}. + */ + @Override public String getString(int columnIndex) throws SQLException { - return (String)this.getValue(columnIndex, String.class); + return getValue(columnIndex, String.class); } + /** + * Implemented over {@link #getValue getValue}. + */ + @Override public Time getTime(int columnIndex) throws SQLException { - return (Time)this.getValue(columnIndex, Time.class); + return getValue(columnIndex, Time.class); } + /** + * Implemented over {@link #getValue(int,Class,Calendar) getValue}. + */ + @Override public Time getTime(int columnIndex, Calendar cal) throws SQLException { - return (Time)this.getValue(columnIndex, Time.class, cal); + return getValue(columnIndex, Time.class, cal); } + /** + * Implemented over {@link #getValue getValue}. + */ + @Override public Timestamp getTimestamp(int columnIndex) throws SQLException { - return (Timestamp)this.getValue(columnIndex, Timestamp.class); + return getValue(columnIndex, Timestamp.class); } + /** + * Implemented over {@link #getValue(int,Class,Calendar) getValue}. + */ + @Override public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - return (Timestamp)this.getValue(columnIndex, Timestamp.class, cal); + return getValue(columnIndex, Timestamp.class, cal); } /** - * @deprecated + * Throws "unsupported" exception. */ + @SuppressWarnings("deprecation") @Override public InputStream getUnicodeStream(int columnIndex) throws SQLException { throw new UnsupportedFeatureException("ResultSet.getUnicodeStream"); } + /** + * Implemented over {@link #getValue getValue}. + */ + @Override public URL getURL(int columnIndex) throws SQLException { - return (URL)this.getValue(columnIndex, URL.class); - } - - public SQLWarning getWarnings() - throws SQLException - { - return null; + return getValue(columnIndex, URL.class); } /** @@ -261,172 +398,317 @@ public void refreshRow() throw new UnsupportedFeatureException("Refresh row"); } + // ************************************************************ + // Pre-JDBC 4 + // Updaters-by-columnIndex + // ************************************************************ + + /** + * Implemented over {@link #updateObject updateObject}. 
+ */ + @Override public void updateArray(int columnIndex, Array x) throws SQLException { - this.updateObject(columnIndex, x); + updateObject(columnIndex, x); } + /** + * Implemented over {@link ClobValue} and + * {@link #updateObject updateObject}. + */ + @Override public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { - try - { - this.updateObject(columnIndex, - new ClobValue(new InputStreamReader(x, "US-ASCII"), length)); - } - catch(UnsupportedEncodingException e) - { - throw new SQLException("US-ASCII encoding is not supported by this JVM"); - } + updateObject(columnIndex, + new ClobValue(new InputStreamReader(x, US_ASCII), length)); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - this.updateObject(columnIndex, x); + updateObject(columnIndex, x); } + /** + * Implemented over {@link BlobValue} and + * {@link #updateBlob updateBlob}. + */ + @Override public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { - this.updateBlob(columnIndex, (Blob) new BlobValue(x, length)); + updateBlob(columnIndex, (Blob) new BlobValue(x, length)); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateBlob(int columnIndex, Blob x) throws SQLException { - this.updateObject(columnIndex, x); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateBoolean(int columnIndex, boolean x) throws SQLException { - this.updateObject(columnIndex, x ? Boolean.TRUE : Boolean.FALSE); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateByte(int columnIndex, byte x) throws SQLException { - this.updateObject(columnIndex, new Byte(x)); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateBytes(int columnIndex, byte[] x) throws SQLException { - this.updateObject(columnIndex, x); + updateObject(columnIndex, x); } + /** + * Implemented over {@link ClobValue} and + * {@link #updateClob updateClob}. + */ + @Override public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { - this.updateClob(columnIndex, (Clob) new ClobValue(x, length)); + updateClob(columnIndex, (Clob) new ClobValue(x, length)); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateClob(int columnIndex, Clob x) throws SQLException { - this.updateObject(columnIndex, x); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateDate(int columnIndex, Date x) throws SQLException { - this.updateObject(columnIndex, x); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateDouble(int columnIndex, double x) throws SQLException { - this.updateObject(columnIndex, new Double(x)); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateFloat(int columnIndex, float x) throws SQLException { - this.updateObject(columnIndex, new Float(x)); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. 
+ */ + @Override public void updateInt(int columnIndex, int x) throws SQLException { - this.updateObject(columnIndex, new Integer(x)); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateLong(int columnIndex, long x) throws SQLException { - this.updateObject(columnIndex, new Long(x)); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateNull(int columnIndex) throws SQLException { - this.updateObject(columnIndex, null); + updateObject(columnIndex, null); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateRef(int columnIndex, Ref x) throws SQLException { - this.updateObject(columnIndex, x); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateShort(int columnIndex, short x) throws SQLException { - this.updateObject(columnIndex, new Short(x)); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateString(int columnIndex, String x) throws SQLException { - this.updateObject(columnIndex, x); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateTime(int columnIndex, Time x) throws SQLException { - this.updateObject(columnIndex, x); + updateObject(columnIndex, x); } + /** + * Implemented over {@link #updateObject updateObject}. + */ + @Override public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - this.updateObject(columnIndex, x); + updateObject(columnIndex, x); } - public boolean wasNull() + // ************************************************************ + // JDBC 4.1 + // Getter-by-columnIndex + // ************************************************************ + + /** + * Implemented over {@link #getObjectValue(int,Class) getObjectValue}. + * Final because it records {@code wasNull} for use by other methods. + */ + @Override + public final T getObject(int columnIndex, Class type) + throws SQLException { - return m_wasNull; + Object value = getObjectValue(columnIndex, type); + m_wasNull = (value == null); + if ( m_wasNull || type.isInstance(value) ) + return type.cast(value); + throw new SQLException("Cannot convert " + value.getClass().getName() + + " to " + type.getName()); } + // ************************************************************ + // Implementation methods + // ************************************************************ + + /** + * Implemented over {@link #getObjectValue}, tracks {@code wasNull}, + * applies {@link SPIConnection#basicNumericCoercion} to {@code cls}. + */ protected final Number getNumber(int columnIndex, Class cls) throws SQLException { - Object value = this.getObjectValue(columnIndex); + Object value = getObjectValue(columnIndex); m_wasNull = (value == null); - return SPIConnection.basicNumericCoersion(cls, value); + return SPIConnection.basicNumericCoercion(cls, value); } - protected final Object getValue(int columnIndex, Class cls) + /** + * Implemented over {@link #getObject}, + * applies {@link SPIConnection#basicCoercion} to {@code cls}. 
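The JDBC 4.1 getObject(int, Class) shown just above enforces the requested class but applies no conversion beyond what getObjectValue supplies, so it is safest with a column's natural Java mapping. A small sketch:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class TypedGetterSketch
    {
        public static String greeting() throws SQLException
        {
            Connection conn =
                DriverManager.getConnection("jdbc:default:connection");
            try ( Statement s = conn.createStatement();
                  ResultSet rs = s.executeQuery("SELECT 'hello'::text") )
            {
                rs.next();
                // Returned when the value is an instance of the requested
                // class; otherwise the implementation above throws
                // SQLException ("Cannot convert ...").
                return rs.getObject(1, String.class);
            }
        }
    }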
+ */ + protected final T getValue(int columnIndex, Class cls) throws SQLException { - return SPIConnection.basicCoersion(cls, this.getObject(columnIndex)); + return SPIConnection.basicCoercion(cls, getObject(columnIndex)); } - protected Object getValue(int columnIndex, Class cls, Calendar cal) + /** + * Implemented over {@link #getObject}, + * applies {@link SPIConnection#basicCalendricalCoercion} to {@code cls}. + */ + protected T getValue(int columnIndex, Class cls, Calendar cal) throws SQLException { - return SPIConnection.basicCalendricalCoersion(cls, this.getObject(columnIndex), cal); + return SPIConnection.basicCalendricalCoercion(cls, + getObject(columnIndex), cal); } + /** + * Implemented over {@link #getObjectValue(int)}, complains if + * {@code typeMap} is non-null. + */ protected Object getObjectValue(int columnIndex, Map typeMap) throws SQLException { if(typeMap == null) - return this.getObjectValue(columnIndex); - throw new UnsupportedFeatureException("Obtaining values using explicit Map"); + return getObjectValue(columnIndex); + throw new UnsupportedFeatureException( + "Obtaining values using explicit Map"); } - protected abstract Object getObjectValue(int columnIndex) + /** + * Implemented over {@link #getObjectValue(int,Class)}, passing null for + * the class. + *
<p>
      + * To preserve back-compatible behavior in the 1.5.x branch, this is still + * what ends up getting called in all cases that do not explicitly use the + * JDBC 4.1 new {@link #getObject(int,Class)}. + */ + protected Object getObjectValue(int columnIndex) + throws SQLException + { + return getObjectValue(columnIndex, (Class)null); + } + + /** + * Primary method for subclass to override to retrieve a value. + *
<p>
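A deliberately simplified analogue (not the real class) may help show the shape this abstract method gives the hierarchy: every getter funnels into one getObjectValue, and the base class keeps the wasNull bookkeeping:

    import java.sql.SQLException;

    // Simplified stand-in for ObjectResultSet, illustrating the design only.
    abstract class MiniObjectResultSet
    {
        private boolean m_wasNull;

        public final Object getObject(int columnIndex) throws SQLException
        {
            Object v = getObjectValue(columnIndex, null);
            m_wasNull = (v == null);
            return v;
        }

        public String getString(int columnIndex) throws SQLException
        {
            return (String)getObject(columnIndex);
        }

        public boolean wasNull()
        {
            return m_wasNull;
        }

        // The one method a concrete subclass must supply.
        protected abstract Object getObjectValue(int columnIndex, Class<?> type)
            throws SQLException;
    }

    // Toy subclass backed by an in-memory row.
    class ArrayBackedRow extends MiniObjectResultSet
    {
        private final Object[] m_row;

        ArrayBackedRow(Object... row)
        {
            m_row = row;
        }

        @Override
        protected Object getObjectValue(int columnIndex, Class<?> type)
        {
            return m_row[columnIndex - 1]; // JDBC columns are 1-based
        }
    }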
      + * The signature does not constrain this to return an object of the + * requested class, so it can still be used as before by methods that may do + * additional coercions. When called by {@link #getObject(int,Class)}, that + * caller enforces the class of the result. + */ + protected abstract Object getObjectValue(int columnIndex, Class type) throws SQLException; } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/PgNodeTreeAsXML.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/PgNodeTreeAsXML.java new file mode 100644 index 00000000..8d5de171 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/PgNodeTreeAsXML.java @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2019-2020 Tada AB and other contributors, as listed below. + * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Parsing logic from src/backend/nodes/read.c: Andrew Yu, Nov 2, 1994 + * Chapman Flack + */ +package org.postgresql.pljava.jdbc; + +import java.io.IOException; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; + +import java.nio.charset.CharacterCodingException; + +import java.sql.SQLException; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.xml.sax.SAXException; + +import org.postgresql.pljava.internal.VarlenaWrapper; +import org.postgresql.pljava.internal.VarlenaXMLRenderer; + +/** + * An adapter presenting PostgreSQL's {@code pg_node_tree} type (a serialized + * representation of a tree data structure) through the XML API (in, currently, + * an ad-hoc, schemaless rendering, but one with which some practical use might + * be made of the information, after a little study). + */ +public class PgNodeTreeAsXML extends VarlenaXMLRenderer +{ + PgNodeTreeAsXML(VarlenaWrapper.Input vwi) throws SQLException + { + super(vwi); + } + + /* + * Special returns from the low-level tokenizer. 
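A rough usage sketch for the adapter, placed here ahead of the tokenizer internals: it assumes, as the adapter's presence suggests, that a pg_node_tree column (pg_rewrite.ev_action is one such catalog column) can be read through the SQLXML API on the SPI connection:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.SQLXML;
    import java.sql.Statement;

    public class NodeTreeSketch
    {
        public static String ruleTreeAsXML() throws SQLException
        {
            Connection conn =
                DriverManager.getConnection("jdbc:default:connection");
            try ( Statement s = conn.createStatement();
                  ResultSet rs = s.executeQuery(
                      "SELECT ev_action FROM pg_catalog.pg_rewrite LIMIT 1") )
            {
                rs.next();
                SQLXML x = rs.getSQLXML(1);
                // The ad-hoc rendering uses elements named after node tags,
                // <member name="..."> children for node fields, and
                // <list>/<v> wrappers for parenthesized lists.
                return x.getString();
            }
        }
    }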
+ */ + public static final String LPAR_TOK = "("; + public static final String RPAR_TOK = ")"; + public static final String LBRA_TOK = "{"; + public static final String RBRA_TOK = "}"; + + enum NodeTokenType + { + T_Integer, T_Float, T_String, T_BitString, + RIGHT_PAREN, LEFT_PAREN, LEFT_BRACE, OTHER_TOKEN; + + private static final Pattern s_maybeNumber = + Pattern.compile("^[-+]?+\\.?+\\d"); + + static NodeTokenType of(String token) + { + if ( LPAR_TOK == token ) + return LEFT_PAREN; + if ( RPAR_TOK == token ) + return RIGHT_PAREN; + if ( LBRA_TOK == token ) + return LEFT_BRACE; + if ( token.startsWith("\"") && token.endsWith("\"") ) + return T_String; + if ( token.startsWith("b") ) + return T_BitString; + + if ( s_maybeNumber.matcher(token).lookingAt() ) + { + try + { + Integer.parseInt(token); + return T_Integer; + } + catch ( NumberFormatException e ) + { + return T_Float; + } + } + + return OTHER_TOKEN; + } + } + + @Override + protected EventCarrier next(ByteBuffer buf) + { + if ( 0 == buf.remaining() ) + return null; + try + { + final CharBuffer cb = m_decoder.decode(buf); + return new EventCarrier() + { + @Override + public void toSAX() + throws IOException, SAXException, SQLException + { + nodeRead(null); + } + + private String nextToken() + { + return PgNodeTreeAsXML.this.nextToken(cb); + } + + private void nodeRead(String token) + throws IOException, SAXException, SQLException + { + if ( null == token ) + if ( null == (token = nextToken()) ) + return; + + NodeTokenType type = NodeTokenType.of(token); + switch ( type ) + { + case LEFT_BRACE: + parseNodeString(); + break; + case LEFT_PAREN: + if ( null == (token = nextToken()) ) + throw new SQLException( + "unterminated List structure"); + String listType = + "i".equals(token) ? "int" : + "o".equals(token) ? "oid" : + "b".equals(token) ? "bit" : /* not in PG source! 
*/ + null; + + if ( null != listType ) + { + startElement("list", + cleared().withAttribute("all", listType)); + for (;;) + { + if ( null == (token = nextToken()) ) + throw new SQLException( + "unterminated List structure"); + if ( RPAR_TOK == token ) + break; + startElement("v"); + characters(token); + endElement("v"); + } + } + else + { + startElement("list"); + for (;;) + { + if ( RPAR_TOK == token ) + break; + nodeRead(token); + if ( null == (token = nextToken()) ) + throw new SQLException( + "unterminated List structure"); + } + } + endElement("list"); + break; + case RIGHT_PAREN: + throw new SQLException("unexpected right parenthesis"); + case OTHER_TOKEN: + if ( token.isEmpty() ) + { + startElement("null"); + endElement("null"); + } + else + throw new SQLException( + "unrecognized token: \"" + token + '"'); + break; + case T_Integer: + case T_Float: + startElement(type.name()); + characters(token); + endElement(type.name()); + break; + case T_String: + startElement(type.name()); + characters(token.substring(1, token.length() - 1)); + endElement(type.name()); + break; + case T_BitString: + startElement(type.name()); + characters(token.substring(1)); + endElement(type.name()); + break; + } + } + + private void parseNodeString() + throws IOException, SAXException, SQLException + { + String token = nextToken(); + if ( null == token || RBRA_TOK == token ) + throw new SQLException( + "badly formatted node string \"" + token + '"'); + String tokname = token; + boolean seenMember = false; + boolean isCONST; + + startElement(tokname); + isCONST = "CONST".equals(tokname); + for (;;) + { + if ( null == (token = nextToken()) ) + throw new SQLException( + "unterminated node structure"); + if ( RBRA_TOK == token ) + break; + + if ( token.startsWith(":") ) + { + if ( seenMember ) + endElement("member"); + seenMember = true; + String name = token.substring(1); + if ( isCONST && "constvalue".equals(name) ) + readDatum(); + else + startElement("member", + cleared() + .withAttribute("name", name)); + continue; + } + + if ( LBRA_TOK == token || LPAR_TOK == token ) + { + nodeRead(token); + continue; + } + + if ( ! seenMember ) + throw new SQLException("node value outside member"); + characters(token); + } + if ( seenMember ) + endElement("member"); + endElement(tokname); + } + + private void readDatum() + throws IOException, SAXException, SQLException + { + String token = nextToken(); + if ( null == token ) + throw new SQLException( + "malformed constvalue (expected length)"); + /* + * The length can be <> which nextToken() returns as "" + * which means the constvalue is null with no more to read. + */ + if ( token.isEmpty() ) + { + startElement("member", + cleared() + .withAttribute("name", "constvalue")); + return; + } + startElement("member", + cleared() + .withAttribute("name", "constvalue") + .withAttribute("length", token)); + token = nextToken(); + if ( ! 
"[".equals(token) ) + throw new SQLException("malformed constvalue " + + "(expected \"[\" got \"" + token + "\")"); + for (;;) + { + if ( null == (token = nextToken()) ) + throw new SQLException("unterminated constvalue"); + if ( "]".equals(token) ) + break; + int b = Integer.parseInt(token); + assert -128 <= b && b < 128 : "constvalue out of range"; + characters(Integer.toHexString(512 + b) + .substring(1).toUpperCase()); + } + // caller will add the + } + }; + } + catch ( CharacterCodingException e ) + { + return exceptionCarrier(e); + } + } + + String nextToken(CharBuffer cb) + { + int beg = cb.position(); + int end = cb.limit(); + int cur = beg; + char ch = 0; // sop for javac + + while ( cur < end ) + { + ch = cb.get(cur); + if ( ' ' == ch || '\n' == ch || '\t' == ch ) + beg = ++cur; + else + break; + } + + if ( cur == end ) + { + cb.position(cur); + return null; + } + + if ( '(' == ch || ')' == ch || '{' == ch || '}' == ch ) + { + cb.position(++cur); + switch ( ch ) + { + case '(': return LPAR_TOK; + case ')': return RPAR_TOK; + case '{': return LBRA_TOK; + case '}': return RBRA_TOK; + } + } + + StringBuilder sb = new StringBuilder(); + + while ( -1 == "(){} \n\t".indexOf(ch) ) + { + ++ cur; + if ( '\\' == ch && cur < end ) + sb.append(cb.get(cur++)); + else + sb.append(ch); + if ( cur == end ) + break; + ch = cb.get(cur); + } + + cb.position(cur); + + if ( 2 == sb.length() && 0 == sb.indexOf("<>") ) + return ""; + + return sb.toString(); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/ReadOnlyResultSet.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/ReadOnlyResultSet.java index e6a6fb2c..1b315456 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/ReadOnlyResultSet.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/ReadOnlyResultSet.java @@ -1,8 +1,13 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2018 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren */ package org.postgresql.pljava.jdbc; @@ -11,7 +16,7 @@ /** - * The ReadOnlyResultSet implements all methods that changes the ResultSet + * Implements all methods that change the ResultSet * in any way as methods that yield an {@link UnsupportedFeatureException}. * * @author Thomas Hallgren @@ -21,6 +26,7 @@ public abstract class ReadOnlyResultSet extends ObjectResultSet /** * Returns {@link ResultSet#CONCUR_READ_ONLY}. */ + @Override public int getConcurrency() throws SQLException { @@ -31,6 +37,7 @@ public int getConcurrency() * This feature is not supported on a ReadOnlyResultSet. * @throws SQLException indicating that this feature is not supported. */ + @Override public void cancelRowUpdates() throws SQLException { @@ -41,6 +48,7 @@ public void cancelRowUpdates() * This feature is not supported on a ReadOnlyResultSet. * @throws SQLException indicating that this feature is not supported. */ + @Override public void deleteRow() throws SQLException { @@ -51,6 +59,7 @@ public void deleteRow() * This feature is not supported on a ReadOnlyResultSet. 
* @throws SQLException indicating that this feature is not supported. */ + @Override public void insertRow() throws SQLException { @@ -61,6 +70,7 @@ public void insertRow() * This is a no-op since the moveToInsertRow() method is * unsupported. */ + @Override public void moveToCurrentRow() throws SQLException { @@ -70,6 +80,7 @@ public void moveToCurrentRow() * This feature is not supported on a ReadOnlyResultSet. * @throws SQLException indicating that this feature is not supported. */ + @Override public void moveToInsertRow() throws SQLException { @@ -80,6 +91,7 @@ public void moveToInsertRow() * This feature is not supported on a ReadOnlyResultSet. * @throws SQLException indicating that this feature is not supported. */ + @Override public void updateRow() throws SQLException { @@ -89,6 +101,7 @@ public void updateRow() /** * Always returns false. */ + @Override public boolean rowDeleted() throws SQLException { @@ -98,6 +111,7 @@ public boolean rowDeleted() /** * Always returns false. */ + @Override public boolean rowInserted() throws SQLException { @@ -107,6 +121,7 @@ public boolean rowInserted() /** * Always returns false. */ + @Override public boolean rowUpdated() throws SQLException { @@ -117,6 +132,7 @@ public boolean rowUpdated() * This feature is not supported on a ReadOnlyResultSet. * @throws SQLException indicating that this feature is not supported. */ + @Override public void updateObject(int columnIndex, Object x) throws SQLException { throw readOnlyException(); @@ -126,6 +142,7 @@ public void updateObject(int columnIndex, Object x) throws SQLException * This feature is not supported on a ReadOnlyResultSet. * @throws SQLException indicating that this feature is not supported. */ + @Override public void updateObject(int columnIndex, Object x, int scale) throws SQLException { diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/ResultSetBase.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/ResultSetBase.java index f6ccc1c0..bd8aa85d 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/ResultSetBase.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/ResultSetBase.java @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2018 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Filip Hrbek + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -12,41 +17,62 @@ import java.sql.SQLException; /** - * A ResultSet base provides methods that are common both for + * Provides methods that are common both for * a SyntheticResultSet (which is not associated with a * statement) and SPIResultSet. * * @author Filip Hrbek */ -abstract class ResultSetBase extends ReadOnlyResultSet +public abstract class ResultSetBase extends ReadOnlyResultSet { private int m_fetchSize; private int m_row; + /** + * Records a fetch size, and an initial position before the first row. + */ ResultSetBase(int fetchSize) { m_fetchSize = fetchSize; m_row = 0; // First row is 1 so 0 is on undefined position. 
} + /** + * Always returns {@link #FETCH_FORWARD} if not overridden. + */ + @Override public int getFetchDirection() throws SQLException { return FETCH_FORWARD; } + /** + * Returns the fetch size set by the constructor or with + * {@link #setFetchSize}. + */ + @Override public final int getFetchSize() throws SQLException { return m_fetchSize; } + /** + * Returns the row set by the constructor or with + * {@link #setRow}. + */ + @Override public final int getRow() throws SQLException { return m_row; } + /** + * Always returns {@link #TYPE_FORWARD_ONLY} if not overridden. + */ + @Override public int getType() throws SQLException { @@ -54,9 +80,10 @@ public int getType() } /** - * Cursor positoning is not implemented yet. + * Cursor positioning is not implemented yet. * @throws SQLException indicating that this feature is not supported. */ + @Override public void afterLast() throws SQLException { @@ -64,15 +91,17 @@ public void afterLast() } /** - * Cursor positoning is not implemented yet. + * Cursor positioning is not implemented yet. * @throws SQLException indicating that this feature is not supported. */ + @Override public void beforeFirst() throws SQLException { throw new UnsupportedFeatureException("Cursor positioning"); } + @Override public void close() throws SQLException { @@ -83,21 +112,25 @@ public void close() * Cursor positioning is not implemented yet. * @throws SQLException indicating that this feature is not supported. */ + @Override public boolean first() throws SQLException { throw new UnsupportedFeatureException("Cursor positioning"); } + @Override public boolean isAfterLast() throws SQLException { return m_row < 0; } + @Override public boolean isBeforeFirst() throws SQLException { return m_row == 0; } + @Override public boolean isFirst() throws SQLException { return m_row == 1; @@ -107,6 +140,7 @@ public boolean isFirst() throws SQLException * Cursor positioning is not implemented yet. * @throws SQLException indicating that this feature is not supported. */ + @Override public boolean last() throws SQLException { @@ -117,6 +151,7 @@ public boolean last() * Reverse positioning is not implemented yet. * @throws SQLException indicating that this feature is not supported. */ + @Override public boolean previous() throws SQLException { @@ -127,6 +162,7 @@ public boolean previous() * Cursor positioning is not implemented yet. * @throws SQLException indicating that this feature is not supported. */ + @Override public boolean absolute(int row) throws SQLException { @@ -137,6 +173,7 @@ public boolean absolute(int row) * Cursor positioning is not implemented yet. * @throws SQLException indicating that this feature is not supported. */ + @Override public boolean relative(int rows) throws SQLException { @@ -148,6 +185,7 @@ public boolean relative(int rows) * @throws SQLException indicating that this feature is not supported * for other values on direction. */ + @Override public void setFetchDirection(int direction) throws SQLException { @@ -160,6 +198,7 @@ public void setFetchDirection(int direction) // ************************************************************ + @Override public boolean isClosed() throws SQLException { @@ -167,8 +206,7 @@ public boolean isClosed() } /** - * Returns {@link ResultSet#CLOSE_CURSORS_AT_COMMIT}. Cursors - * are actually closed when a function returns to SQL. + * Returns {@link ResultSet#CLOSE_CURSORS_AT_COMMIT}. */ public int getHoldability() throws SQLException @@ -180,6 +218,10 @@ public int getHoldability() // End of implementation of JDBC 4 methods. 
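A small sketch of what these defaults mean to a caller: SPI result sets report forward-only type and fetch direction, their rows are read-only (the updaters above throw), and positioning calls raise the "Cursor positioning" exception:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class ForwardOnlySketch
    {
        public static void inspect() throws SQLException
        {
            Connection conn =
                DriverManager.getConnection("jdbc:default:connection");
            try ( Statement s = conn.createStatement();
                  ResultSet rs =
                      s.executeQuery("SELECT generate_series(1,3)") )
            {
                assert ResultSet.TYPE_FORWARD_ONLY == rs.getType();
                assert ResultSet.FETCH_FORWARD == rs.getFetchDirection();
                while ( rs.next() )     // forward iteration is the one mode
                    rs.getInt(1);
                // rs.absolute(1) or rs.previous() would throw the
                // "Cursor positioning" UnsupportedFeatureException above.
            }
        }
    }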
// ************************************************************ + /** + * Sets the fetch size maintained in this class. + */ + @Override public void setFetchSize(int fetchSize) throws SQLException { @@ -188,6 +230,10 @@ public void setFetchSize(int fetchSize) m_fetchSize = fetchSize; } + /** + * Sets the row reported by this class; should probably have + * {@code protected} access. + */ final void setRow(int row) { m_row = row; diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/ResultSetField.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/ResultSetField.java index 4ea8ceb0..fd8d69b7 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/ResultSetField.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/ResultSetField.java @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -11,7 +17,7 @@ import org.postgresql.pljava.internal.Oid; /** - * + * Representation of a field to be presented in a {@link SyntheticResultSet}. * @author Filip Hrbek */ @@ -71,6 +77,7 @@ public final Class getJavaClass() /* * @return true if the field can contain a value of specified class */ + @SuppressWarnings("unchecked") public final boolean canContain(Class cls) throws SQLException { @@ -100,4 +107,4 @@ public final int getLength() { return m_len; } - } \ No newline at end of file + } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIConnection.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIConnection.java index c7a120a6..1e2df2b3 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIConnection.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIConnection.java @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2009, 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -23,6 +28,7 @@ import java.sql.NClob; import java.sql.PreparedStatement; import java.sql.ResultSet; +import java.sql.ResultSetMetaData; // for javadoc link import java.sql.SQLClientInfoException; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; @@ -64,11 +70,6 @@ */ public class SPIConnection implements Connection { - /** - * A map from Java classes to java.sql.Types integers. 
- */ - private static final HashMap s_sqlType2Class = new HashMap(30); - /** * The version number of the currently executing PostgreSQL * server. @@ -80,6 +81,16 @@ public class SPIConnection implements Connection */ private Properties _clientInfo; + /** + * A map from Java classes to java.sql.Types integers. + *
<p>
      + * This map is only used by the (non-API) getTypeForClass method, + * which, in turn, is only used for + * {@link PreparedStatement#setObject(int,Object)}. + */ + private static final HashMap,Integer> s_class2sqlType = + new HashMap<>(30); + static { addType(String.class, Types.VARCHAR); @@ -105,12 +116,32 @@ public class SPIConnection implements Connection private static final void addType(Class clazz, int sqlType) { - s_sqlType2Class.put(clazz, new Integer(sqlType)); + s_class2sqlType.put(clazz, sqlType); } /** - * Returns a default connection instance. It is the callers responsability - * to close this instance. + * Map a {@code Class} to a {@link Types} integer, as used in + * (and only in) {@link PreparedStatement#setObject(int,Object)}. + */ + static int getTypeForClass(Class c) + { + if(c.isArray() && !c.equals(byte[].class)) + return Types.ARRAY; + + Integer sqt = s_class2sqlType.get(c); + if(sqt != null) + return sqt; + + /* + * This is not a well known JDBC type. + */ + return Types.OTHER; + } + + /** + * Returns a default connection instance. It is normally the caller's + * responsibility to close this instance, but as {@code close} is a no-op + * for this connection, that isn't critical. */ public static Connection getDefault() throws SQLException @@ -119,9 +150,9 @@ public static Connection getDefault() } /** - * Returns {@link ResultSet#CLOSE_CURSORS_AT_COMMIT}. Cursors are actually - * closed when a function returns to SQL. + * Returns {@link ResultSet#CLOSE_CURSORS_AT_COMMIT}. */ + @Override public int getHoldability() { return ResultSet.CLOSE_CURSORS_AT_COMMIT; @@ -130,6 +161,7 @@ public int getHoldability() /** * Returns {@link Connection#TRANSACTION_READ_COMMITTED}. */ + @Override public int getTransactionIsolation() { return TRANSACTION_READ_COMMITTED; @@ -139,6 +171,7 @@ public int getTransactionIsolation() * Warnings are not yet supported. * @throws SQLException indicating that this feature is not supported. */ + @Override public void clearWarnings() throws SQLException { @@ -148,6 +181,7 @@ public void clearWarnings() /** * This is a no-op. The default connection never closes. */ + @Override public void close() { } @@ -156,6 +190,7 @@ public void close() * It's not legal to do a commit within a call from SQL. * @throws SQLException indicating that this feature is not supported. */ + @Override public void commit() throws SQLException { @@ -166,6 +201,7 @@ public void commit() * It's not legal to do a rollback within a call from SQL. * @throws SQLException indicating that this feature is not supported. */ + @Override public void rollback() throws SQLException { @@ -176,6 +212,7 @@ public void rollback() * It is assumed that an SPI call is under transaction control. This method * will always return false. */ + @Override public boolean getAutoCommit() { return false; @@ -184,6 +221,7 @@ public boolean getAutoCommit() /** * Will always return false. */ + @Override public boolean isClosed() { return false; @@ -192,6 +230,7 @@ public boolean isClosed() /** * Returns false. The SPIConnection is not real-only. */ + @Override public boolean isReadOnly() { return false; @@ -201,6 +240,7 @@ public boolean isReadOnly() * Change of holdability is not supported. * @throws SQLException indicating that this feature is not supported. */ + @Override public void setHoldability(int holdability) throws SQLException { @@ -211,6 +251,7 @@ public void setHoldability(int holdability) * Change of transaction isolation level is not supported. 
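To illustrate the mapping getTypeForClass encodes (it is package-private, so this sketch only shows it from the setObject side, against a hypothetical table):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class SetObjectSketch
    {
        public static void bind() throws SQLException
        {
            Connection conn =
                DriverManager.getConnection("jdbc:default:connection");
            try ( PreparedStatement ps = conn.prepareStatement(
                      "INSERT INTO t (label) VALUES (?)") ) // hypothetical
            {
                // Per the table above: a String is presented as
                // Types.VARCHAR; an array class other than byte[] would map
                // to Types.ARRAY; anything unlisted falls back to
                // Types.OTHER.
                ps.setObject(1, "forty-two");
                ps.executeUpdate();
            }
        }
    }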
* @throws SQLException indicating that this feature is not supported. */ + @Override public void setTransactionIsolation(int level) throws SQLException { @@ -222,6 +263,7 @@ public void setTransactionIsolation(int level) * that is not supported. * @throws SQLException indicating that this feature is not supported. */ + @Override public void setAutoCommit(boolean autoCommit) throws SQLException { @@ -233,6 +275,7 @@ public void setAutoCommit(boolean autoCommit) * SPIConnection. Changing that is not supported. * @throws SQLException indicating that this feature is not supported. */ + @Override public void setReadOnly(boolean readOnly) throws SQLException { @@ -242,6 +285,7 @@ public void setReadOnly(boolean readOnly) /** * Returns the database in which we are running. */ + @Override public String getCatalog() throws SQLException { @@ -258,6 +302,7 @@ public String getCatalog() * The catalog name cannot be set. * @throws SQLException indicating that this feature is not supported. */ + @Override public void setCatalog(String catalog) throws SQLException { @@ -275,6 +320,7 @@ public void setCatalog(String catalog) * @return an SPIDatabaseMetaData object for this * Connection object */ + @Override public DatabaseMetaData getMetaData() { return new SPIDatabaseMetaData(this); @@ -284,12 +330,14 @@ public DatabaseMetaData getMetaData() * Warnings are not yet supported. * @throws SQLException indicating that this feature is not supported. */ + @Override public SQLWarning getWarnings() throws SQLException { throw new UnsupportedFeatureException("Connection.getWarnings"); } + @Override public void releaseSavepoint(Savepoint savepoint) throws SQLException { if(!(savepoint instanceof PgSavepoint)) @@ -300,6 +348,7 @@ public void releaseSavepoint(Savepoint savepoint) throws SQLException forgetSavepoint(sp); } + @Override public void rollback(Savepoint savepoint) throws SQLException { if(!(savepoint instanceof PgSavepoint)) @@ -308,12 +357,12 @@ public void rollback(Savepoint savepoint) throws SQLException PgSavepoint sp = (PgSavepoint)savepoint; Invocation.clearErrorCondition(); sp.rollback(); - forgetSavepoint(sp); } /** * Creates a new instance of SPIStatement. */ + @Override public Statement createStatement() throws SQLException { @@ -332,6 +381,7 @@ public Statement createStatement() * resultSetConcurrencty differs from * {@link ResultSet#CONCUR_READ_ONLY}. */ + @Override public Statement createStatement( int resultSetType, int resultSetConcurrency) @@ -354,6 +404,7 @@ public Statement createStatement( * differs from {@link ResultSet#CONCUR_READ_ONLY}, or if the * resultSetHoldability differs from {@link ResultSet#CLOSE_CURSORS_AT_COMMIT}. */ + @Override public Statement createStatement( int resultSetType, int resultSetConcurrency, @@ -369,7 +420,8 @@ public Statement createStatement( /** * Returns null. Type map is not yet imlemented. */ - public Map getTypeMap() + @Override + public Map> getTypeMap() throws SQLException { return null; @@ -379,7 +431,8 @@ public Map getTypeMap() * Type map is not yet implemented. * @throws SQLException indicating that this feature is not supported. */ - public void setTypeMap(Map map) + @Override + public void setTypeMap(Map> map) throws SQLException { throw new UnsupportedOperationException("Type map is not yet implemented"); @@ -388,12 +441,17 @@ public void setTypeMap(Map map) /** * Parse the JDBC SQL into PostgreSQL. 
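For a sense of what this parsing does (hedging a little, since only part of the method appears in this hunk): JDBC ? markers outside quoted literals appear to be rewritten into PostgreSQL positional parameters, with the count reported through the paramCountRet array, and the inQuote fix below keeps markers inside literals untouched. A caller-side sketch using the public nativeSQL(String):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class NativeSqlSketch
    {
        public static String show() throws SQLException
        {
            Connection conn =
                DriverManager.getConnection("jdbc:default:connection");
            // Expected to come back along the lines of
            //   SELECT * FROM t WHERE a = $1 AND b = '?' AND c = $2
            // with the literal '?' left alone (exact output is the
            // implementation's business; this only illustrates the intent).
            return conn.nativeSQL(
                "SELECT * FROM t WHERE a = ? AND b = '?' AND c = ?");
        }
    }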
*/ + @Override public String nativeSQL(String sql) throws SQLException { return this.nativeSQL(sql, null); } + /* + * An internal nativeSQL that returns a count of substitutable parameters + * detected, used in prepareStatement(). + */ public String nativeSQL(String sql, int[] paramCountRet) { StringBuffer buf = new StringBuffer(); @@ -422,7 +480,7 @@ public String nativeSQL(String sql, int[] paramCountRet) // if(inQuote == c) inQuote = 0; - else + else if(inQuote == 0) inQuote = c; break; @@ -459,6 +517,7 @@ public String nativeSQL(String sql, int[] paramCountRet) * Procedure calls are not yet implemented. * @throws SQLException indicating that this feature is not supported. */ + @Override public CallableStatement prepareCall(String sql) throws SQLException { throw new UnsupportedOperationException("Procedure calls are not yet implemented"); @@ -468,6 +527,7 @@ public CallableStatement prepareCall(String sql) throws SQLException * Procedure calls are not yet implemented. * @throws SQLException indicating that this feature is not supported. */ + @Override public CallableStatement prepareCall( String sql, int resultSetType, @@ -481,6 +541,7 @@ public CallableStatement prepareCall( * Procedure calls are not yet implemented. * @throws SQLException indicating that this feature is not supported. */ + @Override public CallableStatement prepareCall( String sql, int resultSetType, @@ -494,6 +555,7 @@ public CallableStatement prepareCall( /** * Creates a new instance of SPIPreparedStatement. */ + @Override public PreparedStatement prepareStatement(String sql) throws SQLException { @@ -503,7 +565,6 @@ public PreparedStatement prepareStatement(String sql) int[] pcount = new int[] { 0 }; sql = this.nativeSQL(sql, pcount); PreparedStatement stmt = new SPIPreparedStatement(this, sql, pcount[0]); - Invocation.current().manageStatement(stmt); return stmt; } @@ -511,6 +572,7 @@ public PreparedStatement prepareStatement(String sql) * Return of auto generated keys is not yet supported. * @throws SQLException indicating that this feature is not supported. */ + @Override public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { @@ -525,6 +587,7 @@ public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) * ResultSet#TYPE_FORWARD_ONLY} or if the resultSetConcurrencty * differs from {@link ResultSet#CONCUR_READ_ONLY}. */ + @Override public PreparedStatement prepareStatement( String sql, int resultSetType, @@ -548,6 +611,7 @@ public PreparedStatement prepareStatement( * differs from {@link ResultSet#CONCUR_READ_ONLY}, or if the * resultSetHoldability differs from {@link ResultSet#CLOSE_CURSORS_AT_COMMIT}. */ + @Override public PreparedStatement prepareStatement( String sql, int resultSetType, @@ -565,6 +629,7 @@ public PreparedStatement prepareStatement( * Return of auto generated keys is not yet supported. * @throws SQLException indicating that this feature is not supported. */ + @Override public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { @@ -575,39 +640,31 @@ public PreparedStatement prepareStatement(String sql, int[] columnIndexes) * Return of auto generated keys is not yet supported. * @throws SQLException indicating that this feature is not supported. 
*/ + @Override public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { throw new UnsupportedFeatureException("Auto generated key support not yet implemented"); } + @Override public Savepoint setSavepoint() throws SQLException { - return this.rememberSavepoint(PgSavepoint.set("anonymous_savepoint")); + return this.rememberSavepoint(PgSavepoint.set(null)); } + @Override public Savepoint setSavepoint(String name) throws SQLException { return this.rememberSavepoint(PgSavepoint.set(name)); } - static int getTypeForClass(Class c) - { - if(c.isArray() && !c.equals(byte[].class)) - return Types.ARRAY; - - Integer sqt = (Integer)s_sqlType2Class.get(c); - if(sqt != null) - return sqt.intValue(); - - /* - * This is not a well known JDBC type. - */ - return Types.OTHER; - } - + /* + * An implementation factor of setSavepoint() to ensure that all such + * savepoints are released when the function returns. + */ private Savepoint rememberSavepoint(PgSavepoint sp) throws SQLException { @@ -622,6 +679,10 @@ private Savepoint rememberSavepoint(PgSavepoint sp) return sp; } + /* + * An implementation factor of releaseSavepoint() + * undoing the registration done by rememberSavepoint(). + */ private static void forgetSavepoint(PgSavepoint sp) throws SQLException { @@ -630,13 +691,18 @@ private static void forgetSavepoint(PgSavepoint sp) invocation.setSavepoint(null); } - public int[] getVersionNumber() throws SQLException + /** + * Return the server version number as a three-element {@code int} array + * (of which the third may be null), as used in the + * {@code getDatabase...Version} methods of {@link DatabaseMetaData}. + */ + public int[] getVersionNumber() throws SQLException { if (VERSION_NUMBER != null) return VERSION_NUMBER; ResultSet rs = createStatement().executeQuery( - "SELECT version()"); + "SELECT pg_catalog.version()"); try { @@ -674,25 +740,34 @@ public int[] getVersionNumber() throws SQLException } } - /* - * This implemetation uses the jdbc3Types array to support the jdbc3 - * datatypes. Basically jdbc2 and jdbc3 are the same, except that - * jdbc3 adds some - */ + /** + * Convert a PostgreSQL type name to a {@link Types} integer, using the + * {@code JDBC_TYPE_NAMES}/{@code JDBC_TYPE_NUMBERS} arrays; used in + * {@link DatabaseMetaData} and {@link ResultSetMetaData}. + */ public int getSQLType(String pgTypeName) { if (pgTypeName == null) return Types.OTHER; - for (int i = 0;i < JDBC3_TYPE_NAMES.length;i++) - if (pgTypeName.equals(JDBC3_TYPE_NAMES[i])) + for (int i = 0;i < JDBC_TYPE_NAMES.length;i++) + if (pgTypeName.equals(JDBC_TYPE_NAMES[i])) return JDBC_TYPE_NUMBERS[i]; return Types.OTHER; } - /* - * This returns the java.sql.Types type for a PG type oid + /** + * This returns the {@link Types} type for a PG type oid, by mapping it + * to a name using {@link #getPGType} and then to the result via + * {@link #getSQLType(String)}; used in {@link ResultSetMetaData} and + * five places in {@link DatabaseMetaData}. + *
<p>
      + * This method is a bit goofy, as it first maps from Oid to type name, and + * then from name to JDBC type, all to accomplish the inverse of the JDBC + * type / Oid mapping that already exists in Oid.c, and so the mapping + * arrays in this file have to be updated in sync with that. Look into + * future consolidation.... * * @param oid PostgreSQL type oid * @return the java.sql.Types type @@ -703,7 +778,12 @@ public int getSQLType(Oid oid) throws SQLException return getSQLType(getPGType(oid)); } - public String getPGType(Oid oid) throws SQLException + /** + * Map the Oid of a PostgreSQL type to its name (specifically, the + * {@code typname} attribute of {@code pg_type}. Used in + * {@link DatabaseMetaData} and {@link ResultSetMetaData}. + */ + public String getPGType(Oid oid) throws SQLException { String typeName = null; PreparedStatement query = null; @@ -735,11 +815,25 @@ public String getPGType(Oid oid) throws SQLException return typeName; } - static Object basicCoersion(Class cls, Object value) + /** + * Apply some hardwired coercions from an object to a desired class, + * where the class can be {@code String} or {@code URL}, as used in + * {@code ObjectResultSet} for retrieving values and + * {@code SingleRowWriter} for storing them, and also in + * {@code SQLInputFromTuple} for UDTs mapping composite types. + *
<p>
      + * Some review may be in order to determine just what part of JDBC's + * type mapping rules this corresponds to. It seems strangely limited, and + * the use of the same coercion in both the retrieval and storage direction + * in {@code ResultSet}s seems a bit suspect, as does its use in UDT input + * but not output with composites. + */ + @SuppressWarnings("unchecked") + static T basicCoercion(Class cls, Object value) throws SQLException { if(value == null || cls.isInstance(value)) - return value; + return (T)value; if(cls == String.class) { @@ -748,13 +842,15 @@ static Object basicCoersion(Class cls, Object value) || value instanceof Timestamp || value instanceof Date || value instanceof Time) - return value.toString(); + return (T)value.toString(); } else if(cls == URL.class && value instanceof String) { try { - return new URL((String)value); + @SuppressWarnings("deprecation") // PL/Java major rev or forever + T result = (T)new URL((String)value); + return result; } catch(MalformedURLException e) { @@ -765,7 +861,25 @@ else if(cls == URL.class && value instanceof String) cls.getName() + " from an object of class " + value.getClass().getName()); } - static Number basicNumericCoersion(Class cls, Object value) + /** + * Apply some hardwired coercions from an object to a desired class, + * one of Java's several numeric classes, when the value is an instance of + * {@code Number}, {@code String}, or {@code Boolean}, as used in + * {@code ObjectResultSet} for retrieving values and + * {@code SingleRowWriter} for storing them, and also in + * {@code SQLInputFromTuple} for UDTs mapping composite types. + *
<p>
      + * Some review may be in order to determine just what part of JDBC's + * type mapping rules this corresponds to. It seems strangely limited, and + * the use of the same coercion in both the retrieval and storage direction + * in {@code ResultSet}s seems a bit suspect, as does its use in UDT input + * but not output with composites. + *
<p>
      + * Oddly, this doesn't promise to return a subclass of its {@code cls} + * parameter: if {@code value} is a {@code Number}, it is returned directly + * no matter what {@code cls} was requested. + */ + static Number basicNumericCoercion(Class cls, Object value) throws SQLException { if(value == null || value instanceof Number) @@ -777,7 +891,7 @@ static Number basicNumericCoersion(Class cls, Object value) return Long.valueOf((String)value); if(value instanceof Boolean) - return new Long(((Boolean)value).booleanValue() ? 1 : 0); + return ((Boolean)value) ? 1 : 0; } else if(cls == BigDecimal.class) { @@ -793,19 +907,34 @@ else if(cls == BigDecimal.class) return Double.valueOf((String)value); if(value instanceof Boolean) - return new Double(((Boolean)value).booleanValue() ? 1 : 0); + return ((Boolean)value) ? 1 : 0; } throw new SQLException("Cannot derive a Number from an object of class " + value.getClass().getName()); } - static Object basicCalendricalCoersion(Class cls, Object value, Calendar cal) + /** + * Apply some hardwired coercions from an object to a desired class, + * where the class may be {@link Timestamp}, {@link Date}, or {@link Time} + * and the value one of those or {@code String}, as used in + * {@code ObjectResultSet} for retrieving values and + * {@code SingleRowWriter} for storing them, but not also in + * {@code SQLInputFromTuple} for UDTs mapping composite types. + *
<p>
      + * Some review may be in order to determine just what part of JDBC's + * type mapping rules this corresponds to. It seems strangely limited, and + * the use of the same coercion in both the retrieval and storage direction + * in {@code ResultSet}s seems a bit suspect. + */ + @SuppressWarnings("unchecked") + static T basicCalendricalCoercion( + Class cls, Object value, Calendar cal) throws SQLException { if(value == null) - return value; + return null; if(cls.isInstance(value)) - return value; + return (T)value; if(cls == Timestamp.class) { @@ -816,17 +945,17 @@ static Object basicCalendricalCoersion(Class cls, Object value, Calendar cal) cal.set(Calendar.MINUTE, 0); cal.set(Calendar.SECOND, 0); cal.set(Calendar.MILLISECOND, 0); - return new Timestamp(cal.getTimeInMillis()); + return (T)new Timestamp(cal.getTimeInMillis()); } else if(value instanceof Time) { cal.setTime((Date)value); cal.set(1970, 0, 1); - return new Timestamp(cal.getTimeInMillis()); + return (T)new Timestamp(cal.getTimeInMillis()); } else if(value instanceof String) { - return Timestamp.valueOf((String)value); + return (T)Timestamp.valueOf((String)value); } } else if(cls == Date.class) @@ -839,11 +968,11 @@ else if(cls == Date.class) cal.set(Calendar.MINUTE, 0); cal.set(Calendar.SECOND, 0); cal.set(Calendar.MILLISECOND, 0); - return new Date(cal.getTimeInMillis()); + return (T)new Date(cal.getTimeInMillis()); } else if(value instanceof String) { - return Date.valueOf((String)value); + return (T)Date.valueOf((String)value); } } else if(cls == Time.class) @@ -853,11 +982,11 @@ else if(cls == Time.class) Timestamp ts = (Timestamp)value; cal.setTime(ts); cal.set(1970, 0, 1); - return new Time(cal.getTimeInMillis()); + return (T)new Time(cal.getTimeInMillis()); } else if(value instanceof String) { - return Time.valueOf((String)value); + return (T)Time.valueOf((String)value); } } throw new SQLException("Cannot derive a value of class " + @@ -870,10 +999,13 @@ else if(value instanceof String) * They default automatically to Types.OTHER * * Note: This must be in the same order as below. + * + * These arrays are not only used by getSQLType() in this file, but also + * directly accessed by getUDTs() in DatabaseMetaData. * * Tip: keep these grouped together by the Types. value */ - public static final String JDBC3_TYPE_NAMES[] = { + public static final String JDBC_TYPE_NAMES[] = { "int2", "int4", "oid", "int8", @@ -889,6 +1021,7 @@ else if(value instanceof String) "date", "time", "timetz", "abstime", "timestamp", "timestamptz", + "xml", "_bool", "_char", "_int2", "_int4", "_text", "_oid", "_varchar", "_int8", "_float4", "_float8", "_abstime", "_date", "_time", "_timestamp", "_numeric", @@ -902,47 +1035,43 @@ else if(value instanceof String) * * Tip: keep these grouped together by the Types. 
value */ - public static final int JDBC_TYPE_NUMBERS[] = - { - Types.SMALLINT, - Types.INTEGER, Types.INTEGER, - Types.BIGINT, - Types.DOUBLE, Types.DOUBLE, - Types.NUMERIC, - Types.REAL, - Types.DOUBLE, - Types.CHAR, Types.CHAR, Types.CHAR, Types.CHAR, Types.CHAR, Types.CHAR, - Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, - Types.BINARY, - Types.BOOLEAN, - Types.BIT, - Types.DATE, - Types.TIME, Types.TIME, - Types.TIMESTAMP, Types.TIMESTAMP, Types.TIMESTAMP, - Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, - Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, - Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, - Types.ARRAY - }; - - // ************************************************************ - // Non-implementation of JDBC 4 methods. - // ************************************************************ + public static final int JDBC_TYPE_NUMBERS[]; - public Struct createStruct( String typeName, Object[] attributes ) - throws SQLException + static { - throw new SQLFeatureNotSupportedException( - "SPIConnection.createStruct( String, Object[] ) not implemented yet.", "0A000" ); + JDBC_TYPE_NUMBERS = new int[] + { + Types.SMALLINT, + Types.INTEGER, Types.INTEGER, + Types.BIGINT, + Types.DOUBLE, Types.DOUBLE, + Types.NUMERIC, + Types.REAL, + Types.DOUBLE, + Types.CHAR,Types.CHAR,Types.CHAR,Types.CHAR,Types.CHAR,Types.CHAR, + Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, + Types.BINARY, + Types.BOOLEAN, + Types.BIT, + Types.DATE, + Types.TIME, Types.TIME_WITH_TIMEZONE, + Types.TIMESTAMP, Types.TIMESTAMP, Types.TIMESTAMP_WITH_TIMEZONE, + Types.SQLXML, + Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, + Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, + Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, Types.ARRAY, + Types.ARRAY + }; } - public Array createArrayOf(String typeName, Object[] elements) - throws SQLException - { - throw new SQLFeatureNotSupportedException( - "SPIConnection.createArrayOf( String, Object[] ) not implemented yet.", "0A000" ); - } + // ************************************************************ + // Implementation of JDBC 4 methods. Methods go here if they + // don't throw SQLFeatureNotSupportedException; they can be + // considered implemented even if they do nothing useful, as + // long as that's an allowed behavior by the JDBC spec. + // ************************************************************ + @Override public boolean isValid( int timeout ) throws SQLException { @@ -950,77 +1079,67 @@ public boolean isValid( int timeout ) // ready, right? 
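
JDBC_TYPE_NAMES and JDBC_TYPE_NUMBERS above are parallel arrays, so every change (such as the added "xml" name and Types.SQLXML entry) has to land at the same index in both. A reduced illustration of the lookup getSQLType(String) performs over them, with illustrative arrays rather than the real ones:

    // Reduced illustration of the parallel-array lookup behind getSQLType(String).
    // The two arrays must stay index-aligned; anything unrecognized maps to OTHER.
    static final String[] NAMES = { "int4", "timetz", "timestamptz", "xml" };
    static final int[] TYPES =
    {
        java.sql.Types.INTEGER,
        java.sql.Types.TIME_WITH_TIMEZONE,
        java.sql.Types.TIMESTAMP_WITH_TIMEZONE,
        java.sql.Types.SQLXML
    };

    static int sqlTypeFor(String pgTypeName)
    {
        if ( pgTypeName == null )
            return java.sql.Types.OTHER;
        for ( int i = 0; i < NAMES.length; ++i )
            if ( NAMES[i].equals(pgTypeName) )
                return TYPES[i];
        return java.sql.Types.OTHER;
    }
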
} - public SQLXML createSQLXML() - throws SQLException - { - throw new SQLFeatureNotSupportedException( "SPIConnection.createSQLXML() not implemented yet.", - "0A000" ); - } - public NClob createNClob() - throws SQLException - { - throw new SQLFeatureNotSupportedException( "SPIConnection.createNClob() not implemented yet.", - "0A000" ); - } - public Blob createBlob() - throws SQLException - { - throw new SQLFeatureNotSupportedException( "SPIConnection.createBlob() not implemented yet.", - "0A000" ); - } - public Clob createClob() - throws SQLException - { - throw new SQLFeatureNotSupportedException( "SPIConnection.createClob() not implemented yet.", - "0A000" ); - } - + @Override public boolean isWrapperFor(Class iface) throws SQLException { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".isWrapperFor( Class ) not implemented yet.", - "0A000" ); + return iface.isInstance(this); } + @Override public T unwrap(Class iface) throws SQLException { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".unwrapClass( Class ) not implemented yet.", + if ( iface.isInstance(this) ) + return iface.cast(this); + throw new SQLFeatureNotSupportedException + ( this.getClass().getSimpleName() + + " does not wrap " + iface.getName(), "0A000" ); } - public void setClientInfo(String name, String value) throws SQLClientInfoException - { - Map failures = new HashMap(); - failures.put(name, ClientInfoStatus.REASON_UNKNOWN_PROPERTY); - throw new SQLClientInfoException("ClientInfo property not supported.", failures); - } + /* + * These ClientInfo implementations behave as if there are no known + * ClientInfo properties, which is an allowable implementation. However, + * there is a PostgreSQL notion corresponding to ApplicationName, so a + * later extension of these to recognize that property would not be amiss. + */ + + @Override + public void setClientInfo(String name, String value) + throws SQLClientInfoException + { + Map failures = new HashMap<>(); + failures.put(name, ClientInfoStatus.REASON_UNKNOWN_PROPERTY); + throw new SQLClientInfoException( + "ClientInfo property not supported.", failures); + } + @Override public void setClientInfo(Properties properties) throws SQLClientInfoException { if (properties == null || properties.size() == 0) return; - Map failures = new HashMap(); + Map failures = new HashMap<>(); Iterator i = properties.stringPropertyNames().iterator(); while (i.hasNext()) { failures.put(i.next(), ClientInfoStatus.REASON_UNKNOWN_PROPERTY); } - throw new SQLClientInfoException("ClientInfo property not supported.", failures); + throw new SQLClientInfoException( + "ClientInfo property not supported.", failures); } + @Override public String getClientInfo(String name) throws SQLException { return null; } + @Override public Properties getClientInfo() throws SQLException { if (_clientInfo == null) { @@ -1029,34 +1148,94 @@ public Properties getClientInfo() throws SQLException return _clientInfo; } - public void abort(Executor executor) throws SQLException - { + @Override + public SQLXML createSQLXML() + throws SQLException + { + return SQLXMLImpl.newWritable(); + } + + // ************************************************************ + // Non-implementation of JDBC 4 methods. 
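
The isWrapperFor()/unwrap() pair above now implements the standard JDBC wrapper contract instead of throwing. A caller-side sketch of what that enables (the unwrap target and usage are purely illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import org.postgresql.pljava.jdbc.SPIConnection;

    // Illustrative caller-side use of the wrapper contract implemented above.
    public class UnwrapSketch
    {
        static void demo() throws SQLException
        {
            Connection conn =
                DriverManager.getConnection("jdbc:default:connection");
            if ( conn.isWrapperFor(SPIConnection.class) )
            {
                SPIConnection spi = conn.unwrap(SPIConnection.class);
                // unwrap() hands back the same underlying connection instance,
                // since the implementation simply checks iface.isInstance(this).
            }
        }
    }
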
+ // ************************************************************ + + @Override + public Struct createStruct( String typeName, Object[] attributes ) + throws SQLException + { + throw new SQLFeatureNotSupportedException( + "SPIConnection.createStruct( String, Object[] ) not implemented yet.", "0A000" ); + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) + throws SQLException + { + throw new SQLFeatureNotSupportedException( + "SPIConnection.createArrayOf( String, Object[] ) not implemented yet.", "0A000" ); + } + + @Override + public NClob createNClob() + throws SQLException + { + throw new SQLFeatureNotSupportedException( "SPIConnection.createNClob() not implemented yet.", + "0A000" ); + } + + @Override + public Blob createBlob() + throws SQLException + { + throw new SQLFeatureNotSupportedException( "SPIConnection.createBlob() not implemented yet.", + "0A000" ); + } + + @Override + public Clob createClob() + throws SQLException + { + throw new SQLFeatureNotSupportedException( "SPIConnection.createClob() not implemented yet.", + "0A000" ); + } + + // ************************************************************ + // Non-implementation of JDBC 4.1 methods. + // ************************************************************ + + @Override + public void abort(Executor executor) throws SQLException + { throw new SQLFeatureNotSupportedException( "SPIConnection.abort(Executor) not implemented yet.", "0A000" ); - } + } - public int getNetworkTimeout() throws SQLException - { + @Override + public int getNetworkTimeout() throws SQLException + { throw new SQLFeatureNotSupportedException( "SPIConnection.getNetworkTimeout() not implemented yet.", "0A000" ); } - public void setNetworkTimeout(Executor executor, int milliseconds) - throws SQLException - { + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) + throws SQLException + { throw new SQLFeatureNotSupportedException( "SPIConnection.setNetworkTimeout(Executor,int) not implemented yet.", "0A000" ); - } + } - public String getSchema() throws SQLException - { + @Override + public String getSchema() throws SQLException + { throw new SQLFeatureNotSupportedException( "SPIConnection.getSchema() not implemented yet.", "0A000" ); - } + } - public void setSchema(String schema) throws SQLException - { + @Override + public void setSchema(String schema) throws SQLException + { throw new SQLFeatureNotSupportedException( "SPIConnection.setSchema(String) not implemented yet.", "0A000" ); - } + } } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIDatabaseMetaData.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIDatabaseMetaData.java index d478e764..067bfa0f 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIDatabaseMetaData.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIDatabaseMetaData.java @@ -1,17 +1,19 @@ /* - * Copyright (c) 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2005, 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2005-2020 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Filip Hrbek + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; -/** - * @author Filip Hrbek - */ import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.PreparedStatement; @@ -21,12 +23,17 @@ import java.sql.SQLFeatureNotSupportedException; import java.sql.Statement; import java.util.ArrayList; +import static java.util.Arrays.sort; import java.util.HashMap; import org.postgresql.pljava.internal.AclId; import org.postgresql.pljava.internal.Backend; import org.postgresql.pljava.internal.Oid; +/** + * Implementation of {@link DatabaseMetaData} for the SPI connection. + * @author Filip Hrbek + */ public class SPIDatabaseMetaData implements DatabaseMetaData { public SPIDatabaseMetaData(SPIConnection conn) @@ -62,10 +69,12 @@ protected int getMaxNameLength() throws SQLException { if(NAMEDATALEN == 0) { - String sql = "SELECT t.typlen FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n" + - " WHERE t.typnamespace=n.oid" + - " AND t.typname='name'" + - " AND n.nspname='pg_catalog'"; + String sql = + "SELECT t.typlen" + + " FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n" + + " WHERE t.typnamespace OPERATOR(pg_catalog.=) n.oid" + + " AND t.typname OPERATOR(pg_catalog.=) 'name'" + + " AND n.nspname OPERATOR(pg_catalog.=) 'pg_catalog'"; ResultSet rs = m_connection.createStatement().executeQuery(sql); if(!rs.next()){ throw new SQLException( @@ -97,7 +106,7 @@ public boolean allTablesAreSelectable() throws SQLException } /* - * What is the URL for this database? @return the url or null if it cannott + * What is the URL for this database? @return the url or null if it cannot * be generated @exception SQLException if a database access error occurs */ public String getURL() throws SQLException @@ -1334,7 +1343,7 @@ private static String resolveSchemaConditionWithOperator( //This means that only "visible" schemas are searched. //It was approved to change to *all* schemas. 
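
The OPERATOR(pg_catalog.=) spellings introduced throughout these metadata queries pin each operator to pg_catalog, so the generated SQL no longer depends on what an unqualified = happens to resolve to on the current search_path. A reduced illustration of the pattern as it applies when the query strings are assembled (a hypothetical helper, not code from this file):

    // Hypothetical helper showing the schema-qualified operator pattern used when
    // building the metadata queries: spell out OPERATOR(pg_catalog.=) rather than
    // relying on search_path resolution of a bare '='.
    static String pgEquals(String lhs, String rhs)
    {
        return lhs + " OPERATOR(pg_catalog.=) " + rhs;
    }

    // For example:
    //   "WHERE " + pgEquals("c.relnamespace", "n.oid")
    // yields
    //   "WHERE c.relnamespace OPERATOR(pg_catalog.=) n.oid"
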
//return expr + " " + operator + " ANY (current_schemas(true))"; - return "1=1"; + return "1 OPERATOR(pg_catalog.=) 1"; } //schema is specified => search in this schema else if(!"".equals(schema)) @@ -1358,7 +1367,8 @@ else if(!"".equals(schema)) */ private static String resolveSchemaCondition(String expr, String schema) { - return resolveSchemaConditionWithOperator(expr, schema, "="); + return resolveSchemaConditionWithOperator( + expr, schema, "OPERATOR(pg_catalog.=)"); } /** @@ -1402,10 +1412,15 @@ public java.sql.ResultSet getProcedures(String catalog, + java.sql.DatabaseMetaData.procedureReturnsResult + " AS PROCEDURE_TYPE " + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_proc p " - + " LEFT JOIN pg_catalog.pg_description d ON (p.oid=d.objoid) " - + " LEFT JOIN pg_catalog.pg_class c ON (d.classoid=c.oid AND c.relname='pg_proc') " - + " LEFT JOIN pg_catalog.pg_namespace pn ON (c.relnamespace=pn.oid AND pn.nspname='pg_catalog') " - + " WHERE p.pronamespace=n.oid " + + " LEFT JOIN pg_catalog.pg_description d" + + " ON (p.oid OPERATOR(pg_catalog.=) d.objoid) " + + " LEFT JOIN pg_catalog.pg_class c ON (" + + " d.classoid OPERATOR(pg_catalog.=) c.oid" + + " AND c.relname OPERATOR(pg_catalog.=) 'pg_proc') " + + " LEFT JOIN pg_catalog.pg_namespace pn ON (" + + " c.relnamespace OPERATOR(pg_catalog.=) pn.oid" + + " AND pn.nspname OPERATOR(pg_catalog.=) 'pg_catalog') " + + " WHERE p.pronamespace OPERATOR(pg_catalog.=) n.oid " + " AND " + resolveSchemaPatternCondition( "n.nspname", schemaPattern); if(procedureNamePattern != null) @@ -1455,7 +1470,7 @@ public java.sql.ResultSet getProcedureColumns(String catalog, String columnNamePattern) throws SQLException { ResultSetField f[] = new ResultSetField[13]; - ArrayList v = new ArrayList(); // The new ResultSet tuple stuff + ArrayList v = new ArrayList<>(); // New ResultSet tuple stuff f[0] = new ResultSetField("PROCEDURE_CAT", TypeOid.VARCHAR, getMaxNameLength()); @@ -1477,11 +1492,17 @@ public java.sql.ResultSet getProcedureColumns(String catalog, f[12] = new ResultSetField("REMARKS", TypeOid.VARCHAR, getMaxNameLength()); - String sql = "SELECT n.nspname,p.proname,p.prorettype,p.proargtypes, t.typtype::varchar,t.typrelid " - + " FROM pg_catalog.pg_proc p,pg_catalog.pg_namespace n, pg_catalog.pg_type t " - + " WHERE p.pronamespace=n.oid AND p.prorettype=t.oid " - + " AND " + resolveSchemaPatternCondition( - "n.nspname", schemaPattern); + String sql = + "SELECT" + + " n.nspname, p.proname, p.prorettype, p.proargtypes," + + " t.typtype::pg_catalog.varchar, t.typrelid " + + " FROM" + + " pg_catalog.pg_proc p, pg_catalog.pg_namespace n," + + " pg_catalog.pg_type t" + + " WHERE p.pronamespace OPERATOR(pg_catalog.=) n.oid" + + " AND p.prorettype OPERATOR(pg_catalog.=) t.oid " + + " AND " + resolveSchemaPatternCondition( + "n.nspname", schemaPattern); if(procedureNamePattern != null) { sql += " AND p.proname LIKE '" @@ -1514,14 +1535,16 @@ public java.sql.ResultSet getProcedureColumns(String catalog, tuple[1] = schema; tuple[2] = procedureName; tuple[3] = "returnValue"; - tuple[4] = new Short((short)java.sql.DatabaseMetaData.procedureColumnReturn); - tuple[5] = new Short((short)m_connection.getSQLType(returnType)); + tuple[4] = (short) + java.sql.DatabaseMetaData.procedureColumnReturn; + tuple[5] = (short)m_connection.getSQLType(returnType); tuple[6] = m_connection.getPGType(returnType); tuple[7] = null; tuple[8] = null; tuple[9] = null; tuple[10] = null; - tuple[11] = new Short((short)java.sql.DatabaseMetaData.procedureNullableUnknown); + tuple[11] = 
(short) + java.sql.DatabaseMetaData.procedureNullableUnknown; tuple[12] = null; v.add(tuple); } @@ -1535,14 +1558,15 @@ public java.sql.ResultSet getProcedureColumns(String catalog, tuple[1] = schema; tuple[2] = procedureName; tuple[3] = "$" + (i + 1); - tuple[4] = new Short((short)java.sql.DatabaseMetaData.procedureColumnIn); - tuple[5] = new Short((short)m_connection.getSQLType(argOid)); + tuple[4] = (short)java.sql.DatabaseMetaData.procedureColumnIn; + tuple[5] = (short)m_connection.getSQLType(argOid); tuple[6] = m_connection.getPGType(argOid); tuple[7] = null; tuple[8] = null; tuple[9] = null; tuple[10] = null; - tuple[11] = new Short((short)java.sql.DatabaseMetaData.procedureNullableUnknown); + tuple[11] = (short) + java.sql.DatabaseMetaData.procedureNullableUnknown; tuple[12] = null; v.add(tuple); } @@ -1550,7 +1574,11 @@ public java.sql.ResultSet getProcedureColumns(String catalog, // if we are returning a multi-column result. if(returnTypeType.equals("c")) { - String columnsql = "SELECT a.attname,a.atttypid FROM pg_catalog.pg_attribute a WHERE a.attrelid = ? ORDER BY a.attnum "; + String columnsql = + "SELECT a.attname,a.atttypid " + + "FROM pg_catalog.pg_attribute a " + + "WHERE a.attrelid OPERATOR(pg_catalog.=) ? " + + "ORDER BY a.attnum"; PreparedStatement stmt = m_connection.prepareStatement(columnsql); stmt.setObject(1, returnTypeRelid); ResultSet columnrs = stmt.executeQuery(columnsql); @@ -1563,14 +1591,16 @@ public java.sql.ResultSet getProcedureColumns(String catalog, tuple[1] = schema; tuple[2] = procedureName; tuple[3] = columnrs.getString("attname"); - tuple[4] = new Short((short)java.sql.DatabaseMetaData.procedureColumnResult); - tuple[5] = new Short((short)m_connection.getSQLType(columnTypeOid)); + tuple[4] = (short) + java.sql.DatabaseMetaData.procedureColumnResult; + tuple[5] = (short)m_connection.getSQLType(columnTypeOid); tuple[6] = m_connection.getPGType(columnTypeOid); tuple[7] = null; tuple[8] = null; tuple[9] = null; tuple[10] = null; - tuple[11] = new Short((short)java.sql.DatabaseMetaData.procedureNullableUnknown); + tuple[11] = (short) + java.sql.DatabaseMetaData.procedureNullableUnknown; tuple[12] = null; v.add(tuple); } @@ -1602,46 +1632,63 @@ public java.sql.ResultSet getProcedureColumns(String catalog, * should be "%" @param types a list of table types to include; null returns * all types @return each row is a table description @exception SQLException * if a database-access error occurs. + * + * September 2018: instead of rewriting all these CASE foo WHEN WHEN WHEN + * structures to avoid the implicit = operator, just cast the WHEN operands + * to the known type ("char") of the proband, as there is sure to be an + * =("char","char") in pg_catalog. */ public java.sql.ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String types[]) throws SQLException { String useSchemas = "SCHEMAS"; String select = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, c.relname AS TABLE_NAME, " - + " CASE n.nspname LIKE 'pg!_%' ESCAPE '!' 
OR n.nspname = 'information_schema' " + + " CASE" + + " n.nspname LIKE 'pg!_%' ESCAPE '!'" + + " OR n.nspname OPERATOR(pg_catalog.=) 'information_schema' " + " WHEN true THEN CASE " - + " WHEN n.nspname = 'pg_catalog' OR n.nspname = 'information_schema' THEN CASE c.relkind " - + " WHEN 'r' THEN 'SYSTEM TABLE' " - + " WHEN 'v' THEN 'SYSTEM VIEW' " - + " WHEN 'i' THEN 'SYSTEM INDEX' " - + " ELSE NULL " - + " END " - + " WHEN n.nspname = 'pg_toast' THEN CASE c.relkind " - + " WHEN 'r' THEN 'SYSTEM TOAST TABLE' " - + " WHEN 'i' THEN 'SYSTEM TOAST INDEX' " - + " ELSE NULL " + + " WHEN" + + " n.nspname OPERATOR(pg_catalog.=) 'pg_catalog'" + + " OR n.nspname OPERATOR(pg_catalog.=) 'information_schema'" + + " THEN CASE c.relkind " + + " WHEN 'r'::pg_catalog.\"char\" THEN 'SYSTEM TABLE' " + + " WHEN 'v'::pg_catalog.\"char\" THEN 'SYSTEM VIEW' " + + " WHEN 'i'::pg_catalog.\"char\" THEN 'SYSTEM INDEX' " + + " ELSE NULL " + + " END " + + " WHEN n.nspname OPERATOR(pg_catalog.=) 'pg_toast'" + + " THEN CASE c.relkind " + + " WHEN 'r'::pg_catalog.\"char\" THEN 'SYSTEM TOAST TABLE' " + + " WHEN 'i'::pg_catalog.\"char\" THEN 'SYSTEM TOAST INDEX' " + + " ELSE NULL " + + " END " + + " ELSE CASE c.relkind " + + " WHEN 'r'::pg_catalog.\"char\" THEN 'TEMPORARY TABLE' " + + " WHEN 'i'::pg_catalog.\"char\" THEN 'TEMPORARY INDEX' " + + " ELSE NULL " + + " END " + " END " - + " ELSE CASE c.relkind " - + " WHEN 'r' THEN 'TEMPORARY TABLE' " - + " WHEN 'i' THEN 'TEMPORARY INDEX' " + + " WHEN false THEN CASE c.relkind " + + " WHEN 'r'::pg_catalog.\"char\" THEN 'TABLE' " + + " WHEN 'i'::pg_catalog.\"char\" THEN 'INDEX' " + + " WHEN 'S'::pg_catalog.\"char\" THEN 'SEQUENCE' " + + " WHEN 'v'::pg_catalog.\"char\" THEN 'VIEW' " + " ELSE NULL " + " END " - + " END " - + " WHEN false THEN CASE c.relkind " - + " WHEN 'r' THEN 'TABLE' " - + " WHEN 'i' THEN 'INDEX' " - + " WHEN 'S' THEN 'SEQUENCE' " - + " WHEN 'v' THEN 'VIEW' " - + " ELSE NULL " - + " END " + " ELSE NULL " + " END " + " AS TABLE_TYPE, d.description AS REMARKS " + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c " - + " LEFT JOIN pg_catalog.pg_description d ON (c.oid = d.objoid AND d.objsubid = 0) " - + " LEFT JOIN pg_catalog.pg_class dc ON (d.classoid=dc.oid AND dc.relname='pg_class') " - + " LEFT JOIN pg_catalog.pg_namespace dn ON (dn.oid=dc.relnamespace AND dn.nspname='pg_catalog') " - + " WHERE c.relnamespace = n.oid " + + " LEFT JOIN pg_catalog.pg_description d ON (" + + " c.oid OPERATOR(pg_catalog.=) d.objoid" + + " AND d.objsubid OPERATOR(pg_catalog.=) 0) " + + " LEFT JOIN pg_catalog.pg_class dc ON (" + + " d.classoid OPERATOR(pg_catalog.=) dc.oid" + + " AND dc.relname OPERATOR(pg_catalog.=) 'pg_class') " + + " LEFT JOIN pg_catalog.pg_namespace dn ON (" + + " dn.oid OPERATOR(pg_catalog.=) dc.relnamespace" + + " AND dn.nspname OPERATOR(pg_catalog.=) 'pg_catalog') " + + " WHERE c.relnamespace OPERATOR(pg_catalog.=) n.oid " + " AND " + resolveSchemaPatternCondition( "n.nspname", schemaPattern); String orderby = " ORDER BY TABLE_TYPE,TABLE_SCHEM,TABLE_NAME "; @@ -1659,12 +1706,9 @@ public java.sql.ResultSet getTables(String catalog, String schemaPattern, sql += " AND (false "; for(int i = 0; i < types.length; i++) { - HashMap clauses = (HashMap)s_tableTypeClauses.get(types[i]); - if(clauses != null) - { - String clause = (String)clauses.get(useSchemas); + String clause = s_tableTypeClauses.get(types[i]); + if(clause != null) sql += " OR ( " + clause + " ) "; - } } sql += ") "; sql += orderby; @@ -1672,71 +1716,48 @@ public java.sql.ResultSet 
getTables(String catalog, String schemaPattern, return createMetaDataStatement().executeQuery(sql); } - private static final HashMap s_tableTypeClauses; + private static final HashMap s_tableTypeClauses; static { - s_tableTypeClauses = new HashMap(); - HashMap ht = new HashMap(); - s_tableTypeClauses.put("TABLE", ht); - ht.put("SCHEMAS", - "c.relkind = 'r' AND n.nspname NOT LIKE 'pg!_%' ESCAPE '!' AND n.nspname <> 'information_schema'"); - ht.put("NOSCHEMAS", - "c.relkind = 'r' AND c.relname NOT LIKE 'pg!_%' ESCAPE '!'"); - ht = new HashMap(); - s_tableTypeClauses.put("VIEW", ht); - ht.put("SCHEMAS", - "c.relkind = 'v' AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema'"); - ht.put("NOSCHEMAS", - "c.relkind = 'v' AND c.relname NOT LIKE 'pg!_%' ESCAPE '!'"); - ht = new HashMap(); - s_tableTypeClauses.put("INDEX", ht); - ht.put("SCHEMAS", - "c.relkind = 'i' AND n.nspname NOT LIKE 'pg!_%' ESCAPE '!' AND n.nspname <> 'information_schema'"); - ht.put("NOSCHEMAS", - "c.relkind = 'i' AND c.relname NOT LIKE 'pg!_%' ESCAPE '!'"); - ht = new HashMap(); - s_tableTypeClauses.put("SEQUENCE", ht); - ht.put("SCHEMAS", "c.relkind = 'S'"); - ht.put("NOSCHEMAS", "c.relkind = 'S'"); - ht = new HashMap(); - s_tableTypeClauses.put("SYSTEM TABLE", ht); - ht.put("SCHEMAS", - "c.relkind = 'r' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema')"); - ht.put("NOSCHEMAS", - "c.relkind = 'r' AND c.relname LIKE 'pg!_%' ESCAPE '!' AND c.relname NOT LIKE 'pgLIKE 'pg!_toast!_%' ESCAPE '!'toast!_%' ESCAPE '!' AND c.relname NOT LIKE 'pg!_temp!_%' ESCAPE '!'"); - ht = new HashMap(); - s_tableTypeClauses.put("SYSTEM TOAST TABLE", ht); - ht.put("SCHEMAS", "c.relkind = 'r' AND n.nspname = 'pg_toast'"); - ht.put("NOSCHEMAS", - "c.relkind = 'r' AND c.relname LIKE 'pg!_toast!_%' ESCAPE '!'"); - ht = new HashMap(); - s_tableTypeClauses.put("SYSTEM TOAST INDEX", ht); - ht.put("SCHEMAS", "c.relkind = 'i' AND n.nspname = 'pg_toast'"); - ht.put("NOSCHEMAS", - "c.relkind = 'i' AND c.relname LIKE 'pg!_toast!_%' ESCAPE '!'"); - ht = new HashMap(); - s_tableTypeClauses.put("SYSTEM VIEW", ht); - ht.put("SCHEMAS", - "c.relkind = 'v' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema') "); - ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname LIKE 'pg!_%' ESCAPE '!'"); - ht = new HashMap(); - s_tableTypeClauses.put("SYSTEM INDEX", ht); - ht.put("SCHEMAS", - "c.relkind = 'i' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema') "); - ht.put("NOSCHEMAS", - "c.relkind = 'v' AND c.relname LIKE 'pg!_%' ESCAPE '!' AND c.relname NOT LIKE 'pg!_toast!_%' ESCAPE '!' AND c.relname NOT LIKE 'pg!_temp!_%' ESCAPE '!'"); - ht = new HashMap(); - s_tableTypeClauses.put("TEMPORARY TABLE", ht); - ht.put("SCHEMAS", - "c.relkind = 'r' AND n.nspname LIKE 'pg!_temp!_%' ESCAPE '!' "); - ht.put("NOSCHEMAS", - "c.relkind = 'r' AND c.relname LIKE 'pg!_temp!_%' ESCAPE '!' "); - ht = new HashMap(); - s_tableTypeClauses.put("TEMPORARY INDEX", ht); - ht.put("SCHEMAS", - "c.relkind = 'i' AND n.nspname LIKE 'pg!_temp!_%' ESCAPE '!' "); - ht.put("NOSCHEMAS", - "c.relkind = 'i' AND c.relname LIKE 'pg!_temp!_%' ESCAPE '!' "); + s_tableTypeClauses = new HashMap<>(); + s_tableTypeClauses.put("TABLE", + "c.relkind OPERATOR(pg_catalog.=) 'r' " + + "AND n.nspname NOT LIKE 'pg!_%' ESCAPE '!' 
" + + "AND n.nspname OPERATOR(pg_catalog.<>) 'information_schema'"); + s_tableTypeClauses.put("VIEW", + "c.relkind OPERATOR(pg_catalog.=) 'v' " + + "AND n.nspname OPERATOR(pg_catalog.<>) 'pg_catalog' " + + "AND n.nspname OPERATOR(pg_catalog.<>) 'information_schema'"); + s_tableTypeClauses.put("INDEX", + "c.relkind OPERATOR(pg_catalog.=) 'i' " + + "AND n.nspname NOT LIKE 'pg!_%' ESCAPE '!' " + + "AND n.nspname OPERATOR(pg_catalog.<>) 'information_schema'"); + s_tableTypeClauses.put("SEQUENCE", + "c.relkind OPERATOR(pg_catalog.=) 'S'"); + s_tableTypeClauses.put("SYSTEM TABLE", + "c.relkind OPERATOR(pg_catalog.=) 'r' AND (" + + " n.nspname OPERATOR(pg_catalog.=) 'pg_catalog'" + + " OR n.nspname OPERATOR(pg_catalog.=) 'information_schema')"); + s_tableTypeClauses.put("SYSTEM TOAST TABLE", + "c.relkind OPERATOR(pg_catalog.=) 'r' " + + "AND n.nspname OPERATOR(pg_catalog.=) 'pg_toast'"); + s_tableTypeClauses.put("SYSTEM TOAST INDEX", + "c.relkind OPERATOR(pg_catalog.=) 'i' " + + "AND n.nspname OPERATOR(pg_catalog.=) 'pg_toast'"); + s_tableTypeClauses.put("SYSTEM VIEW", + "c.relkind OPERATOR(pg_catalog.=) 'v' AND (" + + " n.nspname OPERATOR(pg_catalog.=) 'pg_catalog'" + + " OR n.nspname OPERATOR(pg_catalog.=) 'information_schema') "); + s_tableTypeClauses.put("SYSTEM INDEX", + "c.relkind OPERATOR(pg_catalog.=) 'i' AND (" + + " n.nspname OPERATOR(pg_catalog.=) 'pg_catalog'" + + " OR n.nspname OPERATOR(pg_catalog.=) 'information_schema') "); + s_tableTypeClauses.put("TEMPORARY TABLE", + "c.relkind OPERATOR(pg_catalog.=) 'r' " + + "AND n.nspname LIKE 'pg!_temp!_%' ESCAPE '!' "); + s_tableTypeClauses.put("TEMPORARY INDEX", + "c.relkind OPERATOR(pg_catalog.=) 'i' " + + "AND n.nspname LIKE 'pg!_temp!_%' ESCAPE '!' "); } // These are the default tables, used when NULL is passed to getTables @@ -1752,7 +1773,11 @@ public java.sql.ResultSet getTables(String catalog, String schemaPattern, */ public java.sql.ResultSet getSchemas() throws SQLException { - String sql = "SELECT nspname AS TABLE_SCHEM FROM pg_catalog.pg_namespace WHERE nspname <> 'pg_toast' AND nspname NOT LIKE 'pg!_temp!_%' ESCAPE '!' ORDER BY TABLE_SCHEM"; + String sql = + "SELECT nspname AS TABLE_SCHEM FROM pg_catalog.pg_namespace " + + "WHERE nspname OPERATOR(pg_catalog.<>) 'pg_toast' " + + "AND nspname NOT LIKE 'pg!_temp!_%' ESCAPE '!' 
" + + "ORDER BY TABLE_SCHEM"; return createMetaDataStatement().executeQuery(sql); } @@ -1778,10 +1803,10 @@ public java.sql.ResultSet getCatalogs() throws SQLException public java.sql.ResultSet getTableTypes() throws SQLException { String types[] = (String[])s_tableTypeClauses.keySet().toArray(new String[s_tableTypeClauses.size()]); - sortStringArray(types); + sort(types); ResultSetField f[] = new ResultSetField[1]; - ArrayList v = new ArrayList(); + ArrayList v = new ArrayList<>(); f[0] = new ResultSetField(new String("TABLE_TYPE"), TypeOid.VARCHAR, getMaxNameLength()); for(int i = 0; i < types.length; i++) @@ -1829,7 +1854,7 @@ public java.sql.ResultSet getTableTypes() throws SQLException public java.sql.ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { - ArrayList v = new ArrayList(); // The new ResultSet tuple stuff + ArrayList v = new ArrayList<>(); // New ResultSet tuple stuff ResultSetField f[] = new ResultSetField[18]; // The field descriptors // for the new ResultSet @@ -1861,17 +1886,30 @@ public java.sql.ResultSet getColumns(String catalog, String schemaPattern, f[17] = new ResultSetField("IS_NULLABLE", TypeOid.VARCHAR, getMaxNameLength()); - String sql = "SELECT n.nspname,c.relname,a.attname," - + " a.atttypid as atttypid,a.attnotnull,a.atttypmod," - + " a.attlen::int4 as attlen,a.attnum,def.adsrc,dsc.description " + String sql = "SELECT n.nspname, c.relname, a.attname," + + " a.atttypid as atttypid, a.attnotnull, a.atttypmod," + + " a.attlen::pg_catalog.int4 as attlen, a.attnum," + + " pg_catalog.pg_get_expr(def.adbin, c.oid) AS adsrc," + + " dsc.description" + " FROM pg_catalog.pg_namespace n " - + " JOIN pg_catalog.pg_class c ON (c.relnamespace = n.oid) " - + " JOIN pg_catalog.pg_attribute a ON (a.attrelid=c.oid) " - + " LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum) " - + " LEFT JOIN pg_catalog.pg_description dsc ON (c.oid=dsc.objoid AND a.attnum = dsc.objsubid) " - + " LEFT JOIN pg_catalog.pg_class dc ON (dc.oid=dsc.classoid AND dc.relname='pg_class') " - + " LEFT JOIN pg_catalog.pg_namespace dn ON (dc.relnamespace=dn.oid AND dn.nspname='pg_catalog') " - + " WHERE a.attnum > 0 AND NOT a.attisdropped " + + " JOIN pg_catalog.pg_class c" + + " ON (c.relnamespace OPERATOR(pg_catalog.=) n.oid) " + + " JOIN pg_catalog.pg_attribute a " + + " ON (a.attrelid OPERATOR(pg_catalog.=) c.oid) " + + " LEFT JOIN pg_catalog.pg_attrdef def ON (" + + " a.attrelid OPERATOR(pg_catalog.=) def.adrelid" + + " AND a.attnum OPERATOR(pg_catalog.=) def.adnum) " + + " LEFT JOIN pg_catalog.pg_description dsc ON (" + + " c.oid OPERATOR(pg_catalog.=) dsc.objoid" + + " AND a.attnum OPERATOR(pg_catalog.=) dsc.objsubid) " + + " LEFT JOIN pg_catalog.pg_class dc ON (" + + " dc.oid OPERATOR(pg_catalog.=) dsc.classoid" + + " AND dc.relname OPERATOR(pg_catalog.=) 'pg_class') " + + " LEFT JOIN pg_catalog.pg_namespace dn ON (" + + " dc.relnamespace OPERATOR(pg_catalog.=) dn.oid" + + " AND dn.nspname OPERATOR(pg_catalog.=) 'pg_catalog') " + + " WHERE a.attnum OPERATOR(pg_catalog.>) 0" + + " AND NOT a.attisdropped " + " AND " + resolveSchemaPatternCondition( "n.nspname", schemaPattern); @@ -1897,7 +1935,7 @@ public java.sql.ResultSet getColumns(String catalog, String schemaPattern, tuple[1] = rs.getString("nspname"); // Schema tuple[2] = rs.getString("relname"); // Table name tuple[3] = rs.getString("attname"); // Column name - tuple[4] = new Short((short)m_connection.getSQLType(typeOid)); + 
tuple[4] = (short)m_connection.getSQLType(typeOid); String pgType = m_connection.getPGType(typeOid); tuple[5] = m_connection.getPGType(typeOid); // Type name @@ -1922,45 +1960,44 @@ else if(pgType.equals("int8")) // by default no decimal_digits // if the type is numeric or decimal we will // overwrite later. - tuple[8] = new Integer(0); + tuple[8] = 0; if(pgType.equals("bpchar") || pgType.equals("varchar")) { int atttypmod = rs.getInt("atttypmod"); - tuple[6] = new Integer(atttypmod != -1 - ? atttypmod - VARHDRSZ - : 0); + tuple[6] = atttypmod != -1 + ? atttypmod - VARHDRSZ + : 0; } else if(pgType.equals("numeric") || pgType.equals("decimal")) { int attypmod = rs.getInt("atttypmod") - VARHDRSZ; - tuple[6] = new Integer ((attypmod >> 16) & 0xffff); - tuple[8] = new Integer (attypmod & 0xffff); - tuple[9] = new Integer(10); + tuple[6] = (attypmod >> 16) & 0xffff; + tuple[8] = attypmod & 0xffff; + tuple[9] = 10; } else if(pgType.equals("bit") || pgType.equals("varbit")) { tuple[6] = rs.getObject("atttypmod"); - tuple[9] = new Integer(2); + tuple[9] = 2; } else { tuple[6] = rs.getObject("attlen"); - tuple[9] = new Integer(10); + tuple[9] = 10; } tuple[7] = null; // Buffer length - tuple[10] = new Integer(rs - .getBoolean("attnotnull") + tuple[10] = rs.getBoolean("attnotnull") // Nullable ? java.sql.DatabaseMetaData.columnNoNulls - : java.sql.DatabaseMetaData.columnNullable); // Nullable + : java.sql.DatabaseMetaData.columnNullable; tuple[11] = rs.getString("description"); // Description (if any) tuple[12] = rs.getString("adsrc"); // Column default tuple[13] = null; // sql data type (unused) tuple[14] = null; // sql datetime sub (unused) tuple[15] = tuple[6]; // char octet length - tuple[16] = new Integer(rs.getInt("attnum")); // ordinal position + tuple[16] = rs.getInt("attnum"); // ordinal position tuple[17] = rs.getBoolean("attnotnull") ? 
"NO" : "YES"; // Is // nullable v.add(tuple); @@ -1993,7 +2030,7 @@ public java.sql.ResultSet getColumnPrivileges(String catalog, throws SQLException { ResultSetField f[] = new ResultSetField[8]; - ArrayList v = new ArrayList(); + ArrayList v = new ArrayList<>(); if(table == null) table = "%"; @@ -2020,14 +2057,17 @@ public java.sql.ResultSet getColumnPrivileges(String catalog, String sql = "SELECT n.nspname,c.relname,u.usename,c.relacl,a.attname " + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c, pg_catalog.pg_user u, pg_catalog.pg_attribute a " - + " WHERE c.relnamespace = n.oid " - + " AND u.usesysid = c.relowner " + " AND c.oid = a.attrelid " - + " AND c.relkind = 'r' " - + " AND a.attnum > 0 AND NOT a.attisdropped " + + " WHERE c.relnamespace OPERATOR(pg_catalog.=) n.oid " + + " AND u.usesysid OPERATOR(pg_catalog.=) c.relowner " + + " AND c.oid OPERATOR(pg_catalog.=) a.attrelid " + + " AND c.relkind OPERATOR(pg_catalog.=) 'r' " + + " AND a.attnum OPERATOR(pg_catalog.>) 0" + + " AND NOT a.attisdropped " + " AND " + resolveSchemaCondition( "n.nspname", schema); - sql += " AND c.relname = '" + escapeQuotes(table) + "' "; + sql += " AND c.relname OPERATOR(pg_catalog.=) '" + + escapeQuotes(table) + "' "; if(columnNamePattern != null && !"".equals(columnNamePattern)) { sql += " AND a.attname LIKE '" + escapeQuotes(columnNamePattern) @@ -2041,7 +2081,7 @@ public java.sql.ResultSet getColumnPrivileges(String catalog, String column = null; String owner = null; String[] acls = null; - HashMap permissions = null; + HashMap> permissions = null; String permNames[] = null; while(rs.next()) @@ -2052,8 +2092,9 @@ public java.sql.ResultSet getColumnPrivileges(String catalog, owner = rs.getString("usename"); acls = (String[])rs.getObject("relacl"); permissions = parseACL(acls, owner); - permNames = (String[])permissions.keySet().toArray(new String[permissions.size()]); - sortStringArray(permNames); + permNames = + permissions.keySet().toArray(new String[permissions.size()]); + sort(permNames); for(int i = 0; i < permNames.length; i++) { ArrayList grantees = (ArrayList)permissions.get(permNames[i]); @@ -2102,7 +2143,7 @@ public java.sql.ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { ResultSetField f[] = new ResultSetField[7]; - ArrayList v = new ArrayList(); + ArrayList v = new ArrayList<>(); f[0] = new ResultSetField("TABLE_CAT", TypeOid.VARCHAR, getMaxNameLength()); @@ -2121,8 +2162,9 @@ public java.sql.ResultSet getTablePrivileges(String catalog, String sql = "SELECT n.nspname,c.relname,u.usename,c.relacl " + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c, pg_catalog.pg_user u " - + " WHERE c.relnamespace = n.oid " - + " AND u.usesysid = c.relowner " + " AND c.relkind = 'r' " + + " WHERE c.relnamespace OPERATOR(pg_catalog.=) n.oid " + + " AND u.usesysid OPERATOR(pg_catalog.=) c.relowner " + + " AND c.relkind OPERATOR(pg_catalog.=) 'r' " + " AND " + resolveSchemaPatternCondition( "n.nspname", schemaPattern); @@ -2138,7 +2180,7 @@ public java.sql.ResultSet getTablePrivileges(String catalog, String table = null; String owner = null; String[] acls = null; - HashMap permissions = null; + HashMap> permissions = null; String permNames[] = null; while(rs.next()) @@ -2148,11 +2190,12 @@ public java.sql.ResultSet getTablePrivileges(String catalog, owner = rs.getString("usename"); acls = (String[])rs.getObject("relacl"); permissions = parseACL(acls, owner); - permNames = (String[])permissions.keySet().toArray(new 
String[permissions.size()]); - sortStringArray(permNames); + permNames = + permissions.keySet().toArray(new String[permissions.size()]); + sort(permNames); for(int i = 0; i < permNames.length; i++) { - ArrayList grantees = (ArrayList)permissions.get(permNames[i]); + ArrayList grantees = permissions.get(permNames[i]); for(int j = 0; j < grantees.size(); j++) { String grantee = (String)grantees.get(j); @@ -2174,27 +2217,12 @@ public java.sql.ResultSet getTablePrivileges(String catalog, return createSyntheticResultSet(f, v); } - private static void sortStringArray(String s[]) - { - for(int i = 0; i < s.length - 1; i++) - { - for(int j = i + 1; j < s.length; j++) - { - if(s[i].compareTo(s[j]) > 0) - { - String tmp = s[i]; - s[i] = s[j]; - s[j] = tmp; - } - } - } - } - /** * Add the user described by the given acl to the ArrayLists of users with the * privileges described by the acl. */ - private void addACLPrivileges(String acl, HashMap privileges) + private void addACLPrivileges( + String acl, HashMap> privileges) { int equalIndex = acl.lastIndexOf("="); String name = acl.substring(0, equalIndex); @@ -2247,13 +2275,9 @@ private void addACLPrivileges(String acl, HashMap privileges) default: sqlpriv = "UNKNOWN"; } - ArrayList usersWithPermission = (ArrayList)privileges.get(sqlpriv); - if(usersWithPermission == null) - { - usersWithPermission = new ArrayList(); - privileges.put(sqlpriv, usersWithPermission); - } - usersWithPermission.add(name); + privileges + .computeIfAbsent(sqlpriv, k -> new ArrayList<>()) + .add(name); } } @@ -2262,14 +2286,16 @@ private void addACLPrivileges(String acl, HashMap privileges) * mapping the SQL permission name to a ArrayList of usernames who have that * permission. */ - protected HashMap parseACL(String[] aclArray, String owner) + protected HashMap> parseACL( + String[] aclArray, String owner) { if(aclArray == null || aclArray.length == 0) { // null acl is a shortcut for owner having full privs + // XXX (2020) the implied default depends on type of catalog object aclArray = new String[] { owner + "=arwdRxt" }; } - HashMap privileges = new HashMap(); + HashMap> privileges = new HashMap<>(); for(int i = 0; i < aclArray.length; i++) { String acl = aclArray[i]; @@ -2304,7 +2330,7 @@ public java.sql.ResultSet getBestRowIdentifier(String catalog, throws SQLException { ResultSetField f[] = new ResultSetField[8]; - ArrayList v = new ArrayList(); // The new ResultSet tuple stuff + ArrayList v = new ArrayList<>(); // New ResultSet tuple stuff f[0] = new ResultSetField("SCOPE", TypeOid.INT2, 2); f[1] = new ResultSetField("COLUMN_NAME", TypeOid.VARCHAR, @@ -2325,13 +2351,17 @@ public java.sql.ResultSet getBestRowIdentifier(String catalog, String where = ""; String from = " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class ct, pg_catalog.pg_class ci, pg_catalog.pg_attribute a, pg_catalog.pg_index i "; - where = " AND ct.relnamespace = n.oid " + where = " AND ct.relnamespace OPERATOR(pg_catalog.=) n.oid " + " AND " + resolveSchemaCondition( "n.nspname", schema); String sql = "SELECT a.attname, a.atttypid as atttypid " + from - + " WHERE ct.oid=i.indrelid AND ci.oid=i.indexrelid " - + " AND a.attrelid=ci.oid AND i.indisprimary " - + " AND ct.relname = '" + escapeQuotes(table) + "' " + where + + " WHERE ct.oid OPERATOR(pg_catalog.=) i.indrelid " + + " AND ci.oid OPERATOR(pg_catalog.=) i.indexrelid " + + " AND a.attrelid OPERATOR(pg_catalog.=) ci.oid" + + " AND i.indisprimary " + + " AND ct.relname OPERATOR(pg_catalog.=) '" + + escapeQuotes(table) + "' " + + where + " ORDER BY 
a.attnum "; ResultSet rs = m_connection.createStatement().executeQuery(sql); @@ -2339,14 +2369,14 @@ public java.sql.ResultSet getBestRowIdentifier(String catalog, { Object[] tuple = new Object[8]; Oid columnTypeOid = (Oid)rs.getObject("atttypid"); - tuple[0] = new Short((short)scope); + tuple[0] = (short)scope; tuple[1] = rs.getString("attname"); - tuple[2] = new Short((short)m_connection.getSQLType(columnTypeOid)); + tuple[2] = (short)m_connection.getSQLType(columnTypeOid); tuple[3] = m_connection.getPGType(columnTypeOid); tuple[4] = null; tuple[5] = null; tuple[6] = null; - tuple[7] = new Short((short)java.sql.DatabaseMetaData.bestRowNotPseudo); + tuple[7] = (short)java.sql.DatabaseMetaData.bestRowNotPseudo; v.add(tuple); } @@ -2373,7 +2403,7 @@ public java.sql.ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { ResultSetField f[] = new ResultSetField[8]; - ArrayList v = new ArrayList(); // The new ResultSet tuple stuff + ArrayList v = new ArrayList<>(); // New ResultSet tuple stuff f[0] = new ResultSetField("SCOPE", TypeOid.INT2, 2); f[1] = new ResultSetField("COLUMN_NAME", TypeOid.VARCHAR, @@ -2400,12 +2430,12 @@ public java.sql.ResultSet getVersionColumns(String catalog, String schema, tuple[0] = null; tuple[1] = "ctid"; - tuple[2] = new Short((short)m_connection.getSQLType("tid")); + tuple[2] = (short)m_connection.getSQLType("tid"); tuple[3] = "tid"; tuple[4] = null; tuple[5] = null; tuple[6] = null; - tuple[7] = new Short((short)java.sql.DatabaseMetaData.versionColumnPseudo); + tuple[7] = (short)java.sql.DatabaseMetaData.versionColumnPseudo; v.add(tuple); /* @@ -2434,17 +2464,21 @@ public java.sql.ResultSet getPrimaryKeys(String catalog, String schema, String where = ""; String select = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "; from = " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class ct, pg_catalog.pg_class ci, pg_catalog.pg_attribute a, pg_catalog.pg_index i "; - where = " AND ct.relnamespace = n.oid AND " + + where = " AND ct.relnamespace OPERATOR(pg_catalog.=) n.oid AND " + resolveSchemaCondition("n.nspname", schema); String sql = select + " ct.relname AS TABLE_NAME, " - + " a.attname AS COLUMN_NAME, " + " a.attnum::int2 AS KEY_SEQ, " + + " a.attname AS COLUMN_NAME," + + " a.attnum::pg_catalog.int2 AS KEY_SEQ, " + " ci.relname AS PK_NAME " + from - + " WHERE ct.oid=i.indrelid AND ci.oid=i.indexrelid " - + " AND a.attrelid=ci.oid AND i.indisprimary "; + + " WHERE ct.oid OPERATOR(pg_catalog.=) i.indrelid" + + " AND ci.oid OPERATOR(pg_catalog.=) i.indexrelid " + + " AND a.attrelid OPERATOR(pg_catalog.=) ci.oid" + + " AND i.indisprimary "; if(table != null && !"".equals(table)) { - sql += " AND ct.relname = '" + escapeQuotes(table) + "' "; + sql += " AND ct.relname OPERATOR(pg_catalog.=) '" + + escapeQuotes(table) + "' "; } sql += where + " ORDER BY table_name, pk_name, key_seq"; @@ -2500,33 +2534,38 @@ protected java.sql.ResultSet getImportedExportedKeys(String primaryCatalog, * multiple unique indexes covering the same keys can be created which * make it difficult to determine the PK_NAME field. 
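
For orientation, a caller-side sketch of reading the getPrimaryKeys() result built by the query above; the column labels match the SELECT aliases, rows arrive ordered by table_name, pk_name, key_seq, and the schema and table names here are hypothetical (assumes the usual java.sql imports):

    // Illustrative caller-side read of the getPrimaryKeys() result shape.
    static void listPrimaryKey(Connection conn) throws SQLException
    {
        DatabaseMetaData md = conn.getMetaData();
        try ( ResultSet pk = md.getPrimaryKeys(null, "public", "my_table") )
        {
            while ( pk.next() )
            {
                String column = pk.getString("COLUMN_NAME");
                short keySeq  = pk.getShort("KEY_SEQ");
                String pkName = pk.getString("PK_NAME");
                // use column / keySeq / pkName as needed
            }
        }
    }
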
*/ - String sql = "SELECT NULL::text AS PKTABLE_CAT, pkn.nspname AS PKTABLE_SCHEM, pkc.relname AS PKTABLE_NAME, pka.attname AS PKCOLUMN_NAME, " - + "NULL::text AS FKTABLE_CAT, fkn.nspname AS FKTABLE_SCHEM, fkc.relname AS FKTABLE_NAME, fka.attname AS FKCOLUMN_NAME, " - + "pos.n::int2 AS KEY_SEQ, " + String sql = "SELECT " + + "NULL::pg_catalog.text AS PKTABLE_CAT, " + + "pkn.nspname AS PKTABLE_SCHEM, pkc.relname AS PKTABLE_NAME, " + + "pka.attname AS PKCOLUMN_NAME, " + + "NULL::pg_catalog.text AS FKTABLE_CAT, " + + "fkn.nspname AS FKTABLE_SCHEM, fkc.relname AS FKTABLE_NAME, " + + "fka.attname AS FKCOLUMN_NAME, " + + "pos.n::pg_catalog.int2 AS KEY_SEQ, " + "CASE con.confupdtype " - + " WHEN 'c' THEN " + + " WHEN 'c'::pg_catalog.\"char\" THEN " + DatabaseMetaData.importedKeyCascade - + " WHEN 'n' THEN " + + " WHEN 'n'::pg_catalog.\"char\" THEN " + DatabaseMetaData.importedKeySetNull - + " WHEN 'd' THEN " + + " WHEN 'd'::pg_catalog.\"char\" THEN " + DatabaseMetaData.importedKeySetDefault - + " WHEN 'r' THEN " + + " WHEN 'r'::pg_catalog.\"char\" THEN " + DatabaseMetaData.importedKeyRestrict - + " WHEN 'a' THEN " + + " WHEN 'a'::pg_catalog.\"char\" THEN " + DatabaseMetaData.importedKeyNoAction - + " ELSE NULL END::int2 AS UPDATE_RULE, " + + " ELSE NULL END::pg_catalog.int2 AS UPDATE_RULE, " + "CASE con.confdeltype " - + " WHEN 'c' THEN " + + " WHEN 'c'::pg_catalog.\"char\" THEN " + DatabaseMetaData.importedKeyCascade - + " WHEN 'n' THEN " + + " WHEN 'n'::pg_catalog.\"char\" THEN " + DatabaseMetaData.importedKeySetNull - + " WHEN 'd' THEN " + + " WHEN 'd'::pg_catalog.\"char\" THEN " + DatabaseMetaData.importedKeySetDefault - + " WHEN 'r' THEN " + + " WHEN 'r'::pg_catalog.\"char\" THEN " + DatabaseMetaData.importedKeyRestrict - + " WHEN 'a' THEN " + + " WHEN 'a'::pg_catalog.\"char\" THEN " + DatabaseMetaData.importedKeyNoAction - + " ELSE NULL END::int2 AS DELETE_RULE, " + + " ELSE NULL END::pg_catalog.int2 AS DELETE_RULE, " + "con.conname AS FK_NAME, pkic.relname AS PK_NAME, " + "CASE " + " WHEN con.condeferrable AND con.condeferred THEN " @@ -2535,27 +2574,42 @@ protected java.sql.ResultSet getImportedExportedKeys(String primaryCatalog, + DatabaseMetaData.importedKeyInitiallyImmediate + " ELSE " + DatabaseMetaData.importedKeyNotDeferrable - + " END::int2 AS DEFERRABILITY " + + " END::pg_catalog.int2 AS DEFERRABILITY " + " FROM " + " pg_catalog.pg_namespace pkn, pg_catalog.pg_class pkc, pg_catalog.pg_attribute pka, " + " pg_catalog.pg_namespace fkn, pg_catalog.pg_class fkc, pg_catalog.pg_attribute fka, " + " pg_catalog.pg_constraint con, " + " pg_catalog.generate_series(1, " + getMaxIndexKeys() + ") pos(n), " + " pg_catalog.pg_depend dep, pg_catalog.pg_class pkic " - + " WHERE pkn.oid = pkc.relnamespace AND pkc.oid = pka.attrelid AND pka.attnum = con.confkey[pos.n] AND con.confrelid = pkc.oid " - + " AND fkn.oid = fkc.relnamespace AND fkc.oid = fka.attrelid AND fka.attnum = con.conkey[pos.n] AND con.conrelid = fkc.oid " - + " AND con.contype = 'f' AND con.oid = dep.objid AND pkic.oid = dep.refobjid AND pkic.relkind = 'i' AND dep.classid = 'pg_constraint'::regclass::oid AND dep.refclassid = 'pg_class'::regclass::oid " + - " AND " + resolveSchemaCondition("pkn.nspname", primarySchema) + - " AND " + resolveSchemaCondition("fkn.nspname", foreignSchema); + + " WHERE pkn.oid OPERATOR(pg_catalog.=) pkc.relnamespace" + + " AND pkc.oid OPERATOR(pg_catalog.=) pka.attrelid" + + " AND pka.attnum OPERATOR(pg_catalog.=) con.confkey[pos.n]" + + " AND con.confrelid OPERATOR(pg_catalog.=) pkc.oid " + + " AND 
fkn.oid OPERATOR(pg_catalog.=) fkc.relnamespace" + + " AND fkc.oid OPERATOR(pg_catalog.=) fka.attrelid" + + " AND fka.attnum OPERATOR(pg_catalog.=) con.conkey[pos.n]" + + " AND con.conrelid OPERATOR(pg_catalog.=) fkc.oid " + + " AND con.contype OPERATOR(pg_catalog.=) 'f'" + + " AND con.oid OPERATOR(pg_catalog.=) dep.objid" + + " AND pkic.oid OPERATOR(pg_catalog.=) dep.refobjid" + + " AND pkic.relkind OPERATOR(pg_catalog.=) 'i'" + + " AND dep.classid OPERATOR(pg_catalog.=)" + + " 'pg_constraint'::pg_catalog.regclass::pg_catalog.oid" + + " AND dep.refclassid OPERATOR(pg_catalog.=)" + + " 'pg_class'::pg_catalog.regclass::pg_catalog.oid" + + " AND " + resolveSchemaCondition("pkn.nspname", primarySchema) + + " AND " + resolveSchemaCondition("fkn.nspname", foreignSchema); if(primaryTable != null && !"".equals(primaryTable)) { - sql += " AND pkc.relname = '" + escapeQuotes(primaryTable) + sql += " AND pkc.relname OPERATOR(pg_catalog.=) '" + + escapeQuotes(primaryTable) + "' "; } if(foreignTable != null && !"".equals(foreignTable)) { - sql += " AND fkc.relname = '" + escapeQuotes(foreignTable) + sql += " AND fkc.relname OPERATOR(pg_catalog.=) '" + + escapeQuotes(foreignTable) + "' "; } @@ -2724,7 +2778,7 @@ public java.sql.ResultSet getTypeInfo() throws SQLException { ResultSetField f[] = new ResultSetField[18]; - ArrayList v = new ArrayList(); // The new ResultSet tuple stuff + ArrayList v = new ArrayList<>(); // New ResultSet tuple stuff f[0] = new ResultSetField("TYPE_NAME", TypeOid.VARCHAR, getMaxNameLength()); @@ -2750,15 +2804,17 @@ public java.sql.ResultSet getTypeInfo() throws SQLException f[16] = new ResultSetField("SQL_DATETIME_SUB", TypeOid.INT4, 4); f[17] = new ResultSetField("NUM_PREC_RADIX", TypeOid.INT4, 4); - String sql = "SELECT typname FROM pg_catalog.pg_type where typrelid = 0"; + String sql = + "SELECT typname FROM pg_catalog.pg_type " + + "WHERE typrelid OPERATOR(pg_catalog.=) 0"; ResultSet rs = m_connection.createStatement().executeQuery(sql); // cache some results, this will keep memory useage down, and speed // things up a little. 
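
The removal of new Integer(…)/new Short(…) above still honors the "cache some results" comment, because autoboxing compiles to Integer.valueOf()/Short.valueOf(), which cache small values:

    // Autoboxing goes through Integer.valueOf()/Short.valueOf(), which cache
    // values in at least -128..127, so these no longer allocate a fresh object
    // the way new Integer(10) and new Short((short)...) did.
    Integer i10 = 10;                                           // Integer.valueOf(10)
    Short ts = (short)java.sql.DatabaseMetaData.typeSearchable; // Short.valueOf
    assert i10 == Integer.valueOf(10);                          // same cached instance
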
- Integer i9 = new Integer(9); - Integer i10 = new Integer(10); - Short nn = new Short((short)java.sql.DatabaseMetaData.typeNoNulls); - Short ts = new Short((short)java.sql.DatabaseMetaData.typeSearchable); + Integer i9 = 9; + Integer i10 = 10; + Short nn = (short)java.sql.DatabaseMetaData.typeNoNulls; + Short ts = (short)java.sql.DatabaseMetaData.typeSearchable; String typname = null; @@ -2767,7 +2823,7 @@ public java.sql.ResultSet getTypeInfo() throws SQLException Object[] tuple = new Object[18]; typname = rs.getString(1); tuple[0] = typname; - tuple[1] = new Short((short)m_connection.getSQLType(typname)); + tuple[1] = (short)m_connection.getSQLType(typname); tuple[2] = i9; // for now tuple[6] = nn; // for now tuple[7] = Boolean.FALSE; // false for now - not case sensitive @@ -2848,7 +2904,7 @@ public java.sql.ResultSet getIndexInfo(String catalog, String schema, String select = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "; String from = " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class ct, pg_catalog.pg_class ci, pg_catalog.pg_index i, pg_catalog.pg_attribute a, pg_catalog.pg_am am "; String where = - " AND n.oid = ct.relnamespace " + + " AND n.oid OPERATOR(pg_catalog.=) ct.relnamespace " + " AND " + resolveSchemaCondition("n.nspname", schema); String sql = select @@ -2856,22 +2912,27 @@ public java.sql.ResultSet getIndexInfo(String catalog, String schema, + " CASE i.indisclustered " + " WHEN true THEN " + java.sql.DatabaseMetaData.tableIndexClustered - + " ELSE CASE am.amname " - + " WHEN 'hash' THEN " + + " ELSE CASE" + + " WHEN am.amname OPERATOR(pg_catalog.=) 'hash' THEN " + java.sql.DatabaseMetaData.tableIndexHashed + " ELSE " + java.sql.DatabaseMetaData.tableIndexOther + " END " - + " END::int2 AS TYPE, " - + " a.attnum::int2 AS ORDINAL_POSITION, " + + " END::pg_catalog.int2 AS TYPE, " + + " a.attnum::pg_catalog.int2 AS ORDINAL_POSITION, " + " a.attname AS COLUMN_NAME, " + " NULL AS ASC_OR_DESC, " + " ci.reltuples AS CARDINALITY, " + " ci.relpages AS PAGES, " + " NULL AS FILTER_CONDITION " + from - + " WHERE ct.oid=i.indrelid AND ci.oid=i.indexrelid AND a.attrelid=ci.oid AND ci.relam=am.oid " - + where + " AND ct.relname = '" + escapeQuotes(tableName) + "' "; + + " WHERE ct.oid OPERATOR(pg_catalog.=) i.indrelid" + + " AND ci.oid OPERATOR(pg_catalog.=) i.indexrelid" + + " AND a.attrelid OPERATOR(pg_catalog.=) ci.oid" + + " AND ci.relam OPERATOR(pg_catalog.=) am.oid " + + where + + " AND ct.relname OPERATOR(pg_catalog.=) '" + + escapeQuotes(tableName) + "' "; if(unique) { @@ -2982,26 +3043,35 @@ public boolean supportsBatchUpdates() throws SQLException public java.sql.ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException { - String sql = "select " - + "null as type_cat, n.nspname as type_schem, t.typname as type_name, null as class_name, " - + "CASE WHEN t.typtype='c' then " - + java.sql.Types.STRUCT - + " else " - + java.sql.Types.DISTINCT - + " end as data_type, pg_catalog.obj_description(t.oid, 'pg_type') " - + "as remarks, CASE WHEN t.typtype = 'd' then (select CASE"; - - for(int i = 0; i < SPIConnection.JDBC3_TYPE_NAMES.length; i++) + String sql = + "SELECT" + + " null AS type_cat, n.nspname AS type_schem," + + " t.typname AS type_name, null AS class_name," + + " CASE WHEN t.typtype OPERATOR(pg_catalog.=) 'c' THEN " + + java.sql.Types.STRUCT + + " ELSE " + + java.sql.Types.DISTINCT + + " END AS data_type," + + " pg_catalog.obj_description(t.oid, 'pg_type') AS remarks," + + " CASE WHEN t.typtype 
OPERATOR(pg_catalog.=) 'd' THEN (" + + " select CASE"; + + for(int i = 0; i < SPIConnection.JDBC_TYPE_NAMES.length; i++) { - sql += " when typname = '" + SPIConnection.JDBC_TYPE_NUMBERS[i] - + "' then " + SPIConnection.JDBC_TYPE_NUMBERS[i]; + sql += " WHEN typname OPERATOR(pg_catalog.=) '" + + SPIConnection.JDBC_TYPE_NUMBERS[i] + + "' THEN " + SPIConnection.JDBC_TYPE_NUMBERS[i]; } - sql += " else " + sql += " ELSE " + java.sql.Types.OTHER - + " end from pg_type where oid=t.typbasetype) " - + "else null end as base_type " - + "from pg_catalog.pg_type t, pg_catalog.pg_namespace n where t.typnamespace = n.oid and n.nspname != 'pg_catalog' and n.nspname != 'pg_toast'"; + + " END" + + " FROM pg_type WHERE oid OPERATOR(pg_catalog.=) t.typbasetype) " + + "ELSE null END AS base_type " + + "FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n " + + "WHERE t.typnamespace OPERATOR(pg_catalog.=) n.oid " + + "AND n.nspname OPERATOR(pg_catalog.<>) 'pg_catalog' " + + "AND n.nspname OPERATOR(pg_catalog.<>) 'pg_toast'"; String toAdd = ""; if(types != null) @@ -3012,10 +3082,10 @@ public java.sql.ResultSet getUDTs(String catalog, String schemaPattern, switch(types[i]) { case java.sql.Types.STRUCT: - toAdd += " or t.typtype = 'c'"; + toAdd += " or t.typtype OPERATOR(pg_catalog.=) 'c'"; break; case java.sql.Types.DISTINCT: - toAdd += " or t.typtype = 'd'"; + toAdd += " or t.typtype OPERATOR(pg_catalog.=) 'd'"; break; } } @@ -3023,7 +3093,9 @@ public java.sql.ResultSet getUDTs(String catalog, String schemaPattern, } else { - toAdd += " and t.typtype IN ('c','d') "; + toAdd += + " AND t.typtype IN " + + "('c'::pg_catalog.\"char\", 'd'::pg_catalog.\"char\") "; } // spec says that if typeNamePattern is a fully qualified name // then the schema and catalog are ignored @@ -3107,7 +3179,7 @@ private Statement createMetaDataStatement() throws SQLException */ public boolean supportsSavepoints() throws SQLException { - return this.getDatabaseMajorVersion() >= 8; + return true; // PG < 8 no longer supported } /** @@ -3339,7 +3411,7 @@ public ResultSet getAttributes(String catalog, String schemaPattern, public boolean supportsResultSetHoldability(int holdability) throws SQLException { - return true; + return ResultSet.CLOSE_CURSORS_AT_COMMIT == holdability; } /** @@ -3354,7 +3426,7 @@ public boolean supportsResultSetHoldability(int holdability) */ public int getResultSetHoldability() throws SQLException { - return ResultSet.HOLD_CURSORS_OVER_COMMIT; + return ResultSet.CLOSE_CURSORS_AT_COMMIT; } /** @@ -3455,11 +3527,41 @@ public boolean supportsStatementPooling() throws SQLException * This method creates a ResultSet which is not associated with any * statement. */ - private ResultSet createSyntheticResultSet(ResultSetField[] f, ArrayList tuples) + private ResultSet createSyntheticResultSet( + ResultSetField[] f, ArrayList tuples) throws SQLException { return new SyntheticResultSet(f, tuples); } + + // ************************************************************ + // Implementation of JDBC 4 methods. Methods go here if they + // don't throw SQLFeatureNotSupportedException; they can be + // considered implemented even if they do nothing useful, as + // long as that's an allowed behavior by the JDBC spec. 
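The holdability changes above are a behavior change for callers: the SPI connection now reports support only for CLOSE_CURSORS_AT_COMMIT, and getResultSetHoldability returns that value. A minimal sketch of what that looks like from calling code; the wrapper class and variable names are illustrative:

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

final class HoldabilityCheck
{
    static void check(Connection conn) throws SQLException
    {
        DatabaseMetaData md = conn.getMetaData();

        boolean close =
            md.supportsResultSetHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT);
        boolean hold =
            md.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT);
        int dflt = md.getResultSetHoldability();

        // With the change above: close is true, hold is false,
        // and dflt is ResultSet.CLOSE_CURSORS_AT_COMMIT.
    }
}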
+ // ************************************************************ + + public boolean isWrapperFor(Class iface) + throws SQLException + { + return iface.isInstance(this); + } + + public T unwrap(java.lang.Class iface) + throws SQLException + { + if ( iface.isInstance(this) ) + return iface.cast(this); + throw new SQLFeatureNotSupportedException + ( this.getClass().getSimpleName() + + " does not wrap " + iface.getName(), + "0A000" ); + } + + public boolean generatedKeyAlwaysReturned() throws SQLException + { + return false; + } // ************************************************************ // Non-implementation of JDBC 4 methods. @@ -3530,32 +3632,12 @@ public RowIdLifetime getRowIdLifetime() "SPIDatabaseMetadata.getRowIdLifetime() not implemented yet.", "0A000" ); } - public boolean isWrapperFor(Class c) - throws SQLException - { - throw new SQLFeatureNotSupportedException( - "SPIDatabaseMetadata.isWrapperFor( Class< ? > ) not implemented yet.", "0A000" ); - } - - public T unwrap(java.lang.Class T) - throws SQLException + public ResultSet getPseudoColumns(String catalog, String schemaPattern, + String tableNamePattern, String columnNamePattern) throws SQLException { - throw new SQLFeatureNotSupportedException( - "SPIDatabaseMetadata.unwrap( Class< T > ) not implemented yet.", "0A000" ); - - } - - public boolean generatedKeyAlwaysReturned() throws SQLException - { - return false; - } - - public ResultSet getPseudoColumns(String catalog, String schemaPattern, - String tableNamePattern, String columnNamePattern) throws SQLException - { throw new SQLFeatureNotSupportedException( "SPIDatabaseMetadata.getPseudoColumns(String,String,String,String) not implemented yet.", "0A000" ); - } + } } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIDriver.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIDriver.java index 1246cc95..a8eb168e 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIDriver.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIDriver.java @@ -1,8 +1,13 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren */ package org.postgresql.pljava.jdbc; @@ -16,12 +21,13 @@ import java.util.logging.Logger; /** - * + * Implementation of {@link Driver} for the SPI connection. 
* @author Thomas Hallgren */ public class SPIDriver implements Driver { - private static final Logger s_logger = Logger.getLogger( "org.postgresql.pljava.jdbc" ); + private static final Logger s_logger = Logger.getLogger( + "org.postgresql.pljava.jdbc" ); private static final String s_defaultURL = "jdbc:default:connection"; private static final int s_defaultURLLen = s_defaultURL.length(); @@ -88,8 +94,8 @@ static Connection getDefault() return s_defaultConn; } - public Logger getParentLogger() throws SQLFeatureNotSupportedException - { - return s_logger; - } + public Logger getParentLogger() throws SQLFeatureNotSupportedException + { + return s_logger; + } } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIParameterMetaData.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIParameterMetaData.java index 38ad4654..5fe27784 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIParameterMetaData.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIParameterMetaData.java @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group - * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -13,7 +18,7 @@ import java.sql.SQLFeatureNotSupportedException; /** - * + * Implementation of {@link ParameterMetaData} for the SPI connection. * @author Thomas Hallgren */ public class SPIParameterMetaData implements ParameterMetaData @@ -95,24 +100,26 @@ public int getParameterMode(int paramIndex) throws SQLException } // ************************************************************ - // Non-implementation of JDBC 4 methods. + // Implementation of JDBC 4 methods. Methods go here if they + // don't throw SQLFeatureNotSupportedException; they can be + // considered implemented even if they do nothing useful, as + // long as that's an allowed behavior by the JDBC spec. 
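The isWrapperFor and unwrap implementations above now follow the usual JDBC contract: unwrap returns the object itself when it implements the requested interface, and otherwise throws SQLFeatureNotSupportedException with SQLSTATE 0A000. A minimal sketch of calling code against that contract; the helper is illustrative and not part of the patch:

import java.sql.SQLException;
import java.sql.Wrapper;

final class UnwrapHelper
{
    // Returns the unwrapped view, or null instead of the 0A000 exception
    // the implementations above throw for an unsupported interface.
    static <T> T unwrapOrNull(Wrapper w, Class<T> iface) throws SQLException
    {
        return w.isWrapperFor(iface) ? w.unwrap(iface) : null;
    }
}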
// ************************************************************ public boolean isWrapperFor(Class iface) throws SQLException { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".isWrapperFor( Class ) not implemented yet.", - "0A000" ); + return iface.isInstance(this); } public T unwrap(Class iface) throws SQLException { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".unwrapClass( Class ) not implemented yet.", + if ( iface.isInstance(this) ) + return iface.cast(this); + throw new SQLFeatureNotSupportedException + ( this.getClass().getSimpleName() + + " does not wrap " + iface.getName(), "0A000" ); } } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIPreparedStatement.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIPreparedStatement.java index f61c04aa..02ccd407 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIPreparedStatement.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIPreparedStatement.java @@ -1,19 +1,24 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2007, 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; -import java.io.UnsupportedEncodingException; import java.math.BigDecimal; import java.net.URL; +import static java.nio.charset.StandardCharsets.US_ASCII; import java.sql.Array; import java.sql.Blob; import java.sql.Clob; @@ -38,7 +43,7 @@ import org.postgresql.pljava.internal.Oid; /** - * + * Implementation of {@link PreparedStatement} for the SPI connection. * @author Thomas Hallgren */ public class SPIPreparedStatement extends SPIStatement implements PreparedStatement @@ -59,6 +64,7 @@ public SPIPreparedStatement(SPIConnection conn, String statement, int paramCount Arrays.fill(m_sqlTypes, Types.NULL); } + @Override public void close() throws SQLException { @@ -67,123 +73,132 @@ public void close() m_plan.close(); m_plan = null; } - this.clearParameters(); + clearParameters(); super.close(); - Invocation.current().forgetStatement(this); } + @Override public ResultSet executeQuery() throws SQLException { - this.execute(); - return this.getResultSet(); + execute(); + return getResultSet(); } + @Override public int executeUpdate() throws SQLException { - this.execute(); - return this.getUpdateCount(); + execute(); + return getUpdateCount(); } + @Override public void setNull(int columnIndex, int sqlType) throws SQLException { - this.setObject(columnIndex, null, sqlType); + setObject(columnIndex, null, sqlType); } + @Override public void setBoolean(int columnIndex, boolean value) throws SQLException { - this.setObject(columnIndex, value ? 
Boolean.TRUE : Boolean.FALSE, Types.BOOLEAN); + setObject(columnIndex, value, Types.BOOLEAN); } + @Override public void setByte(int columnIndex, byte value) throws SQLException { - this.setObject(columnIndex, new Byte(value), Types.TINYINT); + setObject(columnIndex, value, Types.TINYINT); } + @Override public void setShort(int columnIndex, short value) throws SQLException { - this.setObject(columnIndex, new Short(value), Types.SMALLINT); + setObject(columnIndex, value, Types.SMALLINT); } + @Override public void setInt(int columnIndex, int value) throws SQLException { - this.setObject(columnIndex, new Integer(value), Types.INTEGER); + setObject(columnIndex, value, Types.INTEGER); } + @Override public void setLong(int columnIndex, long value) throws SQLException { - this.setObject(columnIndex, new Long(value), Types.BIGINT); + setObject(columnIndex, value, Types.BIGINT); } + @Override public void setFloat(int columnIndex, float value) throws SQLException { - this.setObject(columnIndex, new Float(value), Types.FLOAT); + setObject(columnIndex, value, Types.FLOAT); } + @Override public void setDouble(int columnIndex, double value) throws SQLException { - this.setObject(columnIndex, new Double(value), Types.DOUBLE); + setObject(columnIndex, value, Types.DOUBLE); } + @Override public void setBigDecimal(int columnIndex, BigDecimal value) throws SQLException { - this.setObject(columnIndex, value, Types.DECIMAL); + setObject(columnIndex, value, Types.DECIMAL); } + @Override public void setString(int columnIndex, String value) throws SQLException { - this.setObject(columnIndex, value, Types.VARCHAR); + setObject(columnIndex, value, Types.VARCHAR); } + @Override public void setBytes(int columnIndex, byte[] value) throws SQLException { - this.setObject(columnIndex, value, Types.VARBINARY); + setObject(columnIndex, value, Types.VARBINARY); } + @Override public void setDate(int columnIndex, Date value) throws SQLException { - this.setObject(columnIndex, value, Types.DATE); + setObject(columnIndex, value, Types.DATE); } + @Override public void setTime(int columnIndex, Time value) throws SQLException { - this.setObject(columnIndex, value, Types.TIME); + setObject(columnIndex, value, Types.TIME); } + @Override public void setTimestamp(int columnIndex, Timestamp value) throws SQLException { - this.setObject(columnIndex, value, Types.TIMESTAMP); + setObject(columnIndex, value, Types.TIMESTAMP); } + @Override public void setAsciiStream(int columnIndex, InputStream value, int length) throws SQLException { - try - { - this.setObject(columnIndex, - new ClobValue(new InputStreamReader(value, "US-ASCII"), length), - Types.CLOB); - } - catch(UnsupportedEncodingException e) - { - throw new SQLException("US-ASCII encoding is not supported by this JVM"); - } + setObject(columnIndex, + new ClobValue(new InputStreamReader(value, US_ASCII), length), + Types.CLOB); } - /** - * @deprecated - */ + @SuppressWarnings("deprecation") @Override public void setUnicodeStream(int columnIndex, InputStream value, int arg2) throws SQLException { throw new UnsupportedFeatureException("PreparedStatement.setUnicodeStream"); } + @Override public void setBinaryStream(int columnIndex, InputStream value, int length) throws SQLException { - this.setObject(columnIndex, new BlobValue(value, length), Types.BLOB); + setObject(columnIndex, new BlobValue(value, length), Types.BLOB); } + @Override public void clearParameters() throws SQLException { @@ -191,21 +206,38 @@ public void clearParameters() Arrays.fill(m_sqlTypes, Types.NULL); } + /** + * 
Implemented on {@link #setObject(int,Object,int)}, discarding scale. + */ + @Override public void setObject(int columnIndex, Object value, int sqlType, int scale) throws SQLException { - this.setObject(columnIndex, value, sqlType); + setObject(columnIndex, value, sqlType); } + @Override public void setObject(int columnIndex, Object value, int sqlType) throws SQLException + { + setObject(columnIndex, value, sqlType, TypeBridge.wrap(value)); + } + + private void setObject( + int columnIndex, Object value, int sqlType, TypeBridge.Holder vAlt) + throws SQLException { if(columnIndex < 1 || columnIndex > m_sqlTypes.length) throw new SQLException("Illegal parameter index"); - Oid id = (sqlType == Types.OTHER) - ? Oid.forJavaClass(value.getClass()) - : Oid.forSqlType(sqlType); + Oid id = null; + + if ( null != vAlt ) + id = new Oid(vAlt.defaultOid()); + else if ( sqlType != Types.OTHER ) + id = Oid.forSqlType(sqlType); + else + id = Oid.forJavaObject(value); // Default to String. // @@ -213,29 +245,70 @@ public void setObject(int columnIndex, Object value, int sqlType) id = Oid.forSqlType(Types.VARCHAR); Oid op = m_typeIds[--columnIndex]; + + /* + * Coordinate this behavior with the newly-implemented + * setNull(int,int,String), which can have been used to set a specific + * PostgreSQL type oid that is not the default mapping from any JDBC + * type. + * + * If no oid has already been set, unconditionally assign the one just + * chosen above. If the one just chosen matches one already set, do + * nothing. Otherwise, assign the one just chosen and re-prepare, but + * ONLY IF WE HAVE NOT BEEN GIVEN A TYPEBRIDGE.HOLDER. If a Holder is + * supplied, the value is of one of the types newly allowed for 1.5.1; + * it is safe to introduce a different behavior with those, as they had + * no prior behavior to match. + * + * The behavior for the new types is to NOT overwrite whatever PG oid + * may have been already assigned, but to simply pass the Holder and + * hope the native Type implementation knows how to munge the object + * to that PG type. An exception will ensue if it does not. + * + * The ultimate (future major release) way for PreparedStatement + * parameter typing to work will be to rely on the improved SPI from + * PG 9.0 to find out the parameter types PostgreSQL's type inference + * has come up with, and treat assignments here as coercions to those, + * just as for result-set updaters. That will moot most of these goofy + * half-measures here. https://www.postgresql.org/message-id/ + * d5ecbef6-88ee-85d8-7cc2-8c8741174f2d%40anastigmatix.net + */ + if(op == null) m_typeIds[columnIndex] = id; - else if(!op.equals(id)) + else if ( null == vAlt && !op.equals(id) ) { m_typeIds[columnIndex] = id; // We must re-prepare // - if(m_plan != null) + if ( m_plan != null ) + { m_plan.close(); - m_plan = null; + m_plan = null; + } } m_sqlTypes[columnIndex] = sqlType; - m_values[columnIndex] = value; + m_values[columnIndex] = null == vAlt ? 
value : vAlt; } + @Override public void setObject(int columnIndex, Object value) throws SQLException { if(value == null) - throw new SQLException("Can't assign null unless the SQL type is known"); + throw new SQLException( + "Can't assign null unless the SQL type is known"); + + TypeBridge.Holder vAlt = TypeBridge.wrap(value); + + int sqlType; + if ( null == vAlt ) + sqlType = SPIConnection.getTypeForClass(value.getClass()); + else + sqlType = Types.OTHER; - this.setObject(columnIndex, value, SPIConnection.getTypeForClass(value.getClass())); + setObject(columnIndex, value, sqlType, vAlt); } /** @@ -254,6 +327,7 @@ private int[] getSqlTypes() return types; } + @Override public boolean execute() throws SQLException { @@ -266,8 +340,8 @@ public boolean execute() if(m_plan == null) m_plan = ExecutionPlan.prepare(m_statement, m_typeIds); - boolean result = this.executePlan(m_plan, m_values); - this.clearParameters(); // Parameters are cleared upon successful completion. + boolean result = executePlan(m_plan, m_values); + clearParameters(); // Parameters are cleared upon successful completion. return result; } @@ -281,92 +355,145 @@ public boolean execute(String statement) throw new UnsupportedFeatureException("Can't execute other statements using a prepared statement"); } + @Override public void addBatch() throws SQLException { - this.internalAddBatch(new Object[]{m_values.clone(), m_sqlTypes.clone(), m_typeIds.clone()}); - this.clearParameters(); // Parameters are cleared upon successful completion. + internalAddBatch(new Object[]{m_values.clone(), m_sqlTypes.clone(), m_typeIds.clone()}); + clearParameters(); // Parameters are cleared upon successful completion. } /** * The prepared statement cannot have other statements added too it. * @throws SQLException indicating that this feature is not supported. */ + @Override public void addBatch(String statement) throws SQLException { throw new UnsupportedFeatureException("Can't add batch statements to a prepared statement"); } + @Override public void setCharacterStream(int columnIndex, Reader value, int length) throws SQLException { - this.setObject(columnIndex, new ClobValue(value, length), Types.CLOB); + setObject(columnIndex, new ClobValue(value, length), Types.CLOB); } + @Override public void setRef(int columnIndex, Ref value) throws SQLException { - this.setObject(columnIndex, value, Types.REF); + setObject(columnIndex, value, Types.REF); } + @Override public void setBlob(int columnIndex, Blob value) throws SQLException { - this.setObject(columnIndex, value, Types.BLOB); + setObject(columnIndex, value, Types.BLOB); } + @Override public void setClob(int columnIndex, Clob value) throws SQLException { - this.setObject(columnIndex, value, Types.CLOB); + setObject(columnIndex, value, Types.CLOB); } + @Override public void setArray(int columnIndex, Array value) throws SQLException { - this.setObject(columnIndex, value, Types.ARRAY); + setObject(columnIndex, value, Types.ARRAY); } /** * ResultSetMetaData is not yet supported. * @throws SQLException indicating that this feature is not supported. 
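setObject(int, Object) above still rejects a bare null, because there is no class to infer a type from; the type has to be supplied some other way. A minimal sketch of both accepted forms; the parameter index and the INTEGER type are illustrative:

import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Types;

final class NullBinding
{
    static void bind(PreparedStatement ps, Integer v) throws SQLException
    {
        if ( v == null )
            ps.setNull(1, Types.INTEGER);  // type supplied explicitly
        else
            ps.setObject(1, v);            // type inferred from the class

        // ps.setObject(1, null) would throw
        // "Can't assign null unless the SQL type is known"
    }
}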
*/ + @Override public ResultSetMetaData getMetaData() throws SQLException { throw new UnsupportedFeatureException("ResultSet meta data is not yet implemented"); } + @Override public void setDate(int columnIndex, Date value, Calendar cal) throws SQLException { - if(cal == null || cal == Calendar.getInstance()) - this.setObject(columnIndex, value, Types.DATE); - throw new UnsupportedFeatureException("Setting date using explicit Calendar"); + if(cal != null && cal != Calendar.getInstance()) + throw new UnsupportedFeatureException( + "Setting date using explicit Calendar"); + setObject(columnIndex, value, Types.DATE); } + @Override public void setTime(int columnIndex, Time value, Calendar cal) throws SQLException { - if(cal == null || cal == Calendar.getInstance()) - this.setObject(columnIndex, value, Types.TIME); - throw new UnsupportedFeatureException("Setting time using explicit Calendar"); + if(cal != null && cal != Calendar.getInstance()) + throw new UnsupportedFeatureException( + "Setting time using explicit Calendar"); + setObject(columnIndex, value, Types.TIME); } + @Override public void setTimestamp(int columnIndex, Timestamp value, Calendar cal) throws SQLException { - if(cal == null || cal == Calendar.getInstance()) - this.setObject(columnIndex, value, Types.TIMESTAMP); - throw new UnsupportedFeatureException("Setting time using explicit Calendar"); + if(cal != null && cal != Calendar.getInstance()) + throw new UnsupportedFeatureException( + "Setting time using explicit Calendar"); + setObject(columnIndex, value, Types.TIMESTAMP); } + /** + * This method can (and is the only method that can, until JDBC 4.2 SQLType + * is implemented) assign a specific PostgreSQL type, by name, to a + * PreparedStatement parameter. + *
<p>
      + * However, to avoid a substantial behavior change in a 1.5.x minor release, + * its effect is limited for now. Any subsequent assignment of a non-null + * value for the parameter, using any of the setter methods or + * setObject-accepted classes from pre-JDBC 4.2, will reset the associated + * PostgreSQL type to what would have been assigned according to the JDBC + * {@code sqlType} or the type of the object. + *
<p>
      + * In contrast, setObject with any of the object types newly recognized + * in PL/Java 1.5.1 will not overwrite the PostgreSQL type assigned by this + * method, but will let it stand, on the assumption that the object's native + * to-Datum coercions will include one that applies to the type. If not, an + * exception will result. + *
<p>
      + * The {@code sqlType} supplied here will be remembered, only to be used by + * the somewhat-functionally-impaired {@code ParameterMetaData} + * implementation. It is not checked for compatibility with the supplied + * PostgreSQL {@code typeName} in any way. + */ + @Override public void setNull(int columnIndex, int sqlType, String typeName) throws SQLException { - this.setNull(columnIndex, sqlType); + Oid id = Oid.forTypeName(typeName); + Oid op = m_typeIds[--columnIndex]; + if ( null == op ) + m_typeIds[columnIndex] = id; + else if ( !op.equals(id) ) + { + m_typeIds[columnIndex] = id; + if ( null != m_plan ) + { + m_plan.close(); + m_plan = null; + } + } + m_sqlTypes[columnIndex] = sqlType; + m_values[columnIndex] = null; } + @Override public void setURL(int columnIndex, URL value) throws SQLException { - this.setObject(columnIndex, value, Types.DATALINK); + setObject(columnIndex, value, Types.DATALINK); } public String toString() @@ -383,16 +510,17 @@ public String toString() * object based on the supplied values. * @return The meta data for parameter values. */ + @Override public ParameterMetaData getParameterMetaData() throws SQLException { - return new SPIParameterMetaData(this.getSqlTypes()); + return new SPIParameterMetaData(getSqlTypes()); } - protected int executeBatchEntry(Object batchEntry) + protected long executeBatchEntry(Object batchEntry) throws SQLException { - int ret = SUCCESS_NO_INFO; + long ret = SUCCESS_NO_INFO; Object batchParams[] = (Object[])batchEntry; Object batchValues = batchParams[0]; Object batchSqlTypes = batchParams[1]; @@ -418,221 +546,244 @@ protected int executeBatchEntry(Object batchEntry) } } - if(this.execute()) - this.getResultSet().close(); + if(execute()) + getResultSet().close(); else { - int updCount = this.getUpdateCount(); + long updCount = getUpdateCount(); if(updCount >= 0) ret = updCount; } return ret; } + // ************************************************************ + // Implementation of JDBC 4 methods. Methods go here if they + // don't throw SQLFeatureNotSupportedException; they can be + // considered implemented even if they do nothing useful, as + // long as that's an allowed behavior by the JDBC spec. + // ************************************************************ + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) + throws SQLException + { + setObject(parameterIndex, xmlObject, Types.SQLXML); + } + // ************************************************************ // Non-implementation of JDBC 4 methods. 
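A minimal usage sketch of the setNull(int, int, String) behavior documented above; the query and the choice of the inet type name are illustrative, only the setter semantics come from the patch:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;

final class TypedNullSketch
{
    static void run(Connection conn) throws SQLException
    {
        PreparedStatement ps = conn.prepareStatement("SELECT ? IS NULL");

        // Assign the PostgreSQL type inet to parameter 1 by name and bind a
        // NULL of that type; the plan is prepared with the corresponding Oid.
        ps.setNull(1, Types.OTHER, "inet");
        try ( ResultSet rs = ps.executeQuery() ) { }

        // Any later pre-JDBC-4.2 setter resets the parameter's PostgreSQL
        // type to the default mapping for its JDBC type (VARCHAR here), and
        // the statement is re-prepared, as the javadoc above warns.
        ps.setString(1, "10.0.0.1");
        try ( ResultSet rs = ps.executeQuery() ) { }
    }
}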
// ************************************************************ + @Override public void setNClob(int parameterIndex, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setNClob( int, Reader ) not implemented yet.", "0A000" ); } + @Override public void setNClob(int parameterIndex, NClob value) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setNClob( int, NClob ) not implemented yet.", "0A000" ); } + @Override public void setNClob(int parameterIndex, Reader reader,long length) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setNClob( int, Reader, long ) not " + "implemented yet.", "0A000" ); } + @Override public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setBlob( int, InputStream ) not " + "implemented yet.", "0A000" ); } + @Override public void setBlob(int parameterIndex, InputStream inputStream,long length) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setBlob( int, InputStream, long ) not " + "implemented yet.", "0A000" ); } + @Override public void setClob(int parameterIndex, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setClob( int, Reader ) not implemented yet.", "0A000" ); } + + @Override public void setClob(int parameterIndex, Reader reader,long length) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setClob( int, Reader, long ) not " + "implemented yet.", "0A000" ); } + @Override public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setNCharacterStream( int, Reader ) not " + "implemented yet.", "0A000" ); } + + @Override public void setNCharacterStream(int parameterIndex, Reader value,long length) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setNCharacterStream( int, Reader, long ) not " + "implemented yet.", "0A000" ); } + @Override public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setCharacterStream( int, Reader ) not " + "implemented yet.", "0A000" ); } + @Override public void setCharacterStream(int parameterIndex, - Reader reader,long lenght) + Reader reader, long length) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setCharacterStream( int, Reader, long ) not " + "implemented yet.", "0A000" ); } + @Override public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setBinaryStream( int, InputStream ) not " + "implemented yet.", "0A000" ); } + @Override public void setBinaryStream(int parameterIndex, - InputStream x,long length) + InputStream x, long length) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setBinaryStream( int, InputStream, long ) not " + "implemented yet.", "0A000" ); } + @Override public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { throw new 
SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setAsciiStream( int, InputStream ) not " + "implemented yet.", "0A000" ); } + @Override public void setAsciiStream(int parameterIndex, InputStream x,long length) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setAsciiStream( int, InputStream, long ) not " + "implemented yet.", "0A000" ); } - - public void setSQLXML(int parameterIndex, - SQLXML xmlObject) - throws SQLException - { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".setSQLXML( int, SQLXML ) not implemented yet.", - "0A000" ); - } + @Override public void setNString(int parameterIndex, String value) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setNString( int, String ) not implemented yet.", "0A000" ); } + @Override public void setRowId(int parameterIndex, RowId x) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".setRowId( int, RowId ) not implemented yet.", "0A000" ); } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIReadOnlyControl.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIReadOnlyControl.java new file mode 100644 index 00000000..30bde8af --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIReadOnlyControl.java @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.jdbc; + +/** + * An extended interface on {@code Statement} (accessible with {@code unwrap()}) + * allowing control of the {@code read_only} flag that PostgreSQL SPI will see + * when the statement is next executed. + *
<p>
      + * Currently an internal interface, not in {@code pljava-api}, as the known need + * so far is just for the internal class loader. + */ +public interface SPIReadOnlyControl +{ + /** + * Specify that the statement, when next executed, will have the + * behavior recommended in the PostgreSQL SPI documentation: + * {@code read_only} will be set to {@code true} if the currently-executing + * PL/Java function is declared {@code IMMUTABLE}, {@code false} otherwise. + */ + void defaultReadOnly(); + + /** + * Specify that the statement, when next executed, will have have + * {@code read_only} set to {@code true} unconditionally. + */ + void forceReadOnly(); + + /** + * Specify that the statement, when next executed, will have have + * {@code read_only} set to {@code false} unconditionally. + */ + void clearReadOnly(); +} + diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIResultSet.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIResultSet.java index 75441f79..673fe5ee 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIResultSet.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIResultSet.java @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2018 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -29,7 +35,7 @@ public class SPIResultSet extends ResultSetBase private final SPIStatement m_statement; private final Portal m_portal; private final TupleDesc m_tupleDesc; - private final int m_maxRows; + private final long m_maxRows; private Tuple m_currentRow; private Tuple m_nextRow; @@ -37,7 +43,9 @@ public class SPIResultSet extends ResultSetBase private TupleTable m_table; private int m_tableRow; - SPIResultSet(SPIStatement statement, Portal portal, int maxRows) + private boolean m_open; + + SPIResultSet(SPIStatement statement, Portal portal, long maxRows) throws SQLException { super(statement.getFetchSize()); @@ -46,19 +54,16 @@ public class SPIResultSet extends ResultSetBase m_maxRows = maxRows; m_tupleDesc = portal.getTupleDesc(); m_tableRow = -1; + m_open = true; } - public int getFetchDirection() - throws SQLException - { - return FETCH_FORWARD; - } - + @Override public void close() throws SQLException { - if(m_portal.isValid()) + if(m_open) { + m_open = false; m_portal.close(); m_statement.resultSetClosed(this); m_table = null; @@ -69,11 +74,13 @@ public void close() } } + @Override public boolean isLast() throws SQLException { return m_currentRow != null && this.peekNext() == null; } + @Override public boolean next() throws SQLException { @@ -84,32 +91,47 @@ public boolean next() return result; } + /** + * This method does return the name of the portal, but beware of attempting + * positioned update/delete, because rows are read from the portal in + * {@link #getFetchSize} batches. 
+ */ + @Override public String getCursorName() throws SQLException { return this.getPortal().getName(); } + @Override public int findColumn(String columnName) throws SQLException { return m_tupleDesc.getColumnIndex(columnName); } + @Override public Statement getStatement() throws SQLException { return m_statement; } + /** + * Return the {@code Portal} associated with this {@code ResultSet}. + */ protected final Portal getPortal() throws SQLException { - if(!m_portal.isValid()) + if(!m_open) throw new SQLException("ResultSet is closed"); return m_portal; } + /** + * Get a(nother) table of {@link #getFetchSize} rows from the + * {@link Portal}. + */ protected final TupleTable getTupleTable() throws SQLException { @@ -119,7 +141,7 @@ protected final TupleTable getTupleTable() if(portal.isAtEnd()) return null; - int mx; + long mx; int fetchSize = this.getFetchSize(); if(m_maxRows > 0) { @@ -134,7 +156,7 @@ protected final TupleTable getTupleTable() try { - int result = portal.fetch(true, mx); + long result = portal.fetch(true, mx); if(result > 0) m_table = SPI.getTupTable(m_tupleDesc); m_tableRow = -1; @@ -147,6 +169,9 @@ protected final TupleTable getTupleTable() return m_table; } + /** + * Return the {@link Tuple} most recently returned by {@link #next}. + */ protected final Tuple getCurrentRow() throws SQLException { @@ -155,6 +180,10 @@ protected final Tuple getCurrentRow() return m_currentRow; } + /** + * Get another {@link Tuple} from the {@link TupleTable}, refreshing the + * table as needed. + */ protected final Tuple peekNext() throws SQLException { @@ -167,7 +196,7 @@ protected final Tuple peekNext() if(m_tableRow >= table.getCount() - 1) { - // Current table is exhaused, get the next + // Current table is exhausted, get the next // one. // m_table = null; @@ -179,12 +208,21 @@ protected final Tuple peekNext() return m_nextRow; } - protected Object getObjectValue(int columnIndex) + /** + * Implemented over + * {@link Tuple#getObject Tuple.getObject(TupleDesc,int,Class)}. + */ + @Override // defined in ObjectResultSet + protected Object getObjectValue(int columnIndex, Class type) throws SQLException { - return this.getCurrentRow().getObject(m_tupleDesc, columnIndex); + return this.getCurrentRow().getObject(m_tupleDesc, columnIndex, type); } + /** + * Returns an {@link SPIResultSetMetaData} instance. + */ + @Override public ResultSetMetaData getMetaData() throws SQLException { diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIResultSetMetaData.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIResultSetMetaData.java index dcc3cefa..8788149a 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIResultSetMetaData.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIResultSetMetaData.java @@ -1,16 +1,20 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group - * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * Copyright (c) 2004-2018 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Filip Hrbek + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; import java.sql.SQLException; -import java.sql.SQLFeatureNotSupportedException; import org.postgresql.pljava.internal.Oid; import org.postgresql.pljava.internal.TupleDesc; @@ -135,28 +139,4 @@ protected final int getFieldLength(int column) throws SQLException { return 0; } - - // ************************************************************ - // Non-implementation of JDBC 4 methods. - // ************************************************************ - - - public boolean isWrapperFor(Class iface) - throws SQLException - { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".isWrapperFor( Class ) not implemented yet.", - "0A000" ); - } - - public T unwrap(Class iface) - throws SQLException - { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".unwrapClass( Class ) not implemented yet.", - "0A000" ); - } - } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIStatement.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIStatement.java index 7ea241a9..6197c81f 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIStatement.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SPIStatement.java @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2008, 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -22,10 +27,10 @@ import org.postgresql.pljava.internal.SPIException; /** - * + * Implementation of {@link Statement} for the SPI connection. 
* @author Thomas Hallgren */ -public class SPIStatement implements Statement +public class SPIStatement implements Statement, SPIReadOnlyControl { private final SPIConnection m_connection; @@ -35,9 +40,10 @@ public class SPIStatement implements Statement private int m_fetchSize = 1000; private int m_maxRows = 0; private ResultSet m_resultSet = null; - private int m_updateCount = 0; - private ArrayList m_batch = null; + private long m_updateCount = 0; + private ArrayList m_batch = null; private boolean m_closed = false; + private short m_readonly_spec = ExecutionPlan.SPI_READONLY_DEFAULT; public SPIStatement(SPIConnection conn) { @@ -125,14 +131,15 @@ protected boolean executePlan(ExecutionPlan plan, Object[] paramValues) boolean isResultSet = plan.isCursorPlan(); if(isResultSet) { - Portal portal = plan.cursorOpen(m_cursorName, paramValues); + Portal portal = plan.cursorOpen( + m_cursorName, paramValues, m_readonly_spec); m_resultSet = new SPIResultSet(this, portal, m_maxRows); } else { try { - plan.execute(paramValues, m_maxRows); + plan.execute(paramValues, m_readonly_spec, m_maxRows); m_updateCount = SPI.getProcessed(); } finally @@ -179,7 +186,23 @@ public int[] executeBatch() int numBatches = (m_batch == null) ? 0 : m_batch.size(); int[] result = new int[numBatches]; for(int idx = 0; idx < numBatches; ++idx) + { + long count = this.executeBatchEntry(m_batch.get(idx)); + result[idx] = (count > Integer.MAX_VALUE) + ? SUCCESS_NO_INFO : (int)count; + } + return result; + } + + public long[] executeLargeBatch() + throws SQLException + { + int numBatches = (m_batch == null) ? 0 : m_batch.size(); + long[] result = new long[numBatches]; + for(int idx = 0; idx < numBatches; ++idx) + { result[idx] = this.executeBatchEntry(m_batch.get(idx)); + } return result; } @@ -313,6 +336,15 @@ public int getResultSetType() public int getUpdateCount() throws SQLException + { + if ( m_updateCount > Integer.MAX_VALUE ) + throw new ArithmeticException( + "too many rows updated to report in a Java signed int"); + return (int)m_updateCount; + } + + public long getLargeUpdateCount() + throws SQLException { return m_updateCount; } @@ -376,18 +408,24 @@ public void setQueryTimeout(int seconds) throw new UnsupportedFeatureException("Statement.setQueryTimeout"); } + /** + * The argument is either a {@code String} containing SQL (if from a + * {@code Statement}, or an {@code Object} array of length three (if from + * a {@code PreparedStatement}) holding parameter values, SQL types, and + * PG type Oids. + */ protected void internalAddBatch(Object batch) throws SQLException { if(m_batch == null) - m_batch = new ArrayList(); + m_batch = new ArrayList(); m_batch.add(batch); } - protected int executeBatchEntry(Object batchEntry) + protected long executeBatchEntry(Object batchEntry) throws SQLException { - int ret = SUCCESS_NO_INFO; + long ret = SUCCESS_NO_INFO; if(this.execute(m_connection.nativeSQL((String)batchEntry))) this.getResultSet().close(); else if(m_updateCount >= 0) @@ -401,6 +439,35 @@ void resultSetClosed(ResultSet rs) m_resultSet = null; } + // ************************************************************ + // Implementation of JDBC 4 methods. Methods go here if they + // don't throw SQLFeatureNotSupportedException; they can be + // considered implemented even if they do nothing useful, as + // long as that's an allowed behavior by the JDBC spec. 
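With the unwrap() and SPIReadOnlyControl implementations that follow in SPIStatement, internal code can reach the read_only control without casting to the concrete class. A minimal sketch, assuming it runs where the internal interface is visible (it is not exported in pljava-api); the query is illustrative:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

final class ReadOnlySketch
{
    static void run() throws SQLException
    {
        Connection conn =
            DriverManager.getConnection("jdbc:default:connection");
        Statement s = conn.createStatement();

        SPIReadOnlyControl ctl = s.unwrap(SPIReadOnlyControl.class);

        ctl.forceReadOnly();      // next execution sees SPI read_only = true
        s.execute("SELECT 1");

        ctl.defaultReadOnly();    // back to the IMMUTABLE-based default
    }
}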
+ // ************************************************************ + + public boolean isWrapperFor(Class iface) + throws SQLException + { + return iface.isInstance(this); + } + + public T unwrap(Class iface) + throws SQLException + { + if ( iface.isInstance(this) ) + return iface.cast(this); + throw new SQLFeatureNotSupportedException + ( this.getClass().getSimpleName() + + " does not wrap " + iface.getName(), + "0A000" ); + } + + public boolean isCloseOnCompletion() throws SQLException + { + return false; + } + // ************************************************************ // Non-implementation of JDBC 4 methods. // ************************************************************ @@ -432,35 +499,34 @@ public boolean isClosed() "0A000" ); } - public boolean isWrapperFor(Class iface) - throws SQLException + public void closeOnCompletion() throws SQLException { throw new SQLFeatureNotSupportedException ( this.getClass() - + ".isWrapperFor( Class ) not implemented yet.", + + ".closeOneCompletion() not implemented yet.", "0A000" ); } - public T unwrap(Class iface) - throws SQLException + // ************************************************************ + // Implementation of the SPIReadOnlyControl extended interface + // ************************************************************ + + @Override + public void defaultReadOnly() { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".unwrapClass( Class ) not implemented yet.", - "0A000" ); + m_readonly_spec = ExecutionPlan.SPI_READONLY_DEFAULT; } - public void closeOnCompletion() throws SQLException - { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".closeOneCompletion() not implemented yet.", - "0A000" ); - } + @Override + public void forceReadOnly() + { + m_readonly_spec = ExecutionPlan.SPI_READONLY_FORCED; + } - public boolean isCloseOnCompletion() throws SQLException - { - return false; - } + @Override + public void clearReadOnly() + { + m_readonly_spec = ExecutionPlan.SPI_READONLY_CLEARED; + } } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLChunkIOOrder.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLChunkIOOrder.java new file mode 100644 index 00000000..b7038c95 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLChunkIOOrder.java @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2016-2025 TADA AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.jdbc; + +import java.nio.ByteOrder; + +import java.sql.SQLNonTransientException; + +import java.util.Properties; + +/** + * Caches the scalar and mirror {@code MappedUDT} byte orders as determined by + * system properties during PL/Java startup. + *
<p>
      + * This class is initialized from native code ahead of the + * {@link SQLInputFromChunk} and {@link SQLOutputToChunk} classes that depend + * on it. This happens before {@code InstallHelper} has taken and frozen its + * defensive copy of the Java system properties, and also before PL/Java user + * code has potentially run and changed them. + *
<p>
      + * This defensive implementation is needed only for the "PL/Java with no + * security policy enforcement" case, as PL/Java's supplied policy file protects + * these properties from modification when policy is being enforced. + */ +class SQLChunkIOOrder +{ + private SQLChunkIOOrder() { } // do not instantiate + + /** + * Byte order for conversion of "mirror" UDT values in the + * Java-to-PostgreSQL direction. + */ + static final ByteOrder MIRROR_J2P; + + /** + * Byte order for conversion of "mirror" UDT values in the + * PostgreSQL-to-Java direction. + */ + static final ByteOrder MIRROR_P2J; + + /** + * Byte order for conversion of "scalar" UDT values in the + * Java-to-PostgreSQL direction. + */ + static final ByteOrder SCALAR_J2P; + + /** + * Byte order for conversion of "scalar" UDT values in the + * PostgreSQL-to-Java direction. + */ + static final ByteOrder SCALAR_P2J; + + static + { + /* + * Set the org.postgresql.pljava.udt.byteorder.{scalar,mirror}.{p2j,j2p} + * properties. For shorthand, defaults can be given in shorter property + * keys org.postgresql.pljava.udt.byteorder.{scalar,mirror} or even just + * org.postgresql.pljava.udt.byteorder for an overall default. These + * shorter keys are then removed from the system properties. + */ + Properties ps = System.getProperties(); + + String orderKey = "org.postgresql.pljava.udt.byteorder"; + String orderAll = ps.getProperty(orderKey); + String orderMirror = ps.getProperty(orderKey + ".mirror"); + String orderScalar = ps.getProperty(orderKey + ".scalar"); + + if ( null == orderMirror ) + orderMirror = null != orderAll ? orderAll : "native"; + if ( null == orderScalar ) + orderScalar = null != orderAll ? orderAll : "big_endian"; + + System.clearProperty(orderKey); + System.clearProperty(orderKey + ".mirror"); + System.clearProperty(orderKey + ".scalar"); + + try + { + MIRROR_J2P = toByteOrder(ps, orderKey + ".mirror.j2p", orderMirror); + MIRROR_P2J = toByteOrder(ps, orderKey + ".mirror.p2j", orderMirror); + SCALAR_J2P = toByteOrder(ps, orderKey + ".scalar.j2p", orderScalar); + SCALAR_P2J = toByteOrder(ps, orderKey + ".scalar.p2j", orderScalar); + } + catch ( SQLNonTransientException e ) + { + throw new ExceptionInInitializerError(e); + } + } + + private static ByteOrder toByteOrder(Properties ps, String k, String dfl) + throws SQLNonTransientException + { + switch ( (String)ps.computeIfAbsent(k, p -> dfl) ) + { + case "big_endian": return ByteOrder.BIG_ENDIAN; + case "little_endian": return ByteOrder.LITTLE_ENDIAN; + case "native": return ByteOrder.nativeOrder(); + default: + throw new SQLNonTransientException( + "System property " + k + + " must be big_endian, little_endian, or native", "F0000"); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLInputFromChunk.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLInputFromChunk.java index d668da36..eae14618 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLInputFromChunk.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLInputFromChunk.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2016 TADA AB and other contributors, as listed below. + * Copyright (c) 2004-2025 TADA AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -16,13 +16,11 @@ import java.io.InputStream; import java.io.Reader; import java.io.StringReader; -import java.io.UnsupportedEncodingException; import java.math.BigDecimal; import java.net.MalformedURLException; import java.net.URL; import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.nio.charset.Charset; +import static java.nio.charset.StandardCharsets.UTF_8; import java.nio.charset.CharsetDecoder; import java.sql.Array; import java.sql.Blob; @@ -42,6 +40,9 @@ import org.postgresql.pljava.internal.Backend; +import static org.postgresql.pljava.jdbc.SQLChunkIOOrder.MIRROR_P2J; +import static org.postgresql.pljava.jdbc.SQLChunkIOOrder.SCALAR_P2J; + /** * The SQLInputToChunk uses JNI to read from memory that has been allocated by * the PostgreSQL backend. A user should never make an attempt to create an @@ -57,47 +58,10 @@ public class SQLInputFromChunk implements SQLInput { private ByteBuffer m_bb; - /* get rid of this once no longer supporting back to Java 6 */ - private static final Charset UTF8 = Charset.forName("UTF-8"); - - private static ByteOrder scalarOrder; - private static ByteOrder mirrorOrder; - public SQLInputFromChunk(ByteBuffer bb, boolean isJavaBasedScalar) throws SQLException { - m_bb = bb; - if ( isJavaBasedScalar ) - { - if ( null == scalarOrder ) - scalarOrder = getOrder(true); - m_bb.order(scalarOrder); - } - else - { - if ( null == mirrorOrder ) - mirrorOrder = getOrder(false); - m_bb.order(mirrorOrder); - } - } - - private ByteOrder getOrder(boolean isJavaBasedScalar) throws SQLException - { - ByteOrder result; - String key = "org.postgresql.pljava.udt.byteorder." - + ( isJavaBasedScalar ? "scalar" : "mirror" ) + ".p2j"; - String val = System.getProperty(key); - if ( "big_endian".equals(val) ) - result = ByteOrder.BIG_ENDIAN; - else if ( "little_endian".equals(val) ) - result = ByteOrder.LITTLE_ENDIAN; - else if ( "native".equals(val) ) - result = ByteOrder.nativeOrder(); - else - throw new SQLNonTransientException( - "System property " + key + - " must be big_endian, little_endian, or native", "F0000"); - return result; + m_bb = bb.order(isJavaBasedScalar ? SCALAR_P2J : MIRROR_P2J); } @Override @@ -282,7 +246,7 @@ public String readString() throws SQLException int len = m_bb.getShort() & 0xffff; ByteBuffer bytes = (ByteBuffer)m_bb.slice().limit(len); m_bb.position(m_bb.position() + len); - return UTF8.newDecoder().decode(bytes).toString(); + return UTF_8.newDecoder().decode(bytes).toString(); } catch ( Exception e ) { @@ -321,7 +285,9 @@ public URL readURL() throws SQLException { try { - return new URL(this.readString()); + @SuppressWarnings("deprecation") //'til PL/Java major rev or forever + URL u = new URL(this.readString()); + return u; } catch( Exception e ) { diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLInputFromTuple.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLInputFromTuple.java index 070f3eb9..2b99e01b 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLInputFromTuple.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLInputFromTuple.java @@ -1,10 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. 
* - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -21,233 +25,328 @@ import java.sql.RowId; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; -import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLNonTransientException; import java.sql.SQLInput; import java.sql.SQLXML; import java.sql.Time; import java.sql.Timestamp; import org.postgresql.pljava.internal.Backend; -import org.postgresql.pljava.internal.JavaWrapper; +import org.postgresql.pljava.internal.DualState; import org.postgresql.pljava.internal.TupleDesc; /** - * A single row, updateable ResultSet specially made for triggers. The - * changes made to this ResultSet are remembered and converted to a - * SPI_modify_tuple call prior to function return. + * Implements the {@code SQLInput} interface for a user-defined type (UDT) + * implemented in Java, for the case where a composite type in PostgreSQL is + * used as the UDT's representation, so it can be accessed as a PG tuple. * * @author Thomas Hallgren */ -public class SQLInputFromTuple extends JavaWrapper implements SQLInput +public class SQLInputFromTuple extends SingleRowReader implements SQLInput { private int m_index; - private final TupleDesc m_tupleDesc; - private boolean m_wasNull; - - public SQLInputFromTuple(long heapTupleHeaderPointer, TupleDesc tupleDesc) + private final int m_columns; + + /** + * Construct an instance, given the (native) pointer to a PG + * {@code HeapTupleHeader}, as well as the TupleDesc (Java object this time) + * describing its structure. + */ + public SQLInputFromTuple(DualState.Key cookie, long resourceOwner, + long heapTupleHeaderPointer, TupleDesc tupleDesc) throws SQLException { - super(heapTupleHeaderPointer); - m_tupleDesc = tupleDesc; - m_index = 0; - m_wasNull = false; + super(cookie, resourceOwner, heapTupleHeaderPointer, tupleDesc); + m_index = 0; + m_columns = tupleDesc.size(); } + protected int nextIndex() throws SQLException + { + if ( m_index >= m_columns ) + throw new SQLNonTransientException("Tuple has no more columns"); + return ++m_index; + } + + /** + * Implemented over {@link #readValue}. + */ + @Override public Array readArray() throws SQLException { - return (Array)this.readValue(Array.class); + return readValue(Array.class); } + /** + * Implemented over {@link #readClob}. + */ + @Override public InputStream readAsciiStream() throws SQLException { - Clob c = this.readClob(); + Clob c = readClob(); return (c == null) ? null : c.getAsciiStream(); } + /** + * Implemented over {@link #readValue}. + */ + @Override public BigDecimal readBigDecimal() throws SQLException { - return (BigDecimal)this.readValue(BigDecimal.class); + return readValue(BigDecimal.class); } + /** + * Implemented over {@link #readBlob}. + */ + @Override public InputStream readBinaryStream() throws SQLException { - Blob b = this.readBlob(); + Blob b = readBlob(); return (b == null) ? null : b.getBinaryStream(); } + /** + * Implemented over {@link #readBytes}. 
+ */ + @Override public Blob readBlob() throws SQLException { - byte[] bytes = this.readBytes(); + byte[] bytes = readBytes(); return (bytes == null) ? null : new BlobValue(bytes); } + /** + * Implemented over {@link #readValue}. + */ + @Override public boolean readBoolean() throws SQLException { - Boolean b = (Boolean)this.readValue(Boolean.class); + Boolean b = readValue(Boolean.class); return (b == null) ? false : b.booleanValue(); } + /** + * Implemented over {@link #readNumber}. + */ + @Override public byte readByte() throws SQLException { - Number b = this.readNumber(byte.class); + Number b = readNumber(byte.class); return (b == null) ? 0 : b.byteValue(); } + /** + * Implemented over {@link #readValue}. + */ + @Override public byte[] readBytes() throws SQLException { - return (byte[])this.readValue(byte[].class); + return readValue(byte[].class); } + /** + * Implemented over {@link #readClob}. + */ public Reader readCharacterStream() throws SQLException { - Clob c = this.readClob(); + Clob c = readClob(); return (c == null) ? null : c.getCharacterStream(); } + /** + * Implemented over {@link #readString}. + */ public Clob readClob() throws SQLException { - String str = this.readString(); + String str = readString(); return (str == null) ? null : new ClobValue(str); } + /** + * Implemented over {@link #readValue}. + */ + @Override public Date readDate() throws SQLException { - return (Date)this.readValue(Date.class); + return readValue(Date.class); } + /** + * Implemented over {@link #readNumber}. + */ + @Override public double readDouble() throws SQLException { - Number d = this.readNumber(double.class); + Number d = readNumber(double.class); return (d == null) ? 0 : d.doubleValue(); } + /** + * Implemented over {@link #readNumber}. + */ + @Override public float readFloat() throws SQLException { - Number f = this.readNumber(float.class); + Number f = readNumber(float.class); return (f == null) ? 0 : f.floatValue(); } + /** + * Implemented over {@link #readNumber}. + */ + @Override public int readInt() throws SQLException { - Number i = this.readNumber(int.class); + Number i = readNumber(int.class); return (i == null) ? 0 : i.intValue(); } + /** + * Implemented over {@link #readNumber}. + */ + @Override public long readLong() throws SQLException { - Number l = this.readNumber(long.class); + Number l = readNumber(long.class); return (l == null) ? 0 : l.longValue(); } + @Override public Object readObject() throws SQLException { - if(m_index < m_tupleDesc.size()) - { - Object v; - synchronized(Backend.THREADLOCK) - { - v = _getObject(this.getNativePointer(), m_tupleDesc.getNativePointer(), ++m_index); - } - m_wasNull = v == null; - return v; - } - throw new SQLException("Tuple has no more columns"); + return getObject(nextIndex()); } + /** + * Implemented over {@link #readValue}. + */ + @Override public Ref readRef() throws SQLException { - return (Ref)this.readValue(Ref.class); + return readValue(Ref.class); } + /** + * Implemented over {@link #readNumber}. + */ + @Override public short readShort() throws SQLException { - Number s = this.readNumber(short.class); + Number s = readNumber(short.class); return (s == null) ? 0 : s.shortValue(); } + /** + * Implemented over {@link #readValue}. + */ + @Override public String readString() throws SQLException { - return (String)this.readValue(String.class); + return readValue(String.class); } + /** + * Implemented over {@link #readValue}. 
+ */ + @Override public Time readTime() throws SQLException { - return (Time)this.readValue(Time.class); + return readValue(Time.class); } + /** + * Implemented over {@link #readValue}. + */ + @Override public Timestamp readTimestamp() throws SQLException { - return (Timestamp)this.readValue(Timestamp.class); + return readValue(Timestamp.class); } + /** + * Implemented over {@link #readValue}. + */ + @Override public URL readURL() throws SQLException { - return (URL)this.readValue(URL.class); + return readValue(URL.class); } - public boolean wasNull() throws SQLException - { - return m_wasNull; - } + // ************************************************************ + // Implementation of JDBC 4 methods. Methods go here if they + // don't throw SQLFeatureNotSupportedException; they can be + // considered implemented even if they do nothing useful, as + // long as that's an allowed behavior by the JDBC spec. + // ************************************************************ - private Number readNumber(Class numberClass) throws SQLException + @Override + public SQLXML readSQLXML() + throws SQLException { - return SPIConnection.basicNumericCoersion(numberClass, this.readObject()); + return readObject(SQLXML.class); } - private Object readValue(Class valueClass) throws SQLException - { - return SPIConnection.basicCoersion(valueClass, this.readObject()); - } // ************************************************************ // Non-implementation of JDBC 4 methods. // ************************************************************ - + /** Not yet implemented. */ + @Override public RowId readRowId() throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".readRowId() not implemented yet.", "0A000" ); } - public SQLXML readSQLXML() - throws SQLException - { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".readSQLXML() not implemented yet.", - "0A000" ); - } - + /** Not yet implemented. */ + @Override public String readNString() throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".readNString() not implemented yet.", "0A000" ); } + /** Not yet implemented. */ + @Override public NClob readNClob() throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".readNClob() not implemented yet.", "0A000" ); } // ************************************************************ - // End of non-implementation of JDBC 4 methods. + // Implementation of JDBC 4.2 method. // ************************************************************ - protected native void _free(long pointer); + @Override + public T readObject(Class type) throws SQLException + { + return getObject(nextIndex(), type); + } - private static native Object _getObject(long pointer, long tupleDescPointer, int index) - throws SQLException; + // ************************************************************ + // Implementation methods, over methods of ObjectResultSet. 
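The reworked methods above hand each readXxx() call to the next column of the composite, via nextIndex() over the ObjectResultSet-style getters. That matches how a java.sql.SQLData implementation consumes its attributes: readSQL() must read them in declaration order, one call per attribute. A minimal sketch of such a UDT follows, assuming a hypothetical composite type with two double columns (the class name and type are illustrative only, not part of this diff).

// Sketch only: a hypothetical UDT backed by a two-column composite type.
import java.sql.SQLData;
import java.sql.SQLException;
import java.sql.SQLInput;
import java.sql.SQLOutput;

public class PointSketch implements SQLData
{
	private String m_typeName;
	private double m_x;
	private double m_y;

	@Override
	public String getSQLTypeName() { return m_typeName; }

	@Override
	public void readSQL(SQLInput in, String typeName) throws SQLException
	{
		m_typeName = typeName;
		m_x = in.readDouble();   // column 1 of the composite
		m_y = in.readDouble();   // column 2 of the composite
	}

	@Override
	public void writeSQL(SQLOutput out) throws SQLException
	{
		out.writeDouble(m_x);    // collected by SQLOutputToTuple.writeValue
		out.writeDouble(m_y);
	}
}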
+ // ************************************************************ + + private Number readNumber(Class numberClass) throws SQLException + { + return getNumber(nextIndex(), numberClass); + } + + private T readValue(Class valueClass) throws SQLException + { + return getValue(nextIndex(), valueClass); + } } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLOutputToChunk.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLOutputToChunk.java index b2c2db09..5524b70c 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLOutputToChunk.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLOutputToChunk.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2016 TADA AB and other contributors, as listed below. + * Copyright (c) 2004-2025 TADA AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -17,13 +17,11 @@ import java.io.InputStream; import java.io.Reader; import java.io.StringWriter; -import java.io.UnsupportedEncodingException; import java.math.BigDecimal; import java.nio.BufferOverflowException; import java.nio.ByteBuffer; -import java.nio.ByteOrder; import java.nio.CharBuffer; -import java.nio.charset.Charset; +import static java.nio.charset.StandardCharsets.UTF_8; import java.nio.charset.CharsetEncoder; import java.nio.charset.CoderResult; import java.nio.charset.MalformedInputException; @@ -47,7 +45,10 @@ import java.sql.Time; import java.sql.Timestamp; -import org.postgresql.pljava.internal.Backend; +import static org.postgresql.pljava.internal.Backend.doInPG; + +import static org.postgresql.pljava.jdbc.SQLChunkIOOrder.MIRROR_J2P; +import static org.postgresql.pljava.jdbc.SQLChunkIOOrder.SCALAR_J2P; /** * The SQLOutputToChunk uses JNI to build a PostgreSQL StringInfo buffer in @@ -64,52 +65,15 @@ public class SQLOutputToChunk implements SQLOutput { private static final byte[] s_byteBuffer = new byte[8]; - /* get rid of this once no longer supporting back to Java 6 */ - private static final Charset UTF8 = Charset.forName("UTF-8"); - private long m_handle; private ByteBuffer m_bb; - private static ByteOrder scalarOrder; - private static ByteOrder mirrorOrder; - public SQLOutputToChunk(long handle, ByteBuffer bb, boolean isJavaBasedScalar) throws SQLException { m_handle = handle; - m_bb = bb; - if ( isJavaBasedScalar ) - { - if ( null == scalarOrder ) - scalarOrder = getOrder(true); - m_bb.order(scalarOrder); - } - else - { - if ( null == mirrorOrder ) - mirrorOrder = getOrder(false); - m_bb.order(mirrorOrder); - } - } - - private ByteOrder getOrder(boolean isJavaBasedScalar) throws SQLException - { - ByteOrder result; - String key = "org.postgresql.pljava.udt.byteorder." - + ( isJavaBasedScalar ? "scalar" : "mirror" ) + ".j2p"; - String val = System.getProperty(key); - if ( "big_endian".equals(val) ) - result = ByteOrder.BIG_ENDIAN; - else if ( "little_endian".equals(val) ) - result = ByteOrder.LITTLE_ENDIAN; - else if ( "native".equals(val) ) - result = ByteOrder.nativeOrder(); - else - throw new SQLNonTransientException( - "System property " + key + - " must be big_endian, little_endian, or native", "F0000"); - return result; + m_bb = bb.order(isJavaBasedScalar ? 
SCALAR_J2P : MIRROR_J2P); } @Override @@ -193,7 +157,7 @@ public void writeCharacterStream(Reader value) throws SQLException { ByteBuffer bb = ByteBuffer.allocate(65535); CharBuffer cb = CharBuffer.allocate(1024); - CharsetEncoder enc = UTF8.newEncoder(); + CharsetEncoder enc = UTF_8.newEncoder(); CoderResult cr; try @@ -331,7 +295,7 @@ public void writeString(String value) throws SQLException CharBuffer cb = CharBuffer.wrap(value); try { - CharsetEncoder enc = UTF8.newEncoder(); + CharsetEncoder enc = UTF_8.newEncoder(); ByteBuffer bb = enc.encode(cb); int len = bb.limit(); if ( 65535 < len ) @@ -490,7 +454,7 @@ public void writeSQLXML(SQLXML x) private void ensureCapacity(int c) throws SQLException { - synchronized(Backend.THREADLOCK) + doInPG(() -> { if(m_handle == 0) throw new SQLException("Stream is closed"); @@ -498,7 +462,7 @@ private void ensureCapacity(int c) throws SQLException m_bb = _ensureCapacity(m_handle, m_bb, m_bb.position(), c); if ( m_bb != oldbb ) m_bb.order(oldbb.order()); - } + }); } private static native ByteBuffer _ensureCapacity(long handle, diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLOutputToTuple.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLOutputToTuple.java index ddce3499..ec2365bb 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLOutputToTuple.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLOutputToTuple.java @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * Copyright (c) 2010, 2011 PostgreSQL Global Development Group * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root directory of this distribution or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -13,9 +18,9 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; -import java.io.UnsupportedEncodingException; import java.math.BigDecimal; import java.net.URL; +import static java.nio.charset.StandardCharsets.US_ASCII; import java.sql.Array; import java.sql.Blob; import java.sql.Clob; @@ -36,6 +41,7 @@ import org.postgresql.pljava.internal.TupleDesc; /** + * Implementation of {@link SQLOutput} for the case of a composite data type. 
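The writeString hunk above, together with the matching readString change in SQLInputFromChunk, shows the chunk layout used for strings: an unsigned 16-bit byte count followed by that many bytes of UTF-8 (hence the 65535-length check). A standalone sketch of that layout, using a hypothetical helper class that is not part of the diff:

// Sketch only: the length-prefixed UTF-8 layout read by readString()
// and written by writeString() in the chunk classes above.
import java.nio.ByteBuffer;
import static java.nio.charset.StandardCharsets.UTF_8;

final class ChunkStringSketch
{
	static void put(ByteBuffer bb, String s)
	{
		byte[] b = s.getBytes(UTF_8);
		if ( 65535 < b.length )
			throw new IllegalArgumentException("string too long for chunk");
		bb.putShort((short)b.length).put(b);
	}

	static String get(ByteBuffer bb)
	{
		int len = bb.getShort() & 0xffff; // unsigned 16-bit length
		byte[] b = new byte[len];
		bb.get(b);
		return new String(b, UTF_8);
	}
}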
* @author Thomas Hallgren */ public class SQLOutputToTuple implements SQLOutput @@ -75,129 +81,135 @@ public long getTuple() public void writeArray(Array value) throws SQLException { - this.writeValue(value); + writeValue(value); } public void writeAsciiStream(InputStream value) throws SQLException { - try - { - Reader rdr = new BufferedReader(new InputStreamReader(value, "US-ASCII")); - this.writeClob(new ClobValue(rdr, ClobValue.getReaderLength(rdr))); - } - catch(UnsupportedEncodingException e) - { - throw new SQLException(e.toString()); - } + Reader rdr = new BufferedReader(new InputStreamReader(value, US_ASCII)); + writeClob(new ClobValue(rdr, ClobValue.getReaderLength(rdr))); } public void writeBigDecimal(BigDecimal value) throws SQLException { - this.writeValue(value); + writeValue(value); } public void writeBinaryStream(InputStream value) throws SQLException { if(!value.markSupported()) value = new BufferedInputStream(value); - this.writeBlob(new BlobValue(value, BlobValue.getStreamLength(value))); + writeBlob(new BlobValue(value, BlobValue.getStreamLength(value))); } public void writeBlob(Blob value) throws SQLException { - this.writeValue(value); + writeValue(value); } public void writeBoolean(boolean value) throws SQLException { - this.writeValue(value ? Boolean.TRUE : Boolean.FALSE); + writeValue(value); } public void writeByte(byte value) throws SQLException { - this.writeValue(new Byte(value)); + writeValue(value); } public void writeBytes(byte[] value) throws SQLException { - this.writeValue(value); + writeValue(value); } public void writeCharacterStream(Reader value) throws SQLException { if(!value.markSupported()) value = new BufferedReader(value); - this.writeClob(new ClobValue(value, ClobValue.getReaderLength(value))); + writeClob(new ClobValue(value, ClobValue.getReaderLength(value))); } public void writeClob(Clob value) throws SQLException { - this.writeValue(value); + writeValue(value); } public void writeDate(Date value) throws SQLException { - this.writeValue(value); + writeValue(value); } public void writeDouble(double value) throws SQLException { - this.writeValue(new Double(value)); + writeValue(value); } public void writeFloat(float value) throws SQLException { - this.writeValue(new Float(value)); + writeValue(value); } public void writeInt(int value) throws SQLException { - this.writeValue(new Integer(value)); + writeValue(value); } public void writeLong(long value) throws SQLException { - this.writeValue(new Long(value)); + writeValue(value); } public void writeObject(SQLData value) throws SQLException { - this.writeValue(value); + writeValue(value); } public void writeRef(Ref value) throws SQLException { - this.writeValue(value); + writeValue(value); } public void writeShort(short value) throws SQLException { - this.writeValue(new Short(value)); + writeValue(value); } public void writeString(String value) throws SQLException { - this.writeValue(value); + writeValue(value); } public void writeStruct(Struct value) throws SQLException { - this.writeValue(value); + writeValue(value); } public void writeTime(Time value) throws SQLException { - this.writeValue(value); + writeValue(value); } public void writeTimestamp(Timestamp value) throws SQLException { - this.writeValue(value); + writeValue(value); } public void writeURL(URL value) throws SQLException { - this.writeValue(value.toString()); + writeValue(value.toString()); + } + + // ************************************************************ + // Implementation of JDBC 4 methods. 
Methods go here if they + // don't throw SQLFeatureNotSupportedException; they can be + // considered implemented even if they do nothing useful, as + // long as that's an allowed behavior by the JDBC spec. + // ************************************************************ + + public void writeSQLXML(SQLXML x) + throws SQLException + { + writeValue(x); } // ************************************************************ @@ -208,7 +220,7 @@ public void writeNClob(NClob x) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".writeNClob( NClob ) not implemented yet.", "0A000" ); } @@ -217,7 +229,7 @@ public void writeNString(String x) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".writeNString( String ) not implemented yet.", "0A000" ); } @@ -226,19 +238,10 @@ public void writeRowId(RowId x) throws SQLException { throw new SQLFeatureNotSupportedException - ( this.getClass() + ( getClass() + ".writeRowId( RowId ) not implemented yet.", "0A000" ); } - - public void writeSQLXML(SQLXML x) - throws SQLException - { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".writeSQLXML( SQLXML ) not implemented yet.", - "0A000" ); - } // ************************************************************ // End of non-implementation of JDBC 4 methods. @@ -248,6 +251,7 @@ private void writeValue(Object value) throws SQLException { if(m_index >= m_values.length) throw new SQLException("Tuple cannot take more values"); - m_values[m_index++] = value; + TypeBridge.Holder vAlt = TypeBridge.wrap(value); + m_values[m_index++] = null == vAlt ? value : vAlt; } } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLXMLImpl.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLXMLImpl.java new file mode 100644 index 00000000..8baae3dd --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SQLXMLImpl.java @@ -0,0 +1,5660 @@ +/* + * Copyright (c) 2018-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.jdbc; + +/* Imports for API */ + +import java.sql.SQLXML; + +import java.io.InputStream; +import java.io.OutputStream; +import java.io.Reader; +import java.io.Writer; +import javax.xml.transform.Source; +import javax.xml.transform.Result; + +import java.sql.SQLException; + +/* ... 
for SQLXMLImpl */ + +import java.io.ByteArrayInputStream; +import java.io.FilterInputStream; +import java.io.IOException; + +import java.lang.invoke.MethodHandles.Lookup; +import static java.lang.invoke.MethodHandles.lookup; +import java.lang.invoke.VarHandle; + +import java.nio.charset.Charset; +import static java.nio.charset.StandardCharsets.US_ASCII; + +import org.postgresql.pljava.internal.Backend; +import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.MarkableSequenceInputStream; + +import java.sql.SQLNonTransientException; + +import javax.xml.stream.XMLInputFactory; +import javax.xml.stream.XMLStreamReader; + +import static javax.xml.stream.XMLStreamConstants.CDATA; +import static javax.xml.stream.XMLStreamConstants.CHARACTERS; +import static javax.xml.stream.XMLStreamConstants.COMMENT; +import static javax.xml.stream.XMLStreamConstants.DTD; +import static javax.xml.stream.XMLStreamConstants.PROCESSING_INSTRUCTION; +import static javax.xml.stream.XMLStreamConstants.SPACE; +import static javax.xml.stream.XMLStreamConstants.START_DOCUMENT; +import static javax.xml.stream.XMLStreamConstants.START_ELEMENT; + +import javax.xml.stream.XMLStreamException; + +/* ... for SQLXMLImpl.Readable */ + +import java.io.InputStreamReader; +import java.nio.CharBuffer; + +import javax.xml.transform.stream.StreamSource; +import javax.xml.transform.sax.SAXSource; +import javax.xml.transform.stax.StAXSource; +import javax.xml.transform.dom.DOMSource; + +import org.xml.sax.InputSource; +import org.xml.sax.XMLReader; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.SAXParserFactory; + +import org.w3c.dom.Document; +import org.w3c.dom.DocumentFragment; +import org.w3c.dom.Element; +import org.w3c.dom.Node; +import org.w3c.dom.Text; + +import static org.postgresql.pljava.internal.Session.implServerCharset; +import org.postgresql.pljava.internal.VarlenaWrapper; + +import java.sql.SQLFeatureNotSupportedException; + +/* ... for SQLXMLImpl.WhitespaceAccumulator */ + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/* ... for SQLXMLImpl.DeclProbe */ + +import java.io.BufferedInputStream; +import java.io.ByteArrayOutputStream; + +import java.util.Arrays; + +import java.sql.SQLDataException; + +/* ... for SQLXMLImpl.Writable */ + +import java.io.FilterOutputStream; +import java.io.OutputStreamWriter; + +import static javax.xml.transform.OutputKeys.ENCODING; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerFactory; + +import javax.xml.transform.stream.StreamResult; +import javax.xml.transform.sax.SAXResult; +import javax.xml.transform.stax.StAXResult; +import javax.xml.transform.dom.DOMResult; + +import javax.xml.transform.sax.SAXTransformerFactory; +import javax.xml.transform.sax.TransformerHandler; + +import javax.xml.stream.XMLOutputFactory; +import javax.xml.stream.XMLStreamWriter; + +/* ... for SQLXMLImpl.SAXResultAdapter and .SAXUnwrapFilter */ + +import javax.xml.transform.Transformer; +import org.xml.sax.Attributes; +import org.xml.sax.SAXException; +import org.xml.sax.ext.DefaultHandler2; +import org.xml.sax.helpers.XMLFilterImpl; + +import org.postgresql.pljava.internal.SyntheticXMLReader.SAX2PROPERTY; + +/* ... 
for SQLXMLImpl.StAXResultAdapter and .StAXUnwrapFilter */ + +import java.util.NoSuchElementException; + +import javax.xml.namespace.NamespaceContext; +import javax.xml.namespace.QName; +import javax.xml.stream.XMLEventReader; +import javax.xml.stream.events.XMLEvent; +import javax.xml.stream.util.StreamReaderDelegate; + +/* ... for static adopt() method, doing low-level copies from foreign objects */ + +import java.io.BufferedReader; +import java.io.CharArrayReader; +import java.io.FilterReader; + +import javax.xml.stream.XMLEventWriter; +import javax.xml.stream.util.XMLEventConsumer; + +import org.postgresql.pljava.internal.MarkableSequenceReader; + +import org.xml.sax.ContentHandler; +import org.xml.sax.DTDHandler; +import org.xml.sax.ext.LexicalHandler; + +/* ... for Adjusting API for Source / Result */ + +import java.io.StringReader; +import java.util.List; +import static javax.xml.XMLConstants.ACCESS_EXTERNAL_DTD; +import static javax.xml.XMLConstants.ACCESS_EXTERNAL_SCHEMA; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.validation.Schema; +import org.postgresql.pljava.Adjusting; +import static org.postgresql.pljava.Adjusting.XML.setFirstSupported; +import org.xml.sax.EntityResolver; +import org.xml.sax.SAXNotRecognizedException; +import org.xml.sax.SAXNotSupportedException; + +/* ... for error handling */ + +import static java.util.logging.Level.WARNING; +import java.util.logging.Logger; +import org.xml.sax.ErrorHandler; +import org.xml.sax.SAXParseException; + +/* ... for SQLXMLImpl.Readable.Synthetic */ + +import org.postgresql.pljava.internal.VarlenaXMLRenderer; +import static org.postgresql.pljava.jdbc.TypeOid.PG_NODE_TREEOID; + +/** + * Implementation of {@link SQLXML} for the SPI connection. + */ +public abstract class SQLXMLImpl implements SQLXML +{ + private static final VarHandle s_backingVH; + protected volatile V m_backing; + + static + { + try + { + s_backingVH = lookup().findVarHandle( + SQLXMLImpl.class, "m_backing", VarlenaWrapper.class); + } + catch ( ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + } + + protected SQLXMLImpl(V backing) + { + s_backingVH.set(this, backing); + } + + @Override + public void free() throws SQLException + { + @SuppressWarnings("unchecked") // javac 24 first to warn here + V backing = (V)s_backingVH.getAndSet(this, null); + if ( null == backing ) + return; + try + { + backing.close(); + } + catch ( IOException e ) + { + throw normalizedException(e); + } + } + + @Override + public InputStream getBinaryStream() throws SQLException + { + throw new SQLNonTransientException( + "Attempted use of getBinaryStream on an unreadable SQLXML object", + "55000"); + } + + @Override + public OutputStream setBinaryStream() throws SQLException + { + throw new SQLNonTransientException( + "Attempted use of setBinaryStream on an unwritable SQLXML object", + "55000"); + } + + @Override + public Reader getCharacterStream() throws SQLException + { + throw new SQLNonTransientException( + "Attempted use of getCharacterStream on an unreadable " + + "SQLXML object", "55000"); + } + + @Override + public Writer setCharacterStream() throws SQLException + { + throw new SQLNonTransientException( + "Attempted use of setCharacterStream on an unwritable " + + "SQLXML object", "55000"); + } + + @Override + public String getString() throws SQLException + { + throw new SQLNonTransientException( + "Attempted use of getString on an unreadable SQLXML object", + "55000"); + } + + @Override + public void 
setString(String value) throws SQLException + { + throw new SQLNonTransientException( + "Attempted use of setString on an unwritable SQLXML object", + "55000"); + } + + @Override + public T getSource(Class sourceClass) + throws SQLException + { + throw new SQLNonTransientException( + "Attempted use of getSource on an unreadable SQLXML object", + "55000"); + } + + @Override + public T setResult(Class resultClass) + throws SQLException + { + throw new SQLNonTransientException( + "Attempted use of setResult on an unwritable SQLXML object", + "55000"); + } + + protected V backingIfNotFreed() throws SQLException + { + @SuppressWarnings("unchecked") // javac 24 first to warn here + V backing = (V)s_backingVH.getAcquire(this); + if ( null == backing ) + throw new SQLNonTransientException( + "Attempted use of already-freed SQLXML object", "55000"); + return backing; + } + + /** + * Wrap other checked exceptions in SQLException for methods specified to + * throw only that. + */ + static SQLException normalizedException(Exception e) + { + if ( e instanceof SQLException ) + return (SQLException) e; + if ( e instanceof RuntimeException ) + throw (RuntimeException) e; + + if ( e instanceof IOException ) + { + Throwable cause = e.getCause(); + if ( cause instanceof SQLException ) + return (SQLException)cause; + } + + return new SQLException( + "Exception in XML processing, not otherwise provided for: " + + e.getMessage(), "XX000", e); + } + + /** + * Create a new, initially empty and writable, SQLXML instance, whose + * backing memory will in a transaction-scoped PostgreSQL memory context. + */ + static SQLXML newWritable() + { + return doInPG(() -> _newWritable()); + } + + /** + * Native code calls this static method to take over an SQLXML object + * with its content. + *
<p>
      + * This is a static method because an {@code SQLXML} object presented to + * PostgreSQL need not necessarily be this implementation. If it is, then + * the real {@code adopt} method will be called directly; otherwise, a + * native {@code SQLXML} object has to be created, and the content copied + * to it. + * @param sx The SQLXML object to be adopted. + * @param oid The PostgreSQL type ID the native code is expecting; + * see Readable.adopt for why that can matter. + * @return The underlying {@code VarlenaWrapper} (which has its own + * {@code adopt} method the native code will call next. + * @throws SQLException if this {@code SQLXML} instance is not in the + * proper state to be adoptable. + */ + private static VarlenaWrapper adopt(SQLXML sx, int oid) throws SQLException + { + if ( sx instanceof Readable.PgXML || sx instanceof Writable ) + return ((SQLXMLImpl)sx).adopt(oid); + + Source src = sx.getSource(null); + SQLXML rx = + newWritable().setResult(Adjusting.XML.SourceResult.class) + .set(src).getSQLXML(); + + sx.free(); + return ((SQLXMLImpl)rx).adopt(oid); + } + + /** + * Allow native code to claim complete control over the + * underlying {@code VarlenaWrapper} and dissociate it from Java. + * @param oid The PostgreSQL type ID the native code is expecting; + * see Readable.adopt for why that can matter. + * @return The underlying {@code VarlenaWrapper} (which has its own + * {@code adopt} method the native code will call next. + * @throws SQLException if this {@code SQLXML} instance is not in the + * proper state to be adoptable. + */ + protected abstract VarlenaWrapper adopt(int oid) throws SQLException; + + /** + * Return a description of this object useful for debugging (not the raw + * XML content). + */ + @Override + public String toString() + { + return toString(this); + } + + /** + * Return information about this object useful for debugging, prefixed with + * a possibly shortened form of the class name of the passed object + * {@code o}; the normal Java {@code toString()} will pass {@code this}. + *
<p>
      + * Subclasses are encouraged to override, call the super method and append + * subclass-specific detail. + * @param o Object whose class name should be used to prefix the returned + * string. Passing {@code null} is the same as passing {@code this}. + * @return Description of this object for debugging convenience. + */ + protected String toString(Object o) + { + if ( null == o ) + o = this; + @SuppressWarnings("unchecked") // javac 24 first to warn here + V backing = (V)s_backingVH.getAcquire(this); + if ( null != backing ) + return backing.toString(o); + Class c = o.getClass(); + String cn = c.getCanonicalName(); + int pnl = c.getPackageName().length(); + return cn.substring(1 + pnl) + " defunct"; + } + + private static native SQLXML _newWritable(); + + /** + * Return an InputStream presenting the contents of the underlying + * varlena, but with the leading declaration corrected if need be. + *
<p>
      + * The current stored form in PG for the XML type is a character string + * in server encoding, which may or may not still include a declaration + * left over from an input or cast operation, which declaration may or + * may not be correct (about the encoding, anyway). Nothing is stored + * to distinguish whether the value is of the {@code DOCUMENT} or + * {@code CONTENT} form, to determine which requires a full reparse in + * the general case. + *
<p>
      + * This method only peeks at early parse events in the stream, to see + * if a {@code DOCTYPE} is present (must be {@code DOCUMENT}, or there + * is any other content before the first element (cannot be + * {@code DOCUMENT}). The input will not have a synthetic root element + * wrapped around it if a {@code DOCTYPE} is present, as that would + * break validation; otherwise (whether the check concluded it can't be + * {@code DOCUMENT}, or was simply inconclusive}, a synthetic wrapper + * will be added, as it will not break anything. + *
<p>
      + * As a side effect, this method sets {@code m_wrapped} to {@code true} + * if it applies a wrapper element. When returning a type of + * {@code Source} that presents parsed results, it will be configured + * to present them with the wrapper element filtered out. + *
<p>
      + * However, when using the API that exposes the serialized form + * directly ({@code getBinaryStream}, {@code getCharacterStream}, + * {@code getString}), this method is passed {@code true} for + * {@code neverWrap}, and no wrapping is done. The application code must + * then handle the possibility that the stream may fail to parse as a + * {@code DOCUMENT}. (The JDBC spec gives no guidance in this area.) + * @param is The InputStream to be corrected. + * @param neverWrap When {@code true}, suppresses the wrapping described + * above. + * @param wrapping An array of one boolean, which will be set true if + * the returned stream has had a wrapping document element applied that + * will have to be filtered away after parsing. + * @return An InputStream with its original decl, if any, replaced with + * a new one known to be correct, or none if the defaults are correct, + * and with the remaining content wrapped in a synthetic root element, + * unless the input is known early (by having a {@code DOCTYPE}) not to + * need one. + */ + static InputStream correctedDeclStream( + InputStream is, boolean neverWrap, Charset serverCS, boolean[] wrapping) + throws IOException, SQLException + { + assert null != wrapping && 1 == wrapping.length; + + byte[] buf = new byte[40]; + int got; + boolean needMore = false; + DeclProbe probe = new DeclProbe(); + + while ( -1 != ( got = is.read(buf) ) ) + { + for ( int i = 0 ; i < got ; ++ i ) + needMore = probe.take(buf[i]); + if ( ! needMore ) + break; + } + probe.finish(); + + return correctedDeclStream(is, probe, neverWrap, serverCS, wrapping); + } + + /** + * Version of {@code correctedDeclStream} for use when a {@code DeclProbe} + * has already been constructed, and early bytes of the stream fed to it. + */ + static InputStream correctedDeclStream( + InputStream is, DeclProbe probe, boolean neverWrap, Charset serverCS, + boolean[] wrapping) + throws IOException + { + /* + * At this point, for better or worse, the loop is done. There may + * or may not be more of m_backing left to read; the probe may or may + * not have found a decl. If it didn't, prefix() will treat whatever + * had been read as readahead and hand it all back, so it suffices + * here to create a SequenceInputStream of the prefix and whatever + * is or isn't left of m_backing. + * A bonus is that the SequenceInputStream closes each underlying + * stream as it reaches EOF. After the last stream is used up, the + * SequenceInputStream remains open-at-EOF until explicitly closed, + * providing the expected input-stream behavior, but the underlying + * resources don't have to stick around for that. + */ + byte[] pfx = probe.prefix(serverCS); + int raLen = probe.readaheadLength(); + int raOff = pfx.length - raLen; + InputStream pfis = new ByteArrayInputStream(pfx, 0, raOff); + InputStream rais = new ByteArrayInputStream(pfx, raOff, raLen); + + if ( neverWrap ) + return new MarkableSequenceInputStream(pfis, rais, is); + + int markLimit = 1048576; // don't assume a markable stream's economical + if ( ! is.markSupported() ) + is = new BufferedInputStream(is); + else if ( is instanceof VarlenaWrapper ) // a VarlenaWrapper is, though + markLimit = Integer.MAX_VALUE; + + InputStream msis = new MarkableSequenceInputStream(pfis, rais, is); + if ( ! 
useWrappingElement(msis, markLimit) ) + return msis; + + wrapping[0] = true; + InputStream elemStart = new ByteArrayInputStream( + "".getBytes(serverCS)); + InputStream elemEnd = new ByteArrayInputStream( + "".getBytes(serverCS)); + msis = new MarkableSequenceInputStream( + pfis, elemStart, rais, is, elemEnd); + return msis; + } + + static Reader correctedDeclReader( + Reader r, DeclProbe probe, Charset impliedCS, boolean[] wrapping) + throws IOException + { + char[] pfx = probe.charPrefix(impliedCS); + int raLen = probe.readaheadLength(); + int raOff = pfx.length - raLen; + Reader pfr = new CharArrayReader(pfx, 0, raOff); + Reader rar = new CharArrayReader(pfx, raOff, raLen); + + if ( ! r.markSupported() ) + r = new BufferedReader(r); + + Reader msr = new MarkableSequenceReader(pfr, rar, r); + if ( ! useWrappingElement(msr) ) + return msr; + + wrapping[0] = true; + Reader elemStart = new StringReader(""); + Reader elemEnd = new StringReader(""); + msr = new MarkableSequenceReader( + pfr, elemStart, rar, r, elemEnd); + return msr; + } + + /** + * Check (incompletely!) whether an {@code InputStream} is in XML + * {@code DOCUMENT} form (which Java XML parsers will accept) or + * {@code CONTENT} form, (which they won't, unless enclosed in a + * wrapping element). + *
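A tiny standalone illustration of the DOCUMENT-versus-CONTENT distinction that correctedDeclStream and useWrappingElement deal with: a value with more than one top-level element is legal XML(CONTENT) but not a well-formed document, so a plain StAX parse fails unless a synthetic wrapper element is added (and later filtered back out). This sketch is illustrative only and not part of the diff; the wrapper element name here is made up.

// Sketch only: why CONTENT-form values need a synthetic root element.
import java.io.StringReader;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;

final class ContentFormSketch
{
	static boolean parses(String xml)
	{
		XMLInputFactory xif = XMLInputFactory.newDefaultFactory();
		try
		{
			XMLStreamReader xsr =
				xif.createXMLStreamReader(new StringReader(xml));
			while ( xsr.hasNext() )
				xsr.next();
			return true;
		}
		catch ( XMLStreamException e )
		{
			return false; // not a well-formed document
		}
	}

	public static void main(String[] args)
	{
		System.out.println(parses("<a/><b/>"));               // false: CONTENT form
		System.out.println(parses("<wrap><a/><b/></wrap>"));  // true: wrapped
	}
}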
<p>
      + * Proceed by requiring the input stream to support {@code mark} and + * {@code reset}, marking it, creating a StAX parser, and pulling some + * initial parse events. + *
<p>
      + * A possible {@code START_DOCUMENT} along with possible {@code SPACE}, + * {@code COMMENT}, and {@code PROCESSING_INSTRUCTION} events could + * allowably begin either the {@code DOCUMENT} or the {@code CONTENT} + * form. + *
<p>
      + * If a {@code DTD} is seen, the input must be in {@code DOCUMENT} form, + * and must not have a wrapper element added. + *
<p>
      + * If anything else is seen before the first {@code START_ELEMENT}, the + * input must be in {@code CONTENT} form, and must have + * a wrapper element added. + *
<p>
      + * If a {@code START_ELEMENT} is seen before either of those conclusions + * can be reached, this check is inconclusive. The conclusive check + * would be to finish parsing that element to see what, if anything, + * follows it. But that would often amount to parsing the whole stream + * just to determine how to parse it. Instead, just return @code true} + * anyway, as without a DTD, the wrapping trick is usable and won't + * break anything, even if it may not be necessary. + * @param is An {@code InputStream} that must be markable, will be + * marked on entry, and reset upon return. + * @return {@code true} if a wrapping element should be used. + */ + static boolean useWrappingElement(InputStream is, int markLimit) + throws IOException + { + is.mark(markLimit); + + /* + * The XMLStreamReader may actually close the input stream if it + * reaches the end skipping only whitespace. That is probably a bug; + * in any case, protect the original input stream from being closed. + */ + InputStream tmpis = new FilterInputStream(is) + { + @Override + public void close() throws IOException { } + }; + + boolean rslt = useWrappingElement(tmpis, null); + + is.reset(); + is.mark(0); // relax any reset-buffer requirement + + return rslt; + } + + static boolean useWrappingElement(Reader r) + throws IOException + { + r.mark(524288); // don't trust mark-supporting Reader to be economical + + /* + * The XMLStreamReader may actually close the input stream if it + * reaches the end skipping only whitespace. That is probably a bug; + * in any case, protect the original input stream from being closed. + */ + Reader tmpr = new FilterReader(r) + { + @Override + public void close() throws IOException { } + }; + + boolean rslt = useWrappingElement(null, tmpr); + + r.reset(); + r.mark(0); // relax any reset-buffer requirement + + return rslt; + } + + private static boolean useWrappingElement(InputStream is, Reader r) + throws IOException + { + boolean mustBeDocument = false; + boolean cantBeDocument = false; + + XMLInputFactory xif = XMLInputFactory.newDefaultFactory(); + xif.setProperty(xif.IS_NAMESPACE_AWARE, true); + xif.setProperty(xif.SUPPORT_DTD, false);// will still report one it sees + xif.setProperty(xif.IS_REPLACING_ENTITY_REFERENCES, false); + + XMLStreamReader xsr = null; + try + { + if ( null != is ) + xsr = xif.createXMLStreamReader(is); + else + xsr = xif.createXMLStreamReader(r); + while ( xsr.hasNext() ) + { + int evt = xsr.next(); + + if ( COMMENT == evt || PROCESSING_INSTRUCTION == evt + || START_DOCUMENT == evt ) + continue; + + if ( DTD == evt ) + { + mustBeDocument = true; + break; + } + + if ( START_ELEMENT == evt ) // could be DOCUMENT or CONTENT + break; + + cantBeDocument = true; + break; + } + } + catch ( XMLStreamException e ) + { + cantBeDocument = true; + } + + if ( null != xsr ) + { + try + { + xsr.close(); + } + catch ( XMLStreamException e ) + { + } + } + + return ! 
mustBeDocument; + } + + + + static abstract class Readable + extends SQLXMLImpl + { + private static final VarHandle s_readableVH; + protected volatile boolean m_readable = true; + protected final int m_pgTypeID; + protected Charset m_serverCS = implServerCharset(); + protected boolean m_wrapped = false; + + static + { + try + { + s_readableVH = lookup().findVarHandle( + Readable.class, "m_readable", boolean.class); + } + catch ( ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + } + + /** + * Create a readable instance, when called by native code (the + * constructor is otherwise private, after all), passing an initialized + * {@code VarlenaWrapper} and the PostgreSQL type ID from which it has + * been created. + * @param vwi The already-created wrapper for reading the varlena from + * native memory. + * @param oid The PostgreSQL type ID from which this instance is being + * created (for why it matters, see {@code adopt}). + */ + private Readable(V vwi, int oid) throws SQLException + { + super(vwi); + m_pgTypeID = oid; + if ( null == m_serverCS ) + { + free(); + throw new SQLFeatureNotSupportedException("SQLXML: no Java " + + "Charset found to match server encoding; perhaps set " + + "org.postgresql.server.encoding system property to a " + + "valid Java charset name for the same encoding?", "0A000"); + } + } + + private V backingAndClearReadable() throws SQLException + { + V backing = backingIfNotFreed(); + return (boolean)s_readableVH.getAndSet(this, false) + ? backing : null; + } + + protected abstract InputStream toBinaryStream( + V backing, boolean neverWrap) + throws SQLException, IOException; + + @Override + public InputStream getBinaryStream() throws SQLException + { + V backing = backingAndClearReadable(); + if ( null == backing ) + return super.getBinaryStream(); + try + { + return toBinaryStream(backing, true); + } + catch ( IOException e ) + { + throw normalizedException(e); + } + } + + protected abstract Reader toCharacterStream( + V backing, boolean neverWrap) + throws SQLException, IOException; + + @Override + public Reader getCharacterStream() throws SQLException + { + V backing = backingAndClearReadable(); + if ( null == backing ) + return super.getCharacterStream(); + try + { + return toCharacterStream(backing, true); + } + catch ( IOException e ) + { + throw normalizedException(e); + } + } + + @Override + public String getString() throws SQLException + { + V backing = backingAndClearReadable(); + if ( null == backing ) + return super.getString(); + + CharBuffer cb = CharBuffer.allocate(32768); + StringBuilder sb = new StringBuilder(); + try + { + Reader r = toCharacterStream(backing, true); + while ( -1 != r.read(cb) ) + { + sb.append((CharBuffer)cb.flip()); + cb.clear(); + } + r.close(); + return sb.toString(); + } + catch ( Exception e ) + { + throw normalizedException(e); + } + } + + /** + * Return a {@code Class} object representing the most + * natural or preferred presentation if the caller has left it + * unspecified. + *
<p>
      + * Override if the preferred flavor is not {@code SAXSource.class}, + * which this implementation returns. + * @param sourceClass Either null, Source, or Adjusting.XML.Source. + * @return A preferred flavor of Adjusting.XML.Source, if sourceClass is + * Adjusting.XML.Source, otherwise the corresponding flavor of ordinary + * Source. + */ + @SuppressWarnings("unchecked") + protected Class preferredSourceClass( + Class sourceClass) + { + return Adjusting.XML.Source.class == sourceClass + ? (Class)Adjusting.XML.SAXSource.class + : (Class)SAXSource.class; + } + + /** + * Return a {@code StreamSource} presenting backing as a binary + * or character stream, whichever is most natural. + *
<p>
      + * This implementation returns the binary stream obtained with + * {@code toBinaryStream(backing, true)}. + */ + protected StreamSource toStreamSource(V backing) + throws SQLException, IOException + { + return new StreamSource(toBinaryStream(backing, true)); + } + + protected abstract Adjusting.XML.SAXSource toSAXSource(V backing) + throws SQLException, SAXException, IOException; + + protected abstract Adjusting.XML.StAXSource toStAXSource(V backing) + throws SQLException, XMLStreamException, IOException; + + protected abstract Adjusting.XML.DOMSource toDOMSource(V backing) + throws + SQLException, SAXException, IOException, + ParserConfigurationException; + + @Override + public T getSource(Class sourceClass) + throws SQLException + { + V backing = backingAndClearReadable(); + if ( null == backing ) + return super.getSource(sourceClass); + + Class sc = sourceClass; + + if ( null == sc + || Source.class == sc + || Adjusting.XML.Source.class.equals(sc) ) + sc = preferredSourceClass(sc); + + try + { + if ( sc.isAssignableFrom(StreamSource.class) ) + return sc.cast(toStreamSource(backing)); + + if ( sc.isAssignableFrom(SAXSource.class) + || sc.isAssignableFrom(AdjustingSAXSource.class) ) + { + Adjusting.XML.SAXSource ss = toSAXSource(backing); + /* + * Caution: while StAXSource and DOMSource have defaults() + * called right here, SAXSource does not, because there is + * an irksome ordering constraint such that schema() can't + * work if any XMLReader adjustments have been made first. + * Instead, SAXSource (and only SAXSource, so much for + * consistency) must do its own tracking of whether + * defaults() has been called, and do so if it hasn't been, + * either before the first explicit adjustment, or at get() + * time if none. + */ + // ss.defaults(); + if ( Adjusting.XML.Source.class + .isAssignableFrom(sc) ) + return sc.cast(ss); + return sc.cast(ss.get()); + } + + if ( sc.isAssignableFrom(StAXSource.class) + || sc.isAssignableFrom(AdjustingStAXSource.class) ) + { + Adjusting.XML.StAXSource ss = toStAXSource(backing); + ss.defaults(); + if ( Adjusting.XML.Source.class + .isAssignableFrom(sc) ) + return sc.cast(ss); + return sc.cast(ss.get()); + } + + if ( sc.isAssignableFrom(DOMSource.class) + || sc.isAssignableFrom(AdjustingDOMSource.class) ) + { + Adjusting.XML.DOMSource ds = toDOMSource(backing); + ds.defaults(); + if ( Adjusting.XML.Source.class + .isAssignableFrom(sc) ) + return sc.cast(ds); + return sc.cast(ds.get()); + } + } + catch ( Exception e ) + { + throw normalizedException(e); + } + + throw new SQLFeatureNotSupportedException( + "No support for SQLXML.getSource(" + + sc.getName() + ".class)", "0A000"); + } + + @Override + protected String toString(Object o) + { + return String.format("%s %sreadable %swrapped", + super.toString(o), (boolean)s_readableVH.getAcquire() + ? "" : "not ", m_wrapped ? "" : "not "); + } + + static class PgXML + extends Readable + { + private PgXML(VarlenaWrapper.Input vwi, int oid) + throws SQLException + { + super(vwi.new Stream(), oid); + } + + /** + * {@inheritDoc} + *
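The getSource(Class) dispatch above accepts either the plain JDBC Source classes or the PL/Java Adjusting.XML flavors. A brief usage sketch of both paths (illustrative only), assuming a readable SQLXML value obtained elsewhere, for example as a function parameter:

// Sketch only: consuming a readable SQLXML through the dispatch above.
import java.sql.SQLException;
import java.sql.SQLXML;
import javax.xml.transform.sax.SAXSource;
import org.postgresql.pljava.Adjusting;

final class GetSourceSketch
{
	static SAXSource plainJdbc(SQLXML sx) throws SQLException
	{
		// null or Source.class would yield the preferred flavor (SAXSource here)
		return sx.getSource(SAXSource.class);
	}

	static SAXSource adjusting(SQLXML sx) throws SQLException
	{
		// the Adjusting flavor allows tuning the parser before get()
		Adjusting.XML.SAXSource ss =
			sx.getSource(Adjusting.XML.SAXSource.class);
		return ss.defaults().get();
	}
}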
<p>
      + * This is the readable subclass, most typically used for + * data coming from PostgreSQL to Java. The only circumstance in + * which it can be {@code adopt}ed is if the Java code has left it + * untouched, and simply returned it from a function, or used it + * directly as a query parameter. + *
<p>
      + * That is a very efficient handoff with no superfluous copying of + * data. However, the backend is able to associate {@code SQLXML} + * instances with more than one PostgreSQL data type (as of this + * writing, it will allow XML or text, so that this API is usable in + * Java even if the PostgreSQL instance was not built with the XML + * type, or if, for some other reason, it is useful to apply Java + * XML processing to values in the database as text, without the + * overhead of a PG cast). + *
<p>
      + * It would break type safety to allow a {@code SQLXML} instance + * created from text (on which PostgreSQL does not impose any + * particular syntax) to be directly assigned to a PostgreSQL XML + * type without verifying that it is XML. For generality, the + * verification will be done here whenever the PostgreSQL oid at + * {@code adopt} time differs from the one saved at creation. Doing + * the verification is noticeably slower than not doing it, but that + * fast case has to be reserved for when there is no funny business + * with the PostgreSQL types. + */ + @Override + protected VarlenaWrapper adopt(int oid) throws SQLException + { + VarlenaWrapper.Input.Stream vw = (VarlenaWrapper.Input.Stream) + s_backingVH.getAndSet(this, null); + if ( ! (boolean)s_readableVH.getAcquire(this) ) + throw new SQLNonTransientException( + "SQLXML object has already been read from", "55000"); + if ( null == vw ) + backingIfNotFreed(); /* shorthand to throw the exception */ + if ( m_pgTypeID != oid ) + vw.verify(new Verifier()); + return vw; + } + + /* + * This implementation of toBinaryStream has the side effect of + * setting m_wrapped to indicate whether a wrapping element has been + * added around the stream contents. + */ + @Override + protected InputStream toBinaryStream( + VarlenaWrapper.Input.Stream backing, boolean neverWrap) + throws SQLException, IOException + { + boolean[] wrapped = { false }; + InputStream rslt = correctedDeclStream( + backing, neverWrap, m_serverCS, wrapped); + m_wrapped = wrapped[0]; + return rslt; + } + + @Override + protected Reader toCharacterStream( + VarlenaWrapper.Input.Stream backing, boolean neverWrap) + throws SQLException, IOException + { + InputStream is = toBinaryStream(backing, neverWrap); + return new InputStreamReader(is, m_serverCS.newDecoder()); + } + + @Override + protected Adjusting.XML.SAXSource toSAXSource( + VarlenaWrapper.Input.Stream backing) + throws SQLException, SAXException, IOException + { + InputStream is = toBinaryStream(backing, false); + return new AdjustingSAXSource(new InputSource(is), m_wrapped); + } + + @Override + protected Adjusting.XML.StAXSource toStAXSource( + VarlenaWrapper.Input.Stream backing) + throws SQLException, XMLStreamException, IOException + { + InputStream is = toBinaryStream(backing, false); + return new AdjustingStAXSource(is, m_serverCS, m_wrapped); + } + + @Override + protected Adjusting.XML.DOMSource toDOMSource( + VarlenaWrapper.Input.Stream backing) + throws + SQLException, SAXException, IOException, + ParserConfigurationException + { + InputStream is = toBinaryStream(backing, false); + return new AdjustingDOMSource(is, m_wrapped); + } + } + + static class Synthetic extends Readable + { + private Synthetic(VarlenaWrapper.Input vwi, int oid) + throws SQLException + { + super(xmlRenderer(oid, vwi), oid); + } + + private static VarlenaXMLRenderer xmlRenderer( + int oid, VarlenaWrapper.Input vwi) + throws SQLException + { + switch ( oid ) + { + case PG_NODE_TREEOID: return new PgNodeTreeAsXML(vwi); + default: + throw new SQLNonTransientException( + "no synthetic SQLXML support for Oid " + oid, "0A000"); + } + } + + @Override + protected VarlenaWrapper adopt(int oid) throws SQLException + { + throw new SQLFeatureNotSupportedException( + "adopt() on a synthetic SQLXML not yet supported", "0A000"); + } + + @Override + protected InputStream toBinaryStream( + VarlenaXMLRenderer backing, boolean neverWrap) + throws SQLException, IOException + { + throw new SQLFeatureNotSupportedException( + "synthetic SQLXML as 
binary stream not yet supported", + "0A000"); + } + + @Override + protected Reader toCharacterStream( + VarlenaXMLRenderer backing, boolean neverWrap) + throws SQLException, IOException + { + throw new SQLFeatureNotSupportedException( + "synthetic SQLXML as character stream not yet supported", + "0A000"); + } + + @Override + protected Adjusting.XML.SAXSource toSAXSource( + VarlenaXMLRenderer backing) + throws SQLException, SAXException, IOException + { + return new AdjustingSAXSource(backing, new InputSource()); + } + + protected Adjusting.XML.StAXSource toStAXSource( + VarlenaXMLRenderer backing) + throws SQLException, XMLStreamException, IOException + { + throw new SQLFeatureNotSupportedException( + "synthetic SQLXML as StAXSource not yet supported", + "0A000"); + } + + protected Adjusting.XML.DOMSource toDOMSource( + VarlenaXMLRenderer backing) + throws + SQLException, SAXException, IOException, + ParserConfigurationException + { + throw new SQLFeatureNotSupportedException( + "synthetic SQLXML as DOMSource not yet supported", + "0A000"); + } + } + } + + static final Pattern s_entirelyWS = Pattern.compile("\\A[ \\t\\n\\r]*+\\z"); + + /** + * Unwrap a DOM tree parsed from input that was wrapped in a synthetic + * root element in case it had the form of {@code XML(CONTENT)}. + *
<p>
      + * Because the wrapping is applied pessimistically (it is done whenever + * a quick preparse did not conclusively prove the input was + * {@code DOCUMENT}), repeat the check here, where it requires only + * traversing one list of immediate DOM node children. Produce a + * {@code Document} node if possible, a {@code DocumentFragment} only if + * the tree really does not have {@code DOCUMENT} form. + * @param ds A {@code DOMSource} produced by parsing wrapped input. + * The parse result will be retrieved using {@code getNode()}, then + * replaced using {@code setNode()} with the unwrapped result, either a + * {@code Document} or a {@code DocumentFragment} node. + */ + static void domUnwrap(DOMSource ds) + { + Document d = (Document)ds.getNode(); + Element wrapper = d.getDocumentElement(); + /* + * Wrapping isn't done if the input has a DTD, so if we are here, + * the input does not have a DTD, and the null, null, null parameter + * list for createDocument is appropriate. + */ + Document newDoc = + d.getImplementation().createDocument(null, null, null); + DocumentFragment docFrag = newDoc.createDocumentFragment(); + + Matcher entirelyWhitespace = s_entirelyWS.matcher(""); + + boolean isDocument = true; + boolean seenElement = false; + boolean seenText = false; + for ( Node n = wrapper.getFirstChild(), next = null; + null != n; n = next ) + { + /* + * Grab the next sibling early, before the adoptNode() below, + * because that will unlink this node from its source Document, + * clearing its nextSibling link. + */ + next = n.getNextSibling(); + + switch ( n.getNodeType() ) + { + case Node.ELEMENT_NODE: + if ( seenElement ) + isDocument = false; + seenElement = true; + break; + case Node.COMMENT_NODE: + case Node.PROCESSING_INSTRUCTION_NODE: + break; + case Node.TEXT_NODE: + if ( isDocument ) + { + seenText = true; + entirelyWhitespace.reset(n.getNodeValue()); + if ( ! entirelyWhitespace.matches() ) + isDocument = false; + } + break; + default: + isDocument = false; + } + + docFrag.appendChild(newDoc.adoptNode(n)); + } + + if ( ! seenElement ) + isDocument = false; + + if ( isDocument ) + { + if ( seenText ) + { + /* + * At least one text node was seen at top level, but none + * containing anything but whitespace (else isDocument would + * be false and we wouldn't be here). Such nodes have to go. 
+ */ + for ( Node n = docFrag.getFirstChild(), next = null; + null != n; n = next ) + { + next = n.getNextSibling(); + if ( Node.TEXT_NODE == n.getNodeType() ) + docFrag.removeChild(n); + } + } + + newDoc.appendChild(docFrag); + ds.setNode(newDoc); + } + else + ds.setNode(docFrag); + } + + + + static class Writable extends SQLXMLImpl + { + private static final VarHandle s_writableVH; + private volatile boolean m_writable = true; + private Charset m_serverCS = implServerCharset(); + private DOMResult m_domResult; + + static + { + try + { + s_writableVH = lookup().findVarHandle( + Writable.class, "m_writable", boolean.class); + } + catch ( ReflectiveOperationException e ) + { + throw new ExceptionInInitializerError(e); + } + } + + private Writable(VarlenaWrapper.Output vwo) throws SQLException + { + super(vwo); + if ( null == m_serverCS ) + { + try + { + vwo.free(); + } + catch ( IOException ioe ) { } + throw new SQLFeatureNotSupportedException("SQLXML: no Java " + + "Charset found to match server encoding; perhaps set " + + "org.postgresql.server.encoding system property to a " + + "valid Java charset name for the same encoding?", "0A000"); + } + } + + private VarlenaWrapper.Output backingAndClearWritable() + throws SQLException + { + VarlenaWrapper.Output backing = backingIfNotFreed(); + return (boolean)s_writableVH.getAndSet(this, false)? backing : null; + } + + @Override + public void free() throws SQLException + { + VarlenaWrapper.Output vwo = + (VarlenaWrapper.Output)s_backingVH.getAndSet(this, null); + if ( null == vwo ) + return; + try + { + vwo.free(); + } + catch ( Exception e ) + { + throw normalizedException(e); + } + } + + @Override + public OutputStream setBinaryStream() throws SQLException + { + VarlenaWrapper.Output vwo = backingAndClearWritable(); + if ( null == vwo ) + return super.setBinaryStream(); + return new AdjustingStreamResult(vwo, m_serverCS) + .defaults().preferBinaryStream().get().getOutputStream(); + } + + @Override + public Writer setCharacterStream() throws SQLException + { + VarlenaWrapper.Output vwo = backingAndClearWritable(); + if ( null == vwo ) + return super.setCharacterStream(); + return new AdjustingStreamResult(vwo, m_serverCS) + .defaults().preferCharacterStream().get().getWriter(); + } + + @Override + public void setString(String value) throws SQLException + { + VarlenaWrapper.Output vwo = backingAndClearWritable(); + if ( null == vwo ) + super.setString(value); + try + { + Writer w = new AdjustingStreamResult(vwo, m_serverCS) + .defaults().preferCharacterStream().get().getWriter(); + w.write(value); + w.close(); + } + catch ( Exception e ) + { + throw normalizedException(e); + } + } + + @Override + public T setResult(Class resultClass) + throws SQLException + { + VarlenaWrapper.Output vwo = backingAndClearWritable(); + if ( null == vwo ) + return super.setResult(resultClass); + return setResult(vwo, resultClass); + } + + /** + * Return a {@code Class} object representing the most + * natural or preferred presentation if the caller has left it + * unspecified. + *
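Because domUnwrap() above may leave either a Document or a DocumentFragment in the DOMSource, callers of getSource(DOMSource.class) should be prepared for both node types. A small illustrative sketch (not part of the diff):

// Sketch only: handling either result of the unwrapping decision above.
import javax.xml.transform.dom.DOMSource;
import org.w3c.dom.Document;
import org.w3c.dom.Node;

final class DomUnwrapSketch
{
	static String describe(DOMSource ds)
	{
		Node n = ds.getNode();
		return ( n instanceof Document )
			? "XML(DOCUMENT): root is "
				+ ((Document)n).getDocumentElement().getTagName()
			: "XML(CONTENT): fragment with "
				+ n.getChildNodes().getLength() + " top-level node(s)";
	}
}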

      + * Override if the preferred flavor is not {@code SAXResult.class}, + * which this implementation returns. + * @param resultClass Either null, Result, or Adjusting.XML.Result. + * @return A preferred flavor of Adjusting.XML.Result, if resultClass is + * Adjusting.XML.Result, otherwise the corresponding flavor of ordinary + * Result. + */ + @SuppressWarnings("unchecked") + protected Class preferredResultClass( + Class resultClass) + { + return Adjusting.XML.Result.class == resultClass + ? (Class)AdjustingSAXResult.class + : (Class)SAXResult.class; + } + + /* + * Internal version for use in the implementation of + * AdjustingSourceResult, when 'officially' the instance is no longer + * writable (because backingAndClearWritable was called in obtaining the + * AdjustingSourceResult itself). + */ + private T setResult( + VarlenaWrapper.Output vwo, Class resultClass) + throws SQLException + { + Class rc = resultClass; + + if ( null == rc + || Result.class == rc + || Adjusting.XML.Result.class == rc ) + rc = preferredResultClass(rc); + + try + { + if ( rc.isAssignableFrom(StreamResult.class) + || rc.isAssignableFrom(AdjustingStreamResult.class) + ) + { + /* + * As with AdjustingSAXSource, defaults() cannot be called + * here, but must be deferred in case schema() is called. + */ + AdjustingStreamResult sr = + new AdjustingStreamResult(vwo, m_serverCS); + if ( Adjusting.XML.Result.class + .isAssignableFrom(rc) ) + return rc.cast(sr); + return rc.cast(sr.get()); + } + + /* + * This special case must defer setting the verifier; a later + * call to this method with a different result class will be + * made, setting it then. + */ + if ( rc.isAssignableFrom(AdjustingSourceResult.class) ) + { + return rc.cast( + new AdjustingSourceResult(this, m_serverCS)); + } + + /* + * The remaining cases all can use the NoOp verifier. + */ + vwo.setVerifier(VarlenaWrapper.Verifier.NoOp.INSTANCE); + OutputStream os = vwo; + Writer w; + + if ( rc.isAssignableFrom(SAXResult.class) + || rc.isAssignableFrom(AdjustingSAXResult.class) ) + { + SAXTransformerFactory saxtf = (SAXTransformerFactory) + SAXTransformerFactory.newDefaultInstance(); + TransformerHandler th = saxtf.newTransformerHandler(); + th.getTransformer().setOutputProperty( + ENCODING, m_serverCS.name()); + os = new DeclCheckedOutputStream(os, m_serverCS); + w = new OutputStreamWriter(os, m_serverCS.newEncoder()); + th.setResult(new StreamResult(w)); + th = SAXResultAdapter.newInstance(th, w); + SAXResult sr = new SAXResult(th); + if ( Adjusting.XML.Result.class + .isAssignableFrom(rc) ) + return rc.cast(new AdjustingSAXResult(sr)); + return rc.cast(sr); + } + + if ( rc.isAssignableFrom(StAXResult.class) ) + { + XMLOutputFactory xof = XMLOutputFactory.newDefaultFactory(); + os = new DeclCheckedOutputStream(os, m_serverCS); + XMLStreamWriter xsw = xof.createXMLStreamWriter( + os, m_serverCS.name()); + xsw = new StAXResultAdapter(xsw, os); + return rc.cast(new StAXResult(xsw)); + } + + if ( rc.isAssignableFrom(DOMResult.class) ) + { + return rc.cast(m_domResult = new DOMResult()); + } + } + catch ( Exception e ) + { + throw normalizedException(e); + } + + throw new SQLFeatureNotSupportedException( + "No support for SQLXML.setResult(" + + rc.getName() + ".class)", "0A000"); + } + + /** + * Serialize a {@code DOMResult} to an {@code OutputStream} + * and close it. 
+ */ + private void serializeDOM(DOMResult r, OutputStream os) + throws SQLException + { + DOMSource src = new DOMSource(r.getNode()); + try + { + TransformerFactory tf = TransformerFactory.newDefaultInstance(); + Transformer t = tf.newTransformer(); + t.setOutputProperty(ENCODING, m_serverCS.name()); + os = new DeclCheckedOutputStream(os, m_serverCS); + Writer w = new OutputStreamWriter(os, m_serverCS.newEncoder()); + StreamResult rlt = new StreamResult(w); + t.transform(src, rlt); + w.close(); + } + catch ( Exception e ) + { + throw normalizedException(e); + } + } + + @Override + protected VarlenaWrapper adopt(int oid) throws SQLException + { + VarlenaWrapper.Output vwo = + (VarlenaWrapper.Output)s_backingVH.getAndSet(this, null); + if ( (boolean)s_writableVH.getAcquire(this) ) + throw new SQLNonTransientException( + "Writable SQLXML object has not been written yet", "55000"); + if ( null == vwo ) + backingIfNotFreed(); /* shorthand way to throw the exception */ + if ( null != m_domResult ) + { + serializeDOM(m_domResult, vwo); + m_domResult = null; + } + return vwo; + } + + @Override + protected String toString(Object o) + { + return String.format("%s %swritable", super.toString(o), + (boolean)s_writableVH.getAcquire() ? "" : "not "); + } + } + + static class Verifier extends VarlenaWrapper.Verifier.Base + { + private XMLReader m_xr; + + /** + * Constructor called only from {@code adopt()} when an untouched + * {@code Readable} is being bounced back to PostgreSQL with a type Oid + * different from its original. + */ + Verifier() throws SQLException + { + try + { + /* + * Safe to pass false for wrapping; whether the input is wrapped + * or not, the verifying parser will have no need to unwrap. + */ + m_xr = new AdjustingSAXSource(null, false) + .defaults().get().getXMLReader(); + } + catch ( SAXException e ) + { + throw normalizedException(e); + } + } + + /** + * Constructor called with an already-constructed {@code XMLReader}. + *
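As a standalone illustration of what the Verifier's verify() (below) amounts to: an XMLReader with no content handler installed parses its input and discards the events, so a parse that completes without an exception establishes well-formedness. A minimal sketch using plain JAXP/SAX, independent of the AdjustingSAXSource plumbing used here:

    import java.io.InputStream;
    import javax.xml.XMLConstants;
    import javax.xml.parsers.SAXParserFactory;
    import org.xml.sax.InputSource;
    import org.xml.sax.XMLReader;

    class WellFormednessSketch
    {
        /** Throws if the stream is not well-formed XML. */
        static void verify(InputStream is) throws Exception
        {
            SAXParserFactory spf = SAXParserFactory.newInstance();
            spf.setNamespaceAware(true);
            spf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
            XMLReader xr = spf.newSAXParser().getXMLReader();
            // No handlers are registered: events are parsed and thrown away,
            // so reaching the end of parse() means the input was well-formed.
            xr.parse(new InputSource(is));
        }
    }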

      + * Adjustments may have been made to the {@code XMLReader}. + */ + Verifier(XMLReader xr) + { + m_xr = xr; + } + + @Override + protected void verify(InputStream is) throws Exception + { + boolean[] wrapped = { false }; + is = correctedDeclStream( + is, false, implServerCharset(), wrapped); + + /* + * The supplied XMLReader is never set up to do unwrapping, which is + * ok; it never needs to. But it will have had its error handler set + * on that assumption, which must be changed here if wrapping is in + * effect, just in case schema validation has been requested. + */ + if ( wrapped[0] ) + m_xr.setErrorHandler(SAXDOMErrorHandler.instance(true)); + + /* + * What does an XMLReader do if no handlers have been set for + * content events? Parses everything and discards the events. + * Just what you'd want for a verifier. + */ + m_xr.parse(new InputSource(is)); + } + } + + /** + * Filter an {@code OutputStream} by ensuring it doesn't begin with a + * declaration of a character encoding other than the server encoding, and + * passing any declaration along in an edited form more palatable to + * PostgreSQL. + */ + static class DeclCheckedOutputStream extends FilterOutputStream + { + private Charset m_serverCS; + private DeclProbe m_probe; + + private DeclCheckedOutputStream(OutputStream os, Charset cs) + throws IOException + { + super(os); + os.write(new byte[0]); // is the VarlenaWrapper.Output still alive? + m_serverCS = cs; + m_probe = new DeclProbe(); + } + + @Override + public void write(int b) throws IOException + { + synchronized ( out ) + { + if ( null == m_probe ) + { + out.write(b); + return; + } + try + { + if ( ! m_probe.take((byte)(b & 0xff)) ) + check(); + } + catch ( SQLException sqe ) + { + throw normalizedException(sqe); + } + } + } + + @Override + public void write(byte[] b, int off, int len) throws IOException + { + synchronized ( out ) + { + if ( null != m_probe ) + { + try + { + while ( 0 < len -- ) + { + if ( ! m_probe.take(b[off ++]) ) + { + check(); + break; + } + } + } + catch ( SQLException sqe ) + { + throw normalizedException(sqe); + } + } + out.write(b, off, len); + } + } + + @Override + public void flush() throws IOException + { + } + + @Override + public void close() throws IOException + { + synchronized ( out ) + { + try + { + check(); + } + catch ( SQLException sqe ) + { + throw normalizedException(sqe); + } + out.close(); + } + } + + /** + * Called after enough bytes have been passed to the {@code DeclProbe} + * for it to know whether a decl is present and correct, to throw an + * exception if an encoding is declared that is not the server encoding, + * and then pass the (possibly edited) decl and any readahead along + * to the output. + *

      + * It is assumed that the stream is being generated by code that does + * encoding declarations properly, so should have one if any encoding + * other than UTF-8 is in use. (For now, in a mood of leniency, + * {@code false} is passed to {@code checkEncoding}'s {@code strict} + * parameter, so an exception will be generated only if the stream + * explicitly declares an encoding that isn't the server encoding. This + * could one day be made configurable, perhaps as a {@code Connection} + * property.) + *

      + * It's assumed that the destination of the stream is PostgreSQL's + * native XML datatype, where some of the native functions can fall over + * if an encoding declaration is present (even if it correctly matches + * the server encoding), so any decl generated into the output will be + * edited to remove any reference to encoding; this can fall short of + * strict conformance, but works better with the PG core implementation. + */ + private void check() throws IOException, SQLException + { + if ( null == m_probe ) + return; + m_probe.finish(); + m_probe.checkEncoding(m_serverCS, false); + byte[] prefix = m_probe.prefix(null /* not m_serverCS */); + m_probe = null; // Do not check more than once. + out.write(prefix); + } + + /** + * Wrap other checked exceptions in IOException for methods specified to + * throw only that. + */ + private IOException normalizedException(Exception e) + { + if ( e instanceof IOException ) + return (IOException)e; + if ( e instanceof RuntimeException ) + throw (RuntimeException)e; + return new IOException("Malformed XML: " + e.getMessage(), e); + } + } + + /** + * Class to wrap an {@code XMLReader} and pass all of the parse events + * except the outermost ("document root") element, in effect producing + * {@code XML(CONTENT)} when the underlying stream has had a synthetic + * root element wrapped around it to satisfy a JRE-bundled parser that + * only accepts {@code XML(DOCUMENT)}. + *
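A heavily simplified sketch of that unwrapping idea, shorn of the top-level whitespace bookkeeping and LexicalHandler plumbing the real filter below needs; the wrapper element name here is only a stand-in for whatever synthetic element the implementation applies.

    import java.io.StringReader;
    import javax.xml.parsers.SAXParserFactory;
    import org.xml.sax.Attributes;
    import org.xml.sax.InputSource;
    import org.xml.sax.SAXException;
    import org.xml.sax.XMLReader;
    import org.xml.sax.helpers.DefaultHandler;
    import org.xml.sax.helpers.XMLFilterImpl;

    class UnwrapSketch extends XMLFilterImpl
    {
        private int level = 0;

        UnwrapSketch(XMLReader parent) { super(parent); }

        @Override
        public void startElement(
            String uri, String localName, String qName, Attributes atts)
            throws SAXException
        {
            if ( 0 < level++ )                 // suppress only the outermost element
                super.startElement(uri, localName, qName, atts);
        }

        @Override
        public void endElement(String uri, String localName, String qName)
            throws SAXException
        {
            if ( 0 < --level )
                super.endElement(uri, localName, qName);
        }

        public static void main(String[] args) throws Exception
        {
            XMLReader parser =
                SAXParserFactory.newInstance().newSAXParser().getXMLReader();
            UnwrapSketch filter = new UnwrapSketch(parser);
            filter.setContentHandler(new DefaultHandler()
            {
                @Override
                public void characters(char[] ch, int start, int length)
                {
                    System.out.print(new String(ch, start, length));
                }
            });
            filter.parse(new InputSource(new StringReader(
                "<wrap>Hello, <b>content</b>!</wrap>"))); // "wrap" is a stand-in
            System.out.println();              // prints: Hello, content!
        }
    }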

      + * The result may be surprising to code consuming the SAX stream, depending + * on what it expects, but testing has shown the JRE-bundled identity + * transformer, at least, to accept the input and faithfully reproduce the + * non-document content. + */ + static class SAXUnwrapFilter extends XMLFilterImpl implements LexicalHandler + { + private int m_nestLevel = 0; + private WhitespaceAccumulator m_wsAcc = new WhitespaceAccumulator(); + private boolean m_topElementSeen = false; + private boolean m_couldBeDocument = true; + + SAXUnwrapFilter(XMLReader parent) + { + super(parent); + } + + @Override + public void endDocument() throws SAXException + { + if ( m_couldBeDocument && ! m_topElementSeen ) + commitToContent(); + super.endDocument(); + } + + @Override + public void startElement( + String uri, String localName, String qName, Attributes atts) + throws SAXException + { + if ( m_couldBeDocument && 1 == m_nestLevel ) + { + if ( m_topElementSeen ) // a second top-level element? + commitToContent(); // ==> has to be CONTENT. + else + m_wsAcc.discard(); + m_topElementSeen = true; + } + + if ( 0 < m_nestLevel++ ) + super.startElement(uri, localName, qName, atts); + } + + @Override + public void endElement(String uri, String localName, String qName) + throws SAXException + { + if ( 0 < --m_nestLevel ) + super.endElement(uri, localName, qName); + } + + @Override + public void characters(char[] ch, int start, int length) + throws SAXException + { + if ( m_couldBeDocument && 1 == m_nestLevel ) + { + int mismatchIndex = m_wsAcc.accumulate(ch, start, length); + if ( -1 == mismatchIndex ) + return; + commitToContent(); // they weren't all whitespace ==> CONTENT. + start = mismatchIndex; + } + super.characters(ch, start, length); + } + + @Override + public void processingInstruction(String target, String data) + throws SAXException + { + if ( m_couldBeDocument && 1 == m_nestLevel ) + m_wsAcc.discard(); + super.processingInstruction(target, data); + } + + @Override + public void skippedEntity(String name) throws SAXException + { + if ( m_couldBeDocument && 1 == m_nestLevel ) + commitToContent(); // an entity at the top level? CONTENT. + super.skippedEntity(name); + } + + /** + * Called whenever, at "top level" (really nesting level 1, inside our + * wrapping element), a parse event that could not appear there in + * {@code XML(DOCUMENT)} form is encountered. + *

      + * Forces {@code m_couldBeDocument} false, and disburses any whitespace + * that may be held in the accumulator. + *

      + * The occurrence of a parse event that could occur in the + * {@code XML(DOCUMENT)} form should be handled not by calling this + * method, but by simply discarding any held whitespace instead. + */ + private void commitToContent() throws SAXException + { + char[] buf = new char [ WhitespaceAccumulator.MAX_RUN ]; + int length; + m_couldBeDocument = false; + while ( 0 < (length = m_wsAcc.disburse(buf)) ) + super.characters(buf, 0, length); + } + + /* + * Implementation of the LexicalHandler interface (and the property + * interception to set and retrieve the handler). No help from + * XMLFilterImpl there. + */ + + private static final LexicalHandler s_dummy = new DefaultHandler2(); + private LexicalHandler m_consumersLexHandler; + private LexicalHandler m_realLexHandler; + private boolean m_lexHandlerIsRegistered = false; + + @Override + public void setContentHandler(ContentHandler handler) + { + super.setContentHandler(handler); + if ( m_lexHandlerIsRegistered ) + return; + + /* + * The downstream consumer might never register a LexicalHandler of + * its own, but those events still matter here, so trigger the + * registration of 'this' if necessary. + */ + try + { + setProperty(SAX2PROPERTY.LEXICAL_HANDLER.propertyUri(), + m_consumersLexHandler); + } + catch ( SAXException e ) + { + } + } + + @Override + public Object getProperty(String name) + throws SAXNotRecognizedException, SAXNotSupportedException + { + if ( SAX2PROPERTY.LEXICAL_HANDLER.propertyUri().equals(name) ) + return m_consumersLexHandler; + return super.getProperty(name); + } + + @Override + public void setProperty(String name, Object value) + throws SAXNotRecognizedException, SAXNotSupportedException + { + if ( SAX2PROPERTY.LEXICAL_HANDLER.propertyUri().equals(name) ) + { + if ( ! SAX2PROPERTY.LEXICAL_HANDLER.valueOk(value) ) + throw new SAXNotSupportedException(name); + /* + * Make sure 'this' is registered as the upstream parser's + * lexical handler, done here to avoid publishing 'this' early + * from the constructor, and also to make sure the consumer gets + * an appropriate exception if it doesn't work for some reason. + */ + if ( ! m_lexHandlerIsRegistered ) + { + super.setProperty(name, this); + m_lexHandlerIsRegistered = true; + } + m_consumersLexHandler = (LexicalHandler)value; + m_realLexHandler = + null != value ? 
m_consumersLexHandler : s_dummy; + return; + } + super.setProperty(name, value); + } + + @Override + public void startDTD(String name, String publicId, String systemId) + throws SAXException + { + assert false; // this filter is never used on input with a DTD + } + + @Override + public void endDTD() throws SAXException + { + assert false; // this filter is never used on input with a DTD + } + + @Override + public void startEntity(String name) throws SAXException + { + if ( m_couldBeDocument && 1 == m_nestLevel ) + commitToContent(); + m_realLexHandler.startEntity(name); + } + + @Override + public void endEntity(String name) throws SAXException + { + m_realLexHandler.endEntity(name); + } + + @Override + public void startCDATA() throws SAXException + { + if ( m_couldBeDocument && 1 == m_nestLevel ) + commitToContent(); + m_realLexHandler.startCDATA(); + } + + @Override + public void endCDATA() throws SAXException + { + m_realLexHandler.startCDATA(); + } + + @Override + public void comment(char[] ch, int start, int length) + throws SAXException + { + if ( m_couldBeDocument && 1 == m_nestLevel ) + m_wsAcc.discard(); + m_realLexHandler.comment(ch, start, length); + } + } + + /** + * Class to wrap a SAX {@code TransformerHandler} and hook the + * {@code endDocument} callback to also close the underlying output stream, + * making the {@code SQLXML} object ready to use for storing or returning + * the value. + */ + static class SAXResultAdapter + extends XMLFilterImpl implements TransformerHandler + { + private Writer m_w; + private TransformerHandler m_th; + private SAXResultAdapter(TransformerHandler th, Writer w) + { + m_w = w; + m_th = th; + setContentHandler(th); + setDTDHandler(th); + } + + static TransformerHandler newInstance( + TransformerHandler th, Writer w) + { + return new SAXResultAdapter(th, w); + } + + /** + * Version of {@code endDocument} that also closes the underlying + * stream. + */ + @Override + public void endDocument() throws SAXException + { + super.endDocument(); + try + { + m_w.close(); + } + catch ( IOException ioe ) + { + throw new SAXException("Failure closing SQLXML SAXResult", ioe); + } + m_w = null; + } + + /* + * XMLFilterImpl provides default pass-through methods for most of the + * superinterfaces of TransformerHandler, but not for those of + * LexicalHandler, so here goes. + */ + + @Override + public void startDTD(String name, String publicId, String systemId) + throws SAXException + { + m_th.startDTD(name, publicId, systemId); + } + + @Override + public void endDTD() + throws SAXException + { + m_th.endDTD(); + } + + @Override + public void startEntity(String name) + throws SAXException + { + /* + * For the time being, do NOT pass through startEntity/endEntity. + * When we are the result of a transform using the JRE-bundled + * Transformer implementation, we may get called by a class + * com.sun.org.apache.xml.internal.serializer.ToXMLSAXHandler + * that overrides startEntity and gives us those, but neglects to + * override endEntity and never gives us those, leaving our + * serializer thinking it's stuck in an entity forever. (Insert + * Java bug number here if assigned.) Can revisit later if a fixed + * Java version is known, or could use a simple test to check for + * presence of the bug. + */ + //m_th.startEntity(name); + } + + @Override + public void endEntity(String name) + throws SAXException + { + /* + * See startEntity. 
+ */ + // m_th.endEntity(name); + } + + @Override + public void startCDATA() + throws SAXException + { + m_th.startCDATA(); + } + + @Override + public void endCDATA() + throws SAXException + { + m_th.endCDATA(); + } + + @Override + public void comment(char[] ch, int start, int length) + throws SAXException + { + m_th.comment(ch, start, length); + } + + @Override + public void setResult(Result result) + { + throw new IllegalArgumentException("Result already set"); + } + + @Override + public void setSystemId(String systemId) + { + m_th.setSystemId(systemId); + } + + @Override + public String getSystemId() + { + return m_th.getSystemId(); + } + + @Override + public Transformer getTransformer() + { + return m_th.getTransformer(); + } + } + + /** + * Class to wrap an {@code XMLStreamReader} and pass all of the parse events + * except the outermost ("document root") element, in effect producing + * {@code XML(CONTENT)} when the underlying stream has had a synthetic + * root element wrapped around it to satisfy a JRE-bundled parser that + * only accepts {@code XML(DOCUMENT)}. + *
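The StAX counterpart can be sketched in the same spirit: a StreamReaderDelegate that swallows the synthetic root's START_ELEMENT and END_ELEMENT and hands everything else through unchanged. The real filter below additionally has to manage accumulated top-level whitespace and a peeked-event protocol, which this sketch ignores.

    import javax.xml.stream.XMLStreamConstants;
    import javax.xml.stream.XMLStreamException;
    import javax.xml.stream.XMLStreamReader;
    import javax.xml.stream.util.StreamReaderDelegate;

    /** Minimal sketch: hide the outermost element from the consumer. */
    class SkipRootSketch extends StreamReaderDelegate
    {
        private int depth = 0;

        SkipRootSketch(XMLStreamReader reader) { super(reader); }

        @Override
        public int next() throws XMLStreamException
        {
            int event = super.next();
            if ( XMLStreamConstants.START_ELEMENT == event && 0 == depth++ )
                return next();   // swallow the synthetic root's start tag
            if ( XMLStreamConstants.END_ELEMENT == event && 0 == --depth )
                return next();   // swallow its end tag; END_DOCUMENT follows
            return event;
        }
    }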

      + * The result may be surprising to code consuming the StAX stream, depending + * on what it expects; testing has shown the JRE-bundled identity + * transformer does not faithfully reproduce such input (though, oddly, the + * 'same' identity transformer reading the 'same' content through the + * {@code SAXUnwrapFilter} does). Code that will be expected to handle + * {@code XML(CONTENT)} and not just {@code XML(DOCUMENT)} using this + * interface should be tested for correct behavior. + */ + static class StAXUnwrapFilter extends StreamReaderDelegate + { + private boolean m_hasPeeked; + private int m_nestLevel = 0; + private WhitespaceAccumulator m_wsAcc = new WhitespaceAccumulator(); + private boolean m_topElementSeen = false; + private boolean m_couldBeDocument = true; + private int m_disbursed = 0; + private int m_disburseOffset = 0; + private char[] m_disburseBuffer; + private int m_tailFrom = -1; + private Matcher m_allWhiteSpace = s_entirelyWS.matcher(""); + + StAXUnwrapFilter(XMLStreamReader reader) + { + super(reader); + } + + /** + * Wrap upstream {@code hasNext} to account for possible accumulated + * whitespace being disbursed. + *

      + * This method and {@code wrappedNext} are responsible for the + * illusion of additional {@code CHARACTERS} events before the next real + * upstream event, if there was accumulated whitespace that is now being + * disbursed because the input has been determined to have + * {@code CONTENT} form. + */ + private boolean wrappedHasNext() throws XMLStreamException + { + /* + * If we are currently looking at a 'disburse' buffer, return true; + * the next event will be either another disburse buffer from the + * accumulator, or the upstream event that's still under the cursor. + * That one is either a CHARACTERS event (from which some tail + * characters still need to be emitted), or whatever following event + * triggered the commitToContent. + * + * Otherwise, defer to the upstream's hasNext(). + */ + if ( 0 < m_disbursed ) + return true; + + return super.hasNext(); + } + + /** + * Wrap upstream {@code next} to account for possible accumulated + * whitespace being disbursed. + *

      + * This method and {@code wrappedHasNext} are responsible for the + * illusion of additional {@code CHARACTERS} events before the next real + * upstream event, if there was accumulated whitespace that is now being + * disbursed because the input has been determined to have + * {@code CONTENT} form. + */ + private int wrappedNext() throws XMLStreamException + { + /* + * If we are currently looking at a 'disburse' buffer and there is + * another one, get that one and return CHARACTERS. If there isn't, + * and m_tailFrom is -1, then the event now under the upstream + * cursor is the one that triggered the commitToContent; return its + * event type. A nonnegative m_tailFrom indicates that the event + * under the cursor is still the CHARACTERS event that turned out + * not to be all whitespace, and still has a tail of characters to + * emit. Store a reference to its upstream array in m_disburseBuffer + * and the proper offset and length values to fake it up as one last + * disburse array; this requires less work in the many other methods + * that must be overridden to sustain the illusion. Set m_tailFrom + * to another negative value in that case (-2), to be replaced with + * -1 on the next iteration and returning to your regularly + * scheduled programming. + * + * Otherwise, defer to the upstream's next(). + */ + if ( 0 < m_disbursed ) + { + m_disbursed = m_wsAcc.disburse(m_disburseBuffer); + if ( 0 < m_disbursed ) + return CHARACTERS; + if ( -1 == m_tailFrom ) + return super.getEventType(); + if ( 0 <= m_tailFrom ) + { + m_disburseBuffer = super.getTextCharacters(); + m_disburseOffset = super.getTextStart() + m_tailFrom; + m_disbursed = super.getTextLength() - m_tailFrom; + m_tailFrom = -2; + return CHARACTERS; + } + m_tailFrom = -1; + m_disburseBuffer = null; + m_disburseOffset = m_disbursed = 0; + } + + return super.next(); + } + + @Override + public boolean hasNext() throws XMLStreamException + { + if ( m_hasPeeked ) + return true; + + while ( wrappedHasNext() ) + { + /* + * Set hasPeeked = true *just before* peeking. Even if next() + * throws an exception, hasNext() must be idempotent: another + * call shouldn't try another next(), which could advance + * the cursor to the wrong location for the error. + */ + m_hasPeeked = true; + switch ( wrappedNext() ) + { + case START_ELEMENT: + if ( m_couldBeDocument && 1 == m_nestLevel ) + { + if ( m_topElementSeen ) + { + commitToContent(); + if ( 0 < m_disbursed ) + return true; // no nestLevel++; we'll be back + } + else + m_wsAcc.discard(); + m_topElementSeen = true; + } + if ( 0 < m_nestLevel++ ) + return true; + continue; + + case END_ELEMENT: + if ( 0 < --m_nestLevel ) + return true; + continue; + + case END_DOCUMENT: + if ( m_couldBeDocument && ! m_topElementSeen ) + commitToContent(); + return true; + + case CHARACTERS: + if ( m_couldBeDocument && 1 == m_nestLevel ) + { + int mismatchIndex = m_wsAcc.accumulate( + super.getTextCharacters(), + super.getTextStart(), super.getTextLength()); + if ( -1 == mismatchIndex ) + continue; + commitToContent(); + m_tailFrom = mismatchIndex; + } + return true; + + case COMMENT: + case PROCESSING_INSTRUCTION: + if ( m_couldBeDocument && 1 == m_nestLevel ) + m_wsAcc.discard(); + return true; + + case CDATA: + case ENTITY_REFERENCE: + if ( m_couldBeDocument && 1 == m_nestLevel ) + commitToContent(); + return true; + + default: + return true; + } + } + + m_hasPeeked = false; + return false; + } + + @Override + public int next() throws XMLStreamException + { + if ( ! 
hasNext() ) + throw new NoSuchElementException(); + m_hasPeeked = false; + return getEventType(); + } + + @Override + public int getEventType() + { + if ( 0 < m_disbursed ) + return CHARACTERS; + return super.getEventType(); + } + + private void commitToContent() + { + char[] buf = new char [ WhitespaceAccumulator.MAX_RUN ]; + int got = m_wsAcc.disburse(buf); + m_couldBeDocument = false; + if ( 0 == got ) + return; + m_disburseBuffer = buf; + m_disbursed = got; + } + + /* + * The methods specific to CHARACTERS events must be overridden here + * to handle 'extra' CHARACTERS events after commitToContent. That's + * the bare-metal ones getTextCharacters, getTextStart, getTextLength + * for sure, but also getText and the copying getTextCharacters, because + * the StAX API spec does not guarantee that those are implemented with + * virtual calls to the bare ones. + */ + + @Override + public char[] getTextCharacters() + { + if ( 0 < m_disbursed ) + return m_disburseBuffer; + return super.getTextCharacters(); + } + + @Override + public int getTextStart() + { + if ( 0 < m_disbursed ) + return m_disburseOffset; + return super.getTextStart(); + } + + @Override + public int getTextLength() + { + if ( 0 < m_disbursed ) + return m_disbursed; + return super.getTextLength(); + } + + @Override + public String getText() + { + if ( 0 < m_disbursed ) + return new String( + m_disburseBuffer, m_disburseOffset, m_disbursed); + return super.getText(); + } + + @Override + public int getTextCharacters( + int sourceStart, char[] target, int targetStart, int length) + throws XMLStreamException + { + int internalStart = getTextStart(); + int internalLength = getTextLength(); + if ( sourceStart < 0 ) // arraycopy might not catch this, check here + throw new IndexOutOfBoundsException(); + internalStart += sourceStart; + internalLength -= sourceStart; + if ( length > internalLength ) + length = internalLength; + System.arraycopy( // let arraycopy do the other index checks + getTextCharacters(), internalStart, + target, targetStart, length); + return length; + } + + /* + * But wait, there's more: some methods that are valid in "All States" + * need adjustments to play along with 'inserted' CHARACTERS events. + */ + + @Override + public void require(int type, String namespaceURI, String localName) + throws XMLStreamException + { + if ( 0 < m_disbursed ) + if ( CHARACTERS != type + || null != namespaceURI || null != localName ) + throw new XMLStreamException( + "Another event expected, parsed CHARACTERS"); + super.require(type, namespaceURI, localName); + } + + @Override + public String getNamespaceURI() + { + if ( 0 < m_disbursed ) + return null; + return super.getNamespaceURI(); + } + + @Override + public boolean isStartElement() + { + if ( 0 < m_disbursed ) + return false; + return super.isStartElement(); + } + + @Override + public boolean isEndElement() + { + if ( 0 < m_disbursed ) + return false; + return super.isEndElement(); + } + + @Override + public boolean isCharacters() + { + if ( 0 < m_disbursed ) + return true; + return super.isCharacters(); + } + + @Override + public boolean isWhiteSpace() + { + if ( 0 == m_disbursed ) + return super.isWhiteSpace(); + /* + * If you are about to change the below to a simple 'return true' + * because things are disbursed by the WhitespaceAccumulator, don't + * forget that one last 'disbursement' can be faked up containing + * the tail of the CHARACTERS event that was not all whitespace. 
+ */ + CharBuffer cb = CharBuffer.wrap( + m_disburseBuffer, m_disburseOffset, m_disbursed); + m_allWhiteSpace.reset(cb); + boolean result = m_allWhiteSpace.matches(); + m_allWhiteSpace.reset(""); + return result; + } + + @Override + public boolean hasText() + { + if ( 0 < m_disbursed ) + return true; + return super.hasText(); + } + + @Override + public boolean hasName() + { + if ( 0 < m_disbursed ) + return false; + return super.hasName(); + } + + @Override + public int nextTag() throws XMLStreamException + { + int evt; + while ( true ) + { + if ( ! hasNext() ) + throw new NoSuchElementException(); + evt = next(); + if ( ( CHARACTERS == evt || CDATA == evt ) && isWhiteSpace() ) + continue; + if ( SPACE != evt && PROCESSING_INSTRUCTION != evt + && COMMENT != evt ) + break; + } + /* if NoSuchElement wasn't thrown, evt is definitely assigned */ + if ( START_ELEMENT != evt && END_ELEMENT != evt ) + throw new XMLStreamException( + "expected start or end tag", getLocation()); + return evt; + } + + /* + * It ain't over till it's over: the methods that must throw + * IllegalStateException when positioned on a CHARACTERS event + * must do so on an 'inserted' one also. + */ + + private void illegalForCharacters() + { + if ( 0 < m_disbursed ) + throw new IllegalStateException( + "XML parsing method inappropriate for a CHARACTERS event."); + } + + @Override + public String getAttributeValue(String namespaceURI, String localName) + { + illegalForCharacters(); + return super.getAttributeValue(namespaceURI, localName); + } + + @Override + public int getAttributeCount() + { + illegalForCharacters(); + return super.getAttributeCount(); + } + + @Override + public QName getAttributeName(int index) + { + illegalForCharacters(); + return super.getAttributeName(index); + } + + @Override + public String getAttributeNamespace(int index) + { + illegalForCharacters(); + return super.getAttributeNamespace(index); + } + + @Override + public String getAttributeLocalName(int index) + { + illegalForCharacters(); + return super.getAttributeLocalName(index); + } + + @Override + public String getAttributePrefix(int index) + { + illegalForCharacters(); + return super.getAttributePrefix(index); + } + + @Override + public String getAttributeType(int index) + { + illegalForCharacters(); + return super.getAttributeType(index); + } + + @Override + public String getAttributeValue(int index) + { + illegalForCharacters(); + return super.getAttributeValue(index); + } + + @Override + public boolean isAttributeSpecified(int index) + { + illegalForCharacters(); + return super.isAttributeSpecified(index); + } + + @Override + public int getNamespaceCount() + { + illegalForCharacters(); + return super.getNamespaceCount(); + } + + @Override + public String getNamespacePrefix(int index) + { + illegalForCharacters(); + return super.getNamespacePrefix(index); + } + + @Override + public String getNamespaceURI(int index) + { + illegalForCharacters(); + return super.getNamespaceURI(index); + } + + @Override + public QName getName() + { + illegalForCharacters(); + return super.getName(); + } + + @Override + public String getLocalName() + { + illegalForCharacters(); + return super.getLocalName(); + } + } + + /** + * Class to wrap a StAX {@code XMLStreamWriter} and hook the method + * {@code writeEndDocument} to also close the underlying output stream, + * making the {@code SQLXML} object ready to use for storing or returning + * the value. 
+ */ + static class StAXResultAdapter implements XMLStreamWriter + { + private XMLStreamWriter m_xsw; + private OutputStream m_os; + + StAXResultAdapter(XMLStreamWriter xsw, OutputStream os) + { + m_xsw = xsw; + m_os = os; + } + + @Override + public void writeStartElement(String localName) + throws XMLStreamException + { + m_xsw.writeStartElement(localName); + } + + @Override + public void writeStartElement(String namespaceURI, String localName) + throws XMLStreamException + { + m_xsw.writeStartElement(namespaceURI, localName); + } + + @Override + public void writeStartElement( + String prefix, String localName, String namespaceURI) + throws XMLStreamException + { + m_xsw.writeStartElement(prefix, localName, namespaceURI); + } + + @Override + public void writeEmptyElement(String namespaceURI, String localName) + throws XMLStreamException + { + m_xsw.writeEmptyElement(namespaceURI, localName); + } + + @Override + public void writeEmptyElement( + String prefix, String localName, String namespaceURI) + throws XMLStreamException + { + m_xsw.writeEmptyElement(prefix, localName, namespaceURI); + } + + @Override + public void writeEmptyElement(String localName) + throws XMLStreamException + { + m_xsw.writeEmptyElement(localName); + } + + @Override + public void writeEndElement() throws XMLStreamException + { + m_xsw.writeEndElement(); + } + + /** + * Version of {@code writeEndDocument} that also closes the underlying + * stream. + *
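For context, the calling pattern this adapter exists to support looks roughly like the following; the element content is arbitrary, and only the standard java.sql and StAX APIs appear. The final writeEndDocument is what lets the adapter close the underlying stream and leave the SQLXML ready to be stored or returned.

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.SQLXML;
    import javax.xml.stream.XMLStreamException;
    import javax.xml.stream.XMLStreamWriter;
    import javax.xml.transform.stax.StAXResult;

    class StAXWriteSketch
    {
        static SQLXML buildGreeting(Connection conn)
            throws SQLException, XMLStreamException
        {
            SQLXML sx = conn.createSQLXML();
            StAXResult result = sx.setResult(StAXResult.class);
            XMLStreamWriter w = result.getXMLStreamWriter();
            w.writeStartDocument();
            w.writeStartElement("greeting");
            w.writeCharacters("Hello, world!");
            w.writeEndElement();
            w.writeEndDocument(); // completes the value; the underlying stream closes here
            return sx;            // ready to pass to setSQLXML or return from a function
        }
    }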

      + * Note it does not call this class's own close; a + * calling transformer may emit a warning if that is done. + */ + @Override + public void writeEndDocument() throws XMLStreamException + { + m_xsw.writeEndDocument(); + m_xsw.flush(); + + try + { + m_os.close(); + } + catch ( Exception ioe ) + { + throw new XMLStreamException( + "Failure closing SQLXML StAXResult", ioe); + } + } + + @Override + public void close() throws XMLStreamException + { + m_xsw.close(); + } + + @Override + public void flush() throws XMLStreamException + { + m_xsw.flush(); + } + + @Override + public void writeAttribute(String localName, String value) + throws XMLStreamException + { + m_xsw.writeAttribute(localName, value); + } + + @Override + public void writeAttribute( + String prefix, String namespaceURI, String localName, String value) + throws XMLStreamException + { + m_xsw.writeAttribute(prefix, namespaceURI, localName, value); + } + + @Override + public void writeAttribute( + String namespaceURI, String localName, String value) + throws XMLStreamException + { + m_xsw.writeAttribute(namespaceURI, localName, value); + } + + @Override + public void writeNamespace(String prefix, String namespaceURI) + throws XMLStreamException + { + m_xsw.writeNamespace(prefix, namespaceURI); + } + + @Override + public void writeDefaultNamespace(String namespaceURI) + throws XMLStreamException + { + m_xsw.writeDefaultNamespace(namespaceURI); + } + + @Override + public void writeComment(String data) throws XMLStreamException + { + m_xsw.writeComment(data); + } + + @Override + public void writeProcessingInstruction(String target) + throws XMLStreamException + { + m_xsw.writeProcessingInstruction(target); + } + + @Override + public void writeProcessingInstruction(String target, String data) + throws XMLStreamException + { + m_xsw.writeProcessingInstruction(target, data); + } + + @Override + public void writeCData(String data) throws XMLStreamException + { + m_xsw.writeCData(data); + } + + @Override + public void writeDTD(String dtd) throws XMLStreamException + { + m_xsw.writeDTD(dtd); + } + + @Override + public void writeEntityRef(String name) throws XMLStreamException + { + m_xsw.writeEntityRef(name); + } + + @Override + public void writeStartDocument() throws XMLStreamException + { + m_xsw.writeStartDocument(); + } + + @Override + public void writeStartDocument(String version) throws XMLStreamException + { + m_xsw.writeStartDocument(version); + } + + @Override + public void writeStartDocument(String encoding, String version) + throws XMLStreamException + { + m_xsw.writeStartDocument(encoding, version); + } + + @Override + public void writeCharacters(String text) throws XMLStreamException + { + m_xsw.writeCharacters(text); + } + + @Override + public void writeCharacters(char[] text, int start, int len) + throws XMLStreamException + { + m_xsw.writeCharacters(text, start, len); + } + + @Override + public String getPrefix(String uri) throws XMLStreamException + { + return m_xsw.getPrefix(uri); + } + + @Override + public void setPrefix(String prefix, String uri) + throws XMLStreamException + { + m_xsw.setPrefix(prefix, uri); + } + + @Override + public void setDefaultNamespace(String uri) throws XMLStreamException + { + m_xsw.setDefaultNamespace(uri); + } + + @Override + public void setNamespaceContext(NamespaceContext context) + throws XMLStreamException + { + m_xsw.setNamespaceContext(context); + } + + @Override + public NamespaceContext getNamespaceContext() + { + return m_xsw.getNamespaceContext(); + } + + @Override + public 
Object getProperty(String name) throws IllegalArgumentException + { + return m_xsw.getProperty(name); + } + } + + /** + * Accumulate whitespace at top level (outside any element) pending + * determination of what to do with it. + *

      + * The handling of whitespace at the top level is a subtle business. Per the + * XML spec "Character Data and Markup" section (in either spec version), + * whitespace is considered, when at the top level, "markup" rather than + * "character data". And the section on "White Space Handling" spells out + * that an XML processor "MUST always pass all characters in a document that + * are not markup through to the application." A sharp-eyed language lawyer + * will see right away that whitespace at the top level does not fall + * under that mandate. (It took me longer.) Indeed, a bit of experimenting + * with a SAX parser will show that it doesn't invoke any handler callbacks + * at all for whitespace at the top level. The whitespace could as well not + * even be there. Some applications rely on that and will report an error if + * the parser shows them any whitespace outside the element they expect. + *
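That behavior is easy to confirm experimentally. A small probe along these lines (the literal document is arbitrary) typically shows the JRE-bundled parser reporting character events only for text inside the root element, and nothing at all for the surrounding whitespace:

    import java.io.StringReader;
    import javax.xml.parsers.SAXParserFactory;
    import org.xml.sax.InputSource;
    import org.xml.sax.helpers.DefaultHandler;

    class TopLevelWhitespaceProbe
    {
        public static void main(String[] args) throws Exception
        {
            String doc = "<!-- misc -->\n  \n<a> inner </a>\n\t\n";
            SAXParserFactory.newInstance().newSAXParser().parse(
                new InputSource(new StringReader(doc)),
                new DefaultHandler()
                {
                    @Override
                    public void characters(char[] ch, int start, int length)
                    {
                        System.out.println(
                            "characters: \"" + new String(ch, start, length) + '"');
                    }
                });
            // Typically prints only:  characters: " inner "
            // The whitespace outside <a> produces no callback at all.
        }
    }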

      + * Our application of a wrapping element, to avoid parse errors for the + * {@code XML(CONTENT)} form, alters the treatment of whitespace that would + * otherwise have been at the top level. As it will now be inside of + * an element, Java's parser will want to pass it on, and our unwrap filter + * will have to fix that. + *

      + * Complicating matters, our determination whether to apply a wrapping + * element is lazy. It looks only far enough into the start of the stream + * to conclude one of: (1) it is definitely {@code XML(DOCUMENT)}, + * (2) it is definitely {@code XML(CONTENT)}, or (3) it could be either and + * has to be wrapped in case it turns out to be {@code XML(CONTENT)}. + *

      + * The first two cases are simple. In case (1), we apply no wrapping and + * no filter, and the underlying parser does the right thing. In case (2) + * we know this is not a document, and no whitespace should be filtered out. + *

      + * Case (3) is the tricky one, and as long as PostgreSQL does not store any + * {@code DOCUMENT}/{@code CONTENT} flag with the value and we have no API + * for the application to say what's expected, unless we are willing to + * pre-parse what could end up being the whole stream just to decide how + * to parse it, we'll have to settle for an approximate behavior. + *

      + * What's implemented here is to handle character data reported by the + * parser, if it is at "top level" (within our added wrapping element), by + * accumulating any whitespace here until we see what comes next. + *

      + * This must be applied above the parser (that is, to character events that + * the parser reports), because it applies the XML definition of whitespace, + * which includes only the four characters " \t\n\r" but recognizes them + * after the parser has normalized various newline styles to '\n'. + * The exact set of those newline styles depends on the XML version, and the + * XML 1.1 set includes non-ASCII characters, so recognizing them + * depends on the parser's knowledge of the input stream encoding. + *

      + * If the character data includes anything other than whitespace, we emit + * it intact including the whitespace, and note that the input is now known + * to be {@code CONTENT} and gets no more special whitespace treatment. + *

      + * If all whitespace, and followed by the end of input or by an element that + * is not the first one to be seen, we emit it intact and turn off + * special whitespace handling for the remainder of the stream (if any). + *

      + * If all whitespace, and followed by a comment, PI, or the first element + * to be seen, it is discarded. + *

      + * This strategy will produce correct results for any case (3) input that + * turns out to be {@code XML(DOCUMENT)}. In the case of input that turns + * out to be {@code XML(CONTENT)}, it can fail to preserve whitespace ahead + * of the first point where the input is definitely known to be + * {@code CONTENT}. + *

      + * That may be good enough for many cases. To cover those where it isn't, + * it may be necessary to offer a nonstandard API to specify what the + * application expects, or observe the PostgreSQL {@code XMLOPTION} setting + * in case 3, or both. + */ + static class WhitespaceAccumulator + { + /** + * A Pattern to walk through some character data in runs of the same + * whitespace character, allowing a rudimentary run-length encoding. + */ + static final Pattern s_wsChunk = Pattern.compile( + "\\G([ \\t\\n\\r])\\1*+(?![^ \\t\\n\\r])"); + + private static final char[] s_runValToChar = {' ', '\t', '\n', '\r'}; + static final int MAX_RUN = 1 + (0xff >>> 2); + + byte[] m_rleBuffer = new byte [ 8 ]; + int m_bufPos = 0; + int m_disbursePos = 0; + Matcher m_matcher = s_wsChunk.matcher(""); + + /** + * Given an array with reported character data, return -1 if exclusively + * whitespace characters were seen and have been added to the + * accumulator; otherwise return an index into the input array from + * which the caller should emit the tail unprocessed after using + * {@link #disburse disburse} here to emit any earlier-accumulated + * whitespace. + *

      + * Java's XML parsing APIs generally do not promise to supply all + * characters of contiguous text in one parse event, so this method + * may be called more than once accumulating whitespace from several + * consecutive events. + */ + int accumulate(char[] content, int start, int length) + { + CharBuffer cb = CharBuffer.wrap(content, start, length); + int tailPos = 0; + + m_matcher.reset(cb); + while ( m_matcher.find() ) + { + tailPos = m_matcher.end(); + char c = m_matcher.group(1).charAt(0); + int runVal = (c & 3) | (c >>> 1 & 2); // index in s_runValToChar + int runLength = tailPos - m_matcher.start(); + + newRun(); + while ( runLength > MAX_RUN ) + { + m_rleBuffer [m_bufPos - 1] = + (byte)(runVal | (MAX_RUN - 1) << 2); + runLength -= MAX_RUN; + newRun(); + } + m_rleBuffer [m_bufPos - 1] = + (byte)(runVal | (runLength - 1) << 2); + } + + m_matcher.reset(""); // don't hold a reference to caller's array + if ( tailPos == length ) + return -1; + return start + tailPos; + } + + private final void newRun() + { + ++ m_bufPos; + if ( m_rleBuffer.length == m_bufPos ) + m_rleBuffer = Arrays.copyOf(m_rleBuffer, 2*m_rleBuffer.length); + } + + /** + * Retrieve the accumulated whitespace if it is not to be discarded. + *
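To make the packing done in accumulate above (and unpacked in disburse below) concrete: each run occupies one byte, with the low two bits selecting one of the four XML whitespace characters (the expression (c & 3) | (c >>> 1 & 2) happens to map ' ', '\t', '\n', '\r' onto 0..3) and the upper six bits holding the run length minus one, which is where MAX_RUN of 64 comes from. A self-contained round-trip check of that arithmetic:

    class RlePackingSketch
    {
        static final char[] RUN_VAL_TO_CHAR = { ' ', '\t', '\n', '\r' };
        static final int MAX_RUN = 1 + (0xff >>> 2);                // 64

        static int  runVal(char c)       { return (c & 3) | (c >>> 1 & 2); }
        static byte pack(char c, int runLength)                     // 1..MAX_RUN
        {
            return (byte)(runVal(c) | (runLength - 1) << 2);
        }
        static char packedChar(byte b)   { return RUN_VAL_TO_CHAR[b & 3]; }
        static int  packedLength(byte b) { return 1 + ((b & 0xff) >>> 2); }

        public static void main(String[] args)
        {
            for ( char c : RUN_VAL_TO_CHAR )
                for ( int len = 1; len <= MAX_RUN; ++len )
                {
                    byte b = pack(c, len);
                    if ( c != packedChar(b) || len != packedLength(b) )
                        throw new AssertionError(c + "/" + len);
                }
            System.out.println(
                "all " + 4 * MAX_RUN + " (char, length) pairs round-trip");
        }
    }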

      + * If the caller detects that the whitespace is significant (either + * because {@link #accumulate accumulate} returned a nonnegative result + * or because the next parse event was a second top-level element or + * the end-event of the wrapping element), the caller should allocate + * a {@code char} array of length at least {@code MAX_RUN} and supply it + * to this method until zero is returned; for each non-zero value + * returned, that many {@code char}s at the head of the array should be + * passed to the application as a character event. + *

      + * After this method has returned zero, if the caller had received a + * non-negative result from {@code accumulate}, it should present one + * more character event to the application, containing the tail of + * the array that was given to {@code accumulate}, starting at the index + * {@code accumulate} returned. + */ + int disburse(char[] into) + { + assert into.length >= MAX_RUN; + if ( m_disbursePos == m_bufPos ) + { + m_bufPos = m_disbursePos = 0; + return 0; + } + int runVal = m_rleBuffer [ m_disbursePos ] & 3; + int runLength = 1 + ((m_rleBuffer [ m_disbursePos ] & 0xff) >>> 2); + ++ m_disbursePos; + char c = s_runValToChar [ runVal ]; + Arrays.fill(into, 0, runLength, c); + return runLength; + } + + /** + * Discard the accumulated whitespace. + *

      + * This should be called if some whitespace was successfully accumulated + * ({@code accumulate} returned -1) but the following parse event is one + * that must be passed to the application and does not force the input + * to be classified as {@code XML(CONTENT)}. + */ + void discard() + { + m_bufPos = m_disbursePos = 0; + } + } + + /** + * A class to parse and, if necessary, check or correct, the + * possibly-erroneous XMLDecl or TextDecl syntax found in the stored form + * of PG's XML datatype. + *

      + * This implementation depends heavily on the (currently dependable) fact + * that, in all PG-supported server encodings, the characters that matter + * for decls are encoded as in ASCII. + */ + static class DeclProbe + { + /* + * In Python 3, they've achieved a very nice symmetry where they provide + * regular expressions with comparable functionality for both character + * streams and byte streams. Will Java ever follow suit? It's 2018, I + * can ask my phone spoken questions, and I'm writing a DFA by hand. + */ + private static enum State + { + START, + MAYBEVER, + VER, VEQ, VQ, VVAL, VVALTAIL, + MAYBEENC, + ENC, EEQ, EQ, EVAL, EVALTAIL, + MAYBESA, + SA , SEQ, SQ, SVAL, SVALTAIL, + TRAILING, END, MATCHED, UNMATCHED, UNMATCHEDCHAR, ABANDONED + }; + private State m_state = State.START; + private int m_idx = 0; + private byte m_q = 0; + private ByteArrayOutputStream m_save = new ByteArrayOutputStream(); + private static final byte[] s_tpl = { + '<', '?', 'x', 'm', 'l', 0, // 0 - 5 + 'v', 'e', 'r', 's', 'i', 'o', 'n', 0, // 6 - 13 + '1', '.', 0, // 14 - 16 + 'e', 'n', 'c', 'o', 'd', 'i', 'n', 'g', 0, // 17 - 25 + 's', 't', 'a', 'n', 'd', 'a', 'l', 'o', 'n', 'e', 0, // 26 - 36 + 'y', 'e', 's', 0, // 37 - 40 + 'n', 'o', 0 // 41 - 43 + }; + + private boolean m_saving = true; + private int m_pos = 0; + private boolean m_mustBeDecl = false; + private boolean m_mustBeXmlDecl = false; + private boolean m_mustBeTextDecl = false; + private boolean m_xml1_0 = false; + private Boolean m_standalone = null; + + private int m_versionStart, m_versionEnd; + private int m_encodingStart, m_encodingEnd; + private int m_readaheadStart; + /* + * Contains, if m_state is UNMATCHEDCHAR, a single, non-ASCII char that + * followed whatever ASCII bytes may be saved in m_save. + */ + private char m_savedChar; + + /** + * Parse for an initial declaration (XMLDecl or TextDecl) in a stream + * made available a byte at a time. + *

      + * Pass bytes in as long as this method keeps returning {@code true}; + * once it returns {@code false}, it has either parsed a declaration + * successfully or determined none to be present. The results of parsing + * are remembered in the instance and available to the + * {@link #prefix prefix()} method to generate a suitable decl with the + * encoding corrected as needed. + *
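A sketch of that calling protocol, assuming code with access to this nested class (the simple name DeclProbe is used here for brevity) and a hypothetical InputStream positioned at the start of the stored value; only methods of this class (take, finish, checkEncoding, prefix) are invoked.

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.Charset;
    import java.sql.SQLException;

    class DeclProbeUsageSketch
    {
        /** Returns the (possibly rewritten) decl plus any readahead from 'in'. */
        static byte[] probeDecl(InputStream in, Charset serverCharset)
            throws IOException, SQLException
        {
            DeclProbe probe = new DeclProbe();
            int b;
            while ( -1 != (b = in.read()) && probe.take((byte)b) )
                ;                              // feed bytes until a determination is made
            probe.finish();                    // reports a declaration cut off mid-way
            probe.checkEncoding(serverCharset, false); // declared encoding must match
            return probe.prefix(serverCharset);        // decl (if needed) plus readahead
        }
    }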

      + * It is not an error to pass some more bytes after the method has + * returned {@code false}; they will simply be buffered as readahead + * and included in the result of {@code prefix()}. If no decl + * was found, the readahead will include all bytes passed in. If a + * partial or malformed decl was found, an exception is thrown. + * @param b The next byte of the stream. + * @return True if more input is needed to fully parse a decl or be sure + * that none is present; false when enough input has been seen. + * @throws SQLDataException If a partial or malformed decl is found. + */ + boolean take(byte b) throws SQLException + { + if ( m_saving ) + { + m_save.write(b); + ++ m_pos; + } + byte tpl = s_tpl[m_idx]; + switch ( m_state ) + { + case START: + if ( 0 == tpl && isSpace(b) ) + { + m_mustBeDecl = true; + m_saving = false; + m_state = State.MAYBEVER; + return true; + } + if ( tpl != b ) + { + m_state = State.UNMATCHED; + return false; + } + ++ m_idx; + return true; + case MAYBEVER: + if ( isSpace(b) ) + return true; + switch ( b ) + { + case 'v': + m_state = State.VER; + m_idx = 7; + return true; + case 'e': + m_mustBeTextDecl = true; + m_state = State.ENC; + m_idx = 18; + return true; + default: + } + break; + case VER: + if ( 0 == tpl ) + { + if ( isSpace(b) ) + { + m_state = State.VEQ; + return true; + } + if ( '=' == b ) + { + m_state = State.VQ; + return true; + } + } + if ( tpl != b ) + break; + ++ m_idx; + return true; + case VEQ: + if ( isSpace(b) ) + return true; + if ( '=' != b ) + break; + m_state = State.VQ; + return true; + case VQ: + if ( isSpace(b) ) + return true; + if ( '\'' != b && '"' != b) + break; + m_q = b; + m_state = State.VVAL; + m_idx = 14; + m_saving = true; + m_versionStart = m_pos; + return true; + case VVAL: + if ( 0 == tpl ) + { + if ( '0' > b || b > '9' ) + break; + if ( '0' == b ) + m_xml1_0 = true; + m_state = State.VVALTAIL; + return true; + } + if ( tpl != b ) + break; + ++ m_idx; + return true; + case VVALTAIL: + if ( '0' <= b && b <= '9' ) + { + m_xml1_0 = false; + return true; + } + if ( m_q != b ) + break; + m_state = State.MAYBEENC; + m_saving = false; + m_versionEnd = m_pos - 1; + return true; + case MAYBEENC: + if ( isSpace(b) ) + return true; + if ( 'e' == b ) + { + m_state = State.ENC; + m_idx = 18; + return true; + } + if ( m_mustBeTextDecl ) + break; + m_mustBeXmlDecl = true; + if ( 's' == b ) + { + m_state = State.SA; + m_idx = 27; + return true; + } + if ( '?' != b ) + break; + m_state = State.END; + return true; + case ENC: + if ( 0 == tpl ) + { + if ( isSpace(b) ) + { + m_state = State.EEQ; + return true; + } + if ( '=' == b ) + { + m_state = State.EQ; + return true; + } + } + if ( tpl != b ) + break; + ++ m_idx; + return true; + case EEQ: + if ( isSpace(b) ) + return true; + if ( '=' != b ) + break; + m_state = State.EQ; + return true; + case EQ: + if ( isSpace(b) ) + return true; + if ( '\'' != b && '"' != b) + break; + m_q = b; + m_state = State.EVAL; + m_saving = true; + m_encodingStart = m_pos; + return true; + case EVAL: + if ( ( 'A' > b || b > 'Z' ) && ( 'a' > b || b > 'z' ) ) + break; + m_state = State.EVALTAIL; + return true; + case EVALTAIL: + if ( ( 'A' <= b && b <= 'Z' ) || ( 'a' <= b && b <= 'z' ) || + ( '0' <= b && b <= '9' ) || ( '.' == b ) || ( '_' == b ) || + ( '-' == b ) ) + return true; + if ( m_q != b ) + break; + m_state = m_mustBeTextDecl ? 
State.TRAILING : State.MAYBESA; + m_saving = false; + m_encodingEnd = m_pos - 1; + return true; + case MAYBESA: + if ( isSpace(b) ) + return true; + switch ( b ) + { + case 's': + m_mustBeXmlDecl = true; + m_state = State.SA; + m_idx = 27; + return true; + case '?': + m_state = State.END; + return true; + default: + } + break; + case SA: + if ( 0 == tpl ) + { + if ( isSpace(b) ) + { + m_state = State.SEQ; + return true; + } + if ( '=' == b ) + { + m_state = State.SQ; + return true; + } + } + if ( tpl != b ) + break; + ++ m_idx; + return true; + case SEQ: + if ( isSpace(b) ) + return true; + if ( '=' != b ) + break; + m_state = State.SQ; + return true; + case SQ: + if ( isSpace(b) ) + return true; + if ( '\'' != b && '"' != b) + break; + m_q = b; + m_state = State.SVAL; + return true; + case SVAL: + if ( 'y' == b ) + { + m_idx = 38; + m_standalone = Boolean.TRUE; + } + else if ( 'n' == b ) + { + m_idx = 42; + m_standalone = Boolean.FALSE; + } + else + break; + m_state = State.SVALTAIL; + return true; + case SVALTAIL: + if ( 0 == tpl ) + { + if ( m_q != b ) + break; + m_state = State.TRAILING; + return true; + } + if ( tpl != b ) + break; + ++ m_idx; + return true; + case TRAILING: + if ( isSpace(b) ) + return true; + if ( '?' != b ) + break; + m_state = State.END; + return true; + case END: + if ( '>' != b ) + break; + m_state = State.MATCHED; + m_readaheadStart = m_pos; + m_saving = true; + return false; + case MATCHED: // no more input needed for a determination; + case UNMATCHED: // whatever more is provided, just buffer it + return false; // as readahead + case UNMATCHEDCHAR: // can't happen; fall into ABANDONED if it does + case ABANDONED: + } + m_state = State.ABANDONED; + String m = "Invalid XML/Text declaration"; + if ( m_mustBeXmlDecl ) + m = "Invalid XML declaration"; + else if ( m_mustBeTextDecl ) + m = "Invalid text declaration"; + throw new SQLDataException(m, "2200N"); + } + + /** + * Version of {@link take(byte)} for use when input is coming from a + * character stream. + *

      + * Exploits (again) the assumption that in all encodings of interest, + * the characters in a decl will have the values they have in ASCII, and + * the fact that ASCII characters are all encoded in the low 7 bits + * of chars. + *

      + * Unlike {@link take(byte)}, this method will not accept further input + * after it has returned {@code false} once. A caller should not mix + * calls to this method and {@link take(byte)}. + * @param c The next char of the stream. + * @return True if more input is needed to fully parse a decl or be sure + * that none is present; false when enough input has been seen. + * @throws SQLDataException If a partial or malformed decl is found. + * @throws IllegalStateException if called again after returning false. + */ + boolean take(char c) throws SQLException + { + byte b = (byte)(c & 0x7f); + switch ( m_state ) + { + case START: + if ( b == c ) + return take(b); + m_savedChar = c; + m_state = State.UNMATCHEDCHAR; + return false; + case ABANDONED: + case MATCHED: + case UNMATCHED: + case UNMATCHEDCHAR: + throw new IllegalStateException("too many take(char) calls"); + default: + if ( b == c ) + return take(b); + } + return take((byte)-1); // will throw appropriate SQLDataException + } + + private boolean isSpace(byte b) + { + return (0x20 == b) || (0x09 == b) || (0x0D == b) || (0x0A == b); + } + + /** + * Call after the last call to {@code take} before examining results. + */ + void finish() throws SQLException + { + switch ( m_state ) + { + case ABANDONED: + case MATCHED: + case UNMATCHED: + case UNMATCHEDCHAR: + return; + case START: + if ( 0 == m_idx ) + { + m_state = State.UNMATCHED; + return; + } + /* FALLTHROUGH */ + default: + } + throw new SQLDataException( + "XML begins with an incomplete declaration", "2200N"); + } + + /** + * Generate a declaration, if necessary, with the XML version and + * standalone status determined in declaration parsing and the name of + * the server encoding, followed always by any readahead buffered during + * a nonmatching parse or following a matching one. + * @param serverCharset The encoding to be named in the declaration if + * one is generated (which is forced if the encoding isn't UTF-8). + * Pass null to force the omission of any encoding declaration; this is + * needed when writing to the native PG XML datatype, as some PG native + * functions such as IS DOCUMENT can misbehave if the declaration is + * present (even if it correctly matches the server encoding). + * @return A byte array representing the declaration if any, followed + * by any readahead. + */ + byte[] prefix(Charset serverCharset) throws IOException + { + /* + * Will this be DOCUMENT or CONTENT ? + * Without some out-of-band indication, we just don't know yet. + * For now, get DOCUMENT working. + */ + // boolean mightBeDocument = true; + // boolean mightBeContent = true; + + /* + * Defaults for when no declaration was matched: + */ + boolean canOmitVersion = true; // no declaration => 1.0 + byte[] version = new byte[] { '1', '.', '0' }; + boolean canOmitEncoding = + null == serverCharset || "UTF-8".equals(serverCharset.name()); + boolean canOmitStandalone = true; + + byte[] parseResult = m_save.toByteArray(); + + if ( State.MATCHED == m_state ) + { + /* + * Parsing the decl could have turned up a non-1.0 XML version, + * which would mean we can't neglect to declare it, As for any + * encoding found in the varlena, the value doesn't matter (PG + * always uses the server encoding, whatever the stored decl + * might say). 
Its presence or absence in the decl can influence + * m_mustBeXmlDecl: the grammar productions XMLDecl and TextDecl + * are slightly different, which in a better world could help + * distinguish DOCUMENT from CONTENT, but PG doesn't preserve + * the distinction, instead always omitting the encoding in + * xml_out, which only XMLDecl can match. This code isn't + * reading from xml_out ... but if the value has ever been put + * through PG expressions that involved casting, xml_out may + * have eaten the encoding at that time. + * So, for now, all that can be done here is refinement of + * canOmitVersion and canOmitStandalone. Also, PG's hand-laid + * parse_xml_decl always insists on the version being present, + * so if we produce a decl at all, it had better not have either + * the version or the encoding omitted. + */ + canOmitVersion = m_xml1_0; // && ! m_mustBeXmlDecl; + // canOmitEncoding &&= ! m_mustBeTextDecl; + canOmitStandalone = null == m_standalone; + if ( ! m_xml1_0 && m_versionEnd > m_versionStart ) + version = Arrays.copyOfRange(parseResult, + m_versionStart, m_versionEnd); + } + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + + if ( ! ( canOmitVersion && canOmitEncoding && canOmitStandalone ) ) + { + baos.write(s_tpl, 0, 5); // '); + } + + baos.write(parseResult, + m_readaheadStart, parseResult.length - m_readaheadStart); + + return baos.toByteArray(); + } + + char[] charPrefix(Charset serverCharset) throws IOException + { + byte[] bpfx = prefix(serverCharset); + char[] cpfx = new char [ + bpfx.length + (State.UNMATCHEDCHAR == m_state ? 1 : 0) ]; + int i = 0; + /* + * Again the assumption that all supported encodings will match + * ASCII for the characters of the decl. + */ + for ( byte b : bpfx ) + cpfx [ i++ ] = (char)(b&0x7f); + if ( i < cpfx.length ) + cpfx [ i ] = m_savedChar; + return cpfx; + } + + /** + * Return the number of bytes at the end of the {@code prefix} result + * that represent readahead, rather than being part of the decl. + */ + int readaheadLength() + { + /* + * If the probing was done as chars, because of the more restrictive + * behavior of take(char), the readahead length can be exactly one, + * only if the state is UNMATCHEDCHAR, and will otherwise be zero. + */ + if ( State.UNMATCHEDCHAR == m_state ) + return 1; + return m_save.size() - m_readaheadStart; + } + + /** + * Throw an exception if a decl was matched and specified an encoding + * that isn't the server encoding, or if a decl was malformed, or if + * strict is specified, no encoding was declared, and the server + * encoding is not UTF-8. + * @param serverCharset The encoding used by the server; any encoding + * specified in the stream must resolve (possibly as an alias) to this + * encoding. + * @param strict if true, a decl may only be absent, or lack encoding + * information, if the server charset is UTF-8. If false, the check + * passes regardless of server encoding if the stream contains no decl + * or the decl does not declare an encoding. + */ + void checkEncoding(Charset serverCharset, boolean strict) + throws SQLException + { + if ( State.MATCHED == m_state ) + { + if ( m_encodingEnd > m_encodingStart ) + { + byte[] parseResult = m_save.toByteArray(); + /* + * The assumption that the serverCharset can be used in + * constructing this String rests again on all supported + * server charsets matching on the characters used in decls. 
+ */ + String encName = new String(parseResult, + m_encodingStart, m_encodingEnd - m_encodingStart, + serverCharset); + try + { + Charset cs = Charset.forName(encName); + if ( serverCharset.equals(cs) ) + return; + } + catch ( IllegalArgumentException iae ) { } + throw new SQLDataException( + "XML declares character set \"" + encName + + "\" which does not match server encoding", "2200N"); + } + } + + if ( ! strict || "UTF-8".equals(serverCharset.name()) ) + return; + throw new SQLDataException( + "XML does not declare a character set, and server encoding " + + "is not UTF-8", "2200N"); + } + + String queryEncoding() throws SQLException + { + if ( State.MATCHED == m_state ) + { + if ( m_encodingEnd <= m_encodingStart ) + return null; + byte[] parseResult = m_save.toByteArray(); + return new String(parseResult, + m_encodingStart, m_encodingEnd - m_encodingStart, + US_ASCII); + } + return null; + } + } + + /** + * Encapsulation of how to copy from one {@code SQLXML} to another. + *

      + * In the case of a source {@code SQLXML} object that prefers to present its + * content as a {@code StreamSource}, obtain an instance with + * {@code copierFor}, passing the target {@code SQLXML} instance, the server + * character set and the encoding name peeked from any declaration at the + * front of the source stream. Then supply the {@code DeclProbe} object + * representing the peeked initial content, and the {@code InputStream} or + * {@code Reader} representing the rest of the source content, to the + * appropriate {@code prepare} method. The copy is completed by calling + * {@link #finish finish}. + *

      + * Between {@code prepare} and {@code finish}, parser restrictions can be + * adjusted if needed, using the {@link Adjusting.XML.Source} API on the + * object returned by {@link #getAdjustable getAdjustable}. + *
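As a rough sketch of that flow, using the names declared in this patch (sourceStream, target, and serverCharset stand in for values the caller already holds; exception handling is omitted):

    DeclProbe probe = new DeclProbe();
    int b;
    while ( -1 != (b = sourceStream.read()) )   // peek at any leading XML declaration
        if ( ! probe.take((byte)b) )
            break;
    probe.finish();                             // required before examining the results

    XMLCopier.Stream copier = XMLCopier.copierFor(
        target, serverCharset, probe.queryEncoding());
    copier.prepare(probe, sourceStream);        // the rest of the input follows the probe
    copier.getAdjustable().defaults();          // parser restrictions, only if needed
    Writable result = copier.finish();          // completes the copy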

      + * For the cases of {@code SQLXML} objects that present their content as + * {@code SAXSource}, {@code StAXSource}, or {@code DOMSource}, there are + * no {@code prepare} methods, and {@code getAdjustable} returns a dummy + * object that doesn't adjust anything. When the source presents XML content + * in already-parsed form, there are no parser restrictions to adjust. + */ + static abstract class XMLCopier + { + protected Writable m_tgt; + + protected XMLCopier(Writable tgt) + { + m_tgt = tgt; + } + + Adjusting.XML.Source getAdjustable() + { + return AdjustingSAXSource.Dummy.INSTANCE; + } + + static abstract class Stream extends XMLCopier + { + protected Adjusting.XML.Source m_adjustable; + + protected Stream(Writable tgt) + { + super(tgt); + } + + @Override + Adjusting.XML.Source getAdjustable() + { + return m_adjustable; + } + + abstract XMLCopier prepare(DeclProbe probe, InputStream is) + throws IOException, SQLException; + + abstract XMLCopier prepare(DeclProbe probe, Reader r) + throws IOException, SQLException; + } + + /** + * Return an {@code XMLCopier} that can copy a stream source that + * declares an encoding name srcCSName to a target whose + * character set is tgtCS (which is here strongly assumed to + * be the PostgreSQL server charset, so will not need to be remembered + * in the created {@code XMLStreamCopier}). + */ + static Stream copierFor( + Writable tgt, Charset tgtCS, String srcCSName) + throws SQLException + { + if ( null == srcCSName ) + srcCSName = "UTF-8"; + + if ( tgtCS.name().equalsIgnoreCase(srcCSName) ) + return new Direct(tgt); + + Charset srcCS; + try + { + srcCS = Charset.forName(srcCSName); + } + catch ( IllegalArgumentException e ) + { + throw new SQLDataException( + "XML declares unsupported encoding \"" + srcCSName + "\"", + "2200N"); + } + if ( tgtCS.equals(srcCS) ) + return new Direct(tgt); + if ( tgtCS.contains(srcCS) ) + return new Transcoding(tgt, srcCS); + return new Transforming(tgt, srcCS); + } + + abstract Writable finish() throws IOException, SQLException; + + /** + * Copier usable when source and target encodings are the same. + */ + static class Direct extends Stream + { + /* Exactly one of m_is, m_rdr must be non-null */ + private InputStream m_is; + private Reader m_rdr; + private DeclProbe m_probe; + private AdjustingStreamResult m_asr; + + protected Direct(Writable tgt) + { + super(tgt); + } + + @Override + XMLCopier prepare(DeclProbe probe, InputStream is) + throws SQLException + { + m_is = is; + return prepare(probe, (Reader)null); + } + + @Override + XMLCopier prepare(DeclProbe probe, Reader r) + throws SQLException + { + m_rdr = r; + m_probe = probe; + m_asr = m_tgt.setResult( + m_tgt.backingIfNotFreed(), + AdjustingStreamResult.class); + m_adjustable = m_asr.theVerifierSource(false); + return this; + } + + @Override + Writable finish() throws IOException, SQLException + { + if ( null != m_is ) + { + OutputStream os = + m_asr.preferBinaryStream().get().getOutputStream(); + os.write(m_probe.prefix(null)); + byte[] b = new byte [ 8192 ]; + int got; + while ( -1 != (got = m_is.read(b)) ) + os.write(b, 0, got); + m_is.close(); + os.close(); + } + else + { + Writer w = m_asr.preferCharacterStream().get().getWriter(); + w.write(m_probe.charPrefix(null)); + char[] b = new char [ 8192 ]; + int got; + while ( -1 != (got = m_rdr.read(b)) ) + w.write(b, 0, got); + m_rdr.close(); + w.close(); + } + return m_tgt; + } + } + + /** + * Copier usable when source charset is contained in the target charset. + *
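The containment test that copierFor (above) applies, tgtCS.contains(srcCS), is simply java.nio.charset.Charset.contains; a trivial illustration using plain JDK API, not code from this patch:

    import static java.nio.charset.StandardCharsets.US_ASCII;
    import static java.nio.charset.StandardCharsets.UTF_8;

    boolean transcodeSuffices = UTF_8.contains(US_ASCII);    // true: every ASCII char is representable
    boolean mustTransform     = ! US_ASCII.contains(UTF_8);  // true: some chars would need escaping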

      + * Charset containment doesn't guarantee encoding equivalence, so the + * stream may have to be transcoded, but there won't be any characters + * unrepresentable in the target encoding that need to be escaped. If + * the source presented a character stream, it is handled just as for + * {@code Direct}; if a binary stream, it is wrapped as a character + * stream and then handled the same way. + */ + static class Transcoding extends Direct + { + private Charset m_srcCS; + + Transcoding(Writable tgt, Charset srcCS) + { + super(tgt); + m_srcCS = srcCS; + } + + @Override + XMLCopier prepare(DeclProbe probe, InputStream is) + throws SQLException + { + return prepare(probe, new InputStreamReader(is, m_srcCS)); + } + } + + /** + * Copier usable when source charset may not be contained in the target + * charset. + *
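As the next paragraph explains, this case has to go through a real parse and re-serialization; the escaping it relies on can be seen with nothing more than the JDK's identity transformer (illustrative only, exception handling omitted):

    import java.io.*;
    import javax.xml.transform.*;
    import javax.xml.transform.stream.*;

    Transformer t = TransformerFactory.newInstance().newTransformer();
    t.setOutputProperty(OutputKeys.ENCODING, "US-ASCII");
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    t.transform(
        new StreamSource(new StringReader("<d>\u00e9</d>")),
        new StreamResult(out));
    // U+00E9 cannot be encoded in US-ASCII, so the serializer emits a numeric
    // character reference such as &#233; in its place.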

      + * The stream has to be parsed and serialized so that any characters + * not representable in the target encoding can be serialized as the + * XML character references. + */ + static class Transforming extends Stream + { + private Charset m_srcCS; + + Transforming(Writable tgt, Charset srcCS) + { + super(tgt); + m_srcCS = srcCS; + } + + @Override + XMLCopier prepare(DeclProbe probe, InputStream is) + throws IOException, SQLException + { + try + { + boolean[] wrapping = new boolean[] { false }; + is = correctedDeclStream( + is, probe, /* neverWrap */ false, m_srcCS, wrapping); + m_adjustable = /* again without defaults() */ + new AdjustingSAXSource(new InputSource(is),wrapping[0]); + } + catch ( SAXException e ) + { + throw normalizedException(e); + } + return this; + } + + @Override + XMLCopier prepare(DeclProbe probe, Reader r) + throws IOException, SQLException + { + try + { + boolean[] wrapping = new boolean[] { false }; + r = correctedDeclReader(r, probe, m_srcCS, wrapping); + m_adjustable = + new AdjustingSAXSource(new InputSource(r), wrapping[0]); + } + catch ( SAXException e ) + { + throw normalizedException(e); + } + return this; + } + + @Override + Writable finish() throws IOException, SQLException + { + saxCopy(m_adjustable.get(), + m_tgt.setResult( + m_tgt.backingIfNotFreed(), SAXResult.class)); + return m_tgt; + } + } + + /** + * Copy from a {@code SAXSource} to a {@code SAXResult}, provided the + * {@code SAXSource} supplies its own {@code XMLReader}. + *

      + * See {@code XMLCopier.SAX.Parsing} for when it does not. + */ + static void saxCopy(SAXSource sxs, SAXResult sxr) throws SQLException + { + XMLReader xr = sxs.getXMLReader(); + try + { + ContentHandler ch = sxr.getHandler(); + xr.setContentHandler(ch); + if ( ch instanceof DTDHandler ) + xr.setDTDHandler((DTDHandler)ch); + LexicalHandler lh = sxr.getLexicalHandler(); + if ( null == lh && ch instanceof LexicalHandler ) + lh = (LexicalHandler)ch; + if ( null != lh ) + xr.setProperty( + SAX2PROPERTY.LEXICAL_HANDLER.propertyUri(), lh); + xr.parse(sxs.getInputSource()); + } + /* + * If changing these wrapping conventions, change them also in + * AdjustingDOMSource.get() + */ + catch ( SAXException e ) + { + throw new SQLDataException(e.getMessage(), "22000", e); + } + catch ( IOException e ) + { + throw new SQLException(e.getMessage(), "58030", e); + } + } + + /** + * Copier for a {@code SAXSource} that supplies its own non-null + * {@code XMLReader}. + */ + static class SAX extends XMLCopier + { + private SAXSource m_source; + + SAX(Writable tgt, SAXSource src) + { + super(tgt); + m_source = src; + } + + @Override + Writable finish() throws IOException, SQLException + { + saxCopy(m_source, + m_tgt.setResult( + m_tgt.backingIfNotFreed(), SAXResult.class)); + return m_tgt; + } + + /** + * Copier for a {@code SAXSource} that does not supply its own + * {@code XMLReader}. + *
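Such a source is easy to come by with the standard API; for example (not code from this patch), the following SAXSource carries only an InputSource, so a parser has to be supplied for it:

    import java.io.StringReader;
    import javax.xml.transform.sax.SAXSource;
    import org.xml.sax.InputSource;

    SAXSource src = new SAXSource(new InputSource(new StringReader("<doc/>")));
    assert null == src.getXMLReader();   // no reader supplied; the Parsing copier applies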

      + * Such a source needs a parser constructed here, which may, like + * any parser, require adjustment. Such a source is effectively a + * stream source snuck in through the SAX API. + */ + static class Parsing extends XMLCopier + { + private AdjustingSAXSource m_source; + + Parsing(Writable tgt, SAXSource src) throws SAXException + { + super(tgt); + InputSource is = src.getInputSource(); + /* + * No correctedDeclStream, no check for unwrapping: if some + * random {@code SQLXML} implementation is passing a stream + * to parse, it had better make sense to a vanilla parser. + */ + m_source = new AdjustingSAXSource(is, false); + } + + @Override + AdjustingSAXSource getAdjustable() + { + return m_source; + } + + @Override + Writable finish() throws IOException, SQLException + { + saxCopy(m_source.get(), + m_tgt.setResult( + m_tgt.backingIfNotFreed(), SAXResult.class)); + return m_tgt; + } + } + } + + static class StAX extends XMLCopier + { + private StAXSource m_source; + + StAX(Writable tgt, StAXSource src) + { + super(tgt); + m_source = src; + } + + @Override + Writable finish() throws IOException, SQLException + { + StAXResult str = m_tgt.setResult( + m_tgt.backingIfNotFreed(), StAXResult.class); + XMLInputFactory xif = XMLInputFactory.newDefaultFactory(); + xif.setProperty(xif.IS_NAMESPACE_AWARE, true); + XMLOutputFactory xof = XMLOutputFactory.newDefaultFactory(); + /* + * The Source has either an event reader or a stream reader. Use + * the event reader directly, or create one around the stream + * reader. + */ + XMLEventReader xer = m_source.getXMLEventReader(); + try + { + if ( null == xer ) + { + XMLStreamReader xsr = m_source.getXMLStreamReader(); + /* + * Before wrapping this XMLStreamReader in an + * XMLEventReader, wrap it in this trivial delegate + * first. The authors of XMLEventReaderImpl found + * themselves with a problem to solve, namely that + * XMLEventReader's hasNext() method isn't declared to + * throw any exceptions (XMLEventReader implements + * Iterator). So they solved it by just swallowing any + * exception thrown by the stream reader's hasNext, and + * returning false, so it just seems the XML abruptly + * ends for no reported reason. + * + * So, just wrap hasNext here to save any exception from + * below, and return true, thereby inviting the consumer + * to go ahead and call next, where we'll re-throw it. + */ + xsr = new StreamReaderDelegate(xsr) + { + XMLStreamException savedException; + + @Override + public boolean hasNext() throws XMLStreamException + { + try + { + return super.hasNext(); + } + catch ( XMLStreamException e ) + { + savedException = e; + return true; + } + } + + @Override + public int next() throws XMLStreamException + { + XMLStreamException e = savedException; + if ( null != e ) + { + savedException = null; + throw e; + } + return super.next(); + } + }; + xer = xif.createXMLEventReader(xsr); + } + /* + * Were you thinking the above could be simply + * createXMLEventReader(m_source) by analogy with + * the writer below? Good thought, but the XMLInputFactory + * implementation that's included in OpenJDK doesn't + * implement the case where the Source argument is a + * StAXSource! Two lines would do it. (And anyway, "the + * writer below" brings hollow, joyless laughter in Java 9 + * and later.) + */ + + /* + * Bother. If not for a regression in Java 9 and later, this + * would be a simple createXMLEventWriter(str). + * XXX This is not fully general, as str is known to be one + * of our native StAXResults, which (for now!) 
can only wrap + * a stream writer, never an event writer. + */ + XMLEventConsumer xec = + new XMLEventToStreamConsumer(str.getXMLStreamWriter()); + + while ( xer.hasNext() ) + xec.add(xer.nextEvent()); + + xer.close(); + } + catch ( XMLStreamException e ) + { + throw new SQLDataException(e.getMessage(), "22000", e); + } + return m_tgt; + } + } + + static class DOM extends XMLCopier + { + private DOMSource m_source; + + DOM(Writable tgt, DOMSource src) + { + super(tgt); + m_source = src; + } + + @Override + Writable finish() throws IOException, SQLException + { + DOMResult dr = m_tgt.setResult( + m_tgt.backingIfNotFreed(), DOMResult.class); + dr.setNode(m_source.getNode()); + return m_tgt; + } + } + } + + /** + * Implements setters for the later JAXP security properties, which use the + * same names for SAX, StAX, and DOM, so the individual setters can all be + * here with only the {@code setFirstSupportedProperty} method abstract. + */ + abstract static class + AdjustingJAXPParser> + implements Adjusting.XML.Parsing + { + static final Logger s_logger = + Logger.getLogger("org.postgresql.pljava.jdbc"); + + private static final String JDK17 = "jdk.xml."; + private static final String LIMIT = + "http://www.oracle.com/xml/jaxp/properties/"; // "legacy" since 17 + protected static final String DTDSUPPORT = "jdk.xml.dtd.support"; + protected static final String ALLOW = "allow"; + protected static final String IGNORE = "ignore"; + protected static final String DENY = "deny"; + + private Exception m_signaling; + private Exception m_quiet; + + protected void addSignaling(Exception e) + { + if ( null == e ) + return; + if ( null == m_signaling ) + m_signaling = e; + else + m_signaling.addSuppressed(e); + } + + protected void addQuiet(Exception e) + { + if ( null == e ) + return; + if ( null == m_quiet ) + m_quiet = e; + else + m_quiet.addSuppressed(e); + } + + protected boolean anySignaling() + { + return null != m_signaling; + } + + /** + * Returns whatever is on the signaling list, while logging (at + * {@code WARNING} level) whatever is on the quiet list. + *
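The practical effect is that a caller can choose how hard an unrecognized adjustment should fail. A sketch against the Adjusting API as declared here, where src is any adjustable source and the two property spellings are the ones this class tries (the limit value is illustrative):

    src.setFirstSupportedProperty(10000,
            "jdk.xml.entityExpansionLimit",
            "http://www.oracle.com/xml/jaxp/properties/entityExpansionLimit")
       .lax(false);   // escalate: if neither name was recognized, get() will throw

    // Without the lax call, an unrecognized name simply lands on the quiet list
    // and is logged at WARNING when get() is called; lax(true) discards it silently.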

      + * Both lists are left cleared. + * @return the head exception on the signaling list, or null if none + */ + protected Exception exceptions() + { + Exception e = m_quiet; + m_quiet = null; + if ( null != e ) + s_logger.log(WARNING, + "some XML processing limits were not successfully adjusted", + e); + e = m_signaling; + m_signaling = null; + return e; + } + + /** + * Common factor of subclass {@link #lax(boolean) lax(boolean)} + * instance methods. + *
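A minimal, generic sketch of the self-typed idiom behind that arrangement (not the exact classes in this patch): shared work can live in the superclass while each concrete subclass supplies self(), so chained calls keep their precise type without an unchecked cast of 'this':

    abstract class Adjuster<T extends Adjuster<T>>
    {
        protected abstract T self();      // each concrete subclass returns 'this'

        T lax(boolean discard)
        {
            // ... shared handling of the quiet list ...
            return self();
        }
    }

    final class SAXAdjuster extends Adjuster<SAXAdjuster>
    {
        @Override protected SAXAdjuster self() { return this; }
    }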

      + * The work is done here, but the instance methods are implemented + * per-subclass to avoid unchecked casting of 'this'. + */ + protected static void lax(AdjustingJAXPParser o, boolean discard) + { + if ( null != o.m_quiet ) + { + if ( ! discard ) + o.addSignaling(o.m_quiet); + o.m_quiet = null; + } + } + + @Override + public T defaults() + { + return allowDTD(false).externalGeneralEntities(false) + .externalParameterEntities(false).loadExternalDTD(false) + .xIncludeAware(false).expandEntityReferences(false); + } + + @Override + public T ignoreDTD() + { + return setFirstSupportedProperty(IGNORE, DTDSUPPORT); + } + + @Override + public T elementAttributeLimit(int limit) + { + return setFirstSupportedProperty(limit, + JDK17 + "elementAttributeLimit", + LIMIT + "elementAttributeLimit"); + } + + @Override + public T entityExpansionLimit(int limit) + { + return setFirstSupportedProperty(limit, + JDK17 + "entityExpansionLimit", + LIMIT + "entityExpansionLimit"); + } + + @Override + public T entityReplacementLimit(int limit) + { + return setFirstSupportedProperty(limit, + JDK17 + "entityReplacementLimit", + LIMIT + "entityReplacementLimit"); + } + + @Override + public T maxElementDepth(int depth) + { + return setFirstSupportedProperty(depth, + JDK17 + "maxElementDepth", + LIMIT + "maxElementDepth"); + } + + @Override + public T maxGeneralEntitySizeLimit(int limit) + { + return setFirstSupportedProperty(limit, + JDK17 + "maxGeneralEntitySizeLimit", + LIMIT + "maxGeneralEntitySizeLimit"); + } + + @Override + public T maxParameterEntitySizeLimit(int limit) + { + return setFirstSupportedProperty(limit, + JDK17 + "maxParameterEntitySizeLimit", + LIMIT + "maxParameterEntitySizeLimit"); + } + + @Override + public T maxXMLNameLimit(int limit) + { + return setFirstSupportedProperty(limit, + JDK17 + "maxXMLNameLimit", + LIMIT + "maxXMLNameLimit"); + } + + @Override + public T totalEntitySizeLimit(int limit) + { + return setFirstSupportedProperty(limit, + JDK17 + "totalEntitySizeLimit", + LIMIT + "totalEntitySizeLimit"); + } + + @Override + public T accessExternalDTD(String protocols) + { + return setFirstSupportedProperty(protocols, ACCESS_EXTERNAL_DTD); + } + + @Override + public T accessExternalSchema(String protocols) + { + return setFirstSupportedProperty(protocols, ACCESS_EXTERNAL_SCHEMA); + } + + @Override + public T entityResolver(EntityResolver resolver) + { + throw new UnsupportedOperationException( + "A SAX EntityResolver cannot be set on a " + + getClass().getCanonicalName()); + } + + @Override + public T schema(Schema schema) + { + throw new UnsupportedOperationException( + "A Schema cannot be set on a " + + getClass().getCanonicalName()); + } + } + + /** + * Extends {@code AdjustingJAXPParser} with some of the older adjustments + * that can be made the same way for SAX and DOM (StAX should not extend + * this class, but just implement the adjustments its own different way). + */ + abstract static class SAXDOMCommon> + extends AdjustingJAXPParser + { + protected abstract Exception tryFirstSupportedFeature( + Exception caught, boolean value, String... names); + + protected abstract Exception tryFirstSupportedProperty( + Exception caught, Object value, String... names); + + protected abstract T self(); + + @Override + public T allowDTD(boolean v) { + Exception caught = + tryFirstSupportedProperty(null, v ? 
ALLOW : DENY, DTDSUPPORT); + + if ( null == caught ) + return self(); + + caught = tryFirstSupportedFeature(caught, !v, + "http://apache.org/xml/features/disallow-doctype-decl", + "http://xerces.apache.org/xerces2-j/features.html" + + "#disallow-doctype-decl"); + + addQuiet(caught); + return self(); + } + + @Override + public T externalGeneralEntities(boolean v) + { + return setFirstSupportedFeature( v, + "http://xml.org/sax/features/external-general-entities", + "http://xerces.apache.org/xerces2-j/features.html" + + "#external-general-entities", + "http://xerces.apache.org/xerces-j/features.html" + + "#external-general-entities"); + } + + @Override + public T externalParameterEntities(boolean v) + { + return setFirstSupportedFeature( v, + "http://xml.org/sax/features/external-parameter-entities", + "http://xerces.apache.org/xerces2-j/features.html" + + "#external-parameter-entities", + "http://xerces.apache.org/xerces-j/features.html" + + "#external-parameter-entities"); + } + + @Override + public T loadExternalDTD(boolean v) + { + return setFirstSupportedFeature( v, + "http://apache.org/xml/features/" + + "nonvalidating/load-external-dtd"); + } + } + + /** + * Error handler for SAX/DOM parsing that treats both "error" and + * "fatal error" as exception-worthy, and logs warnings at {@code WARNING} + * level. + */ + static class SAXDOMErrorHandler implements ErrorHandler + { + private static final SAXDOMErrorHandler s_nonWrappedInstance = + new SAXDOMErrorHandler(false); + /* + * Issue #312: localized error messages from the schema validator + * don't always use the same punctuation around the offending + * element name! Simplest to look for the element name (it's distinctive + * enough) and not for any punctuation--and check that only when + * wrapping is being applied, and then only once (the wrapping element, + * of course, will be first), and so avoid suppressing a later error by + * mistake should a document somehow happen to contain an element with + * the same name used here. + */ + static final Pattern s_wrapelement = Pattern.compile( + "^cvc-elt\\.1(?:\\.a)?+:.*pljava-content-wrap"); + private int m_wrapCount; + + static SAXDOMErrorHandler instance(boolean wrapped) + { + return + wrapped ? new SAXDOMErrorHandler(true) : s_nonWrappedInstance; + } + + private SAXDOMErrorHandler(boolean wrap) + { + m_wrapCount = wrap ? 1 : 0; + } + + @Override + public void error(SAXParseException exception) throws SAXException + { + /* + * When validating with XML Schema against a value being parsed as + * CONTENT, the 'invisible' pljava-content-wrap element may produce + * an error. This hack keeps it invisible; however, the validator is + * then more lenient if the 'visible' top-level element isn't found, + * and simply validates the elements that are declared in the schema + * wherever it happens to find them. + * + * The check is only applied when the input has been wrapped, and + * then only once (after all, the wrapping element will be the first + * to be seen). The "only once" part may be futile inasmuch as the + * validator switches to the lenient mode described above and may + * not even report subsequent mismatched elements. But the check + * still needs to be conditional (we do know whether we applied a + * wrapper or not), so the condition may as well be the right one. + */ + if ( 0 == m_wrapCount ) + throw exception; + Matcher m = s_wrapelement.matcher(exception.getMessage()); + if ( ! 
m.lookingAt() ) + throw exception; + -- m_wrapCount; + } + + @Override + public void fatalError(SAXParseException exception) throws SAXException + { + throw exception; + } + + @Override + public void warning(SAXParseException exception) throws SAXException + { + AdjustingJAXPParser.s_logger + .log(WARNING, exception.getMessage(), exception); + } + } + + static class AdjustingSourceResult + extends + AdjustingJAXPParser> + implements Adjusting.XML.SourceResult + { + private Writable m_result; + private Charset m_serverCS; + private XMLCopier m_copier; + + AdjustingSourceResult(Writable result, Charset serverCS) + { + m_result = result; + m_serverCS = serverCS; + } + + @Override + public AdjustingSourceResult set(Source source) throws SQLException + { + if ( source instanceof Adjusting.XML.Source ) + source = ((Adjusting.XML.Source)source).get(); + + if ( source instanceof StreamSource ) + return set((StreamSource)source); + + if ( source instanceof SAXSource ) + return set((SAXSource)source); + + if ( source instanceof StAXSource ) + return set((StAXSource)source); + + if ( source instanceof DOMSource ) + return set((DOMSource)source); + + m_result.free(); + throw new SQLDataException( + "XML source class " + source.getClass().getName() + + " unsupported"); + } + + @Override + public AdjustingSourceResult set(StreamSource source) + throws SQLException + { + if ( null == m_result ) + throw new IllegalStateException( + "AdjustingSourceResult too late to set source"); + + /* + * Foreign implementation also gets its choice whether to supply + * an InputStream or a Reader. + */ + InputStream is = source.getInputStream(); + Reader r = source.getReader(); + DeclProbe probe = new DeclProbe(); + try + { + if ( null != is ) + { + int b; + while ( -1 != (b = is.read()) ) + if ( ! probe.take((byte)b) ) + break; + String probedEncoding = probe.queryEncoding(); + m_copier = XMLCopier + .copierFor(m_result, m_serverCS, probedEncoding) + .prepare(probe, is); + } + else if ( null != r ) + { + int b; + while ( -1 != (b = r.read()) ) + if ( ! 
probe.take((char)b) ) + break; + String probedEncoding = probe.queryEncoding(); + m_copier = XMLCopier + .copierFor(m_result, m_serverCS, probedEncoding) + .prepare(probe, r); + } + else + throw new SQLDataException( + "Foreign SQLXML implementation has " + + "a broken StreamSource", "22000"); + } + catch ( IOException e ) + { + throw normalizedException(e); + } + finally + { + if ( null == m_copier ) + { + m_result.free(); + m_result = null; + } + } + return this; + } + + @Override + public AdjustingSourceResult set(SAXSource source) + throws SQLException + { + if ( null == m_result ) + throw new IllegalStateException( + "AdjustingSourceResult too late to set source"); + + if ( null != source.getXMLReader() ) + m_copier = new XMLCopier.SAX(m_result, source); + else + { + try + { + m_copier = new XMLCopier.SAX.Parsing(m_result, source); + } + catch ( SAXException e ) + { + throw normalizedException(e); + } + } + + return this; + } + + @Override + public AdjustingSourceResult set(StAXSource source) + throws SQLException + { + if ( null == m_result ) + throw new IllegalStateException( + "AdjustingSourceResult too late to set source"); + + m_copier = new XMLCopier.StAX(m_result, source); + return this; + } + + @Override + public AdjustingSourceResult set(DOMSource source) + throws SQLException + { + if ( null == m_result ) + throw new IllegalStateException( + "AdjustingSourceResult too late to set source"); + + m_copier = new XMLCopier.DOM(m_result, source); + return this; + } + + @Override + public AdjustingSourceResult set(String source) + throws SQLException + { + if ( null == m_result ) + throw new IllegalStateException( + "AdjustingSourceResult too late to set source"); + + return set(new StreamSource(new StringReader(source))); + } + + @Override + public AdjustingSourceResult get() throws SQLException + { + return this; // for this class, get is a noop + } + + @Override + public AdjustingSourceResult lax(boolean discard) + { + theAdjustable().lax(discard); + return this; + } + + @Override + public SQLXML getSQLXML() throws SQLException + { + if ( null == m_result ) + throw new IllegalStateException( + "AdjustingSourceResult getSQLXML called more than once"); + if ( null == m_copier ) + throw new IllegalStateException( + "AdjustingSourceResult getSQLXML called before set"); + + // Exception handling/logging for adjustments will happen in + // theAdjustable().get(), during finish() here. 
+ + Writable result = null; + try + { + result = m_copier.finish(); + } + catch ( IOException e ) + { + throw normalizedException(e); + } + finally + { + Writable r = m_result; + m_result = null; + m_serverCS = null; + m_copier = null; + if ( null == result ) + r.free(); + } + return result; + } + + @Override + public void setSystemId(String systemId) + { + throw new UnsupportedOperationException( + "SourceResult does not support setSystemId"); + } + + @Override + public String getSystemId() + { + throw new UnsupportedOperationException( + "SourceResult does not support getSystemId"); + } + + private Adjusting.XML.Source theAdjustable() + { + if ( null == m_copier ) + throw new IllegalStateException( + "AdjustingSourceResult too early or late to adjust"); + return m_copier.getAdjustable(); + } + + @Override + public AdjustingSourceResult allowDTD(boolean v) + { + theAdjustable().allowDTD(v); + return this; + } + + @Override + public AdjustingSourceResult externalGeneralEntities(boolean v) + { + theAdjustable().externalGeneralEntities(v); + return this; + } + + @Override + public AdjustingSourceResult externalParameterEntities(boolean v) + { + theAdjustable().externalParameterEntities(v); + return this; + } + + @Override + public AdjustingSourceResult loadExternalDTD(boolean v) + { + theAdjustable().loadExternalDTD(v); + return this; + } + + @Override + public AdjustingSourceResult xIncludeAware(boolean v) + { + theAdjustable().xIncludeAware(v); + return this; + } + + @Override + public AdjustingSourceResult expandEntityReferences(boolean v) + { + theAdjustable().expandEntityReferences(v); + return this; + } + + @Override + public AdjustingSourceResult setFirstSupportedFeature( + boolean value, String... names) + { + theAdjustable().setFirstSupportedFeature(value, names); + return this; + } + + @Override + public AdjustingSourceResult setFirstSupportedProperty( + Object value, String... names) + { + theAdjustable().setFirstSupportedProperty(value, names); + return this; + } + + @Override + public AdjustingSourceResult entityResolver(EntityResolver resolver) + { + theAdjustable().entityResolver(resolver); + return this; + } + + @Override + public AdjustingSourceResult schema(Schema schema) + { + theAdjustable().schema(schema); + return this; + } + } + + static class AdjustingStreamResult + extends AdjustingJAXPParser> + implements Adjusting.XML.StreamResult + { + private VarlenaWrapper.Output m_vwo; + private Charset m_serverCS; + private AdjustingSAXSource m_verifierSource; + private boolean m_preferWriter = false; + private boolean m_hasCalledDefaults; + + AdjustingStreamResult(VarlenaWrapper.Output vwo, Charset serverCS) + throws SQLException + { + m_vwo = vwo; + m_serverCS = serverCS; + try + { + /* + * When used as a verifier, an AdjustingSAXSource can be created + * with wrapping=false unconditionally, as it won't be using the + * result for anything and has no need to unwrap it. At verify + * time, the presence of wrapping still gets checked, if only to + * set up the ErrorHandler correctly in case schema validation + * has been requested. 
+ */ + m_verifierSource = new AdjustingSAXSource(null, false); + } + catch ( SAXException e ) + { + throw normalizedException(e); + } + } + + @Override + public void setSystemId(String systemId) + { + throw new IllegalStateException( + "AdjustingStreamResult used before get()"); + } + + @Override + public String getSystemId() + { + throw new IllegalStateException( + "AdjustingStreamResult used before get()"); + } + + private AdjustingSAXSource theVerifierSource() + { + return theVerifierSource(true); + } + + private AdjustingSAXSource theVerifierSource(boolean afterDefaults) + { + if ( null == m_verifierSource ) + throw new IllegalStateException( + "AdjustingStreamResult too late to adjust after get()"); + + if ( afterDefaults && ! m_hasCalledDefaults ) + { + m_hasCalledDefaults = true; + m_verifierSource.defaults(); + /* Don't touch m_preferWriter here, only in real defaults() */ + } + + return m_verifierSource; + } + + @Override + public AdjustingStreamResult preferBinaryStream() + { + theVerifierSource(false); // shorthand error check + m_preferWriter = false; + return this; + } + + @Override + public AdjustingStreamResult preferCharacterStream() + { + theVerifierSource(false); // shorthand error check + m_preferWriter = true; + return this; + } + + @Override + public StreamResult get() throws SQLException + { + if ( null == m_verifierSource ) + throw new IllegalStateException( + "AdjustingStreamResult get() called more than once"); + + // Exception handling/logging for theVerifierSource happens here + XMLReader xr = theVerifierSource().get().getXMLReader(); + + OutputStream os; + + try + { + m_vwo.setVerifier(new Verifier(xr)); + os = new DeclCheckedOutputStream(m_vwo, m_serverCS); + } + catch ( IOException e ) + { + throw normalizedException(e); + } + + StreamResult sr; + + if ( m_preferWriter ) + sr = new StreamResult( + new OutputStreamWriter(os, m_serverCS.newEncoder())); + else + sr = new StreamResult(os); + + m_vwo = null; + m_verifierSource = null; + m_serverCS = null; + + return sr; + } + + @Override + public AdjustingStreamResult lax(boolean discard) + { + theVerifierSource().lax(discard); + return this; + } + + @Override + public AdjustingStreamResult allowDTD(boolean v) + { + theVerifierSource().allowDTD(v); + return this; + } + + @Override + public AdjustingStreamResult externalGeneralEntities(boolean v) + { + theVerifierSource().externalGeneralEntities(v); + return this; + } + + @Override + public AdjustingStreamResult externalParameterEntities(boolean v) + { + theVerifierSource().externalParameterEntities(v); + return this; + } + + @Override + public AdjustingStreamResult loadExternalDTD(boolean v) + { + theVerifierSource().loadExternalDTD(v); + return this; + } + + @Override + public AdjustingStreamResult xIncludeAware(boolean v) + { + theVerifierSource().xIncludeAware(v); + return this; + } + + @Override + public AdjustingStreamResult expandEntityReferences(boolean v) + { + theVerifierSource().expandEntityReferences(v); + return this; + } + + @Override + public AdjustingStreamResult setFirstSupportedFeature( + boolean value, String... names) + { + theVerifierSource().setFirstSupportedFeature(value, names); + return this; + } + + @Override + public AdjustingStreamResult defaults() + { + m_hasCalledDefaults = true; + theVerifierSource().defaults(); + return preferBinaryStream(); + } + + @Override + public AdjustingStreamResult setFirstSupportedProperty( + Object value, String... 
names) + { + theVerifierSource().setFirstSupportedProperty(value, names); + return this; + } + + @Override + public AdjustingStreamResult entityResolver(EntityResolver resolver) + { + theVerifierSource().entityResolver(resolver); + return this; + } + + @Override + public AdjustingStreamResult schema(Schema schema) + { + theVerifierSource(false).schema(schema); + return this; + } + } + + static class AdjustingSAXSource + extends SAXDOMCommon> + implements Adjusting.XML.SAXSource + { + private SAXParserFactory m_spf; + private XMLReader m_xr; + private InputSource m_is; + private boolean m_wrapped; + private boolean m_hasCalledDefaults; + + static class Dummy extends AdjustingSAXSource + { + static final Dummy INSTANCE = new Dummy(); + private Dummy() { } + + @Override + protected Exception tryFirstSupportedFeature( + Exception caught, boolean value, String... names) + { + return caught; + } + + @Override + protected Exception tryFirstSupportedProperty( + Exception caught, Object value, String... names) + { + return caught; + } + + @Override + public AdjustingSAXSource entityResolver(EntityResolver resolver) + { + return this; + } + + @Override + public AdjustingSAXSource schema(Schema schema) + { + return this; + } + } + + private AdjustingSAXSource() // only for Dummy + { + } + + AdjustingSAXSource(InputSource is, boolean wrapped) + throws SAXException + { + m_is = is; + m_wrapped = wrapped; + m_spf = SAXParserFactory.newDefaultInstance(); + m_spf.setNamespaceAware(true); + } + + AdjustingSAXSource(XMLReader xr, InputSource is) + throws SAXException + { + m_xr = xr; + m_is = is; + } + + @Override + public void setSystemId(String systemId) + { + throw new IllegalStateException( + "AdjustingSAXSource used before get()"); + } + + @Override + public String getSystemId() + { + throw new IllegalStateException( + "AdjustingSAXSource used before get()"); + } + + private SAXParserFactory theFactory() + { + if ( null == m_spf ) + throw new IllegalStateException( + "AdjustingSAXSource too late to set schema after " + + "other adjustments"); + return m_spf; + } + + private XMLReader theReader() + { + if ( anySignaling() ) + return null; + + if ( null != m_spf ) + { + try + { + m_xr = m_spf.newSAXParser().getXMLReader(); + } + catch ( SAXException | ParserConfigurationException e ) + { + addSignaling(e); + return null; + } + + m_spf = null; + if ( m_wrapped ) + m_xr = new SAXUnwrapFilter(m_xr); + + /* + * If this AdjustingSAXSource has been created for use as a + * verifier, it was passed false for m_wrapped unconditionally, + * which is mostly harmless, but may mean this is the wrong + * error handler, if schema validation has been requested. + * That's ok; the verifier checks for wrapping and will set the + * right error handler if need be. + */ + m_xr.setErrorHandler(SAXDOMErrorHandler.instance(m_wrapped)); + + if ( ! 
m_hasCalledDefaults ) + defaults(); + } + + if ( null == m_xr ) + throw new IllegalStateException( + "AdjustingSAXSource too late to adjust after get()"); + + return m_xr; + } + + @Override + public SAXSource get() throws SQLException + { + if ( null == m_xr && null == m_spf ) + throw new IllegalStateException( + "AdjustingSAXSource get() called more than once"); + + XMLReader xr = theReader(); + + Exception e = exceptions(); + if ( null != e ) + throw normalizedException(e); + + SAXSource ss = new SAXSource(xr, m_is); + m_xr = null; + m_is = null; + return ss; + } + + @Override + public AdjustingSAXSource lax(boolean discard) + { + lax(this, discard); + return this; + } + + @Override + public AdjustingSAXSource defaults() + { + m_hasCalledDefaults = true; + super.defaults(); + return this; + } + + @Override + public AdjustingSAXSource xIncludeAware(boolean v) + { + return setFirstSupportedFeature( v, + "http://apache.org/xml/features/xinclude"); + } + + @Override + public AdjustingSAXSource expandEntityReferences(boolean v) + { + // not a thing in SAX ? + return this; + } + + @Override + protected Exception tryFirstSupportedFeature( + Exception caught, boolean value, String... names) + { + XMLReader r = theReader(); + if ( null == r ) // pending exception, nothing to be done + return caught; + + return setFirstSupported(r::setFeature, value, + List.of(SAXNotRecognizedException.class, + SAXNotSupportedException.class), + caught, this::addSignaling, names); + } + + @Override + protected Exception tryFirstSupportedProperty( + Exception caught, Object value, String... names) + { + XMLReader r = theReader(); + if ( null == r ) // pending exception, nothing to be done + return caught; + + return setFirstSupported(r::setProperty, value, + List.of(SAXNotRecognizedException.class, + SAXNotSupportedException.class), + caught, this::addSignaling, names); + } + + @Override + protected AdjustingSAXSource self() + { + return this; + } + + @Override + public AdjustingSAXSource setFirstSupportedFeature( + boolean value, String... names) + { + addQuiet(tryFirstSupportedFeature(null, value, names)); + return this; + } + + @Override + public AdjustingSAXSource setFirstSupportedProperty( + Object value, String... names) + { + addQuiet(tryFirstSupportedProperty(null, value, names)); + return this; + } + + @Override + public AdjustingSAXSource entityResolver(EntityResolver resolver) + { + XMLReader r = theReader(); + if ( null != r ) + r.setEntityResolver(resolver); + return this; + } + + @Override + public AdjustingSAXSource schema(Schema schema) + { + theFactory().setSchema(schema); + return this; + } + } + + /* + * For the moment, an AdjustingSAXResult doesn't adjust anything at all, + * as a Verifier isn't used when writing through SAX. But it has to be here, + * just because if the client asks only for Adjusting.XML.Result, meaning we + * get to pick, SAX is the flavor we pick. 
+ */ + static class AdjustingSAXResult + extends SAXDOMCommon> + implements Adjusting.XML.SAXResult + { + private SAXResult m_sr; + + AdjustingSAXResult(SAXResult sr) + { + m_sr = sr; + } + + @Override + public void setSystemId(String systemId) + { + throw new IllegalStateException( + "AdjustingSAXResult used before get()"); + } + + @Override + public String getSystemId() + { + throw new IllegalStateException( + "AdjustingSAXResult used before get()"); + } + + private AdjustingSAXResult checkedNoOp() + { + if ( null == m_sr ) + throw new IllegalStateException( + "AdjustingSAXResult too late to adjust after get()"); + return this; + } + + @Override + public SAXResult get() throws SQLException + { + if ( null == m_sr ) + throw new IllegalStateException( + "AdjustingSAXResult get() called more than once"); + + SAXResult sr = m_sr; + m_sr = null; + return sr; + } + + @Override + public AdjustingSAXResult lax(boolean discard) + { + lax(this, discard); + return this; + } + + @Override + public AdjustingSAXResult xIncludeAware(boolean v) + { + return checkedNoOp(); + } + + @Override + public AdjustingSAXResult expandEntityReferences(boolean v) + { + return checkedNoOp(); + } + + @Override + protected Exception tryFirstSupportedFeature( + Exception caught, boolean value, String... names) + { + checkedNoOp(); + return null; + } + + @Override + protected Exception tryFirstSupportedProperty( + Exception caught, Object value, String... names) + { + checkedNoOp(); + return null; + } + + @Override + protected AdjustingSAXResult self() + { + return this; + } + + @Override + public AdjustingSAXResult setFirstSupportedFeature( + boolean value, String... names) + { + return checkedNoOp(); + } + + @Override + public AdjustingSAXResult setFirstSupportedProperty( + Object value, String... 
names) + { + return checkedNoOp(); + } + + @Override + public AdjustingSAXResult entityResolver(EntityResolver resolver) + { + return checkedNoOp(); + } + + @Override + public AdjustingSAXResult schema(Schema schema) + { + return checkedNoOp(); + } + } + + static class AdjustingStAXSource + extends AdjustingJAXPParser> + implements Adjusting.XML.StAXSource + { + private XMLInputFactory m_xif; + private InputStream m_is; + private Charset m_serverCS; + private boolean m_wrapped; + + AdjustingStAXSource(InputStream is, Charset serverCS, boolean wrapped) + throws XMLStreamException + { + m_xif = XMLInputFactory.newDefaultFactory(); + m_xif.setProperty(m_xif.IS_NAMESPACE_AWARE, true); + m_is = is; + m_serverCS = serverCS; + m_wrapped = wrapped; + } + + @Override + public void setSystemId(String systemId) + { + throw new IllegalStateException( + "AdjustingStAXSource used before get()"); + } + + @Override + public String getSystemId() + { + throw new IllegalStateException( + "AdjustingStAXSource used before get()"); + } + + private XMLInputFactory theFactory() + { + if ( null == m_xif ) + throw new IllegalStateException( + "AdjustingStAXSource too late to adjust after get()"); + return m_xif; + } + + @Override + public StAXSource get() throws SQLException + { + if ( null == m_xif ) + throw new IllegalStateException( + "AdjustingStAXSource get() called more than once"); + + StAXSource ss = null; + try + { + XMLStreamReader xsr = m_xif.createXMLStreamReader( + m_is, m_serverCS.name()); + if ( m_wrapped ) + xsr = new StAXUnwrapFilter(xsr); + m_xif = null; // too late for any more adjustments + ss = new StAXSource(xsr); + } + catch ( Exception e ) + { + addSignaling(e); + } + + Exception e = exceptions(); + if ( null != e ) + throw normalizedException(e); + + return ss; + } + + @Override + public AdjustingStAXSource lax(boolean discard) + { + lax(this, discard); + return this; + } + + @Override + public AdjustingStAXSource allowDTD(boolean v) { + Exception caught = + tryFirstSupported(null, v ? ALLOW : DENY, DTDSUPPORT); + + if ( null == caught ) + return this; + + caught = tryFirstSupported(caught, v, XMLInputFactory.SUPPORT_DTD); + + addQuiet(caught); + return this; + } + + @Override + public AdjustingStAXSource externalGeneralEntities(boolean v) + { + return setFirstSupportedFeature( v, + XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES); + } + + @Override + public AdjustingStAXSource externalParameterEntities(boolean v) + { + return this; + } + + @Override + public AdjustingStAXSource loadExternalDTD(boolean v) + { + return setFirstSupportedFeature( !v, + "http://java.sun.com/xml/stream/properties/" + + "ignore-external-dtd"); + } + + @Override + public AdjustingStAXSource xIncludeAware(boolean v) + { + return this; + } + + @Override + public AdjustingStAXSource expandEntityReferences(boolean v) + { + return setFirstSupportedFeature( v, + XMLInputFactory.IS_REPLACING_ENTITY_REFERENCES); + } + + private Exception tryFirstSupported( + Exception caught, Object value, String... names) + { + XMLInputFactory xif = theFactory(); + return setFirstSupported(xif::setProperty, value, + List.of(IllegalArgumentException.class), + caught, this::addSignaling, names); + } + + @Override + public AdjustingStAXSource setFirstSupportedFeature( + boolean value, String... names) + { + addQuiet(tryFirstSupported(null, value, names)); + return this; + } + + @Override + public AdjustingStAXSource setFirstSupportedProperty( + Object value, String... 
names) + { + addQuiet(tryFirstSupported(null, value, names)); + return this; + } + } + + static class AdjustingDOMSource + extends SAXDOMCommon> + implements Adjusting.XML.DOMSource + { + private DocumentBuilderFactory m_dbf; + private InputStream m_is; + private boolean m_wrapped; + private EntityResolver m_resolver; + + AdjustingDOMSource(InputStream is, boolean wrapped) + { + m_dbf = DocumentBuilderFactory.newDefaultInstance(); + m_dbf.setNamespaceAware(true); + m_is = is; + m_wrapped = wrapped; + } + + @Override + public void setSystemId(String systemId) + { + throw new IllegalStateException( + "AdjustingDOMSource used before get()"); + } + + @Override + public String getSystemId() + { + throw new IllegalStateException( + "AdjustingDOMSource used before get()"); + } + + private DocumentBuilderFactory theFactory() + { + if ( null == m_dbf ) + throw new IllegalStateException( + "AdjustingDOMSource too late to adjust after get()"); + return m_dbf; + } + + @Override + public DOMSource get() throws SQLException + { + if ( null == m_dbf ) + throw new IllegalStateException( + "AdjustingDOMSource get() called more than once"); + + DOMSource ds = null; + try + { + DocumentBuilder db = m_dbf.newDocumentBuilder(); + db.setErrorHandler(SAXDOMErrorHandler.instance(m_wrapped)); + if ( null != m_resolver ) + db.setEntityResolver(m_resolver); + ds = new DOMSource(db.parse(m_is)); + if ( m_wrapped ) + domUnwrap(ds); + m_dbf = null; + m_is = null; + } + catch ( Exception e ) + { + addSignaling(e); + } + + Exception e = exceptions(); + + if ( null == e ) + return ds; + + /* + * If changing these wrapping conventions, change them also in + * XMLCopier.saxCopy() + */ + if ( e instanceof SAXException ) + throw new SQLDataException(e.getMessage(), "22000", e); + + if ( e instanceof IOException ) + throw new SQLException(e.getMessage(), "58030", e); + + throw normalizedException(e); + } + + @Override + public AdjustingDOMSource lax(boolean discard) + { + lax(this, discard); + return this; + } + + @Override + public AdjustingDOMSource xIncludeAware(boolean v) + { + theFactory().setXIncludeAware(v); + return this; + } + + @Override + public AdjustingDOMSource expandEntityReferences(boolean v) + { + theFactory().setExpandEntityReferences(v); + return this; + } + + @Override + protected Exception tryFirstSupportedFeature( + Exception caught, boolean value, String... names) + { + DocumentBuilderFactory dbf = theFactory(); + return setFirstSupported(dbf::setFeature, value, + List.of(ParserConfigurationException.class), + caught, this::addSignaling, names); + } + + @Override + protected Exception tryFirstSupportedProperty( + Exception caught, Object value, String... names) + { + DocumentBuilderFactory dbf = theFactory(); + return setFirstSupported(dbf::setAttribute, value, + List.of(IllegalArgumentException.class), + caught, this::addSignaling, names); + } + + @Override + protected AdjustingDOMSource self() + { + return this; + } + + @Override + public AdjustingDOMSource setFirstSupportedFeature( + boolean value, String... names) + { + addQuiet(tryFirstSupportedFeature(null, value, names)); + return this; + } + + @Override + public AdjustingDOMSource setFirstSupportedProperty( + Object value, String... 
names) + { + addQuiet(tryFirstSupportedProperty(null, value, names)); + return this; + } + + @Override + public AdjustingDOMSource entityResolver(EntityResolver resolver) + { + m_resolver = resolver; + return this; + } + + @Override + public AdjustingDOMSource schema(Schema schema) + { + theFactory().setSchema(schema); + return this; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowReader.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowReader.java index 2e7ad16d..2b5d62cf 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowReader.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowReader.java @@ -1,17 +1,23 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden + * Copyright (c) 2004-2019 Tada AB and other contributors, as listed below. * Copyright (c) 2010, 2011 PostgreSQL Global Development Group * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.jdbc; import java.sql.ResultSet; import java.sql.SQLException; -import org.postgresql.pljava.internal.Backend; +import static org.postgresql.pljava.internal.Backend.doInPG; +import org.postgresql.pljava.internal.DualState; import org.postgresql.pljava.internal.TupleDesc; /** @@ -24,34 +30,76 @@ public class SingleRowReader extends SingleRowResultSet { private final TupleDesc m_tupleDesc; - private final long m_pointer; + private final State m_state; - public SingleRowReader(long pointer, TupleDesc tupleDesc) - throws SQLException + private static class State + extends DualState.SingleGuardedLong { - m_pointer = pointer; - m_tupleDesc = tupleDesc; + private State( + DualState.Key cookie, SingleRowReader srr, long ro, long hth) + { + super(cookie, srr, ro, hth); + } + + /** + * Return the HeapTupleHeader pointer. + *

      + * This is a transitional implementation: ideally, each method requiring + * the native state would be moved to this class, and hold the pin for + * as long as the state is being manipulated. Simply returning the + * guarded value out from under the pin, as here, is not great practice, + * but as long as the value is only used in instance methods of + * SingleRowReader, or subclasses, or something with a strong reference + * to this SingleRowReader, and only on a thread for which + * {@code Backend.threadMayEnterPG()} is true, disaster will not strike. + * It can't go Java-unreachable while an instance method's on the call + * stack, and the {@code Invocation} marking this state's native scope + * can't be popped before return of any method using the value. + */ + private long getHeapTupleHeaderPtr() throws SQLException + { + pin(); + try + { + return guardedLong(); + } + finally + { + unpin(); + } + } } - public void close() + /** + * Construct a {@code SingleRowReader} from a {@code HeapTupleHeader} + * and a {@link TupleDesc TupleDesc}. + * @param cookie Capability obtained from native code to construct a + * {@code SingleRowReader} instance. + * @param resourceOwner Value identifying a scope in PostgreSQL during which + * the native state encapsulated here will be valid. + * @param hth Native pointer to a PG {@code HeapTupleHeader} + * @param tupleDesc A {@code TupleDesc}; the Java class this time. + */ + public SingleRowReader(DualState.Key cookie, long resourceOwner, long hth, + TupleDesc tupleDesc) + throws SQLException { + m_state = new State(cookie, this, resourceOwner, hth); + m_tupleDesc = tupleDesc; } - public void finalize() + @Override + public void close() { - synchronized(Backend.THREADLOCK) - { - _free(m_pointer); - } } - protected Object getObjectValue(int columnIndex) + @Override // defined in ObjectResultSet + protected Object getObjectValue(int columnIndex, Class type) throws SQLException { - synchronized(Backend.THREADLOCK) - { - return _getObject(m_pointer, m_tupleDesc.getNativePointer(), columnIndex); - } + return doInPG(() -> _getObject( + m_state.getHeapTupleHeaderPtr(), m_tupleDesc.getNativePointer(), + columnIndex, type)); } /** @@ -160,13 +208,13 @@ public boolean isClosed() // End of implementation of JDBC 4 methods. // ************************************************************ + @Override // defined in SingleRowResultSet protected final TupleDesc getTupleDesc() { return m_tupleDesc; } - protected native void _free(long pointer); - - private static native Object _getObject(long pointer, long tupleDescPointer, int index) + private static native Object _getObject( + long pointer, long tupleDescPointer, int index, Class type) throws SQLException; } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowResultSet.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowResultSet.java index 042680f0..f23b3080 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowResultSet.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowResultSet.java @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2004-2018 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -103,7 +108,7 @@ public boolean isAfterLast() throws SQLException } /** - * Will always return false since a SingleRowWriter + * Will always return false since a SingleRowResultSet * starts on the one and only row. */ public boolean isBeforeFirst() throws SQLException @@ -229,7 +234,7 @@ public void moveToCurrentRow() } /** - * This feature is not supported on a SingleRowWriter. + * This feature is not supported on a SingleRowResultSet. * @throws SQLException indicating that this feature is not supported. */ public void moveToInsertRow() @@ -281,8 +286,9 @@ public void updateObject(int columnIndex, Object x, int scale) // ************************************************************ /** - * Returns {@link ResultSet#CLOSE_CURSORS_AT_COMMIT}. Cursors - * are actually closed when a function returns to SQL. + * Returns {@link ResultSet#CLOSE_CURSORS_AT_COMMIT}. A single-row result + * set serves a special purpose in the call or return of a function, and is + * not guaranteed to be usable beyond that function's return. */ public int getHoldability() { diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowWriter.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowWriter.java index 810dc2e7..99f21341 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowWriter.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SingleRowWriter.java @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * Copyright (c) 2010, 2011 PostgreSQL Global Development Group * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -22,6 +27,10 @@ /** * A single row, updateable ResultSet, specially made for functions and * procedures that returns complex types or sets. + *
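A sketch of the intended use, with the method names declared in this class (the TupleDesc comes from the caller, the column values are illustrative, and exception handling is omitted):

    SingleRowWriter writer = new SingleRowWriter(tupleDesc);
    writer.updateObject(1, 42);               // values are coerced to the declared column classes
    writer.updateObject(2, "forty-two");
    long formedTuple = writer.getTupleAndClear();   // native pointer to the formed tuple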

      + * A {@link TupleDesc} must be passed to the constructor. After values have + * been written, the native pointer to a formed {@link Tuple} can be retrieved + * using {@link #getTupleAndClear}. * * @author Thomas Hallgren */ @@ -31,6 +40,10 @@ public class SingleRowWriter extends SingleRowResultSet private final Object[] m_values; private Tuple m_tuple; + /** + * Construct a {@code SingleRowWriter} given a descriptor of the tuple + * structure it should produce. + */ public SingleRowWriter(TupleDesc tupleDesc) throws SQLException { @@ -38,7 +51,12 @@ public SingleRowWriter(TupleDesc tupleDesc) m_values = new Object[tupleDesc.size()]; } - protected Object getObjectValue(int columnIndex) + /** + * Returns the value most recently written in the current tuple at the + * specified index, or {@code null} if none has been written. + */ + @Override // defined in ObjectRresultSet + protected Object getObjectValue(int columnIndex, Class type) throws SQLException { if(columnIndex < 1) @@ -50,6 +68,7 @@ protected Object getObjectValue(int columnIndex) * Returns true if the row contains any non null * values since all values of the row are null initially. */ + @Override public boolean rowUpdated() throws SQLException { @@ -60,6 +79,7 @@ public boolean rowUpdated() return false; } + @Override public void updateObject(int columnIndex, Object x) throws SQLException { @@ -69,23 +89,25 @@ public void updateObject(int columnIndex, Object x) if(x == null) m_values[columnIndex-1] = x; - Class c = m_tupleDesc.getColumnClass(columnIndex); - if(!c.isInstance(x) + Class c = m_tupleDesc.getColumnClass(columnIndex); + TypeBridge.Holder xAlt = TypeBridge.wrap(x); + if(null == xAlt && !c.isInstance(x) && !(c == byte[].class && (x instanceof BlobValue))) { if(Number.class.isAssignableFrom(c)) - x = SPIConnection.basicNumericCoersion(c, x); + x = SPIConnection.basicNumericCoercion(c, x); else if(Time.class.isAssignableFrom(c) || Date.class.isAssignableFrom(c) || Timestamp.class.isAssignableFrom(c)) - x = SPIConnection.basicCalendricalCoersion(c, x, Calendar.getInstance()); + x = SPIConnection.basicCalendricalCoercion(c, x, Calendar.getInstance()); else - x = SPIConnection.basicCoersion(c, x); + x = SPIConnection.basicCoercion(c, x); } - m_values[columnIndex-1] = x; + m_values[columnIndex-1] = null == xAlt ? x : xAlt; } + @Override public void cancelRowUpdates() throws SQLException { @@ -95,6 +117,7 @@ public void cancelRowUpdates() /** * Cancels all changes but doesn't really close the set. */ + @Override public void close() throws SQLException { @@ -106,8 +129,8 @@ public void copyRowFrom(ResultSet rs) throws SQLException { int top = m_values.length; - for(int idx = 0; idx < top; ++idx) - m_values[idx] = rs.getObject(idx+1); + for(int idx = 1; idx <= top; ++idx) + updateObject(idx, rs.getObject(idx)); } /** @@ -132,6 +155,7 @@ public long getTupleAndClear() return m_tuple.getNativePointer(); } + @Override // defined in SingleRowResultSet protected final TupleDesc getTupleDesc() { return m_tupleDesc; @@ -141,21 +165,13 @@ protected final TupleDesc getTupleDesc() // Implementation of JDBC 4 methods. // ************************************************************ + @Override public boolean isClosed() throws SQLException { return m_tuple == null; } - /** - * Returns {@link ResultSet#CLOSE_CURSORS_AT_COMMIT}. Cursors - * are actually closed when a function returns to SQL. 
- */ - public int getHoldability() - { - return ResultSet.CLOSE_CURSORS_AT_COMMIT; - } - // ************************************************************ // End of implementation of JDBC 4 methods. // ************************************************************ diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/StatementClosedException.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/StatementClosedException.java index 3f532de6..300c78ba 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/StatementClosedException.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/StatementClosedException.java @@ -1,14 +1,21 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren */ package org.postgresql.pljava.jdbc; import java.sql.SQLException; /** + * An {@code SQLException} specific to the case of attempted use of a + * {@code Statement} that has been closed. * @author Thomas Hallgren */ public class StatementClosedException extends SQLException diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SyntheticResultSet.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SyntheticResultSet.java index 60ea3409..ed608594 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SyntheticResultSet.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SyntheticResultSet.java @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Filip Hrbek + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -13,27 +19,38 @@ /** * A Synthetic ResultSet that provides direct access to data stored - * in a {@link java.util.ArrayList}. This kind of ResultSet has nothing + * in a {@link java.util.ArrayList}; chiefly used to return tabular information + * from {@code ...MetaData} objects. This kind of ResultSet has nothing * common with any statement. * * @author Filip Hrbek */ public class SyntheticResultSet extends ResultSetBase { - private final ResultSetField[] m_fields; - private final ArrayList m_tuples; - private final HashMap m_fieldIndexes; + private final ResultSetField[] m_fields; + private final ArrayList m_tuples; + private final HashMap m_fieldIndexes; - SyntheticResultSet(ResultSetField[] fields, ArrayList tuples) + /** + * Construct a {@code SyntheticResultSet} whose column types are described + * by an array of {@code ResultSetField} instances, and whose rows are + * supplied as an {@code ArrayList} whose elements are themselves arrays of + * {@code Object}. 
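As an aside to the SingleRowWriter hunks above: user code never constructs that class directly. A set-returning function simply writes through the java.sql.ResultSet the runtime hands it, and that ResultSet is backed by a SingleRowWriter whose updateObject/coercion path is what this diff adjusts. A minimal sketch, assuming the classic org.postgresql.pljava.ResultSetProvider interface (the function and column layout below are invented for illustration, not taken from the patch):

    import java.sql.ResultSet;
    import java.sql.SQLException;
    import org.postgresql.pljava.ResultSetProvider;

    // Returns rows of a hypothetical composite type (n int4, fib int8); each
    // call to assignRowValues writes one row into the receiver, which PL/Java
    // backs with the SingleRowWriter shown above.
    public class Fibonacci implements ResultSetProvider
    {
        private long m_a = 0, m_b = 1;

        public static ResultSetProvider fibonacci()
        {
            return new Fibonacci();
        }

        @Override
        public boolean assignRowValues(ResultSet receiver, int currentRow)
        throws SQLException
        {
            if ( currentRow >= 20 )
                return false;               // no more rows
            receiver.updateInt(1, currentRow);
            receiver.updateLong(2, m_a);    // coerced/bridged as shown above
            long next = m_a + m_b;
            m_a = m_b;
            m_b = next;
            return true;                    // this row is complete
        }

        @Override
        public void close()
        {
        }
    }

The getTupleAndClear step described above happens behind the scenes once a row has been written and assignRowValues returns true.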
+ * @throws SQLException if a non-null reference at index j in any + * 'row' array is an instance of a class that does not satisfy the + * {@link ResultSetField#canContain canContain} method of the + * {@code ResultSetField} instance at index j. + */ + SyntheticResultSet(ResultSetField[] fields, ArrayList tuples) throws SQLException { super(tuples.size()); m_fields = fields; m_tuples = tuples; - m_fieldIndexes = new HashMap(); + m_fieldIndexes = new HashMap<>(); int i = m_fields.length; while(--i >= 0) - m_fieldIndexes.put(m_fields[i].getColumnLabel(), new Integer(i+1)); + m_fieldIndexes.put(m_fields[i].getColumnLabel(), i+1); Object[][] tupleTest = (Object[][]) m_tuples.toArray(new Object[0][]); Object value; @@ -55,25 +72,35 @@ public class SyntheticResultSet extends ResultSetBase } } - public void close() + @Override + public void close() throws SQLException { m_tuples.clear(); super.close(); } + @Override public int findColumn(String columnName) throws SQLException { - Integer idx = (Integer)m_fieldIndexes.get(columnName.toUpperCase()); + Integer idx = m_fieldIndexes.get(columnName.toUpperCase()); if(idx != null) { - return idx.intValue(); + return idx; } throw new SQLException("No such field: '" + columnName + "'"); } - protected Object getObjectValue(int columnIndex) + /** + * Returns exactly the object that was supplied at {@code columnIndex} + * (less one) in the current row. + *
<p>
      + * Ignores the {@code type} argument and returns whatever object is there. + * If it is not what the caller needed, let the caller complain. + */ + @Override // defined in ObjectResultSet + protected Object getObjectValue(int columnIndex, Class type) throws SQLException { return getCurrentRow()[columnIndex-1]; @@ -85,14 +112,16 @@ protected final Object[] getCurrentRow() int row = this.getRow(); if(row < 1 || row > m_tuples.size()) throw new SQLException("ResultSet is not positioned on a valid row"); - return (Object[])m_tuples.get(row-1); + return m_tuples.get(row-1); } + @Override public boolean isLast() throws SQLException { return this.getRow() == m_tuples.size(); } + @Override public boolean next() throws SQLException { int row = this.getRow(); @@ -104,6 +133,11 @@ public boolean next() throws SQLException return false; } + /** + * Returns metadata describing this {@code SyntheticResultSet}, based on the + * {@link ResultSetField ResultSetField}s supplied to the constructor. + */ + @Override public ResultSetMetaData getMetaData() throws SQLException { diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/SyntheticResultSetMetaData.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/SyntheticResultSetMetaData.java index e941fd29..e85e9f54 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/SyntheticResultSetMetaData.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/SyntheticResultSetMetaData.java @@ -1,16 +1,20 @@ /* - * Copyright (c) 2005, 2006 TADA AB - Taby Sweden - * Copyright (c) 2005, 2010, 2011 PostgreSQL Global Development Group + * Copyright (c) 2005-2018 Tada AB and other contributors, as listed below. * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Filip Hrbek + * PostgreSQL Global Development Group + * Chapman Flack */ package org.postgresql.pljava.jdbc; import java.sql.SQLException; -import java.sql.SQLFeatureNotSupportedException; import org.postgresql.pljava.internal.Oid; @@ -132,31 +136,4 @@ protected final int getFieldLength(int column) throws SQLException { return m_fields[column-1].getLength(); } - - // ************************************************************ - // Non-implementation of JDBC 4 methods. - // ************************************************************ - - public boolean isWrapperFor(Class iface) - throws SQLException - { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".isWrapperFor( Class ) not implemented yet.", - "0A000" ); - } - - public T unwrap(Class iface) - throws SQLException - { - throw new SQLFeatureNotSupportedException - ( this.getClass() - + ".unwrapClass( Class ) not implemented yet.", - "0A000" ); - } - - // ************************************************************ - // End of non-implementation of JDBC 4 methods. 
- // ************************************************************ - } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/TriggerResultSet.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/TriggerResultSet.java index 29a0ee55..31bd4ec2 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/TriggerResultSet.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/TriggerResultSet.java @@ -1,10 +1,15 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. * Copyright (c) 2010, 2011 PostgreSQL Global Development Group * - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://wiki.tada.se/index.php?title=PLJava_License + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -23,7 +28,7 @@ */ public class TriggerResultSet extends SingleRowResultSet { - private ArrayList m_tupleChanges; + private ArrayList m_tupleChanges; private final TupleDesc m_tupleDesc; private final Tuple m_tuple; private final boolean m_readOnly; @@ -39,6 +44,7 @@ public TriggerResultSet(TupleDesc tupleDesc, Tuple tuple, boolean readOnly) /** * Cancel all changes made to the Tuple. */ + @Override public void cancelRowUpdates() throws SQLException { @@ -48,6 +54,7 @@ public void cancelRowUpdates() /** * Cancels all changes but doesn't really close the set. */ + @Override public void close() throws SQLException { @@ -58,6 +65,7 @@ public void close() * Returns the concurrency for this ResultSet. * @see java.sql.ResultSet#getConcurrency */ + @Override public int getConcurrency() throws SQLException { return m_readOnly ? CONCUR_READ_ONLY : CONCUR_UPDATABLE; @@ -66,6 +74,7 @@ public int getConcurrency() throws SQLException /** * Returns true if this row has been updated. */ + @Override public boolean rowUpdated() throws SQLException { @@ -75,6 +84,7 @@ public boolean rowUpdated() /** * Store this change for later use */ + @Override public void updateObject(int columnIndex, Object x) throws SQLException { @@ -82,24 +92,26 @@ public void updateObject(int columnIndex, Object x) throw new UnsupportedFeatureException("ResultSet is read-only"); if(m_tupleChanges == null) - m_tupleChanges = new ArrayList(); + m_tupleChanges = new ArrayList<>(); - m_tupleChanges.add(new Integer(columnIndex)); + m_tupleChanges.add(columnIndex); m_tupleChanges.add(x); } /** - * Return a 2 element array describing the changes that has been made to - * the contained Tuple. The first element is an int[] containing - * the index of each changed value. The second element is an Object[] - * with containing the corresponding values. + * Return a 3 element array describing the changes that have been made to + * the contained Tuple. The first element the original Tuple, the second + * an {@code int[]} containing + * the index of each changed value, and the third an {@code Object[]} + * containing the corresponding values. * - * @return The 2 element array or null if no change has been made. + * @return The 3 element array or null if no change has + * been made. 
*/ public Object[] getChangeIndexesAndValues() { - ArrayList changes = m_tupleChanges; + ArrayList changes = m_tupleChanges; if(changes == null) return null; @@ -114,12 +126,24 @@ public Object[] getChangeIndexesAndValues() for(int idx = 0; idx < top; ++idx) { indexes[idx] = ((Integer)changes.get(vIdx++)).intValue(); - values[idx] = changes.get(vIdx++); + Object v = changes.get(vIdx++); + TypeBridge.Holder vAlt = TypeBridge.wrap(v); + values[idx] = null == vAlt ? v : vAlt; } return new Object[] { m_tuple, indexes, values }; } - protected Object getObjectValue(int columnIndex) + /** + * If the value has not been changed, forwards to + * {@link Tuple#getObject(TupleDesc,int,Class) Tuple.getObject}, with the + * usual behavior for type coercion; if it has been changed, returns the + * exact object that was supplied with the change. + *
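For orientation, the class being modified here is the updatable ResultSet a PL/Java trigger function sees as its row: updateObject records (index, value) pairs, and getChangeIndexesAndValues later hands them, together with the original Tuple, back to the native code. A hedged sketch of the calling side, assuming the org.postgresql.pljava.TriggerData API (the column name and value are invented):

    import java.sql.ResultSet;
    import java.sql.SQLException;
    import org.postgresql.pljava.TriggerData;

    public class AuditTrigger
    {
        // Intended as a BEFORE INSERT OR UPDATE trigger: stamp the row that is
        // about to be stored. Each update lands in the change list kept by the
        // TriggerResultSet shown above.
        public static void touch(TriggerData td) throws SQLException
        {
            ResultSet newRow = td.getNew();
            newRow.updateString("modified_by", "pljava_trigger");
        }
    }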
<p>
      + * When the caller is the JDBC 4.1 {@link #getObject(int,Class)}, the caller + * will check and complain if the returned object is not of the right class. + */ + @Override // defined in ObjectResultSet + protected Object getObjectValue(int columnIndex, Class type) throws SQLException { // Check if this value has been changed. @@ -132,9 +156,10 @@ protected Object getObjectValue(int columnIndex) if(columnIndex == ((Integer)changes.get(idx)).intValue()) return changes.get(idx + 1); } - return m_tuple.getObject(this.getTupleDesc(), columnIndex); + return m_tuple.getObject(this.getTupleDesc(), columnIndex, type); } + @Override // defined in SingleRowResultSet protected final TupleDesc getTupleDesc() { return m_tupleDesc; @@ -146,6 +171,7 @@ protected final TupleDesc getTupleDesc() // ************************************************************ + @Override public boolean isClosed() throws SQLException { diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/TypeBridge.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/TypeBridge.java new file mode 100644 index 00000000..0b1aa03a --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/TypeBridge.java @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2018-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.jdbc; + +import static java.util.Collections.addAll; +import java.util.List; +import java.util.LinkedList; + +/** + * Encapsulate some information about Java object classes and their possible + * mappings to PostgreSQL types. + *
<p>
      + * This may be a temporary class that goes away entirely in a future major + * release of PL/Java that revamps how type mappings are determined. Or, it may + * evolve and take on greater responsibility in a revamped scheme: type mapping + * information is, at present, diffused and duplicated a lot of places in + * PL/Java, and bringing it into one place would not be a bad thing. + *
<p>
      + * For now, in the 1.5.x series, this is a simple stopgap so that the few places + * in PL/Java where an object type can be passed to PostgreSQL (SingleRowWriter, + * TriggerResultSet, SQLOutputToTuple, PreparedStatement) are able to pass an + * object that isn't of the class expected by default, and have the right native + * conversion get selected. All of those sites currently work by some variant of + * putting supplied objects into an Object array or list later passed to the + * native code, and when an object will not be of the expected class, what is + * stored in the array should be a TypeBridge.Holder for it. + */ +public abstract class TypeBridge +{ + /** + * Canonical name of the Java class or interface that this TypeBridge + * 'captures'. + *
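The 1.5.x stopgap described above reduces, at each of the call sites it names, to a single idiom: ask TypeBridge.wrap for a Holder and store whichever of the two references is non-null. A condensed sketch of that idiom (the enclosing class and array are placeholders; the real call sites are the updateObject and getChangeIndexesAndValues changes earlier in this diff):

    import org.postgresql.pljava.jdbc.TypeBridge;

    final class BridgeIdiom
    {
        // 'values' stands for whatever Object[] is later handed to native
        // code; a Holder signals that a non-default coercer must be chosen.
        static void store(Object[] values, int columnIndex, Object x)
        {
            TypeBridge.Holder alt = TypeBridge.wrap(x); // null: no bridge needed
            values[columnIndex - 1] = (null == alt) ? x : alt;
        }
    }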
<p>
      + * Held as a string so that the class does not need to be loaded for a + * TypeBridge to be made for it. There can be TypeBridges for classes that + * not all supported JRE versions provide. + */ + protected final String m_canonName; + + /** + * Oid of the PostgreSQL type to be associated by default with this Java + * class or interface. + *
<p>
      + * Stored as a simple int here, not a PL/Java Oid object, which I am tempted + * to deprecate. + */ + protected final int m_defaultOid; + + /** + * If the Java class associated with the TypeBridge is loaded and + * available, it can be cached here. + *
<p>
      + * That will always be the case after a {@link #captures} method has + * returned {@code true}. + */ + protected Class m_cachedClass; + + @SuppressWarnings("unchecked") + protected void setCachedClass(Class cls) + { + m_cachedClass = (Class)cls; + } + + /** + * List of TypeBridges to check, in order, for one that 'captures' a given + * class. + *
<p>
      + * This list is populated as TypeBridges are constructed, and whatever code + * calls the factory methods must take responsibility for the order of the + * list, by not constructing one TypeBridge earlier than another one that it + * would capture. + *
<p>
      + * This can't be checked automatically because the classes in question may + * not yet be loaded, or even available. + */ + private static List> m_candidates = new LinkedList<>(); + + /** + * Return an object wrapped, if it is of any type captured by a known + * TypeBridge. + * @param o An object, representing a value to be presented to PostgreSQL. + * @return A Holder wrapping o, or null if no known TypeBridge captures the + * type of o, or o itself is null. + */ + public static TypeBridge.Holder wrap(U o) + { + if ( null == o ) + return null; + Class c = o.getClass(); + for ( TypeBridge tb : m_candidates ) + if ( tb.captures(c) ) + { + @SuppressWarnings("unchecked") + TypeBridge tbt = (TypeBridge)tb; + return tbt.new Holder(o); + } + if ( o instanceof TypeBridge.Holder ) + throw new IllegalArgumentException("Not valid as argument: " + + o.toString()); + return null; + } + + private TypeBridge(String cName, int dfltOid) + { + if ( null == cName ) + throw new NullPointerException("TypeBridge cName must be nonnull."); + m_canonName = cName; + m_defaultOid = dfltOid; + m_candidates.add(this); + } + + /* + * For now, anyway, these factory methods are private; only native code + * will be calling them. + */ + + /** + * Construct a TypeBridge given the canonical name of a Java type that need + * not be loaded, but is known to be a class (not an interface). + */ + private static TypeBridge ofClass(String cName, int dOid) + { + return new OfClass<>(cName, dOid); + } + + /** + * Construct a TypeBridge given the canonical name of a Java type that need + * not be loaded, but is known to be an interface (not a class). + */ + private static TypeBridge ofInterface(String cName, int dOid) + { + return new OfInterface<>(cName, dOid); + } + + /** + * Construct a TypeBridge directly from a Class object, when available. + */ + private static TypeBridge of(Class c, int dOid) + { + String cn = c.getCanonicalName(); + TypeBridge tb = + c.isInterface() ? ofInterface(cn, dOid) : ofClass(cn, dOid); + tb.m_cachedClass = c; + return tb; + } + + /** + * Determine whether this TypeBridge 'captures' a given Class. + *
<p>
      + * If the class this TypeBridge represents has already been loaded and is + * cached here, the test is a simple {@code isAssignableFrom}. Otherwise, + * the test is conducted by climbing the superclasses or superinterfaces, as + * appropriate, of the passed Class, comparing canonical names. If a match + * is found, the winning Class object is cached before returning + * {@code true}. + */ + public final boolean captures(Class c) + { + if ( null != m_cachedClass ) + return m_cachedClass.isAssignableFrom(c); + return virtuallyCaptures(c); + } + + /** + * Method the two subclasses implement to conduct the "Class-less" + * superclass or superinterface check, respectively. + */ + protected abstract boolean virtuallyCaptures(Class c); + + /** + * TypeBridge subclass representing a class (not an interface). + *
<p>
      + * Its {@code virtuallyCaptures} method simply climbs the superclass chain. + */ + final static class OfClass extends TypeBridge + { + private OfClass(String cn, int oid) { super(cn, oid); } + + @Override + protected boolean virtuallyCaptures(Class c) + { + for ( ; null != c ; c = c.getSuperclass() ) + { + if ( ! m_canonName.equals(c.getCanonicalName()) ) + continue; + setCachedClass(c); + return true; + } + return false; + } + } + + /** + * TypeBridge subclass representing an interface (not a class). + *
<p>
      + * Its {@code virtuallyCaptures} method climbs the superinterfaces, + * breadth first. + */ + final static class OfInterface extends TypeBridge + { + private OfInterface(String cn, int oid) { super(cn, oid); } + + @Override + protected boolean virtuallyCaptures(Class c) + { + List> q = new LinkedList<>(); + q.add(c); + + while ( 0 < q.size() ) + { + c = q.remove(0); + + if ( ! c.isInterface() ) + { + addAll(q, c.getInterfaces()); + c = c.getSuperclass(); + if ( null != c ) + q.add(c); + continue; + } + + if ( m_canonName.equals(c.getCanonicalName()) ) + { + setCachedClass(c); + return true; + } + addAll(q, c.getInterfaces()); + } + return false; + } + } + + /** + * Class that holds an object reference being passed from Java to PG, when + * the object is of one of the known classes that were not accepted by + * PL/Java's JDBC driver before PL/Java 1.5.1. + *
<p>
      + * When a native-code Object-to-Datum coercer encounters a Holder instead of + * an object of the normally-expected class for the PostgreSQL type, it can + * retrieve the class, classname, default PG type oid, and the payload + * object itself, from the Holder, and obtain and apply a different coercer + * appropriate to the class. + */ + public final class Holder + { + private final S m_payload; + + private Holder(S o) + { + m_payload = o; + } + + public Class bridgedClass() + { + return m_cachedClass; + } + + public String className() + { + return m_canonName; + } + + public S payload() + { + return m_payload; + } + + public int defaultOid() + { + return m_defaultOid; + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/TypeOid.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/TypeOid.java index 12f86b88..9112bd8b 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/TypeOid.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/TypeOid.java @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2021 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.jdbc; @@ -12,22 +18,62 @@ * Provides constants for well-known backend OIDs for the types we commonly * use. */ -public class TypeOid { - public static final Oid INVALID = new Oid(0); - public static final Oid INT2 = new Oid(21); - public static final Oid INT4 = new Oid(23); - public static final Oid INT8 = new Oid(20); - public static final Oid TEXT = new Oid(25); - public static final Oid NUMERIC = new Oid(1700); - public static final Oid FLOAT4 = new Oid(700); - public static final Oid FLOAT8 = new Oid(701); - public static final Oid BOOL = new Oid(16); - public static final Oid DATE = new Oid(1082); - public static final Oid TIME = new Oid(1083); - public static final Oid TIMESTAMP = new Oid(1114); - public static final Oid TIMESTAMPTZ = new Oid(1184); - public static final Oid BYTEA = new Oid(17); - public static final Oid VARCHAR = new Oid(1043); - public static final Oid OID = new Oid(26); - public static final Oid BPCHAR = new Oid(1042); +public class TypeOid +{ + /* + * These constants (well, the Oid reference ones) have been here for ages, + * so some code auditing is needed to determine where they are used, before + * going a different direction with Oid. 
+ */ + public static final int InvalidOid = 0; + public static final int INT2OID = 21; + public static final int INT4OID = 23; + public static final int INT8OID = 20; + public static final int TEXTOID = 25; + public static final int NUMERICOID = 1700; + public static final int FLOAT4OID = 700; + public static final int FLOAT8OID = 701; + public static final int BOOLOID = 16; + public static final int DATEOID = 1082; + public static final int TIMEOID = 1083; + public static final int TIMESTAMPOID = 1114; + public static final int TIMESTAMPTZOID = 1184; + public static final int BYTEAOID = 17; + public static final int VARCHAROID = 1043; + public static final int OIDOID = 26; + public static final int BPCHAROID = 1042; + + public static final Oid INVALID = new Oid(InvalidOid); + public static final Oid INT2 = new Oid(INT2OID); + public static final Oid INT4 = new Oid(INT4OID); + public static final Oid INT8 = new Oid(INT8OID); + public static final Oid TEXT = new Oid(TEXTOID); + public static final Oid NUMERIC = new Oid(NUMERICOID); + public static final Oid FLOAT4 = new Oid(FLOAT4OID); + public static final Oid FLOAT8 = new Oid(FLOAT8OID); + public static final Oid BOOL = new Oid(BOOLOID); + public static final Oid DATE = new Oid(DATEOID); + public static final Oid TIME = new Oid(TIMEOID); + public static final Oid TIMESTAMP = new Oid(TIMESTAMPOID); + public static final Oid TIMESTAMPTZ = new Oid(TIMESTAMPTZOID); + public static final Oid BYTEA = new Oid(BYTEAOID); + public static final Oid VARCHAR = new Oid(VARCHAROID); + public static final Oid OID = new Oid(OIDOID); + public static final Oid BPCHAR = new Oid(BPCHAROID); + + /* + * Added in 2019. The numeric constant will be used, but no need is foreseen + * for an Oid-reference constant. + */ + public static final int PG_NODE_TREEOID = 194; + /* + * Likewise in 2020. + */ + public static final int TRIGGEROID = 2279; + + /* + * Before Java 8 with the @Native annotation, a class needs at least one + * native method to trigger generation of a .h file. + */ + private static native void _dummy(); } diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/UnsupportedFeatureException.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/UnsupportedFeatureException.java index dff8d6c0..6b4e822c 100644 --- a/pljava/src/main/java/org/postgresql/pljava/jdbc/UnsupportedFeatureException.java +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/UnsupportedFeatureException.java @@ -1,17 +1,25 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Thomas Hallgren + * Chapman Flack */ package org.postgresql.pljava.jdbc; -import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; /** + * An {@code SQLException} specific to the case of attempted use of + * an unsupported feature. 
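A small illustration of why the plain int constants added to TypeOid above are convenient: they are compile-time constants, so they can appear directly in a switch with no Oid objects constructed. The helper below is hypothetical and not part of the patch:

    import java.sql.Types;
    import org.postgresql.pljava.jdbc.TypeOid;

    final class OidToJdbc
    {
        // Map a few well-known PostgreSQL type OIDs to java.sql.Types codes.
        static int jdbcTypeFor(int pgTypeOid)
        {
            switch ( pgTypeOid )
            {
                case TypeOid.INT2OID:   return Types.SMALLINT;
                case TypeOid.INT4OID:   return Types.INTEGER;
                case TypeOid.INT8OID:   return Types.BIGINT;
                case TypeOid.FLOAT4OID: return Types.REAL;
                case TypeOid.FLOAT8OID: return Types.DOUBLE;
                case TypeOid.BOOLOID:   return Types.BOOLEAN;
                case TypeOid.TEXTOID:   return Types.VARCHAR;
                default:                return Types.OTHER;
            }
        }
    }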
* @author Thomas Hallgren */ -public class UnsupportedFeatureException extends SQLException +public class UnsupportedFeatureException extends SQLFeatureNotSupportedException { private static final long serialVersionUID = 7956037664745636982L; diff --git a/pljava/src/main/java/org/postgresql/pljava/jdbc/XMLEventToStreamConsumer.java b/pljava/src/main/java/org/postgresql/pljava/jdbc/XMLEventToStreamConsumer.java new file mode 100644 index 00000000..9a7d99a8 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/jdbc/XMLEventToStreamConsumer.java @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2019-2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.jdbc; + +import javax.xml.namespace.QName; + +import javax.xml.stream.Location; +import javax.xml.stream.XMLStreamConstants; +import javax.xml.stream.XMLStreamException; +import javax.xml.stream.XMLStreamWriter; + +import javax.xml.stream.events.*; + +import javax.xml.stream.util.XMLEventConsumer; + +import org.postgresql.pljava.internal.Checked; + +/** + * Consume a stream of StAX {@code XMLEvent}s, writing them to an + * {@code XMLStreamWriter}. + *
<p>
      + * This entire class would be completely unnecessary if not for the + * regression in Java 9 and later that leaves + * {@code XMLOutputFactory.createXMLEventWriter} throwing a + * {@code ClassCastException} if passed a {@code StAXResult} wrapping an + * arbitrary {@code XMLStreamWriter} implementation, which works perfectly + * up through Java 8. Java 9 breaks it, demanding this soul-crushing workaround. + *
<p>
      + * Making the best of a bad situation, in reimplementing this, it is possible to + * honor the distinction between empty elements and start/end tags with nothing + * in between. An {@code XMLStreamWriter} has distinct methods + * {@code writeStartElement} and {@code writeEmptyElement}, but the + * {@code XMLEventWriter} API offers no obvious way to pass along that + * distinction. There is a nonobvious way, though, if the {@code StartElement} + * and {@code EndElement} events have parser-supplied {@code Location}s, and + * they are equal (and not 'unknown' in all values). + */ +class XMLEventToStreamConsumer +implements XMLEventConsumer, XMLStreamConstants +{ + protected final XMLStreamWriter m_xsw; + protected StartElement m_startElement; + protected Location m_location; + + /** + * Construct an {@code XMLEventToStreamConsumer} that writes to the + * given {@code XMLStreamWriter}. + */ + XMLEventToStreamConsumer(XMLStreamWriter xsw) + { + if ( null == xsw ) + throw new NullPointerException("XMLEventToStreamConsumer"); + m_xsw = xsw; + } + + /** + * Dispatch an {@code XMLEvent} to the corresponding specialized + * {@code add} method. + */ + @Override + public void add(XMLEvent event) throws XMLStreamException + { + if ( null == event ) + throw new NullPointerException("XMLEventToStreamConsumer.add"); + + switch ( event.getEventType() ) + { + case COMMENT: add( (Comment) event); break; + case PROCESSING_INSTRUCTION: add((ProcessingInstruction) event); break; + case CDATA: // fallthrough + case CHARACTERS: add( (Characters) event); break; + case DTD: add( (DTD) event); break; + case ENTITY_REFERENCE: add( (EntityReference) event); break; + case START_DOCUMENT: add( (StartDocument) event); break; + case END_DOCUMENT: add( (EndDocument) event); break; + case START_ELEMENT: add( (StartElement) event); break; + case END_ELEMENT: add( (EndElement) event); break; + default: + throw new XMLStreamException( + "Unexpected XMLEvent type " + event.getEventType()); + } + } + + protected void addNonEmptyIfCached() throws XMLStreamException + { + if ( null == m_startElement ) + return; + add(m_startElement, false); + m_startElement = null; + m_location = null; + } + + protected void add(Comment event) throws XMLStreamException + { + addNonEmptyIfCached(); + m_xsw.writeComment(event.getText()); + } + + protected void add(ProcessingInstruction event) throws XMLStreamException + { + addNonEmptyIfCached(); + m_xsw.writeProcessingInstruction(event.getTarget(), event.getData()); + } + + protected void add(Characters event) throws XMLStreamException + { + addNonEmptyIfCached(); + String content = event.getData(); + if ( event.isCData() ) + m_xsw.writeCData(content); + else + m_xsw.writeCharacters(content); + } + + protected void add(DTD event) throws XMLStreamException + { + // no element precedes a DTD + m_xsw.writeDTD(event.getDocumentTypeDeclaration()); + } + + protected void add(EntityReference event) throws XMLStreamException + { + addNonEmptyIfCached(); + m_xsw.writeEntityRef(event.getName()); + } + + protected void add(StartDocument event) throws XMLStreamException + { + String version = event.getVersion(); + String encoding = event.getCharacterEncodingScheme(); + if ( event.encodingSet() ) + m_xsw.writeStartDocument(encoding, version); + else + m_xsw.writeStartDocument(version); + } + + protected void add(EndDocument event) throws XMLStreamException + { + if ( null != m_startElement ) + { + add(m_startElement, true); + m_startElement = null; + m_location = null; + } + m_xsw.writeEndDocument(); + } 
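To make the workaround concrete, this class is meant to sit behind an ordinary event pump: every XMLEvent pulled from a reader is passed to add(), which forwards it to the wrapped XMLStreamWriter, with start elements held back briefly so the Location comparison described above can choose writeEmptyElement. A minimal sketch of such a pump (the consumer is package-private, so this only shows the shape of the loop; the names here are not from the patch):

    import javax.xml.stream.XMLEventReader;
    import javax.xml.stream.XMLStreamException;
    import javax.xml.stream.util.XMLEventConsumer;

    final class EventPump
    {
        // Drain every event from the reader into the consumer, e.g. an
        // XMLEventToStreamConsumer wrapping some XMLStreamWriter.
        static void drain(XMLEventReader reader, XMLEventConsumer sink)
        throws XMLStreamException
        {
            try
            {
                while ( reader.hasNext() )
                    sink.add(reader.nextEvent());
            }
            finally
            {
                reader.close();
            }
        }
    }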
+ + protected void add(StartElement event) + throws XMLStreamException + { + addNonEmptyIfCached(); + m_startElement = event; + m_location = event.getLocation(); + } + + protected void add(StartElement event, boolean empty) + throws XMLStreamException + { + QName qn = event.getName(); + if ( empty ) + m_xsw.writeEmptyElement( + qn.getPrefix(), qn.getLocalPart(), qn.getNamespaceURI()); + else + m_xsw.writeStartElement( + qn.getPrefix(), qn.getLocalPart(), qn.getNamespaceURI()); + Checked.Consumer.use((Namespace n) -> add(n)) + .in(event.getNamespaces()::forEachRemaining); + Checked.Consumer.use((Attribute a) -> add(a)) + .in(event.getAttributes()::forEachRemaining); + } + + protected void add(EndElement event) throws XMLStreamException + { + if ( null != m_startElement ) + { + boolean empty = locationsEqual(m_location, event.getLocation()); + add(m_startElement, empty); + m_startElement = null; + m_location = null; + if ( empty ) + return; + } + m_xsw.writeEndElement(); + } + + protected void add(Attribute a) throws XMLStreamException + { + QName n = a.getName(); + m_xsw.writeAttribute( + n.getPrefix(), n.getNamespaceURI(), n.getLocalPart(), a.getValue()); + } + + protected void add(Namespace n) throws XMLStreamException + { + m_xsw.writeNamespace(n.getPrefix(), n.getNamespaceURI()); + } + + protected static boolean locationsEqual(Location a, Location b) + { + if ( null == a || null == b ) + return false; + if ( ! locationIdsEqual(a.getPublicId(), b.getPublicId()) ) + return false; + if ( ! locationIdsEqual(a.getSystemId(), b.getSystemId()) ) + return false; + int aOffset = a.getCharacterOffset(); + if ( b.getCharacterOffset() != aOffset ) + return false; + int aColumn = a.getColumnNumber(); + if ( b.getColumnNumber() != aColumn ) + return false; + int aLine = a.getLineNumber(); + if ( b.getLineNumber() != aLine ) + return false; + return ( -1 != aOffset ) || ( -1 != aColumn && -1 != aLine ); + } + + private static boolean locationIdsEqual(String a, String b) + { + if ( a == b ) + return true; + if ( null != a ) + return a.equals(b); + return b.equals(a); + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/management/Commands.java b/pljava/src/main/java/org/postgresql/pljava/management/Commands.java index 97f74a6e..1ca62ea9 100644 --- a/pljava/src/main/java/org/postgresql/pljava/management/Commands.java +++ b/pljava/src/main/java/org/postgresql/pljava/management/Commands.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2015 Tada AB and other contributors, as listed below. + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. * * All rights reserved. 
This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -18,12 +18,23 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; -import java.nio.charset.Charset; +import java.net.Authenticator; +import java.net.HttpURLConnection; +import java.net.PasswordAuthentication; +import java.net.URI; +import java.net.URL; +import java.net.URLConnection; +import java.net.URLPermission; +import java.nio.ByteBuffer; +import static java.nio.charset.StandardCharsets.UTF_8; import java.nio.charset.CharsetDecoder; -import java.io.UnsupportedEncodingException; +import java.nio.charset.CharacterCodingException; +import java.security.Permission; +import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLData; +import java.sql.SQLDataException; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.sql.SQLNonTransientException; @@ -31,6 +42,8 @@ import java.sql.Statement; import java.text.ParseException; import java.util.ArrayList; +import static java.util.Arrays.fill; +import static java.util.Objects.requireNonNullElse; import java.util.jar.Attributes; import java.util.jar.JarEntry; import java.util.jar.JarInputStream; @@ -43,160 +56,172 @@ import org.postgresql.pljava.Session; import org.postgresql.pljava.SessionManager; +import static org.postgresql.pljava.annotation.processing.DDRWriter.eQuote; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import static + org.postgresql.pljava.sqlgen.Lexicals.Identifier.Qualified.nameFromCatalog; + import org.postgresql.pljava.internal.AclId; import org.postgresql.pljava.internal.Backend; +import org.postgresql.pljava.internal.Checked; import org.postgresql.pljava.internal.Oid; -import org.postgresql.pljava.jdbc.SQLUtils; +import static org.postgresql.pljava.internal.Privilege.doPrivileged; +import static org.postgresql.pljava.jdbc.SQLUtils.getDefaultConnection; import org.postgresql.pljava.sqlj.Loader; +import static org.postgresql.pljava.sqlj.Loader.PUBLIC_SCHEMA; import org.postgresql.pljava.annotation.Function; import org.postgresql.pljava.annotation.SQLAction; import org.postgresql.pljava.annotation.SQLType; +import static org.postgresql.pljava.annotation.Function.OnNullInput.CALLED; import static org.postgresql.pljava.annotation.Function.Security.DEFINER; /** - * This methods of this class are implementations of SQLJ commands. - *

      SQLJ functions

      - *

      install_jar

      - * The install_jar command loads a jar file from a location appointed by an URL - * or a binary image that constitutes the contents of a jar file into the SQLJ - * jar repository. It is an error if a jar with the given name already exists in - * the repository. - *

      Usage 1

+ * The methods of this class are implementations of SQLJ procedures (and some + * related ones beyond what ISO 9075-13 specifies). + *

      SQLJ procedures

      + *

      install_jar

      + * The install_jar procedure loads a jar file from a location appointed by an + * URL or a binary image that constitutes the contents of a jar file into the + * SQLJ jar repository. It is an error if a jar with the given name already + * exists in the repository. + *

      Usage 1

      *
      SELECT sqlj.install_jar(<jar_url>, <jar_name>, <deploy>); *
      - *

      Parameters

      - *
      + *

      Parameters

      + *
      Parameters for sqlj.install_jar(url...)
      * - * + * * * * - * + * * * * - * + * * * *
      Parameters for sqlj.install_jar(url...)
      jar_urljar_urlThe URL that denotes the location of the jar that should be loaded
      jar_namejar_nameThis is the name by which this jar can be referenced once it has been * loaded
      deploydeployTrue if the jar should be deployed according to a {@link * org.postgresql.pljava.management.SQLDeploymentDescriptor deployment * descriptor}, false otherwise
      - *

      Usage 2

      + *

      Usage 2

      *
      SELECT sqlj.install_jar(<jar_image>, <jar_name>, <deploy>); *
      - *

      Parameters

      - *
      Parameters for + *

      Parameters

      + *
      * - * + * * * * - * + * * * * - * + * * * *
      Parameters for * sqlj.install_jar(bytea...)
      jar_imagejar_imageThe byte array that constitutes the contents of the jar that should be * loaded
      jar_namejar_nameThis is the name by which this jar can be referenced once it has been * loaded
      deploydeployTrue if the jar should be deployed according to a {@link * org.postgresql.pljava.management.SQLDeploymentDescriptor deployment * descriptor}, false otherwise
      - *

      replace_jar

      - * The replace_jar will replace a loaded jar with another jar. Use this command - * to update already loaded files. It's an error if the jar is not found. - *

      Usage 1

      + *

      replace_jar

      + * The replace_jar procedure will replace a loaded jar with another jar. Use + * this command to update already loaded files. It's an error if the jar is not + * found. + *

      Usage 1

      *
      SELECT sqlj.replace_jar(<jar_url>, <jar_name>, <redeploy>); *
      - *

      Parameters

      - *
      + *

      Parameters

      + *
      Parameters for sqlj.replace_jar(url...)
      * - * + * * * * - * + * * * * - * + * * * *
      Parameters for sqlj.replace_jar(url...)
      jar_urljar_urlThe URL that denotes the location of the jar that should be loaded
      jar_namejar_nameThe name of the jar to be replaced
      redeployredeployTrue if the old and new jar should be undeployed and deployed according * to their respective {@link * org.postgresql.pljava.management.SQLDeploymentDescriptor deployment * descriptors}, false otherwise
      - *

      Usage 2

      + *

      Usage 2

      *
      SELECT sqlj.replace_jar(<jar_image>, <jar_name>, <redeploy>); *
      - *

      Parameters

      - *
      Parameters for + *

      Parameters

      + *
      * - * + * * * * - * + * * * * - * + * * * *
      Parameters for * sqlj.replace_jar(bytea...)
      jar_imagejar_imageThe byte array that constitutes the contents of the jar that should be * loaded
      jar_namejar_nameThe name of the jar to be replaced
      redeployredeployTrue if the old and new jar should be undeployed and deployed according * to their respective {@link * org.postgresql.pljava.management.SQLDeploymentDescriptor deployment * descriptors}, false otherwise
      - *

      remove_jar

      - * The remove_jar will drop the jar from the jar repository. Any classpath that - * references this jar will be updated accordingly. It's an error if the jar is - * not found. - *

      Usage

      + *

      remove_jar

      + * The remove_jar procedure will drop the jar from the jar repository. Any + * classpath that references this jar will be updated accordingly. It's an error + * if no such jar is installed. + *

      Usage

      *
      SELECT sqlj.remove_jar(<jar_name>, <undeploy>); *
      - *

      Parameters

      - *
      + *

      Parameters

      + *
      Parameters for sqlj.remove_jar
      * - * + * * * * - * + * * * *
      Parameters for sqlj.remove_jar
      jar_namejar_nameThe name of the jar to be removed
      undeployundeployTrue if the jar should be undeployed according to its {@link * org.postgresql.pljava.management.SQLDeploymentDescriptor deployment * descriptor}, false otherwise
      - *

      get_classpath

      - * The get_classpath will return the classpath that has been defined for the - * given schema or NULL if the schema has no classpath. It's an error if the - * given schema does not exist. - *

      Usage

      + *

      get_classpath

      + * The get_classpath procedure will return the classpath that has been defined + * for the given schema or NULL if the schema has no classpath. It's an error if + * the given schema does not exist. + *

      Usage

      *
      SELECT sqlj.get_classpath(<schema>); *
      - *

      Parameters

      - *
      + *

      Parameters

      + *
      Parameters for sqlj.get_classpath
      * * * * *
      Parameters for sqlj.get_classpath
      schemaThe name of the schema
      - *

      set_classpath

      - * The set_classpath will define a classpath for the given schema. A classpath - * consists of a colon separated list of jar names. It's an error if the given - * schema does not exist or if one or more jar names references non existent - * jars. - *

      Usage

      + *

      set_classpath

      + * The set_classpath procedure will define a classpath for the given schema. A + * classpath consists of a colon separated list of jar names. It's an error if + * the given schema does not exist or if one or more jar names references + * non-existent jars. + *

      Usage

      *
      SELECT sqlj.set_classpath(<schema>, <classpath>); *
      - *

      Parameters

      - *
      + *

      Parameters

      + *
      Parameters for sqlj.set_classpath
      * * * @@ -206,14 +231,14 @@ * * *
      Parameters for sqlj.set_classpath
      schemaThe name of the schemaThe colon separated list of jar names
      - *

      add_type_mapping

      - * The add_type_mapping defines the mapping between an SQL type and a Java - * class. - *

      Usage

      + *

      add_type_mapping

      + * The add_type_mapping procedure defines the mapping between an SQL type and a + * Java class. + *

      Usage

      *
      SELECT sqlj.add_type_mapping(<sqlTypeName>, <className>); *
      - *

      Parameters

      - *
      + *

      Parameters

      + *
      Parameters for sqlj.add_type_mapping
      * * * * *
      Parameters for sqlj.add_type_mapping
      sqlTypeNameThe name of the SQL type. The name can be qualified with a @@ -226,14 +251,14 @@ * effect for the current schema
      - *

      drop_type_mapping

      - * The drop_type_mapping removes the mapping between an SQL type and a Java - * class. - *

      Usage

      + *

      drop_type_mapping

      + * The drop_type_mapping procedure removes the mapping between an SQL type and a + * Java class. + *

      Usage

      *
      SELECT sqlj.drop_type_mapping(<sqlTypeName>); *
      - *

      Parameters

      - *
      + *

      Parameters

      + *
      Parameters for sqlj.drop_type_mapping
      * * * * *
      Parameters for sqlj.drop_type_mapping
      sqlTypeNameThe name of the SQL type. The name can be qualified with a @@ -241,6 +266,41 @@ * to the current setting of the search_path.
      + *

      alias_java_language

      + * The {@link #aliasJavaLanguage alias_java_language procedure} issues + * a PostgreSQL {@code CREATE LANGUAGE} command to define a named "language" + * that is an alias for PL/Java. The name can appear in the + * Java security policy to grant + * specific permissions to functions created in this "language". + *

      Usage

      + *
      + * {@code SELECT sqlj.alias_java_language(, sandboxed => );} + *
      + *

      Parameters

      + *
      + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
      Parameters for sqlj.alias_java_language
      aliasThe name desired for the language alias. Language names are not + * schema-qualified.
      sandboxedWhether to create a sandboxed "{@code TRUSTED}" language, in which + * functions can be created by any role granted {@code USAGE} permission (true), + * or an unsandboxed one in which only superusers may create functions (false). + *
      orReplaceOptional parameter, default false. + * See {@link #aliasJavaLanguage the method documentation} for details.
      commentOptional parameter. If empty string (the default), a comment is supplied. + * See {@link #aliasJavaLanguage the method documentation} for details.
      * * @author Thomas Hallgren * @author Chapman Flack @@ -248,37 +308,50 @@ /* * Attention: any evolution of the schema here needs to be reflected in * o.p.p.internal.InstallHelper.SchemaVariant and .recognizeSchema(). + * + * Schema-qualification of a type with a typmod, e.g. pg_catalog.varchar(100), + * is possible from PostgreSQL 8.3 onward, but not in 8.2. As a compromise, use + * the two-word CHARACTER VARYING syntax, to evade capture by a user type. + * + * In this (1.5.0) incarnation of the schema, jar_repository and jar_entry are + * both indexed by SERIAL columns. The replace_jar operation is an UPDATE to + * jar_repository (so the jar's id is preserved), but deletes and reinserts to + * jar_entry (so ALL classes get new ids). This makes the entryId sufficient as + * a class-cache token to ensure old cached versions are recognized as invalid + * (although at the cost of doing so for *every single class* in a jar even if + * many are unchanged). It is used that way in the cache-token construction in + * o.p.p.sqlj.Loader, which could need to be revisited if this behavior changes. */ -@SQLAction(install={ +@SQLAction(provides="sqlj.tables", install={ " CREATE TABLE sqlj.jar_repository(" + " jarId SERIAL PRIMARY KEY," + -" jarName VARCHAR(100) UNIQUE NOT NULL," + -" jarOrigin VARCHAR(500) NOT NULL," + -" jarOwner NAME NOT NULL," + -" jarManifest TEXT" + -" )", +" jarName CHARACTER VARYING(100) UNIQUE NOT NULL," + +" jarOrigin CHARACTER VARYING(500) NOT NULL," + +" jarOwner pg_catalog.NAME NOT NULL," + +" jarManifest pg_catalog.TEXT" + +" ) DISTRIBUTED REPLICATED", " COMMENT ON TABLE sqlj.jar_repository IS" + " 'Information on jars loaded by PL/Java, one row per jar.'", " GRANT SELECT ON sqlj.jar_repository TO public", " CREATE TABLE sqlj.jar_entry(" + " entryId SERIAL PRIMARY KEY," + -" entryName VARCHAR(200) NOT NULL," + +" entryName CHARACTER VARYING(200) NOT NULL," + " jarId INT NOT NULL" + " REFERENCES sqlj.jar_repository ON DELETE CASCADE," + -" entryImage BYTEA NOT NULL," + +" entryImage pg_catalog.BYTEA NOT NULL," + " UNIQUE(jarId, entryName)" + -" )", +" ) DISTRIBUTED REPLICATED", " COMMENT ON TABLE sqlj.jar_entry IS" + " 'Name and content of each entry in every jar loaded by PL/Java.'", " GRANT SELECT ON sqlj.jar_entry TO public", " CREATE TABLE sqlj.jar_descriptor(" + " jarId INT REFERENCES sqlj.jar_repository ON DELETE CASCADE," + -" ordinal INT2," + +" ordinal pg_catalog.INT2," + " PRIMARY KEY (jarId, ordinal)," + " entryId INT NOT NULL REFERENCES sqlj.jar_entry ON DELETE CASCADE" + -" )", +" ) DISTRIBUTED REPLICATED", " COMMENT ON TABLE sqlj.jar_descriptor IS" + " 'Associates each jar with zero-or-more deployment descriptors (a row " + "for each), with ordinal indicating their order of mention in the " + @@ -286,12 +359,12 @@ " GRANT SELECT ON sqlj.jar_descriptor TO public", " CREATE TABLE sqlj.classpath_entry(" + -" schemaName VARCHAR(30) NOT NULL," + -" ordinal INT2 NOT NULL," + +" schemaName CHARACTER VARYING(30) NOT NULL," + +" ordinal pg_catalog.INT2 NOT NULL," + " jarId INT NOT NULL" + " REFERENCES sqlj.jar_repository ON DELETE CASCADE," + " PRIMARY KEY(schemaName, ordinal)" + -" )", +" ) DISTRIBUTED REPLICATED", " COMMENT ON TABLE sqlj.classpath_entry IS" + " 'Associates each schema with zero-or-more jars (a row " + "for each), with ordinal indicating their order of precedence in the " + @@ -300,9 +373,9 @@ " CREATE TABLE sqlj.typemap_entry(" + " mapId SERIAL PRIMARY KEY," + -" javaName VARCHAR(200) NOT NULL," + -" sqlName NAME NOT NULL" + -" )", +" javaName 
CHARACTER VARYING(200) NOT NULL," + +" sqlName pg_catalog.NAME NOT NULL" + +" ) DISTRIBUTED REPLICATED", " COMMENT ON TABLE sqlj.typemap_entry IS" + " 'A row for each SQL type <-> Java type custom mapping.'", " GRANT SELECT ON sqlj.typemap_entry TO public" @@ -310,49 +383,170 @@ " DROP TABLE sqlj.typemap_entry", " DROP TABLE sqlj.jar_repository CASCADE" }) +@SQLAction(provides="alias_java_language", install={ +" SELECT " + +" pg_catalog.set_config('pljava.implementors', 'alias_java_language,' " + +" || pg_catalog.current_setting('pljava.implementors'), true)" +}) public class Commands { private final static Logger s_logger = Logger.getLogger(Commands.class .getName()); + private static final Identifier.Simple s_public_schema = + Identifier.Simple.fromCatalog("public"); + + /** + * An {@link Authenticator} that will try the {@code userinfo} of the + * requesting URL if present. + *
<p>
      + * Beware that such URLs will appear in + * {@code sqlj.jar_repository.jarorigin} if used to install a jar! + */ + private static class EmbeddedPwdAuthenticator extends Authenticator + { + private EmbeddedPwdAuthenticator() { } + + static final EmbeddedPwdAuthenticator INSTANCE = + new EmbeddedPwdAuthenticator(); + + @Override + protected PasswordAuthentication getPasswordAuthentication() + { + String userinfo = + URI.create(getRequestingURL().toString()).getUserInfo(); + if ( null == userinfo ) + return null; + int len = userinfo.length(); + int uend = userinfo.indexOf(':'); + int pstart; + if ( -1 == uend ) + uend = pstart = len; + else + pstart = 1 + uend; + String u = userinfo.substring(0, uend); + char[] p = new char[len - pstart]; + try + { + userinfo.getChars(pstart, len, p, 0); + return new PasswordAuthentication(u, p); + } + finally + { + fill(p, '\245'); // PasswordAuthentication clones it + } + } + } + /** * Reads the jar found at the specified URL and stores the entries in the * jar_entry table. * * @param jarId The id used for the foreign key to the jar_repository table + * @param urlString The url to be read + */ + static void addClassImages(int jarId, String urlString) + throws SQLException + { + try + { + @SuppressWarnings("deprecation") // until next PL/Java major rev + URL url = new URL(urlString); + URLConnection uc = url.openConnection(); + uc.setRequestProperty("Accept", + "application/java-archive, " + + "application/jar;q=0.9, application/jar-archive;q=0.9, " + + "application/x-java-archive;q=0.9, " + + "application/*;q=0.3, */*;q=0.2" + ); + long[] sz = new long[1]; + Permission[] least = { uc.getPermission() }; + + if ( uc instanceof HttpURLConnection ) + { + /* + * Augment what uc returned as the least privilege set needed + * to connect. HttpURLConnection's getPermission method is older + * than URLPermission, and it only returns a SocketPermission. + * Set up 'least' to include both, so as not to end up with an + * empty permission set when 'least' includes one and the policy + * granted the other. + */ + least = new Permission[] { + least[0], + new URLPermission(urlString, "GET:Accept") + }; + + /* + * In case authentication is needed, set an Authenticator that + * will try userinfo from the URL if present. (Beware that jar + * origin URLs are stored in sqlj.jar_repository.jarorigin!) + */ + ((HttpURLConnection)uc).setAuthenticator( + EmbeddedPwdAuthenticator.INSTANCE); + } + + /* + * Do uc.connect() with PL/Java implementation's permissions, but + * narrowed to only what uc says it needs to make this connection. + */ + try ( + InputStream urlStream = doPrivileged(() -> + { + uc.connect(); + sz[0] = uc.getContentLengthLong(); + return uc.getInputStream(); + }, null, least) + ) + { + addClassImages(jarId, urlStream, sz[0]); + } + } + catch(IOException e) + { + throw new SQLException("reading jar file: " + + e.toString(), "58030", e); + } + } + + /** + * Add class images from an already opened stream. * @param urlStream An InputStream (opened on what may have been a URL) * @param sz The expected size of the stream, used as a worst-case * mark/reset limit. The caller might pass -1 if the URLConnection can't * determine a size in advance (a generous guess will be made in that case). 
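For reference, the Authenticator above only matters when the jar URL itself carries credentials. Invoked from inside the backend, that might look like the following (server-side JDBC over PL/Java's jdbc:default:connection; the host, credentials and jar name are invented, and, as the comment above warns, the full URL including userinfo is stored in sqlj.jar_repository.jarorigin):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    final class InstallExample
    {
        static void installFromPrivateRepo() throws SQLException
        {
            try (
                Connection c =
                    DriverManager.getConnection("jdbc:default:connection");
                Statement s = c.createStatement()
            )
            {
                s.execute(
                    "SELECT sqlj.install_jar(" +
                    "'https://deploy:s3cret@repo.example.com/pljava-ext.jar'," +
                    " 'pljava_ext', true)");
            }
        }
    }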
* @throws SQLException */ - public static void addClassImages(int jarId, InputStream urlStream, int sz) + static void addClassImages(int jarId, InputStream urlStream, long sz) throws SQLException { - PreparedStatement stmt = null; - PreparedStatement descIdFetchStmt = null; - PreparedStatement descIdStoreStmt = null; - ResultSet rs = null; - - try + try ( + Connection conn = getDefaultConnection(); + PreparedStatement stmt = conn.prepareStatement( + "INSERT INTO sqlj.jar_entry(entryName, jarId, entryImage) " + + "VALUES (?, ?, ?)"); + PreparedStatement descIdFetchStmt = conn.prepareStatement( + "SELECT entryId FROM sqlj.jar_entry " + + "WHERE jarId OPERATOR(pg_catalog.=) ?" + + " AND entryName OPERATOR(pg_catalog.=) ?"); + PreparedStatement descIdStoreStmt = conn.prepareStatement( + "INSERT INTO sqlj.jar_descriptor (jarId, entryId, ordinal)" + + " VALUES ( ?, ?, ? )"); + ) { byte[] buf = new byte[1024]; ByteArrayOutputStream img = new ByteArrayOutputStream(); - stmt = SQLUtils - .getDefaultConnection() - .prepareStatement( - "INSERT INTO sqlj.jar_entry(entryName, jarId, entryImage) VALUES(?, ?, ?)"); BufferedInputStream bis = new BufferedInputStream( urlStream); String manifest = rawManifest( bis, sz); JarInputStream jis = new JarInputStream(bis); if(manifest != null) { - PreparedStatement us = SQLUtils - .getDefaultConnection() + try ( PreparedStatement us = conn .prepareStatement( - "UPDATE sqlj.jar_repository SET jarManifest = ? WHERE jarId = ?"); - try + "UPDATE sqlj.jar_repository SET jarManifest = ? " + + "WHERE jarId OPERATOR(pg_catalog.=) ?"); + ) { us.setString(1, manifest); us.setInt(2, jarId); @@ -360,10 +554,6 @@ public static void addClassImages(int jarId, InputStream urlStream, int sz) throw new SQLException( "Jar repository update did not update 1 row"); } - finally - { - SQLUtils.close(us); - } } for(;;) @@ -392,30 +582,24 @@ public static void addClassImages(int jarId, InputStream urlStream, int sz) } Matcher ddr = ddrSection.matcher( null != manifest ? manifest : ""); - Matcher cnt = mfCont.matcher( ""); + Matcher continuations = mfCont.matcher( ""); for ( int ordinal = 0; ddr.find(); ++ ordinal ) { - String entryName = cnt.reset( ddr.group( 1)).replaceAll( ""); - if ( descIdFetchStmt == null ) - descIdFetchStmt = SQLUtils.getDefaultConnection() - .prepareStatement( - "SELECT entryId FROM sqlj.jar_entry" - + " WHERE jarId = ? AND entryName = ?"); + String entryName = + continuations.reset( ddr.group( 1)).replaceAll( ""); descIdFetchStmt.setInt(1, jarId); descIdFetchStmt.setString(2, entryName); - rs = descIdFetchStmt.executeQuery(); - if(!rs.next()) - throw new SQLException( - "Failed to refetch row in sqlj.jar_entry"); - int deployImageId = rs.getInt(1); + int deployImageId; + try ( ResultSet rs = descIdFetchStmt.executeQuery() ) + { + if ( ! rs.next() ) + throw new SQLException( + "Failed to refetch row in sqlj.jar_entry"); + + deployImageId = rs.getInt(1); + } - if ( descIdStoreStmt == null ) - descIdStoreStmt = SQLUtils.getDefaultConnection() - .prepareStatement( - "INSERT INTO sqlj.jar_descriptor" - + " (jarId, entryId, ordinal) VALUES" - + " ( ?, ?, ? 
)"); descIdStoreStmt.setInt(1, jarId); descIdStoreStmt.setInt(2, deployImageId); descIdStoreStmt.setInt(3, ordinal); @@ -427,26 +611,19 @@ public static void addClassImages(int jarId, InputStream urlStream, int sz) } catch(IOException e) { - throw new SQLException("I/O exception reading jar file: " - + e.getMessage(), "58030", e); - } - finally - { - SQLUtils.close(rs); - SQLUtils.close(descIdStoreStmt); - SQLUtils.close(descIdFetchStmt); - SQLUtils.close(stmt); + throw new SQLException("reading jar file: " + + e.toString(), "58030", e); } } private final static Pattern ddrSection = Pattern.compile( - "(?<=[\\r\\n])Name: ((?:.|(?:\\r\\n?|\\n) )+)(?:(?:\\r\\n?|\\n))" + - "(?:[^\\r\\n]+(?:\\r\\n?|\\n)(?![\\r\\n]))*" + - "SQLJDeploymentDescriptor: (?:(?:\\r\\n?|\\r) )*TRUE(?!\\S)", + "(?<=[\\r\\n])Name: ((?:.|(?:\\r\\n?+|\\n) )++)(?:\\r\\n?+|\\n)" + + "(?:[^\\r\\n]++(?:\\r\\n?+|\\n)(?![\\r\\n]))*" + + "SQLJDeploymentDescriptor: (?:(?:\\r\\n?+|\\r) )*+TRUE(?!\\S)", Pattern.CASE_INSENSITIVE ); - private final static Pattern mfCont = Pattern.compile( "(?:\\r\\n?|\\n) "); + private final static Pattern mfCont = Pattern.compile( "(?:\\r\\n?+|\\n) "); /** * Read and return a manifest, rewinding the buffered input stream. @@ -470,20 +647,21 @@ public static void addClassImages(int jarId, InputStream urlStream, int sz) * leaves little choice but to sneak in ahead of the JarInputStream and * pluck out the original manifest as a zip entry. */ - private static String rawManifest( BufferedInputStream bis, int markLimit) + private static String rawManifest( BufferedInputStream bis, long markLimit) throws IOException { + if ( Integer.MAX_VALUE < markLimit ) + markLimit = -1; // just pretend it wasn't specified // If the caller can't say how long the stream is, this mark() limit // should be plenty - bis.mark( markLimit > 0 ? markLimit : 32*1024*1024); + bis.mark( markLimit > 0 ? (int)markLimit : 32*1024*1024); ZipInputStream zis = new ZipInputStream( bis); for ( ZipEntry ze; null != (ze = zis.getNextEntry()); ) { if ( "META-INF/MANIFEST.MF".equals( ze.getName()) ) { StringBuilder sb = new StringBuilder(); - // I'll take my chances on a required charset not being there! - CharsetDecoder u8 = Charset.forName( "UTF-8").newDecoder(); + CharsetDecoder u8 = UTF_8.newDecoder(); InputStreamReader isr = new InputStreamReader( zis, u8); char[] b = new char[512]; for ( int got; -1 != (got = isr.read(b)); ) @@ -510,12 +688,15 @@ private static String rawManifest( BufferedInputStream bis, int markLimit) * @throws SQLException if the type or class cannot be found, or if the * invoking user does not own the type. 
*/ - @Function(schema="sqlj", name="add_type_mapping", security=DEFINER) + @Function(schema="sqlj", name="add_type_mapping", security=DEFINER, + requires="sqlj.tables") public static void addTypeMapping(String sqlTypeName, String javaClassName) throws SQLException { - PreparedStatement stmt = null; - try + try(PreparedStatement stmt = getDefaultConnection() + .prepareStatement( + "INSERT INTO sqlj.typemap_entry(javaName, sqlName)" + + " VALUES(?,?)")) { ClassLoader loader = Loader.getCurrentLoader(); Class cls = loader.loadClass(javaClassName); @@ -524,10 +705,6 @@ public static void addTypeMapping(String sqlTypeName, String javaClassName) + " does not implement java.sql.SQLData"); sqlTypeName = getFullSqlNameOwned(sqlTypeName); - stmt = SQLUtils - .getDefaultConnection() - .prepareStatement( - "INSERT INTO sqlj.typemap_entry(javaName, sqlName) VALUES(?,?)"); stmt.setString(1, javaClassName); stmt.setString(2, sqlTypeName); stmt.executeUpdate(); @@ -537,10 +714,6 @@ public static void addTypeMapping(String sqlTypeName, String javaClassName) throw new SQLException( "No such class: " + javaClassName, "46103", e); } - finally - { - SQLUtils.close(stmt); - } Loader.clearSchemaLoaders(); } @@ -554,22 +727,19 @@ public static void addTypeMapping(String sqlTypeName, String javaClassName) * @throws SQLException if the type cannot be found, or if the * invoking user does not own the type. */ - @Function(schema="sqlj", name="drop_type_mapping", security=DEFINER) + @Function(schema="sqlj", name="drop_type_mapping", security=DEFINER, + requires="sqlj.tables") public static void dropTypeMapping(String sqlTypeName) throws SQLException { - PreparedStatement stmt = null; - try + try(PreparedStatement stmt = getDefaultConnection() + .prepareStatement( + "DELETE FROM sqlj.typemap_entry " + + "WHERE sqlName OPERATOR(pg_catalog.=) ?")) { sqlTypeName = getFullSqlNameOwned(sqlTypeName); - stmt = SQLUtils.getDefaultConnection().prepareStatement( - "DELETE FROM sqlj.typemap_entry WHERE sqlName = ?"); stmt.setString(1, sqlTypeName); stmt.executeUpdate(); } - finally - { - SQLUtils.close(stmt); - } Loader.clearSchemaLoaders(); } @@ -583,46 +753,44 @@ public static void dropTypeMapping(String sqlTypeName) throws SQLException * no classpath. * @throws SQLException */ - @Function(schema="sqlj", name="get_classpath", security=DEFINER) + @Function(schema="sqlj", name="get_classpath", security=DEFINER, + requires="sqlj.tables") public static String getClassPath(String schemaName) throws SQLException { - ResultSet rs = null; - PreparedStatement stmt = null; - try - { - if(schemaName == null || schemaName.length() == 0) - schemaName = "public"; - else - schemaName = schemaName.toLowerCase(); - - stmt = SQLUtils - .getDefaultConnection() - .prepareStatement( - "SELECT r.jarName" - + " FROM sqlj.jar_repository r INNER JOIN sqlj.classpath_entry c ON r.jarId = c.jarId" - + " WHERE c.schemaName = ? ORDER BY c.ordinal"); + return getClassPath(Identifier.Simple.fromJava(schemaName)); + } - stmt.setString(1, schemaName); - rs = stmt.executeQuery(); + public static String getClassPath(Identifier.Simple schema) + throws SQLException + { + try(PreparedStatement stmt = getDefaultConnection() + .prepareStatement( + "SELECT r.jarName" + + " FROM" + + " sqlj.jar_repository r" + + " INNER JOIN sqlj.classpath_entry c" + + " ON r.jarId OPERATOR(pg_catalog.=) c.jarId" + + " WHERE c.schemaName OPERATOR(pg_catalog.=) ?" 
+ + " ORDER BY c.ordinal")) + { + stmt.setString(1, schema.pgFolded()); StringBuffer buf = null; - while(rs.next()) + try(ResultSet rs = stmt.executeQuery()) { - if(buf == null) - buf = new StringBuffer(); - else - buf.append(':'); - buf.append(rs.getString(1)); + while(rs.next()) + { + if(buf == null) + buf = new StringBuffer(); + else + buf.append(':'); + buf.append(rs.getString(1)); + } } return (buf == null) ? null : buf.toString(); } - finally - { - SQLUtils.close(rs); - SQLUtils.close(stmt); - } } - public static String getCurrentSchema() throws SQLException + static Identifier.Simple getCurrentSchema() throws SQLException { Session session = SessionManager.current(); return ((org.postgresql.pljava.internal.Session)session) @@ -644,9 +812,9 @@ public static String getCurrentSchema() throws SQLException * system. * @see #setClassPath */ - @Function(schema="sqlj", name="install_jar", security=DEFINER) - public static void installJar( - @SQLType("bytea") byte[] image, String jarName, boolean deploy) + @Function(schema="sqlj", name="install_jar", security=DEFINER, + requires="sqlj.tables") + public static void installJar(byte[] image, String jarName, boolean deploy) throws SQLException { installJar("streamed byte image", jarName, deploy, image); @@ -667,7 +835,8 @@ public static void installJar( * system. * @see #setClassPath */ - @Function(schema="sqlj", name="install_jar", security=DEFINER) + @Function(schema="sqlj", name="install_jar", security=DEFINER, + requires="sqlj.tables") public static void installJar(String urlString, String jarName, boolean deploy) throws SQLException { @@ -685,7 +854,8 @@ public static void installJar(String urlString, String jarName, * descriptor of the jar. * @throws SQLException if the named jar cannot be found in the repository. */ - @Function(schema="sqlj", name="remove_jar", security=DEFINER) + @Function(schema="sqlj", name="remove_jar", security=DEFINER, + requires="sqlj.tables") public static void removeJar(String jarName, boolean undeploy) throws SQLException { @@ -693,7 +863,7 @@ public static void removeJar(String jarName, boolean undeploy) AclId[] ownerRet = new AclId[1]; int jarId = getJarId(jarName, ownerRet); if(jarId < 0) - throw new SQLException("No Jar named '" + jarName + throw new SQLException("No jar named '" + jarName + "' is known to the system", "4600B"); @@ -705,20 +875,17 @@ public static void removeJar(String jarName, boolean undeploy) if(undeploy) deployRemove(jarId, jarName); - PreparedStatement stmt = SQLUtils - .getDefaultConnection() - .prepareStatement("DELETE FROM sqlj.jar_repository WHERE jarId = ?"); - try + try ( PreparedStatement stmt = getDefaultConnection() + .prepareStatement( + "DELETE FROM sqlj.jar_repository " + + "WHERE jarId OPERATOR(pg_catalog.=) ?"); + ) { stmt.setInt(1, jarId); if(stmt.executeUpdate() != 1) throw new SQLException( "Jar repository update did not update 1 row"); } - finally - { - SQLUtils.close(stmt); - } Loader.clearSchemaLoaders(); } @@ -734,9 +901,9 @@ public static void removeJar(String jarName, boolean undeploy) * deployment descriptor of the new jar. * @throws SQLException if the named jar cannot be found in the repository. 
*/ - @Function(schema="sqlj", name="replace_jar", security=DEFINER) - public static void replaceJar( - @SQLType("bytea") byte[] jarImage, String jarName, + @Function(schema="sqlj", name="replace_jar", security=DEFINER, + requires="sqlj.tables") + public static void replaceJar(byte[] jarImage, String jarName, boolean redeploy) throws SQLException { replaceJar("streamed byte image", jarName, redeploy, jarImage); @@ -754,7 +921,8 @@ public static void replaceJar( * deployment descriptor of the new jar. * @throws SQLException if the named jar cannot be found in the repository. */ - @Function(schema="sqlj", name="replace_jar", security=DEFINER) + @Function(schema="sqlj", name="replace_jar", security=DEFINER, + requires="sqlj.tables") public static void replaceJar(String urlString, String jarName, boolean redeploy) throws SQLException { @@ -769,20 +937,26 @@ public static void replaceJar(String urlString, String jarName, * * @param schemaName Name of the schema for which this path is valid. * @param path Colon separated list of names. Each name must denote the name - * of a jar that is present in the jar repository. + * of a jar that is present in the jar repository. An empty + * string or null equivalently set no class path for the schema. * @throws SQLException If no schema can be found with the givene name, or * if one or several names of the path denotes a nonexistant jar * file. */ - @Function(schema="sqlj", name="set_classpath", security=DEFINER) + @Function(schema="sqlj", name="set_classpath", security=DEFINER, + requires="sqlj.tables") public static void setClassPath(String schemaName, String path) throws SQLException { if(schemaName == null || schemaName.length() == 0) schemaName = "public"; + setClassPath(Identifier.Simple.fromJava(schemaName), path); + } - schemaName = schemaName.toLowerCase(); - if("public".equals(schemaName)) + public static void setClassPath(Identifier.Simple schema, String path) + throws SQLException + { + if(s_public_schema.equals(schema)) { if(!AclId.getOuterUser().isSuperuser()) throw new SQLSyntaxErrorException( // yeah, for 42501, really @@ -791,27 +965,27 @@ public static void setClassPath(String schemaName, String path) } else { - Oid schemaId = getSchemaId(schemaName); + Oid schemaId = getSchemaId(schema); if(schemaId == null) throw new SQLNonTransientException( - "No such schema: " + schemaName, "3F000"); + "No such schema: " + schema, "3F000"); if(!AclId.getOuterUser().hasSchemaCreatePermission(schemaId)) throw new SQLSyntaxErrorException( "Permission denied. 
User must have create permission on " + "the target schema in order to set the classpath", "42501"); } - PreparedStatement stmt; - ArrayList entries = null; + ArrayList entries = null; if(path != null && path.length() > 0) { // Collect and verify that all entries in the path represents a // valid jar // - entries = new ArrayList(); - stmt = SQLUtils.getDefaultConnection().prepareStatement( - "SELECT jarId FROM sqlj.jar_repository WHERE jarName = ?"); - try + entries = new ArrayList<>(); + try(PreparedStatement stmt = getDefaultConnection() + .prepareStatement( + "SELECT jarId FROM sqlj.jar_repository " + + "WHERE jarName OPERATOR(pg_catalog.=) ?")) { for(;;) { @@ -830,87 +1004,232 @@ public static void setClassPath(String schemaName, String path) throw new SQLNonTransientException( "No such jar: " + jarName, "46102"); - entries.add(new Integer(jarId)); + entries.add(jarId); if(colon < 0) break; } } - finally - { - SQLUtils.close(stmt); - } } // Delete the old classpath // - stmt = SQLUtils.getDefaultConnection().prepareStatement( - "DELETE FROM sqlj.classpath_entry WHERE schemaName = ?"); - try + try(PreparedStatement stmt = getDefaultConnection() + .prepareStatement( + "DELETE FROM sqlj.classpath_entry " + + "WHERE schemaName OPERATOR(pg_catalog.=) ?")) { - stmt.setString(1, schemaName); + stmt.setString(1, schema.pgFolded()); stmt.executeUpdate(); } - finally - { - SQLUtils.close(stmt); - } if(entries != null) { // Insert the new path. // - stmt = SQLUtils - .getDefaultConnection() + try(PreparedStatement stmt = getDefaultConnection() .prepareStatement( - "INSERT INTO sqlj.classpath_entry(schemaName, ordinal, jarId) VALUES(?, ?, ?)"); - try + "INSERT INTO sqlj.classpath_entry("+ + " schemaName, ordinal, jarId) VALUES(?, ?, ?)")) { int top = entries.size(); for(int idx = 0; idx < top; ++idx) { - int jarId = ((Integer)entries.get(idx)).intValue(); - stmt.setString(1, schemaName); + int jarId = entries.get(idx); + stmt.setString(1, schema.pgFolded()); stmt.setInt(2, idx + 1); stmt.setInt(3, jarId); stmt.executeUpdate(); } } - finally - { - SQLUtils.close(stmt); - } } Loader.clearSchemaLoaders(); } - private static boolean assertInPath(String jarName, - String[] originalSchemaAndPath) throws SQLException + /** + * Run runnable while a temporary class path including + * jarName, if needed, is imposed on the current + * (head-of-{@code search_path}) schema. + *
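Seen from the SQL level, setClassPath validates each colon-separated name against sqlj.jar_repository (raising 46102 for an unknown jar) before rewriting sqlj.classpath_entry in ordinal order. A sketch, with hypothetical schema and jar names and an open Connection conn assumed:

    try ( java.sql.Statement s = conn.createStatement() )
    {
        // both jars must already be installed, or "No such jar" (46102) is raised
        s.execute("SELECT sqlj.set_classpath('myschema', 'myudfs:libcommon')");
        try ( java.sql.ResultSet rs =
                s.executeQuery("SELECT sqlj.get_classpath('myschema')") )
        {
            rs.next();
            System.out.println(rs.getString(1)); // prints myudfs:libcommon
        }
    }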

      + * The temporary class path is imposed if jarName is not already + * included in the current schema's class path, and also not in the public + * schema's class path if the current schema is not the public one. + * + * @param jarName Caller must have checked (as with {@code assertJarName}) + * that this is a sensible jar name, in particular without the colons that + * separate a PL/Java class path. + * @param schemaMayVanish Caller passes true if this is a {@code remove_jar} + * action, when it should not be surprising if undoing the temporary class + * path fails because the schema is gone after the undeploy steps. + * @param runnable The deploy/undeploy actions to take while the temporary + * class path is possibly imposed. + */ + private static void withJarInPath(String jarName, boolean schemaMayVanish, + Checked.Runnable runnable) throws SQLException { - String currentSchema = getCurrentSchema(); - String currentClasspath = getClassPath(currentSchema); - originalSchemaAndPath[0] = currentSchema; - originalSchemaAndPath[1] = currentClasspath; - if(currentClasspath == null) + String jarNameX = ':' + jarName + ':'; + Identifier.Simple originalSchema = getCurrentSchema(); + String originalClasspath = + requireNonNullElse(getClassPath(originalSchema), ""); + + boolean found = false; + + if ( ! originalClasspath.isEmpty() ) + found = (':'+originalClasspath+':').contains(jarNameX); + else if ( ! PUBLIC_SCHEMA.equals(originalSchema) ) { - setClassPath(currentSchema, jarName); - return true; + String fallbackClasspath = + requireNonNullElse(getClassPath(PUBLIC_SCHEMA), ""); + found = (':'+fallbackClasspath+':').contains(jarNameX); } - String[] elems = currentClasspath.split(":"); - int idx = elems.length; - boolean found = false; - while(--idx >= 0) - if(elems[idx].equals(jarName)) + if ( ! found ) + { + String newPath = jarName; + if ( ! originalClasspath.isEmpty() ) + newPath += ':' + originalClasspath; + setClassPath(originalSchema, newPath); + } + + runnable.run(); + + /* + * This is not a finally, because if something went wrong PostgreSQL + * won't allow the SPI operations in setClassPath anyway, and that's + * also ok, because if something went wrong PostgreSQL will roll back + * the transaction. + */ + if ( ! found ) + { + try { - found = true; - break; + setClassPath(originalSchema, originalClasspath); } + catch ( SQLException e ) + { + if ( ! schemaMayVanish || ! "3F000".equals(e.getSQLState()) ) + throw e; + } + } + } - if(found) - return false; + /** + * Creates a named PostgreSQL {@code LANGUAGE} that refers to PL/Java; + * its name may be referred to in the Java security policy to grant selected + * permissions to functions created in this "language". + *

      + * More on configuring Java permissions specific to this alias can be found + * in the policy documentation. + *
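A usage sketch for the alias function, assuming an open Connection conn, a hypothetical alias name, and a hypothetical role; orReplace and comment are left at their defaults:

    try ( java.sql.Statement s = conn.createStatement() )
    {
        // sandboxed alias, created without the usual automatic GRANT to PUBLIC
        s.execute("SELECT sqlj.alias_java_language('java_limited', true)");
        // hand out USAGE explicitly, only to the roles that should have it
        s.execute("GRANT USAGE ON LANGUAGE java_limited TO report_authors");
    }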

      + * PostgreSQL normally grants {@code USAGE} to {@code PUBLIC} if a sandboxed + * language is created. This routine does not, so that {@code USAGE} on the + * new alias can then be {@code GRANT}ed to specific roles or to + * {@code PUBLIC} as desired. + * @param alias Name for this "language". + * @param sandboxed Whether this alias should be a sandboxed/"TRUSTED" + * language that USAGE can be granted on, or an unsandboxed one that only + * superusers can create functions in. Must be specified. + * @param orReplace Whether to succeed even if a language by the same name + * already exists; if so, the sandboxed bit, handler entry points, and + * comment may all be changed. Default is false. + * @param comment A comment to associate with the alias "language". If an + * empty string (the default), a default comment will be constructed. Pass + * null explicitly to avoid setting any comment (or changing any existing + * comment, in the orReplace case). + */ + @Function( + schema="sqlj", name="alias_java_language", onNullInput=CALLED, + requires="sqlj.tables", implementor="alias_java_language" + ) + public static void aliasJavaLanguage( + String alias, + Boolean sandboxed, + @SQLType(defaultValue="false") Boolean orReplace, + @SQLType(defaultValue="") String comment) + throws SQLException + { + if ( null == alias ) + throw new SQLDataException( + "parameter \"alias\" may not be null", "22004"); + if ( null == sandboxed ) + throw new SQLDataException( + "parameter \"sandboxed\" may not be null", "22004"); + if ( null == orReplace ) + throw new SQLDataException( + "parameter \"orReplace\" may not be null", "22004"); + + if ( "".equals(comment) ) + comment = "PL/Java language alias that may be assigned " + + "distinct permissions in the security policy. Routines may " + + "be created in this \"language\" by " + ( sandboxed + ? "any role with USAGE permission." : "superusers only." ); + + Identifier.Simple aliasIdent = Identifier.Simple.fromJava(alias); + + String libraryPath = Backend.myLibraryPath(); + + try ( + Connection conn = getDefaultConnection(); + PreparedStatement ps = conn.prepareStatement( + "SELECT DISTINCT" + + " cn.nspname, cf.proname, vn.nspname, vf.proname" + + " FROM" + + " (VALUES (?,?)) AS params(sandboxed, libpath)," + + " pg_catalog.pg_language AS lan" + + " JOIN pg_catalog.pg_proc AS cf" + + " ON lan.lanplcallfoid OPERATOR(pg_catalog.=) cf.oid" + + " JOIN pg_catalog.pg_namespace AS cn" + + " ON cf.pronamespace OPERATOR(pg_catalog.=) cn.oid" + + " JOIN pg_catalog.pg_proc AS vf" + + " ON lan.lanvalidator OPERATOR(pg_catalog.=) vf.oid" + + " JOIN pg_catalog.pg_namespace AS vn" + + " ON vf.pronamespace OPERATOR(pg_catalog.=) vn.oid" + + " WHERE" + + " lanispl AND lanpltrusted OPERATOR(pg_catalog.=) sandboxed" + + " AND cf.probin OPERATOR(pg_catalog.=) libpath" + + " AND vf.probin OPERATOR(pg_catalog.=) libpath"); + ) + { + Identifier.Qualified callHandler; + Identifier.Qualified valHandler; - setClassPath(currentSchema, jarName + ':' + currentClasspath); - return true; + ps.setBoolean(1, sandboxed); + ps.setString(2, libraryPath); + try ( ResultSet rs = ps.executeQuery() ) + { + if ( ! rs.next() ) + throw new SQLException( + "Failed to find handlers for " + + (sandboxed ? "" : "un") + "sandboxed PL/Java"); + + callHandler = nameFromCatalog(rs.getString(1), rs.getString(2)); + valHandler = nameFromCatalog(rs.getString(3), rs.getString(4)); + + if ( rs.next() ) + throw new SQLException( + "Failed to find handlers uniquely for " + + (sandboxed ? 
"" : "un") + "sandboxed PL/Java"); + } + + try ( Statement s = conn.createStatement() ) + { + s.execute( + "CREATE " + + ( orReplace ? "OR REPLACE " : "" ) + + ( sandboxed ? "TRUSTED " : "" ) + "LANGUAGE " + + aliasIdent + + " HANDLER " + callHandler + + " VALIDATOR " + valHandler); + if ( sandboxed ) // GRANT/REVOKE not even allowed on unTRUSTED + s.execute( + "REVOKE USAGE ON LANGUAGE " + aliasIdent + + " FROM PUBLIC"); + if ( null == comment ) + return; + s.execute( + "COMMENT ON LANGUAGE " + aliasIdent + " IS " + + eQuote(comment)); + } + } } /** @@ -918,7 +1237,6 @@ private static boolean assertInPath(String jarName, * jar. * * @param jarName The name to check. - * @throws IOException */ private static void assertJarName(String jarName) throws SQLException { @@ -945,12 +1263,11 @@ private static void deployInstall(int jarId, String jarName) { SQLDeploymentDescriptor[] depDesc = getDeploymentDescriptors(jarId); - String[] originalSchemaAndPath = new String[2]; - boolean classpathChanged = assertInPath(jarName, originalSchemaAndPath); - for ( SQLDeploymentDescriptor dd : depDesc ) - dd.install(SQLUtils.getDefaultConnection()); - if (classpathChanged) - setClassPath(originalSchemaAndPath[0], originalSchemaAndPath[1]); + withJarInPath(jarName, false, () -> + { + for ( SQLDeploymentDescriptor dd : depDesc ) + dd.install(getDefaultConnection()); + }); } private static void deployRemove(int jarId, String jarName) @@ -958,64 +1275,51 @@ private static void deployRemove(int jarId, String jarName) { SQLDeploymentDescriptor[] depDesc = getDeploymentDescriptors(jarId); - String[] originalSchemaAndPath = new String[2]; - boolean classpathChanged = assertInPath(jarName, originalSchemaAndPath); - for ( int i = depDesc.length ; i --> 0 ; ) - depDesc[i].remove(SQLUtils.getDefaultConnection()); - try - { - if (classpathChanged) - setClassPath(originalSchemaAndPath[0],originalSchemaAndPath[1]); - } - catch ( SQLException sqle ) + withJarInPath(jarName, true, () -> { - if ( ! "3F000".equals(sqle.getSQLState()) ) - throw sqle; - } + for ( int i = depDesc.length ; i --> 0 ; ) + depDesc[i].remove(getDefaultConnection()); + }); } private static SQLDeploymentDescriptor[] getDeploymentDescriptors(int jarId) throws SQLException { - ResultSet rs = null; - PreparedStatement stmt = SQLUtils.getDefaultConnection() + try ( PreparedStatement stmt = getDefaultConnection() .prepareStatement( "SELECT e.entryImage" + " FROM sqlj.jar_descriptor d INNER JOIN sqlj.jar_entry e" - + " ON d.entryId = e.entryId" - + " WHERE d.jarId = ?" + + " ON d.entryId OPERATOR(pg_catalog.=) e.entryId" + + " WHERE d.jarId OPERATOR(pg_catalog.=) ?" + " ORDER BY d.ordinal"); - try + ) { stmt.setInt(1, jarId); - rs = stmt.executeQuery(); - ArrayList sdds = - new ArrayList(); - while(rs.next()) + try ( ResultSet rs = stmt.executeQuery() ) { - byte[] bytes = rs.getBytes(1); - // According to the SQLJ standard, this entry must be - // UTF8 encoded. - // - sdds.add( - new SQLDeploymentDescriptor(new String(bytes, "UTF8"))); + ArrayList sdds = new ArrayList<>(); + while ( rs.next() ) + { + ByteBuffer bytes = ByteBuffer.wrap(rs.getBytes(1)); + // According to the SQLJ standard, this entry must be + // UTF8 encoded. + // + sdds.add( new SQLDeploymentDescriptor( + UTF_8.newDecoder().decode(bytes).toString())); + } + return sdds.toArray( new SQLDeploymentDescriptor[sdds.size()]); } - return sdds.toArray( new SQLDeploymentDescriptor[sdds.size()]); } - catch(UnsupportedEncodingException e) + catch(CharacterCodingException e) { - // Excuse me? 
No UTF8 encoding? - // - throw new SQLException("JVM does not support UTF8!!"); + throw new SQLDataException( + "deployment descriptor is not well-formed UTF-8", "22021", e); } catch(ParseException e) { - throw new SQLException(e.getMessage() + " at " + e.getErrorOffset()); - } - finally - { - SQLUtils.close(rs); - SQLUtils.close(stmt); + throw new SQLSyntaxErrorException(String.format( + "%1$s at %2$s", e.getMessage(), e.getErrorOffset()), + "42601", e); } } @@ -1028,38 +1332,33 @@ private static String getFullSqlNameOwned(String sqlTypeName) throws SQLException { Oid typeId = Oid.forTypeName(sqlTypeName); - s_logger.info("Type id = " + typeId.toString()); + s_logger.finer("Type id = " + typeId.toString()); AclId invoker = AclId.getOuterUser(); - ResultSet rs = null; - PreparedStatement stmt = SQLUtils.getDefaultConnection() + try(PreparedStatement stmt = getDefaultConnection() .prepareStatement( "SELECT n.nspname, t.typname," + " pg_catalog.pg_has_role(?, t.typowner, 'USAGE')" + " FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n" - + " WHERE t.oid = ? AND n.oid = t.typnamespace"); - - try + + " WHERE t.oid OPERATOR(pg_catalog.=) ?" + + " AND n.oid OPERATOR(pg_catalog.=) t.typnamespace")) { stmt.setObject(1, invoker); stmt.setObject(2, typeId); - rs = stmt.executeQuery(); - if(!rs.next()) - throw new SQLException("Unable to obtain type info for " - + typeId); + try(ResultSet rs = stmt.executeQuery()) + { + if(!rs.next()) + throw new SQLException("Unable to obtain type info for " + + typeId); - if ( ! rs.getBoolean(3) ) - throw new SQLSyntaxErrorException( // yeah, for 42501, really - "Permission denied. Only superuser or type's owner " + - "may add or drop a type mapping.", "42501"); + if ( ! rs.getBoolean(3) ) + throw new SQLSyntaxErrorException( // yes, for 42501, really + "Permission denied. Only superuser or type's owner " + + "may add or drop a type mapping.", "42501"); - return rs.getString(1) + '.' + rs.getString(2); - } - finally - { - SQLUtils.close(rs); - SQLUtils.close(stmt); + return rs.getString(1) + '.' + rs.getString(2); + } } } @@ -1067,8 +1366,7 @@ private static int getJarId(PreparedStatement stmt, String jarName, AclId[] ownerRet) throws SQLException { stmt.setString(1, jarName); - ResultSet rs = stmt.executeQuery(); - try + try(ResultSet rs = stmt.executeQuery()) { if(!rs.next()) return -1; @@ -1080,10 +1378,6 @@ private static int getJarId(PreparedStatement stmt, String jarName, } return id; } - finally - { - SQLUtils.close(rs); - } } /** @@ -1099,53 +1393,44 @@ private static int getJarId(PreparedStatement stmt, String jarName, private static int getJarId(String jarName, AclId[] ownerRet) throws SQLException { - PreparedStatement stmt = SQLUtils - .getDefaultConnection() + try(PreparedStatement stmt = getDefaultConnection() .prepareStatement( - "SELECT jarId, jarOwner FROM sqlj.jar_repository WHERE jarName = ?"); - try + "SELECT jarId, jarOwner FROM sqlj.jar_repository"+ + " WHERE jarName OPERATOR(pg_catalog.=) ?")) { return getJarId(stmt, jarName, ownerRet); } - finally - { - SQLUtils.close(stmt); - } } /** * Returns the Oid for the given Schema. * - * @param schemaName The name of the schema. + * @param schema The name of the schema. * @return The Oid of the given schema or null if no such * schema is found. 
* @throws SQLException */ - private static Oid getSchemaId(String schemaName) throws SQLException + private static Oid getSchemaId(Identifier.Simple schema) throws SQLException { - ResultSet rs = null; - PreparedStatement stmt = SQLUtils.getDefaultConnection() + try(PreparedStatement stmt = getDefaultConnection() .prepareStatement( - "SELECT oid FROM pg_catalog.pg_namespace WHERE nspname = ?"); - try + "SELECT oid FROM pg_catalog.pg_namespace " + + "WHERE nspname OPERATOR(pg_catalog.=) ?")) { - stmt.setString(1, schemaName); - rs = stmt.executeQuery(); - if(!rs.next()) - return null; - return (Oid)rs.getObject(1); - } - finally - { - SQLUtils.close(rs); - SQLUtils.close(stmt); + stmt.setString(1, schema.pgFolded()); + try(ResultSet rs = stmt.executeQuery()) + { + if(!rs.next()) + return null; + return (Oid)rs.getObject(1); + } } } private static void installJar(String urlString, String jarName, boolean deploy, byte[] image) throws SQLException { - if ( Backend.isCreatingExtension() ) + if ( Backend.isCreatingExtension() && deploy ) throw new SQLFeatureNotSupportedException( "A jar cannot (yet) be installed as an extension in its " + "own right.", "0A000"); @@ -1157,11 +1442,10 @@ private static void installJar(String urlString, String jarName, + "' already exists", "46002"); - PreparedStatement stmt = SQLUtils - .getDefaultConnection() - .prepareStatement( - "INSERT INTO sqlj.jar_repository(jarName, jarOrigin, jarOwner) VALUES(?, ?, ?)"); - try + try ( PreparedStatement stmt = getDefaultConnection().prepareStatement( + "INSERT INTO sqlj.jar_repository(jarName, jarOrigin, jarOwner)" + + " VALUES(?, ?, ?)"); + ) { stmt.setString(1, jarName); stmt.setString(2, urlString); @@ -1170,10 +1454,6 @@ private static void installJar(String urlString, String jarName, throw new SQLException( "Jar repository insert did not insert 1 row"); } - finally - { - SQLUtils.close(stmt); - } AclId[] ownerRet = new AclId[1]; int jarId = getJarId(jarName, ownerRet); @@ -1181,20 +1461,33 @@ private static void installJar(String urlString, String jarName, throw new SQLException("Unable to obtain id of '" + jarName + "'"); if(image == null) - Backend.addClassImages(jarId, urlString); + addClassImages(jarId, urlString); else { InputStream imageStream = new ByteArrayInputStream(image); addClassImages(jarId, imageStream, image.length); } Loader.clearSchemaLoaders(); - if(deploy) + if(!deploy) + return; + + try + { deployInstall(jarId, jarName); + deploy = false; // flag that deployInstall completed + } + finally + { + if ( deploy ) // or in case it didn't complete ... 
+ Loader.clearSchemaLoaders(); + } } private static void replaceJar(String urlString, String jarName, boolean redeploy, byte[] image) throws SQLException { + assertJarName(jarName); + AclId[] ownerRet = new AclId[1]; int jarId = getJarId(jarName, ownerRet); if(jarId < 0) @@ -1210,13 +1503,12 @@ private static void replaceJar(String urlString, String jarName, if(redeploy) deployRemove(jarId, jarName); - PreparedStatement stmt = SQLUtils - .getDefaultConnection() + try ( PreparedStatement stmt = getDefaultConnection() .prepareStatement( "UPDATE sqlj.jar_repository " + "SET jarOrigin = ?, jarOwner = ?, jarManifest = NULL " - + "WHERE jarId = ?"); - try + + "WHERE jarId OPERATOR(pg_catalog.=) ?"); + ) { stmt.setString(1, urlString); stmt.setString(2, user.getName()); @@ -1225,31 +1517,36 @@ private static void replaceJar(String urlString, String jarName, throw new SQLException( "Jar repository update did not update 1 row"); } - finally - { - SQLUtils.close(stmt); - } - stmt = SQLUtils.getDefaultConnection().prepareStatement( - "DELETE FROM sqlj.jar_entry WHERE jarId = ?"); - try + try ( PreparedStatement stmt = getDefaultConnection().prepareStatement( + "DELETE FROM sqlj.jar_entry WHERE jarId OPERATOR(pg_catalog.=) ?"); + ) { stmt.setInt(1, jarId); stmt.executeUpdate(); } - finally - { - SQLUtils.close(stmt); - } + if(image == null) - Backend.addClassImages(jarId, urlString); + addClassImages(jarId, urlString); else { InputStream imageStream = new ByteArrayInputStream(image); addClassImages(jarId, imageStream, image.length); } + Loader.clearSchemaLoaders(); - if(redeploy) + + if(!redeploy) + return; + + try + { deployInstall(jarId, jarName); + } + catch ( Error | RuntimeException | SQLException e ) + { + Loader.clearSchemaLoaders(); + throw e; + } } } diff --git a/pljava/src/main/java/org/postgresql/pljava/management/DDRExecutor.java b/pljava/src/main/java/org/postgresql/pljava/management/DDRExecutor.java index fee752c9..52ac52c2 100644 --- a/pljava/src/main/java/org/postgresql/pljava/management/DDRExecutor.java +++ b/pljava/src/main/java/org/postgresql/pljava/management/DDRExecutor.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015- Tada AB and other contributors, as listed below. + * Copyright (c) 2015-2016 Tada AB and other contributors, as listed below. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the The BSD 3-Clause License @@ -14,17 +14,12 @@ import java.sql.Connection; import java.sql.SQLException; -import java.util.ArrayList; - -import java.util.regex.Pattern; -import java.util.regex.Matcher; - import org.postgresql.pljava.Session; import org.postgresql.pljava.SessionManager; import org.postgresql.pljava.internal.Backend; -import static org.postgresql.pljava.sqlgen.Lexicals.ISO_PG_JAVA_IDENTIFIER; +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; /** * Abstract class for executing one deployment descriptor {@code } @@ -74,14 +69,6 @@ protected DDRExecutor() { } private static final DDRExecutor NOOP = new Noop(); - /* - * Capture group 1 is an identifier. Presence/absence of group 2 (comma- - * whitespace) indicates whether to parse more. 
- */ - private static final Pattern settingsRx = Pattern.compile(String.format( - "\\G(%1$s)(,\\s*)?", ISO_PG_JAVA_IDENTIFIER - )); - /** * Execute the command {@code sql} using the connection {@code conn}, * according to whatever meaning of "execute" the target {@code DDRExecutor} @@ -105,37 +92,22 @@ public abstract void execute( String sql, Connection conn) * an unadorned {@code } instead of an * {@code }. */ - public static DDRExecutor forImplementor( String name) + public static DDRExecutor forImplementor( Identifier name) throws SQLException { if ( null == name ) return PLAIN; - String[] imps = implementors(); + Iterable imps = + Backend.getListConfigOption( "pljava.implementors"); - for ( String i : imps ) - if ( name.equalsIgnoreCase( i) ) + for ( Identifier i : imps ) + if ( name.equals( i) ) return PLAIN; return NOOP; } - private static String[] implementors() throws SQLException - { - String settingString = Backend.getConfigOption( "pljava.implementors"); - ArrayList al = new ArrayList(); - Matcher m = settingsRx.matcher( settingString); - while ( m.find() ) - { - al.add( m.group( 1)); - if ( -1 != m.start( 2) ) - continue; - if ( m.hitEnd() ) - return al.toArray( new String [ al.size() ]); - } - throw new SQLException("Failed to parse current pljava.implementors"); - } - static class Noop extends DDRExecutor { public void execute( String sql, Connection conn) diff --git a/pljava/src/main/java/org/postgresql/pljava/management/SQLDeploymentDescriptor.java b/pljava/src/main/java/org/postgresql/pljava/management/SQLDeploymentDescriptor.java index afc67904..168ce5d6 100644 --- a/pljava/src/main/java/org/postgresql/pljava/management/SQLDeploymentDescriptor.java +++ b/pljava/src/main/java/org/postgresql/pljava/management/SQLDeploymentDescriptor.java @@ -1,8 +1,14 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2023 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.management; @@ -11,6 +17,13 @@ import java.text.ParseException; import java.util.ArrayList; import java.util.logging.Logger; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; +import static org.postgresql.pljava.sqlgen.Lexicals.identifierFrom; +import static + org.postgresql.pljava.sqlgen.Lexicals.ISO_AND_PG_IDENTIFIER_CAPTURING; /** * This class deals with parsing and executing the deployment descriptor as @@ -87,10 +100,8 @@ */ public class SQLDeploymentDescriptor { - private final ArrayList m_installCommands = - new ArrayList(); - private final ArrayList m_removeCommands = - new ArrayList(); + private final ArrayList m_installCommands = new ArrayList<>(); + private final ArrayList m_removeCommands = new ArrayList<>(); private final StringBuffer m_buffer = new StringBuffer(); private final char[] m_image; @@ -98,6 +109,12 @@ public class SQLDeploymentDescriptor private int m_position = 0; + private static final Pattern s_beginImpl = Pattern.compile(String.format( + "^(?i:BEGIN)\\s++(?:%1$s)\\s*+", ISO_AND_PG_IDENTIFIER_CAPTURING)); + + private static final Pattern s_endImpl = Pattern.compile(String.format( + "(?descImage into a series of * {@code Command} objects each having an SQL command and, if present, an @@ -164,7 +181,7 @@ private void readDescriptor() { m_logger.entering("org.postgresql.pljava.management.SQLDeploymentDescriptor", "readDescriptor"); if(!"SQLACTIONS".equals(this.readIdentifier())) - throw this.parseError("Excpected keyword 'SQLActions'"); + throw this.parseError("Expected keyword 'SQLActions'"); this.readToken('['); this.readToken(']'); @@ -213,38 +230,29 @@ else if("REMOVE".equals(actionType)) // ::= // BEGIN ... END // - // If it is, and if the implementor name corresponds to the one - // defined for this deployment, then extract the SQL token stream. + // If it is, keep track of the with the cmd. // - String implementorName; - int top = cmd.length(); - if(top >= 15 - && "BEGIN ".equalsIgnoreCase(cmd.substring(0, 6)) - && Character.isJavaIdentifierStart(cmd.charAt(6))) + Identifier implementorName = null; + if(cmd.length() >= 15) { - int pos; - for(pos = 7; pos < top; ++pos) - if(!Character.isJavaIdentifierPart(cmd.charAt(pos))) - break; - - if(cmd.charAt(pos) != ' ') - throw this.parseError( - "Expected whitespace after "); - - implementorName = cmd.substring(6, pos); - int iLen = implementorName.length(); - - int endNamePos = top - iLen; - int endPos = endNamePos - 4; - if(!implementorName.equalsIgnoreCase(cmd.substring(endNamePos)) - || !"END ".equalsIgnoreCase(cmd.substring(endPos, endNamePos))) - throw this.parseError( - "Implementor block must end with END "); - - cmd = cmd.substring(pos+1, endPos); + Matcher m = s_beginImpl.matcher(cmd); + if ( m.find() ) + { + Identifier begIdent = identifierFrom(m); + int pos = m.end(); + m = s_endImpl.matcher(cmd); + if ( ! m.find(pos) ) + throw this.parseError( + "BEGIN without matching END"); + Identifier endIdent = identifierFrom(m); + if ( ! 
endIdent.equals(begIdent) ) + throw this.parseError(String.format( + "BEGIN \"%1$s\" and END \"%2$s\" do not match", + begIdent, endIdent)); + implementorName = begIdent; + cmd = cmd.substring(pos, m.start()); + } } - else - implementorName = null; commands.add(new Command(cmd.trim(), implementorName)); @@ -324,7 +332,7 @@ else if(inQuote == c) default: if(inQuote == 0 && Character.isWhitespace((char)c)) { - // Change multiple whitespace into one singe space. + // Change multiple whitespace into one single space. // m_buffer.append(' '); c = this.skipWhite(); @@ -337,7 +345,7 @@ else if(inQuote == c) } } if(inQuote != 0) - throw this.parseError("Untermintated " + (char)inQuote + + throw this.parseError("Unterminated " + (char)inQuote + " starting at position " + startQuotePos); throw this.parseError("Unexpected EOF. Expecting ';' to end command"); @@ -511,7 +519,7 @@ class Command /** The sql to execute (if this command is not suppressed). Never null. */ final String sql; - private final String tag; + private final Identifier tag; /** * Execute this {@code Command} using a {@code DDRExecutor} chosen @@ -523,7 +531,7 @@ void execute( Connection conn) throws SQLException ddre.execute( sql, conn); } - Command(String sql, String tag) + Command(String sql, Identifier tag) { this.sql = sql.trim(); this.tag = tag; diff --git a/pljava/src/main/java/org/postgresql/pljava/mbeans/DualStateStatistics.java b/pljava/src/main/java/org/postgresql/pljava/mbeans/DualStateStatistics.java new file mode 100644 index 00000000..f259bbc1 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/mbeans/DualStateStatistics.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.mbeans; + +import javax.management.MXBean; + +import org.postgresql.pljava.internal.DualState; // for javadoc + +/** + * Bean exposing some {@link DualState DualState} allocation and lifecycle + * statistics for viewing in a JMX management client. + */ +@MXBean +public interface DualStateStatistics +{ + long getConstructed(); + long getEnlistedScoped(); + long getEnlistedUnscoped(); + long getDelistedScoped(); + long getDelistedUnscoped(); + long getJavaUnreachable(); + long getJavaReleased(); + long getNativeReleased(); + long getResourceOwnerPasses(); + long getReferenceQueuePasses(); + long getReferenceQueueItems(); + long getContendedLocks(); + long getContendedPins(); + long getRepeatedlyDeferred(); + long getGcReleaseRaces(); + long getReleaseReleaseRaces(); +} diff --git a/pljava/src/main/java/org/postgresql/pljava/mbeans/package-info.java b/pljava/src/main/java/org/postgresql/pljava/mbeans/package-info.java new file mode 100644 index 00000000..80870843 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/mbeans/package-info.java @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Purdue University + */ +/** + *
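For reference, the kind of implementor block the rewritten descriptor parser recognizes, shown here as a hypothetical fragment held in a Java string: the identifier after BEGIN must match the one after END, and DDRExecutor.forImplementor() executes the enclosed command only if that identifier appears in the pljava.implementors configuration setting.

    // hypothetical install action from a deployment descriptor
    String installAction =
        "BEGIN PostgreSQL "
        + "COMMENT ON SCHEMA javatest IS 'installed by example jar' "
        + "END PostgreSQL";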

      Interfaces defining the views on PL/Java's internals that are available + * through the Java Management Extensions, isolated here in an exportable + * package, as the interfaces must be accessible to a JMX module that is + * dynamic and unnamed. + */ +package org.postgresql.pljava.mbeans; diff --git a/pljava/src/main/java/org/postgresql/pljava/nopolicy/FrozenProperties.java b/pljava/src/main/java/org/postgresql/pljava/nopolicy/FrozenProperties.java new file mode 100644 index 00000000..f4b00e68 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/nopolicy/FrozenProperties.java @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.nopolicy; + +import java.io.InputStream; +import java.io.Reader; + +import static java.util.Arrays.copyOfRange; +import java.util.Collection; +import static java.util.Collections.unmodifiableCollection; +import static java.util.Collections.unmodifiableSet; +import java.util.Map; +import java.util.Properties; +import java.util.Set; + +import java.util.function.BiFunction; +import java.util.function.Function; + +import static java.util.stream.Collectors.toSet; + +/** + * An unmodifiable subclass of {@link Properties}. + *

      + * The overridden methods violate the superclass API specs to the extent that the + * specs allow modification, or the returning of modifiable sets or collections. + *

      + * When any overridden method would, per the spec, modify the map, the method + * will throw {@link UnsupportedOperationException} instead. + */ +public final class FrozenProperties extends Properties +{ + /** + * Constructs a {@code FrozenProperties} instance from an existing + * {@link Properties} instance. + *

      + * The instance will have a defaults list (also frozen) if p has + * defaults that have not been superseded by later settings. Defaults are + * flattened into a single default properties instance, even if p + * had a defaults instance chaining to another or a chain of others. + * @param p the instance whose entries are to be copied + */ + public FrozenProperties(Properties p) + { + super(defaults(p)); + super.putAll(p); // putAll copies only non-default entries + } + + /** + * Constructor used internally to return a frozen instance with only + * p's defaults (entries with keys in subset). + */ + private FrozenProperties(Properties p, Set subset) + { + // super(subset.size()); // has no @Since but first appears in Java 10 + for ( String s : subset ) + super.put(s, p.get(s)); + } + + /** + * Returns a {@code FrozenProperties} instance representing defaults of + * p not superseded by later settings. + * @return FrozenProperties with the defaults, or null if none + */ + private static FrozenProperties defaults(Properties p) + { + Set defaultedNames = + p.stringPropertyNames().stream().filter(n -> ! p.containsKey(n)) + .collect(toSet()); + if ( defaultedNames.isEmpty() ) + return null; + return new FrozenProperties(p, defaultedNames); + } + + @Override + public Object setProperty(String key, String value) + { + throw readonly(); + } + + @Override + public void load(Reader reader) + { + throw readonly(); + } + + @Override + public void load(InputStream inStream) + { + throw readonly(); + } + + @Override + public void loadFromXML(InputStream in) + { + throw readonly(); + } + + @Override + public void clear() + { + throw readonly(); + } + + @Override + public Object computeIfAbsent( + Object key, Function mappingFunction) + { + Object v = get(key); + if ( null != v ) + return v; + v = mappingFunction.apply(key); + if ( null != v ) + throw readonly(); + return null; + } + + @Override + public Object computeIfPresent( + Object key, BiFunction remappingFunction) + { + Object v = get(key); + if ( null == v ) + return null; + v = remappingFunction.apply(key, v); // if it throws, let it. 
Else: + throw readonly(); + } + + @Override + public Set> entrySet() + { + return unmodifiableSet(super.entrySet()); + } + + @Override + public Set keySet() + { + return unmodifiableSet(super.keySet()); + } + + @Override + public Object merge(Object key, Object value, + BiFunction remappingFunction) + { + throw readonly(); + } + + @Override + public Object put(Object key, Object value) + { + throw readonly(); + } + + @Override + public void putAll(Map t) + { + if ( 0 < t.size() ) + throw readonly(); + } + + @Override + public Object remove(Object key) + { + Object v = get(key); + if ( null != v ) + throw readonly(); + return null; + } + + @Override + public Collection values() + { + return unmodifiableCollection(super.values()); + } + + private static UnsupportedOperationException readonly() + { + UnsupportedOperationException e = + new UnsupportedOperationException("FrozenProperties modification"); + StackTraceElement[] t = e.getStackTrace(); + e.setStackTrace(copyOfRange(t, 1, t.length)); + return e; + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/nopolicy/package-info.java b/pljava/src/main/java/org/postgresql/pljava/nopolicy/package-info.java new file mode 100644 index 00000000..4b122cae --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/nopolicy/package-info.java @@ -0,0 +1,6 @@ +/** + * Java classes needed to preserve any semblance of a reliable environment + * in Java 24 and later with no security policy enforcement. + * @author Chapman Flack + */ +package org.postgresql.pljava.nopolicy; diff --git a/pljava/src/main/java/org/postgresql/pljava/policy/TrialPolicy.java b/pljava/src/main/java/org/postgresql/pljava/policy/TrialPolicy.java new file mode 100644 index 00000000..320a66c4 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/policy/TrialPolicy.java @@ -0,0 +1,439 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.policy; + +import java.lang.reflect.ReflectPermission; + +import java.net.URI; + +import java.security.CodeSource; +import java.security.NoSuchAlgorithmException; +import java.security.Permission; +import java.security.PermissionCollection; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.security.SecurityPermission; +import java.security.URIParameter; + +import java.util.ArrayList; +import java.util.Arrays; +import static java.util.Collections.emptyEnumeration; +import static java.util.Collections.enumeration; +import java.util.Enumeration; +import java.util.Iterator; +import java.util.List; + +import static org.postgresql.pljava.elog.ELogHandler.LOG_LOG; +import static org.postgresql.pljava.internal.Backend.log; +import static org.postgresql.pljava.internal.Backend.threadMayEnterPG; +import static org.postgresql.pljava.internal.Privilege.doPrivileged; + +/** + * An implementation of {@link Policy} intended for temporary use while + * identifying needed permission grants for existing code. + *
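A minimal usage sketch of the FrozenProperties class above (key and value hypothetical): reads behave as for any Properties, while every mutator throws UnsupportedOperationException.

    java.util.Properties p = new java.util.Properties();
    p.setProperty("greeting", "hello");            // hypothetical entry
    java.util.Properties frozen =
        new org.postgresql.pljava.nopolicy.FrozenProperties(p);
    System.out.println(frozen.getProperty("greeting")); // prints hello
    frozen.setProperty("greeting", "bye");  // throws UnsupportedOperationException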

      + * This policy is meant to operate as a fallback in conjunction with the normal + * PL/Java policy specified with the {@code pljava.policy_urls} configuration + * setting. This policy is activated by specifying an additional policy file + * URL with {@code -Dorg.postgresql.pljava.policy.trial=}url in the + * {@code pljava.vmoptions} setting. + *
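Activation is purely by configuration. A sketch, assuming superuser rights on an open Connection conn, a hypothetical database name, and a hypothetical policy-file path; the setting takes effect only in sessions whose JVMs start after it is in place:

    try ( java.sql.Statement s = conn.createStatement() )
    {
        s.execute("ALTER DATABASE mydb SET pljava.vmoptions TO "
            + "'-Dorg.postgresql.pljava.policy.trial="
            + "file:/etc/pljava/trial.policy'");
    }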

      + * Permission checks that are allowed by the normal policy in + * {@code pljava.policy_urls} are allowed with no further checking. Permissions + * denied by that policy are checked in this one. If denied in this policy, that + * is the end of the matter. A permission check that is denied by the normal + * policy but allowed by this one is allowed, with a message to the server log. + *

      + * The log message begins with {@code POLICY DENIES/TRIAL POLICY ALLOWS:} + * and the requested permission, followed by an abbreviated stack trace. + * To minimize log volume, the stack trace includes a frame above and below + * each crossing of a module or protection domain boundary; a single {@code ...} + * replaces intermediate frames within the same module and domain. + * At the position in the trace of the protection domain that failed the policy + * check, a line is inserted with the domain's code source and principals, + * such as {@code >> sqlj:examples [PLPrincipal.Sandboxed: java] <<}. This + * abbreviated trace should be well suited to the purpose of determining where + * any additional permission grants ought to be made. + *

      + * Because each check that is logged is then allowed, it can be possible to see + * multiple log entries for the same permission check, one for each domain in + * the call stack that is not granted the permission in the normal policy. + *

      <h2>About false positives</h2>

      + * It is not uncommon to have software that checks in normal operation for + * certain permissions, catches exceptions, and proceeds to function normally. + * Use of this policy, if it is configured to grant the permissions being + * checked, will produce log entries for those 'hidden' checks and may create + * the appearance that permissions need to be granted when, in fact, the + * software would show no functional impairment without them. It is difficult + * to distinguish such false positives from other log entries for permissions + * that do need to be granted for the software to properly function. + *

      + * One approach would be to try to determine, from the log entries, which + * functions of the software led to the permission checks that were logged, and + * specifically test those functions in a database session that has been set up + * with a different policy file that does not grant those permissions. If the + * software then functions without incident, it may be concluded that those + * log entries were false positives. + */ +public class TrialPolicy extends Policy +{ + private static final String TYPE = "JavaPolicy"; + private static final RuntimePermission GET_PROTECTION_DOMAIN = + new RuntimePermission("getProtectionDomain"); + private final Policy realPolicy; + private final Policy limitPolicy; + private final StackWalker walker = + StackWalker.getInstance(StackWalker.Option.RETAIN_CLASS_REFERENCE); + + public TrialPolicy(String limitURI) throws NoSuchAlgorithmException + { + URIParameter lim = new URIParameter(URI.create(limitURI)); + realPolicy = Policy.getInstance(TYPE, null); + limitPolicy = Policy.getInstance(TYPE, lim); + } + + @Override + public PermissionCollection getPermissions(CodeSource codesource) + { + return realPolicy.getPermissions(codesource); + } + + @Override + public PermissionCollection getPermissions(ProtectionDomain domain) + { + return realPolicy.getPermissions(domain); + } + + @Override + public boolean implies( + ProtectionDomain domain, java.security.Permission permission) + { + if ( realPolicy.implies(domain, permission) ) + return true; + + if ( ! limitPolicy.implies(domain, permission) ) + { + /* + * The TrialPolicy.Permission below is an unusual one: like Java's + * own AllPermission, its implies() can be true for permissions of + * other classes than its own. Java's AllPermission is handled + * magically, and this one must be also, because deep down, the + * built-in Policy implementation keeps its PermissionCollections + * segregated by permission class. It would not notice on its own + * that 'permission' might be implied by a permission that is held + * but is of some other class. + */ + if ( ! limitPolicy.implies(domain, Permission.INSTANCE) + || ! Permission.INSTANCE.implies(permission) ) + return false; + } + + /* + * Construct a (with any luck, useful) abbreviated stack trace, using + * the first frame encountered at each change of protection domain while + * walking up the stack, saving the index of the first entry for the + * domain being checked. + */ + List stack = new ArrayList<>(); + int matchingDomainIndex = doPrivileged(() -> walker.walk(s -> + { + ProtectionDomain lastDomain = null; + StackWalker.StackFrame lastFrame = null; + Module lastModule = null; + Module thisModule = getClass().getModule(); + int matchIndex = -1; + int walkIndex = 0; + int newDomainIndex = 0; // walkIndex of first frame in a new domain + for ( StackWalker.StackFrame f : + (Iterable)s.skip(5)::iterator ) + { + ++ walkIndex; + Class frameClass = f.getDeclaringClass(); + Module frameModule = frameClass.getModule(); + ProtectionDomain frameDomain = frameClass.getProtectionDomain(); + if ( ! equals(lastDomain, frameDomain) + || null != lastModule && ! lastModule.equals(frameModule) ) + { + if ( null != lastFrame && walkIndex > 1 + newDomainIndex ) + { + if ( walkIndex > 2 + newDomainIndex ) + stack.add(null); // will be rendered as ... 
+ stack.add(lastFrame.toStackTraceElement()); + } + if ( -1 == matchIndex && equals(domain, frameDomain) ) + matchIndex = stack.size(); + stack.add(f.toStackTraceElement()); + lastModule = frameModule; + lastDomain = frameDomain; + newDomainIndex = walkIndex; + } + + /* + * Exit the walk early, skip boring EntryPoints. + */ + if ( frameModule.equals(thisModule) + && "org.postgresql.pljava.internal.EntryPoints" + .equals(frameClass.getName()) ) + { + if ( newDomainIndex == walkIndex ) + stack.remove(stack.size() - 1); + -- walkIndex; + break; + } + + lastFrame = f; + } + + if ( null != lastFrame && walkIndex > 1 + newDomainIndex ) + stack.add(lastFrame.toStackTraceElement()); + + if ( -1 == matchIndex ) + matchIndex = stack.size(); + return matchIndex; + }), null, GET_PROTECTION_DOMAIN); + + /* + * Construct a string representation of the trace. + */ + StringBuilder sb = new StringBuilder( + "POLICY DENIES/TRIAL POLICY ALLOWS: " + permission + '\n'); + Iterator it = stack.iterator(); + int i = 0; + for ( ;; ) + { + if ( matchingDomainIndex == i ++ ) + sb.append(">> ") + .append(domain.getCodeSource().getLocation()) + .append(' ') + .append(Arrays.toString(domain.getPrincipals())) + .append(" <<\n"); + if ( ! it.hasNext() ) + break; + StackTraceElement e = it.next(); + sb.append(null == e ? "..." : e.toString()); + if ( it.hasNext() || matchingDomainIndex == i ) + sb.append('\n'); + } + + /* + * This is not the best way to avoid blocking on log(); in some flavors + * of pljava.java_thread_pg_entry, threadMayEnterPG can return false + * simply because it's not /known/ that PG could be entered right now, + * and this could send the message off to System.err at times even if + * log() would have completed with no blocking. But the always accurate + * "could I enter PG right now without blocking?" method isn't provided + * yet. + */ + if ( threadMayEnterPG() ) + log(LOG_LOG, sb.toString()); + else + System.err.println(sb); + + return true; + } + + @Override + public void refresh() + { + realPolicy.refresh(); + limitPolicy.refresh(); + } + + /* + * Compare two protection domains, only by their code source for now. + * It appears that StackWalker doesn't invoke domain combiners, so the + * frames seen in the walk won't match the principals of the argument + * to implies(). + */ + private boolean equals(ProtectionDomain a, ProtectionDomain b) + { + if ( null == a || null == b) + return a == b; + + CodeSource csa = a.getCodeSource(); + CodeSource csb = b.getCodeSource(); + + if ( null == csa || null == csb ) + return csa == csb; + + return csa.equals(csb); + } + + /** + * A permission like {@code java.security.AllPermission}, but without + * any {@code FilePermission} (the real policy's sandboxed/unsandboxed + * grants should handle those), nor a couple dozen varieties of + * {@code RuntimePermission}, {@code SecurityPermission}, and + * {@code ReflectPermission} that would typically not be granted without + * clear intent. + *

      + * This permission can be granted in a {@code TrialPolicy} while identifying + * any straggling permissions needed by some existing code, without quite + * the excitement of granting {@code AllPermission}. Any of the permissions + * excluded from this one can also be granted in the {@code TrialPolicy}, + * of course, if there is reason to believe the code might need them. + *

      + * The proper spelling in a policy file is + * {@code org.postgresql.pljava.policy.TrialPolicy$Permission}. + *
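For instance, a grant in a trial policy file might look like the following sketch, where the jar name `examples` and the commented-out extra grant are illustrative assumptions:

```
grant codeBase "sqlj:examples" {
	permission org.postgresql.pljava.policy.TrialPolicy$Permission;
	// an excluded permission can still be granted explicitly if there is
	// reason to believe the code needs it:
	// permission java.lang.RuntimePermission "getClassLoader";
};
```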

      + * This permission will probably only work right in a {@code TrialPolicy}. + * Any permission whose {@code implies} method can return true for + * permissions of other classes than its own may be ineffective in a stock + * Java policy, where permission collections are kept segregated by the + * class of the permission to be checked. Java's {@code AllPermission} gets + * special-case treatment in the stock implementation, and this permission + * likewise has to be treated specially in {@code TrialPolicy}. The only + * kind of custom permission that can genuinely drop in and work is one + * whose {@code implies} method only imposes semantics on the names/actions + * of different instances of that permission class. + *
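A compact sketch (hypothetical code, neither the stock policy implementation nor PL/Java's) of why class-segregated collections defeat a cross-class permission: the lookup is keyed by the class of the permission being checked, so a grant held under some other class is never consulted.

```java
import java.security.Permission;
import java.security.PermissionCollection;
import java.util.HashMap;
import java.util.Map;
import java.util.PropertyPermission;

// Hypothetical illustration of class-keyed permission collections.
public class SegregatedCollectionsSketch
{
	public static void main(String[] args)
	{
		Map<Class<?>,PermissionCollection> byClass = new HashMap<>();

		// stand-in for a cross-class permission granted in policy
		Permission granted = new RuntimePermission("standInForTrialPermission");
		PermissionCollection pc = granted.newPermissionCollection();
		pc.add(granted);
		byClass.put(granted.getClass(), pc);

		// the permission actually being checked is of another class entirely
		Permission checked = new PropertyPermission("user.home", "read");
		PermissionCollection consulted = byClass.get(checked.getClass());

		System.out.println(null == consulted
			? "no collection for " + checked.getClass().getSimpleName()
			: "implies: " + consulted.implies(checked));
	}
}
```

Running this prints that no collection exists for `PropertyPermission`, which is why the stock policy must special-case `AllPermission` and why `TrialPolicy` must give its own `Permission` the same treatment.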

      + * A permission that does not live on the boot classpath is initially read + * from a policy file as an instance of {@code UnresolvedPermission}, and + * only gets resolved when a permission check is made, checking for an + * instance of its actual class. That is another complication when + * implementing a permission that may imply permissions of other classes. + *
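A small sketch of that complication, using only standard JDK classes; the permission check shown is an arbitrary example:

```java
import java.security.Permission;
import java.security.UnresolvedPermission;

// Illustrative only: until the policy machinery can load the named class and
// resolve the entry, an UnresolvedPermission implies nothing at all, so a
// cross-class permission read from a policy file cannot take effect early.
public class UnresolvedSketch
{
	public static void main(String[] args)
	{
		Permission fromPolicy = new UnresolvedPermission(
			"org.postgresql.pljava.policy.TrialPolicy$Permission",
			"", null, null);
		System.out.println(
			fromPolicy.implies(new RuntimePermission("getClassLoader"))); // false
	}
}
```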

      + * A permission implemented in a different named module must be in a package + * that is exported to {@code java.base}. + */ + public static final class Permission extends java.security.Permission + { + private static final long serialVersionUID = 6401893677037633706L; + + /** + * An instance of this permission (not a singleton, merely one among + * possible others). + */ + static final Permission INSTANCE = new Permission(); + + public Permission() + { + super(""); + } + + public Permission(String name, String actions) + { + super(""); + } + + @Override + public boolean equals(Object other) + { + return other instanceof Permission; + } + + @Override + public int hashCode() + { + return 131113; + } + + @Override + public String getActions() + { + return null; + } + + @Override + public PermissionCollection newPermissionCollection() + { + return new Collection(); + } + + @Override + public boolean implies(java.security.Permission p) + { + if ( p instanceof Permission ) + return true; + + if ( p instanceof java.io.FilePermission ) + return false; + + if ( Holder.EXCLUDERHS.stream().anyMatch(r -> p.implies(r)) ) + return false; + + if ( Holder.EXCLUDELHS.stream().anyMatch(l -> l.implies(p)) ) + return false; + + return true; + } + + static class Collection extends PermissionCollection + { + private static final long serialVersionUID = 917249873714843122L; + + Permission the_permission = null; + + @Override + public void add(java.security.Permission p) + { + if ( isReadOnly() ) + throw new SecurityException( + "attempt to add a Permission to a readonly " + + "PermissionCollection"); + + if ( ! (p instanceof Permission) ) + throw new IllegalArgumentException( + "invalid in homogeneous PermissionCollection: " + p); + + if ( null == the_permission ) + the_permission = (Permission) p; + } + + @Override + public boolean implies(java.security.Permission p) + { + if ( null == the_permission ) + return false; + return the_permission.implies(p); + } + + @Override + public Enumeration elements() + { + if ( null == the_permission ) + return emptyEnumeration(); + return enumeration(List.of(the_permission)); + } + } + + static class Holder + { + static final List EXCLUDERHS = List.of( + new RuntimePermission("createClassLoader"), + new RuntimePermission("getClassLoader"), + new RuntimePermission("setContextClassLoader"), + new RuntimePermission("enableContextClassLoaderOverride"), + new RuntimePermission("setSecurityManager"), + new RuntimePermission("createSecurityManager"), + new RuntimePermission("shutdownHooks"), + new RuntimePermission("exitVM"), + new RuntimePermission("setFactory"), + new RuntimePermission("setIO"), + new RuntimePermission("getStackWalkerWithClassReference"), + new RuntimePermission("setDefaultUncaughtExceptionHandler"), + new RuntimePermission("manageProcess"), + new ReflectPermission("suppressAccessChecks"), + new SecurityPermission("createAccessControlContext"), + new SecurityPermission("setPolicy"), + new SecurityPermission("createPolicy.JavaPolicy") + ); + + static final List EXCLUDELHS = List.of( + new RuntimePermission("exitVM.*"), + new RuntimePermission("defineClassInPackage.*"), + new ReflectPermission("newProxyInPackage.*"), + new SecurityPermission("setProperty.*") + ); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/policy/package-info.java b/pljava/src/main/java/org/postgresql/pljava/policy/package-info.java new file mode 100644 index 00000000..dc411b58 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/policy/package-info.java @@ 
-0,0 +1,22 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Purdue University + */ +/** + * Package implementing custom Java security policy useful while migrating + * existing code to policy-based PL/Java; allows permission checks denied by the + * main policy to succeed, while logging them so any needed permission grants + * can be identified and added to the main policy. + *
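A hedged sketch of pointing a session's PL/Java at a trial policy file; the system property name and file path shown are assumptions for illustration and should be verified against the PL/Java security documentation:

```sql
-- Assumption: property name and path are illustrative placeholders.
SET pljava.vmoptions TO
  '-Dorg.postgresql.pljava.policy.trial=file:/path/to/trial-pljava.policy';
-- Subsequent PL/Java calls in this session run with the trial policy layered
-- over the main policy, logging checks the main policy would have denied.
```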

      + * This package is exported to {@code java.base} to provide a custom + * {@code Permission} that can be granted in policy. + */ +package org.postgresql.pljava.policy; diff --git a/pljava/src/main/java/org/postgresql/pljava/sqlj/EntryStreamHandler.java b/pljava/src/main/java/org/postgresql/pljava/sqlj/EntryStreamHandler.java index 42b92ad9..1ac02cd9 100644 --- a/pljava/src/main/java/org/postgresql/pljava/sqlj/EntryStreamHandler.java +++ b/pljava/src/main/java/org/postgresql/pljava/sqlj/EntryStreamHandler.java @@ -73,7 +73,8 @@ public void connect() { stmt = SQLUtils.getDefaultConnection().prepareStatement( - "SELECT entryName, entryImage FROM sqlj.jar_entry WHERE entryId = ?"); + "SELECT entryName, entryImage FROM sqlj.jar_entry " + + "WHERE entryId OPERATOR(pg_catalog.=) ?"); stmt.setInt(1, m_entryId); rs = stmt.executeQuery(); if(rs.next()) diff --git a/pljava/src/main/java/org/postgresql/pljava/sqlj/Handler.java b/pljava/src/main/java/org/postgresql/pljava/sqlj/Handler.java new file mode 100644 index 00000000..268c6be0 --- /dev/null +++ b/pljava/src/main/java/org/postgresql/pljava/sqlj/Handler.java @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.sqlj; + +import java.io.IOException; + +import java.net.URL; +import java.net.URLConnection; +import java.net.URLStreamHandler; +import java.net.spi.URLStreamHandlerProvider; + +/** + * Provider for an {@code sqlj:jarname} URL stream handler. + *

      + * This is only used to allow the security policy to grant permissions to jars + * by name. The handler is otherwise nonfunctional; its {@code openConnection} + * method throws an exception. + */ +public class Handler extends URLStreamHandlerProvider +{ + private static final Handler INSTANCE = new Handler(); + + public static URLStreamHandlerProvider provider() + { + return INSTANCE; + } + + @Override + public URLStreamHandler createURLStreamHandler(String protocol) + { + switch ( protocol ) + { + case "sqlj": + return SQLJ.INSTANCE; + default: + return null; + } + } + + static class SQLJ extends URLStreamHandler + { + static final SQLJ INSTANCE = new SQLJ(); + + @Override + protected URLConnection openConnection(URL u) throws IOException + { + throw new IOException( + "URL of sqlj: protocol can't really be opened"); + } + + @Override + protected void parseURL(URL u, String spec, int start, int limit) + { + if ( spec.length() > limit ) + throw new IllegalArgumentException( + "sqlj: URL should not contain #"); + if ( spec.length() == start ) + throw new IllegalArgumentException( + "sqlj: URL should not have empty path part"); + setURL(u, u.getProtocol(), null, -1, null, null, + spec.substring(start), null, null); + } + } +} diff --git a/pljava/src/main/java/org/postgresql/pljava/sqlj/Loader.java b/pljava/src/main/java/org/postgresql/pljava/sqlj/Loader.java index 546c1884..fc9d501a 100644 --- a/pljava/src/main/java/org/postgresql/pljava/sqlj/Loader.java +++ b/pljava/src/main/java/org/postgresql/pljava/sqlj/Loader.java @@ -1,165 +1,322 @@ /* - * Copyright (c) 2004, 2005, 2006 TADA AB - Taby Sweden - * Portions Copyright (c) 2010 - Greenplum Inc - * Distributed under the terms shown in the file COPYRIGHT - * found in the root folder of this project or at - * http://eng.tada.se/osprojects/COPYRIGHT.html + * Copyright (c) 2004-2025 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Tada AB + * Chapman Flack */ package org.postgresql.pljava.sqlj; import java.io.File; -import java.io.FileFilter; import java.io.IOException; +import java.io.InputStream; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import static java.lang.invoke.MethodType.methodType; + import java.net.MalformedURLException; import java.net.URL; +import java.net.URLClassLoader; + +import java.security.CodeSigner; +import java.security.CodeSource; +import java.security.Principal; +import java.security.ProtectionDomain; + import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLData; import java.sql.SQLException; import java.sql.Statement; + import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; -import java.util.LinkedList; -import java.util.ListIterator; +import java.util.LinkedHashSet; import java.util.Map; import java.util.NoSuchElementException; +import java.util.Set; + import java.util.logging.Level; import java.util.logging.Logger; +import static java.util.stream.Collectors.groupingBy; + +import org.postgresql.pljava.sqlgen.Lexicals.Identifier; + import org.postgresql.pljava.internal.Backend; +import org.postgresql.pljava.internal.Checked; import org.postgresql.pljava.internal.Oid; -import org.postgresql.pljava.jdbc.SQLUtils; -import org.postgresql.pljava.sqlj.JarLoader; +import static org.postgresql.pljava.internal.Privilege.doPrivileged; +import static org.postgresql.pljava.internal.UncheckedException.unchecked; -/** - * - * The Loader class is used by PL/Java to load java bytecode into the VM. - * - * In the postgres distribution this bytecode is always loaded from the - * sqlj.jar_repository table. +import org.postgresql.pljava.jdbc.Invocation; +import static org.postgresql.pljava.jdbc.SQLUtils.getDefaultConnection; + +/* + * Import an interface (internal-only, at least for now) that allows overriding + * the default derivation of the read_only parameter to SPI query execution + * functions. The default behavior follows the recommendation in the SPI docs + * to use read_only => true if the currently-executing PL function is declared + * IMMUTABLE, and read_only => false otherwise. * - * In Greenplum that is difficult because it would require uniform access - * to the sqlj.jar_repository table from every segment, which is currently - * not well supported by our architecture. + * Several queries in this class will use this interface to force read_only to + * be false, even though the queries clearly do nothing but reading. The reason + * may not be obvious: * - * Instead we mostly use our own loader JarLoader() which reads a jarfile - * from the filesystem. Note that the security manager allows READING from - * "$GPHOME/lib/postgresql/java/*.class" files, even in trusted mode, since - * this is shared between all databases any user can theoretically see every - * installed jar. + * One effect of the read_only parameter in SPI is the selection of the snapshot + * used to evaluate the query. When read_only is true, a snapshot from the + * beginning of the command is used, which cannot see even the modifications + * made in this transaction since that point. 
* - * Note: This creates a couple differences from the postgres implementation that - * currently pull us further from the sql standard embedded java implementation. - * Specifically: - * - We do not support per-database/per-schema CLASSPATH - * - CLASSPATH is instead set by a user GUC. + * Where that becomes a problem is during evaluation of a deployment descriptor + * as part of install_jar or replace_jar. The command began by loading some new + * or changed classes, and is now executing deployment commands, which may very + * well need to load those classes. But some of the loading requests may happen + * to come through functions that are declared IMMUTABLE (type IO functions, for + * example), which, under the default behavior, would mean SPI gets passed + * read_only => true and selects a snapshot from before the new classes were + * there, and loading fails. That is why read_only is always forced false here. + */ +import org.postgresql.pljava.jdbc.SPIReadOnlyControl; + +/** + * Class loader to load from jars installed in the database with + * {@code SQLJ.INSTALL_JAR}. + * @author Thomas Hallgren */ public class Loader extends ClassLoader { - private static final String PUBLIC_SCHEMA = "public"; - private static final Map s_schemaLoaders = new HashMap(); - private static final Map s_typeMap = new HashMap(); - private final static Logger s_logger = Logger.getLogger(Loader.class.getName()); + private static final Logger s_logger = + Logger.getLogger(Loader.class.getName()); - /* Greeplum Additions */ - private final LinkedList m_jarloaders; - private final String[] m_classpath; - // private final Map m_filespace; // Not yet supported - private static String m_current_classpath; + /** + * A distinguished singleton instance to serve as a type-safe "sentinel" + * reference in context classloader management (as Java considers null to be + * a meaningful {@code setContextClassLoader} argument). + */ + public static final ClassLoader SENTINEL = new Loader(); /** - * Create a new Loader. - * @param entries - * @param parent + * The enumeration of URLs returned by {@code findResources}. + *

      + * The returned URLs have a "dbf:" scheme and expose the integer surrogate + * keys of jar entries, not a very stable way to refer to an entry in a jar, + * but perhaps adequate for now, as no one will be constructing such URLs + * or obtaining them except from {@code findResources} here. */ - Loader(String classpath) - throws SQLException + static class EntryEnumeration implements Enumeration { - super(Loader.class.getClassLoader()); + private final int[] m_entryIds; + private int m_top = 0; - m_jarloaders = new LinkedList(); + EntryEnumeration(int[] entryIds) + { + m_entryIds = entryIds; + } - if (classpath == null || classpath.length() == 0) - m_classpath = new String[0]; - else - m_classpath = classpath.split(":"); + public boolean hasMoreElements() + { + return (m_top < m_entryIds.length); + } + + public URL nextElement() + throws NoSuchElementException + { + if (m_top >= m_entryIds.length) + throw new NoSuchElementException(); + return entryURL(m_entryIds[m_top++]); + } + } + public static final Identifier.Simple PUBLIC_SCHEMA = + Identifier.Simple.fromCatalog("public"); + + private static final Map + s_schemaLoaders = new HashMap<>(); + + private static final + Map>> + s_typeMap = new HashMap<>(); + + private static final Object s_fallbackLoaderLock = new Object(); + private static volatile ClassLoader s_fallbackLoader; - /* Find the directory that contains our jar files */ - String jarpath = Backend.getLibraryPath() + "/java/"; + /** + * Removes all cached schema loaders, functions, and type maps. This + * method is called by the utility functions that manipulate the + * data that has been cached. It is not intended to be called + * from user code. + */ + public static void clearSchemaLoaders() + { + s_schemaLoaders.clear(); + s_typeMap.clear(); + Backend.clearFunctionCache(); + s_fallbackLoader = null; + } - /* Create a new JarLoader for every element in the classpath */ - for (int i = 0; i < m_classpath.length; i++) + private static boolean isMissingSqlj(SQLException e) + { + for (SQLException ex = e; ex != null; ex = ex.getNextException()) { - try - { - String searchPath; + String state = ex.getSQLState(); + if ("42P01".equals(state) || "3F000".equals(state)) + return true; + } + return false; + } + + private static ClassLoader getFallbackLoader() + { + ClassLoader loader = s_fallbackLoader; + if ( loader != null ) + return loader; + + synchronized ( s_fallbackLoaderLock ) + { + loader = s_fallbackLoader; + if ( loader != null ) + return loader; + + Set urls = new LinkedHashSet<>(); + addPathUrls(safeGetConfigOption("pljava_classpath"), urls); + if ( urls.isEmpty() ) + loader = ClassLoader.getSystemClassLoader(); + else + loader = doPrivileged(() -> new FallbackClassLoader( + urls.toArray(new URL[0]), + ClassLoader.getSystemClassLoader())); + s_fallbackLoader = loader; + return loader; + } + } + + private static final class FallbackClassLoader extends URLClassLoader + { + FallbackClassLoader(URL[] urls, ClassLoader parent) + { + super(urls, parent); + } + + @Override + public URL getResource(String name) + { + return doPrivileged(() -> super.getResource(name)); + } - if (!m_classpath[i].startsWith("/")) + @Override + public InputStream getResourceAsStream(String name) + { + return doPrivileged(() -> { + URL url = super.getResource(name); + if ( url == null ) + return null; + try { - searchPath = jarpath + m_classpath[i]; + return url.openStream(); } - else + catch ( IOException e ) { - searchPath = m_classpath[i]; + return null; } + }); + } + } - URL url = null; - - File 
tmp = new File(searchPath); - - // if directory then lets get all the jar files - if (tmp.isDirectory()) { - File jarFiles[] = tmp.listFiles(new FileFilter() { - public boolean accept(File pathname) { - // not interested in directories - if (pathname.isDirectory()) return false; - // only interested in jar files - return pathname.getPath().endsWith("jar"); - } - }); - for (int j=0; j urls) + { + if ( path == null || path.trim().isEmpty() ) + return; + + String[] parts = + path.split(java.util.regex.Pattern.quote(File.pathSeparator)); + for ( String part : parts ) + { + String entry = part.trim(); + if ( entry.isEmpty() ) + continue; + File file = new File(entry); + try + { + if ( file.isDirectory() ) + { + urls.add(file.toURI().toURL()); + File[] jarFiles = file.listFiles((dir, name) -> + name.endsWith(".jar")); + if ( jarFiles == null ) + continue; + for ( File jar : jarFiles ) + urls.add(jar.toURI().toURL()); } - else + else if ( file.isFile() ) { - url = new URL("file:///" + searchPath); - JarLoader loader = new JarLoader(this, url); - m_jarloaders.add(loader); + urls.add(file.toURI().toURL()); } - } - catch (MalformedURLException e) - { - // XXX - Ignore malformed URLs? - throw new SQLException("Malformed URL Exception: " + - e.getMessage()); - } - catch (IOException e) + catch ( MalformedURLException e ) { - // XXX - Ignore jar doesn't exist? - throw new SQLException("IOException reading jar: " + - e.getMessage()); + throw unchecked(e); } } } - /** - * Removes all cached schema loaders, functions, and type maps. This - * method is called by the utility functions that manipulate the - * data that has been cached. It is not intended to be called - * from user code. - */ - public static void clearSchemaLoaders() + private static boolean sqljTablesExist(Connection conn) + throws SQLException { - s_schemaLoaders.clear(); - s_typeMap.clear(); - Backend.clearFunctionCache(); + return sqljTableExists(conn, "jar_repository") + && sqljTableExists(conn, "classpath_entry") + && sqljTableExists(conn, "jar_entry"); + } + + private static boolean sqljTableExists(Connection conn, String tableName) + throws SQLException + { + try ( PreparedStatement stmt = conn.prepareStatement( + "SELECT 1 FROM pg_catalog.pg_class c " + + "JOIN pg_catalog.pg_namespace n " + + "ON n.oid OPERATOR(pg_catalog.=) c.relnamespace " + + "WHERE n.nspname OPERATOR(pg_catalog.=) 'sqlj' " + + "AND c.relname OPERATOR(pg_catalog.=) ?")) + { + stmt.unwrap(SPIReadOnlyControl.class).clearReadOnly(); + stmt.setString(1, tableName); + try ( ResultSet rs = stmt.executeQuery() ) + { + return rs.next(); + } + } } /** @@ -171,42 +328,150 @@ public static void clearSchemaLoaders() public static ClassLoader getCurrentLoader() throws SQLException { - /* - * Because Greenplum doesn't support per-schema classpaths we just map - * everything to the public schema. - */ - return getSchemaLoader(PUBLIC_SCHEMA); + String schema; + try ( + Statement stmt = getDefaultConnection().createStatement(); + ResultSet rs = + stmt.executeQuery("SELECT pg_catalog.current_schema()"); + ) + { + if(!rs.next()) + throw new SQLException("Unable to determine current schema"); + schema = rs.getString(1); + } + return getSchemaLoader(Identifier.Simple.fromCatalog(schema)); } /** * Obtain a loader that has been configured for the class path of the * schema named schemaName. Class paths are defined using the * SQL procedure sqlj.set_classpath. - * @param schemaName The name of the schema. + * @param schema The name of the schema as an Identifier.Simple. 
* @return A loader. */ - public static ClassLoader getSchemaLoader(String schemaName) + public static ClassLoader getSchemaLoader(Identifier.Simple schema) throws SQLException { + if(schema == null ) + schema = PUBLIC_SCHEMA; + + ClassLoader loader = s_schemaLoaders.get(schema); + if(loader != null) + { + if ( loader instanceof Loader ) + { + Connection conn = getDefaultConnection(); + if ( sqljTablesExist(conn) ) + return loader; + s_schemaLoaders.remove(schema); + } + else + return loader; + } + + /* + * Under-construction map from an entry name to an array of integer + * surrogate keys for entries with matching names in jars on the path. + */ + Map classImages = new HashMap<>(); + /* - * Rather than having a different loader per schemaName, instead - * we simply create a different loader per CLASSPATH. + * Under-construction map from an integer entry key to a + * CodeSource representing the jar it belongs to. */ - String classpath = Backend.getConfigOption("pljava_classpath"); - if (classpath == null) - classpath = ""; - if (!classpath.equals(m_current_classpath)) + Map codeSources = new HashMap<>(); + + Connection conn = getDefaultConnection(); + if ( ! sqljTablesExist(conn) ) { - clearSchemaLoaders(); - m_current_classpath = classpath; + loader = getFallbackLoader(); + s_schemaLoaders.put(schema, loader); + return loader; + } + try ( + // Read the entries so that the one with highest prio is read last. + // + PreparedStatement outer = conn.prepareStatement( + "SELECT r.jarId, r.jarName" + + " FROM" + + " sqlj.jar_repository r" + + " INNER JOIN sqlj.classpath_entry c" + + " ON r.jarId OPERATOR(pg_catalog.=) c.jarId" + + " WHERE c.schemaName OPERATOR(pg_catalog.=) ?" + + " ORDER BY c.ordinal DESC"); + PreparedStatement inner = conn.prepareStatement( + "SELECT entryId, entryName FROM sqlj.jar_entry " + + "WHERE jarId OPERATOR(pg_catalog.=) ?"); + ) + { + outer.unwrap(SPIReadOnlyControl.class).clearReadOnly(); + inner.unwrap(SPIReadOnlyControl.class).clearReadOnly(); + outer.setString(1, schema.pgFolded()); + try ( ResultSet rs = outer.executeQuery() ) + { + while(rs.next()) + { + @SuppressWarnings("deprecation") // until PL/Java major rev + URL jarUrl = new URL("sqlj:" + rs.getString(2)); + CodeSource cs = new CodeSource(jarUrl, (CodeSigner[])null); + + inner.setInt(1, rs.getInt(1)); + try ( ResultSet rs2 = inner.executeQuery() ) + { + while(rs2.next()) + { + int entryId = rs2.getInt(1); + String entryName = rs2.getString(2); + codeSources.put(entryId, cs); + int[] oldEntry = classImages.get(entryName); + if(oldEntry == null) + classImages.put(entryName, new int[] { entryId }); + else + { + int last = oldEntry.length; + int[] newEntry = new int[last + 1]; + newEntry[0] = entryId; + System.arraycopy(oldEntry, 0, newEntry, 1, last); + classImages.put(entryName, newEntry); + } + } + } + } + } + catch ( MalformedURLException e ) + { + throw unchecked(e); + } + } + catch (SQLException e) + { + if (isMissingSqlj(e)) + { + Invocation.clearErrorCondition(); + loader = getFallbackLoader(); + s_schemaLoaders.put(schema, loader); + return loader; + } + throw e; } - ClassLoader loader = (ClassLoader) s_schemaLoaders.get(classpath); - if (loader == null) + ClassLoader parent = ClassLoader.getSystemClassLoader(); + if(classImages.size() == 0) + // + // No classpath defined for the schema. Default to + // classpath of public schema or to the fallback loader if the + // request already is for the public schema. + // + loader = schema.equals(PUBLIC_SCHEMA) + ? 
getFallbackLoader() : getSchemaLoader(PUBLIC_SCHEMA); + else { - loader = (ClassLoader) new Loader(classpath); - s_schemaLoaders.put(classpath, loader); + String name = "schema:" + schema.nonFolded(); + loader = doPrivileged(() -> + new Loader(classImages, codeSources, parent, name)); } + + s_schemaLoaders.put(schema, loader); return loader; } @@ -219,43 +484,380 @@ public static ClassLoader getSchemaLoader(String schemaName) * @param schema The schema * @return The Map, possibly empty but never null. */ - public static Map getTypeMap(final String schema) throws SQLException + public static Map> getTypeMap( + final Identifier.Simple schema) + throws SQLException + { + Map> typesForSchema = + s_typeMap.get(schema); + if(typesForSchema != null) + return typesForSchema; + + s_logger.finer("Creating typeMappings for schema " + schema); + typesForSchema = new HashMap>() + { + public Class get(Oid key) + { + s_logger.finer("Obtaining type mapping for OID " + key + + " for schema " + schema); + return super.get(key); + } + }; + ClassLoader loader = Loader.getSchemaLoader(schema); + try ( + Statement stmt = Checked.Supplier.use((() -> + { + Statement s = getDefaultConnection().createStatement(); + s.unwrap(SPIReadOnlyControl.class).clearReadOnly(); + return s; + })).get(); + ResultSet rs = stmt.executeQuery( + "SELECT javaName, sqlName FROM sqlj.typemap_entry"); + ) + { + while(rs.next()) + { + try + { + String javaClassName = rs.getString(1); + String sqlName = rs.getString(2); + Class cls = loader.loadClass(javaClassName); + if(!SQLData.class.isAssignableFrom(cls)) + throw new SQLException("Class " + javaClassName + + " does not implement java.sql.SQLData"); + + Oid typeOid = Oid.forTypeName(sqlName); + typesForSchema.put(typeOid, cls.asSubclass(SQLData.class)); + s_logger.finer("Adding type mapping for OID " + typeOid + + " -> class " + cls.getName() + " for schema " + schema); + } + catch(ClassNotFoundException e) + { + // Ignore, type is not know to this schema and that is ok + } + } + if(typesForSchema.isEmpty()) + typesForSchema = Map.of(); + s_typeMap.put(schema, typesForSchema); + return typesForSchema; + } + catch (SQLException e) + { + if (isMissingSqlj(e)) + { + Invocation.clearErrorCondition(); + typesForSchema = Map.of(); + s_typeMap.put(schema, typesForSchema); + return typesForSchema; + } + throw e; + } + } + + private static URL entryURL(int entryId) + { + try + { + @SuppressWarnings("deprecation") // Java >= 20: URL.of(uri,handler) + URL u = doPrivileged(() -> new URL( + "dbf", + "localhost", + -1, + "/" + entryId, + EntryStreamHandler.getInstance())); + return u; + } + catch(MalformedURLException e) + { + throw unchecked(e); + } + } + + /** + * Map from name of entry (resource or expanded class name) to an array of + * the integer surrogate keys for jar entries, in the order of jars on this + * loader's jar path that contain entries matching the name. + */ + private final Map m_entries; + private final Map m_domains; + + /** + * Private constructor used only to create the "sentinel" (non-)loader. + *

      + * Any attempt to use it will incur null pointer exceptions, but it would be + * a bug already for such use to be attempted. + */ + private Loader() { - /* XXX - needs implementation to support TypeMaps */ - return Collections.EMPTY_MAP; + m_entries = null; + m_domains = null; + m_j9Helper = null; } - protected Class findClass(final String name) + /** + * Create a new Loader. + * @param entries + * @param parent + */ + Loader( + Map entries, + Map sources, ClassLoader parent, String name) + { + super(name, parent); + m_entries = entries; + m_j9Helper = ifJ9getHelper(); // null if not under OpenJ9 with sharing + + Principal[] noPrincipals = new Principal[0]; + + m_domains = new HashMap<>(); + + sources.entrySet().stream() + .collect(groupingBy(Map.Entry::getValue)) + .entrySet().stream().forEach(e -> + { + ProtectionDomain pd = new ProtectionDomain( + e.getKey(), null /* no permissions */, this, noPrincipals); + e.getValue().forEach(ee -> m_domains.put(ee.getKey(), pd)); + }); + } + + @Override + protected Class findClass(final String name) throws ClassNotFoundException { - // Scan through all jar files in the classpath - for (ListIterator iter = m_jarloaders.listIterator(); iter.hasNext(); ) + String path = name.replace('.', '/').concat(".class"); + int[] entryId = m_entries.get(path); + if(entryId != null) { - JarLoader loader = (JarLoader) iter.next(); - try + ProtectionDomain pd = m_domains.get(entryId[0]); + + /* + * Check early whether running on OpenJ9 JVM and the shared cache + * has the class. It is possible this early because the entryId is + * being used to generate the token, and it is known before even + * doing the jar_entry query. It would be possible to use something + * like the row's xmin instead, in which case this test would have + * to be moved after retrieving the row. + * + * ifJ9findSharedClass can only return a byte[], a String, or null. + */ + Object o = ifJ9findSharedClass(name, entryId[0]); + if ( o instanceof byte[] ) { - Class r = loader.findClass(name); - if (r != null) - return r; + byte[] img = (byte[]) o; + return defineClass(name, img, 0, img.length, pd); } - catch (ClassNotFoundException e) + String ifJ9token = (String) o; // used below when storing class + + try ( + // This code relies heavily on the fact that the connection + // is a singleton and that the prepared statement will live + // for the duration of the loader. (This comment has said so + // since January 2004; the prepared statement has been getting + // closed in a finally block since November 2004, and that + // hasn't broken anything, and it is currently true that + // prepared statements are backed by ExecutionPlans that stick + // around in an MRU cache after being closed.) 
+ // + PreparedStatement stmt = Checked.Supplier.use((() -> + { + PreparedStatement s = getDefaultConnection() + .prepareStatement( + "SELECT entryImage FROM sqlj.jar_entry " + + "WHERE entryId OPERATOR(pg_catalog.=) ?"); + s.unwrap(SPIReadOnlyControl.class).clearReadOnly(); + s.setInt(1, entryId[0]); + return s; + })).get(); + ResultSet rs = stmt.executeQuery(); + ) + { + if(rs.next()) + { + byte[] img = rs.getBytes(1); + + Class cls = defineClass(name, img, 0, img.length, pd); + + ifJ9storeSharedClass(ifJ9token, cls); // noop for null token + return cls; + } + } + catch(SQLException e) { - // Ignore exception, look in other loaders (JAR files) + Invocation.clearErrorCondition(); + Logger.getAnonymousLogger().log(Level.INFO, + "Failed to load class", e); + throw new ClassNotFoundException(name + " due to: " + + e.getMessage(), e); } } throw new ClassNotFoundException(name); } + @Override protected URL findResource(String name) { - // Scan through all jar files in the classpath - for (ListIterator iter = m_jarloaders.listIterator(); iter.hasNext();) + int[] entryIds = m_entries.get(name); + if(entryIds == null) + return null; + + return entryURL(entryIds[0]); + } + + @Override + protected Enumeration findResources(String name) + throws IOException + { + int[] entryIds = m_entries.get(name); + if(entryIds == null) + entryIds = new int[0]; + return new EntryEnumeration(entryIds); + } + + /* + * Detect and integrate with the OpenJ9 JVM class sharing facility. + * https://www.ibm.com/developerworks/library/j-class-sharing-openj9/#usingthehelperapi + * https://github.com/eclipse/openj9/blob/master/jcl/src/openj9.sharedclasses/share/classes/com/ibm/oti/shared/ + */ + + private static final Object s_j9HelperFactory; + private static final MethodHandle s_j9GetTokenHelper; + private static final MethodHandle s_j9FindSharedClass; + private static final MethodHandle s_j9StoreSharedClass; + private final Object m_j9Helper; + + /** + * Return an OpenJ9 {@code SharedClassTokenHelper} if running on an OpenJ9 + * JVM with sharing enabled; otherwise return null. + */ + private Object ifJ9getHelper() + { + if ( null == s_j9HelperFactory ) + return null; + try + { + return s_j9GetTokenHelper.invoke(s_j9HelperFactory, this); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /** + * Find a class definition in the OpenJ9 shared cache (if running under + * OpenJ9, and sharing is enabled, and the class is there). + * @param className name of the class to seek. + * @param tokenSource something passed by the caller from which we can + * generate a token that is sure to be different if the class has been + * updated. For now, just the int entryId, which is sufficient because that + * is a SERIAL column and entries are deleted/reinserted by replace_jar. + * There is just the one caller, so the type and usage of this parameter can + * be changed to whatever is appropriate should the schema evolve. + * @return null if not running under J9 with sharing; a {@code byte[]} if + * the class is found in the shared cache, or a {@code String} token that + * should be passed to {@code ifJ9storeSharedClass} later. 
+ */ + private Object ifJ9findSharedClass(String className, int tokenSource) + { + if ( null == m_j9Helper ) + return null; + + String token = Integer.toString(tokenSource); + + try + { + byte[] cookie = (byte[]) + s_j9FindSharedClass.invoke(m_j9Helper, token, className); + if ( null == cookie ) + return token; + return cookie; + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /** + * Store a newly-defined class in the OpenJ9 shared class cache if running + * under OpenJ9 with sharing enabled (implied if {@code token} is non-null, + * per the convention that its value came from {@code ifJ9findSharedClass}). + * @param token A token generated by {@code ifJ9findSharedClass}, non-null + * only if J9 sharing is active and the class is not already cached. This + * method is a noop if {@code token} is null. + * @param cls The newly-defined class. + */ + private void ifJ9storeSharedClass(String token, Class cls) + { + if ( null == token ) + return; + assert(null != m_j9Helper); + + try + { + s_j9StoreSharedClass.invoke(m_j9Helper, token, cls); + } + catch ( Throwable t ) + { + throw unchecked(t); + } + } + + /* + * Detect if this is an OpenJ9 JVM with sharing enabled, setting the related + * static fields for later reflective access to its sharing helpers if so. + */ + static + { + Object factory = null; + MethodHandle getHelper = null; + MethodHandle findShared = null; + MethodHandle storeShared = null; + + try + { + /* If this throws ClassNotFoundException, the JVM isn't OpenJ9. */ + Class shared = ClassLoader.getSystemClassLoader().loadClass( + "com.ibm.oti.shared.Shared"); + + MethodHandles.Lookup lup = MethodHandles.publicLookup(); + + MethodHandle getFactory = lup.unreflect(shared.getMethod( + "getSharedClassHelperFactory", (Class[])null)); + + /* If getFactory returns null, sharing is not enabled. */ + factory = getFactory.invoke(); + if ( null != factory ) + { + Class factoryClass = getFactory.type().returnType(); + getHelper = lup.unreflect( + factoryClass.getMethod("getTokenHelper",ClassLoader.class)); + Class helperClass = getHelper.type().returnType(); + findShared = lup.findVirtual(helperClass, "findSharedClass", + methodType(byte[].class, String.class, String.class)); + storeShared = lup.findVirtual(helperClass, "storeSharedClass", + methodType(boolean.class, String.class, Class.class)); + } + } + catch ( ClassNotFoundException cnfe ) + { + /* Not running on an OpenJ9 JVM. Leave all the statics null. */ + } + catch ( Error | RuntimeException e ) + { + throw e; + } + catch ( Throwable t ) + { + throw new ExceptionInInitializerError(t); + } + finally { - JarLoader loader = (JarLoader) iter.next(); - URL url = loader.findResource(name); - if (url != null) - return url; + s_j9HelperFactory = factory; + s_j9GetTokenHelper = getHelper; + s_j9FindSharedClass = findShared; + s_j9StoreSharedClass = storeShared; } - return null; } -} \ No newline at end of file +} diff --git a/pljava/src/test/java/CharsetTest.java b/pljava/src/test/java/CharsetTest.java new file mode 100644 index 00000000..5049605d --- /dev/null +++ b/pljava/src/test/java/CharsetTest.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import junit.framework.TestCase; + +import static org.junit.Assert.*; +import org.junit.Ignore; +import static org.hamcrest.CoreMatchers.*; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; + +import java.nio.charset.Charset; +import static java.nio.charset.StandardCharsets.US_ASCII; + +import static java.util.regex.Pattern.matches; + +public class CharsetTest extends TestCase +{ + public CharsetTest(String name) { super(name); } + + public void testSQL_ASCII() throws Exception + { + Charset sqa = Charset.forName("SQL_ASCII"); + assertNotNull(sqa); + + assertTrue(sqa.contains(sqa)); + assertTrue(sqa.contains(US_ASCII)); + + ByteBuffer bb = ByteBuffer.allocate(256); + + while ( bb.hasRemaining() ) + bb.put((byte)bb.position()); + + bb.flip(); + + CharBuffer cb = sqa.decode(bb); + + assertTrue(matches("[\\u0000-\\u007f]{128}+" + + "(?:[\\ufdd8-\\ufddf][\\ufde0-\\ufdef]){128}+", cb)); + + cb.rewind(); + + ByteBuffer bb2 = sqa.encode(cb); + bb.rewind(); + + assertTrue(bb2.equals(bb)); + } +} diff --git a/pljava/src/test/java/CheckedTest.java b/pljava/src/test/java/CheckedTest.java new file mode 100644 index 00000000..2d1da5d9 --- /dev/null +++ b/pljava/src/test/java/CheckedTest.java @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2020 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import java.util.OptionalDouble; +import java.util.OptionalInt; +import java.util.OptionalLong; + +import java.util.stream.Stream; +import java.util.stream.DoubleStream; +import java.util.stream.IntStream; +import java.util.stream.LongStream; + +import static org.postgresql.pljava.internal.Checked.closing; +import static org.postgresql.pljava.internal.Checked.OptionalBase.ofNullable; + +public class CheckedTest +{ + public void compilability() + { + try + { + Checked.Consumer + .use((String n) -> { Class.forName(n); }) + .in(l -> { Stream.of("Foo").forEach(l); }); + } + catch ( ClassNotFoundException e ) + { + } + + try + { + Checked.DoubleConsumer + .use(v -> {throw new IllegalAccessException();}) + .in(l -> { DoubleStream.of(4.2).forEach(l); }); + + Checked.IntConsumer + .use(v -> {throw new IllegalAccessException();}) + .in(l -> { IntStream.of(42).forEach(l); }); + + Checked.LongConsumer + .use(v -> {throw new IllegalAccessException();}) + .in(l -> { LongStream.of(4).forEach(l); }); + + Checked.Runnable + .use(() -> {throw new IllegalAccessException();}) + .in(r -> { Stream.of("").forEach(o -> {r.run();}); }); + } + catch ( IllegalAccessException e ) + { + } + + Boolean zl = + Checked.Supplier + .use(() -> Boolean.TRUE) + .inReturning(s -> s.get()); + + boolean z = + Checked.BooleanSupplier + .use(() -> true) + .inBooleanReturning(zs -> zs.getAsBoolean()); + + double d = + Checked.DoubleSupplier + .use(() -> 4.2) + .inDoubleReturning(ds -> ds.getAsDouble()); + + int i = + Checked.IntSupplier + .use(() -> 4) + .inIntReturning(is -> is.getAsInt()); + + long j = + 
Checked.LongSupplier + .use(() -> 4) + .inLongReturning(ls -> ls.getAsLong()); + + byte b = + Checked.ByteSupplier + .use(() -> 4) + .inByteReturning(bs -> bs.getAsByte()); + + short s = + Checked.ShortSupplier + .use(() -> 4) + .inShortReturning(ss -> ss.getAsShort()); + + char c = + Checked.CharSupplier + .use(() -> 4) + .inCharReturning(cs -> cs.getAsChar()); + + float f = + Checked.FloatSupplier + .use(() -> 2.4f) + .inFloatReturning(fs -> fs.getAsFloat()); + + try (Checked.AutoCloseable ac = // Java 10: var + closing(() -> {throw new IllegalAccessException();})) + { + } + catch ( IllegalAccessException e ) + { + } + + OptionalDouble opd = ofNullable(4.2); + OptionalInt opi = ofNullable(4); + OptionalLong opj = ofNullable(4L); + Checked.OptionalBoolean opz = ofNullable(true); + Checked.OptionalByte opb = ofNullable((byte)2); + Checked.OptionalShort ops = ofNullable((short)2); + Checked.OptionalChar opc = ofNullable('2'); + Checked.OptionalFloat opf = ofNullable(2f); + } +} diff --git a/pljava/src/test/java/FunctionCreationTest.java b/pljava/src/test/java/FunctionCreationTest.java new file mode 100644 index 00000000..b3316e54 --- /dev/null +++ b/pljava/src/test/java/FunctionCreationTest.java @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2019 Tada AB and other contributors, as listed below. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the The BSD 3-Clause License + * which accompanies this distribution, and is available at + * http://opensource.org/licenses/BSD-3-Clause + * + * Contributors: + * Chapman Flack + */ +package org.postgresql.pljava.internal; + +import junit.framework.TestCase; + +import static org.junit.Assert.*; +import org.junit.Ignore; +import static org.hamcrest.CoreMatchers.*; + +import java.lang.reflect.Method; +import java.lang.reflect.Type; + +@Ignore("Function class has native method now, can't unit test") +public class FunctionCreationTest extends TestCase +{ + public FunctionCreationTest(String name) { super(name); } + + public void testSpecialization() throws Exception + { + Method specialization = + Function.class.getDeclaredMethod( + "specialization", Type.class, Class.class); + specialization.setAccessible(true); + + Method m1 = FunctionCreationTest.class.getMethod("testM1"); + Type m1rt = m1.getGenericReturnType(); + + assertNull(specialization.invoke(null, m1rt, Number.class)); + + Type[] expected = new Type[] { String.class }; + Type[] actual = + (Type[])specialization.invoke(null, m1rt, ThreadLocal.class); + + assertArrayEquals( + "failure - did not find String in ThreadLocal", + expected, actual); + + Method m2 = FunctionCreationTest.class.getMethod("testM2"); + Type m2rt = m2.getGenericReturnType(); + + actual = (Type[])specialization.invoke(null, m2rt, ThreadLocal.class); + + assertArrayEquals( + "failure - did not find String in Foo extends ThreadLocal", + expected, actual); + + Method m3 = FunctionCreationTest.class.getMethod("testM3"); + Type m3rt = m3.getGenericReturnType(); + + actual = (Type[])specialization.invoke(null, m3rt, ThreadLocal.class); + + assertArrayEquals( + "failure - on Baz extends Bar extends ThreadLocal", + expected, actual); + } + + public ThreadLocal testM1() { return null; } + + public Foo testM2() { return null; } + + public Baz testM3() { return null; } + + static class Foo extends ThreadLocal { } + + static class Bar extends ThreadLocal { } + + static class Baz extends Bar { } +} diff --git a/pom.xml b/pom.xml index 8a45af3c..276a8dc3 100644 --- a/pom.xml +++ 
b/pom.xml @@ -3,13 +3,14 @@ 4.0.0 org.postgresql pljava.app - 1.5.0 + 1.6.10 pom Greenplum PL/Java http://gpdb.docs.pivotal.io/ Java stored procedures for Greenplum UTF-8 + ${project.version} @@ -27,6 +28,15 @@ + + Mailing list for PL/Java users and developers + pljava-dev@lists.postgresql.org + https://www.postgresql.org/list/pljava-dev/ + + https://web.archive.org/web/*/http://lists.pgfoundry.org/pipermail/pljava-dev/ + news://news.gmane.io/gmane.comp.db.postgresql.pljava + + Mailing list for Greenplum developers gpdb-dev@greenplum.org @@ -52,10 +62,10 @@ pljava-api pljava pljava-so - pljava-deploy pljava-ant pljava-examples pljava-packaging + pljava-pgxs + + + nashornmod + + [15,) + + + + + org.apache.maven.plugins + maven-antrun-plugin + + + org.openjdk.nashorn + nashorn-core + 15.4 + + + + + + + + + + + junit + junit + 4.13.1 + test + + + + + + + org.apache.maven.plugins + maven-install-plugin + 3.1.0 + + + org.apache.maven.plugins + maven-resources-plugin + 3.3.0 + + + org.apache.maven.plugins + maven-compiler-plugin + 3.10.1 + + + org.apache.maven.plugins maven-compiler-plugin - 2.5.1 - 1.6 - 1.6 ${project.build.sourceEncoding} + 9 + true + true org.apache.maven.plugins maven-jar-plugin - 2.6 + 3.3.0 + + + org.apache.maven.plugins + maven-surefire-plugin + 3.0.0-M7 org.apache.maven.plugins maven-site-plugin - 3.4 - - - org.apache.maven.doxia - doxia-module-markdown - 1.6 - - - net.trajano.wagon - wagon-git - 2.0.4 - - + 3.12.1 false @@ -118,19 +173,7 @@ org.apache.maven.plugins maven-project-info-reports-plugin - 2.8 - - - org.apache.maven.plugins - maven-javadoc-plugin - 2.10.3 - - - - javadoc-no-fork - - - + 3.4.5 diff --git a/release.mk b/release.mk index 71576b1e..003550f3 100644 --- a/release.mk +++ b/release.mk @@ -1,3 +1,3 @@ -PLJAVA_OSS_VERSION = 1.5.0 +PLJAVA_OSS_VERSION = 1.6.10 PLJAVA_PIVOTAL_VERSION = 2.0.0 PLJAVA_PIVOTAL_RELEASE = 0 diff --git a/src/graphics/README b/src/graphics/README new file mode 100644 index 00000000..61707761 --- /dev/null +++ b/src/graphics/README @@ -0,0 +1,15 @@ +pljava_logo.psd is original Photoshop art by Annelie Hallgren. + +pljava_logo.svg is derived from that by Chapman Flack and Inkscape's +tracing feature. + +pljava_logo.svg should not, in its current form, be used directly in web +content. It refers to the "Nimbus Sans L" font by name, which not all web +browsers will honor. A version to be used directly should have the font +subsetted and directly embedded, which may become possible in a newer +version of Inkscape: https://gitlab.com/inkscape/inbox/issues/301 +Until then, it would require a separate step with another tool. + +On the other hand, the current pljava_logo.svg is well suited to generating +clean PNG or other formats at various resolutions, which can then be used +in site content. diff --git a/src/graphics/pljava_logo.psd b/src/graphics/pljava_logo.psd new file mode 100644 index 00000000..a705607e Binary files /dev/null and b/src/graphics/pljava_logo.psd differ diff --git a/src/graphics/pljava_logo.svg b/src/graphics/pljava_logo.svg new file mode 100644 index 00000000..de720661 --- /dev/null +++ b/src/graphics/pljava_logo.svg @@ -0,0 +1,129 @@ + + + + + PL/Java logo SVG + + + + + + image/svg+xml + + PL/Java logo SVG + + + Annelie Hallgren + + + https://github.com/tada/pljava/blob/master/src/graphics/pljava_logo.psd + PL/Java logo from original in Photoshop by Annelie Hallgren, converted by Chapman Flack to SVG by tracing in Inkscape. 
+ + + + + + + + PL/JAVA + + + + + + + + diff --git a/src/graphics/pljava_logo_cropped.psd b/src/graphics/pljava_logo_cropped.psd new file mode 100644 index 00000000..476f7a73 Binary files /dev/null and b/src/graphics/pljava_logo_cropped.psd differ diff --git a/src/java/test/org/postgresql/pljava/test/Tester.java b/src/java/test/org/postgresql/pljava/test/Tester.java index c59d5a82..857a0d71 100644 --- a/src/java/test/org/postgresql/pljava/test/Tester.java +++ b/src/java/test/org/postgresql/pljava/test/Tester.java @@ -861,44 +861,4 @@ private void executeMetaDataFunction(Statement stmt, String functionCall) } } } - - public void testResultSet() throws SQLException - { - String sql; - Statement stmt = m_connection.createStatement(); - ResultSet rs = null; - - try - { - System.out.println("*** ResultSet test:"); - sql = "SELECT * FROM javatest.executeSelect(" - + "'select ''Foo'' as t_varchar, 1::integer as t_integer, " - + "1.5::float as t_float, 23.67::decimal(8,2) as t_decimal, " - + "''2005-06-01''::date as t_date, ''20:56''::time as t_time, " - + "''2006-02-04 23:55:10''::timestamp as t_timestamp')"; - - rs = stmt.executeQuery(sql); - System.out.println("SQL = " + sql); - System.out.println("results:"); - while(rs.next()) - { - System.out.println(rs.getString(1)); - } - rs.close(); - } - finally - { - if(rs != null) - { - try - { - rs.close(); - } - catch(SQLException e) - { - } - rs = null; - } - } - } } diff --git a/src/site/markdown/build/build.md b/src/site/markdown/build/build.md index 1ff13f30..9c0dee7a 100644 --- a/src/site/markdown/build/build.md +++ b/src/site/markdown/build/build.md @@ -9,7 +9,10 @@ and produce the files you need, but *not* install them into PostgreSQL. To do that, continue with the [installation instructions][inst]. [mvn]: https://maven.apache.org/ -[java]: http://www.oracle.com/technetwork/java/javase/downloads/index.html +[orjava]: http://www.oracle.com/technetwork/java/javase/downloads/index.html +[OpenJDK]: https://adoptopenjdk.net/ +[hsj9]: https://www.eclipse.org/openj9/oj9_faq.html +[GraalVM]: https://www.graalvm.org/downloads/ **In case of build difficulties:** @@ -25,13 +28,18 @@ There is a "troubleshooting the build" section at the end of this page. at the command line, which should tell you the version you have installed. -0. The [Java Development Kit][java] (not just the Java Runtime Environment) +0. The Java Development Kit (not just the Java Runtime Environment) version that you plan to use should be installed, also ideally in your search path so that javac -version - just works. + just works. PL/Java can be built with [Oracle Java][orjava] or [OpenJDK][], + the latter with [either the Hotspot or the OpenJ9 JVM][hsj9], or with + [GraalVM][]. It is not necessary to use the same JDK to build PL/Java that + will later be used to run it in the database, and PL/Java applications can + generally take advantage of recent features in whatever Java version is + used at run time. (See more on [version compatibility](versions.html).) 0. The PostgreSQL server version that you intend to use should be installed, and on your search path so that the command @@ -52,7 +60,13 @@ There is a "troubleshooting the build" section at the end of this page. mvn --version - succeeds. + succeeds. It reports not only the version of Maven, but the version of Java + that Maven has found and is using, which must be a Java version supported + for building PL/Java (see more on [version compatibility](versions.html)). 
+ If Maven is not finding and using the intended Java version, the environment + variable `JAVA_HOME` can be set to point to the desired Java installation, + and `mvn --version` should then confirm that the Java being found is the + one intended. If you have more than one version installed of PostgreSQL, Java, or the compile/link tools, make sure the ones found on your search path are the @@ -68,13 +82,17 @@ Please review any of the following that apply to your situation: * Building on [Mac OS X](macosx.html) * Building on [Solaris](solaris.html) * Building on [Ubuntu](ubuntu.html) +* Building on [Linux `ppc64le`](ppc64le-linux-gpp.html) * Building on Microsoft Windows: [with Visual Studio](buildmsvc.html) | [with MinGW-w64](mingw64.html) * Building on an EnterpriseDB PostgreSQL distribution that bundles system libraries, or other situations where [a linker runpath](runpath.html) can help -* Building on a platform that - [requires PostgreSQL libraries at link time](linkpglibs.html) +* Building if you are + [making a package for a software distribution](package.html) +* Building [with debugging or optimization options](debugopt.html) + +[protofail]: versions.html#Maven_failures_when_downloading_dependencies ## Obtaining PL/Java sources @@ -105,6 +123,12 @@ sources. From a clone, you can also build specific released versions, by first using `git checkout` with the tag that identifies the release. +Building from unreleased, development sources will be of most interest when +hacking on PL/Java itself. The GitHub "Branches" page can be used to see which +branch has had the most recent development activity (this will not always be +the branch named `master`; periods of development can be focused on the branch +corresponding to current releases). + [git]: https://git-scm.com/ ## The build @@ -130,6 +154,12 @@ to [try out PL/Java in PostgreSQL][inst]. [inst]: ../install/install.html +### PostgreSQL version to build against + +If several versions of PostgreSQL are installed on the build host, select +the one to be built for by adding the full path of its `pg_config` executable +with `-Dpgsql.pgconfig=` on the `mvn` command line. + ### I know PostgreSQL and PGXS. Explain Maven! [Maven][mvn] is a widely used tool for building and maintaining projects in @@ -198,57 +228,12 @@ build issues that are commonly asked about.* [btwp]: https://github.com/tada/pljava/wiki/Build-tips -#### Not all `[ERROR]`s are errors - -In the part of the build that compiles the native code, you may see lines of -output starting with `[ERROR]`, but the build completes and shows success for -all subprojects. - -Maven is capturing output from the C compiler and adding a tag at the front of -each line. If the line from the C compiler contains the string `warning:` then -Maven adds a `[WARNING]` tag at the front of the line; otherwise it adds -`[ERROR]`. That is how Maven can turn a multiple-line warning, like - -``` -type/String.c: In function 'String_createJavaString': -type/String.c:132:43: warning: conversion to 'jlong' from 'Size' may change - the sign of the result [-Wsign-conversion] - bytebuf = JNI_newDirectByteBuffer(utf8, srcLen); - ^ -``` - -(where only the second line contains `warning:`) into what looks like one -`[WARNING]` and several `[ERROR]`s. - -If the compiler reports any actual errors, the build will fail. - -#### Disable nuisance warnings where possible - -The Maven plugin that drives the C compiler enables, by default, many -types of warning that would be impractical to fix. 
Those can clutter the -output (especially with Maven tagging them with `[ERROR]`) so that if the -build does fail because of an actual error, it is difficult to read back -through the `[ERROR]`s that were not errors, to find the one that was. - -If the compiler is `gcc`, an extra option `-Pwnosign` can be given on the -`mvn` command line, and will suppress the most voluminous and least useful -warnings. It adds the compiler option `-Wno-sign-conversion` which might not -be understood by other compilers, so may not have the intended effect if the -compiler is not `gcc`. - -#### Compile with a single core, for clarity of messages - -On a machine with many cores, messages from several compilation threads may be -intermingled in the output so that related messages are hard to identify. -The option `-Dnar.cores=1` will force the messages into a sequential order -(and has little effect on the speed of a PL/Java build). - #### Capture the output of `mvn -X` The `-X` option will add a lot of information on the details of Maven's build activities. - mvn -X -Pwnosign -Dnar.cores=1 clean install + mvn -X clean install #### Avoid capturing the first run of Maven @@ -256,3 +241,11 @@ On the first run, Maven will produce a lot of output while downloading all of the dependencies needed to complete the build. It is better, if the build fails, to simply run Maven again and capture the output of that run, which will not include all of the downloading activity. + +As an alternative, the flood of messages reflecting successful dependency +downloads in a first run can be suppressed by adding this option on the `mvn` +command line: + +``` +-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn +``` diff --git a/src/site/markdown/build/buildmsvc.md b/src/site/markdown/build/buildmsvc.md index d9af4183..715a0542 100644 --- a/src/site/markdown/build/buildmsvc.md +++ b/src/site/markdown/build/buildmsvc.md @@ -21,9 +21,6 @@ PostgreSQL and PL/Java. Using a *newer* version of Visual Studio (including the Community 2015 version) will generally work, while older versions are more likely to be problematic. -* PostgreSQL 9.1 to 9.3 were built using Visual Studio 2010. -* PostgreSQL 9.4 was built using Visual Studio 2013. - ## Software Prerequisites 0. You will need an appropriate version of [Microsoft Visual Studio][msvc]. When @@ -50,11 +47,6 @@ likely to be problematic. an `INCLUDEDIR-SERVER` line, and list the directory it refers to. There should be a bunch of `*.h` files there. -0. OpenSSL headers: if using an EnterpriseDB PostgreSQL build older than 9.3, - these will be missing. They can be obtained from a 9.3 or later - EDB PostgreSQL build by copying the `include/openssl` directory and - its contents. - 0. You will need to install [Maven][mvn] and add it to your PATH so that mvn --version @@ -219,26 +211,5 @@ dependency when [building your own projects that _use_ PL/Java][jproj]. ### Troubleshooting the build -If something fails, two tricks may be helpful. The C compilation may produce -a lot of nuisance warnings, because the Maven plugin driving it enables many -types of warning that would be impractical to fix. With many warnings it may -be difficult to pick out messages that matter. - -If the link step of the build reports that the symbol `rint` is undefined you -are probably using an older version of Visual Studio (2010) with a newer version -of Postgresql (9.4). 
This symbol is defined in Visual Studio 2013 and later and -the Postgresql 9.4 headers lack the appropriate conditional options for the -older compilers. You will need to use a newer version of Visual Studio. - -On a machine with many cores, messages from several compilation threads may be -intermingled in the output so that related messages are hard to identify. -The option `-Dnar.cores=1` will force the messages into a sequential order -(and has little effect on the speed of a PL/Java build). - -The `-X` option will add a lot of information on the details of Maven's -build activities. - - mvn -X -Dnar.cores=1 clean install - -There is a more comprehensive "troubleshooting the build" section +There is an extensive "troubleshooting the build" section on the [main build page][mbp]. diff --git a/src/site/markdown/build/debugopt.md b/src/site/markdown/build/debugopt.md new file mode 100644 index 00000000..ced08e4a --- /dev/null +++ b/src/site/markdown/build/debugopt.md @@ -0,0 +1,41 @@ +# Building for debugging or with optimization + +Some options can be given on the `mvn` command line to control whether +debugging information is included in the built files, or omitted to save +space at the cost of making use of a debugger less practical. It is also +possible to tailor how aggressively the C compiler will optimize the +native-code portion of PL/Java. + +## Debugging information in the Java portion of PL/Java + +`-Dmaven.compiler.debug=` with a value of `true` or `false` can be given on +the `mvn` command line. If `true`, debugging information is included so a +runtime debugger (`jdb`, Eclipse, etc.) can see local variables, source lines, +etc. The default is `true`. + +## Debugging information in the native portion of PL/Java + +`-Dso.debug=` with a value of `true` or `false` on the `mvn` command line +will control whether debugging information is included in PL/Java's native +code shared object. This is most useful when developing PL/Java itself, or, +perhaps, troubleshooting a low-level issue. The default is `false`. + +Although it is not required, debugging of PL/Java's native code can be +more comfortable when the PostgreSQL server in use was also configured +and built with `--enable-debug`. + +## Compiler optimization in the native portion of PL/Java + +PL/Java used to support a `-Dso.optimize` option earlier. However, it is not +yet implemented in the current build system. Following is the description +of how the option worked when it was supported. + +`-Dso.optimize=` can be given on the `mvn` command line, with a value +chosen from `none`, `size`, `speed`, `minimal`, `full`, `aggressive`, +`extreme`, or `unsafe`. Depending on the compiler, these settings may +not all be distinct optimization levels. The default is `none`. + +Because `none` has long been the default, PL/Java has not seen extensive +testing at higher optimization levels, which should, therefore, be considered +experimental. Before reporting an issue, please make sure it is reproducible +with no optimization. diff --git a/src/site/markdown/build/freebsd.md b/src/site/markdown/build/freebsd.md index 7995f7d1..613650e0 100644 --- a/src/site/markdown/build/freebsd.md +++ b/src/site/markdown/build/freebsd.md @@ -1,23 +1,7 @@ # Building on FreeBSD -At one time, [FreeBSD][]'s threading library would malfunction if it was -dynamically loaded after the start of a program that did not use threads -itself. That was a problem for PL/Java on FreeBSD, because PostgreSQL -itself does not use threads, but Java does. 
The only known workaround was -to build PostgreSQL itself from source, with the thread library included -in linking. - -The same problem was [reported to affect other PostgreSQL extensions][rep] -such as `plv8` and `imcs` also. - -The [manual page for FreeBSD's libthr][manthr] was edited -[in February 2015][thrdif] to remove the statement of that limitation, -and the updated manual page appears first in [FreeBSD 10.2][rel102], -so in FreeBSD 10.2 or later, PL/Java (and other affected extensions) -may work without the need to build PostgreSQL from source. +Building on [FreeBSD][] should proceed just as it does on Linux, +as of late 2023, according to Achilleos Mantzios, who provided the patch +adding the necessary build rules. [FreeBSD]: https://www.freebsd.org/ -[rep]: https://lists.freebsd.org/pipermail/freebsd-hackers/2014-April/044961.html -[manthr]: https://www.freebsd.org/cgi/man.cgi?query=libthr&apropos=0&sektion=3&manpath=FreeBSD+10.2-RELEASE&arch=default&format=html -[thrdif]: https://svnweb.freebsd.org/base/head/lib/libthr/libthr.3?r1=272153&r2=278627 -[rel102]: https://www.freebsd.org/releases/10.2R/announce.html diff --git a/src/site/markdown/build/linkpglibs.md b/src/site/markdown/build/linkpglibs.md deleted file mode 100644 index 70bab635..00000000 --- a/src/site/markdown/build/linkpglibs.md +++ /dev/null @@ -1,28 +0,0 @@ -# Including `pgtypes`, `pq`, `ecpg` libraries at link time - -The PL/Java build process has, for some time, explicitly included these -three PostgreSQL libraries when linking PL/Java. In many cases this is -unnecessary and, in fact, better avoided. Leaving these out at link time -eliminates the chance of certain run-time library version mismatches. - -However, there may be some platforms where these libraries must be included -in the link. If you have a failing build, especially if the failure involves -undefined symbol errors, try the build again, adding - - -Plinkpglibs - -on the `mvn` command line. If that helps, please report your platform and -configuration so we know which platforms require it. - -If it doesn't help, the problem lies somewhere else. - -## Library version mismatches when using `-Plinkpglibs` - -If you must use this option when building, and you will use PL/Java on a -system where several PostgreSQL versions are installed and one has been marked -as the system default, it is possible to see version-mismatch problems where -PL/Java running in one of the non-default PostgreSQL versions will have found -the libraries from the default version. - -That problem and its solutions are described near the end of the -[Building PL/Java with a `RUNPATH`](runpath.html) page. diff --git a/src/site/markdown/build/macosx.md.vm b/src/site/markdown/build/macosx.md.vm index e7665c1a..7baafc9c 100644 --- a/src/site/markdown/build/macosx.md.vm +++ b/src/site/markdown/build/macosx.md.vm @@ -73,17 +73,6 @@ Note that on Mac OS X, the variable should point to a `libjli.dylib` file if it is available (Java 7 and later), not to a `libjvm.dylib` as you would otherwise expect. See `No Java runtime present` below for details. -$h2 Troubleshooting the build - -$h3 Aggressive compiler warnings - -On OS X, it seems the `-Pwnosign` Maven build option does not succeed in -suppressing the many useless sign-conversion warnings, so if the build does -fail, it can be difficult to find the real problem because of so many -surrounding messages. 
One technique is to direct the `mvn -X -Dnar.cores=1 clean -install` output into a file, then search that file for the strings `fatal error` -or `error generated`. - $h2 Troubleshooting installation $h3 `No Java runtime present, requesting install` or `Java SE 6` download dialog diff --git a/src/site/markdown/build/package.md b/src/site/markdown/build/package.md new file mode 100644 index 00000000..34679182 --- /dev/null +++ b/src/site/markdown/build/package.md @@ -0,0 +1,239 @@ +# Packaging PL/Java for a software distribution + +If you are responsible for creating or maintaining a PL/Java package +for a particular software distribution, thank you. PL/Java reaches a +larger community of potential users thanks to your efforts. To minimize +frustration for your users and yourself, please consider these notes +when building your package. + +## What is the default `pljava.libjvm_location`? + +Users of a PL/Java source build nearly always have to set the PostgreSQL +variable `pljava.libjvm_location` before the extension will work, because +there is too much variation in where Java gets installed across systems +for PL/Java to supply a useful default. + +When you package for a particular platform, you may have the advantage of +knowing the conventional location for Java on that platform, and you can +improve the PL/Java setup experience for users of your package by adding +`-Dpljava.libjvmdefault=...` on the `mvn` command line when building, +where the `...` is the path to the JVM library shared object where it +would be by default on your target platform. See [here][locatejvm] to find +the exact file this should refer to. + +When building a package, you are +encouraged to set the default `pljava.libjvm_location` to the library of a +JRE version that is expected to be present on your platform. + +[locatejvm]: ../install/locatejvm.html +[bug190]: https://github.com/tada/pljava/issues/190 + +## What kind of a package is this? + +Your package may be for a distribution that has formal guidelines for how +to package software in certain categories, such as "Java applications", +"Java libraries", or "PostgreSQL extensions". That may force a judgment +as to which of those categories PL/Java falls in. + +### If possible: it's a PostgreSQL extension + +PL/Java has the most in common with other PostgreSQL extensions (even though +it happens to involve Java). It has nearly nothing in common with "Java +applications" or "Java libraries" as those are commonly understood. It is +neither something that can run on its own as an application, nor a library +that would be placed on the classpath in the usual fashion for other Java code +to use. It is only usable within PostgreSQL under its own distinctive rules. + +### Not recommended: Java application or library guidelines + +Formal guidelines developed for packaging Java applications or libraries +are likely to impose requirements that have no value or are inappropriate +in PL/Java's case. The necessary locations for PL/Java's components are +determined by the rules of the PostgreSQL extension mechanism, not other +platform rules that may apply to conventional Java libraries, for example. + +A packaging system's built-in treatment for Java libraries may even actively +break PL/Java. One packaging system apparently unpacks and repacks +jar files in a way that adds spurious entries. 
It has that "feature" to
+address an obscure issue involving
+[multilib conflicts for packages that use GCJ][repack], which doesn't apply
+to PL/Java at all, and when the repacking silently added spurious entries
+to PL/Java's self-installer jar, it took time to track down why unexpected
+things were getting installed.
+
+If you are using that packaging system, please be sure to follow the step
+shown in that link to disable the repacking of jars.
+
+[repack]: https://www.redhat.com/archives/fedora-devel-java-list/2008-September/msg00040.html
+
+### An exception: `pljava-api`
+
+The one part of PL/Java that could, if desired, be handled in the manner of
+Java libraries is `pljava-api`. This single jar file is needed on the classpath
+when compiling Java code that will be loaded into PL/Java in the database.
+That means it could be
+appropriate to provide `pljava-api` in a separate `-devel` package, if your
+packaging guidelines encourage such a distinction, where it would be installed
+in the expected place for a conventional Java library. (The API jar must still
+be included in the main package also, installed in the location where PostgreSQL
+expects it. There may be no need, therefore, for the main package to depend on
+the `-devel` package.)
+
+A `-devel` package providing `pljava-api` might appropriately follow
+Java library packaging guidelines to ensure it appears on a developer's
+classpath when compiling code to run in PL/Java. Ideally, such a package
+would also place the `pljava-api` artifact into the local Maven repository,
+if any. (PL/Java's [hello world example](../use/hello.html) illustrates using
+Maven to build code for use in PL/Java, which assumes the local Maven repo
+contains `pljava-api`.)
+
+To build `pljava-api` in isolation, simply run
+`mvn --projects pljava-api clean install`. It builds quickly and independently
+of the rest of the project, with fewer build dependencies than the project
+as a whole.
+
+That `mvn clean install` also puts the `pljava-api` artifact into the local
+Maven repository on the build host. A `-devel` package will ideally put the
+same artifact into the local Maven repository of the installation target.
+(While the other subprojects in a PL/Java full build also create artifacts
+in the build host's local Maven repository, they can be ignored; `pljava-api`
+is the useful one to have in an installation target host's repository.)
+
+### PL/Java API javadocs
+
+The PL/Java build does not automatically build javadocs. Those that go with
+`pljava-api` can be easily generated by running
+`mvn --projects pljava-api site` to build them, then collecting
+the `apidocs` subtree from `target/site`. They can be included in the same
+package as `pljava-api` or in a separate javadoc package, as your guidelines
+may require.
+
+### An `examples` package?
+
+A full PL/Java build also builds `pljava-examples`, which typically will also
+be installed into PostgreSQL's _SHAREDIR_`/pljava` directory. If the packaging
+guidelines encourage placing examples into a separate package, this jar file
+can be excluded from the main package and delivered in a separate one.
+The examples can be built in isolation by running
+`mvn --projects pljava-examples clean package`, as long as the `pljava-api` has
+been built first and installed into the build host's local Maven repository.
+
+Note that many of the examples do double duty as tests, as described in
+_confirming the build_ below.
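+
+For example, a packaging script might produce the API jar, its javadocs, and
+the examples jar in isolation with a sequence along these lines (a sketch
+only; append profiles such as `-Psaxon-examples`, noted just below, where
+wanted):
+
+    mvn --projects pljava-api clean install
+    mvn --projects pljava-api site
+    mvn --projects pljava-examples clean package
+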
+ +Unless they are not wanted, +the XML examples based on the Saxon library should also be built, +by adding `-Psaxon-examples` to the `mvn` command line. + +## Scripting the build + +Options on the `mvn` command line may be useful in the scripted build for +the package. + +`-Dpljava.libjvmdefault=`_path/to/jvm-shared-object_ +: As suggested earlier, please use this option to build a useful default +into PL/Java for the `pljava.libjvm_location` PostgreSQL variable, so users +of your package will not need to set that variable before +`CREATE EXTENSION pljava` works. + +`-Dpgsql.pgconfig=`_path/to/pg\_config_ +: If the build host may have more than one PostgreSQL version installed, +a package specific to one version can be built by using this option to point +to the `pg_config` command in the `bin` directory of the needed PostgreSQL +version. (The same effect was always possible by making sure that `bin` +directory was at the front of the `PATH` when invoking `mvn`, but this option +on the `mvn` command makes it more explicit.) + +## Patching PL/Java + +If your packaging project requires patches to PL/Java, and not simply the +passing of options at build or packaging time as described on this page, +please [open an issue][issue] so that the possibility of addressing your +need without patching can be discussed. + +[issue]: https://github.com/tada/pljava/issues + +## Confirming the build + +A full build also produces a `pljava-examples` jar, containing many examples +that double as tests. Many of these are run from the deployment descriptor +if the PL/Java extension is created in a PostgreSQL instance and then the +examples jar is loaded with `install_jar` passing `deploy => true`, which +should complete with no warnings. + +Some tests involving Unicode are skipped if the `server_encoding` is not +`utf-8`, so it is best to run them in a server instance created with that +encoding. + +To simplify automated testing, the jar file that is the end product of a full +PL/Java source build contains a class that can serve as a PostgreSQL test +harness from Java's `jshell` script engine. It is documented [here][node], +and the continuous-integration scripts in PL/Java's own source-control +repository can be consulted as examples of its use. + +[node]: ../develop/node.html + +## Packaging the built items + +The end product of a full PL/Java source build is a jar file that functions as +a self-extracting installer when run by `java -jar`. It contains the files that +are necessary on a target system to use PL/Java with PostgreSQL, including +those needed to support `ALTER EXTENSION UPGRADE`. + +It also contains the `pljava-api` jar, needed for developing Java code to use +in a database with PL/Java, and the `pljava-examples` jar. As discussed above, +the examples jar may be omitted from a base package and supplied separately, +if packaging guidelines require, and the API jar may be included also in a +`-devel` package that installs it in a standard Java-library location. (However, +the API jar cannot be omitted from the base package; it is needed at runtime, in +the `SHAREDIR/pljava` location where the extension expects it.) + +The self-extracting jar consults `pg_config` at the time of extraction to +determine where the files should be installed. 
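+
+For example, extraction on a packaging or target host might look like this
+(the jar name here is only illustrative, and the `-Dpgconfig=` property,
+discussed below, is optional):
+
+    java -Dpgconfig=/usr/pgsql-16/bin/pg_config -jar pljava-pg16.jar
+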
+ +Given this jar as the result of the build, there are three broad approaches +to constructing a package: + +| Approach | Pro | Con | +----|----|----| +Capture self-extracting jar in package, deliver to target system and run it as a post-install action | Simple, closest to a vanilla PL/Java build. | May not integrate well into package manager for querying, uninstalling, or verifying installed files; probably leaves the self-installing jar on the target system, where it serves no further purpose. | +Run self-extracting jar at packaging time, and package the files it installs | Still simple, captures the knowledge embedded in the installer jar; integrates better with package managers needing the list of files installed. | Slightly less space-efficient? | +Ignore the self-extracting jar and hardcode a list of the individual files resulting from the build to be captured in the package | ? | Brittle, must reverse-engineer what pljava-packaging and installer jar are doing, from release to release. Possible to miss things. | + +The sweet spot seems to be the middle approach. + +When running the self-extractor, its output can be captured for a list of the +files installed. (As always, parsing that output can get complicated if the +pathnames have newlines or other tricky characters. The names of PL/Java-related +files in the jar do not, so there is no problem as long as no tricky characters +are in the PostgreSQL installation directory names reported by `pg_config`.) + +A package specific to a PostgreSQL version can pass +`-Dpgconfig=`_path/to/pg\_config_ to Java when running the self-extractor, +to ensure the locations are obtained from the desired version's `pg_config`. +(This is the extraction-time analog of the `-Dpgsql.pgconfig` that can be +passed to `mvn` at build time.) + +If necessary to satisfy some packaging guideline, individual locations +obtained from `pg_config` can be overridden with more specific options +such as `-Dpgconfig.sharedir=...` as described in the [install][] guide. +Or, the packaging script might simply move files, or edit the paths they +will have on the target system. + +In addition to the files named in the self-extractor's output, additional +files could be included in the package (if guidelines require the README +or COPYRIGHT, for example). As discussed above, the `pljava-examples` jar could +be filtered from the list if it will be delivered in a separate +package, and the `pljava-api` jar could be additionally delivered in a separate +`-devel` package (but must not be excluded from the base package). + +[install]: ../install/install.html + +## Late-breaking packaging news and tips + +A [Packaging Tips][tips] page on the PL/Java wiki will be created for +information on packaging issues that may be reported and resolved between +released updates to this documentation. Please be sure to check there for +any packaging issue not covered here. + +[tips]: https://github.com/tada/pljava/wiki/Packaging-tips diff --git a/src/site/markdown/build/ppc64le-linux-gpp.md b/src/site/markdown/build/ppc64le-linux-gpp.md new file mode 100644 index 00000000..372087c6 --- /dev/null +++ b/src/site/markdown/build/ppc64le-linux-gpp.md @@ -0,0 +1,9 @@ +# Building on Linux ppc64le with GNU tools + +Until the `nar-maven-plugin` upstream has architecture-os-linker entries +for `ppc64le.Linux.gpp`, the `pljava-so` project directory +contains an extra settings file `aol.ppc64le-linux-gpp.properties`. 
To use it, +add `-Dnar.aolProperties=pljava-so/aol.ppc64le-linux-gpp.properties` +to the `mvn` command line: + + mvn -Dnar.aolProperties=pljava-so/aol.ppc64le-linux-gpp.properties clean install diff --git a/src/site/markdown/build/runpath.md b/src/site/markdown/build/runpath.md index 44879054..fcbf90b7 100644 --- a/src/site/markdown/build/runpath.md +++ b/src/site/markdown/build/runpath.md @@ -81,24 +81,3 @@ Other platform-specific options for solving the same problem, such as `ldconfig` on Linux, may be available. Before there was `pljava.libjvm_location`, it used to be common to have to know these tricks. Now it should be uncommon, but in rare cases can still be useful. - -## Other situations where a RUNPATH may be needed - -On a system where several versions of PostgreSQL are installed, and one of -them has been made the default, there may be entries in the standard system -library directories that point to PostgreSQL-specific libraries (like -`libpgtypes`, `libpq`, `libecpg`) *for the version selected as default*. - -It is possible that when you load PL/Java into one of the non-default -PostgreSQL installations, it will find the wrong versions of those libraries -by looking in the system locations first. - -*This should only be possible if, for some reason, you needed the -`-Plinkpglibs` option when building PL/Java.* Without that option, those -PostgreSQL-specific libraries should be resolved within the PostgreSQL -backend itself, where the correct versions will be found. - -If your platform requires you to use `-Plinkpglibs`, and a problem with the -wrong library versions being found at run time results, it also can be solved -by explicitly using the `pg_config --libdir` value from the appropriate -PostgreSQL version, using any of the methods described above. diff --git a/src/site/markdown/build/ubuntu.md b/src/site/markdown/build/ubuntu.md index 3d59c24f..26b20a06 100644 --- a/src/site/markdown/build/ubuntu.md +++ b/src/site/markdown/build/ubuntu.md @@ -11,19 +11,4 @@ packages, you may also need to separately install: * `libecpg-dev` * `libkrb5-dev` -## Self-extracting jar may fail with some Ubuntu-packaged Java versions - -The final product of the build is a jar file meant to be self-extracting -(it contains a short JavaScript snippet that runs `pg_config` to learn where -the extracted files should be put), but there seem to be issues with the -JavaScript engine in some Ubuntu-packaged Java 6 and Java 7 versions. -Java 8 works fine. There is more information in an -[Ubuntu bug report][ubr] and a [StackOverflow thread][sot]. - -In the worst case, if Java 8 is not an option and one of the affected Java 6 -or 7 builds must be used, simply extract the jar file normally and move the -few files it contains into their proper locations. - [gnxbi]: build.html -[ubr]: https://bugs.launchpad.net/ubuntu/+source/openjdk-7/+bug/1553654 -[sot]: http://stackoverflow.com/questions/35713768/ diff --git a/src/site/markdown/build/versions.md b/src/site/markdown/build/versions.md index 0bf0b591..b79cfeb7 100644 --- a/src/site/markdown/build/versions.md +++ b/src/site/markdown/build/versions.md @@ -1,22 +1,63 @@ # Versions of external packages needed to build and use PL/Java -As of November 2015, the following version constraints are known. +As of fall 2025, the following version constraints are known. ## Java -No version of Java before 1.6 ("Java 6") is supported. The PL/Java code -makes use of Java features first appearing in Java 6. +No version of Java before 9 is supported. 
The PL/Java code
+makes use of Java features first appearing in Java 9.
 
-As for later versions of Java, backward compatibility in the language is
-generally good. The most likely problem areas with a new Java version will
-be additions to the JDBC API that PL/Java has not yet implemented.
+PL/Java's [security policy enforcement][policy] is available only when the Java
+version at run time is 9 through 23. On a Java 24 or later runtime, PL/Java 1.6.x
+can only run [with no policy enforcement][nopolicy]. This is independent of
+the Java version used at build time, and so the availability of enforcement
+can be changed at any time after building, by changing the
+`pljava.libjvm_location` [configuration variable][jvml] to point to a Java
+shared object of a different version.
+
+Other than the loss of policy enforcement in Java 24, backward compatibility
+in the language is
+generally good. Before Java 8, the most likely problem areas with a new Java
+version tended to be additions to the JDBC API that PL/Java had not yet
+implemented. Since Java 8, even JDBC additions have not caused problems for
+existing PL/Java code, as they have taken advantage of the default-methods
+feature introduced in that release.
+
+In the PL/Java 1.6.x series, the build can be done with Java 9 or newer.
+Once built, PL/Java is able to use another Java 9 or later JVM at run time,
+simply by setting
+[the `pljava.libjvm_location` variable][jvml] to the desired version's library.
+
+PL/Java can run application code written for a later Java version than PL/Java
+itself was built with, as long as that later JRE version is used at run time.
+That also allows PL/Java to take advantage of recent Java implementation
+advances such as [class data sharing][cds].
+
+Some builds of Java 20 are affected by a bug, [JDK-8309515][]. PL/Java will
+report an error if it detects it is affected by that bug, and the solution can
+be to use a Java version earlier than 20, or one recent enough to have the bug
+fixed. The bug was fixed in Java 21.
+
+PL/Java has been successfully used with [Oracle Java][orj] and with
+[OpenJDK][], which is available with
+[either the Hotspot or the OpenJ9 JVM][hsj9]. It can also be built and used
+with [GraalVM][].
+
+If building with GraalVM, please add `-Dpolyglot.js.nashorn-compat=true` on
+the `mvn` command line.
+
+[jvml]: ../use/variables.html
+[cds]: ../install/vmoptions.html#Class_data_sharing
+[orj]: https://www.oracle.com/technetwork/java/javase/downloads/index.html
+[OpenJDK]: https://adoptopenjdk.net/
+[hsj9]: https://www.eclipse.org/openj9/oj9_faq.html
+[GraalVM]: https://www.graalvm.org/
+[JDK-8309515]: https://bugs.openjdk.org/browse/JDK-8309515
 
 ## Maven
 
-PL/Java can be built with Maven versions at least as far back as 3.0.4.
-As shown in the [Maven release history][mvnhist], **Maven releases after
-3.2.5 require Java 7 or later**. If you wish to *build* PL/Java using a
-Java 6 development kit, you must use a Maven version not newer than 3.2.5.
+PL/Java can be built with Maven versions as far back as 3.5.2.
+Maven's requirements can be seen in the [Maven release history][mvnhist].
 
 [mvnhist]: https://maven.apache.org/docs/history.html
 
@@ -30,13 +71,11 @@ versions 4.3.0 or later are recommended in order to avoid a
 
 ## PostgreSQL
 
-PL/Java does not currently support PostgreSQL releases before 8.2.
-Recent work is known to have introduced dependencies on 8.2 features.
-
-The current aim is to avoid deliberately breaking compatibility back
-to 8.2.
(A former commercial fork of PostgreSQL 8.2 recently returned -to the open-source fold with a *really* old version of PL/Java, so -the aim is that the current PL/Java should be a possible upgrade there.) +The PL/Java 1.6 series does not support PostgreSQL earlier than 9.5. More current PostgreSQL versions, naturally, are the focus of development and receive more attention in testing. + +PL/Java 1.6.10 has been successfully built and run on at least one platform +with PostgreSQL versions from 18 to 9.5, the latest maintenance +release for each. diff --git a/src/site/markdown/develop/coercion.md b/src/site/markdown/develop/coercion.md index 2df11976..113545b5 100644 --- a/src/site/markdown/develop/coercion.md +++ b/src/site/markdown/develop/coercion.md @@ -20,13 +20,13 @@ begin. The standard also provides an `SQLJ.ALTER_JAVA_PATH` function that gives complete control, based on the jar where a search begins, of which other jars should be searched for dependencies. -By contrast, PL/Java (through and including 1.5) *does not* include the +By contrast, PL/Java (through and including 1.6) *does not* include the jar name in `AS` clauses, and provides an [`SQLJ.SET_CLASSPATH`][scp] function that can set a distinct class path for any schema in the database. The schema `public` can also have a class path, which becomes the fallback for any search that is not resolved on another schema's class path. -[scp]: ../pljava/apidocs/index.html?org/postgresql/pljava/management/Commands.html#setClassPath(java.lang.String,%20java.lang.String) +[scp]: ../pljava/apidocs/org.postgresql.pljava.internal/org/postgresql/pljava/management/Commands.html#set_classpath The class named in an SQL function declaration's `AS` clause is looked up on the *class path for the schema in which the function is declared*, with @@ -41,8 +41,8 @@ in PL/Java with the [@BaseUDT annotation][baseudt]), which is completely integrated into PostgreSQL's type system and is usable from in or out of Java just like any other PostgreSQL type. -[basetype]: http://www.postgresql.org/docs/current/static/sql-createtype.html#AEN80283 -[baseudt]: ../pljava-api/apidocs/index.html?org/postgresql/pljava/annotation/BaseUDT.html +[basetype]: http://www.postgresql.org/docs/9.5/static/sql-createtype.html#AEN81321 +[baseudt]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/BaseUDT.html For the other flavors of user-defined type (described below), [`SQLJ.ADD_TYPE_MAPPING`][atm] (a PL/Java function, not in the standard) must @@ -50,8 +50,8 @@ be called to record the connection between the new type's SQL name and the Java class that implements it. The [@MappedUDT annotation][mappedudt] generates a call to this function along with any other SQL commands declaring the type. -[atm]: ../pljava/apidocs/index.html?org/postgresql/pljava/management/Commands.html#addTypeMapping(java.lang.String,%20java.lang.String) -[mappedudt]: ../pljava-api/apidocs/index.html?org/postgresql/pljava/annotation/MappedUDT.html +[atm]: ../pljava/apidocs/org.postgresql.pljava.internal/org/postgresql/pljava/management/Commands.html#add_type_mapping +[mappedudt]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/MappedUDT.html What it records is simply the SQL type name as a string, and the Java class name as a string, and these mappings apply database-wide. 
But internally, @@ -68,6 +68,20 @@ a PL/Java function, the type map for the schema in which the target function is declared and, at other times, the map for the schema in which the innermost executing PL/Java function on the call stack is declared. +Starting in PL/Java 1.6.3, a PL/Java function is entered with the current +thread's [context class loader][ccl] set according to the schema where the +function is declared, and therefore the rules for applying the type map +just described can be simplified: the type map is the one maintained by +the current context class loader, provided Java code has not changed the +context loader from the initial setting. To date, the code actually obtaining +the type map has not been changed to get it _from_ the context class loader, +so the type map would not be affected by Java code changing the context loader. + +There are [more details](contextloader.html) on the management of +the context class loader. + +[ccl]: https://docs.oracle.com/javase/9/docs/api/java/lang/Thread.html#getContextClassLoader-- + ### PL/Java's object system implemented in C In PL/Java, some behavior is implemented in Java using familiar Java @@ -78,10 +92,135 @@ from `PgObject`. Often there is a close relationship between a C 'class' and a Java class of the same name, with instances of one holding references to the other. +#### Types + The `type` subdirectory in `pljava-so` contains -the C sources for a class `Type`, which inherits directly from `PgObject`, -and many subclasses of `Type` representing different known SQL types -and how they correspond to Java types. +the C sources for a class `Type`, which extends a `TypeClass`, which inherits +from `PgObject`. A `TypeClass` is associated with a single Java (primitive +or reference) type, and might have only a single `Type` that extends it, +associated with a single PostgreSQL type. In that simple case, the singleton +`Type` instance can be directly "registered" in the caches that are keyed +by PostgreSQL type oid or by Java type, respectively, by the function +`Type_registerType`. + +#### Type obtainers + +It is also possible that a single `TypeClass` can be extended by more than +one `Type`, one for each of multiple PostgreSQL types. In that case, an +alternate function `Type_registerType2` will cache, not a single already-created +`Type` instance, but a `TypeObtainer` function, which can be used to obtain +a `Type` extending its associated `TypeClass` and bound to a specific PostgreSQL +type. + +An obtainer function should not allocate a brand new `Type` on every call, but +return an existing `Type` if there already is one for the requested PostgreSQL +type. If a `TypeClass` and its associated Java type can only sensibly map a +small few PostgreSQL types, it could even be overkill for the obtainer to use a +hash map or the like to remember the instances it has returned; it could simply +have a few static variables to cache the few instances it will need, and return +the right one after comparing its oid argument to a few constants. + +The `TypeClass` for `SQLXML` works that way, with an obtainer that will only +return a `Type` instance for PostgreSQL `xml`, or for PostgreSQL `text` (in case +the Java caller wants to process a text value known to contain XML, or is being +used in a PostgreSQL server that was built without the `xml` type). 
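+
+The caching pattern just described can be pictured with a short Java-flavored
+analogue (purely illustrative: the real obtainers are C functions under
+`pljava-so`, and every name in this sketch is invented):
+
+```java
+final class ObtainerSketch
+{
+	// Stand-in for a C "Type" instance bound to one PostgreSQL type oid.
+	static final class ObtainedType
+	{
+		final int pgTypeOid;
+		ObtainedType(int pgTypeOid) { this.pgTypeOid = pgTypeOid; }
+	}
+
+	static final int XML_OID  = 142;  // PostgreSQL type oids, shown only for flavor
+	static final int TEXT_OID =  25;
+
+	private static ObtainedType forXml;   // lazily created singletons
+	private static ObtainedType forText;
+
+	// Return a cached instance when asked again for the same oid; for any
+	// other oid, fall back to the primary (xml) mapping and leave complaining
+	// to the can-replace check described further below.
+	static ObtainedType obtain(int typeOid)
+	{
+		if ( TEXT_OID == typeOid )
+			return null != forText ? forText : ( forText = new ObtainedType(typeOid) );
+		return null != forXml ? forXml : ( forXml = new ObtainedType(XML_OID) );
+	}
+}
+```
+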
+ +An alternative to using an obtainer in that case would be for the initialization +method of the `TypeClass` to simply create more than one `Type` right away, and +register them all directly with `Type_registerType`, needing no obtainer +function. An example is the `TypeClass` representing `java.sql.Timestamp`, which +creates two `Type` instances and registers them immediately, one each for the +PostgreSQL `timestamp` and `timestamptz` types, as both are mapped to this +Java class by default. + +#### Exceptional behavior of `String` + +At the other extreme, the `TypeClass` for `String` provides an obtainer that +will supply a `Type` for any PostgreSQL type it is asked to, and will rely on +the PostgreSQL text input and output methods for that type to handle the +conversion. This is how it is possible in PL/Java to request or supply a +`String` whatever the underlying PostgreSQL type. + +The obtainer for `String`, at present, does not do any bookkeeping to return +one `Type` per PostgreSQL type oid it is called for. It simply allocates a new +one on every call. That makes it an exception to the [comment in `Type.h`][thsc] +specifying singleton behavior, but the exception is as ancient as the comment. + +[thsc]: https://github.com/tada/pljava/blob/d2285d74/src/C/include/pljava/type/Type.h#L105 + +#### Obtainer vs. direct registration + +In the more common case where a `TypeClass` will only sensibly have a few `Type` +children, the choice to simply create and register those directly or to use a +`TypeObtainer` can be influenced by a few considerations. + +The `TypeClass` for `java.sql.Timestamp` directly registers its two children +because it is the default mapping according to JDBC for both PostgreSQL types +`timestamp` and `timestamptz`. The two `Type`s are directly registered, keyed +by those two type oids, and directly retrieved from the cache when a PostgreSQL +value of either type has to be mapped. + +In contrast, JDBC 4.2 introduced non-default mappings for both SQL types: +a `timestamp` can map to a `java.time.LocalDateTime`, and a `timestamptz` can +map to a `java.time.OffsetDateTime`, but only when the Java code explicitly +requests. So, the `TypeClass` for `LocalDateTime` does not directly register +a `Type` corresponding to SQL `timestamp`. It registers a type obtainer, which +can only return a singleton `Type` for that exact SQL type, and does so when +asked. + +For the same reason, the `TypeClass` for `SQLXML` relies on an obtainer. +Although an alternate mapping for the `text` type, it would normally be +the default mapping for type `xml` according to JDBC 4, and would simply +register that `Type` directly. However, PL/Java has long mapped the `xml` +type to `String` by default, so for now (until a later, major release), +it treats `SQLXML` as an alternative mapping Java code may explicitly use. + +#### Lazy initialization + +In the case of the new JDBC 4.2 date/time optional mappings, there is another +reason for each new `TypeClass` to provide a `TypeObtainer`, even though each +`TypeObtainer` will only support exactly one PostgreSQL type. The corresponding +Java classes do not exist before Java 8, and PL/Java supports earlier releases, +so it cannot unconditionally load those classes at initialization time. Each +corresponding `TypeClass` defers that part of its initialization to the first +call of its obtainer, which only happens if the Java code has referred to the +class and therefore it's known to exist. 
+ +A side benefit of this approach is laziness in its own right: less class loading +done at initialization before even knowing whether the classes will be needed. +In future work, it may be possible to further reduce PL/Java's +time-to-first-result by applying the technique more widely to +types that use direct registration now. + +#### `Type_canReplaceType` + +When there is a registered default mapping from a PostgreSQL type to +a `Type` _a_, and the Java type associated with that `TypeClass` is not the one +used in the Java code, the Java type expected by the code will be +looked up and resolved to a `TypeClass`, and from there by its +type obtainer to a second `Type` _b_. The `Type_canReplaceType` method of _b_ +will be called, passing _a_. If it returns `true`, the `Type` _b_ and its +methods will be used instead of _a_ to handle the coercions from +PostgreSQL `Datum` to Java type and vice versa. Otherwise, PL/Java will +seek a chain of PostgreSQL type coercions to bridge the gap. + +The design is slightly awkward at present, because `Type_canReplaceType` +is applied to two `Type`s (or has one as receiver and one as argument, in the +"C objects" view), so it has to be applied to the result, _b_, of the type +obtainer, essentially to find out whether calling that obtainer was worth +doing. A simpler design might result by changing its argument to a `TypeClass`. + +In the current design, redundant checks are largely avoided by not expecting +the type obtainer to do error reporting. If it supports more than one PostgreSQL +type, it should use the PostgreSQL type oid that is passed to determine which +`Type` instance to return. If the PostgreSQL oid is not one of those, it should +simply return whichever `Type` instance represents its primary or most +natural mapping. It does not need to report that the PostgreSQL oid is +unsupported; it can leave that to its can-replace method. A corollary is that +a type obtainer supporting exactly one PostgreSQL type may return its +singleton `Type` instance unconditionally, ignoring its argument. + +#### Coercions Each C `Type` has a method `coerceDatum` that takes a PostgreSQL `Datum` and produces the corresponding Java value, and a method `coerceObject` that @@ -143,14 +282,78 @@ SQL declaration is a domain, constraints on the domain are not checked, allowing the function to return values of the base type that should not be possible in the domain. This is a bug. -### Parameters supplied to a JDBC `PreparedStatement` from Java - -These are passed through the `coerceObject` method of a C `Type` selected -according to the SQL type that the query plan has for the parameter. The -type map for the innermost PL/Java invocation on the call stack is consulted -if necessary, so these rules are equivalent to the first two in the -"parameters and return values" case. However, see "additional JDBC coercions" -below. +### A general rule, with one present exception + +As the steps above reveal, for both directions of conversion, it is the +_PostgreSQL_ type that starts the algorithm off. The known mappings are +used to find a prospective Java type from it, and then if the actual Java +type appearing in the code is not the expected one, plans are adjusted +accordingly. + +This pattern is seen elsewhere in the ISO SQL standard, in Part 14 on +XML-related specifications, which include how to convert values of SQL types +to XML Schema data types and the reverse. 
Again, for both conversion directions, +the algorithms begin with the SQL type, then adjust if the prospective mapped +type is not the one expected. + +#### Parameters supplied to a JDBC `PreparedStatement` from Java + +The sole exception in PL/Java is the JDBC `PreparedStatement`, and only for +the _parameters supplied to_ the statement. _Results from it_ are handled +consistently with the general rule. + +Ordinarily, when preparing a query that contains parameters, PostgreSQL's +parsing and analysis will reach conclusions about what SQL types the parameters +will need to have so that the query makes sense. JDBC presents those conclusions +to the Java code through the `getParameterMetaData` method once the query has +been prepared, so that the Java code can supply values of appropriate types, +or necessary coercions can be done. The (client side) pgJDBC driver is able +to implement `getParameterMetaData` because the PostgreSQL frontend-backend +protocol allows for sending a query to prepare and having the server send back +a `ParameterDescription` message with the needed type information. + +For curious historical reasons, PostgreSQL has been able to supply remote +clients with that `ParameterDescription` information since PG 7.4 came out +in 2003, but a module _loaded right inside the backend_ like PL/Java could +not request the same information using SPI until PG 9.0 in 2010, and +[still not easily][sne]. By then, PL/Java had long been 'faking' +`ParameterMetaData` in a way that reverses the usual type mapping pattern. + +#### How `ParameterMetaData` gets faked + +PL/Java, when creating a `PreparedStatement`, does not submit the query +immediately to PostgreSQL for analysis. Instead, it initializes all of +the parameter types to unknown, and allows the Java code to go ahead and +call the `set...()` methods to supply values. Using the supplied _Java_ types +as starting points, it fills in the parameter types by following the usual +mappings backward. If the Java code does, in fact, call `getParameterMetaData`, +PL/Java returns the types determined that way for any parameters that have +already been set, and (arbitrarily) `VARCHAR` for any that have not. Only +when the Java code executes the statement the first time does PL/Java submit +the query to PostgreSQL to prepare, passing along the type mappings assumed +so far, and hoping PostgreSQL can make sense of it. + +While getting the general rule wrong and differing from client-side pgJDBC, +this is not completely unworkable, and has been PL/Java's behavior +[since 2004][fpm]. Any resulting surprises can generally be resolved by +some rewriting of the query or use of other PL/Java JDBC methods that more +directly indicate the intended PostgreSQL types. +[Some small changes in PL/Java 1.5.1][tpps] may help in some cases. 1.5.1 also +introduces `TypeBridge`s, described later on this page. + +A future major release of PL/Java should use the additions to PostgreSQL SPI +and bring the treatment of `PreparedStatement` parameters into conformance +with the general rule. (That release, therefore, will have to support +PostgreSQL versions no earlier than 9.0.) 
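+
+The behavior just described can be observed from inside a PL/Java function with
+a sketch along these lines (the class and method names are invented, the
+function would still need its own SQL declaration, and the exact type names
+reported may differ):
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ParameterMetaData;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+
+public class PmdSketch
+{
+	public static String observe() throws SQLException
+	{
+		Connection c = DriverManager.getConnection("jdbc:default:connection");
+		try ( PreparedStatement ps = c.prepareStatement("SELECT ?, ?") )
+		{
+			ps.setInt(1, 42); // parameter 1's type is now inferred from the Java value
+			ParameterMetaData pmd = ps.getParameterMetaData();
+			return pmd.getParameterTypeName(1) + " / "
+				+ pmd.getParameterTypeName(2); // unset parameter 2: reported as VARCHAR
+		}
+	}
+}
+```
+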
+ +[sne]: https://www.postgresql.org/message-id/874liv1auh.fsf%40news-spur.riddles.org.uk +[fpm]: https://github.com/tada/pljava/blob/86793a2f/src/java/org/postgresql/pljava/jdbc/SPIPreparedStatement.java#L425 +[tpps]: ../releasenotes.html#Typing_of_parameters_in_prepared_statements + +JDBC defines some `setObject` and `setNull` methods on `PreparedStatement` +that must be passed a `java.sql.Types` constant. The JDBC constant will be +mapped to a PostgreSQL type OID through a fixed mapping coded in +`Oid_forSqlType`. ### Values read or written through the JDBC `ResultSet` interface @@ -190,7 +393,7 @@ other thing PL/Java can do. which can then be associated with a Java class using [SQLJ.ADD_TYPE_MAPPING][atm]. From outside of Java code, it can be manipulated like any PostgreSQL composite type, while to Java code it - will be presented as an instance of the associated Java class--a new + will be presented as an instance of the associated Java class---a new instance at every conversion, however. Java code is provided `SQLInput` and `SQLOutput` implementations that retrieve and set the typed attributes of the composite. Created by a Java class @@ -309,6 +512,11 @@ PL/Java base types. Methods for doing that are covered on the ## Additional JDBC coercions +The JDBC standard facilities for managing a type map are not implemented +or used, and `getTypeMap` will always return `null`. All of PL/Java's uses +of the type map managed with `SQLJ.ADD_TYPE_MAPPING` take place below the +level of the JDBC mechanisms. + When reading or writing values through any of the JDBC interfaces (except `SQLInput`/`SQLOutput` in raw mode), there is another layer of type coercion that can be applied, after (when reading) or before (when @@ -317,10 +525,42 @@ implemented entirely in Java, and can be found in `SPIConnection.java` under the names `basicCoersion`, `basicNumericCoersion`, and `basicCalendricalCoersion`. -The JDBC standard facilities for managing a type map are not implemented -or used, and `getTypeMap` will always return `null`. All of PL/Java's uses -of the type map managed with `SQLJ.ADD_TYPE_MAPPING` take place below the -level of the JDBC mechanisms. +These three, however, are inconsistently applied. They are used on values +written by Java to the single-row writable `ResultSet`s that PL/Java provides +for composite function results, but not those written to the similar +`ResultSet`s provided to triggers, or prepared statement parameters, or +`SQLOutput` in typed tuple mode. They also cannot be assumed to cover all +cases since JDBC 4.1 and 4.2 introduced new type mappings that can be used +in place of the default ones (such as `java.time.OffsetTime` for `timetz`). + +Therefore, a future PL/Java release will probably phase out those three methods +in favor of a more general method. + +## The `TypeBridge` class + +A start on the replacement of those three methods has already been made in the +work to support the `java.time` types and `SQLXML` in PL/Java 1.5.1. The +support of these alternative mappings requires that the Java types be +recognized as alternate mappings known to the native code, and passed intact +to the native layer with no attempt to coerce them to the expected types first. +To do that, a Java value that is of one of the known supported alternate types +is wrapped in a `TypeBridge.Holder` to link the value with explicit information +on the needed type conversion. 
As the first step in phasing out the +inconsistently-applied `SPIConnection` basic coercions, they are never applied +at all to a `TypeBridge.Holder`. At present, `TypeBridge`s are used only +for the newly-added type mappings, to avoid a behavior change for pre-existing +ones. + +The `TypeBridge` class is not intended as a mechanism for user-extensible +type mappings (the existing facilities for user-defined types should be used). +There will be a small, stable number of `TypeBridge`s corresponding to known +type mappings added in the JDBC spec, or otherwise chosen for native support +in PL/Java. For any `TypeBridge` wrapping a Java value there must be a +native-code `TypeClass` registered for the Java class the bridge is meant +to carry. There is one function in `Type.c` to initialize and register all of +the known handful of `TypeBridge`s. When new ones are added, the list must be +kept in an order such that if bridge _a_ is registered before bridge _b_, then +_a_ will not capture the Java type registered to _b_. ## The user-defined-type function slot switcheroo @@ -360,6 +600,17 @@ PostgreSQL does call through those slots, PL/Java always does a raw binary transfer using the `libpq` API directly (for fixed-size representations), `bytearecv`/`byteasend` for `varlena` representations, or `unknownrecv`/`unknownsend` for C string representations. +Responsible code in `type/UDT.c` is commented with "Assumption 2". A future version could revisit this limitation, and allow PL/Java UDTs to specify custom binary transfer formats also. + +"Assumption 1" in `UDT.c` is that any PostgreSQL type declared with +`internallength=-2` (meaning it is stored as a variable number of nonzero +bytes terminated by a zero byte) must have a human-readable representation +identical to its stored form, and must be converted to and from Java using +the `INPUT` and `OUTPUT` slots. A `MappedUDT` does not have functions in +those slots, and therefore "Assumption 1" rules out any such type as target +of a `MappedUDT`. + +A future version could revisit this limitation also. diff --git a/src/site/markdown/develop/contextloader.md b/src/site/markdown/develop/contextloader.md new file mode 100644 index 00000000..49f7da5e --- /dev/null +++ b/src/site/markdown/develop/contextloader.md @@ -0,0 +1,104 @@ +# The thread context class loader + +Starting with PL/Java 1.6.3, within an SQL-declared PL/Java function, the +class loader returned by `Thread.currentThread().getContextClassLoader` +is the one that corresponds to the per-schema classpath that has been set +with [`SQLJ.SET_CLASSPATH`][scp] for the schema where the function is +declared (assuming no Java code uses `setContextClassLoader` to change it). + +Many available Java libraries, as well as built-in Java facilities using the +[`ServiceLoader`][slo], refer to the context class loader, so this behavior +ensures they will see the classes that are available on the classpath that was +set up for the PL/Java function. In versions where PL/Java did not set the +context loader, awkward arrangements could be needed in user code for the +desired classes or services to be found. + +## Limits on the implementation + +To set this loader with minimal overhead on function entry, PL/Java uses native +access to a `Thread` field. It is possible that some Java runtimes can exist +where the expected field is not present, and PL/Java will fall back (with a +warning) to not managing the context loader. 
The warning can be suppressed +by explicitly configuring PL/Java not to manage the context loader, as described +below. + +It is also possible for an application or library to create subclasses +of `Thread` that override the behavior of `getContextClassLoader` so that +the value set by PL/Java will have no effect. PL/Java does not detect such +a case to work around it. + +When PL/Java is used [with policy enforcement][policy], a clear sign of code +that does subclass `Thread` in this way is that it will need the +`enableContextClassLoaderOverride` [`RuntimePermission`][runtimeperm] to be +granted in the [policy][]. When PL/Java is used [without enforcement][nopolicy], +there will be no such clear sign, making a problem of this kind harder to trace. + +## Effects on application code + +With this change as of PL/Java 1.6.3, application or library code that uses +the [`ServiceLoader`][slo], or otherwise refers to the context class loader, +will find services or resources available on the class path that was set up +for the function. Typically, this behavior is wanted. In prior PL/Java versions, +services and resources might be found only if they were available to the +system class loader. + +For example, a call like `javax.xml.transform.TransformerFactory.newInstance()` +might return Java's built-in XSLT 1.0 implementation if there is nothing else +on the class path, but return an XSLT 3.0 implementation if the configured +PL/Java class path includes a Saxon jar. + +If there are cases where an application intends to use a built-in Java +implementation regardless of the class path, there may be a method available +that specifies that behavior. For example, +[`TransformerFactory.newDefaultInstance()`][tfndi] will always return Java's +own `Transformer` implementation. + +If an application misbehaves as a result of finding implementations on the +class path it was not finding before, and cannot be conveniently fixed by +adjusting the class path or changing to `newDefaultInstance`-like methods +in the code, PL/Java can be configured for its old behavior of not setting +the context class loader, as described below. + +## Effects on UDT methods + +User-defined types implemented in PL/Java have support methods that are +transparently invoked to convert database values to Java values and back. +This can happen within a PL/Java function, when it gets or sets values in +`ResultSet`, `PreparedStatement`, `SQLInput`, or `SQLOutput` objects, and +also conceptually "before" or "after" the function proper, to convert its +incoming parameters and its return value(s). In all such contexts, the UDT +methods are considered to act on behalf of that target PL/Java function, +and the context class loader they see is the one for the schema where the +target function is declared. + +A [`BaseUDT`][baseudt] implemented in PL/Java has support methods that are +declared to PostgreSQL as SQL functions in their own right. In addition to being +transparently called on behalf of another PL/Java function, with the behavior +described above, they can be called directly by PostgreSQL like any other +SQL function. When that happens, like any other declared function, they will +have the context class loader set according to the schema containing the +declaration. + +## Suppressing context loader management + +Some circumstances may call for keeping the pre-1.6.3 behavior +where no management of the context class loader was done. 
That could be to +avoid unplanned effects on applications as described above, or to suppress +the warning message if running on a JVM where PL/Java's technique doesn't work. + +To suppress the loader management, add + +``` +-Dorg.postgresql.pljava.context.loader=unmanaged +``` + +in the `pljava.vmoptions` [setting](../use/variables.html). + + +[scp]: ../pljava/apidocs/org.postgresql.pljava.internal/org/postgresql/pljava/management/Commands.html#set_classpath +[slo]: https://docs.oracle.com/javase/9/docs/api/java/util/ServiceLoader.html +[tfndi]: https://docs.oracle.com/javase/9/docs/api/javax/xml/transform/TransformerFactory.html#newDefaultInstance-- +[runtimeperm]: https://docs.oracle.com/en/java/javase/14/docs/api/java.base/java/lang/RuntimePermission.html +[baseudt]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/BaseUDT.html +[policy]: ../use/policy.html +[nopolicy]: ../use/unenforced.html diff --git a/src/site/markdown/develop/develop.md b/src/site/markdown/develop/develop.md index d092cefb..5044ad49 100644 --- a/src/site/markdown/develop/develop.md +++ b/src/site/markdown/develop/develop.md @@ -5,4 +5,6 @@ PL/Java's implementation. They will be most often of interest to those developing PL/Java itself, but may also be useful when a deeper understanding of some PL/Java behavior is needed. +* [The testing harness `Node.class` in PL/Java's self-installer jar](node.html) * [Passing of data types between PostgreSQL and Java](coercion.html) +* [The thread context class loader in a PL/Java function](contextloader.html) diff --git a/src/site/markdown/develop/node.md b/src/site/markdown/develop/node.md new file mode 100644 index 00000000..9cb80ba5 --- /dev/null +++ b/src/site/markdown/develop/node.md @@ -0,0 +1,456 @@ +# The testing harness `Node.class` in PL/Java's self-installer jar + +The end product of a PL/Java build is a jar file containing the actual +files (including other jars) that need to be installed on a target system, +plus some logic allowing it to be run with `java -jar` and extract itself, +consulting the target system's `pg_config` to learn where to put the files. +That is unchanged from PL/Java 1.5. + +In 1.6, however, the class added in the jar to support the self-extraction +has a number of new methods useful for integration testing. + +The new methods are unused in a simple extraction with `java -jar`, but are +available, for example, to Java's [jshell][] scriptable interpreter. +Starting `jshell` with PL/Java's installer jar on its class path creates +a rather versatile environment for scripting tests of PL/Java in one or more +temporary database instances. + +This is currently done in the multi-platform CI test configurations in the +project's repository, as a way to keep as much as possible of the testing code +common across platforms. + +The overall flavor, and even some of the method names, follow the `PostgresNode` +Perl module that became part of PostgreSQL's "PGXS" extension-building tools +in 2015, so a quick review of that follows. + +For PostgreSQL 15, the module distributed with PostgreSQL was renamed from +`PostgresNode` to `PostgreSQL::Test::Cluster`, with no essential change in +functionality (though `get_new_node` did become, simply, `new`). To avoid +needless churn, this Java class still has the historical name and methods. 
+ +## Similarities to the upstream `PostgreSQL::Test::Cluster` Perl module + +When used from a testing script written in Perl, the methods of +`PostgreSQL::Test::Cluster` +make it easy to spin up and tear down one or more PostgreSQL instances, running +in temporary directories, listening on temporary ports, non-interfering with +each other or with production instances using the standard locations and ports, +and without needing the permissions that guard those 'real' locations and ports. +A Perl test script might be simply: + +```perl +my $n1 = get_new_node("TestNode1"); +$n1->init(); # run initdb in n1's temporary location +$n1->start(); # start a server listening on n1's temporary port +$n1->safe_psql("postgres", "select 42"); +$n1->stop(); # stop the server +$n1->clean_node(); # recursively delete the temporary location +``` + +`PostgreSQL::Test::Cluster` illustrates the immense utility of making just a few +well-chosen methods available, when there is already an expressive scripting +language at hand (Perl) for putting those methods to use. + +Early Java versions lacked any batteries-included support for scripting, but the +arrival of `jshell` with Java 9 changed that. Start up `jshell` with PL/Java's +installer jar on its classpath, and you have an interactive, scriptable version +of Java, with the methods of `Node.class` available in it. + +The ones that correspond to the Perl example above have the same names, for +familiarity (right down to the Perlish spelling with underscores rather than +Javaish camelCase): + +```java +import org.postgresql.pljava.packaging.Node +Node n1 = Node.get_new_node("TestNode1") +n1.init() +n1.start() +/* ... */ +n1.stop() +n1.clean_node() +``` + +`jshell` has to be run with a rather lengthy command line to get to this point; +more on that later. But once started, it presents a familiar +`PostgreSQL::Test::Cluster`-like +environment. As the example shows, `jshell` is lenient about statement-ending +semicolons. (Using them is still advisable, though; that leniency has fiddly +exceptions, such as not applying to pasted text.) + +## `Node.class` in detail + +A `Node` will register a VM shutdown hook to make sure `stop` and `clean_node` +happen if you forget and exit `jshell`, though forgetting is not recommended. +For using `jshell` interactively, these methods are convenient. If writing a +script, the equivalent `try`-with-resources forms may be tidier: + +```java +try (AutoCloseable t1 = n1.initialized_cluster()) +{ + try (AutoCloseable t2 = n1.started_server()) + { + /* ... */ + } +} +``` + +The server will be stopped when `t2` goes out of scope, and the file tree +created by `initdb` will be removed when `t1` goes out of scope. + +The `try` form is less convenient for interactive use, because `jshell` is not +very interactive when gathering a compound statement like a `try`. None of your +actions actually happen until you supply the final closing brace, and then they +all happen at once and the instance is torn down. But for any sort of finished +test script, the `try` form will be natural. + +The full set of `Node` methods available can be seen +[in its javadocs][nodeapi]. + +### Connecting to the server + +Running `initdb` and starting a server are all well and good, but sooner or +later a test may need to connect to it. That requires a JDBC driver to be on the +classpath also; either `PGJDBC` or `pgjdbc-ng` will work, with a few minor +differences. 
+ +New profiles have been added to PL/Java's Maven build, and can be activated with +`-Ppgjdbc` or `-Ppgjdbc-ng` on the `mvn` command line. They have no effect but +to declare an extra dependency on the corresponding dependencies-included driver +jar. It is not used in the build, but Maven will have downloaded it to the local +repository, and that location can be added to `jshell`'s classpath to make the +driver available. (In the case of `PGJDBC`, adding the jar to the module path +also works.) + +That addition leads to the final long unwieldy command line needed to start +`jshell`, which can be seen in all its glory toward the end of this page. +Once that is copied and pasted into a terminal and any local paths changed, the +rest is easy: + +```java +import org.postgresql.pljava.packaging.Node; +Node n1 = Node.get_new_node("TestNode1"); +n1.init(); +n1.start(); +import java.sql.Connection; +Connection c1 = n1.connect(); +``` + +Once you have an open connection (or several), the convenience methods `Node` +provides for using them are `static`. A connection is already to a specific +`Node`, so there is no need for the convenience methods to be invoked on a +`Node` instance. They are `static`, and simply take a `Connection` as the first +parameter. + +```java +import static org.postgresql.pljava.packaging.Node.qp; // query-print +qp(c1, "CREATE TABLE foo (bar int, baz text)"); +qp(c1, "INSERT INTO foo (VALUES (1, 'Howdy!'))"); +qp(c1, "SELECT 1/0"); +qp(c1, "SELECT pg_sleep(1.5)"); +qp(c1, "SELECT * FROM foo"); +``` + +This example shows `qp` used several different ways: with a DDL statement that +returns no result, a DML statement that returns an update count, a statement +that returns an error, one that calls a `void`-returning function (and therefore +produces a one-row result with one column typed `void` and always null), and one +that returns a general query result. What it prints: + +``` +jshell> qp(c1, "CREATE TABLE foo (bar int, baz text)"); + +jshell> qp(c1, "INSERT INTO foo (VALUES (1, 'Howdy!'))"); + + +jshell> qp(c1, "SELECT 1/0"); + + +jshell> qp(c1, "SELECT pg_sleep(1.5)"); + + +jshell> qp(c1, "SELECT * FROM foo"); + ... + text + + + + + 1 + Howdy! + + + + +jshell> +``` + +The XMLish output style comes from using Java's built-in `WebRowSet.writeXml` +method for dumping general result sets. It is more verbose than one would like, +and easily flummoxed by unusual or PG-specific column types, but it is as useful +a way to readably dump a typical result set as one could hope to write in four +lines of Java. (This is meant as a _small_ class useful for testing, not as a +reimplementation of `psql`!) + +The writing of update counts and diagnostics as +`success`/`error`/`warning`/`info` XML elements naturally follows to keep +the output format consistent. The `void` output is special treatment for +the common case of a result set with only the `void` column type, to spare the +effort of generating a whole `WebRowSet` XML that only shows nothing is there. + +The results shown above were obtained with `pgjdbc-ng`. If using `PGJDBC`, you +will notice these minor differences: + +* For the first case shown, DDL with no result, `PGJDBC` will present a zero-row + success result, the same as for DML that did not affect any rows. This has + to be taken into account if writing a state machine to check results, as + discussed further below. 
+* The `message` attribute produced for an error will have a prefix of `ERROR: ` + (or the corresponding word in the PostgreSQL server's configured language). + +#### `qp` dissected + +`qp` is for interactive, exploratory use, generating printed output. For +scripting purposes, `q` gives direct access to result objects; `qp` is nothing +but a wrapper that calls `q` with the same arguments, and what `q` returns is +passed directly to another method (in fact, another of several overloads of +`qp`) to be printed. + +What `q` returns is a `Stream`. Although declared with element type +`Object`, the stream will only deliver instances of: `ResultSet`, `Long` (an +update count), or throwables (caught exceptions, or `SQLWarning` instances). The +JDBC `Statement` is polled for new `SQLWarning`s before checking for each next +result (`ResultSet` or update count). An error or exception that is thrown and +caught will be placed on the stream when caught (and will be the last thing on +the stream, though it may carry chains of cause, suppressed, or next exceptions +that may follow it if `flattenDiagnostics` is used on the stream). + +All 'notices' from PostgreSQL (severity below `ERROR` but at or above +`client_min_messages`) are turned into `SQLWarning` instances by `pgjdbc-ng`, +which does not provide any API to get the original PostgreSQL severity, or any +of the details other than the message and SQLState code. `Node` classifies them +as `info` if the SQLState 'class' (leftmost two positions) is `00`, otherwise as +`warning`. Exceptions of any other kind are classified as `error`. + +`PGJDBC` also turns notices below `ERROR` into `SQLWarning` instances, but +provides access to the severity tag from the server, so `Node` uses that to +classify them as `warning` or `info`, instead of the class `00` rule. If the +severity is `WARNING` (or happens to be null for some reason), `Node` will +classify the notice as `warning`; any other severity will be classified as +`info`. (Note, however, that this scheme will not work if the server is +configured for a language that uses a different word for `WARNING`. To make it +work in that case, you can call `Node.set_WARNING_localized` in advance, passing +the word that PostgreSQL uses for `WARNING` in your language.) + +As it happens, there is also an overload of `qp` with just one `Stream` +parameter. If you have already run a query with `q` and have the result stream, +and decide you just want to print that, just pass it to `qp`. There are other +overloads of `qp` for the individual objects you might encounter in a result +stream. One static import of the name `qp` will allow printing many things. + +#### More specialized convenience methods + +`Node` also supplies several more specialized methods: `setConfig` for +PostgreSQL configuration variables (`qp` is fine for a literal string "set foo +to bar" command, but for computed values, `setConfig` uses a prepared statement +and binding), and wrappers for PL/Java `install_jar`, `remove_jar`, and +`set_classpath`. + +The `installExamplesJar` method supplies the correct as-installed path to the +jar file (which we know, because this _is_ the self-installer code, remember?). +The boolean method `examplesNeedSaxon` introspects in the examples jar to see if +it includes the Saxon examples, and therefore needs the Saxon jar in place +before it can be deployed. 
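+
+For example, a test script might branch on that introspection, installing the
+Saxon jar only when the examples jar actually needs it. The following is a
+hedged sketch, continuing from the connected node `n1` above: it uses the
+combined `installSaxonAndExamplesAndPath` helper described in the next
+paragraph, and the examples-only `installExamplesAndPath` call in the other
+branch is an assumption to be checked against [the javadocs][nodeapi].
+
+```java
+import static java.nio.file.Paths.get;
+import java.sql.Connection;
+import org.postgresql.pljava.packaging.Node;
+import static org.postgresql.pljava.packaging.Node.qp;
+
+// sketch only: deploy the examples jar, installing Saxon first if needed
+try ( Connection c = n1.connect() )
+{
+    if ( Node.examplesNeedSaxon() )
+        qp(Node.installSaxonAndExamplesAndPath(c,
+            get(System.getProperty("user.home"), ".m2", "repository").toString(),
+            "10.9", true));
+    else
+        qp(Node.installExamplesAndPath(c, true)); // assumed; check the javadocs
+}
+```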
+ +As the Saxon jar is probably already in a local Maven repository, `installSaxon` +will install it from there, given a path to the repository root and the desired +version of Saxon-HE. Not to be outdone, `installSaxonAndExamplesAndPath` +combines the steps in correct order to install the Saxon jar, place it on the +classpath, install and deploy the examples jar, and set a final classpath that +includes both. + +```java +import static java.nio.file.Paths.get; +import java.sql.Connection; +import org.postgresql.pljava.packaging.Node; +import static org.postgresql.pljava.packaging.Node.qp; + +Node n1 = Node.get_new_node("TestNode1"); + +try ( + AutoCloseable t1 = n1.initialized_cluster(); + AutoCloseable t2 = n1.started_server(Map.of( + "client_min_messages", "info", + "pljava.vmoptions", + "-Xcheck:jni -enableassertions:org.postgresql.pljava..." + )); +) +{ + try ( Connection c = n1.connect() ) + { + qp(c, "CREATE EXTENSION pljava"); + } + + /* + * Get a new connection; 'create extension' always sets a near-silent logging + * level, and PL/Java only checks once at VM start time, so in the same + * session where 'create extension' was done, logging is somewhat suppressed. + */ + try ( Connection c = n1.connect() ) + { + qp(Node.installSaxonAndExamplesAndPath(c, + get(System.getProperty("user.home"), ".m2", "repository").toString(), + "10.9", + true)); + } +} +/exit +``` + +The above example puts together most of the ideas covered here. While it does +demonstrate installing and deploying the examples jar (which runs all of the +tests contained in its deployment code), this example merely prints the output, +rather than examining it programmatically to evaluate success, and it does not +use its exit status to communicate success or failure to its invoker, as one +would expect of a test. + +Worked-out examples that do the rest of that can be seen in the project +repository in the configuration files for the CI testing services. + +#### `stateMachine` for checking results + +One last `Node` method most useful for checking returned results +programmatically is `stateMachine` (full description in +[the javadocs][nodeapi]). For example, the `installSaxonAndExamplesAndPath` call +above returns a concatenation of four object streams: one from +`sqlj.install_jar` loading the Saxon jar, one from `sqlj.set_classpath` adding +it to the path, one from `sqlj.install_jar` loading the PL/Java examples jar +(which runs the tests in its deployment descriptor), and finally the +`sqlj.set_classpath` placing both jars on the path. + +Each of those streams should end with a `void` result set, preceded by zero +or more `info` or `warning` (any `error` should be counted as test failure). +In the third stream, from installing the examples jar, any `warning` should +also be counted as a test failure: the tests in the deployment descriptor +report failures that way to avoid aborting the query, so more results can be +reported. + +Using `stateMachine`, that can be expressed as a small set of states and +transitions that match that expected sequence. A state returns a positive +number _n_ to consume the input object it is looking at and transition to +state _n_ for the next item of input, or a negative number _-n_ to go to +state _n_ still with the same input item. The final accepting state returns +`true`; `false` from any state reports a mismatch. 
A `null` is supplied by +`stateMachine` after the last item on the input `Stream` (which therefore +must not contain nulls): + +```java +succeeding &= stateMachine( + "descriptive string for this state machine", + null, + + Node.installSaxonAndExamplesAndPath(c, + System.getProperty("mavenRepo"), + System.getProperty("saxonVer"), + true) + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), // so they also appear in the log + + // states 1,2: maybe diagnostics, then a void result set (saxon install) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 3 : false, + + // states 3,4: maybe diagnostics, then a void result set (set classpath) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 3 : -4, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 5 : false, + + // states 5,6: maybe diagnostics, then void result set (example install) + (o,p,q) -> isDiagnostic(o, Set.of("error", "warning")) ? 5 : -6, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 7 : false, + + // states 7,8: maybe diagnostics, then a void result set (set classpath) + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 7 : -8, + (o,p,q) -> isVoidResultSet(o, 1, 1) ? 9 : false, + + // state 9: must be end of input + (o,p,q) -> null == o +); + +``` + +The `isDiagnostic` method shown above isn't part of the `Node` class; in the +actual test configurations in the repository, it is trivially defined in +`jshell` a few lines earlier. Not everything needs to be built in. + +The difference in treatment of no-result DDL statements between drivers (where +`pgjdbc-ng` really has no result, and `PGJDBC` has a zero-row update count as +it would for a DML statement) can complicate writing a state machine that works +with both drivers. `Node` predefines a state function +`NOTHING_OR_PGJDBC_ZERO_COUNT` that consults the `s_urlForm` static field to +determine which driver is in use, and then moves to the numerically next state, +after consuming + +* nothing, if the driver is `pgjdbc-ng`, or +* a single zero row count (rejecting any other input), if the driver + is `PGJDBC`. + +```java +succeeding &= stateMachine( + "descriptive string for this state machine", + null, + + q(c, "CREATE TABLE foo (bar int, baz text)") + .flatMap(Node::semiFlattenDiagnostics) + .peek(Node::peek), // so they also appear in the log + + (o,p,q) -> isDiagnostic(o, Set.of("error")) ? 1 : -2, + + Node.NOTHING_OR_PGJDBC_ZERO_COUNT, + + (o,p,q) -> null == o +); + +``` + +## Invoking `jshell` to use `Node.class` + +As hinted above, the command needed to get `jshell` started so all the foregoing +goodness can happen is a bit unwieldy with options. It looks like this: + +```sh +jshell \ + --execution local \ + "-J--class-path=$packageJar:$jdbcJar" \ + "--class-path=$packageJar" \ + "-J--add-modules=java.sql.rowset" \ + "-J-Dpgconfig=$pgConfig" \ + "-J-Dcom.impossibl.shadow.io.netty.noUnsafe=true" +``` + +where _$packageJar_ is a PL/Java self-installer jar, _$jdbcJar_ should point +to a "fat jar" for the JDBC driver of choice (`postgresql-`_version_`.jar` or +`pgjdbc-ng-all-`_version_`.jar`), and _$pgConfig_ should point to +the `pg_config` executable for the PostgreSQL installation that should be used. +(If there is only one PostgreSQL installation or the right `pg_config` will be +found on the search path, it doesn't have to be specified.) 
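+
+Started with a command line like the one above, `jshell` can be driven
+interactively, or a whole test can be scripted by giving `jshell` a script
+file as a final argument (or the same text on standard input). What follows is
+only a minimal hedged sketch of such a script, reusing methods already shown;
+the CI configurations in the project repository are the worked-out,
+authoritative examples. The remaining options on that command line are
+explained below.
+
+```java
+import java.sql.Connection;
+import java.util.Map;
+import org.postgresql.pljava.packaging.Node;
+import static org.postgresql.pljava.packaging.Node.qp;
+
+Node n1 = Node.get_new_node("SmokeTest");
+
+try (
+    AutoCloseable t1 = n1.initialized_cluster();   // initdb in a temp location
+    AutoCloseable t2 = n1.started_server(Map.of(   // server on a temp port
+        "client_min_messages", "info"));
+    Connection c = n1.connect();
+)
+{
+    qp(c, "CREATE EXTENSION pljava");
+    qp(c, "SELECT sqlj.get_classpath('public')");
+}
+/exit
+```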
+ +The `-J--add-modules` is needed because even though `jshell` treats +`java.sql.rowset` as available by default, the local JVM it is running on +(because of `--execution local`) wouldn't know that without being told. + +The path given to `jshell` itself (`--class-path` without the `-J`) does not +need to mention _$jdbcJar_, because that can be a provider of the +`java.sql.Driver` service without having to be visible. If the script will +want to use driver-specific extension classes, then the jar does have to be +on `jshell`'s class path too. + +When the driver is `PGJDBC`, it can be placed on the class path or on the module +path (in which case it becomes the named module `org.postgresql.jdbc`). Again, +it does not need to be on `jshell`'s module path also---the one without +`-J`---unless the script will be referring to driver-specific classes. + +The `noUnsafe` setting, needed only when the driver is `pgjdbc-ng`, silences +a complaint from the `netty` library about Java (correctly!) denying it access +to private internals. + +[jshell]: https://docs.oracle.com/en/java/javase/15/jshell/introduction-jshell.html +[nodeapi]: ../pljava-packaging/apidocs/org/postgresql/pljava/packaging/Node.html#method-summary diff --git a/src/site/markdown/examples/examples.md.vm b/src/site/markdown/examples/examples.md.vm index 7f9de068..24eb3e65 100644 --- a/src/site/markdown/examples/examples.md.vm +++ b/src/site/markdown/examples/examples.md.vm @@ -58,7 +58,10 @@ descriptor goes farther, and calls several functions provided as test cases, so this use of `install_jar` may take a few extra seconds and produce some test-related output. (To see _successful_ test-related output, be sure to set `client_min_messages` to a level at least as detailed as `INFO` before -invoking `install_jar`.) +the session's first use of PL/Java.) + +**See the end of this page if your attempt to install the examples jar +fails with "unable to find static method" or "no such class" messages.** $h2 Trying the examples @@ -95,3 +98,59 @@ Then use some example function: ------------- 4 ``` + +$h2 Optionally-built example code + +Some example code is included in source form but not built by default, +because it assumes a later Java version than PL/Java should require to build, +or pulls in extra libraries, or both. Optional examples can be built by adding +corresponding `-P` profile options to the `mvn` command line when building. + +$h3 Using the Saxon XML-processing library for true `XMLQUERY` and `XMLTABLE` + +In the source directory `org/postgresql/pljava/example/saxon` is example code +for XML processing functions similar to `XMLCAST`, `XMLEXISTS`, `XMLQUERY`, and +`XMLTABLE` but using +the XQuery language as the SQL/XML standard actually specifies (in contrast +to the similar functions built into PostgreSQL, which support only XPath, +and XPath 1.0, at that). + +This code is not built by default because it pulls in the sizeable [Saxon-HE][] +library from Saxonica, and because it requires Java 8. + +To include these optional functions when building the examples, be sure to use +a Java 8 or later build environment, and add `-Psaxon-examples` to the `mvn` +command line. + +The Saxon example [documentation is here](../examples/saxon.html). + +[Saxon-HE]: http://www.saxonica.com/html/products/products.html + +$h2 Exception resolving class or method (message when installing examples) + +As described above, there are some optionally-built examples included +in the source. 
For example, there are XML Query examples that are not built +by default because they depend on the Saxon jar. + +If your examples jar was built with the optional examples enabled, then +PL/Java will normally validate that all of the functions it creates can be used. +That validation can fail if a needed dependency, such as the Saxon jar, is +not already installed and on the classpath. The error message will not directly +say the Saxon jar is missing, but may say it failed to find a class (whose name +will suggest it should be part of Saxon). + +There are two ways to proceed: + +* Install the required dependency first. Use `sqlj.install_jar` to install + the Saxon jar (as described [here](../examples/saxon.html)), and + `sqlj.set_classpath` to make it accessible, and *then* use + `sqlj.install_jar` to install the examples jar itself. The dependency will + be satisfied and all of the example functions will work. + +* Use `SET check_function_bodies TO off` before installing the examples jar. + That will simply relax the strict checking at `CREATE FUNCTION` time, so + that all of the example functions will be created. The ones that require + Saxon, of course, won't work; `SET check_function_bodies TO off` simply + means you get the errors later, when trying to use the functions, instead + of when creating them. If you install the dependency jar later and + add it to the class path, those functions will then work. diff --git a/src/site/markdown/examples/saxon.md b/src/site/markdown/examples/saxon.md new file mode 100644 index 00000000..de4f7249 --- /dev/null +++ b/src/site/markdown/examples/saxon.md @@ -0,0 +1,502 @@ +## Optionally-built example code for XML processing with Saxon + +In the source directory `org/postgresql/pljava/example/saxon` is example code +for XML processing functions similar to `XMLCAST`, `XMLEXISTS`, `XMLQUERY`, and +`XMLTABLE`, but using the XQuery language as the SQL/XML standard actually +specifies (in contrast to similar functions built into PostgreSQL, which support +only XPath, and XPath 1.0, at that). + +The example also implements the four new string functions and one predicate +added in SQL:2006 for regular expression processing using the standardized +XQuery regular expression syntax: `LIKE_REGEX`, `OCCURRENCES_REGEX`, `POSITION_REGEX`, +`SUBSTRING_REGEX`, and `TRANSLATE_REGEX`. + +There is also, for completeness, an implementation of `XMLTEXT`, which is +trivial and does not require an XQuery library at all, but is missing from +core PostgreSQL and easy to implement here. + +This code is not built by default, because it pulls in the sizeable [Saxon-HE][] +library from Saxonica. + +To include these optional functions when building the examples, +add `-Psaxon-examples` to the `mvn` command line. + +The functions are presented as examples, not as a full implementation; +for one thing, there is no test suite included to verify their conformance. +Nevertheless, they are intended to be substantially usable subject to the limits +described here, and testing and reports of shortcomings are welcome. + +In addition to the open-source and freely-licensed Saxon-HE, the Saxon library +is available in two paid editions, which implement more of the features of +XQuery 3.1 than Saxon-HE does. It should be possible to drop either of those +jar files in place of Saxon-HE (with a working license key) if features are +needed beyond what Saxon-HE provides. Its developers publish +[a matrix][saxmatrix] identifying the features provided in each edition. 
+ +### Extension to ISO SQL/XML + +Wherever ISO SQL/XML requires one of these functions to accept an XQuery +[expression][xqexpr], in fact an XQuery [main module][xqmainmod] will be +accepted. Therefore, a query can be preceded by a prolog that declares +namespaces, options, local variables and functions, etc. This may simplify +porting queries from Oracle, which permits the same extension. + +### Using the Saxon examples + +The simplest installation method is to use `sqlj.install_jar` twice, once to +install (perhaps with the name `saxon`) the Saxon-HE jar that Maven will have +downloaded during the build, and once to install the PL/Java examples jar in the +usual way (perhaps with the name `examples` and with `deploy => true`). The +Saxon jar will be found in your Maven repository (likely `~/.m2/repository/` +unless you have directed it elsewhere) below the path `net/sf/saxon`. + +The function `sqlj.set_classpath` is used to make installed jars available. +After installing the Saxon jar, if you installed it with the name `saxon`, +add it to the class path: + +``` +SELECT sqlj.set_classpath('public', 'saxon'); +``` + +This must be done before installing the `examples` jar, so that its dependencies +on Saxon can be resolved. + +After both jars are installed, make sure they are both on the classpath. If +the examples jar was installed with the name `examples`: + +``` +SELECT sqlj.set_classpath('public', 'examples:saxon'); +``` + +*Note: an alternative, shorter procedure is to use +`SET check_function_bodies TO off;` before loading the examples jar. +With the checking turned off, the jar can be installed even if the Saxon jar +has not been installed yet, or has not been added to the class path, so the +order of steps is less critical. Naturally, the example functions that use Saxon +will not work until it has been installed and added to the class path. +`SET check_function_bodies TO off;` simply arranges that missing dependency +errors will be reported later when the functions are used, rather than when +they are created.* + +### Calling XML functions without SQL syntactic sugar + +The XML querying and `XMLTABLE` functions built into PostgreSQL get special +treatment from the SQL parser to give them syntax that is more SQLish than +an ordinary function call. + +The functions provided here have to work as ordinary SQL user-defined +functions, so calls to them can look a bit more verbose when written out +in SQL, but in a way that can be recognized as a straightforward rewriting +of the SQLish standard syntax. + +For example, suppose there is a table `catalog_as_xml` with a single row +whose `x` column is a (respectably sized) XML document recording the stuff +in `pg_catalog`. 
It could be created like this: + + CREATE TABLE catalog_as_xml(x) AS + SELECT schema_to_xml('pg_catalog', false, true, ''); + +#### An `XMLQUERY`-like function + +In the syntax of the SQL/XML standard, here is a query that would return an XML +element representing the declaration of the function with the name +`numeric_avg` (if PostgreSQL really had the standard `XMLQUERY` function built +in): + + SELECT XMLQUERY('/pg_catalog/pg_proc[proname eq $FUNCNAME]' + PASSING BY VALUE x, 'numeric_avg' AS FUNCNAME + RETURNING CONTENT EMPTY ON EMPTY) + FROM catalog_as_xml; + +It binds the 'context item' of the query to `x`, and the `FUNCNAME` +parameter to the given value, then evaluates the query and returns XML +"CONTENT" (a tree structure with a document node at the root, but not +necessarily meeting all the requirements of an XML "DOCUMENT"). It can be +rewritten as this call to the `xq_ret_content` method provided here: + + SELECT javatest.xq_ret_content('/pg_catalog/pg_proc[proname eq $FUNCNAME]', + PASSING => p, nullOnEmpty => false) + FROM catalog_as_xml, + LATERAL (SELECT x AS ".", 'numeric_avg' AS "FUNCNAME") AS p; + +In the rewritten form, the type of value returned is determined by which +function is called, and the parameters to pass to the query are moved out to +a separate `SELECT` that supplies their values, types, and names (with +the context item now given the name ".") and is passed by its alias into the +query function. + +An alert reader may notice that the example above includes a named parameter, +`FUNCNAME`, and it is spelled in uppercase in the XQuery expression that uses +it, and is spelled in uppercase _and quoted_ in the sub-`SELECT` that supplies +it. The reason is an unconditional `toUppercase()` in PL/Java's internal JDBC +driver, which is not anything the JDBC standard requires, but has been there +in PL/Java since 2005. For now, therefore, no matter how a parameter name is +spelled in the sub-`SELECT`, it must appear in uppercase in the XQuery +expression using it, or it will not be recognized. A future PL/Java release +is highly likely to stop forcibly uppercasing the names. At that time, any code +relying on the uppercasing will break. Therefore, it is wisest, until then, to +call this function with all parameter names spelled in uppercase both in the +SQL and in the XQuery text, and on the SQL side that requires quoting the name +to avoid the conventional lowercasing done by PostgreSQL. + +In the standard, parameters and results (of XML types) can be passed +`BY VALUE` or `BY REF`, where the latter means that the same +nodes will retain their XQuery node identities over calls (note that this is +a meaning unrelated to what "by value" and "by reference" usually mean in +PostgreSQL's documentation). PostgreSQL's implementation of the XML type +provides no way for `BY REF` semantics to be implemented, so everything +happening here happens `BY VALUE` implicitly, and does not need to be +specified. 
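+
+Because `xq_ret_content` is an ordinary user-defined function, the rewritten
+form can also be issued from any SQL client, with nothing more exotic than a
+prepared statement. A hedged sketch (assuming the examples jar is installed
+with its functions in the `javatest` schema, as above, and an already-open
+JDBC `Connection` named `conn`):
+
+```java
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+
+String sql =
+    "SELECT javatest.xq_ret_content(" +
+    " '/pg_catalog/pg_proc[proname eq $FUNCNAME]'," +
+    " PASSING => p, nullOnEmpty => false)" +
+    " FROM catalog_as_xml," +
+    " LATERAL (SELECT x AS \".\", ?::text AS \"FUNCNAME\") AS p";
+
+try ( PreparedStatement ps = conn.prepareStatement(sql) )
+{
+    ps.setString(1, "numeric_avg");  // becomes $FUNCNAME in the XQuery expression
+    try ( ResultSet rs = ps.executeQuery() )
+    {
+        while ( rs.next() )
+            System.out.println(rs.getString(1)); // the returned XML content as text
+    }
+}
+```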
+ +#### An `XMLEXISTS`-like predicate + +In the syntax of the SQL/XML standard, here is a query that would return a +boolean result indicating whether an SQL function named `numeric_avg` +is declared (if PostgreSQL really had the standard `XMLEXISTS` function built +in): + + SELECT XMLEXISTS('/pg_catalog/pg_proc[proname eq $FUNCNAME]' + PASSING BY VALUE x, 'numeric_avg' AS FUNCNAME) + FROM catalog_as_xml; + +It can be rewritten as this call to the `xmlexists` method provided here: + + SELECT "xmlexists"('/pg_catalog/pg_proc[proname eq $FUNCNAME]', + PASSING => p) + FROM catalog_as_xml, + LATERAL (SELECT x AS ".", 'numeric_avg' AS "FUNCNAME") AS p; + +As for the `XMLQUERY`-like function above, , the context item and a parameter +are supplied by a separate query producing the row `p` that is given as the +`PASSING` argument to `"xmlexists"`. The parameter name is capitalized for the +reasons explained above for the `XMLQUERY`-like function. + +#### An `XMLTABLE`-like function + +The function `xmltable` here implements (much of) the +standard function of the same name. Because it is the same name, it has to +be either schema-qualified or double-quoted in a call to avoid confusion +with the reserved word. A rewritten form of the +[first example in the PostgreSQL manual][xmltex1] could be: + + SELECT xmltable.* + FROM + xmldata, + + LATERAL (SELECT data AS ".", 'not specified'::text AS "DPREMIER") AS p, + + "xmltable"('//ROWS/ROW', PASSING => p, COLUMNS => ARRAY[ + 'data(@id)', null, 'COUNTRY_NAME', + 'COUNTRY_ID', 'SIZE[@unit eq "sq_km"]', + 'concat(SIZE[@unit ne "sq_km"], " ", SIZE[@unit ne "sq_km"]/@unit)', + 'let $e := PREMIER_NAME + return if ( empty($e) ) then $DPREMIER else $e' + ]) AS ( + id int, ordinality int, "COUNTRY_NAME" text, country_id text, + size_sq_km float, size_other text, premier_name text + ); + +[xmltex1]: https://www.postgresql.org/docs/10/static/functions-xml.html#FUNCTIONS-XML-PROCESSING-XMLTABLE + +Again, the context item and a parameter (here the desired default value for +`PREMIER`, passed in as the parameter `DPREMIER`) are supplied by a separate +query producing the row `p` that is given as `"xmltable"`'s `PASSING` argument. +The result column names and types are now specified in the `AS` list following +the function call, and the column XML Query expressions are supplied as the +`COLUMNS` array. The array must have length equal to the result column `AS` +list (there is no defaulting an omitted column expression to an element test +using the column's name, as there is in the standard function). The array is +allowed to have one null element, marking that column `FOR ORDINALITY`. + +The parameter being passed into the XQuery expressions here, `DPREMIER`, is +spelled in uppercase (and, on the SQL side, quoted), for the reasons explained +above for the `XMLQUERY`-like function. + +In the first column expression, `@id` is wrapped in `data()` to return the value +of the attribute, as `@id` by itself would be a bare XML attribute node, outside +of any XML element. Many implementations (including the XPath-based +pseudo-XMLTABLE built in to PostgreSQL) will allow a bare attribute node in a +column expression result, and assume the attribute's value is wanted, but a +strict interpretation of the spec appears to require raising `err:XPTY0004` in +that case. So, just use `data()` to wrap any attribute node being returned in +a column expression. + +More on that issue and the spec can be found at "About bare attribute nodes" +[in the code comments][assignrowvalues]. 
+ +#### An `XMLCAST`-like function + +The ISO SQL `XMLCAST` is used to convert XML content into a value of an SQL +data type, or an SQL value to an XML value, following the same +precisely-specified conversion rules that are used for the parameters and +results of the `XMLQUERY` and `XMLTABLE` functions. It can also convert from +one XML type to another, though in PostgreSQL, which has just one XML type, the +conversion is trivial. In a DBMS with support for the full set of XML types +such as `XML(CONTENT)`, `XML(DOCUMENT)`, and `XML(SEQUENCE)`, the rules for +casting one to another are more interesting. + +This ordinary-function implementation of `XMLCAST` is used by rewriting an +SQL standard form like + + SELECT XMLCAST(value AS wantedtype) + +into a form like + + SELECT result FROM (select value) as v, "xmlcast"(v) AS (result wantedtype) + +where either: _value_ is of `xml` type, _wantedtype_ is `xml`, or both; in +other words, the only case `XMLCAST` does not handle is where neither the input +nor result is of `xml` type. Because casting XML to XML is not exciting in +PostgreSQL, the most useful cases are XML to another SQL type, or the reverse. + +#### The ISO SQL XQuery regular expression features + +The SQL standard specifies a string predicate, `LIKE_REGEX`, for testing a +string against an [XQuery regular expression][xqre] (an extension of +[XML Schema regular expression syntax][xsre]), and four string functions also +based on XQuery regular expressions: `OCCURRENCES_REGEX`, `POSITION_REGEX`, +`SUBSTRING_REGEX`, and `TRANSLATE_REGEX`. + +The "flags" parameter to any of these can include any of the +[XQuery regular expression flags `s`, `m`, `i`, `x`, and `q`][xqflags]. + +As with the `XMLQUERY` and `XMLTABLE` functions, some straightforward rewriting +is needed from the SQL-standard syntax into calls of these ordinary functions. + +In the current implementation, all of these functions recognize newlines in the +way specified by XQuery, not the modified way specified for ISO SQL, as further +explained below after the function descriptions. To leave a clear path to a +full implementation, these versions all accept an additional parameter +`w3cNewlines`, which must always be present, for now, as `w3cNewlines => true`. +Specifying `false`, or omitting this parameter, will mean the ISO SQL newline +treatment is wanted, and will be rejected as an unsupported feature +in this implementation. + +To avoid clutter, the `w3cNewlines => true` is not shown in the examples below. + +##### [`LIKE_REGEX`][lrx] + +A predicate that is `true` if a string matches the regular expression. +The standard syntaxes + + value LIKE_REGEX pattern + value LIKE_REGEX pattern FLAG flags + +can be rewritten to + + like_regex(value, pattern) + like_regex(value, pattern, flags) + like_regex(value, pattern, flag => flags) + +##### [`OCCURRENCES_REGEX`][orx] + +A function to count the occurrences of a pattern in a string. The count can +start from a specific position in the string (the first character has +position 1), and the position can be counted using Unicode characters, or using +octets of the string's encoded form. For now, only `USING CHARACTERS` is +implemented, which can be indicated by passing `usingOctets => false` or +simply omitting it, as `false` is the default. 
Standard syntax examples like + + OCCURRENCES_REGEX(pattern IN str) + OCCURRENCES_REGEX(pattern FLAG flags IN str) + OCCURRENCES_REGEX(pattern IN str FROM position USING CHARACTERS) + +can be rewritten to + + occurrences_regex(pattern, str) + occurrences_regex(pattern, flag => flags, "in" => str) + occurrences_regex(pattern, str, "from" => position) + +##### [`POSITION_REGEX`][prx] + +A function to return the position of a regular expression match in a string, +which can optionally return the position of a specific occurrence of the match +(the first, if not specified), or of a particular capturing group within the +desired match. The position reported can be of the first character of the match +of interest (`START`), or of the first character following the match (`AFTER`). +As for `OCCURRENCES_REGEX`, all positions can be expressed `USING CHARACTERS` or +`USING OCTETS`, but only the default `USING CHARACTERS` is implemented here. + +Standard syntax examples like + + POSITION_REGEX(START pattern IN str) + POSITION_REGEX(AFTER pattern IN str) + POSITION_REGEX(START pattern IN str OCCURRENCE n) + POSITION_REGEX(START pattern IN str OCCURRENCE n GROUP m) + POSITION_REGEX(START pattern IN str FROM pos OCCURRENCE n GROUP m) + +can be rewritten to + + position_regex(pattern, str) + position_regex(pattern, str, after => true) + position_regex(pattern, str, occurrence => n) + position_regex(pattern, str, occurrence => n, "group" => m) + position_regex(pattern, str, "from" => pos, occurrence => n, "group" => m) + +The result is always relative to the start of the string, not the starting +position. That is, `POSITION_REGEX('d' IN 'abcdef' FROM 3)` is 4, not 2. + +##### [`SUBSTRING_REGEX`][srx] + +Returns the substring that matched the regular expression, or a specific +occurrence of the expression, or a specific capturing group within the +desired occurrence. Standard syntax examples like + + SUBSTRING_REGEX(pattern IN str) + SUBSTRING_REGEX(pattern FLAG flags IN str) + SUBSTRING_REGEX(pattern IN str FROM position) + SUBSTRING_REGEX(pattern IN str OCCURRENCE n GROUP m) + +can be rewritten to + + substring_regex(pattern, str) + substring_regex(pattern, flag => flags, "in" => str) + substring_regex(pattern, str, "from" => position) + substring_regex(pattern, str, occurrence => n, "group" => m) + +##### [`TRANSLATE_REGEX`][trx] + +Returns a string built from the input string by replacing one specified +occurrence, or all occurrences, of a matching pattern. The +replacement text can include `$0` to include the entire substring +that matched, or `$`_n_ for _n_ a digit 1 through 9, +to include what matched a capturing group in the pattern. +The default behavior of replacing all occurrences applies when +`occurrence` is not specified. 
+ +Standard syntax examples like + + TRANSLATE_REGEX(pattern IN str WITH repl) + TRANSLATE_REGEX(pattern IN str WITH repl OCCURRENCE n) + TRANSLATE_REGEX(pattern FLAG flags IN str WITH repl) + TRANSLATE_REGEX(pattern IN str WITH repl FROM position) + +can be rewritten to + + translate_regex(pattern, str, "with" => repl) + translate_regex(pattern, str, "with" => repl, occurrence => n) + translate_regex(pattern, flag => flags, "in" => str, "with" => repl) + translate_regex(pattern, str, "with" => repl, "from" => position) + +##### Recognition of newlines + +A standard XQuery library provides regular expressions that follow the W3C +XQuery rules for newline recognition, in which the `^` and `$` anchors +recognize only the `LINE FEED` character, `U&'\000a'`, the `.` metacharacter +in non-`dotall` mode matches anything other than a `LINE FEED` or +`CARRIAGE RETURN` `U&'\000d'`, the `\s` multicharacter escape matches only +those two characters plus space and horizontal tab, and `\S` is the exact +complement of `\s`. + +The ISO SQL specification for these XQuery regular expression features +contains a modification of those rules to conform instead to +[Unicode Technical Standard 18 rule 1.6][uts18rl16], in which several more +Unicode characters are recognized as line boundaries, plus the two-character +sequence `CARRIAGE RETURN` `LINE FEED` (which counts only as one line boundary). +The modified meaning of `\S` becomes "any _single_ character that is not matched +by a _single_ character that matches" `\s` (emphasis added), leaving it no +longer the exact complement of `\s`. + +It is difficult to implement the ISO SQL behavior over a standard XQuery +library, so this implementation, for now, does not do so. All of these +functions implement the standard W3C XQuery behavior, which can be "requested" +by passing `w3cNewlines => true`. Without `w3cNewlines => true`, +the call will be interpreted as intending the ISO SQL behavior, and an +`SQLFeatureNotSupportedException` (SQLSTATE `0A000`) will be raised. + +##### Nonstandard features + +The Saxon XQuery library, implemented in Java, offers the ability to use Java +regular expressions rather than XQuery ones, by passing a _flag_ argument +that ends with `;j` (an invalid flag string per the XQuery spec). This should +not be used in code that intends to be standards-conformant or to run on another +DBMS or XQuery library, but can be useful in some cases for features that Java +regular expressions offer (such as lookahead and lookbehind predicates) that +XQuery regular expressions do not. + +###### Java regular expressions and empty-match replacements + +This example implementation of `TRANSLATE_REGEX` will detect when a Java +expression rather than an XQuery one is being used, and will then permit +replacement of a zero-length match, rather than raising error `2201U` as the +standard requires. As Java regular expressions include zero-width lookahead and +lookbehind operators, a Java regex can usefully locate zero-width sites for +replacements to be applied. + +There are still subtleties involved. A site that is identified by +_negative_ lookahead or lookbehind operators (`(?!)` and `(? 
<!)`) is generally replaced as one would expect, but a site identified by the
+_positive_ operators (`(?=)` and `(?<=)`) can be a surprise. This call, meant
+to put a `!` at the zero-width site between an `o` and a following `b`, leaves
+the string unchanged:
+
+    SELECT translate_regex('(?<=o)(?=b)', 'foobar', "with" => '!',
+        flag => ';j', w3cNewlines => true);
+     translate_regex
+    -----------------
+     foobar
+
+The reason is that the specification of `TRANSLATE_REGEX` is as if the
+matched substring, here an empty string, is matched again _in isolation_
+against the original regex to do the replacement, and that empty string no
+longer has the `o` and `b` that the original lookbehind and lookahead matched.
+It can be made to work by adding an alternative that matches a truly empty
+string (`\A\z` in Java syntax):
+
+    SELECT translate_regex('(?<=o)(?=b)|\A\z', 'foobar', "with" => '!',
+        flag => ';j', w3cNewlines => true);
+     translate_regex
+    -----------------
+     foo!bar
+
+That workaround would also cause the replacement to happen if the input string
+is completely empty to start with, which might not be what's wanted.
+
+### Minimizing startup time
+
+Saxon is a large library, and benefits greatly from precompilation into a
+memory-mappable persistent cache, using the
+[application class data sharing][appcds] feature in Oracle Java or in
+OpenJDK with Hotspot, or the [class sharing][j9cds] feature in OpenJDK with
+OpenJ9.
+
+The OpenJ9 feature is simpler to set up. Because it can cache classes straight
+from PL/Java installed jars, the setup can be done exactly as described above,
+and the OpenJ9 class sharing, if enabled, will just work. OpenJ9 class-sharing
+setup [instructions are here][j9cds].
+
+The Hotspot `AppCDS` feature is more work to set up, and can only cache classes
+on the JVM system classpath, so the Saxon jar would have to be installed on
+the filesystem and named in `pljava.classpath` instead of simply installing it
+in PL/Java. It also needs to be stripped of its `jarsigner` metadata, which the
+Hotspot `AppCDS` can't handle. Hotspot `AppCDS` setup
+[general instructions are here][appcds], and specific details for setting up
+this example for `AppCDS` can be found on the
+[performance-tuning wiki page][ptwp] in the section devoted to it.
+
+A comparison shown on that performance-tuning page appears
+to give Hotspot a significant advantage for a Saxon-heavy workload, so the more
+complex Hotspot setup may remain worthwhile as long as that comparison holds.
+
+The `AppCDS` feature in Oracle Java is still (when last checked) a commercial
+feature, not to be used in production without a specific license from Oracle.
+OpenJDK, as of Java 10, ships Hotspot with the same feature included, without
+the encumbrance.
+ + +[appcds]: ../install/appcds.html +[j9cds]: ../install/oj9vmopt.html#How_to_set_up_class_sharing_in_OpenJ9 +[Saxon-HE]: http://www.saxonica.com/html/products/products.html +[ptwp]: https://github.com/tada/pljava/wiki/Performance-tuning +[assignrowvalues]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#assignRowValues +[xqre]: https://www.w3.org/TR/xpath-functions-31/#regex-syntax +[xsre]: https://www.w3.org/TR/xmlschema-2/#regexs +[xqflags]: https://www.w3.org/TR/xpath-functions-31/#flags +[uts18rl16]: http://www.unicode.org/reports/tr18/#RL1.6 +[lrx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#like_regex +[orx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#occurrences_regex +[prx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#position_regex +[srx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#substring_regex +[trx]: ../pljava-examples/apidocs/org/postgresql/pljava/example/saxon/S9.html#translate_regex +[saxmatrix]: https://www.saxonica.com/html/products/feature-matrix-9-9.html +[xqexpr]: https://www.w3.org/TR/xquery-31/#id-expressions +[xqmainmod]: https://www.w3.org/TR/xquery-31/#dt-main-module diff --git a/src/site/markdown/index.md b/src/site/markdown/index.md index d2b66c6c..27dfaf76 100644 --- a/src/site/markdown/index.md +++ b/src/site/markdown/index.md @@ -47,9 +47,9 @@ deployment descriptor, making the new types/functions/triggers available for use. [JDBC]: https://docs.oracle.com/javase/tutorial/jdbc/ -[pljapi]: pljava-api/apidocs/index.html?org/postgresql/pljava/package-summary.html#package_description +[pljapi]: pljava-api/apidocs/org.postgresql.pljava/module-summary.html [annotations]: https://docs.oracle.com/javase/tutorial/java/annotations/ -[oppa]: pljava-api/apidocs/index.html?org/postgresql/pljava/annotation/package-summary.html#package_description +[oppa]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/package-summary.html#package-description [trgann]: https://github.com/tada/pljava/blob/master/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java [depdesc]: https://github.com/tada/pljava/wiki/Sql-deployment-descriptor [jar]: https://docs.oracle.com/javase/tutorial/deployment/jar/index.html diff --git a/src/site/markdown/install/appcds.md b/src/site/markdown/install/appcds.md index 7047caff..fdb117c4 100644 --- a/src/site/markdown/install/appcds.md +++ b/src/site/markdown/install/appcds.md @@ -1,21 +1,30 @@ -# How to set up application class data sharing - -[Application class data sharing][appcds] is a feature, currently unique -to the Oracle JVM (8u40 and later) that extends the ordinary Java class -data sharing feature to also include selected classes from the application -class path. In PL/Java terms, that means that not only Java's own internal -classes, but PL/Java's also, can be saved in a preprocessed shared archive -and quickly mapped when any backend starts PL/Java. For an overview, see +# How to set up application class data sharing in Hotspot + +For the Hotspot JVM, [Application class data sharing][appcds] is a feature, +first released in the Oracle JVM (8u40 and later) that extends the ordinary +Java class data sharing feature to also include selected classes from the +application class path. 
In PL/Java terms, that means that not only Java's own +internal classes, but PL/Java's also, can be saved in a preprocessed shared +archive and quickly mapped when any backend starts PL/Java. For an overview, see the [PL/Java VM options page][vmop]. +Starting with Java 10, the feature is also available in +[OpenJDK with Hotspot][OpenJDK]. From Java 8 onward, a different feature +with the same effect is available in [OpenJDK with OpenJ9][OpenJDK]; that +feature is covered [on its own page][cdsJ9]. + [appcds]: http://docs.oracle.com/javase/8/docs/technotes/tools/unix/java.html#app_class_data_sharing [vmop]: vmoptions.html [bcl]: http://www.oracle.com/technetwork/java/javase/terms/license/index.html +[OpenJDK]: https://adoptopenjdk.net/ +[cdsJ9]: oj9vmopt.html#How_to_set_up_class_sharing_in_OpenJ9 +[o]: https://blogs.oracle.com/java-platform-group/oracle-jdk-releases-for-java-11-and-later +[dcdsa]: https://docs.oracle.com/en/java/javase/13/docs/specs/man/java.html#dynamic-cds-archive ## License considerations -For Java 8, application class data sharing is a "commercial feature" in -Oracle's JVM, and will not work unless `pljava.vmoptions` also contain +In Oracle Java, application class data sharing was a "commercial feature" first +released in Java 8, not usable unless `pljava.vmoptions` also include `-XX:+UnlockCommercialFeatures` , with implications described in the "supplemental license terms" of the Oracle [binary code license for Java SE][bcl]. The license seems @@ -25,14 +34,47 @@ negotiating an additional agreement with Oracle if the feature will be used purpose." It is available to consider for any application where the additional performance margin can be given a price. -Looking ahead, in the source for OpenJDK 9 (`share/vm/runtime/globals.hpp`) -are promising signs that equivalent functionality will be available there. - -## Setup +The same feature in OpenJDK with Hotspot is available from Java 10 onward, +and does not require any additional license or `-XX:+UnlockCommercialFeatures` +option. + +Starting in Java 11, Oracle offers +[Oracle-branded downloads of both "Oracle JDK" and "Oracle's OpenJDK builds"][o] +that are "functionally identical aside from some cosmetic and packaging +differences". "Oracle's OpenJDK builds" may be used for production or +commercial purposes with no additional licensing, while any such use of +"Oracle JDK" requires a commercial license. The application class data sharing +feature is available in both, and no longer requires the +`-XX:+UnlockCommercialFeatures` option in either case (not in +"Oracle's OpenJDK builds" because their use is unrestricted, and not in +"Oracle JDK" because the "commercial feature" is now, effectively, the entire +JDK). + +The equivalent feature in OpenJDK with OpenJ9, +[described separately][cdsJ9], is available from Java 8 onward, also with no +additional license or setup needed. + +## Setup for Hotspot, earlier than Java 13 + +The setup instructions on this page are for Hotspot, whether in Oracle Java +or OpenJDK with Hotspot. The two differ only in that, wherever an +`-XX:+UnlockCommercialFeatures` option is shown in the steps below, +**it is needed in Oracle Java 8, 9, or 10, but not in OpenJDK/Hotspot, or +Oracle JDK 11 or later**. + +The Java version also affects the `-XX:+UseAppCDS` option shown below. +For Java 8 through 10, the option must be used for application class data +sharing to be enabled. 
In Java 11, the feature is enabled by default (though +the shared archive must still be created as described here), and the +`-XX:+UseAppCDS` option is no longer necessary; it will be accepted but +ignored with a warning. **In Java 12 and later, `-XX:+UseAppCDS` is not +needed and will be rejected as unrecognized, making PL/Java fail to load.** Setting up PL/Java to use application class data sharing is a three-step process. Each step is done by setting a different combination of options -in `pljava.vmoptions`. These are the three steps in overview: +in `pljava.vmoptions`. A slightly different procedure, described further +below, appears in Java 13. Up through Java 12, these are the three steps +in overview: 1. Make a list of classes to be preloaded, by saving the names of classes that are loaded while executing some desired code in PL/Java. @@ -60,7 +102,7 @@ final `pljava.vmoptions` setting at the end. Classes eligible to go in the shared archive are the Java system classes (including anything in the deprecated `java.ext.dirs` or `java.endorsed.dirs` directories), classes in the PL/Java jar itself, and any others in jars named in -`pljava.classpath`. Classes from PL/Java application jars loaded into the +`pljava.module_path`. Classes from PL/Java application jars loaded into the database normally with `sqlj.install_jar` are not candidates for the shared archive. The feature will speed the startup of PL/Java itself, but application classes are still loaded from the database in PL/Java's usual way. @@ -144,7 +186,7 @@ VM options you may have chosen to set. ``` =# SET pljava.vmoptions TO --# '-XX:+UnlockCommercialFeatures -XX:+UseAppCDS -Xshare:on ' +-# '-XX:+UnlockCommercialFeatures -XX:+UseAppCDS -Xshare:auto ' -# '-XX:SharedArchiveFile=/usr/pgsql/lib/pljava.jsa'; SET =# SELECT sqlj.get_classpath('public'); -- just checking it works @@ -159,6 +201,32 @@ SET Alternatively, use `ALTER SYSTEM` (or edit the `postgresql.conf` file) to save the setting for all databases in the cluster. +The use of `-Xshare:auto` rather than `-Xshare:on` in the final production +settings may be surprising, but is recommended. On operating systems with +address-space layout randomization, it is possible for some backends to +(randomly) fail to map the shared archive. With `-Xshare:auto`, they will +simply proceed without sharing and with higher resource usage, which may not +be ideal, but the same event with `-Xshare:on` would be a hard failure. + +## Setup for Hotspot, as of Java 13 + +Java 13 introduces a [dynamic CDS archive][dcdsa] feature, with fewer steps +to generate a usable archive. In essence, it combines the first two earlier +steps (generate a list of loaded classes from a sample run, then generate +an archive from the list) into a single step: do a sample run with the +option `-XX:ArchiveClassesAtExit=/tmp/pljava.jsa` and the archive will be +written to the named file when the backend exits. + +Then, as in the earlier procedure, move the archive file to a more permanent +and less writable location, and name it with `-XX:SharedArchiveFile=` in the +production `pljava.vmoptions` settings. That is the only option needed to +enable application class data sharing as of Java 13, as `-Xshare:auto` is +the default, and the earlier `-XX:+UnlockCommercialFeatures` and +`-XX:+UseAppCDS` options are obsolete. + +The [dynamic CDS archive documentation][dcdsa] covers the setup in useful +detail. 
+ ## Java libraries If your own PL/Java code depends on other Java libraries distributed as diff --git a/src/site/markdown/install/install.md.vm b/src/site/markdown/install/install.md.vm index cb671b00..a8fa4e50 100644 --- a/src/site/markdown/install/install.md.vm +++ b/src/site/markdown/install/install.md.vm @@ -8,11 +8,19 @@ #set($h2 = '##') #set($h3 = '###') +$h2 Selecting a current Java version to use with PL/Java + +Whichever JDK version you use to build PL/Java, you may want to +use PL/Java with another Java version at run time, so your PL/Java application +code can use the newer Java features. When you reach the step +[setting PL/Java configuration variables](#PLJava_configuration_variables), +the `pljava.libjvm_location` variable will allow you to do that. + $h2 For the impatient After completing the [build][bld]: - java -jar pljava-packaging/target/pljava-pgX.Y-arch-os-link.jar + java -jar pljava-packaging/target/pljava-pgX.jar (run the above with sufficient privilege to write in the PostgreSQL installation directories, or read further for how to install in @@ -22,8 +30,8 @@ After completing the [build][bld]: CREATE EXTENSION pljava; GRANT USAGE ON LANGUAGE java TO ...; -- see "Usage permission" below -where *pgX.Y* represents the PostgreSQL version, and *arch*, *os*, and -*link* are ... wait, you're impatient, just look in the directory, you'll +where *pgX* represents the PostgreSQL version, and +... wait, you're impatient, just look in the directory, you'll see the jar file there. *Upgrading an older PL/Java installation? Use @@ -31,10 +39,9 @@ see the jar file there. __Upgrade installations__ below*. -*Not running PostgreSQL 9.1 or higher? Use +*Avoiding `CREATE EXTENSION` for some reason? Use `LOAD 'libpljava-so-${project.version}';` instead of the `CREATE EXTENSION` -command. (It works in later versions too, if you prefer it to -`CREATE EXTENSION`.) Using a Mac? Be sure to add `.bundle` at the end of the file name +command. Using a Mac? Be sure to add `.bundle` at the end of the file name in the `LOAD` command. Windows? Remove `lib` from the front. Something else? Keep reading.* @@ -90,8 +97,8 @@ you will have to become patient, and read the rest of this page. **You will most probably have to set `pljava.libjvm_location`.** See the next section. -**It is useful to consider `pljava.vmoptions`.** See the -[VM options page][vmop]. +**It is useful to consider `pljava.vmoptions`. For Java 18 or later it is +necessary.** See the [VM options page][vmop]. [vmop]: vmoptions.html @@ -114,11 +121,11 @@ You can set these variables within a database session, before issuing the `LOAD` or `CREATE EXTENSION` command. (In case you don't always get things right on the first try, you might set them after, too.) For example: - SET pljava.libjvm_location TO '/usr/lib/jvm/java-1.8.0/lib/...'; + SET pljava.libjvm_location TO '/usr/lib/jvm/java-9/lib/...'; `pljava.libjvm_location` : You are looking for a file named `libjvm` (or just `jvm` on some platforms, - such as Windows) with extension `.so`, `.dll`, + such as Windows) with extension `.so`, `.dll`, `.bundle`, or `.dylib` typically, buried a few directories/folders down in the location where Java is installed. If more than one Java version is installed, be sure to find the library from the version you want @@ -126,24 +133,43 @@ things right on the first try, you might set them after, too.) For example: Then set this variable to the full pathname, including the filename and extension. 
-`pljava.vmoptions` -: While it should not be necessary to set these before seeing the first signs - of life from PL/Java, there are useful options to consider setting here - before calling the installation complete. Some are described on the - [VM options page][vmop]. + The version of Java this variable points to will determine whether PL/Java + can operate [with security policy enforcement][policy] or must be used + [with no policy enforcement][unenforced]. + +`pljava.allow_unenforced` +: When using PL/Java with no policy enforcement, this variable must be set + as described on the [PL/Java with no policy enforcement][unenforced] page. + +`pljava.allow_unenforced_udt` +: When using PL/Java with no policy enforcement, if PL/Java + [mapped user-defined types][mappedudt] are to be used, this variable must + be set as described on the + [PL/Java with no policy enforcement][unenforced] page. -`pljava.classpath` +`pljava.vmoptions` +: JVM options can be set here, a number of which are described on the + [VM options page][vmop]. For the most part, they are not essential to + seeing the first signs of life from PL/Java and can be left for tuning + later. However, on Java 18 and later, it is necessary to choose + a `-Djava.security.manager=...` setting before PL/Java will run at all. + Details are on the [VM options page][vmop]. + +`pljava.module_path` : There is probably no need to set this variable unless installation locations were changed, in which case, it should be set to the final installed full - pathname of the file that is called - `pljava/sharedir/pljava/pljava-${project.version}.jar` in the installer jar. + pathnames of the files that are called + `pljava/sharedir/pljava/pljava-${project.version}.jar` and + `pljava/sharedir/pljava/pljava-api-${project.version}.jar` in the installer + jar. The pathnames should be separated by the appropriate character for your + platform; often a colon, or a semicolon on Windows. Note: this variable isn't meant to point to the code you develop and - use in PL/Java--that's what the [`sqlj.install_jar function`][sqjij] + use in PL/Java---that's what the [`sqlj.install_jar function`][sqjij] is for. [sqjij]: https://github.com/tada/pljava/wiki/SQL-functions -Those two are not the only PL/Java configuration variables there are, +Those three are not the only PL/Java configuration variables there are, but it is unlikely you would have to change any others before installation succeeds. For the rest, there is a [configuration variable reference][varref] page. @@ -176,7 +202,7 @@ Another approach is to save them to the server's configuration file. If you wish PL/Java to be available for all databases in a cluster, it may be more convenient to put the settings in the file than to issue `ALTER DATABASE` for several databases, but `pg_ctl reload` will be needed -to make changed settings effective. Starting with PostgreSQL 9.4, +to make changed settings effective. `ALTER SYSTEM` may be used as an alternative to editing the file. If you have several databases in the cluster and you favor the @@ -186,60 +212,32 @@ sure that `CREATE EXTENSION` just works, in any database where PL/Java is wanted. Different per-database settings can still be made if one database needs them. -For PostgreSQL releases [earlier than 9.2][pre92], the configuration file is -the _only_ way to make your settings persistent. 
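For example, the `pljava.module_path` setting described above, worked out in a session where PL/Java has already loaded and then saved per-database or cluster-wide, might look like this sketch (the jar locations and the database name `mydb` are placeholders for what your installation actually uses, and the separator would be a semicolon on Windows):

```
SET pljava.module_path TO
  '/usr/share/postgresql/pljava/pljava-${project.version}.jar:/usr/share/postgresql/pljava/pljava-api-${project.version}.jar';
-- keep the setting for just this database ...
ALTER DATABASE mydb SET pljava.module_path FROM CURRENT;
-- ... or save the same value cluster-wide and reload the configuration
ALTER SYSTEM SET pljava.module_path TO
  '/usr/share/postgresql/pljava/pljava-${project.version}.jar:/usr/share/postgresql/pljava/pljava-api-${project.version}.jar';
SELECT pg_reload_conf();
```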
- $h2 Upgrade installations PL/Java performs an upgrade installation if there is already an `sqlj` schema with tables that match a known PL/Java schema from version 1.3.0 or later. It will convert, preserving data, to the current schema if necessary. -*Remember that PL/Java runs independently -in each database session where it is in use. Older PL/Java versions active in -other sessions can be disrupted by the schema change.* - -A trial installation of a PL/Java update can be done in a transaction, and -rolled back if desired, leaving the schema as it was. Any concurrent sessions -with active older PL/Java versions will not be disrupted by the altered schema -as long as the transaction remains open, *but they may block for the duration, -so whatever testing will be done within the transaction should be done quickly -if that could be an issue*. - -$h3 Upgrading, outside the extension framework - -On PostgreSQL pre-9.1, or whenever PL/Java has not been installed -with `CREATE EXTENSION`, it can be updated with a `LOAD` command just -as in a fresh installation. This must be done in a fresh session (in -which nothing has caused PL/Java to load since establishing the connection). +A database cluster using PL/Java can be binary-upgraded using `pg_upgrade` +when certain requirements are met. -$h3 Upgrading, within the extension framework - -On PostgreSQL 9.1 or later where PL/Java has been installed with -`CREATE EXTENSION`, it can be updated with -[`ALTER EXTENSION pljava UPDATE`][aeu], as long as -`SELECT * FROM pg_extension_update_paths('pljava')` shows a one-step path -from the version currently installed to the version desired. - -[aeu]: http://www.postgresql.org/docs/current/static/sql-alterextension.html - -As with the `LOAD` method, an `ALTER EXTENSION ... UPDATE` must be done -in a fresh session, before anything has loaded PL/Java; this also precludes -an update with a multi-step path in a single command, but the intent is to -always provide a one-step path between _released_ versions. - -If you will be following development (`SNAPSHOT`) versions, the installation -method using `LOAD` may be simpler, as updates between snapshots with the -same version string make no sense to the extension framework. +For more on both procedures, see [Upgrading](upgrade.html). $h2 Usage permission Installation of PL/Java creates two "languages", `java` and `javau`. Functions that specify `LANGUAGE javau` can be created only by superusers, -and are subject to very few restrictions at runtime. Functions that specify +and PL/Java's default policy grants them some filesystem access. Functions that +specify `LANGUAGE java` can be created by any user or role that has been granted -`USAGE` permission `ON LANGUAGE java`. They run under a security manager that -denies access to the host filesystem. +`USAGE` permission `ON LANGUAGE java`. The default policy grants them no extra +permissions. The exact permissions granted in either case can be customized +in [`pljava.policy`][policy]. + +__Important: The above description applies when PL/Java is run +[with policy enforcement][policy], available on Java 23 and older. +On stock Java 24 and later, PL/Java can only be run with no policy enforcement, +and the implications should be reviewed carefully [here][unenforced].__ PostgreSQL, by default, would grant `USAGE` to `PUBLIC` on the `java` language, but PL/Java takes a more conservative approach on a new installation. @@ -251,16 +249,19 @@ if a site prefers that traditional policy. 
In a repeat or upgrade installation (the language `java` already exists), no change will be made to the access permissions granted on it. +When running [with no policy enforcement][unenforced], PL/Java allows only +database superusers to create functions even in the `java` language, +disregarding any `USAGE` grants. + $h2 Special topics Be sure to read these additional sections if: -* You are installing to [a PostgreSQL release earlier than 9.2][pre92] +* You intend to use [Java 24 or later][unenforced] * You are installing on [a system using SELinux][selinux] * You are installing on [Mac OS X][osx] * You are installing on [Ubuntu][ubu] and the self-extracting jar won't work -[pre92]: prepg92.html [selinux]: selinux.html [osx]: ../build/macosx.html [ubu]: ../build/ubuntu.html @@ -272,9 +273,10 @@ $h3 Puzzling error message from `CREATE EXTENSION` ERROR: relation "see doc: do CREATE EXTENSION PLJAVA in new session" already exists -For PL/Java, `CREATE EXTENSION` (which works in PostgreSQL 9.1 and later) is a -wrapper around installation via `LOAD` (which works in all versions PL/Java -supports). A quirk of this arrangement is that PostgreSQL treats `LOAD` as a +For PL/Java, `CREATE EXTENSION` is a wrapper around installation via `LOAD` +(which was needed for PostgreSQL versions now of only historical interest, +and remains supported for cases where `CREATE EXTENSION` is too inflexible). +A quirk of this arrangement is that PostgreSQL treats `LOAD` as a no-op for the remainder of a session once the library has been loaded, so `CREATE EXTENSION pljava` works in a *fresh* session, but not in one where PL/Java's native code is already in place. @@ -289,9 +291,6 @@ If PL/Java loading fails with undefined-symbol errors that seem to refer to common system libraries (`libldap`, for example), see [Building PL/Java with a `RUNPATH`](../build/runpath.html). -In case of errors that seem to refer to symbols of PostgreSQL itself, -see [this page](../build/linkpglibs.html). - $h2 More background and special considerations These last sections cover a little more of what happens under the hood. @@ -304,7 +303,7 @@ Because PL/Java, by design, runs entirely in the backend process created for each connection to PostgreSQL, to configure it does not require any cluster-wide actions such as stopping or restarting the server, or editing the configuration file; any necessary settings can be made in SQL over -an ordinary connection (in PostgreSQL 9.2 and later, anyway). +an ordinary connection. _Caution: if you are installing a new, little-tested PL/Java build, be aware that in the unexpected case of a crash, the `postmaster` will kick other @@ -319,7 +318,7 @@ PostgreSQL superuser), the commands for first-time PL/Java setup are: ``` SET client_min_messages TO NOTICE; -- or anything finer (INFO, DEBUG, ...) SET pljava.libjvm_location TO ' use the libjvm path here '; -SET pljava.classpath TO ' use the pljava.jar path here '; +SET pljava.module_path TO ' use the pljava and pljava-api jar paths here '; LOAD ' use the PL/Java native library path here '; ``` (The `client_min_messages` setting is only to ensure you do not miss @@ -332,9 +331,9 @@ in the [`examples.jar` supplied in the build][examples]. [examples]: ../examples/examples.html -Although typically only `pljava.libjvm_location` and `pljava.classpath` need -to be set, there is a [reference to PL/Java configuration variables][varref] -if you need it. 
+Although typically only `pljava.libjvm_location` and `pljava.module_path` need +to be right for PL/Java to function, there is a +[reference to PL/Java configuration variables][varref] if you need it. [varref]: ../use/variables.html @@ -376,18 +375,29 @@ given just the basename of the file instead of a full path. Or, if `dynamic_library_path` is already set, the file can be placed in any directory on that list for the same effect. -If the `pljava-\${project.version}.jar` file is placed in the default location +If the `pljava-\${project.version}.jar` and `pljava-api-\${project.version}.jar` +files are placed in the default location (typically a `pljava` subdirectory of the PostgreSQL "share" directory), then -`pljava.classpath` will not need to be set. +`pljava.module_path` will not need to be set. + +The self-extracting jar file produced by the build, assuming it is run with +adequate permission, will extract the files into appropriate locations +determined by querying `pg_config` on the target system. If that system +may have more than one PostgreSQL installation and you wish to control +which one the files get installed into, pass the full path to that +installation's `pg_config` executable with `-Dpgconfig=` on that +`java -jar ...` command line. (In more difficult cases, each category +of file location, such as `pgconfig.sharedir`, can be separately overridden +on the command line.) **If you are a distribution maintainer** packaging PL/Java for a certain platform, and you know or control that platform's conventions for where -the Java `libjvm` should be found, or where PostgreSQL extension files -(architecture-dependent and -independent) should go, please build your -PL/Java package with those locations as the defaults for the corresponding -PL/Java variables, and with the built files in those locations. - -_Todo: add maven build options usable by distro spinners to set those defaults._ +the Java `libjvm` should be found, please supply that full path on the `mvn` +command line with `-Dpljava.libjvmdefault=` to make it the default for +`pljava.libjvm_location`, so users on that platform can see a working PL/Java +with no need to set that variable in the usual case. That tip and more are +covered +in [packaging PL/Java for a software distribution](../build/package.html). $h3 PostgreSQL superuser with access as user running postgres @@ -407,3 +417,7 @@ $h3 PostgreSQL superuser, OS user distinct from the user running postgres In this case, simply place the files in any location where you can make them readable by the user running `postgres`, and set the `pljava.*` variables accordingly. + +[policy]: ../use/policy.html +[unenforced]: ../use/unenforced.html +[mappedudt]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/MappedUDT.html diff --git a/src/site/markdown/install/locate.md.vm b/src/site/markdown/install/locate.md.vm index 2c36e8e3..2d94bc5b 100644 --- a/src/site/markdown/install/locate.md.vm +++ b/src/site/markdown/install/locate.md.vm @@ -31,11 +31,9 @@ work with a `.jar` file no matter what. Relative to the root of the build tree, the jar file is found at -`pljava-packaging/target/pljava-${pgversion}-${naraol}.jar` +`pljava-packaging/target/pljava-${pgversion}.jar` -where `${pgversion}` resembles `pg9.4` and `${naraol}` is an -*architecture-os-linker* triple, for example `amd64-Linux-gpp` -or `amd64-Windows-msvc`. It contains these things: +where `${pgversion}` resembles `pg16`. 
The jar contains these things: `pljava/pkglibdir/libpljava-\${project.version}.so` (or `.dll`, etc.) : The architecture-dependent, native library portion of the PL/Java @@ -47,7 +45,7 @@ or `amd64-Windows-msvc`. It contains these things: (more below). `pljava/sharedir/pljava/pljava-api-\${project.version}.jar` -: The jar file that should be named on the `javac` classpath when compiling +: The jar file that should be named on the `javac` module path when compiling code for PL/Java (see "How to compile against the PL/Java API" in the [user guide][use]). @@ -62,13 +60,26 @@ or `amd64-Windows-msvc`. It contains these things: : Various files scripting what `CREATE EXTENSION` or `ALTER EXTENSION ... UPDATE` really do. +`pljava/sysconfdir/pljava.policy` +: Policy file defining the Java permissions granted to the languages `java` + and `javaU`, to any custom language aliases, or to specific jars, as + described [here][policy]. Unused if PL/Java is run + [without policy enforcement][nopolicy]. + It could happen that future versions add more files in the jar before updating this page. Also, every jar file has a `MANIFEST.MF`, and this file also contains a `JarX.class` to make it self-extracting; these are not otherwise important to PL/Java. See the [installation page][inst] for how to control the self-extraction. +Another file, `Node.class`, present in this jar is also unimportant for +normal installation, but provides some facilities for automated testing, +as described [here][node]. + [examples]: ../examples/examples.html +[node]: ../develop/node.html +[policy]: ../use/policy.html +[nopolicy]: ../use/unenforced.html Extract the needed files from this archive and place them in appropriate locations, then complete the [installation][inst]. @@ -80,6 +91,19 @@ not to extract files from the packaged archive into some other location, but simply to `SET` the `pljava.*` variables to point to the files right where they were generated in the build tree. +$h3 The PL/Java API `jar` file + +This file is built by the `pljava-api` subproject, +so relative to the source root where the build was +done, it will be found in `pljava-api/target/pljava-api-\${project.version}.jar` +with `\${project.version}` replaced in the obvious way, +for example `${project.version}`. + +This file needs to be named in `pljava.module_path` along with the internals +`jar` file below. If convenient, check the default value of `pljava.module_path` +and place both these files where it expects them to be; then it will not need +to be set. + $h3 The architecture-independent PL/Java `jar` file This file is built by the `pljava` subproject, @@ -88,20 +112,19 @@ done, it will be found in `pljava/target/pljava-\${project.version}.jar` with `\${project.version}` replaced in the obvious way, for example `${project.version}`. -In the simplest installation, determine the default value of `pljava.classpath` -and place the jar file at that exact name. In a typical distribution, the -default will be `$sharedir/pljava/pljava-\${project.version}.jar`. +In the simplest installation, determine the default value of +`pljava.module_path` and place the jar file at that exact name, alongside +the PL/Java API jar described above. +In a typical distribution, the default will be +`$sharedir/pljava/pljava-\${project.version}.jar`. $h3 The architecture-dependent PL/Java native library This is built by the `pljava-so` subproject. Its filename extension can depend on the operating system: `.so` on many systems, `.dll` on Windows, `.bundle` on Mac OS X / Darwin. 
Relative to the source root where the build was performed, it -is at the end of a long and redundant path that contains the project version -(twice), an "architecture-OS-linker" string (twice), and a build type -("plugin"), also twice. +is found in the `pljava-so/pljava-pgxs` directory. -An example, for version `${project.version}` and arch-os-linker of -`amd64-Linux-gpp` is (very deep breath): +An example for version `${project.version}` is: -`pljava-so/target/nar/pljava-so-${project.version}-amd64-Linux-gpp-plugin/lib/amd64-Linux-gpp/plugin/libpljava-so-${project.version}.so` +`pljava-so/pljava-pgxs/libpljava-so-${project.version}.so` diff --git a/src/site/markdown/install/locatejvm.md b/src/site/markdown/install/locatejvm.md index bddceed0..24c4b6f8 100644 --- a/src/site/markdown/install/locatejvm.md +++ b/src/site/markdown/install/locatejvm.md @@ -15,7 +15,7 @@ the name `libjvm.*` under the Java home directory (which is reported by `mvn -v` assuming the JRE that you used for running Maven is the one you intend to use at run time). -The filename extension may be `.so` on many systems, `.dylib` on Mac OS X +The filename extension may be `.so` on many systems, `.bundle` on Mac OS X (see the [Mac OS X build notes](../build/macosx.html) for more about OS X), or `.dll` on Windows. As Windows also leaves off the `lib` in front, on that platform you would look for `jvm.dll`. @@ -27,6 +27,18 @@ by a process, this works: ``` strace -e open java 2>&1 | grep libjvm ``` +## Version of the Java library selected + +The library pointed to by `pljava.libjvm_location` must be a Java 9 or later +JVM for the PL/Java 1.6 series. The actual version of the library will determine +what Java language features are available for PL/Java functions to use. + +The Java version also influences whether PL/Java can operate +[with security policy enforcement][policy] or +[with no policy enforcement][unenforced]. For stock Java 24 or later, it is only +possible to operate with no enforcement, and the implications detailed for +[PL/Java with no policy enforcement][unenforced] should be carefully reviewed. + ## Using a less-specific path The methods above may find the `libjvm` object on a very specific path @@ -47,3 +59,7 @@ generic one like `jre`, linked to whichever Java version is considered current. Using an alias that is too generic could possibly invite headaches if the default Java version is ever changed to one your PL/Java modules were not written for (or PL/Java itself was not built for). + + +[policy]: ../use/policy.html +[unenforced]: ../use/unenforced.html diff --git a/src/site/markdown/install/oj9vmopt.md b/src/site/markdown/install/oj9vmopt.md new file mode 100644 index 00000000..7a8b1b37 --- /dev/null +++ b/src/site/markdown/install/oj9vmopt.md @@ -0,0 +1,172 @@ +# PL/Java VM option recommendations for the OpenJ9 JVM + +The OpenJ9 JVM accepts a number of standard options that are the same as +those accepted by Hotspot, but also many nonstandardized ones that are not. +A complete list of options it accepts can be found [here][oj9opts]. + +There is one option that should be considered for any PL/Java configuration: + +* [`-Xquickstart`][xqs] + +It can reduce the JVM startup time by doing less JIT compilation and at lower +optimization levels. On the other hand, if the work to be done in PL/Java is +substantial enough, the increased run time of the less-optimized code can make +the overall performance effect net negative. It should be measured under +expected conditions.
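Trying it is as simple as the following sketch (illustrative only; append it to whatever other options you already set, and measure before adopting):

```
SET pljava.vmoptions TO '-Xquickstart';
```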
+ +[xqs]: https://www.ibm.com/support/knowledgecenter/SSYKE2_8.0.0/com.ibm.java.vm.80.doc/docs/xquickstart.html + +Beyond that, and the usual opportunities to adjust memory allocations and +garbage-collector settings, anyone setting up PL/Java with OpenJ9 should +seriously consider setting up class sharing, which is much simpler in +OpenJ9 than in Hotspot, and is the subject of the rest of this page. + +## How to set up class sharing in OpenJ9 + +OpenJ9 is an [alternative to the Hotspot JVM][hsj9] that is available in +[OpenJDK][] (which can be downloaded with the choice of either JVM). + +OpenJ9 includes a _dynamically managed_ class data sharing feature: it is +able to cache ahead-of-time compiled versions of classes in a file to be +sharably memory-mapped by all backends running PL/Java. The shared cache +significantly reduces both the aggregate memory footprint of multiple +backend JVMs and the per-JVM startup time. It is [described here][ej9cds]. + +The OpenJ9 class-sharing feature is similar to Hotspot's +[application class data sharing][iads], but with a major advantage in the +context of PL/Java: it is able to share not only classes of the Java runtime +itself and those on `pljava.module_path` (PL/Java's own internals), but also +classes from application jars loaded with `sqlj.install_jar`. The Hotspot +counterpart can share only the first two of those categories. + +OpenJ9 sharing is also free of the commercial-license encumbrance on the +Hotspot feature in Oracle Java 8 and later (OpenJDK with Hotspot also includes +the feature, without the encumbrance, but only from Java 10 on). +OpenJ9 sharing is also much less fuss to set up. + +To see how much less, the Hotspot setup is a manual, three-step affair +to be done in advance of production use. You choose some code to run that you +hope will exercise all the classes you would like in the shared +archive and dump the loaded-class list, then generate the shared archive +from that list, and finally save the right option in `pljava.vmoptions` to have +the shared archive used at run time. + +By contrast, you set up OpenJ9 to share classes with the following step: + +1. Add an `-Xshareclasses` option to `pljava.vmoptions` to tell OpenJ9 to + share classes. + +OpenJ9 will then, if the first time, create a shared archive and dynamically +manage it, adding ahead-of-time-compiled versions of classes as they are +used in your application. + +[oj9opts]: https://www.ibm.com/support/knowledgecenter/SSYKE2_8.0.0/com.ibm.java.vm.80.doc/docs/x_jvm_commands.html +[ej9cds]: https://www.ibm.com/developerworks/library/j-class-sharing-openj9/ +[iads]: appcds.html +[vmop]: vmoptions.html +[OpenJDK]: https://adoptopenjdk.net/ +[hsj9]: https://www.eclipse.org/openj9/oj9_faq.html +[shclutil]: https://www.ibm.com/developerworks/library/j-class-sharing-openj9/#sharedclassesutilities + +### Setup + +Arrange `pljava.vmoptions` to contain an option `-Xshareclasses`. + +The option can take various suboptions. Two interesting ones are: + + -Xshareclasses:name=/path/to/some/file + -Xshareclasses:cacheDir=/path/to/some/dir + +to control where PL/Java's shared class versions get cached. The first variant +specifies the exact file that will be memory mapped, while the second specifies +what directory will contain the (automatically named) file. + +Using either suboption (or both; suboptions are separated by commas), you can +arrange for PL/Java's shared classes to be cached separately from other uses +of Java on the same system. 
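For example (the cache directory shown here is only a placeholder):

```
SET pljava.vmoptions TO '-Xshareclasses:cacheDir=/var/cache/pljava';
```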
You could even, by saving different +`pljava.vmoptions` settings per database or per user, arrange separate class +caches for distinct applications using PL/Java. + +All of the suboptions accepted by `-Xshareclasses` are listed [here][xsc]. + +[xsc]: https://www.ibm.com/support/knowledgecenter/SSYKE2_8.0.0/com.ibm.java.vm.80.doc/docs/xshareclasses.html + +#### Hotspot-like loaded-once-and-frozen class share, or dynamic one + +If you wish to emulate the Hotspot class sharing feature where a shared class +archive is created ahead of time and then frozen, you can let the application +run for a while with the `-Xshareclasses` option not containing `readonly`, +until the shared cache has been well warmed, and then add `readonly` to the +`-Xshareclasses` option as saved in `pljava.vmoptions`. + +It will then be necessary (as it is with Hotspot) to expressly repeat the +process when new versions of the JRE or PL/Java are installed, or (unlike +Hotspot, which does not share them) application jars are updated. This is +not because OpenJ9 would continue loading the wrong versions from cache, +but because it would necessarily bypass the cache to load the current ones. + +If the `readonly` option is not used, the OpenJ9 shared cache will dynamically +cache new versions of classes as they are loaded. It does not, however, purge +older versions automatically. There are [shared classes utilities][shclutil] +available to monitor utilization of the cache space, and to reset caches if +needed. + +With a dynamic shared cache, OpenJ9 may also continue to refine the shared +data even for unchanged classes that have already been cached. It does not +replace the originally cached representations, but over time can add JIT hints +based on profile data collected in longer-running processes, which can help +new, shorter-lived processes more quickly reach the same level of optimization +as key methods are just-in-time recompiled. + +### Effect of `sqlj.replace_jar` + +When PL/Java replaces a jar, the class loaders and cached function mappings +are reset in the backend that replaced the jar, so subsequent PL/Java function +calls in that backend will use the new classes. + +In other sessions active at the time the jar is replaced, without OpenJ9 class +sharing, execution will continue with the already-loaded classes, unless/until +another class needs to be loaded from the old jar, which will fail with a +`ClassNotFoundException`. + +With OpenJ9 class sharing, other sessions may continue executing even as they +load classes, as long as the old class versions are found in the shared cache. + +### Java libraries + +If your own PL/Java code depends on other Java libraries distributed as +jars, the usual recommendation would be to install those as well into the +database with `sqlj.install_jar`, and use `sqlj.set_classpath` to make them +available. That keeps everything handled uniformly within the database. +With OpenJ9 sharing, there is no downside to this approach, as classes +installed in the database are shared, just as those on the system classpath. + +### Thorough class verification + +When using class sharing, consider adding `-Xverify:all` to +the other VM options, perhaps once while warming a cache that you will then +treat as `readonly`. Java sometimes applies more relaxed verification to +classes it loads from the system classpath. With class sharing in use, classes +may be loaded and verified early, then saved in the shared archive for quick +loading later. 
In those circumstances, the cost of requesting verification for +all classes may not be prohibitive, while increasing robustness against damaged +class files. + +### Cache invalidation if database or PL/Java reinitialized + +The way that PL/Java's class loading currently integrates with OpenJ9 class +sharing relies on a PostgreSQL `SERIAL` column to distinguish updated versions +of classes loaded with `sqlj.install_jar`/`replace_jar`. + +If the database is recreated, PL/Java is deinstalled and reinstalled, or +anything else happens to restart the `SERIAL` sequence, it may be wise to +destroy any existing OpenJ9 class share, to avoid incorrectly matching +older cached versions of classes. + +### Performance tuning tips on the wiki + +Between releases of this documentation, breaking news, tips, and metrics +on PL/Java performance tuning may be shared +[on the performance-tuning wiki page][ptwp]. + +[ptwp]: https://github.com/tada/pljava/wiki/Performance-tuning diff --git a/src/site/markdown/install/prepg92.md b/src/site/markdown/install/prepg92.md deleted file mode 100644 index b1fb8fbf..00000000 --- a/src/site/markdown/install/prepg92.md +++ /dev/null @@ -1,63 +0,0 @@ -# Installation on PostgreSQL releases earlier than 9.2 - -In PostgreSQL releases 9.2 and later, PL/Java can be installed entirely -without disturbing the `postgresql.conf` file or reloading/restarting the -server: the configuration variables can be set interactively in a session -until PL/Java loads sucessfully, then saved with a simple -`ALTER DATABASE` _dbname_ `SET` _var_ `FROM CURRENT` for each setting -that had to be changed. - -Releases earlier than 9.2 are slightly less convenient. It is still possible -to work out the right settings in an interactive session, but once found, -the settings must be added to `postgresql.conf` to be persistent, and the -postmaster signalled (with `pg_ctl reload`) to pick up the new settings. - -Releases before 9.2 also require setting `custom_variable_classes` in -`postgresql.conf` to include the prefix `pljava`, and that assignment must -be earlier in the file than any settings of `pljava.*` variables. - -## Trying settings interactively - -It is still possible to do an exploratory session to find the variable settings -that work before touching `postgresql.conf` at all, but -the details are slightly different. - -In later PostgreSQL versions, you would typically use some `SET` commands -followed by a `LOAD` (followed, perhaps, by more `SET` commands unless you -always get things right on the first try). - -Before release 9.2, however, the order has to be `LOAD` first, which typically -will lead to an incompletely-started warning because the configuration settings -have not been made yet. _Then_, because the module has been loaded, -`pljava.*` variables will be recognized and can be set and changed until -PL/Java successfully loads, just as in the newer versions of PostgreSQL. - -Once working settings are found, edit `postgresql.conf`, make sure that -`custom_variable_classes` includes `pljava`, copy in the variable settings -that worked, and use `pg_ctl reload` to make the new settings effective. - -## But what if I want the load to fail and it doesn't? - -The procedure above relies on the way loading stops when the settings are not -right, giving you a chance to adjust them interactively. That turns out to be -a problem if there are previously-saved settings, or the original defaults, -that happen to *work* even if they are not the settings you want. 
In that case, -the `LOAD` command starts PL/Java right up, leaving you no chance in the -interactive session to change anything. - -To escape that behavior, there is one more very simple configuration variable, -`pljava.enable`. If it is `off`, `LOAD`ing PL/Java will always stop early and -allow you to set other variables before setting `pljava.enable` to `on`. - -To answer the hen-and-egg question of how to set `pljava.enable` to `off` -before loading the module, it _defaults_ to `off` on PostgreSQL releases -earlier than 9.2, so you will always have the chance to test your settings -interactively (and you will always have to set it explicitly `on` when -you are ready). - -If it is already `on` because of an earlier configuration saved in -`postgresql.conf`, it will be recognized in your interactive session and you -can set it `off` as needed. - -On later PostgreSQL releases with no such complications, it defaults to `on` -and can be ignored. diff --git a/src/site/markdown/install/selinux.md b/src/site/markdown/install/selinux.md index f5bedc4a..81cd840f 100644 --- a/src/site/markdown/install/selinux.md +++ b/src/site/markdown/install/selinux.md @@ -1,7 +1,7 @@ # Using PL/Java with SELinux These notes were made running PostgreSQL and PL/Java on a Red Hat system, -but should be applicable--with possible changes to details--on other systems +but should be applicable---with possible changes to details---on other systems running SELinux. Anything that gets run by `postgres` itself runs under a special SELinux context diff --git a/src/site/markdown/install/smproperty.md b/src/site/markdown/install/smproperty.md new file mode 100644 index 00000000..e58d2dd5 --- /dev/null +++ b/src/site/markdown/install/smproperty.md @@ -0,0 +1,49 @@ +# Available policy-enforcement settings by Java version + +In the PostgreSQL [configuration variable][variables] `pljava.vmoptions`, +whether and how to set the `java.security.manager` property depends on +the Java version in use (that is, on the version of the Java library that +the `pljava.libjvm_location` configuration variable points to). + +There are two ways of setting the `java.security.manager` property that may be +allowed or required depending on the Java version in use. + +`-Djava.security.manager=allow` +: PL/Java's familiar operating mode in which + security policy is enforced. More on that mode can be found in + [Configuring permissions in PL/Java][policy]. + +`-Djava.security.manager=disallow` +: A mode required on Java 24 and later, in which there is no enforcement of + policy. Before setting up PL/Java in this mode, the implications in + [PL/Java with no policy enforcement][unenforced] should be carefully + reviewed. + +This table lays out the requirements by specific version of Java. + +|Java version|Available settings| +|---------|:---| +|9–11|There must be no appearance of `-Djava.security.manager` in `pljava.vmoptions`. Mode will be policy-enforcing.| +|12–17|Either `-Djava.security.manager=allow` or `-Djava.security.manager=disallow` may appear in `pljava.vmoptions`. Default is policy-enforcing (same as `allow`) if neither appears.| +|18–23|One of `-Djava.security.manager=allow` or `-Djava.security.manager=disallow` must appear in `pljava.vmoptions`, or PL/Java will fail to start. 
There is no default.| +|24–|`-Djava.security.manager=disallow` must appear in `pljava.vmoptions`, or PL/Java will fail to start.| +[Allowed `java.security.manager` settings by Java version] + +When `pljava.libjvm_location` points to a Java 17 or earlier JVM, there is +no special VM option needed, and PL/Java will operate with policy enforcement +by default. However, when `pljava.libjvm_location` points to a Java 18 or later +JVM, `pljava.vmoptions` must contain either `-Djava.security.manager=allow` or +`-Djava.security.manager=disallow`, to select operation with or without policy +enforcement, respectively. No setting other than `allow` or `disallow` will +work. Only `disallow` is available for stock Java 24 or later. + +The behavior with `allow` (and the default before Java 18) is further described +in [Configuring permissions in PL/Java][policy]. + +The behavior with `disallow`, the only mode offered for Java 24 and later, +is detailed in [PL/Java with no policy enforcement][unenforced], which +should be carefully reviewed when PL/Java will be used in this mode. + +[variables]: ../use/variables.html +[policy]: ../use/policy.html +[unenforced]: ../use/unenforced.html diff --git a/src/site/markdown/install/upgrade.md b/src/site/markdown/install/upgrade.md new file mode 100644 index 00000000..4c397c3a --- /dev/null +++ b/src/site/markdown/install/upgrade.md @@ -0,0 +1,95 @@ +# Upgrading + +## Upgrading the PL/Java version in a database + +PL/Java performs an upgrade installation if there is already an `sqlj` schema +with tables that match a known PL/Java schema from version 1.3.0 or later. It +will convert, preserving data, to the current schema if necessary. + +*Remember that PL/Java runs independently +in each database session where it is in use. Older PL/Java versions active in +other sessions can be disrupted by the schema change.* + +A trial installation of a PL/Java update can be done in a transaction, and +rolled back if desired, leaving the schema as it was. Any concurrent sessions +with active older PL/Java versions will not be disrupted by the altered schema +as long as the transaction remains open, *but they may block for the duration, +so whatever testing will be done within the transaction should be done quickly +if that could be an issue*. + +### Upgrading, outside the extension framework + +On PostgreSQL pre-9.1, or whenever PL/Java has not been installed +with `CREATE EXTENSION`, it can be updated with a `LOAD` command just +as in a fresh installation. This must be done in a fresh session (in +which nothing has caused PL/Java to load since establishing the connection). + +### Upgrading, within the extension framework + +On PostgreSQL 9.1 or later where PL/Java has been installed with +`CREATE EXTENSION`, it can be updated with +[`ALTER EXTENSION pljava UPDATE`][aeu], as long as +`SELECT * FROM pg_extension_update_paths('pljava')` shows a one-step path +from the version currently installed to the version desired. + +[aeu]: http://www.postgresql.org/docs/current/static/sql-alterextension.html + +As with the `LOAD` method, an `ALTER EXTENSION ... UPDATE` must be done +in a fresh session, before anything has loaded PL/Java; this also precludes +an update with a multi-step path in a single command, but the intent is to +always provide a one-step path between _released_ versions. 
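Concretely, in a fresh session where nothing has yet loaded PL/Java, the check and the update can look like this (the paths listed will depend on the versions installed):

```
SELECT * FROM pg_extension_update_paths('pljava');
ALTER EXTENSION pljava UPDATE;
```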
+ +If you will be following development (`SNAPSHOT`) versions, the installation +method using `LOAD` may be simpler, as updates between snapshots with the +same version string make no sense to the extension framework. + +## Upgrading the PostgreSQL major version with PL/Java in use + +### Binary upgrading with `pg_upgrade` + +Using the [`pg_upgrade`][pgu] tool [contributed to PostgreSQL in 9.0][pguc], +an entire PostgreSQL cluster can be upgraded to a later major version in a +more direct process than the dump to SQL and reload formerly required. +The binary upgrade is possible as long as the cluster and databases meet +certain requirements, which should be studied in the +[`pg_upgrade` manual page][pgu] version for the PostgreSQL release being +upgraded *to*. + +PL/Java adds a few additional considerations: + +* `pg_upgrade` will check in advance that every loadable module used in + the old cluster can be loaded in the new cluster, but the schema and + data will be copied over by `pg_upgrade` itself. That means that a + PL/Java build for the new PostgreSQL version must be *installed in + the directory structure* for the new cluster before running `pg_upgrade`, + but *not* installed into any databases (the new cluster should not have + had any non-system objects created yet). + +* In the steps of [Installing PL/Java](install.html), that means that the + self-extracting `java -jar ...` command must have been run (or the + equivalent package-installation command, if you are getting PL/Java + through a packaging system for your OS), but no `CREATE EXTENSION` or + `LOAD` command should have been run to configure it in any database. If + using the extracting jar, to be sure of installing it to the right cluster, + add `-Dpgconfig=`*pgconfigpath* at the end, where *pgconfigpath* is the + full path to the *new* cluster's `pg_config` executable. + +* PL/Java releases before 1.5.1 were not aware of `pg_upgrade` operation. + To avoid possible errors during the upgrade involving OID or object + clashes, the PL/Java release installed for the new cluster should be + 1.5.1 or later. + +* When `pg_upgrade` tests that all needed modules are present, it expects + the names to match. The PL/Java module name includes the PL/Java version, + so the versions installed in the old and new clusters should be the same. + Given that 1.5.1 or later should be installed in the new cluster, + if any databases in the old cluster are using an older PL/Java version, + PL/Java should be upgraded in each (as described at the top of this page) + before running `pg_upgrade`. To be sure of installing a newer PL/Java + build into the old cluster, if using the extracting jar, add + `-Dpgconfig=`*oldpgconfigpath* at the end of the `java -jar ...` command + line, with *oldpgconfigpath* the full path to the old cluster's `pg_config` + executable. + +[pgu]: https://www.postgresql.org/docs/current/static/pgupgrade.html +[pguc]: https://www.postgresql.org/docs/9.0/static/release-9-0.html#AEN103668 diff --git a/src/site/markdown/install/vmoptions.md b/src/site/markdown/install/vmoptions.md index efca87c5..674cb9fa 100644 --- a/src/site/markdown/install/vmoptions.md +++ b/src/site/markdown/install/vmoptions.md @@ -4,6 +4,64 @@ The PostgreSQL configuration variable `pljava.vmoptions` can be used to supply custom options to the Java VM when it is started. Several of these options are likely to be worth setting. +If using [the OpenJ9 JVM][hsj9], be sure to look also at the +[VM options specific to OpenJ9][vmoptJ9]. 
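As a quick illustration of the kind of value this variable holds, a setting that combines options discussed in the sections below might look like the following (every option and value here is only an example, not a recommendation for your site):

```
SET pljava.vmoptions TO
  '-Djava.security.manager=disallow --add-modules=java.net.http -Xshare:auto';
```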
+ +## Selecting operation with or without security policy enforcement + +PL/Java can operate [with security policy enforcement][policy], its former +default and only mode, or [with no policy enforcement][unenforced], the only +mode available on stock Java 24 and later. + +When `pljava.libjvm_location` points to a Java 17 or earlier JVM, there is +no special VM option needed, and PL/Java will operate with policy enforcement +by default. However, when `pljava.libjvm_location` points to a Java 18 or later +JVM, `pljava.vmoptions` must contain either `-Djava.security.manager=allow` or +`-Djava.security.manager=disallow`, to select operation with or without policy +enforcement, respectively. No setting other than `allow` or `disallow` will +work. Only `disallow` is available for stock Java 24 or later. + +For just how to configure specific Java versions, see +[Available policy-enforcement settings by Java version][smprop]. + +Before operating with `disallow`, the implications detailed in +[PL/Java with no policy enforcement][unenforced] should be carefully reviewed. + +[policy]: ../use/policy.html +[unenforced]: ../use/unenforced.html +[smprop]: smproperty.html + +## Adding to the set of readable modules + +By default, a small set of Java modules (including `java.base`, +`org.postgresql.pljava`, and `java.sql` and its transitive dependencies, +which include `java.xml`) will be readable to any Java code installed with +`install_jar`. + +While those modules may be enough for many uses, other modules are easily added +using `--add-modules` within `pljava.vmoptions`. For example, +`--add-modules=java.net.http,java.rmi` would make the HTTP Client and WebSocket +APIs readable, along with the Remote Method Invocation API. + +For convenience, the module `java.se` simply transitively requires all the +modules that make up the full Java SE API, so `--add-modules=java.se` will make +that full API available to PL/Java code without further thought. The cost, +however, may be that PL/Java uses more memory and starts more slowly than if +only a few needed modules were named. + +For just that reason, there is also a `--limit-modules` option that can be used +to trim the set of readable modules to the minimum genuinely needed. More on the +use of that option [here][limitmods]. + +[limitmods]: ../use/jpms.html#Limiting_the_module_graph + +Third-party modular code can be made available by adding the modular jars +to `pljava.module_path` (see [configuration variables](../use/variables.html)) +and naming those modules in `--add-modules`. PL/Java currently treats all jars +loaded with `install_jar` as unnamed-module, legacy classpath code. + +For more, see [PL/Java and the Java Platform Module System](../use/jpms.html). + ## Byte order for PL/Java-implemented user-defined types PL/Java is free of byte-order issues except when using its features for building @@ -19,19 +77,33 @@ the topic and an advance notice of an expected future migration step. Class data sharing is a commonly-supported Java virtual machine feature that reduces both VM startup time and memory footprint by having many of -Java's runtime classes preprocessed into a `classes.jsa` file that can +Java's runtime classes preprocessed into a file that can be quickly memory-mapped into the process at Java startup, and shared -if there are multiple processes running Java VMs. It is enabled by including +if there are multiple processes running Java VMs. 
- -Xshare:on +How to set it up differs depending on the Java VM in use, Hotspot +(in [Oracle Java][orjava] or [OpenJDK with Hotspot][OpenJDK]), or OpenJ9 +(an [alternate JVM][hsj9] also available with [OpenJDK][]). The instructions on +this page are specific to Hotspot. For the corresponding feature in OpenJ9, +which is worth a good look, see the [class sharing in OpenJ9][cdsJ9] page. +For Hotspot, class data sharing is enabled by including +`-Xshare:on` or `-Xshare:auto` in the `pljava.vmoptions` string. In rough terms in 64-bit Java 8 it can reduce the 'Metaspace' size per PL/Java backend by slightly over 4 MB, and cut about 15 percent from the PL/Java startup time per process. Sharing may be enabled automatically if the Java VM runs in `client` mode -(described below), but usually *must* be turned on with `-Xshare:on` if the -VM runs in `server` mode. +(described below), but may need to be turned on with `-Xshare:on` or +`-Xshare:auto` if the VM runs in `server` mode. + +The `on` setting can be useful for quickly confirming that sharing works, +as it will report a hard failure if anything is amiss. However, `auto` is +recommended in production: on an operating system with address-space layout +randomization, it is possible for some backends to (randomly) fail to map +the share. Under the `auto` setting, they will proceed without sharing (and +with higher resource usage, which may not be ideal), whereas with the `on` +setting they would simply fail. *Note: the earliest documentation on class data sharing, dating to Java 1.5, suggested that the feature was not available at all in server mode. In recent @@ -65,9 +137,10 @@ installed. It can be created with the simple command `java -Xshare:dump` ### Preloading PL/Java's classes as well as the Java runtime's -The basic class data sharing feature includes only Java's own runtime -classes in the shared archive. When using Java 8 from Oracle, 8u40 or -later, an expanded feature called [application class data sharing][appcds] +In Hotspot, the basic class data sharing feature includes only Java's own +runtime classes in the shared archive. When using Java 8 from Oracle, 8u40 or +later, or OpenJDK with Hotspot starting with Java 10, an expanded feature +called [application class data sharing][appcds] is available, with which you can build a shared archive that preloads PL/Java's classes as well as Java's own. In rough terms this doubles the improvement in startup time seen with basic class data sharing alone, @@ -75,10 +148,14 @@ for a total improvement (compared to no sharing) of 30 to 35 percent. It also allows the memory per backend to be even further scaled back, as discussed under "plausible settings" below. +([In OpenJ9][cdsJ9], the basic class sharing feature since Java 8 already +includes this ability, with no additional setup steps needed.) + #### Licensing considerations Basic class data sharing is widely available with no restriction, but -*application class data sharing* is a "commercial feature" exclusive to +*application class data sharing* [in Oracle Java][orjava] is a +"commercial feature" that first appeared in Oracle Java 8. It will not work unless `pljava.vmoptions` also contain `-XX:+UnlockCommercialFeatures` , with implications described in the "supplemental license terms" of the Oracle @@ -89,14 +166,61 @@ negotiating an additional agreement with Oracle if the feature will be used purpose." It is available to consider for any application where the additional performance margin can be given a price.
-Looking ahead to Java 9, there are some promising signs that OpenJDK may have -an equivalent feature. - -Here are the [instructions for setting up application class data sharing][iads]. - +[In OpenJDK (with Hotspot)][OpenJDK], starting in Java 10, the same feature +is available and set up in the same way, but is freely usable; it does not +require any additional license, and does not require any +`-XX:+UnlockCommercialFeatures` to be added to the options. + +Starting in Java 11, Oracle offers +[Oracle-branded downloads of both "Oracle JDK" and "Oracle's OpenJDK builds"][o] +that are "functionally identical aside from some cosmetic and packaging +differences". "Oracle's OpenJDK builds" may be used for production or +commercial purposes with no additional licensing, while any such use of +"Oracle JDK" requires a commercial license. The application class data sharing +feature is available in both, and no longer requires the +`-XX:+UnlockCommercialFeatures` in either case (not in "Oracle's OpenJDK builds" +because their use is unrestricted, and not in "Oracle JDK" because the +"commercial feature" is now, effectively, the entire JDK). + +[In OpenJDK (with OpenJ9)][OpenJDK], the class-sharing feature present from +Java 8 onward will naturally share PL/Java's classes as well as the Java +runtime's, with no additional setup steps. + +Here are the instructions for +[setting up application class data sharing in Hotspot][iads]. + +Here are instructions for [setting up class sharing (including application +classes!) in OpenJ9][cdsJ9]. + +[orjava]: http://www.oracle.com/technetwork/java/javase/downloads/index.html +[OpenJDK]: https://adoptopenjdk.net/ +[hsj9]: https://www.eclipse.org/openj9/oj9_faq.html [appcds]: http://docs.oracle.com/javase/8/docs/technotes/tools/unix/java.html#app_class_data_sharing [bcl]: http://www.oracle.com/technetwork/java/javase/terms/license/index.html [iads]: appcds.html +[vmoptJ9]: oj9vmopt.html +[cdsJ9]: oj9vmopt.html#How_to_set_up_class_sharing_in_OpenJ9 +[o]: https://blogs.oracle.com/java-platform-group/oracle-jdk-releases-for-java-11-and-later +[cdsaot]: http://web.archive.org/web/20191020025455/https://blog.gilliard.lol/2017/10/04/AppCDS-and-Clojure.html + +## `-XX:AOTLibrary=` + +JDK 9 and later have included a tool, `jaotc`, that does ahead-of-time +compilation of class files to native code, producing a shared-object file +that can be named with the `-XX:AOTLibrary` option. Options to the `jaotc` +command can specify which jars, modules, individual classes, or individual +methods to compile and include. Optionally, `jaotc` can include additional +metadata with the compiled code (at the cost of a slightly larger file), +so that the Java runtime's tiered JIT compiler can still further optimize +the compiled-in-advance methods that turn out to be hot spots. + +To make a library of manageable size, a list of touched classes and methods +from a sample run can be made, much as described above for application class +data sharing. + +A [blog post by Matthew Gilliard][cdsaot] reports successfully combining `jaotc` +compilation and application class data sharing with good results, and goes into +some detail on the preparation steps. ## `-XX:+DisableAttachMechanism` @@ -157,6 +281,8 @@ The `G1` collector, favored as `ConcMarkSweep`'s replacement, uses slightly more space to work, while `Parallel` and `ParallelOld` will occupy more than double the space of any of these. 
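As one illustrative starting point to measure against (the figures here are arbitrary placeholders, and `G1` is only one of the collector choices discussed above):

```
SET pljava.vmoptions TO '-Xms8m -Xmx64m -XX:+UseG1GC';
```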
+[gcchoice]: https://docs.oracle.com/javase/8/docs/technotes/guides/vm/gctuning/collectors.html + ### Plausible settings The optimal memory settings and garbage collector for a specific PL/Java @@ -187,3 +313,11 @@ collection. In a test using PL/Java to do trivial work (nothing but `SELECT sqlj.get_classpath('public')`), the sweet spot comes around `-Xms5m` (which seems to end up allocating 6, but completes with no GC in my testing). + +### Performance tuning tips on the wiki + +Between releases of this documentation, breaking news, tips, and metrics +on PL/Java performance tuning may be shared +[on the performance-tuning wiki page][ptwp]. + +[ptwp]: https://github.com/tada/pljava/wiki/Performance-tuning diff --git a/src/site/markdown/releasenotes-pre1_6.md.vm b/src/site/markdown/releasenotes-pre1_6.md.vm new file mode 100644 index 00000000..730a7deb --- /dev/null +++ b/src/site/markdown/releasenotes-pre1_6.md.vm @@ -0,0 +1,1654 @@ +# Release notes, releases prior to PL/Java 1.6 + +#set($h2 = '##') +#set($h3 = '###') +#set($h4 = '####') +#set($h5 = '#####') +#set($gborgbug = 'http://web.archive.org/web/20061208113236/http://gborg.postgresql.org/project/pljava/bugs/bugupdate.php?') +#set($pgfbug = 'https://web.archive.org/web/*/http://pgfoundry.org/tracker/?func=detail&atid=334&group_id=1000038&aid=') +#set($pgffeat = 'https://web.archive.org/web/*/http://pgfoundry.org/tracker/?func=detail&atid=337&group_id=1000038&aid=') +#set($ghbug = 'https://github.com/tada/pljava/issues/') +#set($ghpull = 'https://github.com/tada/pljava/pull/') + +## A nice thing about using Velocity is that each release can be entered at +## birth using h2 as its main heading, h3 and below within ... and then, when +## it is moved under 'earlier releases', just define those variables to be +## one heading level finer. Here goes: +#set($h2 = '###') +#set($h3 = '####') +#set($h4 = '#####') +#set($h5 = '######') + +$h2 PL/Java 1.5.8 + +1.5.8 adds support for PostgreSQL 14, fixes two bugs, and begins preparation +for the impact of changes to Java's permission enforcement coming in Java 17 +and later with JEP 411. + +$h3 PL/Java with Java 17 and later: JEP 411 + +Current versions of PL/Java rely on Java security features that will be affected +by JEP 411, beginning with Java 17. Java 17 itself will continue to provide the +needed capabilities, with only deprecation marks and warnings added. Java 17 is +also positioned as a long-term support release, so the option of continuing to +run a current (1.5.8 or latest 1.6) PL/Java release without loss of function +will be available, if needed, by continuing to run with Java 17. + +The PL/Java 1.5 series has had a good run, and is not expected to receive +backports of future, post-JEP 411 functionality. + +For more on how PL/Java will adapt, please read about [JEP 411][jep411] on +the PL/Java wiki. + +$h3 Bugs fixed + +* [MalformedInputException: Input length = 1 in 1.5](${ghbug}340) + + The bug was exercised in a database where `server_encoding` was `SQL_ASCII`. + The more-principled treatment of `SQL_ASCII` in PL/Java 1.6 has been + backported, and is described [here][sqlascii]. + +* [Crash in autovacuum if a PL/Java functional index exists](${ghbug}355) + + Functional indexes over PL/Java functions can now be autovacuumed + in PG 9.5 or later. In earlier versions, a `feature_not_supported` error + will be reported instead of crashing. 
That can be avoided by setting the + `autovacuum_enabled` storage parameter to `false` on any table having + a Java-based functional index, and running explicit `VACUUM` periodically + on such tables. Prior to PG 8.4, there was no such per-table setting, and + the only workaround would be to forego autovacuum database-wide and use + explicit `VACUUM` on some schedule. + +[jep411]: https://github.com/tada/pljava/wiki/JEP-411 +[sqlascii]: use/charsets.html#The_special_encoding_SQL_ASCII + +$h3 Credits + +Thanks to `yazun` and `ricdhen` for reporting the bugs fixed in this release. + +$h2 PL/Java 1.5.7 (16 November 2020) + +1.5.7 is a bug-fix release, with a single issue backpatched from the 1.6 +branch, correcting a problem in XML Schema validation in some non-`en_US` +locales. + +$h3 Bugs fixed + +* [XML Schema regression-test failure in de_DE locale](${ghbug}312) + +$h3 Credits + +Thanks to Christoph Berg for the report. + +$h2 PL/Java 1.5.6 (4 October 2020) + +This release adds support for PostgreSQL 13. + +It includes improvements to the JDBC 4.0 `java.sql.SQLXML` API that first became +available in 1.5.1, an update of the ISO SQL/XML examples based on the Saxon +product to Saxon 10 (which now includes support for XML Query higher-order +functions in the freely-licensed Saxon-HE), some improvements to internals, +and a number of bug fixes. + +$h3 Version compatibility + +PL/Java 1.5.6 can be built against recent PostgreSQL versions including 13, +and older ones back to 8.2, using Java SE 8 or later. The source code avoids +features newer than Java 6, so building with Java 7 or 6 should also be +possible, but is no longer routinely tested. The Java version used at runtime +does not have to be the same version used for building. PL/Java itself can run +on any Java version 6 or later if built with Java 11 or earlier; it can run +on Java 7 or later if built with Java 12 or later. PL/Java functions can be +written for, and use features of, whatever Java version will be loaded at run +time. See [version compatibility][versions] for more detail. + +PL/Java 1.5.6 cannot be built with Java 15 or later, as the Nashorn JavaScript +engine used in the build process no longer ships with Java 15. It can be built +with [GraalVM][], if `-Dpolyglot.js.nashorn-compat` is added to the `mvn` +command line. It will run on Java 15 if built with an earlier JDK or with Graal. + +When used with GraalVM as the runtime VM, PL/Java functions can use Graal's +"polyglot" capabilities to execute code in any other language available on +GraalVM. In this release, it is not yet possible to directly declare a function +in a language other than Java. + +$h3 Changes + +$h4 Improvements to the `java.sql.SQLXML` type + +Additions to the `Adjusting.XML` API support +[limiting resource usage][xmlreslim] in XML processing, controlling +[resolution][xmlresolv] of external documents and resources, +[validation against a schema][xmlschema], and integration of an +[XML catalog][xmlcatalog] to locally satisfy requests for external documents. + +Corrections and new documentation of [whitespace handling][xmlws] in XML values +of `CONTENT` form, and implementation [limitations][xmlimpl]. + +$h4 Improvements to the Saxon-based ISO SQL/XML example functions + +Updated the dependency for these optional examples to Saxon 10. 
Probably the +most significant of the [Saxon 10 changes][saxon10], for PostgreSQL's purposes, +will be that the XQuery [higher-order function feature][xqhof] is now included +in the freely-licensed Saxon-HE, so that it is now possible without cost to +integrate a modern XQuery 3.1 implementation that is lacking only the +[schema-aware feature][xqsaf] and the [typed data feature][xqtdf] (for those, +the paid Saxon-EE product is needed), and the [static typing feature][xqstf] +(which is not in any Saxon edition). + +To compensate for delivering the higher-order function support in -HE, +Saxonica moved certain optimizations to -EE. This seems a justifiable trade, as +it is better for development purposes to have the more complete implementation +of the language, leaving better optimization to be bought if and when needed. + +Thanks to a tip from Saxon's developer, the returning of results to SQL is now +done in a way that may incur less copying in some cases. + +$h4 Internals + +* Many sources of warnings reported by the Java VM's `-Xcheck:jni` option have + been tracked down, making it practical to use `-Xcheck:jni` in testing. +* Reduced pressure on the garbage collector in management of references to + PostgreSQL native state. + +$h3 Enhancement requests addressed + +* Work around PostgreSQL [API breakage in EnterpriseDB 11](${ghbug}260) + +$h3 Bugs fixed + +* [Support of arrays in composite types](${ghbug}300) +* [Order-dependent behavior caching array types](${ghbug}310) +* [Date conversion errors possible with PostgreSQL 10 on Windows/MSVC](${ghbug}297) +* [Build issue with Windows/MinGW-w64](${ghbug}282) +* ["xmltable" with XML output column or parameter](${ghbug}280) +* [Google Summer of Code catches 15-year-old PL/Java bug](${ghbug}274) +* [Several bugs in SQLXML handling](${ghbug}272) +* Work around an exception from `Reference.clear` on OpenJ9 JVM +* Bugs in SQL generator when supplying a function parameter name, or the + `category`, `delimiter`, or `storage` attribute of a user-defined type. + +$h3 Updated PostgreSQL APIs tracked + +* Removal of `CREATE EXTENSION ... FROM unpackaged` +* `numvals` in `SPITupleTable` +* `detoast.h` +* `detoast_external_attr` + +$h3 Credits + +There is a PL/Java 1.5.6 thanks in part to +Christoph Berg, +Chapman Flack, +Kartik Ohri, +original creator Thomas Hallgren, +and the many contributors to earlier versions. + +The work of Kartik Ohri in summer 2020 was supported by Google Summer of Code. 
+
+[xmlreslim]: use/sqlxml.html#Additional_adjustments_in_recent_Java_versions
+[xmlresolv]: use/sqlxml.html#Supplying_a_SAX_or_DOM_EntityResolver_or_Schema
+[xmlschema]: use/sqlxml.html#Validation_against_a_schema
+[xmlcatalog]: use/sqlxml.html#Using_XML_Catalogs_when_running_on_Java_9_or_later
+[xmlws]: use/sqlxml.html#Effect_on_parsing_of_whitespace
+[xmlimpl]: use/sqlxml.html#Known_limitations
+[saxon10]: https://www.saxonica.com/html/documentation/changes/v10/installation.html
+[xqhof]: https://www.w3.org/TR/xquery-31/#id-higher-order-function-feature
+[xqsaf]: https://www.w3.org/TR/xquery-31/#id-schema-aware-feature
+[xqtdf]: https://www.w3.org/TR/xquery-31/#id-typed-data-feature
+[xqstf]: https://www.w3.org/TR/xquery-31/#id-static-typing-feature
+
+$h2 PL/Java 1.5.5 (4 November 2019)
+
+This bug-fix release fixes runtime issues reported in 32-bit `i386` builds, some
+of which would not affect a more common 64-bit architecture, but some of which
+could under the wrong circumstances, so this release should be used in
+preference to 1.5.4 or 1.5.3 on any architecture.
+
+It is featurewise identical to 1.5.4, so those release notes, below, should be
+consulted for the details of user-visible changes.
+
+Thanks to Christoph Berg for the `i386` testing that exposed these issues.
+
+$h3 Bugs fixed
+
+* [32bit i386 segfault](${ghbug}246)
+
+$h2 PL/Java 1.5.4 (29 October 2019)
+
+This minor release fixes a build issue reported with Java 11, and adds
+support for building with Java 13. Issues with building the javadocs in later
+Java versions are resolved. A work-in-progress feature that can
+[apply the SQLXML API to other tree-structured data types](use/xmlview.html)
+is introduced.
+
+Documentation updates include coverage of
+[changes to Application Class Data Sharing](install/appcds.html) in recent
+Hotspot versions, and ahead-of-time compilation using
+[jaotc](install/vmoptions.html#a-XX:AOTLibrary).
+
+Otherwise, the release notes for 1.5.3, below, should be
+consulted for the details of recent user-visible changes.
+
+$h3 Bugs fixed
+
+* [Build failure with Java 11 and --release](${ghbug}235)
+* [Build with Java 13](${ghbug}236)
+* [Javadoc build fails in Java 11+](${ghbug}239)
+* [Javadoc build fails in Java 13](${ghbug}241)
+
+$h2 PL/Java 1.5.3 (4 October 2019)
+
+This release adds support for PostgreSQL 12, and removes the former
+requirement to build with a Java release earlier than 9.
+
+It includes a rework of threading and resource management, improvements to
+the JDBC 4.0 `java.sql.SQLXML` API that first became available in 1.5.1, and
+a substantially usable example providing the functionality of ISO SQL
+`XMLEXISTS`, `XMLQUERY`, `XMLTABLE`, `XMLCAST`, `LIKE_REGEX`,
+`OCCURRENCES_REGEX`, `POSITION_REGEX`, `SUBSTRING_REGEX`, and `TRANSLATE_REGEX`.
+Some bugs are fixed.
+
+$h3 Version compatibility
+
+PL/Java 1.5.3 can be built against recent PostgreSQL versions including 12,
+and older ones back to 8.2, using Java SE 8 or later. The source code avoids
+features newer than Java 6, so building with Java 7 or 6 should also be
+possible, but is no longer routinely tested. The Java version used at runtime
+does not have to be the same version used for building. PL/Java itself can run
+on any Java version 6 or later if built with Java 11 or earlier; it can run
+on Java 7 or later if built with Java 12. PL/Java functions can be written for,
+and use features of, whatever Java version will be loaded at run time. See
+[version compatibility][versions] for more detail.
+ +When used with [GraalVM][] as the runtime VM, PL/Java functions can use its +"polyglot" capabilities to execute code in any other language available on +GraalVM. In this release, it is not yet possible to directly declare a function +in a language other than Java. + +$h3 Changes + +$h4 Threading/synchronization, finalizers, and new configuration variable + +Java is multithreaded while PostgreSQL is not, requiring ways to prevent +Java threads from entering PostgreSQL at the wrong times, while cleaning up +native resources in PostgreSQL when PL/Java references are released, and +_vice versa_. + +PL/Java has historically used an assortment of approaches including Java +object finalizers, which have long been deprecated informally, and formally +since Java 9. Finalizers enter PostgreSQL from a thread of their own, and the +synchronization approach used in PL/Java 1.5.2 and earlier has been associated +with occasional hangs at backend exit when using an OpenJ9 JVM at runtime. + +A redesigned approach using a new `DualState` class was introduced in 1.5.1, +at first only used in implementing the `java.sql.SQLXML` type, a newly-added +feature. In 1.5.3, other approaches used in the rest of PL/Java's code base are +migrated to use `DualState` also, and all uses of the deprecated Java object +finalizers have been retired. With the new techniques, the former occasional +OpenJ9 hangs have not been observed. + +This represents the most invasive change to PL/Java's thread synchronization +in many years, so it may be worthwhile to reserve extra time for +testing applications. + +A new [configuration variable](use/variables.html), +`pljava.java_thread_pg_entry`, allows adjusting the thread policy. The default +setting, `allow`, preserves PL/Java's former behavior, allowing Java threads +entry into PostgreSQL one at a time, only when any thread already in PG code +has entered or returned to Java. + +With object finalizers no longer used, PL/Java itself does not need the `allow` +mode, but there may be application code that does. Application code can be +tested by setting the `error` mode, which will raise an error for any attempted +entry to PG from a thread other than the original thread that launched PL/Java. +If an application runs in `error` mode with no errors, it can also be run in +`block` mode, which may be more efficient, as it eliminates many locking +operations that happen in `allow` or `error` mode. However, if `block` mode +is used with an application that has not been fully tested in `error` mode +first, and the application does attempt to enter PostgreSQL from a Java thread +other than the initial one, the result can be blocked threads or a deadlocked +backend that has to be killed. + +A JMX management client like `JConsole` or `jvisualvm` can identify threads that +are blocked, if needed. The new `DualState` class also offers some statistics +that can be viewed in `JConsole`, or `jvisualvm` with the `VisualVM-MBeans` +plugin. + +$h4 Improvements to the `java.sql.SQLXML` type + +Support for this JDBC 4.0 type was added in PL/Java 1.5.1. Release 1.5.3 +includes the following improvements: + +* A new ["Adjusting" API](use/sqlxml.html#Extended_API_to_configure_XML_parsers) + exposes configuration settings for Java XML parsers that may be created + internally during operations on `SQLXML` instances. 
That allows the default + settings to restrict certain XML parser features as advocated by the + [Open Web Application Security Project][OWASP] when XML content may be + coming from untrusted sources, with a simple API for relaxing those + restrictions when appropriate for XML content from a known source. +* It is now possible for a PL/Java function to return, pass into a + `PreparedStatement`, etc., an `SQLXML` instance that PL/Java did not create. + For example, a PL/Java function could use another database's JDBC driver to + obtain a `SQLXML` value from that database, and use that as its own return + value. Transparently, the content is copied to a PL/Java `SQLXML` instance. + The copy can also be done explicitly, allowing the "Adjusting" API to be + used if the default XML parser restrictions should be relaxed. +* Behavior when the server encoding is not UTF-8, or when it is not an + IANA-registered encoding (even if Java has a codec for it), has been + improved. + +$h4 Improvements to the Saxon-based ISO SQL/XML example functions + +Since PL/Java 1.5.1, the supplied examples have included a not-built-by-default +[example supplying ISO SQL/XML features missing from core PostgreSQL][exsaxon]. +It is not built by default because it raises the minimum Java version to 8, and +brings in the Saxon-HE XML-processing library. + +In 1.5.3, the example now provides versions of the ISO SQL `XMLEXISTS`, +`XMLQUERY`, `XMLTABLE`, and `XMLCAST` functions based on the W3C XQuery +language as ISO SQL specifies (while PostgreSQL has an "XMLTABLE" function +since release 10 and "XMLEXISTS" since 9.1, they have +[numerous limitations][appD31] inherited from a library that does not support +XQuery, and additional peculiarities prior to PostgreSQL 12), and the ISO SQL +`LIKE_REGEX`, `OCCURRENCES_REGEX`, `POSITION_REGEX`, `SUBSTRING_REGEX`, and +`TRANSLATE_REGEX` functions that apply XQuery regular expressions. It also +includes the `XMLTEXT` function, which is rather trivial, but also missing from +core PostgreSQL, and supplied here for completeness. + +As plain user-defined functions without special treatment in PostgreSQL's SQL +parser, these functions cannot be used with the exact syntax specified in +ISO SQL, but require simple rewriting into equivalent forms that are valid +ordinary PostgreSQL function calls. The rewritten forms are intended to be easy +to read and correspond closely to the ISO syntax. + +While still presented as examples and not a full implementation, these functions +are now intended to be substantially usable (subject to minor +[documented limits][exsaxon]), and testing and reports of shortcomings are +welcome. + +$h4 ResultSet holdability again + +A `ResultSet` obtained from a query done in PL/Java would return the value +`CLOSE_CURSORS_AT_COMMIT` to a caller of its `getHoldability` method, but in +reality would become unusable as soon as the PL/Java function creating it +returned to PostgreSQL. That was fixed in PL/Java 1.5.1 for a `ResultSet` +obtained from a `Statement`, but not for one obtained from a +`PreparedStatement`. It now correctly remains usable to the end of the +transaction in either case. + +$h4 Savepoint behavior at rollback + +Per JDBC, a `Savepoint` still exists after being used in a rollback, and can be +used again; the rollback only invalidates any `Savepoint` that had been created +after the one being rolled back. That should be familiar behavior, as it is the +same as PostgreSQL's own SQL `SAVEPOINT` behavior. 
It is also correct in pgJDBC, +which has test coverage to confirm it. PL/Java has been doing it wrong. + +In 1.5.3 it now has the JDBC-specified behavior. For compatibility with existing +application code, the meaning of the `pljava.release_lingering_savepoints` +[configuration variable](use/variables.html) has been adjusted. The setting +tells PL/Java what to do if a `Savepoint` still exists, neither released nor +rolled back, at the time a function exits. If `on`, the savepoint is released +(committed); if `off`, the savepoint is rolled back. A warning is issued in +either case. + +In an existing function that used savepoints and assumed that a rolled-back +savepoint would be no longer live, it will now be normal for such a savepoint +to reach the function exit still alive. To recognize this case, PL/Java tracks +whether any savepoint has been rolled back at least once. At function exit, any +savepoint that has been neither released nor ever rolled back is disposed of +according to the `release_lingering_savepoints` setting and with a warning, +as before, but any savepoint that has already been rolled back at least once +is simply released, regardless of the variable setting, and without producing +a warning. + +$h4 Control of function parameter names in generated SQL + +When generating the `CREATE FUNCTION` command in a deployment descriptor +according to an annotated Java function, PL/Java ordinarily gives the function +parameters names that match their Java names, unquoted. Because PostgreSQL +allows named notation when calling a function, the parameter names in its +declaration become part of its signature that cannot later be changed without +dropping and re-creating the function. + +In some cases, explicit control of the SQL parameter names may be wanted, +independently of the Java names: to align with an external standard, perhaps, +or when either the SQL or the Java name would collide with a reserved word. +For that purpose, the (already slightly overloaded) `@SQLType` annotation now +has a `name` attribute that can specify the SQL name of the annotated parameter. + +$h4 Documentation + +The user guide and guide for packagers contained incorrect instructions for +using Maven to build a single subproject of PL/Java (such as `pljava-api` or +`pljava-examples`) instead of the full project. Those have been corrected. + +$h3 Enhancement requests addressed + +* [Allow building with Java releases newer than 8](${ghbug}212) + +$h3 Bugs fixed + +* [ResultSet holdability still wrong when using PreparedStatement](${ghbug}209) +* [Can't return (or set/update PreparedStatement/ResultSet) non-PL/Java SQLXML object](${ghbug}225) +* [JDBC Savepoint behavior](${ghbug}228) +* Writing `SQLXML` via StAX when server encoding is not UTF-8 +* StAX rejecting server encoding if not an IANA-registered encoding +* Error handling when PL/Java startup fails + (may have been [issue 211](${ghbug}211)) +* SPI connection management for certain set-returning functions + +$h3 Updated PostgreSQL APIs tracked + +* Retirement of `dynloader.h` +* Retirement of magical Oids +* Retirement of `nabstime` +* Retirement of `pg_attrdef.adsrc` +* Extensible `TupleTableSlot`s +* `FunctionCallInfoBaseData` + +$h3 Credits + +There is a PL/Java 1.5.3 thanks in part to +Christoph Berg, +Chapman Flack, +`ppKrauss`, +original creator Thomas Hallgren, +and the many contributors to earlier versions. 
+ +[GraalVM]: https://www.graalvm.org/ +[OWASP]: https://www.owasp.org/index.php/About_The_Open_Web_Application_Security_Project +[appD31]: https://www.postgresql.org/docs/12/xml-limits-conformance.html + +$h2 PL/Java 1.5.2 (5 November 2018) + +A pure bug-fix release, correcting a regression in 1.5.1 that was not caught +in pre-release testing, and could leave +[conversions between PostgreSQL `date` and `java.sql.Date`](${ghbug}199) off +by one day in certain timezones and times of the year. + +1.5.1 added support for the newer `java.time` classes from JSR 310 / JDBC 4.2, +which are [recommended as superior alternatives](use/datetime.html) to the +older conversions involving `java.sql.Date` and related classes. The new +versions are superior in part because they do not have hidden timezone +dependencies. + +However, the change to the historical `java.sql.Date` conversion behavior was +inadvertent, and is fixed in this release. + +$h3 Open issues with date/time/timestamp conversions + +During preparation of this release, other issues of longer standing were also +uncovered in the legacy conversions between PG `date`, `time`, and +`timestamp` classes and the `java.sql` types. They are detailed in +[issue #200](${ghbug}200). Because they are not regressions but long-established +behavior, they are left untouched in this release, and will be fixed in +a future release. + +The Java 8 `java.time` conversions are free of these issues as well. + +$h2 PL/Java 1.5.1 (17 October 2018) + +This release adds support for PostgreSQL 9.6, 10, and 11, +and plays more nicely with `pg_upgrade`. If a PostgreSQL installation +is to be upgraded using `pg_upgrade`, and is running a version of +PL/Java before 1.5.1, the PL/Java version should first be upgraded +in the running PostgreSQL version, and then the PostgreSQL `pg_upgrade` +can be done. + +The documentation is expanded on the topic of shared-memory precompiled +class cache features, which can substantially improve JVM startup time +and memory footprint, and are now available across Oracle Java, OpenJDK +with Hotspot, and OpenJDK with OpenJ9. When running on OpenJ9, PL/Java +cooperates with the JVM to include even the application's classes +(those loaded with `install_jar`) in the shared cache, something not +yet possible with Hotspot. While the advanced sharing feature in Oracle +Java is still subject to a commercial licensing encumbrance, the equivalent +(or superior, with OpenJ9) features in OpenJDK are not encumbered. + +Significant new functionality includes new datatype mapping support: +SQL `date`, `time`, and `timestamp` values can be mapped to the new +Java classes of the `java.time` package in Java 8 and later (JSR 310 / +JDBC 4.2), which are much more faithful representations of the values +in SQL. Values of `xml` type can be manipulated efficiently using the +JDBC 4.0 `SQLXML` API, supporting several different APIs for XML +processing in Java. + +For Java code that does not use the new date/time classes in the +`java.time` package, some minor conversion inaccuracies (less than +two seconds) in the older mapping to `java.sql.Timestamp` have been +corrected. + +Queries from PL/Java code now produce `ResultSet`s that are usable to the +end of the containing transaction, as they had already been claiming to be. + +With PostgreSQL 9.6 support comes the ability to declare functions +`PARALLEL { UNSAFE | RESTRICTED | SAFE }`, and with PG 10 support, +transition tables are available to triggers. 
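+
+As a small illustration of the new `parallel` attribute mentioned above, a
+function like the one sketched below could be declared `PARALLEL SAFE` by the
+SQL generator. This is only a sketch: the `parallel` and `effects` attributes
+are the ones documented for the `@Function` annotation, but the nested-enum
+names and static-import style shown here are an assumption made for the
+example, not copied from that documentation.
+
+```java
+import org.postgresql.pljava.annotation.Function;
+
+// assumed locations of the nested enums; check the Function annotation javadoc
+import static org.postgresql.pljava.annotation.Function.Effects.IMMUTABLE;
+import static org.postgresql.pljava.annotation.Function.Parallel.SAFE;
+
+public class ParallelHello
+{
+	/*
+	 * The SQL generator would emit a CREATE FUNCTION declaring this
+	 * function IMMUTABLE and PARALLEL SAFE.
+	 */
+	@Function(effects = IMMUTABLE, parallel = SAFE)
+	public static String helloTwice(String who)
+	{
+		return "Hello, " + who + "! Hello, " + who + "!";
+	}
+}
+```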
+ +$h3 Security + +$h4 Schema-qualification + +PL/Java now more consistently schema-qualifies objects in queries and DDL +it generates internally, as a measure of defense-in-depth in case the database +it is installed in has not been [protected][prot1058] from [CVE-2018-1058][]. + +_No schema-qualification work has been done on the example code._ If the +examples jar will be installed, it should be in a database that +[the recommended steps have been taken to secure][prot1058]. + +$h4 Some large-object code removed + +1.5.1 removes the code at issue in [CVE-2016-0768][], which pertained to +PostgreSQL large objects, but had never been documented or exposed as API. + +This is not expected to break any existing code at all, based on further +review showing the code in question had also been simply broken, since 2006, +with no reported issues in that time. That discovery would support an argument +for downgrading the severity of the reported vulnerability, but with no need +to keep that code around, it is more satisfying to remove it entirely. + +Developers wishing to manipulate large objects in PL/Java are able to do so +using the SPI JDBC interface and the large-object SQL functions already +available in every PostgreSQL version PL/Java currently supports. + +[CVE-2018-1058]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1058 +[prot1058]: https://wiki.postgresql.org/wiki/A_Guide_to_CVE-2018-1058:_Protect_Your_Search_Path#Next_Steps:_How_Can_I_Protect_My_Databases.3F + +$h3 Version compatibility + +PL/Java 1.5.1 can be built against recent PostgreSQL versions including 11, +and older ones back to 8.2, using Java SE 8, 7, or 6. It can _run_ using newer +Java versions including Java 11. PL/Java functions can be written for, and use +features of, the Java version loaded at run time. See +[version compatibility][versions] for more detail. + +OpenJDK is supported, and can be downloaded in versions using the Hotspot or the +OpenJ9 JVM. Features of modern Java VMs can drastically reduce memory footprint +and startup time, in particular class-data sharing. Several choices of Java +runtime now offer such features: Oracle Java has a simple class data sharing +feature for the JVM itself, freely usable in all supported versions, and an +"application class data sharing" feature in Java 8 and later that can also share +the internal classes of PL/Java, but is a commercial feature requiring a +license from Oracle. As of Java 10, the same application class sharing feature +is present in OpenJDK/Hotspot, where it is freely usable without an additional +license. OpenJDK/OpenJ9 includes a different, and very sophisticated, class +sharing feature, freely usable from Java 8 onward. More on these features +can be found [in the installation docs][vmopts]. + +$h3 Changes + +$h4 Typing of parameters in prepared statements + +PL/Java currently does not determine the necessary types of `PreparedStatement` +parameters from the results of PostgreSQL's own type analysis of the query +(as a network client would, when using PostgreSQL's "extended query" protocol). +PostgreSQL added the means to do so in SPI only in PostgreSQL 9.0, and a future +PL/Java major release should use it. However, this release does make two small +changes to the current behavior. + +Without the query analysis results from PostgreSQL, PL/Java tries to type the +prepared-statement parameters based on the types of values supplied by the +application Java code. 
It now has two additional ways to do so: + +* If Java code supplies a Java user-defined type (UDT)---that is, an object + implementing the `SQLData` interface---PL/Java will now call the `SQLData` + method `getSQLTypeName` on that object and use the result to pin down + the PostgreSQL type of the parameter. Existing code should already provide + this method, but could, in the past, have returned a bogus result without + detection, as PL/Java did not use it. + +* Java code can use the three-argument form of `setNull` to specify the exact + PostgreSQL type for a parameter, and then another method can be used to + supply a non-null value for it. If the following non-null value has + a default mapping to a different PostgreSQL type, in most cases it will + overwrite the type supplied with `setNull` and re-plan the query. That was + PL/Java's existing behavior, and was not changed for this minor release. + However, the new types introduced in this release---the `java.time` types + and `SQLXML`---behave in the way that should become universal in a future + major release: the already-supplied PostgreSQL type will be respected, and + PL/Java will try to find a usable coercion to it. + +$h4 Inaccuracies converting TIMESTAMP and TIMESTAMPTZ + +When converting between PostgreSQL values of `timestamp` or `timestamptz` type +and the pre-Java 8 original JDBC type `java.sql.Timestamp`, there were cases +where values earlier than 1 January 2000 would produce exceptions rather than +converting successfully. Those have been fixed. + +Also, converting in the other direction, from `java.sql.Timestamp` to a +PostgreSQL timestamp, an error of up to 1.998 seconds (averaging 0.999) +could be introduced. + +That error has been corrected. If an application has stored Java `Timestamp`s +and corresponding SQL `timestamp`s generated in the past and requires them +to match, it could be affected by this change. + +$h4 New date/time/timestamp API in Java 8 `java.time` package + +The old, and still default, mappings in JDBC from the SQL `date`, `time`, and +`timestamp` types to `java.sql.Date`, `java.sql.Time`, and `java.sql.Timestamp`, +were never well suited to represent the PostgreSQL data types. The `Time` and +`Timestamp` classes were used to map both the with-timezone and without-timezone +variants of the corresponding SQL types and, clearly, could not represent both +equally well. These Java classes all contain timezone dependencies, requiring +the conversion to involve timezone calculations even when converting non-zoned +SQL types, and making the conversion results for non-zoned types implicitly +depend on the current PostgreSQL session timezone setting. + +Applications are strongly encouraged to adopt Java 8 as a minimum language +version and use the new-in-Java-8 types in the `java.time` package, which +eliminate those problems and map the SQL types much more faithfully. +For PL/Java function parameters and returns, the class in the method declaration +can simply be changed. For retrieving date/time/timestamp values from a +`ResultSet` or `SQLInput` object, use the variants of `getObject` / `readObject` +that take a `Class` parameter. 
The class to use is:
+
+| PostgreSQL type | `java.time` class |
+|--:|:--|
+|`date`|`LocalDate`|
+|`time without time zone`|`LocalTime`|
+|`time with time zone`|`OffsetTime`|
+|`timestamp without time zone`|`LocalDateTime`|
+|`timestamp with time zone`|`OffsetDateTime`|
+[Correspondence of PostgreSQL date/time types and Java 8 `java.time` classes]
+
+Details on these mappings are [added to the documentation](use/datetime.html).
+
+$h4 Newly supported `java.sql.SQLXML` type
+
+PL/Java has not, until now, supported the JDBC 4.0 `SQLXML` type. PL/Java
+functions have been able to work with PostgreSQL XML values by mapping them
+as Java `String`, but that conversion could introduce character encoding issues
+outside the control of the XML APIs, and also has memory implications if an
+application stores, or generates in queries, large XML values. Even if the
+processing to be done in the application could be structured to run in constant
+bounded memory while streaming through the XML, a conversion to `String`
+requires the whole, uncompressed, character-serialized value to be brought into
+the Java heap at once, and any heap-size tuning has to account for that
+worst-case size. The `java.sql.SQLXML` API solves those problems by allowing
+XML manipulation with any of several Java XML APIs with the data remaining in
+PostgreSQL native memory, never brought fully into the Java heap unless that is
+what the application does. Heap sizing can be based on just the
+application's processing needs.
+
+The `SQLXML` type can take the place of `String` in PL/Java function parameters
+and returns simply by changing their declarations from `String` to `SQLXML`.
+When retrieving XML values from `ResultSet` or `SQLInput` objects, the legacy
+`getObject / readObject` methods will continue to return `String` for existing
+application compatibility, so the specific `getSQLXML / readSQLXML` methods, or
+the forms of `getObject / readObject` with a `Class` parameter and passing
+`SQLXML.class`, must be used. A [documentation page](use/sqlxml.html) has been
+added, and the [PassXML example][exxml] illustrates use of the API.
+
+A [not-built-by-default new example][exsaxon] (because it depends on Java 8 and
+the Saxon-HE XML-processing library) provides a partial implementation of true
+`XMLQUERY` and `XMLTABLE` functions for PostgreSQL, using the standard-specified
+XML Query language rather than the XPath 1.0 of the native PostgreSQL functions.
+
+[exxml]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/PassXML.html
+[exsaxon]: examples/saxon.html
+
+$h4 New Java property exposes the PostgreSQL server character-set encoding
+
+A Java system property, `org.postgresql.server.encoding`, is set to the
+canonical name of a supported Java `Charset` that corresponds to PostgreSQL's
+`server_encoding` setting, if one can be found. If the server encoding's name
+is not recognized as any known Java `Charset`, this property will be unset, and
+some functionality, such as the `SQLXML` API, may be limited. If a Java
+`Charset` does exist (or is made available through a `CharsetProvider`) that
+does match the PostgreSQL server encoding, but is not automatically selected
+because of a naming mismatch, the `org.postgresql.server.encoding` property can
+be set (with a `-D` in `pljava.vmoptions`) to select it by name.
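+
+As a combined illustration of the date/time and XML mappings described above,
+here is a minimal sketch of a PL/Java function. The `docs` table and its
+columns are invented for the example, and the internal-JDBC idioms shown
+(`jdbc:default:connection`, `setObject`, and `getObject` with a `Class`
+argument) are ordinary JDBC usage; treat this as a sketch rather than code
+copied from the PL/Java examples.
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.SQLXML;
+import java.time.LocalDate;
+
+import org.postgresql.pljava.annotation.Function;
+
+public class NewMappingsSketch
+{
+	/*
+	 * A PostgreSQL 'date' argument arrives as java.time.LocalDate simply by
+	 * declaring the parameter with that class, and an 'xml' result can be
+	 * returned as java.sql.SQLXML instead of String.
+	 */
+	@Function
+	public static SQLXML docForDay(LocalDate day) throws SQLException
+	{
+		Connection c = DriverManager.getConnection("jdbc:default:connection");
+		PreparedStatement ps =
+			c.prepareStatement("SELECT doc FROM docs WHERE day = ?");
+		ps.setObject(1, day);                 // the new java.time mapping
+		ResultSet rs = ps.executeQuery();
+		rs.next();
+		return rs.getObject(1, SQLXML.class); // Class-typed getObject form
+	}
+}
+```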
+ +$h4 ResultSet holdability + +A `ResultSet` obtained from a query done in PL/Java would return the value +`CLOSE_CURSORS_AT_COMMIT` to a caller of its `getHoldability` method, but in +reality would become unusable as soon as the PL/Java function creating it +returned to PostgreSQL. It now remains usable to the end of the transaction, +as claimed. + +$h4 PostgreSQL 9.6 and parallel query + +A function in PL/Java can now be [annotated][apianno] +`parallel={UNSAFE | RESTRICTED | SAFE}`, with `UNSAFE` the default. +A new [user guide section][ugparqry] explains the possibilities and +tradeoffs. (Good cases for marking a PL/Java function `SAFE` may be +rare, as pushing such a function into multiple background processes +will require them all to start JVMs. But if a practical application +arises, PostgreSQL's `parallel_setup_cost` can be tuned to help the +planner make good plans.) + +Although `RESTRICTED` and `SAFE` Java functions work in simple tests, +there has been no exhaustive audit of the code to ensure that PL/Java's +internal workings never violate the behavior constraints on such functions. +The support should be considered experimental, and could be a fruitful +area for beta testing. + +[ugparqry]: use/parallel.html + +$h4 Tuple counts widened to 64 bits with PostgreSQL 9.6 + +To accommodate the possibility of more than two billion tuples in a single +operation, the SPI implementation of the JDBC `Statement` interface now +provides the JDK 8-specified `executeLargeBatch` and `getLargeUpdateCount` +methods defined to return `long` counts. The original `executeBatch` and +`getUpdateCount` methods remain but, obviously, cannot return counts that +exceed `INT_MAX`. In case the count is too large, `getUpdateCount` will throw +an `ArithmeticException`; `executeBatch` will store `SUCCESS_NO_INFO` for +any statement in the batch that affected too many tuples to report. + +For now, a `ResultSetProvider` cannot be used to return more than `INT_MAX` +tuples, but will check that condition and throw an error to ensure predictable +behavior. + +$h4 `pg_upgrade` + +PL/Java should be upgraded to 1.5.1 in a database cluster, before that +cluster is binary-upgraded to a newer PostgreSQL version using `pg_upgrade`. +A new [Upgrading][upgrading] installation-guide section centralizes information +on both upgrading PL/Java in a database, and upgrading a database with PL/Java +in it. + +[upgrading]: install/upgrade.html + +$h4 Suppressing row operations from triggers + +In PostgreSQL, a `BEFORE ROW` trigger is able to allow the proposed row +operation, allow it with modified values, or silently suppress the operation +for that row. Way back in PL/Java 1.1.0, the way to produce the 'suppress' +outcome was for the trigger method to throw an exception. Since PL/Java 1.2.0, +however, an exception thrown in a trigger method is used to signal an error +to PostgreSQL, and there has not been a way to suppress the row operation. + +The `TriggerData` interface now has a [`suppress`][tgsuppress] method that +the trigger can invoke to suppress the operation for the row. + +[tgsuppress]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/TriggerData.html#suppress() + +$h4 Constraint triggers + +New attributes in the `@Trigger` annotation allow the SQL generator to +create constraint triggers (a type of trigger that can be created with SQL +since PostgreSQL 9.1). 
Such triggers will be delivered by the PL/Java runtime +(to indicate that a constraint would be violated, a constraint trigger +method should throw an informative exception). However, the trigger method +will have access, through the `TriggerData` interface, only to the properties +common to ordinary triggers; methods on that interface to retrieve properties +specific to constraint triggers have not been added for this release. + +$h4 PostgreSQL 10 and trigger transition tables + +A trigger [annotation][apianno] can now specify `tableOld="`_name1_`"` or +`tableNew="`_name2_`"`, or both, and the PL/Java function servicing the +trigger can do SPI JDBC queries and see the transition table(s) under the +given name(s). The [triggers example code][extrig] has been extended with +a demonstration. + +[extrig]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java + +$h4 Logging from Java + +The way the Java logging system has historically been plumbed to PostgreSQL's, +as described in [issue 125](${ghbug}125), can be perplexing both because it +is unaffected by later changes to the PostgreSQL settings after PL/Java is +loaded in the session, and because it has honored only `log_min_messages` +and ignored `client_min_messages`. The second part is easy to fix, so in +1.5.1 the threshold where Java discards messages on the fast path is +determined by the finer of `log_min_messages` and `client_min_messages`. + +$h4 Conveniences for downstream package maintainers + +The `mvn` command to build PL/Java will now accept an option to provide +a useful default for `pljava.libjvm_location`, when building a package for +a particular software environment where the likely path to Java is known. + +The `mvn` command will also accept an option to specify, by the path to +the `pg_config` executable, the PostgreSQL version to build against, in +case multiple versions exist on the build host. This was already possible +by manipulating `PATH` ahead of running `mvn`, but the option makes it more +explicit. + +A new [packaging section][packaging] in the build guide documents those +and a number of considerations for making a PL/Java package. 
+ +[packaging]: build/package.html + +$h3 Enhancement requests addressed + +$h4 In 1.5.1-BETA3 + +* [Add a ddr.reproducible option to SQL generator](${ghbug}186) + +$h4 In 1.5.1-BETA2 + +* [java 8 date/time api](${ghbug}137) +* [Annotations don't support CREATE CONSTRAINT TRIGGER](${ghbug}138) +* [Let annotations give defaults to row-type parameters](${ghpull}153) +* [Improve DDR generator on the dont-repeat-yourself dimension for UDT type mapping](${ghpull}159) +* [Support the JDBC 4.0 SQLXML type](${ghpull}171) + +$h3 Bugs fixed + +$h4 In 1.5.1-BETA3 + +* [self-install jar ClassCastException (...ConsString to String), some java 6/7 runtimes](${ghbug}179) +* [i386 libjvm_location gets mangled as .../jre/lib/1/server/libjvm.so](${ghbug}176) +* [java.lang.ClassNotFoundException installing examples jar](${ghbug}178) +* [Preprocessor errors building on Windows with MSVC](${ghbug}182) +* [Saxon example does not build since Saxon 9.9 released](${ghbug}185) +* [Segfault in VarlenaWrapper.Input on 32-bit](${ghbug}177) +* [Windows: self-install jar silently fails to replace existing files](${ghbug}189) +* [ERROR: java.sql.SQLException: _some Java class name_](${ghbug}192) +* [SetOfRecordTest with timestamp column influenced by environment ](${ghbug}195) + +$h4 In 1.5.1-BETA2 + +* [PostgreSQL 10: SPI_modifytuple failed with SPI_ERROR_UNCONNECTED](${ghbug}134) +* [SPIConnection prepareStatement doesn't recognize all parameters](${ghbug}136) +* [Ordinary (non-constraint) trigger has no way to suppress operation](${ghbug}142) +* [ResultSetHandle and column definition lists](${ghbug}146) +* [PreparedStatement doesn't get parameter types from PostgreSQL](${ghbug}149) + _(partial improvements)_ +* [internal JDBC: inaccuracies converting TIMESTAMP and TIMESTAMPTZ](${ghbug}155) +* [Missing type mapping for Java return `byte[]`](${ghbug}157) +* [The REMOVE section of DDR is in wrong order for conditionals](${ghbug}163) +* [Loading PL/Java reinitializes timeouts in PostgreSQL >= 9.3](${ghbug}166) +* [JDBC ResultSet.CLOSE_CURSORS_AT_COMMIT reported, but usable life shorter](${ghbug}168) + +$h4 In 1.5.1-BETA1 + +* [Add support for PostgreSQL 9.6](${ghbug}108) +* [Clarify documentation of ResultSetProvider](${ghbug}115) +* [`pg_upgrade` (upgrade failure from 9.5 to 9.6)](${ghbug}117) +* [Java logging should honor `client_min_messages` too](${ghbug}125) + +$h3 Updated PostgreSQL APIs tracked + +* `heap_form_tuple` +* 64-bit `SPI_processed` +* 64-bit `Portal->portalPos` +* 64-bit `FuncCallContext.call_cntr` +* 64-bit `SPITupleTable.alloced` and `.free` +* `IsBackgroundWorker` +* `IsBinaryUpgrade` +* `SPI_register_trigger_data` +* `SPI` without `SPI_push`/`SPI_pop` +* `AllocSetContextCreate` +* `DefineCustom...Variable` (no `GUC_LIST_QUOTE` in extensions) + +$h3 Credits + +There is a PL/Java 1.5.1 thanks in part to +Christoph Berg, +Thom Brown, +Luca Ferrari, +Chapman Flack, +Petr Michalek, +Steve Millington, +Kenneth Olson, +Fabian Zeindl, +original creator Thomas Hallgren, +and the many contributors to earlier versions. + +$h2 PL/Java 1.5.0 (29 March 2016) + +This, the first PL/Java numbered release since 1.4.3 in 2011, combines +compatibility with the latest PostgreSQL and Java versions with modernized +build and installation procedures, automatic generation of SQL deployment +code from Java annotations, and many significant fixes. + +$h3 Security + +Several security issues are addressed in this release. Sites already +using PL/Java are encouraged to update to 1.5.0. 
For several of the
+issues below, practical measures are described to mitigate risk until
+an update can be completed.
+
+[CVE-2016-0766][], a privilege escalation requiring an authenticated
+PostgreSQL connection, is closed by installing PL/Java 1.5.0 (including
+prereleases) or by updating PostgreSQL itself to at least 9.5.1, 9.4.6,
+9.3.11, 9.2.15, 9.1.20. Vulnerable systems are only those running both
+an older PL/Java and an older PostgreSQL.
+
+[CVE-2016-0767][], in which an authenticated PostgreSQL user with USAGE
+permission on the `public` schema may alter the `public` schema classpath,
+is closed by release 1.5.0 (including prereleases). If updating to 1.5.0
+must be delayed, risk can be mitigated by revoking public `EXECUTE` permission
+on `sqlj.set_classpath` and granting it selectively to responsible users or
+roles.
+
+This release brings a policy change to a more secure-by-default posture,
+where the ability to create functions in `LANGUAGE java` is no longer
+automatically granted to `public`, but can be selectively granted to roles
+that will have that responsibility. The change reduces exposure to a known
+issue present in 1.5.0 and earlier versions, which will be closed in a future
+release ([CVE-2016-0768][], see **large objects, access control** below).
+
+The new policy will be applied in a new installation; permissions will not
+be changed in an upgrade, but any site can move to this policy, even before
+updating to 1.5.0, with `REVOKE USAGE ON LANGUAGE java FROM public;` followed by
+explicit `GRANT` commands for the users/roles expected to create Java
+functions.
+
+[CVE-2016-2192][], in which an authenticated user can alter type mappings
+without owning the types involved, is also closed by release 1.5.0.
+Exploitability is limited by other permissions, but where type mapping is a
+feature in use at a site, an attacker can interfere with proper operation of
+code that relies on it. A mitigation is simply to
+`REVOKE EXECUTE ... FROM PUBLIC` on the `sqlj.add_type_mapping`
+and `sqlj.drop_type_mapping` functions, and grant the privilege only to
+selected users or roles. As of 1.5.0, these functions require the invoker
+to be superuser or own the type being mapped.
+
+[CVE-2016-0766]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0766
+[CVE-2016-0767]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0767
+[CVE-2016-0768]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0768
+[CVE-2016-2192]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-2192
+
+$h3 Version compatibility
+
+PL/Java 1.5.0 can be built against recent PostgreSQL versions including 9.5,
+using Java SE 8, 7, or 6. See [version compatibility][versions] for more
+detail. OpenJDK is well supported. Support for GCJ has been dropped; features
+of modern Java VMs that are useful to minimize footprint and startup time,
+such as class-data sharing, are now more deeply covered
+[in the installation docs][vmopts].
+
+[versions]: build/versions.html
+[vmopts]: install/vmoptions.html
+
+$h3 Build procedures
+
+Since 2013, PL/Java has been hosted [on GitHub][ghpljava] and built
+using [Apache Maven][mvn]. See the new [build instructions][bld] for details.
+
+Reported build issues for specific platforms have been resolved,
+with new platform-specific build documentation
+for [OS X][osxbld], [Solaris][solbld], [Ubuntu][ububld],
+[Windows MSVC][msvcbld], and [Windows MinGW-w64][mgwbld].
+ +The build produces a redistributable installation archive usable with +the version of PostgreSQL built against and the same operating system, +architecture, and linker. The type of archive is `jar` on all platforms, as +all PL/Java installations will have Java available. + +[ghpljava]: https://github.com/tada/pljava +[mvn]: http://maven.apache.org/ +[bld]: build/build.html +[msvcbld]: build/buildmsvc.html +[mgwbld]: build/mingw64.html +[osxbld]: build/macosx.html +[solbld]: build/solaris.html +[ububld]: build/ubuntu.html + +$h3 Installation procedures + +The jar produced by the build is executable and will self-extract, +consulting `pg_config` on the destination system to find the correct +default locations for the extracted files. Any location can be overridden. +(Enhancement requests [6][gh6], [9][gh9]) + +PL/Java now uses a PostgreSQL configuration variable, `pljava.libjvm_location`, +to find the Java runtime to use, eliminating the past need for highly +platform-specific tricks like link-time options or runtime-loader configuration +just so that PL/Java could find Java. PostgreSQL configuration variables are +now the only form of configuration needed for PL/Java, and the `libjvm_location` +should be the only setting needed if file locations have not been overridden. + +In PostgreSQL 9.1 and later, PL/Java can be installed with +`CREATE EXTENSION pljava`. Regardless of PostgreSQL version, installation +has been simplified. Former procedures involving `Deployer` or `install.sql` +are no longer required. Details are in the [new installation instructions][ins]. + +$h4 Schema migration + +The tables used internally by PL/Java have changed. If PL/Java 1.5.0 is +loaded in a database with an existing `sqlj` schema populated by an earlier +PL/Java version (1.3.0 or later), the structure will be updated without data +loss (enhancement request [12][gh12]). *Remember that PL/Java runs independently +in each database session where it is in use. Older PL/Java versions active in +other sessions can be disrupted by the schema change.* + +A trial installation of PL/Java 1.5.0 can be done in a transaction, and +rolled back if desired, leaving the schema as it was. Any concurrent sessions +with active older PL/Java versions will not be disrupted by the altered schema +as long as the transaction remains open, *but they may block for the duration, +so such a test transaction should be kept short*. + +[ins]: install/install.html + +$h3 Changes + +$h4 Behavior of `readSQL` and `writeSQL` for base and mirror user-defined types + +In the course of fixing [issue #98][gh98], the actual behavior of +`readSQL` and `writeSQL` with base or mirror types, which had not +previously been documented, [now is](develop/coercion.html), along with +other details of PL/Java's type coercion rules found only in the code. +Because machine byte order was responsible for issue #98, it now (a) is +selectable, and (b) has different, appropriate, defaults for mirror UDTs +(which need to match PostgreSQL's order) and for base UDTs (which must +stay big-endian because of how binary `COPY` is specified). +A [new documentation section](use/byteorder.html) explains in detail. + +$h4 `USAGE` to `PUBLIC` no longer default for `java` language + +Of the two languages installed by PL/Java, functions that declare +`LANGUAGE javau` can be created only by superusers, while those that +declare `LANGUAGE java` can be created by any user or role granted the +`USAGE` privilege on the language. 
+ +In the past, the language `java` has been created with PostgreSQL's +default permission granting `USAGE` to `PUBLIC`, but PL/Java 1.5.0 +leaves the permission to be explicitly granted to those users or roles +expected to create Java functions, in keeping with least-privilege +principles. See **large objects, access control** under **known issues** +for background. + +$h4 SQL generated by Java annotations + +Java code developed for use by PL/Java can carry in-code annotations, +used by the Java compiler to generate the SQL commands to declare the +new functions, types, triggers, etc. in PostgreSQL (enhancement request +[1011112][], though different in implementation). This eliminates the need +to have Java code and the corresponding SQL commands developed in parallel, +and the class of errors possible when both are not updated together. It +also allows compile-time checks that the Java methods or classes being +annotated are suitable (correct access modifiers, signatures, etc.) +for their declared SQL purposes, rather than discovering +such issues only upon loading the code into PostgreSQL and trying to use it. + +The Java compiler writes the generated SQL into a "deployment descriptor" +file (`pljava.ddr` by default), as specified by the SQL/JRT standard. The +file can be included in a `jar` archive with the compiled code, and the +commands will be executed by PL/Java when the `install_jar` function is +used to load the jar. + +SQL generation is covered in the [updated user documentation][user], +and illustrated in the [Hello, World example][hello] and +[several other supplied examples][exanno]. Reference information +is [in the API documentation][apianno]. It is currently usable to declare +functions, triggers, and user-defined types, both base and composite. + +[user]: use/use.html +[hello]: use/hello.html +[exanno]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation +[apianno]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/package-summary.html#package-description + +The history of this feature in PL/Java is long, with the first related commits +appearing in 2005, six years in advance of an enhancement request for it. +It became generally usable in 2013 when building with +Java SE 6 or later, using the annotation processing framework Java introduced +in that release. 1.5.0 is the first PL/Java numbered release to feature it. + +$h5 Annotation keyword changes + +If you have been using the SQL generation feature in prerelease `git` builds of +2013 or later, be aware that some annotation keywords have changed in finalizing +the 1.5.0 release. Java code that was compiled using the earlier keywords will +continue to work, but will have to be updated before it can be recompiled. + +* For functions: `effects=(VOLATILE,STABLE,IMMUTABLE)` was formerly `type=` +* For functions: `type=` (_an explicit SQL return type for the function_) + was formerly `complexType=` +* For functions: `trust=(SANDBOXED,UNSANDBOXED)` was formerly + `(RESTRICTED,UNRESTRICTED)` +* For triggers: `called=(BEFORE,AFTER,INSTEAD_OF)` was formerly `when=` + and conflicted with the `WHEN` clause introduced for triggers + in PostgreSQL 9.0. + +$h4 A jar may have more than one deployment descriptor + +PL/Java formerly allowed only one entry in a jar to be a deployment +descriptor (that is, a file of SQL commands to be executed upon loading +or unloading the jar). 
The SQL/JRT standard allows multiple entries to +be deployment descriptors, executed in the order they are mentioned +_in the jar manifest_, or the reverse of that order when the jar is +being unloaded. PL/Java now conforms to the standard. + +The behavior is useful during transition to annotation-driven deployment +descriptor generation for a project that already has a manually-maintained +deployment descriptor. PL/Java's own `pljava-examples` project is an +illustration, in the midst of such a transition itself. + +Note the significance placed by SQL/JRT on the order of entries in a jar +manifest, whose order is normally _not_ significant according to the Jar File +Specification. Care can be needed when manipulating manifests with automated +tools that may not preserve order. + +$h4 Conditional execution within deployment descriptors + +Deployment descriptors have a primitive conditional-execution provision +defined in the SQL/JRT standard: commands wrapped in a +`BEGIN IMPLEMENTOR ` _identifier_ construct will only be executed if the +_identifier_ is recognized by the SQL/JRT implementation in use. The design +makes possible jars that can be installed on different database systems that +provide SQL/JRT features, with database-specific commands wrapped in +`BEGIN IMPLEMENTOR` blocks with an _identifier_ specific to the system. +By default, PL/Java recognizes the _identifier_ `postgresql` (matched without +regard to case). + +PL/Java extends the standard by allowing the PostgreSQL configuration +variable `pljava.implementors` to contain a list of identifiers that will +be recognized. SQL code in a deployment descriptor can conditionally add +or remove identifiers in this list to influence which subsequent implementor +blocks will be executed, giving a still-primitive but more general control +structure. + +In sufficiently recent PostgreSQL versions, the same effect could be +achieved using `DO` statements and PL/pgSQL control structures, but this +facility in PL/Java does not require either to be available. + +$h4 Interaction with `SET ROLE` corrected + +PL/Java formerly was aware of the user ID associated with the running +session, but not any role ID that user may have acquired with `SET ROLE`. +The result would commonly be failed permission checks made by PL/Java when +the session user did not have the needed permission, but had `SET ROLE` to +a role that did. Likewise, within `install_jar`, PL/Java would execute +deployment descriptor commands as the original session user rather than +as the user's current role, with permission failures a likely result. + +Correcting this issue has changed the PL/Java API, but without a bump +of major version because the prior API, while deprecated, is still available. + +* [`getOuterUserName`][goun] and [`executeAsOuterUser`][eaou] are new, and + correctly refer to the session user or current role, when active. +* [`getSessionUserName`][gsun] and [`executeAsSessionUser`][easu] are still + present but deprecated, and _their semantics are changed_. They are now + deprecated aliases for the corresponding new methods, which honor the + set role. Use cases that genuinely need to refer only to the _session_ user + and ignore the role should be rare, and should be discussed on the mailing + list or opened as issues. 
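+
+A short sketch of how the new methods might be used from inside a PL/Java
+function follows. The two `Session` methods are the ones linked above; the use
+of `SessionManager.current()` to obtain the `Session`, and the `audit.actions`
+table, are assumptions made for the example.
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.logging.Logger;
+
+import org.postgresql.pljava.Session;
+import org.postgresql.pljava.SessionManager;
+import org.postgresql.pljava.annotation.Function;
+
+public class RoleAwareSketch
+{
+	@Function
+	public static void recordAction() throws SQLException
+	{
+		Session s = SessionManager.current();
+		Connection c = DriverManager.getConnection("jdbc:default:connection");
+
+		// reflects a role acquired with SET ROLE, when one is active
+		String actor = s.getOuterUserName();
+		Logger.getAnonymousLogger().info("action requested by " + actor);
+
+		// run a statement with the outer user's (current role's) permissions
+		s.executeAsOuterUser(c,
+			"INSERT INTO audit.actions(actor) VALUES (current_user)");
+	}
+}
+```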
+ +#set($sessapi = 'pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/Session.html#') + +[goun]: ${sessapi}getOuterUserName() +[eaou]: ${sessapi}executeAsOuterUser(java.sql.Connection,java.lang.String) +[gsun]: ${sessapi}getSessionUserName() +[easu]: ${sessapi}executeAsSessionUser(java.sql.Connection,java.lang.String) + +$h4 Unicode transparency + +Since the resolution of [bug 21][gh21], PL/Java contains a regression test +to ensure that character strings passed and returned between PostgreSQL and +Java will round-trip without alteration for the full range of Unicode +characters, _when the database encoding is set to `UTF8`_. + +More considerations apply when the database encoding is anything other +than `UTF8`, and especially when it is `SQL_ASCII`. Please see +[character encoding support][charsets] for more. + +[charsets]: use/charsets.html + +$h3 Enhancement requests addressed + +* [Use Annotations instead of DDL Manifest][1011112] +* [Installation of pljava on postgresql servers][gh9] +* [Find an alternative way to install the pljava.so in `/usr/lib`][gh6] +* [Provide database migration][gh12] +* [Support types with type modifiers][1011140] (partial: see [example][typmex]) +* [Build process: accommodate Solaris 10][gh102] + +[1011112]: ${pgffeat}1011112 +[1011140]: ${pgffeat}1011140 +[gh9]: ${ghbug}9 +[gh6]: ${ghbug}6 +[gh12]: ${ghbug}12 +[gh102]: ${ghbug}102 + +[typmex]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/IntWithMod.java + +$h3 Bugs fixed + +$h4 Since 1.5.0-BETA3 + +* [Build process: accept variation in PostgreSQL version string][gh101] +* [Build process: accommodate PostgreSQL built with private libraries][gh103] +* Clarified message when `CREATE EXTENSION` fails because new session needed +* Reduced stack usage in SQL generator + (small-memory build no longer needs `-Xss`) + +$h4 In 1.5.0-BETA3 + +* [Bogus mirror-UDT values on little-endian hardware][gh98] +* [Base UDT not registered if first access isn't in/out/send/recv][gh99] +* `TupleDesc` leak warnings with composite UDTs +* also added regression test from [1010962][] report + +$h4 In 1.5.0-BETA2 + +* [Generate SQL for trigger function with no parameters][gh92] +* [openssl/ssl.h needed on osx el-capitan (latest 10.11.3)/postgres 9.5][gh94] + (documented) +* [Source location missing for some annotation errors][gh95] +* [OS X El Capitan "Java 6" dialog when loading ... 
Java 8][gh96]
+* pljava-api jar missing from installation jar
+
+$h4 In 1.5.0-BETA1
+
+* [SPIPreparedStatement.setObject() fails with Types.BIT][1011119]
+* [SSLSocketFactory throws IOException on Linux][1011095]
+* [PL/Java fails to compile with -Werror=format-security][1011181]
+* [PL/Java does not build on POWER 7][1011197]
+* [The built in functions do not use the correct error codes][1011206]
+* [TupleDesc reference leak][1010962]
+* [String conversion to enum fails][gh4]
+* [segfault if SETOF RECORD-returning function used without AS at callsite][gh7]
+* [pl/java PG9.3 Issue][gh17]
+* [No-arg functions unusable: "To many parameters - expected 0"][gh8]
+* [Exceptions in static initializers are masked][gh54]
+* [UDT in varlena form breaks if length > 32767][gh52]
+* [PL/Java kills unicode?][gh21]
+* [Type.c expects pre-8.3 find_coercion_pathway behavior][gh65]
+* [Support PostgreSQL 9.5][gh48]
+* [pl/java getting a build on MacOSX - PostgreSQL 9.3.2][gh22]
+* [build pljava on windows for PostgreSQL 9.2][gh23]
+* [Error while installing PL/Java with Postgresql 9.3.4 64 bit on Windows 7 64 bit System][gh28]
+* [pljava does not compile on mac osx ver 10.11.1 and postgres 9.4][gh63]
+* [pljava does not compile on centos 6.5 and postgres 9.4][gh64]
+* [Error installing pljava with Windows 7 64 Bit and Postgres 9.4][gh71]
+## JNI_getIntArrayRegion instead of JNI_getShortArrayRegion
+## Eclipse IDE artifacts
+## Site
+## Warnings
+## Javadoc
+
+[1011119]: ${pgfbug}1011119
+[1011095]: ${pgfbug}1011095
+[1011181]: ${pgfbug}1011181
+[1011197]: ${pgfbug}1011197
+[1011206]: ${pgfbug}1011206
+[1010962]: ${pgfbug}1010962
+[gh4]: ${ghbug}4
+[gh7]: ${ghbug}7
+[gh8]: ${ghbug}8
+[gh17]: ${ghbug}17
+[gh54]: ${ghbug}54
+[gh52]: ${ghbug}52
+[gh21]: ${ghbug}21
+[gh65]: ${ghbug}65
+[gh48]: ${ghbug}48
+[gh22]: ${ghbug}22
+[gh23]: ${ghbug}23
+[gh28]: ${ghbug}28
+[gh63]: ${ghbug}63
+[gh64]: ${ghbug}64
+[gh71]: ${ghbug}71
+[gh92]: ${ghbug}92
+[gh94]: ${ghbug}94
+[gh95]: ${ghbug}95
+[gh96]: ${ghbug}96
+[gh98]: ${ghbug}98
+[gh99]: ${ghbug}99
+[gh101]: ${ghbug}101
+[gh103]: ${ghbug}103
+
+$h3 Updated PostgreSQL APIs tracked
+
+Several APIs within PostgreSQL itself have been added or changed;
+PL/Java now uses the current versions of these where appropriate:
+
+* `find_coercion_pathway`
+* `set_stack_base`
+* `GetOuterUserId`
+* `GetUserNameFromId`
+* `GetUserIdAndSecContext`
+* `pg_attribute_*`
+* Large objects: truncate, and 64-bit offsets
+
+$h3 Known issues and areas for future work
+
+$h4 Developments in PostgreSQL not yet covered
+
+Large objects, access control
+: PL/Java does not yet expose PostgreSQL large objects with a documented,
+  stable API, and the support it does contain was developed against pre-9.0
+  PostgreSQL versions, where no access control applied to large objects and
+  any object could be accessed by any database user. PL/Java's behavior is
+  proper for PostgreSQL before 9.0, but improper on 9.0+ where it would be
+  expected to honor access controls on large objects ([CVE-2016-0768][]).
+  This will be corrected in a future release. For this and earlier releases,
+  the recommendation is to selectively grant `USAGE` on the `java` language to
+  specific users or roles responsible for creating Java functions; see
+  "default `USAGE` permission" under Changes.
+ +`INSTEAD OF` triggers, triggers on `TRUNCATE` +: These are supported by annotations and the SQL generator, and the runtime + will deliver them to the specified method, but the `TriggerData` interface + has no new methods to recognize these cases (that is, no added + methods analogous to `isFiredAfter`, `isFiredByDelete`). For a method + expressly coded to be a `TRUNCATE` trigger or an `INSTEAD OF` trigger, + that is not a problem, but care should be taken when coding a trigger + method to handle more than one type of trigger, or creating triggers of + these new types that call a method developed pre-PL/Java-1.5.0. Such a + method could be called with a `TriggerData` argument whose existing + `isFired...` methods all return `false`, likely to put the method on an + unexpected code path. + + A later PL/Java version should introduce trigger interfaces that better + support such evolution of PostgreSQL in a type-safe way. + +Constraint triggers +: Constraint trigger syntax is not supported by annotations and the SQL + generator. If declared (using hand-written SQL), they will be delivered + by the runtime, but without any constraint-trigger-specific information + available to the called method. + +Event triggers +: Event triggers are not yet supported by annotations or the SQL generator, + and will not be delivered by the PL/Java runtime. + +Range types +: No predefined mappings for range types are provided. + +`PRE_PREPARE`, `PRE_COMMIT`, `PARALLEL_ABORT`, `PARALLEL_PRE_COMMIT`, and `PARALLEL_COMMIT` transaction callbacks, `PRE_COMMIT` subtransaction callbacks +: Listeners for these events cannot be registered and the events will not + be delivered. + +$h4 Imperfect integration with PostgreSQL dependency tracking + +In a dump/restore, manual intervention can be needed if the users/roles +recorded as owners of jars are missing or have been renamed. A current +[thread on `pgsql-hackers`][ownhack] should yield a better solution for +a future release. + +[ownhack]: http://www.postgresql.org/message-id/56783412.6090005@anastigmatix.net + +$h4 Quirk if deployment descriptor loads classes from same jar + +The `install_jar` function installs a jar, optionally reading deployment +descriptors from the jar and executing the install actions they contain. +It is possible for those actions to load classes from the jar just installed. +(This would be unlikely if the install actions are limited to typical setup, +function/operator/datatype creation, but likely, if the install actions also +include basic function tests, or if the setup requirements are more +interesting.) + +If, for any class in the jar, the first attempt to load that class is made +while resolving a function declared `STABLE` or `IMMUTABLE`, a +`ClassNotFoundException` results. The cause is PostgreSQL's normal treatment of +a `STABLE` or `IMMUTABLE` function, which relies on a snapshot from the start of +the `install_jar` query, when the jar was not yet installed. A workaround is to +ensure that the install actions cause each needed class to be loaded, such as +by calling a `VOLATILE` function it supplies, before calling one that is +`STABLE` or `IMMUTABLE`. (One could even write install actions to declare a +needed function `VOLATILE` before the first call and then redeclare it.) + +This issue should be resolved as part of a broader rework of class loading +in a future PL/Java release. 
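
Returning to the trigger-interface limitation noted above under
_Developments in PostgreSQL not yet covered_: a method meant to serve more
than one trigger type can guard against the all-`false` case explicitly.
The sketch below is illustrative only (the class name, method name, and
message text are hypothetical); it assumes the usual PL/Java convention of a
static trigger method receiving a `TriggerData`, and uses only the
long-standing `isFiredBy...` predicates alongside the `isFiredByDelete`
mentioned above.

```java
import java.sql.SQLException;

import org.postgresql.pljava.TriggerData;

public class DefensiveTrigger
{
	/*
	 * A method written before PL/Java 1.5.0 might assume one of these
	 * predicates is always true. If the same method is later attached to a
	 * TRUNCATE or INSTEAD OF trigger, all of them can return false, so that
	 * case is handled explicitly instead of falling through silently.
	 */
	public static void onRowEvent(TriggerData td) throws SQLException
	{
		if ( td.isFiredByInsert() )
		{
			// handle INSERT
		}
		else if ( td.isFiredByUpdate() )
		{
			// handle UPDATE
		}
		else if ( td.isFiredByDelete() )
		{
			// handle DELETE
		}
		else
		{
			throw new SQLException(
				"trigger fired for an event this method does not handle");
		}
	}
}
```

Failing loudly in the final branch keeps a repurposed trigger method off an
unintended code path until the richer trigger interfaces mentioned above are
available.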
+ +$h4 Partial implementation of JDBC 4 and later + +The changes to make PL/Java build under Java SE 6 and later, with version 4.0 +and later of JDBC, involved providing the specified methods so +compilation would succeed, with real implementations for some, but for others +only stub versions that throw `SQLFeatureNotSupportedException` if used. +Regrettably, there is nothing in the documentation indicating which methods +have real implementations and which do not; to create such a list would require +an audit of that code. If a method throws the exception when you call it, it's +one of the unimplemented ones. + +Individual methods may be fleshed out with implementations as use cases arise +that demand them, but for a long-term roadmap, it seems more promising to +reduce the overhead of maintaining another JDBC implementation by sharing +code with `pgjdbc`, as has been [discussed on pljava-dev][jdbcinherit]. + +[jdbcinherit]: http://lists.pgfoundry.org/pipermail/pljava-dev/2015/002370.html + +$h4 Exception handling and logging + +PL/Java does interconvert between PostgreSQL and Java exceptions, but with +some loss of fidelity in the two directions. PL/Java code has some access +to most fields of a PostgreSQL error data structure, but only through +internal PL/Java API that is not expected to remain usable, and code written +for PL/Java has never quite had first-class standing in its ability to +_generate_ exceptions as information-rich as those from PostgreSQL itself. + +PL/Java in some cases generates the _categorized `SQLException`s_ introduced +with JDBC 4.0, and in other cases does not. + +This area may see considerable change in a future release. +[Thoughts on logging][tol] is a preview of some of the considerations. + +[tol]: https://github.com/tada/pljava/wiki/Thoughts-on-logging + +$h4 Types with type modifiers and `COPY` + +Although it is possible to create a PL/Java user-defined type that accepts +a type modifier (see the [example][typmex]), such a type will not yet be +handled by SQL `COPY` or any other operation that requires the `input` or +`receive` function to handle the modifier. This is left for a future release. + +$h3 Credits + +PL/Java 1.5.0 owes its being to original creator Thomas Hallgren and +many contributors: + +Daniel Blanch Bataller, +Peter Brewer, +Frank Broda, +Chapman Flack, +Marty Frasier, +Bear Giles, +Christian Hammers, +Hal Hildebrand, +Robert M. Lefkowitz, +Eugenie V. Lyzenko, +Dos Moonen, +Asif Naeem, +Kenneth Olson, +Johann Oskarsson, +Thomas G. Peters, +Srivatsan Ramanujam, +Igal Sapir, +Jeff Shaw, +Rakesh Vidyadharan, +`grunjol`, +`mc-soi`. + +Periods in PL/Java's development have been sponsored by EnterpriseDB. + +In the 1.5.0 release cycle, multiple iterations of testing effort +have been generously contributed by Kilobe Systems and by Pegasystems, Inc. + +## From this point on, the entries were reconstructed from old notes at the +## same time as the 1.5.0 notes were drafted, and they use a finer level of +## heading. So restore the 'real' values of the heading variables from here +## to the end of the file. +#set($h2 = '##') +#set($h3 = '###') +#set($h4 = '####') +#set($h5 = '#####') + +$h3 PL/Java 1.4.3 (15 September 2011) + +Notable changes in this release: + +* Works with PostgreSQL 9.1 +* Correctly links against IBM Java. +* Reads microseconds correctly in timestamps. 
+ +Bugs fixed: + +* [Be clear about not building with JDK 1.6][1010660] +* [Does not link with IBM VM][1010970] +* [SPIConnection.getMetaData() is incorrectly documented][1010971] +* [PL/Java 1.4.2 Does not build with x86_64-w64-mingw32][1011025] +* [PL/Java does not build with PostgreSQL 9.1][1011091] + +Feature Requests: + +* [Allow pg_config to be set externally to the Makefile][1011092] +* [Add option to have pljava.so built with the runtime path of libjvm.so][1010955] + +[1010660]: ${pgfbug}1010660 +[1010970]: ${pgfbug}1010970 +[1010971]: ${pgfbug}1010971 +[1011025]: ${pgfbug}1011025 +[1011091]: ${pgfbug}1011091 + +[1011092]: ${pgffeat}1011092 +[1010955]: ${pgffeat}1010955 + +$h3 PL/Java 1.4.2 (11 December 2010) + +Bugfixes: + +* [Function returning complex objects with POD arrays cause a segfault][1010956] +* [Segfault when assigning an array to ResultSet column][1010953] +* [Embedded array support in returned complex objects][1010482] + +[1010956]: ${pgfbug}1010956 +[1010953]: ${pgfbug}1010953 +[1010482]: ${pgfbug}1010482 + +$h3 PL/Java 1.4.1 (9 December 2010) + +Note: Does not compile with Java 6. Use JDK 1.5 or 1.4. + +Compiles with PostgreSQL 8.4 and 9.0. + +Connection.getCatalog() has been implemented. + +Bugfixes: + +* [Compiling error with postgresql 8.4.1][1010759] +* [org.postgresql.pljava.internal.Portal leak][1010712] +* [build java code with debugging if server has debugging enabled][1010189] +* [Connection.getCatalog() returns null][1010653] +* [VM crash in TransactionListener][1010462] +* [Link against wrong library when compiling amd64 code on Solaris][1010954] + +[1010759]: ${pgfbug}1010759 +[1010712]: ${pgfbug}1010712 +[1010189]: ${pgfbug}1010189 +[1010653]: ${pgfbug}1010653 +[1010462]: ${pgfbug}1010462 +[1010954]: ${pgfbug}1010954 + +Other commits: + +For a multi-threaded pljava function we need to adjust stack_base_ptr +before calling into the backend to avoid stack depth limit exceeded +errors. Previously this was done only on query execution, but we need +to do it on iteration of the ResultSet as well. + +When creating a variable length data type, the code was directly +assigning the varlena header length rather than going through an +access macro. The header format changed for the 8.3 release and this +manual coding was not noticed and changed accordingly. Use +SET_VARSIZE to do this correctly. + +Handle passed by value data types by reading and writing directly to +the Datum rather than dereferencing it. + +If the call to a type output function is the first pljava call in a +session, we get a crash. The first pljava call results in a SPI +connection being established and torn down. The type output function +was allocating the result in the SPI memory context which gets +destroyed prior to returning the data to the caller. Allocate the +result in the correct context to survive function exit. + +Clean up a warning about byteasend and bytearecv not having a +prototype when building against 9.0 as those declarations are now in a +new header file. + + +$h3 PL/Java 1.4.0 (1 February 2008) + +Warning! The recent postgresql security releases changed the API of a function +that PL/Java uses. The source can be built against either version, but the +binaries will only run against the version they were built against. The PL/Java +binaries for 1.4.0 have all been built against the latest server releases (which +you should be using anyway). If you are using an older you will have to build +from source. The binary releases support: 8.3 - All versions. 8.2 - 8.2.6 and +up. 
8.1 - 8.1.11 and up. 8.0 - 8.0.15 and up. + +$h3 PL/Java 1.3.0 (18 June 2006) + +This release is about type mapping and the creation of new types in PL/Java. An +extensive effort has gone into making the PL/Java type system extremely +flexible. Not only can you map arbitrary SQL data types to java classes. You can +also create new scalar types completely in Java. Read about the Changes in +version 1.3. + +$h4 Changes + +* A much improved type mapping system that will allow you to: + + * [Map any SQL type to a Java class][maptype] + * [Create a Scalar UDT in Java][scalarudt] + * [Map array and pseudo types][deftypemap] + +[maptype]: https://github.com/tada/pljava/wiki/Mapping-an-sql-type-to-a-java-class +[scalarudt]: https://github.com/tada/pljava/wiki/Creating-a-scalar-udt-in-java +[deftypemap]: https://github.com/tada/pljava/wiki/Default-type-mapping + +* Get the OID for a given relation ([feature request 1319][1319]) +* Jar manifest included in the SQLJ Jar repository + ([feature request 1525][1525]) + +$h4 Fixed bugs + +* [Reconnect needed for jar manipulation to take effect][1531] +* [Backends hang with test suite][1504] +* [Keeps crashing while making a call to a function][1560] +* [Memory Leak in Statement.executeUpdate][1556] +* [jarowner incorrect after dump and reload][1506] +* [Missing JAR manifest][1525] +* [TZ adjustments for Date are incorrect][1547] +* [Functions returning sets leaks memory][1542] +* [drop lib prefix][1423] +* ["oid" column is not available in trigger's NEW/OLD ResultSet][1317] +* [fails to run with GCJ, too][1480] +* [Compile failure with 8.1.4][1558] +* [fails to build with GCJ][1479] +* [Record returning function cannot be called with different structures within one session][1440] +* [Cannot map function with complex return type to method that uses non primitive arguments][1551] +* [Get OID for given relation][1319] + +[1531]: ${gborgbug}1531 +[1504]: ${gborgbug}1504 +[1560]: ${gborgbug}1560 +[1556]: ${gborgbug}1556 +[1506]: ${gborgbug}1506 +[1525]: ${gborgbug}1525 +[1547]: ${gborgbug}1547 +[1542]: ${gborgbug}1542 +[1423]: ${gborgbug}1423 +[1317]: ${gborgbug}1317 +[1480]: ${gborgbug}1480 +[1558]: ${gborgbug}1558 +[1479]: ${gborgbug}1479 +[1440]: ${gborgbug}1440 +[1551]: ${gborgbug}1551 +[1319]: ${gborgbug}1319 + +$h3 PL/Java 1.2.0 (20 Nov 2005) + +The PL/Java 1.2.0 release is primarily targeted at the new PostgreSQL 8.1 but +full support for 8.0.x is maintained. New features include support IN/OUT +parameters, improved meta-data handling, and better memory management. + +$h3 PL/Java 1.1.0 (14 Apr 2005) + +PL/Java 1.1.0 includes a lot of new features such as `DatabaseMetaData`, +`ResultSetMetaData`, language handlers for both trusted and untrusted language, +additional semantics for functions returning `SETOF`, and simple ObjectPooling. + +$h3 PL/Java 1.0.1 (07 Feb 2005) + +This release resolves a couple of important security issues. The most important +one is perhaps that PL/Java now is a trusted language. See [Security][] for more +info. Filip Hrbek, now member of the PL/Java project, contributed what was +needed to make this happen. + +[Security]: https://github.com/tada/pljava/wiki/Security + +$h3 PL/Java 1.0.0 (23 Jan 2005) + +Today, after a long period of fine tuning, PL/Java 1.0.0 was finally released. 
diff --git a/src/site/markdown/releasenotes.md.vm b/src/site/markdown/releasenotes.md.vm index d30ee634..e80c13af 100644 --- a/src/site/markdown/releasenotes.md.vm +++ b/src/site/markdown/releasenotes.md.vm @@ -5,929 +5,1026 @@ #set($h4 = '####') #set($h5 = '#####') #set($gborgbug = 'http://web.archive.org/web/20061208113236/http://gborg.postgresql.org/project/pljava/bugs/bugupdate.php?') -#set($pgfbug = 'http://pgfoundry.org/tracker/?func=detail&atid=334&group_id=1000038&aid=') -#set($pgffeat = 'http://pgfoundry.org/tracker/?func=detail&atid=337&group_id=1000038&aid=') +#set($pgfbug = 'https://web.archive.org/web/*/http://pgfoundry.org/tracker/?func=detail&atid=334&group_id=1000038&aid=') +#set($pgffeat = 'https://web.archive.org/web/*/http://pgfoundry.org/tracker/?func=detail&atid=337&group_id=1000038&aid=') #set($ghbug = 'https://github.com/tada/pljava/issues/') +#set($ghpull = 'https://github.com/tada/pljava/pull/') -$h2 PL/Java 1.5.1 +$h2 PL/Java 1.6.10 -This release chiefly adds support for PostgreSQL 9.6 and 10, -and plays more nicely with `pg_upgrade`. With PostgreSQL 9.6 support -comes the ability to declare functions -`PARALLEL { UNSAFE | RESTRICTED | SAFE }`, and with PG 10 support, -transition tables are available to triggers. +This is the tenth minor update in the PL/Java 1.6 series. It adds support +for building and running with Java 25 and with PostgreSQL 18, and improvements +to exception handling and logging that should be helpful in debugging the common +developer mistake of catching a PostgreSQL exception and proceeding without +rolling back to a prior savepoint. Further information on the changes +may be found below. -$h3 Security +When run on Java 24 or later, there is no security policy enforcement; please +review the release notes below for PL/Java 1.6.9 for more detail on that. -1.5.1 removes the code at issue in [CVE-2016-0768][], which pertained to -PostgreSQL large objects, but had never been documented or exposed as API. +$h3 Version compatibility + +PL/Java 1.6.10 can be built against recent PostgreSQL versions including 18, and +older ones back to 9.5, using Java SE 9 or later. The Java version used at +runtime does not have to be the same version used for building. PL/Java itself +can run on any Java version 9 or later, but for security policy enforcement the +runtime Java version must be 23 or earlier. PL/Java functions can be +written for, and use features of, whatever Java version will be loaded at run +time. See [version compatibility][versions] for more detail. + +$h3 Changes + +$h4 Runtime + +$h5 Exception-handling and logging improvements for mishandled exceptions + +When a PL/Java function calls back into PostgreSQL, an exception may be thrown. +At that point, there are two things a PL/Java function may legitimately do: +it may re-throw the exception (or throw some other exception), so that the +PL/Java function returns exceptionally and PostgreSQL cleans up the transaction, +or it may roll back to a savepoint that was established before the exception was +thrown, and then proceed and return non-exceptionally. To catch such an +exception and simply proceed, without rolling back to a prior savepoint, is +a common new-developer mistake. 
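
To make the correct pattern concrete, here is a minimal sketch (not taken
from the new documentation section; the function, table, and column names
are hypothetical). It uses the standard SQL/JRT `jdbc:default:connection`
URL for the SPI connection and ordinary JDBC savepoint calls:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Savepoint;

public class SavepointHandling
{
	/*
	 * Hypothetical PL/Java function body: tries an INSERT that may fail
	 * (for example, on a unique-constraint violation), and proceeds only
	 * after rolling back to a savepoint set before the call that threw.
	 */
	public static String tryInsert(String value) throws SQLException
	{
		Connection conn =
			DriverManager.getConnection("jdbc:default:connection");
		Savepoint sp = conn.setSavepoint();
		try ( PreparedStatement ps = conn.prepareStatement(
				"INSERT INTO example_table(v) VALUES (?)") )
		{
			ps.setString(1, value);
			ps.executeUpdate();
			conn.releaseSavepoint(sp);
			return "inserted";
		}
		catch ( SQLException e )
		{
			conn.rollback(sp); // roll back before doing any further work
			return "not inserted: " + e.getSQLState();
		}
	}
}
```

Both legitimate outcomes appear here: the exceptional path rolls back to the
savepoint before anything else is attempted, and a function that prefers to
fail can simply let the `SQLException` propagate instead.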
+
+In past PL/Java versions, the troubleshooting for such a common mistake was
+uncommonly difficult, because the mishandling might not be detected until
+PL/Java made some later, even quite unrelated, call back into PostgreSQL, and
+the exception thrown at that point might have no connection to the original
+exception that was mishandled.
+
+PL/Java 1.6.10 adds new exception-handling and logging behavior that should
+greatly simplify debugging such common mistakes. Details are in the new
+documentation section [Catching PostgreSQL exceptions in Java][catch].
+
+[catch]: use/catch.html
+
+$h3 Bugs fixed
+
+* [ClassNotFoundException using PL/Java 1.6.8 on Postgres 17.2](${ghbug}523)
+* [support for PostgreSQL 18](${ghbug}524)
+* [SQL generator may use non-reproducible order in rare cases](${ghbug}527)
+
+$h3 Credits
+
+Thanks to Achilleas Mantzios for starting on the PostgreSQL 18 changes, and
+`daddeo` for making the case to improve the treatment and logging of
+mishandled exceptions.
+
+$h2 Earlier releases
+
+## A nice thing about using Velocity is that each release can be entered at
+## birth using h2 as its main heading, h3 and below within ... and then, when
+## it is moved under 'earlier releases', just define those variables to be
+## one heading level finer. Here goes:
+#set($h2 = '###')
+#set($h3 = '####')
+#set($h4 = '#####')
+#set($h5 = '######')
+
+$h2 PL/Java 1.6.9

-This is not expected to break any existing code at all, based on further
-review showing the code in question had also been simply broken, since 2006,
-with no reported issues in that time. That discovery would support an argument
-for downgrading the severity of the reported vulnerability, but with no need
-to keep that code around, it is more satisfying to remove it entirely.
+This is the ninth minor update in the PL/Java 1.6 series. It adds support
+for building and running with Java 24 _but only with no security enforcement_,
+as explained below. Other than some minor bug fixes, that is the most notable
+change. Further information on the changes may be found below.

-Developers wishing to manipulate large objects in PL/Java are able to do so
-using the SPI JDBC interface and the large-object SQL functions already
-available in every PostgreSQL version PL/Java currently supports.
+$h3 Version compatibility
+
+PL/Java 1.6.9 can be built against recent PostgreSQL versions including 17, and
+older ones back to 9.5, using Java SE 9 or later. The Java version used at
+runtime does not have to be the same version used for building. PL/Java itself
+can run on any Java version 9 or later, but for security policy enforcement the
+runtime Java version must be 23 or earlier. PL/Java functions can be
+written for, and use features of, whatever Java version will be loaded at run
+time. See [version compatibility][versions] for more detail.
+
+Some builds of Java 20 are affected by a bug, [JDK-8309515][]. PL/Java will
+report an error if it detects it is affected by that bug, and the solution can
+be to use a Java version earlier than 20, or one recent enough to have the bug
+fixed. The bug was fixed in Java 21.
+
+$h3 Security policy enforcement unavailable in Java 24 and later
+
+PL/Java 1.6 has historically enforced a flexible and fine-grained security
+policy allowing it to offer, in PostgreSQL parlance, both a 'trusted' and
+'untrusted' procedural language with configurable limits on the allowed
+behavior for both.
That mode of operation, described in +[Configuring permissions in PL/Java][policy], remains fully supported +in PL/Java 1.6.9 _as long as the Java version used at runtime is Java 23 or +earlier_. + +The crucial Java language features making that possible have been removed +by the developers of Java, beginning with Java 24. PL/Java 1.6.9 can be used +with a Java 24 or later runtime, but only as an 'untrusted' language with no +policy enforcement, as described in +[PL/Java with no policy enforcement][nopolicy]. + +Because enforcement depends only on the Java version at runtime, a simple change +to the `pljava.libjvm_location` [configuration variable][variables] allows the +flexibility to host user code using the latest Java 24+ language features (but +with no policy enforcement) in an application where that is acceptable, or, +where the newest language features are not needed, to continue to use an older +supported Java version, including the long-term-support Java 21, with policy +enforced. + +The details in [PL/Java with no policy enforcement][nopolicy] should be +carefully reviewed before using PL/Java in that mode. The section on generic +Java hardening tips can also be a source of good practices for defense-in-depth +even when running with policy enforcement. $h3 Changes -$h4 PostgreSQL 9.6 and parallel query +$h4 Supplied examples -A function in PL/Java can now be [annotated][apianno] -`parallel={UNSAFE | RESTRICTED | SAFE}`, with `UNSAFE` the default. -A new [user guide section][ugparqry] explains the possibilities and -tradeoffs. (Good cases for marking a PL/Java function `SAFE` may be -rare, as pushing such a function into multiple background processes -will require them all to start JVMs. But if a practical application -arises, PostgreSQL's `parallel_setup_cost` can be tuned to help the -planner make good plans.) +$h5 Softened dependency of examples jar on Saxon library -Although `RESTRICTED` and `SAFE` Java functions work in simple tests, -there has been no exhaustive audit of the code to ensure that PL/Java's -internal workings never violate the behavior constraints on such functions. -The support should be considered experimental, and could be a fruitful -area for beta testing. +The Maven build produces a `pljava-examples` jar from the supplied example code, +and can produce that jar with or without the examples that depend on the Saxon +XML library, based on a build-time profile setting. In past releases, the +examples jar, if built with the Saxon examples included, could not be deployed +(`sqlj.install_jar` with `deploy => true` would fail) in a database where a +Saxon jar had not already been installed and placed on the classpath. For cases +where the Saxon examples are not of interest, that inability to deploy the +examples jar based on a choice made at build time was an inconvenience. -[ugparqry]: use/parallel.html +The examples jar now, if built with the Saxon examples included, simply will +leave those examples undeployed, if Saxon classes cannot be found on +the classpath at the time of deployment. -$h4 Tuple counts widened to 64 bits with PostgreSQL 9.6 +$h5 New example function to examine Java's boot module layer -To accommodate the possibility of more than two billion tuples in a single -operation, the SPI implementation of the JDBC `Statement` interface now -provides the JDK 8-specified `executeLargeBatch` and `getLargeUpdateCount` -methods defined to return `long` counts. 
The original `executeBatch` and -`getUpdateCount` methods remain but, obviously, cannot return counts that -exceed `INT_MAX`. In case the count is too large, `getUpdateCount` will throw -an `ArithmeticException`; `executeBatch` will store `SUCCESS_NO_INFO` for -any statement in the batch that affected too many tuples to report. +The discussion of [PL/Java with no policy enforcement][nopolicy] encourages +attention to which Java modules are made available in Java's boot module layer. +A new example function is supplied to return that information. -For now, a `ResultSetProvider` cannot be used to return more than `INT_MAX` -tuples, but will check that condition and throw an error to ensure predictable -behavior. +$h4 Continuous integration -$h4 `pg_upgrade` +Workflows for GitHub Actions, AppVeyor, and Travis have had duplicated code +factored out into a single `jshell` script in the new `CI` directory. -PL/Java should be upgraded to 1.5.1 in a database cluster, before that -cluster is binary-upgraded to a newer PostgreSQL version using `pg_upgrade`. -A new [Upgrading][upgrading] installation-guide section centralizes information -on both upgrading PL/Java in a database, and upgrading a database with PL/Java -in it. +The script is able to use either the `PGJDBC` or `pgjdbc-ng` driver for +connecting to a test server instance. The CI configuration has been using +`pgjdbc-ng` but now uses `PGJDBC`, to avoid a `pgjdbc-ng` dependency on +a library whose native operations Java 24 now warns about and a future +Java release will forbid. -[upgrading]: install/upgrade.html +The GitHub Actions CI workflow now covers Ubuntu, Mac OS on both Intel and ARM, +and Windows using both MSVC and MinGW-w64. -$h4 Suppressing row operations from triggers +$h4 Documentation -In PostgreSQL, a `BEFORE ROW` trigger is able to allow the proposed row -operation, allow it with modified values, or silently suppress the operation -for that row. Way back in PL/Java 1.1.0, the way to produce the 'suppress' -outcome was for the trigger method to throw an exception. Since PL/Java 1.2.0, -however, an exception thrown in a trigger method is used to signal an error -to PostgreSQL, and there has not been a way to suppress the row operation. +$h5 Java stack traces and debugger control -The `TriggerData` interface now has a [`suppress`][tgsuppress] method that -the trigger can invoke to suppress the operation for the row. +The needed settings of `client_min_messages` or `log_min_messages` for Java +exception stacktraces to be shown have been mentioned in passing in too many +places that were not the user documentation, while never clearly stated there. +There is now a [new section](use/use.html#Java_exception_stack_traces) for that, +and also one for [connecting a debugger](use/use.html#Connecting_a_debugger). -[tgsuppress]: pljava-api/apidocs/index.html?org/postgresql/pljava/TriggerData.html#suppress() +$h5 Links into API docs now assume Java 21 `javadoc` version will be used -$h4 Constraint triggers +While it should be possible to build the API documentation with the `javadoc` +tool of whatever Java version is used at build time, different versions of the +tool introduce changes in HTML output, such as anchor names, that affect +links into the API documentation from other pages, such as the build/install/use +documentation in Markdown. To build a full set of documentation with working +links, an assumption must be made about the version of `javadoc` that will be +used. 
Links have been updated on the assumption that the API docs will be built +with the `javadoc` tool of Java 21. + +$h3 Bugs fixed + +* [SQL generator unexpected case-sensitive matching of implementor tags](${ghbug}515) +* [Class path used during jar deploy/undeploy can be bad](${ghbug}516) + +$h3 Credits -New attributes in the `@Trigger` annotation allow the SQL generator to -create constraint triggers (a type of trigger that can be created with SQL -since PostgreSQL 9.1). Such triggers will be delivered by the PL/Java runtime -(to indicate that a constraint would be violated, a constraint trigger -method should throw an informative exception). However, the trigger method -will have access, through the `TriggerData` interface, only to the properties -common to ordinary triggers; methods on that interface to retrieve properties -specific to constraint triggers have not been added for this release. +Thanks in release 1.6.9 to ZhangHuiGui for highlighting the need to better +document debugging arrangements and Java stack traces. -$h4 PostgreSQL 10 and trigger transition tables +[policy]: use/policy.html +[nopolicy]: use/unenforced.html +[variables]: use/variables.html -A trigger [annotation][apianno] can now specify `tableOld="`_name1_`"` or -`tableNew="`_name2_`"`, or both, and the PL/Java function servicing the -trigger can do SPI JDBC queries and see the transition table(s) under the -given name(s). The [triggers example code][extrig] has been extended with -a demonstration. +$h2 PL/Java 1.6.8 (19 October 2024) -[extrig]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/Triggers.java +This is the eighth minor update in the PL/Java 1.6 series. It adds support +for PostgreSQL 17, confirms compatibility with Java 23, and makes some slight +build-process improvements to simplify troubleshooting reported build problems. +Further information on the changes may be found below. -$h4 Logging from Java +$h3 Version compatibility -The way the Java logging system has historically been plumbed to PostgreSQL's, -as described in [issue 125](${ghbug}125), can be perplexing both because it -is unaffected by later changes to the PostgreSQL settings after PL/Java is -loaded in the session, and because it has honored only `log_min_messages` -and ignored `client_min_messages`. The second part is easy to fix, so in -1.5.1 the threshold where Java discards messages on the fast path is -determined by the finer of `log_min_messages` and `client_min_messages`. +PL/Java 1.6.8 can be built against recent PostgreSQL versions including 17, and +older ones back to 9.5, using Java SE 9 or later. The Java version used at +runtime does not have to be the same version used for building. PL/Java itself +can run on any Java version 9 or later. PL/Java functions can be +written for, and use features of, whatever Java version will be loaded at run +time. See [version compatibility][versions] for more detail. -$h4 Conveniences for downstream package maintainers +Some builds of Java 20 are affected by a bug, [JDK-8309515][]. PL/Java will +report an error if it detects it is affected by that bug, and the solution can +be to use a Java version earlier than 20, or one recent enough to have the bug +fixed. The bug was fixed in Java 21. -The `mvn` command to build PL/Java will now accept an option to provide -a useful default for `pljava.libjvm_location`, when building a package for -a particular software environment where the likely path to Java is known. 
+$h3 Changes -The `mvn` command will also accept an option to specify, by the path to -the `pg_config` executable, the PostgreSQL version to build against, in -case multiple versions exist on the build host. This was already possible -by manipulating `PATH` ahead of running `mvn`, but the option makes it more -explicit. +$h4 Build system + +While building the PL/Java native code, Maven output will include the full +`PG_VERSION_STR` from the PostgreSQL development files that have been found +to build against. The string includes platform, compiler, and build notes, as +reported by the `version` function in SQL. This information should always be +included when reporting a PL/Java native build issue, so including it in the +Maven build output will make issues easier to report. + +When building with Maven's `-X` / `--debug` option for additional debug output, +the command arguments of the constructed compiling and linking commands will be +included in the output, which can be useful in troubleshooting a build problem. +The arguments are shown just as the compiler/linker is meant to ultimately +receive them; on a Unix-like platform, that is the Java `List` exactly as seen +with `ProcessBuilder.command()`. On Windows, that `List` is shown just before +the final application of extra quoting that simply ensures the compiler/linker +receives it correctly. + +When building with a platform or environment that does not satisfy the `probe` +predicate of any of the included platform build rules, a Maven error message +will clearly say so. In earlier versions, an uninformative null pointer +exception resulted instead. The new message includes guidance on how to add a +build rule set for a new platform or environment, and possibly contribute it for +inclusion in PL/Java. + +$h4 Documentation + +The build documentation now prominently notes that `mvn --version` will show the +version of Java that Maven has found to use for the build. There had been build +issues reported that could be traced to Maven finding a different Java +installation than expected, when that version was not usable to build +PL/Java 1.6. + +The documentation has been shorn of many lingering references to PostgreSQL +versions older than 9.5, the oldest that PL/Java 1.6 supports, and other +holdovers from pre-1.6 PL/Java. -A new [packaging section][packaging] in the build guide documents those -and a number of considerations for making a PL/Java package. +$h3 Enhancement requests addressed -[packaging]: build/package.html +* [PostgreSQL 17 support](${ghpull}499) $h3 Bugs fixed -$h4 Since 1.5.1-BETA1 +* [Unhelpful output when build fails because no platform rules matched](${ghbug}485) -* [PostgreSQL 10: SPI_modifytuple failed with SPI_ERROR_UNCONNECTED](${ghbug}134) -* [SPIConnection prepareStatement doesn't recognize all parameters](${ghbug}136) -* [Annotations don't support CREATE CONSTRAINT TRIGGER](${ghbug}138) -* [Ordinary (non-constraint) trigger has no way to suppress operation](${ghbug}142) +$h3 Credits -$h4 In 1.5.1-BETA1 +Thanks in release 1.6.8 to Francisco Miguel Biete Banon for determining the +changes needed for PostgreSQL 17. -* [Add support for PostgreSQL 9.6](${ghbug}108) -* [Clarify documentation of ResultSetProvider](${ghbug}115) -* [`pg_upgrade` (upgrade failure from 9.5 to 9.6)](${ghbug}117) -* [Java logging should honor `client_min_messages` too](${ghbug}125) +$h2 PL/Java 1.6.7 (3 April 2024) -$h3 Updated PostgreSQL APIs tracked +This is the seventh minor update in the PL/Java 1.6 series. 
It adds support +for FreeBSD and for building and running with Java 22, and fixes some bugs, +with few other notable changes. Further information on the changes may be found +below. -* `heap_form_tuple` -* 64-bit `SPI_processed` -* 64-bit `Portal->portalPos` -* 64-bit `FuncCallContext.call_cntr` -* 64-bit `SPITupleTable.alloced` and `.free` -* `IsBackgroundWorker` -* `IsBinaryUpgrade` -* `SPI_register_trigger_data` -* `SPI` without `SPI_push`/`SPI_pop` -* `AllocSetContextCreate` +$h3 Version compatibility -$h2 Earlier releases +PL/Java 1.6.7 can be built against recent PostgreSQL versions including 16, and +older ones back to 9.5, using Java SE 9 or later. The Java version used at +runtime does not have to be the same version used for building. PL/Java itself +can run on any Java version 9 or later. PL/Java functions can be +written for, and use features of, whatever Java version will be loaded at run +time. See [version compatibility][versions] for more detail. -## A nice thing about using Velocity is that each release can be entered at -## birth using h2 as its main heading, h3 and below within ... and then, when -## it is moved under 'earlier releases', just define those variables to be -## one heading level finer. Here goes: -#set($h2 = '###') -#set($h3 = '####') -#set($h4 = '#####') -#set($h5 = '######') +Some builds of Java 20 are affected by a bug, [JDK-8309515][]. PL/Java will +report an error if detects it is affected by that bug, and the solution can be +to use a Java version earlier than 20, or one recent enough to have the bug +fixed. The bug has been fixed in Java 21. + +$h3 Changes + +$h4 Changes in XML support + +$h5 Java 22's new XML property to control DTD processing is supported + +Java 22 introduces a new property, `jdk.xml.dtd.support`, which can take values +`allow`, `deny`, and `ignore`. + +The values `allow` and `deny` provide a new way to specify behavior that could +already be requested by other means, and the `allowDTD(boolean)` method of +PL/Java's `Adjusting.XML` API now tries this property first, falling back to the +older means on Java releases that do not support it. -$h2 PL/Java 1.5.0 - -This, the first PL/Java numbered release since 1.4.3 in 2011, combines -compatibility with the latest PostgreSQL and Java versions with modernized -build and installation procedures, automatic generation of SQL deployment -code from Java annotations, and many significant fixes. - -$h3 Security - -Several security issues are addressed in this release. Sites already -using PL/Java are encouraged to update to 1.5.0. For several of the -issues below, practical measures are described to mitigate risk until -an update can be completed. - -[CVE-2016-0766][], a privilege escalation requiring an authenticated -PostgreSQL connection, is closed by installing PL/Java 1.5.0 (including -prereleases) or by updating PostgreSQL itself to at least 9.5.1, 9.4.6, -9.3.11, 9.2.15, 9.1.20. Vulnerable systems are only those running both -an older PL/Java and an older PostgreSQL. - -[CVE-2016-0767][], in which an authenticated PostgreSQL user with USAGE -permission on the `public` schema may alter the `public` schema classpath, -is closed by release 1.5.0 (including prereleases). If updating to 1.5.0 -must be delayed, risk can be mitigated by revoking public `EXECUTE` permission -on `sqlj.set_classpath` and granting it selectively to responsible users or -roles. 
- -This release brings a policy change to a more secure-by-default posture, -where the ability to create functions in `LANGUAGE java` is no longer -automatically granted to `public`, but can be selectively granted to roles -that will have that responsibility. The change reduces exposure to a known -issue present in 1.5.0 and earlier versions, that will be closed in a future -release ([CVE-2016-0768][], see **large objects, access control** below). - -The new policy will be applied in a new installation; permissions will not -be changed in an upgrade, but any site can move to this policy, even before -updating to 1.5.0, with `REVOKE USAGE ON LANGUAGE java FROM public;` followed by -explicit `GRANT` commands for the users/roles expected to create Java -functions. - -[CVE-2016-2192][], in which an authenticated user can alter type mappings -without owning the types involved. Exploitability is limited by other -permissions, but if type mapping is a feature being used at a site, one -can interfere with proper operation of code that relies on it. A mitigation -is simply to `REVOKE EXECUTE ... FROM PUBLIC` on the `sqlj.add_type_mapping` -and `sqlj.drop_type_mapping` functions, and grant the privilege only to -selected users or roles. As of 1.5.0, these functions require the invoker -to be superuser or own the type being mapped. - -[CVE-2016-0766]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0766 -[CVE-2016-0767]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0767 -[CVE-2016-0768]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-0768 -[CVE-2016-2192]: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-2192 +The value `ignore` offers a previously-unavailable behavior where an XML +document with a DTD can be successfully parsed but with its DTD ignored. A new +method `ignoreDTD()` is added to the `Adjusting.XML` API to request this +treatment, and will only succeed on Java 22 or later. The last-invoked of this +method and `allowDTD(boolean)` will govern. + +In Java 22, bug [JDK-8329295][] can cause parsing to fail when `ignoreDTD` is in +effect, if the document has only a minimal DTD and the SAX or DOM API is used. + +$h4 Build system + +The build logic that is implemented in JavaScript is now executed using the +Nashorn engine, either included with Java through release 14, or downloaded +by Maven for Java 15 and later. The build system was formerly downloading the +JavaScript engine from GraalVM to build on Java 15 and later, but a new version +of that engine needed for Java 22 would have complicated version management. + +Versions of some Maven plugins used at build time +[have been updated](${ghpull}468) where critical vulnerabilities were reported. + +$h3 Enhancement requests addressed + +* [Build on FreeBSD](${ghpull}478) +* [Vulnerable Maven plugins used at build time](${ghbug}449) + +$h3 Bugs fixed + +* ["PostgreSQL backend function after an elog(ERROR)" in class loading](${ghbug}471) +* [XML parsing errors reported as XX000 when DOM API is used](${ghbug}481) + +$h3 Credits + +Thanks in release 1.6.7 to Francisco Miguel Biete Banon, Bear Giles, Achilleas +Mantzios, `hunterpayne`, `kamillo`. + +[JDK-8329295]: https://bugs.openjdk.org/browse/JDK-8329295 + +$h2 PL/Java 1.6.6 (19 September 2023) + +This is the sixth minor update in the PL/Java 1.6 series. It adds support +for PostgreSQL 16 and confirms compatibility with Java 21, and fixes some bugs, +with few other notable changes. Further information on the changes may be found +below. 
$h3 Version compatibility -PL/Java 1.5.0 can be built against recent PostgreSQL versions including 9.5, -using Java SE 8, 7, or 6. See [version compatibility][versions] for more -detail. OpenJDK is well supported. Support for GCJ has been dropped; features -of modern Java VMs that are useful to minimize footprint and startup time, -such as class-data sharing, are now more deeply covered -[in the installation docs][vmopts]. +PL/Java 1.6.6 can be built against recent PostgreSQL versions including 16, and +older ones back to 9.5, using Java SE 9 or later. The Java version used at +runtime does not have to be the same version used for building. PL/Java itself +can run on any Java version 9 or later. PL/Java functions can be +written for, and use features of, whatever Java version will be loaded at run +time. See [version compatibility][versions] for more detail. -[versions]: build/versions.html -[vmopts]: install/vmoptions.html - -$h3 Build procedures - -Since 2013, PL/Java has been hosted [on GitHub][ghpljava] and built -using [Apache Maven][mvn]. See the new [build instructions][bld] for details. - -Reported build issues for specific platforms have been resolved, -with new platform-specific build documentation -for [OS X][osxbld], [Solaris][solbld], [Ubuntu][ububld], -[Windows MSVC][msvcbld], and [Windows MinGW-w64][mgwbld]. - -The build produces a redistributable installation archive usable with -the version of PostgreSQL built against and the same operating system, -architecture, and linker. The type of archive is `jar` on all platforms, as -all PL/Java installations will have Java available. - -[ghpljava]: https://github.com/tada/pljava -[mvn]: http://maven.apache.org/ -[bld]: build/build.html -[msvcbld]: build/buildmsvc.html -[mgwbld]: build/mingw64.html -[osxbld]: build/macosx.html -[solbld]: build/solaris.html -[ububld]: build/ubuntu.html - -$h3 Installation procedures - -The jar produced by the build is executable and will self-extract, -consulting `pg_config` on the destination system to find the correct -default locations for the extracted files. Any location can be overridden. -(Enhancement requests [6][gh6], [9][gh9]) - -PL/Java now uses a PostgreSQL configuration variable, `pljava.libjvm_location`, -to find the Java runtime to use, eliminating the past need for highly -platform-specific tricks like link-time options or runtime-loader configuration -just so that PL/Java could find Java. PostgreSQL configuration variables are -now the only form of configuration needed for PL/Java, and the `libjvm_location` -should be the only setting needed if file locations have not been overridden. - -In PostgreSQL 9.1 and later, PL/Java can be installed with -`CREATE EXTENSION pljava`. Regardless of PostgreSQL version, installation -has been simplified. Former procedures involving `Deployer` or `install.sql` -are no longer required. Details are in the [new installation instructions][ins]. - -$h4 Schema migration - -The tables used internally by PL/Java have changed. If PL/Java 1.5.0 is -loaded in a database with an existing `sqlj` schema populated by an earlier -PL/Java version (1.3.0 or later), the structure will be updated without data -loss (enhancement request [12][gh12]). *Remember that PL/Java runs independently -in each database session where it is in use. Older PL/Java versions active in -other sessions can be disrupted by the schema change.* - -A trial installation of PL/Java 1.5.0 can be done in a transaction, and -rolled back if desired, leaving the schema as it was. 
Any concurrent sessions -with active older PL/Java versions will not be disrupted by the altered schema -as long as the transaction remains open, *but they may block for the duration, -so such a test transaction should be kept short*. - -[ins]: install/install.html +Some builds of Java 20 are affected by a bug, [JDK-8309515][]. PL/Java will +report an error if detects it is affected by that bug, and the solution can be +to use a Java version earlier than 20, or one recent enough to have the bug +fixed. The bug has been fixed in Java 21. + +PL/Java 1.6.6 will definitely no longer build on PostgreSQL versions older +than 9.5. It has made no attempt to support them since 1.6.0, and lingering +conditional code for older versions has now been removed. $h3 Changes -$h4 Behavior of `readSQL` and `writeSQL` for base and mirror user-defined types - -In the course of fixing [issue #98][gh98], the actual behavior of -`readSQL` and `writeSQL` with base or mirror types, which had not -previously been documented, [now is](develop/coercion.html), along with -other details of PL/Java's type coercion rules found only in the code. -Because machine byte order was responsible for issue #98, it now (a) is -selectable, and (b) has different, appropriate, defaults for mirror UDTs -(which need to match PostgreSQL's order) and for base UDTs (which must -stay big-endian because of how binary `COPY` is specified). -A [new documentation section](use/byteorder.html) explains in detail. - -$h4 `USAGE` to `PUBLIC` no longer default for `java` language - -Of the two languages installed by PL/Java, functions that declare -`LANGUAGE javau` can be created only by superusers, while those that -declare `LANGUAGE java` can be created by any user or role granted the -`USAGE` privilege on the language. - -In the past, the language `java` has been created with PostgreSQL's -default permission granting `USAGE` to `PUBLIC`, but PL/Java 1.5.0 -leaves the permission to be explicitly granted to those users or roles -expected to create Java functions, in keeping with least-privilege -principles. See **large objects, access control** under **known issues** -for background. - -$h4 SQL generated by Java annotations - -Java code developed for use by PL/Java can carry in-code annotations, -used by the Java compiler to generate the SQL commands to declare the -new functions, types, triggers, etc. in PostgreSQL (enhancement request -[1011112][], though different in implementation). This eliminates the need -to have Java code and the corresponding SQL commands developed in parallel, -and the class of errors possible when both are not updated together. It -also allows compile-time checks that the Java methods or classes being -annotated are suitable (correct access modifiers, signatures, etc.) -for their declared SQL purposes, rather than discovering -such issues only upon loading the code into PostgreSQL and trying to use it. - -The Java compiler writes the generated SQL into a "deployment descriptor" -file (`pljava.ddr` by default), as specified by the SQL/JRT standard. The -file can be included in a `jar` archive with the compiled code, and the -commands will be executed by PL/Java when the `install_jar` function is -used to load the jar. - -SQL generation is covered in the [updated user documentation][user], -and illustrated in the [Hello, World example][hello] and -[several other supplied examples][exanno]. Reference information -is [in the API documentation][apianno]. 
It is currently usable to declare -functions, triggers, and user-defined types, both base and composite. - -[user]: use/use.html -[hello]: use/hello.html -[exanno]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation -[apianno]: pljava-api/apidocs/index.html?org/postgresql/pljava/annotation/package-summary.html#package_description - -The history of this feature in PL/Java is long, with the first related commits -appearing in 2005, six years in advance of an enhancement request for it. -It became generally usable in 2013 when building with -Java SE 6 or later, using the annotation processing framework Java introduced -in that release. 1.5.0 is the first PL/Java numbered release to feature it. - -$h5 Annotation keyword changes - -If you have been using the SQL generation feature in prerelease `git` builds of -2013 or later, be aware that some annotation keywords have changed in finalizing -the 1.5.0 release. Java code that was compiled using the earlier keywords will -continue to work, but will have to be updated before it can be recompiled. - -* For functions: `effects=(VOLATILE,STABLE,IMMUTABLE)` was formerly `type=` -* For functions: `type=` (_an explicit SQL return type for the function_) - was formerly `complexType=` -* For functions: `trust=(SANDBOXED,UNSANDBOXED)` was formerly - `(RESTRICTED,UNRESTRICTED)` -* For triggers: `called=(BEFORE,AFTER,INSTEAD_OF)` was formerly `when=` - and conflicted with the `WHEN` clause introduced for triggers - in PostgreSQL 9.0. - -$h4 A jar may have more than one deployment descriptor - -PL/Java formerly allowed only one entry in a jar to be a deployment -descriptor (that is, a file of SQL commands to be executed upon loading -or unloading the jar). The SQL/JRT standard allows multiple entries to -be deployment descriptors, executed in the order they are mentioned -_in the jar manifest_, or the reverse of that order when the jar is -being unloaded. PL/Java now conforms to the standard. - -The behavior is useful during transition to annotation-driven deployment -descriptor generation for a project that already has a manually-maintained -deployment descriptor. PL/Java's own `pljava-examples` project is an -illustration, in the midst of such a transition itself. - -Note the significance placed by SQL/JRT on the order of entries in a jar -manifest, whose order is normally _not_ significant according to the Jar File -Specification. Care can be needed when manipulating manifests with automated -tools that may not preserve order. - -$h4 Conditional execution within deployment descriptors - -Deployment descriptors have a primitive conditional-execution provision -defined in the SQL/JRT standard: commands wrapped in a -`BEGIN IMPLEMENTOR ` _identifier_ construct will only be executed if the -_identifier_ is recognized by the SQL/JRT implementation in use. The design -makes possible jars that can be installed on different database systems that -provide SQL/JRT features, with database-specific commands wrapped in -`BEGIN IMPLEMENTOR` blocks with an _identifier_ specific to the system. -By default, PL/Java recognizes the _identifier_ `postgresql` (matched without -regard to case). - -PL/Java extends the standard by allowing the PostgreSQL configuration -variable `pljava.implementors` to contain a list of identifiers that will -be recognized. 
SQL code in a deployment descriptor can conditionally add -or remove identifiers in this list to influence which subsequent implementor -blocks will be executed, giving a still-primitive but more general control -structure. - -In sufficiently recent PostgreSQL versions, the same effect could be -achieved using `DO` statements and PL/pgSQL control structures, but this -facility in PL/Java does not require either to be available. - -$h4 Interaction with `SET ROLE` corrected - -PL/Java formerly was aware of the user ID associated with the running -session, but not any role ID that user may have acquired with `SET ROLE`. -The result would commonly be failed permission checks made by PL/Java when -the session user did not have the needed permission, but had `SET ROLE` to -a role that did. Likewise, within `install_jar`, PL/Java would execute -deployment descriptor commands as the original session user rather than -as the user's current role, with permission failures a likely result. - -Correcting this issue has changed the PL/Java API, but without a bump -of major version because the prior API, while deprecated, is still available. - -* [`getOuterUserName`][goun] and [`executeAsOuterUser`][eaou] are new, and - correctly refer to the session user or current role, when active. -* [`getSessionUserName`][gsun] and [`executeAsSessionUser`][easu] are still - present but deprecated, and _their semantics are changed_. They are now - deprecated aliases for the corresponding new methods, which honor the - set role. Use cases that genuinely need to refer only to the _session_ user - and ignore the role should be rare, and should be discussed on the mailing - list or opened as issues. - -#set($sessapi = 'pljava-api/apidocs/index.html?org/postgresql/pljava/Session.html#') - -[goun]: ${sessapi}getOuterUserName() -[eaou]: ${sessapi}executeAsOuterUser(java.sql.Connection,java.lang.String) -[gsun]: ${sessapi}getSessionUserName() -[easu]: ${sessapi}executeAsSessionUser(java.sql.Connection,java.lang.String) - -$h4 Unicode transparency - -Since the resolution of [bug 21][gh21], PL/Java contains a regression test -to ensure that character strings passed and returned between PostgreSQL and -Java will round-trip without alteration for the full range of Unicode -characters, _when the database encoding is set to `UTF8`_. - -More considerations apply when the database encoding is anything other -than `UTF8`, and especially when it is `SQL_ASCII`. Please see -[character encoding support][charsets] for more. +$h4 Changes in XML support -[charsets]: use/charsets.html +$h5 Java 17's standardized XML feature and property names added + +Java 17 added standardized, easy-to-remember names for a number of features and +properties the underlying XML implementations had formerly supported under +implementation-specific names. PL/Java's `Adjusting.XML` API already needed to +know those various other names, to attempt using them to configure the desired +features and properties. Now it tries the new standard names too. + +$h5 Better control when a feature or property can't be set as intended + +The original documentation for the feature and property setters in the +`Adjusting.XML` API said "the adjusting methods are best-effort and do not +provide an indication of whether the requested adjustment was made". (At the +same time, failures could produce voluminous output to the log.) + +The new [`lax(boolean)`][adjlax] method offers more control. 
If not used, +adjustment failures are logged (as before, but more compactly in the case of +multiple failures in one sequence of adjustments). Or, `lax(true)` can be used +to silently discard any failures up to that point in a sequence of adjustments, +or `lax(false)` to have the exceptions chained together and thrown. + +The addition of the new Java 17 standardized names can complicate +version-agnostic configuration of other elements in the Java XML APIs, such as +`Transformer`, that are not directly covered by PL/Java's +`Adjusting.XML.Parsing` methods. Client code may find the new `Adjusting.XML` +method [`setFirstSupported`][adjsfs] convenient for that purpose; +[an example][egsfs] illustrates. + +$h4 Packaging / testing + +$h5 Support choice of `PGJDBC` or `pgjdbc-ng` in `Node` + +The package jar produced as the last step of the build includes a +[test harness](develop/node.html) similar to the `PostgresNode` Perl module. +It formerly worked only with the `pgjdbc-ng` driver. Now it works with either +`PGJDBC` or `pgjdbc-ng`, and provides features for writing test scripts that +do not depend on the driver chosen. + +$h4 Source code + +* Minor changes to support PostgreSQL 16 +* Conditional code supporting PostgreSQL versions older than 9.5 removed +* Old non-HTML5 elements (rejected by Javadoc 17 and later) removed from + doc comments +* The `pureNonVirtualCalled` method removed to quiet warnings from recent + C compilers +* Schema qualification in embedded SQL added to two operators that had been + overlooked in the earlier round of adding such qualification +$h3 Bugs fixed + +* [`NEWLINE` pattern can fail to match](${ghbug}455) + +[adjlax]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/Adjusting.XML.Parsing.html#method-summary +[adjsfs]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/Adjusting.XML.html#method-detail +[egsfs]: https://github.com/tada/pljava/blob/V1_6_6/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/PassXML.java#L528 + +$h2 PL/Java 1.6.5 (13 June 2023) + +This is the fifth minor update in the PL/Java 1.6 series. It adds support +for PostgreSQL 15 and fixes some bugs, with few other notable changes. Further +information on the changes may be found below. + +$h3 Version compatibility + +PL/Java 1.6.5 can be built against recent PostgreSQL versions including 15, and +older ones back to 9.5, using Java SE 9 or later. The Java version used at +runtime does not have to be the same version used for building. PL/Java itself +can run on any Java version 9 or later. PL/Java functions can be +written for, and use features of, whatever Java version will be loaded at run +time. See [version compatibility][versions] for more detail. + +Some builds of Java 20 are affected by a bug, [JDK-8309515][]. PL/Java will +report an error if detects it is affected by that bug, and the solution can be +to use a Java version earlier than 20, or one recent enough to have the bug +fixed. + +$h3 Changes + +$h4 Changes affecting administration + +$h5 Bugs affecting `install_jar` from http/https URLs fixed + +CI testing now makes sure that http URLs work and the appropriate +`java.net.URLPermission` can be granted in `pljava.policy` where the comments +indicate. + +$h4 Improvements to the annotation-driven SQL generator + +$h5 PL/Java functions can be declared on interfaces as well as classes + +The SQL/JRT specification has always only said 'class', but it could be +debated whether 'class' was intended strictly or inclusively. 
As there is +no technical obstacle to using static methods declared on an interface, +and PL/Java's runtime already could do so, the SQL generator no longer +disallows `@Function` annotations on them. + +$h5 SQL generator reports compatibility with a more recent Java source version + +Because PL/Java 1.6 retains compatibility for building on Java versions +back to 9, the SQL generator previously reported 9 as the source version +supported. This produced warnings building user code to target a later version +of Java, which were only an issue for sites using a fail-on-warning policy. + +The SQL generator now reports its supported source version as the earlier of: +the Java version being used, or the latest Java version on which it has been +successfully tested. In this release, that is Java 20. + +$h4 Improvements to documentation + +$h5 Use of `--add-modules` to access Java modules not read by default, explained + +By default, PL/Java starts up with a fairly small set of Java modules readable. +The documentation did not explain the use of `--add-modules` in +`pljava.vmoptions` to expand that set when user code will refer to other +modules. That is [now documented][addm]. $h3 Enhancement requests addressed -* [Use Annotations instead of DDL Manifest][1011112] -* [Installation of pljava on postgresql servers][gh9] -* [Find an alternative way to install the pljava.so in `/usr/lib`][gh6] -* [Provide database migration][gh12] -* [Support types with type modifiers][1011140] (partial: see [example][typmex]) -* [Build process: accommodate Solaris 10][gh102] +* [Allow functions from an interface](${ghbug}426) + +$h3 Bugs fixed + +* [Crash on startup with `SQL_ASCII` database and bad `vmoptions`](${ghbug}416) +* [Installed by `LOAD` then packaged as extension broken in recent PostgreSQL updates](${ghbug}434) +* [Java 20 breaks `LexicalsTest.testSeparator`](${ghbug}435) +* [Not found JDK 11 `java.net.http.HttpClient`](${ghbug}419) (documentation added) +* [`SocketPermission` on `install_jar`](${ghbug}425) +* [The timer in `_destroyJavaVM` does not take effect](${ghbug}407) +* PostgreSQL 15 support [410](${ghbug}410), [412](${ghbug}412) +* [Cannot specify java release other than '9' for `maven-compiler-plugin`](${ghbug}403) +* [Fail validation if function declares `TRANSFORM FOR TYPE`](${ghbug}402) +* ["cannot parse AS string" for 1-letter identifiers](${ghbug}438) + +$h3 Credits + +Thanks in release 1.6.5 to Francisco Miguel Biete Banon, Christoph Berg, Frank +Blanning, Stephen Frost, Casey Lai, Krzysztof Nienartowicz, Yuril Rashkovskii, +Tim Van Holder, `aadrian`, `sincatter`, `tayalrun1`. + +[addm]: install/vmoptions.html#Adding_to_the_set_of_readable_modules +[versions]: build/versions.html +[JDK-8309515]: https://bugs.openjdk.org/browse/JDK-8309515 + +$h2 PL/Java 1.6.4 (19 January 2022) + +This is the fourth minor update in the PL/Java 1.6 series. It is a minor +bug-fix release with few other notable changes. Further information +on the changes may be found below. + +$h3 Changes + +$h4 Changes affecting administration + +$h5 JEP 411 advisory message downgraded + +PL/Java follows certain administrative actions with an advisory message +about upcoming Java changes that will affect policy enforcement. In 1.6.3, +that message was always at `WARNING` level. In 1.6.4, it is downgraded +to `NOTICE` level when running on Java versions before 17. + +As before, updates on how PL/Java will adapt to the future Java changes +can be watched by bookmarking [the JEP 411 topic][jep411] on the PL/Java wiki. 
+ +$h4 Changes to runtime behavior + +$h5 Java's thread context class loader + +Starting in 1.6.3, PL/Java has made sure the thread context class loader +on entry to a function reflects the initiating loader for the implementing +class, but in 1.6.3, the loader could be set too many times when executing +a set-returning function. That could lead to the wrong loader being restored +on function exit, likely only noticeable in an application with jars in +multiple schemas and with nested Java function invocations. + +That is fixed in this release. + +$h4 Improvements to the annotation-driven SQL generator + +$h5 Correct SQL now emitted for a function with _one_ `OUT` parameter + +PostgreSQL requires a different form of declaration for a function with +one `OUT` parameter than for a function with two or more. PL/Java was +emitting the wrong form for the one-out-parameter case. + +The documentation has been clarified and new [examples][exoneout] added +to illustrate how Java methods should be structured and annotated for +both cases. + +The intended treatment can be ambiguous for some Java methods, without +explicit annotations. For example, a Java method that returns `boolean` +and has a trailing `ResultSet` parameter could be a single-row +composite-returning function, or an ordinary function with a row-typed +input parameter and `boolean` return. PL/Java has always silently chosen +which interpretation to apply in such cases, and still does, but it now +emits a comment into the generated deployment descriptor, explaining +what annotation to use if a different meaning was intended. + +$h5 `BaseUDT` annotation now has constants for known type categories + +PostgreSQL user-defined types can be declared in categories, some of which +are predefined. PL/Java's `BaseUDT` annotation now includes constants for +those, which can be used in place of the single-letter codes for readability. + +$h3 Bugs fixed + +* [Wrong SQL generated for function with one `OUT` parameter](${ghbug}386) +* [`Session` object not reliably singleton](${ghbug}388) +* [Set-returning function has context classloader set too many times](${ghbug}389) +* [`java.time.LocalDate` mismapping within 30 years of +/-infinity](${ghbug}390) + +[exoneout]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/ReturnComposite.html#method-summary + +$h2 PL/Java 1.6.3 (10 October 2021) -[1011112]: ${pgffeat}1011112 -[1011140]: ${pgffeat}1011140 -[gh9]: ${ghbug}9 -[gh6]: ${ghbug}6 -[gh12]: ${ghbug}12 -[gh102]: ${ghbug}102 +This is the third minor update in the PL/Java 1.6 series. It adds support +for PostgreSQL 14, continues to improve the runtime behavior and +the annotation-driven SQL generator, and fixes several bugs. Further information +on the changes may be found below. -[typmex]: $project.scm.url/pljava-examples/src/main/java/org/postgresql/pljava/example/annotation/IntWithMod.java +$h3 PL/Java with Java 17 and later: JEP 411 + +Current versions of PL/Java rely on Java security features that will be affected +by JEP 411, beginning with Java 17. Java 17 itself will continue to provide the +needed capabilities, with only deprecation marks and warnings added. Java 17 is +also positioned as a long-term support release, so the option of continuing to +run this PL/Java release will be available, with no loss of function, +by continuing to run with Java versions up to and including 17. + +For more on how PL/Java will adapt, please bookmark [the JEP 411 topic][jep411] +on the PL/Java wiki. 
+ +For this release, PL/Java will suppress a JEP 411-related warning from the +Java runtime itself that would otherwise be emitted for every PostgreSQL backend +that starts Java, and instead will issue a more informative "migration advisory" +warning only on certain administrative actions no more than once per session, +with a goal of ensuring that responsible administrators are aware of the +expected future developments. The advisory message includes the URL to +the wiki topic above. + +This release also adds code to detect if it is on a future, post-Java 17 runtime +where the needed functionality is not available, and throw informative +exceptions with suggested corrective actions. + +[jep411]: https://github.com/tada/pljava/wiki/JEP-411 + +$h3 Changes + +$h4 Changes affecting administration + +$h5 New function now created on update + +The new [`SQLJ.ALIAS_JAVA_LANGUAGE`][sqljajl] function introduced with 1.6 will now +be present after an `ALTER EXTENSION UPDATE` if it was not before. + +$h5 Message if the Java runtime ends the backend process during startup + +The Java runtime can behave antisocially for some startup issues, such +as a misspelled jar in `pljava.module_path`, and silently terminate the +backend process rather than reporting an error. (It writes a message, not +to standard error, but to standard output, which from a PostgreSQL backend +is not captured for logging, and may never be seen.) A message is now generated +in that case, to provide at least some clue what has gone wrong. + +[sqljajl]: pljava/apidocs/org.postgresql.pljava.internal/org/postgresql/pljava/management/Commands.html#alias_java_language + +$h4 Changes to runtime behavior + +$h5 Java's thread context class loader + +PL/Java now supplies a known value for the current Java thread's context +class loader on entry to a PL/Java function. It is the class loader for the +PostgreSQL schema where the function is declared. The context class loader is +referred to by numerous Java libraries and by Java's `ServiceLoader` +class, which may expect to find services and resources along the PL/Java class +path that has been configured for the function. Neglecting to set the context +class loader opened the door to unexpected loading failures like +[issue #361](${ghbug}361) that can require awkward or slow contortions +to work around in user Java code. + +This change is [documented in more detail](develop/contextloader.html). +An opt-out setting is available. + +$h4 Improvements to the annotation-driven SQL generator + +$h5 Now able to declare function parameters that default to null + +It has been possible to annotate a function parameter with +[`@SQLType(defaultValue=...)`][sqlt] to give it any non-null default value, +but because Java disallows null annotation values, that notation wasn't +usable to declare a parameter that defaults to null. The new notation +[`@SQLType(optional=true)`][sqlt] means exactly that. + +$h5 Fixes a bug in synthesis of `COMMUTATOR`/`NEGATOR` operators + +It is now possible to code one method in Java and declare a full +complement of operators based on it by combining commutation and negation. +A [new example][egsyn4] is provided. 
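+
+As a minimal sketch of the `@SQLType(optional=true)` notation described above
+(the class and method names here are invented for illustration; only the
+annotations are PL/Java API), a parameter defaulting to null can be declared
+like this:
+
+```java
+import org.postgresql.pljava.annotation.Function;
+import org.postgresql.pljava.annotation.SQLType;
+
+public class Greetings
+{
+    /*
+     * optional=true gives the parameter a default of null, so SQL can call
+     * greet() with no argument at all (or with an explicit NULL).
+     */
+    @Function
+    public static String greet(@SQLType(optional=true) String toWhom)
+    {
+        return "Hello, " + (null == toWhom ? "world" : toWhom) + "!";
+    }
+}
+```
+
+Called as `greet()` or `greet(NULL)`, the Java method simply sees a null
+`toWhom`.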
+ +[egsyn4]: https://github.com/tada/pljava/commit/6bd5aa0 +[sqlt]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/SQLType.html $h3 Bugs fixed -$h4 Since 1.5.0-BETA3 - -* [Build process: accept variation in PostgreSQL version string][gh101] -* [Build process: accommodate PostgreSQL built with private libraries][gh103] -* Clarified message when `CREATE EXTENSION` fails because new session needed -* Reduced stack usage in SQL generator - (small-memory build no longer needs `-Xss`) - -$h4 In 1.5.0-BETA3 - -* [Bogus mirror-UDT values on little-endian hardware][gh98] -* [Base UDT not registered if first access isn't in/out/send/recv][gh99] -* `TupleDesc` leak warnings with composite UDTs -* also added regression test from [1010962][] report - -$h4 In 1.5.0-BETA2 - -* [Generate SQL for trigger function with no parameters][gh92] -* [openssl/ssl.h needed on osx el-capitan (latest 10.11.3)/postgres 9.5][gh94] - (documented) -* [Source location missing for some annotation errors][gh95] -* [OS X El Capitan "Java 6" dialog when loading ... Java 8][gh96] -* pljava-api jar missing from installation jar - -$h4 In 1.5.0-BETA1 - -* [SPIPreparedStatement.setObject() fails with Types.BIT][1011119] -* [SSLSocketFactory throws IOException on Linux][1011095] -* [PL/Java fails to compile with -Werror=format-security][1011181] -* [PL/Java does not build on POWER 7][1011197] -* [The built in functions do not use the correct error codes][1011206] -* [TupleDesc reference leak][1010962] -* [String conversion to enum fails][gh4] -* [segfault if SETOF RECORD-returning function used without AS at callsite][gh7] -* [pl/java PG9.3 Issue][gh17] -* [No-arg functions unusable: "To many parameters - expected 0"][gh8] -* [Exceptions in static initializers are masked][gh54] -* [UDT in varlena form breaks if length > 32767][gh52] -* [PL/Java kills unicode?][gh21] -* [Type.c expects pre-8.3 find_coercion_pathway behavior][gh65] -* [Support PostgreSQL 9.5][gh48] -* [pl/java getting a build on MacOSX - PostgreSQL 9.3.2][gh22] -* [build pljava on windows for PostgreSQL 9.2][gh23] -* [Error while installing PL/Java with Postgresql 9.3.4 64 bit on Windows 7 64 bit System][gh28] -* [pljava does not compile on mac osx ver 10.11.1 and postgres 9.4][gh63] -* [pljava does not compile on centos 6.5 and postgres 9.4][gh64] -* [Error installing pljava with Windows 7 64 Bit and Postgres 9.4][gh71] -## JNI_getIntArrayRegion instead of JNI_getShortArrayRegion -## Eclipse IDE artifacts -## Site -## Warnings -## Javadoc - -[1011119]: ${pgfbug}1011119 -[1011095]: ${pgfbug}1011095 -[1011181]: ${pgfbug}1011181 -[1011197]: ${pgfbug}1011197 -[1011206]: ${pgfbug}1011206 -[1010962]: ${pgfbug}1010962 -[gh4]: ${ghbug}4 -[gh7]: ${ghbug}7 -[gh8]: ${ghbug}8 -[gh17]: ${ghbug}17 -[gh54]: ${ghbug}54 -[gh52]: ${ghbug}52 -[gh21]: ${ghbug}21 -[gh65]: ${ghbug}65 -[gh48]: ${ghbug}48 -[gh22]: ${ghbug}22 -[gh23]: ${ghbug}23 -[gh28]: ${ghbug}28 -[gh63]: ${ghbug}63 -[gh64]: ${ghbug}64 -[gh71]: ${ghbug}71 -[gh92]: ${ghbug}92 -[gh94]: ${ghbug}94 -[gh95]: ${ghbug}95 -[gh96]: ${ghbug}96 -[gh98]: ${ghbug}98 -[gh99]: ${ghbug}99 -[gh101]: ${ghbug}101 -[gh103]: ${ghbug}103 +* [`ALTER EXTENSION UPDATE` does not create `sqlj.alias_java_language`](${ghbug}341) +* [... 
fails on 4 cross-type operators by commute/negate from 1 function](${ghbug}343) +* [Build failure dependent on `pg_config` output](${ghbug}347) +* [Protection domain when validator runs initializer](${ghbug}342) +* [Improve experience when `pljava.module_path` is incorrect](${ghbug}350) +* ["Failed to recognize schema" possible during `pg_upgrade`](${ghbug}352) +* Fixes and test coverage in `Lexicals.Identifier` serialization +* [Segmentation fault during `autovacuum`](${ghbug}355) +* [`java.nio.charset.MalformedInputException: Input length = 1`](${ghbug}340) +* [Thread context class loader](${ghbug}361) +* [`MappedUDT` and types with `typlen=-2`](${ghbug}370) $h3 Updated PostgreSQL APIs tracked -Several APIs within PostgreSQL itself have been added or changed; -PL/Java now uses the current versions of these where appropriate: - -* `find_coercion_pathway` -* `set_stack_base` -* `GetOuterUserId` -* `GetUserNameFromId` -* `GetUserIdAndSecContext` -* `pg_attribute_*` -* Large objects: truncate, and 64-bit offsets - -$h3 Known issues and areas for future work - -$h4 Developments in PostgreSQL not yet covered - -Large objects, access control -: PL/Java does not yet expose PostgreSQL large objects with a documented, - stable API, and the support it does contain was developed against pre-9.0 - PostgreSQL versions, where no access control applied to large objects and - any object could be accessed by any database user. PL/Java's behavior is - proper for PostgreSQL before 9.0, but improper on 9.0+ where it would be - expected to honor access controls on large objects ([CVE-2016-0768][]). - This will be corrected in a future release. For this and earlier releases, - the recommendation is to selectively grant `USAGE` on the `java` language to - specific users or roles responsible for creating Java functions; see - "default `USAGE` permssion" under Changes. - -`INSTEAD OF` triggers, triggers on `TRUNCATE` -: These are supported by annotations and the SQL generator, and the runtime - will deliver them to the specified method, but the `TriggerData` interface - has no new methods to recognize these cases (that is, no added - methods analogous to `isFiredAfter`, `isFiredByDelete`). For a method - expressly coded to be a `TRUNCATE` trigger or an `INSTEAD OF` trigger, - that is not a problem, but care should be taken when coding a trigger - method to handle more than one type of trigger, or creating triggers of - these new types that call a method developed pre-PL/Java-1.5.0. Such a - method could be called with a `TriggerData` argument whose existing - `isFired...` methods all return `false`, likely to put the method on an - unexpected code path. - - A later PL/Java version should introduce trigger interfaces that better - support such evolution of PostgreSQL in a type-safe way. - -Constraint triggers -: Constraint trigger syntax is not supported by annotations and the SQL - generator. If declared (using hand-written SQL), they will be delivered - by the runtime, but without any constraint-trigger-specific information - available to the called method. - -Event triggers -: Event triggers are not yet supported by annotations or the SQL generator, - and will not be delivered by the PL/Java runtime. - -Range types -: No predefined mappings for range types are provided. 
- -`PRE_PREPARE`, `PRE_COMMIT`, `PARALLEL_ABORT`, `PARALLEL_PRE_COMMIT`, and `PARALLEL_COMMIT` transaction callbacks, `PRE_COMMIT` subtransaction callbacks -: Listeners for these events cannot be registered and the events will not - be delivered. - -$h4 Imperfect integration with PostgreSQL dependency tracking - -In a dump/restore, manual intervention can be needed if the users/roles -recorded as owners of jars are missing or have been renamed. A current -[thread on `pgsql-hackers`][ownhack] should yield a better solution for -a future release. - -[ownhack]: http://www.postgresql.org/message-id/56783412.6090005@anastigmatix.net - -$h4 Quirk if deployment descriptor loads classes from same jar - -The `install_jar` function installs a jar, optionally reading deployment -descriptors from the jar and executing the install actions they contain. -It is possible for those actions to load classes from the jar just installed. -(This would be unlikely if the install actions are limited to typical setup, -function/operator/datatype creation, but likely, if the install actions also -include basic function tests, or if the setup requirements are more -interesting.) - -If, for any class in the jar, the first attempt to load that class is made -while resolving a function declared `STABLE` or `IMMUTABLE`, a -`ClassNotFoundException` results. The cause is PostgreSQL's normal treatment of -a `STABLE` or `IMMUTABLE` function, which relies on a snapshot from the start of -the `install_jar` query, when the jar was not yet installed. A workaround is to -ensure that the install actions cause each needed class to be loaded, such as -by calling a `VOLATILE` function it supplies, before calling one that is -`STABLE` or `IMMUTABLE`. (One could even write install actions to declare a -needed function `VOLATILE` before the first call and then redeclare it.) - -This issue should be resolved as part of a broader rework of class loading -in a future PL/Java release. - -$h4 Partial implementation of JDBC 4 and later - -The changes to make PL/Java build under Java SE 6 and later, with version 4.0 -and later of JDBC, involved providing the specified methods so -compilation would succeed, with real implementations for some, but for others -only stub versions that throw `SQLFeatureNotSupportedException` if used. -Regrettably, there is nothing in the documentation indicating which methods -have real implementations and which do not; to create such a list would require -an audit of that code. If a method throws the exception when you call it, it's -one of the unimplemented ones. - -Individual methods may be fleshed out with implementations as use cases arise -that demand them, but for a long-term roadmap, it seems more promising to -reduce the overhead of maintaining another JDBC implementation by sharing -code with `pgjdbc`, as has been [discussed on pljava-dev][jdbcinherit]. - -[jdbcinherit]: http://lists.pgfoundry.org/pipermail/pljava-dev/2015/002370.html - -$h4 Exception handling and logging - -PL/Java does interconvert between PostgreSQL and Java exceptions, but with -some loss of fidelity in the two directions. PL/Java code has some access -to most fields of a PostgreSQL error data structure, but only through -internal PL/Java API that is not expected to remain usable, and code written -for PL/Java has never quite had first-class standing in its ability to -_generate_ exceptions as information-rich as those from PostgreSQL itself. 
- -PL/Java in some cases generates the _categorized `SQLException`s_ introduced -with JDBC 4.0, and in other cases does not. - -This area may see considerable change in a future release. -[Thoughts on logging][tol] is a preview of some of the considerations. - -[tol]: https://github.com/tada/pljava/wiki/Thoughts-on-logging - -$h4 Types with type modifiers and `COPY` - -Although it is possible to create a PL/Java user-defined type that accepts -a type modifier (see the [example][typmex]), such a type will not yet be -handled by SQL `COPY` or any other operation that requires the `input` or -`receive` function to handle the modifier. This is left for a future release. +* Regularized spelling of `pg_type` OID symbols +* Changed TOAST pointer format supporting selectable TOAST compression methods +* Demise of `ErrorData.show_funcname` (it was a vestige of frontend/backend + protocol v2, long obsolete) $h3 Credits -PL/Java 1.5.0 owes its being to original creator Thomas Hallgren and -many contributors: +Thanks to Krzysztof Nienartowicz, `ricdhen`, and `JanaParthasarathy`, who +among them reported five of the issues fixed in this release. -Daniel Blanch Bataller, -Peter Brewer, -Frank Broda, -Chapman Flack, -Marty Frasier, -Bear Giles, -Christian Hammers, -Hal Hildebrand, -Robert M. Lefkowitz, -Eugenie V. Lyzenko, -Dos Moonen, -Asif Naeem, -Kenneth Olson, -Johann Oskarsson, -Thomas G. Peters, -Srivatsan Ramanujam, -Igal Sapir, -Jeff Shaw, -Rakesh Vidyadharan, -`grunjol`, -`mc-soi`. - -Periods in PL/Java's development have been sponsored by EnterpriseDB. - -In the 1.5.0 release cycle, multiple iterations of testing effort -have been generously contributed by Kilobe Systems and by Pegasystems, Inc. +$h2 PL/Java 1.6.2 -$h2 Earlier releases +This is the second minor update in the PL/Java 1.6 series, with two bugs fixed +(including one that was likely to be a blocker for many applications). It adds +a 'trial' security policy, useful when migrating code from PL/Java 1.5 to +identify permission grants that may be needed, and some minor but useful +example functionality. + +$h3 Changes + +$h4 Bug blocking use from an unprivileged PostgreSQL role fixed -$h3 PL/Java 1.4.3 (15 September 2011) +In 1.6.0 and 1.6.1, PL/Java could fail to start in a backend process if the +effective PostgreSQL role was not a superuser or a member of +`pg_read_all_settings`. That is fixed in this release. -Notable changes in this release: +$h4 Trial security policy for migrating code from PL/Java pre-1.6.0 -* Works with PostgreSQL 9.1 -* Correctly links against IBM Java. -* Reads microseconds correctly in timestamps. +When migrating code from pre-1.6.0 PL/Java versions, it may be necessary to +add permission grants to the PL/Java 1.6 security policy, which is distributed +as a minimal starting point. -Bugs fixed: +[Configuring permissions in PL/Java](use/policy.html) covers the topic in +general, and [Migrating to policy-based permissions](use/trial.html) covers +the new 'trial' policy introduced in 1.6.2 to simplify the process. 
-* [Be clear about not building with JDK 1.6][1010660] -* [Does not link with IBM VM][1010970] -* [SPIConnection.getMetaData() is incorrectly documented][1010971] -* [PL/Java 1.4.2 Does not build with x86_64-w64-mingw32][1011025] -* [PL/Java does not build with PostgreSQL 9.1][1011091] +$h4 Example functions using XSLT 1.0 polished for simpler use -Feature Requests: +Although 1.0 is obsolete and very limited compared to current versions of XSLT, +the implementation in Java has two attractive properties. First, it is +provided in the Java runtime with no need of a separate download such as Saxon, +and second, it allows use of any accessible Java methods or constructors from +XPath expressions, which can often make it more useful than the limitations +of strict XSLT 1.0 would suggest. -* [Allow pg_config to be set externally to the Makefile][1011092] -* [Add option to have pljava.so built with the runtime path of libjvm.so][1010955] +The [example functions][PassXML] `prepareXMLTransform`, +`prepareXMLTransformWithJava`, and `transformXML`, which demonstrate how such +functions can be coded, have also been given streamlined parameter lists with +defaults to make them simple to use for real work. -[1010660]: ${pgfbug}1010660 -[1010970]: ${pgfbug}1010970 -[1010971]: ${pgfbug}1010971 -[1011025]: ${pgfbug}1011025 -[1011091]: ${pgfbug}1011091 +One small but useful result is an easy way to indent XML for readability, +simply by passing a null transform name and `indent => true` to +`transformXML`. -[1011092]: ${pgffeat}1011092 -[1010955]: ${pgffeat}1010955 +$h3 Bugs fixed + +* [must be superuser or a member of `pg_read_all_settings`](${ghbug}331) +* [Operator annotation can reject explicit operand types in error](${ghbug}330) + +$h3 Credits + +Thanks to Francisco Biete for the report of [#331](${ghbug}331). + +[PassXML]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/PassXML.html#method-summary + +$h2 PL/Java 1.6.1 (16 November 2020) + +_Note: 1.6.1 was released with [a bug](${ghbug}331) likely to be a blocker +for many applications. It was fixed in 1.6.2._ + +This is the first minor update in the PL/Java 1.6 series, with two bugs fixed. +It also adds functionality in the SQL generator, allowing automated +declaration of new PostgreSQL aggregates, casts, and operators, and functions +with `OUT` parameters. + +$h3 Changes + +$h4 Limitations when built with Java 10 or 11 removed + +PL/Java can now be built with any Java 9 or later (latest tested is 15 at +time of writing), and the built artifact can use any Java 9 or later at +run time (as selected by the `pljava.libjvm_location` configuration variable). + +That was previously true when built with Java 9 or with Java 12 or later, but +not when built with 10 (would not run on 9) or with 11 (would not run on 9 +or 10). Those limits have been removed. + +$h4 Functions with `OUT` parameters + +PL/Java has long been able to declare a function that returns a composite +type (or a set of such), by returning a named composite PostgreSQL type, or +by being declared to return `RECORD`. + +The former approach requires separately declaring a new composite type to +PostgreSQL so it can be named as the function return. The `RECORD` approach +does not require pre-declaring a type, but requires every caller of the +function to supply a column definition list at the call site. 
+ +Declaring the function [with `OUT` parameters][outprm] offers a middle ground, +where the function has a fixed composite return type with known member +names and types, callers do not need to supply a column definition list, +and no separate declaration of the type is needed. + +There is no change to how such a function is coded at the Java source level; +the new annotation element only changes the SQL generated to declare the +function to PostgreSQL. [Examples][outprmeg] are provided. + +$h4 Generation of aggregate, cast, and operator declarations + +The SQL generator now recognizes [`@Aggregate`][agganno], [`@Cast`][castanno], +and [`@Operator`][opranno] annotations, generating the corresponding SQL +deploy/undeploy scripts. Some examples (for [aggregates][aggeg], +[casts][casteg], and [operators][opreg]) are provided. The reduction +in boilerplate needed for a realistically-complete example can be seen +in [this comparison][bg160161] of Bear Giles's `pljava-udt-type-extension` +example; the two branches compared here are (1) using only the annotations +supported in PL/Java 1.6.0 and (2) using also the new support in 1.6.1. + +$h3 Bugs fixed + +* [1.6.0: opening a ResourceBundle (or a resource) fails](${ghbug}322) +* [Better workaround needed for javac 10 and 11 --release bug](${ghbug}328) + +[outprm]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Function.html#annotation-interface-element-detail +[outprmeg]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/ReturnComposite.html#method-detail +[agganno]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Aggregate.html +[castanno]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Cast.html +[opranno]: pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Operator.html +[aggeg]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/Aggregates.html +[casteg]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/IntWithMod.html +[opreg]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/ComplexScalar.html +[bg160161]: https://github.com/beargiles/pljava-udt-type-extension/compare/98f1a6e...jcflack:3e56056 + +$h3 Credits + +Thanks to Bear Giles for the `pljava-udt-type-extension` example, which not only +illustrates the SQL generation improvements in this release, but also exposed +both of the bugs fixed here. -$h3 PL/Java 1.4.2 (11 December 2010) +$h2 PL/Java 1.6.0 (18 October 2020) -Bugfixes: +_Note: 1.6.0 was released with [a bug](${ghbug}331) likely to be a blocker +for many applications. It was fixed in 1.6.2._ -* [Function returning complex objects with POD arrays cause a segfault][1010956] -* [Segfault when assigning an array to ResultSet column][1010953] -* [Embedded array support in returned complex objects][1010482] +This is the first release of a significantly refactored PL/Java 1.6 branch +with a number of new features and changes. It requires Java 9 or later at +build and run time, but retains the ability to run PL/Java application code +built for earlier versions. It should be used with PostgreSQL 9.5 or later. +For applications _requiring_ an older Java or PostgreSQL version, the latest +release in the PL/Java 1.5 line remains an option. 
-[1010956]: ${pgfbug}1010956 -[1010953]: ${pgfbug}1010953 -[1010482]: ${pgfbug}1010482 +**Note to package maintainers**: these release notes should be reviewed before +an installation moves to 1.6.0 from a 1.5 or earlier version, so it is best +packaged in a way that requires an affirmative choice to upgrade. -$h3 PL/Java 1.4.1 (9 December 2010) +$h3 Version compatibility + +PL/Java 1.6.0 can be built against recent PostgreSQL versions including 13, and +older ones back to 9.5, using Java SE 9 or later. The Java version used at +runtime does not have to be the same version used for building. PL/Java itself +can run on any Java version 9 or later if built with Java 9 or with 12 or later +(bugs in the Java 10 and 11 compilers prevent running on 9 if built with 10, +or on 9 or 10 if built with 11). PL/Java functions can be +written for, and use features of, whatever Java version will be loaded at run +time. See [version compatibility][versions] for more detail. + +When used with GraalVM as the runtime VM, PL/Java functions can use Graal's +"polyglot" capabilities to execute code in any other language available on +GraalVM. In this release, it is not yet possible to directly declare a function +in a language other than Java. + +If building with GraalVM, please add `-Dpolyglot.js.nashorn-compat=true` on +the `mvn` command line. + +$h3 Changes + +$h4 New configurable permissions may require configuration + +Prior to 1.6.0, PL/Java hard-coded the permissions that were available to +functions declared in the 'trusted' language `java` or the 'untrusted' language +`javaU`. With 1.6.0, the exact permissions available for both cases can be +configured in the `pljava.policy` file (found in the directory reported by +`pg_config --sysconfdir`) as described in the +[new policy documentation][policy]. + +Java's policy language can conditionally grant permissions but not deny them +if another clause grants them. Therefore, the default policy must be somewhat +restrictive, so a desired policy can be built from it with grant clauses. + +In the 1.6.0 default policy, 'trusted' (`java`) code has minimal permissions, +suitable for general computation and interacting with the database, and +'untrusted' (`javaU`) code has only the additional permission to access the +file system. Existing user functions that worked in PL/Java 1.5.x and performed +other actions, such as making network connections, will need the appropriate +permissions (such as `java.net.URLPermission` or `java.net.SocketPermission`) +granted via the policy file. + +The policy can grant permissions more selectively than just to `java` +or `javaU`. The [new documentation][policy] covers the details, and also how +to log, for troubleshooting purposes, the permissions being requested. + +Whatever the reason, all down the years, a favorite "is PL/Java working?" check +found online has been to read a Java system property with `System.getProperty`. +Not all of those examples pick properties that can be read under the default +policy. So, even some familiar habits like that may need revision, at least to +use a property like `java.version` that is readable by default. + +The former hard-coded permissions were by turns too lax or too strict, depending +on what was needed, and interfered in some cases with the operation of the Java +runtime itself, breaking (at least) its XSLT implementation and the profiling +functions of `visualvm`. This release fixes those issues. 
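+
+As a hypothetical sketch (the class and method names are invented here), an
+"is PL/Java working?" function that stays within the default policy could look
+like:
+
+```java
+import org.postgresql.pljava.annotation.Function;
+
+public class WorkingCheck
+{
+    /*
+     * Reads a property that is readable by default; reading other
+     * properties may need a java.util.PropertyPermission granted in
+     * pljava.policy.
+     */
+    @Function
+    public static String javaVersion()
+    {
+        return System.getProperty("java.version");
+    }
+}
+```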
+ +$h4 Validation at `CREATE FUNCTION` time may force changes to deployment procedures + +PL/Java can now detect problems with a function declaration, including missing +dependencies, at the time of `CREATE FUNCTION`, rather than allowing the +function to be created and reporting failure later when it is called. -Note: Does not compile with Java 6. Use JDK 1.5 or 1.4. +This change may have an impact on some established procedures. For example, +when installing a jar that contains deployment commands, deployment may +fail if another required jar has not been installed and added to the class +path first; in the past, the order did not matter. For details, see +[this section][linkage] in the documentation for the supplied examples, +and the description of `check_function_bodies` in the +[configuration variable reference](use/variables.html). -Compiles with PostgreSQL 8.4 and 9.0. +$h4 Java 9 module system; `pljava.classpath` -> `pljava.module_path` -Connection.getCatalog() has been implemented. +Because PL/Java itself is now modular code conforming to the module system +introduced with Java 9, one configuration variable has changed: +`pljava.classpath` is now `pljava.module_path`. -Bugfixes: +As before, its default value will be correct when PL/Java is installed to +the usual locations. It should be rare for any installation to have needed +to think about the old one, or to need to think about the new one. For a +rare installation that does, the details are [in the documentation][jpms]. -* [Compiling error with postgresql 8.4.1][1010759] -* [org.postgresql.pljava.internal.Portal leak][1010712] -* [build java code with debugging if server has debugging enabled][1010189] -* [Connection.getCatalog() returns null][1010653] -* [VM crash in TransactionListener][1010462] -* [Link against wrong library when compiling amd64 code on Solaris][1010954] - -[1010759]: ${pgfbug}1010759 -[1010712]: ${pgfbug}1010712 -[1010189]: ${pgfbug}1010189 -[1010653]: ${pgfbug}1010653 -[1010462]: ${pgfbug}1010462 -[1010954]: ${pgfbug}1010954 +In this release, user code is not treated as modular; the `SQLJ.INSTALL_JAR` +routine still treats its jars as unnamed-module code on a class path, as before. -Other commits: - -For a multi-threaded pljava function we need to adjust stack_base_ptr -before calling into the backend to avoid stack depth limit exceeded -errors. Previously this was done only on query execution, but we need -to do it on iteration of the ResultSet as well. +$h4 Improvements to the annotation-driven SQL generator -When creating a variable length data type, the code was directly -assigning the varlena header length rather than going through an -access macro. The header format changed for the 8.3 release and this -manual coding was not noticed and changed accordingly. Use -SET_VARSIZE to do this correctly. +$h5 Infers additional implicit ordering dependencies -Handle passed by value data types by reading and writing directly to -the Datum rather than dereferencing it. +The SQL generator can now respect the implicit ordering constraints among +user-defined types and functions that either use the types or are used in their +definitions, which can eliminate many `provides`/`requires` annotation elements +that had to be added by hand for PL/Java 1.5. The reduction in boilerplate +needed for a realistic example can be seen by comparing the annotated version +of Bear Giles's `pljava-udt-type-extension` example at +[this commit][udtd32f84e] (pre-1.6) and [this one][udt0066a1e] (1.6.0). 
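+
+As a rough sketch of the kind of pairing involved (the `Point2D` type and
+`magnitude` function here are invented for illustration, not taken from the
+compared example), a mapped UDT and a function using it can now be written
+without explicit `provides`/`requires` elements, and the generated deployment
+descriptor still orders the type's creation ahead of the function's:
+
+```java
+import java.sql.SQLData;
+import java.sql.SQLException;
+import java.sql.SQLInput;
+import java.sql.SQLOutput;
+
+import org.postgresql.pljava.annotation.Function;
+import org.postgresql.pljava.annotation.MappedUDT;
+
+@MappedUDT(name="point2d", structure={"x float8", "y float8"})
+public class Point2D implements SQLData
+{
+    private double x;
+    private double y;
+    private String typeName;
+
+    @Override
+    public String getSQLTypeName() { return typeName; }
+
+    @Override
+    public void readSQL(SQLInput in, String typeName) throws SQLException
+    {
+        this.typeName = typeName;
+        x = in.readDouble();
+        y = in.readDouble();
+    }
+
+    @Override
+    public void writeSQL(SQLOutput out) throws SQLException
+    {
+        out.writeDouble(x);
+        out.writeDouble(y);
+    }
+
+    /*
+     * The generator sees that this function's signature uses the type
+     * declared above, so no hand-written ordering hints are needed.
+     */
+    @Function
+    public static double magnitude(Point2D p)
+    {
+        return Math.hypot(p.x, p.y);
+    }
+}
+```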
-If the call to a type output function is the first pljava call in a -session, we get a crash. The first pljava call results in a SPI -connection being established and torn down. The type output function -was allocating the result in the SPI memory context which gets -destroyed prior to returning the data to the caller. Allocate the -result in the correct context to survive function exit. +$h5 Generates variadic function declarations -Clean up a warning about byteasend and bytearecv not having a -prototype when building against 9.0 as those declarations are now in a -new header file. +PL/Java 1.6 can declare functions that can be called from SQL with varying +numbers of arguments. [Example code][variadic] is provided. +$h4 Better support for PostgreSQL's `SQL_ASCII` encoding -$h3 PL/Java 1.4.0 (1 February 2008) +PostgreSQL's legacy `SQL_ASCII` encoding is difficult to use in Java because +128 of its code points have no defined mapping to Unicode, which Java uses. +The page on [database character set encodings][charsets] has a section +suggesting workable approaches if PL/Java is used in a database with that +encoding. A new addition among those options is a Java `Charset` supporting +the encoding names `X-PGSQL_ASCII` or `SQL_ASCII`, which maps the ASCII +characters as expected, and reversibly encodes the others using Unicode +permanently-undefined codepoints. -Warning! The recent postgresql security releases changed the API of a function -that PL/Java uses. The source can be built against either version, but the -binaries will only run against the version they were built against. The PL/Java -binaries for 1.4.0 have all been built against the latest server releases (which -you should be using anyway). If you are using an older you will have to build -from source. The binary releases support: 8.3 - All versions. 8.2 - 8.2.6 and -up. 8.1 - 8.1.11 and up. 8.0 - 8.0.15 and up. +$h4 Build system, continuous integration, quality assurance -$h3 PL/Java 1.3.0 (18 June 2006) +* The `nar-maven-plugin` formerly used in the build has been replaced with + a newly-developed Maven plugin tailored to PostgreSQL extension building. -This release is about type mapping and the creation of new types in PL/Java. An -extensive effort has gone into making the PL/Java type system extremely -flexible. Not only can you map arbitrary SQL data types to java classes. You can -also create new scalar types completely in Java. Read about the Changes in -version 1.3. - -$h4 Changes +* The new plugin respects the flags reported by `pg_config` when building + the native library. -* A much improved type mapping system that will allow you to: +* Building with the same flags used for PostgreSQL has eliminated the flood + of uninformative warnings that, in prior versions, made troubleshooting + actual build problems difficult. + +* Travis-CI and AppVeyor now regularly build and test PL/Java for + Linux (x86_64 and ppc64le), Mac OS, and Windows (using MSVC + and MinGW-w64), with results visible at GitHub. 
- * [Map any SQL type to a Java class][maptype] - * [Create a Scalar UDT in Java][scalarudt] - * [Map array and pseudo types][deftypemap] - -[maptype]: https://github.com/tada/pljava/wiki/Mapping-an-sql-type-to-a-java-class -[scalarudt]: https://github.com/tada/pljava/wiki/Creating-a-scalar-udt-in-java -[deftypemap]: https://github.com/tada/pljava/wiki/Default-type-mapping - -* Get the OID for a given relation ([feature request 1319][1319]) -* Jar manifest included in the SQLJ Jar repository - ([feature request 1525][1525]) - -$h4 Fixed bugs +* PL/Java's self-installer jar now includes utilities to simplify + integration testing, similar to the `PostgresNode` Perl module provided + with PostgreSQL. It is used in the Travis and AppVeyor builds to keep + platform-specific code to a minimum, and may be useful for other purposes. + Some [documentation](develop/node.html) is included. + +* Having fixed the permission issues that were breaking the profiling functions + of `visualvm`, it will be easier to incorporate profiling into future + development. + +$h3 Enhancement requests addressed + +* [Add regression testing](${ghbug}11) +* [`CFLAGS` from `pg_config` when building `pljava-so`](${ghbug}152) + +$h3 Bugs fixed + +* [`-Dpljava.libjvmlocation` breaks Windows build](${ghbug}190) +* [XML Schema regression-test failure in de_DE locale](${ghbug}312) + +$h3 Updated PostgreSQL APIs tracked + +* 64-bit `FuncCallContext.call_cntr` (`ResultSetProvider`/`ResultSetHandle` + can now return more than `INT_MAX` rows) + +$h3 Credits + +There is a PL/Java 1.6.0 thanks in part to +Christoph Berg, +Chapman Flack, +Kartik Ohri, +original creator Thomas Hallgren, +and the many contributors to earlier versions. + +The work of Kartik Ohri in summer 2020 on the build system renovation and +continuous integration was supported by Google Summer of Code. 
+ +[policy]: use/policy.html +[linkage]: examples/examples.html#Exception_resolving_class_or_method_.28message_when_installing_examples.29 +[udtd32f84e]: https://github.com/jcflack/pljava-udt-type-extension/commit/d32f84e +[udt0066a1e]: https://github.com/jcflack/pljava-udt-type-extension/commit/0066a1e +[variadic]: pljava-examples/apidocs/org/postgresql/pljava/example/annotation/Variadic.html#method-detail +[charsets]: use/charsets.html +[jpms]: use/jpms.html -* [Reconnect needed for jar manipulation to take effect][1531] -* [Backends hang with test suite][1504] -* [Keeps crashing while making a call to a function][1560] -* [Memory Leak in Statement.executeUpdate][1556] -* [jarowner incorrect after dump and reload][1506] -* [Missing JAR manifest][1525] -* [TZ adjustments for Date are incorrect][1547] -* [Functions returning sets leaks memory][1542] -* [drop lib prefix][1423] -* ["oid" column is not available in trigger's NEW/OLD ResultSet][1317] -* [fails to run with GCJ, too][1480] -* [Compile failure with 8.1.4][1558] -* [fails to build with GCJ][1479] -* [Record returning function cannot be called with different structures within one session][1440] -* [Cannot map function with complex return type to method that uses non primitive arguments][1551] -* [Get OID for given relation][1319] - -[1531]: ${gborgbug}1531 -[1504]: ${gborgbug}1504 -[1560]: ${gborgbug}1560 -[1556]: ${gborgbug}1556 -[1506]: ${gborgbug}1506 -[1525]: ${gborgbug}1525 -[1547]: ${gborgbug}1547 -[1542]: ${gborgbug}1542 -[1423]: ${gborgbug}1423 -[1317]: ${gborgbug}1317 -[1480]: ${gborgbug}1480 -[1558]: ${gborgbug}1558 -[1479]: ${gborgbug}1479 -[1440]: ${gborgbug}1440 -[1551]: ${gborgbug}1551 -[1319]: ${gborgbug}1319 - -$h3 PL/Java 1.2.0 (20 Nov 2005) - -The PL/Java 1.2.0 release is primarily targeted at the new PostgreSQL 8.1 but -full support for 8.0.x is maintained. New features include support IN/OUT -parameters, improved meta-data handling, and better memory management. - -$h3 PL/Java 1.1.0 (14 Apr 2005) - -PL/Java 1.1.0 includes a lot of new features such as `DatabaseMetaData`, -`ResultSetMetaData`, language handlers for both trusted and untrusted language, -additional semantics for functions returning `SETOF`, and simple ObjectPooling. - -$h3 PL/Java 1.0.1 (07 Feb 2005) - -This release resolves a couple of important security issues. The most important -one is perhaps that PL/Java now is a trusted language. See [Security][] for more -info. Filip Hrbek, now member of the PL/Java project, contributed what was -needed to make this happen. - -[Security]: https://github.com/tada/pljava/wiki/Security - -$h3 PL/Java 1.0.0 (23 Jan 2005) - -Today, after a long period of fine tuning, PL/Java 1.0.0 was finally released. +$h2 [Releases prior to PL/Java 1.6.0](releasenotes-pre1_6.html) diff --git a/src/site/markdown/use/catch.md b/src/site/markdown/use/catch.md new file mode 100644 index 00000000..b4754c1a --- /dev/null +++ b/src/site/markdown/use/catch.md @@ -0,0 +1,132 @@ +# Catching PostgreSQL exceptions in Java + +When your Java code calls into PostgreSQL to do database operations, +a PostgreSQL error may result. It gets converted into a special subclass +of `SQLException` that (internally to PL/Java) retains all the elements +of the PostgreSQL error report. If your Java code does not catch this exception +and it propagates all the way out of your function, it gets turned back into +the original error report and is handled by PostgreSQL in the usual way. 
+ +Your Java code can also catch this exception in any `catch` block that +covers `SQLException`. After catching one, there are two legitimate things +your Java code can do with it: + +0. Perform some cleanup as needed and rethrow it, or construct some other, + more-descriptive or higher-level exception and throw that, so that the + exception continues to propagate and your code returns exceptionally + to PostgreSQL. + +0. Roll back to a previously-established `Savepoint`, perform any other + recovery actions needed, and continue processing, without throwing or + rethrowing anything. + +If your code catches a PostgreSQL exception, and continues without rethrowing +it or throwing a new one, and also without rolling back to a prior `Savepoint`, +that is a bug. Without rolling back, the current PostgreSQL transaction is +spoiled and any later calls your Java function tries to make into PostgreSQL +will throw their own exceptions because of that. Historically, such bugs have +been challenging to track down, as you may end up only seeing a later exception +having nothing at all to do with the one that was originally mishandled, +which you never see. + +## Tips for debugging mishandled exceptions + +Some features arriving in PL/Java 1.6.10 simplify debugging code that catches +but mishandles exceptions. + +### More-informative in-failed-transaction exception + +First, the exception that results when a call into PostgreSQL fails because of +an earlier mishandled exception has been made more informative. It has an +`SQLState` of `25P02` (PostgreSQL's "in failed SQL transaction" code), and its +`getCause` method actually returns the unrelated earlier exception that was +mishandled (and so, in that sense, really is the original 'cause'). Java code +that catches this exception can use `getStackTrace` to examine its stack +trace, or call `getCause` and examine the stack trace of the earlier exception. +The stack trace of the failed-transaction exception shows the context of the +later call that failed because of the earlier mishandling, and the stack trace +of the 'cause' shows the context of the original mishandled problem. + +Note, however, that while your code may mishandle an exception, the next call +into PostgreSQL that is going to fail as a result might not be made from your +code at all. It could, for example, happen in PL/Java's class loader and appear +to your code as an unexplained `ClassNotFoundException`. The failed-transaction +`SQLException` and its cause should often be retrievable from the `cause` chain +of whatever exception you get, but could require following multiple `cause` +links. + +### Additional logging + +Additionally, there is logging that can assist with debugging when it isn't +practical to add to your Java code or run with a debugger to catch and examine +exceptions. + +When your Java function returns to PostgreSQL, normally or exceptionally, +PL/Java checks whether there was any PostgreSQL error raised during your +function's execution but not resolved by rolling back to a savepoint. + +If there was, the logging depends on whether your function is returning normally +or exceptionally. + +#### If your function has returned normally + +If a PostgreSQL error was raised, and was not resolved by rolling back to +a savepoint, and your function is making a normal non-exception return, then, +technically, your function has mishandled that exception. 
The mishandling may be
+more benign (your function made no later attempts to call into PostgreSQL that
+failed because of it) or less benign (if one or more later calls did get made
+and failed). In either case, an exception stack trace will be logged, but the
+log level will differ.
+
+_Note: "More benign" still does not mean "benign". Such code may be the cause
+of puzzling PostgreSQL warnings about active snapshots or unclosed resources,
+or it may produce no visible symptoms, but it is buggy and should be found and
+fixed._
+
+In the more-benign case, it is possible that your code has long been mishandling
+that exception without a problem being noticed, and it might not be desirable
+for new logging added in PL/Java 1.6.10 to create a lot of new log traffic about
+it. Therefore, the stack trace will be logged at `DEBUG1` level. You can use
+`SET log_min_messages TO DEBUG1` to see any such stack traces.
+
+In the less-benign case, the mishandling is likely to be causing some problem,
+and the stack trace will be logged at `WARNING` level, and so will appear in the
+log unless you have configured warnings not to be logged. The first
+in-failed-transaction exception is the one whose stack trace will be logged, and
+that stack trace will include `Caused by:` and the original mishandled exception
+with its own stack trace.
+
+#### If your function has returned exceptionally
+
+If a PostgreSQL error was raised and your function is returning
+exceptionally, then there may have been no mishandling at all. The exception
+emerging from your function may be the original PostgreSQL exception,
+or a higher-level one your code constructed around it. That would be normal,
+non-buggy behavior.
+
+It is also possible, though, that your code could have caught a PostgreSQL
+exception, mishandled it, and later returned exceptionally on account of some
+other, even unrelated, exception. PL/Java has no way to tell the difference,
+so it will log the PostgreSQL exception in this case too, but only at `DEBUG2`
+level.
+
+PL/Java's existing practice, predating 1.6.10, is to log an exception stack
+trace at `DEBUG1` level any time your function returns exceptionally. Simply
+by setting `log_min_messages` to `DEBUG1`, then, you can see the stack trace of
+whatever exception caused the exceptional return of your function. If that
+exception was a direct result of the original PostgreSQL exception or of a later
+in-failed-transaction exception, then the `cause` chain in its stack trace
+should have all the information you need.
+
+If, on the other hand, the exception causing your function's exceptional return
+is unrelated and its `cause` chain does not include that information, then by
+bumping the log level to `DEBUG2` you can ensure the mishandled exception's
+stack trace also is logged.
+
+### Example
+
+PL/Java's supplied examples include a [`MishandledExceptions`][] class creating
+a `mishandle` function that can be used to demonstrate the effects of
+mishandling and what is visible at different logging levels.
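+
+By contrast with `mishandle`, a correctly-handling function establishes a
+savepoint and rolls back to it before continuing. A minimal sketch follows;
+the table name is invented, and this is illustrative rather than one of the
+supplied examples:
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Savepoint;
+import java.sql.Statement;
+
+import org.postgresql.pljava.annotation.Function;
+
+public class Recovering
+{
+    /*
+     * Set a savepoint before the risky call, and roll back to it in the
+     * catch block so the transaction remains usable afterward.
+     */
+    @Function
+    public static String insertOrRecover() throws SQLException
+    {
+        Connection c = DriverManager.getConnection("jdbc:default:connection");
+        Savepoint sp = c.setSavepoint();
+        try ( Statement s = c.createStatement() )
+        {
+            s.execute("INSERT INTO possibly_absent_table VALUES (1)");
+            c.releaseSavepoint(sp);
+            return "inserted";
+        }
+        catch ( SQLException e )
+        {
+            c.rollback(sp); // without this, the transaction would stay spoiled
+            return "recovered from " + e.getSQLState();
+        }
+    }
+}
+```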
+ +[`MishandledExceptions`]: ../pljava-examples/apidocs/org/postgresql/pljava/example/annotation/MishandledExceptions.html#method-detail diff --git a/src/site/markdown/use/charsets.md b/src/site/markdown/use/charsets.md index f0f41cb5..1b873e0a 100644 --- a/src/site/markdown/use/charsets.md +++ b/src/site/markdown/use/charsets.md @@ -39,11 +39,26 @@ The encoding `SQL_ASCII`, as described [in the PostgreSQL documentation][mbc], ### Using PL/Java with server encoding `SQL_ASCII` -When the server encoding is `SQL_ASCII`, the only safe way for PL/Java to treat -it is as strict ASCII, throwing exceptions if asked to convert any string -involving characters outside of ASCII. - -If PL/Java must be used with `SQL_ASCII` as the server encoding, the cases are +Java strings are Unicode by definition; PL/Java must not create strings where +some of the characters have their Unicode meanings while others mean something +else. PL/Java does supply a `Charset` with encoder and decoder for `SQL_ASCII`, +which behaves as follows: + +* Encoded bytes in the ASCII range map to the corresponding Unicode characters. +* Other encoded bytes are stuffed, two `char`s for each byte, into a range of + codepoints Unicode defines as permanently unassigned. For those codepoints, + `Character.getType` returns `UNASSIGNED`, `Character.getName` returns null, + `Character.UnicodeScript.of` returns `UNKNOWN`, and they will not match + patterns for letters, digits, punctuation, or generally anything else + interesting (other than `\p{Cn}`, the exact test for noncharacters). + +The mapping is transparently reversed when such a Java string is returned +to PostgreSQL. With this convention, Java code can work usefully with +`SQL_ASCII` encoded data, matching and manipulating the ASCII parts, while +treating the non-defined subsequences as opaque and returning them to PostgreSQL +unchanged. + +If PL/Java is used with `SQL_ASCII` as the server encoding, the cases are (by increasing complexity): 0. The database contains no non-ASCII data (or none that will be touched @@ -52,15 +67,19 @@ If PL/Java must be used with `SQL_ASCII` as the server encoding, the cases are 0. The database contains non-ASCII data all known to be in one standard encoding. It would be simplest for the database to be recreated with this encoding selected, but that may be impractical for various reasons. - In that case, this can be handled in the same way as the next case. + In that case, this can be handled in the same way as the next case, or + PL/Java can be 'lied to' about the server encoding by including a + `-Dorg.postgresql.server.encoding=...` in `pljava.vmoptions` that names + the known correct encoding instead. 0. The database contains non-ASCII data in _more than one_ encoding, with the application somehow knowing which encoding is used where. That is completely possible because `SQL_ASCII` does not guarantee or validate anything (which means it can also happen over time without being intended). - The rigorous approach in this case is to write Java code expecting and - returning `bytea` and some indication of the encoding to be used, and - perform explicit conversions. + Java code can find regions of strings that match the pattern + `(?:[\ufdd8-\ufddf][\ufde0-\ufdef])++` and pass those regions back through + the `SQL_ASCII` encoder, and then the decoder for whatever other encoding + it determines should apply. 
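+
+A sketch of that last approach, assuming the charset can be obtained by the
+`X-PGSQL_ASCII` name given above, and assuming (for this example only) that the
+non-ASCII regions are known to be LATIN1:
+
+```java
+import java.nio.charset.Charset;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class SqlAsciiRescue
+{
+    /* PL/Java's charset for the server's SQL_ASCII encoding, by name. */
+    static final Charset PG_SQL_ASCII = Charset.forName("X-PGSQL_ASCII");
+
+    /* Assumed for this sketch only: the stray bytes are really LATIN1. */
+    static final Charset KNOWN_ACTUAL = Charset.forName("ISO-8859-1");
+
+    /* Matches the opaque two-chars-per-byte subsequences described above. */
+    static final Pattern OPAQUE =
+        Pattern.compile("(?:[\\ufdd8-\\ufddf][\\ufde0-\\ufdef])++");
+
+    public static String redecode(String fromDatabase)
+    {
+        Matcher m = OPAQUE.matcher(fromDatabase);
+        StringBuilder sb = new StringBuilder();
+        while ( m.find() )
+        {
+            byte[] original = m.group().getBytes(PG_SQL_ASCII); // undo stuffing
+            m.appendReplacement(sb,
+                Matcher.quoteReplacement(new String(original, KNOWN_ACTUAL)));
+        }
+        m.appendTail(sb);
+        return sb.toString();
+    }
+}
+```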
## Using PL/Java with standard (not `SQL_ASCII`) encodings other than `UTF8` @@ -82,6 +101,8 @@ except `UTF8`: > The Unicode escape syntax works only when the server encoding is UTF8. > When other server encodings are used, only code points in the ASCII range > (up to \007F) can be specified. + + PostgreSQL 13 eliminates this limitation. * The [`ascii` and `chr` functions][acfns] behave two different ways, depending on whether the server encoding is *a single-byte encoding*, or *any multi-byte encoding other than `UTF8`*. diff --git a/src/site/markdown/use/datetime.md b/src/site/markdown/use/datetime.md new file mode 100644 index 00000000..6a4f0481 --- /dev/null +++ b/src/site/markdown/use/datetime.md @@ -0,0 +1,127 @@ +# Mapping between PostgreSQL and Java date/time types + +## Legacy JDBC mappings + +The first mappings to be specified in JDBC used the JDBC-specific classes +`java.sql.Date`, `java.sql.Time`, and `java.sql.Timestamp`, all of which +are based on `java.util.Date` (but only as an implementation detail; they +should always be treated as their own types and not as instances of +`java.util.Date`). + +PL/Java function parameters and returns can be declared in Java to have those +types, objects of those types can be passed to `PreparedStatement.setObject`, +`ResultSet.updateObject`, and `SQLOutput.writeObject` methods, as well as to +the methods that are specific to those types. The JDBC `getObject` and +`readObject` methods that do not take a `Class` parameter will return +objects of those types when retrieving PostgreSQL date or time values. + +### Shortcomings + +Those classes have never been a good representation for PostgreSQL date/time +values, because they are based on `java.util.Date`, which implies knowledge of +a time zone, even when they are used to represent PostgreSQL values with no time +zone at all. For all of these conversions but one, PL/Java must do time zone +computations, with the one exception being, unintuitively, `timestamp with time +zone`. The conversions of non-zoned values involve a hidden dependency on the +PostgreSQL session's current setting of `TimeZone`, which can vary from session +to session at the connecting client's preference. + +There are known issues of long standing in PL/Java's conversions to and from +these types, detailed in [issue #200][issue200]. While these particular issues +are expected to be fixed in a future PL/Java release, the Java 8 / JDBC 4.2 +mappings described next are the strongly-recommended alternative to the legacy +mappings, avoiding these issues entirely. + +[issue200]: https://github.com/tada/pljava/issues/200 + +## Java 8 / JDBC 4.2 date/time mappings + +Java 8 introduced the much improved set of date/time classes in the `java.time` +package specified by [JSR 310][jsr310]. JDBC 4.2 (the version in Java 8) +allows those as alternate Java class mappings of the SQL types `date`, +`time` (with and without timezone), and `timestamp` (with/without timezone). +These new types are a much better fit to the corresponding PostgreSQL types than +the original JDBC `java.sql` `Date`/`Time`/`Timestamp` classes. + +To avoid a breaking change, JDBC 4.2 does not modify what any of the +pre-existing JDBC API does by default. The `getDate`, `getTime`, and +`getTimestamp` methods on a `ResultSet` still return the same `java.sql` types, +and so does `getObject` in the form that does not specify a class. 
Instead, the +update takes advantage of the general purpose `ResultSet.getObject` methods that +take a `Class` parameter (added in JDBC 4.1), and likewise the +`SQLInput.readObject` method with a `Class` parameter (overlooked in 4.1 but +added in 4.2), so a caller can request a `java.time` class by passing the right +`Class`: + +| PostgreSQL type | Pass to `getObject`/`readObject` | +|--:|:--| +|`date`|`java.time.LocalDate.class`| +|`time without time zone`|`java.time.LocalTime.class`| +|`time with time zone`|`java.time.OffsetTime.class`| +|`timestamp without time zone`|`java.time.LocalDateTime.class`| +|`timestamp with time zone`|`java.time.OffsetDateTime.class`| +[Correspondence of PostgreSQL date/time types and Java 8 `java.time` classes] + +The `java.time` types can also be used as parameter and return types of PL/Java +functions without special effort (the generated function declarations will make +the right conversions happen), and passed to the setter methods of prepared +statements, writable result sets (for triggers or composite-returning +functions), and `SQLOutput` for UDTs. + +Conversions to and from these types never involve the PostgreSQL session time +zone, which can vary from session to session. Any code developed for PL/Java +and Java 8 or newer is strongly encouraged to use these types for date/time +manipulations, for their much better fit to the PostgreSQL types. + +PostgreSQL accepts 24:00:00.000000 as a valid time, while a day for +`LocalTime` or `OffsetTime` maxes out at the preceding nanosecond. That is +still a distinguishable value (as the PostgreSQL resolution is only to +microseconds), so the PostgreSQL 24 value is bidirectionally mapped to that. + +### Mapping of time and timestamp with time zone + +When a `time with time zone` is mapped to a `java.time.OffsetTime`, the Java +value will have a zone offset equal to the one assigned to the value in +PostgreSQL, and so in the reverse direction. + +When a `timestamp with time zone` is mapped to a `java.time.OffsetDateTime`, +the Java value will always have a zone offset of zero (UTC). When an +`OffsetDateTime` created in Java is mapped to a PostgreSQL +`timestamp with time zone`, if its offset is not zero, the value adjusted to UTC +is used. + +These different behaviors accurately reflect how PostgreSQL treats +the two types differently. + +### Infinite dates and timestamps + +PostgreSQL allows `date` and `timestamp` (with or without time zone) values of +`infinity` and `-infinity`. + +There is no such notion in the corresponding Java classes (the original JDBC +ones or the JDBC 4.2 / JSR 310 ones), but PL/Java will map those PostgreSQL +values repeatably to certain values of the Java classes, and will map Java +objects with those exact values back to PostgreSQL `infinity` or `-infinity` +on the return trip. Java code that needs to recognize those values could do +an initial query returning `infinity` and `-infinity` and save the resulting +Java values to compare others against. It must compare with `equals()`; it +cannot assume that the mapping will produce the very same Java objects +repeatedly, but only objects with equal values. + +When dates and timestamps are mapped to the `java.time` classes, +the mapping will have +the useful property that `-infinity` really is earlier than other +PostgreSQL-representable values, and `infinity` really is later. That does not +hold under the old `java.sql.Timestamp` mapping, where both values will be +distant from the present but not further specified. 
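As a brief sketch (not taken from the examples jar), a PL/Java function can use these mappings both in its own signature and when reading query results with the `Class`-typed `getObject`; the `jdbc:default:connection` URL is the usual way for PL/Java code to reach its own connection.

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.time.LocalDate;
import java.time.OffsetDateTime;

import org.postgresql.pljava.annotation.Function;

public class TimeExamples
{
	/* Declared with a java.time type; the generated declaration maps
	 * LocalDate to the PostgreSQL date type, as described above. */
	@Function
	public static LocalDate nextDay(LocalDate d)
	{
		return d.plusDays(1);
	}

	/* The same classes can be requested from a query result with the
	 * Class-accepting getObject added in JDBC 4.1. */
	@Function
	public static String describeNow() throws SQLException
	{
		Connection c = DriverManager.getConnection("jdbc:default:connection");
		try (
			PreparedStatement ps =
				c.prepareStatement("SELECT CURRENT_DATE, CURRENT_TIMESTAMP");
			ResultSet rs = ps.executeQuery() )
		{
			rs.next();
			LocalDate today = rs.getObject(1, LocalDate.class);
			OffsetDateTime now = rs.getObject(2, OffsetDateTime.class);
			return today + " / " + now; // 'now' will carry a zero (UTC) offset
		}
	}
}
```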
+ +#### Infinite timestamps without `integer_datetimes` + +In PostgreSQL builds with `integer_datetimes` as `off` (a configuration that is +non-default since PostgreSQL 8.4, and impossible since PG 10), an error results +if a timestamp being converted to Java has either infinite value. As uses of +infinite timestamps are probably rare and the configuration is long out of use, +there is no plan to lift this limitation unless an issue is opened to address a +practical need. + +[jsr310]: https://www.threeten.org/ diff --git a/src/site/markdown/use/hello.md.vm b/src/site/markdown/use/hello.md.vm index 4170a7d9..9731671d 100644 --- a/src/site/markdown/use/hello.md.vm +++ b/src/site/markdown/use/hello.md.vm @@ -116,10 +116,18 @@ individual projects with shorter `pom.xml` files naming this as the parent. + + org.apache.maven.plugins + maven-compiler-plugin + 3.10.1 + + 9 + + org.apache.maven.plugins maven-jar-plugin - 2.6 + 3.3.0 @@ -286,7 +294,7 @@ The [@Function annotation][funcanno] declares that the `hello` function should be available from SQL, so a `pljava.ddr` file will be added to the jar, containing the SQL commands to make that happen. -[funcanno]: ../pljava-api/apidocs/index.html?org/postgresql/pljava/annotation/Function.html +[funcanno]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Function.html One more try with `mvn clean package` and there it is: @@ -314,16 +322,16 @@ SQLActions[]={ "BEGIN INSTALL BEGIN PostgreSQL CREATE OR REPLACE FUNCTION hello( - toWhom varchar) - RETURNS varchar + toWhom pg_catalog.varchar) + RETURNS pg_catalog.varchar LANGUAGE java VOLATILE - AS 'com.example.proj.Hello.hello(java.lang.String)' + AS 'java.lang.String=com.example.proj.Hello.hello(java.lang.String)' END PostgreSQL; END INSTALL", "BEGIN REMOVE BEGIN PostgreSQL DROP FUNCTION hello( - toWhom varchar) + toWhom pg_catalog.varchar) END PostgreSQL; END REMOVE" } @@ -455,6 +463,6 @@ From here, consider: * The user guide pages [on the wiki][uwik] * The many pre-built [examples][] -[pljapi]: ../pljava-api/apidocs/index.html?org/postgresql/pljava/package-summary.html#package_description +[pljapi]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/package-summary.html#package-description [uwik]: https://github.com/tada/pljava/wiki/User-guide [examples]: ../examples/examples.html diff --git a/src/site/markdown/use/jpms.md b/src/site/markdown/use/jpms.md new file mode 100644 index 00000000..4770e06a --- /dev/null +++ b/src/site/markdown/use/jpms.md @@ -0,0 +1,164 @@ +# PL/Java and the Java Platform Module System + +Java 9 introduced the [Java Platform Module System][jpms] (JPMS), allowing +Java code to be strongly encapsulated into modules that declare their +dependency relationships to other modules explicitly. + +One consequence is that where a pre-Java-9 Java runtime would have +a "class path", a Java-9-or-later runtime has a "module path" as well as +a legacy class path. + +Pre-1.6.0 releases of PL/Java were not modularized, and can be used on +older Java runtimes. They will also run on Java 9 and later, but still only +as non-modular code. The Java runtime is launched with a class path that +includes a PL/Java jar and usually nothing else. That single jar includes +both API and implementation classes. + +PL/Java 1.6.0 or later is structured as an API module (named +`org.postgresql.pljava`) and an implementation module (named +`org.postgresql.pljava.internal`) supplied in two distinct jar files. 
+The Java runtime is launched with a *module path*, not a class path, +that names both the implementation and the API jar, and usually nothing else. +For a class path, the runtime is launched by default with none at all. + +User code developed to run in PL/Java is normally installed via SQL +calling the `sqlj.install_jar` and `sqlj.set_classpath` functions, and these +mechanisms are independent of the Java runtime's class path at launch. + +In Java 9 and later, both a module path and a class path are supported so that +newer, modularized code and legacy, non-modular code can interoperate, and +legacy code can be migrated over time: + +* A jar file that embodies an explicit named module should be placed on + the module path. It participates fully in the Java module system, and + its access to other modules will be determined by its explicit + `requires`/`exports`/`uses`/`provides`/`opens` relationships. + +* A jar file containing legacy, non-modular code should be placed on the + class path, and is treated as part of an unnamed module that has access + to the exports and opens of all other _readable_ modules. Such code will, + therefore, continue to work as it did before Java 9, provided the needed + modules are _readable_, as explained below. (Even a jar containing Java 9+ + modular code will be treated this way, if found on the class path rather + than the module path.) + +* A jar file can be placed on the module path even if it does not contain + an explicit named module. In that case, it becomes an "automatic" module, + with a name derived from the jar file name, or a better name can be + specified in its manifest. It will continue to have access to the + exports of any other modules, like an unnamed module, but other modules + will be able to declare dependencies on it by name. This can be a useful + intermediate step in migrating legacy code to modules. + +As of PL/Java 1.6, while PL/Java's own implementation is modularized, it +implements a version of the ISO SQL Java Routines and Types specification +that does not include Java module system concepts. Its `sqlj.set_classpath` +function manipulates an internal class path, not a module path, and a jar +installed with `sqlj.install_jar` behaves as legacy code in an unnamed module. + +## Readable versus observable modules + +Using Java's terminology, modules that can be found on the module path are +_observable_. Not all of those are automatically _readable_; the _readable_ +ones in a JVM instance are initially those encountered, at JVM start-up, in +the "recursive enumeration" step of [module resolution][resolution]. + +Recursive enumeration begins with some root modules, and proceeds until all of +the modules on which they (transitively) depend have been added to the readable +set. When PL/Java is launched in a session, PL/Java's own module is a root, +and so the readable modules will include those PL/Java itself depends on, +such as `java.base`, `java.sql`, and the other modules `java.sql` names with +`requires transitive` directives. + +Those modules may be enough for many uses of PL/Java. However, if code for use +in PL/Java will refer to other modules, +[`--add-modules` in `pljava.vmoptions`][addm] can be used to add more roots. +Because of recursive enumeration, it is enough to add just one module, or a few +modules, whose dependencies recursively cover whatever modules will be needed. 
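To check which modules were actually resolved, a small function along these lines could be installed (a sketch only; the supplied examples jar already provides a comparable `java_modules` function, mentioned below):

```
import java.util.stream.Collectors;

import org.postgresql.pljava.annotation.Function;

public class BootModules
{
	/* List, alphabetically, the modules resolved into the boot layer of
	 * the JVM that PL/Java launched. */
	@Function
	public static String bootModules()
	{
		return ModuleLayer.boot().modules().stream()
			.map(Module::getName)
			.sorted()
			.collect(Collectors.joining(", "));
	}
}
```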
+ +At one extreme for convenience, Java provides a module, `java.se`, that simply +declares dependencies on the other modules that make up the full Java SE API. +Therefore, `--add-modules=java.se` will ensure that any PL/Java code is able to +refer to any of the Java SE API. However, PL/Java instances may use less memory +and start up more quickly if an effort is made to add only modules actually +needed. + +### Limiting the module graph + +Less conveniently perhaps, but advantageously for memory footprint and quick +startup, the [`--limit-modules`][limitmods] option can be used. As of this +writing in early 2025, starting up a simple PL/Java installation on Java 24 +with no `--add-modules` option results in 48 modules resolved, and the 48 +include some unlikely choices for PL/Java purposes, such as `java.desktop`, +`jdk.unsupported.desktop`, `jdk.javadoc`, and others. + +With the option `--limit-modules=org.postgresql.pljava.internal` added, only +nine modules are resolved---the transitive closure of those required by PL/Java +itself---and all of PL/Java's supplied examples successfully run. + +The `--add-modules` option can then be used to make any other actually-needed +modules available again. Those named with `--add-modules` are implicitly added +to those named with `--limit-modules`, so there is no need to change the +`--limit-modules` setting when adding another module. For example, + +``` +--limit-modules=org.postgresql.pljava.internal --add-modules=java.net.http +``` + +will allow use of `java.net.http` in addition to the nine modules resolved for +PL/Java itself. + +Limiting the module graph can be especially advisable when running PL/Java with +no security policy enforcement, as required on stock Java 24 and later. The page +[PL/Java with no policy enforcement][unenforced] should be carefully reviewed +for other implications of running PL/Java that way. + +The supplied [examples jar][examples] provides a function, [java_modules][], +that can be used to see what modules have been resolved into Java's boot module +layer. + +For more detail on why the boot layer includes the modules it does, +`-Djdk.module.showModuleResolution=true` can be added temporarily in +`pljava.vmoptions`, and a log of module requirements and bindings will be sent +to the standard output of the backend process when PL/Java starts. PostgreSQL, +however, may normally start backend processes with standard output going +nowhere, so the logged information may be invisible unless running PostgreSQL +in [a test harness][node]. + +## Configuring the launch-time module path + +The configuration variable `pljava.module_path` controls the +module path used to launch the Java runtime. Its default is constructed to +include the expected locations of PL/Java's own implementation and API jars, +and nothing else, so there is typically no need to set it explicitly +unless those jars have been installed at unusual locations. +Its syntax is simply the pathnames of the jar files, separated by the +correct path separator character for the platform (often a colon, or a +semicolon on Windows). + +There may at times be a reason to place additional modular jars on this +module path. Whenever it is explicitly set, it must still include the +correct locations of the PL/Java implementation and API jars. + +## Configuring the launch-time class path + +The launch-time class path has an empty default, which means (because +PL/Java has a main module) that there is no class path. 
It does *not* +default to finding class files in the backend's current directory (which, +in a pre-Java-9 runtime, is what an empty class path would mean). + +There may at times be a reason to place some jar or jars on the launch-time +class path, rather than installing them in SQL with `sqlj.install_jar` in +the usual way. It can be set by adding a `-Djava.class.path=...` in the +`pljava.vmoptions` configuration variable. Like the module path, its syntax +is simply the jar file pathnames, separated by the platform's path separator +character. + +[jpms]: https://cr.openjdk.java.net/~mr/jigsaw/spec/ +[resolution]: https://docs.oracle.com/javase/9/docs/api/java/lang/module/package-summary.html#resolution +[addm]: ../install/vmoptions.html#Adding_to_the_set_of_readable_modules +[limitmods]: https://openjdk.org/jeps/261#Limiting-the-observable-modules +[unenforced]: unenforced.html +[examples]: ../examples/examples.html +[java_modules]: ../pljava-examples/apidocs/org/postgresql/pljava/example/annotation/Modules.html#method-detail +[node]: ../develop/node.html diff --git a/src/site/markdown/use/parallel.md b/src/site/markdown/use/parallel.md new file mode 100644 index 00000000..11524d89 --- /dev/null +++ b/src/site/markdown/use/parallel.md @@ -0,0 +1,81 @@ +# PL/Java in parallel query or background worker + +With some restrictions, PL/Java can be used in [parallel queries][parq], from +PostgreSQL 9.6, and in some [background worker processes][bgworker]. + +[bgworker]: https://www.postgresql.org/docs/current/static/bgworker.html +[parq]: https://www.postgresql.org/docs/current/static/parallel-query.html + +## Background worker processes + +Because PL/Java requires access to a database containing the `sqlj` schema, +PL/Java is only usable in a worker process that initializes a database +connection, which must happen before the first use of any function that +depends on PL/Java. + +## Parallel queries + +Like any user-defined function, a PL/Java function can be +[annotated][paranno] with a level of "parallel safety", `UNSAFE` by default. + +When a function labeled `UNSAFE` is used in a query, the query cannot be +parallelized at all. If a query contains a function labeled `RESTRICTED`, parts +of the query may execute in parallel, but the part that calls the `RESTRICTED` +function will be executed only in the lead process. A function labeled `SAFE` +may be executed in every process participating in the query. + +[paranno]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/Function.html#parallel + +### Parallel setup cost + +PostgreSQL parallel query processing uses multiple operating-system processes, +and these processes are new for each parallel query. If a PL/Java function is +labeled `PARALLEL SAFE` and is pushed by the query planner to run in the +parallel worker processes, each new process will start a Java virtual machine. +The cost of doing so will reduce the expected advantage of parallel execution. + +To inform the query planner of this trade-off, the value of the PostgreSQL +configuration variable [`parallel_setup_cost`][parsetcost] should be increased. +The startup cost can be minimized with attention to the +[PL/Java VM option recommendations][vmopt], including class data sharing. 
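As a sketch of the labeling itself, assuming the nested `Function.Parallel` enum described in the annotation documentation linked above, a pure computation on its arguments might be declared this way:

```
import static java.lang.Math.sqrt;

import org.postgresql.pljava.annotation.Function;
import static org.postgresql.pljava.annotation.Function.Parallel.SAFE;

public class ParallelExample
{
	/* A pure computation touching no database or session state is a
	 * reasonable candidate for the SAFE label; the generated declaration
	 * will then carry PARALLEL SAFE. */
	@Function(parallel = SAFE)
	public static double hypotenuse(double a, double b)
	{
		return sqrt(a * a + b * b);
	}
}
```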
+ +[parsetcost]: https://www.postgresql.org/docs/current/static/runtime-config-query.html#GUC-PARALLEL-SETUP-COST +[vmopt]: ../install/vmoptions.html + +### Limits on `RESTRICTED`/`SAFE` function behavior + +There are stringent limits on what a function labeled `RESTRICTED` may do, +and even more stringent limits on what may be done in a function labeled `SAFE`. +The PostgreSQL manual describes the limits in the section +[Parallel Labeling for Functions and Aggregates][parlab]. + +[parlab]: https://www.postgresql.org/docs/current/static/parallel-safety.html#PARALLEL-LABELING + +While PostgreSQL does check for some inappropriate operations from a +`PARALLEL SAFE` or `RESTRICTED` function, for the most part it relies on +functions being labeled correctly. When in doubt, the conservative approach +is to label a function `UNSAFE`, which can't go wrong. A function mistakenly +labeled `RESTRICTED` or `SAFE` could produce unpredictable results. + +#### Internal workings of PL/Java + +While a given PL/Java function itself may clearly qualify as `RESTRICTED` or +`SAFE` by inspection, there may still be cases where a forbidden operation +results from the internal workings of PL/Java itself. This has not been seen +in testing (simple parallel queries with `RESTRICTED` or `SAFE` PL/Java +functions work fine), but to rule out the possibility would require a careful +audit of PL/Java's code. Until then, it would be prudent for any application +involving parallel query with `RESTRICTED` or `SAFE` PL/Java functions +to be first tested in a non-production environment. + +### Further reading + +A [Parallel query and PL/Java][pqwiki] page on the PL/Java wiki is provided +to collect experience and tips regarding this significant new capability +that may be gathered in between updates to this documentation. + +[README.parallel][rmp] in the PostgreSQL source, for more detail on why parallel +query works the way it does. + +[rmp]: https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/backend/access/transam/README.parallel +[pqwiki]: https://github.com/tada/pljava/wiki/Parallel-query-and-PLJava diff --git a/src/site/markdown/use/policy.md b/src/site/markdown/use/policy.md new file mode 100644 index 00000000..e71c4640 --- /dev/null +++ b/src/site/markdown/use/policy.md @@ -0,0 +1,407 @@ +# Configuring permissions in PL/Java + +This page describes how PL/Java operates when enforcing security policy, +available when using Java 23 or earlier. + +When using PL/Java with stock Java 24 or later, please see instead the +[PL/Java without policy enforcement][unenforced] page. + +To operate with policy enforcement as described here, no special configuration +is needed on Java 17 and earlier, while on Java 18 through 23, an entry +`-Djava.security.manager=allow` in [`pljava.vmoptions`][confvar] must be present +for PL/Java to start. For just how to configure specific Java versions, see +[Available policy-enforcement settings by Java version][smprop]. + +## `TRUSTED` (and untrusted) procedural languages + +PostgreSQL allows a procedural language to be installed with or without +the designation `TRUSTED`. For a language designated `TRUSTED`, functions +can be created in that language by any user (PostgreSQL role) with `USAGE` +permission on that language, as configured with the SQL commands +`GRANT USAGE ON LANGUAGE ...` and `REVOKE USAGE ON LANGUAGE ...`. For a +language that is _not_ designated `TRUSTED`, only a database superuser +may create functions that use it. No `USAGE` permission can be granted on it. 
+ +In either case, once any function has been created, that function may +be executed by any user/role granted `EXECUTE` permission on the function +itself; a language's `USAGE` privilege (or superuser status, if the language +is not `TRUSTED`) is only needed to create a function that uses the language. + +Because PL functions execute in the database server, a general-purpose +programming language with no restrictions on access to its containing process +or the server file system may be used for actions or access that PostgreSQL +would normally not permit. A superuser can implement such a function using +a non-`TRUSTED` PL, and design the function to enforce its own limits and +be safe for use by whatever roles will be granted `EXECUTE` permission on it. + +A `TRUSTED` PL is expected to enforce appropriate restrictions so that +non-superusers can be allowed to use it to create functions on their own, +while still subject to PostgreSQL's normal protections. + +Both kinds have their uses, and many of the available PLs, including PL/Java, +install two similarly-named 'languages' to permit both. Although either can be +renamed, a normal installation of PL/Java will create the language `java` with +the `TRUSTED` property, and `javaU` without it. + +*Note: like any SQL identifier, these language names are case-insensitive +when not quoted, and are stored in lowercase in PostgreSQL. The spelling with +capital `U` for untrusted is a common convention.* + +### `TRUSTED`/untrusted versus sandboxed/unsandboxed + +In various places in PL/Java's API, and in the sections below, the words +'sandboxed' or 'unsandboxed' are used in place of the PostgreSQL `TRUSTED` or +untrusted, respectively. That choice reflects a little trick of language some +readers may notice when new to PostgreSQL: it is about equally easy to read +'trusted'/'untrusted' in two opposite ways. (Is this language trusted because of +how tightly I restrict it? Or do I restrict it less tightly because I trust it? +Is it like a teenager with the car keys?) Old hands at PostgreSQL know which +reading is correct, but because some users of PL/Java may be old hands at Java +and newcomers to PostgreSQL, it seems safer for PL/Java to use terms that +should give the right idea to readers in both groups. + +## Permissions available in sandboxed/unsandboxed PL/Java + +Most PLs that offer both variants, including PL/Java before 1.6, hardcode +the differences between what a function in each language is allowed to do. +The sandboxed language would apply a fixed set of limitations, such as +forbidding access to the server's file system, and those limits were +not adjustable. + +A needed function that would only access one, specific, known-safe file, or +perhaps would need no file access but have to make a network connection to one +known server, might _almost_ be written under those predetermined restrictions, +but that wouldn't count. It would simply have to be created for the unsandboxed +language instead, and written defensively against a much wider range of possible +misuses or mistakes. + +Beginning with 1.6, PL/Java takes a more configurable approach. Using the Java +[policy file syntax][pfsyn], any of the permissions known to the JDK can +be granted to chosen Java code. 
The default policy file installed with PL/Java +includes these lines: + +``` +grant principal org.postgresql.pljava.PLPrincipal$Sandboxed * { +}; + +grant principal org.postgresql.pljava.PLPrincipal$Unsandboxed * { + + // Java does not circumvent operating system access controls; + // this grant will still be limited to what the OS allows a + // PostgreSQL backend process to do. + permission java.io.FilePermission + "<<ALL FILES>>", "read,write,delete,readlink"; +}; +``` + +A few observations fall out. Whatever the names may suggest, neither alternative +is truly "unsandboxed". Both are subject to the same Java policy, but can +be granted different permissions within it. + +As distributed, the only difference between the two is access to the filesystem. +The "sandboxed" case grants no additional permissions at all, and the +"unsandboxed" case adds read, readlink, write, and delete permission for any +file (still subject to the operating system permissions in effect for the +PostgreSQL server process, which will be enforced independently of Java). + +The permissions granted for either case are freely configurable. Granting +the more lenient or dangerous permissions to the "unsandboxed" language is +conventional, and reflected in the way PostgreSQL is more restrictive about +what roles can create functions in that language. + +The [permissions known to the JDK][jdkperms] are plentiful and fine-grained. +New permissions can also be defined and required in custom code, and selectively +granted in the policy like any other permission. + +The `PLPrincipal` indicating sandboxed/unsandboxed is only one of the conditions +that can be referred to in a policy to control the permissions granted. Others +are described below. + +## Sources of Java policy + +Java's standard `Policy` implementation will read from a sequence of policy +files specified as URLs. The first is normally part of the Java installation, +supplying permission grants necessary for trouble-free operation of the JVM +itself, and a second will be read, if present, from a user's home directory. + +PL/Java, by default, uses the first Java-supplied URL, for the policy file +installed with Java, followed by the file `pljava.policy` in the directory +reported by `pg_config --sysconfdir`. A default version of that file is +installed with PL/Java. + +The `pljava.policy` file, by default, is used _instead of_ any `.java.policy` +file in the OS user's home directory that Java would normally load. There +probably is no such file in the `postgres` user's home directory, and if +for any reason there is one, it probably was not put there with PL/Java in mind. + +The [configuration variable][confvar] `pljava.policy_urls` can be +used to name different, or additional, policy files. + +Permission grants are cumulative in Java's standard `Policy` implementation: +there is no policy syntax to _deny_ a permission if it is conveyed by some other +applicable grant in any of the files on the `policy_urls` list. If an +application must restrict a permission that is granted unconditionally +in the Java-supplied policy file, for example, the typical approach would be +to copy that file, remove the grant of that permission, and alter +`pljava.policy_urls` to read the modified file in place of the original.
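As noted above, permissions need not come from the JDK. A hedged sketch of a custom permission follows (the package, class, and target names are hypothetical); once such a class is on the class path or in an installed jar, a line like `permission com.example.acme.AcmePermission "reindex";` could be added to whichever of the grant blocks above should convey it.

```
package com.example.acme;

import java.security.AccessController;
import java.security.BasicPermission;

/* A custom permission that application code can demand before performing
 * some sensitive action. */
public final class AcmePermission extends BasicPermission
{
	public AcmePermission(String name)
	{
		super(name);
	}

	/* Called at the sites to be protected; throws AccessControlException
	 * unless the policy grants the permission to every domain on the stack. */
	public static void demandReindexPermission()
	{
		AccessController.checkPermission(new AcmePermission("reindex"));
	}
}
```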
+ +## Conditional and unconditional permission grants + +A `grant` in a policy can be unconditional, for example: + +``` +grant { + permission java.util.PropertyPermission + "sqlj.defaultconnection", "read"; +}; +``` + +That grant (which is included in the default `pljava.policy`) allows any Java +code to read that property. + +Conditional grants to `PLPrincipal$Sandboxed` and `PLPrincipal$Unsandboxed` were +shown above. + +It is also possible to condition a grant on the codebase (represented as +a URL) of the code being executed. If the `SQLJ.INSTALL_JAR` function is used +to install PL/Java's examples jar under the name `examples`, this grant will +allow the JSR-310 test example to work: + +``` +grant codebase "sqlj:examples" { + permission java.util.PropertyPermission "user.timezone", "write"; +}; +``` + +The `sqlj` URL scheme is (trivially, and otherwise nonfunctionally) defined +within PL/Java to allow forming a codebase URL from the name of an installed +jar. + +### Grant conditions currently unsupported + +A reader familiar with Java security policy may consider granting permissions +based on the signer identity of a cryptographically signed jar, or on a +`Principal` representing the PostgreSQL role executing the current function. +In this version of PL/Java, such grants are not yet supported. + +While it is not yet possible to grant permissions based on a principal +representing the PostgreSQL session user or role, it is possible for +a superuser, with `ALTER ROLE ... SET`, to set user-specific values of +`pljava.policy_urls` that will load different, or additional, policy files. +While that will only reflect the connected user at the start of the session +and not any role changes during the session, it may be enough for some uses. + +### `PLPrincipal` with a language name + +The grants for sandboxed/unsandboxed shown above have a `*` wildcard after +the principal class name. It is possible to replace the wildcard with the name +of the language (as used in SQL with `CREATE LANGUAGE` and `CREATE FUNCTION`) +in which a function is declared. + +A basic installation of PL/Java creates just two named languages, `java` and +`javaU`, declared as `TRUSTED`/sandboxed and untrusted/unsandboxed, +respectively. In such an installation, these grants would be effectively +equivalent to those shown earlier: + +``` +grant principal org.postgresql.pljava.PLPrincipal$Sandboxed "java" { +}; + +grant principal org.postgresql.pljava.PLPrincipal$Unsandboxed "javaU" { + permission java.io.FilePermission + "<<ALL FILES>>", "read,readlink,write,delete"; +}; +``` + +However, it is possible to use `CREATE LANGUAGE` to create any number of +named languages that share PL/Java's handler entries and can be used to +declare PL/Java functions. For example, suppose `CREATE TRUSTED LANGUAGE` is +used to create another language entry with the name `java_tzset` and this +grant is included in the policy: + +``` +grant principal org.postgresql.pljava.PLPrincipal$Sandboxed "java_tzset" { + permission java.util.PropertyPermission "user.timezone", "write"; +}; +``` + +If the JSR-310 test example in PL/Java's examples jar is declared with +`LANGUAGE java_tzset` rather than `LANGUAGE java` (as, in fact, it is), +it will be able to set the time zone and succeed. + +The [`SQLJ.ALIAS_JAVA_LANGUAGE`][sqljajl] function can be used to create such +aliases conveniently.
+ +When grants to specific named languages and grants with the wildcard are +present, code will have all of the permissions granted to the specific +language by name, in addition to all permissions that appear in grants to the +language class (`PLPrincipal$Sandboxed` or `PLPrincipal$Unsandboxed`, whichever +applies) with a wildcard name. + +A grant is silently ignored unless the class and the name both match. If the +`java_tzset` language were declared as above but a grant entry used the right +name but the `PLPrincipal$Unsandboxed` class by mistake, that grant would be +silently ignored. + +### Grants to a codebase compared with grants to a principal + +Whenever a Java operation requires a permission check, it could be on a call +stack several levels deep, perhaps involving code from more than one codebase +(or, more generally, "protection domain"). The Java rule is that the needed +permission must be in effect, one way or another, for every protection domain +on the call stack at the point where the permission is needed. In other words, +the available permissions are the _intersection_, over all domains on the stack, +of the permissions in effect for each domain. The rationale is that the proposed +action must not only be something the currently executing method is allowed to +do; there is a calling method causing this method to do it, so it must also be +something the caller is allowed to do, and so on up the stack. (For one crucial +exception to this rule, see [handling privileges][dopriv].) + +Permissions granted to a `Principal` are not so tightly bound to what specific +code is executing; the same code may execute at different times on behalf of +more than one principal. A principal often represents a user or role for whom +the code is executing, though role principals are not implemented in this +PL/Java release. The sandboxed/unsandboxed function distinction is represented +as a kind of `Principal` because it, too, is a property of the thread of +execution, from its entry at the SQL-declared function entry point and through +any number of protection domains the thread may traverse. Any permissions +granted by principal may be thought of as combined with any codebase-specific +permissions in every domain present on the stack. + +### Entry points other than SQL-declared functions + +Not every entry into PL/Java is through an SQL-declared function with an +associated language name or sandboxed/unsandboxed property. For those that are +not, permission decisions are based on an "access control context" +(essentially, the in-effect `Principal`s and initial protection domains) +constructed as described here. + +#### Set-returning functions + +While a set-returning function _is_ declared as an SQL function, the +initial call is followed by repeated calls to the returned iterator or +ResultSet provider or handle, and a final call to close the provider or handle. +The access control context constructed for the initial call is saved, and reused +while iterating and closing. + +#### Savepoint and transaction listeners + +Java code may register listeners for callbacks at lifecycle stages of savepoints +or transactions. Each callback will execute in the access control context of +the code that registered it, except that PL/Java's own domain will also be +represented on the stack. 
Because effective permissions are an intersection +over all domains on the stack, if any permission has been granted to the +callback's codebase that is not also granted to PL/Java's own code, the +callback code will be unable to exercise that permission except within +a [`doPrivileged`][dopriv] block. + +#### Mapped UDT `readSQL`/`writeSQL` methods + +When a Java user-defined type is defined without fully integrating it into +PostgreSQL's type system as a `BaseUDT`, its `readSQL` and `writeSQL` methods +will not have corresponding SQL function declarations, but will be called +directly as PL/Java converts values between PostgreSQL and Java form. Those +calls will be made without any `PLPrincipal`, sandboxed or unsandboxed, so +they will execute with only the permissions granted to their codebase or +unconditionally. + +The conversion functions for a `BaseUDT` do have SQL function declarations, and +will execute in a context constructed based on the declaration in the usual way. + +### SQL-declared functions not in PL/Java-managed jars + +It is possible to issue an SQL `CREATE FUNCTION` naming a method from a codebase +that is not a PL/Java-managed `sqlj:` jar, such as a jar on the filesystem +module path, or a method of the Java runtime itself. For example, many how-to +articles can be found on the web that demonstrate a successful PL/Java +installation by declaring an SQL function that directly calls +`java.lang.System.getProperty`. + +Such declarations are allowed, but will execute as if called from a protection +domain with the same `Principal`s, if any, that PL/Java would normally supply, +and no other permissions but those the policy grants unconditionally. + +_Note: many of the how-to articles that can be found on the +web happen to demonstrate their `System.getProperty`-calling example functions +on some property that isn't readable under Java's default policy. +Those examples should be changed to use a property that is normally readable, +such as `java.version` or `org.postgresql.pljava.version`._ + +### Class static initializers + +If a class contains several methods that would be given different +access control contexts (declared with different `trust` or +`language` attributes, say), the permissions available when the class +initializer runs will be those of whichever function is called first +in a given session. Therefore, when putting actions that require +permissions into a class's static initializer, those actions should require +only the common subset of permissions that the initializer could be run with +no matter which function is called or declared first. Actions that require +other specific permissions could be deferred until the first call of +a function known to be granted those permissions. + +Such actions can be left in the static initializer if a function granted +the needed permissions is known to always be the first one that the application +will call in any given session. + +## Troubleshooting + +When in doubt what permissions may need to be granted in `pljava.policy` to run +some existing PL/Java code, these techniques may be helpful. + +### Running PL/Java with a 'trial' policy + +To simplify the job of finding the permissions needed by some existing code, +it is possible to run PL/Java at first with a 'trial' policy, allowing code to +run while logging permissions that `pljava.policy` has not granted. The log +entries have a condensed format meant to be convenient for this use. +Trial policy configuration is described [here][trial]. 
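If such logging shows a permission that is granted to the application's codebase but still refused because some other domain on the stack (PL/Java's own, in the listener case described earlier) lacks it, the usual remedy is a [`doPrivileged`][dopriv] block. A minimal sketch, with a hypothetical file path:

```
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.InputStream;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public class PrivilegedReader
{
	/* Open a file using only this class's own grants, even when callers
	 * further up the stack have not been granted the FilePermission. */
	public static InputStream openConfig() throws FileNotFoundException
	{
		try
		{
			return AccessController.doPrivileged(
				(PrivilegedExceptionAction<InputStream>)() ->
					new FileInputStream("/etc/acme/acme.conf"));
		}
		catch ( PrivilegedActionException e )
		{
			throw (FileNotFoundException)e.getCause();
		}
	}
}
```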
+ +### Using policy debug features provided by Java + +Java itself offers a number of debugging switches to reveal details of +permission decisions. It may be useful to add `-Djava.security.debug=access` in +the setting of `pljava.vmoptions`, and observe the messages on the PostgreSQL +backend's standard error (which should be included in the log file, +if `logging_collector` is `on`). It is not necessary to change the +`pljava.vmoptions` setting cluster-wide, such as in `postgresql.conf`; it can +be set in a single session for troubleshooting purposes. + +Other options for `java.security.debug` can be found in +[Troubleshooting Security][tssec]. Some can be used to filter the logging down +to requests for specific permissions or from a specific codebase. + +The log output produced by Java's debug options can be voluminous compared to +the condensed output of PL/Java's trial policy. + +## Forward compatibility + +The current implementation makes use of the Java classes +`Subject` and `SubjectDomainCombiner` in the `javax.security.auth` package. +That should be regarded as an implementation detail; it may change in a future +release, so relying on it is not recommended. + +The developers of Java have elected to phase out important language features +used by PL/Java to enforce policy. The functionality has been removed in +Java 24. For migration planning, this version of PL/Java can still enable +policy enforcement in Java versions up to and including 23, and Java 17 and 21 +are positioned as long-term support releases. (There is a likelihood, +increasing with later Java versions, even before policy stops being enforceable, +that some internal privileged operations by Java itself, or other libraries, +will cease to work transparently, and may have to be manually added to a site's +PL/Java policy.) + +For details on how PL/Java will adapt, please bookmark +[the JEP 411 topic][jep411] on the PL/Java wiki. + + +[pfsyn]: https://docs.oracle.com/en/java/javase/14/security/permissions-jdk1.html#GUID-7942E6F8-8AAB-4404-9FE9-E08DD6FFCFFA +[jdkperms]: https://docs.oracle.com/en/java/javase/14/security/permissions-jdk1.html#GUID-1E8E213A-D7F2-49F1-A2F0-EFB3397A8C95 +[confvar]: variables.html +[dopriv]: https://docs.oracle.com/en/java/javase/14/security/java-se-platform-security-architecture.html#GUID-E8898CB5-65BB-4D1A-A574-8F7112FC353F +[sqljajl]: ../pljava/apidocs/org.postgresql.pljava.internal/org/postgresql/pljava/management/Commands.html#alias_java_language +[tssec]: https://docs.oracle.com/en/java/javase/14/security/troubleshooting-security.html +[trial]: trial.html +[unenforced]: unenforced.html +[jep411]: https://github.com/tada/pljava/wiki/JEP-411 +[smprop]: ../install/smproperty.html diff --git a/src/site/markdown/use/sqlxml.md b/src/site/markdown/use/sqlxml.md new file mode 100644 index 00000000..1c4f5de3 --- /dev/null +++ b/src/site/markdown/use/sqlxml.md @@ -0,0 +1,538 @@ +# Working with XML + +## In PL/Java before 1.5.1 + +PL/Java functions before 1.5.1 have been able to access a value of XML type as +a `String` object. That has been workable, but an extra burden if porting code +that used the JDBC 4.0 `java.sql.SQLXML` API, and with notable shortcomings. + +### Shortcomings + +#### Character set encoding + +PostgreSQL stores XML values serialized according to `server_encoding`, and +depending on that setting, conversion to a Java `String` can involve +transcoding. 
+ +XML has rules to handle characters that may be representable in one encoding +but not another, but the `String` conversion is unaware of them, and may fail +to produce a transcoding that represents the same XML value. + +#### Memory footprint + +While a database design using XML may be such that each XML datum is +individually very small, it is also easy to store---or generate in +queries---large XML values. When mapped to a Java `String`, such an XML value +must have its full, uncompressed, character-serialized size allocated +on the Java heap and be copied there from native memory, before the Java +code even begins to make use of it. Even in cases where the Java processing to +be done could be organized to stream through parse events in constant-bounded +memory, the `String` representation forces the entire XML value to occupy Java +memory at once. Any tuning of PL/Java's heap size allowance could have to +consider a worst-case estimate of that size, or risk failures at run time. + +## The JDBC 4.0 `java.sql.SQLXML` API + +PL/Java 1.5.1 adds support for this API. A Java parameter or return type in a +PL/Java function can be declared to be `SQLXML`, and such objects can be +retrieved from `ResultSet` and `SQLInput` objects, and used as +`PreparedStatement` parameters or in `SQLOutput` and updatable `ResultSet` +objects. + +### Reading a PostgreSQL XML value as a _readable_ `SQLXML` object + +An `SQLXML` instance can have the "conceptual states" _readable_ and _not +readable_, _writable_ and _not writable_. In PL/Java, an instance passed in as a +parameter to a function, or retrieved from a `ResultSet`, is _readable_ and _not +writable_, and can be used as input to Java processing using any of the +following methods: + +`getBinaryStream()` +: Obtain an `InputStream` with the raw, byte-stream-serialized XML, which will + have to be passed to an XML parser. The parser will have to determine the + encoding used from the declaration at the start of the stream, or assume + UTF-8 if there is none, as the standard provides. + +`getCharacterStream()` +: Like `getBinaryStream` but as a stream of Java characters, with the underlying + encoding already decoded. May be convenient for use with parsing code that + isn't able to recognize and honor the encoding declaration, but any standard + XML parser would work as well from `getBinaryStream`, which should be + preferred when possible. A parser working from the binary stream is able to + handle transcoding, if needed, in an XML-aware way. With this method, any + needed transcoding is done without XML-awareness to produce the character + stream. + +`getString()` +: Obtain the entire serialized XML value decoded as a Java `String`. Has the + same memory footprint and encoding implications discussed for the legacy + conversion to `String`, but may be convenient for some purposes or for + values known to be small. + +`getSource(javax.xml.transform.stream.StreamSource.class)` +: Equivalent to one of the first two methods, but with the stream wrapped in + a `Source` object, directly usable with Java XML transformation APIs. + +`getSource(javax.xml.transform.sax.SAXSource.class)` +: Obtain a `Source` object that presents the XML in parsed form via the SAX API, + where the caller can register callback methods for XML constructs of + interest, and then have Java stream through the XML value, calling those + methods.
+ +`getSource(javax.xml.transform.stax.StAXSource.class)` +: Obtain a `Source` object that presents the XML in parsed form via the StAX + API, where the value can be streamed through by calling StAX pull methods + to get one XML construct at a time. Java code written to this API can more + clearly reflect the expected structure of the XML document, compared to + code written in the callback style for SAX. + +`getSource(javax.xml.transform.dom.DOMSource.class)` +: Obtain a `Source` object presenting the XML fully parsed as a navigable, + in-memory DOM tree. + +`getSource(null)` +: Obtain a `Source` object of a type chosen by the implementation. Useful when + the `Source` object will be passed to a standard Java transformation API, + which can handle any of the above forms, letting the `SQLXML` implementation + choose one that it implements efficiently. + +Exactly one of these methods can be called exactly once on a _readable_ `SQLXML` +object, which is thereafter _not readable_. (The _not readable_ state prevents +a second call to any of the getter methods; it does not, of course, prevent +reading the XML content through the one stream, `String`, or `Source` obtained +from the getter method that was just called.) + +Except in the `String` or DOM form, which bring the entire XML value into Java +memory at once, the XML content is streamed directly from native PostgreSQL +memory as Java code reads it, never accumulating in the Java heap unless that +is what the application code does with it. Java heap sizing, therefore, can +be based on just what the application Java code will do with the data. + +The most convenient API to use in an application will often be SAX or StAX, +in which the code can operate at the level of already-parsed, natural XML +constructs. Code designed to work with a navigable DOM tree can easily obtain +that form (but it should be understood that DOM will pull the entire content +into Java memory at once, in a memory-hungry form that can easily be twenty +times the size of the serialized value). + +#### Obtaining a _readable_ `SQLXML` object + +To obtain a _readable_ instance, declare `java.sql.SQLXML` as the type of a +function parameter where PostgreSQL will pass an XML argument, or use the +`getSQLXML` or `getObject(..., SQLXML.class)` methods on a `ResultSet`, or the +`readSQLXML` or `readObject(SQLXML.class)` methods on `SQLInput`. A fully +JDBC-4.0 compliant driver would also return `SQLXML` instances from the +non-specific `getObject` and `readObject` methods, but in PL/Java, those have +historically returned `String`. Because 1.5.1 is not a major release, their +behavior has not changed, and the more-specific methods must be used to obtain +`SQLXML` instances. + +### Creating/returning a PostgreSQL XML value with a _writable_ `SQLXML` object + +PL/Java will supply an empty `SQLXML` instance that is _writable_ and _not +readable_ via the `Connection` method `createSQLXML()`. It can be used as an +output destination for any of several Java XML APIs, through a selection of +`set...` methods exactly mirroring the available `get...` methods described +above.
+ +_The API is unusual: except for `setString`, which takes a `String` parameter +and returns `void` as a typical "setter" method would, the other setter methods +are used for the object they return---an `OutputStream`, `Writer`, or +`Result`---which the calling code should then use to add content to the XML +value._ + +Exactly one setter method can be called exactly once on a _writable_ `SQLXML` +object, which is thereafter _not writable_. (The _not writable_ state prevents +a second call to any setter method; XML content must still be written via the +stream or `Result` obtained from the one setter that was just called, except +in the case of `setString`, which populates the value at once.) Content being +written to the `SQLXML` object is accumulated in PostgreSQL native memory, +not the Java heap. + +A `SQLXML` object, once it has been fully written and closed, can be +returned from a Java function, passed as a `PreparedStatement` parameter to a +nested query, or stored into writable `ResultSet`s used for composite function +or trigger results. It can be used exactly once in any of those ways, which +transfer its ownership back to PostgreSQL, leaving it inaccessible from Java. + +#### When a _writable_ `SQLXML` object is considered closed + +A _writable_ `SQLXML` object cannot be presented to PostgreSQL before it is +closed to confirm that writing is complete. (One written by `setString` is +considered written, closed, and ready to use immediately.) + +When it is written using a stream obtained from `setBinaryStream`, +`setCharacterStream`, or +`setResult(javax.xml.transform.stream.StreamResult.class)`, it +is considered closed when the stream's `close` method is called. +This will typically _not_ be done by a Java `Transformer` with the stream +as its result, and so should be explicitly called after such a transformation +completes. + +When written using a `SAXResult`, it is considered closed when the +`ContentHandler`'s `endDocument` method is called, and when written using a +`StAXResult`, it is considered closed when the `XMLStreamWriter`'s +`writeEndDocument` method is called. When one of these flavors of `Result` is +used with a Java `Transformer`, these methods will have been called in the +normal course of the transformation, so nothing special needs to be done after +the transformation completes. + +What it means to `close` a `DOMResult` is murkier. The application code must +call the `DOMResult`'s `setNode` method, passing what will be the root node of +the result document. This can be done before or after (or while) child nodes and +content are added to that node. However, to avoid undefined behavior, +application code must make no further modification to that DOM tree after the +`SQLXML` object has been presented to PostgreSQL (whether via a +`PreparedStatement` `set` method, `ResultSet` `update` method, +`SQLOutput` `write` method, or returned as the function result). + +#### Using a `Result` object as a `Transformer` result + +Classes that extend `javax.xml.transform.Transformer` will generally accept +any flavor of `Result` object and select the right API to write the +transformation result to it. There is often no need to care which `Result` +flavor to provide, so it is common to call `setResult(null)` to let the +`SQLXML` implementation itself choose a flavor based on implementation-specific +efficiency considerations. 
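Here is a minimal sketch of that pattern, using an identity transform for brevity (a real function would usually configure the `Transformer` from a stylesheet). Note that if the implementation happens to hand back a stream-flavored `Result`, the underlying stream would still need an explicit close after the transformation, as described above.

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.SQLXML;

import javax.xml.transform.Result;
import javax.xml.transform.Source;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;

import org.postgresql.pljava.annotation.Function;

public class XmlCopy
{
	/* Copy an XML value through an identity Transformer, letting the
	 * SQLXML implementation choose both the Source and Result flavors. */
	@Function
	public static SQLXML identityTransform(SQLXML in)
	throws SQLException, TransformerException
	{
		Connection c = DriverManager.getConnection("jdbc:default:connection");
		SQLXML out = c.createSQLXML();
		Source src = in.getSource(null);
		Result rlt = out.setResult(null);
		TransformerFactory.newInstance().newTransformer().transform(src, rlt);
		return out;
	}
}
```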
+ +In the case of a `DOMResult`, if the `Result` object is simply passed to a +`Transformer` without calling `setNode` first, the `Transformer` itself will +put an empty `Document` node there, which is then populated with the +transformation results. + +A `Document` node, however, enforces conformance to the strict rules of +`XML(DOCUMENT)` form (described below). If the content to be written will +conform only to the looser rules of `XML(CONTENT)` form, application code should +call `setNode` supplying an empty `DocumentFragment` node, before passing the +`Result` object to a `Transformer`. + +The flavor of `Result` returned by `setResult(null)` will never +(in PL/Java) be `DOMResult`. + +### Using an unread _readable_ `SQLXML` object as a written one + +The general rule that only a _writable_ instance (that has been written and +closed) can be used as a function result, or passed into a nested query, admits +one exception, allowing a _readable_ instance that Java code has obtained but +not read. That makes it simple for Java code to obtain an `SQLXML` instance +passed in as a parameter, or from a query, and use it directly as a result or a +nested-query parameter. Any one instance can be used this way no more than once. + +### `XML(DOCUMENT)` and `XML(CONTENT)` + +An XML value in SQL can have the type `XML(DOCUMENT)` or `XML(CONTENT)` (as +those are defined in the ISO SQL standard, 2006 and later), which PostgreSQL +does not currently treat as distinguishable types. The `DOCUMENT` form must have +exactly one root element, may have a document-type declaration (DTD), and has +strict limits on where other +constructs (other than comments and processing instructions) can occur. A value +in `CONTENT` form may have no root element, or more than one element at top +level, and other constructs such as character data outside of a root element +where `DOCUMENT` form would not allow them. + +#### How both forms are accommodated when reading + +Java code using a _readable_ `SQLXML` instance as input should be prepared to +encounter either form (unless it has out-of-band knowledge of which form will be +supplied). If it requests a `DOMSource`, `getNode()` will return a `Document` +node, if the value met all the requirements for `DOCUMENT`, or a +`DocumentFragment` node, if it was parsable as `CONTENT`. Java code requesting a +`SAXSource` or `StAXSource` should be prepared to handle a sequence of +constructs that might not be encountered when parsing a strictly conforming +`DOCUMENT`. Java code that requests an `InputStream`, `Reader`, `String`, or +`StreamSource` will be on its own to parse the data in whichever form appears. + +##### Effect on parsing of whitespace + +In `DOCUMENT` form, any whitespace outside of the single root element is +considered markup, not character data. When the value is parsable as `DOCUMENT`, +and read through PL/Java's `SAXSource` or `StAXSource`, no whitespace that +occurs outside of the root element will be reported to the application. +PL/Java's `DOMSource` will present a `Document` node with no whitespace +text-node children outside of the root element. + +If the value parses as `CONTENT`, PL/Java's `DOMSource` will present a +`DocumentFragment` node with all character data, including whitespace, +preserved. The streaming operation of the `SAXSource` and `StAXSource` is more +complicated, and lossy for whitespace (only if it occurs outside of any element) +ahead of the first parse event that would not be possible in `DOCUMENT` form. 
+All whitespace beyond that point is preserved. + +#### How both forms are accommodated when writing + +Java code using a _writable_ SQLXML instance to produce a result may write +either `DOCUMENT` or `CONTENT` form. If using `DOMResult`, it must supply a +`DocumentFragment` node to produce a `CONTENT` result, as a `Document` node will +enforce the `DOCUMENT` requirements. + +### An `SQLXML` object has transaction lifetime + +The JDBC spec provides that an `SQLXML` instance is "valid for the duration of +the transaction in which it was created." One PL/Java function can hold an +`SQLXML` instance (in a static or session variable or data structure), and other +PL/Java functions called later in the same transaction can continue reading from +or writing to it. If the transaction has committed or rolled back, those +operations will generate an exception. + +Once a _writable_ `SQLXML` object, or an unread, _readable_ one, has been +presented to PostgreSQL as the result of a PL/Java function or through a +`PreparedStatement`/`ResultSet`/`SQLOutput` setter method, it is no longer +accessible in Java. + +During a transaction, resources held by a `SQLXML` object are reclaimed as soon +as a _readable_ one has been fully read, or a _writable_ one has been presented +to PostgreSQL and PostgreSQL is done with it. If application code holds a +readable `SQLXML` object that it determines it will not read, or a writable one +it will not present to PostgreSQL, it can call the `free` method to allow the +resources to be reclaimed sooner than the transaction's end. + +### Lazy detoasting + +PostgreSQL can represent large XML values in "TOASTed" form, which may be in +memory but compressed (XML typically compresses to a small fraction of its +serialized size), or may be a small pointer to a location in storage. A +_readable_ `SQLXML` instance over a TOASTed value will not be detoasted until +Java code actually begins to read it, so the memory footprint of an instance +being held but not yet read is kept low. + +### Validation of content + +Some of the methods by which a _writable_ instance can be written are not +XML-specific APIs, but allow arbitrary content to be written (as a `String`, +`Writer`, or `OutputStream`). When written by those methods, type safety is +upheld by verifying that the written content can be successfully reparsed, +accepting either `DOCUMENT` or `CONTENT` form. + +It remains possible to declare the Java type `String` for function parameters +and returns of XML type, and to retrieve and supply `String` for `ResultSet` +columns and `PreparedStatement` parameters of XML type. This legacy mapping +from `String` to XML uses PostgreSQL's `xml_in` function to verify the form of a +`String` from Java. That function may reject some valid values if the server +configuration variable `xmloption` is not first set to `DOCUMENT` or `CONTENT` +to match the type of the value. + +#### Validation against a schema + +Java's XML APIs support validation using a choice of schema languages; +support for XML Schema 1.0 is included in the Java runtime, and implementations +of others can be placed on the class path. + +A `schema` method is available through the "Extended API to configure +XML parsers" described below, but will only work on a `SAXSource` or `DOMSource` +(or a `StreamResult`, which uses a SAX parser to validate the stream written). +Other limitations are described under "known limitations" below. 
+ +More flexibly, `javax.xml.validation.Validator` or +`javax.xml.validation.ValidatorHandler` can be used in more situations and with +fewer limitations. + +### Usable with or without native XML support in PostgreSQL + +In symmetry to using Java `String` for SQL XML types, PL/Java allows the Java +`SQLXML` type to be used with PostgreSQL data of type `text`. This allows full +use of the Java XML APIs even in PostgreSQL instances built without XML support. +All of the `SQLXML` behaviors described above also apply in this usage. + +If a _readable_ `SQLXML` instance obtained from a `text` value is directly used +to set or return a value of PostgreSQL's XML type, the XML-ness of the content +is verified. + +## Extensions to the `java.sql.SQLXML` API + +### Extended API to configure XML parsers + +Retrieving or verifying the XML content in an `SQLXML` object can involve +applying an XML parser. The full XML specification includes features that can +require an XML parser to retrieve external resources or consume unexpected +amounts of memory. The full feature support may be an asset in an environment +where the XML content will always be from a known, trusted source, or a +liability if less is known about the XML content being processed. + +The [Open Web Application Security Project][OWASP] (OWASP) advocates for the +default use of settings that strictly limit the related features of Java XML +parsers, as outlined in a ["cheat sheet"][cheat] the organization publishes. + +However, the recommended defaults really are severely restrictive (for example, +disabling document-type declarations by default will cause PL/Java's `SQLXML` +implementation to reject all XML values that contain DTDs). Therefore, there +must be a simple and clear way for code to selectively adjust the settings, or +adopting the strictest settings by default would pose an unacceptable burden to +developers. + +The traditional Java way to adjust the XML parser is overwhelmingly fiddly, +involving `setFeature` or `setProperty` calls that identify the feature to be +set by passing an arcane URI that might be found in the documentation, or the +[cheat sheet][cheat], or cargo-culted from some other code base. In some cases, +the streamlined `SQLXML` API conceals the steps where adjustments would have +to be applied. With no better way to adjust the parser, it would be an +unrealistic developer burden to adopt the restrictive defaults and expect the +developer to relax them. + +Therefore, PL/Java has an extension API documented at the +[org.postgresql.pljava.Adjusting.XML class][adjx]. With the API, it is possible +to obtain a `Source` object from an `SQLXML` instance `sqx` in either the +standard or extended way shown in this example for a `SAXSource`: + + SAXSource src = sqx.getSource(SAXSource.class); // OR + SAXSource src = sqx.getSource(Adjusting.XML.SAXSource.class) + .allowDTD(true).get(); + +The first form would obtain a `SAXSource` configured with the restrictive, +OWASP-recommended defaults, which would reject any content with a DTD. The +second form would obtain a `SAXSource` configured to allow a DTD in the +content, with other parser features left at the restrictive defaults. + +#### Additional adjustments in recent Java versions + +Additional security-related adjustments have appeared in various Java releases, +and are described in the [Java API for XML Processing Security Guide][jaxps]. 
+They include a number of configurable limits on maximum sizes and nesting +depths, and limits to the set of protocols allowable for fetching external +resources. Corresponding methods are provided in [PL/Java's API][adjx]. +Also see "known limitations" below. + +#### Supplying a SAX or DOM `EntityResolver` or `Schema` + +Methods are provided to set an `EntityResolver` that controls how a SAX or DOM +parser resolves references to external entities, or a `Schema` by which a SAX +or DOM parser can validate content while parsing. Corresponding methods are +supplied in PL/Java's API, but are implemented only when operating on a +`SAXSource` or `DOMSource` (or `StreamResult`, affecting its validation of +the content written). + +For StAX, control of resolution is done with a slightly different class, +`XMLResolver`, which can be set on a StAX parser as an ordinary property; +this can be done with PL/Java's `setFirstSupportedProperty` method. + +A StAX parser cannot have a `Schema` directly assigned, but can be used +with a `javax.xml.validation.Validator`. + +Complete details can be found [in the API documentation][adjx]. + +#### Using XML Catalogs when running on Java 9 or later + +When running on Java 9 or later, a local XML Catalog can be set up to +efficiently and securely resolve what would otherwise be external resource +references. The registration of a Catalog on a Java 9 or later parser involves +only existing methods for setting features/properties, as described +[in the Catalog API documentation][catapi], and can be done with the +`setFirstSupportedFeature` and `setFirstSupportedProperty` methods +in PL/Java's `Adjusting` API. + +When running on Java 22 or later, there is also a fallback catalog that can +satisfy requests for a small number of DTDs that are defined by the Java +platform. The behavior when this fallback resolver cannot satisfy a request +can be configured by setting the `jdk.xml.jdkcatalog.resolve` property, for +which, again, the `setFirstSupportedProperty` method can be used. + +### Extended API to set the content of a PL/Java `SQLXML` instance + +When a `SQLXML` instance is returned from a PL/Java function, or passed in to +a PL/Java `ResultSet` or `PreparedStatement`, it is used directly if it is an +instance of PL/Java's internal implementation. + +However, a PL/Java function might reasonably use another JDBC driver and obtain +a `SQLXML` instance from a connection to some other database. If such a +'foreign' `SQLXML` object is returned from a function, or passed to a PL/Java +`ResultSet` or `PreparedStatement`, its content must first be copied to a new +instance created by PL/Java's driver. This happens transparently (but implies +that the 'foreign' instance must be in _readable_ state at the time, and +afterward will not be). + +The transparent copy is made by passing `null` as `sourceClass` to the foreign +object's `getSource` method, so the foreign object is in control of the type of +`Source` it will return. PL/Java will copy from a `StreamSource`, `SAXSource`, +`StAXSource`, or `DOMSource`. In the case of a `StreamSource`, an XML parser +will be involved, either to verify that the stream is XML, or to parse and +reserialize it if necessary to adapt its encoding to the server's. The parser +used by default will have the default, restrictive settings. + +To allow adjustment of those settings, the copying operation can be invoked +explicitly through the `Adjusting.XML.SourceResult` class.
For example, when +_sx_ is a 'foreign' `SQLXML` object, the transparent operation + + return sx; + +is equivalent to + + return conn.createSQLXML().setResult(Adjusting.XML.SourceResult.class) + .set(sx.getSource(null)).get().getSQLXML(); + +where _conn_ is the PL/Java JDBC connection named by +`jdbc:default:connection`. To adjust the parser settings, as usual, adjusting +methods can be chained after the `set` and before the `get`. The explicit form +also allows passing a `sourceClass` other than `null` to the foreign object's +`getSource` method, if there is a reason not to let the foreign object choose +the type of `Source` to return. + +### `SQLXML` views of non-XML data + +There are the beginnings of a feature supporting +[XML views of non-XML data](xmlview.html), so that some data types that are +not XML, but are similarly tree-structured, can be manipulated in Java using +Java's extensive support for XML. + +[OWASP]: https://www.owasp.org/index.php/About_The_Open_Web_Application_Security_Project +[cheat]: https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#java +[adjx]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/Adjusting.XML.html +[jaxps]: https://docs.oracle.com/en/java/javase/13/security/java-api-xml-processing-jaxp-security-guide.html +[catapi]: https://docs.oracle.com/javase/9/core/xml-catalog-api1.htm#JSCOR-GUID-51446739-F878-4B70-A36F-47FBBE12A26A + +## Known limitations + +### Limitations of `StAX` support + +PL/Java's `StAXSource` supplies an `XMLStreamReader` that only supports the +expected usage pattern: + +``` +while ( streamReader.hasNext() ) +{ + streamReader.next(); + /* methods that query state of the current parse event */ +} +``` + +It would be unexpected to reorder that pattern so that queries of the current +event occur after `hasNext` but before `next`, and may produce +`IllegalStateException`s or incorrect results from a `StAXSource` supplied +by PL/Java. + +### Compatibility of `StAX` with `TrAX` (Java's transformation API) + +The `javax.xml.transform` APIs are required to accept any of a specified +four types of `Source` and `Result`: `StreamSource`, `DOMSource`, `SAXSource`, +or `StAXSource` (and their `Result` counterparts). However, `StAX` was a later +addition to the family. While `TrAX` is a mature and reliable transformation +API, and `StAX` is well suited for direct use in new code that will parse or +generate XML, the handful of internal bridge classes that were added +to the Java runtime for `StAX` and `TrAX` interoperation are not dependable, +especially when handling `XML(CONTENT)`. When supplying a `Source` or `Result` +to a `Transformer`, a variant other than `StAX` should be chosen whenever +possible, whether PL/Java's or any other implementation. + +For convenience, the `SQLXML` API allows passing a null value to `getSource` +or `setResult`, allowing the implementation to choose the type of `Source` +or `Result` to supply. PL/Java's implementation will never supply a `StAX` +variant when not explicitly requested. + +### Pay no attention to that man behind the curtain + +The processing done "behind the curtain" to be able to handle `XML(CONTENT)` +and `XML(DOCUMENT)` form, when the form is not known in advance, can have +some visible effects when combined with the newer [security limit][jaxps] +adjustments, or `schema` set on a SAX or DOM parser. 
For example, a very tight +setting of `maxElementDepth` may reveal that elements in the input are +nested one level deeper than expected, or a very tight `maxXMLNameLimit` may +reject a document whose expected names are all shorter. Schema validation for +some schemas and schema languages may likewise report an unexpected element +at the root of the document. + +Issues with `maxElementDepth` or `maxXMLNameLimit` can be avoided by using +generous settings chosen to limit extreme resource consumption rather than +trying to set them as tightly as possible. + +Problems with schema validation when assigning a `Schema` directly to the +SAX or DOM parser can be alleviated by using a `javax.xml.validation.Validator` +or `ValidatorHandler` instead, layered over PL/Java's parser, where it will +see the expected view of the content. diff --git a/src/site/markdown/use/trial.md b/src/site/markdown/use/trial.md new file mode 100644 index 00000000..4c0d1706 --- /dev/null +++ b/src/site/markdown/use/trial.md @@ -0,0 +1,186 @@ +# Migrating to policy-based permissions from an earlier PL/Java release + +When migrating existing code from a PL/Java 1.5 or earlier release to 1.6, +it may be necessary to add permission grants in the new `pljava.policy` file, +which grants few permissions by default. PL/Java's security policy configuration +is described [here][policy]. + +To simplify migration, it is possible to run with a 'trial' policy initially, +allowing code to run but logging permissions that may need to be added in +`pljava.policy`. + +## Configuring a trial policy + +Even when running with a trial policy, the [configuration variable][vbls] +`pljava.policy_urls` should point to the normal policy file(s), as usual. +That is where the ultimate policy for production will be developed. + +The trial policy is configured by creating another policy file somewhere, using +the same policy file syntax, and pointing to it with +`-Dorg.postgresql.pljava.policy.trial=`_url_ added to the configuration variable +`pljava.vmoptions`. + +Anything _this_ policy allows will be allowed, but will be logged if the regular +policy would have denied it. So you can make this one more generous than the +regular policy, and use the log entries to identify grants that might belong in +the regular policy. As you add the missing ones to the real policy, they stop +getting logged by this one, and the log gets quieter. You can make this one as +generous as you are comfortable making it during the period of testing and +tuning. + +At the very extreme of generosity it could be this: + +``` +grant { + permission java.security.AllPermission; +}; +``` + +and it would happily allow the code under test to do _anything at all_, while +logging whatever permissions aren't in the regular policy. (A side effect of +this would be to erase any distinction between `java` and `javaU` for as long as +the trial policy is in place.) Such a setting would be difficult to recommend in +general, but it might suffice if the only code being tested has already been in +use for years under PL/Java 1.5 and is well trusted, users of the database have +not been granted permission to install more PL/Java functions, and if +the purpose of testing is only to learn what permissions the code uses that +may need to be granted in the 1.6 policy. 
+ +### Granting `TrialPolicy$Permission` + +When `AllPermission` is too broad, there is the difficulty that Java's +permission model does not have a subtractive mode; it is not simple to say +"grant `AllPermission` except for this list of the ones I'd really rather not." +Therefore, PL/Java offers a custom "meta-permission" with roughly that meaning: + +``` +grant { + permission org.postgresql.pljava.policy.TrialPolicy$Permission; +}; +``` + +`TrialPolicy$Permission` is effectively `AllPermission` but excluding any +`FilePermission` (so that `java`/`javaU` distinction stays meaningful) as well +as a couple dozen other +`SecurityPermission`/`ReflectPermission`/`RuntimePermission` instances in the +"really rather not" category. If its hard-coded exclusion list excludes +any permissions that some unusual code under test might legitimately need, +those can be explicitly added to the trial policy too. + +Configuring a trial policy can be a bit of a balancing act: if it is very +generous, that minimizes the chance of breaking the code under test because of +a denied permission, but increases potential exposure if that code misbehaves. +A more limited trial policy decreases exposure but increases the risk of +service interruption if the code under test really does need some permission +that you weren't comfortable putting in the trial policy. Somewhere near +the sweet spot is where `TrialPolicy$Permission` is aimed. + +All other normal policy features also work in the trial policy. If your +code is installed in several different jars, you can use `grant codebase` +separately to put different outer limits around different jars, and completely +remove the grants for one jar after another as you are satisfied you have added +the right things for each one in the regular policy. You could also set +different limits for `java` and `javaU` by granting to the `PLPrincipal`, +just as you can in the regular policy. + +## About false positives + +One thing to be aware of is that the trial policy can give false alarms. It is +not uncommon for software to include configuration-dependent bits that +tentatively try certain actions, catch exceptions, and then proceed normally, +having discovered what the configuration allows. The trial policy can log +permission denials that happen in the course of such checks, even if the denial +has no functional impact on the code. + +There may be no perfect way to tell which denials being logged by the trial +policy are false alarms. One approach would be to collect a sampling of log +entries, figure out what user-visible functions of the code they were coming +from, and then start a dedicated session without the +`-Dorg.postgresql.pljava.policy.trial` setting (or with it pointing to a +different, more restrictive version of the policy, not granting the permissions +you're curious about), then exercise those functions of the code and see if +anything breaks. Other users could still have the more generous trial setting in +their sessions, so as not to be affected by your experiments. + +False positives, of course, are also affected by the choice of how generous to +make the trial policy. Log entries are only produced for permissions that the +regular policy denies but the trial policy allows. If the permissions being +silently checked by benign code are not granted in the trial policy, they will +be silently denied, just as they would in normal operation, and produce no +log entries.
+ +## Format of the log entries + +To avoid bloating logs too much, `TrialPolicy` emits an abbreviated form of +stack trace for each entry. The approach is to keep one stack frame above and +one below each crossing of a module or protection-domain boundary, with `...` +replacing intermediate frames within the same module/domain, and the code +source/principals of the denied domain shown wrapped in `>> <<` at +the appropriate position in the trace. For the purpose of identifying the +source of a permission request and the appropriate domain(s) to be granted +the permission, this is probably more usable than the very long full traces +available with `java.security.debug`. + +The messages are sent through the PostgreSQL log if the thread making the +permission check knows it can do so without blocking; otherwise they just go to +standard error, which should wind up in the PostgreSQL log anyway, if +`logging_collector` is on; otherwise it may be system-dependent where they go. + +There isn't really a reliable "can I do so without blocking?" check for every +setting of the `pljava.java_thread_pg_entry` configuration variable. +If it is set to `throw` (and that is a workable setting for the code under +test), the logging behavior will be more predictable; entries from the main +thread will go through PostgreSQL's log facility always, and those from any +other thread will go to standard error. + +Here is an example of two log entries, generated by the same permission check: + +``` +POLICY DENIES/TRIAL POLICY ALLOWS: ("java.net.SocketPermission" "127.0.0.1:5432" "connect,resolve") +java.base/java.security.ProtectionDomain.implies(ProtectionDomain.java:321) +... +java.base/java.net.Socket.<init>(Socket.java:294) +>> null [PLPrincipal.Sandboxed: java] << +jdk.translet/die.verwandlung.GregorSamsa.template$dot$0() +... +jdk.translet/die.verwandlung.GregorSamsa.transform() +java.xml/com.sun.org.apache.xalan.internal.xsltc.runtime.AbstractTranslet.transform(AbstractTranslet.java:624) +... +java.xml/com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transform(TransformerImpl.java:383) +schema:public//org.postgresql.pljava.example.annotation.PassXML.transformXML(PassXML.java:561) + +POLICY DENIES/TRIAL POLICY ALLOWS: ("java.net.SocketPermission" "127.0.0.1:5432" "connect,resolve") +java.base/java.security.ProtectionDomain.implies(ProtectionDomain.java:321) +... +java.base/java.net.Socket.<init>(Socket.java:294) +jdk.translet/die.verwandlung.GregorSamsa.template$dot$0() +... +jdk.translet/die.verwandlung.GregorSamsa.transform() +java.xml/com.sun.org.apache.xalan.internal.xsltc.runtime.AbstractTranslet.transform(AbstractTranslet.java:624) +... +java.xml/com.sun.org.apache.xalan.internal.xsltc.trax.TransformerImpl.transform(TransformerImpl.java:383) +>> sqlj:examples [PLPrincipal.Sandboxed: java] << +schema:public//org.postgresql.pljava.example.annotation.PassXML.transformXML(PassXML.java:561) +``` + +The example shows the use of an XSLT 1.0 transform that appears to +make use of the Java XSLT ability to call out to arbitrary Java, and is trying +to make a network connection back to PostgreSQL on `localhost`. Java's XSLTC +implementation compiles the transform to a class in `jdk.translet` with null +as its codebase, and the first log entry shows permission is denied at that +level (the protection domain shown as +`>> null [PLPrincipal.Sandboxed: java] <<`).
+ +A second log entry results because `TrialPolicy` turns the first failure to +success, allowing the permission check to continue, and it next fails at +the PL/Java function being called, in the `sqlj:examples` jar. Under the trial +policy, that also is logged and then allowed to succeed. + +The simplest way to allow this connection in the production policy would be +to grant the needed `java.net.SocketPermission` to `PLPrincipal$Sandboxed`, +as that is present in both denied domains. It would be possible to grant +the permission by codebase to `sqlj:examples` instead, but not to +the nameless codebase of the compiled XSLT transform. + +[policy]: policy.html +[vbls]: variables.html diff --git a/src/site/markdown/use/unenforced.md b/src/site/markdown/use/unenforced.md new file mode 100644 index 00000000..82636be1 --- /dev/null +++ b/src/site/markdown/use/unenforced.md @@ -0,0 +1,246 @@ +# PL/Java with no policy enforcement + +This page describes how PL/Java operates when it is not enforcing any security +policy, as when running on stock Java 24 or later. + +When the newest Java language features are not needed, it may be preferable to +use a Java 23 or earlier JVM to retain PL/Java's historically fine-grained and +configurable limits on what the Java code can do. For that case, please see +instead the [configuring permissions in PL/Java][policy] page. + +## History: policy enforcement pre-Java 24 + +PL/Java has historically been able to enforce configurable limits on the +behavior of Java code, and to offer more than one "procedural language" with +distinct names, such as `java` and `javau`, for declaring functions with +different limits on what they can do. In PostgreSQL parlance, the language named +without 'u' would be described as 'trusted', meaning any functions created in +that language would run with strict limits. Such functions could be created by +any PostgreSQL user granted `USAGE` permission on that language. The language +named with 'u' would be described as 'untrusted' and impose fewer limits on what +functions can do; accordingly, only PostgreSQL superusers would be allowed to +create functions in such a language. + +PL/Java, going further than many PLs, allowed tailoring of the exact policies +imposed for both `java` and `javau`, and also allowed creation of additional +language aliases beyond those two, with different tailored policies for each. + +Those capabilities remain available when PL/Java is used with Java versions +up through Java 23, and are described more fully in +[configuring permissions in PL/Java][policy]. + +## The present: Java 24 and later, no policy enforcement in PL/Java 1.6 + +The Java language features necessary for policy enforcement in the PL/Java 1.6 +series have been removed from the language as of Java 24. It is possible to +use Java 24 or later with an up-to-date 1.6-series PL/Java, but only by running +with no policy enforcement at all. + +That does not mean only that PL/Java's 'trusted' and 'untrusted' languages are +no longer different: it means that even the 'untrusted' language's more-relaxed +former limits can no longer be enforced. When run with enforcement disabled, +PL/Java is better described as a wholly-'untrusted' PL with nearly no limits on +what the Java code can do. + +The only limits a Java 24 or later runtime can impose on what the Java code can +do are those imposed by the isolation of modules in the +[Java Platform Module System][jpms] and by a small number of VM options, which +will be discussed further below. 
+ +This picture is radically different from the historical one with enforcement. To +run PL/Java in this mode may be a reasonable choice if Java 24 or later language +features are wanted and if all of the Java code to be used is considered well +vetted, thoroughly trusted, and defensively written. + +For news of possible directions for policy enforcement in future PL/Java +versions, please bookmark [this wiki page][jep411]. + +## Opting in to PL/Java with no enforcement + +For PL/Java to run with no policy enforcement (and, therefore, for it to run +at all on Java 24 or later), specific configuration settings must be made to opt +in. + +### In `pljava.vmoptions` + +The string `-Djava.security.manager=disallow` must appear in the setting of +[`pljava.vmoptions`][vmoptions] or PL/Java will be unable to start on Java 24 +or later. + +For details on what `java.security.manager` settings to use on other Java +versions, see [Available policy-enforcement settings by Java version][smprop]. + +### in `pljava.allow_unenforced` + +Typically, a PL extension that provides only 'untrusted' execution will define +only a single, untrusted, PL name: `plpython3u` would be an example. + +PL/Java, however: + +* Has historically offered both a `javau` and a trusted `java` PL +* Still can offer both, when run on a Java 23 or older JVM +* May have been installed in a database with functions already created of both + types, and then switched to running on Java 24 and without enforcement +* Can also be switched back to a Java 23 or older JVM and provide enforcement + again + +Therefore, a PL/Java installation still normally provides two (or more) named +PLs, each being declared to PostgreSQL as either 'trusted' or not. + +When running with no enforcement, however: + +* Only PostgreSQL superusers can create functions, even using PL names shown as + 'trusted', and without regard to any grants of `USAGE` on those PLs. + + There may, however, be functions already defined in 'trusted' PLs that were + created by non-superusers with `USAGE` granted, at some earlier time when + PL/Java was running with enforcement. It may be important to audit those + functions' code before allowing them to run. + +* No PL/Java function at all will be allowed to run unless the name of its PL is + included in the `pljava.allow_unenforced` [configuration variable][vbls]. + +* When there are existing PL/Java functions declared in more than one named PL, + they can be audited in separate batches, with the name of each PL added + to the `pljava.allow_unenforced` setting after the functions declared + in that PL have been approved. Or, individual functions, once approved, can + be redeclared with the PL name changed to one already listed in + `pljava.allow_unenforced`. + +* Creation of a new function, even by a superuser, with a PL name not listed in + `pljava.allow_unenforced` will normally raise an error when PL/Java is + running without enforcement. This will not be detected, however, at times + when `check_function_bodies` is `off`, so is better seen as a reminder than + as a form of security. The more-important check is the one made when + the function executes. + +### in `pljava.allow_unenforced_udt` + +Java methods for input and output conversion of PL/Java +[mapped user-defined types][mappedudt], which are executed directly by PL/Java +and have no SQL declarations to carry a PL name, are allowed to execute only if +`pljava.allow_unenforced_udt` is `on`. 
The table `sqlj.typemap_entry` can be +queried for a list of mapped UDT Java classes to audit before changing this +setting to `on`. + +## Hardening for PL/Java with no policy enforcement + +### External hardening measures + +Developers of the Java language, in their rationale for removing the +Java features needed for policy enforcement, have placed strong emphasis on +available protections at the OS or container level, external to the process +running Java. For the case of PL/Java, that would mean typical hardening +measures such as running PostgreSQL in a container, using [SELinux][selinux], +perhaps in conjunction with [sepgsql][], and so on. + +Those external measures, however, generally confine what the process can do as a +whole. Because PL/Java executes within a PostgreSQL backend process, which must +still be allowed to do everything PostgreSQL itself does, it is difficult for an +external measure to restrict what Java code can do any more narrowly than that. + +### Java hardening measures + +Java features do remain that can be used to put some outer guardrails on what +the Java code can do. They include some specific settings that can be made in +`pljava.vmoptions`, and the module-isolation features of the +[Java Platform Module System][jpms] generally. These should be conscientiously +used: + +#### `--sun-misc-unsafe-memory-access=deny` + +This setting is first available in Java 23. It should be used whenever +available, and especially in Java 24 or later with no policy enforcement. +Without this setting, and in the absence of policy enforcement, any Java code +can access memory in ways that break the Java object model. + +The only reason not to set this option would be when knowingly using a Java +library that requires the access, if there is no update or alternative to using +that library. More modern code would use later APIs for which access can be +selectively granted to specific modules. + +#### `--illegal-native-access=deny` + +This setting is first available in Java 24 and should be used whenever +available. Without this setting, in the absence of policy enforcement, +any Java code can execute native code. There is arguably no good reason to +relax this setting, as options already exist to selectively grant such access +to specific modules that need it, if any. + +#### Module system protections + +Java's module system is one of the most important remaining mechanisms for +limiting what Java code may be able to do. Keeping unneeded modules out of the +module graph, advantageous already for startup speed and memory footprint, +also means whatever those modules do won't be available to Java code. + +The supplied [examples jar][examples] provides a function, [java_modules][], +that can be used to see what modules have been resolved into Java's boot module +layer. + +The `--limit-modules` VM option can be effectively used to resolve fewer modules +when PL/Java loads. As of this writing, in early 2025, starting PL/Java with no +`--add-modules` or `--limit-modules` options results in 48 modules in the graph, +while a simple `--limit-modules=org.postgresql.pljava.internal` added to +`pljava.vmoptions` reduces the graph to nine modules---all the transitive +requirements of PL/Java itself---and all of PL/Java's supplied examples +successfully run. Any additional modules needed for user code can be added back +with `--add-modules`. More details at [Limiting the module graph][limiting]. 
+ +The `--sun-misc-unsafe-memory-access=deny` option mentioned above denies access +to certain methods of the `sun.misc.Unsafe` class, which is supplied by +the `jdk.unsupported` module. It may be preferable, when there is no other need +for it, to also make sure `jdk.unsupported` is not present in the module graph +at all. + +##### Modularize code needing special access + +It is currently less convenient in PL/Java 1.6 to provide user code in modular +form: the `sqlj.install_jar` and `sqlj.set_classpath` functions manage a class +path, not a module path. Supplying a module requires placing it on the file +system and adding it to `pljava.module_path`. + +The extra inconvenience may be worthwhile in some cases where there is a subset +of code that requires special treatment, such as an exception to the native +access restriction. Placing just that code into a named module on the module +path allows the exception to be made just for that module by name. With the +removal of Java's former fine-grained policy permissions, such module-level +exceptions are the finest-grained controls remaining in stock Java. + +For news of possible directions for policy enforcement in future PL/Java +versions, please bookmark [this wiki page][jep411]. + +### Defensive coding + +#### Java system properties + +It can be laborious to audit a code base for assumptions that a given Java +system property has a value that is reliable. In the case of no policy +enforcement, when any system property can be changed by any code at any time, +best practice is to rely on defensive copies taken early, before arbitrary +user code can have run. + +For example, `PrintWriter.println` uses a copy of the `line.separator` property +taken early in the JVM's own initialization, so code that relies on `println` to +write a newline will be more dependable than code using `line.separator` +directly. + +PL/Java itself takes a defensive copy of all system properties early in its own +startup, immediately after adding the properties that PL/Java sets. The +`frozenSystemProperties` method of the `org.postgresql.pljava.Session` object +returns this defensive copy, as a subclass of `java.util.Properties` that is +unmodifiable (throwing `UnsupportedOperationException` from methods where a +modification would otherwise result). + +[policy]: policy.html +[jpms]: jpms.html +[vmoptions]: ../install/vmoptions.html +[vbls]: variables.html +[jep411]: https://github.com/tada/pljava/wiki/JEP-411 +[selinux]: ../install/selinux.html +[sepgsql]: https://www.postgresql.org/docs/17/sepgsql.html +[limiting]: jpms.html#Limiting_the_module_graph +[mappedudt]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/MappedUDT.html +[examples]: ../examples/examples.html +[java_modules]: ../pljava-examples/apidocs/org/postgresql/pljava/example/annotation/Modules.html#method-detail +[smprop]: ../install/smproperty.html diff --git a/src/site/markdown/use/use.md b/src/site/markdown/use/use.md index 66479dbd..4fe21948 100644 --- a/src/site/markdown/use/use.md +++ b/src/site/markdown/use/use.md @@ -19,8 +19,8 @@ to be in your local Maven repository. If you have built PL/Java from source using `mvn clean install`, it will be there already. If not, an easy way to install the API into your local repository -is to download the PL/Java source, _change into the `pljava-api` -directory_, and run `mvn clean install` there. It will quickly build +is to download the PL/Java source, and run +`mvn --projects pljava-api clean install` there. 
It will quickly build and install the API jar, without requiring the various build-time dependencies needed when all of PL/Java is being built. @@ -28,20 +28,161 @@ If not using Maven, you can simply add the `pljava-api` jar file to the class path for your Java compiler. Installation normally places the file in `SHAREDIR/pljava` where `SHAREDIR` is as reported by `pg_config`. -You can also compile successfully by placing the full `pljava` jar -file on the classpath instead of `pljava-api`, but in that case the -compiler will not alert you if your code inadvertently refers to non-API -internal PL/Java classes that may change from release to release. +## PL/Java configuration variables + +Several [configuration variables](variables.html) can affect PL/Java's +operation, including some common PostgreSQL variables as well as +PL/Java's own. + +### Enabling additional Java modules + +By default, PL/Java code can see a small set of Java modules, including +`java.base` and `java.sql` and a few others. To include others, use +[`--add-modules` in `pljava.vmoptions`][addm]. + +[addm]: ../install/vmoptions.html#Adding_to_the_set_of_readable_modules ## Special topics +### Configuring permissions + +When PL/Java is used with Java 23 or earlier, the permissions in effect +for PL/Java functions can be tailored, independently for functions declared to +the `TRUSTED` or untrusted language, as described [here](policy.html). + +When PL/Java is used with stock Java 24 or later, no such tailoring of +permissions is possible, and the +[PL/Java with no policy enforcement](unenforced.html) page should be carefully +reviewed. + +#### Tailoring permissions for code migrated from PL/Java pre-1.6 + +When migrating existing code from a PL/Java 1.5 or earlier release to 1.6, +it may be necessary to add permission grants in the new `pljava.policy` file, +which grants few permissions by default. To simplify migration, it is possible +to run with a 'trial' policy initially, allowing code to run but logging +permissions that may need to be added in `pljava.policy`. How to do that is +described [here](trial.html). + +### Catching and handling PostgreSQL exceptions in Java + +If the Java code calls back into PostgreSQL (such as through the internal JDBC +interface), errors reported by PostgreSQL are turned into Java exceptions and +can be caught in Java `catch` clauses, but they need to be properly handled. +More at [Catching PostgreSQL exceptions in Java](catch.html). + +### Debugging PL/Java functions + +#### Java exception stack traces + +PL/Java catches any Java exceptions uncaught by your Java code, and passes them +on as familiar PostgreSQL errors that will be reported to the client, or can be +caught, as with PL/pgSQL's `EXCEPTION` clause. However, the created PostgreSQL +error does not include the stack trace of the original Java exception. + +If either of the PostgreSQL settings `client_min_messages` or `log_min_messages` +is `DEBUG1` or finer, the Java exception stack trace will be printed to +the standard error channel of the backend process, where it will be collected +and saved in the server log if the PostgreSQL setting `logging_collector` is on. +Otherwise, it will go wherever the error channel of the backend process is +directed, possibly nowhere. 
+ +#### Connecting a debugger + +To allow connecting a Java debugger, the PostgreSQL setting `pljava.vmoptions` +can be changed, in a particular session, to contain a string like: + +``` +-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=localhost:0 +``` + +On the first action in that session that uses PL/Java, the debugger transport +will be set up as specified. For the example above, PL/Java will listen for +a connection from a Java debugger at a randomly-chosen port, which will be +identified with this message (where _nnnnn_ is the port number): + +``` +Listening for transport dt_socket at address: nnnnn +``` + +A Java debugger can then be started and attached to the listening address and +port. + +The "Listening" message, however, is written to the standard output channel +of the PostgreSQL backend process. It may be immediately visible if you are +running PostgreSQL in a [test harness](../develop/node.html), but in a +production setting it may go nowhere. In such a setting, you may prefer to set +a specific port number, rather than 0, in the `pljava.vmoptions` setting, to +be sure of the port the debugger should attach to. Choosing a port that is not +already in use is then up to you. + +As an alternative, `server=y` can be changed to `server=n`, and PL/Java will +then attempt to attach to an already-listening debugger process. The +address:port should be adjusted to reflect where the debugger process is +listening. + +With `suspend=n`, PL/Java proceeds normally without waiting for the debugger +connection, but the debugger will be able to set break or watch points, and will +have control when Java exceptions are thrown. With `suspend=y`, PL/Java only +proceeds once the debugger is connected and in control. This setting is more +commonly used for debugging PL/Java itself. + +### The thread context class loader + +Starting with PL/Java 1.6.3, within an SQL-declared PL/Java function, the +class loader returned by `Thread.currentThread().getContextClassLoader` +is the one that corresponds to the per-schema classpath that has been set +with [`SQLJ.SET_CLASSPATH`][scp] for the schema where the function is +declared (assuming no Java code uses `setContextClassLoader` to change it). + +Many available Java libraries, as well as built-in Java facilities using the +[`ServiceLoader`][slo], refer to the context class loader, so this behavior +ensures they will see the classes that are available on the classpath that was +set up for the PL/Java function. In versions where PL/Java did not set the +context loader, awkward arrangements could be needed in user code for the +desired classes or services to be found. + +There are some limits on the implementation, and some applications may want +the former behavior where PL/Java did not touch the thread context loader. +More details are available [here](../develop/contextloader.html). + +[scp]: ../pljava/apidocs/org.postgresql.pljava.internal/org/postgresql/pljava/management/Commands.html#set_classpath +[slo]: https://docs.oracle.com/javase/9/docs/api/java/util/ServiceLoader.html + +### Choices when mapping data types + +#### Date and time types + +PostgreSQL `date`, `time`, and `timestamp` types can still be matched to the +original JDBC `java.sql.Date`, `java.sql.Time`, and `java.sql.Timestamp`, +but application code is encouraged to move to Java 8 or later and use the +[new classes in the `java.time` package in Java 8](datetime.html) instead. 
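+
+As a small, hypothetical illustration (the function and its name are invented
+here; the type mappings are those described on that page), a PL/Java function
+can simply declare the newer Java type in its signature:
+
+```java
+@Function
+public static int daysUntil(LocalDate due)   // maps to a PostgreSQL `date`
+{
+    return (int)ChronoUnit.DAYS.between(LocalDate.now(), due);
+}
+```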
+ +#### XML type + +PL/Java can map PostgreSQL `xml` data to `java.lang.String`, but there are +significant advantages to using the +[JDBC 4.0 `java.sql.SQLXML` type](sqlxml.html) for processing XML. + +### Parallel query + +PL/Java understands [background worker processes][bgworker] +in PostgreSQL 9.5 and later, +and PostgreSQL 9.6 introduced [parallel query][parq]. + +For details on PL/Java in a background worker or parallel query, see +[PL/Java in parallel query](parallel.html). + +[bgworker]: https://www.postgresql.org/docs/current/static/bgworker.html +[parq]: https://www.postgresql.org/docs/current/static/parallel-query.html + ### Character-set encodings PL/Java will work most seamlessly when the server encoding in PostgreSQL is `UTF8`. For other cases, please see the [character encoding notes][charsets]. [hello]: hello.html -[pljapi]: ../pljava-api/apidocs/index.html?org/postgresql/pljava/package-summary.html#package_description +[pljapi]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/package-summary.html#package-description [uwik]: https://github.com/tada/pljava/wiki/User-guide [examples]: ../examples/examples.html [charsets]: charsets.html diff --git a/src/site/markdown/use/variables.md b/src/site/markdown/use/variables.md index e404bf8b..a2605532 100644 --- a/src/site/markdown/use/variables.md +++ b/src/site/markdown/use/variables.md @@ -2,28 +2,48 @@ These PostgreSQL configuration variables can influence PL/Java's operation: +`check_function_bodies` +: Although not technically a PL/Java variable, `check_function_bodies` affects + how strictly PL/Java validates a new function at the time of a + `CREATE FUNCTION` (or when installing a jar file with `CREATE FUNCTION` + among its deployment actions). With `check_function_bodies` set to `on`, + PL/Java will make sure that the referenced class and method can be loaded + and resolved. If the referenced class depends on classes in other jars, + those other jars must be already installed and on the class path, so + loading jars with dependencies in the wrong order can incur validation + errors. With `check_function_bodies` set to `off`, only basic syntax is + checked at `CREATE FUNCTION` time, so it is possible to declare functions + or install jars in any order, postponing any errors about unresolved + dependencies until later when the functions are used. + `dynamic_library_path` -: Although strictly not a PL/Java variable, `dynamic_library_path` influences +: Another non-PL/Java variable, `dynamic_library_path` influences where the PL/Java native code object (`.so`, `.dll`, `.bundle`, etc.) can be found, if the full path is not given to the `LOAD` command. `server_encoding` : Another non-PL/Java variable, this affects all text/character strings exchanged between PostgreSQL and Java. `UTF8` as the database and server - encoding is _strongly_ recommended. If a different encoding is used, it + encoding is strongly recommended. If a different encoding is used, it should be any of the available _fully defined_ character encodings. In - particular, the PostgreSQL pseudo-encoding `SQL_ASCII` (which means - "characters within ASCII are ASCII, others are no-one-knows-what") will - _not_ work well with PL/Java, raising exceptions whenever strings contain - non-ASCII characters. (PL/Java can still be used in such a database, but - the application code needs to know what it's doing and use the right - conversion functions where needed.) - -`pljava.classpath` -: The class path to be passed to the Java application class loader. 
There - must be at least one (and usually only one) entry, the PL/Java jar file - itself. To determine the proper setting, see - [finding the files produced by a PL/Java build](../install/locate.html). + particular, the PostgreSQL pseudo-encoding `SQL_ASCII` does not fully + define what any values outside ASCII represent; it is usable, but + [subject to limitations][sqlascii]. + +`pljava.allow_unenforced` +: Only used when PL/Java is run with no policy enforcement, this setting is + a list of language names (such as `javau` and `java`) in which functions + will be allowed to execute. This setting has an empty default, and should + only be changed after careful review of the + [PL/Java with no policy enforcement][unenforced] page. + +`pljava.allow_unenforced_udt` +: Only used when PL/Java is run with no policy enforcement, this on/off + setting controls whether data conversion functions associated with + PL/Java [mapped user-defined types][mappedudt] + will be allowed to execute. This setting defaults to off, and should + only be changed after careful review of the + [PL/Java with no policy enforcement][unenforced] page. `pljava.debug` : A boolean variable that, if set `on`, stops the process on first entry to @@ -35,8 +55,7 @@ These PostgreSQL configuration variables can influence PL/Java's operation: `pljava.enable` : Setting this variable `off` prevents PL/Java startup from completing, until - the variable is later set `on`. It can be useful when - [installing PL/Java on PostgreSQL versions before 9.2][pre92]. + the variable is later set `on`. It can be useful in some debugging settings. `pljava.implementors` : A list of "implementor names" that PL/Java will recognize when processing @@ -46,22 +65,122 @@ These PostgreSQL configuration variables can influence PL/Java's operation: only on a system recognizing that name. By default, this list contains only the entry `postgresql`. A deployment descriptor that contains commands with other implementor names can achieve a rudimentary kind of conditional - execution if earlier commands adjust this list of names. + execution if earlier commands adjust this list of names, as described + [here][condex]. _Commas separate + elements of this list. Elements that are not regular identifiers need to be + surrounded by double-quotes; prior to PostgreSQL 11, that syntax can be used + directly in a `SET` command, while in 11 and after, such a value needs to be + a (single-quoted) string explicitly containing the double quotes._ + +`pljava.java_thread_pg_entry` +: A choice of `allow`, `error`, `block`, or `throw` controlling PL/Java's thread + management. Java makes heavy use of threading, while PostgreSQL may not be + accessed by multiple threads concurrently. PL/Java's historical behavior is + `allow`, which serializes access by Java threads into PostgreSQL, allowing + a different Java thread in only when the current one calls or returns into + Java. PL/Java formerly made some use of Java object finalizers, which + required this approach, as finalizers run in their own thread. + + PL/Java itself no longer requires the ability for any thread to access + PostgreSQL other than the original main thread. User code developed for + PL/Java, however, may still rely on that ability. To test whether it does, + the `error` or `throw` setting can be used here, and any attempt by a Java + thread other + than the main one to enter PostgreSQL will incur an exception (and stack + trace, written to the server's standard error channel). 
When confident that + there is no code that will need to enter PostgreSQL except on the main + thread, the `block` setting can be used. That will eliminate PL/Java's + frequent lock acquisitions and releases when the main thread crosses between + PostgreSQL and Java, and will simply indefinitely block any other Java + thread that attempts to enter PostgreSQL. This is an efficient setting, but + can lead to blocked threads or a deadlocked backend if used with code that + does attempt to access PG from more than one thread. (A JMX client, like + JConsole, can identify the blocked threads, should that occur.) + + The `throw` setting is like `error` but more efficient: under the `error` + setting, attempted entry by the wrong thread is detected in the native C + code, only after a lock operation and call through JNI. Under the `throw` + setting, the lock operations are elided and an entry attempt by the wrong + thread results in no JNI call and an exception thrown directly in Java. `pljava.libjvm_location` : Used by PL/Java to load the Java runtime. The full path to a `libjvm` shared object (filename typically ending with `.so`, `.dll`, or `.dylib`). To determine the proper setting, see [finding the `libjvm` library][fljvm]. + The version of the Java library pointed to by this variable will determine + whether PL/Java can run [with security policy enforcement][policy] or + [with no policy enforcement][unenforced]. + +`pljava.module_path` +: The module path to be passed to the Java application class loader. The default + is computed from the PostgreSQL configuration and is usually correct, unless + PL/Java's files have been installed in unusual locations. If it must be set + explicitly, there must be at least two (and usually only two) entries, the + PL/Java API jar file and the PL/Java internals jar file. To determine the + proper setting, see + [finding the files produced by a PL/Java build](../install/locate.html). + + If additional modular jars are added to the module path, + `--add-modules` in [`pljava.vmoptions`][addm] will make them readable by + PL/Java code. + + For more on PL/Java's "module path" and "class path", see + [PL/Java and the Java Platform Module System](jpms.html). + +`pljava.policy_urls` +: Only used when PL/Java is running [with security policy enforcement][policy]. + When running [with no policy enforcement][unenforced], this variable is + ignored. It is a list of URLs to Java security [policy files][policy] + determining the permissions available to PL/Java functions. Each URL should + be enclosed in double quotes; any double quote that is literally part of + the URL may be represented as two double quotes (in SQL style) or as + `%22` in the URL convention. Between double-quoted URLs, a comma is the + list delimiter. + + The Java installation's `java.security` file usually defines two policy + file locations: + + 0. A systemwide policy from the Java vendor, sufficient for the Java runtime + itself to function as expected + 0. A per-user location, where a policy file, if found, can add to the policy + from the systemwide file. + + The list in `pljava.policy_urls` will modify the list from the Java + installation, by default after the first entry, keeping the Java-supplied + systemwide policy but replacing the customary per-user file (there + probably isn't one in the home of the `postgres` user, and if there is + it is probably not tailored for PL/Java). 
+ + Any entry in this list can start with _n_`=` (inside the quotes) for a + positive integer _n_, to specify which entry of Java's policy location list + it will replace (entry 1 is the systemwide policy, 2 the customary user + location). URLs not prefixed with _n_`=` will follow consecutively. If the + first entry is not so prefixed, `2=` is assumed. + + A final entry of `=` (in the required double quotes) will prevent + use of any remaining entries in the Java site-configured list. + + This setting defaults to + `"file:${org.postgresql.sysconfdir}/pljava.policy","="` + `pljava.release_lingering_savepoints` : How the return from a PL/Java function will treat any savepoints created within it that have not been explicitly either released (the savepoint - analog of "committed") or rolled back. + analog of "committed") or rolled back and released. If `off` (the default), they will be rolled back. If `on`, they will be released/committed. If possible, rather than setting this variable `on`, it would be safer to fix the function to release its own savepoints when appropriate. + A savepoint continues to exist after being used as a rollback target. + This is JDBC-specified behavior, but was not PL/Java's behavior before + release 1.5.3, so code may exist that did not explicitly release or roll + back a savepoint after rolling back to it once. To avoid a behavior change + for such code, PL/Java will always release a savepoint that is still live + at function return, regardless of this setting, if the savepoint has already + been rolled back. + `pljava.statement_cache_size` : The number of most-recently-prepared statements PL/Java will keep open. @@ -77,7 +196,13 @@ These PostgreSQL configuration variables can influence PL/Java's operation: may be adjusted in a future PL/Java version. Some important settings can be made here, and are described on the - [VM options page][vmop]. + [VM options page][vmop]. For Java 18 and later, this variable must include + a `-Djava.security.manager=allow` or `-Djava.security.manager=disallow` + setting, determining whether PL/Java will run + [with security policy enforcement][policy] or + [with no policy enforcement][unenforced], and those pages should be reviewed + for the implications of the choice. Details vary by Java version; see + [Available policy-enforcement settings by Java version][smprop].
[pre92]: ../install/prepg92.html [depdesc]: https://github.com/tada/pljava/wiki/Sql-deployment-descriptor @@ -87,3 +212,10 @@ These PostgreSQL configuration variables can influence PL/Java's operation: [jow]: https://docs.oracle.com/javase/8/docs/technotes/tools/windows/java.html [jou]: https://docs.oracle.com/javase/8/docs/technotes/tools/unix/java.html [vmop]: ../install/vmoptions.html +[sqlascii]: charsets.html#Using_PLJava_with_server_encoding_SQL_ASCII +[addm]: ../install/vmoptions.html#Adding_to_the_set_of_readable_modules +[condex]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/package-summary.html#conditional-execution-in-the-deployment-descriptor-heading +[policy]: policy.html +[unenforced]: unenforced.html +[mappedudt]: ../pljava-api/apidocs/org.postgresql.pljava/org/postgresql/pljava/annotation/MappedUDT.html +[smprop]: ../install/smproperty.html diff --git a/src/site/markdown/use/xmlview.md b/src/site/markdown/use/xmlview.md new file mode 100644 index 00000000..597d23fa --- /dev/null +++ b/src/site/markdown/use/xmlview.md @@ -0,0 +1,237 @@ +# XML view of non-XML data + +Because Java has a rich ecosystem of APIs and tools for XML processing, +and JDBC supports those directly with the [SQLXML data type](sqlxml.html), +it may be useful to offer XML "views" of other PostgreSQL data types that +are not XML, but are similarly tree-structured. + +A preview of such a feature is included in this release, allowing values +of PostgreSQL's `pg_node_tree` type to be retrieved as if they were XML. + + +## `pg_node_tree` + +The `pg_node_tree` type is a representation of PostgreSQL internal data +structures, serialized to a text form found in various places in the +system catalogs: default expressions for attributes, types, or function +parameters, constraint and index expressions, trigger and policy qualifiers, +rewrite rule actions, and so on. + +To make full use of the information in a `pg_node_tree` would require access +to all of the PostgreSQL native structure definitions used in it (which could +become feasible in a future PostgreSQL version if some +[current work-in-progress][gfntugm] is completed and released). On the other +hand, depending on need, some partial information may be usefully extracted +from a `pg_node_tree` using a simple syntactic transformation and standard +tools for XML querying. 
+ +For an example of the current `pg_node_tree` syntax, here is the +`yes_or_no_check` constraint in PostgreSQL 12 (on a little-endian machine): + +``` +SELECT conbin FROM pg_constraint WHERE conname = 'yes_or_no_check'; + +{SCALARARRAYOPEXPR :opno 98 :opfuncid 67 :useOr true :inputcollid 100 +:args ({RELABELTYPE :arg {COERCETODOMAINVALUE :typeId 1043 :typeMod 7 +:collation 100 :location 133} :resulttype 25 :resulttypmod -1 +:resultcollid 100 :relabelformat 2 :location -1} {ARRAYCOERCEEXPR +:arg {ARRAY :array_typeid 1015 :array_collid 100 :element_typeid 1043 +:elements ({CONST :consttype 1043 :consttypmod -1 :constcollid 100 +:constlen -1 :constbyval false :constisnull false :location 143 +:constvalue 7 [ 28 0 0 0 89 69 83 ]} {CONST :consttype 1043 :consttypmod -1 +:constcollid 100 :constlen -1 :constbyval false :constisnull false :location 150 +:constvalue 6 [ 24 0 0 0 78 79 ]}) :multidims false :location -1} +:elemexpr {RELABELTYPE :arg {CASETESTEXPR :typeId 1043 :typeMod -1 :collation 0} +:resulttype 25 :resulttypmod -1 :resultcollid 100 :relabelformat 2 :location -1} +:resulttype 1009 :resulttypmod -1 :resultcollid 100 :coerceformat 2 :location -1 +}) :location 139} +``` + +A Java function receiving a `pg_node_tree` as an argument could be declared +this way: + +```java +@Function +public static void pgNodeTreeAsXML(@SQLType("pg_node_tree") SQLXML pgt) +{ + ... +``` + +A parameter with the Java type `SQLXML` would normally lead to a parameter +type of `xml` in the generated SQL function declaration, but here the +`@SQLType` annotation is used to change that, declaring a function that accepts +a `pg_node_tree` in SQL, but presents it to Java as the `SQLXML` type. + +The [`pljava-examples` jar][ex] includes just such a function, only declared to +return `xml` rather than `void`. In fact, it returns its argument untouched, so +it can be treated as XML by the surrounding query. Its full implementation is: + +```java +@Function +public static SQLXML pgNodeTreeAsXML(@SQLType("pg_node_tree") SQLXML pgt) +throws SQLException +{ + return pgt; +} +``` + +Using that function (and the XQuery [serialize][] function with the `indent` +option for readability, courtesy of [XQuery-based `XMLTABLE`][xbxt]), the same +node tree can be viewed in a more familiar structured syntax: + +``` +SELECT + xmltable.* + FROM + pg_constraint, + LATERAL (SELECT PgNodeTreeAsXML(conbin) AS ".") AS p, + "xmltable"('serialize(., map{"indent":true()})', + passing => p, columns => '{.}') AS (indented text) + WHERE + conname = 'yes_or_no_check'; + + + 98 + 67 + true + 100 + + + + + + 1043 + 7 + 100 + 133 + + + 25 + -1 + 100 + 2 + -1 + + + + + 1015 + 100 + 1043 + + + + 1043 + -1 + 100 + -1 + false + false + 143 + 1C000000594553 + + + 1043 + -1 + 100 + -1 + false + false + 150 + 180000004E4F + + + + false + -1 + + + + + + + 1043 + -1 + 0 + + + 25 + -1 + 100 + 2 + -1 + + + 1009 + -1 + 100 + 2 + -1 + + + + 139 + +``` + +Although exact interpretation of all that isn't possible without heavy reference +to the PostgreSQL source, it can be eyeballed for a decent idea of what is +going on, and simple queries could extract useful information for some +purposes. 
For example, this simple XPath would return the `Oid`s of all types
+that are used in constants within the expression:
+
+```
+//CONST/member[@name = 'consttype']/number()
+```
+
+### Some details of the mapping
+
+* A `<list>` either has no attributes, with children that are, recursively,
+  `pg_node_tree` structures, or it has an `all` attribute with value
+  `int`, `oid`, or `bit`, in which case its children are all elements with
+  numeric content representing integers, `Oid`s, or bit numbers in a bit set,
+  respectively.
+* A `<CONST>` representing a typed SQL `NULL` will have a `constvalue` member
+  with no `length` attribute and no content. Otherwise, the `constvalue`
+  member will have content of type `xs:hexBinary` and a `length` attribute
+  indicating how many octets of the binary content are used. For types
+  with `constbyval` true, the hex content will always be the full width of
+  a `Datum`, though the `length` may be smaller. For types with `constbyval`
+  false, the `length` attribute matches the length of the binary content.
+* A `<CONST>` with a `constlen` of `-1` represents a type with a `varlena`
+  representation, as described under [Database Physical Storage][dps].
+  The `constvalue` in such a case is the entire `varlena`, including its
+  header.
+
+The two `<CONST>` elements in the example above have type 1043
+(`CHARACTER VARYING`) and `varlena` representations, so the `constvalue` members
+consist of a four-octet header followed by the three ASCII characters `YES` or
+the two characters `NO`, respectively. The one-octet length difference changes
+the `varlena` header value by four (from `18` to `1C`) because the two
+lowest-order bits of the header (on little-endian hardware) are usurped for
+TOAST.
+
+It is [possible][gfntugm1] that a future PostgreSQL version will
+change the current idiosyncratic syntax, or serialize to JSON instead.
+
+## Limits of the current XML view implementation
+
+Implementation of XML views is work in progress. The current implementation
+has these limitations:
+
+* It is read-only. There is no provision yet for writing an XML-viewable type
+  by returning `SQLXML` from a Java function, or passing `SQLXML` to a
+  `ResultSet`, `PreparedStatement`, or `SQLOutput`.
+* A fully-compliant readable `SQLXML` implementation should support
+  `getBinaryStream`, `getCharacterStream`, `getString`, and `getSource` with
+  any of the four must-support subtypes of `Source`. The current XML-view
+  implementation will support only `getSource(SAXSource.class)` or
+  `getSource(null)` (which will return a `SAXSource`). All other cases will
+  throw an `SQLFeatureNotSupportedException`; a sketch of using the one
+  supported case follows this list.
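+
+As a sketch of working within that last limitation (this function is not part
+of the `pljava-examples` jar, and its name and class are chosen here purely
+for illustration), the XML view can be consumed through the supported
+`SAXSource` and serialized back to ordinary text with an identity
+`Transformer`:
+
+```java
+import java.io.StringWriter;
+
+import java.sql.SQLException;
+import java.sql.SQLXML;
+
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.sax.SAXSource;
+import javax.xml.transform.stream.StreamResult;
+
+import org.postgresql.pljava.annotation.Function;
+import org.postgresql.pljava.annotation.SQLType;
+
+public class PgNodeTreeText
+{
+	/*
+	 * Minimal sketch: obtain the XML view of a pg_node_tree through
+	 * getSource(SAXSource.class), the one Source subtype the current
+	 * implementation supports, and return the serialized XML as text.
+	 */
+	@Function
+	public static String pgNodeTreeText(@SQLType("pg_node_tree") SQLXML pgt)
+	throws SQLException, TransformerException
+	{
+		SAXSource src = pgt.getSource(SAXSource.class);
+		StringWriter out = new StringWriter();
+		TransformerFactory.newInstance()
+			.newTransformer()
+			.transform(src, new StreamResult(out));
+		pgt.free();
+		return out.toString();
+	}
+}
+```
+
+Any other SAX- or transform-based consumer could stand in for the identity
+transform; the point is only that the `SAXSource` path is the one this preview
+supports.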
+ +[gfntugm]: https://www.postgresql.org/message-id/20190828234136.fk2ndqtld3onfrrp%40alap3.anarazel.de +[gfntugm1]: https://www.postgresql.org/message-id/20190921091527.GI31596%40fetter.org +[ex]: ../examples/examples.html +[serialize]: https://www.w3.org/TR/xpath-functions-31/#func-serialize +[xbxt]: ../examples/saxon.html#An_XMLTABLE-like_function +[dps]: https://www.postgresql.org/docs/9.4/storage-toast.html diff --git a/src/site/resources/css/site.css b/src/site/resources/css/site.css new file mode 100644 index 00000000..194643be --- /dev/null +++ b/src/site/resources/css/site.css @@ -0,0 +1,14 @@ +/* You can override this file with your own styles */ + +div.source { + border-left: none; + border-right: none; + margin: 0; + padding: 2px 0; +} + +div.source > div.source { + border: none; + padding: 0; + margin: 0; +} diff --git a/src/site/site.xml b/src/site/site.xml index cadcf4ef..8938581f 100644 --- a/src/site/site.xml +++ b/src/site/site.xml @@ -9,21 +9,25 @@ PL/Java - Stored procedures for PostgreSQL in Java images/pljava_logo.jpg PL/Java logo combining the PostgreSQL elephant and a Java bean - ${project.url} + ${this.url} - + - + href='https://www.postgresql.org/list/pljava-dev/'/> + + +