diff --git a/db-engine/src/main/java/com/splicemachine/db/impl/sql/catalog/SYSNATURALNUMBERSRowFactory.java b/db-engine/src/main/java/com/splicemachine/db/impl/sql/catalog/SYSNATURALNUMBERSRowFactory.java
index b3efaa3489c..ce373a9cb4e 100644
--- a/db-engine/src/main/java/com/splicemachine/db/impl/sql/catalog/SYSNATURALNUMBERSRowFactory.java
+++ b/db-engine/src/main/java/com/splicemachine/db/impl/sql/catalog/SYSNATURALNUMBERSRowFactory.java
@@ -54,7 +54,7 @@ public class SYSNATURALNUMBERSRowFactory extends CatalogRowFactory
private static final int SYSNATURALNUMBERS_COLUMN_COUNT = 1;
private static final int SYSNATURALNUMBERS_N = 1;
- private static final int MAX_NUMBER = 2048;
+ public static final int MAX_NUMBER = 2048;
private static final String[] uuids =
{
diff --git a/hbase_sql/src/test/java/com/splicemachine/derby/impl/sql/execute/upgrade/UpgradeTestIT.java b/hbase_sql/src/test/java/com/splicemachine/derby/impl/sql/execute/upgrade/UpgradeTestIT.java
new file mode 100644
index 00000000000..1a505edd1a4
--- /dev/null
+++ b/hbase_sql/src/test/java/com/splicemachine/derby/impl/sql/execute/upgrade/UpgradeTestIT.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2012 - 2020 Splice Machine, Inc.
+ *
+ * This file is part of Splice Machine.
+ * Splice Machine is free software: you can redistribute it and/or modify it under the terms of the
+ * GNU Affero General Public License as published by the Free Software Foundation, either
+ * version 3, or (at your option) any later version.
+ * Splice Machine is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU Affero General Public License for more details.
+ * You should have received a copy of the GNU Affero General Public License along with Splice Machine.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+package com.splicemachine.derby.impl.sql.execute.upgrade;
+
+import com.splicemachine.access.hbase.HBasePartitionAdmin;
+import com.splicemachine.db.impl.sql.catalog.SYSNATURALNUMBERSRowFactory;
+import com.splicemachine.derby.test.framework.SpliceSchemaWatcher;
+import com.splicemachine.derby.test.framework.SpliceUnitTest;
+import com.splicemachine.derby.test.framework.SpliceWatcher;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.junit.*;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+/**
+ * Note: These tests need to be excluded in platform_it/pom.xml,
+ * as they are only meant to be run via the check_upgrade.sh script.
+ *
+ * e.g.
+ * bash pipelines/upgrade-testing/check_upgrade.sh 3.1.0.2000 -T 4
+ * this downloads a tar.gz (here https://splice-snapshots.s3.amazonaws.com/upgrade_tests/platform_it_3.1.0.2000.tar.gz),
+ * extracts it, starts the cluster on that data (without a clean), and then executes UpgradeTestIT
+ *
+ * see also
+ * pipelines/upgrade-testing/check_upgrade.sh
+ * pipelines/upgrade-testing/create_upgrade_targz.sh
+ * https://s3.console.aws.amazon.com/s3/buckets/splice-snapshots?region=us-east-1&prefix=upgrade_tests/&showversions=false
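+ *
+ * The scripts run this suite directly through surefire, roughly as follows
+ * (taken from check_upgrade.sh; the maven profile may differ per platform):
+ *   mvn -B -e surefire:test -Pcore,cdh6.3.0 -Dtest='UpgradeTestIT#*' -DskipServerStart -DfailIfNoTests=false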
+ */
+public class UpgradeTestIT extends SpliceUnitTest {
+
+ private static final String SCHEMA_NAME = UpgradeTestIT.class.getSimpleName().toUpperCase();
+ private static final SpliceWatcher spliceClassWatcher = new SpliceWatcher(SCHEMA_NAME);
+ private static final SpliceSchemaWatcher spliceSchemaWatcher = new SpliceSchemaWatcher(SCHEMA_NAME);
+ @Rule
+ public SpliceWatcher methodWatcher = new SpliceWatcher(SCHEMA_NAME);
+
+ @ClassRule
+ public static TestRule chain = RuleChain.outerRule(spliceClassWatcher)
+ .around(spliceSchemaWatcher);
+
+ @Test // 1983
+ public void testUpgradeScriptToAddBaseTableSchemaColumnsToSysTablesInSYSIBM() throws Exception {
+ String expected =
+ "NAME | CREATOR |TYPE |COLCOUNT |KEYCOLUMNS | KEYUNIQUE |CODEPAGE | BASE_NAME | BASE_SCHEMA |\n" +
+ "-------------------------------------------------------------------------------------------------\n" +
+ "SYSTABLES | SYSIBM | V | 9 | 0 | 0 | 1208 | NULL | NULL |";
+ SpliceUnitTest.sqlExpectToString(methodWatcher,
+ "select * from sysibm.SYSTABLES WHERE NAME='SYSTABLES' AND CREATOR='SYSIBM'",
+ expected, false);
+ }
+
+ @Test // 1985
+ public void testUpgradeScriptToAddSysNaturalNumbersTable() throws Exception {
+ Assert.assertEquals(SYSNATURALNUMBERSRowFactory.MAX_NUMBER,
+ methodWatcher.executeGetInt("select count(*) from SYS.SYSNATURALNUMBERS", 1) );
+ }
+
+ @Test // 1989
+ public void testUpgradeScriptToAddIndexColUseViewInSYSCAT() throws Exception {
+ SpliceUnitTest.sqlExpectToString(methodWatcher,
+ "select TABLENAME from sysvw.systablesview WHERE TABLENAME='INDEXCOLUSE'",
+ "TABLENAME |\n" +
+ "-------------\n" +
+ "INDEXCOLUSE |",
+ false);
+ }
+
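+ // After a successful upgrade every HBase table already carries the priority it should have,
+ // so the stream of descriptors that still need the priority upgrade must be empty.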
+ @Test // 1992
+ public void testUpgradeScriptForTablePriorities() throws Exception {
+ try(Connection conn = ConnectionFactory.createConnection(new Configuration());
+ Admin admin= conn.getAdmin()) {
+ Assert.assertEquals(0, HBasePartitionAdmin.getToUpgradeStream(admin.listTableDescriptors()).count());
+ }
+ }
+
+}
diff --git a/hbase_storage/src/main/java/com/splicemachine/access/hbase/HBasePartitionAdmin.java b/hbase_storage/src/main/java/com/splicemachine/access/hbase/HBasePartitionAdmin.java
index fe653499f05..d4ae2cd35c3 100644
--- a/hbase_storage/src/main/java/com/splicemachine/access/hbase/HBasePartitionAdmin.java
+++ b/hbase_storage/src/main/java/com/splicemachine/access/hbase/HBasePartitionAdmin.java
@@ -24,6 +24,7 @@
import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;
+import java.util.stream.Stream;
import splice.com.google.common.collect.Lists;
import com.google.protobuf.InvalidProtocolBufferException;
@@ -764,6 +765,11 @@ public static void setHTablePriority(Admin admin, TableName tn,
admin.enableTable(tn);
}
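+ /**
+ * Returns the table descriptors whose current priority differs from the priority they
+ * should have, i.e. the tables that still need the priority upgrade.
+ */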
+ public static Stream<TableDescriptor> getToUpgradeStream(
+ List<TableDescriptor> tableDescriptors) {
+ return tableDescriptors.stream().filter( td -> td.getPriority() != getPriorityShouldHave(td));
+ }
+
public static int upgradeTablePrioritiesFromList(Admin admin,
List<TableDescriptor> tableDescriptors)
throws Exception
@@ -771,9 +777,7 @@ public static int upgradeTablePrioritiesFromList(Admin admin,
final int NUM_THREADS = 10;
ExecutorService executor = null;
try {
-
- List<Callable<Void>> upgradeTasks = tableDescriptors.stream()
- .filter( td -> td.getPriority() != getPriorityShouldHave(td))
+ List<Callable<Void>> upgradeTasks = getToUpgradeStream(tableDescriptors)
.map( td -> (Callable<Void>) () -> {
setHTablePriority(admin, td.getTableName(), td, getPriorityShouldHave(td));
return null;
diff --git a/pipelines/upgrade-testing/check_upgrade.sh b/pipelines/upgrade-testing/check_upgrade.sh
new file mode 100644
index 00000000000..85836089d33
--- /dev/null
+++ b/pipelines/upgrade-testing/check_upgrade.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
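+# Builds the standalone data of an old release (git tag ${VERSION}), packs it into
+# platform_it_${VERSION}.tar.gz, then rebuilds the current branch, restarts the cluster
+# on top of the old data (-l, no clean) and runs UpgradeTestIT against it.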
+
+UPGRADE_URL=s3://splice-snapshots/upgrade_tests
+
+VERSION=${1} # e.g. 3.1.0.1971
+shift 1
+
+PREVIOUS_BRANCH=`git rev-parse --abbrev-ref HEAD`
+
+# creates a file platform_it_${VERSION}.tar.gz
+git checkout tags/${VERSION}
+cd platform_it
+rm -rf target *.log snappy*.jnilib
+cd ..
+
+./start-splice-cluster $*
+./start-splice-cluster -k
+
+rm -rf upgrade_test_TMP
+mkdir -p upgrade_test_TMP/platform_it/target
+cd upgrade_test_TMP
+cp -r ../platform_it/target/hbase platform_it/target/.
+cp -r ../platform_it/target/zookeeper platform_it/target/.
+tar -czvf ../platform_it_${VERSION}.tar.gz platform_it
+cd ..
+rm -rf upgrade_test_TMP
+
+git checkout ${PREVIOUS_BRANCH}
+
+
+# rebuild and restart the cluster on the current branch
+./start-splice-cluster $*
+
+# clean up platform_it
+cd platform_it
+git clean -dfx
+cd ..
+
+# download the previous standalone data
+tar -xzvf platform_it_${VERSION}.tar.gz
+rm platform_it_${VERSION}.tar.gz
+
+# restart cluster
+./start-splice-cluster -l $*
+
+# test
+if mvn -B -e surefire:test -Pcore,cdh6.3.0 -Dtest='UpgradeTestIT#*' -DskipServerStart -DfailIfNoTests=false; then
+ echo "UPGRADE SUCCEEDED"
+ cat platform_it/splice.log | grep 'upgrade scripts'
+ cat platform_it/splice.log | grep 'Running upgrade script'
+else
+ echo "!!! UPGRADE FAILED !!!"
+fi
diff --git a/pipelines/upgrade-testing/check_upgrade_fast.sh b/pipelines/upgrade-testing/check_upgrade_fast.sh
new file mode 100644
index 00000000000..33fcf25b7a7
--- /dev/null
+++ b/pipelines/upgrade-testing/check_upgrade_fast.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+UPGRADE_URL=s3://splice-snapshots/upgrade_tests
+
+if [ $# -lt 2 ]
+then
+ echo "usage: bash create_upgrade_targz.sh {VERSION} {additional start-splice-cluster parameters}"
+ echo "------------------------------------------------------------------------------------------"
+ echo "uses a previously created tar.gz to test upgrade"
+ echo "e.g. bash create_upgrade_targz.sh 3.2.2021 -T 16"
+ echo "make sure you current branch has already been build"
+ echo "don't use -b, since we are deleting some files in platform_it/target"
+
+ exit 1
+fi
+
+VERSION=${1}
+shift
+
+# stop current cluster
+./start-splice-cluster -k
+
+# clean up platform_it
+cd platform_it
+git clean -dfx
+cd ..
+
+# download the previous standalone data
+aws s3 cp ${UPGRADE_URL}/platform_it_${VERSION}.tar.gz .
+tar -xzvf platform_it_${VERSION}.tar.gz
+rm platform_it_${VERSION}.tar.gz
+
+# restart cluster
+./start-splice-cluster -l $*
+
+# test
+if mvn -B -e surefire:test -Pcore,cdh6.3.0 -Dtest='UpgradeTestIT#*' -DskipServerStart -DfailIfNoTests=false; then
+ echo "UPGRADE SUCCEEDED"
+ cat platform_it/splice.log | grep 'upgrade scripts'
+ cat platform_it/splice.log | grep 'Running upgrade script'
+else
+ echo "!!! UPGRADE FAILED !!!"
+fi
diff --git a/pipelines/upgrade-testing/create_upgrade_targz.sh b/pipelines/upgrade-testing/create_upgrade_targz.sh
new file mode 100644
index 00000000000..15567529d73
--- /dev/null
+++ b/pipelines/upgrade-testing/create_upgrade_targz.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+UPGRADE_URL=s3://splice-snapshots/upgrade_tests
+
+
+if [ $# -lt 2 ]
+then
+ echo "usage: bash platform_it/src/test/bin/create_upgrade_targz.sh {VERSION} {test/upload} {additional start-splice-cluster parameters}"
+ echo "----------------------------------------------------------------------------------------------------------------------------------"
+ echo "creates a tar.gz of a spliceengine standalone cluster that can be used to test upgrade"
+ echo "e.g. bash create_upgrade_targz.sh 3.1.0.1971 test -T 16 -pcore,cdh6.3.0"
+ exit 1
+fi
+
+VERSION=${1} # e.g. 3.1.0.1971
+MODE=${2} # test or upload
+shift 2
+
+
+PREVIOUS_BRANCH=`git rev-parse --abbrev-ref HEAD`
+git stash
+
+# creates a file platform_it_${VERSION}.tar.gz
+git checkout tags/${VERSION}
+cd platform_it
+rm -rf target *.log snappy*.jnilib
+cd ..
+
+./start-splice-cluster $*
+./start-splice-cluster -k
+
+rm -rf upgrade_test_TMP
+mkdir -p upgrade_test_TMP/platform_it/target
+cd upgrade_test_TMP
+cp -r ../platform_it/target/hbase platform_it/target/.
+cp -r ../platform_it/target/zookeeper platform_it/target/.
+tar -czvf ../platform_it_${VERSION}.tar.gz platform_it
+cd ..
+rm -rf upgrade_test_TMP
+
+if [[ $MODE == "upload" ]]; then
+ aws s3 cp platform_it_${VERSION}.tar.gz ${UPGRADE_URL}/platform_it_${VERSION}.tar.gz
+fi
+
+git checkout ${PREVIOUS_BRANCH}
+git stash pop
diff --git a/platform_it/pom.xml b/platform_it/pom.xml
index b1d2afa1e62..15ab2225930 100644
--- a/platform_it/pom.xml
+++ b/platform_it/pom.xml
@@ -460,6 +460,8 @@
<exclude>com/splicemachine/derby/impl/sql/execute/operations/window/test/framework/**</exclude>
+ <exclude>com/splicemachine/derby/impl/sql/execute/upgrade/UpgradeTestIT.java</exclude>
+
<runOrder>alphabetical</runOrder>
@@ -495,6 +497,10 @@
<include>**/*IT.java</include>
+ <excludes>
+ <exclude>com/splicemachine/derby/impl/sql/execute/upgrade/UpgradeTestIT.java</exclude>
+ </excludes>
+
<runOrder>alphabetical</runOrder>
async
diff --git a/splice_machine/src/test/java/com/splicemachine/derby/test/framework/SpliceWatcher.java b/splice_machine/src/test/java/com/splicemachine/derby/test/framework/SpliceWatcher.java
index 521de5c1c99..1de1641677c 100644
--- a/splice_machine/src/test/java/com/splicemachine/derby/test/framework/SpliceWatcher.java
+++ b/splice_machine/src/test/java/com/splicemachine/derby/test/framework/SpliceWatcher.java
@@ -472,4 +472,13 @@ public void assertStrResult(String res, String query, Boolean sort) throws Excep
Assert.assertEquals( "failed asserting the results of sql\n" + query,
res, executeToString(query, sort));
}
+
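+ /**
+ * Runs {@code sql}, asserts that at least one row is returned and returns the int value
+ * of column {@code index} of the first row.
+ */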
+ public int executeGetInt(String sql, int index) throws SQLException {
+ try( Statement s = getOrCreateConnection().createStatement();
+ ResultSet rs = s.executeQuery(sql))
+ {
+ Assert.assertTrue(rs.next());
+ return rs.getInt(index);
+ }
+ }
}
diff --git a/start-splice-cluster b/start-splice-cluster
index edf7452911a..d7333e4f3fc 100755
--- a/start-splice-cluster
+++ b/start-splice-cluster
@@ -24,7 +24,8 @@ CHAOS="false"
MEMBERS=2
DEBUG_PATH=""
-
+MAX_WAIT_REGION_SERVERS=100 # *2 seconds
+MAX_WAIT_MASTER=200 # *2 seconds
##################################################################################
# Function to kill all splice test processes - zoo, SpliceTestPlatform, YARN, and
@@ -64,18 +65,28 @@ function is_port_open
}
##################################################################################
-# print message ${1} and wait until port {2} is open
+# print message ${1} and wait until port ${2} is open, for at most ${3}*2 seconds
##################################################################################
function _wait_for_port
{
msg=${1}
port=${2}
+ wait_max=${3}
echo -n ${msg}
echo -n " "
counter=0
until is_port_open localhost ${port}; do
- echo -n "${counter} - "
+ if [ ${JENKINS_MODE} ]; then
+ echo "${msg} ${counter}"
+ else
+ echo -n "${counter} - "
+ fi
counter=$((counter+1))
+
+ if [ ${counter} -ge ${wait_max} ]; then
+ echo "Waited too long, aborting."
+ exit 2
+ fi
sleep 2
done
echo "done!"
@@ -175,7 +186,7 @@ function _start_region_servers {
function _wait_for_region_servers {
if [[ ${MEMBERS} -gt 0 ]]; then
for (( MEMBER=1; MEMBER<${MEMBERS}; MEMBER++ )); do
- _wait_for_port "Waiting for Region Server $(($MEMBER +1)) to be ready ..." $(( 1527 + ${MEMBER} ))
+ _wait_for_port "Waiting for Region Server $(($MEMBER +1)) to be ready ..." $(( 1527 + ${MEMBER} )) ${MAX_WAIT_REGION_SERVERS}
done
fi
}
@@ -198,7 +209,31 @@ function _start_master {
}
function _wait_for_master {
- _wait_for_port "Waiting for Master to be ready ..." 1527
+
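+ # Like _wait_for_port, but additionally aborts as soon as the splice log reports a
+ # failed maven goal, so we do not keep waiting for a master that will never come up.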
+ msg="Waiting for Master to be ready ..."
+ port=1527
+ wait_max=${MAX_WAIT_MASTER}
+ echo -n ${msg}
+ echo -n " "
+ counter=0
+ until is_port_open localhost ${port}; do
+ if [ ${JENKINS_MODE} ]; then
+ echo "${msg} ${counter}"
+ else
+ echo -n "${counter} - "
+ fi
+ counter=$((counter+1))
+ if [ ${counter} -ge ${wait_max} ]; then
+ echo "Waited too long, aborting."
+ exit 2
+ fi
+ if cat ${SPLICE_LOG} | grep "\[ERROR\] Failed to execute goal" ; then
+ echo "ERROR: Spliceengine couldn't start."
+ exit 2
+ fi
+ sleep 2
+ done
+ echo "done!"
}
@@ -234,7 +269,7 @@ function _start_mem
function _wait_for_mem
{
- _wait_for_port "Waiting until Mem Platform is ready ..." 1527
+ _wait_for_port "Waiting until Mem Platform is ready ..." 1527 20
}
export -f _kill_em_all
@@ -263,6 +298,7 @@ function usage
echo " -K like k, but will also reset database to clean. Needs some rebuild afterwards (use -l)."
echo " -T use -T with maven (build with n threads)."
echo " -h => print this message"
+ echo " -L use linefeeds when waiting (jenkins mode)"
}
function verify_profiles
@@ -286,7 +322,7 @@ function verify_profiles
done
}
-while getopts "chkp:s:bld:fT:Ke" flag ; do
+while getopts "chkp:s:bld:fT:KeL" flag ; do
case $flag in
h* | \?)
usage
@@ -339,6 +375,9 @@ while getopts "chkp:s:bld:fT:Ke" flag ; do
# number of threads
MVN_THREADS="-T ${OPTARG}"
;;
+ L)
+ JENKINS_MODE=1
+ ;;
e)
# enterprise edition
ENTERPRISE_EDITION=1