merging from master
hemanthmeka committed Aug 8, 2018
2 parents 32f1dbd + 5bde791 commit 4ea44a9
Showing 109 changed files with 3,875 additions and 657 deletions.
98 changes: 82 additions & 16 deletions build.gradle
@@ -71,7 +71,7 @@ allprojects {
apply plugin: 'nebula.ospackage-base'

group = 'io.snappydata'
version = '1.0.2-SNAPSHOT'
version = '1.0.2'

// apply compiler options
tasks.withType(JavaCompile) {
@@ -106,7 +106,7 @@ allprojects {
scalaBinaryVersion = '2.11'
scalaVersion = scalaBinaryVersion + '.8'
sparkVersion = '2.1.1'
snappySparkVersion = '2.1.1.1'
snappySparkVersion = '2.1.1.3'
sparkDistName = "spark-${sparkVersion}-bin-hadoop2.7"
log4jVersion = '1.2.17'
slf4jVersion = '1.7.25'
@@ -121,9 +121,10 @@ allprojects {
janinoVersion = '3.0.8'
derbyVersion = '10.12.1.1'
pegdownVersion = '1.6.0'
snappyStoreVersion = '1.6.2-SNAPSHOT'
snappyStoreVersion = '1.6.2'
snappydataVersion = version
pulseVersion = '1.5.1'
zeppelinInterpreterVersion = '0.7.3.1'
buildFlags = ''
createdBy = System.getProperty('user.name')
osArch = System.getProperty('os.arch')
@@ -132,7 +133,7 @@
buildDate = new Date().format('yyyy-MM-dd HH:mm:ss Z')
buildNumber = new Date().format('MMddyy')
jdkVersion = System.getProperty('java.version')
sparkJobServerVersion = '0.6.2.6'
sparkJobServerVersion = '0.6.2.7'
kolobokeVersion = '1.0.0'

gitCmd = "git --git-dir=${rootDir}/.git --work-tree=${rootDir}"
@@ -737,7 +738,6 @@ task product(type: Zip) {
if (hasGemFireConnectorProject){
dependsOn ":gemfire-connector:product"
}

}


@@ -858,17 +858,45 @@ task product(type: Zip) {
exclude '.git*'
}
}
if (isEnterpriseProduct && hasAqpProject) {
// copy enterprise shared libraries for optimized JNI calls
copy {
from aqpProject.projectDir.path + '/lib'
into "${snappyProductDir}/jars"
if (isEnterpriseProduct) {
if (hasAqpProject) {
// copy enterprise shared libraries for optimized JNI calls
copy {
from aqpProject.projectDir.path + '/lib'
into "${snappyProductDir}/jars"
}
copy {
from aqpProject.projectDir
into snappyProductDir
include 'NOTICE'
include '*EULA*'
}
}
copy {
from aqpProject.projectDir
into snappyProductDir
include 'NOTICE'
include '*EULA*'

def jdbcConnectorProject = project(":snappy-jdbc-connector_${scalaBinaryVersion}")
def gemfireConnectorProject = project(":gemfire-connector")
def gfeConnectorProject = project(":gemfire-connector:connector_${scalaBinaryVersion}")
def gfeFunctionProject = project(":gemfire-connector:gfeFunctions")
if (hasJdbcConnectorProject) {
copy {
from jdbcConnectorProject.jar.destinationDir
into "${snappyProductDir}/connectors"
}
}
if (hasGemFireConnectorProject) {
copy {
from gfeConnectorProject.jar.destinationDir
into "${snappyProductDir}/connectors"
}
copy {
from gfeFunctionProject.jar.destinationDir
into "${snappyProductDir}/connectors"
}
copy {
from "${gemfireConnectorProject.projectDir}/examples/quickstart/data"
into "${snappyProductDir}/connectors"
include "persons.jar"
}
}
}
copy {
@@ -897,11 +925,16 @@ task product(type: Zip) {
from("${examplesProject.projectDir}/src")
into "${snappyProductDir}/quickstart/src"
}

copy {
from("${clusterProject.projectDir}/benchmark")
into "${snappyProductDir}/benchmark"
}
def clientProject = project(':snappy-store:snappydata-store-client')
copy {
from clientProject.shadowJar.destinationDir
into "${snappyProductDir}/connectors"
include clientProject.shadowJar.archiveName
}
}
}

Expand Down Expand Up @@ -962,6 +995,7 @@ buildRpm {
requires('perl')
requires('curl')
dependsOn ':packageVSD'
dependsOn ':packageZeppelinInterpreter'
if (rootProject.hasProperty('hadoop-provided')) {
classifier 'without_hadoop'
}
@@ -976,6 +1010,7 @@ buildDeb {
requires('curl')
recommends('java8-sdk')
dependsOn ':packageVSD'
dependsOn ':packageZeppelinInterpreter'
if (rootProject.hasProperty('hadoop-provided')) {
classifier 'without-hadoop'
}
@@ -987,6 +1022,7 @@ distTar {
dependsOn product
// also package VSD
dependsOn ':packageVSD'
dependsOn ':packageZeppelinInterpreter'
classifier 'bin'
if (rootProject.hasProperty('hadoop-provided')) {
classifier 'without-hadoop-bin'
@@ -997,6 +1033,7 @@ distZip {
dependsOn product
// also package VSD
dependsOn ':packageVSD'
dependsOn ':packageZeppelinInterpreter'
classifier 'bin'
if (rootProject.hasProperty('hadoop-provided')) {
classifier 'without-hadoop-bin'
@@ -1138,6 +1175,34 @@ int getLast(includeTestFiles, pattern) {
}
}

task packageZeppelinInterpreter { doLast {
String zeppelinInterpreterJarName = "snappydata-zeppelin-${zeppelinInterpreterVersion}.jar"
String zeppelinInterpreterDir = System.env.ZEPPELIN_INTERPRETER_DIR

if (zeppelinInterpreterDir == null || zeppelinInterpreterDir.length() == 0) {
zeppelinInterpreterDir = "${projectDir}/../zeppelin-interpreter"
}

String zeppelinInterpreterLibDir = "${zeppelinInterpreterDir}/build-artifacts/libs"
if (file(zeppelinInterpreterDir).canWrite()) {
exec {
executable "${zeppelinInterpreterDir}/gradlew"
workingDir = zeppelinInterpreterDir
args 'clean', 'product', 'distTar'
}
println ''
println "Copying Zeppelin Interpreter jar from ${zeppelinInterpreterLibDir} to ${snappyProductDir}/jars"
println ''
copy {
from "${zeppelinInterpreterLibDir}"
into "${snappyProductDir}/jars"
include "${zeppelinInterpreterJarName}"
}
} else {
println "Skipping including Zeppelin Interpreter jar due to unwritable ${zeppelinInterpreterDir}"
}
} }
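
The packageZeppelinInterpreter task added here resolves the interpreter checkout from the ZEPPELIN_INTERPRETER_DIR environment variable (falling back to ../zeppelin-interpreter beside this repository), builds it with that project's own gradlew, and, provided the directory is writable, copies snappydata-zeppelin-${zeppelinInterpreterVersion}.jar into ${snappyProductDir}/jars; buildRpm, buildDeb, distTar and distZip above now depend on it. A minimal usage sketch, assuming a local interpreter checkout and the Gradle wrapper at the repository root (the path below is a placeholder, not part of this commit):

# point the build at a writable snappydata-zeppelin checkout (placeholder path)
export ZEPPELIN_INTERPRETER_DIR="$HOME/work/snappydata-zeppelin"
# package the interpreter jar on its own ...
./gradlew packageZeppelinInterpreter
# ... or let a distribution target pull it in
./gradlew distTar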

task packagePulse { doLast {
String pulseWarName = "pulse-${pulseVersion}.war"
String pulseDir = System.env.PULSEDIR
@@ -1199,6 +1264,7 @@ task sparkPackage {

packagePulse.mustRunAfter product
packageVSD.mustRunAfter product
packageZeppelinInterpreter.mustRunAfter product

distTar.mustRunAfter clean, cleanAll, product
distZip.mustRunAfter clean, cleanAll, product
10 changes: 5 additions & 5 deletions cluster/sbin/collect-debug-artifacts.sh
@@ -54,27 +54,27 @@ while [ "$1" != "" ]; do
CONF_FILE="$2"
shift ;;
--conf=*|--config=*)
CONF_FILE="`echo "$2" | sed 's/^[^=]*=//'`" ;;
CONF_FILE="`echo "$1" | sed 's/^[^=]*=//'`" ;;
-x)
TAR_FILE="$2"
shift ;;
--extract=*|--xtract=*)
TAR_FILE="`echo "$2" | sed 's/^[^=]*=//'`" ;;
TAR_FILE="`echo "$1" | sed 's/^[^=]*=//'`" ;;
-o)
OUTPUT_DIR="$2"
shift ;;
--out=*|--outdir=*)
OUTPUT_DIR="`echo "$2" | sed 's/^[^=]*=//'`" ;;
OUTPUT_DIR="`echo "$1" | sed 's/^[^=]*=//'`" ;;
-s)
START_TIME="$2"
shift ;;
--start=*)
START_TIME="`echo "$2" | sed 's/^[^=]*=//'`" ;;
START_TIME="`echo "$1" | sed 's/^[^=]*=//'`" ;;
-e)
END_TIME="$2"
shift ;;
--end=*)
END_TIME="`echo "$2" | sed 's/^[^=]*=//'`" ;;
END_TIME="`echo "$1" | sed 's/^[^=]*=//'`" ;;
-h|--help)
usage
exit 0
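
The $2-to-$1 corrections above fix the --flag=value branches: in that form the value is embedded in the same argument, so the sed that strips everything up to the first '=' must run on $1, while the two-token forms (-x <file>, -o <dir>, ...) still read $2 and shift. A standalone sketch of the corrected pattern, using a placeholder config path:

# simulate calling the script as: collect-debug-artifacts.sh --conf=/tmp/cda.conf
set -- --conf=/tmp/cda.conf
case "$1" in
  --conf=*|--config=*)
    # the value lives inside $1; "$2" would wrongly read the next argument
    CONF_FILE="`echo "$1" | sed 's/^[^=]*=//'`" ;;
esac
echo "$CONF_FILE"   # prints /tmp/cda.conf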
@@ -1023,4 +1023,34 @@ class QueryRoutingDUnitTest(val s: String)
conn.close()
}
}

def testAlterTableRowTable(): Unit = {
val serverHostPort = AvailablePortHelper.getRandomAvailableTCPPort
vm2.invoke(classOf[ClusterManagerTestBase], "startNetServer", serverHostPort)
val conn = DriverManager.getConnection("jdbc:snappydata://localhost:" + serverHostPort)
println(s"Connected to $serverHostPort")

val stmt = conn.createStatement();
try {
val createParentTable: String =
"create table parentT (cid int not null, sid int not null, qty int not null, " +
" constraint parent_pk primary key (cid, sid)) " +
"USING ROW OPTIONS ( PERSISTENT 'SYNCHRONOUS');"
val createChildTable: String =
"create table childT (oid int not null constraint child_pk primary key, cid int, " +
"sid int, qty int, constraint parent_fk foreign key (cid, sid)" +
"references parentT (cid, sid) on delete restrict) " +
"USING ROW OPTIONS ( PERSISTENT 'SYNCHRONOUS');"
val alterTableStmt: String = "alter table childT drop FOREIGN KEY parent_fk"
stmt.execute(createParentTable)
stmt.execute(createChildTable)
stmt.execute(alterTableStmt)
} finally {
stmt.execute("drop table childT")
stmt.execute("drop table parentT")
stmt.close()
conn.close()
}
}

}
@@ -77,16 +77,6 @@ class CatalogConsistencyDUnitTest(s: String) extends ClusterManagerTestBase(s) {
}

val routeQueryDisabledConn = getClientConnection(netPort1, false)
// should throw an exception since the catalog is repaired and table entry
// should have been removed
try {
// table should not exist in the store DD
routeQueryDisabledConn.createStatement().executeQuery("select * from column_table1")
} catch {
case se: SQLException if (se.getSQLState.equals("42X05")) =>
case unknown: Throwable => throw unknown
}

try {
// make sure that the column buffer does not exist
routeQueryDisabledConn.createStatement().executeQuery(
@@ -132,7 +122,7 @@ class CatalogConsistencyDUnitTest(s: String) extends ClusterManagerTestBase(s) {

val connection = getClientConnection(netPort1)
// repair the catalog
connection.createStatement().execute("CALL SYS.REPAIR_CATALOG()")
connection.createStatement().execute("CALL SYS.REPAIR_CATALOG('true', 'true')")
// column_table1 should not be found in either catalog after repair
assertTableDoesNotExist(netPort1, snc)
// other tables should exist
@@ -165,14 +155,15 @@ class CatalogConsistencyDUnitTest(s: String) extends ClusterManagerTestBase(s) {

val connection = getClientConnection(netPort1)
// repair the catalog
connection.createStatement().execute("CALL SYS.REPAIR_CATALOG()")
connection.createStatement().execute("CALL SYS.REPAIR_CATALOG('true', 'true')")
// column_table1 should not be found in either catalog after repair
assertTableDoesNotExist(netPort1, snc)
// other tables should exist
verifyTables(snc)
}

def testCatalogRepairedWhenLeadRestarted(): Unit = {
// Hive entry missing but DD entry exists
def testCatalogRepairedWhenLeadStopped1(): Unit = {
val netPort1 = AvailablePortHelper.getRandomAvailableTCPPort
vm2.invoke(classOf[ClusterManagerTestBase], "startNetServer", netPort1)

@@ -188,6 +179,53 @@ class CatalogConsistencyDUnitTest(s: String) extends ClusterManagerTestBase(s) {
if(sparkContext != null) sparkContext.stop()
ClusterManagerTestBase.stopAny()

val connection = getClientConnection(netPort1)
// repair the catalog
// does not actually repair, just adds warning to log file
connection.createStatement().execute("CALL SYS.REPAIR_CATALOG('false', 'false')")
// actually repair the catalog
connection.createStatement().execute("CALL SYS.REPAIR_CATALOG('true', 'true')")

ClusterManagerTestBase.startSnappyLead(ClusterManagerTestBase.locatorPort, bootProps)
snc = SnappyContext(sc)
// column_table1 should not be found in either catalog after repair
assertTableDoesNotExist(netPort1, snc)

// other tables should exist
verifyTables(snc)
}

// Hive entry exists but DD entry missing
def testCatalogRepairedWhenLeadStopped2(): Unit = {
val netPort1 = AvailablePortHelper.getRandomAvailableTCPPort
vm2.invoke(classOf[ClusterManagerTestBase], "startNetServer", netPort1)

var snc = SnappyContext(sc)

createTables(snc)

// drop column_table1 from store DD
val routeQueryDisabledConn = getClientConnection(netPort1, false)
routeQueryDisabledConn.createStatement().execute("drop table " +
ColumnFormatRelation.columnBatchTableName("app.column_table1"))
routeQueryDisabledConn.createStatement().execute("drop table column_table1")

// make sure that the table exists in Hive metastore
assert(JdbcExtendedUtils.tableExistsInMetaData("APP.COLUMN_TABLE1",
routeQueryDisabledConn, GemFireXDClientDialect))

// stop spark
val sparkContext = SnappyContext.globalSparkContext
if(sparkContext != null) sparkContext.stop()
ClusterManagerTestBase.stopAny()

val connection = getClientConnection(netPort1)
// repair the catalog
// does not actually repair, just adds warning to log file
connection.createStatement().execute("CALL SYS.REPAIR_CATALOG('false', 'false')")
// actually repair the catalog
connection.createStatement().execute("CALL SYS.REPAIR_CATALOG('true', 'true')")

ClusterManagerTestBase.startSnappyLead(ClusterManagerTestBase.locatorPort, bootProps)
snc = SnappyContext(sc)
// column_table1 should not be found in either catalog after repair
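
As these tests show, SYS.REPAIR_CATALOG is now invoked with two arguments: per the test comments, the ('false', 'false') form only logs catalog inconsistencies as warnings, while ('true', 'true') actually removes the stale entries so the table disappears from both the Hive metastore and the store data dictionary. A hypothetical session sketch; the client command and port are placeholders, not something established by this commit:

# "snappy-sql-client" stands in for whatever JDBC SQL shell you use against the cluster
snappy-sql-client "jdbc:snappydata://localhost:1527" <<'SQL'
-- dry run: report/log inconsistencies only
CALL SYS.REPAIR_CATALOG('false', 'false');
-- remove the inconsistent catalog entries
CALL SYS.REPAIR_CATALOG('true', 'true');
SQL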
(Diffs for the remaining changed files are not shown here.)
