Use of org.voltdb.catalog.Catalog in project voltdb by VoltDB.
From class SaveRestoreTestProjectBuilder, method createSaveRestoreSchemaCatalog.
/**
 * Compiles the default save/restore schema into a jar in the build
 * directory, then reads the serialized catalog commands back out of that
 * jar and replays them into a fresh in-memory {@link Catalog}.
 *
 * @return the rebuilt catalog (the one that will be passed to the
 *         ClientInterface)
 * @throws IOException if compiling or reading the catalog jar fails
 */
public Catalog createSaveRestoreSchemaCatalog() throws IOException {
    final String jarPath = BuildDirectoryUtils.getBuildDirectoryPath()
            + File.separator + "saverestore-jni.jar";
    addAllDefaults();
    final boolean compiled = compile(jarPath);
    assert (compiled);
    // Pull the serialized catalog command string back out of the jar we
    // just built (upgrading the catalog in the jar if necessary).
    final byte[] jarBytes = MiscUtils.fileToBytes(new File(jarPath));
    final String commands = CatalogUtil.getSerializedCatalogStringFromJar(
            CatalogUtil.loadAndUpgradeCatalogFromJar(jarBytes, false).getFirst());
    assert (commands != null);
    // Replay the commands into a brand-new catalog instance.
    final Catalog result = new Catalog();
    result.execute(commands);
    return result;
}
Use of org.voltdb.catalog.Catalog in project voltdb by VoltDB.
From class CatalogUtil, method getNormalTableNamesFromInMemoryJar.
/**
 * Get all normal table names from an in-memory catalog jar file.
 * A normal table is one that's NOT a materialized view, nor an export table.
 *
 * @param jarfile an in-memory catalog jar file
 * @return a set of normal table names
 */
public static Set<String> getNormalTableNamesFromInMemoryJar(InMemoryJarfile jarfile) {
    // Rebuild a catalog from the serialized commands stored in the jar.
    final Catalog rebuilt = new Catalog();
    rebuilt.execute(getSerializedCatalogStringFromJar(jarfile));
    final Database db = rebuilt.getClusters().get("cluster").getDatabases().get("database");
    final Set<String> names = new HashSet<>();
    // getNormalTables is queried with both flag values so that every
    // normal table is collected regardless of which group it falls into.
    for (final boolean flag : new boolean[] { true, false }) {
        for (final Table table : getNormalTables(db, flag)) {
            names.add(table.getTypeName());
        }
    }
    return names;
}
Use of org.voltdb.catalog.Catalog in project voltdb by VoltDB.
From class TestPlannerTool, method testBadDDL.
/**
 * Verifies that DDL containing semicolons inside in-line comments and
 * string literals compiles cleanly, and that the resulting catalog can
 * back a working PlannerTool.
 */
public void testBadDDL() throws IOException {
    VoltProjectBuilder builder = new VoltProjectBuilder();
    // semicolons in in-lined comments are bad
    builder.addLiteralSchema("CREATE TABLE A (C1 BIGINT NOT NULL, PRIMARY KEY(C1)); -- this; is bad");
    builder.addPartitionInfo("A", "C1");
    // semicolons in string literals are bad
    builder.addLiteralSchema("create table t(id bigint not null, name varchar(5) default 'a;bc', primary key(id));");
    builder.addPartitionInfo("t", "id");
    // Add a newline string literal case just for fun
    builder.addLiteralSchema("create table s(id bigint not null, name varchar(5) default 'a\nb', primary key(id));");
    builder.addStmtProcedure("MakeCompileHappy", "SELECT * FROM A WHERE C1 = ?;", "A.C1: 0");
    final File jar = new File("testbadddl-oop.jar");
    jar.deleteOnExit();
    // Fail fast if the schema does not compile, instead of failing later
    // with a confusing error when the jar is read back. (The sibling
    // save/restore snippet asserts the compile status the same way.)
    assertTrue(builder.compile("testbadddl-oop.jar"));
    byte[] bytes = MiscUtils.fileToBytes(jar);
    String serializedCatalog = CatalogUtil.getSerializedCatalogStringFromJar(
            CatalogUtil.loadAndUpgradeCatalogFromJar(bytes, false).getFirst());
    assertNotNull(serializedCatalog);
    // Replay the serialized commands into a fresh catalog.
    Catalog c = new Catalog();
    c.execute(serializedCatalog);
    DbSettings settings = new DbSettings(ClusterSettings.create().asSupplier(), NodeSettings.create());
    CatalogContext context = new CatalogContext(0, 0, c, settings, bytes, null, new byte[] {}, 0, mock(HostMessenger.class));
    m_pt = new PlannerTool(context.database, context.getCatalogHash());
    // Bad DDL would kill the planner before it starts and this query
    // would return a Stream Closed error
    m_pt.planSqlForTest("select * from A;");
}
Use of org.voltdb.catalog.Catalog in project voltdb by VoltDB.
From class TestPlannerTool, method testSimple.
/**
 * Smoke test for PlannerTool: compiles the TPCC schema into a jar,
 * rebuilds a Catalog from it, and plans a mix of valid single-table,
 * multi-table-join, and garbage SQL statements.
 */
public void testSimple() throws IOException {
TPCCProjectBuilder builder = new TPCCProjectBuilder();
builder.addAllDefaults();
final File jar = new File("tpcc-oop.jar");
jar.deleteOnExit();
//long start = System.nanoTime();
//for (int i = 0; i < 10000; i++) {
builder.compile("tpcc-oop.jar");
/* long end = System.nanoTime();
System.err.printf("Took %.3f seconds to compile.\n",
(end - start) / 1000000000.0);
start = end;
}*/
// Read the compiled jar back and extract the serialized catalog commands.
byte[] bytes = MiscUtils.fileToBytes(new File("tpcc-oop.jar"));
String serializedCatalog = CatalogUtil.getSerializedCatalogStringFromJar(CatalogUtil.loadAndUpgradeCatalogFromJar(bytes, false).getFirst());
// Replay the commands into a fresh in-memory catalog.
Catalog catalog = new Catalog();
catalog.execute(serializedCatalog);
DbSettings settings = new DbSettings(ClusterSettings.create().asSupplier(), NodeSettings.create());
CatalogContext context = new CatalogContext(0, 0, catalog, settings, bytes, null, new byte[] {}, 0, mock(HostMessenger.class));
m_pt = new PlannerTool(context.database, context.getCatalogHash());
AdHocPlannedStatement result = null;
// A trivial single-table scan should plan without error.
result = m_pt.planSqlForTest("select * from warehouse;");
System.out.println(result);
// try many tables joins -- a nine-table join must plan successfully.
try {
result = m_pt.planSqlForTest("select * from WAREHOUSE, DISTRICT, CUSTOMER, CUSTOMER_NAME, HISTORY, STOCK, ORDERS, NEW_ORDER, ORDER_LINE where " + "WAREHOUSE.W_ID = DISTRICT.D_W_ID and " + "WAREHOUSE.W_ID = CUSTOMER.C_W_ID and " + "WAREHOUSE.W_ID = CUSTOMER_NAME.C_W_ID and " + "WAREHOUSE.W_ID = HISTORY.H_W_ID and " + "WAREHOUSE.W_ID = STOCK.S_W_ID and " + "WAREHOUSE.W_ID = ORDERS.O_W_ID and " + "WAREHOUSE.W_ID = NEW_ORDER.NO_W_ID and " + "WAREHOUSE.W_ID = ORDER_LINE.OL_W_ID and " + "WAREHOUSE.W_ID = 0");
} catch (Exception e) {
// V4.5 supports multiple table joins
fail();
}
// try just the right amount of tables -- a five-table join must also plan.
try {
result = m_pt.planSqlForTest("select * from CUSTOMER, STOCK, ORDERS, ORDER_LINE, NEW_ORDER where " + "CUSTOMER.C_W_ID = CUSTOMER.C_W_ID and " + "CUSTOMER.C_W_ID = STOCK.S_W_ID and " + "CUSTOMER.C_W_ID = ORDERS.O_W_ID and " + "CUSTOMER.C_W_ID = ORDER_LINE.OL_W_ID and " + "CUSTOMER.C_W_ID = NEW_ORDER.NO_W_ID and " + "CUSTOMER.C_W_ID = 0");
} catch (Exception e) {
e.printStackTrace();
fail();
}
// try garbage -- non-SQL input must be rejected with an exception.
try {
result = m_pt.planSqlForTest("ryan likes the yankees");
fail();
} catch (Exception e) {
// expected: garbage input must not plan
}
try {
Thread.sleep(500);
} catch (InterruptedException e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
// Garbage must still be rejected after the pause above.
try {
result = m_pt.planSqlForTest("ryan likes the yankees");
fail();
} catch (Exception e) {
// expected: garbage input must not plan
}
// The planner must still work after having rejected bad input.
result = m_pt.planSqlForTest("select * from warehouse;");
System.out.println(result);
}
Use of org.voltdb.catalog.Catalog in project voltdb by VoltDB.
From class TestTwoSitePlans, method setUp.
/**
 * Builds a two-site TPCC test fixture: compiles a catalog jar, rebuilds
 * and deploys the catalog, spins up two JNI execution engines (each on
 * its own thread), caches the select/insert plan fragments, and inserts
 * one row into each site.
 */
@SuppressWarnings("deprecation")
@Override
public void setUp() throws IOException, InterruptedException {
VoltDB.instance().readBuildInfo("Test");
// compile a catalog jar with the default TPCC schema, partitioning,
// and the two procedures exercised by this test
String testDir = BuildDirectoryUtils.getBuildDirectoryPath();
String catalogJar = testDir + File.separator + JAR;
TPCCProjectBuilder pb = new TPCCProjectBuilder();
pb.addDefaultSchema();
pb.addDefaultPartitioning();
pb.addProcedures(MultiSiteSelect.class, InsertNewOrder.class);
pb.compile(catalogJar, 2, 0);
// load the serialized catalog commands back out of the jar
byte[] bytes = MiscUtils.fileToBytes(new File(catalogJar));
String serializedCatalog = CatalogUtil.getSerializedCatalogStringFromJar(CatalogUtil.loadAndUpgradeCatalogFromJar(bytes, false).getFirst());
// create the catalog (it will be passed to the ClientInterface)
catalog = new Catalog();
catalog.execute(serializedCatalog);
// update the catalog with the data from the deployment file
// (compileDeployment returns null on success)
String pathToDeployment = pb.getPathToDeployment();
assertTrue(CatalogUtil.compileDeployment(catalog, pathToDeployment, false) == null);
// look up the two procedures that the test will run
cluster = catalog.getClusters().get("cluster");
CatalogMap<Procedure> procedures = cluster.getDatabases().get("database").getProcedures();
Procedure insertProc = procedures.get("InsertNewOrder");
assert (insertProc != null);
selectProc = procedures.get("MultiSiteSelect");
assert (selectProc != null);
// Each EE needs its own thread for correct initialization.
final AtomicReference<ExecutionEngine> site1Reference = new AtomicReference<ExecutionEngine>();
final byte[] configBytes = LegacyHashinator.getConfigureBytes(2);
Thread site1Thread = new Thread() {
@Override
public void run() {
site1Reference.set(new ExecutionEngineJNI(cluster.getRelativeIndex(), 1, 0, 0, "", 0, 64 * 1024, 100, new HashinatorConfig(HashinatorType.LEGACY, configBytes, 0, 0), false));
}
};
site1Thread.start();
site1Thread.join();
final AtomicReference<ExecutionEngine> site2Reference = new AtomicReference<ExecutionEngine>();
Thread site2Thread = new Thread() {
@Override
public void run() {
site2Reference.set(new ExecutionEngineJNI(cluster.getRelativeIndex(), 2, 1, 0, "", 0, 64 * 1024, 100, new HashinatorConfig(HashinatorType.LEGACY, configBytes, 0, 0), false));
}
};
site2Thread.start();
site2Thread.join();
// create two EEs and load the same serialized catalog into both
ee1 = site1Reference.get();
ee1.loadCatalog(0, catalog.serialize());
ee2 = site2Reference.get();
ee2.loadCatalog(0, catalog.serialize());
// cache some plan fragments
selectStmt = selectProc.getStatements().get("selectAll");
assert (selectStmt != null);
int i = 0;
// this kinda assumes the right order: the first fragment iterated is
// taken as the top fragment, the second as the bottom fragment
for (PlanFragment f : selectStmt.getFragments()) {
if (i == 0)
selectTopFrag = f;
else
selectBottomFrag = f;
i++;
}
assert (selectTopFrag != null);
assert (selectBottomFrag != null);
// if the guess above was wrong, swap: the top fragment is the one
// that has dependencies
if (selectTopFrag.getHasdependencies() == false) {
PlanFragment temp = selectTopFrag;
selectTopFrag = selectBottomFrag;
selectBottomFrag = temp;
}
// get the insert frag (the insert statement has a single fragment,
// so the loop simply keeps the last one iterated)
Statement insertStmt = insertProc.getStatements().get("insert");
assert (insertStmt != null);
for (PlanFragment f : insertStmt.getFragments()) insertFrag = f;
// populate plan cache with the decompressed plan trees of all three
// fragments, keyed by their unique fragment ids
ActivePlanRepository.clear();
ActivePlanRepository.addFragmentForTest(CatalogUtil.getUniqueIdForFragment(selectBottomFrag), Encoder.decodeBase64AndDecompressToBytes(selectBottomFrag.getPlannodetree()), selectStmt.getSqltext());
ActivePlanRepository.addFragmentForTest(CatalogUtil.getUniqueIdForFragment(selectTopFrag), Encoder.decodeBase64AndDecompressToBytes(selectTopFrag.getPlannodetree()), selectStmt.getSqltext());
ActivePlanRepository.addFragmentForTest(CatalogUtil.getUniqueIdForFragment(insertFrag), Encoder.decodeBase64AndDecompressToBytes(insertFrag.getPlannodetree()), insertStmt.getSqltext());
// insert some data: one row via ee2 ...
ParameterSet params = ParameterSet.fromArrayNoCopy(1L, 1L, 1L);
FastDeserializer fragResult2 = ee2.executePlanFragments(1, new long[] { CatalogUtil.getUniqueIdForFragment(insertFrag) }, null, new ParameterSet[] { params }, null, new String[] { selectStmt.getSqltext() }, null, null, 1, 1, 0, 42, Long.MAX_VALUE, false);
// ignore totalsize field in message
fragResult2.readInt();
VoltTable[] results = TableHelper.convertBackedBufferToTables(fragResult2.buffer(), 1);
// the insert reports exactly one modified row
assert (results[0].asScalarLong() == 1L);
// ... and one row via ee1
params = ParameterSet.fromArrayNoCopy(2L, 2L, 2L);
FastDeserializer fragResult1 = ee1.executePlanFragments(1, new long[] { CatalogUtil.getUniqueIdForFragment(insertFrag) }, null, new ParameterSet[] { params }, null, new String[] { selectStmt.getSqltext() }, null, null, 2, 2, 1, 42, Long.MAX_VALUE, false);
// ignore totalsize field in message
fragResult1.readInt();
results = TableHelper.convertBackedBufferToTables(fragResult1.buffer(), 1);
// the two EEs must not share a result buffer
assert (fragResult1.buffer() != fragResult2.buffer());
assert (results[0].asScalarLong() == 1L);
}
Aggregations