use of org.voltdb.compiler.PlannerTool in project voltdb by VoltDB.
the class TestAdHocPlans method setUp.
@Before
public void setUp() throws Exception {
    // For planner-only testing, we shouldn't care about IV2
    VoltDB.Configuration config = setUpSPDB();
    byte[] bytes = MiscUtils.fileToBytes(new File(config.m_pathToCatalog));
    String serializedCatalog =
            CatalogUtil.getSerializedCatalogStringFromJar(
                    CatalogUtil.loadAndUpgradeCatalogFromJar(bytes, false).getFirst());
    Catalog catalog = new Catalog();
    catalog.execute(serializedCatalog);
    DbSettings dbSettings = new DbSettings(
            config.asClusterSettings().asSupplier(),
            NodeSettings.create(config.asPathSettingsMap()));
    CatalogContext context = new CatalogContext(
            0, 0, catalog, dbSettings, bytes, null, new byte[] {}, 0,
            mock(HostMessenger.class));
    m_pt = new PlannerTool(context.database, context.getCatalogHash());
}
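Once setUp has built the PlannerTool from the single-partition catalog, a test method can plan ad hoc SQL directly against it. The sketch below is illustrative only: the table name SP1 is an assumption, not taken from the schema produced by setUpSPDB(), and planSqlForTest is the test-only planning entry point used in the other examples on this page.
@Test
public void testPlanSimpleSelect() {
    // Hypothetical test body; "SP1" is an assumed table name.
    AdHocPlannedStatement result = m_pt.planSqlForTest("select count(*) from SP1;");
    assertNotNull(result);
}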
use of org.voltdb.compiler.PlannerTool in project voltdb by VoltDB.
the class AdHocNTBase method compileAdHocSQL.
/**
* Compile a batch of one or more SQL statements into a set of plans.
* Parameters are valid iff there is exactly one DML/DQL statement.
*/
public static AdHocPlannedStatement compileAdHocSQL(CatalogContext context,
                                                    String sqlStatement,
                                                    boolean inferPartitioning,
                                                    Object userPartitionKey,
                                                    ExplainMode explainMode,
                                                    boolean isSwapTables,
                                                    Object[] userParamSet)
        throws AdHocPlanningException {
    assert (context != null);
    assert (sqlStatement != null);
    final PlannerTool ptool = context.m_ptool;
    // Take advantage of the planner optimization for inferring single-partition
    // work when the batch has one statement.
    StatementPartitioning partitioning = null;
    if (inferPartitioning) {
        partitioning = StatementPartitioning.inferPartitioning();
    } else if (userPartitionKey == null) {
        partitioning = StatementPartitioning.forceMP();
    } else {
        partitioning = StatementPartitioning.forceSP();
    }
    try {
        // It is not clear that planSql is thread-safe, so serialize calls to the planner.
        synchronized (PlannerTool.class) {
            return ptool.planSql(sqlStatement, partitioning,
                    explainMode != ExplainMode.NONE, userParamSet, isSwapTables);
        }
    } catch (Exception e) {
        throw new AdHocPlanningException("Unexpected Ad Hoc Planning Error: " + e);
    } catch (StackOverflowError error) {
        // StackOverflowError is an Error, not an Exception, so it needs its own
        // catch clause on this try block.
        throw new AdHocPlanningException("Encountered stack overflow error. " +
                "Try reducing the number of predicate expressions in the query.");
    } catch (AssertionError ae) {
        throw new AdHocPlanningException("Assertion Error in Ad Hoc Planning: " + ae);
    }
}
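A minimal sketch of a caller is shown below, assuming a CatalogContext named context is already available; the SQL text and argument choices are illustrative, not taken from the source.
// Hypothetical caller: plan one read-only statement with inferred partitioning.
try {
    AdHocPlannedStatement stmt = AdHocNTBase.compileAdHocSQL(
            context,                    // current catalog context (assumed available)
            "select * from warehouse;", // exactly one DQL statement
            true,                       // let the planner infer single-partition work
            null,                       // no user-supplied partition key
            ExplainMode.NONE,           // plan for execution, not EXPLAIN
            false,                      // not a SWAP TABLES statement
            null);                      // no user parameters
    // hand stmt off for execution or explanation
} catch (AdHocPlanningException e) {
    // report the planning failure back to the caller
}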
use of org.voltdb.compiler.PlannerTool in project voltdb by VoltDB.
the class TestPlannerTool method testBadDDL.
public void testBadDDL() throws IOException {
    // semicolons in in-lined comments are bad
    VoltProjectBuilder builder = new VoltProjectBuilder();
    builder.addLiteralSchema("CREATE TABLE A (C1 BIGINT NOT NULL, PRIMARY KEY(C1)); -- this; is bad");
    builder.addPartitionInfo("A", "C1");
    // semicolons in string literals are bad
    builder.addLiteralSchema("create table t(id bigint not null, name varchar(5) default 'a;bc', primary key(id));");
    builder.addPartitionInfo("t", "id");
    // Add a newline string literal case just for fun
    builder.addLiteralSchema("create table s(id bigint not null, name varchar(5) default 'a\nb', primary key(id));");
    builder.addStmtProcedure("MakeCompileHappy", "SELECT * FROM A WHERE C1 = ?;", "A.C1: 0");
    final File jar = new File("testbadddl-oop.jar");
    jar.deleteOnExit();
    builder.compile("testbadddl-oop.jar");
    byte[] bytes = MiscUtils.fileToBytes(new File("testbadddl-oop.jar"));
    String serializedCatalog =
            CatalogUtil.getSerializedCatalogStringFromJar(
                    CatalogUtil.loadAndUpgradeCatalogFromJar(bytes, false).getFirst());
    assertNotNull(serializedCatalog);
    Catalog c = new Catalog();
    c.execute(serializedCatalog);
    DbSettings settings = new DbSettings(ClusterSettings.create().asSupplier(), NodeSettings.create());
    CatalogContext context = new CatalogContext(
            0, 0, c, settings, bytes, null, new byte[] {}, 0, mock(HostMessenger.class));
    m_pt = new PlannerTool(context.database, context.getCatalogHash());
    // Bad DDL would kill the planner before it starts and this query
    // would return a Stream Closed error
    m_pt.planSqlForTest("select * from A;");
}
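The point of those schema strings is that a naive split on semicolons would cut statements apart inside comments and string literals. The fragment below is an illustration of that failure mode, not code from the project.
// Illustration only: splitting DDL on ';' breaks the literal 'a;bc'.
String ddl = "create table t(id bigint not null, name varchar(5) default 'a;bc', primary key(id));";
String[] pieces = ddl.split(";");
// pieces[0] ends with "default 'a" and pieces[1] starts with "bc', ..." --
// neither piece is a valid statement, which is why DDL like this must survive
// whatever statement splitting the tooling performs.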
use of org.voltdb.compiler.PlannerTool in project voltdb by VoltDB.
the class TestPlannerTool method testSimple.
public void testSimple() throws IOException {
    TPCCProjectBuilder builder = new TPCCProjectBuilder();
    builder.addAllDefaults();
    final File jar = new File("tpcc-oop.jar");
    jar.deleteOnExit();
    //long start = System.nanoTime();
    //for (int i = 0; i < 10000; i++) {
    builder.compile("tpcc-oop.jar");
    /* long end = System.nanoTime();
       System.err.printf("Took %.3f seconds to compile.\n",
                         (end - start) / 1000000000.0);
       start = end;
    }*/
    byte[] bytes = MiscUtils.fileToBytes(new File("tpcc-oop.jar"));
    String serializedCatalog =
            CatalogUtil.getSerializedCatalogStringFromJar(
                    CatalogUtil.loadAndUpgradeCatalogFromJar(bytes, false).getFirst());
    Catalog catalog = new Catalog();
    catalog.execute(serializedCatalog);
    DbSettings settings = new DbSettings(ClusterSettings.create().asSupplier(), NodeSettings.create());
    CatalogContext context = new CatalogContext(
            0, 0, catalog, settings, bytes, null, new byte[] {}, 0, mock(HostMessenger.class));
    m_pt = new PlannerTool(context.database, context.getCatalogHash());
    AdHocPlannedStatement result = null;
    result = m_pt.planSqlForTest("select * from warehouse;");
    System.out.println(result);
    // try a many-table join
    try {
        result = m_pt.planSqlForTest(
                "select * from WAREHOUSE, DISTRICT, CUSTOMER, CUSTOMER_NAME, HISTORY, STOCK, ORDERS, NEW_ORDER, ORDER_LINE where " +
                "WAREHOUSE.W_ID = DISTRICT.D_W_ID and " +
                "WAREHOUSE.W_ID = CUSTOMER.C_W_ID and " +
                "WAREHOUSE.W_ID = CUSTOMER_NAME.C_W_ID and " +
                "WAREHOUSE.W_ID = HISTORY.H_W_ID and " +
                "WAREHOUSE.W_ID = STOCK.S_W_ID and " +
                "WAREHOUSE.W_ID = ORDERS.O_W_ID and " +
                "WAREHOUSE.W_ID = NEW_ORDER.NO_W_ID and " +
                "WAREHOUSE.W_ID = ORDER_LINE.OL_W_ID and " +
                "WAREHOUSE.W_ID = 0");
    } catch (Exception e) {
        // V4.5 supports multiple-table joins
        fail();
    }
    // try just the right number of tables
    try {
        result = m_pt.planSqlForTest(
                "select * from CUSTOMER, STOCK, ORDERS, ORDER_LINE, NEW_ORDER where " +
                "CUSTOMER.C_W_ID = CUSTOMER.C_W_ID and " +
                "CUSTOMER.C_W_ID = STOCK.S_W_ID and " +
                "CUSTOMER.C_W_ID = ORDERS.O_W_ID and " +
                "CUSTOMER.C_W_ID = ORDER_LINE.OL_W_ID and " +
                "CUSTOMER.C_W_ID = NEW_ORDER.NO_W_ID and " +
                "CUSTOMER.C_W_ID = 0");
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
    // try garbage
    try {
        result = m_pt.planSqlForTest("ryan likes the yankees");
        fail();
    } catch (Exception e) {
    }
    try {
        Thread.sleep(500);
    } catch (InterruptedException e1) {
        e1.printStackTrace();
    }
    try {
        result = m_pt.planSqlForTest("ryan likes the yankees");
        fail();
    } catch (Exception e) {
    }
    result = m_pt.planSqlForTest("select * from warehouse;");
    System.out.println(result);
}