Use of org.voltdb.catalog.Statement in project voltdb by VoltDB.
From class ReportMaker, method generateStatementsTable:
static String generateStatementsTable(CatalogMap<Table> tables, Procedure procedure) {
    StringBuilder sb = new StringBuilder();
    sb.append(" <table class='table tableL2 table-condensed'>\n <thead><tr>"
            + "<th><span style='white-space: nowrap;'>Statement Name</span></th>"
            + "<th>Statement SQL</th>"
            + "<th>Params</th>"
            + "<th>R/W</th>"
            + "<th>Attributes</th>"
            + "</tr>\n");
    for (Statement statement : procedure.getStatements()) {
        sb.append(genrateStatementRow(tables, procedure, statement));
    }
    sb.append(" </thead>\n </table>\n");
    return sb.toString();
}
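The table generator above walks procedure.getStatements(), a CatalogMap<Statement> keyed by statement name. As a minimal sketch of the same traversal outside the report code, the class below (StatementLister is a made-up name for illustration) prints every statement's name and SQL text for the default database of an already-deserialized Catalog, built the way the TestTwoSitePlans snippet further down builds one.

import org.voltdb.catalog.Catalog;
import org.voltdb.catalog.Database;
import org.voltdb.catalog.Procedure;
import org.voltdb.catalog.Statement;

public class StatementLister {
    // Print every statement name and its SQL text for each procedure in the
    // default database of an already-deserialized catalog.
    static void listStatements(Catalog catalog) {
        Database db = catalog.getClusters().get("cluster")
                             .getDatabases().get("database");
        for (Procedure proc : db.getProcedures()) {
            for (Statement stmt : proc.getStatements()) {
                // getTypeName() is the catalog item's name; getSqltext() is its SQL string.
                System.out.println(proc.getTypeName() + "." + stmt.getTypeName()
                        + ": " + stmt.getSqltext());
            }
        }
    }
}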
Use of org.voltdb.catalog.Statement in project voltdb by VoltDB.
From class MaterializedViewProcessor, method compileFallbackQueriesAndUpdateCatalog:
// Compile the fallback query XMLs, add the plans into the catalog statement (ENG-8641).
private void compileFallbackQueriesAndUpdateCatalog(Database db, String query, List<VoltXMLElement> fallbackQueryXMLs,
        MaterializedViewHandlerInfo mvHandlerInfo) throws VoltCompilerException {
    DatabaseEstimates estimates = new DatabaseEstimates();
    for (int i = 0; i < fallbackQueryXMLs.size(); ++i) {
        String key = String.valueOf(i);
        Statement fallbackQueryStmt = mvHandlerInfo.getFallbackquerystmts().add(key);
        VoltXMLElement fallbackQueryXML = fallbackQueryXMLs.get(i);
        fallbackQueryStmt.setSqltext(query);
        StatementCompiler.compileStatementAndUpdateCatalog(m_compiler, m_hsql, db, estimates,
                fallbackQueryStmt, fallbackQueryXML, fallbackQueryStmt.getSqltext(),
                null, // no user-supplied join order
                DeterminismMode.FASTER, StatementPartitioning.forceSP());
    }
}
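The loop above keys each fallback Statement by its stringified index (String.valueOf(i)). Below is a small sketch of reading one back out of the populated CatalogMap<Statement>; the FallbackStatements class and helper method names are invented for illustration.

import org.voltdb.catalog.CatalogMap;
import org.voltdb.catalog.Statement;

public class FallbackStatements {
    // Fetch the i-th fallback query statement from a map populated by the loop above.
    // The keys mirror the String.valueOf(i) convention used there.
    static Statement getFallbackStatement(CatalogMap<Statement> fallbackStmts, int i) {
        Statement stmt = fallbackStmts.get(String.valueOf(i));
        if (stmt == null) {
            throw new IllegalArgumentException("no fallback statement for index " + i);
        }
        return stmt;
    }
}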
Use of org.voltdb.catalog.Statement in project voltdb by VoltDB.
From class DDLCompiler, method addConstraintToCatalog:
/**
 * Add a constraint on a given table to the catalog
 * @param table The table on which the constraint will be enforced
 * @param node The XML node representing the constraint
 * @param indexReplacementMap
 * @throws VoltCompilerException
 */
private void addConstraintToCatalog(Table table, VoltXMLElement node,
        Map<String, String> indexReplacementMap, Map<String, Index> indexMap) throws VoltCompilerException {
    assert node.name.equals("constraint");
    String name = node.attributes.get("name");
    String typeName = node.attributes.get("constrainttype");
    ConstraintType type = ConstraintType.valueOf(typeName);
    String tableName = table.getTypeName();
    if (type == ConstraintType.LIMIT) {
        int tupleLimit = Integer.parseInt(node.attributes.get("rowslimit"));
        if (tupleLimit < 0) {
            throw m_compiler.new VoltCompilerException("Invalid constraint limit number '" + tupleLimit + "'");
        }
        if (tableLimitConstraintCounter.contains(tableName)) {
            throw m_compiler.new VoltCompilerException("Too many table limit constraints for table " + tableName);
        } else {
            tableLimitConstraintCounter.add(tableName);
        }
        table.setTuplelimit(tupleLimit);
        String deleteStmt = node.attributes.get("rowslimitdeletestmt");
        if (deleteStmt != null) {
            Statement catStmt = table.getTuplelimitdeletestmt().add("limit_delete");
            catStmt.setSqltext(deleteStmt);
            validateTupleLimitDeleteStmt(catStmt);
        }
        return;
    }
    if (type == ConstraintType.CHECK) {
        String msg = "VoltDB does not enforce check constraints. ";
        msg += "Constraint on table " + tableName + " will be ignored.";
        m_compiler.addWarn(msg);
        return;
    } else if (type == ConstraintType.FOREIGN_KEY) {
        String msg = "VoltDB does not enforce foreign key references and constraints. ";
        msg += "Constraint on table " + tableName + " will be ignored.";
        m_compiler.addWarn(msg);
        return;
    } else if (type == ConstraintType.MAIN) {
        // should never see these
        assert (false);
    } else if (type == ConstraintType.NOT_NULL) {
        // these get handled by table metadata inspection
        return;
    } else if (type != ConstraintType.PRIMARY_KEY && type != ConstraintType.UNIQUE) {
        throw m_compiler.new VoltCompilerException("Invalid constraint type '" + typeName + "'");
    }
    // else, create the unique index below
    // primary key code is in other places as well
    // The constraint is backed by an index, therefore we need to create it
    // TODO: We need to be able to use indexes for foreign keys. I am purposely
    // leaving those out right now because HSQLDB just makes too many of them.
    Constraint catalog_const = table.getConstraints().add(name);
    String indexName = node.attributes.get("index");
    assert (indexName != null);
    // handle replacements from duplicate index pruning
    if (indexReplacementMap.containsKey(indexName)) {
        indexName = indexReplacementMap.get(indexName);
    }
    Index catalog_index = indexMap.get(indexName);
    // Attach the index to the catalog constraint (catalog_const).
    if (catalog_index != null) {
        catalog_const.setIndex(catalog_index);
        // This may be redundant.
        catalog_index.setUnique(true);
        boolean assumeUnique = Boolean.parseBoolean(node.attributes.get("assumeunique"));
        catalog_index.setAssumeunique(assumeUnique);
    }
    catalog_const.setType(type.getValue());
}
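For the LIMIT branch, the method records the row cap with setTuplelimit and stores an optional delete statement under the key "limit_delete". The sketch below reads that state back out of a compiled Table; it assumes the generated getter getTuplelimit() pairs with the setTuplelimit() call above, and the TupleLimitInspector class name is made up for illustration.

import org.voltdb.catalog.Statement;
import org.voltdb.catalog.Table;

public class TupleLimitInspector {
    // Summarize the row-limit constraint addConstraintToCatalog recorded on a table,
    // including the optional on-overflow delete statement, if one was given.
    static String describeRowLimit(Table table) {
        StringBuilder sb = new StringBuilder();
        // Assumption: getTuplelimit() is the generated getter matching setTuplelimit() above.
        sb.append(table.getTypeName())
          .append(" tuplelimit=").append(table.getTuplelimit());
        // The compiler stores at most one delete statement, keyed "limit_delete".
        Statement deleteStmt = table.getTuplelimitdeletestmt().get("limit_delete");
        if (deleteStmt != null) {
            sb.append(", on-overflow delete: ").append(deleteStmt.getSqltext());
        }
        return sb.toString();
    }
}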
Use of org.voltdb.catalog.Statement in project voltdb by VoltDB.
From class ProcedureRunner, method reflect:
// Returns a list that contains the names of the statements which are defined in the stored procedure.
protected ArrayList<String> reflect() {
    Map<String, SQLStmt> stmtMap = null;
    // fill in the sql for single statement procs
    if (m_catProc.getHasjava() == false) {
        try {
            stmtMap = ProcedureCompiler.getValidSQLStmts(null, m_procedureName, m_procedure.getClass(), m_procedure, true);
            SQLStmt stmt = stmtMap.get(VoltDB.ANON_STMT_NAME);
            assert (stmt != null);
            Statement statement = m_catProc.getStatements().get(VoltDB.ANON_STMT_NAME);
            String s = statement.getSqltext();
            SQLStmtAdHocHelper.setSQLStr(stmt, s);
            m_cachedSingleStmt.stmt = stmt;
            int numParams = m_catProc.getParameters().size();
            m_paramTypes = new Class<?>[numParams];
            for (ProcParameter param : m_catProc.getParameters()) {
                VoltType type = VoltType.get((byte) param.getType());
                if (param.getIsarray()) {
                    m_paramTypes[param.getIndex()] = type.vectorClassFromType();
                    continue;
                }
                // (ParameterConverter.tryToMakeCompatible) before falling through to the EE?
                if (type == VoltType.INTEGER) {
                    type = VoltType.BIGINT;
                } else if (type == VoltType.SMALLINT) {
                    type = VoltType.BIGINT;
                } else if (type == VoltType.TINYINT) {
                    type = VoltType.BIGINT;
                } else if (type == VoltType.NUMERIC) {
                    type = VoltType.FLOAT;
                }
                m_paramTypes[param.getIndex()] = type.classFromType();
            }
        } catch (Exception e) {
            // shouldn't throw anything outside of the compiler
            e.printStackTrace();
        }
        // iterate through the fields and deal with sql statements
        try {
            stmtMap = ProcedureCompiler.getValidSQLStmts(null, m_procedureName, m_procedure.getClass(), m_procedure, true);
        } catch (Exception e1) {
            // shouldn't throw anything outside of the compiler
            e1.printStackTrace();
            return null;
        }
    } else {
        // this is where, in the case of java procedures, m_procMethod is set
        for (final Method m : m_procedure.getClass().getDeclaredMethods()) {
            String name = m.getName();
            if (name.equals("run")) {
                if (Modifier.isPublic(m.getModifiers()) == false) {
                    continue;
                }
                m_procMethod = m;
                m_paramTypes = m.getParameterTypes();
                break;
            }
        }
        if (m_procMethod == null) {
            throw new RuntimeException("No \"run\" method found in: " + m_procedure.getClass().getName());
        }
        // iterate through the fields and deal with sql statements
        try {
            stmtMap = ProcedureCompiler.getValidSQLStmts(null, m_procedureName, m_procedure.getClass(), m_procedure, true);
        } catch (Exception e) {
            // shouldn't happen here because it passed the compiler
            VoltDB.crashLocalVoltDB("getValidSQLStmts threw exception during ProcedureRunner loading", true, e);
        }
    }
    ArrayList<String> stmtNames = new ArrayList<String>(stmtMap.entrySet().size());
    for (final Entry<String, SQLStmt> entry : stmtMap.entrySet()) {
        String name = entry.getKey();
        stmtNames.add(name);
        // Label each SQLStmt with its variable name.
        // This is useful for multi-partition stored procedures.
        // When a statement is sent to another site for execution,
        // that SP site can use this name to find the correct place to
        // update the statistics numbers.
        entry.getValue().setStmtName(name);
        Statement s = m_catProc.getStatements().get(name);
        if (s != null) {
            /*
             * Cache all the information we need about the statements in this stored
             * procedure locally instead of pulling them from the catalog on
             * a regular basis.
             */
            SQLStmt stmt = entry.getValue();
            // done in a static method in an abstract class so users don't call it
            initSQLStmt(stmt, s);
            //LOG.fine("Found statement " + name);
        }
    }
    return stmtNames;
}
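The non-Java branch of reflect() widens narrow integer parameter types to BIGINT and NUMERIC to FLOAT before caching the parameter classes. The standalone sketch below (ParamTypeResolver is an invented name) reproduces just that mapping from a catalog Procedure, using only the accessors that appear in the method above.

import org.voltdb.VoltType;
import org.voltdb.catalog.ProcParameter;
import org.voltdb.catalog.Procedure;

public class ParamTypeResolver {
    // Rebuild the parameter-class array the same way reflect() does for
    // single-statement (non-Java) procedures, including the integer/decimal widening.
    static Class<?>[] resolveParamTypes(Procedure catProc) {
        Class<?>[] paramTypes = new Class<?>[catProc.getParameters().size()];
        for (ProcParameter param : catProc.getParameters()) {
            VoltType type = VoltType.get((byte) param.getType());
            if (param.getIsarray()) {
                paramTypes[param.getIndex()] = type.vectorClassFromType();
                continue;
            }
            // Narrow integer types widen to BIGINT, NUMERIC to FLOAT,
            // matching the chain of if/else branches in reflect().
            if (type == VoltType.INTEGER || type == VoltType.SMALLINT || type == VoltType.TINYINT) {
                type = VoltType.BIGINT;
            } else if (type == VoltType.NUMERIC) {
                type = VoltType.FLOAT;
            }
            paramTypes[param.getIndex()] = type.classFromType();
        }
        return paramTypes;
    }
}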
Use of org.voltdb.catalog.Statement in project voltdb by VoltDB.
From class TestTwoSitePlans, method setUp:
@SuppressWarnings("deprecation")
@Override
public void setUp() throws IOException, InterruptedException {
    VoltDB.instance().readBuildInfo("Test");
    // compile a catalog
    String testDir = BuildDirectoryUtils.getBuildDirectoryPath();
    String catalogJar = testDir + File.separator + JAR;
    TPCCProjectBuilder pb = new TPCCProjectBuilder();
    pb.addDefaultSchema();
    pb.addDefaultPartitioning();
    pb.addProcedures(MultiSiteSelect.class, InsertNewOrder.class);
    pb.compile(catalogJar, 2, 0);
    // load a catalog
    byte[] bytes = MiscUtils.fileToBytes(new File(catalogJar));
    String serializedCatalog = CatalogUtil.getSerializedCatalogStringFromJar(
            CatalogUtil.loadAndUpgradeCatalogFromJar(bytes, false).getFirst());
    // create the catalog (that will be passed to the ClientInterface)
    catalog = new Catalog();
    catalog.execute(serializedCatalog);
    // update the catalog with the data from the deployment file
    String pathToDeployment = pb.getPathToDeployment();
    assertTrue(CatalogUtil.compileDeployment(catalog, pathToDeployment, false) == null);
    cluster = catalog.getClusters().get("cluster");
    CatalogMap<Procedure> procedures = cluster.getDatabases().get("database").getProcedures();
    Procedure insertProc = procedures.get("InsertNewOrder");
    assert (insertProc != null);
    selectProc = procedures.get("MultiSiteSelect");
    assert (selectProc != null);
    // Each EE needs its own thread for correct initialization.
    final AtomicReference<ExecutionEngine> site1Reference = new AtomicReference<ExecutionEngine>();
    final byte[] configBytes = LegacyHashinator.getConfigureBytes(2);
    Thread site1Thread = new Thread() {
        @Override
        public void run() {
            site1Reference.set(new ExecutionEngineJNI(cluster.getRelativeIndex(), 1, 0, 0, "", 0, 64 * 1024, 100,
                    new HashinatorConfig(HashinatorType.LEGACY, configBytes, 0, 0), false));
        }
    };
    site1Thread.start();
    site1Thread.join();
    final AtomicReference<ExecutionEngine> site2Reference = new AtomicReference<ExecutionEngine>();
    Thread site2Thread = new Thread() {
        @Override
        public void run() {
            site2Reference.set(new ExecutionEngineJNI(cluster.getRelativeIndex(), 2, 1, 0, "", 0, 64 * 1024, 100,
                    new HashinatorConfig(HashinatorType.LEGACY, configBytes, 0, 0), false));
        }
    };
    site2Thread.start();
    site2Thread.join();
    // create two EEs
    ee1 = site1Reference.get();
    ee1.loadCatalog(0, catalog.serialize());
    ee2 = site2Reference.get();
    ee2.loadCatalog(0, catalog.serialize());
    // cache some plan fragments
    selectStmt = selectProc.getStatements().get("selectAll");
    assert (selectStmt != null);
    int i = 0;
    // this kinda assumes the right order
    for (PlanFragment f : selectStmt.getFragments()) {
        if (i == 0)
            selectTopFrag = f;
        else
            selectBottomFrag = f;
        i++;
    }
    assert (selectTopFrag != null);
    assert (selectBottomFrag != null);
    if (selectTopFrag.getHasdependencies() == false) {
        PlanFragment temp = selectTopFrag;
        selectTopFrag = selectBottomFrag;
        selectBottomFrag = temp;
    }
    // get the insert frag
    Statement insertStmt = insertProc.getStatements().get("insert");
    assert (insertStmt != null);
    for (PlanFragment f : insertStmt.getFragments())
        insertFrag = f;
    // populate plan cache
    ActivePlanRepository.clear();
    ActivePlanRepository.addFragmentForTest(CatalogUtil.getUniqueIdForFragment(selectBottomFrag),
            Encoder.decodeBase64AndDecompressToBytes(selectBottomFrag.getPlannodetree()), selectStmt.getSqltext());
    ActivePlanRepository.addFragmentForTest(CatalogUtil.getUniqueIdForFragment(selectTopFrag),
            Encoder.decodeBase64AndDecompressToBytes(selectTopFrag.getPlannodetree()), selectStmt.getSqltext());
    ActivePlanRepository.addFragmentForTest(CatalogUtil.getUniqueIdForFragment(insertFrag),
            Encoder.decodeBase64AndDecompressToBytes(insertFrag.getPlannodetree()), insertStmt.getSqltext());
    // insert some data
    ParameterSet params = ParameterSet.fromArrayNoCopy(1L, 1L, 1L);
    FastDeserializer fragResult2 = ee2.executePlanFragments(1, new long[] { CatalogUtil.getUniqueIdForFragment(insertFrag) },
            null, new ParameterSet[] { params }, null, new String[] { selectStmt.getSqltext() },
            null, null, 1, 1, 0, 42, Long.MAX_VALUE, false);
    // ignore totalsize field in message
    fragResult2.readInt();
    VoltTable[] results = TableHelper.convertBackedBufferToTables(fragResult2.buffer(), 1);
    assert (results[0].asScalarLong() == 1L);
    params = ParameterSet.fromArrayNoCopy(2L, 2L, 2L);
    FastDeserializer fragResult1 = ee1.executePlanFragments(1, new long[] { CatalogUtil.getUniqueIdForFragment(insertFrag) },
            null, new ParameterSet[] { params }, null, new String[] { selectStmt.getSqltext() },
            null, null, 2, 2, 1, 42, Long.MAX_VALUE, false);
    // ignore totalsize field in message
    fragResult1.readInt();
    results = TableHelper.convertBackedBufferToTables(fragResult1.buffer(), 1);
    assert (fragResult1.buffer() != fragResult2.buffer());
    assert (results[0].asScalarLong() == 1L);
}
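setUp() relies on iteration order to pick the top and bottom fragments of the multi-partition select, then swaps them if getHasdependencies() says otherwise. Below is a sketch of doing the same split by the dependency flag alone, without assuming iteration order; FragmentPicker is an invented name for illustration.

import org.voltdb.catalog.PlanFragment;
import org.voltdb.catalog.Statement;

public class FragmentPicker {
    // Split a two-fragment multi-partition statement into its coordinator ("top")
    // and partition ("bottom") fragments, using the same hasdependencies test the
    // setUp() above relies on instead of assuming iteration order.
    static PlanFragment[] topAndBottom(Statement stmt) {
        PlanFragment top = null;
        PlanFragment bottom = null;
        for (PlanFragment f : stmt.getFragments()) {
            if (f.getHasdependencies()) {
                top = f;       // the coordinator fragment consumes dependencies
            } else {
                bottom = f;    // the per-partition fragment produces them
            }
        }
        if (top == null || bottom == null) {
            throw new IllegalStateException(stmt.getTypeName() + " is not a two-fragment statement");
        }
        return new PlanFragment[] { top, bottom };
    }
}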