Usage of org.hsqldb_voltpatches.lib.HsqlArrayList in the VoltDB project: class DatabaseInformationMain, method SYSTEM_CROSSREFERENCE.
/**
 * Retrieves a <code>Table</code> object describing, for each
 * accessible referencing and referenced table, how the referencing
 * tables import, for the purposes of referential integrity,
 * the columns of the referenced tables.<p>
 *
 * Each row is a foreign key column description with the following
 * columns: <p>
 *
 * <pre class="SqlCodeExample">
 * PKTABLE_CAT VARCHAR referenced table catalog
 * PKTABLE_SCHEM VARCHAR referenced table schema
 * PKTABLE_NAME VARCHAR referenced table name
 * PKCOLUMN_NAME VARCHAR referenced column name
 * FKTABLE_CAT VARCHAR referencing table catalog
 * FKTABLE_SCHEM VARCHAR referencing table schema
 * FKTABLE_NAME VARCHAR referencing table name
 * FKCOLUMN_NAME VARCHAR referencing column
 * KEY_SEQ SMALLINT sequence number within foreign key
 * UPDATE_RULE SMALLINT
 * { Cascade | Set Null | Set Default | Restrict (No Action)}?
 * DELETE_RULE SMALLINT
 * { Cascade | Set Null | Set Default | Restrict (No Action)}?
 * FK_NAME VARCHAR foreign key constraint name
 * PK_NAME VARCHAR primary key or unique constraint name
 * DEFERRABILITY SMALLINT
 * { initially deferred | initially immediate | not deferrable }
 * </pre> <p>
 *
 * @return a <code>Table</code> object describing how accessible tables
 * import other accessible tables' primary key and/or unique
 * constraint columns
 */
final Table SYSTEM_CROSSREFERENCE() {
Table t = sysTables[SYSTEM_CROSSREFERENCE];
// First call: build the table definition (columns plus a pseudo primary
// key) and return the empty table. Rows are produced on later calls.
// NOTE(review): the column layout appears to mirror the JDBC
// DatabaseMetaData.getCrossReference() result set — confirm against
// the java.sql documentation before relying on ordering.
if (t == null) {
t = createBlankTable(sysTableHsqlNames[SYSTEM_CROSSREFERENCE]);
addColumn(t, "PKTABLE_CAT", SQL_IDENTIFIER);
addColumn(t, "PKTABLE_SCHEM", SQL_IDENTIFIER);
// not null
addColumn(t, "PKTABLE_NAME", SQL_IDENTIFIER);
// not null
addColumn(t, "PKCOLUMN_NAME", SQL_IDENTIFIER);
addColumn(t, "FKTABLE_CAT", SQL_IDENTIFIER);
addColumn(t, "FKTABLE_SCHEM", SQL_IDENTIFIER);
// not null
addColumn(t, "FKTABLE_NAME", SQL_IDENTIFIER);
// not null
addColumn(t, "FKCOLUMN_NAME", SQL_IDENTIFIER);
// not null
addColumn(t, "KEY_SEQ", Type.SQL_SMALLINT);
// not null
addColumn(t, "UPDATE_RULE", Type.SQL_SMALLINT);
// not null
addColumn(t, "DELETE_RULE", Type.SQL_SMALLINT);
addColumn(t, "FK_NAME", SQL_IDENTIFIER);
addColumn(t, "PK_NAME", SQL_IDENTIFIER);
// not null
addColumn(t, "DEFERRABILITY", Type.SQL_SMALLINT);
// order: FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, and KEY_SEQ
// added for unique: FK_NAME
// false PK, as FKTABLE_CAT, FKTABLE_SCHEM and/or FK_NAME
// may be null
HsqlName name = HsqlNameManager.newInfoSchemaObjectName(sysTableHsqlNames[SYSTEM_CROSSREFERENCE].name, false, SchemaObject.INDEX);
t.createPrimaryKey(name, new int[] { 4, 5, 6, 8, 11 }, false);
return t;
}
// Subsequent calls: populate the table from the current schema state.
PersistentStore store = database.persistentStoreCollection.getStore(t);
// calculated column values
String pkTableCatalog;
String pkTableSchema;
String pkTableName;
String pkColumnName;
String fkTableCatalog;
String fkTableSchema;
String fkTableName;
String fkColumnName;
Integer keySequence;
Integer updateRule;
Integer deleteRule;
String fkName;
String pkName;
Integer deferrability;
// Intermediate holders
Iterator tables;
Table table;
Table fkTable;
Table pkTable;
int columnCount;
int[] mainCols;
int[] refCols;
Constraint[] constraints;
Constraint constraint;
int constraintCount;
HsqlArrayList fkConstraintsList;
Object[] row;
DITableInfo pkInfo;
DITableInfo fkInfo;
// column number mappings: positions in the row produced below; must
// agree with the addColumn(...) order in the definition branch above.
final int ipk_table_cat = 0;
final int ipk_table_schem = 1;
final int ipk_table_name = 2;
final int ipk_column_name = 3;
final int ifk_table_cat = 4;
final int ifk_table_schem = 5;
final int ifk_table_name = 6;
final int ifk_column_name = 7;
final int ikey_seq = 8;
final int iupdate_rule = 9;
final int idelete_rule = 10;
final int ifk_name = 11;
final int ipk_name = 12;
final int ideferrability = 13;
tables = database.schemaManager.databaseObjectIterator(SchemaObject.TABLE);
pkInfo = new DITableInfo();
fkInfo = new DITableInfo();
// We must consider all the constraints in all the user tables, since
// this is where reference relationships are recorded. However, we
// are only concerned with Constraint.FOREIGN_KEY constraints here
// because their corresponding Constraint.MAIN entries are essentially
// duplicate data recorded in the referenced rather than the
// referencing table. Also, we skip constraints where either
// the referenced, referencing or both tables are not accessible
// relative to the session of the calling context
fkConstraintsList = new HsqlArrayList();
// Pass 1: collect every accessible FOREIGN KEY constraint.
while (tables.hasNext()) {
table = (Table) tables.next();
if (table.isView() || !isAccessibleTable(table)) {
continue;
}
constraints = table.getConstraints();
constraintCount = constraints.length;
for (int i = 0; i < constraintCount; i++) {
constraint = (Constraint) constraints[i];
if (constraint.getConstraintType() == Constraint.FOREIGN_KEY && isAccessibleTable(constraint.getRef())) {
fkConstraintsList.add(constraint);
}
}
}
// Pass 2: emit one row per referencing column of each FK constraint.
for (int i = 0; i < fkConstraintsList.size(); i++) {
constraint = (Constraint) fkConstraintsList.get(i);
// getMain() is the referenced (PK side) table; getRef() is the
// referencing (FK side) table.
pkTable = constraint.getMain();
pkInfo.setTable(pkTable);
pkTableName = pkInfo.getName();
fkTable = constraint.getRef();
fkInfo.setTable(fkTable);
fkTableName = fkInfo.getName();
pkTableCatalog = pkTable.getCatalogName().name;
pkTableSchema = pkTable.getSchemaName().name;
fkTableCatalog = fkTable.getCatalogName().name;
fkTableSchema = fkTable.getSchemaName().name;
mainCols = constraint.getMainColumns();
refCols = constraint.getRefColumns();
columnCount = refCols.length;
fkName = constraint.getRefName().name;
pkName = constraint.getMainName().name;
deferrability = ValuePool.getInt(constraint.getDeferability());
//pkName = constraint.getMainIndex().getName().name;
deleteRule = ValuePool.getInt(constraint.getDeleteAction());
updateRule = ValuePool.getInt(constraint.getUpdateAction());
for (int j = 0; j < columnCount; j++) {
// KEY_SEQ is 1-based.
keySequence = ValuePool.getInt(j + 1);
pkColumnName = pkInfo.getColName(mainCols[j]);
fkColumnName = fkInfo.getColName(refCols[j]);
row = t.getEmptyRowData();
row[ipk_table_cat] = pkTableCatalog;
row[ipk_table_schem] = pkTableSchema;
row[ipk_table_name] = pkTableName;
row[ipk_column_name] = pkColumnName;
row[ifk_table_cat] = fkTableCatalog;
row[ifk_table_schem] = fkTableSchema;
row[ifk_table_name] = fkTableName;
row[ifk_column_name] = fkColumnName;
row[ikey_seq] = keySequence;
row[iupdate_rule] = updateRule;
row[idelete_rule] = deleteRule;
row[ifk_name] = fkName;
row[ipk_name] = pkName;
row[ideferrability] = deferrability;
t.insertSys(store, row);
}
}
return t;
}
Usage of org.hsqldb_voltpatches.lib.HsqlArrayList in the VoltDB project: class StatementSchema, method dropUser.
/**
 * Drops the named user.
 *
 * @param session the executing session
 * @param name    the user to drop
 * @param cascade when true, schemas owned by the user are dropped too;
 *                when false, any owned schema aborts the drop
 */
private static void dropUser(Session session, HsqlName name, boolean cascade) {
    Grantee user = session.database.getUserManager().get(name.name);

    // A user with an active session cannot be dropped.
    if (session.database.getSessionManager().isUserActive(name.name)) {
        throw Error.error(ErrorCode.X_42539);
    }

    // Without CASCADE, refuse if the user still owns any schema,
    // reporting the first such schema by name.
    if (!cascade && session.database.schemaManager.hasSchemas(user)) {
        Schema firstOwned = (Schema) session.database.schemaManager.getSchemas(user).get(0);

        throw Error.error(ErrorCode.X_42502, firstOwned.getName().statementName);
    }

    session.database.schemaManager.dropSchemas(user, cascade);
    session.database.getUserManager().dropUser(name.name);
}
Usage of org.hsqldb_voltpatches.lib.HsqlArrayList in the VoltDB project: class StatementSchemaDefinition, method getResult.
/**
 * Executes this schema-definition statement: runs statements[0]
 * (presumably the CREATE SCHEMA itself — confirm against the parser),
 * then each contained element statement inside the new schema,
 * deferring FOREIGN KEY constraints declared in CREATE TABLE elements
 * until all tables exist. On any error the partially created schema is
 * dropped; the session's original current schema is always restored.
 */
Result getResult(Session session) {
schemaName = statements[0].getSchemaName();
if (this.isExplain) {
return Result.newSingleColumnStringResult("OPERATION", describe(session));
}
StatementSchema cs;
Result result = statements[0].execute(session);
// FK constraints harvested from CREATE TABLE elements, applied after
// all element statements have run (referenced tables may come later).
HsqlArrayList constraints = new HsqlArrayList();
if (statements.length == 1 || result.isError()) {
return result;
}
// Remembered so the caller's current schema can be restored at the end.
HsqlName oldSessionSchema = session.getCurrentSchemaHsqlName();
for (int i = 1; i < statements.length; i++) {
try {
session.setSchema(schemaName.name);
// Deliberately ignored: best-effort switch into the new schema.
} catch (HsqlException e) {
}
statements[i].setSchemaHsqlName(schemaName);
// Recompile the element from its SQL text in the new schema context.
session.parser.reset(statements[i].getSQL());
try {
session.parser.read();
switch(statements[i].getType()) {
case StatementTypes.GRANT:
case StatementTypes.GRANT_ROLE:
result = statements[i].execute(session);
break;
case StatementTypes.CREATE_TABLE:
cs = session.parser.compileCreate();
cs.isSchemaDefinition = true;
cs.setSchemaHsqlName(schemaName);
if (session.parser.token.tokenType != Tokens.X_ENDPARSE) {
throw session.parser.unexpectedToken();
}
result = cs.execute(session);
// Move the table's FK constraints (arguments[1]) into the
// deferred list; clearing prevents them being applied twice.
constraints.addAll((HsqlArrayList) cs.arguments[1]);
((HsqlArrayList) cs.arguments[1]).clear();
break;
case StatementTypes.CREATE_ROLE:
case StatementTypes.CREATE_SEQUENCE:
case StatementTypes.CREATE_TYPE:
case StatementTypes.CREATE_CHARACTER_SET:
case StatementTypes.CREATE_COLLATION:
result = statements[i].execute(session);
break;
case StatementTypes.CREATE_INDEX:
case StatementTypes.CREATE_TRIGGER:
case StatementTypes.CREATE_VIEW:
case StatementTypes.CREATE_DOMAIN:
case StatementTypes.CREATE_ROUTINE:
cs = session.parser.compileCreate();
cs.isSchemaDefinition = true;
cs.setSchemaHsqlName(schemaName);
if (session.parser.token.tokenType != Tokens.X_ENDPARSE) {
throw session.parser.unexpectedToken();
}
result = cs.execute(session);
break;
case StatementTypes.CREATE_ASSERTION:
case StatementTypes.CREATE_TRANSFORM:
case StatementTypes.CREATE_TRANSLATION:
case StatementTypes.CREATE_CAST:
case StatementTypes.CREATE_ORDERING:
throw session.parser.unsupportedFeature();
default:
throw Error.runtimeError(ErrorCode.U_S0500, "");
}
// Stop at the first failing element; cleanup happens below.
if (result.isError()) {
break;
}
} catch (HsqlException e) {
result = Result.newErrorResult(e, statements[i].getSQL());
}
}
// All elements succeeded: now apply the deferred FK constraints.
if (!result.isError()) {
try {
for (int i = 0; i < constraints.size(); i++) {
Constraint c = (Constraint) constraints.get(i);
Table table = session.database.schemaManager.getUserTable(session, c.core.refTableName);
ParserDDL.addForeignKey(session, table, c, null);
}
} catch (HsqlException e) {
result = Result.newErrorResult(e, sql);
}
}
// On any failure, roll back by dropping the whole new schema.
if (result.isError()) {
try {
session.database.schemaManager.dropSchema(schemaName.name, true);
session.database.logger.writeToLog(session, getDropSchemaStatement(schemaName));
// Deliberately ignored: cleanup is best-effort.
} catch (HsqlException e) {
}
}
try {
// A VoltDB extension to disable
// Try not to explicitly throw an exception, just to catch and ignore it,
// but accidents can happen, so keep the try/catch anyway.
session.setSchemaNoThrow(oldSessionSchema.name);
/* disable 1 line ...
session.setSchema(oldSessionSchema.name);
... disabled 1 line */
// End of VoltDB extension
} catch (Exception e) {
}
return result;
}
Usage of org.hsqldb_voltpatches.lib.HsqlArrayList in the VoltDB project: class Table, method moveDefinition.
/**
 * Builds and returns a new Table reflecting the removal or addition of
 * columns, constraints and indexes; the receiver is left unchanged.
 *
 * Does not work in this form for FK's as Constraint.ConstraintCore
 * is not transfered to a referencing or referenced table
 *
 * @param session    the executing session
 * @param newType    table type for the new table
 * @param column     column to add at colIndex, or null
 * @param constraint constraint to add, or null; a PRIMARY_KEY constraint
 *                   replaces the existing primary key
 * @param index      index to add, or null
 * @param colIndex   position affected by the change
 * @param adjust     column-count delta; adjust <= 0 drops/replaces the
 *                   existing column at colIndex, adjust > 0 inserts
 * @param dropConstraints names of constraints to omit from the copy
 * @param dropIndexes     names of indexes to omit from the copy
 */
Table moveDefinition(Session session, int newType, ColumnSchema column, Constraint constraint, Index index, int colIndex, int adjust, OrderedHashSet dropConstraints, OrderedHashSet dropIndexes) {
boolean newPK = false;
if (constraint != null && constraint.constType == Constraint.PRIMARY_KEY) {
newPK = true;
}
Table tn = new Table(database, tableName, newType);
if (tableType == TEMP_TABLE) {
tn.persistenceScope = persistenceScope;
}
// Copy the columns, splicing in the new column (if any) at colIndex;
// adjust <= 0 skips the old column there (drop/replace semantics).
for (int i = 0; i < getColumnCount(); i++) {
ColumnSchema col = (ColumnSchema) columnList.get(i);
if (i == colIndex) {
if (column != null) {
tn.addColumn(column);
}
if (adjust <= 0) {
continue;
}
}
tn.addColumn(col);
}
// Appending past the last existing column.
if (getColumnCount() == colIndex) {
tn.addColumn(column);
}
int[] pkCols = null;
// Keep the current PK columns (remapped for the splice) unless the PK
// is being dropped; a supplied PRIMARY_KEY constraint provides its own.
if (hasPrimaryKey() && !dropConstraints.contains(getPrimaryConstraint().getName())) {
pkCols = primaryKeyCols;
pkCols = ArrayUtil.toAdjustedColumnArray(pkCols, colIndex, adjust);
} else if (newPK) {
pkCols = constraint.getMainColumns();
}
tn.createPrimaryKey(getIndex(0).getName(), pkCols, false);
// Copy the remaining indexes (index 0 is the primary index created
// above), skipping dropped ones and remapping column positions.
for (int i = 1; i < indexList.length; i++) {
Index idx = indexList[i];
if (dropIndexes.contains(idx.getName())) {
continue;
}
int[] colarr = ArrayUtil.toAdjustedColumnArray(idx.getColumns(), colIndex, adjust);
// A VoltDB extension to support indexed expressions and assume unique attribute
Expression[] exprArr = idx.getExpressions();
boolean assumeUnique = idx.isAssumeUnique();
Expression predicate = idx.getPredicate();
// End of VoltDB extension
idx = tn.createIndexStructure(idx.getName(), colarr, idx.getColumnDesc(), null, idx.isUnique(), idx.isConstraint(), idx.isForward());
// A VoltDB extension to support indexed expressions and assume unique attribute and partial indexes
if (exprArr != null) {
idx = idx.withExpressions(adjustExprs(exprArr, colIndex, adjust));
}
if (predicate != null) {
idx = idx.withPredicate(adjustExpr(predicate, colIndex, adjust));
}
idx = idx.setAssumeUnique(assumeUnique);
// End of VoltDB extension
tn.addIndex(idx);
}
if (index != null) {
tn.addIndex(index);
}
// Rebuild the constraint list: the new PK (if any), surviving copies
// of the existing constraints, then any other supplied constraint.
HsqlArrayList newList = new HsqlArrayList();
if (newPK) {
// Repoint the new PK constraint at the new table's primary index.
constraint.core.mainIndex = tn.indexList[0];
constraint.core.mainTable = tn;
constraint.core.mainTableName = tn.tableName;
newList.add(constraint);
}
for (int i = 0; i < constraintList.length; i++) {
Constraint c = constraintList[i];
if (dropConstraints.contains(c.getName())) {
continue;
}
c = c.duplicate();
c.updateTable(session, this, tn, colIndex, adjust);
newList.add(c);
}
if (!newPK && constraint != null) {
constraint.updateTable(session, this, tn, -1, 0);
newList.add(constraint);
}
tn.constraintList = new Constraint[newList.size()];
newList.toArray(tn.constraintList);
tn.updateConstraintLists();
tn.setBestRowIdentifiers();
// Triggers are shared by reference, not copied.
tn.triggerList = triggerList;
tn.triggerLists = triggerLists;
return tn;
}
Usage of org.hsqldb_voltpatches.lib.HsqlArrayList in the VoltDB project: class StatementDML, method voltAppendSortAndSlice.
/**
 * Appends the XML serialization of this DELETE statement's
 * ORDER BY / LIMIT / OFFSET clauses to the given element.
 * No-op when the statement has no sort-and-slice.
 *
 * @param session the current session
 * @param xml     element to which the clause XML is appended
 * @throws HSQLParseException for unsupported or non-deterministic forms
 */
private void voltAppendSortAndSlice(Session session, VoltXMLElement xml) throws HSQLParseException {
    if (m_sortAndSlice == null || m_sortAndSlice == SortAndSlice.noSort) {
        return;
    }

    // Resolving ORDER BY columns against a view needs more work, so
    // reject view targets for now.
    if (targetTable.getBaseTable() != targetTable) {
        throw new HSQLParseException("DELETE with ORDER BY, LIMIT or OFFSET is currently unsupported on views.");
    }

    boolean hasLimit = m_sortAndSlice.hasLimit();
    boolean hasOrder = m_sortAndSlice.hasOrder();

    // LIMIT/OFFSET and ORDER BY must appear together: one without the
    // other is either non-deterministic or meaningless for a DELETE.
    if (hasLimit != hasOrder) {
        if (hasLimit) {
            throw new HSQLParseException("DELETE statement with LIMIT or OFFSET but no ORDER BY would produce non-deterministic results. Please use an ORDER BY clause.");
        }
        throw new HSQLParseException("DELETE statement with ORDER BY but no LIMIT or OFFSET is not allowed. Consider removing the ORDER BY clause, as it has no effect here.");
    }

    List<VoltXMLElement> clauseElements = voltGetLimitOffsetXMLFromSortAndSlice(session, m_sortAndSlice);

    // Serialize the ORDER BY expressions directly. This is deliberately
    // not shared with SELECT's ORDER BY serialization, which has extra
    // handling for references to SELECT-clause aliases and the like.
    HsqlArrayList sortExprs = m_sortAndSlice.exprList;
    if (sortExprs != null) {
        VoltXMLElement orderColumnsXml = new VoltXMLElement("ordercolumns");
        for (int idx = 0; idx < sortExprs.size(); ++idx) {
            Expression sortExpr = (Expression) sortExprs.get(idx);
            orderColumnsXml.children.add(sortExpr.voltGetXML(session));
        }
        clauseElements.add(orderColumnsXml);
    }

    xml.children.addAll(clauseElements);
}
Aggregations