Usage of org.hsqldb_voltpatches.lib.OrderedHashSet in the VoltDB project — class DatabaseInformationFull, method TRIGGER_ROUTINE_USAGE:
// Builds the INFORMATION_SCHEMA.TRIGGER_ROUTINE_USAGE system table: one row
// per routine (FUNCTION or PROCEDURE) referenced by a trigger, restricted to
// triggers and routines accessible to the current session's grantee.
Table TRIGGER_ROUTINE_USAGE() {
Table t = sysTables[TRIGGER_ROUTINE_USAGE];
// First call: the table does not exist yet, so create the column layout and
// primary key and return the empty definition; rows are added on later calls.
if (t == null) {
t = createBlankTable(sysTableHsqlNames[TRIGGER_ROUTINE_USAGE]);
addColumn(t, "TRIGGER_CATALOG", SQL_IDENTIFIER);
addColumn(t, "TRIGGER_SCHEMA", SQL_IDENTIFIER);
// not null
addColumn(t, "TRIGGER_NAME", SQL_IDENTIFIER);
addColumn(t, "SPECIFIC_CATALOG", SQL_IDENTIFIER);
addColumn(t, "SPECIFIC_SCHEMA", SQL_IDENTIFIER);
// not null
addColumn(t, "SPECIFIC_NAME", SQL_IDENTIFIER);
// All six columns form the primary key: one trigger may use many routines.
HsqlName name = HsqlNameManager.newInfoSchemaObjectName(sysTableHsqlNames[TRIGGER_ROUTINE_USAGE].name, false, SchemaObject.INDEX);
t.createPrimaryKey(name, new int[] { 0, 1, 2, 3, 4, 5 }, false);
return t;
}
PersistentStore store = database.persistentStoreCollection.getStore(t);
// column number mappings
final int trigger_catalog = 0;
final int trigger_schema = 1;
final int trigger_name = 2;
final int specific_catalog = 3;
final int specific_schema = 4;
final int specific_name = 5;
Iterator it;
Object[] row;
it = database.schemaManager.databaseObjectIterator(SchemaObject.TRIGGER);
while (it.hasNext()) {
TriggerDef trigger = (TriggerDef) it.next();
// skip triggers the current grantee is not allowed to see
if (!session.getGrantee().isAccessible(trigger)) {
continue;
}
// walk everything the trigger references, keeping only routines
OrderedHashSet set = trigger.getReferences();
for (int i = 0; i < set.size(); i++) {
HsqlName refName = (HsqlName) set.get(i);
if (refName.type != SchemaObject.FUNCTION && refName.type != SchemaObject.PROCEDURE) {
continue;
}
// the referenced routine itself must also be accessible
if (!session.getGrantee().isAccessible(refName)) {
continue;
}
row = t.getEmptyRowData();
row[trigger_catalog] = database.getCatalogName().name;
row[trigger_schema] = trigger.getSchemaName().name;
row[trigger_name] = trigger.getName().name;
row[specific_catalog] = database.getCatalogName().name;
row[specific_schema] = refName.schema.name;
row[specific_name] = refName.name;
try {
t.insertSys(store, row);
} catch (HsqlException e) {
// intentionally ignored: best-effort insert — presumably a duplicate
// primary-key row is simply skipped (NOTE(review): confirm no other
// failure mode should be surfaced here)
}
}
}
return t;
}
Usage of org.hsqldb_voltpatches.lib.OrderedHashSet in the VoltDB project — class StatementSchema, method dropDomain:
/**
 * Drops the DOMAIN identified by {@code name} from the schema.
 *
 * <p>With {@code cascade} false (RESTRICT semantics), the drop fails if any
 * other schema object still references the domain. The domain's own
 * constraints are always removed along with it.
 *
 * @param session the executing session
 * @param name    name of the domain to drop
 * @param cascade true to drop dependent objects as well
 */
private static void dropDomain(Session session, HsqlName name, boolean cascade) {
    Type domainType = (Type) session.database.schemaManager.getSchemaObject(name);
    OrderedHashSet names =
        session.database.schemaManager.getReferencingObjects(domainType.getName());

    // RESTRICT: refuse the drop while anything still references the domain
    if (!cascade && !names.isEmpty()) {
        HsqlName referencing = (HsqlName) names.get(0);

        throw Error.error(ErrorCode.X_42502,
                          referencing.getSchemaQualifiedStatementName());
    }

    // reuse the set to collect the names of the domain's own constraints
    names.clear();

    Constraint[] domainConstraints = domainType.userTypeModifier.getConstraints();

    for (Constraint constraint : domainConstraints) {
        names.add(constraint.getName());
    }

    session.database.schemaManager.removeSchemaObjects(names);
    session.database.schemaManager.removeSchemaObject(domainType.getName(), cascade);

    // detach the modifier so the Type object no longer behaves as a domain
    domainType.userTypeModifier = null;
}
Usage of org.hsqldb_voltpatches.lib.OrderedHashSet in the VoltDB project — class TableWorks, method makeNewTables:
/**
 * Rebuilds every table in {@code tableSet} without the given constraints and
 * indexes, returning the set of replacement {@code Table} objects.
 *
 * @param tableSet          tables to rebuild
 * @param dropConstraintSet names of constraints to omit from the new tables
 * @param dropIndexSet      names of indexes to omit from the new tables
 * @return the freshly built tables, in the same order as {@code tableSet}
 */
OrderedHashSet makeNewTables(OrderedHashSet tableSet, OrderedHashSet dropConstraintSet, OrderedHashSet dropIndexSet) {
    OrderedHashSet rebuiltTables = new OrderedHashSet();
    int count = tableSet.size();

    for (int index = 0; index < count; index++) {
        Table original = (Table) tableSet.get(index);

        // each table gets its own TableWorks so the rebuild is independent
        TableWorks worker = new TableWorks(session, original);

        worker.makeNewTable(dropConstraintSet, dropIndexSet);
        rebuiltTables.add(worker.getTable());
    }

    return rebuiltTables;
}
Usage of org.hsqldb_voltpatches.lib.OrderedHashSet in the VoltDB project — class TableWorks, method dropConstraint:
// Drops the named constraint from this TableWorks' table. For PRIMARY KEY /
// UNIQUE and FOREIGN KEY constraints the table (and any tables holding
// dependent constraints) must be rebuilt via moveDefinition/moveData and
// swapped into the schema; CHECK and LIMIT constraints are removed in place.
// The statement order in the rebuild branches is load-bearing: data is moved
// before schema objects are removed and stores released.
void dropConstraint(String name, boolean cascade) {
Constraint constraint = table.getConstraint(name);
if (constraint == null) {
throw Error.error(ErrorCode.X_42501, name);
}
switch(constraint.getConstraintType()) {
case Constraint.MAIN:
// the MAIN half of an FK is dropped via its REF side, never directly
throw Error.error(ErrorCode.X_28502);
case Constraint.PRIMARY_KEY:
case Constraint.UNIQUE:
{
OrderedHashSet dependentConstraints = table.getDependentConstraints(constraint);
// throw if unique constraint is referenced by foreign key
if (!cascade && !dependentConstraints.isEmpty()) {
Constraint c = (Constraint) dependentConstraints.get(0);
throw Error.error(ErrorCode.X_42533, c.getName().getSchemaQualifiedStatementName());
}
// collect the other tables touched by dependent FKs, plus every
// constraint name and FK-backing index name that must disappear
OrderedHashSet tableSet = new OrderedHashSet();
OrderedHashSet constraintNameSet = new OrderedHashSet();
OrderedHashSet indexNameSet = new OrderedHashSet();
for (int i = 0; i < dependentConstraints.size(); i++) {
Constraint c = (Constraint) dependentConstraints.get(i);
Table t = c.getMain();
if (t != table) {
tableSet.add(t);
}
t = c.getRef();
if (t != table) {
tableSet.add(t);
}
constraintNameSet.add(c.getMainName());
constraintNameSet.add(c.getRefName());
indexNameSet.add(c.getRefIndex().getName());
}
constraintNameSet.add(constraint.getName());
// a UNIQUE constraint owns its index; a PK's index is the table's own
if (constraint.getConstraintType() == Constraint.UNIQUE) {
indexNameSet.add(constraint.getMainIndex().getName());
}
// rebuild this table without the dropped constraints/indexes, copy the
// rows across, then rebuild the dependent tables the same way
Table tn = table.moveDefinition(session, table.tableType, null, null, null, -1, 0, constraintNameSet, indexNameSet);
tn.moveData(session, table, -1, 0);
tableSet = makeNewTables(tableSet, constraintNameSet, indexNameSet);
// dropping a PK clears the primary-key flag on its columns
if (constraint.getConstraintType() == Constraint.PRIMARY_KEY) {
int[] cols = constraint.getMainColumns();
for (int i = 0; i < cols.length; i++) {
tn.getColumn(cols[i]).setPrimaryKey(false);
tn.setColumnTypeVars(cols[i]);
}
}
//
database.schemaManager.removeSchemaObjects(constraintNameSet);
// swap the new tables into the schema and re-point constraints at them
setNewTableInSchema(tn);
setNewTablesInSchema(tableSet);
updateConstraints(tn, emptySet);
updateConstraints(tableSet, constraintNameSet);
database.persistentStoreCollection.releaseStore(table);
database.schemaManager.recompileDependentObjects(tableSet);
database.schemaManager.recompileDependentObjects(tn);
table = tn;
// handle cascadingConstraints and cascadingTables
break;
}
case Constraint.FOREIGN_KEY:
{
// an FK is a MAIN/REF name pair plus the REF-side index
OrderedHashSet constraints = new OrderedHashSet();
Table mainTable = constraint.getMain();
HsqlName mainName = constraint.getMainName();
boolean isSelf = mainTable == table;
constraints.add(mainName);
constraints.add(constraint.getRefName());
OrderedHashSet indexes = new OrderedHashSet();
indexes.add(constraint.getRefIndex().getName());
// rebuild the referencing (this) table without the FK, then move data
Table tn = table.moveDefinition(session, table.tableType, null, null, null, -1, 0, constraints, indexes);
tn.moveData(session, table, -1, 0);
//
database.schemaManager.removeSchemaObject(constraint.getName());
setNewTableInSchema(tn);
// a self-referencing FK has no separate main table to clean up
if (!isSelf) {
mainTable.removeConstraint(mainName.name);
}
database.persistentStoreCollection.releaseStore(table);
database.schemaManager.recompileDependentObjects(table);
table = tn;
break;
}
case Constraint.CHECK:
// in-place removal; no table rebuild needed
database.schemaManager.removeSchemaObject(constraint.getName());
// a NOT NULL constraint is stored as a single-column CHECK
if (constraint.isNotNull()) {
ColumnSchema column = table.getColumn(constraint.notNullColumnIndex);
column.setNullable(false);
table.setColumnTypeVars(constraint.notNullColumnIndex);
}
break;
// A VoltDB extension to support LIMIT PARTITION ROWS
case Constraint.LIMIT:
database.schemaManager.removeSchemaObject(constraint.getName());
break;
}
}
Usage of org.hsqldb_voltpatches.lib.OrderedHashSet in the VoltDB project — class TableWorks, method dropIndex:
/**
 * Drops the named index from the table.
 *
 * <p>Because of the way indexes and column data are held in memory and on
 * disk, a non-empty table whose indexing is not mutable must be recreated
 * when an index is removed. Originally this broke existing foreign keys by
 * changing the table order in the DB; the new table is now linked in place
 * of the old table (fredt@users).
 *
 * @param indexName name of the index to drop
 */
void dropIndex(String indexName) {
    Index droppedIndex = table.getIndex(indexName);

    if (table.isIndexingMutable()) {
        // indexing can be altered in place — no rebuild required
        table.dropIndex(session, indexName);
    } else {
        // rebuild the table without the index, copy the data across, and
        // link the replacement into the schema in place of the old table
        OrderedHashSet droppedIndexNames = new OrderedHashSet();

        droppedIndexNames.add(table.getIndex(indexName).getName());

        Table replacement = table.moveDefinition(session, table.tableType, null, null, null, -1, 0, emptySet, droppedIndexNames);

        replacement.moveData(session, table, -1, 0);
        updateConstraints(replacement, emptySet);
        setNewTableInSchema(replacement);
        database.persistentStoreCollection.releaseStore(table);

        table = replacement;
    }

    // an index backing a constraint is owned by that constraint and is not
    // removed from the schema here
    if (!droppedIndex.isConstraint()) {
        database.schemaManager.removeSchemaObject(droppedIndex.getName());
    }

    database.schemaManager.recompileDependentObjects(table);
}
Aggregations