Example usage of org.hsqldb_voltpatches.lib.OrderedIntHashSet in the VoltDB project: class ParserDDL, method readFKReferences.
/**
 * Parses a FOREIGN KEY REFERENCES clause: the referenced (main) table and
 * its optional column list, an optional MATCH clause, and at most one
 * ON DELETE and one ON UPDATE referential action.
 *
 * @param refTable       the referencing table being created or altered
 * @param constraintName explicit constraint name, or null to auto-generate one
 * @param refColSet      referencing column names already parsed by the caller
 * @return a FOREIGN_KEY Constraint describing the reference
 */
private Constraint readFKReferences(Table refTable, HsqlName constraintName,
                                    OrderedHashSet refColSet) {

    HsqlName       mainTableName;
    OrderedHashSet mainColSet = null;

    readThis(Tokens.REFERENCES);

    HsqlName schema;

    // An unqualified referenced-table name resolves to the referencing
    // table's own schema.
    if (token.namePrefix == null) {
        schema = refTable.getSchemaName();
    } else {
        schema = database.schemaManager.getSchemaHsqlName(token.namePrefix);
    }

    // Self-reference: reuse the table's own name object instead of looking
    // the table up (during CREATE TABLE it is not registered yet).
    if (refTable.getSchemaName() == schema
            && refTable.getName().name.equals(token.tokenString)) {
        mainTableName = refTable.getName();

        read();
    } else {
        mainTableName = readFKTableName(schema);
    }

    if (token.tokenType == Tokens.OPENBRACKET) {
        mainColSet = readColumnNames(false);
    } else {
        // No explicit column list: columns are resolved in the calling
        // method. For a self-referencing FK inside CREATE TABLE the
        // reference must be to the same table being created.
    }

    int matchType = OpTypes.MATCH_SIMPLE;

    // Optional MATCH { SIMPLE | FULL } clause; MATCH PARTIAL is unsupported.
    if (token.tokenType == Tokens.MATCH) {
        read();

        switch (token.tokenType) {
            case Tokens.SIMPLE :
                read();
                break;

            case Tokens.PARTIAL :
                throw super.unsupportedFeature();

            case Tokens.FULL :
                read();

                matchType = OpTypes.MATCH_FULL;
                break;

            default :
                throw unexpectedToken();
        }
    }

    // Parse a maximum of two ON clauses following the FK definition:
    //   ON [UPDATE|DELETE] [NO ACTION|RESTRICT|CASCADE|SET [NULL|DEFAULT]]
    // The set rejects a repeated ON DELETE or ON UPDATE.
    int               deleteAction = Constraint.NO_ACTION;
    int               updateAction = Constraint.NO_ACTION;
    OrderedIntHashSet set          = new OrderedIntHashSet();

    while (token.tokenType == Tokens.ON) {
        read();

        if (!set.add(token.tokenType)) {
            throw unexpectedToken();
        }

        if (token.tokenType == Tokens.DELETE) {
            read();

            deleteAction = readReferentialAction();
        } else if (token.tokenType == Tokens.UPDATE) {
            read();

            // BUG FIX: the previous code assigned deleteAction here for
            // SET DEFAULT / SET NULL, silently dropping the declared
            // ON UPDATE action.
            updateAction = readReferentialAction();
        } else {
            throw unexpectedToken();
        }
    }

    if (constraintName == null) {
        constraintName = database.nameManager.newAutoName("FK",
                refTable.getSchemaName(), refTable.getName(),
                SchemaObject.CONSTRAINT);
    }

    return new Constraint(constraintName, refTable.getName(), refColSet,
                          mainTableName, mainColSet, Constraint.FOREIGN_KEY,
                          deleteAction, updateAction, matchType);
}

/**
 * Parses one referential action following ON DELETE / ON UPDATE:
 * SET DEFAULT | SET NULL | CASCADE | RESTRICT | NO ACTION.
 *
 * @return the Constraint action code; RESTRICT and NO ACTION both map to
 *         Constraint.NO_ACTION
 */
private int readReferentialAction() {

    if (token.tokenType == Tokens.SET) {
        read();

        switch (token.tokenType) {
            case Tokens.DEFAULT :
                read();

                return Constraint.SET_DEFAULT;

            case Tokens.NULL :
                read();

                return Constraint.SET_NULL;

            default :
                throw unexpectedToken();
        }
    } else if (token.tokenType == Tokens.CASCADE) {
        read();

        return Constraint.CASCADE;
    } else if (token.tokenType == Tokens.RESTRICT) {
        read();

        return Constraint.NO_ACTION;
    } else {
        readThis(Tokens.NO);
        readThis(Tokens.ACTION);

        return Constraint.NO_ACTION;
    }
}
Example usage of org.hsqldb_voltpatches.lib.OrderedIntHashSet in the VoltDB project: class ParserRoutine, method readRoutineCharacteristics.
/**
 * Parses the optional routine characteristics that may follow a CREATE
 * FUNCTION / CREATE PROCEDURE header — LANGUAGE, PARAMETER STYLE,
 * SPECIFIC name, [NOT] DETERMINISTIC, SQL data access, null-input
 * behavior, DYNAMIC RESULT SETS and SAVEPOINT LEVEL — recording each
 * one on the given Routine. Parsing stops at the first token that does
 * not start a recognized characteristic.
 *
 * Each characteristic may appear at most once; a repeat raises
 * unexpectedToken(). Mutually exclusive alternatives share one key in
 * 'set' (e.g. MODIFIES / NO / READS / CONTAINS all register Tokens.SQL,
 * and RETURNS / CALLED both register Tokens.NULL).
 */
private void readRoutineCharacteristics(Routine routine) {
// one entry per characteristic group already seen (duplicate => error)
OrderedIntHashSet set = new OrderedIntHashSet();
boolean end = false;
while (!end) {
switch(token.tokenType) {
// LANGUAGE { JAVA | SQL }
case Tokens.LANGUAGE:
{
if (!set.add(Tokens.LANGUAGE)) {
throw unexpectedToken();
}
read();
if (token.tokenType == Tokens.JAVA) {
read();
routine.setLanguage(Routine.LANGUAGE_JAVA);
} else if (token.tokenType == Tokens.SQL) {
read();
routine.setLanguage(Routine.LANGUAGE_SQL);
} else {
throw unexpectedToken();
}
break;
}
// PARAMETER STYLE { JAVA | SQL }
case Tokens.PARAMETER:
{
if (!set.add(Tokens.PARAMETER)) {
throw unexpectedToken();
}
read();
readThis(Tokens.STYLE);
if (token.tokenType == Tokens.JAVA) {
read();
routine.setParameterStyle(Routine.PARAM_STYLE_JAVA);
} else {
readThis(Tokens.SQL);
routine.setParameterStyle(Routine.PARAM_STYLE_SQL);
}
break;
}
// SPECIFIC <specific name>
case Tokens.SPECIFIC:
{
if (!set.add(Tokens.SPECIFIC)) {
throw unexpectedToken();
}
read();
HsqlName name = readNewSchemaObjectNameNoCheck(routine.getType());
routine.setSpecificName(name);
break;
}
case Tokens.DETERMINISTIC:
{
if (!set.add(Tokens.DETERMINISTIC)) {
throw unexpectedToken();
}
read();
routine.setDeterministic(true);
break;
}
// NOT DETERMINISTIC — shares the DETERMINISTIC slot so the two
// forms cannot both appear
case Tokens.NOT:
{
if (!set.add(Tokens.DETERMINISTIC)) {
throw unexpectedToken();
}
read();
readThis(Tokens.DETERMINISTIC);
routine.setDeterministic(false);
break;
}
// MODIFIES SQL DATA — procedures only; shares the SQL-data-access
// slot with NO SQL / READS SQL DATA / CONTAINS SQL
case Tokens.MODIFIES:
{
if (!set.add(Tokens.SQL)) {
throw unexpectedToken();
}
if (routine.getType() == SchemaObject.FUNCTION) {
throw unexpectedToken();
}
read();
readThis(Tokens.SQL);
readThis(Tokens.DATA);
routine.setDataImpact(Routine.MODIFIES_SQL);
break;
}
// NO SQL
case Tokens.NO:
{
if (!set.add(Tokens.SQL)) {
throw unexpectedToken();
}
read();
readThis(Tokens.SQL);
routine.setDataImpact(Routine.NO_SQL);
break;
}
// READS SQL DATA
case Tokens.READS:
{
if (!set.add(Tokens.SQL)) {
throw unexpectedToken();
}
read();
readThis(Tokens.SQL);
readThis(Tokens.DATA);
routine.setDataImpact(Routine.READS_SQL);
break;
}
// CONTAINS SQL
case Tokens.CONTAINS:
{
if (!set.add(Tokens.SQL)) {
throw unexpectedToken();
}
read();
readThis(Tokens.SQL);
routine.setDataImpact(Routine.CONTAINS_SQL);
break;
}
// RETURNS NULL ON NULL INPUT — functions only; shares the NULL
// slot with CALLED ON NULL INPUT
case Tokens.RETURNS:
{
if (!set.add(Tokens.NULL) || routine.isProcedure()) {
throw unexpectedToken();
}
read();
readThis(Tokens.NULL);
readThis(Tokens.ON);
readThis(Tokens.NULL);
readThis(Tokens.INPUT);
routine.setNullInputOutput(true);
break;
}
// CALLED ON NULL INPUT — functions only
case Tokens.CALLED:
{
if (!set.add(Tokens.NULL) || routine.isProcedure()) {
throw unexpectedToken();
}
read();
readThis(Tokens.ON);
readThis(Tokens.NULL);
readThis(Tokens.INPUT);
routine.setNullInputOutput(false);
break;
}
// DYNAMIC RESULT SETS <count> — procedures only; the count is
// parsed but its value is discarded here (TODO confirm intended)
case Tokens.DYNAMIC:
{
if (!set.add(Tokens.RESULT) || routine.isFunction()) {
throw unexpectedToken();
}
read();
readThis(Tokens.RESULT);
readThis(Tokens.SETS);
readBigint();
break;
}
// NEW SAVEPOINT LEVEL — procedures only; shares the SAVEPOINT
// slot with OLD SAVEPOINT LEVEL
case Tokens.NEW:
{
if (routine.getType() == SchemaObject.FUNCTION || !set.add(Tokens.SAVEPOINT)) {
throw unexpectedToken();
}
read();
readThis(Tokens.SAVEPOINT);
readThis(Tokens.LEVEL);
routine.setNewSavepointLevel(true);
break;
}
// OLD SAVEPOINT LEVEL — fully parsed, then rejected as unsupported
case Tokens.OLD:
{
if (routine.getType() == SchemaObject.FUNCTION || !set.add(Tokens.SAVEPOINT)) {
throw unexpectedToken();
}
read();
readThis(Tokens.SAVEPOINT);
readThis(Tokens.LEVEL);
routine.setNewSavepointLevel(false);
throw super.unsupportedFeature(Tokens.T_OLD);
// break;
}
// any other token ends the characteristics list
default:
end = true;
break;
}
}
}
Example usage of org.hsqldb_voltpatches.lib.OrderedIntHashSet in the VoltDB project: class QueryExpression, method resolveReferences.
/**
 * Resolves column references for a set-operation query expression
 * (UNION / INTERSECT / EXCEPT) by recursing into both sides, collecting
 * their unresolved expressions, and building the left/right column maps.
 *
 * Without CORRESPONDING, both sides must have the same column count and
 * columns pair up positionally. With CORRESPONDING, columns pair up by
 * name: either the shared names are discovered here (no explicit column
 * list) or an explicit CORRESPONDING BY list is validated against both
 * sides. Throws X_42594 on a count mismatch and X_42579 when a
 * corresponding column cannot be matched.
 */
public void resolveReferences(Session session) {
leftQueryExpression.resolveReferences(session);
rightQueryExpression.resolveReferences(session);
addUnresolvedExpressions(leftQueryExpression.unresolvedExpressions);
addUnresolvedExpressions(rightQueryExpression.unresolvedExpressions);
if (!unionCorresponding) {
columnCount = leftQueryExpression.getColumnCount();
int rightCount = rightQueryExpression.getColumnCount();
if (columnCount != rightCount) {
throw Error.error(ErrorCode.X_42594);
}
unionColumnTypes = new Type[columnCount];
// both sides deliberately share ONE identity map array (0,1,2,...)
leftQueryExpression.unionColumnMap = rightQueryExpression.unionColumnMap = new int[columnCount];
ArrayUtil.fillSequence(leftQueryExpression.unionColumnMap);
// NOTE: helper name is misspelled ("Referneces") but matches its
// declaration elsewhere in this class
resolveColumnRefernecesInUnionOrderBy();
return;
}
String[] leftNames = leftQueryExpression.getColumnNames();
String[] rightNames = rightQueryExpression.getColumnNames();
if (unionCorrespondingColumns == null) {
// CORRESPONDING without a column list: pair up every non-empty
// left column name that also appears on the right
unionCorrespondingColumns = new OrderedHashSet();
OrderedIntHashSet leftColumns = new OrderedIntHashSet();
OrderedIntHashSet rightColumns = new OrderedIntHashSet();
for (int i = 0; i < leftNames.length; i++) {
String name = leftNames[i];
int index = ArrayUtil.find(rightNames, name);
if (name.length() > 0 && index != -1) {
leftColumns.add(i);
rightColumns.add(index);
unionCorrespondingColumns.add(name);
}
}
// no shared column names at all is an error
if (unionCorrespondingColumns.isEmpty()) {
throw Error.error(ErrorCode.X_42579);
}
leftQueryExpression.unionColumnMap = leftColumns.toArray();
rightQueryExpression.unionColumnMap = rightColumns.toArray();
} else {
// explicit CORRESPONDING BY list: every listed name must exist
// on both sides
leftQueryExpression.unionColumnMap = new int[unionCorrespondingColumns.size()];
rightQueryExpression.unionColumnMap = new int[unionCorrespondingColumns.size()];
for (int i = 0; i < unionCorrespondingColumns.size(); i++) {
String name = (String) unionCorrespondingColumns.get(i);
int index = ArrayUtil.find(leftNames, name);
if (index == -1) {
throw Error.error(ErrorCode.X_42579);
}
leftQueryExpression.unionColumnMap[i] = index;
index = ArrayUtil.find(rightNames, name);
if (index == -1) {
throw Error.error(ErrorCode.X_42579);
}
rightQueryExpression.unionColumnMap[i] = index;
}
}
columnCount = unionCorrespondingColumns.size();
unionColumnTypes = new Type[columnCount];
resolveColumnRefernecesInUnionOrderBy();
}
Example usage of org.hsqldb_voltpatches.lib.OrderedIntHashSet in the VoltDB project: class ParserDDL, method readLikeTable.
/**
 * Parses a CREATE TABLE ... LIKE <table> clause, including its optional
 * INCLUDING / EXCLUDING options (GENERATED, IDENTITY, DEFAULTS), and
 * returns copies of the source table's columns renamed for the new table.
 * Options not marked INCLUDING are stripped from the copied columns.
 *
 * @param table the table being created (supplies the new column names)
 * @return one duplicated ColumnSchema per column of the LIKE'd table
 */
private ColumnSchema[] readLikeTable(Table table) {

    read();

    boolean copyGenerated = false;
    boolean copyIdentity  = false;
    boolean copyDefaults  = false;
    Table   sourceTable   = readTableName();

    // each option may appear at most once
    OrderedIntHashSet seenOptions = new OrderedIntHashSet();

    for (;;) {
        boolean isIncluding = token.tokenType == Tokens.INCLUDING;

        // the option list ends at the first non-INCLUDING/EXCLUDING token
        if (!isIncluding && token.tokenType != Tokens.EXCLUDING) {
            break;
        }

        read();

        int option = token.tokenType;

        if (option != Tokens.GENERATED && option != Tokens.IDENTITY
                && option != Tokens.DEFAULTS) {
            throw unexpectedToken();
        }

        if (!seenOptions.add(option)) {
            throw unexpectedToken();
        }

        if (option == Tokens.GENERATED) {
            copyGenerated = isIncluding;
        } else if (option == Tokens.IDENTITY) {
            copyIdentity = isIncluding;
        } else {
            copyDefaults = isIncluding;
        }

        read();
    }

    ColumnSchema[] columns = new ColumnSchema[sourceTable.getColumnCount()];

    for (int i = 0; i < columns.length; i++) {
        ColumnSchema col = sourceTable.getColumn(i).duplicate();
        HsqlName colName = database.nameManager.newColumnSchemaHsqlName(
                table.getName(), col.getName());

        col.setName(colName);

        if (copyIdentity) {
            // keep the identity, but on a fresh sequence object
            if (col.isIdentity()) {
                col.setIdentity(col.getIdentitySequence().duplicate());
            }
        } else {
            col.setIdentity(null);
        }

        if (!copyDefaults) {
            col.setDefaultExpression(null);
        }

        if (!copyGenerated) {
            col.setGeneratingExpression(null);
        }

        columns[i] = col;
    }

    return columns;
}
Example usage of org.hsqldb_voltpatches.lib.OrderedIntHashSet in the VoltDB project: class ParserDDL, method processAlterColumnSequenceOptions.
/**
 * Parses ALTER TABLE ... ALTER COLUMN sequence options for an identity
 * column — RESTART WITH, SET INCREMENT BY, SET [NO] MAXVALUE / MINVALUE /
 * CYCLE — applying them to a duplicate of the column's identity sequence,
 * then validating and committing the result back onto the column.
 *
 * Each individual option token may appear at most once (tracked in
 * 'set'); note that SET itself is not tracked, so multiple SET clauses
 * are accepted as long as their inner options differ.
 */
void processAlterColumnSequenceOptions(ColumnSchema column) {
// dedup tracker for option tokens already seen
OrderedIntHashSet set = new OrderedIntHashSet();
// work on a copy so a validation failure leaves the column untouched
NumberSequence sequence = column.getIdentitySequence().duplicate();
while (true) {
boolean end = false;
switch(token.tokenType) {
// RESTART WITH <value>
case Tokens.RESTART:
{
if (!set.add(token.tokenType)) {
throw unexpectedToken();
}
read();
readThis(Tokens.WITH);
long value = readBigint();
sequence.setStartValue(value);
break;
}
case Tokens.SET:
read();
switch(token.tokenType) {
// SET INCREMENT BY <value>
case Tokens.INCREMENT:
{
if (!set.add(token.tokenType)) {
throw unexpectedToken();
}
read();
readThis(Tokens.BY);
long value = readBigint();
sequence.setIncrement(value);
break;
}
// SET NO { MAXVALUE | MINVALUE | CYCLE }
case Tokens.NO:
read();
// NOTE(review): the sequence is modified BEFORE the duplicate
// check below, so a duplicate still applies the setter first
if (token.tokenType == Tokens.MAXVALUE) {
sequence.setDefaultMaxValue();
} else if (token.tokenType == Tokens.MINVALUE) {
sequence.setDefaultMinValue();
} else if (token.tokenType == Tokens.CYCLE) {
sequence.setCycle(false);
} else {
throw unexpectedToken();
}
// dedup on the inner token, so e.g. NO MAXVALUE and a later
// SET MAXVALUE conflict
if (!set.add(token.tokenType)) {
throw unexpectedToken();
}
read();
break;
// SET MAXVALUE <value>
case Tokens.MAXVALUE:
{
if (!set.add(token.tokenType)) {
throw unexpectedToken();
}
read();
long value = readBigint();
sequence.setMaxValueNoCheck(value);
break;
}
// SET MINVALUE <value>
case Tokens.MINVALUE:
{
if (!set.add(token.tokenType)) {
throw unexpectedToken();
}
read();
long value = readBigint();
sequence.setMinValueNoCheck(value);
break;
}
// SET CYCLE
case Tokens.CYCLE:
if (!set.add(token.tokenType)) {
throw unexpectedToken();
}
read();
sequence.setCycle(true);
break;
default:
throw Error.error(ErrorCode.X_42581, token.tokenString);
}
break;
// any other token ends the option list
default:
end = true;
break;
}
if (end) {
break;
}
}
// validate the accumulated settings, then commit atomically
sequence.checkValues();
column.getIdentitySequence().reset(sequence);
}
End of aggregated usage examples.