Use of org.apache.derby.iapi.types.SQLInteger in project derby by apache: class SYSCONSTRAINTSRowFactory, method makeRow.
// ///////////////////////////////////////////////////////////////////////////
//
// METHODS
//
// ///////////////////////////////////////////////////////////////////////////
/**
 * Make a SYSCONSTRAINTS row.
 *
 * @param td     the ConstraintDescriptor to build the row from, or null to
 *               build a template row with default/null column values
 * @param parent unused by this row factory
 *
 * @return Row suitable for inserting into SYSCONSTRAINTS.
 *
 * @exception StandardException thrown on failure
 */
public ExecRow makeRow(TupleDescriptor td, TupleDescriptor parent) throws StandardException {
    ExecRow row;
    int constraintIType;
    UUID oid;
    String constraintSType = null;
    String constraintID = null;
    String tableID = null;
    String constraintName = null;
    String schemaID = null;
    boolean deferrable = ConstraintDefinitionNode.DEFERRABLE_DEFAULT;
    boolean initiallyDeferred = ConstraintDefinitionNode.INITIALLY_DEFERRED_DEFAULT;
    boolean enforced = ConstraintDefinitionNode.ENFORCED_DEFAULT;
    int referenceCount = 0;
    if (td != null) {
        ConstraintDescriptor constraint = (ConstraintDescriptor) td;
        /*
        ** We only allocate a new UUID if the descriptor doesn't already have one.
        ** For descriptors replicated from a Source system, we already have an UUID.
        */
        oid = constraint.getUUID();
        constraintID = oid.toString();
        oid = constraint.getTableId();
        tableID = oid.toString();
        constraintName = constraint.getConstraintName();
        // Map the numeric constraint type to the single-character code
        // stored in the TYPE column.
        constraintIType = constraint.getConstraintType();
        switch(constraintIType) {
            case DataDictionary.PRIMARYKEY_CONSTRAINT:
                constraintSType = "P";
                break;
            case DataDictionary.UNIQUE_CONSTRAINT:
                constraintSType = "U";
                break;
            case DataDictionary.CHECK_CONSTRAINT:
                constraintSType = "C";
                break;
            case DataDictionary.FOREIGNKEY_CONSTRAINT:
                constraintSType = "F";
                break;
            default:
                // Unknown type: assert in debug builds; in production the
                // TYPE column will be set from a null String (original behavior).
                if (SanityManager.DEBUG) {
                    SanityManager.THROWASSERT("invalid constraint type");
                }
        }
        schemaID = constraint.getSchemaDescriptor().getUUID().toString();
        // constraint characteristics
        deferrable = constraint.deferrable();
        initiallyDeferred = constraint.initiallyDeferred();
        enforced = constraint.enforced();
        referenceCount = constraint.getReferenceCount();
    }
    /* Insert info into sysconstraints */
    /* RESOLVE - It would be nice to require less knowledge about sysconstraints
     * and have this be more table driven.
     */
    /* Build the row to insert */
    row = getExecutionFactory().getValueRow(SYSCONSTRAINTS_COLUMN_COUNT);
    /* 1st column is CONSTRAINTID (UUID - char(36)) */
    row.setColumn(SYSCONSTRAINTS_CONSTRAINTID, new SQLChar(constraintID));
    /* 2nd column is TABLEID (UUID - char(36)) */
    row.setColumn(SYSCONSTRAINTS_TABLEID, new SQLChar(tableID));
    /* 3rd column is NAME (varchar(128)) */
    row.setColumn(SYSCONSTRAINTS_CONSTRAINTNAME, new SQLVarchar(constraintName));
    /* 4th column is TYPE (char(1)) */
    row.setColumn(SYSCONSTRAINTS_TYPE, new SQLChar(constraintSType));
    /* 5th column is SCHEMAID (UUID - char(36)) */
    row.setColumn(SYSCONSTRAINTS_SCHEMAID, new SQLChar(schemaID));
    /* 6th column is STATE (char(1)) - encodes deferrable/initiallyDeferred/enforced */
    row.setColumn(SYSCONSTRAINTS_STATE, new SQLChar(encodeCharacteristics(deferrable, initiallyDeferred, enforced)));
    /* 7th column is REFERENCED */
    row.setColumn(SYSCONSTRAINTS_REFERENCECOUNT, new SQLInteger(referenceCount));
    return row;
}
Use of org.apache.derby.iapi.types.SQLInteger in project derby by apache: class ScrollInsensitiveResultSet, method updateRow.
/**
 * @see NoPutResultSet#updateRow
 *
 * Sets the updated column of the hash table to true and updates the row
 * in the hash table with the new values for the row.
 */
public void updateRow(ExecRow row, RowChanger rowChanger) throws StandardException {
    // Find an underlying ProjectRestrictResultSet if one exists, possibly
    // below an intervening RowCountResultSet.
    ProjectRestrictResultSet projectRestrict = null;
    if (source instanceof ProjectRestrictResultSet) {
        projectRestrict = (ProjectRestrictResultSet) source;
    } else if (source instanceof RowCountResultSet) {
        // To do any projection in the presence of an intervening
        // RowCountResultSet, we get its child.
        projectRestrict = ((RowCountResultSet) source).getUnderlyingProjectRestrictRS();
    }
    positionInHashTable.setValue(currentPosition);
    DataValueDescriptor[] hashRow = getCurrentRowFromHashtable();
    RowLocation location = (RowLocation) hashRow[POS_ROWLOCATION];
    // Maps from each selected column to underlying base table column
    // number, i.e. as from getBaseProjectMapping if a PRN exists, if not
    // we construct one, so we always know where in the hash table a
    // modified column will need to go (we do our own projection).
    int[] baseColumnMap;
    if (projectRestrict == null) {
        // No PRN below us: build a natural (identity) projection over all
        // columns in the SELECT list so both cases can be treated alike.
        int selectedCount = hashRow.length - (LAST_EXTRA_COLUMN + 1);
        baseColumnMap = new int[selectedCount];
        for (int col = 0; col < selectedCount; col++) {
            // column is 1-based
            baseColumnMap[col] = col + 1;
        }
    } else {
        baseColumnMap = projectRestrict.getBaseProjectMapping();
    }
    // Construct a replacement row: updated columns come from "row",
    // untouched ones are carried over from the hash table entry.
    ExecRow replacement = new ValueRow(baseColumnMap.length);
    for (int col = 0; col < baseColumnMap.length; col++) {
        // Which index in ExecRow "row" corresponds to this hash table
        // position, if any?
        int updatedIdx = rowChanger.findSelectedCol(baseColumnMap[col]);
        replacement.setColumn(col + 1,
                (updatedIdx > 0)
                ? row.getColumn(updatedIdx)                      // new value supplied
                : hashRow[LAST_EXTRA_COLUMN + 1 + col]);         // keep old value
    }
    ht.remove(new SQLInteger(currentPosition));
    addRowToHashTable(replacement, currentPosition, location, true);
    // Modify row to refer to data in the BackingStoreHashtable.
    // This allows reading of data which goes over multiple pages
    // when doing the actual update (LOBs). Putting columns of
    // type SQLBinary to disk, has destructive effect on the columns,
    // and they need to be re-read. That is the reason this is needed.
    DataValueDescriptor[] stored = getRowArrayFromHashTable(currentPosition);
    for (int col = 0; col < baseColumnMap.length; col++) {
        int updatedIdx = rowChanger.findSelectedCol(baseColumnMap[col]);
        if (updatedIdx > 0) {
            // Point the caller's row at the hash-table-backed copy.
            row.setColumn(updatedIdx, stored[col]);
        }
    }
}
Use of org.apache.derby.iapi.types.SQLInteger in project derby by apache: class ScrollInsensitiveResultSet, method addRowToHashTable.
//
// class implementation
//
/**
 * Add a row to the backing hash table, keyed on position.
 * When a row gets updated when using scrollable insensitive updatable
 * result sets, the old entry for the row will be deleted from the hash
 * table and this method will be called to add the new values for the row
 * to the hash table, with the parameter rowUpdated = true so as to mark
 * the row as updated. The latter is done in order to implement
 * detectability of own changes for result sets of this type.
 *
 * @param sourceRow The row to add.
 * @param position The key
 * @param rowLoc The rowLocation of the row to add.
 * @param rowUpdated Indicates whether the row has been updated.
 */
private void addRowToHashTable(ExecRow sourceRow, int position, RowLocation rowLoc, boolean rowUpdated) throws StandardException {
    DataValueDescriptor[] entry = new DataValueDescriptor[sourceRowWidth + extraColumns];
    // Slot 0 holds the key: the row's position in the result set.
    entry[0] = new SQLInteger(position);
    if (isForUpdate()) {
        // Updatable result sets carry extra bookkeeping columns.
        entry[POS_ROWLOCATION] = rowLoc.cloneValue(false);
        entry[POS_ROWDELETED] = new SQLBoolean(false);
        entry[POS_ROWUPDATED] = new SQLBoolean(rowUpdated);
    }
    /* Copy rest of elements from sourceRow.
     * NOTE: We need to clone the source row
     * and we do our own cloning since the 1st column
     * is not a wrapper.
     */
    DataValueDescriptor[] sourceColumns = sourceRow.getRowArray();
    System.arraycopy(sourceColumns, 0, entry, extraColumns, sourceColumns.length);
    ht.putRow(true, entry, null);
    numToHashTable++;
}
Use of org.apache.derby.iapi.types.SQLInteger in project derby by apache: class ScrollInsensitiveResultSet, method markRowAsDeleted.
/**
 * @see NoPutResultSet#markRowAsDeleted
 *
 * Sets the deleted column of the hash table to true in the current row.
 */
public void markRowAsDeleted() throws StandardException {
    positionInHashTable.setValue(currentPosition);
    DataValueDescriptor[] entry = getCurrentRowFromHashtable();
    // Cast checks the entry actually carries a RowLocation in this slot.
    RowLocation rowLoc = (RowLocation) entry[POS_ROWLOCATION];
    // Replace the old entry with one flagged as deleted.
    ht.remove(new SQLInteger(currentPosition));
    ((SQLBoolean) entry[POS_ROWDELETED]).setValue(true);
    // Null out every data column; the entry is now only a placeholder.
    for (int col = extraColumns; col < entry.length; col++) {
        entry[col].setToNull();
    }
    ht.putRow(true, entry, null);
}
Use of org.apache.derby.iapi.types.SQLInteger in project derby by apache: class T_AccessFactory, method nestedUserTransaction.
/**
 * Exercise nested user transactions: creation/visibility of conglomerates
 * across parent and nested transactions, lock sharing, abort behavior,
 * read-only vs. updatable nested transactions, and lock conflicts between
 * parent and child.
 *
 * @param tc the parent user transaction to nest under
 * @return true if the test passes (failures raise T_Fail instead)
 * @exception StandardException thrown on unexpected store failure
 * @exception T_Fail thrown when an expectation is violated
 */
protected boolean nestedUserTransaction(TransactionController tc) throws StandardException, T_Fail {
REPORT("(nestedUserTransaction) starting");
// Test of drop conglomerate with abort by doing the following:
// create table
// commit
// drop table
// make sure table is not still there.
// abort
// make sure table is still there.
// Create a heap conglomerate.
long orig_conglomid = tc.createConglomerate(// create a heap conglomerate
"heap", // 1 SQLInteger() column template.
new T_AccessRow(1).getRowArray(), // column sort order not required for heap
null, // default collation
null, // default properties
null, // not temporary
TransactionController.IS_DEFAULT);
// Create a temporary heap conglomerate.
// NOTE(review): this temporary conglomerate is never explicitly dropped
// below — presumably cleaned up with the transaction; verify.
long tmp_conglomid = tc.createConglomerate(// create a heap conglomerate
"heap", // 1 SQLInteger() column template.
new T_AccessRow(1).getRowArray(), // column sort order not required for heap
null, // default collation
null, // default properties
null, TransactionController.IS_TEMPORARY);
TransactionController current_xact = store.getTransaction(getContextService().getCurrentContextManager());
// get a nested user transaction
TransactionController child_tc = tc.startNestedUserTransaction(true, true);
// getTransaction() must keep returning the parent transaction even while
// a nested transaction is open.
TransactionController current_xact_after_nest = store.getTransaction(getContextService().getCurrentContextManager());
if (current_xact_after_nest != current_xact) {
throw T_Fail.testFailMsg("(nestedUserTransaction) getTransaction() return changed after startNestedUserTransaction()." + "current_xact = " + current_xact + ";current_xact_after_nest = " + current_xact_after_nest);
}
// Parent and child must share a single lock space.
T_Fail.T_ASSERT(tc.getLockSpace() == child_tc.getLockSpace(), "getLockSpace() returned different object for child.");
// the locks of the nested transaction should not conflict, so this
// open should work.
ConglomerateController cc = child_tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Make sure you can access the temporary conglomerate in the
// nested transaction.
ConglomerateController tmp_cc = child_tc.openConglomerate(tmp_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
cc.close();
tmp_cc.close();
child_tc.commit();
child_tc.destroy();
tc.dropConglomerate(orig_conglomid);
// trying to double nest a nested transaction should not work.
child_tc = tc.startNestedUserTransaction(true, true);
try {
child_tc.startNestedUserTransaction(true, true);
throw T_Fail.testFailMsg("(nestedUserTransaction) double nest xact not allowed.");
} catch (StandardException se) {
// expected exception, fall through.
}
child_tc.commit();
child_tc.destroy();
// make sure internal and ntt's work. Just add a bunch of data to
// the table causing page allocation.
// Doubling 7 times turns the 16-char seed into a 2K string.
String twok_string = new String("0123456789012345");
for (int i = 0; i < 7; i++) {
twok_string += twok_string;
}
T_AccessRow big_row = new T_AccessRow(2);
big_row.setCol(1, new SQLChar(twok_string));
// Create a heap conglomerate.
orig_conglomid = tc.createConglomerate(// create a heap conglomerate
"heap", big_row.getRowArray(), // column sort order not required for heap
null, // default collation
null, // default properties
null, // not temporary
TransactionController.IS_DEFAULT);
child_tc = tc.startNestedUserTransaction(true, true);
// add 20 pages worth of data, causing allocation
// the locks of the nested transaction should not conflict, so this
// open should work.
cc = child_tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
child_tc.abort();
child_tc.destroy();
// The parent never committed the create, and the child abort backs out
// the whole transaction, so the conglomerate must be gone.
try {
// the locks of the nested transaction should not conflict, so this
// open should work.
cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
throw T_Fail.testFailMsg("(nestedUserTransaction) conglom should have been aborted.");
} catch (StandardException se) {
// expected exception, fall through.
}
tc.commit();
// same test as above, but this time commit parent xact create to
// make sure it stays around after the child abort.
// Create a heap conglomerate.
orig_conglomid = tc.createConglomerate(// create a heap conglomerate
"heap", big_row.getRowArray(), // column sort order not required for heap
null, // default collation
null, // default properties
null, // not temporary
TransactionController.IS_DEFAULT);
tc.commit();
child_tc = tc.startNestedUserTransaction(true, true);
// add 20 pages worth of data, causing allocation
// the locks of the nested transaction should not conflict, so this
// open should work.
cc = child_tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
/*
for (int i = 0; i < 40; i++)
{
big_row.setCol(0, new SQLInteger(i));
cc.insert(big_row.getRowArray());
}
*/
child_tc.abort();
child_tc.destroy();
// This time the create was committed before the child aborted, so the
// conglomerate must still exist.
try {
// the locks of the nested transaction should not conflict, so this
// open should work.
cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
cc.close();
} catch (StandardException se) {
throw T_Fail.testFailMsg("(nestedUserTransaction) conglom should have not be aborted.");
}
// start an read only nested user transaction.
// NOTE(review): the first boolean argument appears to be the read-only
// flag (true here, false for the updatable case below) — confirm against
// startNestedUserTransaction() javadoc.
child_tc = tc.startNestedUserTransaction(true, true);
ConglomerateController child_cc = child_tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
try {
// should not be able to do an update in a read only transaction.
big_row.setCol(0, new SQLInteger(1042));
child_cc.insert(big_row.getRowArray());
throw T_Fail.testFailMsg("(nestedUserTransaction) read only xact does not allow upd.");
} catch (StandardException se) {
// expected exception, fall through.
child_tc.commit();
child_tc.destroy();
}
tc.commit();
// start an update nested user transaction.
child_tc = tc.startNestedUserTransaction(false, true);
child_cc = child_tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
try {
// should be able to do an update in an updatable nested transaction.
big_row.setCol(0, new SQLInteger(1043));
child_cc.insert(big_row.getRowArray());
} catch (StandardException se) {
throw T_Fail.testFailMsg("(nestedUserTransaction) read only xact does not allow upd.");
}
// expected exception, fall through.
child_tc.commit();
child_tc.destroy();
tc.commit();
// Take a table-level update lock in the parent so the child's open below
// must conflict.
cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
// start an update nested user transaction.
child_tc = tc.startNestedUserTransaction(false, true);
try {
// the following should time out, since locks are not compatible.
child_cc = child_tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
throw T_Fail.testFailMsg("(nestedUserTransaction) lock should have timed out.");
} catch (StandardException se) {
// expected timeout, fall through.
}
// expected exception, fall through.
child_tc.commit();
child_tc.destroy();
tc.commit();
REPORT("(nestedUserTransaction) finishing");
return true;
}
Aggregations