Use of org.apache.derby.iapi.types.RowLocation in the Apache Derby project: class InsertResultSet, method normalInsertCore.
/**
 * Do the work for a "normal" (non-bulk) insert: drain the source result
 * set and insert every row, handling deferred mode, check and foreign-key
 * constraints, BEFORE/AFTER insert triggers, and identity/autoincrement
 * bookkeeping.
 *
 * @param lcc          language connection context for this statement
 * @param firstExecute true on the first execution of this statement, when
 *                     the row changer / row holder are created (they are
 *                     re-used on subsequent executions)
 * @exception StandardException standard Derby error policy
 */
private void normalInsertCore(LanguageConnectionContext lcc, boolean firstExecute) throws StandardException {
    // Only capture the user-supplied identity value when the target has an
    // autoincrement column and the source yields exactly one row.
    boolean setUserIdentity = constants.hasAutoincrement() && isSingleRowResultSet();
    ExecRow deferredRowBuffer;
    long user_autoinc = 0;
    /* Get or re-use the row changer.
    */
    if (firstExecute) {
        rowChanger = lcc.getLanguageConnectionFactory().getExecutionFactory().getRowChanger(heapConglom, constants.heapSCOCI, heapDCOCI, constants.irgs, constants.indexCIDS, constants.indexSCOCIs, indexDCOCIs, // number of columns in partial row meaningless for insert
        0, tc, // Changed column ids
        null, constants.getStreamStorableHeapColIds(), activation);
        rowChanger.setIndexNames(constants.indexNames);
    }
    /* decode lock mode for the execution isolation level */
    int lockMode = decodeLockMode(constants.lockMode);
    rowChanger.open(lockMode);
    /* The source does not know whether or not we are doing a
     * deferred mode insert. If we are, then we must clear the
     * index scan info from the activation so that the row changer
     * does not re-use that information (which won't be valid for
     * a deferred mode insert).
     */
    if (constants.deferred) {
        activation.clearIndexScanInfo();
    }
    // Create (first execute) or re-open the foreign-key checker, if any
    // foreign keys need verification on this table.
    if (fkInfoArray != null) {
        if (fkChecker == null) {
            fkChecker = new RISetChecker(lcc, tc, fkInfoArray);
        } else {
            fkChecker.reopen();
        }
    }
    if (firstExecute && constants.deferred) {
        Properties properties = new Properties();
        // Get the properties on the old heap
        rowChanger.getHeapConglomerateController().getInternalTablePropertySet(properties);
        /*
        ** If deferred we save a copy of the entire row.
        */
        rowHolder = new TemporaryRowHolderImpl(activation, properties, resultDescription);
        rowChanger.setRowHolder(rowHolder);
    }
    firstExecuteSpecialHandlingAutoGen(firstExecute, rowChanger, constants.targetUUID);
    // Main loop: process every row produced by the source result set.
    while (row != null) {
        // Save a projection of the row if the caller asked for
        // auto-generated key columns.
        if (activation.getAutoGeneratedKeysResultsetMode() && autoGeneratedKeysColumnIndexes.length > 0) {
            autoGeneratedKeysRowsHolder.insert(getCompactRow(row, autoGeneratedKeysColumnIndexes));
        }
        // fill in columns that are computed from expressions on other columns
        evaluateGenerationClauses(generationClauses, activation, sourceResultSet, row, false);
        /*
        ** If we're doing a deferred insert, insert into the temporary
        ** conglomerate. Otherwise, insert directly into the permanent
        ** conglomerates using the rowChanger.
        */
        if (constants.deferred) {
            rowHolder.insert(row);
        } else {
            // Immediate mode violations will throw, so we only ever
            // see false here with deferred constraint mode for one or more
            // of the constraints being checked.
            boolean allOk = evaluateCheckConstraints();
            if (fkChecker != null) {
                fkChecker.doFKCheck(activation, row);
            }
            // Objectify any streaming columns that are indexed.
            if (constants.irgs.length > 0) {
                DataValueDescriptor[] rowArray = row.getRowArray();
                for (int i = 0; i < rowArray.length; i++) {
                    // System.out.println("checking " + i);
                    if (!constants.indexedCols[i]) {
                        continue;
                    }
                    if (rowArray[i] instanceof StreamStorable)
                        rowArray[i].getObject();
                }
            }
            if (allOk) {
                rowChanger.insertRow(row, false);
            } else {
                // A deferred check constraint was violated: insert the row
                // anyway and remember it for validation at commit time.
                RowLocation offendingRow = rowChanger.insertRow(row, true);
                deferredChecks = DeferredConstraintsMemory.rememberCheckViolations(lcc, constants.targetUUID, schemaName, tableName, deferredChecks, violatingCheckConstraints, offendingRow, new CheckInfo[1]);
            }
        }
        rowCount++;
        // Single-row insert into an autoincrement table: remember the
        // user-supplied value of the first autoincrement column.
        if (setUserIdentity) {
            dd = lcc.getDataDictionary();
            td = dd.getTableDescriptor(constants.targetUUID);
            int maxColumns = td.getMaxColumnID();
            int col;
            // Scan for the first autoincrement column (column ids are 1-based).
            for (col = 1; col <= maxColumns; col++) {
                ColumnDescriptor cd = td.getColumnDescriptor(col);
                if (cd.isAutoincrement()) {
                    break;
                }
            }
            if (col <= maxColumns) {
                DataValueDescriptor dvd = row.cloneColumn(col);
                user_autoinc = dvd.getLong();
            }
        }
        // No need to do a next on a single row source
        if (constants.singleRowSource) {
            row = null;
        } else {
            row = getNextRowCore(sourceResultSet);
        }
    }
    /*
    ** If it's a deferred insert, scan the temporary conglomerate and
    ** insert the rows into the permanent conglomerates using rowChanger.
    */
    if (constants.deferred) {
        if (triggerInfo != null) {
            // Build autoincrement counters for the trigger execution
            // context, one per cached autoincrement column.
            Vector<AutoincrementCounter> v = null;
            if (aiCache != null) {
                v = new Vector<AutoincrementCounter>();
                for (int i = 0; i < aiCache.length; i++) {
                    String s, t, c;
                    if (aiCache[i] == null)
                        continue;
                    Long initialValue = lcc.lastAutoincrementValue((s = constants.getSchemaName()), (t = constants.getTableName()), (c = constants.getColumnName(i)));
                    AutoincrementCounter aic = new AutoincrementCounter(initialValue, constants.getAutoincIncrement(i), aiCache[i].getLong(), s, t, c, i + 1);
                    v.addElement(aic);
                }
            }
            if (triggerActivator == null) {
                triggerActivator = new TriggerEventActivator(lcc, constants.targetUUID, triggerInfo, TriggerExecutionContext.INSERT_EVENT, activation, v);
            } else {
                triggerActivator.reopen();
            }
            // fire BEFORE trigger, do this before checking constraints
            triggerActivator.notifyEvent(TriggerEvents.BEFORE_INSERT, (CursorResultSet) null, rowHolder.getResultSet(), (int[]) null);
        }
        // Replay the buffered rows into the permanent conglomerates.
        CursorResultSet rs = rowHolder.getResultSet();
        try {
            rs.open();
            while ((deferredRowBuffer = rs.getNextRow()) != null) {
                // we have to set the source row so the check constraint
                // sees the correct row.
                sourceResultSet.setCurrentRow(deferredRowBuffer);
                boolean allOk = evaluateCheckConstraints();
                if (allOk) {
                    rowChanger.insertRow(deferredRowBuffer, false);
                } else {
                    // Deferred check-constraint violation: insert and
                    // remember the offending row for commit-time checking.
                    RowLocation offendingRow = rowChanger.insertRow(deferredRowBuffer, true);
                    deferredChecks = DeferredConstraintsMemory.rememberCheckViolations(lcc, constants.targetUUID, schemaName, tableName, deferredChecks, violatingCheckConstraints, offendingRow, new CheckInfo[1]);
                }
            }
        } finally {
            sourceResultSet.clearCurrentRow();
            rs.close();
        }
        if (fkChecker != null) {
            /*
            ** Second scan to make sure all the foreign key
            ** constraints are ok. We have to do this after
            ** we have completed the inserts in case of self
            ** referencing constraints.
            */
            rs = rowHolder.getResultSet();
            try {
                rs.open();
                while ((deferredRowBuffer = rs.getNextRow()) != null) {
                    fkChecker.doFKCheck(activation, deferredRowBuffer);
                }
            } finally {
                rs.close();
            }
        }
        // fire AFTER trigger
        if (triggerActivator != null) {
            triggerActivator.notifyEvent(TriggerEvents.AFTER_INSERT, (CursorResultSet) null, rowHolder.getResultSet(), (int[]) null);
        }
    }
    if (rowHolder != null) {
        rowHolder.close();
        // rowHolder kept across opens
    }
    if (fkChecker != null) {
        fkChecker.close();
        fkChecker = null;
    }
    // Publish the identity value for this statement: either the
    // system-generated value or the user-supplied one captured above.
    if (setIdentity)
        lcc.setIdentityValue(identityVal);
    else /*
         * find the value of the identity column from the user inserted value
         * and do a lcc.setIdentityValue(<user_value>);
         */
    if (setUserIdentity) {
        lcc.setIdentityValue(user_autoinc);
    }
}
Use of org.apache.derby.iapi.types.RowLocation in the Apache Derby project: class InsertResultSet, method changedRow.
// TargetResultSet interface
/**
 * @see TargetResultSet#changedRow
 *
 * Called during bulk insert for each row written to the target heap;
 * objectifies any indexed streaming columns and feeds the row, together
 * with its heap RowLocation, into the per-index sorters.
 *
 * @param execRow the row just inserted into the heap
 * @param rowLocation heap location of that row
 * @exception StandardException thrown if cursor finished.
 */
public void changedRow(ExecRow execRow, RowLocation rowLocation) throws StandardException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(bulkInsert, "bulkInsert expected to be true");
    }
    /* Set up sorters, etc. if 1st row and there are indexes */
    if (constants.irgs.length > 0) {
        RowLocation rlClone = (RowLocation) rowLocation.cloneValue(false);
        // Objectify any of the streaming columns that are indexed, so the
        // sorters see materialized values rather than open streams.
        DataValueDescriptor[] rowArray = execRow.getRowArray();
        for (int i = 0; i < rowArray.length; i++) {
            if (!constants.indexedCols[i]) {
                continue;
            }
            if (rowArray[i] instanceof StreamStorable)
                rowArray[i].getObject();
        }
        // Every index row will share the same row location, etc.
        if (firstRow) {
            firstRow = false;
            indexRows = new ExecIndexRow[constants.irgs.length];
            setUpAllSorts(execRow.getNewNullRow(), rlClone);
        }
        // Put the row into the indexes
        for (int index = 0; index < constants.irgs.length; index++) {
            // Get a new object Array for the index
            indexRows[index].getNewObjectArray();
            // Associate the index row with the source row
            constants.irgs[index].getIndexRow(execRow, rlClone, indexRows[index], (FormatableBitSet) null);
            // Insert the index row into the matching sorter
            sorters[index].insert(indexRows[index].getRowArray());
        }
    }
}
Use of org.apache.derby.iapi.types.RowLocation in the Apache Derby project: class ScrollInsensitiveResultSet, method updateRow.
/**
 * @see NoPutResultSet#updateRow
 *
 * Flags the current hash-table entry as updated and rewrites it with the
 * new column values, then copies the stored (backed) values back into the
 * caller's row.
 */
public void updateRow(ExecRow row, RowChanger rowChanger) throws StandardException {
    // Find an underlying ProjectRestrictResultSet, if one exists, so its
    // base-table column mapping can be reused.
    ProjectRestrictResultSet projectRestrict = null;
    if (source instanceof ProjectRestrictResultSet) {
        projectRestrict = (ProjectRestrictResultSet) source;
    } else if (source instanceof RowCountResultSet) {
        // To do any projection in the presence of an intervening
        // RowCountResultSet, we get its child.
        projectRestrict = ((RowCountResultSet) source).getUnderlyingProjectRestrictRS();
    }
    positionInHashTable.setValue(currentPosition);
    DataValueDescriptor[] hashedRow = getCurrentRowFromHashtable();
    RowLocation rowLoc = (RowLocation) hashedRow[POS_ROWLOCATION];
    // baseColumnMap maps each selected column to its underlying base-table
    // column number (as from getBaseProjectMapping when a PRN exists). When
    // there is no PRN we build the natural 1-based mapping ourselves so
    // both cases can be handled uniformly below.
    int[] baseColumnMap;
    if (projectRestrict != null) {
        baseColumnMap = projectRestrict.getBaseProjectMapping();
    } else {
        int selectedCount = hashedRow.length - (LAST_EXTRA_COLUMN + 1);
        baseColumnMap = new int[selectedCount];
        for (int i = 0; i < selectedCount; i++) {
            // column is 1-based
            baseColumnMap[i] = i + 1;
        }
    }
    // Build the replacement row: take each updated column from "row",
    // and keep the old hash-table value for the rest.
    ExecRow replacement = new ValueRow(baseColumnMap.length);
    for (int i = 0; i < baseColumnMap.length; i++) {
        int updatedCol = rowChanger.findSelectedCol(baseColumnMap[i]);
        replacement.setColumn(i + 1, (updatedCol > 0) ? row.getColumn(updatedCol) : hashedRow[LAST_EXTRA_COLUMN + 1 + i]);
    }
    ht.remove(new SQLInteger(currentPosition));
    addRowToHashTable(replacement, currentPosition, rowLoc, true);
    // Modify row to refer to data in the BackingStoreHashtable.
    // This allows reading of data which goes over multiple pages
    // when doing the actual update (LOBs). Putting columns of
    // type SQLBinary to disk, has destructive effect on the columns,
    // and they need to be re-read. That is the reason this is needed.
    DataValueDescriptor[] storedData = getRowArrayFromHashTable(currentPosition);
    for (int i = 0; i < baseColumnMap.length; i++) {
        int updatedCol = rowChanger.findSelectedCol(baseColumnMap[i]);
        if (updatedCol > 0) {
            row.setColumn(updatedCol, storedData[i]);
        }
    }
}
Use of org.apache.derby.iapi.types.RowLocation in the Apache Derby project: class ScrollInsensitiveResultSet, method positionInLastFetchedRow.
/**
 * Repositions the underlying scan on the row most recently fetched from
 * the source. Done before navigating to a row not previously fetched, so
 * that getNextRowCore() resumes exactly where it left off.
 */
private void positionInLastFetchedRow() throws StandardException {
    // Nothing fetched yet: nothing to reposition on.
    if (positionInSource <= 0) {
        return;
    }
    positionInHashTable.setValue(positionInSource);
    DataValueDescriptor[] hashedRow = getCurrentRowFromHashtable();
    ((NoPutResultSet) target).positionScanAtRowLocation((RowLocation) hashedRow[POS_ROWLOCATION]);
    currentPosition = positionInSource;
}
Use of org.apache.derby.iapi.types.RowLocation in the Apache Derby project: class ScrollInsensitiveResultSet, method markRowAsDeleted.
/**
 * @see NoPutResultSet#markRowAsDeleted
 *
 * Sets the deleted column of the hash table to true in the current row
 * and NULLs out its data columns, leaving a placeholder entry that keeps
 * only the extra bookkeeping columns (row location, deleted flag).
 */
public void markRowAsDeleted() throws StandardException {
    positionInHashTable.setValue(currentPosition);
    DataValueDescriptor[] hashRowArray = getCurrentRowFromHashtable();
    // Note: the row location at hashRowArray[POS_ROWLOCATION] is preserved
    // in the re-inserted row; no separate local copy is needed.
    ht.remove(new SQLInteger(currentPosition));
    ((SQLBoolean) hashRowArray[POS_ROWDELETED]).setValue(true);
    // Set all columns to NULL, the row is now a placeholder
    for (int i = extraColumns; i < hashRowArray.length; i++) {
        hashRowArray[i].setToNull();
    }
    ht.putRow(true, hashRowArray, null);
}
Aggregations