Use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.
The class InsertResultSet, method open:
/**
* @exception StandardException Standard Derby error policy
*/
public void open() throws StandardException {
setup();
// Remember if this is the 1st execution
firstExecute = (rowChanger == null);
autoincrementGenerated = false;
dd = lcc.getDataDictionary();
verifyAutoGeneratedRScolumnsList(constants.targetUUID);
rowCount = 0L;
if (numOpens++ == 0) {
sourceResultSet.openCore();
} else {
sourceResultSet.reopenCore();
}
/* If the user specified bulkInsert (or replace) then we need
* to get an exclusive table lock on the table. If it is a
* regular bulk insert then we need to check to see if the
* table is empty. (If not empty, then we end up doing a row
* at a time insert.)
*/
if (userSpecifiedBulkInsert) {
if (!bulkInsertReplace) {
bulkInsert = verifyBulkInsert();
} else {
getExclusiveTableLock();
}
}
if (bulkInsert) {
// Notify the source that we are the target
sourceResultSet.setTargetResultSet(this);
ExecRow fullTemplate = ((ExecRowBuilder) activation.getPreparedStatement().getSavedObject(fullTemplateId)).build(activation.getExecutionFactory());
bulkInsertCore(lcc, fullTemplate, heapConglom);
if (triggerInfo != null) {
if (SanityManager.DEBUG) {
// If we have triggers, we do not use bulkInsert
SanityManager.NOTREACHED();
}
}
bulkValidateForeignKeys(tc, lcc.getContextManager(), fullTemplate);
bulkInsertPerformed = true;
} else {
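// Row-at-a-time path: pull the first source row here; normalInsertCore()
// loops over the remaining rows via getNextRowCore().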
row = getNextRowCore(sourceResultSet);
normalInsertCore(lcc, firstExecute);
}
/* Cache query plan text for source, before it gets blown away */
if (lcc.getRunTimeStatisticsMode()) {
/* savedSource nulled after run time statistics generation */
savedSource = sourceResultSet;
}
cleanUp();
saveAIcacheInformation(constants.getSchemaName(), constants.getTableName(), constants.getColumnNames());
endTime = getCurrentTimeMillis();
}
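Both branches are reachable from plain SQL. A minimal JDBC sketch, assuming an embedded scratch database (the URL, class, and table names are invented; the insertMode override shown here is the property behind the userSpecifiedBulkInsert flag, as exercised in Derby's own bulk-insert tests, and plain bulkInsert only sticks when the target table is empty, per verifyBulkInsert()):
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class InsertModeDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:demoDB;create=true");
             Statement s = conn.createStatement()) {
            s.execute("CREATE TABLE src(id INT, name VARCHAR(32))");
            s.execute("CREATE TABLE dest(id INT, name VARCHAR(32))");
            s.execute("INSERT INTO src VALUES (1, 'a'), (2, 'b')");
            // Row-at-a-time path: open() falls through to normalInsertCore().
            s.execute("INSERT INTO dest SELECT * FROM src");
            // Bulk path with replace semantics: open() takes an exclusive
            // table lock (getExclusiveTableLock()) before loading.
            s.execute("INSERT INTO dest --DERBY-PROPERTIES insertMode=replace\n"
                    + "SELECT * FROM src");
        }
    }
}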
Use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.
The class InsertResultSet, method normalInsertCore:
// Do the work for a "normal" insert
private void normalInsertCore(LanguageConnectionContext lcc, boolean firstExecute) throws StandardException {
boolean setUserIdentity = constants.hasAutoincrement() && isSingleRowResultSet();
ExecRow deferredRowBuffer;
long user_autoinc = 0;
/* Get or re-use the row changer.
*/
if (firstExecute) {
rowChanger = lcc.getLanguageConnectionFactory().getExecutionFactory().getRowChanger(
        heapConglom,
        constants.heapSCOCI,
        heapDCOCI,
        constants.irgs,
        constants.indexCIDS,
        constants.indexSCOCIs,
        indexDCOCIs,
        0,    // number of columns in partial row, meaningless for insert
        tc,
        null, // changed column ids
        constants.getStreamStorableHeapColIds(),
        activation);
rowChanger.setIndexNames(constants.indexNames);
}
/* decode lock mode for the execution isolation level */
int lockMode = decodeLockMode(constants.lockMode);
rowChanger.open(lockMode);
/* The source does not know whether or not we are doing a
* deferred mode insert. If we are, then we must clear the
* index scan info from the activation so that the row changer
* does not re-use that information (which won't be valid for
* a deferred mode insert).
*/
if (constants.deferred) {
activation.clearIndexScanInfo();
}
if (fkInfoArray != null) {
if (fkChecker == null) {
fkChecker = new RISetChecker(lcc, tc, fkInfoArray);
} else {
fkChecker.reopen();
}
}
if (firstExecute && constants.deferred) {
Properties properties = new Properties();
// Get the properties on the old heap
rowChanger.getHeapConglomerateController().getInternalTablePropertySet(properties);
/*
** If deferred we save a copy of the entire row.
*/
rowHolder = new TemporaryRowHolderImpl(activation, properties, resultDescription);
rowChanger.setRowHolder(rowHolder);
}
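// First-execution setup for auto-generated (identity) column handling.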
firstExecuteSpecialHandlingAutoGen(firstExecute, rowChanger, constants.targetUUID);
while (row != null) {
// Buffer the auto-generated key columns when the statement asked for them.
if (activation.getAutoGeneratedKeysResultsetMode() && autoGeneratedKeysColumnIndexes.length > 0) {
autoGeneratedKeysRowsHolder.insert(getCompactRow(row, autoGeneratedKeysColumnIndexes));
}
// fill in columns that are computed from expressions on other columns
evaluateGenerationClauses(generationClauses, activation, sourceResultSet, row, false);
/*
** If we're doing a deferred insert, insert into the temporary
** conglomerate. Otherwise, insert directly into the permanent
** conglomerates using the rowChanger.
*/
if (constants.deferred) {
rowHolder.insert(row);
} else {
// Immediate mode violations will throw, so we only ever
// see false here with deferred constraint mode for one or more
// of the constraints being checked.
boolean allOk = evaluateCheckConstraints();
if (fkChecker != null) {
fkChecker.doFKCheck(activation, row);
}
// Objectify any streaming columns that are indexed.
if (constants.irgs.length > 0) {
DataValueDescriptor[] rowArray = row.getRowArray();
for (int i = 0; i < rowArray.length; i++) {
// System.out.println("checking " + i);
if (!constants.indexedCols[i]) {
continue;
}
if (rowArray[i] instanceof StreamStorable) {
    rowArray[i].getObject();
}
}
}
if (allOk) {
rowChanger.insertRow(row, false);
} else {
RowLocation offendingRow = rowChanger.insertRow(row, true);
deferredChecks = DeferredConstraintsMemory.rememberCheckViolations(lcc, constants.targetUUID, schemaName, tableName, deferredChecks, violatingCheckConstraints, offendingRow, new CheckInfo[1]);
}
}
rowCount++;
if (setUserIdentity) {
dd = lcc.getDataDictionary();
td = dd.getTableDescriptor(constants.targetUUID);
int maxColumns = td.getMaxColumnID();
int col;
for (col = 1; col <= maxColumns; col++) {
ColumnDescriptor cd = td.getColumnDescriptor(col);
if (cd.isAutoincrement()) {
break;
}
}
if (col <= maxColumns) {
DataValueDescriptor dvd = row.cloneColumn(col);
user_autoinc = dvd.getLong();
}
}
// No need to do a next on a single row source
if (constants.singleRowSource) {
row = null;
} else {
row = getNextRowCore(sourceResultSet);
}
}
/*
** If it's a deferred insert, scan the temporary conglomerate and
** insert the rows into the permanent conglomerates using rowChanger.
*/
if (constants.deferred) {
if (triggerInfo != null) {
Vector<AutoincrementCounter> v = null;
if (aiCache != null) {
v = new Vector<AutoincrementCounter>();
for (int i = 0; i < aiCache.length; i++) {
if (aiCache[i] == null) {
    continue;
}
String s = constants.getSchemaName();
String t = constants.getTableName();
String c = constants.getColumnName(i);
Long initialValue = lcc.lastAutoincrementValue(s, t, c);
AutoincrementCounter aic = new AutoincrementCounter(initialValue, constants.getAutoincIncrement(i), aiCache[i].getLong(), s, t, c, i + 1);
v.addElement(aic);
}
}
if (triggerActivator == null) {
triggerActivator = new TriggerEventActivator(lcc, constants.targetUUID, triggerInfo, TriggerExecutionContext.INSERT_EVENT, activation, v);
} else {
triggerActivator.reopen();
}
// fire BEFORE trigger, do this before checking constraints
triggerActivator.notifyEvent(TriggerEvents.BEFORE_INSERT, (CursorResultSet) null, rowHolder.getResultSet(), (int[]) null);
}
CursorResultSet rs = rowHolder.getResultSet();
try {
rs.open();
while ((deferredRowBuffer = rs.getNextRow()) != null) {
// we have to set the source row so the check constraint
// sees the correct row.
sourceResultSet.setCurrentRow(deferredRowBuffer);
boolean allOk = evaluateCheckConstraints();
if (allOk) {
rowChanger.insertRow(deferredRowBuffer, false);
} else {
RowLocation offendingRow = rowChanger.insertRow(deferredRowBuffer, true);
deferredChecks = DeferredConstraintsMemory.rememberCheckViolations(lcc, constants.targetUUID, schemaName, tableName, deferredChecks, violatingCheckConstraints, offendingRow, new CheckInfo[1]);
}
}
} finally {
sourceResultSet.clearCurrentRow();
rs.close();
}
if (fkChecker != null) {
/*
** Second scan to make sure all the foreign key
** constraints are ok. We have to do this after
** we have completed the inserts in case of self
** referencing constraints.
*/
rs = rowHolder.getResultSet();
try {
rs.open();
while ((deferredRowBuffer = rs.getNextRow()) != null) {
fkChecker.doFKCheck(activation, deferredRowBuffer);
}
} finally {
rs.close();
}
}
// fire AFTER trigger
if (triggerActivator != null) {
triggerActivator.notifyEvent(TriggerEvents.AFTER_INSERT, (CursorResultSet) null, rowHolder.getResultSet(), (int[]) null);
}
}
if (rowHolder != null) {
rowHolder.close();
// rowHolder kept across opens
}
if (fkChecker != null) {
fkChecker.close();
fkChecker = null;
}
if (setIdentity) {
    lcc.setIdentityValue(identityVal);
} else if (setUserIdentity) {
    // Find the value of the identity column from the user-inserted value
    // and do a lcc.setIdentityValue(<user_value>).
    lcc.setIdentityValue(user_autoinc);
}
}
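One case that forces the deferred branch above is a self-referencing insert, where the source scan must not see the rows it is inserting. A small sketch (the database and table names are invented):
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DeferredInsertDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:deferDB;create=true");
             Statement s = conn.createStatement()) {
            s.execute("CREATE TABLE t(id INT)");
            s.execute("INSERT INTO t VALUES (1), (2)");
            // Reading from the insert target forces the deferred mode:
            // rows are staged in the TemporaryRowHolderImpl and replayed
            // afterwards, so the scan never sees its own inserts and the
            // statement terminates with exactly four rows in t.
            s.execute("INSERT INTO t SELECT id + 10 FROM t");
        }
    }
}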
Use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.
The class InsertResultSet, method makeIndexTemplate:
/**
 * Make an index template row with the correct columns: the key columns
 * listed in the FKInfo, followed by a row location in the last position.
 */
private ExecRow makeIndexTemplate(FKInfo fkInfo, ExecRow fullTemplate, ContextManager cm) throws StandardException {
ExecRow newRow = RowUtil.getEmptyIndexRow(fkInfo.colArray.length + 1, lcc);
DataValueDescriptor[] templateColArray = fullTemplate.getRowArray();
DataValueDescriptor[] newRowColArray = newRow.getRowArray();
int i;
for (i = 0; i < fkInfo.colArray.length; i++) {
newRowColArray[i] = templateColArray[fkInfo.colArray[i] - 1].cloneValue(false);
}
newRowColArray[i] = fkInfo.rowLocation.cloneValue(false);
return newRow;
}
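The layout makeIndexTemplate() builds (key columns first, the base row's RowLocation in the last slot, 1-based column positions mapped to 0-based array indexes) can be shown with plain arrays standing in for DataValueDescriptors; the class and names below are invented:
import java.util.Arrays;

public class IndexTemplateSketch {
    // Hypothetical stand-ins for DataValueDescriptor and RowLocation.
    static Object[] makeTemplate(Object[] fullRow, int[] oneBasedKeyCols, Object rid) {
        Object[] idx = new Object[oneBasedKeyCols.length + 1];
        for (int i = 0; i < oneBasedKeyCols.length; i++) {
            idx[i] = fullRow[oneBasedKeyCols[i] - 1]; // 1-based -> 0-based
        }
        idx[oneBasedKeyCols.length] = rid;            // RID rides in the last slot
        return idx;
    }

    public static void main(String[] args) {
        Object[] base = { "k1", "v", "k2" };
        System.out.println(Arrays.toString(makeTemplate(base, new int[] { 1, 3 }, "RID@42")));
        // -> [k1, k2, RID@42]
    }
}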
Use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.
The class InsertResultSet, method emptyIndexes:
/**
* Empty the indexes after doing a bulk insert replace
* where the table has 0 rows after the replace.
* RESOLVE: This method is ugly! Prior to 2.0, we simply
* scanned back across the table to build the indexes. We
* changed this in 2.0 to populate the sorters via a call back
* as we populated the table. Doing a 0 row replace into a
* table with indexes is a degenerate case, hence we allow
* ugly and unoptimized code.
*
* @exception StandardException Thrown on failure
*/
private void emptyIndexes(long newHeapConglom, InsertConstantAction constants, TableDescriptor td, DataDictionary dd, ExecRow fullTemplate) throws StandardException {
int numIndexes = constants.irgs.length;
ExecIndexRow[] idxRows = new ExecIndexRow[numIndexes];
ExecRow baseRows;
ColumnOrdering[][] order = new ColumnOrdering[numIndexes][];
int numColumns = td.getNumberOfColumns();
collation = new int[numIndexes][];
// Create the BitSet for mapping the partial row to the full row
FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
// Need to check each index for referenced columns
int numReferencedColumns = 0;
for (int index = 0; index < numIndexes; index++) {
int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
for (int bcp = 0; bcp < baseColumnPositions.length; bcp++) {
if (!bitSet.get(baseColumnPositions[bcp])) {
bitSet.set(baseColumnPositions[bcp]);
numReferencedColumns++;
}
}
}
// We can finally create the partial base row
baseRows = activation.getExecutionFactory().getValueRow(numReferencedColumns);
// Fill in the partial base row with nulls of the correct data type, cloned from the full template
int colNumber = 0;
for (int index = 0; index < numColumns; index++) {
if (bitSet.get(index + 1)) {
colNumber++;
// NOTE: 1-based column numbers
baseRows.setColumn(colNumber, fullTemplate.getColumn(index + 1).cloneValue(false));
}
}
needToDropSort = new boolean[numIndexes];
sortIds = new long[numIndexes];
/* Do the initial set up before scanning the heap.
* For each index, build a single index row and a sorter.
*/
for (int index = 0; index < numIndexes; index++) {
// create a single index row template for each index
idxRows[index] = constants.irgs[index].getIndexRowTemplate();
// Get an index row based on the base row
// (This call is only necessary here because we need to pass a
// template to the sorter.)
constants.irgs[index].getIndexRow(baseRows, rl, idxRows[index], bitSet);
/* For non-unique indexes, we order by all columns + the RID.
* For unique indexes, we just order by the columns.
* We create a unique index observer for unique indexes
* so that we can catch duplicate keys.
*/
ConglomerateDescriptor cd;
// Get the ConglomerateDescriptor for the index
cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
int[] baseColumnPositions = constants.irgs[index].baseColumnPositions();
boolean[] isAscending = constants.irgs[index].isAscending();
int numColumnOrderings;
SortObserver sortObserver;
final IndexRowGenerator indDes = cd.getIndexDescriptor();
if (indDes.isUnique() || indDes.isUniqueDeferrable()) {
numColumnOrderings = indDes.isUnique() ? baseColumnPositions.length : baseColumnPositions.length + 1;
String indexOrConstraintName = cd.getConglomerateName();
boolean deferred = false;
boolean uniqueDeferrable = false;
UUID uniqueDeferrableConstraintId = null;
if (cd.isConstraint()) {
// so, the index is backing up a constraint
ConstraintDescriptor conDesc = dd.getConstraintDescriptor(td, cd.getUUID());
indexOrConstraintName = conDesc.getConstraintName();
deferred = lcc.isEffectivelyDeferred(lcc.getCurrentSQLSessionContext(activation), conDesc.getUUID());
uniqueDeferrable = conDesc.deferrable();
uniqueDeferrableConstraintId = conDesc.getUUID();
}
sortObserver = new UniqueIndexSortObserver(
        lcc,
        uniqueDeferrableConstraintId,
        false, // don't clone rows
        uniqueDeferrable,
        deferred,
        indexOrConstraintName,
        idxRows[index],
        true,
        td.getName());
} else {
numColumnOrderings = baseColumnPositions.length + 1;
sortObserver = new BasicSortObserver(false, false, idxRows[index], true);
}
order[index] = new ColumnOrdering[numColumnOrderings];
for (int ii = 0; ii < isAscending.length; ii++) {
order[index][ii] = new IndexColumnOrder(ii, isAscending[ii]);
}
if (numColumnOrderings > isAscending.length) {
order[index][isAscending.length] = new IndexColumnOrder(isAscending.length);
}
// create the sorters
sortIds[index] = tc.createSort(
        (Properties) null,
        idxRows[index].getRowArrayClone(),
        order[index],
        sortObserver,
        false,    // not already in order
        rowCount, // estimated rows
        -1);      // estimated row size, -1 means no idea
needToDropSort[index] = true;
}
// Populate sorters and get the output of each sorter into a row
// source. The sorters have the indexed columns only and the columns
// are in the correct order.
rowSources = new RowLocationRetRowSource[numIndexes];
// Fill in the RowSources
SortController[] sorter = new SortController[numIndexes];
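// The table has 0 rows after the replace, so nothing is ever fed to the
// sorts; each one is marked complete right away and will drain empty.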
for (int index = 0; index < numIndexes; index++) {
sorter[index] = tc.openSort(sortIds[index]);
sorter[index].completedInserts();
rowSources[index] = tc.openSortRowSource(sortIds[index]);
}
long[] newIndexCongloms = new long[numIndexes];
// Populate each index
for (int index = 0; index < numIndexes; index++) {
ConglomerateController indexCC;
Properties properties = new Properties();
ConglomerateDescriptor cd;
// Get the ConglomerateDescriptor for the index
cd = td.getConglomerateDescriptor(constants.indexCIDS[index]);
// Build the properties list for the new conglomerate
indexCC = tc.openCompiledConglomerate(false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE, constants.indexSCOCIs[index], indexDCOCIs[index]);
// Get the properties on the old index
indexCC.getInternalTablePropertySet(properties);
/* Create the properties that language supplies when creating the
 * index. (The store doesn't preserve these.)
 */
int indexRowLength = idxRows[index].nColumns();
properties.put("baseConglomerateId", Long.toString(newHeapConglom));
if (cd.getIndexDescriptor().isUnique()) {
properties.put("nUniqueColumns", Integer.toString(indexRowLength - 1));
} else {
properties.put("nUniqueColumns", Integer.toString(indexRowLength));
}
if (cd.getIndexDescriptor().isUniqueWithDuplicateNulls() && !cd.getIndexDescriptor().hasDeferrableChecking()) {
properties.put("uniqueWithDuplicateNulls", Boolean.toString(true));
}
properties.put("rowLocationColumn", Integer.toString(indexRowLength - 1));
properties.put("nKeyFields", Integer.toString(indexRowLength));
indexCC.close();
collation[index] = constants.irgs[index].getColumnCollationIds(td.getColumnDescriptorList());
// We can finally drain the sorter and rebuild (populate) the index.
newIndexCongloms[index] = tc.createAndLoadConglomerate(
        "BTREE",
        idxRows[index].getRowArray(),
        null, // default column sort order
        collation[index],
        properties,
        TransactionController.IS_DEFAULT,
        rowSources[index],
        (long[]) null);
/* Update the DataDictionary
*
* Update sys.sysconglomerates with new conglomerate #, if the
* conglomerate is shared by duplicate indexes, all the descriptors
* for those indexes need to be updated with the new number.
*/
dd.updateConglomerateDescriptor(td.getConglomerateDescriptors(constants.indexCIDS[index]), newIndexCongloms[index], tc);
// Drop the old conglomerate
tc.dropConglomerate(constants.indexCIDS[index]);
}
}
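The conglomerate property arithmetic above is easy to misread. A hedged sketch of just that piece, assuming an index row of k key fields plus one trailing RowLocation (the helper below is invented, not Derby API):
import java.util.Properties;

public class IndexPropsSketch {
    // A minimal sketch of the property arithmetic in emptyIndexes(), for an
    // index row of `keyColumns` key fields plus one trailing RowLocation.
    static Properties indexProps(long baseConglomId, int keyColumns, boolean unique) {
        int indexRowLength = keyColumns + 1;
        Properties p = new Properties();
        p.put("baseConglomerateId", Long.toString(baseConglomId));
        // Unique indexes exclude the RID from the uniqueness check;
        // non-unique indexes include it so duplicate keys still sort apart.
        p.put("nUniqueColumns", Integer.toString(unique ? indexRowLength - 1 : indexRowLength));
        p.put("rowLocationColumn", Integer.toString(indexRowLength - 1)); // RID is last (0-based)
        p.put("nKeyFields", Integer.toString(indexRowLength));
        return p;
    }

    public static void main(String[] args) {
        System.out.println(indexProps(1234L, 2, true));
    }
}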
Use of org.apache.derby.iapi.sql.execute.ExecRow in project derby by apache.
The class MatchingClauseConstantAction, method bufferThenRow:
/**
* <p>
* Construct and buffer a row for the INSERT/UPDATE/DELETE
* action corresponding to this [ NOT ] MATCHED clause. The buffered row
* is built from columns in the passed-in row. The passed-in row is the SELECT list
* of the MERGE statement's driving left join.
* </p>
*/
TemporaryRowHolderImpl bufferThenRow(Activation activation, TemporaryRowHolderImpl thenRows, ExecRow selectRow) throws StandardException {
if (thenRows == null) {
thenRows = createThenRows(activation);
}
ExecRow thenRow = bufferThenRow(activation);
thenRows.insert(thenRow);
return thenRows;
}
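bufferThenRow() runs once per driving-join row that satisfies a [ NOT ] MATCHED clause. A minimal MERGE exercising both a matched and a not-matched clause (Derby supports MERGE as of 10.11; the database and table names are invented):
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class MergeDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:derby:memory:mergeDB;create=true");
             Statement s = conn.createStatement()) {
            s.execute("CREATE TABLE dest(id INT PRIMARY KEY, name VARCHAR(32))");
            s.execute("CREATE TABLE src(id INT PRIMARY KEY, name VARCHAR(32))");
            s.execute("INSERT INTO dest VALUES (1, 'old')");
            s.execute("INSERT INTO src VALUES (1, 'new'), (2, 'brand-new')");
            // Each driving-join row that satisfies a clause is buffered by
            // bufferThenRow(); the buffered "then" rows are applied afterwards.
            s.execute("MERGE INTO dest d USING src s ON d.id = s.id "
                    + "WHEN MATCHED THEN UPDATE SET name = s.name "
                    + "WHEN NOT MATCHED THEN INSERT (id, name) VALUES (s.id, s.name)");
        }
    }
}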