use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
the class DataDictionaryImpl method getConstraints.
/**
* Return a List of UUIDs taken from the specified column of the rows
* matching the indexed criteria. If nothing matches, an empty List is
* returned (never null).
*
* @param uuid The UUID key for the index scan (a constraint id for INDEX1, a table id for INDEX3)
* @param indexId The index id in SYS.SYSCONSTRAINTS
* @param columnNum The column to retrieve
*
* @return a List of UUIDs.
*
* @exception StandardException Thrown on error
*/
public List<UUID> getConstraints(UUID uuid, int indexId, int columnNum) throws StandardException {
ExecIndexRow indexRow1;
ExecRow outRow;
RowLocation baseRowLocation;
ConglomerateController heapCC = null;
ScanController scanController = null;
TransactionController tc;
TabInfoImpl ti = getNonCoreTI(SYSCONSTRAINTS_CATALOG_NUM);
SYSCONSTRAINTSRowFactory rf = (SYSCONSTRAINTSRowFactory) ti.getCatalogRowFactory();
TableDescriptor td = null;
List<UUID> slist = new ArrayList<UUID>();
if (SanityManager.DEBUG) {
SanityManager.ASSERT(indexId == SYSCONSTRAINTSRowFactory.SYSCONSTRAINTS_INDEX1_ID || indexId == SYSCONSTRAINTSRowFactory.SYSCONSTRAINTS_INDEX3_ID, "bad index id, must be one of the indexes on a uuid");
SanityManager.ASSERT(columnNum > 0 && columnNum <= SYSCONSTRAINTSRowFactory.SYSCONSTRAINTS_COLUMN_COUNT, "invalid column number for column to be retrieved");
}
try {
/* Use the UUID orderable in both start and stop positions for the scan */
DataValueDescriptor orderable = getIDValueAsCHAR(uuid);
/* Set up the start/stop position for the scan */
ExecIndexRow keyRow = (ExecIndexRow) exFactory.getIndexableRow(1);
keyRow.setColumn(1, orderable);
// Get the current transaction controller
tc = getTransactionCompile();
outRow = rf.makeEmptyRow();
heapCC = tc.openConglomerate(ti.getHeapConglomerate(), false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_REPEATABLE_READ);
// create an index row template
indexRow1 = getIndexRowFromHeapRow(ti.getIndexRowGenerator(indexId), heapCC.newRowLocationTemplate(), outRow);
// just interested in one column
DataValueDescriptor[] rowTemplate = new DataValueDescriptor[SYSCONSTRAINTSRowFactory.SYSCONSTRAINTS_COLUMN_COUNT];
FormatableBitSet columnToGetSet = new FormatableBitSet(SYSCONSTRAINTSRowFactory.SYSCONSTRAINTS_COLUMN_COUNT);
columnToGetSet.set(columnNum - 1);
rowTemplate[columnNum - 1] = new SQLChar();
// Scan the index and go to the data pages for qualifying rows
scanController = tc.openScan(
    ti.getIndexConglomerate(indexId),                 // conglomerate to open
    false,                                            // don't hold open across commit
    0,                                                // for read
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_REPEATABLE_READ,  // RESOLVE: should be level 2
    (FormatableBitSet) null,                          // all fields as objects
    keyRow.getRowArray(),                             // start position - exact key match
    ScanController.GE,                                // startSearchOperation
    null,                                             // scanQualifier (none)
    keyRow.getRowArray(),                             // stop position - exact key match
    ScanController.GT);                               // stopSearchOperation
while (scanController.fetchNext(indexRow1.getRowArray())) {
baseRowLocation = (RowLocation) indexRow1.getColumn(indexRow1.nColumns());
// get the row and grab the uuid
boolean base_row_exists = heapCC.fetch(baseRowLocation, rowTemplate, columnToGetSet);
if (SanityManager.DEBUG) {
// it should not be possible for the heap row to disappear while
// holding a scan cursor on the index at ISOLATION_REPEATABLE_READ.
SanityManager.ASSERT(base_row_exists, "base row not found");
}
slist.add(uuidFactory.recreateUUID((String) ((DataValueDescriptor) rowTemplate[columnNum - 1]).getObject()));
}
} finally {
if (heapCC != null) {
heapCC.close();
}
if (scanController != null) {
scanController.close();
}
}
return slist;
}
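For orientation, here is a minimal sketch of how a caller inside DataDictionaryImpl might use this helper to collect the UUIDs of all constraints defined on a table by scanning the index keyed on the table id. It is a hypothetical fragment, not code from the Derby source: the td variable and the exact constraint-id column constant are assumptions for illustration.
// Hypothetical caller: fetch the UUIDs of every constraint on a table by
// scanning SYSCONSTRAINTS_INDEX3 (keyed on the table id) and returning the
// constraint id column. The column constant below is assumed; check
// SYSCONSTRAINTSRowFactory for the actual column numbers.
List<UUID> constraintIds = getConstraints(
    td.getUUID(),                                           // key: table UUID
    SYSCONSTRAINTSRowFactory.SYSCONSTRAINTS_INDEX3_ID,      // index on table id
    SYSCONSTRAINTSRowFactory.SYSCONSTRAINTS_CONSTRAINTID);  // column to return
for (UUID constraintId : constraintIds) {
    // each UUID identifies one row in SYS.SYSCONSTRAINTS
}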
use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
the class BTreeController method doIns.
/**
* Insert a row into the conglomerate.
*
* @param rowToInsert The row to insert into the conglomerate. The stored
* representations of the row's columns are copied into a new row
* somewhere in the conglomerate.
*
* @return Returns 0 if the insert succeeded. Returns
* ConglomerateController.ROWISDUPLICATE if the conglomerate supports
* uniqueness checks, was created to disallow duplicates, and the inserted
* row's key columns duplicate those of a row already in the table. Other
* insert failures raise a StandardException.
*
* @exception StandardException Standard exception policy.
*/
private int doIns(DataValueDescriptor[] rowToInsert) throws StandardException {
LeafControlRow targetleaf = null;
LeafControlRow save_targetleaf = null;
int insert_slot = 0;
int result_slot = 0;
int ret_val = 0;
boolean reclaim_deleted_rows_attempted = false;
if (scratch_template == null) {
scratch_template = runtime_mem.get_template(getRawTran());
}
if (SanityManager.DEBUG)
this.isIndexableRowConsistent(rowToInsert);
// Create the objects needed for the insert.
// RESOLVE (mikem) - should we cache this in the controller?
SearchParameters sp = new SearchParameters(rowToInsert, SearchParameters.POSITION_LEFT_OF_PARTIAL_KEY_MATCH, scratch_template, this, false);
// RowLocation column is in last column of template.
FetchDescriptor lock_fetch_desc = RowUtil.getFetchDescriptorConstant(scratch_template.length - 1);
RowLocation lock_row_loc = (RowLocation) scratch_template[scratch_template.length - 1];
if (get_insert_row_lock) {
// I don't hold any latch yet so I can wait on this lock, so I
// don't care about return value from this call. This
// lock can only wait if the base table row was inserted in a
// separate transaction which never happens in sql tables, but
// does happen in the sparse indexes that synchronization builds.
this.getLockingPolicy().lockNonScanRow(this.getConglomerate(), (LeafControlRow) null, (LeafControlRow) null, rowToInsert, (ConglomerateController.LOCK_INS | ConglomerateController.LOCK_UPD));
}
while (true) {
// Search the location at which the new row should be inserted.
if (SanityManager.DEBUG)
SanityManager.ASSERT(this.container != null);
targetleaf = (LeafControlRow) ControlRow.get(this, BTree.ROOTPAGEID).search(sp);
// Row locking - first lock row previous to row being inserted:
// o if (sp.resultExact) then the row must be deleted and
// we will be replacing it with the new row, lock
// the row before the slot as the previous key.
// o else
// we will be inserting after the current slot so
// lock the current slot as the previous key.
//
int slot_after_previous = (sp.resultExact ? sp.resultSlot : sp.resultSlot + 1);
boolean latch_released = false;
latch_released = !this.getLockingPolicy().lockNonScanPreviousRow(targetleaf, slot_after_previous, lock_fetch_desc, scratch_template, lock_row_loc, this, (ConglomerateController.LOCK_INS_PREVKEY | ConglomerateController.LOCK_UPD), TransactionManager.LOCK_INSTANT_DURATION);
// special test to see if latch release code works
if (SanityManager.DEBUG) {
latch_released = test_errors(this, "BTreeController_doIns", null, this.getLockingPolicy(), targetleaf, latch_released);
}
if (latch_released) {
// Had to release the latch in order to get the lock, probably
// because of a forward scanner. Re-search the tree and try again.
targetleaf = null;
continue;
}
// An exact match means a row with the same key is already on the page
// (possibly a deleted row).
if (sp.resultExact) {
result_slot = insert_slot = sp.resultSlot;
if (this.getConglomerate().nKeyFields != this.getConglomerate().nUniqueColumns) {
// The key fields match, but not the row location. We
// must wait on the lock on the other row location before
// proceeding, so as to serialize behind any work being done
// to the row as part of another transaction.
latch_released = !this.getLockingPolicy().lockNonScanRowOnPage(targetleaf, insert_slot, lock_fetch_desc, scratch_template, lock_row_loc, ConglomerateController.LOCK_UPD);
if (latch_released) {
// Had to release latch in order to get the lock,
// probably to wait for the deleting xact to commit or
// abort. Re-search the tree and try again.
targetleaf = null;
continue;
}
}
if (!(targetleaf.page.isDeletedAtSlot(insert_slot))) {
// attempt to insert a duplicate into the index.
ret_val = ConglomerateController.ROWISDUPLICATE;
break;
} else {
if (this.getConglomerate().nKeyFields == this.getConglomerate().nUniqueColumns) {
// The row that we found deleted is exactly the new row.
targetleaf.page.deleteAtSlot(insert_slot, false, this.btree_undo);
break;
} else if (this.getConglomerate().nUniqueColumns == (this.getConglomerate().nKeyFields - 1)) {
// The row that we found deleted has matching keys
// which form the unique key fields,
// but the nonkey fields may differ (for now the
// heap rowlocation is the only nonkey field
// allowed).
// RESOLVE BT39 (mikem) - when/if heap row location
// is not fixed we must handle update failing for
// out of space and split if it does. For now
// if the update fails because of lack of space
// an exception is thrown and the statement is
// backed out. Should not happen very often.
targetleaf.page.deleteAtSlot(insert_slot, false, this.btree_undo);
boolean update_succeeded = true;
try {
if (runtime_mem.hasCollatedTypes()) {
// See DERBY-5367.
// There are types in the BTree with a
// collation different than UCS BASIC, we
// update all fields to make sure they hold
// the correct values.
// NOTE: We could optimize here by only
// updating the fields that actually hold
// collated types.
int rowsToUpdate = getConglomerate().nKeyFields;
for (int i = 0; i < rowsToUpdate; i++) {
targetleaf.page.updateFieldAtSlot(insert_slot, i, (DataValueDescriptor) RowUtil.getColumn(rowToInsert, (FormatableBitSet) null, i), this.btree_undo);
}
} else {
// There are no collated types in the BTree,
// which means that the values currently
// stored in the undeleted row are correct.
// We simply update the row location to point
// to the correct row in the heap.
int rowloc_index = this.getConglomerate().nKeyFields - 1;
targetleaf.page.updateFieldAtSlot(insert_slot, rowloc_index, (DataValueDescriptor) RowUtil.getColumn(rowToInsert, (FormatableBitSet) null, rowloc_index), this.btree_undo);
}
} catch (StandardException se) {
// check if the exception is for out of space
if (!se.getMessageId().equals(SQLState.DATA_NO_SPACE_FOR_RECORD)) {
throw se;
}
// The statement exception is
// because the update failed for out of
// space (ie. the field got longer and there
// is no room on the page for the expanded
// field). Address this error by falling
// through the code and doing a split.
// update failed.
update_succeeded = false;
targetleaf.page.deleteAtSlot(insert_slot, true, this.btree_undo);
}
if (update_succeeded)
break;
} else {
// Can only happen with non key fields in the btree.
throw (StandardException.newException(SQLState.BTREE_UNIMPLEMENTED_FEATURE));
}
}
} else if (targetleaf.page.recordCount() - 1 < BTree.maxRowsPerPage) {
// The row wasn't there, so try to insert it
// on the page returned by the search.
insert_slot = sp.resultSlot + 1;
result_slot = insert_slot + 1;
if (getConglomerate().isUniqueWithDuplicateNulls()) {
int ret = compareLeftAndRightSiblings(rowToInsert, insert_slot, targetleaf);
if (ret == MATCH_FOUND) {
ret_val = ConglomerateController.ROWISDUPLICATE;
break;
}
if (ret == RESCAN_REQUIRED)
continue;
}
if (targetleaf.page.insertAtSlot(insert_slot, rowToInsert, (FormatableBitSet) null, this.btree_undo, Page.INSERT_DEFAULT, AccessFactoryGlobals.BTREE_OVERFLOW_THRESHOLD) != null) {
break;
}
if (targetleaf.page.recordCount() <= 2) {
throw StandardException.newException(SQLState.BTREE_NO_SPACE_FOR_KEY);
}
// start splitting ...
}
if (getConglomerate().isUniqueWithDuplicateNulls()) {
int ret = compareLeftAndRightSiblings(rowToInsert, insert_slot, targetleaf);
if (ret == MATCH_FOUND) {
ret_val = ConglomerateController.ROWISDUPLICATE;
break;
}
if (ret == RESCAN_REQUIRED)
continue;
}
// Create some space by splitting pages.
// determine where in page/table row causing split would go
int flag = 0;
if (insert_slot == 1) {
flag |= ControlRow.SPLIT_FLAG_FIRST_ON_PAGE;
if (targetleaf.isLeftmostLeaf())
flag |= ControlRow.SPLIT_FLAG_FIRST_IN_TABLE;
} else if (insert_slot == targetleaf.page.recordCount()) {
flag |= ControlRow.SPLIT_FLAG_LAST_ON_PAGE;
if (targetleaf.isRightmostLeaf())
flag |= ControlRow.SPLIT_FLAG_LAST_IN_TABLE;
}
long targetleaf_pageno = targetleaf.page.getPageNumber();
if ((targetleaf.page.recordCount() - targetleaf.page.nonDeletedRecordCount()) <= 0) {
// Don't do reclaim work if there are no deleted records.
reclaim_deleted_rows_attempted = true;
}
BranchRow branchrow = BranchRow.createBranchRowFromOldLeafRow(rowToInsert, targetleaf_pageno);
// Release the target page because (a) it may change as a
// result of the split, (b) the latch ordering requires us
// to acquire latches from top to bottom, and (c) this
// loop should be done in a system transaction.
targetleaf.release();
targetleaf = null;
start_xact_and_dosplit(!reclaim_deleted_rows_attempted, targetleaf_pageno, scratch_template, branchrow.getRow(), flag);
// only attempt to reclaim deleted rows once, otherwise the
// split loop could loop forever, trying to reclaim a deleted
// row that was not committed.
reclaim_deleted_rows_attempted = true;
// RESOLVE (mikem) possible optimization could be to save
// split location and look there first, if this has
// already caused a split. Or even return a latched page
// from splitFor(). For now just execute the loop again
// searching the tree for somewhere to put the row.
}
// set in-memory hint of where last row on page was inserted.
targetleaf.last_search_result = result_slot;
// Check that page just updated is consistent.
if (SanityManager.DEBUG) {
if (SanityManager.DEBUG_ON("enableBtreeConsistencyCheck")) {
targetleaf.checkConsistency(this, null, true);
}
}
// Done with the target page.
targetleaf.release();
targetleaf = null;
// return the status about insert - 0 is ok, or duplicate status.
return (ret_val);
}
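To make the return-value contract concrete, here is a hypothetical caller-side fragment in the spirit of Derby's index-maintenance code; the indexController, indexName and tableName variables are assumptions for illustration, and the duplicate-key SQLState constant is the one Derby uses elsewhere for this situation.
// Hypothetical handling of the status returned by the b-tree insert path.
int insertStatus = indexController.insert(rowToInsert);
if (insertStatus == ConglomerateController.ROWISDUPLICATE) {
    // The index disallows duplicates and an equal, non-deleted key is
    // already present: report a duplicate-key violation to the caller.
    throw StandardException.newException(
        SQLState.LANG_DUPLICATE_KEY_CONSTRAINT, indexName, tableName);
}
// insertStatus == 0: the row was inserted (possibly after a page split).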
use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
the class BTreeController method compareRowsForInsert.
/**
* Compares two rows for insert. If the two rows are not equal,
* {@link #NO_MATCH} is returned. Otherwise, it tries to get a lock on
* the row in the tree. If the lock is obtained without waiting,
* {@link #MATCH_FOUND} is returned (even if the row has been deleted).
* Otherwise, {@link #RESCAN_REQUIRED} is returned to indicate that the
* latches have been released and the B-tree must be rescanned.
*
* If {@code MATCH_FOUND} is returned, the caller should check whether
* the row has been deleted. If so, it may have to move to check the
* adjacent rows to be sure that there is no non-deleted duplicate row.
*
* If {@code MATCH_FOUND} or {@code RESCAN_REQUIRED} is returned, the
* transaction will hold an update lock on the specified record when
* the method returns.
*
* <b>Note!</b> This method should only be called when the index is almost
* unique (that is, a non-unique index backing a unique constraint).
*
* @param originalRow row from the tree
* @param newRow row to be inserted
* @param leaf leaf where originalRow resides
* @param slot slot where originalRow resides
* @return {@code NO_MATCH} if no duplicate is found,
* {@code MATCH_FOUND} if a duplicate is found, or
* {@code RESCAN_REQUIRED} if the B-tree must be rescanned
*/
private int compareRowsForInsert(DataValueDescriptor[] originalRow, DataValueDescriptor[] newRow, LeafControlRow leaf, int slot) throws StandardException {
for (int i = 0; i < originalRow.length - 1; i++) {
if (!originalRow[i].equals(newRow[i]))
return NO_MATCH;
}
// It might be a deleted record; try getting a lock on it.
DataValueDescriptor[] template = runtime_mem.get_template(getRawTran());
FetchDescriptor lock_fetch_desc = RowUtil.getFetchDescriptorConstant(template.length - 1);
RowLocation lock_row_loc = (RowLocation) scratch_template[scratch_template.length - 1];
boolean latch_released = !getLockingPolicy().lockNonScanRowOnPage(leaf, slot, lock_fetch_desc, template, lock_row_loc, ConglomerateController.LOCK_UPD);
// If the latch was released, some other transaction might have inserted a
// record and might have changed the tree by now.
if (latch_released)
return RESCAN_REQUIRED;
return MATCH_FOUND;
}
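The three-way status is consumed by a retry loop like the one in doIns above. The following is a hypothetical dispatch fragment, with the surrounding loop and variables assumed for illustration.
// Hypothetical use of the three-way status inside an insert retry loop.
int status = compareRowsForInsert(rowOnPage, rowToInsert, targetleaf, slot);
if (status == MATCH_FOUND) {
    // A row with the same key exists and is now locked; the caller still has
    // to check whether that row is deleted before declaring a duplicate.
    ret_val = ConglomerateController.ROWISDUPLICATE;
} else if (status == RESCAN_REQUIRED) {
    // The latch was released while waiting for the row lock, so the tree may
    // have changed; drop the leaf and search again from the root.
    targetleaf = null;
} else {
    // NO_MATCH: the neighbouring row has a different key; no duplicate here.
}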
use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
the class B2IController method insert.
/**
* Insert a row into the conglomerate.
* @see ConglomerateController#insert
*
* @exception StandardException Standard exception policy.
*/
public int insert(DataValueDescriptor[] row) throws StandardException {
if (SanityManager.DEBUG) {
if (this.container != null) {
SanityManager.ASSERT(this.getConglomerate() instanceof B2I);
RowLocation rowloc = (RowLocation) row[((B2I) (this.getConglomerate())).rowLocationColumn];
SanityManager.ASSERT(!rowloc.isNull(), "RowLocation value is null");
}
}
return (super.insert(row));
}
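The assertion reflects the B2I row layout: the index row carries the key columns followed by the RowLocation of the base-table row in the column named by rowLocationColumn (the last column). Below is a hypothetical sketch of how such a row might be assembled before calling insert; the heapController, baseRow, keyValue and indexController variables are assumptions for illustration.
// Hypothetical construction of a B2I index row: key column(s), then the
// RowLocation of the base-table row that was just inserted into the heap.
RowLocation baseRowLocation = heapController.newRowLocationTemplate();
heapController.insertAndFetchLocation(baseRow, baseRowLocation);
DataValueDescriptor[] indexRow = new DataValueDescriptor[2];
indexRow[0] = keyValue;           // key column derived from the base row
indexRow[1] = baseRowLocation;    // RowLocation, last column of the index row
int status = indexController.insert(indexRow);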