use of org.apache.derby.iapi.store.raw.FetchDescriptor in project derby by apache.
the class BTreeScan method savePositionAndReleasePage.
/**
 * Save the current scan position by key and release the latch on the leaf
 * that's being scanned. This method should be called if the latch on a
 * leaf needs to be released in the middle of the scan. The scan can
 * later reposition to the saved position by calling {@code reposition()}.
 *
 * @param partialKey known parts of the key that should be saved, or
 * {@code null} if the entire key is unknown and will have to be fetched
 * from the page
 * @param vcols an array which tells which columns of the partial key are
 * valid (key columns that have 0 in this array are not valid, and their
 * values must be fetched from the page), or {@code null} if all the
 * columns are valid
 * @throws StandardException if an error occurs while saving the position
 * @see #reposition(BTreeRowPosition, boolean)
 */
void savePositionAndReleasePage(DataValueDescriptor[] partialKey, int[] vcols)
        throws StandardException {
    final Page page = scan_position.current_leaf.getPage();
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(page.isLatched(), "Page is not latched");
        SanityManager.ASSERT(scan_position.current_positionKey == null,
                "Scan position already saved");
        if (partialKey == null) {
            SanityManager.ASSERT(vcols == null);
        }
        if (vcols != null) {
            SanityManager.ASSERT(partialKey != null);
            SanityManager.ASSERT(vcols.length <= partialKey.length);
        }
    }
    try {
        DataValueDescriptor[] fullKey = scan_position.getKeyTemplate();
        FetchDescriptor fetchDescriptor = null;
        boolean haveAllColumns = false;
        if (partialKey != null) {
            int copiedCols = 0;
            final int partialKeyLength =
                    (vcols == null) ? partialKey.length : vcols.length;
            for (int i = 0; i < partialKeyLength; i++) {
                if (vcols == null || vcols[i] != 0) {
                    fullKey[i].setValue(partialKey[i]);
                    copiedCols++;
                }
            }
            if (copiedCols < fullKey.length) {
                fetchDescriptor = scan_position.getFetchDescriptorForSaveKey(
                        vcols, fullKey.length);
            } else {
                haveAllColumns = true;
            }
        }
        if (!haveAllColumns) {
            RecordHandle rh = page.fetchFromSlot(
                    (RecordHandle) null, scan_position.current_slot,
                    fullKey, fetchDescriptor, true);
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(rh != null, "Row not found");
            }
        }
        scan_position.current_positionKey = fullKey;
        // Don't null out current_rh, we might be able to use it later if
        // no rows are moved off the page.
        // scan_position.current_rh = null;
        scan_position.versionWhenSaved = page.getPageVersion();
        scan_position.current_slot = Page.INVALID_SLOT_NUMBER;
    } finally {
        scan_position.current_leaf.release();
        scan_position.current_leaf = null;
    }
}
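The logic above combines two sources for the saved key: columns already known from partialKey (as flagged by vcols) are copied into the key template, and only the remaining columns are fetched from the leaf page through a FetchDescriptor. The standalone sketch below illustrates that merge pattern in isolation; the class, interface, and method names are invented for illustration and deliberately avoid Derby's own types.

// A minimal, self-contained sketch of the "merge partial key, fetch the rest"
// pattern used above. PartialKeyMerger, ColumnSource and fetchColumn are
// illustrative stand-ins, not Derby classes.
public class PartialKeyMerger {

    /** Pretend source of column values that would otherwise come from the page. */
    interface ColumnSource {
        Object fetchColumn(int columnId);
    }

    /**
     * Fills fullKey: columns marked valid in vcols are copied from partialKey,
     * the remaining ones are fetched from the source (the "page").
     */
    static void mergeKey(Object[] fullKey, Object[] partialKey, int[] vcols,
                         ColumnSource page) {
        for (int i = 0; i < fullKey.length; i++) {
            boolean known = partialKey != null
                    && i < partialKey.length
                    && (vcols == null || (i < vcols.length && vcols[i] != 0));
            fullKey[i] = known ? partialKey[i] : page.fetchColumn(i);
        }
    }

    public static void main(String[] args) {
        Object[] partial = { "a", null, null };
        int[] valid = { 1, 0, 0 };                 // only column 0 is known
        Object[] full = new Object[3];
        mergeKey(full, partial, valid, i -> "page-col-" + i);
        System.out.println(java.util.Arrays.toString(full));
        // prints [a, page-col-1, page-col-2]
    }
}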
use of org.apache.derby.iapi.store.raw.FetchDescriptor in project derby by apache.
the class BTreeScan method init.
/*
** Public Methods of BTreeScan
*/
/**
 * Initialize the scan for use.
 * <p>
 * Any changes to this method may have to be reflected in close as well.
 * <p>
 * The btree init opens the container (super.init), and stores away the
 * state of the qualifiers. The actual searching for the first position
 * is delayed until the first next() call.
 *
 * @exception StandardException Standard exception policy.
 */
public void init(TransactionManager xact_manager, Transaction rawtran,
        boolean hold, int open_mode, int lock_level,
        BTreeLockingPolicy btree_locking_policy,
        FormatableBitSet scanColumnList,
        DataValueDescriptor[] startKeyValue, int startSearchOperator,
        Qualifier[][] qualifier,
        DataValueDescriptor[] stopKeyValue, int stopSearchOperator,
        BTree conglomerate, LogicalUndo undo,
        StaticCompiledOpenConglomInfo static_info,
        DynamicCompiledOpenConglomInfo dynamic_info)
        throws StandardException {
    super.init(xact_manager, xact_manager, (ContainerHandle) null, rawtran,
            hold, open_mode, lock_level, btree_locking_policy,
            conglomerate, undo, dynamic_info);
    this.init_rawtran = rawtran;
    this.init_forUpdate =
            ((open_mode & ContainerHandle.MODE_FORUPDATE) ==
                 ContainerHandle.MODE_FORUPDATE);
    // Keep track of whether this scan should use update locks.
    this.init_useUpdateLocks =
            ((open_mode & ContainerHandle.MODE_USE_UPDATE_LOCKS) != 0);
    this.init_hold = hold;
    this.init_template = runtime_mem.get_template(getRawTran());
    this.init_scanColumnList = scanColumnList;
    this.init_lock_fetch_desc =
            RowUtil.getFetchDescriptorConstant(init_template.length - 1);
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(init_lock_fetch_desc.getMaxFetchColumnId() ==
                (init_template.length - 1));
        SanityManager.ASSERT(
                (init_lock_fetch_desc.getValidColumnsArray())[init_template.length - 1] == 1);
    }
    // note that we don't process qualifiers in btree fetches
    this.init_fetchDesc = new FetchDescriptor(
            init_template.length, init_scanColumnList, (Qualifier[][]) null);
    initScanParams(
            startKeyValue, startSearchOperator,
            qualifier, stopKeyValue, stopSearchOperator);
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(TemplateRow.checkColumnTypes(
                getRawTran().getDataValueFactory(),
                this.getConglomerate().format_ids,
                this.getConglomerate().collation_ids,
                init_template));
    }
    // System.out.println("initializing scan:" + this);
    // initialize default locking operation for the scan.
    this.lock_operation =
            (init_forUpdate
                 ? ConglomerateController.LOCK_UPD
                 : ConglomerateController.LOCK_READ);
    if (init_useUpdateLocks) {
        this.lock_operation |= ConglomerateController.LOCK_UPDATE_LOCKS;
    }
    // System.out.println("Btree scan: " + this);
}
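Note that init builds two descriptors with different column coverage: init_fetchDesc selects whatever columns the caller put in scanColumnList, while init_lock_fetch_desc selects only the last template column, the RowLocation used for row locking. The sketch below is a simplified illustration of that idea, using java.util.BitSet as a hypothetical stand-in for FormatableBitSet; it is not Derby code.

// Illustrative sketch of why the scan keeps two column selections: one for the
// columns the caller asked for, and one that names only the last column (the
// RowLocation) when a row merely needs to be locked.
import java.util.BitSet;

public class ScanColumnSelection {

    /** Returns the column ids selected by validColumns, or all ids when null. */
    static int[] selectedColumns(int rowWidth, BitSet validColumns) {
        if (validColumns == null) {
            int[] all = new int[rowWidth];
            for (int i = 0; i < rowWidth; i++) all[i] = i;
            return all;
        }
        return validColumns.stream().toArray();
    }

    public static void main(String[] args) {
        int rowWidth = 4;                          // e.g. 3 key columns + RowLocation

        BitSet scanColumnList = new BitSet(rowWidth);
        scanColumnList.set(0);                     // caller only reads column 0
        System.out.println(java.util.Arrays.toString(
                selectedColumns(rowWidth, scanColumnList)));   // [0]

        BitSet lockOnly = new BitSet(rowWidth);
        lockOnly.set(rowWidth - 1);                // only the RowLocation column
        System.out.println(java.util.Arrays.toString(
                selectedColumns(rowWidth, lockOnly)));         // [3]
    }
}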
use of org.apache.derby.iapi.store.raw.FetchDescriptor in project derby by apache.
the class BTreeController method doIns.
/**
 * Insert a row into the conglomerate.
 *
 * @param rowToInsert The row to insert into the conglomerate. The stored
 * representations of the row's columns are copied into a new row
 * somewhere in the conglomerate.
 *
 * @return Returns 0 if the insert succeeded. Returns
 * ConglomerateController.ROWISDUPLICATE if the conglomerate supports
 * uniqueness checks, was created to disallow duplicates, and the inserted
 * row had key columns which duplicated those of a row already in the
 * table. Other insert failures raise StandardExceptions.
 *
 * @exception StandardException Standard exception policy.
 */
private int doIns(DataValueDescriptor[] rowToInsert) throws StandardException {
    LeafControlRow targetleaf = null;
    LeafControlRow save_targetleaf = null;
    int insert_slot = 0;
    int result_slot = 0;
    int ret_val = 0;
    boolean reclaim_deleted_rows_attempted = false;
    if (scratch_template == null) {
        scratch_template = runtime_mem.get_template(getRawTran());
    }
    if (SanityManager.DEBUG) {
        this.isIndexableRowConsistent(rowToInsert);
    }
    // Create the objects needed for the insert.
    // RESOLVE (mikem) - should we cache this in the controller?
    SearchParameters sp = new SearchParameters(
            rowToInsert,
            SearchParameters.POSITION_LEFT_OF_PARTIAL_KEY_MATCH,
            scratch_template, this, false);
    // RowLocation column is in last column of template.
    FetchDescriptor lock_fetch_desc =
            RowUtil.getFetchDescriptorConstant(scratch_template.length - 1);
    RowLocation lock_row_loc =
            (RowLocation) scratch_template[scratch_template.length - 1];
    if (get_insert_row_lock) {
        // I don't hold any latch yet so I can wait on this lock, so I
        // don't care about return value from this call. This
        // lock can only wait if the base table row was inserted in a
        // separate transaction which never happens in sql tables, but
        // does happen in the sparse indexes that synchronization builds.
        this.getLockingPolicy().lockNonScanRow(
                this.getConglomerate(),
                (LeafControlRow) null, (LeafControlRow) null,
                rowToInsert,
                (ConglomerateController.LOCK_INS | ConglomerateController.LOCK_UPD));
    }
    while (true) {
        // Search for the location at which the new row should be inserted.
        if (SanityManager.DEBUG) {
            SanityManager.ASSERT(this.container != null);
        }
        targetleaf = (LeafControlRow)
                ControlRow.get(this, BTree.ROOTPAGEID).search(sp);
        // Row locking - first lock row previous to row being inserted:
        //   o if (sp.resultExact) then the row must be deleted and
        //     we will be replacing it with the new row, lock
        //     the row before the slot as the previous key.
        //   o else
        //     we will be inserting after the current slot so
        //     lock the current slot as the previous key.
        //
        int slot_after_previous =
                (sp.resultExact ? sp.resultSlot : sp.resultSlot + 1);
        boolean latch_released = false;
        latch_released = !this.getLockingPolicy().lockNonScanPreviousRow(
                targetleaf, slot_after_previous,
                lock_fetch_desc, scratch_template, lock_row_loc,
                this,
                (ConglomerateController.LOCK_INS_PREVKEY |
                 ConglomerateController.LOCK_UPD),
                TransactionManager.LOCK_INSTANT_DURATION);
        // special test to see if latch release code works
        if (SanityManager.DEBUG) {
            latch_released = test_errors(
                    this, "BTreeController_doIns", null,
                    this.getLockingPolicy(), targetleaf, latch_released);
        }
        if (latch_released) {
            // Had to release latch in order to get the lock, probably
            // because of a forward scanner; re-search the tree and try again.
            targetleaf = null;
            continue;
        }
        // If the key is already in the tree (sp.resultExact), the insert can
        // only proceed if the matching row is a deleted row.
        if (sp.resultExact) {
            result_slot = insert_slot = sp.resultSlot;
            if (this.getConglomerate().nKeyFields !=
                    this.getConglomerate().nUniqueColumns) {
                // The key fields match, but not the row location. We
                // must wait on the lock on the other row location before
                // proceeding, so as to serialize behind any work being done
                // to the row as part of another transaction.
                latch_released = !this.getLockingPolicy().lockNonScanRowOnPage(
                        targetleaf, insert_slot,
                        lock_fetch_desc, scratch_template, lock_row_loc,
                        ConglomerateController.LOCK_UPD);
                if (latch_released) {
                    // Had to release latch in order to get the lock,
                    // probably to wait for deleting xact to commit or
                    // abort. Re-search the tree and try again.
                    targetleaf = null;
                    continue;
                }
            }
            if (!(targetleaf.page.isDeletedAtSlot(insert_slot))) {
                // attempt to insert a duplicate into the index.
                ret_val = ConglomerateController.ROWISDUPLICATE;
                break;
            } else {
                if (this.getConglomerate().nKeyFields ==
                        this.getConglomerate().nUniqueColumns) {
                    // The row that we found deleted is exactly the new row.
                    targetleaf.page.deleteAtSlot(
                            insert_slot, false, this.btree_undo);
                    break;
                } else if (this.getConglomerate().nUniqueColumns ==
                        (this.getConglomerate().nKeyFields - 1)) {
                    // The row that we found deleted has matching keys
                    // which form the unique key fields,
                    // but the nonkey fields may differ (for now the
                    // heap rowlocation is the only nonkey field
                    // allowed).
                    // RESOLVE BT39 (mikem) - when/if heap row location
                    // is not fixed we must handle update failing for
                    // out of space and split if it does. For now
                    // if the update fails because of lack of space
                    // an exception is thrown and the statement is
                    // backed out. Should not happen very often.
                    targetleaf.page.deleteAtSlot(
                            insert_slot, false, this.btree_undo);
                    boolean update_succeeded = true;
                    try {
                        if (runtime_mem.hasCollatedTypes()) {
                            // See DERBY-5367.
                            // There are types in the BTree with a
                            // collation different than UCS BASIC, we
                            // update all fields to make sure they hold
                            // the correct values.
                            // NOTE: We could optimize here by only
                            // updating the fields that actually hold
                            // collated types.
                            int rowsToUpdate = getConglomerate().nKeyFields;
                            for (int i = 0; i < rowsToUpdate; i++) {
                                targetleaf.page.updateFieldAtSlot(
                                        insert_slot, i,
                                        (DataValueDescriptor) RowUtil.getColumn(
                                                rowToInsert,
                                                (FormatableBitSet) null, i),
                                        this.btree_undo);
                            }
                        } else {
                            // There are no collated types in the BTree,
                            // which means that the values currently
                            // stored in the undeleted row are correct.
                            // We simply update the row location to point
                            // to the correct row in the heap.
                            int rowloc_index =
                                    this.getConglomerate().nKeyFields - 1;
                            targetleaf.page.updateFieldAtSlot(
                                    insert_slot, rowloc_index,
                                    (DataValueDescriptor) RowUtil.getColumn(
                                            rowToInsert,
                                            (FormatableBitSet) null,
                                            rowloc_index),
                                    this.btree_undo);
                        }
                    } catch (StandardException se) {
                        // check if the exception is for out of space
                        if (!se.getMessageId().equals(
                                SQLState.DATA_NO_SPACE_FOR_RECORD)) {
                            throw se;
                        }
                        // The statement exception is
                        // because the update failed for out of
                        // space (ie. the field got longer and there
                        // is no room on the page for the expanded
                        // field). Address this error by falling
                        // through the code and doing a split.
                        // update failed.
                        update_succeeded = false;
                        targetleaf.page.deleteAtSlot(
                                insert_slot, true, this.btree_undo);
                    }
                    if (update_succeeded) {
                        break;
                    }
                } else {
                    // Can only happen with non key fields in the btree.
                    throw (StandardException.newException(
                            SQLState.BTREE_UNIMPLEMENTED_FEATURE));
                }
            }
        } else if (targetleaf.page.recordCount() - 1 < BTree.maxRowsPerPage) {
            // The row wasn't there, so try to insert it
            // on the page returned by the search.
            insert_slot = sp.resultSlot + 1;
            result_slot = insert_slot + 1;
            if (getConglomerate().isUniqueWithDuplicateNulls()) {
                int ret = compareLeftAndRightSiblings(
                        rowToInsert, insert_slot, targetleaf);
                if (ret == MATCH_FOUND) {
                    ret_val = ConglomerateController.ROWISDUPLICATE;
                    break;
                }
                if (ret == RESCAN_REQUIRED) {
                    continue;
                }
            }
            if (targetleaf.page.insertAtSlot(
                    insert_slot, rowToInsert, (FormatableBitSet) null,
                    this.btree_undo, Page.INSERT_DEFAULT,
                    AccessFactoryGlobals.BTREE_OVERFLOW_THRESHOLD) != null) {
                break;
            }
            if (targetleaf.page.recordCount() <= 2) {
                throw StandardException.newException(
                        SQLState.BTREE_NO_SPACE_FOR_KEY);
            }
            // start splitting ...
        }
        if (getConglomerate().isUniqueWithDuplicateNulls()) {
            int ret = compareLeftAndRightSiblings(
                    rowToInsert, insert_slot, targetleaf);
            if (ret == MATCH_FOUND) {
                ret_val = ConglomerateController.ROWISDUPLICATE;
                break;
            }
            if (ret == RESCAN_REQUIRED) {
                continue;
            }
        }
        // Create some space by splitting pages.
        // determine where in page/table row causing split would go
        int flag = 0;
        if (insert_slot == 1) {
            flag |= ControlRow.SPLIT_FLAG_FIRST_ON_PAGE;
            if (targetleaf.isLeftmostLeaf()) {
                flag |= ControlRow.SPLIT_FLAG_FIRST_IN_TABLE;
            }
        } else if (insert_slot == targetleaf.page.recordCount()) {
            flag |= ControlRow.SPLIT_FLAG_LAST_ON_PAGE;
            if (targetleaf.isRightmostLeaf()) {
                flag |= ControlRow.SPLIT_FLAG_LAST_IN_TABLE;
            }
        }
        long targetleaf_pageno = targetleaf.page.getPageNumber();
        if ((targetleaf.page.recordCount() -
                targetleaf.page.nonDeletedRecordCount()) <= 0) {
            // Don't do reclaim work if there are no deleted records.
            reclaim_deleted_rows_attempted = true;
        }
        BranchRow branchrow = BranchRow.createBranchRowFromOldLeafRow(
                rowToInsert, targetleaf_pageno);
        // Release the target page because (a) it may change as a
        // result of the split, (b) the latch ordering requires us
        // to acquire latches from top to bottom, and (c) this
        // loop should be done in a system transaction.
        targetleaf.release();
        targetleaf = null;
        start_xact_and_dosplit(
                !reclaim_deleted_rows_attempted, targetleaf_pageno,
                scratch_template, branchrow.getRow(), flag);
        // only attempt to reclaim deleted rows once, otherwise the
        // split loop could loop forever, trying to reclaim a deleted
        // row that was not committed.
        reclaim_deleted_rows_attempted = true;
        // RESOLVE (mikem) possible optimization could be to save
        // split location and look there first, if this has
        // already caused a split. Or even return a latched page
        // from splitFor(). For now just execute the loop again
        // searching the tree for somewhere to put the row.
    }
    // set in-memory hint of where last row on page was inserted.
    targetleaf.last_search_result = result_slot;
    // Check that page just updated is consistent.
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON("enableBtreeConsistencyCheck")) {
            targetleaf.checkConsistency(this, null, true);
        }
    }
    // Done with the target page.
    targetleaf.release();
    targetleaf = null;
    // return the status about insert - 0 is ok, or duplicate status.
    return (ret_val);
}
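When the target leaf has no room, the loop above classifies where the new row would land before calling the split code (first/last on the page, first/last in the table). The following self-contained sketch mirrors only that classification step; the class name, flag values, and parameters are hypothetical simplifications of ControlRow's SPLIT_FLAG_* constants and are not taken from Derby.

// Sketch of the split-position classification, with invented flag values.
public class SplitFlags {
    static final int FIRST_ON_PAGE  = 0x1;
    static final int FIRST_IN_TABLE = 0x2;
    static final int LAST_ON_PAGE   = 0x4;
    static final int LAST_IN_TABLE  = 0x8;

    /**
     * @param insertSlot    slot the new row would occupy (slot 0 is the control row)
     * @param recordCount   number of slots currently on the page, including slot 0
     * @param leftmostLeaf  true if the leaf has no left sibling
     * @param rightmostLeaf true if the leaf has no right sibling
     */
    static int classify(int insertSlot, int recordCount,
                        boolean leftmostLeaf, boolean rightmostLeaf) {
        int flag = 0;
        if (insertSlot == 1) {
            flag |= FIRST_ON_PAGE;
            if (leftmostLeaf)  flag |= FIRST_IN_TABLE;
        } else if (insertSlot == recordCount) {
            flag |= LAST_ON_PAGE;
            if (rightmostLeaf) flag |= LAST_IN_TABLE;
        }
        return flag;
    }

    public static void main(String[] args) {
        // New key sorts after every row on the rightmost leaf: an append.
        System.out.println(Integer.toBinaryString(classify(5, 5, false, true))); // 1100
        // New key sorts before every row on the leftmost leaf.
        System.out.println(Integer.toBinaryString(classify(1, 5, true, false))); // 11
    }
}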
use of org.apache.derby.iapi.store.raw.FetchDescriptor in project derby by apache.
the class BTreeController method compareRowsForInsert.
/**
 * Compares two rows for insert. If the two rows are not equal,
 * {@link #NO_MATCH} is returned. Otherwise, it tries to get a lock on
 * the row in the tree. If the lock is obtained without waiting,
 * {@link #MATCH_FOUND} is returned (even if the row has been deleted).
 * Otherwise, {@link #RESCAN_REQUIRED} is returned to indicate that the
 * latches have been released and the B-tree must be rescanned.
 *
 * If {@code MATCH_FOUND} is returned, the caller should check whether
 * the row has been deleted. If so, it may have to check the adjacent
 * rows to be sure that there is no non-deleted duplicate row.
 *
 * If {@code MATCH_FOUND} or {@code RESCAN_REQUIRED} is returned, the
 * transaction will hold an update lock on the specified record when
 * the method returns.
 *
 * <b>Note!</b> This method should only be called when the index is almost
 * unique (that is, a non-unique index backing a unique constraint).
 *
 * @param originalRow row from the tree
 * @param newRow row to be inserted
 * @param leaf leaf where originalRow resides
 * @param slot slot on the leaf where originalRow resides
 * @return {@code NO_MATCH} if no duplicate is found,
 * {@code MATCH_FOUND} if a duplicate is found, or
 * {@code RESCAN_REQUIRED} if the B-tree must be rescanned
 */
private int compareRowsForInsert(DataValueDescriptor[] originalRow,
                                 DataValueDescriptor[] newRow,
                                 LeafControlRow leaf,
                                 int slot) throws StandardException {
    for (int i = 0; i < originalRow.length - 1; i++) {
        if (!originalRow[i].equals(newRow[i])) {
            return NO_MATCH;
        }
    }
    // It might be a deleted record; try getting a lock on it.
    DataValueDescriptor[] template = runtime_mem.get_template(getRawTran());
    FetchDescriptor lock_fetch_desc =
            RowUtil.getFetchDescriptorConstant(template.length - 1);
    RowLocation lock_row_loc =
            (RowLocation) scratch_template[scratch_template.length - 1];
    boolean latch_released = !getLockingPolicy().lockNonScanRowOnPage(
            leaf, slot, lock_fetch_desc, template, lock_row_loc,
            ConglomerateController.LOCK_UPD);
    // If the latch was released while waiting for the lock, another
    // transaction may have changed the tree by now, so it must be rescanned.
    if (latch_released) {
        return RESCAN_REQUIRED;
    }
    return MATCH_FOUND;
}
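The comparison above treats the index as "almost unique": every column except the trailing RowLocation participates in the duplicate check. The sketch below shows that comparison on plain object arrays; the class and method names are invented for illustration, and it deliberately omits the locking and null handling that the real method and its callers perform.

// Minimal illustration of the "duplicate except for the row location" check.
import java.util.Objects;

public class AlmostUniqueCompare {

    /** Treats the last element of each row as the row location and ignores it. */
    static boolean duplicateKey(Object[] existingRow, Object[] newRow) {
        for (int i = 0; i < existingRow.length - 1; i++) {
            if (!Objects.equals(existingRow[i], newRow[i])) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        Object[] inTree = { "smith", 42, "heap-row-7"  };
        Object[] toIns  = { "smith", 42, "heap-row-99" };
        System.out.println(duplicateKey(inTree, toIns));   // true: same key, different location
        System.out.println(duplicateKey(inTree,
                new Object[] { "smith", 43, "heap-row-99" })); // false: keys differ
    }
}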
use of org.apache.derby.iapi.store.raw.FetchDescriptor in project derby by apache.
the class ControlRow method getControlRowForPage.
protected static ControlRow getControlRowForPage(
        ContainerHandle container,
        Page page) throws StandardException {
    ControlRow cr = null;
    // See if the control row is still cached with the page.
    // If so, just use the cached control row.
    AuxObject auxobject = page.getAuxObject();
    if (auxobject != null) {
        return (ControlRow) auxobject;
    }
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(page.recordCount() >= 1);
    }
    // No cached control row, so create a new one.
    // Use the version field to determine the type of the row to
    // create. This routine depends on the version field being in the same
    // column number in all control rows.
    StorableFormatId version = new StorableFormatId();
    DataValueDescriptor[] version_ret = new DataValueDescriptor[1];
    version_ret[0] = version;
    // TODO (mikem) - get rid of this new.
    page.fetchFromSlot(
            (RecordHandle) null, CR_SLOT, version_ret,
            new FetchDescriptor(1, CR_VERSION_BITSET, (Qualifier[][]) null),
            false);
    // use format id to create empty instance of right Conglomerate class
    cr = (ControlRow) Monitor.newInstanceFromIdentifier(version.getValue());
    cr.page = page;
    // call page specific initialization.
    cr.controlRowInit();
    // cache this Control row with the page in the cache.
    page.setAuxObject(cr);
    return (cr);
}
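getControlRowForPage follows a read-through caching pattern: reuse the control row cached as the page's aux object if present, otherwise read a one-column format id from slot 0 and instantiate the matching ControlRow subclass. The sketch below shows the same pattern with plain Java collections; FakePage, the factory map, and the string "control rows" are hypothetical stand-ins, not Derby classes.

// Illustrative read-through cache keyed on a per-page format id.
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

public class ControlRowCache {

    /** Stand-in for a latched page that can carry one cached auxiliary object. */
    static class FakePage {
        Object auxObject;
        int formatId;                       // what slot 0, column 0 would hold
        FakePage(int formatId) { this.formatId = formatId; }
    }

    static final Map<Integer, Supplier<String>> FACTORIES = new HashMap<>();
    static {
        FACTORIES.put(1, () -> "LeafControlRow");
        FACTORIES.put(2, () -> "BranchControlRow");
    }

    static String controlRowFor(FakePage page) {
        if (page.auxObject != null) {
            return (String) page.auxObject;              // cache hit: no fetch needed
        }
        String cr = FACTORIES.get(page.formatId).get();  // "newInstanceFromIdentifier"
        page.auxObject = cr;                             // cache for the next caller
        return cr;
    }

    public static void main(String[] args) {
        FakePage page = new FakePage(2);
        System.out.println(controlRowFor(page));    // builds BranchControlRow
        System.out.println(controlRowFor(page));    // served from the aux cache
    }
}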