Use of org.apache.derby.iapi.store.raw.RecordHandle in the Apache Derby project: class StoredPage, method doUpdateAtSlot.
/**
 * Perform an update of the row whose head piece is at the given slot.
 * <p>
 * A row may span several pages (a long row): this page holds one portion of
 * the columns, and the record header's overflow pointer leads to the next
 * row piece. The main for(;;) loop below walks that chain, updating each
 * portion in turn. A column too large to fit on a page is moved into its own
 * long-column chain (see the LongColumnException handling).
 *
 * @param t            transaction performing the update
 * @param slot         slot on this page of the row piece to update
 * @param id           record id of the row piece on this page
 * @param row          new column values; a null row is passed straight
 *                     through to actionUpdate (see RESOLVE comment below)
 * @param validColumns bit set naming which entries of row[] are being
 *                     updated, or null if all columns are supplied
 *
 * @exception StandardException Standard Derby policy
 */
public void doUpdateAtSlot(RawTransaction t, int slot, int id, Object[] row, FormatableBitSet validColumns) throws StandardException {
    // If this is a head page, the recordHandle is the head row handle.
    // If this is not a head page, we are calling updateAtSlot inside some
    // convoluted loop that updates an overflow chain. There is nothing we
    // can do about it anyway.
    RecordHandle headRowHandle = isOverflowPage() ? null : getRecordHandleAtSlot(slot);
    // RESOLVE: djd/yyz what does a null row mean? (sku)
    if (row == null) {
        owner.getActionSet().actionUpdate(t, this, slot, id, row, validColumns, -1, (DynamicByteArrayOutputStream) null, -1, headRowHandle);
        return;
    }
    // startColumn is the first column to be updated.
    int startColumn = RowUtil.nextColumn(row, validColumns, 0);
    if (startColumn == -1)
        return;
    if (SanityManager.DEBUG) {
        // Consistency check: no more columns may be marked in validColumns
        // than are actually passed in via the row array.
        if (!isOverflowPage() && validColumns != null) {
            if (RowUtil.getNumberOfColumns(-1, validColumns) > row.length)
                SanityManager.THROWASSERT("updating slot " + slot + " on page " + getIdentity() + " " + RowUtil.getNumberOfColumns(-1, validColumns) + " bits are set in validColumns but only " + row.length + " columns in row[]");
        }
    }
    // Keep track of row shrinkage in the head row piece. If any row piece
    // shrinks, file a post commit work to clear all reserved space for the
    // entire row chain.
    boolean rowHasReservedSpace = false;
    StoredPage curPage = this;
    for (; ; ) {
        // Header of the row piece currently latched; tells us which column
        // range lives on curPage and where the chain continues.
        StoredRecordHeader rh = curPage.getHeaderAtSlot(slot);
        int startField = rh.getFirstField();
        int endFieldExclusive = startField + rh.getNumberFields();
        // curPage contains column[startField] to column[endFieldExclusive-1]
        // Need to cope with an update that is increasing the number of
        // columns. If this occurs we want to make sure that we perform a
        // single update to the last portion of a record, and not an update
        // of the current columns and then an update to append a column.
        long nextPage = -1;
        int realStartColumn = -1;
        int realSpaceOnPage = -1;
        // Only touch this piece if it is the last piece (no overflow) or if
        // the next column to update actually falls inside its column range.
        if (!rh.hasOverflow() || ((startColumn >= startField) && (startColumn < endFieldExclusive))) {
            boolean hitLongColumn;
            int nextColumn = -1;
            Object[] savedFields = null;
            DynamicByteArrayOutputStream logBuffer = null;
            do {
                try {
                    // Update this portion of the record.
                    // Pass in headRowHandle in case we are to update any
                    // long column and they need to be cleaned up by post
                    // commit processing. We don't want to purge the
                    // columns right now because in order to reclaim the
                    // page, we need to remove them. But it would be bad
                    // to remove them now because the transaction may not
                    // commit for a long time. We can do both purging of
                    // the long column and page removal together in the
                    // post commit.
                    nextColumn = owner.getActionSet().actionUpdate(t, curPage, slot, id, row, validColumns, realStartColumn, logBuffer, realSpaceOnPage, headRowHandle);
                    hitLongColumn = false;
                } catch (LongColumnException lce) {
                    if (lce.getRealSpaceOnPage() == -1) {
                        // an update that has caused the row to increase
                        // in size *and* push some fields off the page
                        // that need to be inserted in an overflow page
                        // no need to make a copy as we are going to use
                        // this buffer right away
                        logBuffer = lce.getLogBuffer();
                        savedFields = (Object[]) lce.getColumn();
                        realStartColumn = lce.getNextColumn();
                        realSpaceOnPage = -1;
                        hitLongColumn = true;
                        continue;
                    }
                    // we caught a real long column exception
                    // three things should happen here:
                    // 1. insert the long column into overflow pages.
                    // 2. append the overflow field header in the main chain.
                    // 3. continue the update in the main data chain.
                    logBuffer = new DynamicByteArrayOutputStream(lce.getLogBuffer());
                    // step 1: insert the long column ... if this update
                    // operation rolls back, purge the after image column
                    // chain and reclaim the overflow page because the
                    // whole chain will be orphaned anyway.
                    RecordHandle longColumnHandle = insertLongColumn(curPage, lce, Page.INSERT_UNDO_WITH_PURGE);
                    // step 2: append overflow field header to log buffer
                    int overflowFieldLen = 0;
                    try {
                        overflowFieldLen += appendOverflowFieldHeader(logBuffer, longColumnHandle);
                    } catch (IOException ioe) {
                        throw StandardException.newException(SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
                    }
                    // step 3: continue the insert in the main data chain
                    // need to pass the log buffer, and start column to the
                    // next insert.
                    realStartColumn = lce.getNextColumn() + 1;
                    realSpaceOnPage = lce.getRealSpaceOnPage() - overflowFieldLen;
                    hitLongColumn = true;
                } catch (NoSpaceOnPage nsop) {
                    // NOTE(review): validColumns may legitimately be null on
                    // this path (a full-row update); validColumns.toString()
                    // would then NPE while building this error — confirm.
                    throw StandardException.newException(SQLState.DATA_UNEXPECTED_NO_SPACE_ON_PAGE, nsop, ((PageKey) curPage.getIdentity()).toString(), getPageDumpString(), slot, id, validColumns.toString(), realStartColumn, 0, headRowHandle);
                }
            } while (hitLongColumn);
            // See if we completed all the columns that are on this page.
            int validColumnsSize = (validColumns == null) ? 0 : validColumns.getLength();
            if (nextColumn != -1) {
                if (SanityManager.DEBUG) {
                    if ((nextColumn < startField) || (rh.hasOverflow() && (nextColumn >= endFieldExclusive))) {
                        SanityManager.THROWASSERT("nextColumn out of range = " + nextColumn + " expected between " + startField + " and " + endFieldExclusive);
                    }
                }
                // Need to insert rows from nextColumn to endFieldExclusive
                // onto a new overflow page.
                // If the column is not being updated we
                // pick it up from the current page. If it is being updated
                // we take it from the new value.
                int possibleLastFieldExclusive = endFieldExclusive;
                if (!rh.hasOverflow()) {
                    // we might be adding a field here
                    if (validColumns == null) {
                        if (row.length > possibleLastFieldExclusive)
                            possibleLastFieldExclusive = row.length;
                    } else {
                        if (validColumnsSize > possibleLastFieldExclusive)
                            possibleLastFieldExclusive = validColumnsSize;
                    }
                }
                // use a sparse row: only indices [nextColumn,
                // possibleLastFieldExclusive) are populated below.
                Object[] newRow = new Object[possibleLastFieldExclusive];
                FormatableBitSet newColumnList = new FormatableBitSet(possibleLastFieldExclusive);
                // NOTE(review): fieldStream is declared but never used in
                // this method — candidate for removal.
                ByteArrayOutputStream fieldStream = null;
                for (int i = nextColumn; i < possibleLastFieldExclusive; i++) {
                    if ((validColumns == null) || (validColumnsSize > i && validColumns.isSet(i))) {
                        newColumnList.set(i);
                        // use the new value
                        newRow[i] = RowUtil.getColumn(row, validColumns, i);
                    } else if (i < endFieldExclusive) {
                        newColumnList.set(i);
                        // use the old value saved off the page by the
                        // LongColumnException path above
                        newRow[i] = savedFields[i - nextColumn];
                    }
                }
                RecordHandle handle = curPage.getRecordHandleAtSlot(slot);
                // Work out where the chain continues; past the current end
                // of the row there cannot be any updates to do.
                if (rh.hasOverflow()) {
                    // We have to carry across the overflow information
                    // from the current record, if any.
                    nextPage = rh.getOverflowPage();
                    id = rh.getOverflowId();
                    // find the next starting column before unlatching page
                    startColumn = RowUtil.nextColumn(row, validColumns, endFieldExclusive);
                } else {
                    startColumn = -1;
                    nextPage = 0;
                }
                // Check reserved space once, on the first row piece we see,
                // so shrinkage can be reclaimed post-commit.
                // Don't bother with temp container.
                if (!rowHasReservedSpace && headRowHandle != null && curPage != null && !owner.isTemporaryContainer()) {
                    rowHasReservedSpace = curPage.checkRowReservedSpace(slot);
                }
                // insert the record portion on a new overflow page at slot
                // 0 this will automatically handle any overflows in
                // this new portion
                // BasePage op = getNewOverflowPage();
                BasePage op = curPage.getOverflowPageForInsert(slot, newRow, newColumnList, nextColumn);
                // We have all the information from this page so unlatch it
                if (curPage != this) {
                    curPage.unlatch();
                    curPage = null;
                }
                byte mode = Page.INSERT_OVERFLOW;
                if (nextPage != 0)
                    mode |= Page.INSERT_FOR_SPLIT;
                RecordHandle nextPortionHandle = nextPage == 0 ? null : owner.makeRecordHandle(nextPage, id);
                // RESOLVED (sku): even though we would like to roll back
                // these inserts with PURGE rather than with delete,
                // we have to delete because if we purge the last row
                // from an overflow page, the purge will queue a post
                // commit to remove the page.
                // While this is OK with long columns, we cannot do this
                // for long rows because long row overflow pages can be
                // shared by more than one long rows, and thus it is unsafe
                // to remove the page without first latching the head page.
                // However, the insert log record do not have the head
                // row's page number so the rollback cannot put that
                // information into the post commit work.
                RecordHandle portionHandle;
                try {
                    portionHandle = op.insertAllowOverflow(0, newRow, newColumnList, nextColumn, mode, 100, nextPortionHandle);
                } catch (NoSpaceOnPage nsop) {
                    throw StandardException.newException(SQLState.DATA_UNEXPECTED_NO_SPACE_ON_PAGE, nsop, ((PageKey) op.getIdentity()).toString(), getPageDumpString(), slot, id, newColumnList.toString(), nextColumn, mode, nextPortionHandle);
                }
                // Update the previous record header to point to new portion
                if (curPage == this)
                    updateOverflowDetails(this, handle, portionHandle);
                else
                    updateOverflowDetails(handle, portionHandle);
                op.unlatch();
            } else {
                // See earlier comments on checking row reserved space.
                if (!rowHasReservedSpace && headRowHandle != null && curPage != null && !owner.isTemporaryContainer()) {
                    rowHasReservedSpace = curPage.checkRowReservedSpace(slot);
                }
                // find the next starting column before we unlatch the page
                startColumn = rh.hasOverflow() ? RowUtil.nextColumn(row, validColumns, endFieldExclusive) : -1;
            }
            // have we completed this update?
            if (startColumn == -1) {
                if ((curPage != this) && (curPage != null))
                    curPage.unlatch();
                // break out of the for loop
                break;
            }
        }
        if (nextPage == -1) {
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(curPage != null, "Current page is null be no overflow information has been obtained");
            }
            // Get the next page info while we still have the page
            // latched.
            nextPage = rh.getOverflowPage();
            id = rh.getOverflowId();
        }
        // Never unlatch "this"; the caller owns that latch.
        if ((curPage != this) && (curPage != null))
            curPage.unlatch();
        // get the next portion page and find the correct slot
        curPage = (StoredPage) owner.getPage(nextPage);
        if (SanityManager.DEBUG) {
            SanityManager.ASSERT(curPage.isOverflowPage(), "following row chain gets a non-overflow page");
        }
        slot = curPage.findRecordById(id, FIRST_SLOT_NUMBER);
    }
    // If any piece shrank, queue reclamation of reserved space for the whole
    // row post commit.
    if (rowHasReservedSpace) {
        RawTransaction rxact = (RawTransaction) owner.getTransaction();
        ReclaimSpace work = new ReclaimSpace(ReclaimSpace.ROW_RESERVE, headRowHandle, rxact.getDataFactory(), true);
        rxact.addPostCommitWork(work);
    }
}
Use of org.apache.derby.iapi.store.raw.RecordHandle in the Apache Derby project: class StoredPage, method removeOrphanedColumnChain.
/**
 * Remove a column chain that may have been orphaned by an update.
 * <p>
 * Remove a column chain that may have been orphaned by an update. This
 * is executed as a post commit operation. This page is the head page of
 * the row which used to point to the column chain in question. The
 * location of the orphaned column chain is in the ReclaimSpace record.
 * <BR>
 * MT - latched. No lock will be gotten, the head record must already be
 * locked exclusive with no outstanding changes that can be rolled back.
 * <p>
 *
 * @param work object describing the chain to remove.
 * @param containerHdl open container handle to use to remove chain.
 *
 * @exception StandardException Standard exception policy.
 */
/* package */
void removeOrphanedColumnChain(ReclaimSpace work, ContainerHandle containerHdl) throws StandardException {
    // First we need to make sure that this is the first and only time
    // this long column is being reclaimed, to do this we get the first
    // page on the long column chain and compare its page time stamp.
    // If it is different, don't do anything.
    //
    // Next we need to make sure the update operation commits - we do
    // this by finding the row headed by headRecord, go to the column
    // in question and see if it points to the first page of the long
    // column chain we want to reclaim. If it does then the update
    // operation has rolled back and we don't want to reclaim it.
    //
    // After we do the above 2 checks, we can reclaim the column
    // chain.
    StoredPage headOfChain = (StoredPage) containerHdl.getPageNoWait(work.getColumnPageId());
    // If someone has it latched, not reclaimable
    if (headOfChain == null)
        return;
    // If the column has been touched, it is not orphaned. Not reclaimable.
    boolean pageUnchanged = headOfChain.equalTimeStamp(work.getPageTimeStamp());
    // unlatch it for now.
    headOfChain.unlatch();
    if (pageUnchanged == false)
        return;
    // Now get to the column in question and make sure it is no longer
    // pointing to the column chain.
    RecordHandle headRowHandle = work.getHeadRowHandle();
    if (SanityManager.DEBUG) {
        // System.out.println("Executing in removeOrphanedColumnChain.");
        // System.out.println("work = " + work);
        // System.out.println("head = " + headOfChain);
        // System.out.println("this = " + this);
        SanityManager.ASSERT(isLatched());
        SanityManager.ASSERT(headRowHandle.getPageNumber() == getPageNumber(), "got wrong head page");
    }
    // First get the row.
    int slot = findRecordById(headRowHandle.getId(), headRowHandle.getSlotNumberHint());
    // A negative slot means the head row is gone; fall through and reclaim.
    if (slot >= 0) {
        if (SanityManager.DEBUG) {
            if (isOverflowPage()) {
                SanityManager.THROWASSERT("Page " + getPageNumber() + " is overflow " + "\nwork = " + work + "\nhead = " + headOfChain + "\nthis = " + this);
            }
        }
        // Find the page with the column in question on it.
        // Start with the head page.
        StoredPage pageInRowChain = this;
        try {
            int columnId = work.getColumnId();
            StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
            if (SanityManager.DEBUG)
                SanityManager.ASSERT(recordHeader.getFirstField() == 0, "Head row piece should start at field 0 but is not");
            // See if columnId is on pageInRowChain. Walk the row piece
            // chain; only the head page's latch (held by the caller) is
            // kept across iterations.
            while ((recordHeader.getNumberFields() + recordHeader.getFirstField()) <= columnId) {
                if (pageInRowChain != this) {
                    // Keep the head page latched.
                    pageInRowChain.unlatch();
                    pageInRowChain = null;
                }
                if (recordHeader.hasOverflow()) {
                    // Go to the next row piece
                    pageInRowChain = getOverflowPage(recordHeader.getOverflowPage());
                    recordHeader = pageInRowChain.getHeaderAtSlot(getOverflowSlot(pageInRowChain, recordHeader));
                } else {
                    // The row ends before columnId: the committed update
                    // shrank the number of columns in the row.
                    break;
                }
            }
            if ((recordHeader.getNumberFields() + recordHeader.getFirstField()) > columnId) {
                // The column is on this row piece; check whether it still
                // points at the chain we are about to reclaim.
                if (!pageInRowChain.isColumnOrphaned(recordHeader, columnId, work.getColumnPageId(), work.getColumnRecordId())) {
                    // The column is not orphaned, row still points to it.
                    if (pageInRowChain != this) {
                        // Keep the head page latched.
                        pageInRowChain.unlatch();
                        pageInRowChain = null;
                    }
                    return;
                }
            }
        } catch (IOException ioe) {
            throw StandardException.newException(SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
        } finally {
            // Safety net: never leave a non-head row piece latched.
            if (pageInRowChain != this && pageInRowChain != null)
                pageInRowChain.unlatch();
        }
    }
    // If we get this far, we have verified that the column chain is indeed
    // orphaned. Get rid of the column chain.
    long nextPageId = work.getColumnPageId();
    int nextRecordId = work.getColumnRecordId();
    purgeOneColumnChain(nextPageId, nextRecordId);
}
Use of org.apache.derby.iapi.store.raw.RecordHandle in the Apache Derby project: class BTreeScan, method savePositionAndReleasePage.
/**
 * Save the current scan position by key, then release the latch on the
 * leaf being scanned. Call this whenever the leaf latch must be given up
 * mid-scan; {@code reposition()} can later restore the scan from the
 * saved key.
 *
 * @param partialKey known parts of the key to save, or {@code null} if
 * the whole key must be fetched from the page
 * @param vcols per-column validity flags for {@code partialKey} (a 0
 * entry means that column must be fetched from the page), or {@code null}
 * if every supplied column is valid
 * @throws StandardException if saving the position fails
 * @see #reposition(BTreeRowPosition, boolean)
 */
void savePositionAndReleasePage(DataValueDescriptor[] partialKey, int[] vcols) throws StandardException {
    final Page leafPage = scan_position.current_leaf.getPage();
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(leafPage.isLatched(), "Page is not latched");
        SanityManager.ASSERT(scan_position.current_positionKey == null, "Scan position already saved");
        if (partialKey == null) {
            SanityManager.ASSERT(vcols == null);
        }
        if (vcols != null) {
            SanityManager.ASSERT(partialKey != null);
            SanityManager.ASSERT(vcols.length <= partialKey.length);
        }
    }
    try {
        final DataValueDescriptor[] keyTemplate = scan_position.getKeyTemplate();
        FetchDescriptor fetchDesc = null;
        boolean needFetch = true;
        if (partialKey != null) {
            // Copy over every column the caller already knows.
            final int knownCount = (vcols == null) ? partialKey.length : vcols.length;
            int copied = 0;
            for (int col = 0; col < knownCount; col++) {
                if (vcols == null || vcols[col] != 0) {
                    keyTemplate[col].setValue(partialKey[col]);
                    copied++;
                }
            }
            if (copied >= keyTemplate.length) {
                // The caller supplied the entire key; nothing to fetch.
                needFetch = false;
            } else {
                // Fetch only the columns that are still missing.
                fetchDesc = scan_position.getFetchDescriptorForSaveKey(vcols, keyTemplate.length);
            }
        }
        if (needFetch) {
            final RecordHandle rh = leafPage.fetchFromSlot((RecordHandle) null, scan_position.current_slot, keyTemplate, fetchDesc, true);
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(rh != null, "Row not found");
            }
        }
        scan_position.current_positionKey = keyTemplate;
        // current_rh is intentionally kept: it may still be usable later
        // if no rows move off the page.
        scan_position.versionWhenSaved = leafPage.getPageVersion();
        scan_position.current_slot = Page.INVALID_SLOT_NUMBER;
    } finally {
        // Always drop the leaf latch, even if fetching the key failed.
        scan_position.current_leaf.release();
        scan_position.current_leaf = null;
    }
}
Use of org.apache.derby.iapi.store.raw.RecordHandle in the Apache Derby project: class BTreeController, method comparePreviousRecord.
/**
 * Compares the oldrow with the one at 'slot' or the one left to it.
 * If the slot is the first slot it will move to the left sibling of
 * the 'leaf' and will compare with the record from the last slot.
 * @param slot slot number to start with
 * @param leaf LeafControlRow of the current page
 * @param rows DataValueDescriptor array to fill with fetched values
 * @param oldRows DataValueDescriptor array holding the row being inserted
 * @return 0 if no duplicate
 *         1 if duplicate
 *         2 if rescan required
 * @throws StandardException
 */
private int comparePreviousRecord(int slot, LeafControlRow leaf, DataValueDescriptor[] rows, DataValueDescriptor[] oldRows) throws StandardException {
    RecordHandle rh = null;
    // newLeaf tracks whether 'leaf' is a sibling we latched ourselves (and
    // so must release) rather than the caller-supplied original leaf.
    boolean newLeaf = false;
    LeafControlRow originalLeaf = leaf;
    while (leaf != null) {
        if (slot == 0) {
            LeafControlRow oldLeaf = leaf;
            try {
                // slot is pointing before the first slot
                // get left sibling
                leaf = (LeafControlRow) leaf.getLeftSibling(this);
                if (newLeaf) {
                    // release the sibling we latched on a previous pass
                    oldLeaf.release();
                }
                newLeaf = true;
                // no left sibling
                if (leaf == null)
                    return NO_MATCH;
                // set the slot to last slot number
                slot = leaf.page.recordCount() - 1;
                // jump back to the top of the loop body to get the slot
                // number rechecked.
                continue;
            } catch (WaitError we) {
                // Could not latch the sibling without waiting; waiting here
                // could deadlock the B-tree, so give up and ask for a rescan.
                if (newLeaf) {
                    oldLeaf.release();
                }
                originalLeaf.release();
                return RESCAN_REQUIRED;
            }
        }
        rh = leaf.page.fetchFromSlot(null, slot, rows, null, true);
        if (rh != null) {
            int ret = compareRowsForInsert(rows, oldRows, leaf, slot);
            // If we found a deleted row, we don't know whether there
            // is a duplicate, so we need to continue the search.
            final boolean continueSearch = (ret == MATCH_FOUND && leaf.page.isDeletedAtSlot(slot));
            if (!continueSearch) {
                if (newLeaf) {
                    // We are returning while holding latches on leaves
                    // that we're not supposed to hold.
                    if (ret == RESCAN_REQUIRED) {
                        // When a rescan is required, we must release the
                        // original leaf, since the callers expect all
                        // latches to have been released (and so they
                        // should have been, so this is probably a bug -
                        // see DERBY-4080).
                        originalLeaf.release();
                    }
                    if (ret != RESCAN_REQUIRED) {
                        // Since a rescan is not required, we still hold
                        // the latch on the non-original leaf. No other
                        // leaves than the original one should be latched
                        // when we return, so release the current leaf.
                        leaf.release();
                    }
                }
                return ret;
            }
        }
        slot--;
    }
    return NO_MATCH;
}
Use of org.apache.derby.iapi.store.raw.RecordHandle in the Apache Derby project: class LeafControlRow, method allocate.
/* Private/Protected methods of This class: */
/**
 * Allocate a new leaf page to the conglomerate.
 *
 * @param btree  The open conglomerate from which to get the leaf from
 * @param parent The parent page of the newly allocated page, null if
 *               allocating root page.
 *
 * @return the control row of the new leaf; its page is returned latched.
 *
 * @exception StandardException Standard exception policy.
 */
private static LeafControlRow allocate(OpenBTree btree, ControlRow parent) throws StandardException {
    Page page = btree.container.addPage();
    // Create a control row for the new page.
    LeafControlRow control_row = new LeafControlRow(btree, page, parent, false);
    // Insert the control row on the page, in the first slot on the page.
    // This operation is only done as part of a new tree or a split, both
    // of which will be undone physically, so no logical undo record is
    // needed.
    byte insertFlag = Page.INSERT_INITIAL;
    insertFlag |= Page.INSERT_DEFAULT;
    RecordHandle rh = page.insertAtSlot(Page.FIRST_SLOT_NUMBER, control_row.getRow(), (FormatableBitSet) null, (LogicalUndo) null, insertFlag, AccessFactoryGlobals.BTREE_OVERFLOW_THRESHOLD);
    if (SanityManager.DEBUG) {
        // Re-read the record just inserted and verify the handle we got
        // back matches it. FIRST_SLOT_NUMBER is a static constant, so
        // reference it through the Page class rather than the instance.
        RecordHandle rh2 = page.fetchFromSlot((RecordHandle) null, Page.FIRST_SLOT_NUMBER, new DataValueDescriptor[0], (FetchDescriptor) null, true);
        SanityManager.ASSERT(rh.getId() == rh2.getId() && rh.getPageNumber() == rh2.getPageNumber());
    }
    // Page is returned latched.
    return (control_row);
}
Aggregations