Use of org.apache.derby.iapi.store.raw.RecordHandle in project derby by apache.
The class HeapPostCommit, method purgeCommittedDeletes.
/**
************************************************************************
* Private/Protected methods of this class:
**************************************************************************
*/
/**
* Reclaim space taken by committed deleted rows or aborted inserted rows.
* <p>
* This routine assumes it has been called by an internal transaction which
* has performed no work so far, and that it has an exclusive intent table
* lock. It will attempt to obtain exclusive row locks on rows marked
* deleted; where successful, those rows can be reclaimed, as they must
* be "committed deleted" or "aborted inserted" rows.
* <p>
* This routine will latch the page and hold the latch, an interface
* requirement of Page.purgeAtSlot.
*
* @param heap_control The heap, already opened.
* @param pageno The number of the page on which to look for committed deletes.
*
* @see Page#purgeAtSlot
* @exception StandardException Standard exception policy.
*/
private final void purgeCommittedDeletes(HeapController heap_control, long pageno) throws StandardException {
// The following can fail either if it can't get the latch, or if
// somehow the requested page no longer exists.
// RESOLVE - what will happen if the user page doesn't exist?
// Wait to get the latch on the page.
Page page = heap_control.getUserPageWait(pageno);
boolean purgingDone = false;
if (page != null) {
try {
// The number of records that can be reclaimed is:
// total recs - recs_not_deleted
int num_possible_commit_delete = page.recordCount() - page.nonDeletedRecordCount();
if (num_possible_commit_delete > 0) {
// Loop backward so that purges, which shift entries in the slot
// table, only move rows that we have already looked at.
for (int slot_no = page.recordCount() - 1; slot_no >= 0; slot_no--) {
boolean row_is_committed_delete = page.isDeletedAtSlot(slot_no);
if (row_is_committed_delete) {
// At this point we only know that the row is
// deleted, not whether it is committed.
// see if we can purge the row, by getting an
// exclusive lock on the row. If it is marked
// deleted and we can get this lock, then it
// must be a committed delete and we can purge
// it.
RecordHandle rh = page.fetchFromSlot((RecordHandle) null, slot_no, RowUtil.EMPTY_ROW, RowUtil.EMPTY_ROW_FETCH_DESCRIPTOR, true);
row_is_committed_delete = heap_control.lockRowAtSlotNoWaitExclusive(rh);
if (row_is_committed_delete) {
purgingDone = true;
page.purgeAtSlot(slot_no, 1, false);
if (SanityManager.DEBUG) {
if (SanityManager.DEBUG_ON("verbose_heap_post_commit")) {
SanityManager.DEBUG_PRINT("HeapPostCommit", "Purging row[" + slot_no + "]" + "on page:" + pageno + ".\n");
}
}
}
}
}
}
if (page.recordCount() == 0) {
purgingDone = true;
// Deallocate the current page with 0 rows on it.
heap_control.removePage(page);
if (SanityManager.DEBUG) {
if (SanityManager.DEBUG_ON("verbose_heap_post_commit")) {
SanityManager.DEBUG_PRINT("HeapPostCommit", "Calling Heap removePage().; pagenumber=" + pageno + "\n");
}
}
}
} finally {
// If no purging was done, unlatch the page here. Otherwise keep
// the latch until the internal transaction commits: releasing it
// early would let other transactions alter the page, which
// would cause a subsequent undo of the purge to fail.
if (!purgingDone) {
page.unlatch();
page = null;
}
}
} else {
if (SanityManager.DEBUG) {
if (SanityManager.DEBUG_ON("verbose_heap_post_commit")) {
SanityManager.DEBUG_PRINT("HeapPostCommit", "Get No Wait returned null. page num = " + pageno + "\n");
SanityManager.showTrace(new Throwable());
}
}
}
return;
}
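The reason a no-wait exclusive row lock settles the question is that a transaction holds its exclusive lock on a row it deleted until it commits; if an instantaneous lock grab succeeds, no deleter is still in flight, so the delete must be committed. Below is a minimal standalone sketch of that idiom, using a java.util.concurrent.Semaphore as a stand-in for Derby's lock manager (all class and method names here are illustrative, not Derby's):

import java.util.concurrent.Semaphore;

public class NoWaitPurgeSketch {
    // Stands in for the exclusive row lock the deleting transaction
    // holds until it commits (a Semaphore has no owner thread, so it
    // can model a lock held by "another transaction").
    static final Semaphore rowLock = new Semaphore(1);

    // Analogue of lockRowAtSlotNoWaitExclusive(): succeeds only if no
    // transaction still holds the row lock, i.e. the delete committed.
    static boolean canPurge(boolean markedDeleted) {
        if (!markedDeleted)
            return false;
        if (rowLock.tryAcquire()) {
            rowLock.release();
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        rowLock.acquireUninterruptibly(); // deleter has not committed yet
        System.out.println(canPurge(true)); // false: cannot purge yet
        rowLock.release();                  // deleter commits
        System.out.println(canPurge(true)); // true: safe to purge
    }
}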
Use of org.apache.derby.iapi.store.raw.RecordHandle in project derby by apache.
The class ConglomerateUtil, method debugPage.
/**
** Format a page of data, as the access layer sees it.
*/
public static String debugPage(Page page, int start_slot, boolean full_rh, DataValueDescriptor[] template) {
if (SanityManager.DEBUG) {
// Guard against a null page before it is dereferenced.
if (page == null)
return (null);
StringBuffer string = new StringBuffer(4096);
string.append("PAGE:(");
string.append(page.getPageNumber());
string.append(")------------------------------------------:\n");
try {
int numrows = page.recordCount();
for (int slot_no = start_slot; slot_no < numrows; slot_no++) {
RecordHandle rh = page.fetchFromSlot((RecordHandle) null, slot_no, template, (FetchDescriptor) null, true);
// pre-pend either "D:" if deleted, or " :" if not.
string.append(page.isDeletedAtSlot(slot_no) ? "D:" : " :");
// row[slot,id]:
string.append("row[");
string.append(slot_no);
string.append("](id:");
string.append(rh.getId());
string.append("):\t");
// Record id=78 Page(31,Container(0, 919707766934))
if (full_rh) {
string.append("[");
string.append(rh.toString());
string.append("]:");
}
// row:
string.append(RowUtil.toString(template));
string.append("\n");
}
// string.append(page.toString());
} catch (Throwable t) {
string.append("Error encountered while building string");
}
return (string.toString());
} else {
return (null);
}
}
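A hypothetical call site for debugPage might look like the following. The import paths are assumptions based on the Derby source tree and can vary between releases, and the caller is assumed to already hold a latched Page plus a template row matching the conglomerate's shape:

import org.apache.derby.iapi.store.raw.Page;
import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.impl.store.access.conglomerate.ConglomerateUtil;
import org.apache.derby.shared.common.sanity.SanityManager;

class DebugPageUsage {
    // Dump every slot of a latched page, including full record handles.
    static void dump(Page page, DataValueDescriptor[] template) {
        if (SanityManager.DEBUG) {
            SanityManager.DEBUG_PRINT("DebugPageUsage",
                ConglomerateUtil.debugPage(
                    page,
                    0,        // start_slot: dump from the first slot
                    true,     // full_rh: print the full RecordHandle
                    template));
        }
    }
}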
Use of org.apache.derby.iapi.store.raw.RecordHandle in project derby by apache.
The class StoredPage, method logColumn.
/**
* Log column from input row to the given output stream.
* <p>
* Read data from row[arrayPosition], and write the column data in
* raw store page format to the given output stream. Along the way,
* determine if the column will fit on the current page.
* <p>
* Action taken in this routine is determined by the kind of column as
* specified in the columnFlag:
* COLUMN_NONE - the column is insignificant
* COLUMN_FIRST - this is the first column in a logRow() call
* COLUMN_LONG - this is a known long column, therefore we will
* store part of the column on the current page and
* overflow the rest if necessary.
* <p>
* Upon entry to this routine logicalDataOut is tied to the
* DynamicByteArrayOutputStream out.
* <BR>
* If a column is a long column and it does not totally fit on the current
* page, then a LongColumnException is thrown. We package up info about
* the current long column in the partially filled-in exception so that
* callers can take correct action. The column will now be set as a
* stream.
*
* @return The spaceAvailable after accounting for space for this column.
*
* @param row array of columns from which to read the column.
* @param arrayPosition The array position of the column to read from row.
* @param out The stream to write the raw store page format of
* the column to.
* @param spaceAvailable The number of bytes available on the page for
* this column, this may differ from current page
* as it may include bytes used by previous
* columns.
* @param columnFlag one of: COLUMN_NONE, COLUMN_FIRST, or COLUMN_LONG.
*
* @exception StandardException Standard exception policy.
* @exception LongColumnException Thrown if column will not fit on a
* single page. See notes above
*/
private int logColumn(Object[] row, int arrayPosition, DynamicByteArrayOutputStream out, int spaceAvailable, int columnFlag, int overflowThreshold) throws StandardException, IOException {
// RESOLVE (mikem) - why will row be null?
Object column = (row != null ? row[arrayPosition] : null);
// If the column is a RawField, its field header is already formatted.
if (column instanceof RawField) {
// field data is raw, no need to set up a field header etc.
byte[] data = ((RawField) column).getData();
if (data.length <= spaceAvailable) {
out.write(data);
spaceAvailable -= data.length;
}
return spaceAvailable;
}
// If this is a long column, it may fit in this page or it may not.
boolean longColumnDone = true;
// default field status.
int fieldStatus = StoredFieldHeader.setFixed(StoredFieldHeader.setInitial(), true);
int beginPosition = out.getPosition();
int columnBeginPosition = 0;
int headerLength;
int fieldDataLength = 0;
if (column instanceof StreamStorable) {
StreamStorable stream_storable_column = (StreamStorable) column;
if (stream_storable_column.returnStream() != null) {
column = (Object) stream_storable_column.returnStream();
}
}
if ((column == null) && (columnFlag != COLUMN_CREATE_NULL)) {
fieldStatus = StoredFieldHeader.setNonexistent(fieldStatus);
headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
} else if (column instanceof InputStream) {
RememberBytesInputStream bufferedIn = null;
int bufferLen = 0;
int estimatedMaxDataSize = getMaxDataLength(spaceAvailable, overflowThreshold);
// Make sure the data is wrapped so that already-read bytes can be
// replayed from a buffer.
if (column instanceof RememberBytesInputStream) {
// data is already RememberBytesInputStream
bufferedIn = (RememberBytesInputStream) column;
bufferLen = bufferedIn.numBytesSaved();
} else {
// data comes in as an inputstream
bufferedIn = new RememberBytesInputStream((InputStream) column, new MemByteHolder(maxFieldSize + 1));
// Point the StreamStorable at the buffered stream so later reads go
// into the RememberBytesInputStream.
if (row[arrayPosition] instanceof StreamStorable)
((StreamStorable) row[arrayPosition]).setStream(bufferedIn);
// set column to the RememberBytesInputStream so that
// all future access to this column will be able to get
// at bytes that have been already read. This assignment
// is needed to ensure that if long column exception is
// thrown, the column is set correctly
column = bufferedIn;
}
// Fill the buffer by reading the maximum we can.
if (bufferLen < (estimatedMaxDataSize + 1)) {
bufferLen += bufferedIn.fillBuf(estimatedMaxDataSize + 1 - bufferLen);
}
if (bufferLen <= estimatedMaxDataSize) {
// we will be able to fit this into the page
fieldDataLength = bufferLen;
fieldStatus = StoredFieldHeader.setFixed(fieldStatus, true);
headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
// if the field is extensible, then we write the serializable
// formatId. if the field is non-extensible, we don't need to
// write the formatId. but at this point, how do we know
// whether the field is extensible or not??? For Plato release,
// we do not support InputStream on extensible types,
// therefore, we ignore the formatId for now.
bufferedIn.putBuf(logicalDataOut, fieldDataLength);
} else {
if (columnFlag == COLUMN_LONG) {
// column is a long column and the remaining portion does
// not fit on the current page.
longColumnDone = false;
// it's a portion of a long column, and there is more to
// write reserve enough room for overflow pointer, then
// write as much data as we can leaving an extra 2 bytes
// for overflow field header.
fieldDataLength = estimatedMaxDataSize - OVERFLOW_POINTER_SIZE - 2;
fieldStatus = StoredFieldHeader.setFixed(fieldStatus, true);
headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
bufferedIn.putBuf(logicalDataOut, fieldDataLength);
// Now we need to adjust the buffer: move the unread
// bytes to the beginning and position the cursor correctly,
// so that next time around we can read more into the buffer.
int remainingBytes = bufferedIn.available();
// move the unread bytes to the beginning of the byteHolder.
int bytesShifted = bufferedIn.shiftToFront();
} else {
// column not a long column and does not fit on page.
int delta = maxFieldSize - bufferLen + 1;
if (delta > 0)
bufferLen += bufferedIn.fillBuf(delta);
fieldDataLength = bufferLen;
// The data will not fit on this page; make sure the new
// input stream is passed back to the upper layer.
column = (Object) bufferedIn;
}
}
} else if (columnFlag == COLUMN_CREATE_NULL) {
//
// This block handles the case when a couple columns have been added
// recently and now one of the later columns is being updated. Newly added columns
// which appear in the row before the updated column don't actually have
// any values yet. We stuff NULLs into those newly added columns here.
// This fixes DERBY-5679.
//
fieldStatus = StoredFieldHeader.setNull(fieldStatus, true);
// header is written with 0 length here.
headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
} else if (column instanceof DataValueDescriptor) {
DataValueDescriptor sColumn = (DataValueDescriptor) column;
boolean isNull = (columnFlag == COLUMN_CREATE_NULL) || sColumn.isNull();
if (isNull) {
fieldStatus = StoredFieldHeader.setNull(fieldStatus, true);
}
// header is written with 0 length here.
headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
if (!isNull) {
// write the field data to the log
try {
columnBeginPosition = out.getPosition();
sColumn.writeExternal(logicalDataOut);
} catch (IOException ioe) {
// SQLData error reporting
if (logicalDataOut != null) {
Exception ne = logicalDataOut.getNestedException();
if (ne != null) {
if (ne instanceof StandardException) {
throw (StandardException) ne;
}
}
}
throw StandardException.newException(SQLState.DATA_STORABLE_WRITE_EXCEPTION, ioe);
}
fieldDataLength = (out.getPosition() - beginPosition) - headerLength;
}
} else if (column instanceof RecordHandle) {
// we are inserting an overflow pointer for a long column
// casted reference to column to avoid repeated casting.
RecordHandle overflowHandle = (RecordHandle) column;
fieldStatus = StoredFieldHeader.setOverflow(fieldStatus, true);
headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
fieldDataLength += CompressedNumber.writeLong(out, overflowHandle.getPageNumber());
fieldDataLength += CompressedNumber.writeInt(out, overflowHandle.getId());
} else {
// Serializable/Externalizable/Formattable
// all look the same at this point.
// header is written with 0 length here.
headerLength = StoredFieldHeader.write(logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
logicalDataOut.writeObject(column);
fieldDataLength = (out.getPosition() - beginPosition) - headerLength;
}
// calculate the size of the field on page with compressed field header
fieldStatus = StoredFieldHeader.setFixed(fieldStatus, false);
int fieldSizeOnPage = StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize) + fieldDataLength;
userRowSize += fieldDataLength;
boolean fieldIsLong = isLong(fieldSizeOnPage, overflowThreshold);
// Do we have enough space on the page for this field?
if (((spaceAvailable < fieldSizeOnPage) || (fieldIsLong)) && (columnFlag != COLUMN_LONG)) {
if (fieldIsLong) {
if (!(column instanceof InputStream)) {
// Convert already written object to an InputStream.
ByteArray fieldData = new ByteArray(((DynamicByteArrayOutputStream) out).getByteArray(), (columnBeginPosition), fieldDataLength);
ByteArrayInputStream columnIn = new ByteArrayInputStream(fieldData.getArray(), columnBeginPosition, fieldDataLength);
MemByteHolder byteHolder = new MemByteHolder(fieldDataLength + 1);
RememberBytesInputStream bufferedIn = new RememberBytesInputStream(columnIn, byteHolder);
// The data will not fit on this page; make sure the new
// input stream is passed back to the upper layer.
column = bufferedIn;
}
out.setPosition(beginPosition);
// This exception carries the information for the client
// routine to continue inserting the long row on multiple
// pages.
LongColumnException lce = new LongColumnException();
lce.setColumn(column);
throw lce;
} else {
// Column does not fit on this page, but it isn't a long column.
out.setPosition(beginPosition);
return (spaceAvailable);
}
}
// Now we go back to update the fieldDataLength in the field header
out.setPosition(beginPosition);
// slotFieldSize is set based on the pageSize.
// We are borrowing this to set the size of our fieldDataLength.
fieldStatus = StoredFieldHeader.setFixed(fieldStatus, true);
headerLength = StoredFieldHeader.write(out, fieldStatus, fieldDataLength, slotFieldSize);
// set position to the end of the field
out.setPosition(beginPosition + fieldDataLength + headerLength);
spaceAvailable -= fieldSizeOnPage;
// YYZ: revisit
if (columnFlag == COLUMN_LONG) {
// Return -1 (done) or 1 (more to write) to
// BasePage.insertLongColumn to signal the end of the loop.
if (longColumnDone)
return -1;
else
return 1;
} else {
return (spaceAvailable);
}
}
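The return-code protocol at the end is worth pinning down: for COLUMN_LONG the caller does not care about remaining space, only whether the long column is finished (-1) or needs another overflow page (1). The following is a toy, self-contained model of that loop, not Derby's actual BasePage.insertLongColumn; all names and the piece size are illustrative:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class LongColumnLoopSketch {
    static final int PIECE = 8; // bytes that "fit on one page" in this model

    // Toy stand-in for logColumn(..., COLUMN_LONG, ...): logs up to one
    // page's worth of the stream, then returns -1 if the column is done
    // or 1 if another overflow page is still needed.
    static int logColumnPiece(InputStream column, StringBuilder page)
            throws IOException {
        byte[] buf = new byte[PIECE];
        int n = column.read(buf);
        if (n > 0)
            page.append(new String(buf, 0, n));
        return (column.available() > 0) ? 1 : -1;
    }

    public static void main(String[] args) throws IOException {
        InputStream column =
            new ByteArrayInputStream("a column too long for one page".getBytes());
        int rc, pageNo = 0;
        do {
            StringBuilder page = new StringBuilder();
            rc = logColumnPiece(column, page);
            System.out.println("overflow piece " + (pageNo++) + ": " + page);
        } while (rc == 1); // -1 signals the end of the long column
    }
}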
Use of org.apache.derby.iapi.store.raw.RecordHandle in project derby by apache.
The class StoredPage, method purgeOneColumnChain.
/**
* Purge the column chain that starts at overflowPageId, overflowRecordId
* <p>
* Purge just the column chain that starts at the input address.
* The long column chain is pointed at by a field in a row. The long
* column is then chained as a sequence of "rows"; on each page the
* last field points to the next segment of the chain.
* Long column chains currently have only one row per page, so a row
* in a long column chain should always be at the first slot.
* <p>
*
* @param overflowPageId The page where the long column chain starts.
* @param overflowRecordId The record id where long column chain starts.
*
* @exception StandardException Standard exception policy.
*/
private void purgeOneColumnChain(long overflowPageId, int overflowRecordId) throws StandardException {
StoredPage pageOnColumnChain = null;
boolean removePageHappened = false;
try {
while (overflowPageId != ContainerHandle.INVALID_PAGE_NUMBER) {
// Now loop over the column chain and get all the column pieces.
pageOnColumnChain = getOverflowPage(overflowPageId);
removePageHappened = false;
if (pageOnColumnChain == null) {
if (SanityManager.DEBUG)
SanityManager.THROWASSERT("got null page following long column chain. " + "Head column piece at " + getIdentity() + " null page at " + overflowPageId);
// Don't know what to do here; the column chain is broken.
// Don't bomb, go to the next field.
break;
}
int overflowSlotId = FIRST_SLOT_NUMBER;
if (SanityManager.DEBUG) {
int checkSlot = pageOnColumnChain.findRecordById(overflowRecordId, FIRST_SLOT_NUMBER);
if (overflowSlotId != checkSlot) {
SanityManager.THROWASSERT("Long column is not at the expected " + FIRST_SLOT_NUMBER + " slot, instead at slot " + checkSlot);
}
SanityManager.ASSERT(pageOnColumnChain.recordCount() == 1, "long column page has > 1 record");
}
// Hold on to the pointer to next page on the chain before
// we remove the long column page.
RecordHandle nextColumnPiece = pageOnColumnChain.getNextColumnPiece(overflowSlotId);
if (pageOnColumnChain.recordCount() == 1) {
removePageHappened = true;
owner.removePage(pageOnColumnChain);
} else {
if (SanityManager.DEBUG)
SanityManager.THROWASSERT("page on column chain has more then one record" + pageOnColumnChain.toString());
pageOnColumnChain.unlatch();
pageOnColumnChain = null;
}
// Chase the column chain pointer.
if (nextColumnPiece != null) {
overflowPageId = nextColumnPiece.getPageNumber();
overflowRecordId = nextColumnPiece.getId();
} else {
// terminate the loop
overflowPageId = ContainerHandle.INVALID_PAGE_NUMBER;
}
}
} finally {
if (!removePageHappened && pageOnColumnChain != null) {
pageOnColumnChain.unlatch();
pageOnColumnChain = null;
}
}
}
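The important ordering in the loop above is that the pointer to the next piece is fetched before the current page is removed; once removePage() runs, the page can no longer be read. Here is a self-contained sketch of that pattern, with a Map standing in for the container (all names are illustrative):

import java.util.HashMap;
import java.util.Map;

public class ChainPurgeSketch {
    static final long INVALID_PAGE = -1L;

    public static void main(String[] args) {
        // Toy "container": page number -> next page in the column chain.
        Map<Long, Long> pages = new HashMap<>();
        pages.put(10L, 11L);
        pages.put(11L, 12L);
        pages.put(12L, INVALID_PAGE);

        long pageId = 10L; // head of the column chain
        while (pageId != INVALID_PAGE) {
            // Save the pointer to the next piece BEFORE removing the
            // current page, just as the loop above saves nextColumnPiece
            // before calling owner.removePage().
            long next = pages.get(pageId);
            pages.remove(pageId); // analogue of removePage()
            System.out.println("purged overflow page " + pageId);
            pageId = next;
        }
    }
}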
Use of org.apache.derby.iapi.store.raw.RecordHandle in project derby by apache.
The class BTreeController, method compareNextRecord.
/**
* Compares the new record with the one at slot, or the one to the
* right of it. If the slot is the last slot in the page, it moves to
* the right sibling of the leaf and compares with the first record
* of that sibling.
* @param slot slot number to start with
* @param leaf LeafControlRow of the current page
* @param rows DataValueDescriptor array to fill with fetched values
* @return 0 if no duplicate
* 1 if duplicate
* 2 if rescan required
* @throws StandardException
*/
private int compareNextRecord(int slot, LeafControlRow leaf, DataValueDescriptor[] rows, DataValueDescriptor[] oldRows) throws StandardException {
RecordHandle rh = null;
boolean newLeaf = false;
LeafControlRow originalLeaf = leaf;
while (leaf != null) {
if (slot >= leaf.page.recordCount()) {
// slot is pointing to last slot
// get next sibling
LeafControlRow oldLeaf = leaf;
leaf = (LeafControlRow) leaf.getRightSibling(this);
if (newLeaf) {
oldLeaf.release();
}
newLeaf = true;
// no record at the right
if (leaf == null)
return NO_MATCH;
// point slot to the first record of new leaf
slot = 1;
// Continue at the top of the loop body to get the slot number rechecked.
continue;
}
rh = leaf.page.fetchFromSlot(null, slot, rows, null, true);
if (rh != null) {
int ret = compareRowsForInsert(rows, oldRows, leaf, slot);
// If we found a deleted row, we don't know whether there
// is a duplicate, so we need to continue the search.
final boolean continueSearch = (ret == MATCH_FOUND && leaf.page.isDeletedAtSlot(slot));
if (!continueSearch) {
if (newLeaf) {
// We have moved away from the original leaf, so we may hold
// latches that we're not supposed to hold.
if (ret == RESCAN_REQUIRED) {
// When a rescan is required, we must release the
// original leaf, since the callers expect all
// latches to have been released (and so they
// should have been, so this is probably a bug -
// see DERBY-4080).
originalLeaf.release();
}
if (ret != RESCAN_REQUIRED) {
// Since a rescan is not required, we still hold
// the latch on the non-original leaf. No other
// leaves than the original one should be latched
// when we return, so release the current leaf.
leaf.release();
}
}
return ret;
}
}
slot++;
}
return NO_MATCH;
}
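A caller of compareNextRecord has to dispatch on the three outcomes, with RESCAN_REQUIRED being the delicate one, since all latches have been released by then and the search position is stale. The sketch below shows that dispatch; the constant values are assumptions for illustration, as Derby defines its own:

public class DuplicateCheckSketch {
    static final int NO_MATCH = 0;        // assumed values; Derby
    static final int MATCH_FOUND = 1;     // defines its own constants
    static final int RESCAN_REQUIRED = 2;

    static String onCompareResult(int ret) {
        switch (ret) {
            case NO_MATCH:
                return "no duplicate: proceed with the insert";
            case MATCH_FOUND:
                return "duplicate key: raise a uniqueness violation";
            case RESCAN_REQUIRED:
                // All latches were released; the search position is
                // stale and must be re-established from the root.
                return "rescan: reposition the search and compare again";
            default:
                throw new IllegalArgumentException("unknown code " + ret);
        }
    }

    public static void main(String[] args) {
        System.out.println(onCompareResult(RESCAN_REQUIRED));
    }
}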