Use of org.apache.derby.iapi.store.raw.PageKey in the Apache Derby project:
class FileContainer, method getUserPage.
/**
 * Fetch and latch a user-level page from this container.
 * <p>
 * This is the common entry point for all client (raw store user) getPage
 * requests. It coordinates with page allocation/deallocation so that no
 * page can be handed out while it is in the middle of being allocated or
 * deallocated. The returned page is latched.
 *
 * @param handle     the container handle requesting the page
 * @param pageNumber the page number of the page to get
 * @param overflowOK if true then an overflow page is OK,
 *                   if false, then only a non-overflow page is OK
 * @param wait       if true then wait for the latch
 * @return the latched page, or null if no acceptable page can be returned
 *
 * <BR> MT - thread safe
 *
 * @exception StandardException Standard Derby error policy
 */
private BasePage getUserPage(BaseContainerHandle handle, long pageNumber, boolean overflowOK, boolean wait) throws StandardException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(pageNumber != FIRST_ALLOC_PAGE_NUMBER, "getUserPage trying to get an alloc page, pageNumber = " + pageNumber);
        if (pageNumber < ContainerHandle.FIRST_PAGE_NUMBER)
            SanityManager.THROWASSERT("pageNumber = " + pageNumber);
    }

    // Out-of-range page numbers yield no page.
    if (pageNumber < ContainerHandle.FIRST_PAGE_NUMBER)
        return null;

    // A committed and dropped container can never supply a page.
    if (getCommittedDropState())
        return null;

    // Reject pages the allocation machinery does not consider valid.
    if (!pageValid(handle, pageNumber))
        return null;

    // RESOLVE: no translation!
    PageKey key = new PageKey(identity, pageNumber);
    BasePage candidate = (BasePage) pageCache.find(key);
    if (candidate == null)
        return null;

    // Latch the page; a null result means the page was already
    // released from the cache.
    if (latchPage(handle, candidate, wait) == null)
        return null;

    // Give the page back if its kind or status does not match what the
    // caller can accept.
    boolean wrongKind = candidate.isOverflowPage() && !overflowOK;
    boolean notValid = candidate.getPageStatus() != BasePage.VALID_PAGE;
    if (wrongKind || notValid) {
        // unlatch releases page from cache, see StoredPage.releaseExclusive()
        candidate.unlatch();
        return null;
    }

    return candidate;
}
Use of org.apache.derby.iapi.store.raw.PageKey in the Apache Derby project:
class FileContainer, method readHeaderFromArray.
/**
 * Read the containerInfo header from a byte array.
 * <p>
 * The array must have been produced by (or be in the same format as)
 * writeHeaderFromArray. Fields are consumed in the exact order they were
 * written, the trailing checksum is validated against the stored value,
 * and the in-memory drop/reusable-recordid state is derived from the
 * status bits.
 *
 * @param a the serialized container header bytes
 *
 * @exception StandardException Derby Standard error policy
 * @exception IOException error in reading the header from file
 */
private void readHeaderFromArray(byte[] a) throws StandardException, IOException {
    ArrayInputStream in = new ArrayInputStream(a);
    in.setLimit(CONTAINER_INFO_SIZE);

    // The header begins with the container format id; anything else
    // means we cannot interpret the rest of the bytes.
    int formatId = in.readInt();
    if (formatId != formatIdInteger) {
        throw StandardException.newException(SQLState.DATA_UNKNOWN_CONTAINER_FORMAT, getIdentity(), formatId);
    }

    // Fixed-order fields; this order mirrors writeHeaderFromArray.
    int statusBits = in.readInt();
    pageSize = in.readInt();
    spareSpace = in.readInt();
    minimumRecordSize = in.readInt();
    initialPages = in.readShort();
    PreAllocSize = in.readShort();
    firstAllocPageNumber = in.readLong();
    firstAllocPageOffset = in.readLong();
    containerVersion = in.readLong();
    estimatedRowCount = in.readLong();
    reusableRecordIdSequenceNumber = in.readLong();
    lastLogInstant = null;

    // Pre-2.0 containers did not persist a pre-allocation size.
    if (PreAllocSize == 0)
        PreAllocSize = DEFAULT_PRE_ALLOC_SIZE;

    // Consume the unused spare long to keep the stream aligned.
    long unusedSpare = in.readLong();

    // Default of 1 initial page.
    if (initialPages == 0)
        initialPages = 1;

    // Container read in from disk, reset preAllocation values.
    PreAllocThreshold = PRE_ALLOC_THRESHOLD;

    // Validate the checksum over everything that precedes it.
    long storedChecksum = in.readLong();
    checksum.reset();
    checksum.update(a, 0, CONTAINER_INFO_SIZE - CHECKSUM_SIZE);
    if (storedChecksum != checksum.getValue()) {
        PageKey pk = new PageKey(identity, FIRST_ALLOC_PAGE_NUMBER);
        throw dataFactory.markCorrupt(StandardException.newException(SQLState.FILE_BAD_CHECKSUM, pk, checksum.getValue(), storedChecksum, org.apache.derby.iapi.util.StringUtil.hexDump(a)));
    }

    allocCache.reset();

    // Set the in-memory state from the persisted status bits.
    setDroppedState((statusBits & FILE_DROPPED) != 0);
    setCommittedDropState((statusBits & FILE_COMMITTED_DROP) != 0);
    setReusableRecordIdState((statusBits & FILE_REUSABLE_RECORDID) != 0);
}
Use of org.apache.derby.iapi.store.raw.PageKey in the Apache Derby project:
class StoredPage, method doUpdateAtSlot.
/**
 * Perform an update of a row at the given slot, following the row's
 * overflow chain across pages as needed.
 * <p>
 * Each iteration of the main loop updates the portion of the row stored on
 * the current page. Long columns raised via LongColumnException are
 * inserted on their own overflow chains; row pieces that no longer fit are
 * inserted on new overflow pages and linked from the previous piece. If
 * any row piece shrinks, post-commit work is queued to reclaim reserved
 * space for the entire row chain.
 *
 * @param t            the transaction performing the update
 * @param slot         the slot of the record on this page
 * @param id           the record id of the row piece being updated
 * @param row          the new column values (sparse when validColumns is
 *                     non-null); a null row is delegated directly to
 *                     actionUpdate
 * @param validColumns bit set of the columns present in row, or null when
 *                     all columns are supplied
 *
 * @exception StandardException Standard Derby policy
 */
public void doUpdateAtSlot(RawTransaction t, int slot, int id, Object[] row, FormatableBitSet validColumns) throws StandardException {
// If this is a head page, the recordHandle is the head row handle.
// If this is not a head page, we are calling updateAtSlot inside some
// convoluted loop that updates an overflow chain. There is nothing we
// can doing about it anyway.
RecordHandle headRowHandle = isOverflowPage() ? null : getRecordHandleAtSlot(slot);
// RESOLVE: djd/yyz what does a null row means? (sku)
if (row == null) {
owner.getActionSet().actionUpdate(t, this, slot, id, row, validColumns, -1, (DynamicByteArrayOutputStream) null, -1, headRowHandle);
return;
}
// startColumn is the first column to be updated.
int startColumn = RowUtil.nextColumn(row, validColumns, 0);
if (startColumn == -1)
return;
if (SanityManager.DEBUG) {
// exactly N columns are passed in via the row array.
if (!isOverflowPage() && validColumns != null) {
if (RowUtil.getNumberOfColumns(-1, validColumns) > row.length)
SanityManager.THROWASSERT("updating slot " + slot + " on page " + getIdentity() + " " + RowUtil.getNumberOfColumns(-1, validColumns) + " bits are set in validColumns but only " + row.length + " columns in row[]");
}
}
// Keep track of row shrinkage in the head row piece. If any row piece
// shrinks, file a post commit work to clear all reserved space for the
// entire row chain.
boolean rowHasReservedSpace = false;
StoredPage curPage = this;
// Walk the row piece chain: each iteration handles the piece on curPage.
for (; ; ) {
StoredRecordHeader rh = curPage.getHeaderAtSlot(slot);
int startField = rh.getFirstField();
int endFieldExclusive = startField + rh.getNumberFields();
// curPage contains column[startField] to column[endFieldExclusive-1]
// Need to cope with an update that is increasing the number of
// columns. If this occurs we want to make sure that we perform a
// single update to the last portion of a record, and not an update
// of the current columns and then an update to append a column.
// nextPage == -1 means "not yet determined for this iteration".
long nextPage = -1;
int realStartColumn = -1;
int realSpaceOnPage = -1;
// Only update this piece if the first column to update falls within
// it, or if this is the last piece of the chain (no overflow).
if (!rh.hasOverflow() || ((startColumn >= startField) && (startColumn < endFieldExclusive))) {
boolean hitLongColumn;
int nextColumn = -1;
Object[] savedFields = null;
DynamicByteArrayOutputStream logBuffer = null;
// Retry loop: each LongColumnException moves a long column to its
// own overflow chain, then the piece update is attempted again.
do {
try {
// Update this portion of the record.
// Pass in headRowHandle in case we are to update any
// long column and they need to be cleaned up by post
// commit processing. We don't want to purge the
// columns right now because in order to reclaim the
// page, we need to remove them. But it would be bad
// to remove them now because the transaction may not
// commit for a long time. We can do both purging of
// the long column and page removal together in the
// post commit.
nextColumn = owner.getActionSet().actionUpdate(t, curPage, slot, id, row, validColumns, realStartColumn, logBuffer, realSpaceOnPage, headRowHandle);
hitLongColumn = false;
} catch (LongColumnException lce) {
if (lce.getRealSpaceOnPage() == -1) {
// an update that has caused the row to increase
// in size *and* push some fields off the page
// that need to be inserted in an overflow page
// no need to make a copy as we are going to use
// this buffer right away
logBuffer = lce.getLogBuffer();
savedFields = (Object[]) lce.getColumn();
realStartColumn = lce.getNextColumn();
realSpaceOnPage = -1;
hitLongColumn = true;
continue;
}
// we caught a real long column exception
// three things should happen here:
// 1. insert the long column into overflow pages.
// 2. append the overflow field header in the main chain.
// 3. continue the update in the main data chain.
logBuffer = new DynamicByteArrayOutputStream(lce.getLogBuffer());
// step 1: insert the long column ... if this update
// operation rolls back, purge the after image column
// chain and reclaim the overflow page because the
// whole chain will be orphaned anyway.
RecordHandle longColumnHandle = insertLongColumn(curPage, lce, Page.INSERT_UNDO_WITH_PURGE);
// step 2: append overflow field header to log buffer
int overflowFieldLen = 0;
try {
overflowFieldLen += appendOverflowFieldHeader(logBuffer, longColumnHandle);
} catch (IOException ioe) {
throw StandardException.newException(SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
}
// step 3: continue the insert in the main data chain
// need to pass the log buffer, and start column to the
// next insert.
realStartColumn = lce.getNextColumn() + 1;
realSpaceOnPage = lce.getRealSpaceOnPage() - overflowFieldLen;
hitLongColumn = true;
} catch (NoSpaceOnPage nsop) {
// NOTE(review): validColumns can be null on this path (it is
// null-checked just below when computing validColumnsSize);
// validColumns.toString() here would then throw an NPE while
// building the exception message -- confirm.
throw StandardException.newException(SQLState.DATA_UNEXPECTED_NO_SPACE_ON_PAGE, nsop, ((PageKey) curPage.getIdentity()).toString(), getPageDumpString(), slot, id, validColumns.toString(), realStartColumn, 0, headRowHandle);
}
} while (hitLongColumn);
// See if we completed all the columns that are on this page.
int validColumnsSize = (validColumns == null) ? 0 : validColumns.getLength();
if (nextColumn != -1) {
if (SanityManager.DEBUG) {
if ((nextColumn < startField) || (rh.hasOverflow() && (nextColumn >= endFieldExclusive))) {
SanityManager.THROWASSERT("nextColumn out of range = " + nextColumn + " expected between " + startField + " and " + endFieldExclusive);
}
}
// Need to insert rows from nextColumn to endFieldExclusive
// onto a new overflow page.
// If the column is not being updated we
// pick it up from the current page. If it is being updated
// we take it from the new value.
int possibleLastFieldExclusive = endFieldExclusive;
if (!rh.hasOverflow()) {
// we might be adding a field here
if (validColumns == null) {
if (row.length > possibleLastFieldExclusive)
possibleLastFieldExclusive = row.length;
} else {
if (validColumnsSize > possibleLastFieldExclusive)
possibleLastFieldExclusive = validColumnsSize;
}
}
// use a sparse row
Object[] newRow = new Object[possibleLastFieldExclusive];
FormatableBitSet newColumnList = new FormatableBitSet(possibleLastFieldExclusive);
ByteArrayOutputStream fieldStream = null;
// Build the overflow row piece: updated columns come from the
// caller's row, unchanged columns from the fields saved when
// they were pushed off this page.
for (int i = nextColumn; i < possibleLastFieldExclusive; i++) {
if ((validColumns == null) || (validColumnsSize > i && validColumns.isSet(i))) {
newColumnList.set(i);
// use the new value
newRow[i] = RowUtil.getColumn(row, validColumns, i);
} else if (i < endFieldExclusive) {
newColumnList.set(i);
// use the old value
newRow[i] = savedFields[i - nextColumn];
}
}
RecordHandle handle = curPage.getRecordHandleAtSlot(slot);
// there cannot be any updates to do.
if (rh.hasOverflow()) {
// We have to carry across the overflow information
// from the current record, if any.
nextPage = rh.getOverflowPage();
id = rh.getOverflowId();
// find the next starting column before unlatching page
startColumn = RowUtil.nextColumn(row, validColumns, endFieldExclusive);
} else {
startColumn = -1;
nextPage = 0;
}
// Don't bother with temp container.
if (!rowHasReservedSpace && headRowHandle != null && curPage != null && !owner.isTemporaryContainer()) {
rowHasReservedSpace = curPage.checkRowReservedSpace(slot);
}
// insert the record portion on a new overflow page at slot
// 0 this will automatically handle any overflows in
// this new portion
// BasePage op = getNewOverflowPage();
BasePage op = curPage.getOverflowPageForInsert(slot, newRow, newColumnList, nextColumn);
// We have all the information from this page so unlatch it
if (curPage != this) {
curPage.unlatch();
curPage = null;
}
byte mode = Page.INSERT_OVERFLOW;
if (nextPage != 0)
mode |= Page.INSERT_FOR_SPLIT;
RecordHandle nextPortionHandle = nextPage == 0 ? null : owner.makeRecordHandle(nextPage, id);
// RESOLVED (sku): even though we would like to roll back
// these inserts with PURGE rather than with delete,
// we have to delete because if we purge the last row
// from an overflow page, the purge will queue a post
// commit to remove the page.
// While this is OK with long columns, we cannot do this
// for long rows because long row overflow pages can be
// shared by more than one long rows, and thus it is unsafe
// to remove the page without first latching the head page.
// However, the insert log record do not have the head
// row's page number so the rollback cannot put that
// information into the post commit work.
RecordHandle portionHandle;
try {
portionHandle = op.insertAllowOverflow(0, newRow, newColumnList, nextColumn, mode, 100, nextPortionHandle);
} catch (NoSpaceOnPage nsop) {
throw StandardException.newException(SQLState.DATA_UNEXPECTED_NO_SPACE_ON_PAGE, nsop, ((PageKey) op.getIdentity()).toString(), getPageDumpString(), slot, id, newColumnList.toString(), nextColumn, mode, nextPortionHandle);
}
// Update the previous record header to point to new portion
if (curPage == this)
updateOverflowDetails(this, handle, portionHandle);
else
updateOverflowDetails(handle, portionHandle);
op.unlatch();
} else {
// See earlier comments on checking row reserved space.
if (!rowHasReservedSpace && headRowHandle != null && curPage != null && !owner.isTemporaryContainer()) {
rowHasReservedSpace = curPage.checkRowReservedSpace(slot);
}
// find the next starting column before we unlatch the page
startColumn = rh.hasOverflow() ? RowUtil.nextColumn(row, validColumns, endFieldExclusive) : -1;
}
// have we completed this update?
if (startColumn == -1) {
if ((curPage != this) && (curPage != null))
curPage.unlatch();
// break out of the for loop
break;
}
}
if (nextPage == -1) {
if (SanityManager.DEBUG) {
SanityManager.ASSERT(curPage != null, "Current page is null be no overflow information has been obtained");
}
// Get the next page info while we still have the page
// latched.
nextPage = rh.getOverflowPage();
id = rh.getOverflowId();
}
if ((curPage != this) && (curPage != null))
curPage.unlatch();
// get the next portion page and find the correct slot
curPage = (StoredPage) owner.getPage(nextPage);
if (SanityManager.DEBUG) {
SanityManager.ASSERT(curPage.isOverflowPage(), "following row chain gets a non-overflow page");
}
slot = curPage.findRecordById(id, FIRST_SLOT_NUMBER);
}
// If any row piece shrank, queue post-commit work to reclaim the
// reserved space for the whole row chain.
if (rowHasReservedSpace) {
RawTransaction rxact = (RawTransaction) owner.getTransaction();
ReclaimSpace work = new ReclaimSpace(ReclaimSpace.ROW_RESERVE, headRowHandle, rxact.getDataFactory(), true);
rxact.addPostCommitWork(work);
}
}
Aggregations