Use of org.apache.derby.iapi.store.raw.RecordHandle in the Apache Derby project: class BasePage, method deleteAtSlot.
/**
 * Mark the record at the given slot as deleted, or mark a previously
 * deleted record as undeleted, and return the handle of that record.
 *
 * @see Page#deleteAtSlot
 *
 * @param slot the slot number of the record on this latched page
 * @param delete true if this record is to be deleted, false if this
 * deleted record is to be marked undeleted
 * @param undo logical undo logic if necessary, or null if only
 * physical undo is needed
 *
 * @return the record handle of the record at the given slot
 *
 * @exception StandardException Standard exception policy.
 * @exception StandardException StandardException.newException(SQLState.DATA_UPDATE_DELETED_RECORD)
 * if an attempt is made to delete a record that is already deleted
 * @exception StandardException StandardException.newException(SQLState.DATA_UNDELETE_RECORD)
 * if an attempt is made to undelete a record that is not deleted
 */
public RecordHandle deleteAtSlot(int slot, boolean delete, LogicalUndo undo) throws StandardException {
// the caller must already hold the latch on this page
if (SanityManager.DEBUG) {
SanityManager.ASSERT(isLatched());
}
// refuse to modify a read-only container
if (!owner.updateOK()) {
throw StandardException.newException(SQLState.DATA_CONTAINER_READ_ONLY);
}
if (delete) {
// deleting a record that is already deleted is an error
if (isDeletedAtSlot(slot)) {
throw StandardException.newException(SQLState.DATA_UPDATE_DELETED_RECORD);
}
} else // undelete a deleted record
{
// undeleting a record that is not deleted is an error
if (!isDeletedAtSlot(slot)) {
throw StandardException.newException(SQLState.DATA_UNDELETE_RECORD);
}
}
RawTransaction t = owner.getTransaction();
// logical operations not allowed in internal transactions.
if (undo != null) {
t.checkLogicalOperationOk();
}
RecordHandle handle = getRecordHandleAtSlot(slot);
// log the (un)delete through the action set and apply it to the page
owner.getActionSet().actionDelete(t, this, slot, handle.getId(), delete, undo);
return handle;
}
Use of org.apache.derby.iapi.store.raw.RecordHandle in the Apache Derby project: class BasePage, method copyAndPurge.
/**
 * Copy num_rows rows from this page, starting at src_slot, into
 * destPage beginning at dest_slot, then purge the copied rows from
 * this page. Both pages must be latched by the caller and must belong
 * to the same container.
 *
 * @see Page#copyAndPurge
 *
 * @param destPage the page to copy the rows into; must be a BasePage
 * @param src_slot the first slot on this page to copy from
 * @param num_rows the number of rows to copy; must be positive
 * @param dest_slot the slot on destPage at which copying begins
 *
 * @exception StandardException Standard exception policy.
 */
public void copyAndPurge(Page destPage, int src_slot, int num_rows, int dest_slot) throws StandardException {
// the caller must already hold the latch on this page
if (SanityManager.DEBUG) {
SanityManager.ASSERT(isLatched());
}
// at least one row must be moved
if (num_rows <= 0) {
throw StandardException.newException(SQLState.DATA_NO_ROW_COPIED);
}
// refuse to modify a read-only container
if (!owner.updateOK()) {
throw StandardException.newException(SQLState.DATA_CONTAINER_READ_ONLY);
}
// the whole source range [src_slot, src_slot + num_rows) must be on this page
if ((src_slot < 0) || ((src_slot + num_rows) > recordCount)) {
throw StandardException.newException(SQLState.DATA_SLOT_NOT_ON_PAGE);
}
if (SanityManager.DEBUG) {
// first copy into the destination page, let it do the work
// if no problem, then purge from this page
SanityManager.ASSERT((destPage instanceof BasePage), "must copy from BasePage to BasePage");
}
BasePage dpage = (BasePage) destPage;
// make sure they are from the same container - this means they are of
// the same size and have the same page and record format.
// RESOLVE: MT problem ?
PageKey pageId = getPageId();
if (!pageId.getContainerId().equals(dpage.getPageId().getContainerId())) {
throw StandardException.newException(SQLState.DATA_DIFFERENT_CONTAINER, pageId.getContainerId(), dpage.getPageId().getContainerId());
}
int[] recordIds = new int[num_rows];
RawTransaction t = owner.getTransaction();
// lock each record to be purged and remember its record id so the
// purge can be logged below
for (int i = 0; i < num_rows; i++) {
RecordHandle handle = getRecordHandleAtSlot(src_slot + i);
owner.getLockingPolicy().lockRecordForWrite(t, handle, false, true);
recordIds[i] = getHeaderAtSlot(src_slot + i).getId();
}
// first copy num_rows into destination page
dpage.copyInto(this, src_slot, num_rows, dest_slot);
// Now purge num_rows from this page
// Do NOT purge overflow rows, if it has such a thing. This operation
// is called by split and if the key has overflow, splitting the head
// page does not copy over the remaining pieces, i.e., the new head page
// still points to those pieces.
owner.getActionSet().actionPurge(t, this, src_slot, num_rows, recordIds, true);
}
Use of org.apache.derby.iapi.store.raw.RecordHandle in the Apache Derby project: class BasePage, method insertAllowOverflow.
/**
 * Insert a row, allowing it to overflow onto additional pages.
 *
 * Starting at this (head) page, as much of the row as fits is
 * inserted; if a portion does not fit, an overflow page is obtained
 * and the remaining columns continue there, looping until the whole
 * row is stored. Long columns encountered along the way are moved to
 * their own overflow chains via insertLongColumn.
 *
 * If nextPortionHandle is supplied then the record at that handle will
 * be updated to indicate it is a partial row and it has an overflow
 * portion.
 *
 * @param slot the slot on this page at which to insert
 * @param row the column values to insert
 * @param validColumns bit map of which columns in row are valid;
 * null means all columns
 * @param startColumn the first column of row to insert
 * @param insertFlag flags for the insert operation
 * @param overflowThreshold threshold controlling when a row portion is
 * pushed to an overflow page
 * @param nextPortionHandle handle of an existing trailing portion that
 * the last inserted portion should point to, or null
 *
 * @return the handle of the first (head) portion of the inserted row,
 * or null if the row could not be inserted (e.g., no space, or an
 * IOException while writing a long-column overflow header)
 *
 * @exception StandardException Standard Derby error policy
 */
public RecordHandle insertAllowOverflow(int slot, Object[] row, FormatableBitSet validColumns, int startColumn, byte insertFlag, int overflowThreshold, RecordHandle nextPortionHandle) throws StandardException {
BasePage curPage = this;
// refuse to modify a read-only container
if (!curPage.owner.updateOK()) {
throw StandardException.newException(SQLState.DATA_CONTAINER_READ_ONLY);
}
// Handle of the first portion of the chain
RecordHandle headHandle = null;
// handle of the previous portion, whose overflow pointer must be
// updated to point at the next portion we insert
RecordHandle handleToUpdate = null;
RawTransaction t = curPage.owner.getTransaction();
// loop once per row portion; each iteration inserts one portion on
// curPage, then moves curPage to an overflow page if more remains
for (; ; ) {
if (SanityManager.DEBUG) {
SanityManager.ASSERT(curPage.isLatched());
}
if (!curPage.allowInsert())
return null;
// 'this' is the head page
if (curPage != this)
slot = curPage.recordCount;
boolean isLongColumns = false;
int realStartColumn = -1;
int realSpaceOnPage = -1;
DynamicByteArrayOutputStream logBuffer = null;
// allocate new record id and handle
int recordId = curPage.newRecordIdAndBump();
RecordHandle handle = new RecordId(curPage.getPageId(), recordId, slot);
if (curPage == this) {
// Lock the row, if it is the very first portion of the record.
if (handleToUpdate == null) {
while (!owner.getLockingPolicy().lockRecordForWrite(t, handle, true, /* lock is for insert */
false)) {
// loop until we get a new record id we can get a lock
// on. If we can't get the lock without waiting then
// assume the record id is owned by another xact. The
// current heap overflow algorithm makes this likely,
// as it first tries to insert a row telling raw store
// to fail if it doesn't fit on the page getting a lock
// on an id that never makes it to disk. The
// inserting transaction will hold a lock on this
// "unused" record id until it commits. The page can
// leave the cache at this point, and the inserting
// transaction has not dirtied the page (it failed
// after getting the lock but before logging anything),
// another inserting transaction will then get the
// same id as the previous inserter - thus the loop on
// lock waits.
//
// The lock we request indicates that this is a lock
// for insert, which the locking policy may use to
// perform locking concurrency optimizations.
// allocate new record id and handle
recordId = curPage.newRecordIdAndBump();
handle = new RecordId(curPage.getPageId(), recordId, slot);
}
}
headHandle = handle;
}
// inner loop: retry the portion insert whenever a long column is
// discovered; the long column is spilled to its own chain and
// then, we redo the insert with saved logBuffer.
do {
try {
startColumn = owner.getActionSet().actionInsert(t, curPage, slot, recordId, row, validColumns, (LogicalUndo) null, insertFlag, startColumn, false, realStartColumn, logBuffer, realSpaceOnPage, overflowThreshold);
isLongColumns = false;
} catch (LongColumnException lce) {
// we caught a long column exception
// three things should happen here:
// 1. insert the long column into overflow pages.
// 2. append the overflow field header in the main chain.
// 3. continue the insert in the main data chain.
logBuffer = new DynamicByteArrayOutputStream(lce.getLogBuffer());
// step 1: insert the long column ... use the same
// insertFlag as the rest of the row.
RecordHandle longColumnHandle = insertLongColumn(curPage, lce, insertFlag);
// step 2: append the overflow field header to the log buffer
int overflowFieldLen = 0;
try {
overflowFieldLen += appendOverflowFieldHeader((DynamicByteArrayOutputStream) logBuffer, longColumnHandle);
} catch (IOException ioe) {
// YYZ: revisit... ioexception, insert failed...
return null;
}
// step 3: continue the insert in the main data chain
// need to pass the log buffer, and start column to the next insert.
realStartColumn = lce.getNextColumn() + 1;
realSpaceOnPage = lce.getRealSpaceOnPage() - overflowFieldLen;
isLongColumns = true;
}
} while (isLongColumns);
if (handleToUpdate != null) {
// update the recordheader on the previous page
updateOverflowDetails(handleToUpdate, handle);
}
// all done
// startColumn == -1 means the whole row has been inserted
if (startColumn == -1) {
if (curPage != this)
curPage.unlatch();
if (nextPortionHandle != null) {
// need to update the overflow details of the last portion
// to point to the existing portion
updateOverflowDetails(handle, nextPortionHandle);
}
return headHandle;
}
// more columns remain: remember this portion so its overflow
// pointer can be set, then move to an overflow page
handleToUpdate = handle;
BasePage nextPage = curPage.getOverflowPageForInsert(slot, row, validColumns, startColumn);
if (curPage != this)
curPage.unlatch();
curPage = nextPage;
}
}
Use of org.apache.derby.iapi.store.raw.RecordHandle in the Apache Derby project: class BasePage, method insertLongColumn.
/**
 * Routine to insert a long column.
 * <p>
 * This code inserts a long column as a linked list of rows on overflow
 * pages. This list is pointed to by a small pointer in the main page
 * row column. The operation does the following:
 * allocate new overflow page
 * insert single row filling overflow page
 * while (more of column exists)
 * allocate new overflow page
 * insert single row with next piece of row
 * update previous piece to point to this new piece of row
 *
 * Same code is called both from an initial insert of a long column and
 * from a subsequent update that results in a long column.
 *
 * @return The recordHandle of the first piece of the long column chain.
 *
 * @param mainChainPage The parent page with row piece containing column
 * that will eventually point to this long column
 * chain.
 * @param lce The LongColumnException thrown when we recognized
 * that the column being inserted was "long", this
 * structure is used to cache the info that we have
 * read so far about column. In the case of an insert
 * of the stream it will have a copy of just the first
 * page of the stream that has already been read once.
 * @param insertFlag flags for insert operation.
 *
 * @exception StandardException Standard exception policy.
 */
protected RecordHandle insertLongColumn(BasePage mainChainPage, LongColumnException lce, byte insertFlag) throws StandardException {
// the long column is inserted as a one-column row on each overflow page
Object[] row = new Object[1];
row[0] = lce.getColumn();
RecordHandle firstHandle = null;
RecordHandle handle = null;
RecordHandle prevHandle = null;
BasePage curPage = mainChainPage;
BasePage prevPage = null;
boolean isFirstPage = true;
// undo inserts as purges of all pieces of the overflow column
// except for the 1st overflow page pointed at by the main row.
//
// Consider a row with one column which is a long column
// that takes 2 pages for itself plus an entry in main parent page.
// the log records in order for this look something like:
// insert overflow page 1
// insert overflow page 2
// update overflow page 1 record to have pointer to overflow page 2
// insert main row (which has pointer to overflow page 1)
//
// If this insert gets aborted then something like the following
// happens:
// main row is marked deleted (but ptr to overflow 1 still exists)
// update is aborted so link on page 2 to page 1 is lost
// overflow row on page 2 is marked deleted
// overflow row on page 1 is marked deleted
//
// There is no way to reclaim page 2 later as the abort of the update
// has now lost the link from overflow page 1 to overflow 2, so
// the system has to do it as part of the abort of the insert. But,
// it can't for page 1 as the main page will attempt to follow
// it's link in the deleted row during it's space reclamation and it
// can't tell the difference
// between a row that has been marked deleted as part of an aborted
// insert or as part of a committed delete. When it follows the link
// it could find no page and that could be coded against, but it could
// be that the page is now used by some other overflow row which would
// lead to lots of different kinds of problems.
//
// So the code leaves the 1st overflow page to be cleaned up with the
// main page row is purged, but goes ahead and immediately purges all
// the segments that will be lost as part of the links being lost due
// to aborted updates.
byte after_first_page_insertFlag = (byte) (insertFlag | Page.INSERT_UNDO_WITH_PURGE);
// when inserting a long column startColumn is just used
// as a flag. -1 means the insert is complete, != -1 indicates
// more inserts are required.
int startColumn = 0;
RawTransaction t = curPage.owner.getTransaction();
// loop once per overflow page until the whole column is stored
do {
if (!isFirstPage) {
// remember the previous piece so it can be linked to the next
prevPage = curPage;
prevHandle = handle;
}
// step 1. get a new overflow page
curPage = (BasePage) getNewOverflowPage();
if (SanityManager.DEBUG) {
SanityManager.ASSERT(curPage.isLatched());
SanityManager.ASSERT(curPage.allowInsert());
}
int slot = curPage.recordCount;
int recordId = curPage.newRecordId();
handle = new RecordId(curPage.getPageId(), recordId, slot);
if (isFirstPage)
firstHandle = handle;
// step 2: insert column portion
startColumn = owner.getActionSet().actionInsert(t, curPage, slot, recordId, row, (FormatableBitSet) null, (LogicalUndo) null, (isFirstPage ? insertFlag : after_first_page_insertFlag), startColumn, true, -1, (DynamicByteArrayOutputStream) null, -1, 100);
// then release latch on prevPage
if (!isFirstPage) {
// for the previous page, add an overflow field header,
// and update the record header to show 2 fields
prevPage.updateFieldOverflowDetails(prevHandle, handle);
prevPage.unlatch();
prevPage = null;
} else {
isFirstPage = false;
}
} while (startColumn != (-1));
// release the latch on the last piece of the chain
if (curPage != null) {
curPage.unlatch();
curPage = null;
}
return (firstHandle);
}
Use of org.apache.derby.iapi.store.raw.RecordHandle in the Apache Derby project: class HeapConglomerateFactory, method readConglomerate.
/**
 * Return Conglomerate object for conglomerate with container_key.
 * <p>
 * Return the Conglomerate Object. This is implementation specific.
 * Examples of what will be done is using the key to find the file where
 * the conglomerate is located, and then executing implementation specific
 * code to instantiate an object from reading a "special" row from a
 * known location in the file. In the btree case the btree conglomerate
 * is stored as a column in the control row on the root page.
 * <p>
 * This operation is costly so it is likely an implementation using this
 * will cache the conglomerate row in memory so that subsequent accesses
 * need not perform this operation.
 *
 * @param xact_mgr transaction to perform the read in.
 * @param container_key The unique id of the existing conglomerate.
 *
 * @return An instance of the conglomerate.
 *
 * @exception StandardException Standard exception policy; in particular
 * SQLState.STORE_CONGLOMERATE_DOES_NOT_EXIST if no container
 * exists for container_key.
 */
public Conglomerate readConglomerate(TransactionManager xact_mgr, ContainerKey container_key) throws StandardException {
ContainerHandle container = null;
Page page = null;
DataValueDescriptor[] control_row = new DataValueDescriptor[1];
try {
// open container to read the Heap object out of its control row.
container = xact_mgr.getRawStoreXact().openContainer(container_key, (LockingPolicy) null, 0);
if (container == null) {
throw StandardException.newException(SQLState.STORE_CONGLOMERATE_DOES_NOT_EXIST, container_key.getContainerId());
}
// row in slot 0 of heap page 1 which is just a single column with
// the heap entry.
control_row[0] = new Heap();
page = container.getPage(ContainerHandle.FIRST_PAGE_NUMBER);
RecordHandle rh = page.fetchFromSlot((RecordHandle) null, 0, control_row, (FetchDescriptor) null, true);
if (SanityManager.DEBUG) {
SanityManager.ASSERT(rh != null);
// for now the control row is always the first id assigned on
// page 1; use the named constant rather than the literal 6.
SanityManager.ASSERT(rh.getId() == RecordHandle.FIRST_RECORD_ID);
}
} finally {
// always release the page latch and close the container, even on error
if (page != null)
page.unlatch();
if (container != null)
container.close();
}
return ((Conglomerate) control_row[0]);
}
Aggregations