Use of org.apache.derby.iapi.store.raw.RecordHandle in project derby by apache.
The class HeapController, method load.
protected long load(TransactionManager xact_manager, Heap heap, boolean createConglom, RowLocationRetRowSource rowSource) throws StandardException {
long num_rows_loaded = 0;
if (SanityManager.DEBUG) {
SanityManager.ASSERT(open_conglom == null, "load expects container handle to be closed on entry.");
}
// The individual rows that are inserted are not logged. To use a
// logged interface, use insert. RESOLVE: do we want to allow client
// to use the load interface even for logged insert?
int mode = (ContainerHandle.MODE_FORUPDATE | ContainerHandle.MODE_UNLOGGED);
// If creating the conglomerate, create the container unlogged as well;
// rollback of the create removes the whole container and its page allocation.
if (createConglom)
mode |= ContainerHandle.MODE_CREATE_UNLOGGED;
OpenConglomerate open_conglom = new OpenHeap();
if (open_conglom.init(
        (ContainerHandle) null,
        heap,
        heap.format_ids,
        heap.collation_ids,
        xact_manager,
        xact_manager.getRawStoreXact(),
        false,
        mode,
        TransactionController.MODE_TABLE,
        xact_manager.getRawStoreXact().newLockingPolicy(
                LockingPolicy.MODE_CONTAINER,
                TransactionController.ISOLATION_SERIALIZABLE,
                true),
        (DynamicCompiledOpenConglomInfo) null) == null) {
throw StandardException.newException(SQLState.HEAP_CONTAINER_NOT_FOUND, heap.getId().getContainerId());
}
this.init(open_conglom);
// For bulk loading, we always use only brand-new pages because the row
// insertion itself is not logged. We must not pollute pages that contain
// pre-existing data with unlogged rows, because nobody is going to wipe
// out these rows if the transaction rolls back. We are counting on
// the allocation page rollback to obliterate these rows if the
// transaction fails, or, in the CREATE_UNLOGGED case, the whole
// container to be removed.
Page page = open_conglom.getContainer().addPage();
boolean callbackWithRowLocation = rowSource.needsRowLocation();
RecordHandle rh;
HeapRowLocation rowlocation;
if (callbackWithRowLocation || rowSource.needsRowLocationForDeferredCheckConstraints())
rowlocation = new HeapRowLocation();
else
rowlocation = null;
FormatableBitSet validColumns = rowSource.getValidColumns();
try {
// get the next row and its valid columns from the rowSource
DataValueDescriptor[] row;
while ((row = rowSource.getNextRowFromRowSource()) != null) {
num_rows_loaded++;
if (SanityManager.DEBUG) {
// Make sure valid columns are in the list. The RowUtil
// call is too expensive to make in a released system for
// every insert.
int invalidColumn = RowUtil.columnOutOfRange(row, validColumns, heap.format_ids.length);
if (invalidColumn >= 0) {
throw (StandardException.newException(SQLState.HEAP_TEMPLATE_MISMATCH, invalidColumn, heap.format_ids.length));
}
}
// Insert it onto this page as long as it can fit more rows.
if ((rh = page.insert(row, validColumns, Page.INSERT_DEFAULT, AccessFactoryGlobals.HEAP_OVERFLOW_THRESHOLD)) == null) {
// Insert failed; the row did not fit. Get a new page.
page.unlatch();
page = null;
page = open_conglom.getContainer().addPage();
// RESOLVE (mikem) - no long rows yet so the following code
// will get an exception from the raw store for a row that
// does not fit on a page.
//
// Multi-thread considerations aside, the raw store will
// guarantee that any size row will fit on an empty page.
rh = page.insert(row, validColumns, Page.INSERT_OVERFLOW, AccessFactoryGlobals.HEAP_OVERFLOW_THRESHOLD);
}
// and go for the next row.
if (callbackWithRowLocation) {
rowlocation.setFrom(rh);
rowSource.rowLocation(rowlocation);
}
if (rowSource.needsRowLocationForDeferredCheckConstraints()) {
rowlocation.setFrom(rh);
rowSource.offendingRowLocation(rowlocation, heap.getContainerid());
}
}
page.unlatch();
page = null;
// The load is unlogged, so flush the container to disk to make the
// inserted rows durable.
if (!heap.isTemporary())
open_conglom.getContainer().flushContainer();
} finally {
// If an error happened here, don't bother flushing the
// container since the changes should be rolled back anyhow.
close();
}
return (num_rows_loaded);
}
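The loop above fills one freshly allocated page at a time: when Page.insert() returns null, it abandons the current page for a brand-new one rather than reusing pages that hold pre-existing, logged data. Below is a minimal, self-contained sketch of that fill-then-allocate pattern; SketchPage, SketchContainer, and the fixed three-row capacity are hypothetical stand-ins, not Derby types.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public final class BulkLoadSketch {

    // Hypothetical stand-in for a latched page with a small fixed capacity.
    static final class SketchPage {
        private final int capacity;
        private final List<Object[]> rows = new ArrayList<>();
        SketchPage(int capacity) { this.capacity = capacity; }
        // Returns false when the row does not fit, like Page.insert() returning null.
        boolean insert(Object[] row) {
            if (rows.size() >= capacity) { return false; }
            rows.add(row);
            return true;
        }
    }

    // Hypothetical container that, like load(), only hands out brand-new pages.
    static final class SketchContainer {
        final List<SketchPage> pages = new ArrayList<>();
        SketchPage addPage() {
            SketchPage p = new SketchPage(3);
            pages.add(p);
            return p;
        }
    }

    static long load(SketchContainer container, Iterator<Object[]> rowSource) {
        long loaded = 0;
        SketchPage page = container.addPage(); // always start on a fresh page
        while (rowSource.hasNext()) {
            Object[] row = rowSource.next();
            if (!page.insert(row)) {
                // Page full: never fall back to a partially filled pre-existing
                // page; ask for another brand-new one, since rollback can only
                // wipe out whole unlogged pages.
                page = container.addPage();
                page.insert(row); // an empty page always accepts one row
            }
            loaded++;
        }
        return loaded;
    }

    public static void main(String[] args) {
        SketchContainer c = new SketchContainer();
        List<Object[]> rows = new ArrayList<>();
        for (int i = 0; i < 10; i++) { rows.add(new Object[] { i }); }
        System.out.println(load(c, rows.iterator()) + " rows across "
                + c.pages.size() + " pages");
    }
}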
Use of org.apache.derby.iapi.store.raw.RecordHandle in project derby by apache.
The class HeapController, method doInsert.
/**
* Insert a new row into the heap.
* <p>
* Overflow policy:
* The current heap access method implements an algorithm that optimizes
* for fetch efficiency vs. space efficiency. A row will not be
* overflowed unless it is bigger than a page. If it is bigger than a page,
* then its initial part will be placed on a page and subsequent
* parts will be overflowed to other pages.
* <p>
*
* @return The record handle of the inserted row.
*
* @param row The row to insert.
*
* @exception StandardException Standard exception policy.
*/
private RecordHandle doInsert(DataValueDescriptor[] row) throws StandardException {
Page page = null;
byte insert_mode;
RecordHandle rh;
if (SanityManager.DEBUG) {
Heap heap = (Heap) open_conglom.getConglomerate();
// Make sure valid columns are in the list. The RowUtil
// call is too expensive to make in a released system for
// every insert.
int invalidColumn = RowUtil.columnOutOfRange(row, null, heap.format_ids.length);
if (invalidColumn >= 0) {
throw (StandardException.newException(SQLState.HEAP_TEMPLATE_MISMATCH, invalidColumn, heap.format_ids.length));
}
}
// Get the last page that was returned for insert or the last page
// that was allocated.
page = open_conglom.getContainer().getPageForInsert(0);
if (page != null) {
// If there are 0 rows on the page, allow the insert to overflow.
insert_mode = (page.recordCount() == 0) ? Page.INSERT_OVERFLOW : Page.INSERT_DEFAULT;
// Check to see if there is enough space on the page
// for the row.
rh = page.insert(row, null, insert_mode, AccessFactoryGlobals.HEAP_OVERFLOW_THRESHOLD);
page.unlatch();
page = null;
// If we found a page with enough space, the insert succeeded and we
// are done; exclusive access to the page has already been released.
if (rh != null) {
return rh;
}
}
// If the last inserted page is now full, or RawStore has
// forgotten what it was, or the row cannot fit on the last
// inserted page, try to have rawStore get a relatively unfilled
// page.
page = open_conglom.getContainer().getPageForInsert(ContainerHandle.GET_PAGE_UNFILLED);
if (page != null) {
// Do the insert all over again hoping that it will fit into
// this page, and if not, allocate a new page.
// If there are 0 rows on the page, allow the insert to overflow.
insert_mode = (page.recordCount() == 0) ? Page.INSERT_OVERFLOW : Page.INSERT_DEFAULT;
rh = page.insert(row, null, insert_mode, AccessFactoryGlobals.HEAP_OVERFLOW_THRESHOLD);
page.unlatch();
page = null;
// If the insert succeeded, we are done; exclusive access to the page
// has already been released.
if (rh != null) {
return rh;
}
}
page = open_conglom.getContainer().addPage();
// At this point with long rows the raw store will guarantee
// that any size row will fit on an empty page.
rh = page.insert(row, null, Page.INSERT_OVERFLOW, AccessFactoryGlobals.HEAP_OVERFLOW_THRESHOLD);
page.unlatch();
page = null;
if (SanityManager.DEBUG) {
// a null will only be returned if this page is not empty
SanityManager.ASSERT(rh != null);
}
return rh;
}
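doInsert() tries pages in three steps: the container's remembered last-insert page, then a relatively unfilled page, then a brand-new page that is guaranteed to accept the row. The sketch below models that cascade; Pg and Ctr are hypothetical types, and the "under half full" heuristic is a made-up stand-in for whatever ContainerHandle.GET_PAGE_UNFILLED actually does.

import java.util.ArrayList;
import java.util.List;

public final class InsertPageChoiceSketch {

    // Hypothetical page with a fixed row capacity.
    static final class Pg {
        private final int capacity;
        final List<Object[]> rows = new ArrayList<>();
        Pg(int capacity) { this.capacity = capacity; }
        // Mimics Page.insert(): null means the row did not fit.
        Integer insert(Object[] row) {
            if (rows.size() >= capacity) { return null; }
            rows.add(row);
            return rows.size() - 1; // slot number as a stand-in record handle
        }
    }

    // Hypothetical container tracking a last-insert page like getPageForInsert(0).
    static final class Ctr {
        final List<Pg> pages = new ArrayList<>();
        Pg lastInsertPage;
        Pg addPage() { Pg p = new Pg(4); pages.add(p); lastInsertPage = p; return p; }
        // Made-up heuristic standing in for getPageForInsert(GET_PAGE_UNFILLED).
        Pg unfilledPage() {
            for (Pg p : pages) { if (p.rows.size() * 2 < 4) { return p; } }
            return null;
        }
    }

    static int insertRow(Ctr c, Object[] row) {
        // 1. Try the page the container remembers as the last insert target.
        if (c.lastInsertPage != null) {
            Integer slot = c.lastInsertPage.insert(row);
            if (slot != null) { return slot; }
        }
        // 2. Ask for a relatively unfilled page and retry.
        Pg p = c.unfilledPage();
        if (p != null) {
            Integer slot = p.insert(row);
            if (slot != null) { return slot; }
        }
        // 3. Fall back to a brand-new page, which always accepts the row.
        return c.addPage().insert(row);
    }

    public static void main(String[] args) {
        Ctr c = new Ctr();
        for (int i = 0; i < 9; i++) { insertRow(c, new Object[] { i }); }
        System.out.println("pages allocated: " + c.pages.size()); // 3
    }
}

The order reflects the fetch-vs-space trade-off stated in the javadoc: cheap reuse of the hot page first, a space-reclaiming search second, and a fresh allocation only as a last resort.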
Use of org.apache.derby.iapi.store.raw.RecordHandle in project derby by apache.
The class HeapController, method insertAndFetchLocation.
public void insertAndFetchLocation(DataValueDescriptor[] row, RowLocation templateRowLocation) throws StandardException {
if (open_conglom.isClosed()) {
if (open_conglom.getHold()) {
open_conglom.reopen();
} else {
throw (StandardException.newException(SQLState.HEAP_IS_CLOSED, open_conglom.getConglomerate().getId()));
}
}
RecordHandle rh = doInsert(row);
if (SanityManager.DEBUG) {
SanityManager.ASSERT(templateRowLocation instanceof HeapRowLocation);
}
HeapRowLocation hrl = (HeapRowLocation) templateRowLocation;
hrl.setFrom(rh);
}
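A caller normally pairs insertAndFetchLocation() with a RowLocation template obtained from the same controller, so the concrete type matches what the method casts to. A short usage sketch, assuming only the two ConglomerateController methods that this snippet's API exposes (newRowLocationTemplate() and insertAndFetchLocation()):

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.store.access.ConglomerateController;
import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.iapi.types.RowLocation;

// Sketch only: assumes an already-opened ConglomerateController on a heap.
final class InsertAndLocateSketch {
    static RowLocation insertAndLocate(ConglomerateController cc,
            DataValueDescriptor[] row) throws StandardException {
        // Ask the controller for a location template of the right concrete
        // type (a HeapRowLocation for heap conglomerates).
        RowLocation loc = cc.newRowLocationTemplate();
        // insertAndFetchLocation() fills the template in place, as shown above.
        cc.insertAndFetchLocation(row, loc);
        return loc;
    }
}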
Use of org.apache.derby.iapi.store.raw.RecordHandle in project derby by apache.
The class BTreeForwardScan, method fetchRows.
/**
* Fetch the next N rows from the table.
* <p>
* Utility routine used by both fetchSet() and fetchNextGroup().
*
* @exception StandardException Standard exception policy.
*/
protected int fetchRows(BTreeRowPosition pos, DataValueDescriptor[][] row_array, RowLocation[] rowloc_array, BackingStoreHashtable hash_table, long max_rowcnt, int[] key_column_numbers) throws StandardException {
if (SanityManager.DEBUG) {
// RowLocations in the BTree itself are unstable and should
// not be put in long-lived structures like persistent hash tables.
SanityManager.ASSERT((hash_table == null) || !hash_table.includeRowLocations());
}
int ret_row_count = 0;
DataValueDescriptor[] fetch_row = null;
RecordHandle rh;
if (max_rowcnt == -1)
max_rowcnt = Long.MAX_VALUE;
if (this.scan_state == BTreeScan.SCAN_INPROGRESS) {
// Reposition the scan at the row just before the next one to return.
if (!reposition(pos, true)) {
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("can not fail with 2nd param true.");
}
}
} else if (this.scan_state == SCAN_INIT) {
// 1st positioning of scan (delayed from openScan).
positionAtStartPosition(pos);
} else if (this.scan_state == SCAN_HOLD_INPROGRESS) {
reopen();
this.scan_state = SCAN_INPROGRESS;
if (SanityManager.DEBUG) {
SanityManager.ASSERT(scan_position.current_positionKey != null);
}
// code path tested by holdCursor.sql: TEST 9
if (!reposition(pos, true)) {
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("can not fail with 2nd param true.");
}
}
} else if (this.scan_state == SCAN_HOLD_INIT) {
reopen();
positionAtStartForForwardScan(scan_position);
} else {
if (SanityManager.DEBUG)
SanityManager.ASSERT(this.scan_state == SCAN_DONE);
return (0);
}
if (SanityManager.DEBUG) {
SanityManager.ASSERT(init_template != null, "init_template is null");
}
if (SanityManager.DEBUG) {
SanityManager.ASSERT(this.container != null, "BTreeScan.next() called on a closed scan.");
if (row_array != null)
SanityManager.ASSERT(row_array[0] != null, "first array slot in fetchNextGroup() must be non-null.");
// B-trees don't support RowLocations yet.
if (rowloc_array != null) {
throw StandardException.newException(SQLState.BTREE_UNIMPLEMENTED_FEATURE);
}
}
leaf_loop: while (pos.current_leaf != null) {
slot_loop: while ((pos.current_slot + 1) < pos.current_leaf.page.recordCount()) {
// Unlock the previous row if doing a read.
if (pos.current_rh != null) {
this.getLockingPolicy().unlockScanRecordAfterRead(pos, init_forUpdate);
// current_rh is used to track which row we need to unlock,
// at this point no row needs to be unlocked.
pos.current_rh = null;
}
// Allocate a new row to read the row into.
if (fetch_row == null) {
if (hash_table == null) {
// point at allocated row in array if one exists.
if (row_array[ret_row_count] == null) {
row_array[ret_row_count] = runtime_mem.get_row_for_export(getRawTran());
}
fetch_row = row_array[ret_row_count];
} else {
// get a brand new row.
fetch_row = runtime_mem.get_row_for_export(getRawTran());
}
}
// move scan current position forward.
pos.current_slot++;
this.stat_numrows_visited++;
rh = pos.current_leaf.page.fetchFromSlot((RecordHandle) null, pos.current_slot, fetch_row, init_fetchDesc, true);
pos.current_rh_qualified = true;
// See if this is the stop row.
if (init_stopKeyValue != null) {
// See if the current row is >= the stopKeyValue.
//
// ret > 0: row on page is greater than the key.
// ret == 0: row on page is exactly the key if full key,
// or a partial match if partial key.
// ret < 0: row on page is less than the key.
//
int ret = ControlRow.compareIndexRowToKey(fetch_row, init_stopKeyValue, fetch_row.length, 0, this.getConglomerate().ascDescInfo);
if ((ret == 0) && (init_stopSearchOperator == ScanController.GE)) {
// if (partial) matched and stop is GE, end the scan.
ret = 1;
}
if (ret > 0) {
// This is the first non-qualifying row. We're done.
pos.current_leaf.release();
pos.current_leaf = null;
positionAtDoneScan(pos);
return (ret_row_count);
}
}
// Only lock rows that are < the stopKeyValue. No need to
// requalify against stop position after losing the latch
// as the only change that could have happened is that the
// row was marked deleted - the key value cannot change.
boolean latch_released = !this.getLockingPolicy().lockScanRow(this, pos, init_lock_fetch_desc, pos.current_lock_template, pos.current_lock_row_loc, false, init_forUpdate, lock_operation);
// special test to see if latch release code works
if (SanityManager.DEBUG) {
latch_released = test_errors(this, "BTreeScan_fetchNextGroup", pos, this.getLockingPolicy(), pos.current_leaf, latch_released);
}
// At this point we have successfully locked this record, so
// remember the record handle so that it can be unlocked if
// necessary. If the above lock deadlocks, we will not try
// to unlock a lock we never got in close(), because current_rh
// is null until after the lock is granted.
pos.current_rh = rh;
while (latch_released) {
if (!reposition(pos, false)) {
// Could not reposition on the exact row that was purged;
// reposition on the row just before the purged row instead.
if (!reposition(pos, true)) {
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("Cannot fail with 2nd param true");
}
// reposition will set pos.current_leaf to null if
// it returns false, so if this ever does fail in
// delivered code, expect a NullPointerException at
// the top of this loop when we call recordCount().
}
// Now positioned just before the purged row; continue the
// slot loop to advance past it.
continue slot_loop;
}
// At this point, the scan is positioned and the latch
// is held.
latch_released = false;
if (this.getConglomerate().isUnique()) {
// Handle row location changing since lock request was
// initiated.
// In unique indexes, there is one case where an index
// row can have its data lock key change (this usually
// cannot happen because only inserts and deletes are
// allowed - no updates). This case is an insert of a
// key, that exactly matches a committed deleted row,
// in a unique index. In that case the code updates
// the RowLocation column and flips the deleted bit to
// mark the row valid. The problem is that if this
// happens while we are waiting on a lock on the old
// RowLocation then when we wake up we have the wrong
// lock, and the row location we fetched earlier in
// this loop is invalid.
pos.current_leaf.page.fetchFromSlot((RecordHandle) null, pos.current_slot, fetch_row, init_fetchDesc, true);
latch_released = !this.getLockingPolicy().lockScanRow(this, pos, init_lock_fetch_desc, pos.current_lock_template, pos.current_lock_row_loc, false, init_forUpdate, lock_operation);
}
}
if (pos.current_leaf.page.isDeletedAtSlot(pos.current_slot)) {
this.stat_numdeleted_rows_visited++;
pos.current_rh_qualified = false;
} else if (init_qualifier != null) {
// Apply qualifiers if there are any.
pos.current_rh_qualified = this.process_qualifier(fetch_row);
}
if (pos.current_rh_qualified) {
// Sanity check (while the latch is held): the scan is still
// positioned on the row we locked and fetched.
if (SanityManager.DEBUG) {
SanityManager.ASSERT(pos.current_leaf.page.getSlotNumber(pos.current_rh) == pos.current_slot);
}
// Found qualifying row. Are we done fetching rows for the
// group?
ret_row_count++;
stat_numrows_qualified++;
final boolean doneWithGroup = max_rowcnt <= ret_row_count;
if (doneWithGroup) {
if (SanityManager.DEBUG) {
SanityManager.ASSERT(pos == scan_position);
}
int[] vcols = init_fetchDesc.getValidColumnsArray();
savePositionAndReleasePage(fetch_row, vcols);
}
if (hash_table != null) {
if (hash_table.putRow(false, fetch_row, null))
fetch_row = null;
} else {
fetch_row = null;
}
if (doneWithGroup) {
return (ret_row_count);
}
}
}
// Move position of the scan to slot 0 of the next page. If there
// is no next page current_page will be null.
positionAtNextPage(pos);
this.stat_numpages_visited++;
}
// Reached last leaf of tree.
positionAtDoneScan(pos);
// positionAtNextPage() counted one page past the last leaf, so
// decrement when the scan stops at the end of the table.
this.stat_numpages_visited--;
return (ret_row_count);
}
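The stop-key test above ends the scan at the first row past the stop position, and treats an exact (or partial) key match as "past" when the stop operator is GE. A small sketch of just that decision; the operator codes are hypothetical (Derby defines the real ones on ScanController) and the comparison semantics are inferred from the code above, where a positive result means the row is beyond the stop key.

public final class StopKeySketch {

    // Hypothetical operator codes standing in for ScanController.GE / GT.
    static final int GE = 1;
    static final int GT = 2;

    // Returns true when the scan must end before returning the current row.
    // cmp is the row-vs-stop-key comparison: negative below the key,
    // zero an exact or partial match, positive past the key.
    static boolean pastStop(int cmp, int stopOperator) {
        if (cmp == 0 && stopOperator == GE) {
            // A (possibly partial) match with a GE stop key ends the scan.
            cmp = 1;
        }
        return cmp > 0; // first non-qualifying row ends the scan
    }

    public static void main(String[] args) {
        System.out.println(pastStop(0, GE));  // true:  equal row ends a GE scan
        System.out.println(pastStop(0, GT));  // false: equal rows still qualify under GT
        System.out.println(pastStop(-1, GE)); // false: still below the stop key
    }
}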
Use of org.apache.derby.iapi.store.raw.RecordHandle in project derby by apache.
The class B2IUndo, method findUndo.
/**
* Find the page and record to undo. If no logical undo is necessary,
* i.e., row has not moved, then just return the latched page where undo
* should go. If the record has moved, it has a new recordId on the new
* page, and this routine needs to call pageOp.resetRecordHandle() with the new
* RecordHandle so that the logging system can update the compensation
* Operation with the new location.
*
* @param rawtran the transaction doing the rollback
* @param pageOp the page operation that supports logical undo. This
* LogicalUndo function pointer is a field of that
* pageOperation
* @param in data stored in the log stream that contains the record
* data necessary to restore the row.
*
* @exception StandardException Standard Derby error policy
* @exception IOException Method may read from InputStream
*/
public Page findUndo(Transaction rawtran, LogicalUndoable pageOp, LimitObjectInput in) throws StandardException, IOException {
ControlRow root = null;
ControlRow control_row = null;
DataValueDescriptor[] logged_index_row_template = null;
DataValueDescriptor[] template = null;
Page ret_page = null;
ContainerHandle container = pageOp.getContainer();
RecordHandle rechandle = pageOp.getRecordHandle();
boolean ok_exit = false;
int compare_result = 1;
B2I btree = null;
try {
// Need Conglomerate to create templates - get from the root page.
root = ControlRow.get(container, BTree.ROOTPAGEID);
if (SanityManager.DEBUG)
SanityManager.ASSERT(root.getPage().isLatched());
btree = (B2I) root.getConglom(B2I.FORMAT_NUMBER);
if (SanityManager.DEBUG)
SanityManager.ASSERT(btree instanceof B2I);
// create a template for the logged index row from the conglomerate.
logged_index_row_template = btree.createTemplate(rawtran);
// create a template for the page index row from the conglomerate.
template = btree.createTemplate(rawtran);
} finally {
if (root != null)
root.release();
}
// Get logged row from record.
pageOp.restoreLoggedRow(logged_index_row_template, in);
// RESOLVE (mikem) - currently restoreLoggedRow() may latch and unlatch
// a page in the container (see ST059).
// Now get the page where the record used to be.
ok_exit = false;
try {
// "open" the btree, using recovery's already opened container
OpenBTree open_btree = new OpenBTree();
open_btree.init(
        (TransactionManager) null, // current user xact - not needed
        (TransactionManager) null, // current xact - not needed
        pageOp.getContainer(), // recovery already opened container
        rawtran,
        false,
        ContainerHandle.MODE_FORUPDATE,
        TransactionManager.MODE_NONE, // already opened
        (BTreeLockingPolicy) null, // don't get locks during undo
        btree,
        (LogicalUndo) null, // no logical undo necessary, as this code only does reads
        (DynamicCompiledOpenConglomInfo) null);
// System.out.println(
// "calling logical undo, recordhandle = " + rechandle);
// System.out.println("calling logical undo, record= " +
// logged_index_row_template);
// Get the page where the record was originally, before splits
// could have possibly moved it.
control_row = ControlRow.get(open_btree, rechandle.getPageNumber());
// init compare_result, if record doesn't exist do the search
compare_result = 1;
if (control_row.getPage().recordExists(rechandle, true)) {
if (SanityManager.DEBUG) {
SanityManager.ASSERT(control_row.getPage().fetchNumFields(rechandle) == logged_index_row_template.length);
}
// Fetch the row stored on the page into the template created from the conglomerate.
RecordHandle ret_rechandle = control_row.getPage().fetchFromSlot((RecordHandle) null, control_row.getPage().getSlotNumber(rechandle), template, (FetchDescriptor) null, true);
// compare the 2 rows, and if they are the same then the raw
// store has the right page and record and there is no work to
// be done (this is usual case).
compare_result = ControlRow.compareIndexRowToKey(template, logged_index_row_template, logged_index_row_template.length, 1, open_btree.getColumnSortOrderInfo());
}
if (compare_result == 0) {
ret_page = control_row.getPage();
} else {
// if the 2 don't compare equal, search the btree from the root
// for the logged row, find the leaf, reset the row for the raw
// store, and return the new page latched.
// Create the objects needed for the insert.
SearchParameters sp = new SearchParameters(logged_index_row_template, ScanController.GE, template, open_btree, false);
control_row.release();
control_row = null;
control_row = ControlRow.get(open_btree, BTree.ROOTPAGEID).search(sp);
if (!sp.resultExact) {
if (SanityManager.DEBUG) {
SanityManager.THROWASSERT("B2IUndo - could not find key being searched for:" + ";key = " + RowUtil.toString(logged_index_row_template) + ";sp = " + sp + "control_row = " + control_row + "control_row.debugPage() = " + control_row.debugPage(open_btree) + "control_row.getPage() = " + control_row.getPage());
}
throw StandardException.newException(SQLState.BTREE_ROW_NOT_FOUND_DURING_UNDO);
} else {
RecordHandle rh = control_row.getPage().fetchFromSlot((RecordHandle) null, sp.resultSlot, new DataValueDescriptor[0], (FetchDescriptor) null, true);
pageOp.resetRecordHandle(rh);
ret_page = control_row.getPage();
}
}
ok_exit = true;
} finally {
if ((!ok_exit) && (control_row != null))
control_row.release();
}
return (ret_page);
}
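Stripped of latching and locking, findUndo() makes one decision: if the row still stored at the logged (page, record) location equals the logged row, the undo can be applied in place; otherwise the row has moved, so search from the root for the logged key and redirect the undo to the new location, which is what resetRecordHandle() accomplishes above. A compact sketch under those assumptions; the Tree interface and long[] location pairs are hypothetical stand-ins for ControlRow and the raw store.

import java.util.Arrays;

public final class LogicalUndoSketch {

    // Hypothetical tree interface standing in for latched B-tree pages.
    interface Tree {
        // Row currently stored at the logged (pageNo, recordId), or null if gone.
        Object[] rowAt(long pageNo, long recordId);
        // Root-to-leaf search for the logged key; returns {pageNo, recordId}.
        long[] searchLeaf(Object[] loggedRow);
    }

    // Returns the {pageNo, recordId} where the undo must actually be applied.
    static long[] findUndo(Tree tree, long loggedPage, long loggedId,
            Object[] loggedRow) {
        Object[] onPage = tree.rowAt(loggedPage, loggedId);
        if (onPage != null && Arrays.equals(onPage, loggedRow)) {
            // Usual case: the row never moved, undo in place.
            return new long[] { loggedPage, loggedId };
        }
        // The row moved (e.g. a page split): search from the root for the
        // logged key and redirect the undo to the row's new home.
        return tree.searchLeaf(loggedRow);
    }
}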