use of org.apache.derby.iapi.services.io.FormatableBitSet in project derby by apache.
the class BasePage method insertLongColumn.
/**
* Routine to insert a long column.
* <p>
* This code inserts a long column as a linked list of rows on overflow
* pages. This list is pointed to by a small pointer in the main page
* row column. The operation does the following:
* allocate new overflow page
* insert single row filling overflow page
* while (more of column exists)
* allocate new overflow page
* insert single row with next piece of row
* update previous piece to point to this new piece of row
*
* The same code is called both from an initial insert of a long column and
* from a subsequent update that results in a long column.
*
* @return The recordHandle of the first piece of the long column chain.
*
* @param mainChainPage The parent page with row piece containing column
* that will eventually point to this long column
* chain.
* @param lce The LongColumnException thrown when we recognized
* that the column being inserted was "long". This
* structure is used to cache the info that we have
* read so far about the column. In the case of an insert
* of a stream, it will have a copy of just the first
* page of the stream, which has already been read once.
* @param insertFlag flags for insert operation.
*
* @exception StandardException Standard exception policy.
*/
protected RecordHandle insertLongColumn(BasePage mainChainPage, LongColumnException lce, byte insertFlag) throws StandardException {
Object[] row = new Object[1];
row[0] = lce.getColumn();
RecordHandle firstHandle = null;
RecordHandle handle = null;
RecordHandle prevHandle = null;
BasePage curPage = mainChainPage;
BasePage prevPage = null;
boolean isFirstPage = true;
// Undo these inserts as purges of all pieces of the overflow column
// except for the 1st overflow page, which is pointed at by the main row.
//
// Consider a row with one column which is a long column
// that takes 2 pages for itself plus an entry in the main parent page.
// The log records, in order, for this look something like:
// insert overflow page 1
// insert overflow page 2
// update overflow page 1 record to have pointer to overflow page 2
// insert main row (which has pointer to overflow page 1)
//
// If this insert gets aborted then something like the following
// happens:
// main row is marked deleted (but ptr to overflow 1 still exists)
// update is aborted so link on page 2 to page 1 is lost
// overflow row on page 2 is marked deleted
// overflow row on page 1 is marked deleted
//
// There is no way to reclaim page 2 later, as the abort of the update
// has now lost the link from overflow page 1 to overflow page 2, so
// the system has to do it as part of the abort of the insert. But it
// can't do the same for page 1, as the main page will attempt to follow
// its link in the deleted row during its space reclamation, and it
// can't tell the difference
// between a row that has been marked deleted as part of an aborted
// insert and one marked deleted as part of a committed delete. When it
// follows the link it could find no page, and that case could be coded
// against, but the page could by then be in use by some other overflow
// row, which would lead to lots of different kinds of problems.
//
// So the code leaves the 1st overflow page to be cleaned up when the
// main page row is purged, but goes ahead and immediately purges all
// the segments whose links would otherwise be lost due
// to aborted updates.
byte after_first_page_insertFlag = (byte) (insertFlag | Page.INSERT_UNDO_WITH_PURGE);
// when inserting a long column startColumn is just used
// as a flag. -1 means the insert is complete, != -1 indicates
// more inserts are required.
int startColumn = 0;
RawTransaction t = curPage.owner.getTransaction();
do {
if (!isFirstPage) {
prevPage = curPage;
prevHandle = handle;
}
// step 1. get a new overflow page
curPage = (BasePage) getNewOverflowPage();
if (SanityManager.DEBUG) {
SanityManager.ASSERT(curPage.isLatched());
SanityManager.ASSERT(curPage.allowInsert());
}
int slot = curPage.recordCount;
int recordId = curPage.newRecordId();
handle = new RecordId(curPage.getPageId(), recordId, slot);
if (isFirstPage)
firstHandle = handle;
// step 2: insert column portion
startColumn = owner.getActionSet().actionInsert(
    t, curPage, slot, recordId, row,
    (FormatableBitSet) null, (LogicalUndo) null,
    (isFirstPage ? insertFlag : after_first_page_insertFlag),
    startColumn, true, -1,
    (DynamicByteArrayOutputStream) null, -1, 100);
// update the previous piece to point at this new piece, then release the latch on prevPage
if (!isFirstPage) {
// for the previous page, add an overflow field header,
// and update the record header to show 2 fields
prevPage.updateFieldOverflowDetails(prevHandle, handle);
prevPage.unlatch();
prevPage = null;
} else {
isFirstPage = false;
}
} while (startColumn != (-1));
if (curPage != null) {
curPage.unlatch();
curPage = null;
}
return (firstHandle);
}
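The linking pattern in insertLongColumn is easier to see without the page, latch and logging machinery. The following is a minimal, self-contained sketch of the same loop: split a long value into page-sized pieces, insert each piece, and update the previous piece to point at the new one. All class and field names in the sketch (OverflowChainSketch, Piece, nextPiece) are invented for illustration and are not part of Derby.

import java.util.ArrayList;
import java.util.List;

public class OverflowChainSketch {

    // One hypothetical overflow "page": a slice of the value plus a link to
    // the piece that follows it, mirroring the overflow field header written
    // by updateFieldOverflowDetails above.
    static final class Piece {
        final byte[] data;
        int nextPiece = -1; // -1 means this is the last piece of the chain
        Piece(byte[] data) { this.data = data; }
    }

    // Split value into pieces of at most pageSize bytes, link each previous
    // piece to the one that follows it, and return the chain.
    static List<Piece> insertLongValue(byte[] value, int pageSize) {
        List<Piece> pages = new ArrayList<>();
        int prev = -1; // index of the previously inserted piece
        int off = 0;
        do {
            int len = Math.min(pageSize, value.length - off);
            byte[] slice = new byte[len];
            System.arraycopy(value, off, slice, 0, len);
            pages.add(new Piece(slice)); // steps 1 and 2: new "page", insert piece
            int cur = pages.size() - 1;
            if (prev != -1) {
                pages.get(prev).nextPiece = cur; // link previous piece to this piece
            }
            prev = cur;
            off += len;
        } while (off < value.length);
        return pages;
    }

    public static void main(String[] args) {
        List<Piece> chain = insertLongValue(new byte[2500], 1024);
        System.out.println(chain.size());           // 3 pieces
        System.out.println(chain.get(0).nextPiece); // first piece points to piece 1
    }
}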
use of org.apache.derby.iapi.services.io.FormatableBitSet in project derby by apache.
the class HeapController method load.
protected long load(TransactionManager xact_manager, Heap heap, boolean createConglom, RowLocationRetRowSource rowSource) throws StandardException {
long num_rows_loaded = 0;
if (SanityManager.DEBUG) {
SanityManager.ASSERT(open_conglom == null, "load expects container handle to be closed on entry.");
}
// The individual rows that are inserted are not logged. To use a
// logged interface, use insert. RESOLVE: do we want to allow client
// to use the load interface even for logged insert?
int mode = (ContainerHandle.MODE_FORUPDATE | ContainerHandle.MODE_UNLOGGED);
// If the conglomerate is being created by this same operation, make the
// container creation unlogged as well.
if (createConglom)
mode |= ContainerHandle.MODE_CREATE_UNLOGGED;
OpenConglomerate open_conglom = new OpenHeap();
if (open_conglom.init(
    (ContainerHandle) null, heap, heap.format_ids, heap.collation_ids,
    xact_manager, xact_manager.getRawStoreXact(), false, mode,
    TransactionController.MODE_TABLE,
    xact_manager.getRawStoreXact().newLockingPolicy(
        LockingPolicy.MODE_CONTAINER,
        TransactionController.ISOLATION_SERIALIZABLE, true),
    (DynamicCompiledOpenConglomInfo) null) == null) {
throw StandardException.newException(SQLState.HEAP_CONTAINER_NOT_FOUND, heap.getId().getContainerId());
}
this.init(open_conglom);
// For bulk loading, we always use only brand new pages because the row
// insertion itself is not logged. We cannot pollute pages that hold
// pre-existing data with unlogged rows, because nobody is going to wipe
// out these rows if the transaction rolls back. We are counting on
// the allocation page rollback to obliterate these rows if the
// transaction fails, or, in the CREATE_UNLOGGED case, on the whole
// container being removed.
Page page = open_conglom.getContainer().addPage();
boolean callbackWithRowLocation = rowSource.needsRowLocation();
RecordHandle rh;
HeapRowLocation rowlocation;
if (callbackWithRowLocation || rowSource.needsRowLocationForDeferredCheckConstraints())
rowlocation = new HeapRowLocation();
else
rowlocation = null;
FormatableBitSet validColumns = rowSource.getValidColumns();
try {
// get the next row and its valid columns from the rowSource
DataValueDescriptor[] row;
while ((row = rowSource.getNextRowFromRowSource()) != null) {
num_rows_loaded++;
if (SanityManager.DEBUG) {
// Make sure valid columns are in the list. The RowUtil
// call is too expensive to make in a released system for
// every insert.
int invalidColumn = RowUtil.columnOutOfRange(row, validColumns, heap.format_ids.length);
if (invalidColumn >= 0) {
throw (StandardException.newException(SQLState.HEAP_TEMPLATE_MISMATCH, invalidColumn, heap.format_ids.length));
}
}
// Insert it onto this page as long as it can fit more rows.
if ((rh = page.insert(row, validColumns, Page.INSERT_DEFAULT, AccessFactoryGlobals.HEAP_OVERFLOW_THRESHOLD)) == null) {
// Insert failed, row did not fit. Get a new page.
page.unlatch();
page = null;
page = open_conglom.getContainer().addPage();
// RESOLVE (mikem) - no long rows yet so the following code
// will get an exception from the raw store for a row that
// does not fit on a page.
//
// Multi-thread considerations aside, the raw store will
// guarantee that any size row will fit on an empty page.
rh = page.insert(row, validColumns, Page.INSERT_OVERFLOW, AccessFactoryGlobals.HEAP_OVERFLOW_THRESHOLD);
}
// report the row location to the row source if requested, then go for the next row.
if (callbackWithRowLocation) {
rowlocation.setFrom(rh);
rowSource.rowLocation(rowlocation);
}
if (rowSource.needsRowLocationForDeferredCheckConstraints()) {
rowlocation.setFrom(rh);
rowSource.offendingRowLocation(rowlocation, heap.getContainerid());
}
}
page.unlatch();
page = null;
// Flush the container to disk before returning, since the load itself is
// unlogged; a temporary conglomerate does not need to be flushed.
if (!heap.isTemporary())
open_conglom.getContainer().flushContainer();
} finally {
// If an error happened here, don't bother flushing the
// container since the changes should be rolled back anyhow.
close();
}
return (num_rows_loaded);
}
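Both load() above and the other callers on this page treat a FormatableBitSet as a column mask: bit i is set when position i of the row array actually carries a value, and a null mask means all columns. Below is a small, hypothetical illustration of building and reading such a mask; it uses set() and size() as seen elsewhere on this page, plus isSet(), which is assumed here to be the corresponding read accessor.

import org.apache.derby.iapi.services.io.FormatableBitSet;

public class ValidColumnsSketch {
    public static void main(String[] args) {
        // Pretend the heap has 4 columns and the row source supplies 0 and 2.
        FormatableBitSet validColumns = new FormatableBitSet(4);
        validColumns.set(0);
        validColumns.set(2);

        for (int i = 0; i < validColumns.size(); i++) {
            System.out.println("column " + i + ": "
                    + (validColumns.isSet(i) ? "supplied by row source"
                                             : "not supplied"));
        }
        // Passing null instead of a mask, as several callers on this page do,
        // means "all columns".
    }
}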
use of org.apache.derby.iapi.services.io.FormatableBitSet in project derby by apache.
the class BTreeScan method initScanParams.
/**
* Shared initialization code between init() and reopenScan().
* <p>
* Basically save away input parameters describing qualifications for
* the scan, and do some error checking.
*
* @exception StandardException Standard exception policy.
*/
private void initScanParams(DataValueDescriptor[] startKeyValue, int startSearchOperator, Qualifier[][] qualifier, DataValueDescriptor[] stopKeyValue, int stopSearchOperator) throws StandardException {
// startKeyValue init.
this.init_startKeyValue = startKeyValue;
if (RowUtil.isRowEmpty(this.init_startKeyValue))
this.init_startKeyValue = null;
// startSearchOperator init.
this.init_startSearchOperator = startSearchOperator;
// qualifier init.
if ((qualifier != null) && (qualifier.length == 0))
qualifier = null;
this.init_qualifier = qualifier;
// stopKeyValue init.
this.init_stopKeyValue = stopKeyValue;
if (RowUtil.isRowEmpty(this.init_stopKeyValue))
this.init_stopKeyValue = null;
// stopSearchOperator init.
this.init_stopSearchOperator = stopSearchOperator;
// reset the "current" position to starting condition.
// RESOLVE (mmm) - "compile" this.
scan_position = new BTreeRowPosition(this);
scan_position.init();
scan_position.current_lock_template = new DataValueDescriptor[this.init_template.length];
scan_position.current_lock_row_loc = (RowLocation) init_template[init_template.length - 1].cloneValue(false);
scan_position.current_lock_template[this.init_template.length - 1] = scan_position.current_lock_row_loc;
// scanColumnList.
if (SanityManager.DEBUG) {
if (init_scanColumnList != null) {
// verify that all columns specified in qualifiers, start
// and stop positions are specified in the scanColumnList.
FormatableBitSet required_cols;
if (qualifier != null)
required_cols = RowUtil.getQualifierBitSet(qualifier);
else
required_cols = new FormatableBitSet(0);
// add in start columns
if (this.init_startKeyValue != null) {
required_cols.grow(this.init_startKeyValue.length);
for (int i = 0; i < this.init_startKeyValue.length; i++) required_cols.set(i);
}
if (this.init_stopKeyValue != null) {
required_cols.grow(this.init_stopKeyValue.length);
for (int i = 0; i < this.init_stopKeyValue.length; i++) required_cols.set(i);
}
FormatableBitSet required_cols_and_scan_list = (FormatableBitSet) required_cols.clone();
required_cols_and_scan_list.and(init_scanColumnList);
// FormatableBitSet.equals() requires the two FormatableBitSets to be of the
// same length.
required_cols.grow(init_scanColumnList.size());
if (!required_cols_and_scan_list.equals(required_cols)) {
SanityManager.THROWASSERT(
    "Some column specified in a Btree qualifier/start/stop list is " +
    "not represented in the scanColumnList." +
    "\n:required_cols_and_scan_list = " + required_cols_and_scan_list +
    "\n;required_cols = " + required_cols +
    "\n;init_scanColumnList = " + init_scanColumnList);
}
}
}
}
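The sanity check above reduces to a subset test: every column referenced by a qualifier, start key, or stop key must also appear in init_scanColumnList, and the test is expressed as "required AND scanColumnList equals required" once both operands have the same length, which equals() demands. A standalone sketch of that idiom is shown below; it uses only FormatableBitSet operations that appear in the method above, and the helper name and its explicit length normalization are illustrative rather than Derby's own code.

import org.apache.derby.iapi.services.io.FormatableBitSet;

public class SubsetCheckSketch {

    // True when every bit set in 'required' is also set in 'scanColumnList'.
    // Copies are grown to a common length first so that and() and equals(),
    // which only compares equal-length bit sets, behave predictably.
    static boolean isSubset(FormatableBitSet required, FormatableBitSet scanColumnList) {
        FormatableBitSet req = (FormatableBitSet) required.clone();
        FormatableBitSet scan = (FormatableBitSet) scanColumnList.clone();
        int len = Math.max(req.size(), scan.size());
        req.grow(len);
        scan.grow(len);

        FormatableBitSet intersection = (FormatableBitSet) req.clone();
        intersection.and(scan);          // required AND scanColumnList
        return intersection.equals(req); // unchanged by the AND means subset
    }

    public static void main(String[] args) {
        FormatableBitSet scanList = new FormatableBitSet(4);
        scanList.set(0);
        scanList.set(2);

        FormatableBitSet required = new FormatableBitSet(4);
        required.set(2);
        System.out.println(isSubset(required, scanList)); // true

        required.set(1); // column 1 is required but not in the scan list
        System.out.println(isSubset(required, scanList)); // false
    }
}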
use of org.apache.derby.iapi.services.io.FormatableBitSet in project derby by apache.
the class T_AccessFactory method holdCursor.
// test various flavors of commit
protected boolean holdCursor(TransactionController tc) throws StandardException, T_Fail {
REPORT("(holdCursor)");
// Create a conglomerate and an index on that conglomerate.
long base_id = createAConglom(tc, 0, false);
// Open it.
ConglomerateController cc = tc.openConglomerate(base_id, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// insert rows 1 through 4 (createAConglom already inserted row 0, giving 5 rows in total)
T_AccessRow r1 = null;
SQLLongint c1 = null;
for (int i = 1; i < 5; i++) {
// Create a row.
r1 = new T_AccessRow(1);
c1 = new SQLLongint(i);
r1.setCol(0, c1);
// Get a location template
RowLocation rowloc = cc.newRowLocationTemplate();
// Insert the row and remember its location.
cc.insertAndFetchLocation(r1.getRowArray(), rowloc);
}
// Create an index on the base table.
long index_id = createBtree(tc, base_id, false);
tc.commit();
cc.close();
tc.commit();
// HEAP - test that scan is closed on commit of non-held cursor.
// Open scan on the base table.
ScanController base_scan = tc.openScan(
    base_id,
    false, // don't hold
    TransactionController.OPENMODE_FORUPDATE, // for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    (FormatableBitSet) null, // all columns, all as objects
    null, // start position - first row in conglomerate
    0, // unused if start position is null.
    null, // qualifier - accept all rows
    null, // stop position - last row in conglomerate
    0); // unused if stop position is null.
// Open scan on the index table.
ScanController index_scan = tc.openScan(
    index_id,
    false, // don't hold
    TransactionController.OPENMODE_FORUPDATE, // for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    (FormatableBitSet) null, // all columns, all as objects
    null, // start position - first row in conglomerate
    0, // unused if start position is null.
    null, // qualifier - accept all rows
    null, // stop position - last row in conglomerate
    0); // unused if stop position is null.
testOpsBeforeFirstNext(base_scan, r1.getRowArray());
testOpsBeforeFirstNext(index_scan, r1.getRowArray());
base_scan.next();
index_scan.next();
base_scan.next();
index_scan.next();
base_scan.next();
index_scan.next();
base_scan.next();
index_scan.next();
base_scan.next();
index_scan.next();
base_scan.next();
index_scan.next();
base_scan.next();
index_scan.next();
// should be able to call get and set even after next'ing through the rows.
long row_count = base_scan.getEstimatedRowCount();
base_scan.setEstimatedRowCount(10);
row_count = base_scan.getEstimatedRowCount();
// should be able to call get and set even after next'ing through the rows.
row_count = index_scan.getEstimatedRowCount();
index_scan.setEstimatedRowCount(10);
row_count = index_scan.getEstimatedRowCount();
if (row_count != 10)
throw T_Fail.testFailMsg("(holdCursor) some problem with get/set row count.");
tc.commit();
testOpsBeforeFirstNext(base_scan, r1.getRowArray());
testOpsBeforeFirstNext(index_scan, r1.getRowArray());
// see if commit closed the base_scan.
if (base_scan.next())
throw T_Fail.testFailMsg("(holdCursor) next() should return false, commit should close base_scan.");
// see if commit closed the index_scan.
if (index_scan.next())
throw T_Fail.testFailMsg("(holdCursor) next() should return false, commit should close index_scan.");
tc.commit();
// Open another scan on the conglomerate.
base_scan = tc.openScan(
    base_id,
    true, // hold cursor open across commit
    TransactionController.OPENMODE_FORUPDATE, // for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    (FormatableBitSet) null, // all columns, all as objects
    null, // start position - first row in conglomerate
    0, // unused if start position is null.
    null, // qualifier - accept all rows
    null, // stop position - last row in conglomerate
    0); // unused if stop position is null.
// Open scan on the index table.
index_scan = tc.openScan(
    index_id,
    true, // hold cursor open across commit
    TransactionController.OPENMODE_FORUPDATE, // for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    (FormatableBitSet) null, // all columns, all as objects
    null, // start position - first row in conglomerate
    0, // unused if start position is null.
    null, // qualifier - accept all rows
    null, // stop position - last row in conglomerate
    0); // unused if stop position is null.
tc.commit();
testOpsBeforeFirstNext(base_scan, r1.getRowArray());
testOpsBeforeFirstNext(index_scan, r1.getRowArray());
// move cursor to be positioned on 0
if (!base_scan.next())
throw T_Fail.testFailMsg("(holdCursor) next() should not fail, commit should not close a held scan.");
// move cursor to be positioned on 0
if (!index_scan.next())
throw T_Fail.testFailMsg("(holdCursor) next() should not fail, commit should not close a held scan.");
// the 1st next should return the 1st row - ie. 0
base_scan.fetch(r1.getRowArray());
long key_value = ((SQLLongint) r1.getCol(0)).getLong();
if (key_value != 0)
throw T_Fail.testFailMsg("(holdCursor) 1st row is not 0.");
index_scan.fetch(r1.getRowArray());
key_value = ((SQLLongint) r1.getCol(0)).getLong();
if (key_value != 0)
throw T_Fail.testFailMsg("(holdCursor) 1st row is not 0.");
// move cursor to be positioned on 1
base_scan.next();
index_scan.next();
tc.commit();
testOpsBeforeFirstNext(base_scan, r1.getRowArray());
testOpsBeforeFirstNext(index_scan, r1.getRowArray());
// should be able to call get and set even after next'ing through the rows.
row_count = base_scan.getEstimatedRowCount();
base_scan.setEstimatedRowCount(5);
row_count = base_scan.getEstimatedRowCount();
// should be able to call get and set even after next'ing through the rows.
row_count = index_scan.getEstimatedRowCount();
index_scan.setEstimatedRowCount(5);
row_count = index_scan.getEstimatedRowCount();
if (row_count != 5)
throw T_Fail.testFailMsg("(holdCursor) some problem with get/set row count.");
// move cursor to be positioned on 2
if (!base_scan.next())
throw T_Fail.testFailMsg("(holdCursor) next() should not fail, commit should not close the held base_scan.");
if (!index_scan.next())
throw T_Fail.testFailMsg("(holdCursor) next() should not fail, commit should not close the held index_scan.");
// the held cursor should now be positioned on row 2
base_scan.fetch(r1.getRowArray());
key_value = ((SQLLongint) r1.getCol(0)).getLong();
if (key_value != 2)
throw T_Fail.testFailMsg("(holdCursor) current row is not 2.");
index_scan.fetch(r1.getRowArray());
key_value = ((SQLLongint) r1.getCol(0)).getLong();
if (key_value != 2)
throw T_Fail.testFailMsg("(holdCursor) current row is not 2.");
// move cursor to be positioned on 3
base_scan.next();
base_scan.delete();
index_scan.next();
index_scan.delete();
// move cursor to be positioned on 4
base_scan.next();
index_scan.next();
// move cursor past the end, thus closing it.
base_scan.next();
index_scan.next();
tc.commit();
// should be able to call get and set even after next'ing through the rows.
row_count = base_scan.getEstimatedRowCount();
base_scan.setEstimatedRowCount(15);
row_count = base_scan.getEstimatedRowCount();
if (row_count != 15)
throw T_Fail.testFailMsg("(holdCursor) some problem with get/set row count.");
row_count = index_scan.getEstimatedRowCount();
index_scan.setEstimatedRowCount(15);
row_count = index_scan.getEstimatedRowCount();
if (row_count != 15)
throw T_Fail.testFailMsg("(holdCursor) some problem with get/set row count.");
testOpsBeforeFirstNext(base_scan, r1.getRowArray());
testOpsBeforeFirstNext(index_scan, r1.getRowArray());
if (base_scan.next())
throw T_Fail.testFailMsg("(holdCursor) next() should return false, the base_scan has been closed by progressing to the end.");
if (index_scan.next())
throw T_Fail.testFailMsg("(holdCursor) next() should return false, the index_scan has been closed by progressing to the end.");
tc.commit();
base_scan.close();
index_scan.close();
REPORT("(holdCursor) succeeded");
return true;
}
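Every openScan() call in holdCursor passes (FormatableBitSet) null for the scanColumnList argument, meaning all columns are fetched. As a rough sketch only, a variant of the same call that restricts the scan to column 0 could look like the method below; it is written as if it sat next to holdCursor in T_AccessFactory (same imports, same StandardException), follows the argument order visible above, and is not part of the Derby test suite.

// Hypothetical helper, not part of T_AccessFactory: open a held, updatable
// scan that fetches only column 0 of each row.
protected ScanController openFirstColumnScan(TransactionController tc, long conglom_id)
        throws StandardException {
    FormatableBitSet scanColumnList = new FormatableBitSet(1);
    scanColumnList.set(0); // fetch only column 0

    return tc.openScan(
        conglom_id,
        true, // hold cursor open across commit
        TransactionController.OPENMODE_FORUPDATE, // for update
        TransactionController.MODE_RECORD,
        TransactionController.ISOLATION_SERIALIZABLE,
        scanColumnList, // only column 0, instead of null meaning all columns
        null, // start position - first row in conglomerate
        0, // unused if start position is null.
        null, // qualifier - accept all rows
        null, // stop position - last row in conglomerate
        0); // unused if stop position is null.
}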
use of org.apache.derby.iapi.services.io.FormatableBitSet in project derby by apache.
the class T_AccessFactory method deletetest.
// Insert a single row with a single column containing
// the first argument integer, delete it, and make sure that a subsequent
// delete, a full-row replace, and a single-column replace all return false.
//
protected boolean deletetest(TransactionController tc, long conglomid, int value1, int value2) throws StandardException, T_Fail {
boolean ret_val;
// Open the conglomerate.
ConglomerateController cc = tc.openConglomerate(conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Create a row.
T_AccessRow r1 = new T_AccessRow(1);
r1.setCol(0, new SQLInteger(value1));
// Get a location template
RowLocation rowloc = cc.newRowLocationTemplate();
// Insert the row and remember its location.
cc.insertAndFetchLocation(r1.getRowArray(), rowloc);
// delete it.
if (!cc.delete(rowloc)) {
throw T_Fail.testFailMsg("(deleteTest) delete of row failed");
}
// subsequent replace, update a single column, and delete
// should return false
// update single column
DataValueDescriptor[] update_row = new DataValueDescriptor[1];
FormatableBitSet update_desc = new FormatableBitSet(1);
update_desc.set(0);
if (cc.replace(rowloc, update_row, update_desc)) {
throw T_Fail.testFailMsg("(deleteTest) partial column row replace returned true on del row");
}
// update whole row.
if (cc.replace(rowloc, r1.getRowArray(), (FormatableBitSet) null)) {
throw T_Fail.testFailMsg("(deleteTest) update returned true on del row");
}
if (cc.delete(rowloc)) {
throw T_Fail.testFailMsg("(deleteTest) delete returned true on del row");
}
// Close the conglomerate.
cc.close();
return true;
}
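deletetest shows the FormatableBitSet update descriptor only in the negative case, where replace() on a deleted row returns false. The hypothetical companion method below, which is not part of the Derby test suite and assumes the same imports and helper classes (T_AccessRow, SQLInteger) used above, applies the same single-column descriptor to a live row, where replace() is expected to succeed.

// Hypothetical sketch: partial-column replace on a live row in a one-column
// heap like the one deletetest uses.
protected boolean partialUpdateSketch(TransactionController tc, long conglomid)
        throws StandardException, T_Fail {
    ConglomerateController cc = tc.openConglomerate(
        conglomid, false,
        TransactionController.OPENMODE_FORUPDATE,
        TransactionController.MODE_RECORD,
        TransactionController.ISOLATION_SERIALIZABLE);

    // Insert a one-column row and remember its location.
    T_AccessRow r1 = new T_AccessRow(1);
    r1.setCol(0, new SQLInteger(10));
    RowLocation rowloc = cc.newRowLocationTemplate();
    cc.insertAndFetchLocation(r1.getRowArray(), rowloc);

    // update_desc marks which columns update_row carries; here only column 0.
    DataValueDescriptor[] update_row = new DataValueDescriptor[1];
    update_row[0] = new SQLInteger(20);
    FormatableBitSet update_desc = new FormatableBitSet(1);
    update_desc.set(0);

    if (!cc.replace(rowloc, update_row, update_desc)) {
        throw T_Fail.testFailMsg("(partialUpdateSketch) replace on a live row returned false");
    }
    cc.close();
    return true;
}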