Use of org.apache.derby.iapi.store.access.ScanController in the Apache Derby project — class T_CreateConglomRet, method t_013.
/**
 * Test backout during critical times of splits.
 * <p>
 * Force logical undo of an operation which generated an internal update
 * of a btree record:
 * case 1:
 * o insert into unique btree key1, rowlocation_1
 * o delete from btree key1, rowlocation_1
 * - this will mark the record logically deleted.
 * o insert enough records to move the logically deleted row to another
 * page to exercise logical undo of the delete.
 * o insert into btree key1, rowlocation_2
 * - this internally will generate a logical update field on the
 * record.
 * o insert enough records to move the logically deleted row to another
 * page to exercise logical undo of the delete.
 * o abort.
 *
 * case 2:
 * o same as case 1 but don't change the rowlocation_1 value. This
 * simulates what the language will generate on an update of a key
 * field.
 *
 * @param tc transaction controller used for all conglomerate operations.
 * @return true if the test passes (failures are reported via T_Fail).
 *
 * @exception StandardException Standard exception policy.
 * @exception T_Fail Throws T_Fail on any test failure.
 */
protected boolean t_013(TransactionController tc) throws StandardException, T_Fail {
    // SanityManager.DEBUG_SET("LockTrace");
    REPORT("Starting t_013");
    T_CreateConglomRet create_ret = new T_CreateConglomRet();
    // Create the btree so that it only allows 2 rows per page.
    createCongloms(tc, 2, true, false, 5, create_ret);
    // Open the base table.
    ConglomerateController base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    // Open the secondary index.
    ConglomerateController index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    // Create an index row object for the "delete row".
    DataValueDescriptor[] r1 = TemplateRow.newU8Row(2);
    T_SecondaryIndexRow index_row1 = new T_SecondaryIndexRow();
    RowLocation base_rowloc1 = base_cc.newRowLocationTemplate();
    index_row1.init(r1, base_rowloc1, 3);
    // Create another index row object for the other inserts.
    DataValueDescriptor[] r2 = TemplateRow.newU8Row(2);
    T_SecondaryIndexRow index_row2 = new T_SecondaryIndexRow();
    RowLocation base_rowloc2 = base_cc.newRowLocationTemplate();
    index_row2.init(r2, base_rowloc2, 3);
    // Commit the create of the tables so that the following aborts don't
    // undo that work.  (The original issued a second, redundant commit
    // right after this one; a single commit is sufficient since no work
    // happens in between.)
    tc.commit();
    // CASE 1:
    // Re-open both conglomerates in the new transaction.
    base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    ((SQLLongint) r1[0]).setValue(1);
    // insert row which will be deleted (key = 100, base_rowloc1):
    ((SQLLongint) r1[1]).setValue(100);
    base_cc.insertAndFetchLocation(r1, base_rowloc1);
    // Insert the row into the secondary index.
    if (index_cc.insert(index_row1.getRow()) != 0)
        throw T_Fail.testFailMsg("insert failed");
    // Push the soon-to-be-deleted row toward another page:
    // 5 rows are on a page, so 10 should be plenty.
    for (int i = 0; i < 10; i++) {
        ((SQLLongint) r2[1]).setValue(i);
        // Insert the row into the base table; remember its location.
        base_cc.insertAndFetchLocation(r2, base_rowloc2);
        // Insert the row into the secondary index.
        if (index_cc.insert(index_row2.getRow()) != 0)
            throw T_Fail.testFailMsg("insert failed");
    }
    // delete row which was inserted (key = 100, base_rowloc1):
    if (!t_delete(tc, create_ret.index_conglomid, index_row1.getRow(), false)) {
        // Fixed: this message was mislabeled "t_008" in the original.
        throw T_Fail.testFailMsg("t_013: could not delete key.");
    }
    base_cc.delete(base_rowloc1);
    // 5 rows are on a page, so 10 should be plenty.
    for (int i = 10; i < 20; i++) {
        ((SQLLongint) r2[1]).setValue(i);
        // Insert the row into the base table; remember its location.
        base_cc.insertAndFetchLocation(r2, base_rowloc2);
        // Insert the row into the secondary index.
        if (index_cc.insert(index_row2.getRow()) != 0)
            throw T_Fail.testFailMsg("insert failed");
    }
    // Re-insert the deleted key (key = 100, base_rowloc1); internally
    // this generates a logical update of the deleted btree record.
    ((SQLLongint) r1[1]).setValue(100);
    base_cc.insertAndFetchLocation(r1, base_rowloc1);
    // Insert the row into the secondary index.
    if (index_cc.insert(index_row1.getRow()) != 0)
        throw T_Fail.testFailMsg("insert failed");
    // 5 rows are on a page, so 10 should be plenty.
    for (int i = 20; i < 30; i++) {
        ((SQLLongint) r2[1]).setValue(i);
        // Insert the row into the base table; remember its location.
        base_cc.insertAndFetchLocation(r2, base_rowloc2);
        // Insert the row into the secondary index.
        if (index_cc.insert(index_row2.getRow()) != 0)
            throw T_Fail.testFailMsg("insert failed");
    }
    // RESOLVE (mikem) - check that the right row is at key 100.
    tc.abort();
    // index check - the abort should have removed every record.
    ScanController empty_scan = tc.openScan(create_ret.index_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, null, ScanController.NA, null, null, ScanController.NA);
    boolean rowsLeft = empty_scan.next();
    // Fixed: the original leaked this scan (never closed it).
    empty_scan.close();
    if (rowsLeft)
        // Fixed: this message was mislabeled "t_002" in the original.
        throw T_Fail.testFailMsg("t_013: there are still rows in table.");
    tc.commit();
    REPORT("Ending t_013");
    return true;
}
Use of org.apache.derby.iapi.store.access.ScanController in the Apache Derby project — class T_CreateConglomRet, method t_002.
/**
 * Test backout during critical times of splits.
 * <p>
 * Use trace points to force errors in split at critical points:
 * leaf_split_abort{1,2,3,4}
 *
 * @param tc transaction controller used for all conglomerate operations.
 * @return true if the test passes (failures are reported via T_Fail).
 *
 * @exception StandardException Standard exception policy.
 * @exception T_Fail Throws T_Fail on any test failure.
 */
protected boolean t_002(TransactionController tc) throws StandardException, T_Fail {
    ScanController scan = null;
    // SanityManager.DEBUG_SET("LockTrace");
    REPORT("Starting t_002");
    T_CreateConglomRet create_ret = new T_CreateConglomRet();
    // Create the btree so that it only allows 2 rows per page.
    createCongloms(tc, 2, false, false, 2, create_ret);
    // Open the base table.
    ConglomerateController base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    // Open the secondary index and make sure it is the btree-on-index
    // implementation.
    ConglomerateController index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    if (!(index_cc instanceof B2IController)) {
        throw T_Fail.testFailMsg("openConglomerate returned wrong type");
    }
    index_cc.checkConsistency();
    // Create a row and insert into base table, remembering its location.
    DataValueDescriptor[] r1 = TemplateRow.newU8Row(2);
    T_SecondaryIndexRow index_row1 = new T_SecondaryIndexRow();
    RowLocation base_rowloc1 = base_cc.newRowLocationTemplate();
    index_row1.init(r1, base_rowloc1, 3);
    // Commit the create of the tables so that the following aborts don't
    // undo that work.
    tc.commit();
    // Now try aborts of transactions during splits, using magic
    // trace flags. This test inserts enough rows to cause a split
    // and then forces the split to fail at various key points. The
    // split should be backed out and also the rows before the split.
    // The test makes sure that there are some inserts before the forced
    // split abort.
    String[] debug_strings = { "leaf_split_growRoot1", "leaf_split_growRoot2", "leaf_split_growRoot3", "leaf_split_growRoot4", "leaf_split_growRoot5", "leaf_split_abort1", "leaf_split_abort2", "leaf_split_abort3", "leaf_split_abort4", "branch_split_abort1", "branch_split_abort2", "branch_split_abort3", "branch_split_abort4", "BTreeController_doIns2" };
    for (int errs = 0; errs < debug_strings.length; errs++) {
        REPORT("Doing abort test: " + debug_strings[errs]);
        if (SanityManager.DEBUG)
            SanityManager.DEBUG_SET(debug_strings[errs]);
        try {
            // Open the base table.
            base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
            // Open the secondary index.
            index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
            // insert one row that does not cause failure.
            ((SQLLongint) r1[0]).setValue(2);
            ((SQLLongint) r1[1]).setValue(10000 + errs);
            // Insert the row into the base table; remember its location.
            base_cc.insertAndFetchLocation(r1, base_rowloc1);
            // Insert the row into the secondary index.
            if (index_cc.insert(index_row1.getRow()) != 0)
                throw T_Fail.testFailMsg("insert failed");
            // Set the trace flag again before the failing inserts;
            // presumably the flag can be consumed/cleared by the insert
            // above — TODO confirm against SanityManager semantics.
            if (SanityManager.DEBUG)
                SanityManager.DEBUG_SET(debug_strings[errs]);
            // now insert enough rows to cause failure
            for (int i = 100; i > 0; i -= 2) {
                ((SQLLongint) r1[0]).setValue(2);
                ((SQLLongint) r1[1]).setValue(i);
                // Insert the row into the base table; remember its location.
                base_cc.insertAndFetchLocation(r1, base_rowloc1);
                // Insert the row into the secondary index.
                if (index_cc.insert(index_row1.getRow()) != 0) {
                    throw T_Fail.testFailMsg("insert failed");
                }
            }
            throw T_Fail.testFailMsg("debug flag (" + debug_strings[errs] + ")did not cause exception.");
        } catch (StandardException e) {
            ContextService contextFactory = getContextService();
            // Get the context manager.
            ContextManager cm = contextFactory.getCurrentContextManager();
            if (SanityManager.DEBUG)
                SanityManager.ASSERT(cm != null);
            cm.cleanupOnError(e, isdbActive());
            // RESOLVE (mikem) - when split abort works come up with
            // a good sanity check here.
            //
            // index check - there should be no records:
            scan = tc.openScan(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, null, ScanController.NA, null, null, ScanController.NA);
            index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
            index_cc.checkConsistency();
            index_cc.close();
            // Fixed: close the scan before (not after) a potential throw
            // so it is not leaked on failure.
            boolean sawRow = scan.next();
            scan.close();
            if (sawRow) {
                throw T_Fail.testFailMsg("t_002: there are still rows in table.");
            }
        }
        // Unset the flag.
        if (SanityManager.DEBUG)
            SanityManager.DEBUG_CLEAR(debug_strings[errs]);
    }
    // Try a simple abort. The following adds enough rows to cause a
    // split. The result of the split should be a tree with no rows, but
    // the splits will not be undone. It is up to the implementation
    // whether the undo's cause shrinks in the tree. In the initial
    // implementation it won't.
    {
        tc.commit();
        // Open the base table.
        base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
        // Open the secondary index.
        index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
        // Make sure that normal abort leaves the committed split.
        for (int i = 0; i < 3; i++) {
            ((SQLLongint) r1[0]).setValue(2);
            ((SQLLongint) r1[1]).setValue(i);
            // Insert the row into the base table; remember its location.
            base_cc.insertAndFetchLocation(r1, base_rowloc1);
            // Insert the row into the secondary index.
            if (index_cc.insert(index_row1.getRow()) != 0)
                throw T_Fail.testFailMsg("insert failed");
        }
        tc.abort();
        // index check - there should be no records left.
        ScanController empty_scan = tc.openScan(create_ret.index_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, null, ScanController.NA, null, null, ScanController.NA);
        boolean rowsLeft = empty_scan.next();
        // Fixed: the original leaked this scan (never closed it).
        empty_scan.close();
        if (rowsLeft) {
            throw T_Fail.testFailMsg("t_002: there are still rows in table.");
        }
    }
    tc.commit();
    REPORT("Ending t_002");
    return true;
}
Use of org.apache.derby.iapi.store.access.ScanController in the Apache Derby project — class T_CreateConglomRet, method t_016.
/**
 * Test deadlocks during critical times of row level locking.
 * <p>
 * Use trace points to force errors in split at critical points:
 * leaf_split_abort{1,2,3,4}
 *
 * @exception StandardException Standard exception policy.
 * @exception T_Fail Throws T_Fail on any test failure.
 */
protected boolean t_016(TransactionController tc) throws StandardException, T_Fail {
    // SanityManager.DEBUG_SET("LockTrace");
    REPORT("Starting t_016");

    // Build the base table plus a btree index that holds only 2 rows
    // per page, so a modest number of inserts forces splits.
    T_CreateConglomRet createInfo = new T_CreateConglomRet();
    createCongloms(tc, 2, false, false, 2, createInfo);

    // Open the base table for update.
    ConglomerateController baseTable = tc.openConglomerate(createInfo.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);

    // Open the secondary index; it must be the btree-on-index
    // implementation, and must start out consistent.
    ConglomerateController btreeIndex = tc.openConglomerate(createInfo.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    if (!(btreeIndex instanceof B2IController)) {
        throw T_Fail.testFailMsg("openConglomerate returned wrong type");
    }
    btreeIndex.checkConsistency();

    // One template row plus an index row that remembers the base-row
    // location alongside the two key columns.
    DataValueDescriptor[] keyCols = TemplateRow.newU8Row(2);
    T_SecondaryIndexRow indexRow = new T_SecondaryIndexRow();
    RowLocation baseLoc = baseTable.newRowLocationTemplate();
    indexRow.init(keyCols, baseLoc, 3);

    // Commit the DDL so the aborts below cannot undo it.
    tc.commit();

    // Reopen both conglomerates inside the new transaction.
    baseTable = tc.openConglomerate(createInfo.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    btreeIndex = tc.openConglomerate(createInfo.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);

    // Insert rows with descending keys until splits occur.
    for (int key = 100; key > 0; key -= 2) {
        ((SQLLongint) keyCols[0]).setValue(2);
        ((SQLLongint) keyCols[1]).setValue(key);
        // Base table first (fills in baseLoc), then the index.
        baseTable.insertAndFetchLocation(keyCols, baseLoc);
        if (btreeIndex.insert(indexRow.getRow()) != 0) {
            throw T_Fail.testFailMsg("insert failed");
        }
    }
    tc.abort();

    // Now try simulated deadlocks via trace flags at various points in
    // the scan's latch-release code paths.
    // RESOLVE (Mikem) - test out aborts and errors during inserts.
    String[] deadlockFlags = {
        "B2iRowLocking3_1_lockScanRow2",
        "B2iRowLocking3_2_lockScanRow2",
        // "BTreeController_doIns2",
        "BTreeScan_positionAtStartPosition2",
        // "BTreeScan_reposition2",
        "BTreeScan_fetchNextGroup2"
    };
    for (String flag : deadlockFlags) {
        try {
            REPORT("Doing deadlock tests: " + flag);
            // Arm the trace point that simulates a deadlock.
            if (SanityManager.DEBUG)
                SanityManager.DEBUG_SET(flag);
            // Scan the index; the armed trace point should raise a
            // deadlock before the scan completes.
            ScanController deadlockScan = tc.openScan(createInfo.index_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, null, ScanController.NA, null, null, ScanController.NA);
            int rowsSeen = 0;
            while (deadlockScan.next()) {
                rowsSeen++;
            }
            deadlockScan.close();
            throw T_Fail.testFailMsg("expected deadlock");
        } catch (StandardException e) {
            // Only the simulated deadlock is expected; anything else is
            // a genuine failure and is rethrown.
            if (!e.getMessageId().equals(SQLState.DEADLOCK))
                throw e;
            ContextService contextFactory = getContextService();
            // Get the context manager and let it clean up the error.
            ContextManager cm = contextFactory.getCurrentContextManager();
            if (SanityManager.DEBUG)
                SanityManager.ASSERT(cm != null);
            cm.cleanupOnError(e, isdbActive());
        }
    }

    tc.commit();
    REPORT("Ending t_016");
    return true;
}
Use of org.apache.derby.iapi.store.access.ScanController in the Apache Derby project — class T_ConsistencyChecker, method openUnqualifiedHeapScan.
/**
 * Open an unqualified scan on the heap for update.
 * <p>
 * The scan covers every row (null start/stop keys, no qualifiers) of the
 * table's heap conglomerate, table-locked and serializable.
 *
 * @return an open, update-capable ScanController on the heap.
 * @exception StandardException Standard exception policy.
 */
private ScanController openUnqualifiedHeapScan() throws StandardException {
    return tc.openScan(
            td.getHeapConglomerateId(),
            false,                                      // hold
            TransactionController.OPENMODE_FORUPDATE,   // forUpdate
            TransactionController.MODE_TABLE,
            TransactionController.ISOLATION_SERIALIZABLE,
            (FormatableBitSet) null,
            null,                                       // startKeyValue
            0,                                          // not used with null start posn.
            null,                                       // qualifier
            null,                                       // stopKeyValue
            0);                                         // not used with null stop posn.
}
Use of org.apache.derby.iapi.store.access.ScanController in the Apache Derby project — class T_ConsistencyChecker, method deleteFirstHeapRow.
/**
 * Delete the first row from the heap, without
 * deleting it from the indexes on the table.
 * <p>
 * Intentionally corrupts the table (row missing from heap but present in
 * the indexes) so the consistency checker has something to detect.
 *
 * @param schemaName The schema name.
 * @param tableName The table name.
 *
 * @exception StandardException Thrown on error
 */
public static void deleteFirstHeapRow(String schemaName, String tableName) throws StandardException {
    T_ConsistencyChecker t_cc = new T_ConsistencyChecker(schemaName, tableName, null);
    t_cc.getContexts();
    t_cc.getDescriptors();
    /* Open a scan on the heap */
    ScanController heapScan = t_cc.openUnqualifiedHeapScan();
    try {
        // Move to the 1st row in the heap and delete it.  Fixed: the
        // original ignored next()'s return value and deleted through an
        // unpositioned scan when the heap was empty; now an empty heap
        // is simply a no-op.
        if (heapScan.next()) {
            heapScan.delete();
        }
    } finally {
        // Fixed: close the scan even if delete() throws.
        heapScan.close();
    }
}
Aggregations