Use of org.apache.derby.iapi.types.RowLocation in project derby by apache:
class T_CreateConglomRet, method t_018.
/**
 * Test BTree.openScan(), BtreeScan.init(), BtreeScan.next(),
 * BtreeScan.fetch() with alternating ascending and descending column
 * sort order indexes.
 *
 * @exception StandardException Standard exception policy.
 * @exception T_Fail Throws T_Fail on any test failure.
 */
protected boolean t_018(TransactionController tc)
        throws StandardException, T_Fail {
    T_SecondaryIndexRow index_row = new T_SecondaryIndexRow();

    // base row template - last column is just to make row long so that
    // multiple pages are spanned.
    DataValueDescriptor[] base_row = TemplateRow.newU8Row(4);
    base_row[3] = new SQLChar();

    // Build the 1500-char filler with a StringBuilder instead of repeated
    // String concatenation (the old loop was O(n^2)).
    StringBuilder filler = new StringBuilder(1500);
    for (int i = 0; i < 300; i++) {
        filler.append("mikem");
    }
    String string_1500char = filler.toString();

    boolean ret_val = true;

    // rows that remain in the index for the scan tests
    long[] col1 = { 1, 3, 4, 4, 4, 5, 5, 5, 6, 7, 9 };
    long[] col2 = { 1, 1, 2, 4, 6, 2, 4, 6, 1, 1, 1 };
    long[] col3 = { 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 };

    // set of deleted rows to make scans more interesting
    long[] d_col1 = { 0, 2, 3, 4, 4, 5, 5, 5, 6, 7, 8, 10, 11, 12 };
    long[] d_col2 = { 1, 1, 2, 3, 5, 0, 3, 5, 0, 0, 1, 42, 42, 1 };
    long[] d_col3 = { 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104 };

    REPORT("Starting t_018");

    // create the base table
    long base_conglomid =
        tc.createConglomerate(
            "heap",      // create a heap conglomerate
            base_row,    // base table template row
            null,        // column sort order - not required for heap
            null,        // default collation
            null,        // default properties
            TransactionController.IS_DEFAULT); // not temporary

    // Open the base table
    ConglomerateController base_cc =
        tc.openConglomerate(
            base_conglomid, false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE);

    // initialize the secondary index row - pointing it at base row
    index_row.init(base_row, base_cc.newRowLocationTemplate(), 5);

    Properties properties =
        createProperties(
            null,            // no current properties list
            false,           // don't allow duplicates
            5,               // 5 columns in index row (key cols + row loc)
            5,               // non-unique index
            true,            // maintain parent links
            base_conglomid,  // base conglom id
            4);              // row loc in last column

    // create the index with alternating ascending/descending column order
    ColumnOrdering[] order = new ColumnOrdering[5];
    order[0] = new T_ColumnOrderingImpl(0, true);  // ascending
    order[1] = new T_ColumnOrderingImpl(1, false); // descending
    order[2] = new T_ColumnOrderingImpl(2, true);  // ascending
    order[3] = new T_ColumnOrderingImpl(3, false); // descending
    order[4] = new T_ColumnOrderingImpl(4, true);  // ascending

    long index_conglomid =
        tc.createConglomerate(
            "BTREE",             // create a btree secondary
            index_row.getRow(),  // row template
            order,               // column sort order
            null,                // default collation
            properties,          // properties
            TransactionController.IS_DEFAULT); // not temporary

    // Open the conglomerate.
    ConglomerateController index_cc =
        tc.openConglomerate(
            index_conglomid, false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE);

    // Create a row.
    T_SecondaryIndexRow template = new T_SecondaryIndexRow();
    RowLocation row_loc = base_cc.newRowLocationTemplate();
    template.init(base_row, row_loc, 5);

    // insert them in reverse order just to make sure btree is sorting them
    for (int i = col1.length - 1; i >= 0; i--) {
        ((SQLLongint) (template.getRow()[0])).setValue(col1[i]);
        ((SQLLongint) (template.getRow()[1])).setValue(col2[i]);
        ((SQLLongint) (template.getRow()[2])).setValue(col3[i]);
        base_row[3] = new SQLChar(string_1500char);
        base_cc.insertAndFetchLocation(base_row, row_loc);
        if (index_cc.insert(template.getRow()) != 0)
            throw T_Fail.testFailMsg("insert failed");
    }

    index_cc.checkConsistency();
    ((B2IController) index_cc).debugConglomerate();

    // first pass over the scan test cases, on the clean index
    ret_val = t_ascdesc_scan_test_cases(tc, index_conglomid, template);

    // Insert and then delete a set of rows; the scans below may or may not
    // see the deleted (not yet purged) rows, which makes them more
    // interesting.
    for (int i = d_col1.length - 1; i >= 0; i--) {
        ((SQLLongint) (template.getRow()[0])).setValue(d_col1[i]);
        ((SQLLongint) (template.getRow()[1])).setValue(d_col2[i]);
        ((SQLLongint) (template.getRow()[2])).setValue(d_col3[i]);
        base_row[3] = new SQLChar(string_1500char);
        base_cc.insertAndFetchLocation(base_row, row_loc);
        if (index_cc.insert(template.getRow()) != 0)
            throw T_Fail.testFailMsg("insert failed");

        // now delete the row from the base table ...
        base_cc.delete(row_loc);

        // ... and from the index, via an exact-match scan on the full key
        // (start GE key, stop GT key selects exactly that key).
        ScanController delete_scan =
            tc.openScan(
                index_conglomid, false,
                TransactionController.OPENMODE_FORUPDATE,
                TransactionController.MODE_RECORD,
                TransactionController.ISOLATION_SERIALIZABLE,
                (FormatableBitSet) null,
                template.getRow(), ScanController.GE,
                null,
                template.getRow(), ScanController.GT);
        if (!delete_scan.next()) {
            throw T_Fail.testFailMsg("delete could not find key");
        } else {
            delete_scan.delete();
            if (delete_scan.next())
                throw T_Fail.testFailMsg("delete found more than one key");
        }
        delete_scan.close();
    }

    // Second pass with deleted rows present. Combine with the first pass
    // instead of overwriting it; previously a first-pass failure was
    // silently discarded by this reassignment. Both passes always run.
    boolean second_pass =
        t_ascdesc_scan_test_cases(tc, index_conglomid, template);
    ret_val = ret_val && second_pass;

    // Close the conglomerate.
    index_cc.close();
    tc.commit();
    REPORT("Ending t_018");
    return (ret_val);
}
Use of org.apache.derby.iapi.types.RowLocation in project derby by apache:
class T_CreateConglomRet, method t_021.
/**
 * Test latch release at a critical time during a delete on an index scan
 * that uses update locks.
 */
protected boolean t_021(TransactionController tc)
        throws StandardException, T_Fail {
    REPORT("Starting t_021");

    boolean passed = true;

    // Build a base table plus a btree index limited to 2 rows per page.
    T_CreateConglomRet conglom_info = new T_CreateConglomRet();
    createCongloms(tc, 2, false, false, 2, conglom_info);

    // Open the base table for update.
    ConglomerateController heap_cc =
        tc.openConglomerate(
            conglom_info.base_conglomid, false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE);

    // Open the secondary index for update.
    ConglomerateController btree_cc =
        tc.openConglomerate(
            conglom_info.index_conglomid, false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE);

    // Row objects used to insert into the base and index tables.
    DataValueDescriptor[] base_vals = TemplateRow.newU8Row(2);
    T_SecondaryIndexRow idx_row = new T_SecondaryIndexRow();
    RowLocation base_loc = heap_cc.newRowLocationTemplate();
    idx_row.init(base_vals, base_loc, 3);

    // Insert the single row (1, 1): first into the base table (remembering
    // its location), then into the secondary index.
    ((SQLLongint) base_vals[0]).setValue(1);
    ((SQLLongint) base_vals[1]).setValue(1);
    heap_cc.insertAndFetchLocation(base_vals, base_loc);
    if (btree_cc.insert(idx_row.getRow()) != 0)
        throw T_Fail.testFailMsg("insert failed");

    // Make the table/index creation and the insert permanent.
    tc.commit();

    // Arm the debug code that releases the latch at the critical time.
    if (SanityManager.DEBUG) {
        SanityManager.DEBUG_SET("BTreeScan_delete_useUpdateLocks1");
    }

    // Delete the row through the index using update locks. Before
    // DERBY-4083 the delete() call failed with record-not-found when the
    // latch was released at this point.
    DataValueDescriptor[] delete_key = TemplateRow.newU8Row(2);
    ((SQLLongint) delete_key[0]).setValue(1);
    ((SQLLongint) delete_key[1]).setValue(1);
    if (!t_delete(tc, conglom_info.index_conglomid, delete_key, true)) {
        passed = false;
    }

    // Disarm the latch-release debug code again.
    if (SanityManager.DEBUG) {
        SanityManager.DEBUG_CLEAR("BTreeScan_delete_useUpdateLocks1");
    }

    tc.commit();
    REPORT("Ending t_021");
    return passed;
}
Use of org.apache.derby.iapi.types.RowLocation in project derby by apache:
class T_CreateConglomRet, method t_007.
/**
 * Test multiple scans in a single page/no split
 *
 * @exception StandardException Standard exception policy.
 * @exception T_Fail Throws T_Fail on any test failure.
 */
protected boolean t_007(TransactionController tc)
        throws StandardException, T_Fail {
    boolean passed = true;

    REPORT("Starting t_007");

    // Build a base table and a btree secondary index over it.
    T_CreateConglomRet conglom_info = new T_CreateConglomRet();
    createCongloms(tc, 2, false, false, 0, conglom_info);

    // Open the base conglomerate.
    ConglomerateController heap_cc =
        tc.openConglomerate(
            conglom_info.base_conglomid, false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE);

    // Open the index conglomerate.
    ConglomerateController btree_cc =
        tc.openConglomerate(
            conglom_info.index_conglomid, false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE);

    // Row template shared between base table and index.
    T_SecondaryIndexRow idx_row = new T_SecondaryIndexRow();
    DataValueDescriptor[] base_vals = TemplateRow.newU8Row(2);
    RowLocation base_loc = heap_cc.newRowLocationTemplate();
    idx_row.init(base_vals, base_loc, 3);

    // The first key column stays constant for every row in this test.
    ((SQLLongint) (idx_row.getRow()[0])).setValue(1);

    // Scenario (no-split case): inserts before the positioned row shift its
    // slot, but the scan holds a record handle and must not have to
    // reposition by key.
    //   keys before scan    : 3, 5
    //   scan positioned on  : 3
    //   keys then inserted  : 1, 2
    //   next key from scan  : 5

    // insert key 3
    ((SQLLongint) (idx_row.getRow()[1])).setValue(3);
    heap_cc.insertAndFetchLocation(base_vals, base_loc);
    if (btree_cc.insert(idx_row.getRow()) != 0)
        throw T_Fail.testFailMsg("insert failed");

    // insert key 5
    ((SQLLongint) (idx_row.getRow()[1])).setValue(5);
    heap_cc.insertAndFetchLocation(base_vals, base_loc);
    if (btree_cc.insert(idx_row.getRow()) != 0)
        throw T_Fail.testFailMsg("insert failed");

    // open a full scan over the index
    ScanController scan =
        tc.openScan(
            conglom_info.index_conglomid, false, 0,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE,
            (FormatableBitSet) null,
            null, ScanController.NA,
            null,
            null, ScanController.NA);

    // step onto the first row; it must be key 3
    if (SanityManager.DEBUG)
        SanityManager.ASSERT(scan.next());
    scan.fetch(idx_row.getRow());
    long key = ((SQLLongint) (idx_row.getRow()[1])).getLong();
    if (SanityManager.DEBUG)
        SanityManager.ASSERT(key == 3);

    // insert key 1
    ((SQLLongint) (idx_row.getRow()[1])).setValue(1);
    heap_cc.insertAndFetchLocation(base_vals, base_loc);
    if (btree_cc.insert(idx_row.getRow()) != 0)
        throw T_Fail.testFailMsg("insert failed");

    // insert key 2
    ((SQLLongint) (idx_row.getRow()[1])).setValue(2);
    heap_cc.insertAndFetchLocation(base_vals, base_loc);
    if (btree_cc.insert(idx_row.getRow()) != 0)
        throw T_Fail.testFailMsg("insert failed");

    // the scan's current position must be unchanged (still key 3)
    scan.fetch(idx_row.getRow());
    key = ((SQLLongint) (idx_row.getRow()[1])).getLong();
    if (SanityManager.DEBUG)
        SanityManager.ASSERT(key == 3);

    // and the next position must be key 5
    if (SanityManager.DEBUG)
        SanityManager.ASSERT(scan.next());
    scan.fetch(idx_row.getRow());
    key = ((SQLLongint) (idx_row.getRow()[1])).getLong();
    if (SanityManager.DEBUG)
        SanityManager.ASSERT(key == 5);

    btree_cc.close();
    scan.close();

    REPORT("Ending t_007");
    return (passed);
}
Use of org.apache.derby.iapi.types.RowLocation in project derby by apache:
class T_CreateConglomRet, method t_013.
/**
 * Test backout during critical times of splits.
 * <p>
 * Force logical undo of an operation which generated an internal update
 * of a btree record:
 * case 1:
 *   o insert into unique btree key1, rowlocation_1
 *   o delete from btree key1, rowlocation_1
 *     - this will mark the record logically deleted.
 *   o insert enough records to move the logically deleted row to another
 *     page to exercise logical undo of the delete.
 *   o insert into btree key1, rowlocation_2
 *     - this internally will generate a logical update field on the
 *       record.
 *   o insert enough records to move the logically deleted row to another
 *     page to exercise logical undo of the delete.
 *   o abort.
 *
 * case 2:
 *   o same as case 1 but don't change the rowlocation_1 value.  This
 *     simulates what the language will generate on an update of a key
 *     field.
 *
 * @exception StandardException Standard exception policy.
 * @exception T_Fail Throws T_Fail on any test failure.
 */
protected boolean t_013(TransactionController tc)
        throws StandardException, T_Fail {
    // SanityManager.DEBUG_SET("LockTrace");
    REPORT("Starting t_013");

    T_CreateConglomRet create_ret = new T_CreateConglomRet();

    // Create the btree so that it only allows 2 rows per page.
    createCongloms(tc, 2, true, false, 5, create_ret);

    // Open the base table
    ConglomerateController base_cc =
        tc.openConglomerate(
            create_ret.base_conglomid, false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE);

    // Open the secondary index
    ConglomerateController index_cc =
        tc.openConglomerate(
            create_ret.index_conglomid, false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE);

    // Create an index row object for the "delete row"
    DataValueDescriptor[] r1 = TemplateRow.newU8Row(2);
    T_SecondaryIndexRow index_row1 = new T_SecondaryIndexRow();
    RowLocation base_rowloc1 = base_cc.newRowLocationTemplate();
    index_row1.init(r1, base_rowloc1, 3);

    // Create another index row object for the other (filler) inserts.
    DataValueDescriptor[] r2 = TemplateRow.newU8Row(2);
    T_SecondaryIndexRow index_row2 = new T_SecondaryIndexRow();
    RowLocation base_rowloc2 = base_cc.newRowLocationTemplate();
    index_row2.init(r2, base_rowloc2, 3);

    // Commit the create of the tables so that the following aborts don't
    // undo that work.
    tc.commit();

    // CASE 1:
    tc.commit();

    // Reopen the base table in the new transaction.
    base_cc =
        tc.openConglomerate(
            create_ret.base_conglomid, false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE);

    // Reopen the secondary index in the new transaction.
    index_cc =
        tc.openConglomerate(
            create_ret.index_conglomid, false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE);

    ((SQLLongint) r1[0]).setValue(1);

    // insert row which will be deleted (key = 100, base_rowloc1):
    ((SQLLongint) r1[1]).setValue(100);
    base_cc.insertAndFetchLocation(r1, base_rowloc1);

    // Insert the row into the secondary index.
    if (index_cc.insert(index_row1.getRow()) != 0)
        throw T_Fail.testFailMsg("insert failed");

    // NOTE(review): r2[0] is never assigned here, so the filler rows keep
    // the template's initial first-column value — confirm this is intended.

    // Push rows onto further pages: 5 rows fit on a page, so 10 should be
    // plenty.
    for (int i = 0; i < 10; i++) {
        ((SQLLongint) r2[1]).setValue(i);
        // Insert the row into the base table; remember its location.
        base_cc.insertAndFetchLocation(r2, base_rowloc2);
        // Insert the row into the secondary index.
        if (index_cc.insert(index_row2.getRow()) != 0)
            throw T_Fail.testFailMsg("insert failed");
    }

    // delete row which was inserted (key = 100, base_rowloc1):
    if (!t_delete(tc, create_ret.index_conglomid, index_row1.getRow(), false)) {
        // message previously said "t_008"; corrected to name this test.
        throw T_Fail.testFailMsg("t_013: could not delete key.");
    }
    base_cc.delete(base_rowloc1);

    // 5 rows are on a page, so 10 should be plenty.
    for (int i = 10; i < 20; i++) {
        ((SQLLongint) r2[1]).setValue(i);
        // Insert the row into the base table; remember its location.
        base_cc.insertAndFetchLocation(r2, base_rowloc2);
        // Insert the row into the secondary index.
        if (index_cc.insert(index_row2.getRow()) != 0)
            throw T_Fail.testFailMsg("insert failed");
    }

    // Re-insert key 100 with a fresh row location; internally this
    // generates a logical update of the row-location field on the
    // logically deleted record.
    ((SQLLongint) r1[1]).setValue(100);
    base_cc.insertAndFetchLocation(r1, base_rowloc1);
    if (index_cc.insert(index_row1.getRow()) != 0)
        throw T_Fail.testFailMsg("insert failed");

    // 5 rows are on a page, so 10 should be plenty.
    for (int i = 20; i < 30; i++) {
        ((SQLLongint) r2[1]).setValue(i);
        // Insert the row into the base table; remember its location.
        base_cc.insertAndFetchLocation(r2, base_rowloc2);
        // Insert the row into the secondary index.
        if (index_cc.insert(index_row2.getRow()) != 0)
            throw T_Fail.testFailMsg("insert failed");
    }

    // RESOLVE (mikem) - check that the right row is at key 100.

    // Abort everything, exercising logical undo of the delete/update.
    tc.abort();

    // index check - there should be no records left.
    ScanController empty_scan =
        tc.openScan(
            create_ret.index_conglomid, false, 0,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE,
            (FormatableBitSet) null,
            null, ScanController.NA,
            null,
            null, ScanController.NA);
    if (empty_scan.next())
        // message previously said "t_002"; corrected to name this test.
        throw T_Fail.testFailMsg("t_013: there are still rows in table.");
    // Close the scan (previously leaked).
    empty_scan.close();

    tc.commit();
    REPORT("Ending t_013");
    return true;
}
Use of org.apache.derby.iapi.types.RowLocation in project derby by apache:
class T_CreateConglomRet, method t_002.
/**
 * Test backout during critical times of splits.
 * <p>
 * Use trace points to force errors in split at critical points:
 * leaf_split_abort{1,2,3,4}
 * <p>
 * For each trace flag: insert one safe row, then insert rows until the
 * forced split failure fires, clean up via the context manager, and verify
 * the index is empty and consistent. A final plain abort is also checked.
 *
 * @exception StandardException Standard exception policy.
 * @exception T_Fail Throws T_Fail on any test failure.
 */
protected boolean t_002(TransactionController tc) throws StandardException, T_Fail {
// Scan used inside the catch block to verify the index is empty after
// each forced abort.
ScanController scan = null;
// SanityManager.DEBUG_SET("LockTrace");
REPORT("Starting t_002");
T_CreateConglomRet create_ret = new T_CreateConglomRet();
// Create the btree so that it only allows 2 rows per page.
createCongloms(tc, 2, false, false, 2, create_ret);
// Open the base table
ConglomerateController base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Open the secondary index
ConglomerateController index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Sanity check: the btree open must hand back a B2IController.
if (!(index_cc instanceof B2IController)) {
throw T_Fail.testFailMsg("openConglomerate returned wrong type");
}
index_cc.checkConsistency();
// Create a row and insert into base table, remembering its location.
DataValueDescriptor[] r1 = TemplateRow.newU8Row(2);
T_SecondaryIndexRow index_row1 = new T_SecondaryIndexRow();
RowLocation base_rowloc1 = base_cc.newRowLocationTemplate();
index_row1.init(r1, base_rowloc1, 3);
// Commit the create of the tables so that the following aborts don't
// undo that work.
tc.commit();
// Now try aborts of transactions during splits, using magic
// trace flags. This test inserts enough rows to cause a split
// and then forces the split to fail at various key points. The
// split should be backed out and also the rows before the split.
// The test makes sure that there are some inserts before the forced
// split abort.
String[] debug_strings = { "leaf_split_growRoot1", "leaf_split_growRoot2", "leaf_split_growRoot3", "leaf_split_growRoot4", "leaf_split_growRoot5", "leaf_split_abort1", "leaf_split_abort2", "leaf_split_abort3", "leaf_split_abort4", "branch_split_abort1", "branch_split_abort2", "branch_split_abort3", "branch_split_abort4", "BTreeController_doIns2" };
for (int errs = 0; errs < debug_strings.length; errs++) {
REPORT("Doing abort test: " + debug_strings[errs]);
// Arm the trace flag that forces the split to fail.
if (SanityManager.DEBUG)
SanityManager.DEBUG_SET(debug_strings[errs]);
try {
// Open the base table
base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Open the secondary index
index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// insert one row that does not cause failure.
((SQLLongint) r1[0]).setValue(2);
((SQLLongint) r1[1]).setValue(10000 + errs);
// Insert the row into the base table; remember its location.
base_cc.insertAndFetchLocation(r1, base_rowloc1);
// Insert the row into the secondary index.
if (index_cc.insert(index_row1.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
// NOTE(review): the flag was already set before the try; this second
// DEBUG_SET of the same flag looks redundant — presumably a re-arm to
// mimic the language layer's error handling; confirm before removing.
if (SanityManager.DEBUG)
SanityManager.DEBUG_SET(debug_strings[errs]);
// now insert enough rows to cause failure
for (int i = 100; i > 0; i -= 2) {
((SQLLongint) r1[0]).setValue(2);
((SQLLongint) r1[1]).setValue(i);
// Insert the row into the base table; remember its location.
base_cc.insertAndFetchLocation(r1, base_rowloc1);
// Insert the row into the secondary index.
if (index_cc.insert(index_row1.getRow()) != 0) {
throw T_Fail.testFailMsg("insert failed");
}
}
// Reaching this point means the trace flag never fired - a failure.
throw T_Fail.testFailMsg("debug flag (" + debug_strings[errs] + ")did not cause exception.");
} catch (StandardException e) {
// The forced split failure lands here; clean up the transaction
// through the context manager, as the language layer would.
ContextService contextFactory = getContextService();
// Get the context manager.
ContextManager cm = contextFactory.getCurrentContextManager();
if (SanityManager.DEBUG)
SanityManager.ASSERT(cm != null);
cm.cleanupOnError(e, isdbActive());
// RESOLVE (mikem) - when split abort works come up with
// a good sanity check here.
//
// index check - there should be no records:
scan = tc.openScan(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, null, ScanController.NA, null, null, ScanController.NA);
index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
index_cc.checkConsistency();
index_cc.close();
if (scan.next()) {
throw T_Fail.testFailMsg("t_002: there are still rows in table.");
}
scan.close();
}
// Unset the flag.
if (SanityManager.DEBUG)
SanityManager.DEBUG_CLEAR(debug_strings[errs]);
}
// Try a simple abort. The following adds enough rows to cause a
// split. The result of the split should be a tree with no rows, but
// the splits will not be undone. It is up to the implementation
// whether the undo's cause shrinks in the tree. In the initial
// implementation it won't.
{
tc.commit();
// Open the base table
base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Open the secondary index
index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Make sure that normal abort leaves the committed split.
for (int i = 0; i < 3; i++) {
((SQLLongint) r1[0]).setValue(2);
((SQLLongint) r1[1]).setValue(i);
// Insert the row into the base table; remember its location.
base_cc.insertAndFetchLocation(r1, base_rowloc1);
// Insert the row into the secondary index.
if (index_cc.insert(index_row1.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
}
tc.abort();
// index check - there should be no records left.
ScanController empty_scan = tc.openScan(create_ret.index_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, null, ScanController.NA, null, null, ScanController.NA);
if (empty_scan.next()) {
throw T_Fail.testFailMsg("t_002: there are still rows in table.");
}
}
tc.commit();
REPORT("Ending t_002");
return true;
}
Aggregations