use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
In class T_AccessFactory, method sortCost.
/**
* Test the access level SortCost interface.
* <p>
*
* @return true if the test succeeded.
*
* @param tc The transaction controller to use in the test.
*
* @exception StandardException Standard exception policy.
* @exception T_Fail Unexpected behaviour from the API
*/
protected boolean sortCost(TransactionController tc) throws StandardException, T_Fail {
int key_value;
REPORT("(sortCost) starting");
// Create a heap conglomerate.
T_AccessRow template_row = new T_AccessRow(2);
long conglomid = tc.createConglomerate(
    "heap",                             // create a heap conglomerate
    template_row.getRowArray(),         // 1 column template.
    null,                               // column sort order not required for heap
    null,                               // default collation
    null,                               // default properties
    TransactionController.IS_DEFAULT);  // not temporary
// Open the conglomerate.
ConglomerateController cc = tc.openConglomerate(conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Create a 2 column row.
T_AccessRow r1 = new T_AccessRow(2);
SQLInteger c1 = new SQLInteger(1);
SQLInteger c2 = new SQLInteger(100);
r1.setCol(0, c1);
r1.setCol(1, c2);
// Get a location template
RowLocation rowloc1 = cc.newRowLocationTemplate();
// Insert the row and remember its location.
cc.insertAndFetchLocation(r1.getRowArray(), rowloc1);
cc.close();
tc.commit();
// flush the cache to get the row count updated.
flush_cache();
// Test 1 - Just call for various types of sorts. Not sure how
// to test the validity.
SortCostController scc = tc.openSortCostController();
double estimated_cost = scc.getSortCost(template_row.getRowArray(), null, false, 10000, 100, 100);
if (estimated_cost <= 0) {
throw T_Fail.testFailMsg("(storeCost) estimated sort cost :" + estimated_cost);
}
REPORT("(sortCost) finishing");
return true;
}
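The method above reduces to a short pattern: obtain a SortCostController from the transaction, ask for an estimate, and check that the estimate is positive. The sketch below restates just that pattern; it assumes the same imports and harness context as T_AccessFactory (a TransactionController tc and the template_row built above), and the numeric arguments are illustrative sizing estimates mirroring the call in the test, not documented parameter meanings.

    // Minimal sketch (assumptions noted above): estimate the cost of sorting ~10,000 rows.
    SortCostController scc = tc.openSortCostController();
    double estimate = scc.getSortCost(
        template_row.getRowArray(), // template describing the rows to be sorted
        null,                       // no explicit column ordering supplied
        false,                      // input is not already sorted
        10000,                      // estimated number of input rows (illustrative)
        100,                        // remaining sizing estimates, as in the call in sortCost()
        100);
    if (estimate <= 0)
        throw T_Fail.testFailMsg("(sortCost sketch) non-positive estimate: " + estimate);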
use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
In class T_AccessFactory, method createBtree.
private long createBtree(TransactionController tc, long baseConglomId, boolean temporary) throws StandardException {
// Create the properties for the index.
// This method knows that there is just one column in the base table
Properties indexProps = new Properties();
indexProps.put("baseConglomerateId", Long.toString(baseConglomId));
indexProps.put("nUniqueColumns", "1");
indexProps.put("rowLocationColumn", "1");
indexProps.put("nKeyFields", "2");
// Open a scan on the base conglomerate which will return all rows.
FormatableBitSet singleColumn = new FormatableBitSet(1);
singleColumn.set(0);
ScanController sc = tc.openScan(
    baseConglomId,
    false,
    0,                                             // not for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    singleColumn,                                  // just the first column.
    null, 0, null, null, 0);
// Create the template for the index. This method "knows" that
// all rows in the base table have one IntCol
T_AccessRow template = new T_AccessRow(2);
SQLLongint col0 = new SQLLongint(0);
RowLocation col1 = sc.newRowLocationTemplate();
template.setCol(0, col0);
template.setCol(1, col1);
DataValueDescriptor[] baseRow = new DataValueDescriptor[1];
baseRow[0] = col0;
// Create a btree secondary index conglomerate.
long iid = tc.createConglomerate("BTREE", template.getRowArray(), null, // default collation
null, indexProps, temporary ? TransactionController.IS_TEMPORARY : TransactionController.IS_DEFAULT);
// Open the index so we can stuff in index rows.
ConglomerateController cc = tc.openConglomerate(iid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// build the index.
while (sc.next()) {
sc.fetch(baseRow);
sc.fetchLocation(col1);
cc.insert(template.getRowArray());
}
cc.close();
return iid;
}
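The index-building loop at the end of createBtree relies on object sharing: the template's two columns (col0, col1) are the very objects the base scan fetches into, so each fetch/fetchLocation also updates the row that is about to be inserted into the index. A minimal restatement of that idea, assuming the variables created in the method above (sc, cc, template, col0, col1):

    // Sketch: populate the index from the base-table scan. col0 and col1 are shared
    // between the scan's fetch targets and the index template, so refilling them
    // also updates the index row.
    DataValueDescriptor[] indexRow = template.getRowArray();              // { col0, col1 }
    DataValueDescriptor[] baseRow  = new DataValueDescriptor[] { col0 };  // key column only
    while (sc.next()) {
        sc.fetch(baseRow);         // refill col0 with the current base row's key value
        sc.fetchLocation(col1);    // refill col1 with the current base row's location
        cc.insert(indexRow);       // insert (key, RowLocation) into the BTREE conglomerate
    }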
use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
In class T_AccessFactory, method readUncommitted.
/**
* Test critical cases for read uncommitted.
* <p>
*
* test 1 - test heap fetch, delete and replace of row on page which does not exist.
* test 2 - test heap fetch, delete and replace of row on page where row does not exist.
* test 3 - test that a heap scan positioned on a page keeps the page from being
*          reclaimed, while still handling rows deleted underneath it.
*
* @return true if the test succeeded.
*
* @param tc The transaction controller to use in the test.
*
* @exception StandardException Standard exception policy.
* @exception T_Fail Unexpected behaviour from the API
*/
protected boolean readUncommitted(TransactionController tc) throws StandardException, T_Fail {
REPORT("(readUncommitted)");
/*
* TEST 1 - test heap fetch of row on page which does not exist.
* <p>
* Do this by inserting a few pages worth of data and then deleting
* all the rows, while remembering the rowlocation of one of the pages.
* You need to at least get to the 2nd page, because the 1st page is
* never totally reclaimed and deleted by the system in a heap (it has
* some internal catalog information stored internally in row "0").
*/
// Build a ~2K character string by doubling a 16-character seed 7 times.
String twok_string = "0123456789012345";
for (int i = 0; i < 7; i++) {
twok_string += twok_string;
}
T_AccessRow big_row = new T_AccessRow(2);
big_row.setCol(1, new SQLChar(twok_string));
// Create a heap conglomerate.
long orig_conglomid = tc.createConglomerate(
    "heap",                             // create a heap conglomerate
    big_row.getRowArray(),
    null,                               // column sort order not required for heap
    null,                               // default collation
    null,                               // default properties
    TransactionController.IS_DEFAULT);  // not temporary
ConglomerateController cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
for (int i = 0; i < 10; i++) {
big_row.setCol(0, new SQLInteger(i));
cc.insert(big_row.getRowArray());
}
cc.close();
// Open another scan on the conglomerate.
ScanController base_scan = tc.openScan(
    orig_conglomid,
    true,                                          // hold cursor open across commit
    TransactionController.OPENMODE_FORUPDATE,      // for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    (FormatableBitSet) null,                       // all columns, all as objects
    null,                                          // start position - first row in conglomerate
    0,                                             // unused if start position is null.
    null,                                          // qualifier - accept all rows
    null,                                          // stop position - last row in conglomerate
    0);                                            // unused if stop position is null.
// now delete all the rows and remember the row location of the
// last row.
RowLocation deleted_page_rowloc = base_scan.newRowLocationTemplate();
for (int i = 0; i < 10; i++) {
base_scan.next();
base_scan.fetchLocation(deleted_page_rowloc);
base_scan.delete();
tc.commit();
}
base_scan.close();
tc.commit();
// At this point the post-commit thread should have reclaimed all 5 pages.
// Open the conglomerate at read uncommitted isolation.
cc = tc.openConglomerate(orig_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
// Test heap fetch of row on page which does not exist.
if (cc.fetch(deleted_page_rowloc, big_row.getRowArray(), null)) {
throw T_Fail.testFailMsg("(readUncommitted) fetch should ret false for reclaimed page.");
}
// Test heap replace of row on page which does not exist.
FormatableBitSet update_desc = new FormatableBitSet(1);
if (cc.replace(deleted_page_rowloc, big_row.getRowArray(), update_desc)) {
throw T_Fail.testFailMsg("(readUncommitted) delete should ret false for reclaimed page.");
}
// Test heap fetch (overloaded call) of row on page which does not exist.
if (cc.fetch(deleted_page_rowloc, big_row.getRowArray(), null, true)) {
throw T_Fail.testFailMsg("(readUncommitted) fetch should ret false for reclaimed page.");
}
// Test heap delete of row on page which does not exist.
if (cc.delete(deleted_page_rowloc)) {
throw T_Fail.testFailMsg("(readUncommitted) delete should ret false for reclaimed page.");
}
cc.close();
/*
* TEST 2 - test heap fetch of row on page where row does not exist.
* <p>
* Do this by inserting enough rows to put 1 row on the 2nd page.
* Then delete this one row, which will queue a post commit to reclaim
* the row and page. Then insert one more row on the same page in
* the same xact. Now commit the xact, which will cause post commit
* to run which will reclaim the row but not the page. Then try and
* fetch the row which was deleted.
*/
// string column will be 1500 bytes, allowing 2 rows per page to fit.
SQLChar stringcol = new SQLChar();
stringcol.setValue(T_AccessFactory.repeatString("012345678901234", 100));
big_row.setCol(1, stringcol);
// Create a heap conglomerate.
orig_conglomid = tc.createConglomerate(
    "heap",                             // create a heap conglomerate
    big_row.getRowArray(),
    null,                               // column sort order not required for heap
    null,                               // default collation
    null,                               // default properties
    TransactionController.IS_DEFAULT);  // not temporary
cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
for (int i = 0; i < 3; i++) {
big_row.setCol(0, new SQLInteger(i));
cc.insert(big_row.getRowArray());
}
// Open another scan on the conglomerate.
base_scan = tc.openScan(
    orig_conglomid,
    true,                                          // hold cursor open across commit
    TransactionController.OPENMODE_FORUPDATE,      // for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    (FormatableBitSet) null,                       // all columns, all as objects
    null,                                          // start position - first row in conglomerate
    0,                                             // unused if start position is null.
    null,                                          // qualifier - accept all rows
    null,                                          // stop position - last row in conglomerate
    0);                                            // unused if stop position is null.
// now delete all the rows and remember the row location of the
// last row.
RowLocation deleted_row_rowloc = base_scan.newRowLocationTemplate();
for (int i = 0; i < 3; i++) {
base_scan.next();
base_scan.fetchLocation(deleted_row_rowloc);
base_scan.delete();
}
// insert another row on page 2 to make sure page does not go away.
cc.insert(big_row.getRowArray());
cc.close();
base_scan.close();
tc.commit();
// At this point the post-commit thread should have reclaimed the deleted
// rows on page 2, but not the page itself.
//
// Open the conglomerate at read uncommitted isolation.
cc = tc.openConglomerate(orig_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
// test heap fetch of row on page where row does not exist.
if (cc.fetch(deleted_row_rowloc, big_row.getRowArray(), null)) {
throw T_Fail.testFailMsg("(readUncommitted) fetch should ret false for reclaimed row.");
}
// test heap replace of row on page where row does not exist.
if (cc.replace(deleted_page_rowloc, big_row.getRowArray(), update_desc)) {
throw T_Fail.testFailMsg("(readUncommitted) delete should ret false for reclaimed page.");
}
// test heap fetch (overloaded) of row on page where row does not exist.
if (cc.fetch(deleted_page_rowloc, big_row.getRowArray(), null, true)) {
throw T_Fail.testFailMsg("(readUncommitted) fetch should ret false for reclaimed page.");
}
// test heap delete of row on page where row does not exist.
if (cc.delete(deleted_page_rowloc)) {
throw T_Fail.testFailMsg("(readUncommitted) delete should ret false for reclaimed page.");
}
cc.close();
/*
* TEST 3 - test heap scan fetch of row on page prevents page from
* disappearing, but handles row being deleted.
* <p>
* A heap scan will maintain a scan lock on a page even if it is doing
* a read uncommitted scan. This will prevent the row/page from being
* reclaimed by post commit while the scan is positioned on the page.
* This presents no other concurrency issues for read uncommitted, it
* should be invisible to the user (deletes can still happen and the
* read uncommitted scanner will not block anyone).
*
* You need to at least get to the 2nd page, because the 1st page is
* never totally reclaimed and deleted by the system in a heap (it has
* some internal catalog information stored internally in row "0").
*/
big_row = new T_AccessRow(2);
big_row.setCol(1, new SQLChar(twok_string));
// Create a heap conglomerate.
orig_conglomid = tc.createConglomerate(
    "heap",                             // create a heap conglomerate
    big_row.getRowArray(),
    null,                               // column sort order not required for heap
    null,                               // default collation
    null,                               // default properties
    TransactionController.IS_DEFAULT);  // not temporary
cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
for (int i = 0; i < 10; i++) {
big_row.setCol(0, new SQLInteger(i));
cc.insert(big_row.getRowArray());
}
cc.close();
// Open scan on the conglomerate, and position it on the second page.
base_scan = tc.openScan(
    orig_conglomid,
    true,                                              // hold cursor open across commit
    0,                                                 // for read
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_READ_UNCOMMITTED,
    (FormatableBitSet) null,                           // all columns, all as objects
    null,                                              // start position - first row in conglomerate
    0,                                                 // unused if start position is null.
    null,                                              // qualifier - accept all rows
    null,                                              // stop position - last row in conglomerate
    0);                                                // unused if stop position is null.
base_scan.next();
base_scan.next();
base_scan.next();
if (!base_scan.doesCurrentPositionQualify())
throw T_Fail.testFailMsg("(readUncommitted) doesCurrentPositionQualify() failed.");
base_scan.fetch(big_row.getRowArray());
base_scan.fetchLocation(deleted_row_rowloc);
if (base_scan.isCurrentPositionDeleted())
throw T_Fail.testFailMsg("(readUncommitted) isCurrentPositionDeleted() failed.");
// Open another scan on the conglomerate.
ScanController delete_scan = tc.openScan(
    orig_conglomid,
    true,                                          // hold cursor open across commit
    TransactionController.OPENMODE_FORUPDATE,      // for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    (FormatableBitSet) null,                       // all columns, all as objects
    null,                                          // start position - first row in conglomerate
    0,                                             // unused if start position is null.
    null,                                          // qualifier - accept all rows
    null,                                          // stop position - last row in conglomerate
    0);                                            // unused if stop position is null.
for (int i = 0; i < 10; i++) {
delete_scan.next();
delete_scan.fetchLocation(deleted_page_rowloc);
delete_scan.delete();
}
delete_scan.close();
if (base_scan.doesCurrentPositionQualify())
throw T_Fail.testFailMsg("(readUncommitted) doesCurrentPositionQualify() failed.");
try {
base_scan.fetch(big_row.getRowArray());
throw T_Fail.testFailMsg("(readUncommitted) fetch of deleted row should throw exception.");
} catch (StandardException se) {
if (!se.getMessageId().equals(SQLState.AM_RECORD_NOT_FOUND)) {
throw T_Fail.testFailMsg("(readUncommitted) fetch of deleted row should throw SQLState.AM_RECORD_NOT_FOUND.");
}
}
base_scan.fetchLocation(deleted_row_rowloc);
if (!base_scan.isCurrentPositionDeleted())
throw T_Fail.testFailMsg("(readUncommitted) isCurrentPositionDeleted() failed.");
base_scan.close();
tc.commit();
REPORT("(readUncommitted) succeeded");
return true;
}
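The recurring pattern in these tests is that, at read uncommitted isolation, ConglomerateController operations on a RowLocation whose row or page has been reclaimed simply return false rather than raising an error. Below is a condensed sketch of how a caller would use that behaviour defensively; it assumes a TransactionController tc, a conglomerate id, a row template and a previously remembered RowLocation loc as in the tests above (all placeholder names).

    // Sketch (placeholder names, see lead-in): re-check a remembered RowLocation
    // at read-uncommitted isolation.
    ConglomerateController cc = tc.openConglomerate(
        conglomid,
        false,
        0,                                                 // read only
        TransactionController.MODE_RECORD,
        TransactionController.ISOLATION_READ_UNCOMMITTED);
    boolean present = cc.fetch(loc, row.getRowArray(), (FormatableBitSet) null);
    // 'present' is false if the row, or its whole page, was deleted and possibly
    // reclaimed by post-commit work; no exception is thrown at this isolation level.
    cc.close();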
use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
In class T_AccessFactory, method scanExample.
protected boolean scanExample(TransactionController tc) throws StandardException, T_Fail {
tc.commit();
if (!tc.isPristine() || !tc.isIdle() || tc.isGlobal())
throw T_Fail.testFailMsg("(scanExample) bad xact state after commit.");
if ((tc.countOpens(TransactionController.OPEN_TOTAL) > 0) || (tc.countOpens(TransactionController.OPEN_CONGLOMERATE) > 0) || (tc.countOpens(TransactionController.OPEN_SCAN) > 0) || (tc.countOpens(TransactionController.OPEN_CREATED_SORTS) > 0) || (tc.countOpens(TransactionController.OPEN_SORT) > 0)) {
System.out.println("OPENED 0:\n" + tc.debugOpened());
return (FAIL("unexpected open count."));
}
// Create a heap conglomerate.
long conglomid = tc.createConglomerate(
    "heap",                             // create a heap conglomerate
    new T_AccessRow(1).getRowArray(),   // 1 SQLInteger() column template.
    null,                               // column sort order not required for heap
    null,                               // default collation
    null,                               // default properties
    TransactionController.IS_DEFAULT);  // not temporary
REPORT("(scanExample) starting");
// Open it.
ConglomerateController cc = tc.openConglomerate(conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Insert some values.
int[] values = { 11, 22, 33, 44, 55, 66 };
T_AccessRow row = new T_AccessRow(1);
for (int i = 0; i < values.length; i++) {
row.setCol(0, new SQLInteger(values[i]));
if (cc.insert(row.getRowArray()) != 0)
throw T_Fail.testFailMsg("(scanExample after insert) insert failed ");
}
// For test coverage call the debugging output routine - can't diff it.
REPORT("(scanExample) debug output testing: " + tc.debugOpened());
// Close the conglomerate.
cc.close();
if ((tc.countOpens(TransactionController.OPEN_TOTAL) > 0) || (tc.countOpens(TransactionController.OPEN_CONGLOMERATE) > 0) || (tc.countOpens(TransactionController.OPEN_SCAN) > 0) || (tc.countOpens(TransactionController.OPEN_CREATED_SORTS) > 0) || (tc.countOpens(TransactionController.OPEN_SORT) > 0)) {
System.out.println("OPENED 1:\n" + tc.debugOpened());
return (FAIL("unexpected open count."));
}
REPORT("(scanExample) rows inserted");
// Correlates our position in the upcoming scan to the values array.
int scanindex = 0;
// Put a specific column in the row so we can look at it.
SQLInteger col = new SQLInteger(0);
row.setCol(0, col);
flush_cache();
StaticCompiledOpenConglomInfo static_info = tc.getStaticCompiledConglomInfo(conglomid);
// Open a scan on the conglomerate.
ScanController scan1 = tc.openCompiledScan(
    false,                                         // don't hold
    0,                                             // not for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    (FormatableBitSet) null,                       // all columns, all as objects
    null,                                          // start position - first row in conglomerate
    0,                                             // unused if start position is null.
    null,                                          // qualifier - accept all rows
    null,                                          // stop position - last row in conglomerate
    0,                                             // unused if stop position is null.
    static_info,
    tc.getDynamicCompiledConglomInfo(conglomid));
if (scan1.getEstimatedRowCount() != 6) {
throw T_Fail.testFailMsg("(scanExample) estimated row count not 6:" + scan1.getEstimatedRowCount());
}
// Test 2 - ASSERT(should be able to set arbitrary row count)
scan1.setEstimatedRowCount(5);
if (scan1.getEstimatedRowCount() != 5) {
throw T_Fail.testFailMsg("(scanExample) estimated row count not 5");
}
// Iterate through and check that the rows are still there.
while (scan1.next()) {
scan1.fetch(row.getRowArray());
// Check we got the value we put in.
if (col.getInt() != values[scanindex])
throw T_Fail.testFailMsg("(scanExample after insert) Row " + scanindex + " should have been " + values[scanindex] + ", was " + col.getInt());
scanindex++;
}
// make sure another next() call continues to return false.
if (scan1.next())
throw T_Fail.testFailMsg("(scanExample after insert) should continue to return false after reaching end of scan");
// see if reopen scan interfaces work
scan1.reopenScan(
    null,  // start position - first row in conglomerate
    0,     // unused if start position is null.
    null,  // qualifier - accept all rows
    null,  // stop position - last row in conglomerate
    0);    // unused if stop position is null.
scan1.next();
scan1.next();
scan1.next();
RowLocation third_row_rowloc = scan1.newRowLocationTemplate();
scan1.fetchLocation(third_row_rowloc);
// see if reopen scan interfaces work
scan1.reopenScanByRowLocation(third_row_rowloc, null);
scanindex = 2;
while (scan1.next()) {
scan1.fetch(row.getRowArray());
// Check we got the value we put in.
if (col.getInt() != values[scanindex])
throw T_Fail.testFailMsg("(scanExample after insert) Row " + scanindex + " should have been " + values[scanindex] + ", was " + col.getInt());
scanindex++;
}
scan1.close();
// Check we saw the right number of rows.
if (scanindex != values.length)
throw T_Fail.testFailMsg("(scanExample after insert) Expected " + values.length + "rows, got " + scanindex);
REPORT("(scanExample) rows present and accounted for");
// Open another scan on the conglomerate.
ScanController scan2 = tc.openScan(
    conglomid,
    false,                                         // don't hold
    TransactionController.OPENMODE_FORUPDATE,      // for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    (FormatableBitSet) null,                       // all columns, all as objects
    null,                                          // start position - first row in conglomerate
    0,                                             // unused if start position is null.
    null,                                          // qualifier - accept all rows
    null,                                          // stop position - last row in conglomerate
    0);                                            // unused if stop position is null.
// Iterate with the second scan and fiddle with the values so they
// look like the new value array.
int[] newvalues = { 22, 33, 444, 55, 6666 };
while (scan2.next()) {
scan2.fetch(row.getRowArray());
switch(((SQLInteger) row.getCol(0)).getInt()) {
case 11:
if (!scan2.delete())
throw T_Fail.testFailMsg("(scanExample) delete failed.");
break;
case 22:
case 33:
case 55:
// leave these alone
break;
case 44:
DataValueDescriptor[] update_row = new DataValueDescriptor[1];
update_row[0] = new SQLInteger(444);
FormatableBitSet update_desc = new FormatableBitSet(1);
update_desc.set(0);
if (!scan2.replace(update_row, update_desc)) {
throw T_Fail.testFailMsg("(scanExample) partial column row replace failed.");
}
break;
case 66:
row.setCol(0, new SQLInteger(6666));
if (!scan2.replace(row.getRowArray(), (FormatableBitSet) null))
throw T_Fail.testFailMsg("(scanExample) replace failed.");
break;
default:
throw T_Fail.testFailMsg("(scanExample) Read unexpected value.");
}
}
scan2.close();
REPORT("(scanExample) rows fiddled with");
// Open a third scan on the conglomerate.
ScanController scan3 = tc.openScan(
    conglomid,
    false,                                         // don't hold
    0,                                             // not for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    (FormatableBitSet) null,                       // all columns, all as objects
    null,                                          // start position - first row in conglomerate
    0,                                             // unused if start position is null.
    null,                                          // qualifier - accept all rows
    null,                                          // stop position - last row in conglomerate
    0);                                            // unused if stop position is null.
// Iterate through and inspect the changes.
scanindex = 0;
row.setCol(0, col);
while (scan3.next()) {
scan3.fetch(row.getRowArray());
REPORT("(scanExample) scan3 fetched " + col.getInt());
// Check we got the value we put in.
if (col.getInt() != newvalues[scanindex])
throw T_Fail.testFailMsg("(scanExample after changes) Row " + scanindex + " should have been " + newvalues[scanindex] + ", was " + col.getInt());
scanindex++;
}
scan3.close();
// Open a third scan on the conglomerate.
scan3 = tc.openScan(
    conglomid,
    false,                                             // don't hold
    0,                                                 // not for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_READ_UNCOMMITTED,
    (FormatableBitSet) null,                           // all columns, all as objects
    null,                                              // start position - first row in conglomerate
    0,                                                 // unused if start position is null.
    null,                                              // qualifier - accept all rows
    null,                                              // stop position - last row in conglomerate
    0);                                                // unused if stop position is null.
// Iterate through and inspect the changes.
scanindex = 0;
row.setCol(0, col);
while (scan3.next()) {
scan3.fetch(row.getRowArray());
REPORT("(scanExample) scan3 fetched " + col.getInt());
// Check we got the value we put in.
if (col.getInt() != newvalues[scanindex])
throw T_Fail.testFailMsg("(scanExample after changes) Row " + scanindex + " should have been " + newvalues[scanindex] + ", was " + col.getInt());
scanindex++;
}
scan3.close();
// Check we saw the right number of rows.
if (scanindex != newvalues.length)
throw T_Fail.testFailMsg("(scanExample after changes) Expected " + newvalues.length + "rows, got " + scanindex);
REPORT("(scanExample) fiddled rows present and accounted for");
REPORT("(scanExample) testing expected delete errors");
// Open 4th scan on conglomerate and test "expected" error returns
// from replace, partial column replace, delete.
ScanController scan4 = tc.openScan(
    conglomid,
    false,                                         // don't hold
    TransactionController.OPENMODE_FORUPDATE,      // for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    (FormatableBitSet) null,                       // all columns, all as objects
    null,                                          // start position - first row in conglomerate
    0,                                             // unused if start position is null.
    null,                                          // qualifier - accept all rows
    null,                                          // stop position - last row in conglomerate
    0);                                            // unused if stop position is null.
// Find and delete the row with key value 22, then test that operations on that deleted entry FAIL as expected.
while (scan4.next()) {
scan4.fetch(row.getRowArray());
if (!scan4.doesCurrentPositionQualify()) {
throw T_Fail.testFailMsg("(scanExample doesCurrentPositionQualify() errors) Expected requalify of current row to succeed");
}
if (((SQLInteger) row.getCol(0)).getInt() == 22) {
if (!scan4.delete()) {
throw T_Fail.testFailMsg("(scanExample delete errors) Delete failed.");
}
break;
}
}
if (scan4.doesCurrentPositionQualify()) {
throw T_Fail.testFailMsg("(scanExample doesCurrentPositionQualify() errors) Expected qualify of deleted row to FAIL");
}
DataValueDescriptor[] update_row = new DataValueDescriptor[1];
FormatableBitSet update_desc = new FormatableBitSet(1);
update_desc.set(0);
if (scan4.replace(update_row, update_desc)) {
throw T_Fail.testFailMsg("(scanExample delete errors) Expected partial column replace to FAIL");
}
if (scan4.replace(row.getRowArray(), (FormatableBitSet) null)) {
throw T_Fail.testFailMsg("(scanExample after changes) Expected replace to FAIL");
}
if (scan4.delete()) {
throw T_Fail.testFailMsg("(scanExample after changes) Expected delete to FAIL");
}
scan4.close();
if ((tc.countOpens(TransactionController.OPEN_TOTAL) > 0) || (tc.countOpens(TransactionController.OPEN_CONGLOMERATE) > 0) || (tc.countOpens(TransactionController.OPEN_SCAN) > 0) || (tc.countOpens(TransactionController.OPEN_CREATED_SORTS) > 0) || (tc.countOpens(TransactionController.OPEN_SORT) > 0)) {
System.out.println("OPENED:\n" + tc.debugOpened());
return (FAIL("unexpected open count."));
}
REPORT("(scanExample) completed");
return true;
}
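Stripped of the verification logic, scanExample exercises one core idiom: open a scan over the whole conglomerate, step through it with next()/fetch(), optionally replace() or delete() the current row, then close the scan. A minimal version of that idiom is sketched below; it assumes a TransactionController tc, a heap conglomerate id and a one-column T_AccessRow row as created above (placeholder names).

    // Sketch (placeholder names, see lead-in): the basic full-conglomerate scan idiom.
    ScanController scan = tc.openScan(
        conglomid,
        false,                                         // don't hold across commit
        0,                                             // read only
        TransactionController.MODE_RECORD,
        TransactionController.ISOLATION_SERIALIZABLE,
        (FormatableBitSet) null,                       // fetch all columns
        null,                                          // start position - first row
        0,                                             // unused if start position is null
        null,                                          // qualifier - accept all rows
        null,                                          // stop position - last row
        0);                                            // unused if stop position is null
    while (scan.next()) {
        scan.fetch(row.getRowArray());
        // Inspect the row here; scan.replace(...) and scan.delete() act on the current row.
    }
    scan.close();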
use of org.apache.derby.iapi.types.RowLocation in project derby by apache.
In class T_AccessFactory, method alterTable.
/**
* Test the access level alter table interface for adding columns.
* <p>
*
* @return true if the test succeeded.
*
* @param tc The transaction controller to use in the test.
* @param temporary flag which tells whether or not the conglomerate
* used in the test should be temporary
*
* @exception StandardException Standard exception policy.
* @exception T_Fail Unexpected behaviour from the API
*/
private boolean alterTable(TransactionController tc, boolean temporary) throws StandardException, T_Fail {
int key_value;
REPORT("(alterTable) starting");
// Create a heap conglomerate.
T_AccessRow template_row = new T_AccessRow(1);
int temporaryFlag = temporary ? TransactionController.IS_TEMPORARY : TransactionController.IS_DEFAULT;
long conglomid = tc.createConglomerate(
    "heap",                       // create a heap conglomerate
    template_row.getRowArray(),   // 1 column template.
    null,                         // column sort order not required for heap
    null,                         // default collation
    null,                         // default properties
    temporaryFlag);
// Open the conglomerate.
ConglomerateController cc = tc.openConglomerate(conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Create a 1 column row. int column = 1.
T_AccessRow r1 = new T_AccessRow(1);
SQLInteger c1 = new SQLInteger(1);
r1.setCol(0, c1);
// Get a location template
RowLocation rowloc1 = cc.newRowLocationTemplate();
// Insert the row and remember its location.
cc.insertAndFetchLocation(r1.getRowArray(), rowloc1);
// create another 1 column row. int column = 2.
// Get a location template
r1.setCol(0, new SQLInteger(2));
RowLocation rowloc2 = cc.newRowLocationTemplate();
// Insert the row and remember its location.
cc.insertAndFetchLocation(r1.getRowArray(), rowloc2);
// RESOLVE - should this be a runtime error?
if (SanityManager.DEBUG) {
try {
T_AccessRow two_column_row = new T_AccessRow(2);
SQLInteger col1 = new SQLInteger(3);
SQLInteger col2 = new SQLInteger(3);
cc.insert(two_column_row.getRowArray());
throw T_Fail.testFailMsg("(alterTable) Allowed insert of bad row.");
} catch (StandardException t) {
// expected error continue the test.
}
}
// RESOLVE - (mikem) should we check for this in released runtime?
if (SanityManager.DEBUG) {
try {
T_AccessRow two_column_row = new T_AccessRow(2);
if (!cc.fetch(rowloc1, two_column_row.getRowArray(), (FormatableBitSet) null)) {
throw T_Fail.testFailMsg("(alterTable) Allowed fetch of bad row, bad ret val.");
}
throw T_Fail.testFailMsg("(alterTable) Allowed fetch of bad row.");
} catch (StandardException t) {
// expected error continue the test.
}
}
// RESOLVE - (mikem) should we check for this in released runtime?
if (SanityManager.DEBUG) {
try {
DataValueDescriptor[] third_column_row = new DataValueDescriptor[3];
third_column_row[2] = new SQLInteger(3);
FormatableBitSet fetch_desc = new FormatableBitSet(3);
fetch_desc.set(2);
if (!cc.fetch(rowloc1, third_column_row, fetch_desc)) {
throw T_Fail.testFailMsg("(alterTable) Allowed fetch of bad row, bad ret val.");
}
throw T_Fail.testFailMsg("(alterTable) Allowed fetch of bad row.");
} catch (StandardException t) {
// expected error continue the test.
}
}
// RESOLVE - (mikem) should we check for this in released runtime?
if (SanityManager.DEBUG) {
try {
T_AccessRow two_column_row = new T_AccessRow(2);
SQLInteger col1 = new SQLInteger(3);
SQLInteger col2 = new SQLInteger(3);
cc.replace(rowloc1, two_column_row.getRowArray(), null);
throw T_Fail.testFailMsg("(alterTable) Allowed replace of bad row.");
} catch (StandardException t) {
// expected error continue the test.
}
}
// Test that we can't replace data columns that don't exist
if (SanityManager.DEBUG) {
try {
DataValueDescriptor[] second_column_row = new DataValueDescriptor[2];
second_column_row[1] = new SQLInteger(3);
FormatableBitSet update_desc = new FormatableBitSet(2);
update_desc.set(1);
cc.replace(rowloc1, second_column_row, update_desc);
throw T_Fail.testFailMsg("(alterTable) Allowed partial row update of bad column.");
} catch (StandardException t) {
// expected error continue the test.
}
}
// Make sure commitNoSync gets executed sometimes.
tc.commitNoSync(TransactionController.RELEASE_LOCKS);
// now alter the conglomerate, add another int column
tc.addColumnToConglomerate(conglomid, 1, c1, StringDataValue.COLLATION_TYPE_UCS_BASIC);
// Open the table after the close done by commit.
cc = tc.openConglomerate(conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
T_AccessRow two_column_row = new T_AccessRow(2);
SQLInteger col1 = new SQLInteger(3);
SQLInteger col2 = new SQLInteger(3);
// fetch the rows and make sure you get null's in new fields.
if (!cc.fetch(rowloc1, two_column_row.getRowArray(), (FormatableBitSet) null)) {
throw T_Fail.testFailMsg("(alterTable) Row not there.");
}
if ((((SQLInteger) two_column_row.getCol(0)).getInt() != 1) || (!two_column_row.getCol(1).isNull())) {
throw T_Fail.testFailMsg("(alterTable) Bad column value after alter.");
}
if (!cc.fetch(rowloc2, two_column_row.getRowArray(), (FormatableBitSet) null)) {
throw T_Fail.testFailMsg("(alterTable) Row not there.");
}
if ((((SQLInteger) two_column_row.getCol(0)).getInt() != 2) || (!two_column_row.getCol(1).isNull())) {
throw T_Fail.testFailMsg("(alterTable) Bad column value after alter.");
}
// make sure insert of 2 column row works.
two_column_row = new T_AccessRow(2);
two_column_row.setCol(0, new SQLInteger(3));
two_column_row.setCol(1, new SQLInteger(300));
cc.insert(two_column_row.getRowArray());
// At this point the table looks like:
// col1 col2
// ---- ----
// 1 NA
// 2 NA
// 3 300
ScanController scan = tc.openScan(
    conglomid,
    false,                                         // don't hold
    TransactionController.OPENMODE_FORUPDATE,      // for update
    TransactionController.MODE_RECORD,
    TransactionController.ISOLATION_SERIALIZABLE,
    (FormatableBitSet) null,                       // all columns, all as objects
    null,                                          // start position - first row in conglomerate
    0,                                             // unused if start position is null.
    null,                                          // qualifier - accept all rows
    null,                                          // stop position - last row in conglomerate
    0);                                            // unused if stop position is null.
while (scan.next()) {
scan.fetch(two_column_row.getRowArray());
key_value = ((SQLInteger) two_column_row.getCol(0)).getInt();
switch(key_value) {
case 1:
{
// Set non-existent column value to 100
if (!two_column_row.getCol(1).isNull()) {
throw T_Fail.testFailMsg("(alterTable) Bad column value after alter.");
}
// test that replace field works on alter added column
// make result row be: (1, 100)
two_column_row.setCol(1, new SQLInteger(100));
scan.replace(two_column_row.getRowArray(), (FormatableBitSet) null);
break;
}
case 2:
{
if (!two_column_row.getCol(1).isNull()) {
throw T_Fail.testFailMsg("(alterTable) Bad column value after alter.");
}
// test that replace row works on alter added column row.
// make result row be: (2, 200)
two_column_row.setCol(1, new SQLInteger(200));
scan.replace(two_column_row.getRowArray(), (FormatableBitSet) null);
break;
}
case 3:
{
break;
}
default:
{
throw T_Fail.testFailMsg("(alterTable) bad row value found in table.");
}
}
}
// reposition the scan
scan.reopenScan(
    null,  // start position - first row in conglomerate
    0,     // unused if start position is null.
    null,  // qualifier - accept all rows
    null,  // stop position - last row in conglomerate
    0);    // unused if stop position is null.
while (scan.next()) {
scan.fetch(two_column_row.getRowArray());
key_value = ((SQLInteger) two_column_row.getCol(0)).getInt();
switch(key_value) {
case 1:
case 2:
case 3:
{
int second_col_val = ((SQLInteger) two_column_row.getCol(1)).getInt();
if (second_col_val != (key_value * 100)) {
throw T_Fail.testFailMsg("(alterTable) Bad column value after alter." + "expected: (" + key_value + ", " + key_value * 100 + ")\n" + "got : (" + key_value + ", " + second_col_val + ")\n");
}
break;
}
default:
{
throw T_Fail.testFailMsg("(alterTable) bad row value found in table.");
}
}
}
scan.close();
tc.commit();
REPORT("(alterTable) completed");
return true;
}
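The add-column step itself is small compared with the verification around it. Below is a condensed restatement, assuming the variables from alterTable above (tc, conglomid, c1 used as a template value for the new column's type, and rowloc1), together with the expectation the test verifies: rows inserted before the alter report SQL NULL in the added column.

    // Sketch (assumes variables from alterTable above): add an int column, then
    // confirm that a pre-existing row shows NULL in it.
    tc.addColumnToConglomerate(
        conglomid,
        1,                                             // position of the new column
        c1,                                            // template value for the new column's type
        StringDataValue.COLLATION_TYPE_UCS_BASIC);     // collation id for the new column

    ConglomerateController cc = tc.openConglomerate(
        conglomid,
        false,
        0,                                             // read only
        TransactionController.MODE_RECORD,
        TransactionController.ISOLATION_SERIALIZABLE);
    T_AccessRow widened = new T_AccessRow(2);
    if (!cc.fetch(rowloc1, widened.getRowArray(), (FormatableBitSet) null)
            || !widened.getCol(1).isNull()) {
        throw T_Fail.testFailMsg("(alterTable sketch) expected NULL in the added column");
    }
    cc.close();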