use of org.apache.derby.iapi.types.SQLChar in project derby by apache.
the class T_AccessFactory method readUncommitted.
/**
* Test critical cases for read uncommitted.
* <p>
* test 1 - test heap fetch, delete and replace of a row on a page which does not exist.
* test 2 - test heap fetch, delete and replace of a row on a page where the row does not exist.
* test 3 - test that a heap scan positioned on a row keeps its page from being reclaimed, while correctly reporting that the row has been deleted.
*
* @exception StandardException Standard exception policy.
* @exception T_Fail Unexpected test behaviour.
*/
protected boolean readUncommitted(TransactionController tc) throws StandardException, T_Fail {
REPORT("(readUncommitted)");
/*
* TEST 1 - test heap fetch of row on page which does not exist.
* <p>
* Do this by inserting a few pages worth of data and then deleting
* all the rows, while remembering the rowlocation of one of the pages.
* You need to get to at least the 2nd page, because the 1st page of a heap is
* never totally reclaimed and deleted by the system (it stores some internal
* catalog information in row "0").
*/
String twok_string = new String("0123456789012345");
for (int i = 0; i < 7; i++) {
twok_string += twok_string;
}
T_AccessRow big_row = new T_AccessRow(2);
big_row.setCol(1, new SQLChar(twok_string));
// Create a heap conglomerate.
long orig_conglomid = tc.createConglomerate(
"heap", // create a heap conglomerate
big_row.getRowArray(),
null, // column sort order not required for heap
null, // default collation
null, // default properties
TransactionController.IS_DEFAULT); // not temporary
ConglomerateController cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
for (int i = 0; i < 10; i++) {
big_row.setCol(0, new SQLInteger(i));
cc.insert(big_row.getRowArray());
}
cc.close();
// Open another scan on the conglomerate.
ScanController base_scan = tc.openScan(
orig_conglomid,
true, // hold cursor open across commit
TransactionController.OPENMODE_FORUPDATE, // for update
TransactionController.MODE_RECORD,
TransactionController.ISOLATION_SERIALIZABLE,
(FormatableBitSet) null, // all columns, all as objects
null, // start position - first row in conglomerate
0, // unused if start position is null.
null, // qualifier - accept all rows
null, // stop position - last row in conglomerate
0); // unused if stop position is null.
// now delete all the rows and remember the row location of the
// last row.
RowLocation deleted_page_rowloc = base_scan.newRowLocationTemplate();
for (int i = 0; i < 10; i++) {
base_scan.next();
base_scan.fetchLocation(deleted_page_rowloc);
base_scan.delete();
tc.commit();
}
base_scan.close();
tc.commit();
// At this point the post-commit thread should have reclaimed all 5 pages.
// Open the conglomerate at read uncommitted isolation.
cc = tc.openConglomerate(orig_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
// Test heap fetch of row on page which does not exist.
if (cc.fetch(deleted_page_rowloc, big_row.getRowArray(), null)) {
throw T_Fail.testFailMsg("(readUncommitted) fetch should ret false for reclaimed page.");
}
// Test heap replace of row on page which does not exist.
FormatableBitSet update_desc = new FormatableBitSet(1);
if (cc.replace(deleted_page_rowloc, big_row.getRowArray(), update_desc)) {
throw T_Fail.testFailMsg("(readUncommitted) replace should ret false for reclaimed page.");
}
// Test heap fetch (overloaded call) of row on page which does not exist.
if (cc.fetch(deleted_page_rowloc, big_row.getRowArray(), null, true)) {
throw T_Fail.testFailMsg("(readUncommitted) fetch should ret false for reclaimed page.");
}
// Test heap delete of row on page which does not exist.
if (cc.delete(deleted_page_rowloc)) {
throw T_Fail.testFailMsg("(readUncommitted) delete should ret false for reclaimed page.");
}
cc.close();
/*
* TEST 2 - test heap fetch of row on page where row does not exist.
* <p>
* Do this by inserting enough rows to put 1 row on the 2nd page.
* Then delete this one row, which will queue a post commit to reclaim
* the row and page. Then insert one more row on the same page in
* the same xact. Now commit the xact, which will cause post commit
* to run which will reclaim the row but not the page. Then try and
* fetch the row which was deleted.
*/
// string column will be 1500 bytes, allowing 2 rows per page to fit.
SQLChar stringcol = new SQLChar();
stringcol.setValue(T_AccessFactory.repeatString("012345678901234", 100));
big_row.setCol(1, stringcol);
// Create a heap conglomerate.
orig_conglomid = tc.createConglomerate(
"heap", // create a heap conglomerate
big_row.getRowArray(),
null, // column sort order not required for heap
null, // default collation
null, // default properties
TransactionController.IS_DEFAULT); // not temporary
cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
for (int i = 0; i < 3; i++) {
big_row.setCol(0, new SQLInteger(i));
cc.insert(big_row.getRowArray());
}
// Open another scan on the conglomerate.
base_scan = tc.openScan(
orig_conglomid,
true, // hold cursor open across commit
TransactionController.OPENMODE_FORUPDATE, // for update
TransactionController.MODE_RECORD,
TransactionController.ISOLATION_SERIALIZABLE,
(FormatableBitSet) null, // all columns, all as objects
null, // start position - first row in conglomerate
0, // unused if start position is null.
null, // qualifier - accept all rows
null, // stop position - last row in conglomerate
0); // unused if stop position is null.
// now delete all the rows and remember the row location of the
// last row.
RowLocation deleted_row_rowloc = base_scan.newRowLocationTemplate();
for (int i = 0; i < 3; i++) {
base_scan.next();
base_scan.fetchLocation(deleted_row_rowloc);
base_scan.delete();
}
// insert another row on page 2 to make sure page does not go away.
cc.insert(big_row.getRowArray());
cc.close();
base_scan.close();
tc.commit();
// At this point the post-commit thread should have reclaimed the deleted
// row on page 2, but not the page itself.
//
// Open it, at read uncommitted level.
cc = tc.openConglomerate(orig_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
// test heap fetch of row on page where row does not exist.
if (cc.fetch(deleted_row_rowloc, big_row.getRowArray(), null)) {
throw T_Fail.testFailMsg("(readUncommitted) fetch should ret false for reclaimed row.");
}
// test heap replace of row on page where row does not exist.
if (cc.replace(deleted_page_rowloc, big_row.getRowArray(), update_desc)) {
throw T_Fail.testFailMsg("(readUncommitted) replace should ret false for reclaimed page.");
}
// test heap fetch (overloaded) of row on page where row does not exist.
if (cc.fetch(deleted_page_rowloc, big_row.getRowArray(), null, true)) {
throw T_Fail.testFailMsg("(readUncommitted) fetch should ret false for reclaimed page.");
}
// test heap delete of row on page where row does not exist.
if (cc.delete(deleted_page_rowloc)) {
throw T_Fail.testFailMsg("(readUncommitted) delete should ret false for reclaimed page.");
}
cc.close();
/*
* TEST 3 - test heap scan fetch of row on page prevents page from
* disappearing, but handles row being deleted.
* <p>
* A heap scan will maintain a scan lock on a page even if it is doing
* a read uncommitted scan. This will prevent the row/page from being
* reclaimed by post commit while the scan is positioned on the page.
* This presents no other concurrency issues for read uncommitted, it
* should be invisible to the user (deletes can still happen and the
* read uncommitted scanner will not block anyone).
*
* You need to get to at least the 2nd page, because the 1st page of a heap is
* never totally reclaimed and deleted by the system (it stores some internal
* catalog information in row "0").
*/
big_row = new T_AccessRow(2);
big_row.setCol(1, new SQLChar(twok_string));
// Create a heap conglomerate.
orig_conglomid = tc.createConglomerate(
"heap", // create a heap conglomerate
big_row.getRowArray(),
null, // column sort order not required for heap
null, // default collation
null, // default properties
TransactionController.IS_DEFAULT); // not temporary
cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
for (int i = 0; i < 10; i++) {
big_row.setCol(0, new SQLInteger(i));
cc.insert(big_row.getRowArray());
}
cc.close();
// Open scan on the conglomerate, and position it on the second page.
base_scan = tc.openScan(
orig_conglomid,
true, // hold cursor open across commit
0, // for read
TransactionController.MODE_RECORD,
TransactionController.ISOLATION_READ_UNCOMMITTED,
(FormatableBitSet) null, // all columns, all as objects
null, // start position - first row in conglomerate
0, // unused if start position is null.
null, // qualifier - accept all rows
null, // stop position - last row in conglomerate
0); // unused if stop position is null.
base_scan.next();
base_scan.next();
base_scan.next();
if (!base_scan.doesCurrentPositionQualify())
throw T_Fail.testFailMsg("(readUncommitted) doesCurrentPositionQualify() failed.");
base_scan.fetch(big_row.getRowArray());
base_scan.fetchLocation(deleted_row_rowloc);
if (base_scan.isCurrentPositionDeleted())
throw T_Fail.testFailMsg("(readUncommitted) isCurrentPositionDeleted() failed.");
// Open another scan on the conglomerate.
ScanController delete_scan = tc.openScan(
orig_conglomid,
true, // hold cursor open across commit
TransactionController.OPENMODE_FORUPDATE, // for update
TransactionController.MODE_RECORD,
TransactionController.ISOLATION_SERIALIZABLE,
(FormatableBitSet) null, // all columns, all as objects
null, // start position - first row in conglomerate
0, // unused if start position is null.
null, // qualifier - accept all rows
null, // stop position - last row in conglomerate
0); // unused if stop position is null.
for (int i = 0; i < 10; i++) {
delete_scan.next();
delete_scan.fetchLocation(deleted_page_rowloc);
delete_scan.delete();
}
delete_scan.close();
if (base_scan.doesCurrentPositionQualify())
throw T_Fail.testFailMsg("(readUncommitted) doesCurrentPositionQualify() failed.");
try {
base_scan.fetch(big_row.getRowArray());
throw T_Fail.testFailMsg("(readUncommitted) fetch of deleted row should throw exception.");
} catch (StandardException se) {
if (!se.getMessageId().equals(SQLState.AM_RECORD_NOT_FOUND)) {
throw T_Fail.testFailMsg("(readUncommitted) fetch of deleted row should throw SQLState.AM_RECORD_NOT_FOUND.");
}
}
base_scan.fetchLocation(deleted_row_rowloc);
if (!base_scan.isCurrentPositionDeleted())
throw T_Fail.testFailMsg("(readUncommitted) isCurrentPositionDeleted() failed.");
base_scan.close();
tc.commit();
REPORT("(readUncommitted) succeeded");
return true;
}
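A condensed sketch of the probe pattern that TEST 1 and TEST 2 repeat above is shown below. The helper name probeStaleLocation and its parameter list are illustrative only and do not exist in T_AccessFactory; the calls themselves are the same TransactionController and ConglomerateController methods used in the tests. The point of the pattern is that at ISOLATION_READ_UNCOMMITTED a fetch, replace or delete through a RowLocation whose row or page has been reclaimed is expected to return false rather than throw.

// Illustrative helper (not part of the Derby test suite).
private void probeStaleLocation(
        TransactionController tc,
        long conglomid,
        RowLocation staleLoc,
        DataValueDescriptor[] rowTemplate)
    throws StandardException, T_Fail {
    // Open read-only at read uncommitted, as the tests above do after commit.
    ConglomerateController cc = tc.openConglomerate(
        conglomid,
        false,
        0, // read-only open mode
        TransactionController.MODE_RECORD,
        TransactionController.ISOLATION_READ_UNCOMMITTED);
    if (cc.fetch(staleLoc, rowTemplate, (FormatableBitSet) null))
        throw T_Fail.testFailMsg("fetch should return false for a reclaimed location.");
    if (cc.replace(staleLoc, rowTemplate, new FormatableBitSet(1)))
        throw T_Fail.testFailMsg("replace should return false for a reclaimed location.");
    if (cc.fetch(staleLoc, rowTemplate, (FormatableBitSet) null, true))
        throw T_Fail.testFailMsg("fetch (wait) should return false for a reclaimed location.");
    if (cc.delete(staleLoc))
        throw T_Fail.testFailMsg("delete should return false for a reclaimed location.");
    cc.close();
}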
use of org.apache.derby.iapi.types.SQLChar in project derby by apache.
the class T_RawStoreFactory method P033.
/**
* Insert a long row with 100 columns into a container with 4K pages, where each column is smaller than a page.
*
* @exception T_Fail Unexpected behaviour from the API
* @exception StandardException Unexpected exception from the implementation
*/
protected void P033(long segment) throws StandardException, T_Fail {
Transaction t = t_util.t_startTransaction();
long cid = t_util.t_addContainer(t, segment, 4096);
ContainerHandle c = t_util.t_openContainer(t, segment, cid, true);
Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
try {
t_util.t_checkEmptyPage(page);
T_RawStoreRow r0 = new T_RawStoreRow(100);
for (int i = 0; i < 100; i++) {
r0.setColumn(i, REC_007);
}
int insertFlag = Page.INSERT_INITIAL | Page.INSERT_OVERFLOW;
RecordHandle rh0 = null;
try {
rh0 = t_util.t_insertAtSlot(page, 0, r0, (byte) insertFlag);
} catch (StandardException se) {
throw T_Fail.testFailMsg("insert of long row failed.");
}
if (rh0 == null)
throw T_Fail.testFailMsg("insert of first long row failed.");
else {
REPORT("about to check fetch...");
DataValueDescriptor column = new SQLChar();
for (int i = 0; i < 100; i++) {
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, i, column, false, REC_007);
}
}
page.unlatch();
page = null;
if (segment != ContainerHandle.TEMPORARY_SEGMENT) {
// cleanup
t_util.t_dropContainer(t, segment, cid);
}
} finally {
if (page != null)
page.unlatch();
t_util.t_commit(t);
t.close();
}
PASS("P033: segment = " + segment);
}
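P033 allocates a single SQLChar and hands the same instance to t_checkFetchColFromSlot for all 100 columns. The standalone sketch below is hypothetical and not part of the Derby test suite; it shows why that reuse is safe: a SQLChar is a mutable DataValueDescriptor whose contents are overwritten by each setValue call, so one object can serve as the destination buffer for many fetches. The imports assume the classic org.apache.derby.iapi package layout used throughout this page.

import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.iapi.types.SQLChar;

public class SQLCharReuseSketch {
    public static void main(String[] args) throws StandardException {
        DataValueDescriptor column = new SQLChar();

        column.setValue("first fetch");      // each fetch overwrites the buffer
        System.out.println(column.getString());

        column.setValue("second fetch");     // same object, new value
        System.out.println(column.getString());

        column.restoreToNull();              // back to SQL NULL
        System.out.println(column.isNull()); // prints: true
    }
}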
use of org.apache.derby.iapi.types.SQLChar in project derby by apache.
the class T_RawStoreFactory method P056.
/**
* Test rollback of partial row update.
* Create a long row with 15 columns on 3 pages (5 columns on each page).
* Update the 1st column on the 2nd page (the 6th column) which causes the
* last column of that page (10th column) to move off the page. Then abort
* and make sure that all the original columns are there and correct.
*
* NOTE: stored length is twice string length + 2
*
* @exception T_Fail Unexpected behaviour from the API
* @exception StandardException Unexpected exception from the implementation
*/
protected void P056(long segment) throws StandardException, T_Fail {
if (!testRollback)
return;
Transaction t = t_util.t_startTransaction();
long cid = t_util.t_addContainer(t, segment, 4096);
ContainerHandle c = t_util.t_openContainer(t, segment, cid, true);
Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
t_util.t_checkEmptyPage(page);
int colSize = 90;
T_RawStoreRow r0 = new T_RawStoreRow(15);
r0.setColumn(0, colSize, REC_001);
r0.setColumn(1, colSize, REC_002);
r0.setColumn(2, colSize, REC_003);
r0.setColumn(3, colSize, REC_004);
r0.setColumn(4, colSize, REC_005);
r0.setColumn(5, colSize, REC_009);
r0.setColumn(6, colSize, REC_010);
r0.setColumn(7, colSize, REC_011);
r0.setColumn(8, colSize, REC_012);
r0.setColumn(9, colSize, REC_013);
r0.setColumn(10, colSize, REC_014);
r0.setColumn(11, colSize, REC_015);
r0.setColumn(12, colSize, REC_016);
r0.setColumn(13, colSize, REC_017);
r0.setColumn(14, colSize, REC_018);
int insertFlag = Page.INSERT_INITIAL;
insertFlag |= Page.INSERT_OVERFLOW;
RecordHandle rh0 = null;
try {
rh0 = t_util.t_insertAtSlot(page, 0, r0, (byte) insertFlag);
} catch (StandardException se) {
throw T_Fail.testFailMsg("insert of long row failed.");
}
if (rh0 == null)
throw T_Fail.testFailMsg("insert of first long row failed.");
else {
REPORT("about to check fetch...");
DataValueDescriptor column = new SQLChar();
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 0, column, false, REC_001, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 1, column, false, REC_002, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 2, column, false, REC_003, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 3, column, false, REC_004, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 4, column, false, REC_005, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 5, column, false, REC_009, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 6, column, false, REC_010, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 7, column, false, REC_011, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 8, column, false, REC_012, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 9, column, false, REC_013, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 10, column, false, REC_014, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 11, column, false, REC_015, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 12, column, false, REC_016, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 13, column, false, REC_017, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 14, column, false, REC_018, colSize);
}
t_util.t_commit(t);
// Update col 5 (the 6th column, the first column on the row's 2nd page), which causes
// the last column of that page (col 9, the 10th column) to move off the page.
c = t_util.t_openContainer(t, segment, cid, true);
page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
T_RawStoreRow updateRow = new T_RawStoreRow(15);
for (int i = 0; i < 15; i++) updateRow.setColumn(i, (String) null);
updateRow.setColumn(5, colSize * 2, REC_009);
FormatableBitSet colList = new FormatableBitSet(15);
colList.set(5);
page.updateAtSlot(0, updateRow.getRow(), colList);
REPORT("about to check fetch after update ...");
DataValueDescriptor column = new SQLChar();
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 0, column, false, REC_001, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 1, column, false, REC_002, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 2, column, false, REC_003, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 3, column, false, REC_004, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 4, column, false, REC_005, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 5, column, false, REC_009, colSize * 2);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 6, column, false, REC_010, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 7, column, false, REC_011, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 8, column, false, REC_012, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 9, column, false, REC_013, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 10, column, false, REC_014, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 11, column, false, REC_015, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 12, column, false, REC_016, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 13, column, false, REC_017, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 14, column, false, REC_018, colSize);
page.unlatch();
t_util.t_abort(t);
REPORT("about to check fetch after abort ...");
c = t_util.t_openContainer(t, segment, cid, false);
page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 0, column, false, REC_001, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 1, column, false, REC_002, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 2, column, false, REC_003, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 3, column, false, REC_004, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 4, column, false, REC_005, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 5, column, false, REC_009, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 6, column, false, REC_010, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 7, column, false, REC_011, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 8, column, false, REC_012, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 9, column, false, REC_013, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 10, column, false, REC_014, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 11, column, false, REC_015, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 12, column, false, REC_016, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 13, column, false, REC_017, colSize);
t_util.t_checkFetchColFromSlot(page, page.FIRST_SLOT_NUMBER, 14, column, false, REC_018, colSize);
page.unlatch();
if (segment != ContainerHandle.TEMPORARY_SEGMENT) {
// cleanup
t_util.t_dropContainer(t, segment, cid);
}
t_util.t_commit(t);
t.close();
PASS("P056: segment = " + segment);
}
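The partial update in P056 works because updateAtSlot only reads the positions whose bits are set in the validColumns bit set; every other entry in the update row is a placeholder. A small hypothetical helper, not part of the test suite, that wraps this idiom:

// Illustrative only: update a single column of a row in place.
private void updateSingleColumn(Page page, int slot, int rowWidth, int colId, int newSize, String value) throws StandardException {
    T_RawStoreRow updateRow = new T_RawStoreRow(rowWidth);
    for (int i = 0; i < rowWidth; i++)
        updateRow.setColumn(i, (String) null); // placeholders, never read
    updateRow.setColumn(colId, newSize, value); // the one column being changed
    FormatableBitSet colList = new FormatableBitSet(rowWidth);
    colList.set(colId); // only this bit is set, so only this column is updated
    page.updateAtSlot(slot, updateRow.getRow(), colList);
}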
use of org.apache.derby.iapi.types.SQLChar in project derby by apache.
the class T_Recovery method R003.
/*
* recover test 3
*/
protected void R003() throws T_Fail, StandardException {
long cid = find(key(3, 1));
if (cid < 0) {
REPORT("R003 not run");
return;
}
int recordCount = (int) find(key(3, 2));
Transaction t = t_util.t_startTransaction();
try {
ContainerHandle c = t_util.t_openContainer(t, 0, cid, false);
Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
t_util.t_checkRecordCount(page, recordCount, recordCount);
// first row should contain (null, REC_003, REC_004)
t_util.t_checkFieldCount(page, 0, 3);
DataValueDescriptor column = new SQLChar();
t_util.t_checkFetchColFromSlot(page, 0, 0, column, false, null);
t_util.t_checkFetchColFromSlot(page, 0, 1, column, false, REC_003);
t_util.t_checkFetchColFromSlot(page, 0, 2, column, false, REC_004);
if (recordCount == 2) {
// second row should contain (REC_005)
t_util.t_checkFieldCount(page, 1, 1);
t_util.t_checkFetchColFromSlot(page, 1, 0, column, false, REC_005);
}
page.unlatch();
} finally {
t_util.t_commit(t);
t.close();
}
PASS("R003: containerId " + cid + " recordCount " + recordCount);
}
use of org.apache.derby.iapi.types.SQLChar in project derby by apache.
the class T_Recovery method S004.
/*
* test 4 - update field
*/
protected void S004() throws T_Fail, StandardException {
Transaction t = t_util.t_startTransaction();
try {
long cid = t_util.t_addContainer(t, 0);
t_util.t_commit(t);
ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
T_RawStoreRow row = new T_RawStoreRow(5);
row.setColumn(0, (String) null);
row.setColumn(1, REC_004);
row.setColumn(2, (String) null);
row.setColumn(3, REC_005);
row.setColumn(4, REC_005);
RecordHandle rh = t_util.t_insert(page, row);
DataValueDescriptor col0 = new SQLChar((String) null);
DataValueDescriptor col1 = new SQLChar(REC_001);
DataValueDescriptor col2 = new SQLChar(REC_002);
DataValueDescriptor col3 = new SQLChar((String) null);
if (page.updateFieldAtSlot(page.FIRST_SLOT_NUMBER, 0, col0, null) == null || page.updateFieldAtSlot(page.FIRST_SLOT_NUMBER, 1, col1, null) == null || page.updateFieldAtSlot(page.FIRST_SLOT_NUMBER, 2, col2, null) == null || page.updateFieldAtSlot(page.FIRST_SLOT_NUMBER, 3, col3, null) == null) {
throw T_Fail.testFailMsg("Failed to update field");
}
page.unlatch();
REPORT("setup S004: containerId " + cid);
register(key(4, 1), cid);
} finally {
t_util.t_commit(t);
t.close();
}
}
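S004 builds its replacement field values directly from the SQLChar constructors: new SQLChar(REC_001) produces a CHAR value, while new SQLChar((String) null) produces a SQL NULL, which is how columns 0 and 3 end up NULL after the updates above. A tiny standalone illustration of the distinction (hypothetical, outside the test; the literal string is arbitrary):

DataValueDescriptor nonNullCol = new SQLChar("some value"); // holds a CHAR value
DataValueDescriptor nullCol = new SQLChar((String) null);   // represents SQL NULL
System.out.println(nonNullCol.isNull()); // prints: false
System.out.println(nullCol.isNull());    // prints: true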