use of org.apache.derby.iapi.services.io.FormatableBitSet in project derby by apache.
the class commit_method method XATest_4.
/**
* Test aborts of prepared two phase commit xa transaction.
* <p>
* @exception StandardException Standard exception policy.
*/
void XATest_4(commit_method commit_method) throws StandardException, T_Fail {
REPORT("(XATest_4) starting");
ContextManager cm = getContextService().getCurrentContextManager();
// ABORT AN IDLE TRANSACTION.
// Start a global transaction
XATransactionController xa_tc = (XATransactionController) store.startXATransaction(cm, // fake format id
42, global_id, branch_id);
// This prepare will commit the idle transaction.
if (xa_tc.xa_prepare() != XATransactionController.XA_RDONLY) {
throw T_Fail.testFailMsg("prepare of idle xact did not return XA_RDONLY.");
}
// nothing to do, just commit the now-idle xact.
// after an XA_RDONLY prepare we can continue to use the transaction.
commit_method.commit(true, 42, null, null, xa_tc);
// should not be able to find this global xact, it has been committed
if (((XAResourceManager) store.getXAResourceManager()).find(new XAXactId(42, global_id, branch_id)) != null) {
throw T_Fail.testFailMsg("A XA_RDONLY prepare-committed xact should not be findable.");
}
// done with this xact.
xa_tc.destroy();
// ABORT AN UPDATE ONLY TRANSACTION.
// Start a global transaction
xa_tc = (XATransactionController) store.startXATransaction(cm, // fake format id
42, global_id, branch_id);
// Create a heap conglomerate.
T_AccessRow template_row = new T_AccessRow(1);
// create a heap conglomerate
long conglomid = xa_tc.createConglomerate(
"heap",
template_row.getRowArray(), // 1 column template.
null, // column sort order - not required for heap
null, // default collation
null, // default properties
TransactionController.IS_DEFAULT); // not temporary
// Open a scan on the conglomerate, to verify the create happened,
// and to show that the same openScan done after abort fails.
ScanController scan1 = xa_tc.openScan(
conglomid,
false, // don't hold
0, // not for update
TransactionController.MODE_RECORD,
TransactionController.ISOLATION_SERIALIZABLE,
(FormatableBitSet) null, // all columns, all as objects
null, // start position - first row in conglomerate
0, // unused if start position is null.
null, // qualifier - accept all rows
null, // stop position - last row in conglomerate
0); // unused if stop position is null.
scan1.next();
scan1.close();
// prepare the update xact.
if (xa_tc.xa_prepare() != XATransactionController.XA_OK) {
throw T_Fail.testFailMsg("prepare of update xact did not return XA_OK.");
}
try {
// Open a scan on the conglomerate.
scan1 = xa_tc.openScan(
conglomid,
false, // don't hold
0, // not for update
TransactionController.MODE_RECORD,
TransactionController.ISOLATION_SERIALIZABLE,
(FormatableBitSet) null, // all columns, all as objects
null, // start position - first row in conglomerate
0, // unused if start position is null.
null, // qualifier - accept all rows
null, // stop position - last row in conglomerate
0); // unused if stop position is null.
scan1.next();
scan1.close();
throw T_Fail.testFailMsg("Should not be able to do anything on xact after prepare.");
} catch (StandardException se) {
// expected exception, fall through.
}
// roll back the prepared update xact.
commit_method.rollback(42, global_id, branch_id, xa_tc);
// commit the now-idle transaction - using onePhase optimization.
commit_method.commit(true, 42, null, null, xa_tc);
// should not be able to find this global xact, it has been committed
if (((XAResourceManager) store.getXAResourceManager()).find(new XAXactId(42, global_id, branch_id)) != null) {
throw T_Fail.testFailMsg("A xa_rollbacked xact should not be findable.");
}
// done with this xact.
xa_tc.destroy();
// Start a global transaction
xa_tc = (XATransactionController) store.startXATransaction(cm, // fake format id
42, global_id, branch_id);
try {
// Open a scan on the conglomerate.
scan1 = xa_tc.openScan(
conglomid,
false, // don't hold
0, // not for update
TransactionController.MODE_RECORD,
TransactionController.ISOLATION_SERIALIZABLE,
(FormatableBitSet) null, // all columns, all as objects
null, // start position - first row in conglomerate
0, // unused if start position is null.
null, // qualifier - accept all rows
null, // stop position - last row in conglomerate
0); // unused if stop position is null.
scan1.next();
scan1.close();
throw T_Fail.testFailMsg("Should not be able to open conglom, the create was aborted.");
} catch (StandardException se) {
// expected exception, fall through.
}
xa_tc.destroy();
// ABORT A READ ONLY TRANSACTION.
// Start a global transaction
xa_tc = (XATransactionController) store.startXATransaction(cm, // fake format id
42, global_id, branch_id);
// Create a heap conglomerate.
template_row = new T_AccessRow(1);
// create a heap conglomerate
conglomid = xa_tc.createConglomerate(
"heap",
template_row.getRowArray(), // 1 column template.
null, // column sort order - not required for heap
null, // default collation
null, // default properties
TransactionController.IS_DEFAULT); // not temporary
commit_method.commit(true, 42, global_id, branch_id, xa_tc);
xa_tc.destroy();
// Start a global transaction
xa_tc = (XATransactionController) store.startXATransaction(cm, // fake format id
42, global_id, branch_id);
// Open a scan on the conglomerate.
scan1 = xa_tc.openScan(
conglomid,
false, // don't hold
0, // not for update
TransactionController.MODE_RECORD,
TransactionController.ISOLATION_SERIALIZABLE,
(FormatableBitSet) null, // all columns, all as objects
null, // start position - first row in conglomerate
0, // unused if start position is null.
null, // qualifier - accept all rows
null, // stop position - last row in conglomerate
0); // unused if stop position is null.
scan1.next();
scan1.close();
// This prepare of the read-only xact will return XA_RDONLY and commit it.
if (xa_tc.xa_prepare() != XATransactionController.XA_RDONLY) {
throw T_Fail.testFailMsg("prepare of idle xact did not return XA_RDONLY.");
}
// commit an idle transaction - using onePhase optimization.
commit_method.commit(true, 42, null, null, xa_tc);
// should not be able to find this global xact, it has been committed
if (((XAResourceManager) store.getXAResourceManager()).find(new XAXactId(42, global_id, branch_id)) != null) {
throw T_Fail.testFailMsg("A XA_RDONLY prepare-committed xact should not be findable.");
}
// done with this xact.
xa_tc.destroy();
REPORT("(XATest_5) finishing");
}
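For context on the two phase path: once an update xact has been prepared with XA_OK, the second phase is normally driven through the store's XAResourceManager, addressing the transaction by its Xid; the commit_method helper used in this test presumably wraps calls of this shape. A minimal sketch, reusing the store, cm, global_id and branch_id names from XATest_4 above (only find() appears in the test itself, so the commit(cm, xid, onePhase) call shape is an assumption):
XAResourceManager rm = (XAResourceManager) store.getXAResourceManager();
XAXactId xid = new XAXactId(42, global_id, branch_id);
if (rm.find(xid) != null) {
// onePhase == false: drive the second phase of two phase commit
rm.commit(cm, xid, false);
}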
use of org.apache.derby.iapi.services.io.FormatableBitSet in project derby by apache.
the class T_CreateConglomRet method t_006.
/**
* Test unimplemented interfaces.
*
* The following ScanController and ConglomerateController interfaces are not supported by the
* btree implementation, because row locations are not returned outside
* the interface. At some point we may package a key as a row location
* but that does not really give any more functionality than using scan
* to find your key:
* ScanController.fetchLocation()
* ScanController.newRowLocationTemplate()
* ScanController.replace()
* ConglomerateController.delete()
* ConglomerateController.fetch()
* ConglomerateController.insertAndFetchLocation()
* ConglomerateController.newRowLocationTemplate()
* ConglomerateController.replace()
*
* @exception StandardException Standard exception policy.
* @exception T_Fail Throws T_Fail on any test failure.
*/
protected boolean t_006(TransactionController tc) throws StandardException, T_Fail {
REPORT("Starting t_006");
T_CreateConglomRet create_ret = new T_CreateConglomRet();
createCongloms(tc, 2, false, false, 0, create_ret);
// Open the base conglomerate.
ConglomerateController base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Open the index conglomerate.
ConglomerateController index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Create a base row template.
DataValueDescriptor[] base_row = TemplateRow.newU8Row(2);
RowLocation base_rowloc = base_cc.newRowLocationTemplate();
T_SecondaryIndexRow index_row_from_base_row = new T_SecondaryIndexRow();
index_row_from_base_row.init(base_row, base_rowloc, 3);
((SQLLongint) base_row[0]).setValue(1);
// Create a row.
T_SecondaryIndexRow index_row = new T_SecondaryIndexRow();
index_row.init(TemplateRow.newU8Row(2), base_cc.newRowLocationTemplate(), 3);
// test: make sure scan position is right after inserts before scan
// no split case. In this case the slot position of the current
// position should change, but the code will keep a record handle
// and not need to reposition by key.
// before keys: 1000, 3000
// last key gotten from scan: 0
// insert keys:1-900
// next key from scan should be: 5
// insert 1000
((SQLLongint) base_row[1]).setValue(1000);
base_cc.insertAndFetchLocation(base_row, base_rowloc);
if (index_cc.insert(index_row_from_base_row.getRow()) != 0) {
throw T_Fail.testFailMsg("insert failed");
}
// try each of the unsupported interfaces:
try {
index_cc.delete(null);
return (FAIL("t_006: ConglomerateController.delete() succeeded."));
} catch (StandardException e) {
}
try {
if (!index_cc.fetch(null, RowUtil.EMPTY_ROW, (FormatableBitSet) null)) {
return (FAIL("t_006: ConglomerateController.fetch() bad ret."));
}
return (FAIL("t_006: ConglomerateController.fetch() succeeded."));
} catch (StandardException e) {
}
try {
index_cc.insertAndFetchLocation((DataValueDescriptor[]) null, null);
return (FAIL("t_006: ConglomerateController.insertAndFetchLocation() succeeded."));
} catch (StandardException e) {
}
try {
RowLocation rowloc = index_cc.newRowLocationTemplate();
return (FAIL("t_006: ConglomerateController.newRowLocationTemplate() succeeded."));
} catch (StandardException e) {
}
try {
index_cc.replace(null, null, null);
return (FAIL("t_006: ConglomerateController.replace() succeeded."));
} catch (StandardException e) {
}
index_cc.close();
// open a new scan
ScanController scan = tc.openScan(create_ret.index_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, null, ScanController.NA, null, null, ScanController.NA);
int numrows = 0;
while (scan.next()) {
numrows++;
scan.fetch(index_row_from_base_row.getRow());
try {
scan.fetchLocation(null);
return (FAIL("t_006: scan.fetchLocation() succeeded"));
} catch (StandardException e) {
}
try {
RowLocation rowloc = scan.newRowLocationTemplate();
return (FAIL("t_006: scan.newRowLocationTemplate() succeeded"));
} catch (StandardException e) {
}
try {
scan.replace(index_row_from_base_row.getRow(), (FormatableBitSet) null);
return (FAIL("t_006: scan.replace() succeeded"));
} catch (StandardException e) {
}
}
// make sure that scan.next() continues to return false
if (scan.next())
return (FAIL("t_006: scan.next() returned true after false."));
scan.close();
if (numrows != 1) {
return (FAIL("(t_scan) wrong number of rows. Expected " + "1 row, but got " + numrows + "rows."));
}
REPORT("Ending t_006");
return (true);
}
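The scan opened in t_006 passes null start and stop keys with ScanController.NA, so it walks the whole index. As a contrast, here is a minimal, illustrative sketch of a keyed scan that visits only rows whose first key column equals 1 (the value inserted above); it reuses tc, create_ret and index_row_from_base_row from t_006:
DataValueDescriptor[] key = new DataValueDescriptor[] { new SQLLongint(1) };
ScanController keyed_scan = tc.openScan(
create_ret.index_conglomid,
false, // don't hold
0, // not for update
TransactionController.MODE_RECORD,
TransactionController.ISOLATION_SERIALIZABLE,
(FormatableBitSet) null, // all columns
key, ScanController.GE, // start at the first row >= {1}
null, // no qualifier
key, ScanController.GT); // stop before the first row > {1}
while (keyed_scan.next()) {
keyed_scan.fetch(index_row_from_base_row.getRow());
}
keyed_scan.close();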
use of org.apache.derby.iapi.services.io.FormatableBitSet in project derby by apache.
the class T_CreateConglomRet method t_001.
/**
* Test BTreeController.insert()
* <p>
* Just verify that insert code works for a secondary index. Just call
* the interface and make sure the row got there.
*
* @exception StandardException Standard exception policy.
* @exception T_Fail Throws T_Fail on any test failure.
*/
protected boolean t_001(TransactionController tc) throws StandardException, T_Fail {
REPORT("Starting t_001");
T_CreateConglomRet create_ret = new T_CreateConglomRet();
createCongloms(tc, 2, false, false, 0, create_ret);
// Open the base table
ConglomerateController base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Open the secondary index
ConglomerateController index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
if (!(index_cc instanceof B2IController)) {
throw T_Fail.testFailMsg("openConglomerate returned wrong type");
}
if (!index_cc.isKeyed()) {
throw T_Fail.testFailMsg("btree is not keyed.");
}
index_cc.checkConsistency();
// Create a row and insert into base table, remembering its location.
DataValueDescriptor[] r1 = TemplateRow.newU8Row(2);
T_SecondaryIndexRow index_row1 = new T_SecondaryIndexRow();
RowLocation base_rowloc1 = base_cc.newRowLocationTemplate();
index_row1.init(r1, base_rowloc1, 3);
((SQLLongint) r1[0]).setValue(2);
((SQLLongint) r1[1]).setValue(2);
// Insert the row into the base table and remember its location.
base_cc.insertAndFetchLocation(r1, base_rowloc1);
// Insert the row into the secondary index.
if (index_cc.insert(index_row1.getRow()) != 0)
throw T_Fail.testFailMsg("insert failed");
// Make sure we read back the value we wrote from base and index table.
DataValueDescriptor[] r2 = TemplateRow.newU8Row(2);
T_SecondaryIndexRow index_row2 = new T_SecondaryIndexRow();
RowLocation base_rowloc2 = base_cc.newRowLocationTemplate();
index_row2.init(r2, base_rowloc2, 3);
// base table check:
if (!base_cc.fetch(base_rowloc1, r2, (FormatableBitSet) null)) {
return (FAIL("(t_001) insert into base table failed"));
}
if (((SQLLongint) r2[0]).getLong() != 2 || ((SQLLongint) r2[1]).getLong() != 2) {
return (FAIL("(t_001) insert into base table failed"));
}
// index check - there should be only one record:
ScanController scan = tc.openScan(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, null, ScanController.NA, null, null, ScanController.NA);
scan.next();
scan.fetch(index_row2.getRow());
// verify that isCurrentPositionDeleted() works.
if (scan.isCurrentPositionDeleted())
throw T_Fail.testFailMsg("current row should not be deleted\n");
if (!scan.doesCurrentPositionQualify())
throw T_Fail.testFailMsg("current row should still qualify\n");
scan.delete();
if (!scan.isCurrentPositionDeleted())
throw T_Fail.testFailMsg("current row should be deleted\n");
if (scan.doesCurrentPositionQualify())
throw T_Fail.testFailMsg("deleted row should not qualify\n");
// just call the debugging code to make sure it doesn't fail.
REPORT("Calling scan.tostring(): " + scan);
if (scan.next() || ((SQLLongint) (index_row2.getRow()[0])).getLong() != 2 || ((SQLLongint) (index_row2.getRow()[1])).getLong() != 2) {
return (FAIL("(t_001) insert into index failed in base cols"));
}
// test the scaninfo interface.
ScanInfo scan_info = scan.getScanInfo();
Properties prop = scan_info.getAllScanInfo(null);
if (Integer.parseInt(prop.getProperty(MessageService.getTextMessage(SQLState.STORE_RTS_NUM_PAGES_VISITED))) != 1) {
throw T_Fail.testFailMsg("(scanInfo) wrong numPagesVisited. Expected 1, got " + Integer.parseInt(prop.getProperty(MessageService.getTextMessage(SQLState.STORE_RTS_NUM_PAGES_VISITED))));
}
if (Integer.parseInt(prop.getProperty(MessageService.getTextMessage(SQLState.STORE_RTS_NUM_ROWS_VISITED))) != 1) {
throw T_Fail.testFailMsg("(scanInfo) wrong numRowsVisited. Expected 1, got " + Integer.parseInt(prop.getProperty(MessageService.getTextMessage(SQLState.STORE_RTS_NUM_ROWS_VISITED))));
}
if (Integer.parseInt(prop.getProperty(MessageService.getTextMessage(SQLState.STORE_RTS_NUM_ROWS_QUALIFIED))) != 1) {
throw T_Fail.testFailMsg("(scanInfo) wrong numRowsQualified. Expected 1, got " + Integer.parseInt(prop.getProperty(MessageService.getTextMessage(SQLState.STORE_RTS_NUM_ROWS_QUALIFIED))));
}
int compare_result = base_rowloc1.compare(base_rowloc2);
if (compare_result != 0) {
return (FAIL("(t_001) insert into index failed in recordhandle.\n" + "\texpected RecordHandle = " + base_rowloc1 + "\n" + "\tgot RecordHandle = " + base_rowloc2 + "\tcompare result = " + compare_result));
}
index_cc.checkConsistency();
// Close the conglomerates.
base_cc.close();
index_cc.close();
try {
base_cc.insert(r1);
return (FAIL("(t_001) insert on closed conglomerate worked"));
} catch (StandardException e) {
// e.printStackTrace();
}
try {
if (index_cc.insert(r1) != 0)
throw T_Fail.testFailMsg("insert failed");
return (FAIL("(t_001) insert on closed conglomerate worked"));
} catch (StandardException e) {
// e.printStackTrace();
}
tc.commit();
REPORT("Ending t_001");
return true;
}
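The base table check in t_001 passes (FormatableBitSet) null to base_cc.fetch, which copies every column into r2. To read back only the second column of the two-column base row, a validColumns bit set can be supplied instead. A minimal sketch, reusing base_cc, base_rowloc1 and TemplateRow from t_001 (the conglomerate would have to still be open for this to run):
FormatableBitSet second_col_only = new FormatableBitSet(2);
second_col_only.set(1); // ask for column 1 only
DataValueDescriptor[] partial_row = TemplateRow.newU8Row(2);
if (!base_cc.fetch(base_rowloc1, partial_row, second_col_only)) {
throw T_Fail.testFailMsg("row disappeared from base table");
}
// only partial_row[1] has been filled in by the fetch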
use of org.apache.derby.iapi.services.io.FormatableBitSet in project derby by apache.
the class T_CreateConglomRet method t_015.
/**
* Test latch release at critical times during row level locking.
* <p>
* Use trace points to force errors in split at critical points:
* leaf_split_abort{1,2,3,4}
*
* @exception StandardException Standard exception policy.
* @exception T_Fail Throws T_Fail on any test failure.
*/
protected boolean t_015(TransactionController tc) throws StandardException, T_Fail {
ScanController scan = null;
// SanityManager.DEBUG_SET("LockTrace");
REPORT("Starting t_015");
T_CreateConglomRet create_ret = new T_CreateConglomRet();
// Create the btree so that it only allows 2 rows per page.
createCongloms(tc, 2, false, false, 2, create_ret);
// Open the base table
ConglomerateController base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Open the secondary index
ConglomerateController index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
if (!(index_cc instanceof B2IController)) {
throw T_Fail.testFailMsg("openConglomerate returned wrong type");
}
index_cc.checkConsistency();
// Create a row and insert into base table, remembering its location.
DataValueDescriptor[] r1 = TemplateRow.newU8Row(2);
T_SecondaryIndexRow index_row1 = new T_SecondaryIndexRow();
RowLocation base_rowloc1 = base_cc.newRowLocationTemplate();
index_row1.init(r1, base_rowloc1, 3);
// Commit the create of the tables so that the following aborts don't
// undo that work.
tc.commit();
// Now load up the table with multiple pages of data.
// Open the base table
base_cc = tc.openConglomerate(create_ret.base_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// Open the secondary index
index_cc = tc.openConglomerate(create_ret.index_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
// now insert enough rows to cause failure
for (int i = 100; i > 0; i -= 2) {
((SQLLongint) r1[0]).setValue(2);
((SQLLongint) r1[1]).setValue(i);
// Insert the row into the base table; remember its location.
base_cc.insertAndFetchLocation(r1, base_rowloc1);
// Insert the row into the secondary index.
if (index_cc.insert(index_row1.getRow()) != 0) {
throw T_Fail.testFailMsg("insert failed");
}
}
// Now try simulated lock wait/latch release paths through the code.
String[] latch_debug_strings = { "B2iRowLocking3_1_lockScanRow1", "B2iRowLocking3_2_lockScanRow1", "BTreeScan_positionAtStartPosition1", // "BTreeScan_reposition1",
"BTreeScan_fetchNextGroup1" };
for (int errs = 0; errs < latch_debug_strings.length; errs++) {
REPORT("Doing latch release tests: " + latch_debug_strings[errs]);
// latch release path through the code.
if (SanityManager.DEBUG)
SanityManager.DEBUG_SET(latch_debug_strings[errs]);
// Just scan the rows and make sure you see them all, mostly just
// a test to make sure no errors are thrown by the latch release
// code paths.
scan = tc.openScan(create_ret.index_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE, (FormatableBitSet) null, null, ScanController.NA, null, null, ScanController.NA);
int row_count = 0;
while (scan.next()) {
row_count++;
}
scan.close();
if (row_count != 50)
throw T_Fail.testFailMsg("wrong scan count = " + row_count);
}
tc.abort();
REPORT("Ending t_015");
return true;
}
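A trace point set with SanityManager.DEBUG_SET stays in effect until it is cleared, so a variant of the loop in t_015 that must not leave the simulated latch-release path enabled for later tests could clear each flag once its scan has finished. A minimal sketch of the extra statement, using the same loop variables (DEBUG_CLEAR is the standard counterpart to DEBUG_SET):
if (SanityManager.DEBUG)
SanityManager.DEBUG_CLEAR(latch_debug_strings[errs]);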
use of org.apache.derby.iapi.services.io.FormatableBitSet in project derby by apache.
the class StreamFileContainer method load.
/**
* load data into this container.
* <p>
* populate the stream container with data in the rowSource
* <p>
*
* @param rowSource The row source to get rows to load into this container.
*
* @exception StandardException Standard exception policy.
*/
public void load(RowSource rowSource) throws StandardException {
// use this output stream to buffer rows before inserting into file.
out = new DynamicByteArrayOutputStream(bufferSize);
logicalDataOut = new FormatIdOutputStream(out);
boolean encrypted = dataFactory.databaseEncrypted();
// if the database is encrypted, reserve getEncryptionBlockSize() - 1 bytes of padding up front
// so the data can be padded later in case its length is not dataFactory.getEncryptionBlockSize() aligned.
if (encrypted) {
if (zeroBytes == null)
zeroBytes = new byte[dataFactory.getEncryptionBlockSize() - 1];
out.write(zeroBytes, 0, dataFactory.getEncryptionBlockSize() - 1);
}
try {
fileOut = privGetOutputStream(file);
FormatableBitSet validColumns = rowSource.getValidColumns();
Object[] row = rowSource.getNextRowFromRowSource();
int numberFields = 0;
if (validColumns != null) {
for (int i = validColumns.getLength() - 1; i >= 0; i--) {
if (validColumns.isSet(i)) {
numberFields = i + 1;
break;
}
}
} else {
numberFields = row.length;
}
// make the record header to have 0 record id
recordHeader = new StoredRecordHeader(0, numberFields);
// write the record header once for all the rows, directly to the
// beginning of the file.
int rhLen = recordHeader.write(out);
int validColumnsSize = validColumns == null ? 0 : validColumns.getLength();
while (row != null) {
int arrayPosition = -1;
for (int i = 0; i < numberFields; i++) {
// write each column out
if (validColumns == null) {
arrayPosition++;
Object column = row[arrayPosition];
writeColumn(column);
} else {
if (validColumnsSize > i && validColumns.isSet(i)) {
arrayPosition++;
Object column = row[arrayPosition];
writeColumn(column);
} else {
// it is a non-existent column
writeColumn(null);
}
}
// flush the buffer to the file if it is full, or if less than MIN_BUFFER_SIZE remains free in the buffer
if ((out.getUsed() >= bufferSize) || ((bufferSize - out.getUsed()) < MIN_BUFFER_SIZE)) {
writeToFile();
}
}
// get the next row and its valid columns from the rowSource
row = rowSource.getNextRowFromRowSource();
}
// flush whatever is left in the buffer; for an encrypted database only flush if more than the
// dataFactory.getEncryptionBlockSize() - 1 reserved padding bytes are in use
if (encrypted) {
if (out.getUsed() > (dataFactory.getEncryptionBlockSize() - 1))
writeToFile();
} else if (out.getUsed() > 0) {
writeToFile();
}
} catch (IOException ioe) {
// handle IO error...
throw StandardException.newException(SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
} finally {
close();
}
}
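The load() code above derives numberFields by scanning backwards from validColumns.getLength() for the highest set bit. FormatableBitSet can also walk its set bits directly via anySetBit() and anySetBit(int beyondBit); a minimal sketch of the same computation, reusing the validColumns name from load():
int highestSetBit = -1;
for (int bit = validColumns.anySetBit(); bit != -1; bit = validColumns.anySetBit(bit)) {
highestSetBit = bit;
}
int numberFields = (highestSetBit == -1) ? 0 : highestSetBit + 1;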