
Example 91 with StandardException

use of org.apache.derby.shared.common.error.StandardException in project derby by apache.

the class RawStore method crashOnDebugFlag.

/**
 * When the input debug flag is set, an exception
 * is thrown when running in debug mode.
 */
private void crashOnDebugFlag(String debugFlag, boolean reEncrypt) throws StandardException {
    if (SanityManager.DEBUG) {
        // exception to simulate error cases.
        if (SanityManager.DEBUG_ON(debugFlag)) {
            StandardException se = StandardException.newException(SQLState.DATABASE_ENCRYPTION_FAILED, debugFlag);
            markCorrupt(se);
            throw se;
        }
    }
}
Also used : StandardException(org.apache.derby.shared.common.error.StandardException)
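
The guard above only fires in sanity (debug) builds, and only when the named flag has been switched on, typically through the derby.debug.true property. The following is a minimal sketch of the same crash-point pattern as a standalone helper; the class and method names are invented for illustration, and the import paths are assumed to follow the same shared/common layout as StandardException.

import org.apache.derby.shared.common.error.StandardException;
import org.apache.derby.shared.common.reference.SQLState;
import org.apache.derby.shared.common.sanity.SanityManager;

final class DebugCrashPoint {

    private DebugCrashPoint() { }

    /**
     * Hypothetical helper mirroring crashOnDebugFlag(): simulate a failure at a
     * named crash point, but only in sanity builds with the flag switched on.
     */
    static void simulateFailureIfFlagSet(String debugFlag) throws StandardException {
        if (SanityManager.DEBUG) {                    // false in insane (production) builds
            if (SanityManager.DEBUG_ON(debugFlag)) {  // flag enabled, e.g. via derby.debug.true
                throw StandardException.newException(
                        SQLState.DATABASE_ENCRYPTION_FAILED, debugFlag);
            }
        }
    }
}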

Example 92 with StandardException

use of org.apache.derby.shared.common.error.StandardException in project derby by apache.

the class HeapPostCommit method performWork.

/**
 * Perform the work described by this post-commit work item.
 * <p>
 * In this implementation the only work that can be executed by this
 * post commit processor is this class itself.
 * <p>
 *
 * @return Returns Serviceable.DONE when work has completed, or
 *         returns Serviceable.REQUEUE if work needs to be requeued.
 *
 * @param contextMgr the context manager started by the post commit daemon
 *
 * @exception  StandardException  Standard exception policy.
 */
public int performWork(ContextManager contextMgr) throws StandardException {
    TransactionManager tc = (TransactionManager) this.access_factory.getAndNameTransaction(contextMgr, AccessFactoryGlobals.SYS_TRANS_NAME);
    TransactionManager internal_xact = tc.getInternalTransaction();
    // only requeue if work was not completed in this try.
    boolean requeue_work = false;
    HeapController heapcontroller;
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON("verbose_heap_post_commit"))
            SanityManager.DEBUG_PRINT("HeapPostCommit", "starting internal xact\n");
    }
    try {
        // This call will attempt to open the heap table locked with
        // table level IX mode, preparing to do record level locked space
        // reclamation.
        // 
        // The call will either succeed immediately, or throw an exception
        // which could mean the container does not exist or that the lock
        // could not be granted immediately.
        // Reversed the fix for 4255:
        // page reclamation is done asynchronously by the rawstore daemon;
        // it is not good to WAIT FOR LOCKS, as that can freeze the daemon.
        // If we cannot get the lock, this reclamation request will be
        // requeued.
        // If the container does not exist, an exception is thrown, which the
        // code handles the same way as heap.open failing when trying to
        // open a dropped container.
        // DERBY-6774: changed to use openByContainerKey, which ensures
        // that the background thread will have a lock on the table before
        // accessing and possibly loading the conglomerate cache.  This
        // ensures it waits for in-progress ALTER TABLE calls before
        // loading the conglomerate cache.
        heapcontroller = (HeapController) Heap.openByContainerKey(
            page_key.getContainerId(),
            internal_xact,
            internal_xact.getRawStoreXact(),
            false,
            ContainerHandle.MODE_FORUPDATE | ContainerHandle.MODE_LOCK_NOWAIT,
            TransactionController.MODE_RECORD,
            internal_xact.getRawStoreXact().newLockingPolicy(
                LockingPolicy.MODE_RECORD,
                TransactionController.ISOLATION_REPEATABLE_READ,
                true),
            null,
            (DynamicCompiledOpenConglomInfo) null);
        // We got a table intent lock, all deleted rows we encounter can
        // be reclaimed, once an "X" row lock is obtained on them.
        // Process all the rows on the page while holding the latch.
        purgeCommittedDeletes(heapcontroller, this.page_key.getPageNumber());
    } catch (StandardException se) {
        // work is requeued.
        if (se.isLockTimeoutOrDeadlock()) {
            requeue_work = true;
        }
    // Do not close the controller because that will unlatch the
    // page.  Let the commit and destroy release the latch and
    // close the controller.
    // heapcontroller.close();
    }
    // It is ok to not sync this post work.  If no subsequent log record
    // is sync'd to disk then it is ok that this transaction not make
    // it to the database.  If any subsequent transaction is sync'd to
    // the log file, then this transaction will be sync'd as part of that
    // work.
    internal_xact.commitNoSync(Transaction.RELEASE_LOCKS);
    internal_xact.destroy();
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON("verbose_heap_post_commit")) {
            if (requeue_work)
                SanityManager.DEBUG_PRINT("HeapPostCommit", "requeueing on page num = " + this.page_key.getPageNumber());
        }
    }
    return (requeue_work ? Serviceable.REQUEUE : Serviceable.DONE);
}
Also used : StandardException(org.apache.derby.shared.common.error.StandardException) TransactionManager(org.apache.derby.iapi.store.access.conglomerate.TransactionManager)
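
The heart of this example is the catch block: a lock timeout or deadlock on the NOWAIT open means the space-reclamation attempt should simply be retried later, while any other failure (for instance, the container has been dropped) is treated as completed work. Below is a minimal, hypothetical sketch of that decision, assuming the Serviceable interface from org.apache.derby.iapi.services.daemon; the helper class is invented for illustration.

import org.apache.derby.iapi.services.daemon.Serviceable;
import org.apache.derby.shared.common.error.StandardException;

final class PostCommitStatus {

    private PostCommitStatus() { }

    /** Hypothetical helper mirroring the catch block in performWork(). */
    static int statusFor(StandardException se) {
        // Could not get the table lock right now: hand the work back to the daemon.
        if (se.isLockTimeoutOrDeadlock()) {
            return Serviceable.REQUEUE;
        }
        // Anything else (e.g. the container no longer exists): nothing left to do.
        return Serviceable.DONE;
    }
}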

Example 93 with StandardException

use of org.apache.derby.shared.common.error.StandardException in project derby by apache.

the class T_AccessFactory method tempTest.

// Test temporary conglomerates.
protected boolean tempTest(TransactionController tc) throws StandardException, T_Fail {
    REPORT("(tempTest) starting");
    // Create some conglomerates, some temporary, some not.
    // temporary
    long cid5252t = createAConglom(tc, 5252, true);
    // temporary
    long cid87t = createAConglom(tc, 87, true);
    // permanent
    long cid999p = createAConglom(tc, 999, false);
    // temporary
    long cid3t = createAConglom(tc, 3, true);
    // Create an index on two of them
    long cid5252ti = createBtree(tc, cid5252t, true);
    long cid999pi = createBtree(tc, cid999p, false);
    int r;
    // Make sure we can read them.
    if ((r = checkAConglom(tc, (DataValueDescriptor[]) null, cid5252t, 5252)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after create checkAConglom(cid5252t) == " + r);
    }
    if ((r = checkAConglom(tc, getBtreeTemplate(tc, cid5252t), cid5252ti, 5252)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after create checkAConglom(cid5252ti) == " + r);
    }
    if ((r = checkAConglom(tc, (DataValueDescriptor[]) null, cid999p, 999)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after create checkAConglom(cid999p) == " + r);
    }
    if ((r = checkAConglom(tc, getBtreeTemplate(tc, cid999p), cid999pi, 999)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after create checkAConglom(cid999pi) == " + r);
    }
    if ((r = checkAConglom(tc, (DataValueDescriptor[]) null, cid3t, 3)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after create checkAConglom(cid3t) == " + r);
    }
    if ((r = checkAConglom(tc, (DataValueDescriptor[]) null, cid87t, 87)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after create checkAConglom(cid87t) == " + r);
    }
    // Drop two of them.
    tc.dropConglomerate(cid999pi);
    tc.dropConglomerate(cid999p);
    tc.dropConglomerate(cid87t);
    // Try dropping the ones we already dropped - expect exceptions
    try {
        tc.dropConglomerate(cid999p);
        throw T_Fail.testFailMsg("(tempTest) drop of dropped cid999p succeeded");
    } catch (StandardException e) {
    // normal path through the test, ignore the expected error
    }
    try {
        tc.dropConglomerate(cid999pi);
        throw T_Fail.testFailMsg("(tempTest) drop of dropped cid999pi succeeded");
    } catch (StandardException e) {
    // normal path through the test, ignore the expected error
    }
    try {
        tc.dropConglomerate(cid87t);
        throw T_Fail.testFailMsg("(tempTest) drop of dropped cid87t succeeded");
    } catch (StandardException e) {
    // normal path through the test, ignore the expected error
    }
    // Make sure the correct ones remain
    if ((r = checkAConglom(tc, (DataValueDescriptor[]) null, cid5252t, 5252)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after drop checkAConglom(cid5252t) == " + r);
    }
    if ((r = checkAConglom(tc, getBtreeTemplate(tc, cid5252t), cid5252ti, 5252)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after drop checkAConglom(cid5252ti) == " + r);
    }
    if ((r = checkAConglom(tc, (DataValueDescriptor[]) null, cid3t, 3)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after drop checkAConglom(cid3t) == " + r);
    }
    // Make sure commitNoSync gets executed sometimes.
    tc.commitNoSync(TransactionController.RELEASE_LOCKS);
    // After the commit, the temporary conglomerates should still be there (with their rows).
    if ((r = checkAConglom(tc, (DataValueDescriptor[]) null, cid5252t, 5252)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after commit checkAConglom(cid5252t) == " + r);
    }
    if ((r = checkAConglom(tc, getBtreeTemplate(tc, cid5252t), cid5252ti, 5252)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after commit checkAConglom(cid5252ti) == " + r);
    }
    if ((r = checkAConglom(tc, (DataValueDescriptor[]) null, cid3t, 3)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after commit checkAConglom(cid3t) == " + r);
    }
    // open cid3t for update to force its truncation on the abort.
    ScanController sc = tc.openScan(
        cid3t,
        false,                                        // don't hold
        TransactionController.OPENMODE_FORUPDATE,     // for update
        TransactionController.MODE_RECORD,
        TransactionController.ISOLATION_SERIALIZABLE,
        (FormatableBitSet) null,                      // all columns, all as objects
        null,                                         // start position - first row in conglomerate
        0,                                            // unused if start position is null.
        null,                                         // qualifier - accept all rows
        null,                                         // stop position - last row in conglomerate
        0);                                           // unused if stop position is null.
    sc.close();
    tc.abort();
    // After the abort the temporary conglomerates should still be there, but truncated.
    if ((r = checkAConglom(tc, (DataValueDescriptor[]) null, cid5252t, 5252)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after abort checkAConglom(cid5252t) == " + r);
    }
    if ((r = checkAConglom(tc, (DataValueDescriptor[]) null, cid3t, 3)) != 1) {
        throw T_Fail.testFailMsg("(tempTest) after abort checkAConglom(cid3t) == " + r);
    }
    // Due to bug STO84, temp btrees are corrupted after aborts,
    // so the following will cause problems:
    /*
		if ((r = checkAConglom(tc, (DataValueDescriptor[]) null, cid5252ti, 5252)) != 0)
		 	throw T_Fail.testFailMsg("(tempTest) after abort checkAConglom(cid5252ti) == " + r);
		*/
    // Drop index on conglomerate to make sure we can do a drop after truncate.
    tc.dropConglomerate(cid5252ti);
    if (tc.conglomerateExists(cid5252ti))
        throw T_Fail.testFailMsg("(tempTest) after drop cid5252ti still exists");
    // Drop one conglomerate to make sure we can do a drop after truncate.
    tc.dropConglomerate(cid5252t);
    if (tc.conglomerateExists(cid5252t))
        throw T_Fail.testFailMsg("(tempTest) after drop cid5252t still exists");
    // Leave the last one -  raw store is supposed to delete
    // it when the system reboots
    // Success!
    REPORT("(tempTest) succeeded");
    return true;
}
Also used : StandardException(org.apache.derby.shared.common.error.StandardException) SQLLongint(org.apache.derby.iapi.types.SQLLongint)
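
createAConglom, createBtree and checkAConglom are T_AccessFactory test helpers that are not reproduced on this page. As a rough idea of what the heap-creating helper might look like, here is a hypothetical sketch based on the createConglomerate and openConglomerate calls shown in the other examples: it creates a one-column SQLLongint heap, temporary or permanent, and inserts a single key row. The class and method names are invented.

import org.apache.derby.iapi.store.access.ConglomerateController;
import org.apache.derby.iapi.store.access.TransactionController;
import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.iapi.types.SQLLongint;
import org.apache.derby.shared.common.error.StandardException;

final class ConglomSketch {

    private ConglomSketch() { }

    /** Hypothetical stand-in for createAConglom(): a one-column heap with one row. */
    static long createHeapWithOneRow(TransactionController tc, long key, boolean temporary)
            throws StandardException {
        DataValueDescriptor[] template = { new SQLLongint(0L) };
        long conglomId = tc.createConglomerate(
            "heap",                               // access method
            template,                             // one-column row template
            null,                                 // column sort order not required for heap
            null,                                 // default collation
            null,                                 // default properties
            temporary ? TransactionController.IS_TEMPORARY
                      : TransactionController.IS_DEFAULT);
        ConglomerateController cc = tc.openConglomerate(
            conglomId, false,
            TransactionController.OPENMODE_FORUPDATE,
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE);
        template[0] = new SQLLongint(key);        // the single key row
        cc.insert(template);
        cc.close();
        return conglomId;
    }
}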

Example 94 with StandardException

use of org.apache.derby.shared.common.error.StandardException in project derby by apache.

the class T_AccessFactory method nestedUserTransaction.

protected boolean nestedUserTransaction(TransactionController tc) throws StandardException, T_Fail {
    REPORT("(nestedUserTransaction) starting");
    // Test of drop conglomerate with abort by doing the following:
    // create table
    // commit
    // drop table
    // make sure table is not still there.
    // abort
    // make sure table is still there.
    // Create a heap conglomerate.
    long orig_conglomid = tc.createConglomerate(
        "heap",                               // create a heap conglomerate
        new T_AccessRow(1).getRowArray(),     // 1 SQLInteger() column template.
        null,                                 // column sort order not required for heap
        null,                                 // default collation
        null,                                 // default properties
        TransactionController.IS_DEFAULT);    // not temporary
    // Create a temporary heap conglomerate.
    long tmp_conglomid = tc.createConglomerate(
        "heap",                               // create a heap conglomerate
        new T_AccessRow(1).getRowArray(),     // 1 SQLInteger() column template.
        null,                                 // column sort order not required for heap
        null,                                 // default collation
        null,                                 // default properties
        TransactionController.IS_TEMPORARY);  // temporary
    TransactionController current_xact = store.getTransaction(getContextService().getCurrentContextManager());
    // get a nested user transaction
    TransactionController child_tc = tc.startNestedUserTransaction(true, true);
    TransactionController current_xact_after_nest = store.getTransaction(getContextService().getCurrentContextManager());
    if (current_xact_after_nest != current_xact) {
        throw T_Fail.testFailMsg("(nestedUserTransaction) getTransaction() return changed after startNestedUserTransaction()." + "current_xact = " + current_xact + ";current_xact_after_nest = " + current_xact_after_nest);
    }
    T_Fail.T_ASSERT(tc.getLockSpace() == child_tc.getLockSpace(), "getLockSpace() returned different object for child.");
    // the locks of the nested transaction should not conflict, so this
    // open should work.
    ConglomerateController cc = child_tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    // Make sure you can access the temporary conglomerate in the
    // nested transaction.
    ConglomerateController tmp_cc = child_tc.openConglomerate(tmp_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    cc.close();
    tmp_cc.close();
    child_tc.commit();
    child_tc.destroy();
    tc.dropConglomerate(orig_conglomid);
    // trying to double nest a nested transaction should not work.
    child_tc = tc.startNestedUserTransaction(true, true);
    try {
        child_tc.startNestedUserTransaction(true, true);
        throw T_Fail.testFailMsg("(nestedUserTransaction) double nest xact not allowed.");
    } catch (StandardException se) {
    // expected exception, fall through.
    }
    child_tc.commit();
    child_tc.destroy();
    // make sure internal and ntt's work.  Just add a bunch of data to
    // the table causing page allocation.
    String twok_string = new String("0123456789012345");
    for (int i = 0; i < 7; i++) {
        twok_string += twok_string;
    }
    T_AccessRow big_row = new T_AccessRow(2);
    big_row.setCol(1, new SQLChar(twok_string));
    // Create a heap conglomerate.
    orig_conglomid = tc.createConglomerate(
        "heap",                               // create a heap conglomerate
        big_row.getRowArray(),
        null,                                 // column sort order not required for heap
        null,                                 // default collation
        null,                                 // default properties
        TransactionController.IS_DEFAULT);    // not temporary
    child_tc = tc.startNestedUserTransaction(true, true);
    // add 20 pages worth of data, causing allocation
    // the locks of the nested transaction should not conflict, so this
    // open should work.
    cc = child_tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    child_tc.abort();
    child_tc.destroy();
    try {
        // the locks of the nested transaction should not conflict, so this
        // open should work.
        cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
        throw T_Fail.testFailMsg("(nestedUserTransaction) conglom should have been aborted.");
    } catch (StandardException se) {
    // expected exception, fall through.
    }
    tc.commit();
    // same test as above, but this time commit parent xact create to
    // make sure it stays around after the child abort.
    // Create a heap conglomerate.
    orig_conglomid = tc.createConglomerate(
        "heap",                               // create a heap conglomerate
        big_row.getRowArray(),
        null,                                 // column sort order not required for heap
        null,                                 // default collation
        null,                                 // default properties
        TransactionController.IS_DEFAULT);    // not temporary
    tc.commit();
    child_tc = tc.startNestedUserTransaction(true, true);
    // add 20 pages worth of data, causing allocation
    // the locks of the nested transaction should not conflict, so this
    // open should work.
    cc = child_tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    /*
        for (int i = 0; i < 40; i++)
        {
            big_row.setCol(0, new SQLInteger(i));
			cc.insert(big_row.getRowArray());
        }
        */
    child_tc.abort();
    child_tc.destroy();
    try {
        // the locks of the nested transaction should not conflict, so this
        // open should work.
        cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
        cc.close();
    } catch (StandardException se) {
        throw T_Fail.testFailMsg("(nestedUserTransaction) conglom should have not be aborted.");
    }
    // start a read-only nested user transaction.
    child_tc = tc.startNestedUserTransaction(true, true);
    ConglomerateController child_cc = child_tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    try {
        // should not be able to do an update in a read only transaction.
        big_row.setCol(0, new SQLInteger(1042));
        child_cc.insert(big_row.getRowArray());
        throw T_Fail.testFailMsg("(nestedUserTransaction) read only xact does not allow upd.");
    } catch (StandardException se) {
        // expected exception, fall through.
        child_tc.commit();
        child_tc.destroy();
    }
    tc.commit();
    // start an update nested user transaction.
    child_tc = tc.startNestedUserTransaction(false, true);
    child_cc = child_tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
    try {
        // should be able to do an update in an updatable nested transaction.
        big_row.setCol(0, new SQLInteger(1043));
        child_cc.insert(big_row.getRowArray());
    } catch (StandardException se) {
        throw T_Fail.testFailMsg("(nestedUserTransaction) read only xact does not allow upd.");
    }
    // expected exception, fall through.
    child_tc.commit();
    child_tc.destroy();
    tc.commit();
    cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_TABLE, TransactionController.ISOLATION_SERIALIZABLE);
    // start an update nested user transaction.
    child_tc = tc.startNestedUserTransaction(false, true);
    try {
        // the following should time out, since locks are not compatible.
        child_cc = child_tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_SERIALIZABLE);
        throw T_Fail.testFailMsg("(nestedUserTransaction) lock should have timed out.");
    } catch (StandardException se) {
    // expected timeout, fall through.
    }
    // expected exception, fall through.
    child_tc.commit();
    child_tc.destroy();
    tc.commit();
    REPORT("(nestedUserTransaction) finishing");
    return true;
}
Also used : StandardException(org.apache.derby.shared.common.error.StandardException) SQLChar(org.apache.derby.iapi.types.SQLChar) SQLLongint(org.apache.derby.iapi.types.SQLLongint) SQLInteger(org.apache.derby.iapi.types.SQLInteger)
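
The pattern this test exercises over and over is: start a nested user transaction, do some subsidiary work under locks that are compatible with the parent, then commit (or abort) and always destroy the child. Here is a minimal, hypothetical sketch of that pattern, using the same two boolean flags as the read-only startNestedUserTransaction calls above; the helper class and method are invented for illustration.

import org.apache.derby.iapi.store.access.ConglomerateController;
import org.apache.derby.iapi.store.access.TransactionController;
import org.apache.derby.shared.common.error.StandardException;

final class NestedTxnSketch {

    private NestedTxnSketch() { }

    /** Hypothetical helper: read a conglomerate from a read-only nested transaction. */
    static void touchInReadOnlyChild(TransactionController parent, long conglomId)
            throws StandardException {
        // Same flags as the "read only nested user transaction" calls in the test above.
        TransactionController child = parent.startNestedUserTransaction(true, true);
        ConglomerateController cc = child.openConglomerate(
            conglomId, false,
            0,                                        // open read-only
            TransactionController.MODE_RECORD,
            TransactionController.ISOLATION_SERIALIZABLE);
        cc.close();
        // A nested user transaction must be both committed (or aborted) and destroyed.
        child.commit();
        child.destroy();
    }
}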

Example 95 with StandardException

use of org.apache.derby.shared.common.error.StandardException in project derby by apache.

the class T_AccessFactory method readUncommitted.

/**
 * Test critical cases for read uncommitted.
 * <p>
 * test 1 - test heap fetch, delete and replace of row on page which does not exist.
 * test 2 - test heap fetch, delete and replace of row on page where row does not exist.
 *
 * @exception  StandardException  Standard exception policy.
 */
protected boolean readUncommitted(TransactionController tc) throws StandardException, T_Fail {
    REPORT("(readUncommitted)");
    /*
         * TEST 1 - test heap fetch of row on page which does not exist.
         * <p>
         * Do this by inserting a few pages worth of data and then deleting 
         * all the rows, while remembering the rowlocation of one of the pages.
         * You need to at least get to the 2nd page, because the 1st page is
         * never totally reclaimed and deleted by the system in a heap (it has
         * some internal catalog information stored internally in row "0").
         */
    String twok_string = new String("0123456789012345");
    for (int i = 0; i < 7; i++) {
        twok_string += twok_string;
    }
    T_AccessRow big_row = new T_AccessRow(2);
    big_row.setCol(1, new SQLChar(twok_string));
    // Create a heap conglomerate.
    long orig_conglomid = tc.createConglomerate(
        "heap",                               // create a heap conglomerate
        big_row.getRowArray(),
        null,                                 // column sort order not required for heap
        null,                                 // default collation
        null,                                 // default properties
        TransactionController.IS_DEFAULT);    // not temporary
    ConglomerateController cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
    for (int i = 0; i < 10; i++) {
        big_row.setCol(0, new SQLInteger(i));
        cc.insert(big_row.getRowArray());
    }
    cc.close();
    // Open another scan on the conglomerate.
    ScanController base_scan = tc.openScan(
        orig_conglomid,
        true,                                         // hold cursor open across commit
        TransactionController.OPENMODE_FORUPDATE,     // for update
        TransactionController.MODE_RECORD,
        TransactionController.ISOLATION_SERIALIZABLE,
        (FormatableBitSet) null,                      // all columns, all as objects
        null,                                         // start position - first row in conglomerate
        0,                                            // unused if start position is null.
        null,                                         // qualifier - accept all rows
        null,                                         // stop position - last row in conglomerate
        0);                                           // unused if stop position is null.
    // now delete all the rows and remember the row location of the
    // last row.
    RowLocation deleted_page_rowloc = base_scan.newRowLocationTemplate();
    for (int i = 0; i < 10; i++) {
        base_scan.next();
        base_scan.fetchLocation(deleted_page_rowloc);
        base_scan.delete();
        tc.commit();
    }
    base_scan.close();
    tc.commit();
    // at this point the post commit thread should have reclaimed all 5 of the
    // pages.  Open it, at read uncommitted level.
    cc = tc.openConglomerate(orig_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
    // Test heap fetch of row on page which does not exist.
    if (cc.fetch(deleted_page_rowloc, big_row.getRowArray(), null)) {
        throw T_Fail.testFailMsg("(readUncommitted) fetch should ret false for reclaimed page.");
    }
    // Test heap replace of row on page which does not exist.
    FormatableBitSet update_desc = new FormatableBitSet(1);
    if (cc.replace(deleted_page_rowloc, big_row.getRowArray(), update_desc)) {
        throw T_Fail.testFailMsg("(readUncommitted) delete should ret false for reclaimed page.");
    }
    // Test heap fetch (overloaded call) of row on page which does not exist.
    if (cc.fetch(deleted_page_rowloc, big_row.getRowArray(), null, true)) {
        throw T_Fail.testFailMsg("(readUncommitted) fetch should ret false for reclaimed page.");
    }
    // Test heap delete of row on page which does not exist.
    if (cc.delete(deleted_page_rowloc)) {
        throw T_Fail.testFailMsg("(readUncommitted) delete should ret false for reclaimed page.");
    }
    cc.close();
    /*
         * TEST 2 - test heap fetch of row on page where row does not exist.
         * <p>
         * Do this by inserting enough rows to put 1 row on the 2nd page.
         * Then delete this one row, which will queue a post commit to reclaim
         * the row and page.  Then insert one more row on the same page in
         * the same xact.  Now commit the xact, which will cause post commit
         * to run which will reclaim the row but not the page.  Then try and
         * fetch the row which was deleted.
         */
    // string column will be 1500 bytes, allowing 2 rows per page to fit.
    SQLChar stringcol = new SQLChar();
    stringcol.setValue(T_AccessFactory.repeatString("012345678901234", 100));
    big_row.setCol(1, stringcol);
    // Create a heap conglomerate.
    orig_conglomid = tc.createConglomerate(
        "heap",                               // create a heap conglomerate
        big_row.getRowArray(),
        null,                                 // column sort order not required for heap
        null,                                 // default collation
        null,                                 // default properties
        TransactionController.IS_DEFAULT);    // not temporary
    cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
    for (int i = 0; i < 3; i++) {
        big_row.setCol(0, new SQLInteger(i));
        cc.insert(big_row.getRowArray());
    }
    // Open another scan on the conglomerate.
    base_scan = tc.openScan(
        orig_conglomid,
        true,                                         // hold cursor open across commit
        TransactionController.OPENMODE_FORUPDATE,     // for update
        TransactionController.MODE_RECORD,
        TransactionController.ISOLATION_SERIALIZABLE,
        (FormatableBitSet) null,                      // all columns, all as objects
        null,                                         // start position - first row in conglomerate
        0,                                            // unused if start position is null.
        null,                                         // qualifier - accept all rows
        null,                                         // stop position - last row in conglomerate
        0);                                           // unused if stop position is null.
    // now delete all the rows and remember the row location of the
    // last row.
    RowLocation deleted_row_rowloc = base_scan.newRowLocationTemplate();
    for (int i = 0; i < 3; i++) {
        base_scan.next();
        base_scan.fetchLocation(deleted_row_rowloc);
        base_scan.delete();
    }
    // insert another row on page 2 to make sure page does not go away.
    cc.insert(big_row.getRowArray());
    cc.close();
    base_scan.close();
    tc.commit();
    // at this point the post commit thread should have reclaimed all the
    // deleted row on page 2, but not the page.
    // 
    // Open it, at read uncommitted level.
    cc = tc.openConglomerate(orig_conglomid, false, 0, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
    // test heap fetch of row on page where row does not exist.
    if (cc.fetch(deleted_row_rowloc, big_row.getRowArray(), null)) {
        throw T_Fail.testFailMsg("(readUncommitted) fetch should ret false for reclaimed row.");
    }
    // test heap replace of row on page where row does not exist.
    if (cc.replace(deleted_page_rowloc, big_row.getRowArray(), update_desc)) {
        throw T_Fail.testFailMsg("(readUncommitted) delete should ret false for reclaimed page.");
    }
    // test heap fetch (overloaded) of row on page where row does not exist.
    if (cc.fetch(deleted_page_rowloc, big_row.getRowArray(), null, true)) {
        throw T_Fail.testFailMsg("(readUncommitted) fetch should ret false for reclaimed page.");
    }
    // test heap delete of row on page where row does not exist.
    if (cc.delete(deleted_page_rowloc)) {
        throw T_Fail.testFailMsg("(readUncommitted) delete should ret false for reclaimed page.");
    }
    cc.close();
    /*
         * TEST 3 - test heap scan fetch of row on page prevents page from 
         *          disappearing, but handles row being deleted.
         * <p>
         * A heap scan will maintain a scan lock on a page even if it is doing
         * a read uncommitted scan.  This will prevent the row/page from being
         * reclaimed by post commit while the scan is positioned on the page. 
         * This presents no other concurrency issues for read uncommitted, it
         * should be invisible to the user (deletes can still happen and the
         * read uncommitted scanner will not block anyone).
         *
         * You need to at least get to the 2nd page, because the 1st page is
         * never totally reclaimed and deleted by the system in a heap (it has
         * some internal catalog information stored internally in row "0").
         */
    big_row = new T_AccessRow(2);
    big_row.setCol(1, new SQLChar(twok_string));
    // Create a heap conglomerate.
    orig_conglomid = tc.createConglomerate(
        "heap",                               // create a heap conglomerate
        big_row.getRowArray(),
        null,                                 // column sort order not required for heap
        null,                                 // default collation
        null,                                 // default properties
        TransactionController.IS_DEFAULT);    // not temporary
    cc = tc.openConglomerate(orig_conglomid, false, TransactionController.OPENMODE_FORUPDATE, TransactionController.MODE_RECORD, TransactionController.ISOLATION_READ_UNCOMMITTED);
    for (int i = 0; i < 10; i++) {
        big_row.setCol(0, new SQLInteger(i));
        cc.insert(big_row.getRowArray());
    }
    cc.close();
    // Open scan on the conglomerate, and position it on the second page.
    base_scan = tc.openScan(
        orig_conglomid,
        true,                                              // hold cursor open across commit
        0,                                                 // for read
        TransactionController.MODE_RECORD,
        TransactionController.ISOLATION_READ_UNCOMMITTED,
        (FormatableBitSet) null,                           // all columns, all as objects
        null,                                              // start position - first row in conglomerate
        0,                                                 // unused if start position is null.
        null,                                              // qualifier - accept all rows
        null,                                              // stop position - last row in conglomerate
        0);                                                // unused if stop position is null.
    base_scan.next();
    base_scan.next();
    base_scan.next();
    if (!base_scan.doesCurrentPositionQualify())
        throw T_Fail.testFailMsg("(readUncommitted) doesCurrentPositionQualify() failed.");
    base_scan.fetch(big_row.getRowArray());
    base_scan.fetchLocation(deleted_row_rowloc);
    if (base_scan.isCurrentPositionDeleted())
        throw T_Fail.testFailMsg("(readUncommitted) isCurrentPositionDeleted() failed.");
    // Open another scan on the conglomerate.
    ScanController delete_scan = tc.openScan(
        orig_conglomid,
        true,                                         // hold cursor open across commit
        TransactionController.OPENMODE_FORUPDATE,     // for update
        TransactionController.MODE_RECORD,
        TransactionController.ISOLATION_SERIALIZABLE,
        (FormatableBitSet) null,                      // all columns, all as objects
        null,                                         // start position - first row in conglomerate
        0,                                            // unused if start position is null.
        null,                                         // qualifier - accept all rows
        null,                                         // stop position - last row in conglomerate
        0);                                           // unused if stop position is null.
    for (int i = 0; i < 10; i++) {
        delete_scan.next();
        delete_scan.fetchLocation(deleted_page_rowloc);
        delete_scan.delete();
    }
    delete_scan.close();
    if (base_scan.doesCurrentPositionQualify())
        throw T_Fail.testFailMsg("(readUncommitted) doesCurrentPositionQualify() failed.");
    try {
        base_scan.fetch(big_row.getRowArray());
        throw T_Fail.testFailMsg("(readUncommitted) fetch of deleted row should throw exception.");
    } catch (StandardException se) {
        if (!se.getMessageId().equals(SQLState.AM_RECORD_NOT_FOUND)) {
            throw T_Fail.testFailMsg("(readUncommitted) fetch of deleted row should throw SQLState.AM_RECORD_NOT_FOUND.");
        }
    }
    base_scan.fetchLocation(deleted_row_rowloc);
    if (!base_scan.isCurrentPositionDeleted())
        throw T_Fail.testFailMsg("(readUncommitted) isCurrentPositionDeleted() failed.");
    base_scan.close();
    tc.commit();
    REPORT("(readUncommitted) succeeded");
    return true;
}
Also used : StandardException(org.apache.derby.shared.common.error.StandardException) SQLChar(org.apache.derby.iapi.types.SQLChar) FormatableBitSet(org.apache.derby.iapi.services.io.FormatableBitSet) RowLocation(org.apache.derby.iapi.types.RowLocation) SQLLongint(org.apache.derby.iapi.types.SQLLongint) SQLInteger(org.apache.derby.iapi.types.SQLInteger)
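
The final check in this example shows how these tests assert on a specific failure: catch the StandardException and compare its message identifier with the expected SQLState constant, so that an unrelated error does not masquerade as a pass. Below is a small, hypothetical helper capturing that pattern; the class, interface and method names are invented for illustration.

import org.apache.derby.shared.common.error.StandardException;

final class ExpectSqlState {

    private ExpectSqlState() { }

    /** Hypothetical functional interface for an action that may fail with a StandardException. */
    interface Action {
        void run() throws StandardException;
    }

    /** Returns true only if the action fails with exactly the expected SQLState. */
    static boolean failsWith(String expectedSqlState, Action action) {
        try {
            action.run();
            return false;                 // the action did not fail at all
        } catch (StandardException se) {
            return se.getMessageId().equals(expectedSqlState);
        }
    }
}

A caller would pass the failing action as a lambda and compare against SQLState.AM_RECORD_NOT_FOUND, exactly as the in-line try/catch in the scan test above does.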

Aggregations

StandardException (org.apache.derby.shared.common.error.StandardException)276 DataValueDescriptor (org.apache.derby.iapi.types.DataValueDescriptor)43 LanguageConnectionContext (org.apache.derby.iapi.sql.conn.LanguageConnectionContext)37 IOException (java.io.IOException)32 Properties (java.util.Properties)29 RawTransaction (org.apache.derby.iapi.store.raw.xact.RawTransaction)27 TransactionController (org.apache.derby.iapi.store.access.TransactionController)26 ContextManager (org.apache.derby.iapi.services.context.ContextManager)22 RawContainerHandle (org.apache.derby.iapi.store.raw.data.RawContainerHandle)20 SQLException (java.sql.SQLException)17 FormatableBitSet (org.apache.derby.iapi.services.io.FormatableBitSet)17 DataDictionary (org.apache.derby.iapi.sql.dictionary.DataDictionary)16 ConglomerateController (org.apache.derby.iapi.store.access.ConglomerateController)12 RowLocation (org.apache.derby.iapi.types.RowLocation)11 SQLLongint (org.apache.derby.iapi.types.SQLLongint)11 StorageFile (org.apache.derby.io.StorageFile)10 TableDescriptor (org.apache.derby.iapi.sql.dictionary.TableDescriptor)9 ScanController (org.apache.derby.iapi.store.access.ScanController)9 File (java.io.File)8 LogInstant (org.apache.derby.iapi.store.raw.log.LogInstant)8