Example 1 with LogicalUndo

use of org.apache.derby.iapi.store.access.conglomerate.LogicalUndo in project derby by apache.

the class BasePage method insertAllowOverflow.

/**
 *		Insert a row allowing overflow.
 *
 *		If a handle is supplied, the record at that handle will be updated
 *		to indicate that it is a partial row with an overflow portion.
 *
 *		@exception StandardException	Standard Derby error policy
 */
public RecordHandle insertAllowOverflow(int slot, Object[] row, FormatableBitSet validColumns, int startColumn, byte insertFlag, int overflowThreshold, RecordHandle nextPortionHandle) throws StandardException {
    BasePage curPage = this;
    if (!curPage.owner.updateOK()) {
        throw StandardException.newException(SQLState.DATA_CONTAINER_READ_ONLY);
    }
    // Handle of the first portion of the chain
    RecordHandle headHandle = null;
    RecordHandle handleToUpdate = null;
    RawTransaction t = curPage.owner.getTransaction();
    for (; ; ) {
        if (SanityManager.DEBUG) {
            SanityManager.ASSERT(curPage.isLatched());
        }
        if (!curPage.allowInsert())
            return null;
        // 'this' is the head page
        if (curPage != this)
            slot = curPage.recordCount;
        boolean isLongColumns = false;
        int realStartColumn = -1;
        int realSpaceOnPage = -1;
        DynamicByteArrayOutputStream logBuffer = null;
        // allocate new record id and handle
        int recordId = curPage.newRecordIdAndBump();
        RecordHandle handle = new RecordId(curPage.getPageId(), recordId, slot);
        if (curPage == this) {
            // Lock the row, if it is the very first portion of the record.
            if (handleToUpdate == null) {
                while (!owner.getLockingPolicy().lockRecordForWrite(t, handle, true, /* lock is for insert */
                false)) {
                    // Loop until we get a new record id we can lock.
                    // If we can't get the lock without waiting, assume
                    // the record id is owned by another xact.  The
                    // current heap overflow algorithm makes this likely:
                    // it first tries to insert a row, telling raw store
                    // to fail if it doesn't fit on the page, thereby
                    // getting a lock on an id that never makes it to
                    // disk.  The inserting transaction will hold a lock
                    // on this "unused" record id until it commits.  The
                    // page can leave the cache at this point, and since
                    // the inserting transaction has not dirtied the page
                    // (it failed after getting the lock but before
                    // logging anything), another inserting transaction
                    // will then get the same id as the previous inserter
                    // - hence the loop on lock waits.
                    // 
                    // The lock we request indicates that this is a lock
                    // for insert, which the locking policy may use to
                    // perform locking concurrency optimizations.
                    // allocate new record id and handle
                    recordId = curPage.newRecordIdAndBump();
                    handle = new RecordId(curPage.getPageId(), recordId, slot);
                }
            }
            headHandle = handle;
        }
        do {
            // Do the insert; if a long column is hit, redo it with the saved logBuffer.
            try {
                startColumn = owner.getActionSet().actionInsert(t, curPage, slot, recordId, row, validColumns, (LogicalUndo) null, insertFlag, startColumn, false, realStartColumn, logBuffer, realSpaceOnPage, overflowThreshold);
                isLongColumns = false;
            } catch (LongColumnException lce) {
                // we caught a long column exception
                // three things should happen here:
                // 1. insert the long column into overflow pages.
                // 2. append the overflow field header in the main chain.
                // 3. continue the insert in the main data chain.
                logBuffer = new DynamicByteArrayOutputStream(lce.getLogBuffer());
                // step 1: insert the long column ... use the same
                // insertFlag as the rest of the row.
                RecordHandle longColumnHandle = insertLongColumn(curPage, lce, insertFlag);
                // step 2: append the overflow field header to the log buffer
                int overflowFieldLen = 0;
                try {
                    overflowFieldLen += appendOverflowFieldHeader((DynamicByteArrayOutputStream) logBuffer, longColumnHandle);
                } catch (IOException ioe) {
                    // YYZ: revisit...  ioexception, insert failed...
                    return null;
                }
                // step 3: continue the insert in the main data chain
                // need to pass the log buffer, and start column to the next insert.
                realStartColumn = lce.getNextColumn() + 1;
                realSpaceOnPage = lce.getRealSpaceOnPage() - overflowFieldLen;
                isLongColumns = true;
            }
        } while (isLongColumns);
        if (handleToUpdate != null) {
            // update the record header on the previous page
            updateOverflowDetails(handleToUpdate, handle);
        }
        // all done
        if (startColumn == -1) {
            if (curPage != this)
                curPage.unlatch();
            if (nextPortionHandle != null) {
                // need to update the overflow details of the last portion
                // to point to the existing portion
                updateOverflowDetails(handle, nextPortionHandle);
            }
            return headHandle;
        }
        handleToUpdate = handle;
        BasePage nextPage = curPage.getOverflowPageForInsert(slot, row, validColumns, startColumn);
        if (curPage != this)
            curPage.unlatch();
        curPage = nextPage;
    }
}
Also used : DynamicByteArrayOutputStream(org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream) LogicalUndo(org.apache.derby.iapi.store.access.conglomerate.LogicalUndo) RawTransaction(org.apache.derby.iapi.store.raw.xact.RawTransaction) RecordHandle(org.apache.derby.iapi.store.raw.RecordHandle) IOException(java.io.IOException)
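
The retry loop described in the long comment above is a pattern worth isolating: when a no-wait write lock on a freshly allocated record id fails, the code assumes another transaction still owns that id and simply allocates a new one rather than blocking. Below is a minimal standalone sketch of that pattern. It is not Derby code; the class and method names are hypothetical, and java.util.concurrent.locks.ReentrantLock.tryLock() stands in for the locking policy's no-wait lockRecordForWrite request.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;

// Minimal model: keep allocating record ids until one can be locked
// without waiting. All names here are hypothetical, not Derby's.
public class RecordIdRetry {
    private final AtomicInteger nextId = new AtomicInteger(); // stands in for newRecordIdAndBump()
    private final Map<Integer, ReentrantLock> locks = new ConcurrentHashMap<>();

    // Allocate ids until one is lockable without waiting; return the locked id.
    public int allocateAndLock() {
        for (;;) {
            int id = nextId.incrementAndGet();
            ReentrantLock lock = locks.computeIfAbsent(id, k -> new ReentrantLock());
            if (lock.tryLock()) {  // no-wait request, like lockRecordForWrite(..., false)
                return id;         // caller holds the lock until commit
            }
            // Lock owned by someone else (e.g. an earlier failed insert whose
            // id never made it to disk): skip this id and try the next one.
        }
    }

    public static void main(String[] args) {
        RecordIdRetry page = new RecordIdRetry();
        System.out.println("locked record id " + page.allocateAndLock());
    }
}

The trade-off is the same one the comment explains: skipping an id is cheap, while waiting on a lock that another transaction holds until commit could stall the insert indefinitely.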

Example 2 with LogicalUndo

use of org.apache.derby.iapi.store.access.conglomerate.LogicalUndo in project derby by apache.

the class BasePage method insertLongColumn.

/**
 * Routine to insert a long column.
 * <p>
 * This code inserts a long column as a linked list of rows on overflow
 * pages.  This list is pointed to by a small pointer in the main page
 * row column.  The operation does the following:
 *     allocate new overflow page
 *     insert single row filling overflow page
 *     while (more of column exists)
 *         allocate new overflow page
 *         insert single row with next piece of row
 *         update previous piece to point to this new piece of row
 *
 * The same code is called both from an initial insert of a long column and
 * from a subsequent update that results in a long column.
 *
 * @return The recordHandle of the first piece of the long column chain.
 *
 * @param mainChainPage The parent page with row piece containing column
 *                      that will eventually point to this long column
 *                      chain.
 * @param lce           The LongColumnException thrown when we recognized
 *                      that the column being inserted was "long".  This
 *                      structure is used to cache the info that we have
 *                      read so far about the column.  In the case of an
 *                      insert of a stream it will hold a copy of just the
 *                      first page of the stream, which has already been
 *                      read once.
 * @param insertFlag    flags for insert operation.
 *
 * @exception  StandardException  Standard exception policy.
 */
protected RecordHandle insertLongColumn(BasePage mainChainPage, LongColumnException lce, byte insertFlag) throws StandardException {
    Object[] row = new Object[1];
    row[0] = lce.getColumn();
    RecordHandle firstHandle = null;
    RecordHandle handle = null;
    RecordHandle prevHandle = null;
    BasePage curPage = mainChainPage;
    BasePage prevPage = null;
    boolean isFirstPage = true;
    // undo inserts as purges of all pieces of the overflow column
    // except for the 1st overflow page pointed at by the main row.
    // 
    // Consider a row with one column which is a long column
    // that takes 2 pages for itself plus an entry in main parent page.
    // the log records in order for this look something like:
    // insert overflow page 1
    // insert overflow page 2
    // update overflow page 1 record to have pointer to overflow page 2
    // insert main row (which has pointer to overflow page 1)
    // 
    // If this insert gets aborted then something like the following
    // happens:
    // main row is marked deleted (but ptr to overflow 1 still exists)
    // update is aborted so link on page 2 to page 1 is lost
    // overflow row on page 2 is marked deleted
    // overflow row on page 1 is marked deleted
    // 
    // There is no way to reclaim page 2 later as the abort of the update
    // has now lost the link from overflow page 1 to overflow 2, so
    // the system has to do it as part of the abort of the insert.  But
    // it can't for page 1, as the main page will attempt to follow
    // its link in the deleted row during its space reclamation, and it
    // can't tell the difference
    // between a row that has been marked deleted as part of an aborted
    // insert or as part of a committed delete.  When it follows the link
    // it could find no page, and that could be coded against, but it could
    // be that the page is now used by some other overflow row, which would
    // lead to lots of different kinds of problems.
    // 
    // So the code leaves the 1st overflow page to be cleaned up when the
    // main page row is purged, but goes ahead and immediately purges all
    // the segments that will be lost as part of the links being lost due
    // to aborted updates.
    byte after_first_page_insertFlag = (byte) (insertFlag | Page.INSERT_UNDO_WITH_PURGE);
    // when inserting a long column startColumn is just used
    // as a flag. -1 means the insert is complete, != -1 indicates
    // more inserts are required.
    int startColumn = 0;
    RawTransaction t = curPage.owner.getTransaction();
    do {
        if (!isFirstPage) {
            prevPage = curPage;
            prevHandle = handle;
        }
        // step 1. get a new overflow page
        curPage = (BasePage) getNewOverflowPage();
        if (SanityManager.DEBUG) {
            SanityManager.ASSERT(curPage.isLatched());
            SanityManager.ASSERT(curPage.allowInsert());
        }
        int slot = curPage.recordCount;
        int recordId = curPage.newRecordId();
        handle = new RecordId(curPage.getPageId(), recordId, slot);
        if (isFirstPage)
            firstHandle = handle;
        // step 2: insert column portion
        startColumn = owner.getActionSet().actionInsert(t, curPage, slot, recordId, row, (FormatableBitSet) null, (LogicalUndo) null, (isFirstPage ? insertFlag : after_first_page_insertFlag), startColumn, true, -1, (DynamicByteArrayOutputStream) null, -1, 100);
        // step 3: if this is not the first page, link the previous piece
        // to this one, then release the latch on prevPage
        if (!isFirstPage) {
            // for the previous page, add an overflow field header,
            // and update the record header to show 2 fields
            prevPage.updateFieldOverflowDetails(prevHandle, handle);
            prevPage.unlatch();
            prevPage = null;
        } else {
            isFirstPage = false;
        }
    } while (startColumn != (-1));
    if (curPage != null) {
        curPage.unlatch();
        curPage = null;
    }
    return (firstHandle);
}
Also used : DynamicByteArrayOutputStream(org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream) LogicalUndo(org.apache.derby.iapi.store.access.conglomerate.LogicalUndo) RawTransaction(org.apache.derby.iapi.store.raw.xact.RawTransaction) RecordHandle(org.apache.derby.iapi.store.raw.RecordHandle) AuxObject(org.apache.derby.iapi.store.raw.AuxObject) FormatableBitSet(org.apache.derby.iapi.services.io.FormatableBitSet)
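
The pseudocode in the javadoc (allocate an overflow page, insert one piece, point the previous piece at it) is a straightforward forward-linked chain build. The sketch below models the same shape under stated assumptions: OverflowPage and its fields are hypothetical stand-ins for Derby's overflow pages, and fixed-size byte chunks stand in for column portions.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Minimal model of a long-column chain: fixed-size pieces, each pointing
// to the page holding the next piece. Names here are hypothetical.
public class LongColumnChain {
    static final int PIECE_SIZE = 4; // tiny, for demonstration only

    static class OverflowPage {
        final int pageNumber;
        final byte[] piece;
        long nextPage = -1;          // -1 means end of chain
        OverflowPage(int pageNumber, byte[] piece) {
            this.pageNumber = pageNumber;
            this.piece = piece;
        }
    }

    // Insert a long column as a chain; return the first page (the "handle").
    static OverflowPage insertLongColumn(List<OverflowPage> store, byte[] column) {
        OverflowPage first = null, prev = null;
        for (int off = 0; off < column.length; off += PIECE_SIZE) {
            byte[] piece = Arrays.copyOfRange(column, off,
                    Math.min(off + PIECE_SIZE, column.length));
            OverflowPage cur = new OverflowPage(store.size(), piece); // "allocate new overflow page"
            store.add(cur);
            if (first == null) first = cur;                   // like firstHandle
            if (prev != null) prev.nextPage = cur.pageNumber; // like updateFieldOverflowDetails
            prev = cur;
        }
        return first;
    }

    public static void main(String[] args) {
        List<OverflowPage> store = new ArrayList<>();
        OverflowPage head = insertLongColumn(store, new byte[10]);
        System.out.println("chain starts at page " + head.pageNumber
                + ", pages used: " + store.size());
    }
}

As in insertLongColumn itself, only the handle of the first piece escapes; every other piece is reachable only through the chain, which is exactly why the aborted-update scenario in the comment block has to purge the later pieces eagerly.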

Example 3 with LogicalUndo

use of org.apache.derby.iapi.store.access.conglomerate.LogicalUndo in project derby by apache.

the class T_RawStoreFactory method P709.

/**
 *	  P709:
 *	  this test exercises purgeAtSlot, rolls back, and purges the slot again,
 *	  to make sure that not logging the data has no impact on re-purging
 *	  the rolled-back purges.
 *	  @exception T_Fail Unexpected behaviour from the API
 *	  @exception StandardException Unexpected exception from the implementation
 */
protected void P709() throws StandardException, T_Fail {
    logDataForPurges = false;
    Transaction t = t_util.t_startTransaction();
    long cid = t_util.t_addContainer(t, 0);
    t_util.t_commit(t);
    ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
    Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
    // REPORT("insert 5 records");
    T_RawStoreRow row0 = new T_RawStoreRow(REC_001);
    T_RawStoreRow row1 = new T_RawStoreRow(REC_001);
    T_RawStoreRow row2 = new T_RawStoreRow(REC_002);
    T_RawStoreRow row3 = new T_RawStoreRow(REC_003);
    T_RawStoreRow row4 = new T_RawStoreRow(REC_004);
    RecordHandle r0, r1, r2, r3, r4;
    r0 = t_util.t_insertAtSlot(page, 0, row0);
    r1 = t_util.t_insertAtSlot(page, 1, row1);
    r2 = t_util.t_insertAtSlot(page, 2, row2);
    r3 = t_util.t_insertAtSlot(page, 3, row3);
    r4 = t_util.t_insertAtSlot(page, 4, row4);
    if (r3 != null)
        page.deleteAtSlot(3, true, (LogicalUndo) null);
    // REPORT("commit it");
    t_util.t_commit(t);
    c = t_util.t_openContainer(t, 0, cid, true);
    page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
    try {
        page.purgeAtSlot(-1, 1, logDataForPurges);
        throw T_Fail.testFailMsg("negative slot number did not cause an exception");
    } catch (StandardException se) {
        // expected
    }
    try {
        page.purgeAtSlot(4, 4, logDataForPurges);
        throw T_Fail.testFailMsg("purging more rows than are on the page did not cause an exception");
    } catch (StandardException se) {
        // expected
    }
    // if not all the rows are there, do minimal test
    if (r4 == null) {
        int rcount = page.recordCount();
        page.purgeAtSlot(0, 1, logDataForPurges);
        if (page.recordCount() != rcount - 1)
            T_Fail.testFailMsg("failed to purge a record, expect " + (rcount - 1) + " got " + page.recordCount());
        if (testRollback) {
            t_util.t_abort(t);
            c = t_util.t_openContainer(t, 0, cid, true);
            page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
            if (logDataForPurges)
                t_util.t_checkFetchBySlot(page, 0, REC_001, false, true);
            else
                t_util.t_checkFetchBySlot(page, 0, REC_NULL, false, true);
            if (page.recordCount() != rcount)
                T_Fail.testFailMsg("failed to rollback purge, expect " + rcount + " got " + page.recordCount());
        } else {
            t_util.t_commit(t);
        }
        PASS("mimimal purging P709");
        return;
    }
    // REPORT("purge 2 records from middle");
    page.purgeAtSlot(1, 2, logDataForPurges);
    t_util.t_checkFetchBySlot(page, 0, REC_001, false, true);
    t_util.t_checkFetchBySlot(page, 1, REC_003, true, true);
    t_util.t_checkFetchBySlot(page, 2, REC_004, false, true);
    if (page.recordCount() != 3)
        T_Fail.testFailMsg("page expect to have 3 records, recordCount() = " + page.recordCount());
    // REPORT("purge all records from the page");
    page.purgeAtSlot(0, 3, logDataForPurges);
    if (page.recordCount() != 0)
        T_Fail.testFailMsg("page expect to have 0 records, recordCount() = " + page.recordCount());
    if (testRollback) {
        REPORT("testing rollback");
        t_util.t_abort(t);
        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
        if (logDataForPurges) {
            t_util.t_checkFetchBySlot(page, 0, REC_001, false, true);
            t_util.t_checkFetchBySlot(page, 1, REC_001, false, true);
            t_util.t_checkFetchBySlot(page, 2, REC_002, false, true);
            t_util.t_checkFetchBySlot(page, 3, REC_003, true, true);
            t_util.t_checkFetchBySlot(page, 4, REC_004, false, true);
        } else {
            t_util.t_checkFetchBySlot(page, 0, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 1, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 2, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 3, REC_NULL, true, true);
            t_util.t_checkFetchBySlot(page, 4, REC_NULL, false, true);
        }
        if (page.recordCount() != 5)
            T_Fail.testFailMsg("page expect to have 5 records, recordCount() = " + page.recordCount());
        // REPORT("purge 3 records from the end");
        page.purgeAtSlot(2, 3, logDataForPurges);
        if (logDataForPurges) {
            t_util.t_checkFetchBySlot(page, 0, REC_001, false, true);
            t_util.t_checkFetchBySlot(page, 1, REC_001, false, true);
        } else {
            t_util.t_checkFetchBySlot(page, 0, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 1, REC_NULL, false, true);
        }
        if (page.recordCount() != 2)
            T_Fail.testFailMsg("page expect to have 2 records, recordCount() = " + page.recordCount());
        // REPORT("rollback");
        t_util.t_abort(t);
        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
        if (logDataForPurges) {
            t_util.t_checkFetchBySlot(page, 0, REC_001, false, true);
            t_util.t_checkFetchBySlot(page, 1, REC_001, false, true);
            t_util.t_checkFetchBySlot(page, 2, REC_002, false, true);
            t_util.t_checkFetchBySlot(page, 3, REC_003, true, true);
            t_util.t_checkFetchBySlot(page, 4, REC_004, false, true);
        } else {
            t_util.t_checkFetchBySlot(page, 0, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 1, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 2, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 3, REC_NULL, true, true);
            t_util.t_checkFetchBySlot(page, 4, REC_NULL, false, true);
        }
        if (page.recordCount() != 5)
            T_Fail.testFailMsg("page expect to have 5 records, recordCount() = " + page.recordCount());
        // REPORT("make sure delete record is reconstituted as such");
        if (page.isDeletedAtSlot(1))
            T_Fail.testFailMsg("rolled back purged undeleted record cause record to be deleted");
        if (!page.isDeletedAtSlot(3))
            T_Fail.testFailMsg("rolled back purged deleted record cause record to be undeleted");
    }
    REPORT("purging again the purges rolled back earlier");
    // purge again and this time commit, instead of rollback.
    // REPORT("purge 2 records from middle");
    page.purgeAtSlot(1, 2, logDataForPurges);
    t_util.t_checkFetchBySlot(page, 0, REC_NULL, false, true);
    t_util.t_checkFetchBySlot(page, 1, REC_NULL, true, true);
    t_util.t_checkFetchBySlot(page, 2, REC_NULL, false, true);
    if (page.recordCount() != 3)
        T_Fail.testFailMsg("page expect to have 3 records, recordCount() = " + page.recordCount());
    // REPORT("purge all records from the page");
    page.purgeAtSlot(0, 3, logDataForPurges);
    if (page.recordCount() != 0)
        T_Fail.testFailMsg("page expect to have 0 records, recordCount() = " + page.recordCount());
    t_util.t_abort(t);
    c = t_util.t_openContainer(t, 0, cid, true);
    page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
    if (page.recordCount() != 5)
        T_Fail.testFailMsg("page expect to have 5 records, recordCount() = " + page.recordCount());
    // REPORT("purge 3 records from the end");
    page.purgeAtSlot(2, 3, logDataForPurges);
    t_util.t_checkFetchBySlot(page, 0, REC_NULL, false, true);
    t_util.t_checkFetchBySlot(page, 1, REC_NULL, false, true);
    if (page.recordCount() != 2)
        T_Fail.testFailMsg("page expect to have 2 records, recordCount() = " + page.recordCount());
    // REPORT("commit");
    t_util.t_commit(t);
    c = t_util.t_openContainer(t, 0, cid, true);
    page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
    t_util.t_checkFetchBySlot(page, 0, REC_NULL, false, true);
    t_util.t_checkFetchBySlot(page, 1, REC_NULL, false, true);
    if (page.recordCount() != 2)
        T_Fail.testFailMsg("page expect to have 2 records, recordCount() = " + page.recordCount());
    PASS("P709");
    // cleanup
    t_util.t_dropContainer(t, 0, cid);
    t_util.t_commit(t);
    t.close();
}
Also used : StandardException(org.apache.derby.shared.common.error.StandardException) RawTransaction(org.apache.derby.iapi.store.raw.xact.RawTransaction) LogicalUndo(org.apache.derby.iapi.store.access.conglomerate.LogicalUndo) RawContainerHandle(org.apache.derby.iapi.store.raw.data.RawContainerHandle)
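
What the test pins down is the purge contract: when logDataForPurges is false, rolling back a purge restores the slot itself but not its contents, so a fetch sees REC_NULL instead of the original value. Below is a minimal in-memory model of that contract, assuming a hypothetical page-as-list and an undo log of compensation actions; it illustrates the semantics being tested, not Derby's recovery code.

import java.util.ArrayList;
import java.util.List;

// Model of purge-with-optional-data-logging and its rollback semantics.
public class PurgeRollbackModel {
    static final String REC_NULL = null;  // a row restored without its data

    final List<String> slots = new ArrayList<>();     // the "page"
    final List<Runnable> undoLog = new ArrayList<>(); // compensation actions

    void purgeAtSlot(int slot, boolean logData) {
        String removed = slots.remove(slot);
        String restored = logData ? removed : REC_NULL; // data kept only if logged
        undoLog.add(() -> slots.add(slot, restored));
    }

    void abort() { // apply compensations in reverse order
        for (int i = undoLog.size() - 1; i >= 0; i--) undoLog.get(i).run();
        undoLog.clear();
    }

    public static void main(String[] args) {
        PurgeRollbackModel page = new PurgeRollbackModel();
        page.slots.add("REC_001");
        page.purgeAtSlot(0, /* logDataForPurges= */ false);
        page.abort();
        // The slot is back, but its data is gone because the purge was not logged:
        System.out.println("restored row = " + page.slots.get(0)); // prints: null
    }
}

This mirrors the checkFetchBySlot branches above: with logging the rollback restores REC_001 through REC_004, without it every restored slot fetches as REC_NULL, while delete status (slot 3) survives either way.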

Example 4 with LogicalUndo

use of org.apache.derby.iapi.store.access.conglomerate.LogicalUndo in project derby by apache.

the class T_RawStoreFactory method P014.

/**
 *	  P014
 *
 *	  this test exercises purgeAtSlot
 *		@exception T_Fail Unexpected behaviour from the API
 *		@exception StandardException Unexpected exception from the implementation
 */
protected void P014() throws StandardException, T_Fail {
    Transaction t = t_util.t_startTransaction();
    long cid = t_util.t_addContainer(t, 0);
    t_util.t_commit(t);
    ContainerHandle c = t_util.t_openContainer(t, 0, cid, true);
    Page page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
    // REPORT("insert 5 records");
    T_RawStoreRow row0 = new T_RawStoreRow(REC_001);
    T_RawStoreRow row1 = new T_RawStoreRow(REC_001);
    T_RawStoreRow row2 = new T_RawStoreRow(REC_002);
    T_RawStoreRow row3 = new T_RawStoreRow(REC_003);
    T_RawStoreRow row4 = new T_RawStoreRow(REC_004);
    RecordHandle r0, r1, r2, r3, r4;
    r0 = t_util.t_insertAtSlot(page, 0, row0);
    r1 = t_util.t_insertAtSlot(page, 1, row1);
    r2 = t_util.t_insertAtSlot(page, 2, row2);
    r3 = t_util.t_insertAtSlot(page, 3, row3);
    r4 = t_util.t_insertAtSlot(page, 4, row4);
    if (r3 != null)
        page.deleteAtSlot(3, true, (LogicalUndo) null);
    // REPORT("commit it");
    t_util.t_commit(t);
    c = t_util.t_openContainer(t, 0, cid, true);
    page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
    try {
        page.purgeAtSlot(-1, 1, logDataForPurges);
        throw T_Fail.testFailMsg("negative slot number did not cause an exception");
    } catch (StandardException se) {
        // expected
    }
    try {
        page.purgeAtSlot(4, 4, logDataForPurges);
        throw T_Fail.testFailMsg("purging more rows than are on the page did not cause an exception");
    } catch (StandardException se) {
        // expected
    }
    // if not all the rows are there, do minimal test
    if (r4 == null) {
        int rcount = page.recordCount();
        page.purgeAtSlot(0, 1, logDataForPurges);
        if (page.recordCount() != rcount - 1)
            T_Fail.testFailMsg("failed to purge a record, expect " + (rcount - 1) + " got " + page.recordCount());
        if (testRollback) {
            t_util.t_abort(t);
            c = t_util.t_openContainer(t, 0, cid, true);
            page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
            if (logDataForPurges)
                t_util.t_checkFetchBySlot(page, 0, REC_001, false, true);
            else
                t_util.t_checkFetchBySlot(page, 0, REC_NULL, false, true);
            if (page.recordCount() != rcount)
                T_Fail.testFailMsg("failed to rollback purge, expect " + rcount + " got " + page.recordCount());
        } else {
            t_util.t_commit(t);
        }
        PASS("minimal P014");
        return;
    }
    // REPORT("purge 2 records from middle");
    page.purgeAtSlot(1, 2, logDataForPurges);
    t_util.t_checkFetchBySlot(page, 0, REC_001, false, true);
    t_util.t_checkFetchBySlot(page, 1, REC_003, true, true);
    t_util.t_checkFetchBySlot(page, 2, REC_004, false, true);
    if (page.recordCount() != 3)
        T_Fail.testFailMsg("page expect to have 3 records, recordCount() = " + page.recordCount());
    // REPORT("purge all records from the page");
    page.purgeAtSlot(0, 3, logDataForPurges);
    if (page.recordCount() != 0)
        T_Fail.testFailMsg("page expect to have 0 records, recordCount() = " + page.recordCount());
    if (testRollback) {
        REPORT("testing rollback");
        t_util.t_abort(t);
        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
        if (logDataForPurges) {
            t_util.t_checkFetchBySlot(page, 0, REC_001, false, true);
            t_util.t_checkFetchBySlot(page, 1, REC_001, false, true);
            t_util.t_checkFetchBySlot(page, 2, REC_002, false, true);
            t_util.t_checkFetchBySlot(page, 3, REC_003, true, true);
            t_util.t_checkFetchBySlot(page, 4, REC_004, false, true);
        } else {
            t_util.t_checkFetchBySlot(page, 0, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 1, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 2, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 3, REC_NULL, true, true);
            t_util.t_checkFetchBySlot(page, 4, REC_NULL, false, true);
        }
        if (page.recordCount() != 5)
            T_Fail.testFailMsg("page expect to have 5 records, recordCount() = " + page.recordCount());
        // REPORT("purge 3 records from the end");
        page.purgeAtSlot(2, 3, logDataForPurges);
        if (logDataForPurges) {
            t_util.t_checkFetchBySlot(page, 0, REC_001, false, true);
            t_util.t_checkFetchBySlot(page, 1, REC_001, false, true);
        } else {
            t_util.t_checkFetchBySlot(page, 0, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 1, REC_NULL, false, true);
        }
        if (page.recordCount() != 2)
            T_Fail.testFailMsg("page expect to have 2 records, recordCount() = " + page.recordCount());
        // REPORT("rollback");
        t_util.t_abort(t);
        c = t_util.t_openContainer(t, 0, cid, true);
        page = t_util.t_getPage(c, ContainerHandle.FIRST_PAGE_NUMBER);
        if (logDataForPurges) {
            t_util.t_checkFetchBySlot(page, 0, REC_001, false, true);
            t_util.t_checkFetchBySlot(page, 1, REC_001, false, true);
            t_util.t_checkFetchBySlot(page, 2, REC_002, false, true);
            t_util.t_checkFetchBySlot(page, 3, REC_003, true, true);
            t_util.t_checkFetchBySlot(page, 4, REC_004, false, true);
        } else {
            t_util.t_checkFetchBySlot(page, 0, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 1, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 2, REC_NULL, false, true);
            t_util.t_checkFetchBySlot(page, 3, REC_NULL, true, true);
            t_util.t_checkFetchBySlot(page, 4, REC_NULL, false, true);
        }
        if (page.recordCount() != 5)
            T_Fail.testFailMsg("page expect to have 5 records, recordCount() = " + page.recordCount());
        // REPORT("make sure delete record is reconstituted as such");
        if (page.isDeletedAtSlot(1))
            T_Fail.testFailMsg("rolled back purged undeleted record cause record to be deleted");
        if (!page.isDeletedAtSlot(3))
            T_Fail.testFailMsg("rolled back purged deleted record cause record to be undeleted");
    }
    PASS("P014");
    // cleanup
    t_util.t_dropContainer(t, 0, cid);
    t_util.t_commit(t);
    t.close();
}
Also used : StandardException(org.apache.derby.shared.common.error.StandardException) RawTransaction(org.apache.derby.iapi.store.raw.xact.RawTransaction) LogicalUndo(org.apache.derby.iapi.store.access.conglomerate.LogicalUndo) RawContainerHandle(org.apache.derby.iapi.store.raw.data.RawContainerHandle)
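
Both P014 and P709 use the manual expected-exception idiom: call the method, throw a test failure if it returns, and swallow the StandardException in an empty catch. In a JUnit 5 harness the same two boundary checks would usually be written with assertThrows. A sketch, assuming JUnit 5 on the classpath and a hypothetical TinyPage stand-in for Page:

import static org.junit.jupiter.api.Assertions.assertThrows;

import java.util.ArrayList;
import java.util.List;
import org.junit.jupiter.api.Test;

// The purgeAtSlot boundary checks from P014/P709, restated with assertThrows.
// TinyPage is a toy stand-in; the real tests go through Page/StandardException.
class PurgeBoundaryTest {
    static class TinyPage {
        final List<String> slots = new ArrayList<>(List.of("a", "b", "c", "d", "e"));
        void purgeAtSlot(int slot, int n, boolean logData) {
            if (slot < 0 || slot + n > slots.size())
                throw new IllegalArgumentException("bad purge range");
            slots.subList(slot, slot + n).clear();
        }
    }

    @Test
    void negativeSlotIsRejected() {
        TinyPage page = new TinyPage();
        assertThrows(IllegalArgumentException.class,
                () -> page.purgeAtSlot(-1, 1, false));
    }

    @Test
    void purgingPastEndOfPageIsRejected() {
        TinyPage page = new TinyPage();
        assertThrows(IllegalArgumentException.class,
                () -> page.purgeAtSlot(4, 4, false));
    }
}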

Example 5 with LogicalUndo

use of org.apache.derby.iapi.store.access.conglomerate.LogicalUndo in project derby by apache.

the class BranchControlRow method splitFor.

/**
 * Perform a top-down split pass making room for the key in "row".
 * <p>
 * Perform a split such that a subsequent call to insert
 * given the argument index row will likely find room for it.  Since
 * latches are released the client must code for the case where another
 * user has grabbed the space made available by the split pass and be
 * ready to do another split.
 * <p>
 * Latches:
 * o PARENT    : is latched on entry (unless the split is at the root, in
 *               which case there is no parent).
 * o THISBRANCH: the current page is latched on entry.
 * o CHILD     : latch the child page which will be pointed at by the
 *               left child pointer of the new page.
 *               RESOLVE (mikem) - see comments below
 * o NEWPAGE   : Allocate and latch new page.
 * o CHILD     : release. (RESOLVE)
 * o fixparents: latch pages and reset their parent pointers.
 *               Conditionally fix up the parent links on the pages
 *               pointed at by the newly allocated page.  First get latch
 *               and release on the left child page and then loop through
 *               slots on NEWPAGE, from left to right getting and
 *               releasing latches.
 *
 * @return page number of the newly allocated leaf page created by split.
 *
 * @param open_btree The open btree to associate latches with.
 * @param template   A scratch area to use while searching during the
 *                   split pass.
 * @param parent     The parent page of the current page in the split pass.
 *                   Starts as null for the root.
 * @param splitrow   The key to make room for during the split pass.
 * @param flag       A flag used to direct where point of split should be
 *                   chosen.
 *
 * @exception  StandardException  Standard exception policy.
 */
protected long splitFor(OpenBTree open_btree, DataValueDescriptor[] template, BranchControlRow parent, DataValueDescriptor[] splitrow, int flag) throws StandardException {
    int childpageid;
    ControlRow childpage;
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(parent != null || this.getIsRoot());
        SanityManager.ASSERT(parent == null || parent.page.isLatched(), "parent page is not latched");
        SanityManager.ASSERT(this.page.isLatched(), "page is not latched:");
    }
    if ((this.page.recordCount() - 1 >= BTree.maxRowsPerPage) || (!this.page.spaceForInsert(splitrow, (FormatableBitSet) null, AccessFactoryGlobals.BTREE_OVERFLOW_THRESHOLD))) {
        if (this.page.recordCount() == 1) {
            // the page is empty (control row only), so throw an exception.
            throw StandardException.newException(SQLState.BTREE_NO_SPACE_FOR_KEY);
        }
        if (this.getIsRoot()) {
            // Track.BranchSplitRoot++;
            growRoot(open_btree, template, this);
            parent = (BranchControlRow) ControlRow.get(open_btree, BTree.ROOTPAGEID);
            return (parent.splitFor(open_btree, template, null, splitrow, flag));
        }
        // At this point we know the page must be split and that it isn't a root page.
        if (SanityManager.DEBUG) {
            SanityManager.ASSERT(!this.getIsRoot());
            SanityManager.ASSERT(parent != null);
        }
        int splitpoint = (this.page.recordCount() - 1) / 2 + 1;
        if ((flag & ControlRow.SPLIT_FLAG_FIRST_ON_PAGE) != 0) {
            // move all the rows to the new page
            splitpoint = 1;
        } else if ((flag & ControlRow.SPLIT_FLAG_LAST_ON_PAGE) != 0) {
            // This is not optimal as we would rather move no rows to the
            // next page, but what should we use as a discriminator?
            splitpoint = this.page.recordCount() - 1;
        }
        if (SanityManager.DEBUG) {
            if (splitpoint <= 0)
                SanityManager.THROWASSERT(this + "yikes! splitpoint of 0!");
        }
        // Before any logged operation is done in the current internal
        // xact, make sure that there is room in the parent to insert
        // the new branch row.
        // 
        // Create a new branch row which points to the new page,
        // and insert it on parent page.
        // Read in the branch row which is at the split point.
        BranchRow split_branch_row = BranchRow.createEmptyTemplate(open_btree.getRawTran(), open_btree.getConglomerate());
        this.page.fetchFromSlot((RecordHandle) null, splitpoint, split_branch_row.getRow(), (FetchDescriptor) null, true);
        // Create the branch row to insert onto the parent page.  For now
        // use a fake page number because we don't know the real page
        // number until the allocate is done, but want to delay the
        // allocate until we know the insert will succeed.
        BranchRow newbranchrow = split_branch_row.createBranchRowFromOldBranchRow(BranchRow.DUMMY_PAGE_NUMBER);
        // "newbranchrow" does not fit on the parent page.
        if (!parent.page.spaceForInsert(newbranchrow.getRow(), (FormatableBitSet) null, AccessFactoryGlobals.BTREE_OVERFLOW_THRESHOLD)) {
            // current split pass recursion must end.
            return (BranchControlRow.restartSplitFor(open_btree, template, parent, this, newbranchrow.getRow(), splitrow, flag));
        }
        // Get the child page for the index row at the split point.
        // This will be the left child for the new page.  We're
        // getting the page because BranchControlRow.allocate
        // sets the left child pointer from a BranchControlRow.
        // If there were a version which just took the pageid,
        // we wouldn't have to get the page (the latch on this
        // page is enough to ensure that the child page won't
        // disappear).
        childpage = this.getChildPageAtSlot(open_btree, splitpoint);
        // Allocate a new branch page and link it to the
        // right of the current page.
        BranchControlRow newbranch = BranchControlRow.allocate(open_btree, childpage, this.getLevel(), parent);
        newbranch.linkRight(open_btree, this);
        // Test fail after allocation
        if (SanityManager.DEBUG) {
            if (SanityManager.DEBUG_ON("branch_split_abort1")) {
                throw StandardException.newException(SQLState.BTREE_ABORT_THROUGH_TRACE);
            }
        }
        // Done with the child page.
        childpage.release();
        // Now that we know the page number of the new child page update
        // the branch row to be inserted with the correct value.
        newbranchrow.setPageNumber(newbranch.page.getPageNumber());
        BranchRow branch_template = BranchRow.createEmptyTemplate(open_btree.getRawTran(), open_btree.getConglomerate());
        SearchParameters sp = new SearchParameters(newbranchrow.getRow(), SearchParameters.POSITION_LEFT_OF_PARTIAL_KEY_MATCH, branch_template.getRow(), open_btree, false);
        parent.searchForEntry(sp);
        byte insertFlag = Page.INSERT_INITIAL;
        insertFlag |= Page.INSERT_DEFAULT;
        insertFlag |= Page.INSERT_UNDO_WITH_PURGE;
        if (parent.page.insertAtSlot(sp.resultSlot + 1, newbranchrow.getRow(), (FormatableBitSet) null, (LogicalUndo) null, insertFlag, AccessFactoryGlobals.BTREE_OVERFLOW_THRESHOLD) == null) {
            throw StandardException.newException(SQLState.BTREE_NO_SPACE_FOR_KEY);
        }
        // Test fail after insert of row onto parent page.
        if (SanityManager.DEBUG) {
            if (SanityManager.DEBUG_ON("branch_split_abort2")) {
                throw StandardException.newException(SQLState.BTREE_ABORT_THROUGH_TRACE);
            }
        }
        // newbranchrow only valid while contents of split_branch_row
        // remain unchanged.
        newbranchrow = null;
        // Copy the rows from the split point, but not including it (since
        // the split point is turning into the left child of the new
        // branch), onto the new page.  Purge the rows including the split
        // point from the current page.
        int num_rows_to_move = this.page.recordCount() - (splitpoint + 1);
        if (num_rows_to_move > 0) {
            this.page.copyAndPurge(newbranch.page, splitpoint + 1, num_rows_to_move, 1);
        }
        // Remove the splitpoint row; we didn't copy it because it became
        // the "left child", but we do need to get rid of it.
        this.page.purgeAtSlot(splitpoint, 1, true);
        // Test fail after copy of rows to new page.
        if (SanityManager.DEBUG) {
            if (SanityManager.DEBUG_ON("branch_split_abort3")) {
                throw StandardException.newException(SQLState.BTREE_ABORT_THROUGH_TRACE);
            }
        }
        // Test fail after purge of rows on old page.
        if (SanityManager.DEBUG) {
            if (SanityManager.DEBUG_ON("branch_split_abort4")) {
                throw StandardException.newException(SQLState.BTREE_ABORT_THROUGH_TRACE);
            }
        }
        // Check pages that have been altered by above split
        if (SanityManager.DEBUG) {
            if (SanityManager.DEBUG_ON("enableBtreeConsistencyCheck")) {
                parent.checkConsistency(open_btree, null, false);
                newbranch.checkConsistency(open_btree, parent, false);
                this.checkConsistency(open_btree, parent, false);
            }
        }
        // Fix up the parent links on the pages for the rows that moved to
        // the new branch.
        newbranch.fixChildrensParents(open_btree, null);
        // At this point a unit of work in the split down the tree has
        // been performed in an internal transaction (ie. writes have been
        // done to latched pages), and the resulting
        // tree is logically consistent, thus the work can be committed.
        // This work must be committed before any latches are released.
        open_btree.getXactMgr().commit();
        // Decide whether we're following the current page or the new page.
        BranchControlRow pagetofollow;
        if (compareIndexRowToKey(splitrow, split_branch_row.getRow(), split_branch_row.getRow().length - 1, 0, open_btree.getConglomerate().ascDescInfo) >= 0) {
            // Follow the new branch
            pagetofollow = newbranch;
            this.release();
        } else {
            // Follow the current branch
            pagetofollow = this;
            newbranch.release();
        }
        // Note that the internal transaction did not release the latches.
        if (SanityManager.DEBUG) {
            SanityManager.ASSERT(parent != null);
            SanityManager.ASSERT(parent.page.isLatched());
            SanityManager.ASSERT(pagetofollow.page.isLatched());
        }
        // Recurse down the tree splitting if necessary.
        return (pagetofollow.splitFor(open_btree, template, parent, splitrow, flag));
    }
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON("enableBtreeConsistencyCheck")) {
            this.checkConsistency(open_btree, parent, false);
        }
    }
    // Don't need the parent any more.
    if (parent != null)
        parent.release();
    // RESOLVE (mikem) - should this be passed in?
    BranchRow branch_template = BranchRow.createEmptyTemplate(open_btree.getRawTran(), open_btree.getConglomerate());
    SearchParameters sp = new SearchParameters(splitrow, SearchParameters.POSITION_LEFT_OF_PARTIAL_KEY_MATCH, branch_template.getRow(), open_btree, false);
    searchForEntry(sp);
    childpage = this.getChildPageAtSlot(open_btree, sp.resultSlot);
    return (childpage.splitFor(open_btree, template, this, splitrow, flag));
}
Also used : LogicalUndo(org.apache.derby.iapi.store.access.conglomerate.LogicalUndo) FormatableBitSet(org.apache.derby.iapi.services.io.FormatableBitSet) SQLLongint(org.apache.derby.iapi.types.SQLLongint)
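
The split-point policy buried in the middle of splitFor (midpoint by default, slot 1 when the new key sorts first on the page, the last slot when it sorts last) is a small pure function, shown in isolation below. The flag values here are illustrative only; Derby defines the real constants in ControlRow.

// Sketch of splitFor's split-point selection as a pure function.
public class SplitPoint {
    static final int SPLIT_FLAG_FIRST_ON_PAGE = 0x1; // illustrative values
    static final int SPLIT_FLAG_LAST_ON_PAGE  = 0x2;

    // recordCount includes the control row at slot 0; the returned slot
    // holds the row that becomes the left child of the new page.
    static int chooseSplitPoint(int recordCount, int flag) {
        int splitpoint = (recordCount - 1) / 2 + 1;    // default: middle
        if ((flag & SPLIT_FLAG_FIRST_ON_PAGE) != 0) {
            splitpoint = 1;                            // key sorts first: move nearly all rows
        } else if ((flag & SPLIT_FLAG_LAST_ON_PAGE) != 0) {
            splitpoint = recordCount - 1;              // key sorts last: move as little as possible
        }
        return splitpoint;
    }

    public static void main(String[] args) {
        System.out.println(chooseSplitPoint(9, 0));                        // 5
        System.out.println(chooseSplitPoint(9, SPLIT_FLAG_FIRST_ON_PAGE)); // 1
        System.out.println(chooseSplitPoint(9, SPLIT_FLAG_LAST_ON_PAGE));  // 8
    }
}

As the code's own comment notes, the LAST_ON_PAGE choice is a heuristic for ascending inserts: ideally no rows would move, but some discriminator row is needed on the new page.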

Aggregations

LogicalUndo (org.apache.derby.iapi.store.access.conglomerate.LogicalUndo): 5
RawTransaction (org.apache.derby.iapi.store.raw.xact.RawTransaction): 4
DynamicByteArrayOutputStream (org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream): 2
FormatableBitSet (org.apache.derby.iapi.services.io.FormatableBitSet): 2
RecordHandle (org.apache.derby.iapi.store.raw.RecordHandle): 2
RawContainerHandle (org.apache.derby.iapi.store.raw.data.RawContainerHandle): 2
StandardException (org.apache.derby.shared.common.error.StandardException): 2
IOException (java.io.IOException): 1
AuxObject (org.apache.derby.iapi.store.raw.AuxObject): 1
SQLLongint (org.apache.derby.iapi.types.SQLLongint): 1