Example 1 with PageKey

use of org.apache.derby.iapi.store.raw.PageKey in project derby by apache.

the class BasePage method copyAndPurge.

/**
 * @see Page#copyAndPurge
 *		@exception StandardException Standard exception policy.
 */
public void copyAndPurge(Page destPage, int src_slot, int num_rows, int dest_slot) throws StandardException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(isLatched());
    }
    if (num_rows <= 0) {
        throw StandardException.newException(SQLState.DATA_NO_ROW_COPIED);
    }
    if (!owner.updateOK()) {
        throw StandardException.newException(SQLState.DATA_CONTAINER_READ_ONLY);
    }
    if ((src_slot < 0) || ((src_slot + num_rows) > recordCount)) {
        throw StandardException.newException(SQLState.DATA_SLOT_NOT_ON_PAGE);
    }
    if (SanityManager.DEBUG) {
        // first copy into the destination page, let it do the work
        // if no problem, then purge from this page
        SanityManager.ASSERT((destPage instanceof BasePage), "must copy from BasePage to BasePage");
    }
    BasePage dpage = (BasePage) destPage;
    // make sure they are from the same container - this means they are of
    // the same size and have the same page and record format.
    // RESOLVE: MT problem ?
    PageKey pageId = getPageId();
    if (!pageId.getContainerId().equals(dpage.getPageId().getContainerId())) {
        throw StandardException.newException(SQLState.DATA_DIFFERENT_CONTAINER, pageId.getContainerId(), dpage.getPageId().getContainerId());
    }
    int[] recordIds = new int[num_rows];
    RawTransaction t = owner.getTransaction();
    // lock the records to be purged and calculate total space needed
    for (int i = 0; i < num_rows; i++) {
        RecordHandle handle = getRecordHandleAtSlot(src_slot + i);
        owner.getLockingPolicy().lockRecordForWrite(t, handle, false, true);
        recordIds[i] = getHeaderAtSlot(src_slot + i).getId();
    }
    // first copy num_rows into destination page
    dpage.copyInto(this, src_slot, num_rows, dest_slot);
    // Now purge num_rows from this page
    // Do NOT purge overflow rows, if any exist.  This operation
    // is called by split, and if the key has overflow, splitting the head
    // page does not copy over the remaining pieces, i.e., the new head page
    // still points to those pieces.
    owner.getActionSet().actionPurge(t, this, src_slot, num_rows, recordIds, true);
}
Also used : PageKey(org.apache.derby.iapi.store.raw.PageKey) RawTransaction(org.apache.derby.iapi.store.raw.xact.RawTransaction) RecordHandle(org.apache.derby.iapi.store.raw.RecordHandle)
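For context, Page#copyAndPurge is the kind of primitive a page split relies on: the tail of an over-full, latched page is copied onto a freshly allocated page and purged from the source in one step. Below is a minimal, hypothetical sketch of such a caller (not taken from Derby's split code); `container`, `fullPage`, and `splitSlot` are assumed to be an open, writable ContainerHandle, a latched page, and the first slot to move.

// Hypothetical helper illustrating a split-style caller of copyAndPurge.
private static void moveTailToNewPage(ContainerHandle container, Page fullPage, int splitSlot) throws StandardException {
    // addPage() returns a new, empty page that is already latched.
    Page newPage = container.addPage();
    try {
        int rowsToMove = fullPage.recordCount() - splitSlot;
        // Copy rows [splitSlot, splitSlot + rowsToMove) into slot 0 of the
        // new page, then purge them from the source page.
        fullPage.copyAndPurge(newPage, splitSlot, rowsToMove, 0);
    } finally {
        newPage.unlatch();
    }
}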

Example 2 with PageKey

use of org.apache.derby.iapi.store.raw.PageKey in project derby by apache.

the class CachedPage method setIdentity.

/*
	** Methods of Cacheable
	*/
/**
 * Find the container and then read the page from that container.
 * <p>
 * This is the way new pages enter the page cache.
 * <p>
 *
 * @return the page object set up for the given identity; higher levels have
 *         already checked that the page number is valid for an open.
 *
 * @exception StandardException Standard Derby policy.
 *
 * @see Cacheable#setIdentity
 */
public Cacheable setIdentity(Object key) throws StandardException {
    if (SanityManager.DEBUG) {
        SanityManager.ASSERT(key instanceof PageKey);
    }
    initialize();
    PageKey newIdentity = (PageKey) key;
    FileContainer myContainer = (FileContainer) containerCache.find(newIdentity.getContainerId());
    setContainerRowCount(myContainer.getEstimatedRowCount(0));
    try {
        if (!alreadyReadPage) {
            // Fill in the pageData array by reading bytes from disk.
            readPage(myContainer, newIdentity);
        } else {
            // pageData array already filled
            alreadyReadPage = false;
        }
        // if the formatID on disk is not the same as this page instance's
        // format id, instantiate the real page object
        int fmtId = getTypeFormatId();
        int onPageFormatId = FormatIdUtil.readFormatIdInteger(pageData);
        if (fmtId != onPageFormatId) {
            return changeInstanceTo(onPageFormatId, newIdentity).setIdentity(key);
        }
        // this is the correct page instance
        initFromData(myContainer, newIdentity);
    } finally {
        containerCache.release(myContainer);
        myContainer = null;
    }
    fillInIdentity(newIdentity);
    initialRowCount = 0;
    return this;
}
Also used : PageKey(org.apache.derby.iapi.store.raw.PageKey)
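setIdentity is not called directly by data-access code; it is driven by the page cache. When a caller asks the cache for a page it does not currently hold, the cache allocates a CachedPage and hands it the PageKey through setIdentity, which triggers the disk read shown above. A rough sketch of that lookup, assuming `pageCache` is the CacheManager keyed by PageKey, `containerKey` identifies an already-open container, and `pageNumber` is the page wanted:

// Sketch only (assumptions noted above): a cache miss on this key ends up
// in CachedPage.setIdentity, which reads the page from its container.
PageKey key = new PageKey(containerKey, pageNumber);
CachedPage page = (CachedPage) pageCache.find(key);
try {
    // ... use the cached page ...
} finally {
    // hand the page back to the cache when done
    pageCache.release(page);
}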

Example 3 with PageKey

use of org.apache.derby.iapi.store.raw.PageKey in project derby by apache.

the class BaseContainer method getDeallocLock.

/**
 *		Get the special dealloc lock on the page; the lock is acquired by the
 *		transaction that owns the container handle.
 *
 *		@exception StandardException Standard Derby error policy
 */
protected boolean getDeallocLock(BaseContainerHandle handle, RecordHandle deallocLock, boolean wait, boolean zeroDuration) throws StandardException {
    // get deallocate lock on page so that the GC won't attempt to
    // free and re-allocate it until the transaction commits
    RawTransaction tran = handle.getTransaction();
    LockingPolicy lp = tran.newLockingPolicy(LockingPolicy.MODE_RECORD, TransactionController.ISOLATION_REPEATABLE_READ, true /* stricterOk */);
    PageKey pkey = new PageKey(identity, deallocLock.getPageNumber());
    if (lp != null) {
        if (zeroDuration)
            return lp.zeroDurationLockRecordForWrite(tran, deallocLock, false, wait);
        else
            return lp.lockRecordForWrite(tran, deallocLock, false, wait);
    } else {
        throw StandardException.newException(SQLState.DATA_CANNOT_GET_DEALLOC_LOCK, pkey);
    }
}
Also used : PageKey(org.apache.derby.iapi.store.raw.PageKey) RawTransaction(org.apache.derby.iapi.store.raw.xact.RawTransaction) LockingPolicy(org.apache.derby.iapi.store.raw.LockingPolicy)
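Here the PageKey is built only so the "cannot get dealloc lock" exception can name the page. A PageKey is simply the pair (container identity, page number), as the small sketch below illustrates; `containerKey` is assumed to be the ContainerKey of an open container.

// Sketch: the two components that make up a PageKey.  Two keys compare
// equal exactly when both the container id and the page number match.
PageKey pkey = new PageKey(containerKey, 3L);
ContainerKey cid = pkey.getContainerId();   // which container the page belongs to
long pageNumber = pkey.getPageNumber();     // which page within that container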

Example 4 with PageKey

use of org.apache.derby.iapi.store.raw.PageKey in project derby by apache.

the class StoredPage method restoreLongRecordFromSlot.

private StoredRecordHeader restoreLongRecordFromSlot(Object[] row, FetchDescriptor fetchDesc, RecordHandle recordToLock, StoredRecordHeader parent_recordHeader) throws StandardException {
    int slot = findRecordById(parent_recordHeader.getOverflowId(), Page.FIRST_SLOT_NUMBER);
    StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
    try {
        int offset_to_row_data = getRecordOffset(slot) + recordHeader.size();
        if (SanityManager.DEBUG) {
            if (getRecordOffset(slot) < (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE)) {
                SanityManager.THROWASSERT("Incorrect offset.  offset = " + getRecordOffset(slot) + ", offset should be < " + "(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) = " + (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) + ", current slot = " + slot + ", total slotsInUse = " + slotsInUse);
            }
        }
        // position the array reading stream at beginning of row data
        // just past the record header.
        ArrayInputStream lrdi = rawDataIn;
        lrdi.setPosition(offset_to_row_data);
        if (fetchDesc != null) {
            if (fetchDesc.getQualifierList() != null) {
                fetchDesc.reset();
            }
            readRecordFromArray(row, (fetchDesc.getValidColumns() == null) ? row.length - 1 : fetchDesc.getMaxFetchColumnId(), fetchDesc.getValidColumnsArray(), fetchDesc.getMaterializedColumns(), lrdi, recordHeader, recordToLock);
        } else {
            readRecordFromArray(row, row.length - 1, (int[]) null, (int[]) null, lrdi, recordHeader, recordToLock);
        }
        return (recordHeader.hasOverflow() ? recordHeader : null);
    } catch (IOException ioe) {
        if (SanityManager.DEBUG) {
            if (pageData == null) {
                SanityManager.DEBUG_PRINT("DEBUG_TRACE", "caught an IOException in restoreRecordFromSlot " + (PageKey) getIdentity() + " slot " + slot + ", pageData is null");
            } else {
                SanityManager.DEBUG_PRINT("DEBUG_TRACE", "caught an IOException in reestoreRecordFromSlot, " + (PageKey) getIdentity() + " slot " + slot + ", pageData.length = " + pageData.length + " pageSize = " + getPageSize());
                SanityManager.DEBUG_PRINT("DEBUG_TRACE", "Hex dump of pageData \n " + "--------------------------------------------------\n" + pagedataToHexDump(pageData) + "--------------------------------------------------\n");
                SanityManager.DEBUG_PRINT("DEBUG_TRACE", "Attempt to dump page " + this.toString());
            }
        }
        // i/o methods on the byte array have thrown an IOException
        throw dataFactory.markCorrupt(StandardException.newException(SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
    }
}
Also used : PageKey(org.apache.derby.iapi.store.raw.PageKey) ByteArrayInputStream(java.io.ByteArrayInputStream) ArrayInputStream(org.apache.derby.iapi.services.io.ArrayInputStream) IOException(java.io.IOException)
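Conceptually, a long row is a linked list of pieces: each piece's record header stores the page number and record id of the next piece. Below is a compressed, illustrative sketch of the traversal that Examples 4 and 5 implement together, written as if inside StoredPage, with latching and error handling trimmed for brevity.

// Illustrative sketch only: follow a long row's overflow chain piece by piece.
StoredRecordHeader hdr = getHeaderAtSlot(slot);          // head piece
while (hdr.hasOverflow()) {
    // the header records where the next piece lives: page number + record id
    StoredPage next = getOverflowPage(hdr.getOverflowPage());
    int nextSlot = next.findRecordById(hdr.getOverflowId(), Page.FIRST_SLOT_NUMBER);
    hdr = next.getHeaderAtSlot(nextSlot);
    // ... read this piece's columns into row[] ...
    next.unlatch();
}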

Example 5 with PageKey

use of org.apache.derby.iapi.store.raw.PageKey in project derby by apache.

the class StoredPage method restoreRecordFromSlot.

/**
 * Read the record at the given slot into the given row.
 * <P>
 * This reads and initializes the columns in the row array from the raw
 * bytes stored in the page associated with the given slot.  If validColumns
 * is non-null then it will only read those columns indicated by the bit
 * set, otherwise it will try to read into every column in row[].
 * <P>
 * If there are more columns than entries in row[] then it just stops after
 * every entry in row[] is full.
 * <P>
 * If there are more entries in row[] than exist on disk, the requested
 * excess columns will be set to null by calling each column object's
 * restoreToNull() routine (i.e., ((Object) column).restoreToNull()).
 * <P>
 * If a qualifier list is provided then the row will only be read from
 * disk if all of the qualifiers evaluate true.  Some of the columns may
 * have been read into row[] in the process of evaluating the qualifier.
 * <p>
 * This routine should only be called on the head portion of a row, it
 * will call a utility routine to read the rest of the row if it is a
 * long row.
 *
 * @param slot              the slot number
 * @param row (out)         filled in sparse row
 * @param fetchDesc         Information describing fetch, including what
 *                          columns to fetch and qualifiers.
 * @param recordToLock      the record handle for the row at top level,
 *                          and is used in OverflowInputStream to lock the
 *                          row for Blobs/Clobs.
 * @param isHeadRow         The row on this page includes the head record
 *                          handle.  Will be false for the overflow portions
 *                          of a "long" row, where columns of a row span
 *                          multiple pages.
 *
 * @return  false if a qualifier_list is provided and the row does not
 *          qualify (no row is read in that case), else true.
 *
 * @exception StandardException Standard Derby error policy
 */
protected boolean restoreRecordFromSlot(int slot, Object[] row, FetchDescriptor fetchDesc, RecordHandle recordToLock, StoredRecordHeader recordHeader, boolean isHeadRow) throws StandardException {
    try {
        int offset_to_row_data = getRecordOffset(slot) + recordHeader.size();
        if (SanityManager.DEBUG) {
            if (getRecordOffset(slot) < (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE)) {
                SanityManager.THROWASSERT("Incorrect offset.  offset = " + getRecordOffset(slot) + ", offset should be < " + "(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) = " + (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) + ", current slot = " + slot + ", total slotsInUse = " + slotsInUse);
            }
            SanityManager.ASSERT(isHeadRow, "restoreRecordFromSlot called on a non-headrow");
            SanityManager.ASSERT(!isOverflowPage(), "restoreRecordFromSlot called on an overflow page.");
        }
        // position the array reading stream at beginning of row data just
        // past the record header.
        ArrayInputStream lrdi = rawDataIn;
        lrdi.setPosition(offset_to_row_data);
        if (!recordHeader.hasOverflow()) {
            if (isHeadRow) {
                if (fetchDesc != null && fetchDesc.getQualifierList() != null) {
                    fetchDesc.reset();
                    if (!qualifyRecordFromSlot(row, offset_to_row_data, fetchDesc, recordHeader, recordToLock)) {
                        return (false);
                    } else {
                        // reset position back for subsequent record read.
                        lrdi.setPosition(offset_to_row_data);
                    }
                }
            }
            // No overflow: read the record into row[] and return.
            if (fetchDesc != null) {
                readRecordFromArray(row, (fetchDesc.getValidColumns() == null) ? row.length - 1 : fetchDesc.getMaxFetchColumnId(), fetchDesc.getValidColumnsArray(), fetchDesc.getMaterializedColumns(), lrdi, recordHeader, recordToLock);
            } else {
                readRecordFromArray(row, row.length - 1, (int[]) null, (int[]) null, lrdi, recordHeader, recordToLock);
            }
            return (true);
        } else {
            if (fetchDesc != null) {
                if (fetchDesc.getQualifierList() != null) {
                    fetchDesc.reset();
                }
                readRecordFromArray(row, (fetchDesc.getValidColumns() == null) ? row.length - 1 : fetchDesc.getMaxFetchColumnId(), fetchDesc.getValidColumnsArray(), fetchDesc.getMaterializedColumns(), lrdi, recordHeader, recordToLock);
            } else {
                readRecordFromArray(row, row.length - 1, (int[]) null, (int[]) null, lrdi, recordHeader, recordToLock);
            }
            // Now walk the chain of overflow portions of the long row,
            // reading each piece into "row".
            while (recordHeader != null) {
                // The record is a long row, loop calling code to read the
                // pieces of the row located in a linked list of rows on
                // overflow pages.
                StoredPage overflowPage = getOverflowPage(recordHeader.getOverflowPage());
                if (SanityManager.DEBUG) {
                    if (overflowPage == null)
                        SanityManager.THROWASSERT("cannot get overflow page");
                }
                // This call reads in the columns of the row that reside
                // on "overflowPage", and if there is another piece it
                // returns the recordHeader of the row on overFlowPage,
                // from which we can find the next piece of the row.  A
                // null return means that we have read in the entire row,
                // and are done.
                recordHeader = overflowPage.restoreLongRecordFromSlot(row, fetchDesc, recordToLock, recordHeader);
                overflowPage.unlatch();
                overflowPage = null;
            }
            if ((fetchDesc != null) && (fetchDesc.getQualifierList() != null)) {
                if (!qualifyRecordFromRow(row, fetchDesc.getQualifierList())) {
                    return (false);
                }
            }
            return (true);
        }
    } catch (IOException ioe) {
        if (SanityManager.DEBUG) {
            if (pageData == null) {
                SanityManager.DEBUG_PRINT("DEBUG_TRACE", "caught an IOException in restoreRecordFromSlot " + (PageKey) getIdentity() + " slot " + slot + ", pageData is null");
            } else {
                SanityManager.DEBUG_PRINT("DEBUG_TRACE", "caught an IOException in reestoreRecordFromSlot, " + (PageKey) getIdentity() + " slot " + slot + ", pageData.length = " + pageData.length + " pageSize = " + getPageSize());
                SanityManager.DEBUG_PRINT("DEBUG_TRACE", "Hex dump of pageData \n " + "--------------------------------------------------\n" + pagedataToHexDump(pageData) + "--------------------------------------------------\n");
                SanityManager.DEBUG_PRINT("DEBUG_TRACE", "Attempt to dump page " + this.toString());
            }
        }
        // i/o methods on the byte array have thrown an IOException
        throw dataFactory.markCorrupt(StandardException.newException(SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
    }
}
Also used : PageKey(org.apache.derby.iapi.store.raw.PageKey) ByteArrayInputStream(java.io.ByteArrayInputStream) ArrayInputStream(org.apache.derby.iapi.services.io.ArrayInputStream) IOException(java.io.IOException)
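The column selection described in the javadoc above is driven by the FetchDescriptor's valid-column bit set: only columns whose bit is set are read from the page. A short sketch of that selection using FormatableBitSet follows (how the bit set is wrapped into a FetchDescriptor is not shown on this page and is omitted here).

// Sketch of the column-selection side of a fetch: one bit per column
// position; only set bits are materialized into row[].
FormatableBitSet validColumns = new FormatableBitSet(5); // row has 5 columns
validColumns.set(0);   // fetch column 0
validColumns.set(3);   // fetch column 3
// Columns 1, 2 and 4 are left untouched in row[]; if row[] has more entries
// than exist on disk, the excess entries are restored to null, as the
// javadoc above describes.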

Aggregations

PageKey (org.apache.derby.iapi.store.raw.PageKey) 23
RecordHandle (org.apache.derby.iapi.store.raw.RecordHandle) 6
RawTransaction (org.apache.derby.iapi.store.raw.xact.RawTransaction) 6
IOException (java.io.IOException) 3
ArrayInputStream (org.apache.derby.iapi.services.io.ArrayInputStream) 3
StandardException (org.apache.derby.shared.common.error.StandardException) 3
ByteArrayInputStream (java.io.ByteArrayInputStream) 2
LockingPolicy (org.apache.derby.iapi.store.raw.LockingPolicy) 2
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 1
DynamicByteArrayOutputStream (org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream) 1
FormatableBitSet (org.apache.derby.iapi.services.io.FormatableBitSet) 1
ContainerHandle (org.apache.derby.iapi.store.raw.ContainerHandle) 1
Page (org.apache.derby.iapi.store.raw.Page) 1
RawContainerHandle (org.apache.derby.iapi.store.raw.data.RawContainerHandle) 1
InterruptDetectedException (org.apache.derby.iapi.util.InterruptDetectedException) 1
BasePage (org.apache.derby.impl.store.raw.data.BasePage) 1