
Example 1 with LogRecord

use of org.apache.derby.impl.store.raw.log.LogRecord in project derby by apache.

the class Scan method getNextRecordBackward.

/**
 *		Read the previous log record.
 *		Switch to a previous log file if necessary and
 *		resize the input stream byte array if necessary.
 *		@see StreamLogScan#getNextRecord
 *
 *		Side effects include:
 *				on a successful read, setting currentInstant.
 *				on a log file switch, setting currentLogFileNumber.
 *
 *		@return the previous LogRecord, or null if the end of the
 *		scan has been reached.
 */
private LogRecord getNextRecordBackward(ArrayInputStream input, TransactionId tranId, int groupmask) throws StandardException, IOException, ClassNotFoundException {
    if (SanityManager.DEBUG)
        SanityManager.ASSERT(scanDirection == BACKWARD, "can only called by backward scan");
    // scan is positioned just past the last byte of the record, or
    // right at the beginning of the file (end of the file header)
    // may need to switch log file
    boolean candidate;
    // if we have filtering, peek at the group and/or the transaction id,
    // do them in one read rather than 2 reads.
    int peekAmount = LogRecord.formatOverhead() + LogRecord.maxGroupStoredSize();
    if (tranId != null)
        peekAmount += LogRecord.maxTransactionIdStoredSize(tranId);
    // the number of bytes actually read
    int readAmount;
    LogRecord lr;
    long curpos = scan.getFilePointer();
    do {
        // this log record is a candidate unless proven otherwise
        candidate = true;
        lr = null;
        readAmount = -1;
        if (curpos == LogToFile.LOG_FILE_HEADER_SIZE) {
            // if stopAt is in this log file, the scan has already gone past it
            if (stopAt != LogCounter.INVALID_LOG_INSTANT && LogCounter.getLogFileNumber(stopAt) == currentLogFileNumber) {
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "stopping at " + currentLogFileNumber);
                    }
                }
                // no more log records
                return null;
            }
            // figure out where the last log record is in the previous
            // log file
            scan.seek(LogToFile.LOG_FILE_HEADER_PREVIOUS_LOG_INSTANT_OFFSET);
            long previousLogInstant = scan.readLong();
            scan.close();
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(previousLogInstant != LogCounter.INVALID_LOG_INSTANT, "scanning backward beyond the first log file");
                if (currentLogFileNumber != LogCounter.getLogFileNumber(previousLogInstant) + 1)
                    SanityManager.THROWASSERT("scanning backward but get incorrect log file number " + "expected " + (currentLogFileNumber - 1) + "get " + LogCounter.getLogFileNumber(previousLogInstant));
                SanityManager.ASSERT(LogCounter.getLogFilePosition(previousLogInstant) > LogToFile.LOG_FILE_HEADER_SIZE, "scanning backward encounter completely empty log file");
                SanityManager.DEBUG(LogToFile.DBG_FLAG, "scanning backwards from log file " + currentLogFileNumber + ", switch to (" + LogCounter.getLogFileNumber(previousLogInstant) + "," + LogCounter.getLogFilePosition(previousLogInstant) + ")");
            }
            // log file switch, set this.currentLogFileNumber
            currentLogFileNumber = LogCounter.getLogFileNumber(previousLogInstant);
            scan = logFactory.getLogFileAtPosition(previousLogInstant);
            // scan is located right past the last byte of the last log
            // record in the previous log file.  currentLogFileNumber is
            // set.  We asserted that the scan is not located right at the
            // end of the file header, in other words, there is at least
            // one log record in this log file.
            curpos = scan.getFilePointer();
            // defensive check: if the previous log file somehow contains no log
            // records, loop again to switch to the file before it, which
            // happens to avoid any recovery issues.
            if (curpos == LogToFile.LOG_FILE_HEADER_SIZE)
                continue;
        }
        scan.seek(curpos - 4);
        // get the length after the log record
        int recordLength = scan.readInt();
        // calculate where this log record started: account for the eight-byte
        // log instant, the four-byte length at the front, and the four bytes
        // we just read
        long recordStartPosition = curpos - recordLength - LogToFile.LOG_RECORD_OVERHEAD;
        if (SanityManager.DEBUG) {
            if (recordStartPosition < LogToFile.LOG_FILE_HEADER_SIZE)
                SanityManager.THROWASSERT("next position " + recordStartPosition + " recordLength " + recordLength + " current file position " + scan.getFilePointer());
            scan.seek(recordStartPosition);
            // read the length before the log record and check it against the
            // length after the log record
            int checkLength = scan.readInt();
            if (checkLength != recordLength) {
                long inst = LogCounter.makeLogInstantAsLong(currentLogFileNumber, recordStartPosition);
                throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_RECORD_CORRUPTED, checkLength, recordLength, inst, currentLogFileNumber));
            }
        } else {
            // skip over the length field (insane build: no verification)
            scan.seek(recordStartPosition + 4);
        }
        // scan is positioned just before the log instant
        // read the current log instant - this is the currentInstant if we have not
        // exceeded the scan limit
        currentInstant = scan.readLong();
        if (SanityManager.DEBUG) {
            // sanity check the current instant against the scan position
            if (LogCounter.getLogFileNumber(currentInstant) != currentLogFileNumber || LogCounter.getLogFilePosition(currentInstant) != recordStartPosition)
                SanityManager.THROWASSERT("Wrong LogInstant on log record " + LogCounter.toDebugString(currentInstant) + " version real position (" + currentLogFileNumber + "," + recordStartPosition + ")");
        }
        // check the scan limit: if we have gone past stopAt, nothing more can be read
        if (currentInstant < stopAt && stopAt != LogCounter.INVALID_LOG_INSTANT) {
            currentInstant = LogCounter.INVALID_LOG_INSTANT;
            // we went past the stopAt
            return null;
        }
        byte[] data = input.getData();
        if (data.length < recordLength) {
            // make a new array of sufficient size and reset the array
            // in the input stream
            data = new byte[recordLength];
            input.setData(data);
        }
        // read in the log record, decrypting it if the database is encrypted.
        if (logFactory.databaseEncrypted()) {
            scan.readFully(data, 0, recordLength);
            int len = logFactory.decrypt(data, 0, recordLength, data, 0);
            if (SanityManager.DEBUG)
                SanityManager.ASSERT(len == recordLength);
            input.setLimit(0, recordLength);
        } else // no need to decrypt, only get the group and tid if we filter
        {
            if (groupmask == 0 && tranId == null) {
                // no filter, get the whole thing
                scan.readFully(data, 0, recordLength);
                input.setLimit(0, recordLength);
            } else {
                // Read only enough so that the group and the transaction id
                // are in the data buffer.  The group is stored as a compressed
                // int and the transaction id in a format of unknown length,
                // so read the smaller of peekAmount and recordLength.
                readAmount = (recordLength > peekAmount) ? peekAmount : recordLength;
                // in the data buffer, we now have enough to peek
                scan.readFully(data, 0, readAmount);
                input.setLimit(0, readAmount);
            }
        }
        lr = (LogRecord) input.readObject();
        // Checksum log records are skipped during backward scans; they are
        // used only by the forward scan during recovery.
        if (lr.isChecksum()) {
            candidate = false;
        } else if (groupmask != 0 || tranId != null) {
            if (candidate && groupmask != 0 && (groupmask & lr.group()) == 0)
                // no match, throw this log record out
                candidate = false;
            if (candidate && tranId != null) {
                TransactionId tid = lr.getTransactionId();
                // throw out this log record if the transaction id does not match
                if (!tid.equals(tranId))
                    candidate = false;
            }
            // if the record is still a candidate, read the rest of it; when the
            // database is encrypted the whole record was already read during
            // decryption.
            if (candidate && !logFactory.databaseEncrypted()) {
                // read the rest of the log into the buffer
                if (SanityManager.DEBUG)
                    SanityManager.ASSERT(readAmount > 0);
                if (readAmount < recordLength) {
                    // Need to remember where we are because the log
                    // record may have read part of it off the input
                    // stream already and that position is lost when we
                    // set limit again.
                    int inputPosition = input.getPosition();
                    scan.readFully(data, readAmount, recordLength - readAmount);
                    input.setLimit(0, recordLength);
                    input.setPosition(inputPosition);
                }
            }
        }
        // go back to the start of the log record so that the next time
        // this method is called, it is positioned right past the last byte
        // of the record.
        curpos = recordStartPosition;
        scan.seek(curpos);
    } while (candidate == false);
    return lr;
}
Also used : LogRecord(org.apache.derby.impl.store.raw.log.LogRecord) TransactionId(org.apache.derby.iapi.store.raw.xact.TransactionId)
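The backward scan above depends entirely on the record framing described in its comments: a four-byte length, an eight-byte log instant, the record data, and a trailing four-byte copy of the length (16 bytes of overhead in total). The following standalone sketch is illustrative code, not part of Derby; it shows how that trailing length lets a reader walk a buffer of framed records from the end toward the beginning, using the same arithmetic getNextRecordBackward uses to compute recordStartPosition.

import java.nio.ByteBuffer;

public class FramedLogDemo {

    // leading length + instant + trailing length, matching LOG_RECORD_OVERHEAD
    static final int OVERHEAD = 4 + 8 + 4;

    static void append(ByteBuffer log, long instant, byte[] data) {
        log.putInt(data.length);   // leading length
        log.putLong(instant);      // log instant
        log.put(data);             // record payload
        log.putInt(data.length);   // trailing length, read by the backward scan
    }

    public static void main(String[] args) {
        ByteBuffer log = ByteBuffer.allocate(1024);
        append(log, 1L, "first".getBytes());
        append(log, 2L, "second".getBytes());
        int end = log.position();

        // Walk backward: read the trailing length just before the current
        // position, compute where the record starts, read it, and repeat.
        int pos = end;
        while (pos > 0) {
            int trailingLen = log.getInt(pos - 4);
            int start = pos - trailingLen - OVERHEAD;
            long instant = log.getLong(start + 4);
            byte[] data = new byte[trailingLen];
            log.position(start + 12);
            log.get(data);
            System.out.println("instant=" + instant + " data=" + new String(data));
            pos = start;   // next iteration reads the record before this one
        }
    }
}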

Example 2 with LogRecord

use of org.apache.derby.impl.store.raw.log.LogRecord in project derby by apache.

the class Scan method getNextRecordForward.

/**
 *		Read the next log record.
 *		Switch to the next log file if necessary and
 *		resize the input stream byte array if necessary.
 *		@see StreamLogScan#getNextRecord
 *
 *		Side effects include:
 *				on a successful read, setting currentInstant, knownGoodLogEnd
 *				on a log file switch, setting currentLogFileNumber, currentLogFileLength.
 *				on detecting a fuzzy log end that needs clearing, it will call
 *				logFactory to clear the fuzzy log end.
 *
 *		@return the next LogRecord, or null if the end of the
 *		scan has been reached.
 */
private LogRecord getNextRecordForward(ArrayInputStream input, TransactionId tranId, int groupmask) throws StandardException, IOException, ClassNotFoundException {
    if (SanityManager.DEBUG)
        SanityManager.ASSERT(scanDirection == FORWARD, "can only called by forward scan");
    // NOTE:
    // 
    // if forward scan, scan is positioned at the first byte of the
    // next record, or the end of file - note that the 'end of file'
    // is defined at the time the scan is initialized.  If we are
    // on the current log file, it may well have grown by now...
    // 
    // This is not a problem in reality because the only forward
    // scan on the log now is recovery redo and the log does not
    // grow.  If in the future, a forward scan of the log is used
    // for some other reasons, need to keep this in mind.
    // 
    // first we need to make sure the entire log record is on the
    // log, or else this is a fuzzy log end.
    // RESOLVE: can get this from knownGoodLogEnd if this is not the first
    // time getNext is called.  Probably just as fast to call
    // scan.getFilePointer though...
    long recordStartPosition = scan.getFilePointer();
    boolean candidate;
    // if we have filtering, peek at the group and/or the transaction id,
    // do them in one read rather than 2 reads.
    int peekAmount = LogRecord.formatOverhead() + LogRecord.maxGroupStoredSize();
    if (tranId != null)
        peekAmount += LogRecord.maxTransactionIdStoredSize(tranId);
    // the number of bytes actually read
    int readAmount;
    LogRecord lr;
    do {
        // this log record is a candidate unless proven otherwise
        candidate = true;
        lr = null;
        readAmount = -1;
        // first check whether fewer than four bytes remain before the known
        // end of the file; if so we cannot even read the length field and the
        // scan has reached the end of the log.
        if (recordStartPosition + 4 > currentLogFileLength) {
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                    SanityManager.DEBUG(LogToFile.DBG_FLAG, "detected fuzzy log end on log file " + currentLogFileNumber + " record start position " + recordStartPosition + " file length " + currentLogFileLength);
                }
            }
            // if the scan did not stop exactly at the end of the file, the
            // last record was only partially written: a fuzzy log end.
            // Otherwise the log simply ends without an end marker.
            if (recordStartPosition != currentLogFileLength)
                fuzzyLogEnd = true;
            // either way, the scan stops right here
            return null;
        }
        // read in the length before the log record
        int recordLength = scan.readInt();
        while (recordLength == 0 || recordStartPosition + recordLength + LogToFile.LOG_RECORD_OVERHEAD > currentLogFileLength) {
            if (recordLength != 0) {
                // a non-zero length that does not fit in the file is a fuzzy log end
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "detected fuzzy log end on log file " + currentLogFileNumber + " record start position " + recordStartPosition + " file length " + currentLogFileLength + " recordLength=" + recordLength);
                    }
                }
                fuzzyLogEnd = true;
                scan.close();
                scan = null;
                return null;
            }
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                    if (recordStartPosition + 4 == currentLogFileLength) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "detected proper log end on log file " + currentLogFileNumber);
                    } else {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "detected zapped log end on log file " + currentLogFileNumber + " end marker at " + recordStartPosition + " real end at " + currentLogFileLength);
                    }
                }
            }
            // if stopAt is in this log file, we would go past it by switching
            // files, so stop the scan here
            if (stopAt != LogCounter.INVALID_LOG_INSTANT && LogCounter.getLogFileNumber(stopAt) == currentLogFileNumber) {
                return null;
            }
            // we have a log end marker and we don't want to stop yet:
            // switch to the next log file
            scan.close();
            // set this.currentLogFileNumber
            scan = logFactory.getLogFileAtBeginning(++currentLogFileNumber);
            if (scan == null) {
                // we have seen the last log file
                return null;
            }
            // scan is positioned just past the log file header
            recordStartPosition = scan.getFilePointer();
            // Verify that the header of the new log file refers
            // to the end of the log record of the previous file
            // (Rest of header has been verified by getLogFileAtBeginning)
            scan.seek(LogToFile.LOG_FILE_HEADER_PREVIOUS_LOG_INSTANT_OFFSET);
            long previousLogInstant = scan.readLong();
            if (previousLogInstant != knownGoodLogEnd) {
                // the new file's header does not point back to the known end of
                // the previous file; this also detects an error in the other
                // fields of the header.  Treat it as the end of the log.
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "log file " + currentLogFileNumber + ": previous log record: " + previousLogInstant + " known previous log record: " + knownGoodLogEnd);
                    }
                }
                return null;
            }
            scan.seek(recordStartPosition);
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                    SanityManager.DEBUG(LogToFile.DBG_FLAG, "switched to next log file " + currentLogFileNumber);
                }
            }
            // Advance knownGoodLogEnd to make sure that if this
            // log file is the last log file and empty, logging
            // continues in this file, not the old file.
            knownGoodLogEnd = LogCounter.makeLogInstantAsLong(currentLogFileNumber, recordStartPosition);
            // set this.currentLogFileLength
            currentLogFileLength = scan.length();
            if (recordStartPosition + 4 >= currentLogFileLength) {
                // empty log file
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "log file " + currentLogFileNumber + " is empty");
                    }
                }
                return null;
            }
            // we have successfully switched to the next log file.
            // scan is positioned just before the next log record
            // see if this one is written in entirety
            recordLength = scan.readInt();
        }
        // we know the entire log record is on this log file
        // read the current log instant
        currentInstant = scan.readLong();
        /* Check whether the current instant is less than the last known good
         * one.  This can happen if the system crashed before the log instant
         * was completely written.  A partially written instant will be less
         * than the last one and marks the last record that was supposed to be
         * written.  Since preallocated files are currently filled with zeros,
         * this check holds good.
         * Note: for non-preallocated files the earlier check against the log
         * file length would have found the end, but for preallocated files the
         * file length is not sufficient to find the log end.  This check is a
         * must to find the end in preallocated log files.
         */
        if (currentInstant < knownGoodLogEnd) {
            fuzzyLogEnd = true;
            return null;
        }
        // sanity check it
        if (SanityManager.DEBUG) {
            if (LogCounter.getLogFileNumber(currentInstant) != currentLogFileNumber || LogCounter.getLogFilePosition(currentInstant) != recordStartPosition)
                SanityManager.THROWASSERT("Wrong LogInstant on log record " + LogCounter.toDebugString(currentInstant) + " version real position (" + currentLogFileNumber + "," + recordStartPosition + ")");
        }
        // check the scan limit: if we have gone past stopAt, nothing more can be read
        if (stopAt != LogCounter.INVALID_LOG_INSTANT && currentInstant > stopAt) {
            currentInstant = LogCounter.INVALID_LOG_INSTANT;
            // we went past the stopAt
            return null;
        }
        // read in the log record
        byte[] data = input.getData();
        if (data.length < recordLength) {
            // make a new array of sufficient size and reset the array
            // in the input stream
            data = new byte[recordLength];
            input.setData(data);
        }
        if (logFactory.databaseEncrypted()) {
            scan.readFully(data, 0, recordLength);
            int len = logFactory.decrypt(data, 0, recordLength, data, 0);
            if (SanityManager.DEBUG)
                SanityManager.ASSERT(len == recordLength);
            input.setLimit(0, len);
        } else // no need to decrypt, only get the group and tid if we filter
        {
            if (groupmask == 0 && tranId == null) {
                // no filter, get the whole thing
                scan.readFully(data, 0, recordLength);
                input.setLimit(0, recordLength);
            } else {
                // Read only enough so that the group and the transaction id
                // are in the data buffer.  The group is stored as a compressed
                // int and the transaction id in a format of unknown length,
                // so read the smaller of peekAmount and recordLength.
                readAmount = (recordLength > peekAmount) ? peekAmount : recordLength;
                // in the data buffer, we now have enough to peek
                scan.readFully(data, 0, readAmount);
                input.setLimit(0, readAmount);
            }
        }
        lr = (LogRecord) input.readObject();
        if (groupmask != 0 || tranId != null) {
            if (groupmask != 0 && (groupmask & lr.group()) == 0)
                // no match, throw this log record out
                candidate = false;
            if (candidate && tranId != null) {
                TransactionId tid = lr.getTransactionId();
                // throw out this log record if the transaction id does not match
                if (!tid.equals(tranId))
                    candidate = false;
            }
            // if the record is still a candidate, read the rest of it; when the
            // database is encrypted the whole record was already read during
            // decryption.
            if (candidate && !logFactory.databaseEncrypted()) {
                // read the rest of the log into the buffer
                if (SanityManager.DEBUG)
                    SanityManager.ASSERT(readAmount > 0);
                if (readAmount < recordLength) {
                    // Need to remember where we are because the log
                    // record may have read part of it off the input
                    // stream already and that position is lost when we
                    // set limit again.
                    int inputPosition = input.getPosition();
                    scan.readFully(data, readAmount, recordLength - readAmount);
                    input.setLimit(0, recordLength);
                    input.setPosition(inputPosition);
                }
            }
        }
        // for a skipped (filtered-out) log record only part of the data has
        // been read into the buffer, so seek to the length field stored at
        // the end of the record before reading it
        if (!candidate)
            scan.seek(recordStartPosition + recordLength + LogToFile.LOG_RECORD_OVERHEAD - 4);
        int checkLength = scan.readInt();
        if (checkLength != recordLength && checkLength < recordLength) {
            // The lengths written before and after the log record disagree and
            // the trailing one is smaller: the record was half written before
            // the crash, so treat this as the end of the log.  (Note that the
            // second half of the condition above makes the else branch below
            // unreachable.)
            if (checkLength < recordLength) {
                fuzzyLogEnd = true;
                return null;
            } else {
                if (SanityManager.DEBUG) {
                    throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_RECORD_CORRUPTED, checkLength, recordLength, currentInstant, currentLogFileNumber));
                }
            // In the insane (non-debug) case do nothing; hope that only the
            // length field is incorrect and the system can proceed.
            }
        }
        // next record start position is right after this record
        recordStartPosition += recordLength + LogToFile.LOG_RECORD_OVERHEAD;
        knownGoodLogEnd = LogCounter.makeLogInstantAsLong(currentLogFileNumber, recordStartPosition);
        if (SanityManager.DEBUG) {
            if (recordStartPosition != scan.getFilePointer())
                SanityManager.THROWASSERT("calculated end " + recordStartPosition + " != real end " + scan.getFilePointer());
        } else {
            // seek to the start of the next log record
            scan.seek(recordStartPosition);
        }
        /**
         * If the current log record is a checksum log record, use the
         * information in it to validate the data in the log file: compare the
         * checksum stored in the checksum log record with a checksum
         * recalculated over the specified length of data in the log file.
         * The checksum values should match unless the write was incomplete
         * before the crash.
         */
        if (lr.isChecksum()) {
            // checksum log record should not be returned to the logger recovery redo
            // routines, it is just used to identify the incomplete log writes.
            candidate = false;
            Loggable op = lr.getLoggable();
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY) || SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                    SanityManager.DEBUG(LogToFile.DBG_FLAG, "scanned " + "Null" + " : " + op + " instant = " + LogCounter.toDebugString(currentInstant) + " logEnd = " + LogCounter.toDebugString(knownGoodLogEnd));
            }
            ChecksumOperation clop = (ChecksumOperation) op;
            int ckDataLength = clop.getDataLength();
            // resize the buffer to be size of checksum data length if required.
            if (data.length < ckDataLength) {
                // make a new array of sufficient size and reset the array
                // in the input stream
                data = new byte[ckDataLength];
                input.setData(data);
                input.setLimit(0, ckDataLength);
            }
            boolean validChecksum = false;
            // only verify the checksum if the expected amount of checksummed
            // data lies within the file; this identifies any incomplete
            // out-of-order writes.
            if ((recordStartPosition + ckDataLength) <= currentLogFileLength) {
                // read the data into the buffer
                scan.readFully(data, 0, ckDataLength);
                // verify the checksum
                if (clop.isChecksumValid(data, 0, ckDataLength))
                    validChecksum = true;
            }
            if (!validChecksum) {
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "detected fuzzy log end on log file while doing checksum checks " + currentLogFileNumber + " checksum record start position " + recordStartPosition + " file length " + currentLogFileLength + " checksumDataLength=" + ckDataLength);
                    }
                }
                fuzzyLogEnd = true;
                scan.close();
                scan = null;
                return null;
            }
            // reset the scan to the start of the next log record
            scan.seek(recordStartPosition);
        }
    } while (candidate == false);
    return lr;
}
Also used : Loggable(org.apache.derby.iapi.store.raw.Loggable) LogRecord(org.apache.derby.impl.store.raw.log.LogRecord) TransactionId(org.apache.derby.iapi.store.raw.xact.TransactionId)
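getNextRecordForward treats three situations as the end of the log: not even four bytes left for the length field, a zero length (zero-filled preallocated space or the end marker), and a record whose payload plus 16 bytes of overhead would run past the known file length or whose trailing length disagrees with the leading one. The sketch below is a simplified stand-in rather than Derby code; it bundles those checks into one predicate over an in-memory buffer that uses the same framing.

import java.nio.ByteBuffer;

public class LogEndCheckDemo {

    // Framing assumed from the scan's comments: 4-byte length, 8-byte instant,
    // payload, 4-byte trailing length (16 bytes of overhead in total).
    static boolean completeRecordAt(ByteBuffer log, int pos, int fileLength) {
        if (pos + 4 > fileLength) {
            return false;                       // no room left even for the length field
        }
        int len = log.getInt(pos);
        if (len == 0) {
            return false;                       // zeroed / preallocated space: treat as log end
        }
        if (pos + len + 16 > fileLength) {
            return false;                       // record runs past the file end: fuzzy log end
        }
        int trailing = log.getInt(pos + 12 + len);
        return trailing == len;                 // mismatched lengths: half-written record
    }

    public static void main(String[] args) {
        ByteBuffer log = ByteBuffer.allocate(64);
        byte[] payload = "hello".getBytes();
        log.putInt(payload.length);             // leading length
        log.putLong(1L);                        // log instant
        log.put(payload);                       // data
        log.putInt(payload.length);             // trailing length
        int goodEnd = log.position();

        System.out.println(completeRecordAt(log, 0, goodEnd));      // true: intact record
        System.out.println(completeRecordAt(log, 0, goodEnd - 2));  // false: truncated, fuzzy end
        System.out.println(completeRecordAt(log, goodEnd, 64));     // false: zeroed tail
    }
}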

Example 3 with LogRecord

use of org.apache.derby.impl.store.raw.log.LogRecord in project derby by apache.

the class FileLogger method readLogRecord.

/**
 *		Read the next log record from the scan.
 *
 *		<P>MT - caller must provide synchronization (right now, it is only
 *		called in recovery to find the checkpoint log record.  When this method
 *		is called by a more general audience, MT must be revisited).
 *
 *		@param scan an opened log scan
 *		@param size estimated size of the log record
 *
 *		@return the log operation that is the next in the scan, or null if there
 *		are no more log operations in the log scan
 *
 *		@exception IOException	Error reading the log file
 *		@exception StandardException Standard Derby error policy
 *		@exception ClassNotFoundException log corrupted
 */
protected Loggable readLogRecord(StreamLogScan scan, int size) throws IOException, StandardException, ClassNotFoundException {
    Loggable lop = null;
    ArrayInputStream logInputBuffer = new ArrayInputStream(new byte[size]);
    LogRecord record = scan.getNextRecord(logInputBuffer, null, 0);
    if (record != null)
        lop = record.getLoggable();
    return lop;
}
Also used : Loggable(org.apache.derby.iapi.store.raw.Loggable) LogRecord(org.apache.derby.impl.store.raw.log.LogRecord) ArrayInputStream(org.apache.derby.iapi.services.io.ArrayInputStream)
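readLogRecord allocates an ArrayInputStream at an estimated size and relies on the scan to grow the backing array when a record turns out to be larger. A generic, hedged version of that "estimate, then grow on demand" pattern is sketched below using plain java.io types; the class and method names are illustrative only and are not part of Derby.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

public class ReusableRecordReader {

    private byte[] buffer;

    public ReusableRecordReader(int estimatedSize) {
        // start with the caller's size estimate, like the 'size' argument above
        buffer = new byte[estimatedSize];
    }

    /**
     * Reads one length-prefixed record into the reusable buffer, growing the
     * buffer if the record is larger than the estimate, and returns a stream
     * limited to the record bytes (the moral equivalent of setLimit).
     * Returns null when the underlying stream is exhausted.
     */
    public ByteArrayInputStream next(DataInputStream in) throws IOException {
        int length;
        try {
            length = in.readInt();
        } catch (EOFException eof) {
            return null;                       // no more records in the stream
        }
        if (buffer.length < length) {
            buffer = new byte[length];         // resize, as the scan does via setData()
        }
        in.readFully(buffer, 0, length);
        return new ByteArrayInputStream(buffer, 0, length);
    }
}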

Example 4 with LogRecord

use of org.apache.derby.impl.store.raw.log.LogRecord in project derby by apache.

the class FileLogger method reprepare.

/**
 * During recovery re-prepare a transaction.
 * <p>
 * After redo() and undo(), this routine is called on all outstanding
 * in-doubt (prepared) transactions.  This routine re-acquires all
 * logical write locks for operations in the xact, and then modifies
 * the transaction table entry to make the transaction look as if it
 * had just been prepared following startup after recovery.
 * <p>
 *
 * @param t                 is the transaction performing the re-prepare
 * @param prepareId         is the transaction ID to be re-prepared
 * @param prepareStopAt     is the log instant (inclusive) at which the
 *                          re-prepare should stop.
 * @param prepareStartAt    is the log instant (inclusive) where re-prepare
 *                          should begin; this is normally the log instant
 *                          of the last log record of the transaction that
 *                          is to be re-prepared.  If null, re-prepare
 *                          starts from the end of the log.
 *
 * @exception  StandardException  Standard exception policy.
 */
public void reprepare(RawTransaction t, TransactionId prepareId, LogInstant prepareStopAt, LogInstant prepareStartAt) throws StandardException {
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
            if (prepareStartAt != null) {
                SanityManager.DEBUG(LogToFile.DBG_FLAG, "----------------------------------------------------\n" + "\nBegin of RePrepare : " + prepareId.toString() + "start at " + prepareStartAt.toString() + " stop at " + prepareStopAt.toString() + "\n----------------------------------------------------\n");
            } else {
                SanityManager.DEBUG(LogToFile.DBG_FLAG, "----------------------------------------------------\n" + "\nBegin of Reprepare: " + prepareId.toString() + "start at end of log stop at " + prepareStopAt.toString() + "\n----------------------------------------------------\n");
            }
        }
    }
    // statistics
    int clrskipped = 0;
    int logrecordseen = 0;
    RePreparable lop = null;
    // stream to read the log record - initial size 4096, scanLog needs
    // to resize if the log record is larger than that.
    ArrayInputStream rawInput = null;
    try {
        StreamLogScan scanLog;
        if (prepareStartAt == null) {
            // don't know where to start, scan from end of log
            scanLog = (StreamLogScan) logFactory.openBackwardsScan(prepareStopAt);
        } else {
            if (prepareStartAt.lessThan(prepareStopAt)) {
                // nothing to prepare!
                return;
            }
            scanLog = (StreamLogScan) logFactory.openBackwardsScan(((LogCounter) prepareStartAt).getValueAsLong(), prepareStopAt);
        }
        if (SanityManager.DEBUG)
            SanityManager.ASSERT(scanLog != null, "cannot open log for prepare");
        rawInput = new ArrayInputStream(new byte[4096]);
        LogRecord record;
        while ((record = scanLog.getNextRecord(rawInput, prepareId, 0)) != null) {
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(record.getTransactionId().equals(prepareId), "getNextRecord return unqualified log rec for prepare");
            }
            logrecordseen++;
            if (record.isCLR()) {
                clrskipped++;
                // the loggable is still in the input stream, get rid of it
                record.skipLoggable();
                // read the prepareInstant
                long prepareInstant = rawInput.readLong();
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "Skipping over CLRs, reset scan to " + LogCounter.toDebugString(prepareInstant));
                    }
                }
                scanLog.resetPosition(new LogCounter(prepareInstant));
                continue;
            }
            if (record.requiresPrepareLocks()) {
                lop = record.getRePreparable();
            } else {
                continue;
            }
            if (lop != null) {
                // Reget locks based on log record.  reclaim all locks with
                // a serializable locking policy, since we are only
                // reclaiming write locks, isolation level does not matter
                // much.
                lop.reclaimPrepareLocks(t, t.newLockingPolicy(LockingPolicy.MODE_RECORD, TransactionController.ISOLATION_REPEATABLE_READ, true));
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "Reprepare log record at instant " + scanLog.getInstant() + " : " + lop);
                    }
                }
            }
        }
    } catch (ClassNotFoundException cnfe) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_CORRUPTED, cnfe));
    } catch (IOException ioe) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_READ_LOG_FOR_UNDO, ioe));
    } catch (StandardException se) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_UNDO_FAILED, se, prepareId, lop, (Object) null));
    } finally {
        if (rawInput != null) {
            try {
                rawInput.close();
            } catch (IOException ioe) {
                throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_READ_LOG_FOR_UNDO, ioe, prepareId));
            }
        }
    }
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
            SanityManager.DEBUG(LogToFile.DBG_FLAG, "Finish prepare" + ", clr skipped = " + clrskipped + ", record seen = " + logrecordseen + "\n");
        }
    }
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
            SanityManager.DEBUG(LogToFile.DBG_FLAG, "----------------------------------------------------\n" + "End of recovery rePrepare\n" + ", clr skipped = " + clrskipped + ", record seen = " + logrecordseen + "\n----------------------------------------------------\n");
        }
    }
}
Also used : StandardException(org.apache.derby.shared.common.error.StandardException) RePreparable(org.apache.derby.iapi.store.raw.RePreparable) LogRecord(org.apache.derby.impl.store.raw.log.LogRecord) ArrayInputStream(org.apache.derby.iapi.services.io.ArrayInputStream) LogCounter(org.apache.derby.impl.store.raw.log.LogCounter) IOException(java.io.IOException) StreamLogScan(org.apache.derby.impl.store.raw.log.StreamLogScan)
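reprepare walks the transaction's log backward and, whenever it meets a compensation (CLR) record, reads the instant stored in it and resets the scan there, so compensated work is skipped instead of being re-locked. The toy walk below illustrates only that skipping idea; it does not reproduce StreamLogScan.resetPosition semantics, and the record layout is invented for the example.

import java.util.Arrays;
import java.util.List;

public class ClrSkipDemo {

    static final class Rec {
        final long instant;          // position of the record in the "log"
        final boolean isClr;         // compensation record?
        final long compensated;      // for a CLR: instant of the record it undoes
        final String payload;

        Rec(long instant, boolean isClr, long compensated, String payload) {
            this.instant = instant;
            this.isClr = isClr;
            this.compensated = compensated;
            this.payload = payload;
        }
    }

    public static void main(String[] args) {
        // A tiny per-transaction history, oldest first.  The CLR at instant 40
        // undoes the record written at instant 20.
        List<Rec> log = Arrays.asList(
                new Rec(10, false, 0, "insert row A"),
                new Rec(20, false, 0, "insert row B"),
                new Rec(40, true, 20, "CLR compensating instant 20"));

        // Walk backward, newest to oldest, the way reprepare's scan does.
        int i = log.size() - 1;
        while (i >= 0) {
            Rec r = log.get(i);
            if (r.isClr) {
                // skip everything back to and including the compensated record,
                // then resume with whatever was written before it
                long target = r.compensated;
                while (i >= 0 && log.get(i).instant >= target) {
                    i--;
                }
                continue;
            }
            System.out.println("re-acquire write locks for: " + r.payload);
            i--;
        }
    }
}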

Example 5 with LogRecord

use of org.apache.derby.impl.store.raw.log.LogRecord in project derby by apache.

the class FileLogger method redo.

/**
 *		Recovery Redo loop.
 *
 *		<P> The log stream is scanned from the beginning (or
 *		from the undo low water mark of a checkpoint) forward until the end.
 *		The purpose of the redo pass is to repeat history, i.e., to repeat
 *		exactly the same set of changes the rawStore went through right before
 *		it stopped.  With each log record that is encountered in the redo pass:
 *		<OL>
 *		<LI>if it isFirst(), then the transaction factory is called upon to
 *		    create a new transaction object.
 *		<LI>if it needsRedo(), its doMe() is called (if it is a compensation
 *		    operation, then the undoable operation needs to be created first
 *            before the doMe is called).
 *		<LI>if it isComplete(), then the transaction object is closed.
 *		</OL>
 *
 *		<P> MT - caller provides synchronization
 *
 *		@param transFactory     - the transaction factory
 *		@param redoLWM          - if a checkpoint was seen, apply redo from this
 *                                  point onward as necessary
 *
 *		@return the log instant of the next log record (or the instant just
 *		after the last log record).  This is used to determine where the log
 *		truly ends
 *
 *		@exception StandardException Standard Derby error policy
 *		@exception IOException error reading log file
 *		@exception ClassNotFoundException log file corrupted
 *
 *		@see LogToFile#recover
 */
protected long redo(RawTransaction recoveryTransaction, TransactionFactory transFactory, StreamLogScan redoScan, long redoLWM, long ttabInstant) throws IOException, StandardException, ClassNotFoundException {
    // begin debug info
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
            SanityManager.DEBUG(LogToFile.DBG_FLAG, "In recovery redo, redoLWM = " + redoLWM);
        }
    }
    int scanCount = 0;
    int redoCount = 0;
    int prepareCount = 0;
    int clrCount = 0;
    int btranCount = 0;
    int etranCount = 0;
    // end debug info
    TransactionId tranId = null;
    // the current log instant
    long instant = LogCounter.INVALID_LOG_INSTANT;
    // ////////////////////////////////////////////////////////////////////
    // During redo time, the byte array in the logOutputBuffer is not used.
    // Use it to read the log record - if it is not big enough, scan
    // will resize it.  We could create a brand new log input stream that
    // has nothing to do with logIn or logOutputBuffer but that seem like
    // a waste of memory.
    // ////////////////////////////////////////////////////////////////////
    logIn.setData(logOutputBuffer.getByteArray());
    // use this scan to reconstitute operation to be undone
    // when we see a CLR in the redo scan
    StreamLogScan undoScan = null;
    Loggable op = null;
    // we need to determine the log's true end
    long logEnd = 0;
    try {
        // scan the log forward in redo pass and go to the end
        LogRecord record;
        while ((record = redoScan.getNextRecord(logIn, null, 0)) != null) {
            scanCount++;
            long undoInstant = 0;
            // last known good instant
            instant = redoScan.getInstant();
            // last known good log end
            logEnd = redoScan.getLogRecordEnd();
            // NOTE NOTE
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY) || SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                    if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY))
                        SanityManager.DEBUG_SET(LogToFile.DBG_FLAG);
                    op = record.getLoggable();
                    tranId = record.getTransactionId();
                    if (record.isCLR()) {
                        // !!!!!!! this moves the file pointer
                        undoInstant = logIn.readLong();
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "scanned " + tranId + " : " + op + " instant = " + LogCounter.toDebugString(instant) + " undoInstant : " + LogCounter.toDebugString(undoInstant));
                    } else {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "scanned " + tranId + " : " + op + " instant = " + LogCounter.toDebugString(instant) + " logEnd = " + LogCounter.toDebugString(logEnd) + " logIn at " + logIn.getPosition() + " available " + logIn.available());
                    }
                    // we only want to dump the log, don't touch it
                    if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY))
                        continue;
                }
            }
            // log records before the redo low water mark have already been
            // flushed by the checkpoint; skip them unless they begin, prepare,
            // or complete a transaction
            if (redoLWM != LogCounter.INVALID_LOG_INSTANT && instant < redoLWM) {
                if (!(record.isFirst() || record.isComplete() || record.isPrepare())) {
                    continue;
                }
            }
            // get the transaction
            tranId = record.getTransactionId();
            // if the transaction is not already known to the transaction
            // factory, make it known
            if (!transFactory.findTransaction(tranId, recoveryTransaction)) {
                if (redoLWM != LogCounter.INVALID_LOG_INSTANT && instant < redoLWM && (record.isPrepare() || record.isComplete())) {
                    // What is happening here is that a transaction that
                    // started before the undoLWM has committed by the time
                    // the checkpoint undoLWM was taken.  Hence, we only
                    // see the tail end of its log record and its endXact
                    // record.
                    // 
                    // NOTE:
                    // Since we didn't see its beginXact, we cannot do the
                    // endXact's doMe either.  Also if the endXact, is
                    // actually just a prepare, we don't need to do
                    // anything as the transaction will commit or abort
                    // prior to the point we are recovering to.
                    // If it is deemed necessary to do the endXact's doMe,
                    // then we should start the transaction right here.
                    // For now, just completely ignore this transaction
                    // 
                    etranCount++;
                    continue;
                }
                if ((ttabInstant == LogCounter.INVALID_LOG_INSTANT) && !record.isFirst()) {
                    throw StandardException.newException(SQLState.LOG_UNEXPECTED_RECOVERY_PROBLEM, MessageService.getTextMessage(MessageId.LOG_RECORD_NOT_FIRST, tranId));
                }
                if (SanityManager.DEBUG) {
                    // if the transaction table was dumped and this non-first
                    // record appears after the ttab instant, the transaction
                    // should already be in the table: error.
                    if (ttabInstant != LogCounter.INVALID_LOG_INSTANT) {
                        if (instant > ttabInstant && !record.isFirst()) {
                            SanityManager.THROWASSERT("log record is Not first but transaction " + "is not in transaction table (2) : " + tranId);
                        }
                    // If we dump the transaction table and the table
                    // does not have the transaction, and we see this
                    // beginXact before the ttab instant, we could have
                    // ignored it because we "know" that we should see
                    // the endXact before the ttab instant also.
                    // Leave it in just in case.
                    }
                }
                btranCount++;
                // the long transaction ID is embedded in the beginXact log
                // record.  The short ID is stored in the log record.
                recoveryTransaction.setTransactionId(record.getLoggable(), tranId);
            } else {
                if ((ttabInstant == LogCounter.INVALID_LOG_INSTANT) && record.isFirst()) {
                    throw StandardException.newException(SQLState.LOG_UNEXPECTED_RECOVERY_PROBLEM, MessageService.getTextMessage(MessageId.LOG_RECORD_FIRST, tranId));
                }
                if (SanityManager.DEBUG) {
                    if (ttabInstant != LogCounter.INVALID_LOG_INSTANT && instant > ttabInstant && record.isFirst()) {
                        SanityManager.THROWASSERT("log record is first but transaction is " + "already in transaction table (3): " + tranId);
                    }
                    if (record.isPrepare())
                        prepareCount++;
                }
                // 
                if (record.isFirst()) {
                    btranCount++;
                    continue;
                }
            }
            op = record.getLoggable();
            if (SanityManager.DEBUG) {
                if (!record.isCLR()) {
                    if (logIn.available() < 4) {
                        SanityManager.THROWASSERT("not enough bytes read in : " + logIn.available() + " for " + op + " instant " + LogCounter.toDebugString(instant));
                    }
                }
            }
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(!recoveryTransaction.handlesPostTerminationWork(), "recovery transaction handles post termination work");
            }
            if (op.needsRedo(recoveryTransaction)) {
                redoCount++;
                if (record.isCLR()) {
                    clrCount++;
                    if (SanityManager.DEBUG)
                        SanityManager.ASSERT(op instanceof Compensation);
                    // undoInstant may already have been read by the sanity/debug code above
                    if (undoInstant == 0)
                        undoInstant = logIn.readLong();
                    if (undoScan == null) {
                        undoScan = (StreamLogScan) logFactory.openForwardsScan(undoInstant, (LogInstant) null);
                    } else {
                        undoScan.resetPosition(new LogCounter(undoInstant));
                    }
                    // undoScan is now positioned at the beginning of the log
                    // record that was rolled back by this CLR.
                    // The scan is a forward one so getNextRecord will get
                    // the log record that needs to be rolled back.
                    // reuse the buffer in logIn, since a CLR has no optional
                    // data and has no further use for it
                    logIn.clearLimit();
                    LogRecord undoRecord = undoScan.getNextRecord(logIn, null, 0);
                    Undoable undoOp = undoRecord.getUndoable();
                    if (SanityManager.DEBUG) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "Redoing CLR: undoInstant = " + LogCounter.toDebugString(undoInstant) + " clrinstant = " + LogCounter.toDebugString(instant));
                        SanityManager.ASSERT(undoRecord.getTransactionId().equals(tranId));
                        SanityManager.ASSERT(undoOp != null);
                    }
                    ((Compensation) op).setUndoOp(undoOp);
                }
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "redoing " + op + " instant = " + LogCounter.toDebugString(instant));
                    }
                }
                int dataLength = logIn.readInt();
                logIn.setLimit(dataLength);
                // even though the log has already been written, we need to
                // tie the page to the log stream so that if redo fails for
                // some reason, marking the log factory corrupt will stop the
                // corrupt page from being flushed to disk.
                op.doMe(recoveryTransaction, new LogCounter(instant), logIn);
                op.releaseResource(recoveryTransaction);
                op = null;
            }
            // if this record completes the transaction, commit the recovery
            // transaction; the commit does not log anything and is a no-op if
            // the transaction has not written anything.
            if (record.isComplete()) {
                etranCount++;
                if (SanityManager.DEBUG)
                    SanityManager.ASSERT(!recoveryTransaction.handlesPostTerminationWork(), "recovery xact handles post termination work");
                recoveryTransaction.commit();
            }
        }
        // while redoScan.getNextRecord() != null
        // If the scan ended in an empty file, update logEnd to reflect that
        // in order to avoid to continue logging to an older file
        long end = redoScan.getLogRecordEnd();
        if (end != LogCounter.INVALID_LOG_INSTANT && (LogCounter.getLogFileNumber(logEnd) < LogCounter.getLogFileNumber(end))) {
            logEnd = end;
        }
    } catch (StandardException se) {
        throw StandardException.newException(SQLState.LOG_REDO_FAILED, se, op);
    } finally {
        // close all the io streams
        redoScan.close();
        redoScan = null;
        if (undoScan != null) {
            undoScan.close();
            undoScan = null;
        }
        if (op != null)
            op.releaseResource(recoveryTransaction);
    }
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
            SanityManager.DEBUG(LogToFile.DBG_FLAG, "----------------------------------------------------\n" + "End of recovery redo\n" + "Scanned = " + scanCount + " log records" + ", redid = " + redoCount + " ( clr = " + clrCount + " )" + " begintran = " + btranCount + " endtran = " + etranCount + " preparetran = " + prepareCount + "\n log ends at " + LogCounter.toDebugString(logEnd) + "\n----------------------------------------------------\n");
        }
    }
    if (SanityManager.DEBUG) {
        // make sure logEnd and instant is consistent
        if (instant != LogCounter.INVALID_LOG_INSTANT) {
            SanityManager.ASSERT(LogCounter.getLogFileNumber(instant) < LogCounter.getLogFileNumber(logEnd) || (LogCounter.getLogFileNumber(instant) == LogCounter.getLogFileNumber(logEnd) && LogCounter.getLogFilePosition(instant) <= LogCounter.getLogFilePosition(logEnd)));
        } else {
            SanityManager.ASSERT(logEnd == LogCounter.INVALID_LOG_INSTANT);
        }
    }
    // logEnd is the last good log record position in the log
    return logEnd;
}
Also used : Undoable(org.apache.derby.iapi.store.raw.Undoable) StandardException(org.apache.derby.shared.common.error.StandardException) Loggable(org.apache.derby.iapi.store.raw.Loggable) LogRecord(org.apache.derby.impl.store.raw.log.LogRecord) Compensation(org.apache.derby.iapi.store.raw.Compensation) LogCounter(org.apache.derby.impl.store.raw.log.LogCounter) StreamLogScan(org.apache.derby.impl.store.raw.log.StreamLogScan) TransactionId(org.apache.derby.iapi.store.raw.xact.TransactionId)
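The redo loop's core dispatch is the three checks spelled out in the javadoc: isFirst() opens a transaction, needsRedo() applies doMe(), and isComplete() closes the transaction. The skeleton below restates just that control flow with made-up types as a hedged sketch; it omits CLR handling, the transaction table instant, and everything else Derby-specific.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RedoSkeleton {

    /** Stand-in for a redoable log operation. */
    interface Op {
        boolean needsRedo();
        void doMe();
    }

    /** Stand-in for one scanned log record. */
    static final class Rec {
        final long txnId;
        final boolean first;     // record.isFirst()
        final boolean complete;  // record.isComplete()
        final Op op;

        Rec(long txnId, boolean first, boolean complete, Op op) {
            this.txnId = txnId;
            this.first = first;
            this.complete = complete;
            this.op = op;
        }
    }

    /** Replays records in log order using the three checks from the javadoc. */
    static void redo(List<Rec> records) {
        Map<Long, String> transactionTable = new HashMap<>();
        for (Rec r : records) {
            if (r.first) {
                transactionTable.put(r.txnId, "active");   // beginXact: open the transaction
            }
            if (r.op != null && r.op.needsRedo()) {
                r.op.doMe();                               // repeat history
            }
            if (r.complete) {
                transactionTable.remove(r.txnId);          // endXact: close the transaction
            }
        }
    }

    public static void main(String[] args) {
        Op insert = new Op() {
            public boolean needsRedo() { return true; }
            public void doMe() { System.out.println("redo: insert"); }
        };
        redo(Arrays.asList(
                new Rec(1, true, false, null),     // beginXact
                new Rec(1, false, false, insert),  // the operation to redo
                new Rec(1, false, true, null)));   // endXact
    }
}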

Aggregations

LogRecord (org.apache.derby.impl.store.raw.log.LogRecord)7 TransactionId (org.apache.derby.iapi.store.raw.xact.TransactionId)4 IOException (java.io.IOException)3 ArrayInputStream (org.apache.derby.iapi.services.io.ArrayInputStream)3 Loggable (org.apache.derby.iapi.store.raw.Loggable)3 LogCounter (org.apache.derby.impl.store.raw.log.LogCounter)3 StreamLogScan (org.apache.derby.impl.store.raw.log.StreamLogScan)3 StandardException (org.apache.derby.shared.common.error.StandardException)3 Compensation (org.apache.derby.iapi.store.raw.Compensation)2 Undoable (org.apache.derby.iapi.store.raw.Undoable)2 RePreparable (org.apache.derby.iapi.store.raw.RePreparable)1