Example 6 with TransactionId

use of org.apache.derby.iapi.store.raw.xact.TransactionId in project derby by apache.

From class Scan, method getNextRecordForward.

/**
 *		Read the next log record.
 *		Switching to the next log file if necessary,
 *		Resize the input stream byte array if necessary.
 *		@see StreamLogScan#getNextRecord
 *
 *		Side effects include:
 *				on a successful read, setting currentInstant, knownGoodLogEnd
 *				on a log file switch, setting currentLogFileNumber, currentLogFileLength.
 *				on detecting a fuzzy log end that needs clearing, it will call
 *				logFactory to clear the fuzzy log end.
 *
 *		@return the next LogRecord, or null if the end of the
 *		scan has been reached.
 */
private LogRecord getNextRecordForward(ArrayInputStream input, TransactionId tranId, int groupmask) throws StandardException, IOException, ClassNotFoundException {
    if (SanityManager.DEBUG)
        SanityManager.ASSERT(scanDirection == FORWARD, "can only be called by a forward scan");
    // NOTE:
    // 
    // if forward scan, scan is positioned at the first byte of the
    // next record, or the end of file - note that the 'end of file'
    // is defined at the time the scan is initialized.  If we are
    // on the current log file, it may well have grown by now...
    // 
    // This is not a problem in reality because the only forward
    // scan on the log now is recovery redo and the log does not
    // grow.  If, in the future, a forward scan of the log is used
    // for some other reason, keep this in mind.
    // 
    // first we need to make sure the entire log record is on the
    // log, or else this is a fuzzy log end.
    // RESOLVE: can get this from knownGoodLogEnd if this is not the first
    // time getNext is called.  Probably just as fast to call
    // scan.getFilePointer though...
    long recordStartPosition = scan.getFilePointer();
    boolean candidate;
    // if we have filtering, peek at the group and/or the transaction id,
    // do them in one read rather than 2 reads.
    int peekAmount = LogRecord.formatOverhead() + LogRecord.maxGroupStoredSize();
    if (tranId != null)
        peekAmount += LogRecord.maxTransactionIdStoredSize(tranId);
    // the number of bytes actually read
    int readAmount;
    LogRecord lr;
    do {
        // this log record is a candidate unless proven otherwise
        candidate = true;
        lr = null;
        readAmount = -1;
        // make sure the entire 4-byte length field is on the log file;
        // if not, we have reached the end of the log.
        if (recordStartPosition + 4 > currentLogFileLength) {
            // end of the log.
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                    SanityManager.DEBUG(LogToFile.DBG_FLAG, "detected fuzzy log end on log file " + currentLogFileNumber + " record start position " + recordStartPosition + " file length " + currentLogFileLength);
                }
            }
            // the log file ends without the end marker; this is a fuzzy log end.
            if (recordStartPosition != currentLogFileLength)
                fuzzyLogEnd = true;
            // nothing more can be read; stop the scan right here
            return null;
        }
        // read in the length before the log record
        int recordLength = scan.readInt();
        while (recordLength == 0 || recordStartPosition + recordLength + LogToFile.LOG_RECORD_OVERHEAD > currentLogFileLength) {
            // this is a fuzzy log end
            if (recordLength != 0) {
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "detected fuzzy log end on log file " + currentLogFileNumber + " record start position " + recordStartPosition + " file length " + currentLogFileLength + " recordLength=" + recordLength);
                    }
                }
                fuzzyLogEnd = true;
                scan.close();
                scan = null;
                return null;
            }
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                    if (recordStartPosition + 4 == currentLogFileLength) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "detected proper log end on log file " + currentLogFileNumber);
                    } else {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "detected zapped log end on log file " + currentLogFileNumber + " end marker at " + recordStartPosition + " real end at " + currentLogFileLength);
                    }
                }
            }
            // if stopAt is in this log file, we will have gone past it by now; stop here
            if (stopAt != LogCounter.INVALID_LOG_INSTANT && LogCounter.getLogFileNumber(stopAt) == currentLogFileNumber) {
                return null;
            }
            // 
            // we have a log end marker and we don't want to stop yet, switch
            // log file
            // 
            scan.close();
            // set this.currentLogFileNumber
            scan = logFactory.getLogFileAtBeginning(++currentLogFileNumber);
            // we have seen the last log file
            if (scan == null) {
                return null;
            }
            // scan is positioned just past the log header
            recordStartPosition = scan.getFilePointer();
            // Verify that the header of the new log file refers
            // to the end of the log record of the previous file
            // (Rest of header has been verified by getLogFileAtBeginning)
            scan.seek(LogToFile.LOG_FILE_HEADER_PREVIOUS_LOG_INSTANT_OFFSET);
            long previousLogInstant = scan.readLong();
            if (previousLogInstant != knownGoodLogEnd) {
                // the new file's header does not point back to the known end of the
                // previous log file; treat this as the end of the scan.  (Errors in the
                // other fields of the header are detected by getLogFileAtBeginning.)
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "log file " + currentLogFileNumber + ": previous log record: " + previousLogInstant + " known previous log record: " + knownGoodLogEnd);
                    }
                }
                return null;
            }
            scan.seek(recordStartPosition);
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                    SanityManager.DEBUG(LogToFile.DBG_FLAG, "switched to next log file " + currentLogFileNumber);
                }
            }
            // Advance knownGoodLogEnd to make sure that if this
            // log file is the last log file and empty, logging
            // continues in this file, not the old file.
            knownGoodLogEnd = LogCounter.makeLogInstantAsLong(currentLogFileNumber, recordStartPosition);
            // set this.currentLogFileLength
            currentLogFileLength = scan.length();
            // empty log file
            if (recordStartPosition + 4 >= currentLogFileLength) {
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "log file " + currentLogFileNumber + " is empty");
                    }
                }
                return null;
            }
            // we have successfully switched to the next log file.
            // scan is positioned just before the next log record
            // see if this one is written in entirety
            recordLength = scan.readInt();
        }
        // we know the entire log record is on this log file
        // read the current log instant
        currentInstant = scan.readLong();
        /* Check if the current instant is less than the last one.  This can
         * happen if the system crashed before the log instant was written out
         * completely.  If the instant is partially written it will be less
         * than the last one, and it should be the last record that was
         * supposed to get written.  Currently preallocated files are filled
         * with zeros, so this check holds.
         * Note: in the case of non-preallocated files the earlier check
         * against the log file length should have found the end.  But in
         * preallocated files the log file length is not sufficient to find
         * the log end, so this check is required to find the end in
         * preallocated log files.
         */
        if (currentInstant < knownGoodLogEnd) {
            fuzzyLogEnd = true;
            return null;
        }
        // sanity check it
        if (SanityManager.DEBUG) {
            if (LogCounter.getLogFileNumber(currentInstant) != currentLogFileNumber || LogCounter.getLogFilePosition(currentInstant) != recordStartPosition)
                SanityManager.THROWASSERT("Wrong LogInstant on log record " + LogCounter.toDebugString(currentInstant) + " version real position (" + currentLogFileNumber + "," + recordStartPosition + ")");
        }
        // if a scan limit (stopAt) was given, check whether we have gone past it;
        // nothing more can be read beyond it
        if (stopAt != LogCounter.INVALID_LOG_INSTANT && currentInstant > stopAt) {
            currentInstant = LogCounter.INVALID_LOG_INSTANT;
            // we went past the stopAt
            return null;
        }
        // read in the log record
        byte[] data = input.getData();
        if (data.length < recordLength) {
            // make a new array of sufficient size and reset the array
            // in the input stream
            data = new byte[recordLength];
            input.setData(data);
        }
        if (logFactory.databaseEncrypted()) {
            scan.readFully(data, 0, recordLength);
            int len = logFactory.decrypt(data, 0, recordLength, data, 0);
            if (SanityManager.DEBUG)
                SanityManager.ASSERT(len == recordLength);
            input.setLimit(0, len);
        } else // no need to decrypt, only get the group and tid if we filter
        {
            if (groupmask == 0 && tranId == null) {
                // no filter, get the whole thing
                scan.readFully(data, 0, recordLength);
                input.setLimit(0, recordLength);
            } else {
                // Read only enough so that group and the tran id is in
                // the data buffer.  Group is stored as compressed int
                // and tran id is stored as who knows what.  read min
                // of peekAmount or recordLength
                readAmount = (recordLength > peekAmount) ? peekAmount : recordLength;
                // in the data buffer, we now have enough to peek
                scan.readFully(data, 0, readAmount);
                input.setLimit(0, readAmount);
            }
        }
        lr = (LogRecord) input.readObject();
        if (groupmask != 0 || tranId != null) {
            if (groupmask != 0 && (groupmask & lr.group()) == 0)
                // no match, throw this log record out
                candidate = false;
            if (candidate && tranId != null) {
                TransactionId tid = lr.getTransactionId();
                if (!tid.equals(tranId))
                    // no match, throw this log record out
                    candidate = false;
            }
            // if the record is still a candidate and the database is not encrypted,
            // we have only peeked at it so far; read in the rest.  (If encrypted,
            // the whole record was already read for decryption.)
            if (candidate && !logFactory.databaseEncrypted()) {
                // read the rest of the log into the buffer
                if (SanityManager.DEBUG)
                    SanityManager.ASSERT(readAmount > 0);
                if (readAmount < recordLength) {
                    // Need to remember where we are because the log
                    // record may have read part of it off the input
                    // stream already and that position is lost when we
                    // set limit again.
                    int inputPosition = input.getPosition();
                    scan.readFully(data, readAmount, recordLength - readAmount);
                    input.setLimit(0, recordLength);
                    input.setPosition(inputPosition);
                }
            }
        }
        // read the length stored at the end of the log record and check it against
        // the length at the beginning; if the record was skipped, first reposition
        // the scan to the right place for the skipped log record.
        if (!candidate)
            scan.seek(recordStartPosition - 4);
        int checkLength = scan.readInt();
        if (checkLength != recordLength && checkLength < recordLength) {
            // record was half written before the crash.
            if (checkLength < recordLength) {
                fuzzyLogEnd = true;
                return null;
            } else {
                if (SanityManager.DEBUG) {
                    throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_RECORD_CORRUPTED, checkLength, recordLength, currentInstant, currentLogFileNumber));
                }
            // In the non-debug case, do nothing; let's hope only the
            // length part is incorrect and the system can proceed.
            }
        }
        // next record start position is right after this record
        recordStartPosition += recordLength + LogToFile.LOG_RECORD_OVERHEAD;
        knownGoodLogEnd = LogCounter.makeLogInstantAsLong(currentLogFileNumber, recordStartPosition);
        if (SanityManager.DEBUG) {
            if (recordStartPosition != scan.getFilePointer())
                SanityManager.THROWASSERT("calculated end " + recordStartPosition + " != real end " + scan.getFilePointer());
        } else {
            // seek to the start of the next log record
            scan.seek(recordStartPosition);
        }
        /**
         * If the current log record is a checksum log record then, using the
         * information available in this record, validate the data in the log
         * file by matching the checksum in the checksum log record against a
         * checksum recalculated over the specified length of data in the log
         * file.  The checksum values should match unless the write was
         * incomplete before the crash.
         */
        if (lr.isChecksum()) {
            // a checksum log record should not be returned to the logger recovery redo
            // routines; it is only used to identify incomplete log writes.
            candidate = false;
            Loggable op = lr.getLoggable();
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY) || SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
                    SanityManager.DEBUG(LogToFile.DBG_FLAG, "scanned " + "Null" + " : " + op + " instant = " + LogCounter.toDebugString(currentInstant) + " logEnd = " + LogCounter.toDebugString(knownGoodLogEnd));
            }
            ChecksumOperation clop = (ChecksumOperation) op;
            int ckDataLength = clop.getDataLength();
            // resize the buffer to the checksum data length if required.
            if (data.length < ckDataLength) {
                // make a new array of sufficient size and reset the array
                // in the input stream
                data = new byte[ckDataLength];
                input.setData(data);
                input.setLimit(0, ckDataLength);
            }
            boolean validChecksum = false;
            // the checksum is valid for identifying incomplete out-of-order writes
            // only if the checksum data lies entirely within the log file
            if ((recordStartPosition + ckDataLength) <= currentLogFileLength) {
                // read the data into the buffer
                scan.readFully(data, 0, ckDataLength);
                // verify the checksum
                if (clop.isChecksumValid(data, 0, ckDataLength))
                    validChecksum = true;
            }
            if (!validChecksum) {
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "detected fuzzy log end on log file while doing checksum checks " + currentLogFileNumber + " checksum record start position " + recordStartPosition + " file length " + currentLogFileLength + " checksumDataLength=" + ckDataLength);
                    }
                }
                fuzzyLogEnd = true;
                scan.close();
                scan = null;
                return null;
            }
            // reset the scan to the start of the next log record
            scan.seek(recordStartPosition);
        }
    } while (candidate == false);
    return lr;
}
Also used : Loggable(org.apache.derby.iapi.store.raw.Loggable) LogRecord(org.apache.derby.impl.store.raw.log.LogRecord) TransactionId(org.apache.derby.iapi.store.raw.xact.TransactionId)
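
The scan above leans on LogCounter to pack a log file number and a byte position within that file into a single long instant (makeLogInstantAsLong, getLogFileNumber, getLogFilePosition), which is why instants can be compared numerically, as in the currentInstant < knownGoodLogEnd check. The following is only a sketch of that idea, assuming a hypothetical 32/32-bit split; the real bit layout and limits are defined by org.apache.derby.impl.store.raw.log.LogCounter.

// Hypothetical illustration of a packed log instant: file number in the
// high 32 bits, position within the file in the low 32 bits.  The actual
// layout is defined by Derby's LogCounter and may differ.
final class InstantSketch {
    static long makeInstant(long fileNumber, long filePosition) {
        return (fileNumber << 32) | (filePosition & 0xFFFFFFFFL);
    }

    static long fileNumber(long instant) {
        return instant >>> 32;
    }

    static long filePosition(long instant) {
        return instant & 0xFFFFFFFFL;
    }

    public static void main(String[] args) {
        long instant = makeInstant(7, 1024);
        // numeric comparison of instants orders records by file number,
        // then by position within the file
        System.out.println(fileNumber(instant) + ":" + filePosition(instant));
    }
}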

Example 7 with TransactionId

use of org.apache.derby.iapi.store.raw.xact.TransactionId in project derby by apache.

From class FileLogger, method logAndDo.

/*
	** Methods of Logger
	*/
/**
 *		Writes out a log record to the log stream, and calls its doMe method to
 *		apply the change to the rawStore.
 *		<BR>Any optional data the doMe method needs is first written to the log
 *		stream using operation.writeOptionalData, then whatever is written to
 *		the log stream is passed back to the operation for the doMe method.
 *
 *		<P>MT - there could be multiple threads running in the same raw
 *		transaction and they can be calling the same logger to log different
 *		log operations.  This whole method is synchronized to make sure log
 *		records are logged one at a time.
 *
 *		@param xact the transaction logging the change
 *		@param operation the log operation
 *		@return the instant in the log that can be used to identify the log
 *		record
 *
 *		@exception StandardException Derby Standard error policy
 */
public synchronized LogInstant logAndDo(RawTransaction xact, Loggable operation) throws StandardException {
    boolean isLogPrepared = false;
    boolean inUserCode = false;
    byte[] preparedLog;
    try {
        logOutputBuffer.reset();
        // always use the short Id, only the BeginXact log record contains
        // the XactId (long form)
        TransactionId transactionId = xact.getId();
        // write out the log header with the operation embedded
        // this is by definition not a compensation log record,
        // those are called thru the logAndUndo interface
        logRecord.setValue(transactionId, operation);
        inUserCode = true;
        logicalOut.writeObject(logRecord);
        inUserCode = false;
        int optionalDataLength = 0;
        int optionalDataOffset = 0;
        int completeLength = 0;
        ByteArray preparedLogArray = operation.getPreparedLog();
        if (preparedLogArray != null) {
            preparedLog = preparedLogArray.getArray();
            optionalDataLength = preparedLogArray.getLength();
            optionalDataOffset = preparedLogArray.getOffset();
            // There is a race condition if the operation is a begin tran in
            // that between the time the beginXact log record is written to
            // disk and the time the transaction object is updated in the
            // beginXact.doMe method, other log records may be written.
            // This will render the transaction table in an inconsistent state
            // since it may think a later transaction is the earliest
            // transaction, or it may think that there are no active transactions
            // when there are a bunch of them sitting on the log.
            // 
            // Similarly, there is a race condition for endXact, i.e.,
            // 1) endXact is written to the log,
            // 2) checkpoint gets that (committed) transaction as the
            // firstUpdateTransaction
            // 3) the transaction calls postComplete, nulling out itself
            // 4) checkpoint tries to access a closed transaction object
            // 
            // The solution is to sync between the time a begin tran or end
            // tran log record is sent to the log stream and its doMe method is
            // called to update the transaction table and in memory state
            // 
            // We only need to serialize the begin and end Xact log records
            // because once a transaction has been started and in the
            // transaction table, its order and transaction state does not
            // change.
            // 
            // Use the logFactory as the sync object so that a checkpoint can
            // take its snapshot of the undoLWM before or after a transaction
            // is started, but not in the middle. (see LogToFile.checkpoint)
            // 
            // now set the input limit to be the optional data.
            // This limits the amount of data available to logIn that doMe can
            // use
            logIn.setData(preparedLog);
            logIn.setPosition(optionalDataOffset);
            logIn.setLimit(optionalDataLength);
            if (SanityManager.DEBUG) {
                if ((optionalDataLength) != logIn.available())
                    SanityManager.THROWASSERT(" stream not set correctly " + optionalDataLength + " != " + logIn.available());
            }
        } else {
            preparedLog = null;
            optionalDataLength = 0;
        }
        logicalOut.writeInt(optionalDataLength);
        completeLength = logOutputBuffer.getPosition() + optionalDataLength;
        LogInstant logInstant = null;
        // in case of encryption, we need to pad
        int encryptedLength = 0;
        try {
            if (logFactory.databaseEncrypted()) {
                // we must pad the encryption data to be multiple of block
                // size, which is logFactory.getEncryptionBlockSize()
                encryptedLength = completeLength;
                if ((encryptedLength % logFactory.getEncryptionBlockSize()) != 0)
                    encryptedLength = encryptedLength + logFactory.getEncryptionBlockSize() - (encryptedLength % logFactory.getEncryptionBlockSize());
                if (encryptionBuffer == null || encryptionBuffer.length < encryptedLength)
                    encryptionBuffer = new byte[encryptedLength];
                System.arraycopy(logOutputBuffer.getByteArray(), 0, encryptionBuffer, 0, completeLength - optionalDataLength);
                if (optionalDataLength > 0)
                    System.arraycopy(preparedLog, optionalDataOffset, encryptionBuffer, completeLength - optionalDataLength, optionalDataLength);
                // do not bother to clear out the padding area
                int len = logFactory.encrypt(encryptionBuffer, 0, encryptedLength, encryptionBuffer, 0);
                if (SanityManager.DEBUG)
                    SanityManager.ASSERT(len == encryptedLength, "encrypted log buffer length != log buffer len");
            }
            if ((operation.group() & (Loggable.FIRST | Loggable.LAST)) != 0) {
                synchronized (logFactory) {
                    long instant = 0;
                    if (logFactory.databaseEncrypted()) {
                        // encryption has completely drained both the
                        // logOutputBuffer array and the preparedLog array
                        instant = logFactory.appendLogRecord(encryptionBuffer, 0, encryptedLength, null, -1, 0);
                    } else {
                        instant = logFactory.appendLogRecord(logOutputBuffer.getByteArray(), 0, completeLength, preparedLog, optionalDataOffset, optionalDataLength);
                    }
                    logInstant = new LogCounter(instant);
                    operation.doMe(xact, logInstant, logIn);
                }
            } else {
                long instant = 0;
                if (logFactory.databaseEncrypted()) {
                    // encryption has completely drained both the
                    // logOutputBuffer array and the preparedLog array
                    instant = logFactory.appendLogRecord(encryptionBuffer, 0, encryptedLength, null, -1, 0);
                } else {
                    instant = logFactory.appendLogRecord(logOutputBuffer.getByteArray(), 0, completeLength, preparedLog, optionalDataOffset, optionalDataLength);
                }
                logInstant = new LogCounter(instant);
                operation.doMe(xact, logInstant, logIn);
            }
        } catch (StandardException se) {
            throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_DO_ME_FAIL, se, operation));
        } catch (IOException ioe) {
            throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_DO_ME_FAIL, ioe, operation));
        } finally {
            logIn.clearLimit();
        }
        if (SanityManager.DEBUG) {
            if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                SanityManager.DEBUG(LogToFile.DBG_FLAG, "Write log record: tranId=" + transactionId.toString() + " instant: " + logInstant.toString() + " length: " + completeLength + "\n" + operation + "\n");
            }
        }
        return logInstant;
    } catch (IOException ioe) {
        // error writing to the log buffer
        if (inUserCode) {
            throw StandardException.newException(SQLState.LOG_WRITE_LOG_RECORD, ioe, operation);
        } else {
            throw StandardException.newException(SQLState.LOG_BUFFER_FULL, ioe, operation);
        }
    }
}
Also used : StandardException(org.apache.derby.shared.common.error.StandardException) LogInstant(org.apache.derby.iapi.store.raw.log.LogInstant) ByteArray(org.apache.derby.iapi.util.ByteArray) LogCounter(org.apache.derby.impl.store.raw.log.LogCounter) IOException(java.io.IOException) TransactionId(org.apache.derby.iapi.store.raw.xact.TransactionId)
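
Before encrypting, logAndDo rounds the log record length up to a multiple of logFactory.getEncryptionBlockSize(). That rounding arithmetic can be restated in isolation; this is only a sketch of that step, with the padToBlock helper and the 16-byte block size made up for illustration.

// Sketch of the padding arithmetic used in logAndDo: round completeLength
// up to the next multiple of the encryption block size.
public class PadToBlockSketch {
    static int padToBlock(int completeLength, int blockSize) {
        int padded = completeLength;
        if (padded % blockSize != 0)
            padded = padded + blockSize - (padded % blockSize);
        return padded;
    }

    public static void main(String[] args) {
        System.out.println(padToBlock(100, 16)); // 112: padded up to a full block
        System.out.println(padToBlock(96, 16));  // 96: already block-aligned
    }
}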

Example 8 with TransactionId

use of org.apache.derby.iapi.store.raw.xact.TransactionId in project derby by apache.

From class FileLogger, method redo.

/**
 *		Recovery Redo loop.
 *
 *		<P> The log stream is scanned from the beginning (or
 *		from the undo low water mark of a checkpoint) forward until the end.
 *		The purpose of the redo pass is to repeat history, i.e., to repeat
 *		exactly the same set of changes the rawStore went through right before it
 *		stopped.   With each log record that is encountered in the redo pass:
 *		<OL>
 *		<LI>if it isFirst(), then the transaction factory is called upon to
 *		    create a new transaction object.
 *		<LI>if it needsRedo(), its doMe() is called (if it is a compensation
 *		    operation, then the undoable operation needs to be created first
 *            before the doMe is called).
 *		<LI>if it isComplete(), then the transaction object is closed.
 *		</OL>
 *
 *		<P> MT - caller provides synchronization
 *
 *		@param transFactory     - the transaction factory
 *		@param redoLWM          - if checkpoint seen, starting from this point
 *                                  on, apply redo if necessary
 *
 *		@return the log instant of the next log record (or the instant just
 *		after the last log record).  This is used to determine where the log
 *		truly ends
 *
 *		@exception StandardException Standard Derby error policy
 *		@exception IOException error reading log file
 *		@exception ClassNotFoundException log file corrupted
 *
 *		@see LogToFile#recover
 */
protected long redo(RawTransaction recoveryTransaction, TransactionFactory transFactory, StreamLogScan redoScan, long redoLWM, long ttabInstant) throws IOException, StandardException, ClassNotFoundException {
    // begin debug info
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
            SanityManager.DEBUG(LogToFile.DBG_FLAG, "In recovery redo, redoLWM = " + redoLWM);
        }
    }
    int scanCount = 0;
    int redoCount = 0;
    int prepareCount = 0;
    int clrCount = 0;
    int btranCount = 0;
    int etranCount = 0;
    // end debug info
    TransactionId tranId = null;
    // the current log instant
    long instant = LogCounter.INVALID_LOG_INSTANT;
    // ////////////////////////////////////////////////////////////////////
    // During redo time, the byte array in the logOutputBuffer is not used.
    // Use it to read the log record - if it is not big enough, scan
    // will resize it.  We could create a brand new log input stream that
    // has nothing to do with logIn or logOutputBuffer, but that seems like
    // a waste of memory.
    // ////////////////////////////////////////////////////////////////////
    logIn.setData(logOutputBuffer.getByteArray());
    // use this scan to reconstitute operation to be undone
    // when we see a CLR in the redo scan
    StreamLogScan undoScan = null;
    Loggable op = null;
    // we need to determine the log's true end
    long logEnd = 0;
    try {
        // scan the log forward in redo pass and go to the end
        LogRecord record;
        while ((record = redoScan.getNextRecord(logIn, null, 0)) != null) {
            scanCount++;
            long undoInstant = 0;
            // last known good instant
            instant = redoScan.getInstant();
            // last known good log end
            logEnd = redoScan.getLogRecordEnd();
            // NOTE: the debug block below may read the undoInstant off the input
            // stream; the CLR handling further down checks for this before
            // reading it again.
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY) || SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                    if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY))
                        SanityManager.DEBUG_SET(LogToFile.DBG_FLAG);
                    op = record.getLoggable();
                    tranId = record.getTransactionId();
                    if (record.isCLR()) {
                        // !!!!!!! this moves the file pointer
                        undoInstant = logIn.readLong();
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "scanned " + tranId + " : " + op + " instant = " + LogCounter.toDebugString(instant) + " undoInstant : " + LogCounter.toDebugString(undoInstant));
                    } else {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "scanned " + tranId + " : " + op + " instant = " + LogCounter.toDebugString(instant) + " logEnd = " + LogCounter.toDebugString(logEnd) + " logIn at " + logIn.getPosition() + " available " + logIn.available());
                    }
                    // we only want to dump the log, don't touch it
                    if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY))
                        continue;
                }
            }
            // records before the redo low water mark have already been flushed by the
            // checkpoint; skip them unless they affect the transaction table
            // (begin, complete, or prepare)
            if (redoLWM != LogCounter.INVALID_LOG_INSTANT && instant < redoLWM) {
                if (!(record.isFirst() || record.isComplete() || record.isPrepare())) {
                    continue;
                }
            }
            // get the transaction
            tranId = record.getTransactionId();
            // if the transaction is not already known to the transaction factory, make it known
            if (!transFactory.findTransaction(tranId, recoveryTransaction)) {
                if (redoLWM != LogCounter.INVALID_LOG_INSTANT && instant < redoLWM && (record.isPrepare() || record.isComplete())) {
                    // What is happening here is that a transaction that
                    // started before the undoLWM has committed by the time
                    // the checkpoint undoLWM was taken.  Hence, we only
                    // see the tail end of its log record and its endXact
                    // record.
                    // 
                    // NOTE:
                    // Since we didn't see its beginXact, we cannot do the
                    // endXact's doMe either.  Also if the endXact, is
                    // actually just a prepare, we don't need to do
                    // anything as the transaction will commit or abort
                    // prior to point we are recovering to.
                    // If it is deemed necessary to do the endXact's doMe,
                    // then we should start the transaction right here.
                    // For now, just completely ignore this transaction
                    // 
                    etranCount++;
                    continue;
                }
                if ((ttabInstant == LogCounter.INVALID_LOG_INSTANT) && !record.isFirst()) {
                    throw StandardException.newException(SQLState.LOG_UNEXPECTED_RECOVERY_PROBLEM, MessageService.getTextMessage(MessageId.LOG_RECORD_NOT_FIRST, tranId));
                }
                if (SanityManager.DEBUG) {
                    // if the transaction table was dumped at ttabInstant and this
                    // record comes after that instant but is not a beginXact, that's
                    // an error.
                    if (ttabInstant != LogCounter.INVALID_LOG_INSTANT) {
                        if (instant > ttabInstant && !record.isFirst()) {
                            SanityManager.THROWASSERT("log record is Not first but transaction " + "is not in transaction table (2) : " + tranId);
                        }
                    // If we dump the transaction table and the table
                    // does not have the transaction, and we see this
                    // beginXact before the ttab instant, we could have
                    // ignored it because we "know" that we should see
                    // the endXact before the ttab instant also.
                    // Leave it in just in case.
                    }
                }
                btranCount++;
                // the long transaction ID is embedded in the beginXact log
                // record.  The short ID is stored in the log record.
                recoveryTransaction.setTransactionId(record.getLoggable(), tranId);
            } else {
                if ((ttabInstant == LogCounter.INVALID_LOG_INSTANT) && record.isFirst()) {
                    throw StandardException.newException(SQLState.LOG_UNEXPECTED_RECOVERY_PROBLEM, MessageService.getTextMessage(MessageId.LOG_RECORD_FIRST, tranId));
                }
                if (SanityManager.DEBUG) {
                    if (ttabInstant != LogCounter.INVALID_LOG_INSTANT && instant > ttabInstant && record.isFirst()) {
                        SanityManager.THROWASSERT("log record is first but transaction is " + "already in transaction table (3): " + tranId);
                    }
                    if (record.isPrepare())
                        prepareCount++;
                }
                // the transaction is already in the transaction table; if this is its
                // first log record, nothing more needs to be done
                if (record.isFirst()) {
                    btranCount++;
                    continue;
                }
            }
            op = record.getLoggable();
            if (SanityManager.DEBUG) {
                if (!record.isCLR()) {
                    if (logIn.available() < 4) {
                        SanityManager.THROWASSERT("not enough bytes read in : " + logIn.available() + " for " + op + " instant " + LogCounter.toDebugString(instant));
                    }
                }
            }
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(!recoveryTransaction.handlesPostTerminationWork(), "recovery transaction handles post termination work");
            }
            if (op.needsRedo(recoveryTransaction)) {
                redoCount++;
                if (record.isCLR()) {
                    clrCount++;
                    if (SanityManager.DEBUG)
                        SanityManager.ASSERT(op instanceof Compensation);
                    // this value may already have been set by the sanity/debug block above
                    if (undoInstant == 0)
                        undoInstant = logIn.readLong();
                    if (undoScan == null) {
                        undoScan = (StreamLogScan) logFactory.openForwardsScan(undoInstant, (LogInstant) null);
                    } else {
                        undoScan.resetPosition(new LogCounter(undoInstant));
                    }
                    // undoScan is now positioned at the beginning of the log
                    // record that was rolled back by this CLR.
                    // The scan is a forward one so getNextRecord will get
                    // the log record that needs to be rolled back.
                    // reuse the buffer in logIn since the CLR
                    // has no optional data and has no use for it anymore
                    logIn.clearLimit();
                    LogRecord undoRecord = undoScan.getNextRecord(logIn, null, 0);
                    Undoable undoOp = undoRecord.getUndoable();
                    if (SanityManager.DEBUG) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "Redoing CLR: undoInstant = " + LogCounter.toDebugString(undoInstant) + " clrinstant = " + LogCounter.toDebugString(instant));
                        SanityManager.ASSERT(undoRecord.getTransactionId().equals(tranId));
                        SanityManager.ASSERT(undoOp != null);
                    }
                    ((Compensation) op).setUndoOp(undoOp);
                }
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "redoing " + op + " instant = " + LogCounter.toDebugString(instant));
                    }
                }
                int dataLength = logIn.readInt();
                logIn.setLimit(dataLength);
                // even though the log has already been written, we need to
                // tie the page to the log stream so that if redo failed
                // for some reason, the log factory's corruption flag will stop
                // the corrupt page from flushing to disk.
                op.doMe(recoveryTransaction, new LogCounter(instant), logIn);
                op.releaseResource(recoveryTransaction);
                op = null;
            }
            // if this record completes the transaction, commit the recovery
            // transaction; the commit is a noop unless redo has actually written
            // something through it.
            if (record.isComplete()) {
                etranCount++;
                if (SanityManager.DEBUG)
                    SanityManager.ASSERT(!recoveryTransaction.handlesPostTerminationWork(), "recovery xact handles post termination work");
                recoveryTransaction.commit();
            }
        }
        // while redoScan.getNextRecord() != null
        // If the scan ended in an empty file, update logEnd to reflect that
        // in order to avoid continuing to log to an older file
        long end = redoScan.getLogRecordEnd();
        if (end != LogCounter.INVALID_LOG_INSTANT && (LogCounter.getLogFileNumber(logEnd) < LogCounter.getLogFileNumber(end))) {
            logEnd = end;
        }
    } catch (StandardException se) {
        throw StandardException.newException(SQLState.LOG_REDO_FAILED, se, op);
    } finally {
        // close all the io streams
        redoScan.close();
        redoScan = null;
        if (undoScan != null) {
            undoScan.close();
            undoScan = null;
        }
        if (op != null)
            op.releaseResource(recoveryTransaction);
    }
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
            SanityManager.DEBUG(LogToFile.DBG_FLAG, "----------------------------------------------------\n" + "End of recovery redo\n" + "Scanned = " + scanCount + " log records" + ", redid = " + redoCount + " ( clr = " + clrCount + " )" + " begintran = " + btranCount + " endtran = " + etranCount + " preparetran = " + prepareCount + "\n log ends at " + LogCounter.toDebugString(logEnd) + "\n----------------------------------------------------\n");
        }
    }
    if (SanityManager.DEBUG) {
        // make sure logEnd and instant is consistent
        if (instant != LogCounter.INVALID_LOG_INSTANT) {
            SanityManager.ASSERT(LogCounter.getLogFileNumber(instant) < LogCounter.getLogFileNumber(logEnd) || (LogCounter.getLogFileNumber(instant) == LogCounter.getLogFileNumber(logEnd) && LogCounter.getLogFilePosition(instant) <= LogCounter.getLogFilePosition(logEnd)));
        } else {
            SanityManager.ASSERT(logEnd == LogCounter.INVALID_LOG_INSTANT);
        }
    }
    // logEnd is the last good log record position in the log
    return logEnd;
}
Also used : Undoable(org.apache.derby.iapi.store.raw.Undoable) StandardException(org.apache.derby.shared.common.error.StandardException) Loggable(org.apache.derby.iapi.store.raw.Loggable) LogRecord(org.apache.derby.impl.store.raw.log.LogRecord) Compensation(org.apache.derby.iapi.store.raw.Compensation) LogCounter(org.apache.derby.impl.store.raw.log.LogCounter) StreamLogScan(org.apache.derby.impl.store.raw.log.StreamLogScan) TransactionId(org.apache.derby.iapi.store.raw.xact.TransactionId)
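
The redo loop's skip rule is easy to lose in the surrounding bookkeeping: a record whose instant lies before the redo low water mark is skipped unless it affects the transaction table (a begin, complete, or prepare record). Below is a standalone restatement of that predicate; the boolean parameters and the skipRecord name stand in for the real LogRecord methods and are not part of Derby.

// Restatement of the redo-pass skip rule from FileLogger.redo.  The
// isFirst/isComplete/isPrepare flags stand in for the corresponding
// LogRecord methods; 0 stands in for LogCounter.INVALID_LOG_INSTANT.
public class RedoFilterSketch {
    static final long INVALID_LOG_INSTANT = 0L;

    static boolean skipRecord(long instant, long redoLWM,
                              boolean isFirst, boolean isComplete, boolean isPrepare) {
        // records before the checkpoint's redo low water mark have already been
        // flushed to disk; only keep those that affect the transaction table
        return redoLWM != INVALID_LOG_INSTANT
                && instant < redoLWM
                && !(isFirst || isComplete || isPrepare);
    }

    public static void main(String[] args) {
        // an ordinary update record before the redoLWM is skipped...
        System.out.println(skipRecord(100, 200, false, false, false)); // true
        // ...but an endXact record before the redoLWM is still examined
        System.out.println(skipRecord(100, 200, false, true, false));  // false
    }
}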

Example 9 with TransactionId

use of org.apache.derby.iapi.store.raw.xact.TransactionId in project derby by apache.

From class FlushedScan, method getNextRecord.

/*
	** Methods of LogScan
	*/
/**
 *		Read a log record into the byte array provided.  Resize the input
 *		stream byte array if necessary.
 *
 *		@return the next LogRecord, or null if the end of the
 *		scan has been reached.
 *
 *		@exception StandardException Standard Derby error policy
 */
public LogRecord getNextRecord(ArrayInputStream input, TransactionId tranId, int groupmask) throws StandardException {
    try {
        boolean candidate;
        int peekAmount = LogRecord.formatOverhead() + LogRecord.maxGroupStoredSize();
        if (tranId != null)
            peekAmount += LogRecord.maxTransactionIdStoredSize(tranId);
        // the number of bytes actually read
        int readAmount;
        LogRecord lr;
        do {
            if (!open || !positionToNextRecord())
                return null;
            int checkLength;
            // this log record is a candidate unless proven otherwise
            lr = null;
            candidate = true;
            readAmount = -1;
            currentInstant = scan.readLong();
            byte[] data = input.getData();
            if (data.length < nextRecordLength) {
                // make a new array of sufficient size and reset the array
                // in the input stream
                data = new byte[nextRecordLength];
                input.setData(data);
            }
            if (logFactory.databaseEncrypted()) {
                scan.readFully(data, 0, nextRecordLength);
                int len = logFactory.decrypt(data, 0, nextRecordLength, data, 0);
                if (SanityManager.DEBUG)
                    SanityManager.ASSERT(len == nextRecordLength);
                input.setLimit(0, len);
            } else // no need to decrypt, only get the group and tid if we filter
            {
                if (groupmask == 0 && tranId == null) {
                    // no filter, get the whole thing
                    scan.readFully(data, 0, nextRecordLength);
                    input.setLimit(0, nextRecordLength);
                } else {
                    // Read only enough so that group and the tran id is in
                    // the data buffer.  Group is stored as compressed int
                    // and tran id is stored as who knows what.  read min
                    // of peekAmount or nextRecordLength
                    readAmount = (nextRecordLength > peekAmount) ? peekAmount : nextRecordLength;
                    // in the data buffer, we now have enough to peek
                    scan.readFully(data, 0, readAmount);
                    input.setLimit(0, readAmount);
                }
            }
            lr = (LogRecord) input.readObject();
            if (groupmask != 0 || tranId != null) {
                if (groupmask != 0 && (groupmask & lr.group()) == 0)
                    // no match, throw this log record out
                    candidate = false;
                if (candidate && tranId != null) {
                    TransactionId tid = lr.getTransactionId();
                    if (!tid.equals(tranId))
                        // no match, throw this log record out
                        candidate = false;
                }
                if (candidate && !logFactory.databaseEncrypted()) {
                    // read the rest of the log into the buffer
                    if (SanityManager.DEBUG)
                        SanityManager.ASSERT(readAmount > 0);
                    if (readAmount < nextRecordLength) {
                        // Need to remember where we are because the log
                        // record may have read part of it off the input
                        // stream already and that position is lost when we
                        // set limit again.
                        int inputPosition = input.getPosition();
                        scan.readFully(data, readAmount, nextRecordLength - readAmount);
                        input.setLimit(0, nextRecordLength);
                        input.setPosition(inputPosition);
                    }
                }
            }
            if (candidate || logFactory.databaseEncrypted()) {
                checkLength = scan.readInt();
                if (SanityManager.DEBUG) {
                    SanityManager.ASSERT(checkLength == nextRecordLength, "log corrupted");
                }
            } else // chances are, we haven't read all of the log record, skip it
            {
                // the starting record position is in the currentInstant,
                // calculate the next record starting position using that
                // and the nextRecordLength
                long nextRecordStartPosition = LogCounter.getLogFilePosition(currentInstant) + nextRecordLength + LogToFile.LOG_RECORD_OVERHEAD;
                scan.seek(nextRecordStartPosition);
            }
        } while (candidate == false);
        return lr;
    } catch (ClassNotFoundException cnfe) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_CORRUPTED, cnfe));
    } catch (IOException ioe) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_IO_ERROR, ioe));
    }
}
Also used : LogRecord(org.apache.derby.impl.store.raw.log.LogRecord) IOException(java.io.IOException) TransactionId(org.apache.derby.iapi.store.raw.xact.TransactionId)
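
A caller-side sketch of how the groupmask / tranId filtering in getNextRecord might be driven: the scan only hands back records whose group intersects the mask and whose transaction id equals the one supplied; everything else is skipped internally. The collectMatching helper and the ArrayInputStream import path are assumptions for illustration, not part of Derby; Loggable.LAST is the group constant used in logAndDo above.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.derby.iapi.services.io.ArrayInputStream;
import org.apache.derby.iapi.store.raw.Loggable;
import org.apache.derby.iapi.store.raw.xact.TransactionId;
import org.apache.derby.impl.store.raw.log.LogRecord;
import org.apache.derby.impl.store.raw.log.StreamLogScan;
import org.apache.derby.shared.common.error.StandardException;

// Hypothetical helper (not part of Derby) that drains a scan, keeping only
// records that end a transaction (Loggable.LAST) and belong to tranId.
public class FilteredScanSketch {
    static List<LogRecord> collectMatching(StreamLogScan scan,
                                           ArrayInputStream input,
                                           TransactionId tranId)
            throws StandardException, IOException, ClassNotFoundException {
        List<LogRecord> matches = new ArrayList<>();
        LogRecord rec;
        // getNextRecord returns null at the end of the scan; non-matching
        // records never reach the caller
        while ((rec = scan.getNextRecord(input, tranId, Loggable.LAST)) != null) {
            matches.add(rec);
        }
        return matches;
    }
}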

Aggregations

TransactionId (org.apache.derby.iapi.store.raw.xact.TransactionId) - 9
LogRecord (org.apache.derby.impl.store.raw.log.LogRecord) - 4
IOException (java.io.IOException) - 3
GlobalTransactionId (org.apache.derby.iapi.store.raw.GlobalTransactionId) - 3
LogCounter (org.apache.derby.impl.store.raw.log.LogCounter) - 3
StandardException (org.apache.derby.shared.common.error.StandardException) - 3
Loggable (org.apache.derby.iapi.store.raw.Loggable) - 2
LogInstant (org.apache.derby.iapi.store.raw.log.LogInstant) - 2
Compensation (org.apache.derby.iapi.store.raw.Compensation) - 1
Undoable (org.apache.derby.iapi.store.raw.Undoable) - 1
ByteArray (org.apache.derby.iapi.util.ByteArray) - 1
StreamLogScan (org.apache.derby.impl.store.raw.log.StreamLogScan) - 1