Use of org.apache.derby.impl.store.raw.log.LogRecord in project derby by apache.
The class FileLogger, method undo.
/**
 * Undo part of or the entire transaction. Rolls back, beginning at the log
 * record at undoStartAt and stopping at (inclusive) the log record at
 * undoStopAt.
 *
 * <P>MT - Not needed. A transaction must be single threaded through undo;
 * each RawTransaction has its own logger, so there is no need to
 * synchronize. The RawTransaction must handle synchronizing with
 * multiple threads during rollback.
 *
 * @param t the transaction that needs to be rolled back
 * @param undoId the transaction ID
 * @param undoStopAt the last log record that should be rolled back
 * @param undoStartAt the first log record that should be rolled back
 *
 * @exception StandardException Standard Derby error policy
 *
 * @see Logger#undo
 */
public void undo(RawTransaction t, TransactionId undoId, LogInstant undoStopAt, LogInstant undoStartAt) throws StandardException {
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
            if (undoStartAt != null) {
                SanityManager.DEBUG(LogToFile.DBG_FLAG, "\nUndo transaction: " + undoId.toString() + " start at " + undoStartAt.toString() + " stop at " + undoStopAt.toString());
            } else {
                SanityManager.DEBUG(LogToFile.DBG_FLAG, "\nUndo transaction: " + undoId.toString() + " start at end of log, stop at " + undoStopAt.toString());
            }
        }
    }
    // statistics
    int clrgenerated = 0;
    int clrskipped = 0;
    int logrecordseen = 0;
    StreamLogScan scanLog;
    Compensation compensation = null;
    Undoable lop = null;
    // stream to read the log record - initial size 4096; scanLog needs
    // to resize if the log record is larger than that
    ArrayInputStream rawInput = null;
    try {
        if (undoStartAt == null) {
            // don't know where to start, roll back from the end of the log
            scanLog = (StreamLogScan) logFactory.openBackwardsScan(undoStopAt);
        } else {
            if (undoStartAt.lessThan(undoStopAt)) {
                // nothing to undo!
                return;
            }
            long undoStartInstant = ((LogCounter) undoStartAt).getValueAsLong();
            scanLog = (StreamLogScan) logFactory.openBackwardsScan(undoStartInstant, undoStopAt);
        }
        if (SanityManager.DEBUG)
            SanityManager.ASSERT(scanLog != null, "cannot open log for undo");
        rawInput = new ArrayInputStream(new byte[4096]);
        LogRecord record;
        while ((record = scanLog.getNextRecord(rawInput, undoId, 0)) != null) {
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(record.getTransactionId().equals(undoId), "getNextRecord returned an unqualified log record for undo");
            }
            logrecordseen++;
            if (record.isCLR()) {
                clrskipped++;
                // the loggable is still in the input stream, get rid of it
                record.skipLoggable();
                // read the undoInstant
                long undoInstant = rawInput.readLong();
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "Skipping over CLRs, reset scan to " + LogCounter.toDebugString(undoInstant));
                    }
                }
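                // A CLR marks work that has already been rolled back:
                // repositioning the backwards scan at the compensated
                // record's instant skips that record, so it is not
                // undone a second time.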
                scanLog.resetPosition(new LogCounter(undoInstant));
                continue;
            }
            lop = record.getUndoable();
            if (lop != null) {
                int optionalDataLength = rawInput.readInt();
                int savePosition = rawInput.getPosition();
                rawInput.setLimit(optionalDataLength);
                compensation = lop.generateUndo(t, rawInput);
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "Rollback log record at instant " + LogCounter.toDebugString(scanLog.getInstant()) + " : " + lop);
                    }
                }
                clrgenerated++;
                if (compensation != null) {
                    // generateUndo may have read data off the
                    // stream; reset it for the undo operation
                    rawInput.setLimit(savePosition, optionalDataLength);
                    // log the compensation op that rolls back the
                    // operation at this instant
                    t.logAndUndo(compensation, new LogCounter(scanLog.getInstant()), rawInput);
                    compensation.releaseResource(t);
                    compensation = null;
                }
                // if compensation is null, the log operation is redo-only
            }
            // if this is not an undoable operation, continue with the next
            // log record
        }
    } catch (ClassNotFoundException cnfe) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_CORRUPTED, cnfe));
    } catch (IOException ioe) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_READ_LOG_FOR_UNDO, ioe));
    } catch (StandardException se) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_UNDO_FAILED, se, undoId, lop, compensation));
    } finally {
        if (compensation != null) {
            // errored out
            compensation.releaseResource(t);
        }
        if (rawInput != null) {
            try {
                rawInput.close();
            } catch (IOException ioe) {
                throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_READ_LOG_FOR_UNDO, ioe, undoId));
            }
        }
    }
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
            SanityManager.DEBUG(LogToFile.DBG_FLAG, "Finish undo" + ", clr generated = " + clrgenerated + ", clr skipped = " + clrskipped + ", records seen = " + logrecordseen + "\n");
        }
    }
}
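The CLR-skipping logic above is easiest to see in isolation. Below is a minimal, self-contained sketch (not Derby code; all types and names are hypothetical stand-ins) that models a backwards undo scan over an in-memory log: plain records are compensated, while a CLR makes the scan jump past the record it already rolled back.

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-ins for Derby's log record / CLR machinery.
public class UndoScanSketch {

    // A log record: either a normal update, or a CLR pointing at the
    // instant (here: the list index) of the record it compensated.
    record Rec(int instant, String payload, boolean isCLR, int undoInstant) {}

    public static void main(String[] args) {
        List<Rec> log = new ArrayList<>();
        log.add(new Rec(0, "update A", false, -1));
        log.add(new Rec(1, "update B", false, -1));
        // a CLR that already rolled back "update B" (instant 1)
        log.add(new Rec(2, "CLR for B", true, 1));
        log.add(new Rec(3, "update C", false, -1));

        int clrGenerated = 0, clrSkipped = 0, seen = 0;

        // Scan backwards from the end of the log.
        int pos = log.size() - 1;
        while (pos >= 0) {
            Rec r = log.get(pos);
            seen++;
            if (r.isCLR()) {
                clrSkipped++;
                // Jump past the compensated record: everything from the CLR
                // back to and including undoInstant is already undone.
                pos = r.undoInstant() - 1;
                continue;
            }
            // stand-in for "generate and log a compensation operation"
            System.out.println("undo: " + r.payload());
            clrGenerated++;
            pos--;
        }
        System.out.println("clr generated = " + clrGenerated
                + ", clr skipped = " + clrSkipped + ", records seen = " + seen);
    }
}

The sketch only prints; the real method also writes each compensation back to the log via t.logAndUndo, which is what makes the CLR skipping necessary in the first place: a rollback (or recovery) that runs after a partial rollback finds those CLRs and avoids undoing the same work twice.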
Use of org.apache.derby.impl.store.raw.log.LogRecord in project derby by apache.
The class FlushedScan, method getNextRecord.
/*
** Methods of LogScan
*/
/**
 * Read a log record into the byte array provided. Resizes the input
 * stream's byte array if necessary.
 *
 * @return the LogRecord read, or null if the end of the scan has been
 * reached.
 *
 * @exception StandardException Standard Derby error policy
 */
public LogRecord getNextRecord(ArrayInputStream input, TransactionId tranId, int groupmask) throws StandardException {
    try {
        boolean candidate;
        int peekAmount = LogRecord.formatOverhead() + LogRecord.maxGroupStoredSize();
        if (tranId != null)
            peekAmount += LogRecord.maxTransactionIdStoredSize(tranId);
        // the number of bytes actually read
        int readAmount;
        LogRecord lr;
        do {
            if (!open || !positionToNextRecord())
                return null;
            int checkLength;
            // this log record is a candidate unless proven otherwise
            lr = null;
            candidate = true;
            readAmount = -1;
            currentInstant = scan.readLong();
            byte[] data = input.getData();
            if (data.length < nextRecordLength) {
                // make a new array of sufficient size and reset the array
                // in the input stream
                data = new byte[nextRecordLength];
                input.setData(data);
            }
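            // An encrypted record must be read and decrypted in full
            // before the group or transaction id can be examined.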
            if (logFactory.databaseEncrypted()) {
                scan.readFully(data, 0, nextRecordLength);
                int len = logFactory.decrypt(data, 0, nextRecordLength, data, 0);
                if (SanityManager.DEBUG)
                    SanityManager.ASSERT(len == nextRecordLength);
                input.setLimit(0, len);
            } else {
                // no need to decrypt; only get the group and tid if we filter
                if (groupmask == 0 && tranId == null) {
                    // no filter, get the whole thing
                    scan.readFully(data, 0, nextRecordLength);
                    input.setLimit(0, nextRecordLength);
                } else {
                    // Read only enough so that the group and the tran id are
                    // in the data buffer. The group is stored as a compressed
                    // int and the tran id in a format of variable size, so
                    // read the min of peekAmount and nextRecordLength.
                    readAmount = (nextRecordLength > peekAmount) ? peekAmount : nextRecordLength;
                    // in the data buffer, we now have enough to peek
                    scan.readFully(data, 0, readAmount);
                    input.setLimit(0, readAmount);
                }
            }
            lr = (LogRecord) input.readObject();
            if (groupmask != 0 || tranId != null) {
                if (groupmask != 0 && (groupmask & lr.group()) == 0) {
                    // no group match, throw this log record out
                    candidate = false;
                }
                if (candidate && tranId != null) {
                    TransactionId tid = lr.getTransactionId();
                    if (!tid.equals(tranId)) {
                        // no transaction id match, throw this log record out
                        candidate = false;
                    }
                }
                if (candidate && !logFactory.databaseEncrypted()) {
                    // read the rest of the log record into the buffer
                    if (SanityManager.DEBUG)
                        SanityManager.ASSERT(readAmount > 0);
                    if (readAmount < nextRecordLength) {
                        // Need to remember where we are because the log
                        // record may have read part of it off the input
                        // stream already, and that position is lost when
                        // we set the limit again.
                        int inputPosition = input.getPosition();
                        scan.readFully(data, readAmount, nextRecordLength - readAmount);
                        input.setLimit(0, nextRecordLength);
                        input.setPosition(inputPosition);
                    }
                }
            }
            if (candidate || logFactory.databaseEncrypted()) {
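                // The record length is written again after the payload to
                // support backwards scans; reading it here doubles as a
                // consistency check.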
                checkLength = scan.readInt();
                if (SanityManager.DEBUG) {
                    SanityManager.ASSERT(checkLength == nextRecordLength, "log corrupted");
                }
            } else {
                // Chances are we haven't read all of the log record, so
                // skip it. The starting position of this record is in
                // currentInstant; calculate the next record's starting
                // position from that and nextRecordLength.
                long nextRecordStartPosition = LogCounter.getLogFilePosition(currentInstant) + nextRecordLength + LogToFile.LOG_RECORD_OVERHEAD;
                scan.seek(nextRecordStartPosition);
            }
        } while (!candidate);
        return lr;
    } catch (ClassNotFoundException cnfe) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_CORRUPTED, cnfe));
    } catch (IOException ioe) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_IO_ERROR, ioe));
    }
}
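The peek-then-seek filtering above can also be shown in a small, self-contained sketch. This is not Derby's actual log format; the layout and all names here are simplified assumptions. Each record is written as a length, a group byte, and a payload, followed by a trailing copy of the length. The scan reads just the group byte, and if it does not match the mask it seeks directly past the rest of the record instead of reading it.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;

// Hypothetical on-disk layout: [int length][byte group][payload][int length]
public class FilteredScanSketch {

    static void writeRecord(DataOutputStream out, byte group, String payload) throws IOException {
        byte[] body = payload.getBytes(StandardCharsets.UTF_8);
        int length = 1 + body.length; // group byte + payload
        out.writeInt(length);
        out.writeByte(group);
        out.write(body);
        out.writeInt(length); // trailing length enables backwards scans
    }

    public static void main(String[] args) throws IOException {
        File f = File.createTempFile("scan", ".log");
        f.deleteOnExit();
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            writeRecord(out, (byte) 1, "commit T1");
            writeRecord(out, (byte) 2, "update page 7");
            writeRecord(out, (byte) 1, "commit T2");
        }
        try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
            raf.write(bytes.toByteArray());
            raf.seek(0);

            int groupmask = 1; // keep only records whose group matches
            while (raf.getFilePointer() < raf.length()) {
                int length = raf.readInt();
                long bodyStart = raf.getFilePointer();
                byte group = raf.readByte(); // peek: read only the group
                if ((group & groupmask) == 0) {
                    // no match: seek past the payload and trailing length
                    raf.seek(bodyStart + length + 4);
                    continue;
                }
                // match: read the rest of the record
                byte[] payload = new byte[length - 1];
                raf.readFully(payload);
                int checkLength = raf.readInt();
                if (checkLength != length)
                    throw new IOException("log corrupted");
                System.out.println(new String(payload, StandardCharsets.UTF_8));
            }
        }
    }
}

In the real method the peek also has to cover the serialized transaction id, and encrypted databases defeat the optimization entirely, since the whole record must be read and decrypted before the group is visible.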