Use of org.apache.derby.impl.store.raw.log.LogCounter in project derby by apache.
The example below is the undo method of the class FileLogger.
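For context: a LogCounter is Derby's LogInstant implementation, and it packs a log file number and a byte position within that file into a single long. That is why the method below freely converts between LogInstant objects and longs (getValueAsLong, new LogCounter(undoInstant)), and why instants can be compared with lessThan. Below is a minimal sketch of that packing idea only, assuming a simple 32/32 bit split; the real LogCounter's field widths and limits may differ.

// Illustrative sketch, not Derby's LogCounter: pack a log file number and an
// offset within that file into one long "instant", assuming a 32/32 bit split.
public final class InstantSketch {

    static long makeInstant(long fileNumber, long filePosition) {
        return (fileNumber << 32) | (filePosition & 0xFFFFFFFFL);
    }

    static long fileNumber(long instant) {
        return instant >>> 32;
    }

    static long filePosition(long instant) {
        return instant & 0xFFFFFFFFL;
    }

    public static void main(String[] args) {
        long instant = makeInstant(3, 1024);
        // Instants compare in log order: a higher file number, or the same file
        // and a higher position, means "later in the log", which is what a
        // backwards scan and LogInstant.lessThan rely on.
        System.out.println(fileNumber(instant) + ", " + filePosition(instant));
    }
}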
/**
 * Undo a part of or the entire transaction. Rolling back begins with the log
 * record at undoStartAt and stops at (inclusive) the log record at
 * undoStopAt.
 *
 * <P>MT - Not needed. A transaction must be single threaded through undo;
 * each RawTransaction has its own logger, therefore there is no need to
 * synchronize. The RawTransaction must handle synchronizing with
 * multiple threads during rollback.
 *
 * @param t the transaction that needs to be rolled back
 * @param undoId the transaction ID
 * @param undoStopAt the last log record that should be rolled back
 * @param undoStartAt the first log record that should be rolled back,
 *        or null to start from the end of the log
 *
 * @exception StandardException Standard Derby error policy
 *
 * @see Logger#undo
 */
public void undo(RawTransaction t, TransactionId undoId, LogInstant undoStopAt, LogInstant undoStartAt) throws StandardException {
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
            if (undoStartAt != null) {
                SanityManager.DEBUG(LogToFile.DBG_FLAG, "\nUndo transaction: " + undoId.toString() + " start at " + undoStartAt.toString() + " stop at " + undoStopAt.toString());
            } else {
                SanityManager.DEBUG(LogToFile.DBG_FLAG, "\nUndo transaction: " + undoId.toString() + " start at end of log stop at " + undoStopAt.toString());
            }
        }
    }
    // statistics
    int clrgenerated = 0;
    int clrskipped = 0;
    int logrecordseen = 0;
    StreamLogScan scanLog;
    Compensation compensation = null;
    Undoable lop = null;
    // stream to read the log record - initial size 4096, scanLog needs
    // to resize if the log record is larger than that.
    ArrayInputStream rawInput = null;
    try {
        if (undoStartAt == null) {
            // don't know where to start, rollback from end of log
            scanLog = (StreamLogScan) logFactory.openBackwardsScan(undoStopAt);
        } else {
            if (undoStartAt.lessThan(undoStopAt)) {
                // nothing to undo!
                return;
            }
            long undoStartInstant = ((LogCounter) undoStartAt).getValueAsLong();
            scanLog = (StreamLogScan) logFactory.openBackwardsScan(undoStartInstant, undoStopAt);
        }
        if (SanityManager.DEBUG)
            SanityManager.ASSERT(scanLog != null, "cannot open log for undo");
        rawInput = new ArrayInputStream(new byte[4096]);
        LogRecord record;
        while ((record = scanLog.getNextRecord(rawInput, undoId, 0)) != null) {
            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(record.getTransactionId().equals(undoId), "getNextRecord returned unqualified log record for undo");
            }
            logrecordseen++;
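            // A CLR (compensation log record) marks an operation that has
            // already been rolled back, so it is not undone again; instead the
            // scan jumps back to the instant recorded in the CLR and resumes
            // the backwards scan from there.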
            if (record.isCLR()) {
                clrskipped++;
                // the loggable is still in the input stream, get rid of it
                record.skipLoggable();
                // read the undoInstant
                long undoInstant = rawInput.readLong();
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "Skipping over CLRs, reset scan to " + LogCounter.toDebugString(undoInstant));
                    }
                }
                scanLog.resetPosition(new LogCounter(undoInstant));
                continue;
            }
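            // For an ordinary undoable record: position the stream on its
            // optional data, ask the operation to generate a compensation
            // operation, then log that compensation and apply the undo.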
            lop = record.getUndoable();
            if (lop != null) {
                int optionalDataLength = rawInput.readInt();
                int savePosition = rawInput.getPosition();
                rawInput.setLimit(optionalDataLength);
                compensation = lop.generateUndo(t, rawInput);
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
                        SanityManager.DEBUG(LogToFile.DBG_FLAG, "Rollback log record at instant " + LogCounter.toDebugString(scanLog.getInstant()) + " : " + lop);
                    }
                }
                clrgenerated++;
                if (compensation != null) {
                    // generateUndo may have read stuff off the
                    // stream, reset it for the undo operation.
                    rawInput.setLimit(savePosition, optionalDataLength);
                    // log the compensation op that rolls back the
                    // operation at this instant
                    t.logAndUndo(compensation, new LogCounter(scanLog.getInstant()), rawInput);
                    compensation.releaseResource(t);
                    compensation = null;
                }
                // if compensation is null, log operation is redo only
            }
            // if this is not an undoable operation, continue with next log
            // record
        }
    } catch (ClassNotFoundException cnfe) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_CORRUPTED, cnfe));
    } catch (IOException ioe) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_READ_LOG_FOR_UNDO, ioe));
    } catch (StandardException se) {
        throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_UNDO_FAILED, se, undoId, lop, compensation));
    } finally {
        if (compensation != null) {
            // errored out
            compensation.releaseResource(t);
        }
        if (rawInput != null) {
            try {
                rawInput.close();
            } catch (IOException ioe) {
                throw logFactory.markCorrupt(StandardException.newException(SQLState.LOG_READ_LOG_FOR_UNDO, ioe, undoId));
            }
        }
    }
    if (SanityManager.DEBUG) {
        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
            SanityManager.DEBUG(LogToFile.DBG_FLAG, "Finish undo" + ", clr generated = " + clrgenerated + ", clr skipped = " + clrskipped + ", record seen = " + logrecordseen + "\n");
        }
    }
}
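To make the control flow above easier to follow in isolation, here is a small, self-contained sketch (not Derby code; the record layout and names are invented for illustration) of the same idea: walk a transaction's log records from newest to oldest, skip work that an existing compensation record says is already undone by jumping back to the position it points at, and emit a compensation for everything else.

import java.util.ArrayList;
import java.util.List;

// Hypothetical, simplified simulation of the undo loop above.
public class UndoSketch {

    // A log record: either a normal operation, or a CLR pointing at the index
    // of the operation it compensates (invented structure for illustration).
    record LogRec(String name, boolean isCLR, int undoIndex) { }

    public static void main(String[] args) {
        List<LogRec> log = new ArrayList<>();
        log.add(new LogRec("insert row 1", false, -1));  // index 0
        log.add(new LogRec("insert row 2", false, -1));  // index 1
        log.add(new LogRec("CLR for index 1", true, 1)); // index 2: row 2 already undone

        int clrGenerated = 0;
        // Walk backwards from the end of the log, like the backwards scan
        // opened in FileLogger.undo.
        for (int i = log.size() - 1; i >= 0; i--) {
            LogRec rec = log.get(i);
            if (rec.isCLR()) {
                // The operation at rec.undoIndex() was already rolled back;
                // resume the scan just before it (mirrors scanLog.resetPosition).
                i = rec.undoIndex();
                continue;
            }
            // Emit a compensation and apply the undo for this operation
            // (mirrors generateUndo + logAndUndo).
            System.out.println("undo: " + rec.name());
            clrGenerated++;
        }
        // Prints "undo: insert row 1" only; row 2 was already compensated.
        System.out.println("clr generated = " + clrGenerated);
    }
}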