Use of org.apache.asterix.common.transactions.ILogReader in project asterixdb by apache.
From the class RecoveryManager, method rollbackTransaction.
@Override
public void rollbackTransaction(ITransactionContext txnContext) throws ACIDException {
    int abortedJobId = txnContext.getJobId().getId();
    // Obtain the first/last log record LSNs written by the job.
    long firstLSN = txnContext.getFirstLSN();
    /**
     * The effect of any log record with LSN below minFirstLSN has already been written to disk and
     * will not be rolled back. Therefore, we set the first LSN of the job to the maximum of
     * minFirstLSN and the job's first LSN.
     */
    try {
        long localMinFirstLSN = getLocalMinFirstLSN();
        firstLSN = Math.max(firstLSN, localMinFirstLSN);
    } catch (HyracksDataException e) {
        throw new ACIDException(e);
    }
    long lastLSN = txnContext.getLastLSN();
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("rolling back transaction log records from " + firstLSN + " to " + lastLSN);
    }
    // Check whether the transaction actually wrote any logs.
    if (firstLSN == TransactionManagementConstants.LogManagerConstants.TERMINAL_LSN || firstLSN > lastLSN) {
        if (LOGGER.isLoggable(Level.INFO)) {
            LOGGER.info("no need to roll back as there were no operations by the job " + txnContext.getJobId());
        }
        return;
    }
    // While reading log records from firstLSN to lastLSN, collect the uncommitted (loser) entities' LSNs.
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("collecting loser transaction's LSNs from " + firstLSN + " to " + lastLSN);
    }
    Map<TxnId, List<Long>> jobLoserEntity2LSNsMap = new HashMap<>();
    TxnId tempKeyTxnId = new TxnId(-1, -1, -1, null, -1, false);
    int updateLogCount = 0;
    int entityCommitLogCount = 0;
    int logJobId = -1;
    long currentLSN = -1;
    TxnId loserEntity = null;
    List<Long> undoLSNSet = null;
    // Get the active partitions on this node.
    Set<Integer> activePartitions = localResourceRepository.getActivePartitions();
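    // Open a log reader over the transaction log; it is closed in the finally block below.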
    ILogReader logReader = logMgr.getLogReader(false);
    try {
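        // Position the reader at the job's first LSN, then scan forward to its last LSN.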
        logReader.initializeScan(firstLSN);
        ILogRecord logRecord = null;
        while (currentLSN < lastLSN) {
            logRecord = logReader.next();
            if (logRecord == null) {
                break;
            } else {
                currentLSN = logRecord.getLSN();
                if (IS_DEBUG_MODE) {
                    LOGGER.info(logRecord.getLogRecordForDisplay());
                }
            }
            logJobId = logRecord.getJobId();
            if (logJobId != abortedJobId) {
                continue;
            }
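            // Reuse the temporary key to probe the map; a fresh TxnId is allocated only when a new loser entity is found.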
            tempKeyTxnId.setTxnId(logJobId, logRecord.getDatasetId(), logRecord.getPKHashValue(),
                    logRecord.getPKValue(), logRecord.getPKValueSize());
            switch (logRecord.getLogType()) {
                case LogType.UPDATE:
                    if (activePartitions.contains(logRecord.getResourcePartition())) {
                        undoLSNSet = jobLoserEntity2LSNsMap.get(tempKeyTxnId);
                        if (undoLSNSet == null) {
                            loserEntity = new TxnId(logJobId, logRecord.getDatasetId(), logRecord.getPKHashValue(),
                                    logRecord.getPKValue(), logRecord.getPKValueSize(), true);
                            undoLSNSet = new LinkedList<>();
                            jobLoserEntity2LSNsMap.put(loserEntity, undoLSNSet);
                        }
                        undoLSNSet.add(currentLSN);
                        updateLogCount++;
                        if (IS_DEBUG_MODE) {
                            LOGGER.info(Thread.currentThread().getId() + "======> update[" + currentLSN + "]:"
                                    + tempKeyTxnId);
                        }
                    }
                    break;
                case LogType.ENTITY_COMMIT:
                    if (activePartitions.contains(logRecord.getResourcePartition())) {
                        jobLoserEntity2LSNsMap.remove(tempKeyTxnId);
                        entityCommitLogCount++;
                        if (IS_DEBUG_MODE) {
                            LOGGER.info(Thread.currentThread().getId() + "======> entity_commit[" + currentLSN + "]"
                                    + tempKeyTxnId);
                        }
                    }
                    break;
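                // A job being aborted can never have logged a JOB_COMMIT, so such a record indicates corruption.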
                case LogType.JOB_COMMIT:
                    throw new ACIDException("Unexpected LogType(" + logRecord.getLogType() + ") during abort.");
                case LogType.ABORT:
                case LogType.FLUSH:
                case LogType.WAIT:
                case LogType.MARKER:
                    // ignore
                    break;
                default:
                    throw new ACIDException("Unsupported LogType: " + logRecord.getLogType());
            }
        }
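        // Sanity check: the scan must end exactly at the job's last LSN.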
        if (currentLSN != lastLSN) {
            throw new ACIDException("LastLSN mismatch: lastLSN(" + lastLSN + ") vs currentLSN(" + currentLSN
                    + ") during abort(" + txnContext.getJobId() + ")");
        }
        // Undo the loser transaction's effects.
        LOGGER.log(Level.INFO, "undoing loser transaction's effect");
        IDatasetLifecycleManager datasetLifecycleManager =
                txnSubsystem.getAsterixAppRuntimeContextProvider().getDatasetLifecycleManager();
        //TODO sort loser entities by smallest LSN to undo in one pass.
        Iterator<Entry<TxnId, List<Long>>> iter = jobLoserEntity2LSNsMap.entrySet().iterator();
        int undoCount = 0;
        while (iter.hasNext()) {
            Map.Entry<TxnId, List<Long>> loserEntity2LSNsMap = iter.next();
            undoLSNSet = loserEntity2LSNsMap.getValue();
            // This step is important since the undo of the update operations must be done in reverse order.
            Collections.reverse(undoLSNSet);
            for (long undoLSN : undoLSNSet) {
                // Here, all the log records are UPDATE type, so the type need not be checked again.
                // Read the corresponding log record to be undone.
                logRecord = logReader.read(undoLSN);
                if (logRecord == null) {
                    throw new ACIDException("Illegal state during abort(" + txnContext.getJobId() + ")");
                }
                if (IS_DEBUG_MODE) {
                    LOGGER.info(logRecord.getLogRecordForDisplay());
                }
                undo(logRecord, datasetLifecycleManager);
                undoCount++;
            }
        }
        if (LOGGER.isLoggable(Level.INFO)) {
            LOGGER.info("undone loser transaction's effect");
            LOGGER.info("[RecoveryManager's rollback log count] update/entityCommit/undo:" + updateLogCount + "/"
                    + entityCommitLogCount + "/" + undoCount);
        }
    } finally {
        logReader.close();
    }
}
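
Isolated from the method above, the core ILogReader protocol is: initializeScan(firstLSN), repeated next() calls until the target LSN is reached (next() returns null at the end of the log), and close() in a finally block; random access to a known LSN uses read(LSN) instead, as in the undo phase. The sketch below is a minimal, hypothetical helper built only from the calls visible in this snippet; the class name, method name, and the ILogRecord/LogType/ACIDException import paths are illustrative assumptions, not part of asterixdb.

import org.apache.asterix.common.exceptions.ACIDException; // assumed package for ACIDException
import org.apache.asterix.common.transactions.ILogReader;
import org.apache.asterix.common.transactions.ILogRecord; // assumed to live beside ILogReader
import org.apache.asterix.common.transactions.LogType; // assumed to live beside ILogReader

public class LogScanExample {

    // Hypothetical helper: forward-scan the log from firstLSN up to lastLSN and
    // count the UPDATE records written by one job, using the same scan protocol
    // as rollbackTransaction above.
    public static int countJobUpdates(ILogReader logReader, int jobId, long firstLSN, long lastLSN)
            throws ACIDException {
        int updateCount = 0;
        try {
            // Position the reader at the first LSN of interest.
            logReader.initializeScan(firstLSN);
            long currentLSN = -1;
            while (currentLSN < lastLSN) {
                ILogRecord logRecord = logReader.next();
                if (logRecord == null) {
                    // End of log reached before lastLSN.
                    break;
                }
                currentLSN = logRecord.getLSN();
                if (logRecord.getJobId() == jobId && logRecord.getLogType() == LogType.UPDATE) {
                    updateCount++;
                }
            }
        } finally {
            // Always release the reader, mirroring the finally block above.
            logReader.close();
        }
        return updateCount;
    }
}

The helper takes an already-opened ILogReader rather than a log manager, so acquiring the reader (logMgr.getLogReader(false) in the method above) stays with the caller; the loop's termination condition mirrors the original, stopping once a record at or beyond lastLSN has been consumed or the log is exhausted.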