Use of org.apache.tephra.TransactionConflictException in project cdap by caskdata.
Class MapReduceRuntimeService, method destroy.
/**
 * Calls the destroy method of {@link ProgramLifecycle}.
 */
private void destroy(final boolean succeeded, final String failureInfo) throws Exception {
  // if any exception happens during output committing, we want the MapReduce to fail.
  // for that to happen it is not sufficient to set the status to failed, we have to throw an exception,
  // otherwise the shutdown completes successfully and the completed() callback is called.
  // thus: remember the exception and throw it at the end.
  final AtomicReference<Exception> failureCause = new AtomicReference<>();
  // TODO (CDAP-1952): this should be done in the output committer, to make the M/R fail if addPartition fails
  try {
    context.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext ctxt) throws Exception {
        ClassLoader oldClassLoader = ClassLoaders.setContextClassLoader(job.getConfiguration().getClassLoader());
        try {
          for (Map.Entry<String, ProvidedOutput> output : context.getOutputs().entrySet()) {
            commitOutput(succeeded, output.getKey(), output.getValue().getOutputFormatProvider(), failureCause);
            if (succeeded && failureCause.get() != null) {
              // mapreduce was successful but this output committer failed: call onFailure() for all committers
              for (ProvidedOutput toFail : context.getOutputs().values()) {
                commitOutput(false, toFail.getAlias(), toFail.getOutputFormatProvider(), failureCause);
              }
              break;
            }
          }
          // if there was a failure, we must throw an exception to fail the transaction
          // this will roll back all the outputs and also make sure that postCommit() is not called
          // throwing the failure cause: it will be wrapped in a TxFailure and handled in the outer catch()
          Exception cause = failureCause.get();
          if (cause != null) {
            failureCause.set(null);
            throw cause;
          }
        } finally {
          ClassLoaders.setContextClassLoader(oldClassLoader);
        }
      }
    });
  } catch (TransactionFailureException e) {
    LOG.error("Transaction failure when committing dataset outputs", e);
    if (failureCause.get() != null) {
      failureCause.get().addSuppressed(e);
    } else {
      failureCause.set(e);
    }
  }
  final boolean success = succeeded && failureCause.get() == null;
  context.setState(getProgramState(success, failureInfo));
  final TransactionControl txControl = mapReduce instanceof ProgramLifecycle
    ? Transactions.getTransactionControl(TransactionControl.IMPLICIT, MapReduce.class, mapReduce, "destroy")
    : TransactionControl.IMPLICIT;
  try {
    if (TransactionControl.IMPLICIT == txControl) {
      context.execute(new TxRunnable() {
        @Override
        public void run(DatasetContext context) throws Exception {
          doDestroy(success);
        }
      });
    } else {
      doDestroy(success);
    }
  } catch (Throwable e) {
    if (e instanceof TransactionFailureException && e.getCause() != null && !(e instanceof TransactionConflictException)) {
      e = e.getCause();
    }
    LOG.warn("Error executing the destroy method of the MapReduce program {}", context.getProgram().getName(), e);
  }
  // this is needed to make the run fail if there was an exception. See comment at beginning of this method
  if (failureCause.get() != null) {
    throw failureCause.get();
  }
}
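The catch (Throwable e) block above unwraps a generic TransactionFailureException to its underlying cause before logging, but deliberately keeps a TransactionConflictException intact so that conflicts remain recognizable in the log. A minimal sketch of that unwrapping rule, pulled out into a standalone helper (the helper class and method names are illustrative, not part of CDAP):

import org.apache.tephra.TransactionConflictException;
import org.apache.tephra.TransactionFailureException;

final class TxFailures {
  private TxFailures() { }

  // Unwrap a transaction failure to its cause, unless it is a conflict:
  // conflicts keep their own type so logging/retry logic can still see them.
  static Throwable unwrap(Throwable e) {
    if (e instanceof TransactionFailureException
        && e.getCause() != null
        && !(e instanceof TransactionConflictException)) {
      return e.getCause();
    }
    return e;
  }
}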
Use of org.apache.tephra.TransactionConflictException in project cdap by caskdata.
Class TransactionContextTest, method testCanCommitFalse.
@Test
public void testCanCommitFalse() throws TransactionFailureException, InterruptedException {
  txClient.failCanCommitOnce = true;
  TransactionContext context = newTransactionContext(ds1, ds2);
  // start transaction
  context.start();
  // add a change to ds1 and ds2
  ds1.addChange(A);
  ds2.addChange(B);
  // commit transaction should fail and cause rollback
  try {
    context.finish();
    Assert.fail("commit failed - exception should be thrown");
  } catch (TransactionConflictException e) {
    Assert.assertNull(e.getCause());
  }
  // verify both are rolled back and tx is aborted
  Assert.assertTrue(ds1.started);
  Assert.assertTrue(ds2.started);
  Assert.assertTrue(ds1.checked);
  Assert.assertTrue(ds2.checked);
  Assert.assertFalse(ds1.committed);
  Assert.assertFalse(ds2.committed);
  Assert.assertFalse(ds1.postCommitted);
  Assert.assertFalse(ds2.postCommitted);
  Assert.assertTrue(ds1.rolledBack);
  Assert.assertTrue(ds2.rolledBack);
  Assert.assertEquals(txClient.state, DummyTxClient.CommitState.Aborted);
}
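The test exercises the Tephra TransactionContext contract: finish() either commits every participating TransactionAware or rolls all of them back and rethrows, and a rejected conflict check surfaces as a TransactionConflictException. A minimal usage sketch of that contract, assuming a TransactionSystemClient named txClient and two TransactionAware datasets ds1 and ds2 as in the test:

import org.apache.tephra.TransactionAware;
import org.apache.tephra.TransactionConflictException;
import org.apache.tephra.TransactionContext;
import org.apache.tephra.TransactionFailureException;
import org.apache.tephra.TransactionSystemClient;

final class CommitOrRollback {
  private CommitOrRollback() { }

  static void execute(TransactionSystemClient txClient, TransactionAware ds1, TransactionAware ds2)
    throws TransactionFailureException {
    TransactionContext txContext = new TransactionContext(txClient, ds1, ds2);
    txContext.start();
    try {
      // ... apply changes to ds1 and ds2 here ...
      txContext.finish();   // commits, or rolls back all participants and throws
    } catch (TransactionConflictException e) {
      // the write-write conflict check failed; both datasets have been rolled back
      throw e;
    }
  }
}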
Use of org.apache.tephra.TransactionConflictException in project cdap by caskdata.
Class DatasetBasedTimeScheduleStore, method upgrade.
/**
 * Method to add version to row key in SchedulerStore.
 *
 * @throws InterruptedException
 * @throws IOException
 * @throws DatasetManagementException
 */
public void upgrade() throws InterruptedException, IOException, DatasetManagementException {
  while (true) {
    try {
      initializeScheduleTable();
      break;
    } catch (Exception ex) {
      // Expected if the cdap services are not up.
      TimeUnit.SECONDS.sleep(10);
    }
  }
  if (isUpgradeComplete()) {
    LOG.info("{} is already upgraded.", NAME);
    return;
  }
  final AtomicInteger sleepTimeInSecs = new AtomicInteger(60);
  final AtomicInteger tries = new AtomicInteger(0);
  LOG.info("Starting upgrade of {}.", NAME);
  while (!isUpgradeComplete()) {
    sleepTimeInSecs.set(60);
    try {
      factory.createExecutor(ImmutableList.of((TransactionAware) table))
        .execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() {
            upgradeJobs(table);
            upgradeTriggers(table);
            // Upgrade is complete. Mark that app version upgrade is complete in the table.
            table.put(APP_VERSION_UPGRADE_KEY, COLUMN, Bytes.toBytes(ProjectInfo.getVersion().toString()));
          }
        });
    } catch (TransactionFailureException e) {
      if (e instanceof TransactionConflictException) {
        LOG.debug("Upgrade step faced Transaction Conflict exception. Retrying operation now.", e);
        sleepTimeInSecs.set(10);
      } else {
        LOG.error("Upgrade step faced exception. Will retry operation after some delay.", e);
        sleepTimeInSecs.set(60);
      }
    }
    if (tries.incrementAndGet() > 500) {
      LOG.warn("Could not complete upgrade of {}, tried for 500 times", NAME);
      return;
    }
    TimeUnit.SECONDS.sleep(sleepTimeInSecs.get());
  }
  LOG.info("Upgrade of {} is complete.", NAME);
}
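The retry loop above treats a TransactionConflictException as a transient condition (another writer touched the same rows) and backs off for only 10 seconds, while any other TransactionFailureException gets the full 60-second delay, for at most 500 attempts. A compact sketch of that backoff decision, factored into an illustrative helper:

import java.util.concurrent.TimeUnit;
import org.apache.tephra.TransactionConflictException;
import org.apache.tephra.TransactionFailureException;

final class UpgradeBackoff {
  private UpgradeBackoff() { }

  // Conflicts are expected to clear quickly, so retry sooner; any other
  // transaction failure waits for the longer default delay.
  static int delaySeconds(TransactionFailureException e) {
    return (e instanceof TransactionConflictException) ? 10 : 60;
  }

  static void sleepBeforeRetry(TransactionFailureException e) throws InterruptedException {
    TimeUnit.SECONDS.sleep(delaySeconds(e));
  }
}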
Use of org.apache.tephra.TransactionConflictException in project phoenix by apache.
Class MutationState, method commit.
public void commit() throws SQLException {
  Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> txMutations = Collections.emptyMap();
  int retryCount = 0;
  do {
    boolean sendSuccessful = false;
    boolean retryCommit = false;
    SQLException sqlE = null;
    try {
      send();
      txMutations = this.txMutations;
      sendSuccessful = true;
    } catch (SQLException e) {
      sqlE = e;
    } finally {
      try {
        if (txContext != null && isTransactionStarted()) {
          TransactionFailureException txFailure = null;
          boolean finishSuccessful = false;
          try {
            if (sendSuccessful) {
              txContext.finish();
              finishSuccessful = true;
            }
          } catch (TransactionFailureException e) {
            if (logger.isInfoEnabled())
              logger.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer() + " with retry count of " + retryCount);
            retryCommit = (e instanceof TransactionConflictException && retryCount < MAX_COMMIT_RETRIES);
            txFailure = e;
            SQLException nextE = TransactionUtil.getTransactionFailureException(e);
            if (sqlE == null) {
              sqlE = nextE;
            } else {
              sqlE.setNextException(nextE);
            }
          } finally {
            // If send fails or finish fails, abort the tx
            if (!finishSuccessful) {
              try {
                txContext.abort(txFailure);
                if (logger.isInfoEnabled())
                  logger.info("Abort successful");
              } catch (TransactionFailureException e) {
                if (logger.isInfoEnabled())
                  logger.info("Abort failed with " + e);
                SQLException nextE = TransactionUtil.getTransactionFailureException(e);
                if (sqlE == null) {
                  sqlE = nextE;
                } else {
                  sqlE.setNextException(nextE);
                }
              }
            }
          }
        }
      } finally {
        try {
          resetState();
        } finally {
          if (retryCommit) {
            startTransaction();
            // Add back read fences
            Set<TableRef> txTableRefs = txMutations.keySet();
            for (TableRef tableRef : txTableRefs) {
              PTable dataTable = tableRef.getTable();
              addDMLFence(dataTable);
            }
            try {
              // Only retry if an index was added
              retryCommit = shouldResubmitTransaction(txTableRefs);
            } catch (SQLException e) {
              retryCommit = false;
              if (sqlE == null) {
                sqlE = e;
              } else {
                sqlE.setNextException(e);
              }
            }
          }
          if (sqlE != null && !retryCommit) {
            throw sqlE;
          }
        }
      }
    }
    // Retry commit once if conflict occurred and index was added
    if (!retryCommit) {
      break;
    }
    retryCount++;
    mutations.putAll(txMutations);
  } while (true);
}
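Phoenix retries the whole commit at most once, and only when the transaction failed with a TransactionConflictException and shouldResubmitTransaction() later confirms that an index was added during the transaction. A stripped-down sketch of the first half of that decision, with an illustrative constant standing in for Phoenix's MAX_COMMIT_RETRIES:

import org.apache.tephra.TransactionConflictException;
import org.apache.tephra.TransactionFailureException;

final class CommitRetryPolicy {
  // Illustrative value; Phoenix defines its own MAX_COMMIT_RETRIES constant.
  private static final int MAX_COMMIT_RETRIES = 1;

  private CommitRetryPolicy() { }

  // Retry only for conflicts and only while the retry budget lasts; the real
  // code additionally requires shouldResubmitTransaction() to return true.
  static boolean shouldRetry(TransactionFailureException e, int retryCount) {
    return e instanceof TransactionConflictException && retryCount < MAX_COMMIT_RETRIES;
  }
}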
Use of org.apache.tephra.TransactionConflictException in project cdap by caskdata.
Class TransactionContextTest, method testCommitFalse.
@Test
public void testCommitFalse() throws TransactionFailureException, InterruptedException {
  txClient.failCommits = 1;
  TransactionContext context = newTransactionContext(ds1, ds2);
  // start transaction
  context.start();
  // add a change to ds1 and ds2
  ds1.addChange(A);
  ds2.addChange(B);
  // commit transaction should fail and cause rollback
  try {
    context.finish();
    Assert.fail("commit failed - exception should be thrown");
  } catch (TransactionConflictException e) {
    Assert.assertNull(e.getCause());
  }
  // verify both are rolled back and tx is aborted
  Assert.assertTrue(ds1.started);
  Assert.assertTrue(ds2.started);
  Assert.assertTrue(ds1.checked);
  Assert.assertTrue(ds2.checked);
  Assert.assertTrue(ds1.committed);
  Assert.assertTrue(ds2.committed);
  Assert.assertFalse(ds1.postCommitted);
  Assert.assertFalse(ds2.postCommitted);
  Assert.assertTrue(ds1.rolledBack);
  Assert.assertTrue(ds2.rolledBack);
  Assert.assertEquals(txClient.state, DummyTxClient.CommitState.Aborted);
}
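Compared with testCanCommitFalse above, the failure here happens at the commit step rather than at the conflict check, so both datasets have already flushed their changes (committed is true) before being rolled back; the abort path is otherwise identical. The shared flag assertions could be factored into a small helper, sketched below with a hypothetical DummyTxAware type standing in for the test's dataset doubles:

import org.junit.Assert;

// Hypothetical helper; DummyTxAware stands in for the dataset doubles whose
// boolean flags are asserted above. Only the flags common to both tests are checked.
static void assertAbortedWithoutPostCommit(DummyTxAware ds) {
  Assert.assertTrue(ds.started);
  Assert.assertTrue(ds.checked);
  Assert.assertFalse(ds.postCommitted);
  Assert.assertTrue(ds.rolledBack);
}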