use of org.apache.tephra.TransactionAware in project cdap by caskdata.
the class PartitionConsumerTest method testConsumeAfterDelete.
@Test
public void testConsumeAfterDelete() throws Exception {
  final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
  final TransactionAware txAwareDataset = (TransactionAware) dataset;
  final Set<PartitionKey> partitionKeys1 = new HashSet<>();
  for (int i = 0; i < 3; i++) {
    partitionKeys1.add(generateUniqueKey());
  }
  // ensure that the consumer's max working set size is larger than the number of partitions we consume
  // initially, so that the additional partitions (which will be deleted afterwards) are also brought
  // into the working set
  ConsumerConfiguration consumerConfiguration = ConsumerConfiguration.builder().setMaxWorkingSetSize(100).build();
  final PartitionConsumer partitionConsumer =
    new ConcurrentPartitionConsumer(dataset, new InMemoryStatePersistor(), consumerConfiguration);
  dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      for (PartitionKey partitionKey : partitionKeys1) {
        dataset.getPartitionOutput(partitionKey).addPartition();
      }
    }
  });
  dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // add two more partitions; these will be dropped later, before they are consumed
      for (int i = 0; i < 2; i++) {
        dataset.getPartitionOutput(generateUniqueKey()).addPartition();
      }
    }
  });
  dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // consume 3 of the 5 initial partitions
      Assert.assertEquals(partitionKeys1, toKeys(partitionConsumer.consumePartitions(3).getPartitions()));
    }
  });
  final Set<PartitionKey> partitionKeys2 = new HashSet<>();
  for (int i = 0; i < 5; i++) {
    partitionKeys2.add(generateUniqueKey());
  }
  dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // drop all existing partitions (2 of which are not consumed)
      for (PartitionDetail partitionDetail : dataset.getPartitions(PartitionFilter.ALWAYS_MATCH)) {
        dataset.dropPartition(partitionDetail.getPartitionKey());
      }
      // add 5 new ones
      for (PartitionKey partitionKey : partitionKeys2) {
        dataset.getPartitionOutput(partitionKey).addPartition();
      }
    }
  });
  dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // the consumed partition keys should correspond to partitionKeys2, and not include the dropped,
      // but unconsumed, partitions added before them
      Assert.assertEquals(partitionKeys2, toKeys(partitionConsumer.consumePartitions().getPartitions()));
    }
  });
  dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // consuming the partitions again, without adding any new partitions, returns an empty iterator
      Assert.assertTrue(partitionConsumer.consumePartitions().getPartitions().isEmpty());
    }
  });
  dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // creating a new PartitionConsumer resets the consumption state. Consuming from it then returns an
      // iterator with all the partition keys added after the deletions
      ConcurrentPartitionConsumer partitionConsumer2 =
        new ConcurrentPartitionConsumer(dataset, new InMemoryStatePersistor());
      Assert.assertEquals(partitionKeys2, toKeys(partitionConsumer2.consumePartitions().getPartitions()));
    }
  });
}
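The test leans on two small helpers from the surrounding test class that are not part of this excerpt. A hypothetical sketch of what generateUniqueKey and toKeys could look like (the single long partitioning field "id" and the counter are assumptions for illustration, not the actual CDAP test code):

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

// hypothetical: assumes the PartitionedFileSet is partitioned on a single long field named "id"
private static final AtomicLong COUNTER = new AtomicLong();

private PartitionKey generateUniqueKey() {
  // each call returns a distinct key, so generated partitions never collide
  return PartitionKey.builder().addLongField("id", COUNTER.incrementAndGet()).build();
}

private Set<PartitionKey> toKeys(Iterable<? extends Partition> partitions) {
  // the assertions only compare key sets, so extract the key from each consumed partition
  Set<PartitionKey> keys = new HashSet<>();
  for (Partition partition : partitions) {
    keys.add(partition.getPartitionKey());
  }
  return keys;
}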
use of org.apache.tephra.TransactionAware in project cdap by caskdata.
the class BasicMapReduceTaskContext method initializeTransactionAwares.
//---- following are methods to manage transaction lifecycle for the datasets. This needs to
//---- be refactored after [TEPHRA-99] and [CDAP-3893] are resolved.
/**
 * Initializes the transaction-awares.
 */
private void initializeTransactionAwares() {
  Iterable<TransactionAware> txAwares = Iterables.concat(getDatasetCache().getStaticTransactionAwares(),
                                                         getDatasetCache().getExtraTransactionAwares());
  for (TransactionAware txAware : txAwares) {
    this.txAwares.add(txAware);
    txAware.startTx(transaction);
  }
}
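startTx is only the first step of the lifecycle managed here; each registered TransactionAware later reports its changes for conflict detection and then commits or rolls back. A minimal, illustrative implementation of the org.apache.tephra.TransactionAware interface that buffers writes in memory until commit (a sketch for orientation, not a CDAP class):

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionAware;

public class BufferingTxAware implements TransactionAware {
  private final Map<String, String> committed = new HashMap<>();
  private final Map<String, String> buffer = new HashMap<>();

  public void put(String key, String value) {
    buffer.put(key, value); // visible only within the current transaction
  }

  @Override
  public void startTx(Transaction tx) {
    buffer.clear(); // begin with a clean write buffer
  }

  @Override
  public void updateTx(Transaction tx) {
    // a checkpoint replaced the transaction; nothing to do for this simple store
  }

  @Override
  public Collection<byte[]> getTxChanges() {
    // report the keys written, so the transaction manager can detect write conflicts
    List<byte[]> changes = new ArrayList<>();
    for (String key : buffer.keySet()) {
      changes.add(key.getBytes(StandardCharsets.UTF_8));
    }
    return changes;
  }

  @Override
  public boolean commitTx() {
    committed.putAll(buffer); // persist the buffered writes
    return true;
  }

  @Override
  public void postTxCommit() {
    buffer.clear(); // the transaction is fully committed; drop the buffer
  }

  @Override
  public boolean rollbackTx() {
    buffer.clear(); // discard uncommitted writes
    return true;
  }

  @Override
  public String getTransactionAwareName() {
    return "buffering-tx-aware";
  }
}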
use of org.apache.tephra.TransactionAware in project cdap by caskdata.
the class ProgramScheduleStoreDatasetTest method testFindSchedulesByEventAndUpdateSchedule.
@Test
public void testFindSchedulesByEventAndUpdateSchedule() throws Exception {
  DatasetFramework dsFramework = getInjector().getInstance(DatasetFramework.class);
  TransactionSystemClient txClient = getInjector().getInstance(TransactionSystemClient.class);
  TransactionExecutorFactory txExecutorFactory = new DynamicTransactionExecutorFactory(txClient);
  final ProgramScheduleStoreDataset store =
    dsFramework.getDataset(Schedulers.STORE_DATASET_ID, new HashMap<String, String>(), null);
  Assert.assertNotNull(store);
  TransactionExecutor txExecutor =
    txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) store));
  final ProgramSchedule sched11 = new ProgramSchedule("sched11", "one partition schedule", PROG1_ID,
    ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DS1_ID, 1), ImmutableList.<Constraint>of());
  final ProgramSchedule sched12 = new ProgramSchedule("sched12", "two partition schedule", PROG1_ID,
    ImmutableMap.of("propper", "popper"), new PartitionTrigger(DS2_ID, 2), ImmutableList.<Constraint>of());
  final ProgramSchedule sched22 = new ProgramSchedule("sched22", "twentytwo partition schedule", PROG2_ID,
    ImmutableMap.of("nn", "4"), new PartitionTrigger(DS2_ID, 22), ImmutableList.<Constraint>of());
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // an event for DS1 or DS2 should trigger nothing; validate that both lookups return an empty collection
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID)).isEmpty());
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID)).isEmpty());
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.addSchedules(ImmutableList.of(sched11, sched12, sched22));
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // an event for DS1 should trigger only sched11
      Assert.assertEquals(ImmutableSet.of(sched11),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
      // an event for DS2 should trigger only sched12 and sched22
      Assert.assertEquals(ImmutableSet.of(sched12, sched22),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
    }
  });
  final ProgramSchedule sched11New = new ProgramSchedule(sched11.getName(), "time schedule", PROG1_ID,
    ImmutableMap.of("timeprop", "time"), new TimeTrigger("* * * * *"), ImmutableList.<Constraint>of());
  final ProgramSchedule sched12New = new ProgramSchedule(sched12.getName(), "one partition schedule", PROG1_ID,
    ImmutableMap.of("pp", "p"), new PartitionTrigger(DS1_ID, 2), ImmutableList.<Constraint>of());
  final ProgramSchedule sched22New = new ProgramSchedule(sched22.getName(), "one streamsize schedule", PROG2_ID,
    ImmutableMap.of("ss", "s"), new StreamSizeTrigger(NS_ID.stream("stream"), 1), ImmutableList.<Constraint>of());
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.updateSchedule(sched11New);
      store.updateSchedule(sched12New);
      store.updateSchedule(sched22New);
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // after the update, an event for DS1 should trigger only sched12New
      Assert.assertEquals(ImmutableSet.of(sched12New),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
      // after the update, an event for DS2 should trigger no schedule
      Assert.assertEquals(ImmutableSet.<ProgramSchedule>of(),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
    }
  });
}
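toScheduleSet is a helper from the test class that is not shown in this excerpt. A plausible sketch, assuming findSchedules returns records that expose getSchedule() (as the assertions against ProgramSchedule sets suggest; both the parameter type and method name here are assumptions):

// hypothetical helper: collect the schedules out of the records returned by findSchedules
private Set<ProgramSchedule> toScheduleSet(Collection<ProgramScheduleRecord> records) {
  Set<ProgramSchedule> schedules = new HashSet<>();
  for (ProgramScheduleRecord record : records) {
    schedules.add(record.getSchedule());
  }
  return schedules;
}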
use of org.apache.tephra.TransactionAware in project cdap by caskdata.
the class AbstractTransactionContext method abort.
@Override
public void abort(@Nullable TransactionFailureException cause) throws TransactionFailureException {
  if (currentTx == null) {
    // might be called by some generic exception handler even though already aborted/finished - we allow that
    return;
  }
  try {
    boolean success = true;
    for (TransactionAware txAware : getTransactionAwares()) {
      try {
        success = txAware.rollbackTx() && success;
      } catch (Throwable e) {
        if (cause == null) {
          cause = new TransactionFailureException(
            String.format("Unable to roll back changes in transaction-aware '%s' for transaction %d.",
                          txAware.getTransactionAwareName(), currentTx.getTransactionId()), e);
        } else {
          cause.addSuppressed(e);
        }
        success = false;
      }
    }
    try {
      if (success) {
        txClient.abort(currentTx);
      } else {
        txClient.invalidate(currentTx.getTransactionId());
      }
    } catch (Throwable t) {
      if (cause == null) {
        cause = new TransactionFailureException(
          String.format("Error while calling transaction service to %s transaction %d.",
                        success ? "abort" : "invalidate", currentTx.getTransactionId()), t);
      } else {
        cause.addSuppressed(t);
      }
    }
    if (cause != null) {
      throw cause;
    }
  } finally {
    currentTx = null;
    cleanup();
  }
}
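A typical caller never invokes abort directly on the success path; it drives the lifecycle through start/finish and falls back to abort when anything fails. A minimal usage sketch following the standard Tephra TransactionContext pattern (txClient and txAwareDataset stand in for whatever the caller has at hand):

TransactionContext txContext = new TransactionContext(txClient, txAwareDataset);
txContext.start();
try {
  // perform operations on the transaction-aware dataset here
  txContext.finish(); // persists changes and commits with the transaction service
} catch (TransactionFailureException e) {
  txContext.abort(e); // rolls back all participants and rethrows the failure
}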
use of org.apache.tephra.TransactionAware in project cdap by caskdata.
the class AbstractTransactionContext method persist.
/**
 * Calls {@link TransactionAware#commitTx()} on all {@link TransactionAware}s to persist pending changes.
 */
private void persist() throws TransactionFailureException {
  for (TransactionAware txAware : getTransactionAwares()) {
    boolean success = false;
    Throwable cause = null;
    try {
      success = txAware.commitTx();
    } catch (Throwable e) {
      cause = e;
    }
    if (!success) {
      abort(new TransactionFailureException(
        String.format("Unable to persist changes of transaction-aware '%s' for transaction %d.",
                      txAware.getTransactionAwareName(), currentTx.getTransactionId()), cause));
    }
  }
}
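persist() is the first phase of the commit sequence: changes are written out via commitTx(), then the transaction is committed with the transaction service, and finally postTxCommit() notifies each participant. A hedged sketch of how a finish() method might string these phases together (simplified; the actual AbstractTransactionContext also handles conflict detection via getTxChanges() and has more elaborate error handling):

public void finish() throws TransactionFailureException {
  persist(); // phase 1: every TransactionAware writes its pending changes
  try {
    // phase 2: commit with the transaction service; a conflict aborts and rolls back
    if (!txClient.commit(currentTx)) {
      abort(new TransactionFailureException(
        "Conflict detected for transaction " + currentTx.getTransactionId()));
    }
  } catch (TransactionNotInProgressException e) {
    abort(new TransactionFailureException("Transaction is no longer in progress", e));
  }
  // phase 3: the transaction is now visible; let each participant clean up
  for (TransactionAware txAware : getTransactionAwares()) {
    txAware.postTxCommit();
  }
  currentTx = null;
}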