Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
In the class WorkerProgramRunnerTest, the method testWorkerDatasetWithMetrics:
@Test
public void testWorkerDatasetWithMetrics() throws Throwable {
  final ApplicationWithPrograms app =
      AppFabricTestHelper.deployApplicationWithManager(AppWithWorker.class, TEMP_FOLDER_SUPPLIER);
  ProgramController controller = startProgram(app, AppWithWorker.TableWriter.class);
  // validate worker wrote the "initialize" and "run" rows
  final TransactionExecutor executor = txExecutorFactory.createExecutor(datasetCache);
  // wait at most 5 seconds until the "RUN" row is set (indicates the worker has started running)
  Tasks.waitFor(AppWithWorker.RUN, new Callable<String>() {
    @Override
    public String call() throws Exception {
      return executor.execute(new Callable<String>() {
        @Override
        public String call() throws Exception {
          KeyValueTable kvTable = datasetCache.getDataset(AppWithWorker.DATASET);
          return Bytes.toString(kvTable.read(AppWithWorker.RUN));
        }
      });
    }
  }, 5, TimeUnit.SECONDS);
  stopProgram(controller);
  txExecutorFactory.createExecutor(datasetCache.getTransactionAwares()).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      KeyValueTable kvTable = datasetCache.getDataset(AppWithWorker.DATASET);
      Assert.assertEquals(AppWithWorker.RUN, Bytes.toString(kvTable.read(AppWithWorker.RUN)));
      Assert.assertEquals(AppWithWorker.INITIALIZE, Bytes.toString(kvTable.read(AppWithWorker.INITIALIZE)));
      Assert.assertEquals(AppWithWorker.STOP, Bytes.toString(kvTable.read(AppWithWorker.STOP)));
    }
  });
  // validate that the table emitted metrics
  Tasks.waitFor(3L, new Callable<Long>() {
    @Override
    public Long call() throws Exception {
      Collection<MetricTimeSeries> metrics = metricStore.query(
          new MetricDataQuery(
              0, System.currentTimeMillis() / 1000L, Integer.MAX_VALUE,
              "system." + Constants.Metrics.Name.Dataset.OP_COUNT,
              AggregationFunction.SUM,
              ImmutableMap.of(
                  Constants.Metrics.Tag.NAMESPACE, DefaultId.NAMESPACE.getEntityName(),
                  Constants.Metrics.Tag.APP, AppWithWorker.NAME,
                  Constants.Metrics.Tag.WORKER, AppWithWorker.WORKER,
                  Constants.Metrics.Tag.DATASET, AppWithWorker.DATASET),
              Collections.<String>emptyList()));
      if (metrics.isEmpty()) {
        return 0L;
      }
      Assert.assertEquals(1, metrics.size());
      MetricTimeSeries ts = metrics.iterator().next();
      Assert.assertEquals(1, ts.getTimeValues().size());
      return ts.getTimeValues().get(0).getValue();
    }
  }, 5L, TimeUnit.SECONDS, 50L, TimeUnit.MILLISECONDS);
}
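Both overloads of TransactionExecutor.execute used above take single-method interfaces, so on Java 8 the anonymous classes can be written as lambdas. A minimal sketch (not from the CDAP source), assuming the same executor and datasetCache fields as in the test:

  // Callable<O> overload: the body runs in its own short transaction and its value is returned.
  // execute() throws the checked TransactionFailureException and InterruptedException.
  String runValue = executor.execute(() -> {
    KeyValueTable kvTable = datasetCache.getDataset(AppWithWorker.DATASET);
    return Bytes.toString(kvTable.read(AppWithWorker.RUN));
  });
  // Subroutine overload: same transactional wrapping, but no return value.
  executor.execute(() -> {
    KeyValueTable kvTable = datasetCache.getDataset(AppWithWorker.DATASET);
    Assert.assertEquals(AppWithWorker.RUN, Bytes.toString(kvTable.read(AppWithWorker.RUN)));
  });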
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
In the class ProgramScheduleStoreDatasetTest, the method testFindSchedulesByEventAndUpdateSchedule:
@Test
public void testFindSchedulesByEventAndUpdateSchedule() throws Exception {
  DatasetFramework dsFramework = getInjector().getInstance(DatasetFramework.class);
  TransactionSystemClient txClient = getInjector().getInstance(TransactionSystemClient.class);
  TransactionExecutorFactory txExecutorFactory = new DynamicTransactionExecutorFactory(txClient);
  final ProgramScheduleStoreDataset store =
      dsFramework.getDataset(Schedulers.STORE_DATASET_ID, new HashMap<String, String>(), null);
  Assert.assertNotNull(store);
  TransactionExecutor txExecutor =
      txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) store));
  final ProgramSchedule sched11 = new ProgramSchedule(
      "sched11", "one partition schedule", PROG1_ID, ImmutableMap.of("prop3", "abc"),
      new PartitionTrigger(DS1_ID, 1), ImmutableList.<Constraint>of());
  final ProgramSchedule sched12 = new ProgramSchedule(
      "sched12", "two partition schedule", PROG1_ID, ImmutableMap.of("propper", "popper"),
      new PartitionTrigger(DS2_ID, 2), ImmutableList.<Constraint>of());
  final ProgramSchedule sched22 = new ProgramSchedule(
      "sched22", "twentytwo partition schedule", PROG2_ID, ImmutableMap.of("nn", "4"),
      new PartitionTrigger(DS2_ID, 22), ImmutableList.<Constraint>of());
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // event for DS1 or DS2 should trigger nothing. validate it returns an empty collection
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID)).isEmpty());
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID)).isEmpty());
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.addSchedules(ImmutableList.of(sched11, sched12, sched22));
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // event for DS1 should trigger only sched11
      Assert.assertEquals(ImmutableSet.of(sched11),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
      // event for DS2 triggers only sched12 and sched22
      Assert.assertEquals(ImmutableSet.of(sched12, sched22),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
    }
  });
  final ProgramSchedule sched11New = new ProgramSchedule(
      sched11.getName(), "time schedule", PROG1_ID, ImmutableMap.of("timeprop", "time"),
      new TimeTrigger("* * * * *"), ImmutableList.<Constraint>of());
  final ProgramSchedule sched12New = new ProgramSchedule(
      sched12.getName(), "one partition schedule", PROG1_ID, ImmutableMap.of("pp", "p"),
      new PartitionTrigger(DS1_ID, 2), ImmutableList.<Constraint>of());
  final ProgramSchedule sched22New = new ProgramSchedule(
      sched22.getName(), "one streamsize schedule", PROG2_ID, ImmutableMap.of("ss", "s"),
      new StreamSizeTrigger(NS_ID.stream("stream"), 1), ImmutableList.<Constraint>of());
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.updateSchedule(sched11New);
      store.updateSchedule(sched12New);
      store.updateSchedule(sched22New);
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // event for DS1 should trigger only sched12New after update
      Assert.assertEquals(ImmutableSet.of(sched12New),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
      // event for DS2 triggers no schedule after update
      Assert.assertEquals(ImmutableSet.<ProgramSchedule>of(),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
    }
  });
}
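Here the executor is built explicitly from a TransactionSystemClient via CDAP's DynamicTransactionExecutorFactory, with the store registered as the single TransactionAware. In plain Tephra this roughly corresponds to constructing a DefaultTransactionExecutor directly; a sketch (assuming txClient and store as in the test, inside a method that declares the checked TransactionFailureException and InterruptedException):

  // Roughly what the factory hands back: an executor that begins a transaction,
  // runs the body against the registered TransactionAwares, then commits or rolls back.
  TransactionExecutor plainExecutor =
      new DefaultTransactionExecutor(txClient, Collections.singleton((TransactionAware) store));
  plainExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.addSchedules(ImmutableList.of(sched11, sched12, sched22));
    }
  });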
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
In the class LineageStore, the method execute:
private void execute(TransactionExecutor.Procedure<LineageDataset> func) {
  LineageDataset lineageDataset = newLineageDataset();
  TransactionExecutor txExecutor = Transactions.createTransactionExecutor(executorFactory, lineageDataset);
  txExecutor.executeUnchecked(func, lineageDataset);
}
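The Procedure variant runs transactional work with no return value, and executeUnchecked rethrows any TransactionFailureException unchecked, so callers of this helper never deal with checked transaction exceptions. A hypothetical caller (not from the CDAP source; the addAccess signature mirrors the one used in the LineageDatasetTest example below):

  // Hypothetical public method on LineageStore, built on the execute helper above.
  public void addAccess(final ProgramRunId run, final DatasetId datasetInstance,
                        final AccessType accessType, final long accessTimeMillis,
                        final NamespacedEntityId component) {
    execute(new TransactionExecutor.Procedure<LineageDataset>() {
      @Override
      public void apply(LineageDataset dataset) throws Exception {
        dataset.addAccess(run, datasetInstance, accessType, accessTimeMillis, component);
      }
    });
  }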
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
In the class DefaultMetadataStore, the method execute:
private <T> T execute(TransactionExecutor.Function<MetadataDataset, T> func, MetadataScope scope) {
  MetadataDataset metadataDataset = newMetadataDataset(scope);
  TransactionExecutor txExecutor = Transactions.createTransactionExecutor(txExecutorFactory, metadataDataset);
  return txExecutor.executeUnchecked(func, metadataDataset);
}
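Unlike the Procedure form, the Function variant carries a value out of the transaction: the result of func becomes the return value of execute once the transaction commits. A hypothetical caller (illustrative only; the getProperties accessor on MetadataDataset is assumed here, not taken from its API):

  // Hypothetical read path: the value computed inside the transaction is returned to the caller.
  private Map<String, String> getProperties(final NamespacedEntityId entity, MetadataScope scope) {
    return execute(new TransactionExecutor.Function<MetadataDataset, Map<String, String>>() {
      @Override
      public Map<String, String> apply(MetadataDataset dataset) throws Exception {
        return dataset.getProperties(entity); // assumed accessor, for illustration
      }
    }, scope);
  }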
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
In the class LineageDatasetTest, the method testOneRelation:
@Test
public void testOneRelation() throws Exception {
  final LineageDataset lineageDataset = getLineageDataset("testOneRelation");
  Assert.assertNotNull(lineageDataset);
  TransactionExecutor txnl = dsFrameworkUtil.newInMemoryTransactionExecutor((TransactionAware) lineageDataset);
  final RunId runId = RunIds.generate(10000);
  final DatasetId datasetInstance = new DatasetId("default", "dataset1");
  final ProgramId program = new ProgramId("default", "app1", ProgramType.FLOW, "flow1");
  final FlowletId flowlet = program.flowlet("flowlet1");
  final ProgramRunId run = program.run(runId.getId());
  final long accessTimeMillis = System.currentTimeMillis();
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      lineageDataset.addAccess(run, datasetInstance, AccessType.READ, accessTimeMillis, flowlet);
    }
  });
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      Relation expected = new Relation(datasetInstance, program, AccessType.READ, runId, ImmutableSet.of(flowlet));
      Set<Relation> relations =
          lineageDataset.getRelations(datasetInstance, 0, 100000, Predicates.<Relation>alwaysTrue());
      Assert.assertEquals(1, relations.size());
      Assert.assertEquals(expected, relations.iterator().next());
      Assert.assertEquals(toSet(program, datasetInstance), lineageDataset.getEntitiesForRun(run));
      Assert.assertEquals(ImmutableList.of(accessTimeMillis), lineageDataset.getAccessTimesForRun(run));
    }
  });
}
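The write and the verification deliberately run as two separate transactions: the first execute commits addAccess before the second transaction reads it back. Note also that execute, unlike the executeUnchecked used by LineageStore and DefaultMetadataStore above, surfaces transaction failures as checked exceptions. A sketch of handling them explicitly, assuming the txnl executor and the fields from the test:

  try {
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        lineageDataset.addAccess(run, datasetInstance, AccessType.READ, accessTimeMillis, flowlet);
      }
    });
  } catch (TransactionFailureException e) {
    // conflict or failure: Tephra rolled the transaction back, so the write is not visible
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // restore the interrupt flag and give up
  }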