use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
the class AppMetadataStoreTest method testScanRunningInRangeWithBatch.
@Test
public void testScanRunningInRangeWithBatch() throws Exception {
  DatasetId storeTable = NamespaceId.DEFAULT.dataset("testScanRunningInRange");
  datasetFramework.addInstance(Table.class.getName(), storeTable, DatasetProperties.EMPTY);
  Table table = datasetFramework.getDataset(storeTable, ImmutableMap.<String, String>of(), null);
  Assert.assertNotNull(table);
  final AppMetadataStore metadataStoreDataset = new AppMetadataStore(table, cConf, new AtomicBoolean(false));
  TransactionExecutor txnl = txExecutorFactory.createExecutor(
    Collections.singleton((TransactionAware) metadataStoreDataset));
  // Add some run records
  TreeSet<Long> expected = new TreeSet<>();
  for (int i = 0; i < 100; ++i) {
    ApplicationId application = NamespaceId.DEFAULT.app("app" + i);
    final ProgramId program = application.program(ProgramType.values()[i % ProgramType.values().length],
                                                  "program" + i);
    final RunId runId = RunIds.generate((i + 1) * 10000);
    expected.add(RunIds.getTime(runId, TimeUnit.MILLISECONDS));
    // Start the program and stop it
    final int j = i;
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        metadataStoreDataset.recordProgramStart(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS),
                                                null, null, null);
        metadataStoreDataset.recordProgramStop(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS),
                                               ProgramRunStatus.values()[j % ProgramRunStatus.values().length],
                                               null);
      }
    });
  }
  // Run a full scan
  runScan(txnl, metadataStoreDataset, expected, 0, Long.MAX_VALUE);
  // In all assertions below, both the TreeSet subSet() view and the metadata store scan treat the
  // start time as inclusive and the end time as exclusive.
  // Run the scan with a time limit
  runScan(txnl, metadataStoreDataset, expected.subSet(30 * 10000L, 90 * 10000L),
          TimeUnit.MILLISECONDS.toSeconds(30 * 10000), TimeUnit.MILLISECONDS.toSeconds(90 * 10000));
  runScan(txnl, metadataStoreDataset, expected.subSet(90 * 10000L, 101 * 10000L),
          TimeUnit.MILLISECONDS.toSeconds(90 * 10000), TimeUnit.MILLISECONDS.toSeconds(101 * 10000));
  // After the range
  runScan(txnl, metadataStoreDataset, expected.subSet(101 * 10000L, 200 * 10000L),
          TimeUnit.MILLISECONDS.toSeconds(101 * 10000), TimeUnit.MILLISECONDS.toSeconds(200 * 10000));
  // Identical start and end times
  runScan(txnl, metadataStoreDataset, expected.subSet(31 * 10000L, 31 * 10000L),
          TimeUnit.MILLISECONDS.toSeconds(31 * 10000), TimeUnit.MILLISECONDS.toSeconds(31 * 10000));
  // One unit of difference between the start and end times
  runScan(txnl, metadataStoreDataset, expected.subSet(30 * 10000L, 31 * 10000L),
          TimeUnit.MILLISECONDS.toSeconds(30 * 10000), TimeUnit.MILLISECONDS.toSeconds(31 * 10000));
  // Before the range
  runScan(txnl, metadataStoreDataset, expected.subSet(1000L, 10000L),
          TimeUnit.MILLISECONDS.toSeconds(1000), TimeUnit.MILLISECONDS.toSeconds(10000));
}
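The runScan helper that all of the assertions above rely on is defined elsewhere in AppMetadataStoreTest and is not reproduced on this page. The sketch below shows the shape such a helper plausibly has: one transactional scan per call, collecting the start times of the run records found in [startSecs, stopSecs) and comparing them to the expected set. The batch-scan method name getRunningInRangeForStatus, its "runRecordCompleted" prefix, the 25 ms per-batch time budget, and the use of Guava's Ticker are assumptions made for illustration, not confirmed cdap API.

private void runScan(TransactionExecutor txnl, final AppMetadataStore metadataStoreDataset,
                     final Set<Long> expected, final long startSecs, final long stopSecs)
  throws InterruptedException, TransactionFailureException {
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // Assumed batch-scan API: returns the run records whose start time falls in
      // [startSecs, stopSecs), split into batches so that no single underlying scan
      // exceeds the given time budget.
      List<Iterable<RunId>> batches = metadataStoreDataset.getRunningInRangeForStatus(
        "runRecordCompleted", startSecs, stopSecs, 25, Ticker.systemTicker());
      Set<Long> actual = new TreeSet<>();
      for (Iterable<RunId> batch : batches) {
        for (RunId runId : batch) {
          actual.add(RunIds.getTime(runId, TimeUnit.MILLISECONDS));
        }
      }
      Assert.assertEquals(expected, actual);
    }
  });
}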
use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
the class AppMetadataStoreTest method testgetRuns.
@Test
public void testgetRuns() throws Exception {
  DatasetId storeTable = NamespaceId.DEFAULT.dataset("testgetRuns");
  datasetFramework.addInstance(Table.class.getName(), storeTable, DatasetProperties.EMPTY);
  Table table = datasetFramework.getDataset(storeTable, ImmutableMap.<String, String>of(), null);
  Assert.assertNotNull(table);
  final AppMetadataStore metadataStoreDataset = new AppMetadataStore(table, cConf, new AtomicBoolean(false));
  TransactionExecutor txnl = txExecutorFactory.createExecutor(
    Collections.singleton((TransactionAware) metadataStoreDataset));
  // Add some run records
  final Set<String> expected = new TreeSet<>();
  final Set<String> expectedHalf = new TreeSet<>();
  final Set<ProgramRunId> programRunIdSet = new HashSet<>();
  final Set<ProgramRunId> programRunIdSetHalf = new HashSet<>();
  for (int i = 0; i < 100; ++i) {
    ApplicationId application = NamespaceId.DEFAULT.app("app");
    final ProgramId program = application.program(ProgramType.FLOW, "program");
    final RunId runId = RunIds.generate((i + 1) * 10000);
    expected.add(runId.toString());
    final int index = i;
    // Add every other runId
    if ((i % 2) == 0) {
      expectedHalf.add(runId.toString());
    }
    ProgramRunId programRunId = new ProgramRunId(program.getNamespace(), program.getApplication(),
                                                 program.getType(), program.getProgram(), runId.toString());
    programRunIdSet.add(programRunId);
    // Add every other programRunId
    if ((i % 2) == 0) {
      programRunIdSetHalf.add(programRunId);
    }
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        // Start the program and stop it
        metadataStoreDataset.recordProgramStart(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS),
                                                null, null, null);
        metadataStoreDataset.recordProgramStop(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS),
                                               ProgramRunStatus.values()[index % ProgramRunStatus.values().length],
                                               null);
      }
    });
  }
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      Map<ProgramRunId, RunRecordMeta> runMap = metadataStoreDataset.getRuns(programRunIdSet);
      Set<String> actual = new TreeSet<>();
      for (Map.Entry<ProgramRunId, RunRecordMeta> entry : runMap.entrySet()) {
        actual.add(entry.getValue().getPid());
      }
      Assert.assertEquals(expected, actual);
      Map<ProgramRunId, RunRecordMeta> runMapHalf = metadataStoreDataset.getRuns(programRunIdSetHalf);
      Set<String> actualHalf = new TreeSet<>();
      for (Map.Entry<ProgramRunId, RunRecordMeta> entry : runMapHalf.entrySet()) {
        actualHalf.add(entry.getValue().getPid());
      }
      Assert.assertEquals(expectedHalf, actualHalf);
    }
  });
}
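TransactionExecutor.Subroutine is a single-abstract-method interface, so on Java 8+ the verification block above can be written more compactly as a lambda; the zero-argument lambda unambiguously selects the execute(Subroutine) overload. A sketch of the first assertion restated this way, with behavior unchanged:

txnl.execute(() -> {
  // getRuns looks up the run records for the requested program runs in one transaction
  Map<ProgramRunId, RunRecordMeta> runMap = metadataStoreDataset.getRuns(programRunIdSet);
  Set<String> actual = new TreeSet<>();
  for (RunRecordMeta meta : runMap.values()) {
    actual.add(meta.getPid());
  }
  Assert.assertEquals(expected, actual);
});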
use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
the class InMemoryStreamFileWriterFactory method create.
@Override
public FileWriter<StreamEvent> create(StreamConfig config, int generation) throws IOException {
  final QueueProducer producer = queueClientFactory.createProducer(QueueName.fromStream(config.getStreamId()));
  final List<TransactionAware> txAwares = Lists.newArrayList();
  if (producer instanceof TransactionAware) {
    txAwares.add((TransactionAware) producer);
  }
  final TransactionExecutor txExecutor = executorFactory.createExecutor(txAwares);
  // Adapt the FileWriter interface onto the queue producer
  return new FileWriter<StreamEvent>() {

    private final List<StreamEvent> events = Lists.newArrayList();

    @Override
    public void append(StreamEvent event) throws IOException {
      events.add(event);
    }

    @Override
    public void appendAll(Iterator<? extends StreamEvent> events) throws IOException {
      Iterators.addAll(this.events, events);
    }

    @Override
    public void close() throws IOException {
      producer.close();
    }

    @Override
    public void flush() throws IOException {
      try {
        txExecutor.execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() throws Exception {
            for (StreamEvent event : events) {
              producer.enqueue(new QueueEntry(STREAM_EVENT_CODEC.encodePayload(event)));
            }
            events.clear();
          }
        });
      } catch (TransactionFailureException e) {
        throw new IOException(e);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException();
      }
    }
  };
}
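A caller sees only the plain FileWriter contract; the queue and the transaction are hidden behind flush(), which enqueues all buffered events atomically and clears the buffer only if the transaction commits. A hypothetical usage sketch follows; the writerFactory and config variables are illustrative, and it assumes StreamEvent's ByteBuffer constructor plus the usual java.nio imports:

FileWriter<StreamEvent> writer = writerFactory.create(config, 0);
try {
  writer.append(new StreamEvent(ByteBuffer.wrap("event-1".getBytes(StandardCharsets.UTF_8))));
  writer.append(new StreamEvent(ByteBuffer.wrap("event-2".getBytes(StandardCharsets.UTF_8))));
  // Both events are enqueued inside a single transaction
  writer.flush();
} finally {
  writer.close();
}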
use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
the class LineageStore method execute.
private <T> T execute(TransactionExecutor.Function<LineageDataset, T> func) {
  LineageDataset lineageDataset = newLineageDataset();
  TransactionExecutor txExecutor = Transactions.createTransactionExecutor(executorFactory, lineageDataset);
  return txExecutor.executeUnchecked(func, lineageDataset);
}
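Call sites hand execute() a TransactionExecutor.Function and get the result back once the transaction commits; executeUnchecked rethrows any TransactionFailureException as an unchecked exception, which keeps call sites free of try/catch. A hypothetical call site is sketched below; getAccessTimes and its arguments are placeholders for whatever read method LineageDataset actually exposes, not confirmed API:

// Hypothetical read helper built on execute()
private Set<Long> getAccessTimes(final DatasetId datasetId, final long startMillis, final long endMillis) {
  return execute(new TransactionExecutor.Function<LineageDataset, Set<Long>>() {
    @Override
    public Set<Long> apply(LineageDataset input) throws Exception {
      // getAccessTimes(...) stands in for a real LineageDataset read method
      return input.getAccessTimes(datasetId, startMillis, endMillis);
    }
  });
}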
use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
the class DefaultMetadataStore method execute.
private void execute(TransactionExecutor.Procedure<MetadataDataset> func, MetadataScope scope) {
  MetadataDataset metadataDataset = newMetadataDataset(scope);
  TransactionExecutor txExecutor = Transactions.createTransactionExecutor(txExecutorFactory, metadataDataset);
  txExecutor.executeUnchecked(func, metadataDataset);
}
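The Procedure variant is the write-side counterpart of the Function pattern above: it returns nothing, and the scope argument selects which underlying MetadataDataset instance (USER or SYSTEM) the procedure runs against. A hypothetical call site; the entity parameter and the setProperty signature are illustrative assumptions, not confirmed API:

// Hypothetical write helper built on execute()
private void setProperty(final NamespacedEntityId entity, final String key, final String value,
                         MetadataScope scope) {
  execute(new TransactionExecutor.Procedure<MetadataDataset>() {
    @Override
    public void apply(MetadataDataset input) throws Exception {
      // setProperty(...) stands in for a real MetadataDataset write method
      input.setProperty(entity, key, value);
    }
  }, scope);
}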