Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
Class MapReduceProgramRunnerTest, method fillTestInputData.
private void fillTestInputData(TransactionExecutorFactory txExecutorFactory, final TimeseriesTable table,
                               final boolean withBadData) throws TransactionFailureException, InterruptedException {
  TransactionExecutor executor = Transactions.createTransactionExecutor(txExecutorFactory, table);
  executor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() {
      fillTestInputData(table, withBadData);
    }
  });
}
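The same executor also supports transactional work that returns a value, via TransactionExecutor.Function. A minimal sketch under the same setup; countRows is a hypothetical helper standing in for whatever read is performed against the table, and is not part of the original snippet:

TransactionExecutor executor = Transactions.createTransactionExecutor(txExecutorFactory, table);
// Function<I, O> receives an input and returns a result, both passed through execute().
// countRows is a hypothetical helper, not part of the original snippet.
long rows = executor.execute(new TransactionExecutor.Function<TimeseriesTable, Long>() {
  @Override
  public Long apply(TimeseriesTable t) throws Exception {
    return countRows(t); // runs inside the transaction
  }
}, table);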
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
Class AppMetadataStoreTest, method testScanRunningInRangeWithBatch.
@Test
public void testScanRunningInRangeWithBatch() throws Exception {
  DatasetId storeTable = NamespaceId.DEFAULT.dataset("testScanRunningInRange");
  datasetFramework.addInstance(Table.class.getName(), storeTable, DatasetProperties.EMPTY);
  Table table = datasetFramework.getDataset(storeTable, ImmutableMap.<String, String>of(), null);
  Assert.assertNotNull(table);
  final AppMetadataStore metadataStoreDataset = new AppMetadataStore(table, cConf, new AtomicBoolean(false));
  TransactionExecutor txnl = txExecutorFactory.createExecutor(
    Collections.singleton((TransactionAware) metadataStoreDataset));
  // Add some run records
  TreeSet<Long> expected = new TreeSet<>();
  for (int i = 0; i < 100; ++i) {
    ApplicationId application = NamespaceId.DEFAULT.app("app" + i);
    final ProgramId program = application.program(ProgramType.values()[i % ProgramType.values().length],
                                                  "program" + i);
    final RunId runId = RunIds.generate((i + 1) * 10000);
    expected.add(RunIds.getTime(runId, TimeUnit.MILLISECONDS));
    // Start the program and stop it
    final int j = i;
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        metadataStoreDataset.recordProgramStart(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS),
                                                null, null, null);
        metadataStoreDataset.recordProgramStop(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS),
                                               ProgramRunStatus.values()[j % ProgramRunStatus.values().length], null);
      }
    });
  }
  // Run a full scan
  runScan(txnl, metadataStoreDataset, expected, 0, Long.MAX_VALUE);
  // In all assertions below, both the TreeSet and the metadata store treat the start time
  // as inclusive and the end time as exclusive.
  // Run the scan with a time limit
  runScan(txnl, metadataStoreDataset, expected.subSet(30 * 10000L, 90 * 10000L),
          TimeUnit.MILLISECONDS.toSeconds(30 * 10000), TimeUnit.MILLISECONDS.toSeconds(90 * 10000));
  runScan(txnl, metadataStoreDataset, expected.subSet(90 * 10000L, 101 * 10000L),
          TimeUnit.MILLISECONDS.toSeconds(90 * 10000), TimeUnit.MILLISECONDS.toSeconds(101 * 10000));
  // After the range
  runScan(txnl, metadataStoreDataset, expected.subSet(101 * 10000L, 200 * 10000L),
          TimeUnit.MILLISECONDS.toSeconds(101 * 10000), TimeUnit.MILLISECONDS.toSeconds(200 * 10000));
  // Identical start and end time
  runScan(txnl, metadataStoreDataset, expected.subSet(31 * 10000L, 31 * 10000L),
          TimeUnit.MILLISECONDS.toSeconds(31 * 10000), TimeUnit.MILLISECONDS.toSeconds(31 * 10000));
  // One unit of difference between start and end time
  runScan(txnl, metadataStoreDataset, expected.subSet(30 * 10000L, 31 * 10000L),
          TimeUnit.MILLISECONDS.toSeconds(30 * 10000), TimeUnit.MILLISECONDS.toSeconds(31 * 10000));
  // Before the range
  runScan(txnl, metadataStoreDataset, expected.subSet(1000L, 10000L),
          TimeUnit.MILLISECONDS.toSeconds(1000), TimeUnit.MILLISECONDS.toSeconds(10000));
}
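The assertions above lean on the half-open semantics of TreeSet.subSet. A small self-contained illustration (not from the original test):

import java.util.Arrays;
import java.util.TreeSet;

public class SubSetDemo {
  public static void main(String[] args) {
    // subSet(from, to) includes "from" and excludes "to", matching the scan's
    // inclusive-start/exclusive-end contract noted in the test comments.
    TreeSet<Long> times = new TreeSet<>(Arrays.asList(10L, 20L, 30L));
    System.out.println(times.subSet(10L, 30L)); // prints [10, 20]
  }
}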
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
Class AppMetadataStoreTest, method testgetRuns.
@Test
public void testgetRuns() throws Exception {
  DatasetId storeTable = NamespaceId.DEFAULT.dataset("testgetRuns");
  datasetFramework.addInstance(Table.class.getName(), storeTable, DatasetProperties.EMPTY);
  Table table = datasetFramework.getDataset(storeTable, ImmutableMap.<String, String>of(), null);
  Assert.assertNotNull(table);
  final AppMetadataStore metadataStoreDataset = new AppMetadataStore(table, cConf, new AtomicBoolean(false));
  TransactionExecutor txnl = txExecutorFactory.createExecutor(
    Collections.singleton((TransactionAware) metadataStoreDataset));
  // Add some run records
  final Set<String> expected = new TreeSet<>();
  final Set<String> expectedHalf = new TreeSet<>();
  final Set<ProgramRunId> programRunIdSet = new HashSet<>();
  final Set<ProgramRunId> programRunIdSetHalf = new HashSet<>();
  for (int i = 0; i < 100; ++i) {
    ApplicationId application = NamespaceId.DEFAULT.app("app");
    final ProgramId program = application.program(ProgramType.FLOW, "program");
    final RunId runId = RunIds.generate((i + 1) * 10000);
    expected.add(runId.toString());
    final int index = i;
    // Add every other runId
    if ((i % 2) == 0) {
      expectedHalf.add(runId.toString());
    }
    ProgramRunId programRunId = new ProgramRunId(program.getNamespace(), program.getApplication(),
                                                 program.getType(), program.getProgram(), runId.toString());
    programRunIdSet.add(programRunId);
    // Add every other programRunId
    if ((i % 2) == 0) {
      programRunIdSetHalf.add(programRunId);
    }
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        // Start the program and stop it
        metadataStoreDataset.recordProgramStart(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS),
                                                null, null, null);
        metadataStoreDataset.recordProgramStop(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS),
                                               ProgramRunStatus.values()[index % ProgramRunStatus.values().length],
                                               null);
      }
    });
  }
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      Map<ProgramRunId, RunRecordMeta> runMap = metadataStoreDataset.getRuns(programRunIdSet);
      Set<String> actual = new TreeSet<>();
      for (Map.Entry<ProgramRunId, RunRecordMeta> entry : runMap.entrySet()) {
        actual.add(entry.getValue().getPid());
      }
      Assert.assertEquals(expected, actual);
      Map<ProgramRunId, RunRecordMeta> runMapHalf = metadataStoreDataset.getRuns(programRunIdSetHalf);
      Set<String> actualHalf = new TreeSet<>();
      for (Map.Entry<ProgramRunId, RunRecordMeta> entry : runMapHalf.entrySet()) {
        actualHalf.add(entry.getValue().getPid());
      }
      Assert.assertEquals(expectedHalf, actualHalf);
    }
  });
}
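Because TransactionExecutor.Subroutine declares a single apply() method, the anonymous classes above collapse into lambdas on Java 8. A minimal sketch, assuming the surrounding test compiles with Java 8 (the assertion body is illustrative, not taken from the original test):

// Same transactional execution as above, written as a lambda.
txnl.execute(() -> {
  Map<ProgramRunId, RunRecordMeta> runMap = metadataStoreDataset.getRuns(programRunIdSet);
  Assert.assertEquals(programRunIdSet.size(), runMap.size());
});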
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
Class InMemoryStreamFileWriterFactory, method create.
@Override
public FileWriter<StreamEvent> create(StreamConfig config, int generation) throws IOException {
  final QueueProducer producer = queueClientFactory.createProducer(QueueName.fromStream(config.getStreamId()));
  final List<TransactionAware> txAwares = Lists.newArrayList();
  if (producer instanceof TransactionAware) {
    txAwares.add((TransactionAware) producer);
  }
  final TransactionExecutor txExecutor = executorFactory.createExecutor(txAwares);
  // Adapt the FileWriter interface onto the QueueProducer
  return new FileWriter<StreamEvent>() {

    private final List<StreamEvent> events = Lists.newArrayList();

    @Override
    public void append(StreamEvent event) throws IOException {
      events.add(event);
    }

    @Override
    public void appendAll(Iterator<? extends StreamEvent> events) throws IOException {
      Iterators.addAll(this.events, events);
    }

    @Override
    public void close() throws IOException {
      producer.close();
    }

    @Override
    public void flush() throws IOException {
      try {
        txExecutor.execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() throws Exception {
            for (StreamEvent event : events) {
              producer.enqueue(new QueueEntry(STREAM_EVENT_CODEC.encodePayload(event)));
            }
            events.clear();
          }
        });
      } catch (TransactionFailureException e) {
        throw new IOException(e);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException();
      }
    }
  };
}
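Tephra's TransactionExecutor also exposes executeUnchecked, which rethrows failures as unchecked exceptions instead of the checked TransactionFailureException/InterruptedException pair. The snippet above deliberately uses the checked variant so it can translate failures into IOException, as the FileWriter contract expects; a sketch of the unchecked form for contrast:

// Same enqueue logic, but transaction failures surface as unchecked exceptions,
// so no try/catch is required at the call site.
txExecutor.executeUnchecked(new TransactionExecutor.Subroutine() {
  @Override
  public void apply() throws Exception {
    for (StreamEvent event : events) {
      producer.enqueue(new QueueEntry(STREAM_EVENT_CODEC.encodePayload(event)));
    }
    events.clear();
  }
});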
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
Class HBaseQueueTest, method configTest.
@Test
public void configTest() throws Exception {
  final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(),
                                                    "app", "flow", "flowlet", "configure");
  queueAdmin.create(queueName);
  final List<ConsumerGroupConfig> groupConfigs = ImmutableList.of(
    new ConsumerGroupConfig(1L, 1, DequeueStrategy.FIFO, null),
    new ConsumerGroupConfig(2L, 2, DequeueStrategy.FIFO, null),
    new ConsumerGroupConfig(3L, 3, DequeueStrategy.FIFO, null));
  try (HBaseConsumerStateStore stateStore = ((HBaseQueueAdmin) queueAdmin).getConsumerStateStore(queueName)) {
    TransactionExecutor txExecutor = Transactions.createTransactionExecutor(executorFactory, stateStore);
    // Intentionally set a row state for group 2, instance 0, to test upgrade of the config.
    txExecutor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        stateStore.updateState(2L, 0, QueueEntryRow.getQueueEntryRowKey(queueName, 10L, 0));
      }
    });
    // Set the group info
    configureGroups(queueName, groupConfigs);
    txExecutor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        for (ConsumerGroupConfig groupConfig : groupConfigs) {
          long groupId = groupConfig.getGroupId();
          List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(groupId);
          Assert.assertEquals(1, queueBarriers.size());
          for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
            HBaseConsumerState state = stateStore.getState(groupId, instanceId);
            if (groupId == 2L && instanceId == 0) {
              // For group 2, instance 0, the start row shouldn't change;
              // the end row should be the same as the first barrier.
              Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(),
                                                     QueueEntryRow.getQueueEntryRowKey(queueName, 10L, 0)));
              Assert.assertEquals(0, Bytes.compareTo(state.getNextBarrier(), queueBarriers.get(0).getStartRow()));
            } else {
              // All other consumers should have a start row equal to the first barrier's start row.
              Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), queueBarriers.get(0).getStartRow()));
            }
          }
        }
      }
    });
    txExecutor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        // Check that all consumers have processed up to the barrier boundary
        for (long groupId = 1L; groupId <= 3L; groupId++) {
          List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(groupId);
          boolean allConsumed = stateStore.isAllConsumed(groupId, queueBarriers.get(0).getStartRow());
          // For group 2, instance 0 has not yet consumed up to the boundary
          Assert.assertTrue((groupId == 2L) != allConsumed);
          if (groupId == 2L) {
            // Mark group 2, instance 0 as having completed the barrier
            stateStore.completed(groupId, 0);
          }
        }
      }
    });
    txExecutor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        // After group 2, instance 0 completes the current barrier, all consumers in group 2
        // should be able to proceed
        List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(2L);
        byte[] startRow = stateStore.getState(2L, 0).getStartRow();
        Assert.assertEquals(0, Bytes.compareTo(startRow, queueBarriers.get(0).getStartRow()));
        Assert.assertTrue(stateStore.isAllConsumed(2L, startRow));
      }
    });
    // Add an instance to group 2
    txExecutor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        stateStore.configureInstances(2L, 3);
      }
    });
    txExecutor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(2L);
        Assert.assertEquals(2, queueBarriers.size());
        // For existing instances, the start row shouldn't change
        for (int instanceId = 0; instanceId < 2; instanceId++) {
          HBaseConsumerState state = stateStore.getState(2L, instanceId);
          Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), queueBarriers.get(0).getStartRow()));
          Assert.assertEquals(0, Bytes.compareTo(state.getNextBarrier(), queueBarriers.get(1).getStartRow()));
          // Complete the existing instance
          stateStore.completed(2L, instanceId);
        }
        // For the new instance, the start row should be the same as the new barrier
        HBaseConsumerState state = stateStore.getState(2L, 2);
        Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), queueBarriers.get(1).getStartRow()));
        Assert.assertNull(state.getNextBarrier());
        // All instances should be consumed up to the beginning of the last barrier info
        Assert.assertTrue(stateStore.isAllConsumed(2L, queueBarriers.get(1).getStartRow()));
      }
    });
    // Reduce the instances of group 2 through group reconfiguration, remove groups 1 and 3, and add group 4
    configureGroups(queueName, ImmutableList.of(new ConsumerGroupConfig(2L, 1, DequeueStrategy.FIFO, null),
                                                new ConsumerGroupConfig(4L, 1, DequeueStrategy.FIFO, null)));
    txExecutor.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        // States and barrier info for removed groups should be gone
        try {
          // There should be no barrier info for group 1
          List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(1L);
          Assert.assertTrue(queueBarriers.isEmpty());
          stateStore.getState(1L, 0);
          Assert.fail("Not expected to get state for group 1");
        } catch (Exception e) {
          // Expected
        }
        try {
          // There should be no barrier info for group 3
          List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(3L);
          Assert.assertTrue(queueBarriers.isEmpty());
          stateStore.getState(3L, 0);
          Assert.fail("Not expected to get state for group 3");
        } catch (Exception e) {
          // Expected
        }
        // For group 2 there should be two barrier infos, since all consumers passed the first
        // barrier (groupSize = 2); only the size = 3 and size = 1 barriers are left
        List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(2L);
        Assert.assertEquals(2, queueBarriers.size());
        // Make all consumers (three of them before the reconfiguration) in group 2 consume everything
        for (int instanceId = 0; instanceId < 3; instanceId++) {
          stateStore.completed(2L, instanceId);
        }
        // The remaining consumer should start consuming from the latest barrier
        HBaseConsumerState state = stateStore.getState(2L, 0);
        Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), queueBarriers.get(1).getStartRow()));
        Assert.assertNull(state.getNextBarrier());
        // Removed instances should throw an exception when their states are retrieved
        for (int i = 1; i < 3; i++) {
          try {
            stateStore.getState(2L, i);
            Assert.fail("Not expected to get state for group 2, instance " + i);
          } catch (Exception e) {
            // Expected
          }
        }
      }
    });
  } finally {
    queueAdmin.dropAllInNamespace(NamespaceId.DEFAULT);
  }
}
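All five snippets share one pattern: gather the TransactionAware participants, obtain a TransactionExecutor from a factory (or construct one directly), and hand the work to execute() so Tephra drives begin, commit, and rollback. A condensed sketch of that pattern, assuming a TransactionSystemClient named txClient and a TransactionAware dataset named dataset are already available (both are assumptions, not taken from the snippets above):

TransactionExecutor executor =
  new DefaultTransactionExecutor(txClient, ImmutableList.<TransactionAware>of(dataset));
executor.execute(new TransactionExecutor.Subroutine() {
  @Override
  public void apply() throws Exception {
    // All reads and writes against "dataset" here run in a single transaction;
    // Tephra commits on normal return and rolls back if apply() throws.
  }
});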