Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
The class HBaseQueueTest, method configTest.
@Test
public void configTest() throws Exception {
final QueueName queueName = QueueName.fromFlowlet(NamespaceId.DEFAULT.getEntityName(), "app", "flow", "flowlet", "configure");
queueAdmin.create(queueName);
final List<ConsumerGroupConfig> groupConfigs = ImmutableList.of(
  new ConsumerGroupConfig(1L, 1, DequeueStrategy.FIFO, null),
  new ConsumerGroupConfig(2L, 2, DequeueStrategy.FIFO, null),
  new ConsumerGroupConfig(3L, 3, DequeueStrategy.FIFO, null));
try (HBaseConsumerStateStore stateStore = ((HBaseQueueAdmin) queueAdmin).getConsumerStateStore(queueName)) {
TransactionExecutor txExecutor = Transactions.createTransactionExecutor(executorFactory, stateStore);
// Intentionally set a row state for group 2, instance 0, to test the config upgrade path.
txExecutor.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
stateStore.updateState(2L, 0, QueueEntryRow.getQueueEntryRowKey(queueName, 10L, 0));
}
});
// Set the group info
configureGroups(queueName, groupConfigs);
txExecutor.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
for (ConsumerGroupConfig groupConfig : groupConfigs) {
long groupId = groupConfig.getGroupId();
List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(groupId);
Assert.assertEquals(1, queueBarriers.size());
for (int instanceId = 0; instanceId < groupConfig.getGroupSize(); instanceId++) {
HBaseConsumerState state = stateStore.getState(groupId, instanceId);
if (groupId == 2L && instanceId == 0) {
// For group 2, instance 0, the start row shouldn't be changed.
// The end row should be the same as the first barrier's start row.
Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), QueueEntryRow.getQueueEntryRowKey(queueName, 10L, 0)));
Assert.assertEquals(0, Bytes.compareTo(state.getNextBarrier(), queueBarriers.get(0).getStartRow()));
} else {
// For the other groups, the start row should be the same as the first barrier's start row.
Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), queueBarriers.get(0).getStartRow()));
}
}
}
}
});
txExecutor.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// Check that all consumers have processed up to the barrier boundary
for (long groupId = 1L; groupId <= 3L; groupId++) {
List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(groupId);
boolean allConsumed = stateStore.isAllConsumed(groupId, queueBarriers.get(0).getStartRow());
// For group 2, instance 0 has not yet consumed up to the boundary
Assert.assertTrue((groupId == 2L) != allConsumed);
if (groupId == 2L) {
// Mark group 2, instance 0 as having completed the barrier.
stateStore.completed(groupId, 0);
}
}
}
});
txExecutor.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// After group 2, instance 0 completed the current barrier, all consumers in group 2 should be able to
// proceed
List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(2L);
byte[] startRow = stateStore.getState(2L, 0).getStartRow();
Assert.assertEquals(0, Bytes.compareTo(startRow, queueBarriers.get(0).getStartRow()));
Assert.assertTrue(stateStore.isAllConsumed(2L, startRow));
}
});
// Add instance to group 2
txExecutor.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
stateStore.configureInstances(2L, 3);
}
});
txExecutor.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(2L);
Assert.assertEquals(2, queueBarriers.size());
// For existing instances, the start row shouldn't be changed.
for (int instanceId = 0; instanceId < 2; instanceId++) {
HBaseConsumerState state = stateStore.getState(2L, instanceId);
Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), queueBarriers.get(0).getStartRow()));
Assert.assertEquals(0, Bytes.compareTo(state.getNextBarrier(), queueBarriers.get(1).getStartRow()));
// Complete the existing instance
stateStore.completed(2L, instanceId);
}
// For the new instance, the start row should be the same as the new barrier's start row
HBaseConsumerState state = stateStore.getState(2L, 2);
Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), queueBarriers.get(1).getStartRow()));
Assert.assertNull(state.getNextBarrier());
// All instances should be consumed up to the beginning of the last barrier info
Assert.assertTrue(stateStore.isAllConsumed(2L, queueBarriers.get(1).getStartRow()));
}
});
// Reduce the instances of group 2 through group reconfiguration, remove groups 1 and 3, add group 4.
configureGroups(queueName, ImmutableList.of(
  new ConsumerGroupConfig(2L, 1, DequeueStrategy.FIFO, null),
  new ConsumerGroupConfig(4L, 1, DequeueStrategy.FIFO, null)));
txExecutor.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// States and barrier info for removed groups should be gone
try {
// There should be no barrier info for group 1
List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(1L);
Assert.assertTrue(queueBarriers.isEmpty());
stateStore.getState(1L, 0);
Assert.fail("Not expected to get state for group 1");
} catch (Exception e) {
// Expected
}
try {
// There should be no barrier info for group 3
List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(3L);
Assert.assertTrue(queueBarriers.isEmpty());
stateStore.getState(3L, 0);
Assert.fail("Not expected to get state for group 3");
} catch (Exception e) {
// Expected
}
// For group 2, there should be two barrier infos left,
// since all consumers have passed the first barrier (groupSize = 2); only the groupSize = 3 and groupSize = 1 barriers remain.
List<QueueBarrier> queueBarriers = stateStore.getAllBarriers(2L);
Assert.assertEquals(2, queueBarriers.size());
// Make all consumers in group 2 (three of them before the reconfiguration) consume everything
for (int instanceId = 0; instanceId < 3; instanceId++) {
stateStore.completed(2L, instanceId);
}
// The remaining consumer should start consuming from the latest barrier
HBaseConsumerState state = stateStore.getState(2L, 0);
Assert.assertEquals(0, Bytes.compareTo(state.getStartRow(), queueBarriers.get(1).getStartRow()));
Assert.assertNull(state.getNextBarrier());
// Retrieving the state of a removed instance should throw an exception
for (int i = 1; i < 3; i++) {
try {
stateStore.getState(2L, i);
Assert.fail("Not expected to get state for group 2, instance " + i);
} catch (Exception e) {
// Expected
}
}
}
});
} finally {
queueAdmin.dropAllInNamespace(NamespaceId.DEFAULT);
}
}
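A minimal sketch of the pattern used throughout this test, assuming the same executorFactory and stateStore fields and a test method that declares throws Exception: a TransactionExecutor is created over the TransactionAware state store, and every dataset operation runs inside a Subroutine. The lambda form is an assumption for a Java 8+ build; the cast selects the Subroutine overload rather than execute(Callable).

TransactionExecutor executor = Transactions.createTransactionExecutor(executorFactory, stateStore);
// Anonymous-class form, as used in the test above; everything in apply() runs in one transaction.
executor.execute(new TransactionExecutor.Subroutine() {
  @Override
  public void apply() throws Exception {
    stateStore.configureInstances(2L, 3);
  }
});
// Equivalent lambda form (assumes Java 8+); the cast disambiguates from the execute(Callable) overload.
executor.execute((TransactionExecutor.Subroutine) () -> stateStore.configureInstances(2L, 3));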
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
The class UsageDatasetTest, method testAllMappings.
@Test
public void testAllMappings() throws Exception {
final UsageDataset usageDataset = getUsageDataset("testAllMappings");
TransactionExecutor txnl = dsFrameworkUtil.newInMemoryTransactionExecutor((TransactionAware) usageDataset);
// Add mappings
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
usageDataset.register(flow11, datasetInstance1);
usageDataset.register(service21, datasetInstance3);
usageDataset.register(flow12, stream1);
}
});
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// Verify app mappings
Assert.assertEquals(ImmutableSet.of(datasetInstance1), usageDataset.getDatasets(flow11));
Assert.assertEquals(ImmutableSet.<DatasetId>of(), usageDataset.getDatasets(flow12));
Assert.assertEquals(ImmutableSet.of(datasetInstance3), usageDataset.getDatasets(service21));
Assert.assertEquals(ImmutableSet.of(stream1), usageDataset.getStreams(flow12));
Assert.assertEquals(ImmutableSet.<StreamId>of(), usageDataset.getStreams(flow22));
// Verify dataset/stream mappings
Assert.assertEquals(ImmutableSet.of(flow11), usageDataset.getPrograms(datasetInstance1));
Assert.assertEquals(ImmutableSet.of(flow12), usageDataset.getPrograms(stream1));
Assert.assertEquals(ImmutableSet.<ProgramId>of(), usageDataset.getPrograms(stream2));
}
});
// --------- Delete app1 -----------
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
usageDataset.unregister(flow11.getParent());
}
});
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// Verify app mappings
Assert.assertEquals(ImmutableSet.<DatasetId>of(), usageDataset.getDatasets(flow11));
Assert.assertEquals(ImmutableSet.<DatasetId>of(), usageDataset.getDatasets(flow12));
Assert.assertEquals(ImmutableSet.of(datasetInstance3), usageDataset.getDatasets(service21));
Assert.assertEquals(ImmutableSet.<StreamId>of(), usageDataset.getStreams(flow12));
Assert.assertEquals(ImmutableSet.<StreamId>of(), usageDataset.getStreams(flow22));
// Verify dataset/stream mappings
Assert.assertEquals(ImmutableSet.<ProgramId>of(), usageDataset.getPrograms(datasetInstance1));
Assert.assertEquals(ImmutableSet.<ProgramId>of(), usageDataset.getPrograms(stream1));
Assert.assertEquals(ImmutableSet.<ProgramId>of(), usageDataset.getPrograms(stream2));
}
});
// --------- Delete app2 -----------
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
usageDataset.unregister(flow21.getParent());
}
});
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// Verify app mappings
Assert.assertEquals(ImmutableSet.<DatasetId>of(), usageDataset.getDatasets(flow11));
Assert.assertEquals(ImmutableSet.<DatasetId>of(), usageDataset.getDatasets(flow12));
Assert.assertEquals(ImmutableSet.<DatasetId>of(), usageDataset.getDatasets(service21));
Assert.assertEquals(ImmutableSet.<StreamId>of(), usageDataset.getStreams(flow12));
Assert.assertEquals(ImmutableSet.<StreamId>of(), usageDataset.getStreams(flow22));
// Verify dataset/stream mappings
Assert.assertEquals(ImmutableSet.<ProgramId>of(), usageDataset.getPrograms(datasetInstance1));
Assert.assertEquals(ImmutableSet.<ProgramId>of(), usageDataset.getPrograms(stream1));
Assert.assertEquals(ImmutableSet.<ProgramId>of(), usageDataset.getPrograms(stream2));
}
});
}
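As a hedged aside, Tephra's TransactionExecutor also has an execute(Callable) overload that returns a value from the transaction, which can tighten the read-back steps above. The sketch assumes the same txnl and usageDataset fields, a test method that declares throws Exception, imports of java.util.Set and java.util.concurrent.Callable, and a Set<DatasetId> return type for getDatasets, consistent with the assertions above.

Set<DatasetId> datasets = txnl.execute(new Callable<Set<DatasetId>>() {
  @Override
  public Set<DatasetId> call() throws Exception {
    // Read the mappings back inside a transaction and return them to the caller.
    return usageDataset.getDatasets(flow11);
  }
});
// After both apps have been deleted above, the mapping for flow11 is empty.
Assert.assertTrue(datasets.isEmpty());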
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
The class UsageRegistryTest, method testUsageRegistry.
@Test
public void testUsageRegistry() {
// instantiate a usage registry
UsageRegistry registry = new DefaultUsageRegistry(new TransactionExecutorFactory() {
@Override
public TransactionExecutor createExecutor(Iterable<TransactionAware> iterable) {
return dsFrameworkUtil.newInMemoryTransactionExecutor(iterable);
}
}, new ForwardingDatasetFramework(dsFrameworkUtil.getFramework()) {
@Nullable
@Override
public <T extends Dataset> T getDataset(DatasetId datasetInstanceId, Map<String, String> arguments, @Nullable ClassLoader classLoader) throws DatasetManagementException, IOException {
T t = super.getDataset(datasetInstanceId, arguments, classLoader);
if (t instanceof UsageDataset) {
@SuppressWarnings("unchecked") T t1 = (T) new WrappedUsageDataset((UsageDataset) t);
return t1;
}
return t;
}
});
// register usage of a dataset and a stream for single and multiple "owners" (all flows here)
registry.register(flow11, datasetInstance1);
registry.register(flow12, stream1);
registry.registerAll(ImmutableList.of(flow21, flow22), datasetInstance2);
registry.registerAll(ImmutableList.of(flow21, flow22), stream1);
int count = WrappedUsageDataset.registerCount;
// validate usage
Assert.assertEquals(ImmutableSet.of(datasetInstance1), registry.getDatasets(flow11));
Assert.assertEquals(ImmutableSet.of(stream1), registry.getStreams(flow12));
Assert.assertEquals(ImmutableSet.of(datasetInstance2), registry.getDatasets(flow21));
Assert.assertEquals(ImmutableSet.of(datasetInstance2), registry.getDatasets(flow22));
Assert.assertEquals(ImmutableSet.of(stream1), registry.getStreams(flow21));
Assert.assertEquals(ImmutableSet.of(stream1), registry.getStreams(flow22));
Assert.assertEquals(ImmutableSet.of(flow11), registry.getPrograms(datasetInstance1));
Assert.assertEquals(ImmutableSet.of(flow21, flow22), registry.getPrograms(datasetInstance2));
Assert.assertEquals(ImmutableSet.of(flow12, flow21, flow22), registry.getPrograms(stream1));
// register datasets again
registry.register(flow11, datasetInstance1);
registry.registerAll(ImmutableList.of(flow21, flow22), datasetInstance2);
// validate that this does re-register previous usages (DefaultUsageRegistry no longer avoids re-registration)
count += 3;
Assert.assertEquals(count, WrappedUsageDataset.registerCount);
// validate usage
Assert.assertEquals(ImmutableSet.of(datasetInstance1), registry.getDatasets(flow11));
Assert.assertEquals(ImmutableSet.of(stream1), registry.getStreams(flow12));
Assert.assertEquals(ImmutableSet.of(datasetInstance2), registry.getDatasets(flow21));
Assert.assertEquals(ImmutableSet.of(datasetInstance2), registry.getDatasets(flow22));
Assert.assertEquals(ImmutableSet.of(stream1), registry.getStreams(flow21));
Assert.assertEquals(ImmutableSet.of(stream1), registry.getStreams(flow22));
Assert.assertEquals(ImmutableSet.of(flow11), registry.getPrograms(datasetInstance1));
Assert.assertEquals(ImmutableSet.of(flow21, flow22), registry.getPrograms(datasetInstance2));
Assert.assertEquals(ImmutableSet.of(flow12, flow21, flow22), registry.getPrograms(stream1));
// unregister app
registry.unregister(flow11.getParent());
// validate usage for that app is gone
Assert.assertEquals(ImmutableSet.of(), registry.getDatasets(flow11));
Assert.assertEquals(ImmutableSet.of(), registry.getStreams(flow12));
Assert.assertEquals(ImmutableSet.of(datasetInstance2), registry.getDatasets(flow21));
Assert.assertEquals(ImmutableSet.of(datasetInstance2), registry.getDatasets(flow22));
Assert.assertEquals(ImmutableSet.of(stream1), registry.getStreams(flow21));
Assert.assertEquals(ImmutableSet.of(stream1), registry.getStreams(flow22));
Assert.assertEquals(ImmutableSet.of(), registry.getPrograms(datasetInstance1));
Assert.assertEquals(ImmutableSet.of(flow21, flow22), registry.getPrograms(datasetInstance2));
Assert.assertEquals(ImmutableSet.of(flow21, flow22), registry.getPrograms(stream1));
// register application 1 again
registry.register(flow11, datasetInstance1);
registry.register(flow12, stream1);
// validate it was re-registered
Assert.assertEquals(ImmutableSet.of(datasetInstance1), registry.getDatasets(flow11));
Assert.assertEquals(ImmutableSet.of(stream1), registry.getStreams(flow12));
Assert.assertEquals(ImmutableSet.of(datasetInstance2), registry.getDatasets(flow21));
Assert.assertEquals(ImmutableSet.of(datasetInstance2), registry.getDatasets(flow22));
Assert.assertEquals(ImmutableSet.of(stream1), registry.getStreams(flow21));
Assert.assertEquals(ImmutableSet.of(stream1), registry.getStreams(flow22));
Assert.assertEquals(ImmutableSet.of(flow11), registry.getPrograms(datasetInstance1));
Assert.assertEquals(ImmutableSet.of(flow21, flow22), registry.getPrograms(datasetInstance2));
Assert.assertEquals(ImmutableSet.of(flow12, flow21, flow22), registry.getPrograms(stream1));
// validate that this actually re-registered previous usages (through code in wrapped usage dataset)
Assert.assertEquals(count + 2, WrappedUsageDataset.registerCount);
}
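A hedged sketch, not from the CDAP source: since TransactionExecutorFactory declares only createExecutor(Iterable<TransactionAware>), the anonymous factory above collapses to a lambda on a Java 8+ build, leaving only the counting dataset wrapper as the verbose part. The two-argument DefaultUsageRegistry constructor is assumed to match the call above.

TransactionExecutorFactory factory =
  txAwares -> dsFrameworkUtil.newInMemoryTransactionExecutor(txAwares);
// A registry without the counting wrapper, using the dataset framework directly.
UsageRegistry plainRegistry = new DefaultUsageRegistry(factory, dsFrameworkUtil.getFramework());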
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
The class LineageDatasetTest, method testMultipleRelations.
@Test
public void testMultipleRelations() throws Exception {
final LineageDataset lineageDataset = getLineageDataset("testMultipleRelations");
Assert.assertNotNull(lineageDataset);
TransactionExecutor txnl = dsFrameworkUtil.newInMemoryTransactionExecutor((TransactionAware) lineageDataset);
final RunId runId1 = RunIds.generate(10000);
final RunId runId2 = RunIds.generate(20000);
final RunId runId3 = RunIds.generate(30000);
final RunId runId4 = RunIds.generate(40000);
final DatasetId datasetInstance1 = NamespaceId.DEFAULT.dataset("dataset1");
final DatasetId datasetInstance2 = NamespaceId.DEFAULT.dataset("dataset2");
final StreamId stream1 = NamespaceId.DEFAULT.stream("stream1");
final StreamId stream2 = NamespaceId.DEFAULT.stream("stream2");
final ProgramId program1 = NamespaceId.DEFAULT.app("app1").flow("flow1");
final FlowletId flowlet1 = program1.flowlet("flowlet1");
final ProgramId program2 = NamespaceId.DEFAULT.app("app2").worker("worker2");
final ProgramId program3 = NamespaceId.DEFAULT.app("app3").service("service3");
final ProgramRunId run11 = program1.run(runId1.getId());
final ProgramRunId run22 = program2.run(runId2.getId());
final ProgramRunId run23 = program2.run(runId3.getId());
final ProgramRunId run34 = program3.run(runId4.getId());
final long now = System.currentTimeMillis();
//noinspection UnnecessaryLocalVariable
final long run11Data1AccessTime = now;
final long run22Data2AccessTime = now + 1;
final long run22Stream1AccessTime = now + 2;
final long run23Stream2AccessTime = now + 1;
final long run23Data2AccessTime = now + 3;
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
lineageDataset.addAccess(run11, datasetInstance1, AccessType.READ, run11Data1AccessTime, flowlet1);
lineageDataset.addAccess(run22, datasetInstance2, AccessType.WRITE, run22Data2AccessTime);
lineageDataset.addAccess(run22, stream1, AccessType.READ, run22Stream1AccessTime);
lineageDataset.addAccess(run23, stream2, AccessType.READ, run23Stream2AccessTime);
lineageDataset.addAccess(run23, datasetInstance2, AccessType.WRITE, run23Data2AccessTime);
lineageDataset.addAccess(run34, datasetInstance2, AccessType.READ_WRITE, System.currentTimeMillis());
lineageDataset.addAccess(run34, stream2, AccessType.UNKNOWN, System.currentTimeMillis());
}
});
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
Assert.assertEquals(
  ImmutableSet.of(new Relation(datasetInstance1, program1, AccessType.READ, runId1, ImmutableSet.of(flowlet1))),
  lineageDataset.getRelations(datasetInstance1, 0, 100000, Predicates.<Relation>alwaysTrue()));
Assert.assertEquals(
  ImmutableSet.of(new Relation(datasetInstance2, program2, AccessType.WRITE, runId2),
                  new Relation(datasetInstance2, program2, AccessType.WRITE, runId3),
                  new Relation(datasetInstance2, program3, AccessType.READ_WRITE, runId4)),
  lineageDataset.getRelations(datasetInstance2, 0, 100000, Predicates.<Relation>alwaysTrue()));
Assert.assertEquals(
  ImmutableSet.of(new Relation(stream1, program2, AccessType.READ, runId2)),
  lineageDataset.getRelations(stream1, 0, 100000, Predicates.<Relation>alwaysTrue()));
Assert.assertEquals(
  ImmutableSet.of(new Relation(stream2, program2, AccessType.READ, runId3),
                  new Relation(stream2, program3, AccessType.UNKNOWN, runId4)),
  lineageDataset.getRelations(stream2, 0, 100000, Predicates.<Relation>alwaysTrue()));
Assert.assertEquals(
  ImmutableSet.of(new Relation(datasetInstance2, program2, AccessType.WRITE, runId2),
                  new Relation(stream1, program2, AccessType.READ, runId2),
                  new Relation(datasetInstance2, program2, AccessType.WRITE, runId3),
                  new Relation(stream2, program2, AccessType.READ, runId3)),
  lineageDataset.getRelations(program2, 0, 100000, Predicates.<Relation>alwaysTrue()));
// Reduced time range
Assert.assertEquals(
  ImmutableSet.of(new Relation(datasetInstance2, program2, AccessType.WRITE, runId2),
                  new Relation(datasetInstance2, program2, AccessType.WRITE, runId3)),
  lineageDataset.getRelations(datasetInstance2, 0, 35000, Predicates.<Relation>alwaysTrue()));
Assert.assertEquals(toSet(program1, datasetInstance1), lineageDataset.getEntitiesForRun(run11));
Assert.assertEquals(ImmutableList.of(run11Data1AccessTime), lineageDataset.getAccessTimesForRun(run11));
Assert.assertEquals(toSet(program2, datasetInstance2, stream1), lineageDataset.getEntitiesForRun(run22));
Assert.assertEquals(ImmutableList.of(run22Data2AccessTime, run22Stream1AccessTime), lineageDataset.getAccessTimesForRun(run22));
Assert.assertEquals(toSet(program2, datasetInstance2, stream2), lineageDataset.getEntitiesForRun(run23));
Assert.assertEquals(ImmutableList.of(run23Data2AccessTime, run23Stream2AccessTime), lineageDataset.getAccessTimesForRun(run23));
Assert.assertEquals(toSet(program3, datasetInstance2, stream2), lineageDataset.getEntitiesForRun(run34));
}
});
}
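A hedged extension of the setup above, using only calls already shown in this test (RunIds.generate, program3.run, addAccess): recording one more access is just another Subroutine executed against the same txnl. The extraRun id is hypothetical and not part of the original test.

final ProgramRunId extraRun = program3.run(RunIds.generate(50000).getId());
txnl.execute(new TransactionExecutor.Subroutine() {
  @Override
  public void apply() throws Exception {
    // Record a WRITE from the hypothetical extra run of program3 against dataset1.
    lineageDataset.addAccess(extraRun, datasetInstance1, AccessType.WRITE, System.currentTimeMillis());
  }
});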
Use of org.apache.tephra.TransactionExecutor in project cdap by caskdata.
The class DatasetBasedStreamSizeScheduleStoreTest, method testDeletion.
private void testDeletion(final ProgramId programId) throws Exception {
final boolean defaultVersion = programId.getVersion().equals(ApplicationId.DEFAULT_VERSION);
DatasetId storeTable = NamespaceId.SYSTEM.dataset(ScheduleStoreTableUtil.SCHEDULE_STORE_DATASET_NAME);
final Table table = datasetFramework.getDataset(storeTable, ImmutableMap.<String, String>of(), null);
Assert.assertNotNull(table);
TransactionExecutor txnl = txExecutorFactory.createExecutor(ImmutableList.of((TransactionAware) table));
final byte[] startKey = Bytes.toBytes(DatasetBasedStreamSizeScheduleStore.KEY_PREFIX);
final byte[] stopKey = Bytes.stopKeyForPrefix(startKey);
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
Scanner scanner = table.scan(startKey, stopKey);
Assert.assertNull(scanner.next());
scanner.close();
}
});
// Create one stream schedule: this will be persisted with the new format
scheduleStore.persist(programId, PROGRAM_TYPE, STREAM_SCHEDULE_1, MAP_1, 0L, 0L, 0L, 0L, true);
// Create another stream schedule, based on the old format
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// Create a programId without a version so that we can create an old-format schedule
ProgramId defaultProgramId = new ProgramId(programId.getNamespace(), programId.getApplication(), programId.getType(), programId.getProgram());
String newRowKey = scheduleStore.getRowKey(defaultProgramId, PROGRAM_TYPE, STREAM_SCHEDULE_1.getName());
Row row = table.get(Bytes.toBytes(scheduleStore.getRowKey(programId, PROGRAM_TYPE, STREAM_SCHEDULE_1.getName())));
Assert.assertFalse(row.isEmpty());
byte[] oldRowKey = Bytes.toBytes(scheduleStore.removeAppVersion(newRowKey));
for (Map.Entry<byte[], byte[]> entry : row.getColumns().entrySet()) {
table.put(oldRowKey, entry.getKey(), entry.getValue());
}
}
});
// Make sure there are only two stream size schedules
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
Scanner scanner = table.scan(startKey, stopKey);
int numRows = 0;
while (true) {
Row row = scanner.next();
if (row == null) {
break;
}
numRows++;
}
scanner.close();
Assert.assertEquals(2, numRows);
}
});
// This delete should remove both the old- and new-format rows
scheduleStore.delete(programId, PROGRAM_TYPE, STREAM_SCHEDULE_1.getName());
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
Scanner scanner = table.scan(startKey, stopKey);
if (defaultVersion) {
Assert.assertNull(scanner.next());
} else {
Assert.assertNotNull(scanner.next());
Assert.assertNull(scanner.next());
}
scanner.close();
}
});
// If the version is not the default, we still need to delete the row that didn't have a version
if (!defaultVersion) {
txnl.execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// Create a programId without a version so that we can build the row key to delete the old-format schedule
ProgramId defaultProgramId = new ProgramId(programId.getNamespace(), programId.getApplication(), programId.getType(), programId.getProgram());
String newRowKey = scheduleStore.getRowKey(defaultProgramId, PROGRAM_TYPE, STREAM_SCHEDULE_1.getName());
byte[] oldRowKey = Bytes.toBytes(scheduleStore.removeAppVersion(newRowKey));
Row row = table.get(oldRowKey);
Assert.assertFalse(row.isEmpty());
table.delete(oldRowKey);
}
});
}
}
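A hedged refactoring sketch for the repeated row-counting scans above, assuming the same txnl, table, startKey and stopKey fields, an import of java.util.concurrent.Callable, and Tephra's execute(Callable) overload, which returns a value from the transaction.

int rows = txnl.execute(new Callable<Integer>() {
  @Override
  public Integer call() throws Exception {
    // Count all rows under the schedule-store key prefix, closing the scanner as the test above does.
    Scanner scanner = table.scan(startKey, stopKey);
    int count = 0;
    while (scanner.next() != null) {
      count++;
    }
    scanner.close();
    return count;
  }
});
// At the end of testDeletion, rows of both formats have been deleted.
Assert.assertEquals(0, rows);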