Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
From class StreamTestBase, method testFetchEpochs:
/**
* Stream history.
* epoch0 = 0, 1, 2, 3, 4
* epoch1 = 5, 1, 2, 3, 4
* epoch2 = 5, 6, 2, 3, 4
* epoch3 = 5, 6, 7, 3, 4
* epoch4 = 5, 6, 7, 8, 4
* epoch5 = 5, 6, 7, 8, 9
* epoch6 = 0`, 1`, 2`, 3`, 4`
* epoch7 = 5`, 6`, 7`, 8`, 9`
* epoch8 = 10, 6`, 7`, 8`, 9`
* epoch9 = 10, 11, 7`, 8`, 9`
* epoch10 = 10, 11, 12, 8`, 9`
* epoch11 = 10, 11, 12, 13, 9`
* epoch12 = 10, 11, 12, 13, 14
*/
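In the history above, a back-tick (for example 0`) marks a duplicate segment created by a rolling transaction: it has the same segment number and key range as the original but a different creation epoch. The creation epoch is encoded in the segment id itself, which is what the expected-id computations with NameUtils.computeSegmentId in the tests below rely on. A minimal sketch of that composition, assuming io.pravega.shared.NameUtils and assuming the epoch occupies the high 32 bits of the id (an inference from these tests, not a documented contract):

import io.pravega.shared.NameUtils;

// segment number 0 created in epoch 0, and its rolling-transaction duplicate 0` created in epoch 6
long original = NameUtils.computeSegmentId(0, 0);
long duplicate = NameUtils.computeSegmentId(0, 6);
// same 32-bit segment number, different creation epoch (assumed layout: epoch in the high bits)
assert (int) original == (int) duplicate;
assert (duplicate >>> 32) == 6;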
@Test(timeout = 30000L)
public void testFetchEpochs() {
    String scope = "fetchEpoch";
    String name = "fetchEpoch";
    PersistentStreamBase stream = createScaleAndRollStreamForMultiChunkTests(name, scope, new Random().nextInt(2000), System::currentTimeMillis);
    OperationContext context = getContext();
    List<EpochRecord> epochs = stream.fetchEpochs(0, 12, true, context).join();
    assertEquals(13, epochs.size());
    // epochs 0 through 12 are all present in the fetched list
    for (int i = 0; i <= 12; i++) {
        final int epoch = i;
        assertTrue(epochs.stream().anyMatch(x -> x.getEpoch() == epoch));
    }
    epochs = stream.fetchEpochs(12, 13, true, context).join();
    assertEquals(1, epochs.size());
    assertEquals(stream.getEpochRecord(12, context).join(), epochs.get(0));
    // now try to fetch an epoch that falls in a history chunk that does not exist
    AssertExtensions.assertFutureThrows("", stream.fetchEpochs(12, 14, true, context),
            e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
}
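The chunk boundaries explain the two outcomes above: the assertions imply the history time series is stored in chunks of two epochs each, so the range (12, 13) still resolves against the chunk holding epoch 12, while (12, 14) touches a chunk that was never written and surfaces DataNotFoundException. A hypothetical caller-side guard that clamps the upper bound to the active epoch before fetching (requestedUpper is an assumed variable, not part of the test):

int upperBound = stream.getActiveEpoch(true, context).join().getEpoch();
List<EpochRecord> safeFetch = stream.fetchEpochs(0, Math.min(requestedUpper, upperBound), true, context).join();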
Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
From class StreamTestBase, method testSealedSegmentSizesMapShards:
/**
* Stream history.
* epoch0 = 0, 1, 2, 3, 4
* epoch1 = 5, 1, 2, 3, 4
* epoch2 = 5, 6, 2, 3, 4
* epoch3 = 5, 6, 7, 3, 4
* epoch4 = 5, 6, 7, 8, 4
* epoch5 = 5, 6, 7, 8, 9
* epoch6 = 0`, 1`, 2`, 3`, 4`
* epoch7 = 5`, 6`, 7`, 8`, 9`
* epoch8 = 10, 6`, 7`, 8`, 9`
* epoch9 = 10, 11, 7`, 8`, 9`
* epoch10 = 10, 11, 12, 8`, 9`
* epoch11 = 10, 11, 12, 13, 9`
* epoch12 = 10, 11, 12, 13, 14
*/
@Test(timeout = 30000L)
public void testSealedSegmentSizesMapShards() {
    String scope = "sealedSizeTest";
    String name = "sealedSizeTest";
    int startingSegmentNumber = new Random().nextInt(2000);
    PersistentStreamBase stream = createScaleAndRollStreamForMultiChunkTests(name, scope, startingSegmentNumber, System::currentTimeMillis);
    OperationContext context = getContext();
    SealedSegmentsMapShard shard0 = stream.getSealedSegmentSizeMapShard(0, context).join();
    // 5 segments created in epoch 0 and 1 segment in epoch 1
    assertTrue(shard0.getSealedSegmentsSizeMap().keySet().stream().allMatch(x -> getEpoch(x) == 0 || getEpoch(x) == 1));
    assertEquals(5, shard0.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 0).count());
    assertEquals(1, shard0.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 1).count());
    // 1 segment created in epoch 2 and 1 segment in epoch 3
    SealedSegmentsMapShard shard1 = stream.getSealedSegmentSizeMapShard(1, context).join();
    assertTrue(shard1.getSealedSegmentsSizeMap().keySet().stream().allMatch(x -> getEpoch(x) == 2 || getEpoch(x) == 3));
    assertEquals(1, shard1.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 2).count());
    assertEquals(1, shard1.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 3).count());
    // 1 segment created in epoch 4 and 1 segment in epoch 5
    SealedSegmentsMapShard shard2 = stream.getSealedSegmentSizeMapShard(2, context).join();
    assertTrue(shard2.getSealedSegmentsSizeMap().keySet().stream().allMatch(x -> getEpoch(x) == 4 || getEpoch(x) == 5));
    assertEquals(1, shard2.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 4).count());
    assertEquals(1, shard2.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 5).count());
    // rolling transaction: 10 duplicate segments created across epochs 6 and 7 map to this shard
    SealedSegmentsMapShard shard3 = stream.getSealedSegmentSizeMapShard(3, context).join();
    assertTrue(shard3.getSealedSegmentsSizeMap().keySet().stream().allMatch(x -> getEpoch(x) == 6 || getEpoch(x) == 7));
    assertEquals(5, shard3.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 6).count());
    assertEquals(5, shard3.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 7).count());
    // 1 segment created in epoch 8 and 1 in epoch 9, but neither is sealed yet
    SealedSegmentsMapShard shard4 = stream.getSealedSegmentSizeMapShard(4, context).join();
    assertTrue(shard4.getSealedSegmentsSizeMap().isEmpty());
    // 1 segment created in epoch 10 and 1 in epoch 11, but neither is sealed yet
    SealedSegmentsMapShard shard5 = stream.getSealedSegmentSizeMapShard(5, context).join();
    assertTrue(shard5.getSealedSegmentsSizeMap().isEmpty());
    // 1 segment created in epoch 12, but nothing is sealed yet
    SealedSegmentsMapShard shard6 = stream.getSealedSegmentSizeMapShard(6, context).join();
    assertTrue(shard6.getSealedSegmentsSizeMap().isEmpty());
    // now seal all remaining active segments by scaling them down into a single segment
    EpochRecord activeEpoch = stream.getActiveEpoch(true, context).join();
    List<Map.Entry<Double, Double>> newRanges = new LinkedList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 1.0));
    Map<Long, Long> sealedSizeMap = new HashMap<>();
    activeEpoch.getSegments().forEach(x -> sealedSizeMap.put(x.segmentId(), 100L));
    scaleStream(stream, System.currentTimeMillis(), Lists.newArrayList(activeEpoch.getSegmentIds()), newRanges, sealedSizeMap);
    // 1 segment created in epoch 8 and 1 segment in epoch 9
    shard4 = stream.getSealedSegmentSizeMapShard(4, context).join();
    assertTrue(shard4.getSealedSegmentsSizeMap().keySet().stream().allMatch(x -> getEpoch(x) == 8 || getEpoch(x) == 9));
    assertEquals(1, shard4.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 8).count());
    assertEquals(1, shard4.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 9).count());
    // 1 segment created in epoch 10 and 1 segment in epoch 11
    shard5 = stream.getSealedSegmentSizeMapShard(5, context).join();
    assertTrue(shard5.getSealedSegmentsSizeMap().keySet().stream().allMatch(x -> getEpoch(x) == 10 || getEpoch(x) == 11));
    assertEquals(1, shard5.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 10).count());
    assertEquals(1, shard5.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 11).count());
    // 1 segment created in epoch 12
    shard6 = stream.getSealedSegmentSizeMapShard(6, context).join();
    assertTrue(shard6.getSealedSegmentsSizeMap().keySet().stream().allMatch(x -> getEpoch(x) == 12));
    assertEquals(1, shard6.getSealedSegmentsSizeMap().keySet().stream().filter(x -> getEpoch(x) == 12).count());
}
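The shard-to-epoch mapping walked through above is consistent with sealed-segment sizes being sharded by a segment's creation epoch, two epochs per shard in this test. A small arithmetic check under that assumed shard size:

int assumedShardSize = 2; // inferred from the assertions: shard0 covers epochs {0, 1}, shard6 covers {12}
assert 9 / assumedShardSize == 4;                              // segments created in epoch 9 land in shard 4
assert 6 / assumedShardSize == 3 && 7 / assumedShardSize == 3; // both rolling-transaction epochs map to shard 3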
Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
From class StreamTestBase, method testFindEpochAtTime:
/**
* Stream history.
* epoch0 = 0, 1, 2, 3, 4
* epoch1 = 5, 1, 2, 3, 4
* epoch2 = 5, 6, 2, 3, 4
* epoch3 = 5, 6, 7, 3, 4
* epoch4 = 5, 6, 7, 8, 4
* epoch5 = 5, 6, 7, 8, 9
* epoch6 = 0`, 1`, 2`, 3`, 4`
* epoch7 = 5`, 6`, 7`, 8`, 9`
* epoch8 = 10, 6`, 7`, 8`, 9`
* epoch9 = 10, 11, 7`, 8`, 9`
* epoch10 = 10, 11, 12, 8`, 9`
* epoch11 = 10, 11, 12, 13, 9`
* epoch12 = 10, 11, 12, 13, 14
*/
@Test(timeout = 30000L)
public void testFindEpochAtTime() {
    String scope = "findEpochsAtTime";
    String name = "findEpochsAtTime";
    AtomicLong timeFunc = new AtomicLong(100L);
    PersistentStreamBase stream = createScaleAndRollStreamForMultiChunkTests(name, scope, new Random().nextInt(2000), timeFunc::incrementAndGet);
    OperationContext context = getContext();
    // fetch all epochs (the returned list is not used by the assertions below)
    stream.fetchEpochs(0, 12, true, context).join();
    // times at or before the first epoch's creation time resolve to epoch 0
    int epoch = stream.findEpochAtTime(0L, true, context).join();
    assertEquals(0, epoch);
    epoch = stream.findEpochAtTime(101L, true, context).join();
    assertEquals(0, epoch);
    // epoch k is created at time 101 + k, so time 102 maps to epoch 1, ..., time 113 to epoch 12
    for (long time = 102; time <= 113; time++) {
        epoch = stream.findEpochAtTime(time, true, context).join();
        assertEquals((int) (time - 101), epoch);
    }
    // any time past the last epoch's creation time resolves to the active epoch (12)
    epoch = stream.findEpochAtTime(114L, true, context).join();
    assertEquals(12, epoch);
    epoch = stream.findEpochAtTime(1000L, true, context).join();
    assertEquals(12, epoch);
}
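With the supplied time function the stream is created at time 101 and each subsequent scale or rolling-transaction step consumes one tick, so epoch k is created at time 101 + k. The assertions then show findEpochAtTime behaving like a floor lookup over creation times, clamped to the active epoch. A hypothetical illustration of that semantics (not Pravega's implementation):

import java.util.NavigableMap;
import java.util.TreeMap;

NavigableMap<Long, Integer> creationTimeToEpoch = new TreeMap<>();
for (int e = 0; e <= 12; e++) {
    creationTimeToEpoch.put(101L + e, e); // assumption: one tick per epoch, as in this test
}
// floor semantics: the latest epoch created at or before the queried time
assert creationTimeToEpoch.floorEntry(110L).getValue() == 9;   // matches the assertion for time 110
assert creationTimeToEpoch.floorEntry(1000L).getValue() == 12; // times past the end clamp to the active epoch
// (times before the first creation time resolve to epoch 0 in the test, a case this sketch omits)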
Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
From class StreamTestBase, method testStreamCutsWithMultipleChunks:
/**
* Stream history.
* epoch0 = 0, 1, 2, 3, 4
* epoch1 = 5, 1, 2, 3, 4
* epoch2 = 5, 6, 2, 3, 4
* epoch3 = 5, 6, 7, 3, 4
* epoch4 = 5, 6, 7, 8, 4
* epoch5 = 5, 6, 7, 8, 9
* epoch6 = 0`, 1`, 2`, 3`, 4`
* epoch7 = 5`, 6`, 7`, 8`, 9`
* epoch8 = 10, 6`, 7`, 8`, 9`
* epoch9 = 10, 11, 7`, 8`, 9`
* epoch10 = 10, 11, 12, 8`, 9`
* epoch11 = 10, 11, 12, 13, 9`
* epoch12 = 10, 11, 12, 13, 14
*/
@Test(timeout = 30000L)
public void testStreamCutsWithMultipleChunks() {
    String scope = "streamCutTest";
    String name = "streamCutTest";
    int startingSegmentNumber = new Random().nextInt(2000);
    PersistentStreamBase stream = createScaleAndRollStreamForMultiChunkTests(name, scope, startingSegmentNumber, System::currentTimeMillis);
    OperationContext context = getContext();
    EpochRecord epoch0 = stream.getEpochRecord(0, context).join();   // 0, 1, 2, 3, 4
    EpochRecord epoch1 = stream.getEpochRecord(1, context).join();   // 5, 1, 2, 3, 4
    EpochRecord epoch2 = stream.getEpochRecord(2, context).join();   // 5, 6, 2, 3, 4
    EpochRecord epoch3 = stream.getEpochRecord(3, context).join();   // 5, 6, 7, 3, 4
    EpochRecord epoch4 = stream.getEpochRecord(4, context).join();   // 5, 6, 7, 8, 4
    EpochRecord epoch5 = stream.getEpochRecord(5, context).join();   // 5, 6, 7, 8, 9
    EpochRecord epoch6 = stream.getEpochRecord(6, context).join();   // 0`, 1`, 2`, 3`, 4`
    EpochRecord epoch7 = stream.getEpochRecord(7, context).join();   // 5`, 6`, 7`, 8`, 9`
    EpochRecord epoch8 = stream.getEpochRecord(8, context).join();   // 10, 6`, 7`, 8`, 9`
    EpochRecord epoch9 = stream.getEpochRecord(9, context).join();   // 10, 11, 7`, 8`, 9`
    EpochRecord epoch10 = stream.getEpochRecord(10, context).join(); // 10, 11, 12, 8`, 9`
    EpochRecord epoch11 = stream.getEpochRecord(11, context).join(); // 10, 11, 12, 13, 9`
    EpochRecord epoch12 = stream.getEpochRecord(12, context).join(); // 10, 11, 12, 13, 14
    List<Map.Entry<Double, Double>> keyRanges = epoch0.getSegments().stream()
            .map(x -> new AbstractMap.SimpleEntry<>(x.getKeyStart(), x.getKeyEnd()))
            .collect(Collectors.toList());
    // create streamCut1 from segments 0, 6, 7, 8, 9`
    HashMap<Long, Long> streamCut1 = new HashMap<>();
    // segment 0 from epoch 0, sealed in epoch 1
    streamCut1.put(epoch0.getSegments().stream().filter(x -> x.overlaps(keyRanges.get(0).getKey(), keyRanges.get(0).getValue())).findAny().get().segmentId(), 10L);
    // segment 6 from epoch 2, sealed in epoch 6
    streamCut1.put(epoch2.getSegments().stream().filter(x -> x.overlaps(keyRanges.get(1).getKey(), keyRanges.get(1).getValue())).findAny().get().segmentId(), 10L);
    // segment 7 from epoch 3, sealed in epoch 6
    streamCut1.put(epoch3.getSegments().stream().filter(x -> x.overlaps(keyRanges.get(2).getKey(), keyRanges.get(2).getValue())).findAny().get().segmentId(), 10L);
    // segment 8 from epoch 5, sealed in epoch 6
    streamCut1.put(epoch5.getSegments().stream().filter(x -> x.overlaps(keyRanges.get(3).getKey(), keyRanges.get(3).getValue())).findAny().get().segmentId(), 10L);
    // segment 9` from epoch 7, created in epoch 7
    streamCut1.put(epoch7.getSegments().stream().filter(x -> x.overlaps(keyRanges.get(4).getKey(), keyRanges.get(4).getValue())).findAny().get().segmentId(), 10L);
    Map<StreamSegmentRecord, Integer> span1 = stream.computeStreamCutSpan(streamCut1, context).join();
    assertEquals(0, span1.entrySet().stream().filter(x -> x.getKey().getSegmentNumber() == startingSegmentNumber + 0).findAny().get().getValue().intValue());
    assertEquals(5, span1.entrySet().stream().filter(x -> x.getKey().getSegmentNumber() == startingSegmentNumber + 6).findAny().get().getValue().intValue());
    assertEquals(5, span1.entrySet().stream().filter(x -> x.getKey().getSegmentNumber() == startingSegmentNumber + 7).findAny().get().getValue().intValue());
    assertEquals(5, span1.entrySet().stream().filter(x -> x.getKey().getSegmentNumber() == startingSegmentNumber + 8).findAny().get().getValue().intValue());
    assertEquals(7, span1.entrySet().stream().filter(x -> x.getKey().getSegmentNumber() == startingSegmentNumber + 9).findAny().get().getValue().intValue());
    // create streamCut2 from segments 5, 6`, 12, 8`, 14
    HashMap<Long, Long> streamCut2 = new HashMap<>();
    // segment 5 from epoch 1, sealed in epoch 6
    streamCut2.put(epoch1.getSegments().stream().filter(x -> x.overlaps(keyRanges.get(0).getKey(), keyRanges.get(0).getValue())).findAny().get().segmentId(), 10L);
    // segment 6` from epoch 7, sealed in epoch 9
    streamCut2.put(epoch7.getSegments().stream().filter(x -> x.overlaps(keyRanges.get(1).getKey(), keyRanges.get(1).getValue())).findAny().get().segmentId(), 10L);
    // segment 12 from epoch 10, never sealed
    streamCut2.put(epoch10.getSegments().stream().filter(x -> x.overlaps(keyRanges.get(2).getKey(), keyRanges.get(2).getValue())).findAny().get().segmentId(), 10L);
    // segment 8` from epoch 7, sealed in epoch 11
    streamCut2.put(epoch7.getSegments().stream().filter(x -> x.overlaps(keyRanges.get(3).getKey(), keyRanges.get(3).getValue())).findAny().get().segmentId(), 10L);
    // segment 14 from epoch 12, never sealed
    streamCut2.put(epoch12.getSegments().stream().filter(x -> x.overlaps(keyRanges.get(4).getKey(), keyRanges.get(4).getValue())).findAny().get().segmentId(), 10L);
    Map<StreamSegmentRecord, Integer> span2 = stream.computeStreamCutSpan(streamCut2, context).join();
    assertEquals(5, span2.entrySet().stream().filter(x -> x.getKey().getSegmentNumber() == startingSegmentNumber + 5).findAny().get().getValue().intValue());
    assertEquals(8, span2.entrySet().stream().filter(x -> x.getKey().getSegmentNumber() == startingSegmentNumber + 6).findAny().get().getValue().intValue());
    assertEquals(12, span2.entrySet().stream().filter(x -> x.getKey().getSegmentNumber() == startingSegmentNumber + 12).findAny().get().getValue().intValue());
    assertEquals(10, span2.entrySet().stream().filter(x -> x.getKey().getSegmentNumber() == startingSegmentNumber + 8).findAny().get().getValue().intValue());
    assertEquals(12, span2.entrySet().stream().filter(x -> x.getKey().getSegmentNumber() == startingSegmentNumber + 14).findAny().get().getValue().intValue());
    Set<StreamSegmentRecord> segmentsBetween = stream.segmentsBetweenStreamCutSpans(span1, span2, context).join();
    Set<Long> segmentIdsBetween = segmentsBetween.stream().map(x -> x.segmentId()).collect(Collectors.toSet());
    // streamCut1 = {0, 6, 7, 8, 9`} and streamCut2 = {5, 6`, 12, 8`, 14}; the 14 segments between them are
    // 0, 5, 6, 1`, 6`, 7, 2`, 7`, 12, 8, 3`, 8`, 9`, 14
    Set<Long> expected = new HashSet<>();
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 0, 0));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 5, 1));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 6, 2));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 1, 6));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 6, 7));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 7, 3));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 2, 6));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 7, 7));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 12, 10));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 8, 4));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 3, 6));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 8, 7));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 9, 7));
    expected.add(NameUtils.computeSegmentId(startingSegmentNumber + 14, 12));
    assertEquals(expected, segmentIdsBetween);
    // all sealed segments have size 100 and both cuts sit at offset 10 in each of their five segments,
    // so the expected size is 14 x 100 - 5 x 10 (before streamCut1) - 5 x 90 (after streamCut2) = 900
    long sizeBetween = stream.sizeBetweenStreamCuts(streamCut1, streamCut2, segmentsBetween, context).join();
    assertEquals(900L, sizeBetween);
}
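The final size assertion is plain segment arithmetic: the two cuts bound 14 segments, every sealed segment has size 100, and both cuts sit at offset 10 in each of their five segments. A worked check of the 900-byte figure:

long total = 14 * 100L;            // all 14 segments between the cuts, counted in full
long beforeCut1 = 5 * 10L;         // bytes below streamCut1's offsets are excluded
long afterCut2 = 5 * (100L - 10L); // bytes beyond streamCut2's offsets are excluded
assert total - beforeCut1 - afterCut2 == 900L;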
Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
From class StreamMetadataStoreTest, method txnOrderTest:
@Test(timeout = 30000)
public void txnOrderTest() throws Exception {
    final String scope = "txnOrder";
    final String stream = "txnOrder";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    long scaleTs = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.75, 1.0);
    List<Long> scale1SealedSegments = Collections.singletonList(1L);
    // create 3 transactions on epoch 0 --> tx00, tx01, tx02. Mark the first as committing and the
    // second as aborting; keep the third open. Add ordered entries for all three and verify that
    // they are present in the ordered set.
    UUID tx00 = store.generateTransactionId(scope, stream, null, executor).join();
    store.createTransaction(scope, stream, tx00, 100, 100, null, executor).get();
    UUID tx01 = store.generateTransactionId(scope, stream, null, executor).join();
    store.createTransaction(scope, stream, tx01, 100, 100, null, executor).get();
    UUID tx02 = store.generateTransactionId(scope, stream, null, executor).join();
    store.createTransaction(scope, stream, tx02, 100, 100, null, executor).get();
    // committing
    store.sealTransaction(scope, stream, tx00, true, Optional.empty(), "", Long.MIN_VALUE, null, executor).get();
    // aborting
    store.sealTransaction(scope, stream, tx01, false, Optional.empty(), "", Long.MIN_VALUE, null, executor).get();
    PersistentStreamBase streamObj = (PersistentStreamBase) ((AbstractStreamMetadataStore) store).getStream(scope, stream, null);
    OperationContext context = new StreamOperationContext(((AbstractStreamMetadataStore) store).getScope(scope, null), streamObj, 0L);
    // duplicate entry for tx00
    streamObj.addTxnToCommitOrder(tx00, context).join();
    // entry for the aborting transaction tx01
    streamObj.addTxnToCommitOrder(tx01, context).join();
    // entry for the open transaction tx02
    streamObj.addTxnToCommitOrder(tx02, context).join();
    Map<Long, UUID> positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(4, positions.size());
    assertEquals(positions.get(0L), tx00);
    assertEquals(positions.get(1L), tx00);
    assertEquals(positions.get(2L), tx01);
    assertEquals(positions.get(3L), tx02);
    // verify that when we retrieve transactions from the lowest epoch we get only tx00
    List<VersionedTransactionData> orderedRecords = streamObj.getOrderedCommittingTxnInLowestEpoch(100, context).join();
    assertEquals(1, orderedRecords.size());
    assertEquals(tx00, orderedRecords.get(0).getId());
    // verify that duplicates and stale entries are purged; entries for the open and committing transactions are retained
    positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(2, positions.size());
    assertEquals(positions.get(0L), tx00);
    assertEquals(positions.get(3L), tx02);
    // scale
    scale(scope, stream, scaleTs, Arrays.asList(segment2, segment3), scale1SealedSegments);
    // create 3 transactions on epoch 1 --> tx10, tx11, tx12, and set all three to committing
    UUID tx10 = store.generateTransactionId(scope, stream, null, executor).join();
    store.createTransaction(scope, stream, tx10, 100, 100, null, executor).get();
    UUID tx11 = store.generateTransactionId(scope, stream, null, executor).join();
    store.createTransaction(scope, stream, tx11, 100, 100, null, executor).get();
    UUID tx12 = store.generateTransactionId(scope, stream, null, executor).join();
    store.createTransaction(scope, stream, tx12, 100, 100, null, executor).get();
    store.sealTransaction(scope, stream, tx10, true, Optional.empty(), "", Long.MIN_VALUE, null, executor).get();
    store.sealTransaction(scope, stream, tx11, true, Optional.empty(), "", Long.MIN_VALUE, null, executor).get();
    store.sealTransaction(scope, stream, tx12, true, Optional.empty(), "", Long.MIN_VALUE, null, executor).get();
    // verify that we still get only tx00, since it belongs to the lowest epoch
    orderedRecords = streamObj.getOrderedCommittingTxnInLowestEpoch(100, context).join();
    assertEquals(1, orderedRecords.size());
    assertEquals(tx00, orderedRecords.get(0).getId());
    assertEquals(0L, orderedRecords.get(0).getCommitOrder().longValue());
    // verify that positions has the 3 new entries added, though
    positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(5, positions.size());
    assertEquals(positions.get(0L), tx00);
    assertEquals(positions.get(3L), tx02);
    assertEquals(positions.get(4L), tx10);
    assertEquals(positions.get(5L), tx11);
    assertEquals(positions.get(6L), tx12);
    VersionedMetadata<CommittingTransactionsRecord> record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
    // verify that after including transaction tx00 in the record, its reference is no longer kept in the orderer
    positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(4, positions.size());
    assertFalse(positions.containsKey(0L));
    assertEquals(positions.get(3L), tx02);
    assertEquals(positions.get(4L), tx10);
    assertEquals(positions.get(5L), tx11);
    assertEquals(positions.get(6L), tx12);
    store.setState(scope, stream, State.COMMITTING_TXN, null, executor).join();
    // verify that we need to perform a rolling transaction: the record is for epoch 0 while epoch 1 is active
    EpochRecord activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(0, record.getObject().getEpoch());
    assertEquals(1, activeEpoch.getEpoch());
    // also, the transactions to commit match the transactions in the lowest epoch
    assertEquals(record.getObject().getTransactionsToCommit(), orderedRecords.stream().map(x -> x.getId()).collect(Collectors.toList()));
    record = store.startRollingTxn(scope, stream, activeEpoch.getEpoch(), record, null, executor).join();
    store.rollingTxnCreateDuplicateEpochs(scope, stream, Collections.emptyMap(), System.currentTimeMillis(), record, null, executor).join();
    store.completeRollingTxn(scope, stream, Collections.emptyMap(), record, null, executor).join();
    store.completeCommitTransactions(scope, stream, record, null, executor, Collections.emptyMap()).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    // tx00 is now committed and the references for tx00 and the aborted tx01 have been purged;
    // getting the ordered list should return the epoch-1 transactions in the order in which commits were issued
    orderedRecords = streamObj.getOrderedCommittingTxnInLowestEpoch(100, context).join();
    assertEquals(3, orderedRecords.size());
    assertEquals(tx10, orderedRecords.get(0).getId());
    assertEquals(tx11, orderedRecords.get(1).getId());
    assertEquals(tx12, orderedRecords.get(2).getId());
    // verify that these transactions are still present in positions
    positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(4, positions.size());
    assertEquals(positions.get(3L), tx02);
    assertEquals(positions.get(4L), tx10);
    assertEquals(positions.get(5L), tx11);
    assertEquals(positions.get(6L), tx12);
    // issue the next round of commits, which will commit the transactions on epoch 1
    activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
    List<UUID> txnIdList = orderedRecords.stream().map(x -> x.getId()).collect(Collectors.toList());
    // verify that the order in the record is the same
    assertEquals(record.getObject().getTransactionsToCommit(), txnIdList);
    // verify that transactions included for commit are removed from positions
    positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(1, positions.size());
    assertEquals(positions.get(3L), tx02);
    store.setState(scope, stream, State.COMMITTING_TXN, null, executor).join();
    // commit the transactions on epoch 1
    store.completeCommitTransactions(scope, stream, record, null, executor, Collections.emptyMap()).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    // references for tx10, tx11 and tx12 should now be removed from the orderer
    orderedRecords = streamObj.getOrderedCommittingTxnInLowestEpoch(100, context).join();
    assertEquals(0, orderedRecords.size());
    // verify that only the reference to the open transaction tx02 is retained in positions
    positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(1, positions.size());
    assertEquals(positions.get(3L), tx02);
}
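A condensed trace of the orderer positions across this test, reconstructed from the assertions above (positions are monotonically increasing sequence numbers, values are transaction ids):

// after sealing tx00 for commit and explicitly ordering tx00 (duplicate), tx01 (aborted), tx02 (open):
//   {0: tx00, 1: tx00, 2: tx01, 3: tx02}
// after getOrderedCommittingTxnInLowestEpoch lazily purges the duplicate and aborted entries:
//   {0: tx00, 3: tx02}
// after the epoch-1 transactions are sealed for commit:
//   {0: tx00, 3: tx02, 4: tx10, 5: tx11, 6: tx12}
// after the first startCommitTransactions folds tx00 into the CommittingTransactionsRecord:
//   {3: tx02, 4: tx10, 5: tx11, 6: tx12}
// after the second startCommitTransactions picks up tx10, tx11, tx12:
//   {3: tx02}  // only the open transaction's reference survives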